Dataset columns: `metadata` (dict) and `text` (string, lengths 60 to 3.49M characters).

{
"source": "JorritWillaert/SudokuSolverFromImage",
"score": 4
}
#### File: SudokuSolverFromImage/digits_classification/model.py
```python
import torch
from torch import nn
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
class MNISTNet(nn.Module):
def __init__(self):
super(MNISTNet, self).__init__()
self.network = nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=32, kernel_size=(3, 3)),
nn.ReLU(),
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3)),
nn.ReLU(),
nn.Flatten(),
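            # 36864 = 64 channels * 24 * 24: two unpadded 3x3 convs shrink the 28x28 MNIST input to 24x24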
nn.Linear(36864, 1000),
nn.Linear(1000, 10)
)
def forward(self, x):
return self.network(x)
if __name__ == "__main__":
train_dataset = datasets.MNIST(root='dataset/', train=True, transform=transforms.ToTensor(), download=True)
train_dataloader = DataLoader(dataset=train_dataset, batch_size=64)
iterdata = iter(train_dataloader)
images, labels = next(iterdata)
net = MNISTNet()
    print(net(images).shape)  # Output shape = [N, 10]
```
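
The classifier above is only defined and shape-checked. As a quick reference, here is a minimal training sketch for it; this is an illustrative addition, not part of the original repository, and the optimizer, learning rate, and epoch count are arbitrary assumptions.

```python
# Minimal training sketch for MNISTNet (illustrative; hyperparameters are assumptions).
import torch
from torch import nn, optim

def train(model: nn.Module, loader, epochs: int = 1, lr: float = 1e-3) -> None:
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    criterion = nn.CrossEntropyLoss()               # expects the raw logits MNISTNet outputs
    optimizer = optim.Adam(model.parameters(), lr=lr)
    model.train()
    for epoch in range(epochs):
        for images, labels in loader:
            images, labels = images.to(device), labels.to(device)
            optimizer.zero_grad()
            loss = criterion(model(images), labels)
            loss.backward()
            optimizer.step()
        print(f"epoch {epoch}: last-batch loss = {loss.item():.4f}")

# Usage with the objects from the __main__ block above:
# train(net, train_dataloader, epochs=1)
```

Note that the network as written has no activation between its two `Linear` layers, so the classifier head is effectively a single affine map over the flattened convolutional features.
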
{
"source": "Jorropo/porth",
"score": 2
}
#### File: Jorropo/porth/porth.py
```python
import os
import sys
import subprocess
import shlex
from os import path
from typing import *
from enum import IntEnum, Enum, auto
from dataclasses import dataclass, field
from copy import copy
from time import sleep
import traceback
PORTH_EXT = '.porth'
X86_64_RET_STACK_CAP=8192
# TODO: INCLUDE_LIMIT should be probably customizable
INCLUDE_LIMIT=100
debug=False
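# Loc is a source location: (file path, row, column); diagnostics format it as "%s:%d:%d"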
Loc=Tuple[str, int, int]
class Keyword(Enum):
IF=auto()
IFSTAR=auto()
ELSE=auto()
END=auto()
WHILE=auto()
DO=auto()
INCLUDE=auto()
MEMORY=auto()
PROC=auto()
CONST=auto()
OFFSET=auto()
RESET=auto()
ASSERT=auto()
IN=auto()
BIKESHEDDER=auto()
class DataType(IntEnum):
INT=auto()
BOOL=auto()
PTR=auto()
assert len(DataType) == 3, "Exhaustive data type definition"
DATATYPE_BY_NAME: Dict[str, DataType] = {
"int" : DataType.INT ,
"bool": DataType.BOOL,
"ptr" : DataType.PTR ,
}
DATATYPE_NAMES: Dict[DataType, str] = {v: k for k, v in DATATYPE_BY_NAME.items()}
assert len(DataType) == 3, 'Exhaustive casts for all data types'
class Intrinsic(Enum):
PLUS=auto()
MINUS=auto()
MUL=auto()
# TODO: split divmod intrinsic into div and mod back
# It was never useful
DIVMOD=auto()
MAX=auto()
EQ=auto()
GT=auto()
LT=auto()
GE=auto()
LE=auto()
NE=auto()
SHR=auto()
SHL=auto()
OR=auto()
AND=auto()
NOT=auto()
PRINT=auto()
DUP=auto()
SWAP=auto()
DROP=auto()
OVER=auto()
ROT=auto()
LOAD8=auto()
STORE8=auto()
LOAD16=auto()
STORE16=auto()
LOAD32=auto()
STORE32=auto()
LOAD64=auto()
STORE64=auto()
CAST_PTR=auto()
CAST_INT=auto()
CAST_BOOL=auto()
ARGC=auto()
ARGV=auto()
ENVP=auto()
HERE=auto()
SYSCALL0=auto()
SYSCALL1=auto()
SYSCALL2=auto()
SYSCALL3=auto()
SYSCALL4=auto()
SYSCALL5=auto()
SYSCALL6=auto()
STOP=auto()
class OpType(Enum):
PUSH_INT=auto()
PUSH_PTR=auto()
PUSH_BOOL=auto()
PUSH_STR=auto()
PUSH_CSTR=auto()
PUSH_MEM=auto()
PUSH_LOCAL_MEM=auto()
INTRINSIC=auto()
IF=auto()
IFSTAR=auto()
ELSE=auto()
END=auto()
WHILE=auto()
DO=auto()
SKIP_PROC=auto()
PREP_PROC=auto()
RET=auto()
CALL=auto()
class TokenType(Enum):
WORD=auto()
INT=auto()
STR=auto()
CSTR=auto()
CHAR=auto()
KEYWORD=auto()
assert len(TokenType) == 6, "Exhaustive Token type definition. The `value` field of the Token dataclass may require an update"
@dataclass
class Token:
typ: TokenType
text: str
loc: Loc
value: Union[int, str, Keyword]
OpAddr=int
MemAddr=int
@dataclass
class Op:
typ: OpType
token: Token
operand: Optional[Union[int, str, Intrinsic, OpAddr]] = None
@dataclass
class Program:
ops: List[Op] = field(default_factory=list)
memory_capacity: int = 0
SimPtr=int
def get_cstr_from_mem(mem: bytearray, ptr: SimPtr) -> bytes:
end = ptr
while mem[end] != 0:
end += 1
return mem[ptr:end]
def get_cstr_list_from_mem(mem: bytearray, ptr: SimPtr) -> List[str]:
result = []
while deref_u64(mem, ptr) != 0:
result.append(get_cstr_from_mem(mem, deref_u64(mem, ptr)).decode('utf-8'))
ptr += 8
return result
def deref_u64(mem: bytearray, ptr: SimPtr) -> int:
return int.from_bytes(mem[ptr:ptr+8], byteorder='little')
def mem_alloc(mem: bytearray, size: int) -> SimPtr:
result = len(mem)
mem += bytearray(size)
return result
@dataclass
class SimBuffer:
start: SimPtr
capacity: int
size: int = 0
def sim_buffer_append(mem: bytearray, sim_buf: SimBuffer, data: bytes) -> SimPtr:
size = len(data)
assert sim_buf.size + size <= sim_buf.capacity, "Simulated buffer overflow"
ptr = sim_buf.start + sim_buf.size
mem[ptr:ptr+size] = data
sim_buf.size += size
return ptr
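# Illustration (comments only, not executed): how the simulated-memory helpers compose.
#   mem = bytearray(1)                                     # offset 0 stays an "invalid" address
#   buf = SimBuffer(start=mem_alloc(mem, 16), capacity=16) # reserve 16 bytes, remember where they start
#   p = sim_buffer_append(mem, buf, b"hi\0")               # copy the bytes in, get their offset back
#   get_cstr_from_mem(mem, p)                              # => b"hi"
#   deref_u64(mem, q)                                      # read 8 little-endian bytes at offset q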
SIM_STR_CAPACITY = 640_000
SIM_ARGV_CAPACITY = 640_000
SIM_ENVP_CAPACITY = 640_000
SIM_LOCAL_MEMORY_CAPACITY = 640_000
def simulate_little_endian_linux(program: Program, argv: List[str]):
AT_FDCWD=-100
O_RDONLY=0
ENOENT=2
CLOCK_MONOTONIC=1
stack: List[int] = []
# TODO: I think ret_stack should be located in the local memory just like on x86_64
ret_stack: List[OpAddr] = []
mem = bytearray(1) # NOTE: 1 is just a little bit of a padding at the beginning of the memory to make 0 an "invalid" address
str_buf = SimBuffer(start=mem_alloc(mem, SIM_STR_CAPACITY), capacity=SIM_STR_CAPACITY)
str_ptrs: Dict[int, int] = {}
argv_buf = SimBuffer(start=mem_alloc(mem, SIM_ARGV_CAPACITY), capacity=SIM_ARGV_CAPACITY)
argc = 0
envp_buf = SimBuffer(start=mem_alloc(mem, SIM_ENVP_CAPACITY), capacity=SIM_ENVP_CAPACITY)
local_memory_ptr = mem_alloc(mem, SIM_LOCAL_MEMORY_CAPACITY)
local_memory_rsp = local_memory_ptr + SIM_LOCAL_MEMORY_CAPACITY
mem_buf_ptr = mem_alloc(mem, program.memory_capacity)
for arg in argv:
arg_ptr = sim_buffer_append(mem, str_buf, arg.encode('utf-8') + b'\0')
sim_buffer_append(mem, argv_buf, arg_ptr.to_bytes(8, byteorder='little'))
argc += 1
sim_buffer_append(mem, argv_buf, (0).to_bytes(8, byteorder='little'))
for k, v in os.environ.items():
env_ptr = sim_buffer_append(mem, str_buf, f'{k}={v}'.encode('utf-8') + b'\0')
sim_buffer_append(mem, envp_buf, env_ptr.to_bytes(8, byteorder='little'))
sim_buffer_append(mem, envp_buf, (0).to_bytes(8, byteorder='little'))
fds: List[BinaryIO] = [sys.stdin.buffer, sys.stdout.buffer, sys.stderr.buffer]
ip = 0
while ip < len(program.ops):
assert len(OpType) == 18, "Exhaustive op handling in simulate_little_endian_linux"
op = program.ops[ip]
try:
if op.typ in [OpType.PUSH_INT, OpType.PUSH_BOOL, OpType.PUSH_PTR]:
assert isinstance(op.operand, int), "This could be a bug in the parsing step"
stack.append(op.operand)
ip += 1
elif op.typ == OpType.PUSH_STR:
assert isinstance(op.operand, str), "This could be a bug in the parsing step"
value = op.operand.encode('utf-8')
stack.append(len(value))
if ip not in str_ptrs:
str_ptrs[ip] = sim_buffer_append(mem, str_buf, value)
stack.append(str_ptrs[ip])
ip += 1
elif op.typ == OpType.PUSH_CSTR:
assert isinstance(op.operand, str), "This could be a bug in the parsing step"
if ip not in str_ptrs:
value = op.operand.encode('utf-8') + b'\0'
str_ptrs[ip] = sim_buffer_append(mem, str_buf, value)
stack.append(str_ptrs[ip])
ip += 1
elif op.typ == OpType.PUSH_MEM:
assert isinstance(op.operand, MemAddr), "This could be a bug in the parsing step"
stack.append(mem_buf_ptr + op.operand)
ip += 1
elif op.typ == OpType.PUSH_LOCAL_MEM:
assert isinstance(op.operand, MemAddr)
stack.append(local_memory_rsp + op.operand)
ip += 1
elif op.typ in [OpType.IF, OpType.IFSTAR]:
a = stack.pop()
if a == 0:
assert isinstance(op.operand, OpAddr), "This could be a bug in the parsing step"
ip = op.operand
else:
ip += 1
elif op.typ == OpType.WHILE:
ip += 1
elif op.typ == OpType.ELSE:
assert isinstance(op.operand, OpAddr), "This could be a bug in the parsing step"
ip = op.operand
elif op.typ == OpType.END:
assert isinstance(op.operand, OpAddr), "This could be a bug in the parsing step"
ip = op.operand
elif op.typ == OpType.DO:
a = stack.pop()
if a == 0:
assert isinstance(op.operand, OpAddr), "This could be a bug in the parsing step"
ip = op.operand
else:
ip += 1
elif op.typ == OpType.SKIP_PROC:
assert isinstance(op.operand, OpAddr), "This could be a bug in the parsing step"
ip = op.operand
elif op.typ == OpType.PREP_PROC:
assert isinstance(op.operand, int)
local_memory_rsp -= op.operand
ip += 1
elif op.typ == OpType.RET:
assert isinstance(op.operand, int)
local_memory_rsp += op.operand
ip = ret_stack.pop()
elif op.typ == OpType.CALL:
assert isinstance(op.operand, OpAddr), "This could be a bug in the parsing step"
ret_stack.append(ip + 1)
ip = op.operand
elif op.typ == OpType.INTRINSIC:
assert len(Intrinsic) == 45, "Exhaustive handling of intrinsic in simulate_little_endian_linux()"
if op.operand == Intrinsic.PLUS:
a = stack.pop()
b = stack.pop()
stack.append(a + b)
ip += 1
elif op.operand == Intrinsic.MINUS:
a = stack.pop()
b = stack.pop()
stack.append(b - a)
ip += 1
elif op.operand == Intrinsic.MUL:
a = stack.pop()
b = stack.pop()
stack.append(b * a)
ip += 1
elif op.operand == Intrinsic.DIVMOD:
a = stack.pop()
b = stack.pop()
stack.append(b // a)
stack.append(b % a)
ip += 1
elif op.operand == Intrinsic.MAX:
a = stack.pop()
b = stack.pop()
stack.append(max(a, b))
ip += 1
elif op.operand == Intrinsic.EQ:
a = stack.pop()
b = stack.pop()
stack.append(int(a == b))
ip += 1
elif op.operand == Intrinsic.GT:
a = stack.pop()
b = stack.pop()
stack.append(int(b > a))
ip += 1
elif op.operand == Intrinsic.LT:
a = stack.pop()
b = stack.pop()
stack.append(int(b < a))
ip += 1
elif op.operand == Intrinsic.GE:
a = stack.pop()
b = stack.pop()
stack.append(int(b >= a))
ip += 1
elif op.operand == Intrinsic.LE:
a = stack.pop()
b = stack.pop()
stack.append(int(b <= a))
ip += 1
elif op.operand == Intrinsic.NE:
a = stack.pop()
b = stack.pop()
stack.append(int(b != a))
ip += 1
elif op.operand == Intrinsic.SHR:
a = stack.pop()
b = stack.pop()
stack.append(int(b >> a))
ip += 1
elif op.operand == Intrinsic.SHL:
a = stack.pop()
b = stack.pop()
stack.append(int(b << a))
ip += 1
elif op.operand == Intrinsic.OR:
a = stack.pop()
b = stack.pop()
stack.append(int(a | b))
ip += 1
elif op.operand == Intrinsic.AND:
a = stack.pop()
b = stack.pop()
stack.append(int(a & b))
ip += 1
elif op.operand == Intrinsic.NOT:
a = stack.pop()
stack.append(int(~a))
ip += 1
elif op.operand == Intrinsic.PRINT:
a = stack.pop()
fds[1].write(b"%d\n" % a)
fds[1].flush()
ip += 1
elif op.operand == Intrinsic.DUP:
a = stack.pop()
stack.append(a)
stack.append(a)
ip += 1
elif op.operand == Intrinsic.SWAP:
a = stack.pop()
b = stack.pop()
stack.append(a)
stack.append(b)
ip += 1
elif op.operand == Intrinsic.DROP:
stack.pop()
ip += 1
elif op.operand == Intrinsic.OVER:
a = stack.pop()
b = stack.pop()
stack.append(b)
stack.append(a)
stack.append(b)
ip += 1
elif op.operand == Intrinsic.ROT:
a = stack.pop()
b = stack.pop()
c = stack.pop()
stack.append(b)
stack.append(a)
stack.append(c)
ip += 1
elif op.operand == Intrinsic.LOAD8:
addr = stack.pop()
byte = mem[addr]
stack.append(byte)
ip += 1
elif op.operand == Intrinsic.STORE8:
store_addr = stack.pop()
store_value = stack.pop()
mem[store_addr] = store_value & 0xFF
ip += 1
elif op.operand == Intrinsic.LOAD16:
load_addr = stack.pop()
stack.append(int.from_bytes(mem[load_addr:load_addr+2], byteorder="little"))
ip += 1
elif op.operand == Intrinsic.STORE16:
store_addr = stack.pop();
store_value = stack.pop()
mem[store_addr:store_addr+2] = store_value.to_bytes(length=2, byteorder="little", signed=(store_value < 0));
ip += 1
elif op.operand == Intrinsic.LOAD32:
load_addr = stack.pop()
stack.append(int.from_bytes(mem[load_addr:load_addr+4], byteorder="little"))
ip += 1
elif op.operand == Intrinsic.STORE32:
store_addr = stack.pop();
store_value = stack.pop()
mem[store_addr:store_addr+4] = store_value.to_bytes(length=4, byteorder="little", signed=(store_value < 0));
ip += 1
elif op.operand == Intrinsic.LOAD64:
load_addr = stack.pop()
stack.append(int.from_bytes(mem[load_addr:load_addr+8], byteorder="little"))
ip += 1
elif op.operand == Intrinsic.STORE64:
store_addr = stack.pop();
store_value = stack.pop()
mem[store_addr:store_addr+8] = store_value.to_bytes(length=8, byteorder="little", signed=(store_value < 0));
ip += 1
elif op.operand == Intrinsic.ARGC:
stack.append(argc)
ip += 1
elif op.operand == Intrinsic.ARGV:
stack.append(argv_buf.start)
ip += 1
elif op.operand == Intrinsic.ENVP:
stack.append(envp_buf.start)
ip += 1
elif op.operand == Intrinsic.HERE:
value = ("%s:%d:%d" % op.token.loc).encode('utf-8')
stack.append(len(value))
if ip not in str_ptrs:
str_ptrs[ip] = sim_buffer_append(mem, str_buf, value)
stack.append(str_ptrs[ip])
ip += 1
elif op.operand == Intrinsic.CAST_PTR:
# Ignore the type casting. It's only useful for type_check_program() phase
ip += 1
elif op.operand == Intrinsic.CAST_BOOL:
# Ignore the type casting. It's only useful for type_check_program() phase
ip += 1
elif op.operand == Intrinsic.CAST_INT:
# Ignore the type casting. It's only useful for type_check_program() phase
ip += 1
elif op.operand == Intrinsic.SYSCALL0:
syscall_number = stack.pop();
if syscall_number == 39: # SYS_getpid
stack.append(os.getpid());
else:
assert False, "unknown syscall number %d" % syscall_number
ip += 1
elif op.operand == Intrinsic.SYSCALL1:
syscall_number = stack.pop()
arg1 = stack.pop()
if syscall_number == 60: # SYS_exit
exit(arg1)
elif syscall_number == 3: # SYS_close
fds[arg1].close()
stack.append(0)
else:
assert False, "unknown syscall number %d" % syscall_number
ip += 1
elif op.operand == Intrinsic.SYSCALL2:
assert False, "not implemented"
elif op.operand == Intrinsic.SYSCALL3:
syscall_number = stack.pop()
arg1 = stack.pop()
arg2 = stack.pop()
arg3 = stack.pop()
if syscall_number == 0: # SYS_read
fd = arg1
buf = arg2
count = arg3
# NOTE: trying to behave like a POSIX tty in canonical mode by making the data available
# on each newline
# https://en.wikipedia.org/wiki/POSIX_terminal_interface#Canonical_mode_processing
data = fds[fd].readline(count)
mem[buf:buf+len(data)] = data
stack.append(len(data))
elif syscall_number == 1: # SYS_write
fd = arg1
buf = arg2
count = arg3
fds[fd].write(mem[buf:buf+count])
fds[fd].flush()
stack.append(count)
elif syscall_number == 59: # SYS_execve
execve_path = get_cstr_from_mem(mem, arg1).decode('utf-8')
execve_argv = get_cstr_list_from_mem(mem, arg2)
                        execve_envp = { k: v for s in get_cstr_list_from_mem(mem, arg3) for (k, v) in (s.split('=', 1), ) }  # split on the first '=' only: env values may themselves contain '='
try:
os.execve(execve_path, execve_argv, execve_envp)
except FileNotFoundError:
stack.append(-ENOENT)
else:
assert False, "unknown syscall number %d" % syscall_number
ip += 1
elif op.operand == Intrinsic.SYSCALL4:
syscall_number = stack.pop()
arg1 = stack.pop()
arg2 = stack.pop()
arg3 = stack.pop()
arg4 = stack.pop()
if syscall_number == 230: # clock_nanosleep
clock_id = arg1
flags = arg2
request_ptr = arg3
remain_ptr = arg4
assert clock_id == CLOCK_MONOTONIC, "Only CLOCK_MONOTONIC is implemented for SYS_clock_nanosleep"
assert flags == 0, "Only relative time is supported for SYS_clock_nanosleep"
assert request_ptr != 0, "request cannot be NULL for SYS_clock_nanosleep. We should probably return -1 in that case..."
assert remain_ptr == 0, "remain is not supported for SYS_clock_nanosleep"
seconds = int.from_bytes(mem[request_ptr:request_ptr+8], byteorder='little')
nano_seconds = int.from_bytes(mem[request_ptr+8:request_ptr+8+8], byteorder='little')
sleep(float(seconds)+float(nano_seconds)*1e-09)
stack.append(0)
elif syscall_number == 257: # SYS_openat
dirfd = arg1
pathname_ptr = arg2
flags = arg3
mode = arg4
if dirfd != AT_FDCWD:
assert False, f"openat: unsupported dirfd: {dirfd}"
if flags != O_RDONLY:
assert False, f"openat: unsupported flags: {flags}"
if mode != 0:
assert False, f"openat: unsupported mode: {mode}"
pathname = get_cstr_from_mem(mem, pathname_ptr).decode('utf-8')
fd = len(fds)
try:
fds.append(open(pathname, 'rb'))
stack.append(fd)
except FileNotFoundError:
stack.append(-ENOENT)
else:
assert False, "unknown syscall number %d" % syscall_number
ip += 1
elif op.operand == Intrinsic.SYSCALL5:
assert False, "not implemented"
elif op.operand == Intrinsic.SYSCALL6:
assert False, "not implemented"
elif op.operand == Intrinsic.STOP:
pass
else:
assert False, "unreachable"
else:
assert False, "unreachable"
except Exception as e:
compiler_error(op.token.loc, "Python Exception during simulation")
traceback.print_exception(type(e), e, e.__traceback__)
exit(1)
def compiler_diagnostic(loc: Loc, tag: str, message: str):
print("%s:%d:%d: %s: %s" % (loc + (tag, message)), file=sys.stderr)
def compiler_error(loc: Loc, message: str):
compiler_diagnostic(loc, 'ERROR', message)
def compiler_note(loc: Loc, message: str):
compiler_diagnostic(loc, 'NOTE', message)
def not_enough_arguments(op: Op):
assert len(OpType) == 18, f"Exhaustive handling of Op types in not_enough_arguments() (expected {len(OpType)}). Keep in mind that not all of the ops should be handled in here. Only those that consume elements from the stack."
if op.typ == OpType.INTRINSIC:
assert isinstance(op.operand, Intrinsic)
compiler_error(op.token.loc, "not enough arguments for the `%s` intrinsic" % INTRINSIC_NAMES[op.operand])
elif op.typ == OpType.DO:
compiler_error(op.token.loc, "not enough arguments for the do-block")
else:
assert False, "unsupported type of operation"
DataStack=List[Tuple[Union[DataType, str], Loc]]
@dataclass
class Context:
stack: DataStack
ip: OpAddr
outs: List[Tuple[Union[DataType, str], Loc]]
@dataclass
class Contract:
ins: Sequence[Tuple[Union[DataType, str], Loc]]
outs: Sequence[Tuple[Union[DataType, str], Loc]]
@dataclass
class CompilerMessage:
loc: Loc
label: str
text: str
def human_type_name(typ: Union[DataType, str]) -> str:
if isinstance(typ, DataType):
return f"type `{DATATYPE_NAMES[typ]}`"
elif isinstance(typ, str):
return f"generic type {repr(typ)}"
else:
assert False, "unreachable"
MessageGroup=List[CompilerMessage]
def type_check_contracts(intro_token: Token, ctx: Context, contracts: List[Contract]):
log: List[MessageGroup] = []
for contract in contracts:
ins = list(contract.ins)
stack = copy(ctx.stack)
error = False
generics: Dict[str, Union[DataType, str]] = {}
arg_count = 0
while len(stack) > 0 and len(ins) > 0:
actual, actual_loc = stack.pop()
expected, expected_loc = ins.pop()
if isinstance(expected, DataType):
if actual != expected:
error = True
log.append([CompilerMessage(loc=intro_token.loc, label="ERROR", text=f"Argument {arg_count} of `{intro_token.text}` is expected to be {human_type_name(expected)} but got {human_type_name(actual)}"),
CompilerMessage(loc=actual_loc, label="NOTE", text=f"Argument {arg_count} was provided here"),
CompilerMessage(loc=expected_loc, label="NOTE", text=f"Expected type was declared here")])
break;
elif isinstance(expected, str):
if expected in generics:
if actual != generics[expected]:
error = True
log.append([CompilerMessage(loc=intro_token.loc, label="ERROR", text=f"Argument {arg_count} of `{intro_token.text}` is expected to be {human_type_name(generics[expected])} but got {human_type_name(actual)}"),
CompilerMessage(loc=actual_loc, label="NOTE", text=f"Argument {arg_count} was provided here"),
CompilerMessage(loc=expected_loc, label="NOTE", text=f"Expected type was declared here")])
break;
else:
generics[expected] = actual
else:
assert False, "unreachable"
arg_count += 1
if error:
continue
if len(stack) < len(ins):
group = []
group.append(CompilerMessage(loc=intro_token.loc, label="ERROR", text=f"Not enough arguments provided for `{intro_token.value}`. Expected {len(contract.ins)} but got {arg_count}."))
group.append(CompilerMessage(loc=intro_token.loc, label="NOTE", text=f"Not provided arguments:"))
while len(ins) > 0:
typ, loc = ins.pop()
if isinstance(typ, DataType):
group.append(CompilerMessage(loc=loc, label="NOTE", text=f"{DATATYPE_NAMES[typ]}"))
elif isinstance(typ, str):
if typ in generics:
group.append(CompilerMessage(loc=loc, label="NOTE", text=human_type_name(generics[typ])))
else:
group.append(CompilerMessage(loc=loc, label="NOTE", text=human_type_name(typ)))
else:
assert False, "unreachable"
log.append(group)
continue
for typ, loc in contract.outs:
if isinstance(typ, DataType):
stack.append((typ, intro_token.loc))
elif isinstance(typ, str):
if typ in generics:
stack.append((generics[typ], intro_token.loc))
else:
# log.append([CompilerMessage(loc=loc, label="ERROR", text=f"Unknown generic {repr(typ)} in output parameters. All generics should appear at least once in input parameters first.")])
# continue
assert False, "Unreachable. Such function won't compile in the first place since you can't produce an instance of a generic type at the moment"
else:
assert False, "unreachable"
ctx.stack = stack
return
for group in log:
for msg in group:
compiler_diagnostic(msg.loc, msg.label, msg.text)
print(file=sys.stderr)
exit(1)
def type_check_context_outs(ctx: Context):
while len(ctx.stack) > 0 and len(ctx.outs) > 0:
actual_typ, actual_loc = ctx.stack.pop()
expected_typ, expected_loc = ctx.outs.pop()
if expected_typ != actual_typ:
compiler_error(actual_loc, f"Unexpected {human_type_name(actual_typ)} on the stack")
compiler_note(expected_loc, f"Expected {human_type_name(expected_typ)}")
exit(1)
if len(ctx.stack) > len(ctx.outs):
top_typ, top_loc = ctx.stack.pop()
compiler_error(top_loc, f"Unhandled data on the stack:")
compiler_note(top_loc, f"{human_type_name(top_typ)}")
while len(ctx.stack) > 0:
typ, loc = ctx.stack.pop()
compiler_note(loc, f"{human_type_name(typ)}")
exit(1)
elif len(ctx.stack) < len(ctx.outs):
top_typ, top_loc = ctx.outs.pop()
compiler_error(top_loc, f"Insufficient data on the stack. Expected:")
compiler_note(top_loc, f"{human_type_name(top_typ)}")
while len(ctx.outs) > 0:
typ, loc = ctx.outs.pop()
compiler_note(loc, f"and {human_type_name(typ)}")
exit(1)
def type_check_program(program: Program, proc_contracts: Dict[OpAddr, Contract]):
visited_dos: Dict[OpAddr, DataStack] = {}
contexts: List[Context] = [Context(stack=[], ip=0, outs=[])]
for proc_addr, proc_contract in reversed(list(proc_contracts.items())):
contexts.append(Context(
stack=list(proc_contract.ins),
ip=proc_addr,
outs=list(proc_contract.outs)
))
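    # Worklist of symbolic-execution contexts: every branch point below (IF, IFSTAR, DO) clones
    # the current stack into a new Context so both paths are checked, and visited_dos verifies
    # that a loop leaves the stack shape unchanged between iterations.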
while len(contexts) > 0:
ctx = contexts[-1];
if ctx.ip >= len(program.ops):
type_check_context_outs(ctx)
contexts.pop()
continue
op = program.ops[ctx.ip]
assert len(OpType) == 18, "Exhaustive ops handling in type_check_program()"
if op.typ == OpType.PUSH_INT:
ctx.stack.append((DataType.INT, op.token.loc))
ctx.ip += 1
elif op.typ == OpType.PUSH_BOOL:
ctx.stack.append((DataType.BOOL, op.token.loc))
ctx.ip += 1
elif op.typ == OpType.PUSH_PTR:
ctx.stack.append((DataType.PTR, op.token.loc))
ctx.ip += 1
elif op.typ == OpType.PUSH_STR:
ctx.stack.append((DataType.INT, op.token.loc))
ctx.stack.append((DataType.PTR, op.token.loc))
ctx.ip += 1
elif op.typ == OpType.PUSH_CSTR:
ctx.stack.append((DataType.PTR, op.token.loc))
ctx.ip += 1
elif op.typ == OpType.PUSH_MEM:
ctx.stack.append((DataType.PTR, op.token.loc))
ctx.ip += 1
elif op.typ == OpType.PUSH_LOCAL_MEM:
ctx.stack.append((DataType.PTR, op.token.loc))
ctx.ip += 1
elif op.typ == OpType.SKIP_PROC:
assert isinstance(op.operand, OpAddr)
ctx.ip = op.operand
elif op.typ == OpType.PREP_PROC:
ctx.ip += 1
elif op.typ == OpType.CALL:
assert isinstance(op.operand, OpAddr)
type_check_contracts(op.token, ctx, [proc_contracts[op.operand]])
ctx.ip += 1
elif op.typ == OpType.RET:
type_check_context_outs(ctx)
contexts.pop()
elif op.typ == OpType.INTRINSIC:
assert len(Intrinsic) == 45, "Exhaustive intrinsic handling in type_check_program()"
assert isinstance(op.operand, Intrinsic), "This could be a bug in compilation step"
if op.operand == Intrinsic.PLUS:
type_check_contracts(op.token, ctx, [
Contract(ins=[(DataType.INT, op.token.loc), (DataType.INT, op.token.loc)], outs=[(DataType.INT, op.token.loc)]),
Contract(ins=[(DataType.PTR, op.token.loc), (DataType.INT, op.token.loc)], outs=[(DataType.PTR, op.token.loc)]),
Contract(ins=[(DataType.INT, op.token.loc), (DataType.PTR, op.token.loc)], outs=[(DataType.PTR, op.token.loc)]),
])
elif op.operand == Intrinsic.MINUS:
type_check_contracts(op.token, ctx, [
Contract(ins=[(DataType.INT, op.token.loc), (DataType.INT, op.token.loc)], outs=[(DataType.INT, op.token.loc)]),
Contract(ins=[(DataType.PTR, op.token.loc), (DataType.INT, op.token.loc)], outs=[(DataType.PTR, op.token.loc)]),
Contract(ins=[(DataType.PTR, op.token.loc), (DataType.PTR, op.token.loc)], outs=[(DataType.INT, op.token.loc)]),
])
elif op.operand == Intrinsic.MUL:
type_check_contracts(op.token, ctx, [
Contract(ins=[(DataType.INT, op.token.loc), (DataType.INT, op.token.loc)], outs=[(DataType.INT, op.token.loc)])
])
elif op.operand == Intrinsic.DIVMOD:
type_check_contracts(op.token, ctx, [
Contract(ins=[(DataType.INT, op.token.loc), (DataType.INT, op.token.loc)], outs=[(DataType.INT, op.token.loc), (DataType.INT, op.token.loc)])
])
elif op.operand == Intrinsic.MAX:
type_check_contracts(op.token, ctx, [
Contract(ins=[(DataType.INT, op.token.loc), (DataType.INT, op.token.loc)], outs=[(DataType.INT, op.token.loc)])
])
elif op.operand == Intrinsic.EQ:
type_check_contracts(op.token, ctx, [
Contract(ins=[(DataType.INT, op.token.loc), (DataType.INT, op.token.loc)], outs=[(DataType.BOOL, op.token.loc)]),
Contract(ins=[(DataType.BOOL, op.token.loc), (DataType.BOOL, op.token.loc)], outs=[(DataType.BOOL, op.token.loc)]),
Contract(ins=[(DataType.PTR, op.token.loc), (DataType.PTR, op.token.loc)], outs=[(DataType.BOOL, op.token.loc)]),
])
elif op.operand == Intrinsic.GT:
type_check_contracts(op.token, ctx, [
Contract(ins=[(DataType.INT, op.token.loc), (DataType.INT, op.token.loc)], outs=[(DataType.BOOL, op.token.loc)]),
Contract(ins=[(DataType.PTR, op.token.loc), (DataType.PTR, op.token.loc)], outs=[(DataType.BOOL, op.token.loc)]),
])
elif op.operand == Intrinsic.LT:
type_check_contracts(op.token, ctx, [
Contract(ins=[(DataType.INT, op.token.loc), (DataType.INT, op.token.loc)], outs=[(DataType.BOOL, op.token.loc)]),
Contract(ins=[(DataType.PTR, op.token.loc), (DataType.PTR, op.token.loc)], outs=[(DataType.BOOL, op.token.loc)]),
])
elif op.operand == Intrinsic.GE:
type_check_contracts(op.token, ctx, [
Contract(ins=[(DataType.INT, op.token.loc), (DataType.INT, op.token.loc)], outs=[(DataType.BOOL, op.token.loc)]),
Contract(ins=[(DataType.PTR, op.token.loc), (DataType.PTR, op.token.loc)], outs=[(DataType.BOOL, op.token.loc)]),
])
elif op.operand == Intrinsic.LE:
type_check_contracts(op.token, ctx, [
Contract(ins=[(DataType.INT, op.token.loc), (DataType.INT, op.token.loc)], outs=[(DataType.BOOL, op.token.loc)]),
Contract(ins=[(DataType.PTR, op.token.loc), (DataType.PTR, op.token.loc)], outs=[(DataType.BOOL, op.token.loc)]),
])
elif op.operand == Intrinsic.NE:
type_check_contracts(op.token, ctx, [
Contract(ins=[(DataType.INT, op.token.loc), (DataType.INT, op.token.loc)], outs=[(DataType.BOOL, op.token.loc)]),
Contract(ins=[(DataType.BOOL, op.token.loc), (DataType.BOOL, op.token.loc)], outs=[(DataType.BOOL, op.token.loc)]),
Contract(ins=[(DataType.PTR, op.token.loc), (DataType.PTR, op.token.loc)], outs=[(DataType.BOOL, op.token.loc)]),
])
elif op.operand == Intrinsic.SHR:
type_check_contracts(op.token, ctx, [
Contract(ins=[(DataType.INT, op.token.loc), (DataType.INT, op.token.loc)], outs=[(DataType.INT, op.token.loc)])
])
elif op.operand == Intrinsic.SHL:
type_check_contracts(op.token, ctx, [
Contract(ins=[(DataType.INT, op.token.loc), (DataType.INT, op.token.loc)], outs=[(DataType.INT, op.token.loc)])
])
elif op.operand == Intrinsic.OR:
type_check_contracts(op.token, ctx, [
Contract(ins=[(DataType.INT, op.token.loc), (DataType.INT, op.token.loc)], outs=[(DataType.INT, op.token.loc)]),
Contract(ins=[(DataType.BOOL, op.token.loc), (DataType.BOOL, op.token.loc)], outs=[(DataType.BOOL, op.token.loc)]),
])
elif op.operand == Intrinsic.AND:
type_check_contracts(op.token, ctx, [
Contract(ins=[(DataType.INT, op.token.loc), (DataType.INT, op.token.loc)], outs=[(DataType.INT, op.token.loc)]),
Contract(ins=[(DataType.BOOL, op.token.loc), (DataType.BOOL, op.token.loc)], outs=[(DataType.BOOL, op.token.loc)]),
])
elif op.operand == Intrinsic.NOT:
type_check_contracts(op.token, ctx, [
Contract(ins=[(DataType.INT, op.token.loc)], outs=[(DataType.INT, op.token.loc)])
])
elif op.operand == Intrinsic.PRINT:
type_check_contracts(op.token, ctx, [
Contract(ins=[("a", op.token.loc)], outs=[])
])
elif op.operand == Intrinsic.DUP:
type_check_contracts(op.token, ctx, [
Contract(ins=[("a", op.token.loc)], outs=[("a", op.token.loc), ("a", op.token.loc)])
])
elif op.operand == Intrinsic.SWAP:
type_check_contracts(op.token, ctx, [
Contract(ins=[("a", op.token.loc), ("b", op.token.loc)], outs=[("b", op.token.loc), ("a", op.token.loc)])
])
elif op.operand == Intrinsic.DROP:
type_check_contracts(op.token, ctx, [
Contract(ins=[("a", op.token.loc)], outs=[])
])
elif op.operand == Intrinsic.OVER:
type_check_contracts(op.token, ctx, [
Contract(ins=[("a", op.token.loc), ("b", op.token.loc)], outs=[("a", op.token.loc), ("b", op.token.loc), ("a", op.token.loc)])
])
elif op.operand == Intrinsic.ROT:
type_check_contracts(op.token, ctx, [
Contract(ins=[("a", op.token.loc), ("b", op.token.loc), ("c", op.token.loc)], outs=[("b", op.token.loc), ("c", op.token.loc), ("a", op.token.loc)])
])
elif op.operand == Intrinsic.LOAD8:
type_check_contracts(op.token, ctx, [
Contract(ins=[(DataType.PTR, op.token.loc)], outs=[(DataType.INT, op.token.loc)]),
])
elif op.operand == Intrinsic.STORE8:
type_check_contracts(op.token, ctx, [
Contract(ins=[("a", op.token.loc), (DataType.PTR, op.token.loc)], outs=[]),
])
elif op.operand == Intrinsic.LOAD16:
type_check_contracts(op.token, ctx, [
Contract(ins=[(DataType.PTR, op.token.loc)], outs=[(DataType.INT, op.token.loc)]),
])
elif op.operand == Intrinsic.STORE16:
type_check_contracts(op.token, ctx, [
Contract(ins=[("a", op.token.loc), (DataType.PTR, op.token.loc)], outs=[]),
])
elif op.operand == Intrinsic.LOAD32:
type_check_contracts(op.token, ctx, [
Contract(ins=[(DataType.PTR, op.token.loc)], outs=[(DataType.INT, op.token.loc)]),
])
elif op.operand == Intrinsic.STORE32:
type_check_contracts(op.token, ctx, [
Contract(ins=[("a", op.token.loc), (DataType.PTR, op.token.loc)], outs=[]),
])
elif op.operand == Intrinsic.LOAD64:
type_check_contracts(op.token, ctx, [
Contract(ins=[(DataType.PTR, op.token.loc)], outs=[(DataType.INT, op.token.loc)]),
])
elif op.operand == Intrinsic.STORE64:
type_check_contracts(op.token, ctx, [
Contract(ins=[("a", op.token.loc), (DataType.PTR, op.token.loc)], outs=[]),
])
elif op.operand == Intrinsic.CAST_PTR:
type_check_contracts(op.token, ctx, [
Contract(ins=[("a", op.token.loc)], outs=[(DataType.PTR, op.token.loc)]),
])
elif op.operand == Intrinsic.CAST_INT:
type_check_contracts(op.token, ctx, [
Contract(ins=[("a", op.token.loc)], outs=[(DataType.INT, op.token.loc)]),
])
elif op.operand == Intrinsic.CAST_BOOL:
type_check_contracts(op.token, ctx, [
Contract(ins=[("a", op.token.loc)], outs=[(DataType.BOOL, op.token.loc)]),
])
elif op.operand == Intrinsic.ARGC:
type_check_contracts(op.token, ctx, [
Contract(ins=[], outs=[(DataType.INT, op.token.loc)])
])
elif op.operand == Intrinsic.ARGV:
type_check_contracts(op.token, ctx, [
Contract(ins=[], outs=[(DataType.PTR, op.token.loc)])
])
elif op.operand == Intrinsic.ENVP:
type_check_contracts(op.token, ctx, [
Contract(ins=[], outs=[(DataType.PTR, op.token.loc)])
])
elif op.operand == Intrinsic.HERE:
type_check_contracts(op.token, ctx, [
Contract(ins=[], outs=[(DataType.INT, op.token.loc), (DataType.PTR, op.token.loc)])
])
elif op.operand == Intrinsic.SYSCALL0:
type_check_contracts(op.token, ctx, [
Contract(ins=[(DataType.INT, op.token.loc)], outs=[(DataType.INT, op.token.loc)]),
])
elif op.operand == Intrinsic.SYSCALL1:
type_check_contracts(op.token, ctx, [
Contract(ins=[("a", op.token.loc), (DataType.INT, op.token.loc)], outs=[(DataType.INT, op.token.loc)]),
])
elif op.operand == Intrinsic.SYSCALL2:
type_check_contracts(op.token, ctx, [
Contract(ins=[("a", op.token.loc), ("b", op.token.loc), (DataType.INT, op.token.loc)], outs=[(DataType.INT, op.token.loc)]),
])
elif op.operand == Intrinsic.SYSCALL3:
type_check_contracts(op.token, ctx, [
Contract(ins=[("a", op.token.loc), ("b", op.token.loc), ("c", op.token.loc), (DataType.INT, op.token.loc)], outs=[(DataType.INT, op.token.loc)]),
])
elif op.operand == Intrinsic.SYSCALL4:
type_check_contracts(op.token, ctx, [
Contract(ins=[("a", op.token.loc), ("b", op.token.loc), ("c", op.token.loc), ("d", op.token.loc), (DataType.INT, op.token.loc)], outs=[(DataType.INT, op.token.loc)]),
])
elif op.operand == Intrinsic.SYSCALL5:
type_check_contracts(op.token, ctx, [
Contract(ins=[("a", op.token.loc), ("b", op.token.loc), ("c", op.token.loc), ("d", op.token.loc), ("e", op.token.loc), (DataType.INT, op.token.loc)], outs=[(DataType.INT, op.token.loc)]),
])
elif op.operand == Intrinsic.SYSCALL6:
type_check_contracts(op.token, ctx, [
Contract(ins=[("a", op.token.loc), ("b", op.token.loc), ("c", op.token.loc), ("d", op.token.loc), ("e", op.token.loc), ("f", op.token.loc), (DataType.INT, op.token.loc)], outs=[(DataType.INT, op.token.loc)]),
])
elif op.operand == Intrinsic.STOP:
# TODO: we need some sort of a flag that would allow us to ignore all the stop requests
compiler_diagnostic(op.token.loc, "DEBUG", "Stopping the compilation. Current stack state:")
if len(ctx.stack) > 0:
for typ, loc in reversed(ctx.stack):
compiler_diagnostic(loc, "ITEM", human_type_name(typ))
else:
compiler_diagnostic(op.token.loc, "DEBUG", "<EMPTY>")
exit(1)
else:
assert False, "unreachable"
ctx.ip += 1
elif op.typ == OpType.IF:
type_check_contracts(op.token, ctx, [
Contract(ins=[(DataType.BOOL, op.token.loc)], outs=[])
])
ctx.ip += 1
assert isinstance(op.operand, OpAddr)
contexts.append(Context(stack=copy(ctx.stack), ip=op.operand, outs=copy(ctx.outs)))
ctx = contexts[-1]
elif op.typ == OpType.IFSTAR:
type_check_contracts(op.token, ctx, [
Contract(ins=[(DataType.BOOL, op.token.loc)], outs=[])
])
ctx.ip += 1
assert isinstance(op.operand, OpAddr)
contexts.append(Context(stack=copy(ctx.stack), ip=op.operand, outs=copy(ctx.outs)))
ctx = contexts[-1]
elif op.typ == OpType.WHILE:
ctx.ip += 1
elif op.typ == OpType.END:
assert isinstance(op.operand, OpAddr)
ctx.ip = op.operand
elif op.typ == OpType.ELSE:
assert isinstance(op.operand, OpAddr)
ctx.ip = op.operand
elif op.typ == OpType.DO:
type_check_contracts(op.token, ctx, [
Contract(ins=[(DataType.BOOL, op.token.loc)], outs=[])
])
assert isinstance(op.operand, OpAddr)
if ctx.ip in visited_dos:
expected_types = list(map(lambda x: x[0], visited_dos[ctx.ip]))
actual_types = list(map(lambda x: x[0], ctx.stack))
if expected_types != actual_types:
                    compiler_error(op.token.loc, 'Loops are not allowed to alter the types or the number of elements on the stack between iterations!')
compiler_note(op.token.loc, '-- Stack BEFORE a single iteration --')
if len(visited_dos[ctx.ip]) == 0:
compiler_note(op.token.loc, '<empty>')
else:
for typ, loc in visited_dos[ctx.ip]:
compiler_note(loc, human_type_name(typ))
compiler_note(op.token.loc, '-- Stack AFTER a single iteration --')
if len(ctx.stack) == 0:
compiler_note(op.token.loc, '<empty>')
else:
for typ, loc in ctx.stack:
compiler_note(loc, human_type_name(typ))
exit(1)
contexts.pop()
else:
visited_dos[ctx.ip] = copy(ctx.stack)
ctx.ip += 1
contexts.append(Context(stack=copy(ctx.stack), ip=op.operand, outs=copy(ctx.outs)))
ctx = contexts[-1]
else:
assert False, "unreachable"
def generate_nasm_linux_x86_64(program: Program, out_file_path: str):
strs: List[bytes] = []
with open(out_file_path, "w") as out:
out.write("BITS 64\n")
out.write("segment .text\n")
out.write("print:\n")
out.write(" mov r9, -3689348814741910323\n")
out.write(" sub rsp, 40\n")
out.write(" mov BYTE [rsp+31], 10\n")
out.write(" lea rcx, [rsp+30]\n")
out.write(".L2:\n")
out.write(" mov rax, rdi\n")
out.write(" lea r8, [rsp+32]\n")
out.write(" mul r9\n")
out.write(" mov rax, rdi\n")
out.write(" sub r8, rcx\n")
out.write(" shr rdx, 3\n")
out.write(" lea rsi, [rdx+rdx*4]\n")
out.write(" add rsi, rsi\n")
out.write(" sub rax, rsi\n")
out.write(" add eax, 48\n")
out.write(" mov BYTE [rcx], al\n")
out.write(" mov rax, rdi\n")
out.write(" mov rdi, rdx\n")
out.write(" mov rdx, rcx\n")
out.write(" sub rcx, 1\n")
out.write(" cmp rax, 9\n")
out.write(" ja .L2\n")
out.write(" lea rax, [rsp+32]\n")
out.write(" mov edi, 1\n")
out.write(" sub rdx, rax\n")
out.write(" xor eax, eax\n")
out.write(" lea rsi, [rsp+32+rdx]\n")
out.write(" mov rdx, r8\n")
out.write(" mov rax, 1\n")
out.write(" syscall\n")
out.write(" add rsp, 40\n")
out.write(" ret\n")
out.write("global _start\n")
out.write("_start:\n")
out.write(" mov [args_ptr], rsp\n")
out.write(" mov rax, ret_stack_end\n")
out.write(" mov [ret_stack_rsp], rax\n")
for ip in range(len(program.ops)):
op = program.ops[ip]
assert len(OpType) == 18, "Exhaustive ops handling in generate_nasm_linux_x86_64"
out.write("addr_%d:\n" % ip)
if op.typ in [OpType.PUSH_INT, OpType.PUSH_BOOL, OpType.PUSH_PTR]:
assert isinstance(op.operand, int), f"This could be a bug in the parsing step {op.operand}"
out.write(" mov rax, %d\n" % op.operand)
out.write(" push rax\n")
elif op.typ == OpType.PUSH_STR:
assert isinstance(op.operand, str), "This could be a bug in the parsing step"
value = op.operand.encode('utf-8')
n = len(value)
out.write(" mov rax, %d\n" % n)
out.write(" push rax\n")
out.write(" push str_%d\n" % len(strs))
strs.append(value)
elif op.typ == OpType.PUSH_CSTR:
assert isinstance(op.operand, str), "This could be a bug in the parsing step"
value = op.operand.encode('utf-8') + b'\0'
out.write(" push str_%d\n" % len(strs))
strs.append(value)
elif op.typ == OpType.PUSH_MEM:
assert isinstance(op.operand, MemAddr), "This could be a bug in the parsing step"
out.write(" mov rax, mem\n")
out.write(" add rax, %d\n" % op.operand)
out.write(" push rax\n")
elif op.typ == OpType.PUSH_LOCAL_MEM:
assert isinstance(op.operand, MemAddr)
out.write(" mov rax, [ret_stack_rsp]\n");
out.write(" add rax, %d\n" % op.operand)
out.write(" push rax\n")
elif op.typ in [OpType.IF, OpType.IFSTAR]:
out.write(" pop rax\n")
out.write(" test rax, rax\n")
assert isinstance(op.operand, OpAddr), f"This could be a bug in the parsing step {op.operand}"
out.write(" jz addr_%d\n" % op.operand)
elif op.typ == OpType.WHILE:
pass
elif op.typ == OpType.ELSE:
assert isinstance(op.operand, OpAddr), "This could be a bug in the parsing step"
out.write(" jmp addr_%d\n" % op.operand)
elif op.typ == OpType.END:
assert isinstance(op.operand, int), "This could be a bug in the parsing step"
out.write(" jmp addr_%d\n" % op.operand)
elif op.typ == OpType.DO:
out.write(" pop rax\n")
out.write(" test rax, rax\n")
assert isinstance(op.operand, int), "This could be a bug in the parsing step"
out.write(" jz addr_%d\n" % op.operand)
elif op.typ == OpType.SKIP_PROC:
assert isinstance(op.operand, OpAddr), f"This could be a bug in the parsing step: {op.operand}"
out.write(" jmp addr_%d\n" % op.operand)
elif op.typ == OpType.PREP_PROC:
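                # Calling convention: the data stack lives on the hardware rsp, while return
                # addresses and proc-local memory live in ret_stack (tracked via ret_stack_rsp).
                # PREP_PROC reserves the proc's local memory on that return stack, then restores
                # the caller's data stack pointer, which CALL parked in rax.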
assert isinstance(op.operand, int)
out.write(" sub rsp, %d\n" % op.operand)
out.write(" mov [ret_stack_rsp], rsp\n")
out.write(" mov rsp, rax\n")
elif op.typ == OpType.CALL:
assert isinstance(op.operand, OpAddr), f"This could be a bug in the parsing step: {op.operand}"
out.write(" mov rax, rsp\n")
out.write(" mov rsp, [ret_stack_rsp]\n")
out.write(" call addr_%d\n" % op.operand)
out.write(" mov [ret_stack_rsp], rsp\n")
out.write(" mov rsp, rax\n")
elif op.typ == OpType.RET:
assert isinstance(op.operand, int)
out.write(" mov rax, rsp\n")
out.write(" mov rsp, [ret_stack_rsp]\n")
out.write(" add rsp, %d\n" % op.operand)
out.write(" ret\n")
elif op.typ == OpType.INTRINSIC:
assert len(Intrinsic) == 45, "Exhaustive intrinsic handling in generate_nasm_linux_x86_64()"
if op.operand == Intrinsic.PLUS:
out.write(" pop rax\n")
out.write(" pop rbx\n")
out.write(" add rax, rbx\n")
out.write(" push rax\n")
elif op.operand == Intrinsic.MINUS:
out.write(" pop rax\n")
out.write(" pop rbx\n")
out.write(" sub rbx, rax\n")
out.write(" push rbx\n")
elif op.operand == Intrinsic.MUL:
out.write(" pop rax\n")
out.write(" pop rbx\n")
out.write(" mul rbx\n")
out.write(" push rax\n")
elif op.operand == Intrinsic.MAX:
out.write(" pop rax\n")
out.write(" pop rbx\n")
out.write(" cmp rbx, rax\n")
out.write(" cmovge rax, rbx\n")
out.write(" push rax\n")
elif op.operand == Intrinsic.DIVMOD:
out.write(" xor rdx, rdx\n")
out.write(" pop rbx\n")
out.write(" pop rax\n")
out.write(" div rbx\n")
out.write(" push rax\n");
out.write(" push rdx\n");
elif op.operand == Intrinsic.SHR:
out.write(" pop rcx\n")
out.write(" pop rbx\n")
out.write(" shr rbx, cl\n")
out.write(" push rbx\n")
elif op.operand == Intrinsic.SHL:
out.write(" pop rcx\n")
out.write(" pop rbx\n")
out.write(" shl rbx, cl\n")
out.write(" push rbx\n")
elif op.operand == Intrinsic.OR:
out.write(" pop rax\n")
out.write(" pop rbx\n")
out.write(" or rbx, rax\n")
out.write(" push rbx\n")
elif op.operand == Intrinsic.AND:
out.write(" pop rax\n")
out.write(" pop rbx\n")
out.write(" and rbx, rax\n")
out.write(" push rbx\n")
elif op.operand == Intrinsic.NOT:
out.write(" pop rax\n")
out.write(" not rax\n")
out.write(" push rax\n")
elif op.operand == Intrinsic.PRINT:
out.write(" pop rdi\n")
out.write(" call print\n")
elif op.operand == Intrinsic.EQ:
out.write(" mov rcx, 0\n");
out.write(" mov rdx, 1\n");
out.write(" pop rax\n");
out.write(" pop rbx\n");
out.write(" cmp rax, rbx\n");
out.write(" cmove rcx, rdx\n");
out.write(" push rcx\n")
elif op.operand == Intrinsic.GT:
out.write(" mov rcx, 0\n");
out.write(" mov rdx, 1\n");
out.write(" pop rbx\n");
out.write(" pop rax\n");
out.write(" cmp rax, rbx\n");
out.write(" cmovg rcx, rdx\n");
out.write(" push rcx\n")
elif op.operand == Intrinsic.LT:
out.write(" mov rcx, 0\n");
out.write(" mov rdx, 1\n");
out.write(" pop rbx\n");
out.write(" pop rax\n");
out.write(" cmp rax, rbx\n");
out.write(" cmovl rcx, rdx\n");
out.write(" push rcx\n")
elif op.operand == Intrinsic.GE:
out.write(" mov rcx, 0\n");
out.write(" mov rdx, 1\n");
out.write(" pop rbx\n");
out.write(" pop rax\n");
out.write(" cmp rax, rbx\n");
out.write(" cmovge rcx, rdx\n");
out.write(" push rcx\n")
elif op.operand == Intrinsic.LE:
out.write(" mov rcx, 0\n");
out.write(" mov rdx, 1\n");
out.write(" pop rbx\n");
out.write(" pop rax\n");
out.write(" cmp rax, rbx\n");
out.write(" cmovle rcx, rdx\n");
out.write(" push rcx\n")
elif op.operand == Intrinsic.NE:
out.write(" mov rcx, 0\n")
out.write(" mov rdx, 1\n")
out.write(" pop rbx\n")
out.write(" pop rax\n")
out.write(" cmp rax, rbx\n")
out.write(" cmovne rcx, rdx\n")
out.write(" push rcx\n")
elif op.operand == Intrinsic.DUP:
out.write(" pop rax\n")
out.write(" push rax\n")
out.write(" push rax\n")
elif op.operand == Intrinsic.SWAP:
out.write(" pop rax\n")
out.write(" pop rbx\n")
out.write(" push rax\n")
out.write(" push rbx\n")
elif op.operand == Intrinsic.DROP:
out.write(" pop rax\n")
elif op.operand == Intrinsic.OVER:
out.write(" pop rax\n")
out.write(" pop rbx\n")
out.write(" push rbx\n")
out.write(" push rax\n")
out.write(" push rbx\n")
elif op.operand == Intrinsic.ROT:
out.write(" pop rax\n")
out.write(" pop rbx\n")
out.write(" pop rcx\n")
out.write(" push rbx\n")
out.write(" push rax\n")
out.write(" push rcx\n")
elif op.operand == Intrinsic.LOAD8:
out.write(" pop rax\n")
out.write(" xor rbx, rbx\n")
out.write(" mov bl, [rax]\n")
out.write(" push rbx\n")
elif op.operand == Intrinsic.STORE8:
out.write(" pop rax\n");
out.write(" pop rbx\n");
out.write(" mov [rax], bl\n");
elif op.operand == Intrinsic.LOAD16:
out.write(" pop rax\n")
out.write(" xor rbx, rbx\n")
out.write(" mov bx, [rax]\n")
out.write(" push rbx\n")
elif op.operand == Intrinsic.STORE16:
out.write(" pop rax\n");
out.write(" pop rbx\n");
out.write(" mov [rax], bx\n");
elif op.operand == Intrinsic.LOAD32:
out.write(" pop rax\n")
out.write(" xor rbx, rbx\n")
out.write(" mov ebx, [rax]\n")
out.write(" push rbx\n")
elif op.operand == Intrinsic.STORE32:
out.write(" pop rax\n");
out.write(" pop rbx\n");
out.write(" mov [rax], ebx\n");
elif op.operand == Intrinsic.LOAD64:
out.write(" pop rax\n")
out.write(" xor rbx, rbx\n")
out.write(" mov rbx, [rax]\n")
out.write(" push rbx\n")
elif op.operand == Intrinsic.STORE64:
out.write(" pop rax\n");
out.write(" pop rbx\n");
out.write(" mov [rax], rbx\n");
elif op.operand == Intrinsic.ARGC:
out.write(" mov rax, [args_ptr]\n")
out.write(" mov rax, [rax]\n")
out.write(" push rax\n")
elif op.operand == Intrinsic.ARGV:
out.write(" mov rax, [args_ptr]\n")
out.write(" add rax, 8\n")
out.write(" push rax\n")
elif op.operand == Intrinsic.ENVP:
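                # On Linux x86_64 the initial process stack is [argc][argv[0..argc-1]][NULL][envp...],
                # so envp starts at args_ptr + 8 * (argc + 2).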
out.write(" mov rax, [args_ptr]\n")
out.write(" mov rax, [rax]\n")
out.write(" add rax, 2\n")
out.write(" shl rax, 3\n")
out.write(" mov rbx, [args_ptr]\n")
out.write(" add rbx, rax\n")
out.write(" push rbx\n")
elif op.operand == Intrinsic.HERE:
value = ("%s:%d:%d" % op.token.loc).encode('utf-8')
n = len(value)
out.write(" mov rax, %d\n" % n)
out.write(" push rax\n")
out.write(" push str_%d\n" % len(strs))
strs.append(value)
elif op.operand in [Intrinsic.CAST_PTR, Intrinsic.CAST_INT, Intrinsic.CAST_BOOL]:
pass
elif op.operand == Intrinsic.SYSCALL0:
out.write(" pop rax\n")
out.write(" syscall\n")
out.write(" push rax\n")
elif op.operand == Intrinsic.SYSCALL1:
out.write(" pop rax\n")
out.write(" pop rdi\n")
out.write(" syscall\n")
out.write(" push rax\n")
elif op.operand == Intrinsic.SYSCALL2:
out.write(" pop rax\n");
out.write(" pop rdi\n");
out.write(" pop rsi\n");
out.write(" syscall\n");
out.write(" push rax\n")
elif op.operand == Intrinsic.SYSCALL3:
out.write(" pop rax\n")
out.write(" pop rdi\n")
out.write(" pop rsi\n")
out.write(" pop rdx\n")
out.write(" syscall\n")
out.write(" push rax\n")
elif op.operand == Intrinsic.SYSCALL4:
out.write(" pop rax\n")
out.write(" pop rdi\n")
out.write(" pop rsi\n")
out.write(" pop rdx\n")
out.write(" pop r10\n")
out.write(" syscall\n")
out.write(" push rax\n")
elif op.operand == Intrinsic.SYSCALL5:
out.write(" pop rax\n")
out.write(" pop rdi\n")
out.write(" pop rsi\n")
out.write(" pop rdx\n")
out.write(" pop r10\n")
out.write(" pop r8\n")
out.write(" syscall\n")
out.write(" push rax\n")
elif op.operand == Intrinsic.SYSCALL6:
out.write(" pop rax\n")
out.write(" pop rdi\n")
out.write(" pop rsi\n")
out.write(" pop rdx\n")
out.write(" pop r10\n")
out.write(" pop r8\n")
out.write(" pop r9\n")
out.write(" syscall\n")
out.write(" push rax\n")
elif op.operand == Intrinsic.STOP:
pass
else:
assert False, "unreachable"
else:
assert False, "unreachable"
out.write("addr_%d:\n" % len(program.ops))
out.write(" mov rax, 60\n")
out.write(" mov rdi, 0\n")
out.write(" syscall\n")
out.write("segment .data\n")
for index, s in enumerate(strs):
out.write("str_%d: db %s\n" % (index, ','.join(map(str, list(s)))))
out.write("segment .bss\n")
out.write("args_ptr: resq 1\n")
out.write("ret_stack_rsp: resq 1\n")
out.write("ret_stack: resb %d\n" % X86_64_RET_STACK_CAP)
out.write("ret_stack_end:\n")
out.write("mem: resb %d\n" % program.memory_capacity)
assert len(Keyword) == 15, f"Exhaustive KEYWORD_NAMES definition. {len(Keyword)}"
KEYWORD_BY_NAMES: Dict[str, Keyword] = {
'if': Keyword.IF,
'if*': Keyword.IFSTAR,
'else': Keyword.ELSE,
'while': Keyword.WHILE,
'do': Keyword.DO,
'include': Keyword.INCLUDE,
'memory': Keyword.MEMORY,
'proc': Keyword.PROC,
'end': Keyword.END,
'const': Keyword.CONST,
'offset': Keyword.OFFSET,
'reset': Keyword.RESET,
'assert': Keyword.ASSERT,
'in': Keyword.IN,
'--': Keyword.BIKESHEDDER,
}
KEYWORD_NAMES: Dict[Keyword, str] = {v: k for k, v in KEYWORD_BY_NAMES.items()}
assert len(Intrinsic) == 45, "Exhaustive INTRINSIC_BY_NAMES definition"
INTRINSIC_BY_NAMES: Dict[str, Intrinsic] = {
'+': Intrinsic.PLUS,
'-': Intrinsic.MINUS,
'*': Intrinsic.MUL,
'divmod': Intrinsic.DIVMOD,
'max': Intrinsic.MAX,
'print': Intrinsic.PRINT,
'=': Intrinsic.EQ,
'>': Intrinsic.GT,
'<': Intrinsic.LT,
'>=': Intrinsic.GE,
'<=': Intrinsic.LE,
'!=': Intrinsic.NE,
'shr': Intrinsic.SHR,
'shl': Intrinsic.SHL,
'or': Intrinsic.OR,
'and': Intrinsic.AND,
'not': Intrinsic.NOT,
'dup': Intrinsic.DUP,
'swap': Intrinsic.SWAP,
'drop': Intrinsic.DROP,
'over': Intrinsic.OVER,
'rot': Intrinsic.ROT,
'!8': Intrinsic.STORE8,
'@8': Intrinsic.LOAD8,
'!16': Intrinsic.STORE16,
'@16': Intrinsic.LOAD16,
'!32': Intrinsic.STORE32,
'@32': Intrinsic.LOAD32,
'!64': Intrinsic.STORE64,
'@64': Intrinsic.LOAD64,
'cast(ptr)': Intrinsic.CAST_PTR,
'cast(int)': Intrinsic.CAST_INT,
'cast(bool)': Intrinsic.CAST_BOOL,
'argc': Intrinsic.ARGC,
'argv': Intrinsic.ARGV,
'envp': Intrinsic.ENVP,
'here': Intrinsic.HERE,
'syscall0': Intrinsic.SYSCALL0,
'syscall1': Intrinsic.SYSCALL1,
'syscall2': Intrinsic.SYSCALL2,
'syscall3': Intrinsic.SYSCALL3,
'syscall4': Intrinsic.SYSCALL4,
'syscall5': Intrinsic.SYSCALL5,
'syscall6': Intrinsic.SYSCALL6,
'???': Intrinsic.STOP,
}
INTRINSIC_NAMES: Dict[Intrinsic, str] = {v: k for k, v in INTRINSIC_BY_NAMES.items()}
class HumanNumber(Enum):
Singular=auto()
Plural=auto()
def human(obj: TokenType, number: HumanNumber = HumanNumber.Singular) -> str:
'''Human readable representation of an object that can be used in error messages'''
assert len(HumanNumber) == 2, "Exhaustive handling of number category in human()"
if number == HumanNumber.Singular:
assert len(TokenType) == 6, "Exhaustive handling of token types in human()"
if obj == TokenType.WORD:
return "a word"
elif obj == TokenType.INT:
return "an integer"
elif obj == TokenType.STR:
return "a string"
elif obj == TokenType.CSTR:
return "a C-style string"
elif obj == TokenType.CHAR:
return "a character"
elif obj == TokenType.KEYWORD:
return "a keyword"
else:
assert False, "unreachable"
elif number == HumanNumber.Plural:
assert len(TokenType) == 6, "Exhaustive handling of token types in human()"
if obj == TokenType.WORD:
return "words"
elif obj == TokenType.INT:
return "integers"
elif obj == TokenType.STR:
return "strings"
elif obj == TokenType.CSTR:
return "C-style strings"
elif obj == TokenType.CHAR:
return "characters"
elif obj == TokenType.KEYWORD:
return "keywords"
else:
assert False, "unreachable"
else:
assert False, "unreachable"
@dataclass
class Memory:
offset: MemAddr
loc: Loc
@dataclass
class Proc:
addr: OpAddr
loc: Loc
contract: Contract
local_memories: Dict[str, Memory]
local_memory_capacity: int
@dataclass
class Const:
value: int
typ: DataType
loc: Loc
@dataclass
class ParseContext:
stack: List[OpAddr] = field(default_factory=list)
ops: List[Op] = field(default_factory=list)
memories: Dict[str, Memory] = field(default_factory=dict)
memory_capacity: int = 0
procs: Dict[str, Proc] = field(default_factory=dict)
consts: Dict[str, Const] = field(default_factory=dict)
current_proc: Optional[Proc] = None
iota: int = 0
# TODO: consider getting rid of the ip variable in ParseContext
ip: OpAddr = 0
def check_name_redefinition(ctx: ParseContext, name: str, loc: Loc):
if ctx.current_proc is None:
if name in ctx.memories:
compiler_error(loc, "redefinition of a memory region `%s`" % name)
compiler_note(ctx.memories[name].loc, "the original definition is located here")
exit(1)
else:
if name in ctx.current_proc.local_memories:
compiler_error(loc, "redefinition of a local memory region `%s`" % name)
compiler_note(ctx.current_proc.local_memories[name].loc, "the original definition is located here")
exit(1)
if name in INTRINSIC_BY_NAMES:
compiler_error(loc, "redefinition of an intrinsic word `%s`" % (name, ))
exit(1)
if name in ctx.procs:
compiler_error(loc, "redefinition of a proc `%s`" % (name, ))
compiler_note(ctx.procs[name].loc, "the original definition is located here")
exit(1)
if name in ctx.consts:
compiler_error(loc, "redefinition of a constant `%s`" % (name, ))
compiler_note(ctx.consts[name].loc, "the original definition is located here")
exit(1)
# TODO: use type contracts for eval_const_value ffs
def eval_const_value(ctx: ParseContext, rtokens: List[Token]) -> Tuple[int, DataType]:
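    # A small stack evaluator for compile-time expressions, terminated by the `end` keyword.
    # `offset` and `reset` implement an iota-style counter kept in ctx.iota.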
stack: List[Tuple[int, DataType]] = []
while len(rtokens) > 0:
token = rtokens.pop()
if token.typ == TokenType.KEYWORD:
assert isinstance(token.value, Keyword)
if token.value == Keyword.END:
break
elif token.value == Keyword.OFFSET:
if len(stack) < 1:
compiler_error(token.loc, f"not enough arguments for `{KEYWORD_NAMES[token.value]}` keyword")
exit(1)
offset, typ = stack.pop()
if typ is not DataType.INT:
compiler_error(token.loc, f"`{KEYWORD_NAMES[token.value]}` expects type {DataType.INT} but got {typ}")
exit(1)
stack.append((ctx.iota, DataType.INT))
ctx.iota += offset
elif token.value == Keyword.RESET:
stack.append((ctx.iota, DataType.INT))
ctx.iota = 0
else:
compiler_error(token.loc, f"unsupported keyword `{KEYWORD_NAMES[token.value]}` in compile time evaluation")
exit(1)
elif token.typ == TokenType.INT:
assert isinstance(token.value, int)
stack.append((token.value, DataType.INT))
elif token.typ == TokenType.WORD:
assert isinstance(token.value, str)
if token.value == INTRINSIC_NAMES[Intrinsic.PLUS]:
if len(stack) < 2:
compiler_error(token.loc, f"not enough arguments for `{token.value}` intrinsic")
exit(1)
a, a_type = stack.pop()
b, b_type = stack.pop()
if a_type == DataType.INT and b_type == DataType.INT:
stack.append((a + b, DataType.INT))
elif a_type == DataType.INT and b_type == DataType.PTR:
stack.append((a + b, DataType.PTR))
elif a_type == DataType.PTR and b_type == DataType.INT:
stack.append((a + b, DataType.PTR))
else:
compiler_error(token.loc, f"Invalid argument types for `{token.value}` intrinsic: {(a_type, b_type)}")
compiler_note(token.loc, f"Expected:")
compiler_note(token.loc, f" {(DataType.INT, DataType.INT)}")
compiler_note(token.loc, f" {(DataType.INT, DataType.PTR)}")
compiler_note(token.loc, f" {(DataType.PTR, DataType.INT)}")
exit(1)
elif token.value == INTRINSIC_NAMES[Intrinsic.MINUS]:
if len(stack) < 2:
compiler_error(token.loc, f"not enough arguments for `{token.value}` intrinsic")
exit(1)
b, b_type = stack.pop()
a, a_type = stack.pop()
if a_type == DataType.INT and b_type == DataType.INT:
stack.append((a - b, DataType.INT))
elif a_type == DataType.PTR and b_type == DataType.PTR:
stack.append((a - b, DataType.INT))
elif a_type == DataType.PTR and b_type == DataType.INT:
stack.append((a - b, DataType.PTR))
else:
compiler_error(token.loc, f"Invalid argument types for `{token.value}` intrinsic: {(a_type, b_type)}")
compiler_note(token.loc, f"Expected:")
compiler_note(token.loc, f" {(DataType.INT, DataType.INT)}")
compiler_note(token.loc, f" {(DataType.PTR, DataType.PTR)}")
compiler_note(token.loc, f" {(DataType.PTR, DataType.INT)}")
exit(1)
elif token.value == INTRINSIC_NAMES[Intrinsic.MUL]:
if len(stack) < 2:
compiler_error(token.loc, f"not enough arguments for `{token.value}` intrinsic")
exit(1)
a, a_type = stack.pop()
b, b_type = stack.pop()
if a_type == b_type and a_type == DataType.INT:
stack.append((a * b, DataType.INT))
else:
compiler_error(token.loc, f"Invalid argument types for `{token.value}` intrinsic: {(a_type, b_type)}")
compiler_note(token.loc, f"Expected:")
compiler_note(token.loc, f" {(DataType.INT, DataType.INT)}")
exit(1)
elif token.value == INTRINSIC_NAMES[Intrinsic.DIVMOD]:
if len(stack) < 2:
compiler_error(token.loc, f"not enough arguments for `{token.value}` intrinsic")
exit(1)
a, a_type = stack.pop()
b, b_type = stack.pop()
if a_type == b_type and a_type == DataType.INT:
stack.append((b//a, DataType.INT))
stack.append((b%a, DataType.INT))
else:
compiler_error(token.loc, f"Invalid argument types for `{token.value}` intrinsic: {(a_type, b_type)}")
compiler_note(token.loc, f"Expected:")
compiler_note(token.loc, f" {(DataType.INT, DataType.INT)}")
exit(1)
elif token.value == INTRINSIC_NAMES[Intrinsic.DROP]:
if len(stack) < 1:
compiler_error(token.loc, f"not enough arguments for `{token.value}` intrinsic")
exit(1)
stack.pop()
elif token.value == INTRINSIC_NAMES[Intrinsic.CAST_BOOL]:
if len(stack) < 1:
compiler_error(token.loc, f"not enough arguments for `{token.value}` intrinsic")
exit(1)
value, typ = stack.pop()
stack.append((value, DataType.BOOL))
elif token.value == INTRINSIC_NAMES[Intrinsic.CAST_INT]:
if len(stack) < 1:
compiler_error(token.loc, f"not enough arguments for `{token.value}` intrinsic")
exit(1)
value, typ = stack.pop()
stack.append((value, DataType.INT))
elif token.value == INTRINSIC_NAMES[Intrinsic.CAST_PTR]:
if len(stack) < 1:
compiler_error(token.loc, f"not enough arguments for `{token.value}` intrinsic")
exit(1)
value, typ = stack.pop()
stack.append((value, DataType.PTR))
elif token.value == INTRINSIC_NAMES[Intrinsic.EQ]:
if len(stack) < 2:
compiler_error(token.loc, f"not enough arguments for `{token.value}` intrinsic")
exit(1)
a, a_type = stack.pop()
b, b_type = stack.pop()
if a_type != b_type:
compiler_error(token.loc, f"intrinsic `{token.value}` expects the arguments to have the same type. The actual types are")
compiler_note(token.loc, f" {(a_type, b_type)}")
exit(1)
stack.append((int(a == b), DataType.BOOL))
elif token.value == INTRINSIC_NAMES[Intrinsic.MAX]:
if len(stack) < 2:
compiler_error(token.loc, f"not enough arguments for `{token.value}` intrinsic")
exit(1)
a, a_type = stack.pop()
b, b_type = stack.pop()
if a_type == b_type and a_type == DataType.INT:
stack.append((max(a, b), DataType.INT))
else:
compiler_error(token.loc, f"Invalid argument types for `{token.value}` intrinsic: {(a_type, b_type)}")
compiler_note(token.loc, f"Expected:")
compiler_note(token.loc, f" {(DataType.INT, DataType.INT)}")
exit(1)
elif token.value in ctx.consts:
const = ctx.consts[token.value]
stack.append((const.value, const.typ))
else:
compiler_error(token.loc, f"unsupported word `{token.value}` in compile time evaluation")
exit(1)
else:
compiler_error(token.loc, f"{human(token.typ, HumanNumber.Plural)} are not supported in compile time evaluation")
exit(1)
if len(stack) != 1:
compiler_error(token.loc, "The result of expression in compile time evaluation must be a single number")
exit(1)
return stack.pop()
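# Hedged example of what eval_const_value consumes (assuming the surrounding
# `const <name> ... end` form is handled above this point): the expression
# `4 1024 *` evaluates to (4096, DataType.INT).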
def parse_contract_list(rtokens: List[Token], stoppers: List[Keyword]) -> Tuple[List[Tuple[Union[DataType, str], Loc]], Keyword]:
args: List[Tuple[Union[DataType, str], Loc]] = []
while len(rtokens) > 0:
token = rtokens.pop()
if token.typ == TokenType.WORD:
assert isinstance(token.value, str)
if token.value in DATATYPE_BY_NAME:
args.append((DATATYPE_BY_NAME[token.value], token.loc))
else:
compiler_error(token.loc, f"Unknown data type {token.value}")
exit(1)
elif token.typ == TokenType.KEYWORD:
assert isinstance(token.value, Keyword)
if token.value in stoppers:
return (args, token.value)
else:
compiler_error(token.loc, f"Unexpected keyword {KEYWORD_NAMES[token.value]}")
exit(1)
elif token.typ == TokenType.STR:
assert isinstance(token.value, str)
args.append((token.value, token.loc))
else:
compiler_error(token.loc, f"{human(token.typ, HumanNumber.Plural)} are not allowed in procedure definition.")
exit(1)
compiler_error(token.loc, f"Unexpected end of file. Expected keywords: ")
for keyword in stoppers:
compiler_note(token.loc, f" {KEYWORD_NAMES[keyword]}")
exit(1)
def parse_proc_contract(rtokens: List[Token]) -> Contract:
contract = Contract(ins=[], outs=[])
contract.ins, stopper = parse_contract_list(rtokens, [Keyword.BIKESHEDDER, Keyword.IN])
if stopper == Keyword.IN:
return contract
contract.outs, stopper = parse_contract_list(rtokens, [Keyword.IN])
assert stopper == Keyword.IN
return contract
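# Hedged example of the source shape this parses (the concrete spelling of
# Keyword.BIKESHEDDER comes from KEYWORD_BY_NAMES and is assumed here to be `--`):
#   proc name int int -- bool in ... end
# The types before the bikeshedder become contract.ins; the types between it
# and `in` become contract.outs.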
def parse_program_from_tokens(ctx: ParseContext, tokens: List[Token], include_paths: List[str], included: int):
rtokens: List[Token] = list(reversed(tokens))
while len(rtokens) > 0:
token = rtokens.pop()
assert len(TokenType) == 6, "Exhaustive token handling in parse_program_from_tokens"
if token.typ == TokenType.WORD:
assert isinstance(token.value, str), "This could be a bug in the lexer"
if token.value in INTRINSIC_BY_NAMES:
ctx.ops.append(Op(typ=OpType.INTRINSIC, token=token, operand=INTRINSIC_BY_NAMES[token.value]))
ctx.ip += 1
elif ctx.current_proc is not None and token.value in ctx.current_proc.local_memories:
ctx.ops.append(Op(typ=OpType.PUSH_LOCAL_MEM, token=token, operand=ctx.current_proc.local_memories[token.value].offset))
ctx.ip += 1
elif token.value in ctx.memories:
ctx.ops.append(Op(typ=OpType.PUSH_MEM, token=token, operand=ctx.memories[token.value].offset))
ctx.ip += 1
elif token.value in ctx.procs:
ctx.ops.append(Op(typ=OpType.CALL, token=token, operand=ctx.procs[token.value].addr))
ctx.ip += 1
elif token.value in ctx.consts:
const = ctx.consts[token.value]
if const.typ == DataType.INT:
ctx.ops.append(Op(typ=OpType.PUSH_INT, token=token, operand=const.value))
elif const.typ == DataType.BOOL:
ctx.ops.append(Op(typ=OpType.PUSH_BOOL, token=token, operand=const.value))
elif const.typ == DataType.PTR:
ctx.ops.append(Op(typ=OpType.PUSH_PTR, token=token, operand=const.value))
else:
assert False, "unreachable"
ctx.ip += 1
else:
compiler_error(token.loc, "unknown word `%s`" % token.value)
exit(1)
elif token.typ == TokenType.INT:
assert isinstance(token.value, int), "This could be a bug in the lexer"
ctx.ops.append(Op(typ=OpType.PUSH_INT, operand=token.value, token=token))
ctx.ip += 1
elif token.typ == TokenType.STR:
assert isinstance(token.value, str), "This could be a bug in the lexer"
ctx.ops.append(Op(typ=OpType.PUSH_STR, operand=token.value, token=token));
ctx.ip += 1
elif token.typ == TokenType.CSTR:
assert isinstance(token.value, str), "This could be a bug in the lexer"
ctx.ops.append(Op(typ=OpType.PUSH_CSTR, operand=token.value, token=token));
ctx.ip += 1
elif token.typ == TokenType.CHAR:
assert isinstance(token.value, int)
ctx.ops.append(Op(typ=OpType.PUSH_INT, operand=token.value, token=token));
ctx.ip += 1
elif token.typ == TokenType.KEYWORD:
assert len(Keyword) == 15, "Exhaustive keywords handling in parse_program_from_tokens()"
if token.value == Keyword.IF:
ctx.ops.append(Op(typ=OpType.IF, token=token))
ctx.stack.append(ctx.ip)
ctx.ip += 1
elif token.value == Keyword.IFSTAR:
if len(ctx.stack) == 0:
compiler_error(token.loc, '`if*` can only come after `else`')
exit(1)
else_ip = ctx.stack[-1]
if ctx.ops[else_ip].typ != OpType.ELSE:
compiler_error(ctx.ops[else_ip].token.loc, '`if*` can only come after `else`')
exit(1)
ctx.ops.append(Op(typ=OpType.IFSTAR, token=token))
ctx.stack.append(ctx.ip)
ctx.ip += 1
elif token.value == Keyword.ELSE:
if len(ctx.stack) == 0:
compiler_error(token.loc, '`else` can only come after `if` or `if*`')
exit(1)
if_ip = ctx.stack.pop()
if ctx.ops[if_ip].typ == OpType.IF:
ctx.ops[if_ip].operand = ctx.ip + 1
ctx.stack.append(ctx.ip)
ctx.ops.append(Op(typ=OpType.ELSE, token=token))
ctx.ip += 1
elif ctx.ops[if_ip].typ == OpType.IFSTAR:
else_before_ifstar_ip = None if len(ctx.stack) == 0 else ctx.stack.pop()
assert else_before_ifstar_ip is not None and ctx.ops[else_before_ifstar_ip].typ == OpType.ELSE, "At this point we should've already checked that `if*` comes after `else`. Otherwise this is a compiler bug."
ctx.ops[if_ip].operand = ctx.ip + 1
ctx.ops[else_before_ifstar_ip].operand = ctx.ip
ctx.stack.append(ctx.ip)
ctx.ops.append(Op(typ=OpType.ELSE, token=token))
ctx.ip += 1
else:
compiler_error(ctx.ops[if_ip].token.loc, f'`else` can only come after `if` or `if*`')
exit(1)
elif token.value == Keyword.END:
block_ip = ctx.stack.pop()
if ctx.ops[block_ip].typ == OpType.ELSE:
ctx.ops.append(Op(typ=OpType.END, token=token))
ctx.ops[block_ip].operand = ctx.ip
ctx.ops[ctx.ip].operand = ctx.ip + 1
elif ctx.ops[block_ip].typ == OpType.DO:
ctx.ops.append(Op(typ=OpType.END, token=token))
assert ctx.ops[block_ip].operand is not None
while_ip = ctx.ops[block_ip].operand
assert isinstance(while_ip, OpAddr)
if ctx.ops[while_ip].typ != OpType.WHILE:
compiler_error(ctx.ops[while_ip].token.loc, '`end` can only close `do` blocks that are preceded by `while`')
exit(1)
ctx.ops[ctx.ip].operand = while_ip
ctx.ops[block_ip].operand = ctx.ip + 1
elif ctx.ops[block_ip].typ == OpType.PREP_PROC:
assert ctx.current_proc is not None
ctx.ops[block_ip].operand = ctx.current_proc.local_memory_capacity
block_ip = ctx.stack.pop()
assert ctx.ops[block_ip].typ == OpType.SKIP_PROC
ctx.ops.append(Op(typ=OpType.RET, token=token, operand=ctx.current_proc.local_memory_capacity))
ctx.ops[block_ip].operand = ctx.ip + 1
ctx.current_proc = None
elif ctx.ops[block_ip].typ == OpType.IFSTAR:
else_before_ifstar_ip = None if len(ctx.stack) == 0 else ctx.stack.pop()
assert else_before_ifstar_ip is not None and ctx.ops[else_before_ifstar_ip].typ == OpType.ELSE, "At this point we should've already checked that `if*` comes after `else`. Otherwise this is a compiler bug."
ctx.ops.append(Op(typ=OpType.END, token=token))
ctx.ops[block_ip].operand = ctx.ip
ctx.ops[else_before_ifstar_ip].operand = ctx.ip
ctx.ops[ctx.ip].operand = ctx.ip + 1
elif ctx.ops[block_ip].typ == OpType.IF:
ctx.ops.append(Op(typ=OpType.END, token=token))
ctx.ops[block_ip].operand = ctx.ip
ctx.ops[ctx.ip].operand = ctx.ip + 1
else:
compiler_error(ctx.ops[block_ip].token.loc, '`end` can only close `if`, `else`, `do`, or `proc` blocks for now')
exit(1)
ctx.ip += 1
elif token.value == Keyword.WHILE:
ctx.ops.append(Op(typ=OpType.WHILE, token=token))
ctx.stack.append(ctx.ip)
ctx.ip += 1
elif token.value == Keyword.DO:
ctx.ops.append(Op(typ=OpType.DO, token=token))
if len(ctx.stack) == 0:
compiler_error(token.loc, "`do` is not preceded by `while`")
exit(1)
while_ip = ctx.stack.pop()
if ctx.ops[while_ip].typ != OpType.WHILE:
compiler_error(token.loc, "`do` is not preceded by `while`")
exit(1)
ctx.ops[ctx.ip].operand = while_ip
ctx.stack.append(ctx.ip)
ctx.ip += 1
elif token.value == Keyword.INCLUDE:
if len(rtokens) == 0:
compiler_error(token.loc, "expected path to the include file but found nothing")
exit(1)
token = rtokens.pop()
if token.typ != TokenType.STR:
compiler_error(token.loc, "expected path to the include file to be %s but found %s" % (human(TokenType.STR), human(token.typ)))
exit(1)
assert isinstance(token.value, str), "This is probably a bug in the lexer"
if included >= INCLUDE_LIMIT:
compiler_error(token.loc, f"Include limit is exceeded. A file was included {included} times.")
exit(1)
file_included = False
try:
parse_program_from_file(ctx, token.value, include_paths, included + 1)
file_included = True
except FileNotFoundError:
for include_path in include_paths:
try:
parse_program_from_file(ctx, path.join(include_path, token.value), include_paths, included + 1)
file_included = True
break
except FileNotFoundError:
continue
if not file_included:
compiler_error(token.loc, "file `%s` not found" % token.value)
exit(1)
elif token.value == Keyword.ASSERT:
if len(rtokens) == 0:
compiler_error(token.loc, "expected assert messsage but found nothing")
exit(1)
token = rtokens.pop()
if token.typ != TokenType.STR:
compiler_error(token.loc, "expected assert message to be %s but found %s" % (human(TokenType.STR), human(token.typ)))
exit(1)
assert isinstance(token.value, str), "This is probably a bug in the lexer"
assert_message = token.value
assert_value, assert_typ = eval_const_value(ctx, rtokens)
if assert_typ != DataType.BOOL:
compiler_error(token.loc, "assertion expects the expression to be of type `bool`")
exit(1)
if assert_value == 0:
compiler_error(token.loc, f"Static Assertion Failed: {assert_message}");
exit(1)
elif token.value == Keyword.CONST:
if len(rtokens) == 0:
compiler_error(token.loc, "expected const name but found nothing")
exit(1)
token = rtokens.pop()
if token.typ != TokenType.WORD:
compiler_error(token.loc, "expected const name to be %s but found %s" % (human(TokenType.WORD), human(token.typ)))
exit(1)
assert isinstance(token.value, str), "This is probably a bug in the lexer"
const_name = token.value
const_loc = token.loc
check_name_redefinition(ctx, token.value, token.loc)
const_value, const_typ = eval_const_value(ctx, rtokens)
ctx.consts[const_name] = Const(value=const_value, loc=const_loc, typ=const_typ)
elif token.value == Keyword.MEMORY:
if len(rtokens) == 0:
compiler_error(token.loc, "expected memory name but found nothing")
exit(1)
token = rtokens.pop()
if token.typ != TokenType.WORD:
compiler_error(token.loc, "expected memory name to be %s but found %s" % (human(TokenType.WORD), human(token.typ)))
exit(1)
assert isinstance(token.value, str), "This is probably a bug in the lexer"
memory_name = token.value
memory_loc = token.loc
memory_size, memory_size_type = eval_const_value(ctx, rtokens)
if memory_size_type != DataType.INT:
compiler_error(token.loc, f"Memory size must be of type {DataType.INT} but it is of type {memory_size_type}")
exit(1)
check_name_redefinition(ctx, token.value, token.loc)
if ctx.current_proc is None:
ctx.memories[memory_name] = Memory(offset=ctx.memory_capacity, loc=memory_loc)
ctx.memory_capacity += memory_size
else:
# TODO: local memory regions can shadow the global ones
# Is that something we actually want?
ctx.current_proc.local_memories[memory_name] = Memory(offset=ctx.current_proc.local_memory_capacity, loc=memory_loc)
ctx.current_proc.local_memory_capacity += memory_size
elif token.value == Keyword.PROC:
if ctx.current_proc is None:
ctx.ops.append(Op(typ=OpType.SKIP_PROC, token=token))
ctx.stack.append(ctx.ip)
ctx.ip += 1
proc_addr = ctx.ip
ctx.ops.append(Op(typ=OpType.PREP_PROC, token=token))
ctx.stack.append(ctx.ip)
ctx.ip += 1
if len(rtokens) == 0:
compiler_error(token.loc, "expected procedure name but found nothing")
exit(1)
token = rtokens.pop()
if token.typ != TokenType.WORD:
compiler_error(token.loc, "expected procedure name to be %s but found %s" % (human(TokenType.WORD), human(token.typ)))
exit(1)
assert isinstance(token.value, str), "This is probably a bug in the lexer"
proc_loc = token.loc
proc_name = token.value
check_name_redefinition(ctx, token.value, token.loc)
proc_contract = parse_proc_contract(rtokens)
ctx.procs[proc_name] = Proc(addr=proc_addr, loc=token.loc, local_memories={}, local_memory_capacity=0, contract=proc_contract)
ctx.current_proc = ctx.procs[proc_name]
else:
# TODO: forbid constant definition inside of proc
compiler_error(token.loc, "defining procedures inside of procedures is not allowed")
compiler_note(ctx.current_proc.loc, "the current procedure starts here")
exit(1)
elif token.value in [Keyword.OFFSET, Keyword.RESET]:
compiler_error(token.loc, f"keyword `{token.text}` is supported only in compile time evaluation context")
exit(1)
elif token.value in [Keyword.IN, Keyword.BIKESHEDDER]:
compiler_error(token.loc, f"Unexpected keyword `{token.text}`")
exit(1)
else:
assert False, 'unreachable';
else:
assert False, 'unreachable'
if len(ctx.stack) > 0:
compiler_error(ctx.ops[ctx.stack.pop()].token.loc, 'unclosed block')
exit(1)
def find_col(line: str, start: int, predicate: Callable[[str], bool]) -> int:
while start < len(line) and not predicate(line[start]):
start += 1
return start
def unescape_string(s: str) -> str:
# NOTE: unicode_escape assumes latin-1 encoding, so we kinda have
# to do this weird round trip
return s.encode('utf-8').decode('unicode_escape').encode('latin-1').decode('utf-8')
def find_string_literal_end(line: str, start: int, quote: str = '"') -> int:
while start < len(line):
if line[start] == '\\':
start += 2
elif line[start] == quote:
break
else:
start += 1
return start
def lex_lines(file_path: str, lines: List[str]) -> Generator[Token, None, None]:
assert len(TokenType) == 6, 'Exhaustive handling of token types in lex_lines'
row = 0
str_literal_buf = ""
while row < len(lines):
line = lines[row]
col = find_col(line, 0, lambda x: not x.isspace())
col_end = 0
while col < len(line):
loc = (file_path, row + 1, col + 1)
if line[col] == '"':
while row < len(lines):
start = col
if str_literal_buf == "":
start += 1
else:
line = lines[row]
col_end = find_string_literal_end(line, start)
if col_end >= len(line) or line[col_end] != '"':
str_literal_buf += line[start:]
row +=1
col = 0
else:
str_literal_buf += line[start:col_end]
break
if row >= len(lines):
compiler_error(loc, "unclosed string literal")
exit(1)
assert line[col_end] == '"'
col_end += 1
text_of_token = str_literal_buf
str_literal_buf = ""
if col_end < len(line) and line[col_end] == 'c':
col_end += 1
yield Token(TokenType.CSTR, text_of_token, loc, unescape_string(text_of_token))
else:
yield Token(TokenType.STR, text_of_token, loc, unescape_string(text_of_token))
col = find_col(line, col_end, lambda x: not x.isspace())
elif line[col] == "'":
col_end = find_string_literal_end(line, col+1, quote="'")
if col_end >= len(line) or line[col_end] != "'":
compiler_error(loc, "unclosed character literal")
exit(1)
text_of_token = line[col+1:col_end]
char_bytes = unescape_string(text_of_token).encode('utf-8')
if len(char_bytes) != 1:
compiler_error(loc, "only a single byte is allowed inside of a character literal")
exit(1)
yield Token(TokenType.CHAR, text_of_token, loc, char_bytes[0])
col = find_col(line, col_end+1, lambda x: not x.isspace())
else:
col_end = find_col(line, col, lambda x: x.isspace())
text_of_token = line[col:col_end]
try:
yield Token(TokenType.INT, text_of_token, loc, int(text_of_token))
except ValueError:
if text_of_token in KEYWORD_BY_NAMES:
yield Token(TokenType.KEYWORD, text_of_token, loc, KEYWORD_BY_NAMES[text_of_token])
else:
# TODO: `69//` is recognized as a single word
# And not a number plus a comment
if text_of_token.startswith("//"):
break
yield Token(TokenType.WORD, text_of_token, loc, text_of_token)
col = find_col(line, col_end, lambda x: not x.isspace())
row += 1
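# Illustrative token stream: the line `34 35 + print` lexes to
# INT(34), INT(35), WORD(+), WORD(print); intrinsic words are only resolved
# later by the parser via INTRINSIC_BY_NAMES.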
def lex_file(file_path: str) -> List[Token]:
with open(file_path, "r", encoding='utf-8') as f:
return [token for token in lex_lines(file_path, f.readlines())]
def parse_program_from_file(ctx: ParseContext, file_path: str, include_paths: List[str], included: int = 0):
parse_program_from_tokens(ctx, lex_file(file_path), include_paths, included)
def cmd_call_echoed(cmd: List[str], silent: bool) -> int:
if not silent:
print("[CMD] %s" % " ".join(map(shlex.quote, cmd)))
return subprocess.call(cmd)
# TODO: with a lot of procs the control flow graphs becomes useless even on small programs
# Maybe we should eliminate unreachable code or something
# TODO: test.py never touches generate_control_flow_graph_as_dot_file
# Which leads to constantly forgetting to update the implementation
def generate_control_flow_graph_as_dot_file(program: Program, dot_path: str):
with open(dot_path, "w") as f:
f.write("digraph Program {\n")
assert len(OpType) == 16, f"Exhaustive handling of OpType in generate_control_flow_graph_as_dot_file(), {len(OpType)}"
for ip in range(len(program.ops)):
op = program.ops[ip]
if op.typ == OpType.INTRINSIC:
assert isinstance(op.operand, Intrinsic)
f.write(f" Node_{ip} [label={repr(repr(INTRINSIC_NAMES[op.operand]))}];\n")
f.write(f" Node_{ip} -> Node_{ip + 1};\n")
elif op.typ == OpType.PUSH_STR:
assert isinstance(op.operand, str)
f.write(f" Node_{ip} [label={repr(repr(op.operand))}];\n")
f.write(f" Node_{ip} -> Node_{ip + 1};\n")
elif op.typ == OpType.PUSH_CSTR:
assert isinstance(op.operand, str)
f.write(f" Node_{ip} [label={repr(repr(op.operand))}];\n")
f.write(f" Node_{ip} -> Node_{ip + 1};\n")
elif op.typ == OpType.PUSH_INT:
assert isinstance(op.operand, int)
f.write(f" Node_{ip} [label={op.operand}]\n")
f.write(f" Node_{ip} -> Node_{ip + 1};\n")
elif op.typ == OpType.PUSH_MEM:
assert isinstance(op.operand, int)
f.write(f" Node_{ip} [label=\"mem({op.operand})\"]\n")
f.write(f" Node_{ip} -> Node_{ip + 1};\n")
elif op.typ == OpType.PUSH_LOCAL_MEM:
assert isinstance(op.operand, int)
f.write(f" Node_{ip} [label=\"local_mem({op.operand})\"]\n")
f.write(f" Node_{ip} -> Node_{ip + 1};\n")
elif op.typ in [OpType.IF, OpType.IFSTAR]:
assert isinstance(op.operand, OpAddr), f"{op.operand}"
f.write(f" Node_{ip} [shape=record label=if];\n")
f.write(f" Node_{ip} -> Node_{ip + 1} [label=true];\n")
f.write(f" Node_{ip} -> Node_{op.operand} [label=false style=dashed];\n")
elif op.typ == OpType.WHILE:
f.write(f" Node_{ip} [shape=record label=while];\n")
f.write(f" Node_{ip} -> Node_{ip + 1};\n")
elif op.typ == OpType.DO:
assert isinstance(op.operand, OpAddr)
f.write(f" Node_{ip} [shape=record label=do];\n")
f.write(f" Node_{ip} -> Node_{ip + 1} [label=true];\n")
f.write(f" Node_{ip} -> Node_{op.operand} [label=false style=dashed];\n")
elif op.typ == OpType.ELSE:
assert isinstance(op.operand, OpAddr)
f.write(f" Node_{ip} [shape=record label=else];\n")
f.write(f" Node_{ip} -> Node_{op.operand};\n")
elif op.typ == OpType.END:
assert isinstance(op.operand, OpAddr)
f.write(f" Node_{ip} [shape=record label=end];\n")
f.write(f" Node_{ip} -> Node_{op.operand};\n")
elif op.typ == OpType.SKIP_PROC:
assert isinstance(op.operand, OpAddr)
f.write(f" Node_{ip} [shape=record label=skip_proc];\n")
f.write(f" Node_{ip} -> Node_{op.operand};\n")
elif op.typ == OpType.PREP_PROC:
f.write(f" Node_{ip} [shape=record label=prep_proc];\n")
f.write(f" Node_{ip} -> Node_{ip + 1};\n")
elif op.typ == OpType.RET:
f.write(f" Node_{ip} [shape=record label=ret];\n")
elif op.typ == OpType.CALL:
assert isinstance(op.operand, OpAddr)
f.write(f" Node_{ip} [shape=record label=call];\n")
f.write(f" Node_{ip} -> Node_{op.operand};\n")
f.write(f" Node_{ip} -> Node_{ip + 1};\n")
else:
assert False, f"unimplemented operation {op.typ}"
f.write(f" Node_{len(program.ops)} [label=halt];\n")
f.write("}\n")
def usage(compiler_name: str):
print("Usage: %s [OPTIONS] <SUBCOMMAND> [ARGS]" % compiler_name)
print(" OPTIONS:")
print(" -debug Enable debug mode.")
print(" -I <path> Add the path to the include search list")
print(" -unsafe Disable type checking.")
print(" SUBCOMMAND:")
print(" sim <file> Simulate the program")
print(" com [OPTIONS] <file> Compile the program")
print(" OPTIONS:")
print(" -r Run the program after successful compilation")
print(" -o <file|dir> Customize the output path")
print(" -s Silent mode. Don't print any info about compilation phases.")
print(" -cf Dump Control Flow graph of the program in a dot format.")
print(" help Print this help to stdout and exit with 0 code")
if __name__ == '__main__' and '__file__' in globals():
argv = sys.argv
assert len(argv) >= 1
compiler_name, *argv = argv
include_paths = ['.', './std/']
unsafe = False
while len(argv) > 0:
if argv[0] == '-debug':
argv = argv[1:]
debug = True
elif argv[0] == '-I':
argv = argv[1:]
if len(argv) == 0:
usage(compiler_name)
print("[ERROR] no path is provided for `-I` flag", file=sys.stderr)
exit(1)
include_path, *argv = argv
include_paths.append(include_path)
elif argv[0] == '-unsafe':
argv = argv[1:]
unsafe = True
else:
break
if len(argv) < 1:
usage(compiler_name)
print("[ERROR] no subcommand is provided", file=sys.stderr)
exit(1)
subcommand, *argv = argv
program_path: Optional[str] = None
program: Program = Program()
if subcommand == "sim":
if len(argv) < 1:
usage(compiler_name)
print("[ERROR] no input file is provided for the simulation", file=sys.stderr)
exit(1)
program_path, *argv = argv
include_paths.append(path.dirname(program_path))
parse_context = ParseContext()
parse_program_from_file(parse_context, program_path, include_paths);
program = Program(ops=parse_context.ops, memory_capacity=parse_context.memory_capacity)
proc_contracts = {proc.addr: proc.contract for proc in parse_context.procs.values()}
if not unsafe:
type_check_program(program, proc_contracts)
simulate_little_endian_linux(program, [program_path] + argv)
elif subcommand == "com":
silent = False
control_flow = False
run = False
output_path = None
while len(argv) > 0:
arg, *argv = argv
if arg == '-r':
run = True
elif arg == '-s':
silent = True
elif arg == '-o':
if len(argv) == 0:
usage(compiler_name)
print("[ERROR] no argument is provided for parameter -o", file=sys.stderr)
exit(1)
output_path, *argv = argv
elif arg == '-cf':
control_flow = True
else:
program_path = arg
break
if program_path is None:
usage(compiler_name)
print("[ERROR] no input file is provided for the compilation", file=sys.stderr)
exit(1)
basename = None
basedir = None
if output_path is not None:
if path.isdir(output_path):
basename = path.basename(program_path)
if basename.endswith(PORTH_EXT):
basename = basename[:-len(PORTH_EXT)]
basedir = path.dirname(output_path)
else:
basename = path.basename(output_path)
basedir = path.dirname(output_path)
else:
basename = path.basename(program_path)
if basename.endswith(PORTH_EXT):
basename = basename[:-len(PORTH_EXT)]
basedir = path.dirname(program_path)
# if basedir is empty we should "fix" the path appending the current working directory.
# So we avoid `com -r` to run command from $PATH.
if basedir == "":
basedir = os.getcwd()
basepath = path.join(basedir, basename)
include_paths.append(path.dirname(program_path))
parse_context = ParseContext()
parse_program_from_file(parse_context, program_path, include_paths);
program = Program(ops=parse_context.ops, memory_capacity=parse_context.memory_capacity)
proc_contracts = {proc.addr: proc.contract for proc in parse_context.procs.values()}
if control_flow:
dot_path = basepath + ".dot"
if not silent:
print(f"[INFO] Generating {dot_path}")
generate_control_flow_graph_as_dot_file(program, dot_path)
cmd_call_echoed(["dot", "-Tsvg", "-O", dot_path], silent)
if not unsafe:
type_check_program(program, proc_contracts)
if not silent:
print("[INFO] Generating %s" % (basepath + ".asm"))
generate_nasm_linux_x86_64(program, basepath + ".asm")
cmd_call_echoed(["nasm", "-felf64", basepath + ".asm"], silent)
cmd_call_echoed(["ld", "-o", basepath, basepath + ".o"], silent)
if run:
exit(cmd_call_echoed([basepath] + argv, silent))
elif subcommand == "help":
usage(compiler_name)
exit(0)
else:
usage(compiler_name)
print("[ERROR] unknown subcommand %s" % (subcommand), file=sys.stderr)
exit(1)
``` |
{
"source": "JorryZ/motionConstrain",
"score": 2
} |
#### File: motionConstrain/motionConstrain/postProcessBSF.py
```python
import numpy as np
import os
import sys
import imageio
import medImgProc as medImgProc
import medImgProc.image as image
import motionSegmentation.BsplineFourier as BsplineFourier
import motionSegmentation.bfSolver as bfSolver
import motionConstrain.motionConstrain as motionConstrain
print('postProcessBSF version 5.0.9')
# edit part ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def pointTrace(BSFfile=None, STLfile=None, savePath=None, timeList=None, customPath=None):
print('Function pointTrace is to track the motion of points from STL file, and generate new STL over cycle.')
print('Inputs of this function are: path+name of BSFfile, path+name of STLfile, name of saving folder (savePath).')
print('A sub-folder is automatically created to save files, folder name: stl+customPath')
if customPath is not None:
savePath=savePath+'\\stl-'+customPath
else:
savePath=savePath+'\\stl'
solver=bfSolver.bfSolver()
solver.bsFourier=BsplineFourier.BsplineFourier(coefFile=BSFfile)
if timeList is None:
timeList=int(solver.bsFourier.spacing[3])
solver.pointTrace(stlFile=STLfile, savePath=savePath, timeList=timeList)
print('function pointTrace done! Have a happy day ^_^')
def timeRemap(rawBSFfile=None, newBSFfile=None, time=0, phantom=True):
print('Function timeRemap is to remap BSF to appointed time point as new reference timing.')
print('Inputs of this function are: path+name of rawBSFfile, path+name of newBSFfile, time as reference timing (default 0), phantom=True if regrid from phantom time point.')
solver=bfSolver.bfSolver()
solver.bsFourier=BsplineFourier.BsplineFourier(rawBSFfile)
#fourierTerms=solver.bsFourier.coef.shape[3]//2
solver.initialize()
solver.pointsCoef=[]
for m in range(len(solver.points)):
coef=solver.bsFourier.getRefCoef(solver.points[m])
solver.pointsCoef.append(coef.copy())
if phantom==False:
solver.points[m] = solver.points[m] + coef[0,:]
solver.bsFourier.regridToTime(solver.points,solver.pointsCoef,time=time)
solver.bsFourier.writeCoef(newBSFfile)
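# Example call (file names are hypothetical):
#   timeRemap(rawBSFfile='case1_BSF.txt', newBSFfile='case1_BSF_t3.txt', time=3, phantom=True)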
def coefCombiner(rawBSFfile=None, newBSFfilePath=None, newBSFfile=None, time=0):
print('Function coefCombiner is to combine all results of different fourier terms.')
print('Inputs of this function are: path+name of rawBSFfile, saving folder path of newBSFfile (newBSFfilePath), name of newBSFfile, time as reference timing (default 0).')
saveFtermPath=newBSFfilePath+'\\coefFT'
solver=motionConstrain.motionConstrain()
solver.initialize(coefFile=rawBSFfile)
fourierTerms=solver.coefMat.shape[3]//2
filepath=[]
for fterm in range(1,(fourierTerms*2+1)):
filepath.append(saveFtermPath+str(fterm)+'.txt')
solver.coefCombiner(filepath)
solver.coefZeroRemap(time=time)
solver.writeFile((newBSFfilePath+'\\'+newBSFfile),coefMat=1)
def coefZeroRemap(rawBSFfile=None, newBSFfile=None, time=0):
print('Function coefZeroRemap is to calculate the value of DC term (zero term) after processing the Fourier terms.')
print('Inputs of this function are: path+name of rawBSFfile, path+name of newBSFfile, time as reference timing (default 0).')
solver=motionConstrain.motionConstrain()
solver.initialize(coefFile=rawBSFfile)
solver.coefZeroRemap(time=time)
solver.writeFile(newBSFfile,coefMat=1)
def reShape(rawBSFfile=None, newBSFfile=None, finalShape=None):
print('Function reShape is to change the size of the bsplineFourier matrix (size of control point matrix).')
print('Inputs of this function are: path+name of rawBSFfile, path+name of newBSFfile, desired shape (finalShape: [x,y,z,ft,uvw]).')
solver=bfSolver.bfSolver()
solver.bsFourier=BsplineFourier.BsplineFourier(rawBSFfile)
try:
solver.bsFourier=solver.bsFourier.reshape(finalShape)
solver.bsFourier.writeCoef(newBSFfile)
except:
print('Error: please input correct finalShape [x,y,z,ft,uvw]')
def vtk2img(VTKfile=None, savePath=None, lazySnapTime=None):
print('Function vtk2img is to convert 3D VTK to 2D slice image (png).')
print('Inputs of this function are: path+name of VTKfile, saving folder path of image (savePath), lazySnapTime to get lazySnap APP format png.')
os.makedirs(savePath, exist_ok=True)
img=medImgProc.imread(VTKfile)
img.imwrite2D(savePath,axes=['y','x'])
if lazySnapTime is not None:
currentDim=img.dim[:]
axes=['z','y','x']
transposeIndex,currentDim=image.arrangeDim(currentDim,axes,False)
data=img.data.transpose(transposeIndex)
zSlice=data.shape[0]
for m in range(zSlice):
imageio.imwrite(os.path.normpath(savePath+'time{1:0>3d}/slice{0:0>3d}time{1:0>3d}'.format(m+1,lazySnapTime)+'.png'),data[m])
# vtkPath='D:\\4.Cardiac motion tracking\\v{0:d}\\3DUS\\VTK\\'.format(case)
# timeNum=timeNumDict[case]
# #timeNum=17
# for i in range(timeNum):
# vtkName=vtkPath+'VTK{:0>2d}'.format(i)
# img=medImgProc.imread(vtkName+'.vtk')
# img.imwrite2D(vtkName,axes=['y','x'])
def lazySnapImg(rawFilePath=None, sliceNum=None, savePath=None):
print('Function lazySnapImg is to convert imageio generated image to lazySnap format image.')
print('Inputs of this function are: path of raw images (rawFilePath), number of image slices (sliceNum), saving folder path of image (savePath).')
os.makedirs(savePath, exist_ok=True)
for m in range(sliceNum):
temp = imageio.imread(os.path.normpath(rawFilePath+'/z{:d}'.format(m)+'.png'))
imageio.imwrite(os.path.normpath(savePath+'/slice{:0>3d}time{:s}'.format(m+1,savePath[-3:])+'.png'),temp)
# vtkPath='D:\\4.Cardiac motion tracking\\v{0:d}\\3DUS\\VTK\\'.format(case)
# #timeNum=timeNumDict[case]
# timeNum=1
# for n in range(timeNum): #time points for lazySnap, time+1
# vtkName=vtkPath+'VTK{:0>2d}'.format(n)
# savePath=vtkPath+'/time{:0>3d}'.format(n+1)
# os.makedirs(savePath, exist_ok=True)
# img=medImgProc.imread(vtkName+'.vtk')
# currentDim=img.dim[:]
# axes=['z','y','x']
# transposeIndex,currentDim=image.arrangeDim(currentDim,axes,False)
# data=img.data.transpose(transposeIndex)
# zSlice=data.shape[0]
# for m in range(zSlice):
# imageio.imwrite(os.path.normpath(savePath+'/slice{:0>3d}time{:0>3d}'.format(m+1,n+1)+'.png'),data[m])
def samplePointsFromVTK(BSFfile=None,VTKfile=None,savePath=None,dimlen=None,scale=1.,spacingDivision=5):
print('Function samplePointsFromVTK is to sample points from myocardium VTK.')
vtkData=medImgProc.imread(VTKfile)
vtkData.dim=['z','y','x']
vtkData.dimlen=dimlen
vtkData.rearrangeDim(['x','y','z'])
solver=bfSolver.bfSolver()
solver.bsFourier=BsplineFourier.BsplineFourier(BSFfile)
# spacingDivision may be passed as a scalar or a sequence; normalize before indexing
if np.isscalar(spacingDivision):
spacingDivision=[spacingDivision]
samplepts1=solver.bsFourier.samplePoints(spacingDivision=spacingDivision[0])
wall=[]
for m in range(len(samplepts1)):
if vtkData.getData(samplepts1[m])>=0.5:
wall.append(samplepts1[m].copy())
sampleCoord=wall.copy()
np.savetxt(savePath,sampleCoord,fmt='%.8f',delimiter=' ')
def meshVolumeError(meshVolumeFile=None):
print('Function meshVolumeError is to calculate RMS error of mesh volume over cycle.')
print('Inputs of this function are: path+name of meshVolumeFile.')
meshVolume=np.loadtxt(meshVolumeFile,delimiter=' ')
totalVolume=np.sum(meshVolume,axis=0)
print('totalVolume: ',totalVolume)
rmsError=0
for i in range(len(meshVolume)):
volume=meshVolume[i,:]
#meanV=np.mean(volume)
squaredError=0.0
for j in range(1,len(volume)):
squaredError+=(volume[j]-volume[0])**2
rmsError+=np.sqrt(squaredError/(len(volume)-1))
print('mesh volume rms error: ',rmsError,' ; divided by mesh number: ', rmsError/len(meshVolume))
return (rmsError, totalVolume)
def divFree(funcHelp=False):
if funcHelp:
print('Function divFree is to calculate divergence value.')
print('unfinish~~~~~~')
def imgScaling(funcHelp=False):
if funcHelp:
print('Function imgScaling is to scale image.')
print('unfinish~~~~~~')
def vtkSampling(funcHelp=False):
if funcHelp:
print('Function vtkSampling is to sample points from 3D VTK.')
print('unfinish~~~~~~')
``` |
{
"source": "jorsanpe/placa",
"score": 3
} |
#### File: jorsanpe/placa/python.py
```python
import pprint
from pathlib import Path
from collections import Counter
import ast
import astunparse
def _analyze(payload, characters, symbols):
lines = astunparse.unparse(ast.parse(payload)).split('\n')
for line in lines:
for char in line:
if char != ' ':
if char.isalpha():
characters[char] += 1
else:
symbols[char] += 1
def analyze(directories):
characters = Counter()
symbols = Counter()
for directory in directories:
for path in Path(directory).rglob('*.py'):
with open(path, 'r') as stream:
payload = stream.read()
_analyze(payload, characters, symbols)
print(pprint.pformat(symbols))
print(pprint.pformat(characters))
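# Example call (directory names are hypothetical):
#   analyze(['src/', 'tests/'])
# prints two Counters: symbol frequencies first, then letter frequencies.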
``` |
{
"source": "jorsanpe/schedbot",
"score": 3
} |
#### File: schedbot/test/add_task_test.py
```python
import unittest
from mock import patch, MagicMock
from add_task import AddTask
from user_repository import UserRepository
from task_repository import TaskRepository
from user import User
from task import Task
from datetime import datetime
YESTERDAY = datetime(2016, 2, 8)
TODAY = datetime(2016, 2, 9)
TOMORROW = datetime(2016, 2, 10)
LAST_WEEK = datetime(2016, 2, 2)
NEXT_WEEK = datetime(2016, 2, 16)
LAST_MONTH = datetime(2016, 1, 9)
NEXT_MONTH = datetime(2016, 3, 9)
class AddTaskTest(unittest.TestCase):
def setUp(self):
self.user_repo = UserRepository()
self.task_repo = TaskRepository()
def test_add_task_should_deliver_error_if_user_is_not_found(self):
self.user_repo.find_one = MagicMock(return_value=None)
add_task = AddTask(self.user_repo, self.task_repo)
response = add_task({'user': 126})
self.assertEqual('fail', response['status'])
@patch('task.datetime')
def test_should_add_new_task_to_user_task_list_and_return_scheduled_task_list(self, mock_dt):
request = {
'user': 126,
'code': 'add_task',
'data': {
'title': 'New Task',
}
}
mock_dt.now.return_value = TODAY
self.user_repo.find_one = MagicMock(return_value=User(id=1))
self.task_repo.tasks_for = MagicMock(return_value=[Task(**request['data'])])
add_task = AddTask(self.user_repo, self.task_repo)
response = add_task(request)
self.assertEqual('success', response['status'])
self.assertEqual([{
'title': 'New Task',
'active': True,
'creation': str(TODAY)
}], response['data'])
```
#### File: schedbot/test/mongo_client_test.py
```python
import unittest
from mock import patch
from mongo_collection import MongoCollection
class MongoClientTest(unittest.TestCase):
def test_(self):
pass
```
#### File: schedbot/test/task_test.py
```python
import unittest
from mock import patch
from datetime import datetime
from task import Task
class TaskTest(unittest.TestCase):
def test_task_as_dict(self):
task = Task(title='a task', start=datetime(2016, 1, 1), creation=datetime(2016, 1, 1))
self.assertEqual({
'title': 'a task',
'start': '2016-01-01 00:00:00',
'creation': '2016-01-01 00:00:00',
'active': True,
}, task.as_dict())
@patch('task.datetime')
def test_task_that_has_started_is_active(self, mock_dt):
task = Task(start=datetime(2016, 1, 1))
mock_dt.now.return_value = datetime(2016, 2, 2)
self.assertTrue(task.is_active())
@patch('task.datetime')
def test_task_that_has_not_started_is_not_active(self, mock_dt):
task = Task(start=datetime(2016, 1, 2))
mock_dt.now.return_value = datetime(2016, 1, 1)
self.assertFalse(task.is_active())
@patch('task.datetime')
def test_task_in_daily_range_is_active(self, mock_dt):
task = Task(start=datetime(2016, 1, 1),
daily={'start': datetime(2016, 1, 1, 8, 0), 'end': datetime(2016, 1, 1, 16, 0)})
mock_dt.now.return_value = datetime(2016, 2, 2, 10, 0)
self.assertTrue(task.is_active())
@patch('task.datetime')
def test_task_outside_daily_range_is_not_active(self, mock_dt):
task = Task(start=datetime(2016, 1, 1),
daily={'start': datetime(2016, 1, 1, 8, 0), 'end': datetime(2016, 1, 1, 16, 0)})
mock_dt.now.return_value = datetime(2016, 2, 2, 7, 0)
self.assertFalse(task.is_active())
mock_dt.now.return_value = datetime(2016, 2, 2, 17, 0)
self.assertFalse(task.is_active())
```
#### File: schedbot/test/user_repository_mongo_test.py
```python
import unittest
from mock import Mock
from user import User
from user_repository_mongo import UserRepositoryMongo
from user_repository_test import UserRepositoryTest
class UserRepsitoryMongoTest(UserRepositoryTest):
def setUp(self):
super(UserRepsitoryMongoTest, self).setUp()
self.mongo_collection = Mock()
self.task_repository = UserRepositoryMongo(self.mongo_collection)
def test_user_repository_should_assign_unique_id_after_adding_user(self):
self.mongo_collection.submit_item.return_value = 10
super(UserRepsitoryMongoTest, self).test_user_repository_should_assign_unique_id_after_adding_user()
def test_user_repository_should_find_user_by_id_after_adding_user(self):
self.mongo_collection.submit_item.return_value = 10
self.mongo_collection.query_single.return_value = User(id=10)
super(UserRepsitoryMongoTest, self).test_user_repository_should_find_user_by_id_after_adding_user()
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jorseph/zenbo_scratch_service",
"score": 4
} |
#### File: jorseph/zenbo_scratch_service/webserver.py
```python
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import SocketServer
import requests
import urlparse
g_ip = '192.168.0.1'
g_port = ':8080'
class S(BaseHTTPRequestHandler):
def _set_headers(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_GET(self):
self._set_headers()
self.wfile.write("<html><body><h1>hi!</h1></body></html>")
o = urlparse.urlparse(self.path)
urlparse.parse_qs(o.query)
#print("urlparse = %s" % (self.path))
#print("urlparse.url = %s" % (o.geturl()))
#print("urlparse = %s" % (urlparse.parse_qs(o.query)))
payload = {'extension': 'advance', 'name': 'Get_sentences'}
r = requests.get('http://' + g_ip + g_port, params = urlparse.parse_qs(o.query))
print("url = %s" % (r.url))
def do_HEAD(self):
self._set_headers()
def do_POST(self):
# Doesn't do anything with posted data
self._set_headers()
self.wfile.write("<html><body><h1>POST!</h1></body></html>")
def end_headers (self):
self.send_header('Access-Control-Allow-Origin', '*')
def run(server_class=HTTPServer, handler_class=S, port=8080):
server_address = ('127.0.0.1', port)
#global g_port
#g_port = ':8080'
httpd = server_class(server_address, handler_class)
print 'Starting httpd...'
httpd.serve_forever()
def setIP(ip):
global g_ip
g_ip = ip
run()
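# Example (robot address is hypothetical): setIP('192.168.0.42') points the
# forwarding target used by do_GET at the robot; the local bridge itself
# listens on 127.0.0.1:8080 by default.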
if __name__ == "__main__":
from sys import argv
if len(argv) == 2:
run(port=int(argv[1]))
else:
run()
``` |
{
"source": "jorses/tfg",
"score": 3
} |
#### File: tournament_poller/polls/charts.py
```python
def getchart1(request):
import random
import django
import datetime
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.dates import DateFormatter
fig = Figure()
ax = fig.add_subplot(111)
x = []
y = []
now = datetime.datetime.now()
delta = datetime.timedelta(days=1)
for i in range(10):
x.append(now)
now += delta
y.append(random.randint(0, 1000))
ax.plot_date(x, y, '-')
ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d'))
fig.autofmt_xdate()
canvas = FigureCanvas(fig)
response = django.http.HttpResponse(content_type='image/png')
canvas.print_png(response)
return response
def getchart2(request):
import random
import django
import datetime
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.dates import DateFormatter
fig = Figure()
ax = fig.add_subplot(111)
x = []
y = []
now = datetime.datetime.now()
delta = datetime.timedelta(days=1)
for i in range(10):
x.append(now)
now += delta
y.append(random.randint(0, 1000))
ax.plot_date(x, y, '-')
ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d'))
fig.autofmt_xdate()
canvas = FigureCanvas(fig)
response = django.http.HttpResponse(content_type='image/png')
canvas.print_png(response)
return response
def getchart3(request):
import random
import django
import datetime
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.dates import DateFormatter
fig = Figure()
ax = fig.add_subplot(111)
x = []
y = []
now = datetime.datetime.now()
delta = datetime.timedelta(days=1)
for i in range(10):
x.append(now)
now += delta
y.append(random.randint(0, 1000))
ax.plot_date(x, y, '-')
ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d'))
fig.autofmt_xdate()
canvas = FigureCanvas(fig)
response = django.http.HttpResponse(content_type='image/png')
canvas.print_png(response)
return response
def getchart4(request):
import random
import django
import datetime
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.dates import DateFormatter
fig = Figure()
ax = fig.add_subplot(111)
x = []
y = []
now = datetime.datetime.now()
delta = datetime.timedelta(days=1)
for i in range(10):
x.append(now)
now += delta
y.append(random.randint(0, 1000))
ax.plot_date(x, y, '-')
ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d'))
fig.autofmt_xdate()
canvas = FigureCanvas(fig)
response = django.http.HttpResponse(content_type='image/png')
canvas.print_png(response)
return response
def getchart5(request):
import random
import django
import datetime
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.dates import DateFormatter
fig = Figure()
ax = fig.add_subplot(111)
x = []
y = []
now = datetime.datetime.now()
delta = datetime.timedelta(days=1)
for i in range(10):
x.append(now)
now += delta
y.append(random.randint(0, 1000))
ax.plot_date(x, y, '-')
ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d'))
fig.autofmt_xdate()
canvas = FigureCanvas(fig)
response = django.http.HttpResponse(content_type='image/png')
canvas.print_png(response)
return response
def getchart6(request):
import random
import django
import datetime
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.dates import DateFormatter
fig = Figure()
ax = fig.add_subplot(111)
x = []
y = []
now = datetime.datetime.now()
delta = datetime.timedelta(days=1)
for i in range(10):
x.append(now)
now += delta
y.append(random.randint(0, 1000))
ax.plot_date(x, y, '-')
ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d'))
fig.autofmt_xdate()
canvas = FigureCanvas(fig)
response = django.http.HttpResponse(content_type='image/png')
canvas.print_png(response)
return response
def getchart7(request):
import random
import django
import datetime
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.dates import DateFormatter
fig = Figure()
ax = fig.add_subplot(111)
x = []
y = []
now = datetime.datetime.now()
delta = datetime.timedelta(days=1)
for i in range(10):
x.append(now)
now += delta
y.append(random.randint(0, 1000))
ax.plot_date(x, y, '-')
ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d'))
fig.autofmt_xdate()
canvas = FigureCanvas(fig)
response = django.http.HttpResponse(content_type='image/png')
canvas.print_png(response)
return response
def getchart8(request):
import random
import django
import datetime
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.dates import DateFormatter
fig = Figure()
ax = fig.add_subplot(111)
x = []
y = []
now = datetime.datetime.now()
delta = datetime.timedelta(days=1)
for i in range(10):
x.append(now)
now += delta
y.append(random.randint(0, 1000))
ax.plot_date(x, y, '-')
ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d'))
fig.autofmt_xdate()
canvas = FigureCanvas(fig)
response = django.http.HttpResponse(content_type='image/png')
canvas.print_png(response)
return response
def getchart9(request):
import random
import django
import datetime
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.dates import DateFormatter
fig = Figure()
ax = fig.add_subplot(111)
x = []
y = []
now = datetime.datetime.now()
delta = datetime.timedelta(days=1)
for i in range(10):
x.append(now)
now += delta
y.append(random.randint(0, 1000))
ax.plot_date(x, y, '-')
ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d'))
fig.autofmt_xdate()
canvas = FigureCanvas(fig)
response = django.http.HttpResponse(content_type='image/png')
canvas.print_png(response)
return response
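# Hedged wiring sketch (Django >= 2.0 and the URL names are assumptions, not
# part of this repo):
#   from django.urls import path
#   urlpatterns = [path('chart1.png', getchart1), path('chart2.png', getchart2)]
# Each view returns an HttpResponse with content_type='image/png'.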
SCORE = {
"1": 0,
"2": 0,
"3": 0,
"4": 0,
"5": 0,
"6": 0,
"7": 0,
"8": 0,
}
def score(request):
return
``` |
{
"source": "jorshi/audio_gan",
"score": 3
} |
#### File: audio_gan/evaluation/create_evaluation_data.py
```python
import os
import sys
import argparse
import numpy as np
import tensorflow as tf
sys.path.append('../')
import wave_gan_resize
custom_objects = {
"Resize": wave_gan_resize.Resize
}
def load_model(model):
"""
Load a TensorFlow Keras model
"""
generator = tf.keras.models.load_model(model, custom_objects=custom_objects)
return generator
def generate_samples(model, num_samples, latent):
"""
Generate a set of samples from model and return a numpy array
"""
noise = tf.random.normal([num_samples, latent])
samples = model(noise)[:, :, 0]
return np.array(samples)
def main(arguments):
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('model', help="Model to use to render dataset", type=str)
parser.add_argument('num_samples', help="Number of samples to generate from model", type=int)
parser.add_argument('output', help="Output file", type=str)
parser.add_argument('-l', '--latent', help="Size of the latent space", default=100, type=int)
args = parser.parse_args(arguments)
generator = load_model(args.model)
samples = generate_samples(generator, args.num_samples, args.latent)
np.save(args.output, samples)
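# Example invocation (paths are hypothetical):
#   python create_evaluation_data.py saved_generator 1000 eval_samples.npy -l 100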
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
```
#### File: jorshi/audio_gan/wave_gan_resize.py
```python
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras.engine.input_spec import InputSpec
class Resize(tf.keras.layers.Layer):
def __init__(self, size=2, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR, **kwargs):
super(Resize, self).__init__(**kwargs)
self.size = int(size)
self.input_spec = InputSpec(ndim=3)
self.method = method
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
size = self.size * input_shape[1] if input_shape[1] is not None else None
return tensor_shape.TensorShape([input_shape[0], size, input_shape[2]])
def call(self, inputs):
x = inputs
x_shape = x.shape.as_list()
x = tf.expand_dims(x, axis=3)
x = tf.image.resize(x, [x_shape[1] * self.size, x_shape[2]], method=self.method)
x = x[:, :, :, 0]
return x
def get_config(self):
config = {'size': self.size, 'method': self.method}
base_config = super(Resize, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
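# Usage sketch (assumes a TF 2.x eager context): Resize upsamples the time axis
# of a (batch, time, channels) tensor by `size` via tf.image.resize on a
# temporarily expanded 4D view, e.g.
#   x = tf.random.normal([4, 16, 1024])   # (batch, time, channels)
#   y = Resize(size=4)(x)                 # -> shape (4, 64, 1024)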
def make_generator_model(latent_size, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR, normalization=True,
dropout=0.0):
"""
Create the WaveGAN generator
:return: Sequential Model
"""
model = tf.keras.Sequential()
model.add(layers.Dense(16 * 1024, use_bias=False, input_shape=(latent_size,)))
if normalization:
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
if dropout > 0.0:
model.add(layers.Dropout(dropout))
model.add(layers.Reshape((16, 1024)))
assert model.output_shape == (None, 16, 1024) # Note: None is the batch size
model.add(Resize(size=4, method=method))
model.add(layers.Conv1D(512, 25, strides=1, padding='same', use_bias=False))
assert model.output_shape == (None, 64, 512)
if normalization:
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
if dropout > 0.0:
model.add(layers.Dropout(dropout))
model.add(Resize(size=4, method=method))
model.add(layers.Conv1D(256, 25, strides=1, padding='same', use_bias=False))
assert model.output_shape == (None, 256, 256)
if normalization:
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
if dropout > 0.0:
model.add(layers.Dropout(dropout))
model.add(Resize(size=4, method=method))
model.add(layers.Conv1D(128, 25, strides=1, padding='same', use_bias=False))
assert model.output_shape == (None, 1024, 128)
if normalization:
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
if dropout > 0.0:
model.add(layers.Dropout(dropout))
model.add(Resize(size=4, method=method))
model.add(layers.Conv1D(64, 25, strides=1, padding='same', use_bias=False))
assert model.output_shape == (None, 4096, 64)
if normalization:
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
if dropout > 0.0:
model.add(layers.Dropout(dropout))
model.add(Resize(size=4, method=method))
model.add(layers.Conv1D(1, 25, strides=1, padding='same', use_bias=False, activation='tanh'))
assert model.output_shape == (None, 16384, 1)
return model
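# Usage sketch: make_generator_model(100)(tf.random.normal([1, 100])) yields a
# (1, 16384, 1) tensor of audio samples in [-1, 1] (tanh output layer).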
def make_discriminator_model():
"""
Create the WaveGAN discriminator
:return: Sequential Model
"""
model = tf.keras.Sequential()
model.add(layers.Conv1D(64, 25, 4, padding='same', input_shape=[16384, 1]))
assert (model.output_shape == (None, 4096, 64))
model.add(layers.LeakyReLU())
model.add(layers.Dropout(0.3))
model.add(layers.Conv1D(128, 25, 4, padding='same'))
assert (model.output_shape == (None, 1024, 128))
model.add(layers.LeakyReLU())
model.add(layers.Dropout(0.3))
model.add(layers.Conv1D(256, 25, 4, padding='same'))
assert (model.output_shape == (None, 256, 256))
model.add(layers.LeakyReLU())
model.add(layers.Dropout(0.3))
model.add(layers.Conv1D(512, 25, 4, padding='same'))
assert (model.output_shape == (None, 64, 512))
model.add(layers.LeakyReLU())
model.add(layers.Dropout(0.3))
model.add(layers.Conv1D(1024, 25, 4, padding='same'))
assert (model.output_shape == (None, 16, 1024))
model.add(layers.LeakyReLU())
model.add(layers.Dropout(0.3))
model.add(layers.Flatten())
model.add(layers.Dense(1))
return model
``` |
{
"source": "jorshi/uvic-music-extractor",
"score": 3
} |
#### File: src/uvic_music_extractor/extractors.py
```python
from abc import ABC, abstractmethod
import math
import numpy as np
from scipy.stats import norm, linregress
import essentia
import essentia.standard as es
import uvic_music_extractor.utils as utils
class ExtractorBase(ABC):
"""
Base class for audio feature extractors
:param sample_rate (int): rate to run extraction at
:param pooling (bool): indicates whether results of this extractor are summarized
over time using pooling.
:param stats (list): stats to run during pooling aggregation (if used).
"""
def __init__(self, sample_rate: float, pooling: bool = False, stats: list = None):
self.sample_rate = sample_rate
self.pooling = pooling
self.feature_names = []
self.stats = stats if stats is not None else ["mean", "stdev"]
@abstractmethod
def __call__(self, audio: np.ndarray):
"""
Abstract method -- must be implemented in inheriting classes
:param audio (np.ndarray): input audio to run feature extraction on
:return:
"""
pass
def get_headers(self, join="."):
"""
Get a list of the features combined with aggregation
:return: list
"""
if not self.pooling:
return self.feature_names
headers = []
for feature in self.feature_names:
for stat in self.stats:
headers.append("{}{}{}".format(feature, join, stat))
return headers
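# e.g. a pooled extractor with feature_names = ["spectral_centroid"] and
# stats = ["mean", "stdev"] reports headers
# ["spectral_centroid.mean", "spectral_centroid.stdev"].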
class Spectral(ExtractorBase):
"""
Spectral audio feature extraction.
:param sample_rate (int): rate to run extraction at
:param frame_size (int): size of frame to use for spectral processing
:param stats (list): stats to run during pooling aggregation (time summarization of
spectral results)
"""
def __init__(
self, sample_rate: float,
frame_size: float = 2048,
stats: list = None
):
super().__init__(sample_rate, pooling=True, stats=stats)
self.frame_size = frame_size
self.feature_names = [
"spectral_centroid",
"spectral_spread",
"spectral_skewness",
"spectral_kurtosis",
"spectral_flatness",
"spectral_entropy",
"rolloff_85",
"rolloff_95",
"harsh",
"energy_lf",
"dissonance",
"inharmonicity"
]
def __call__(self, audio: np.ndarray):
"""
Run audio
:param audio (np.ndarray): input audio
:return: feature matrix
"""
# Pooling for summarizing results over time
pool = essentia.Pool()
pool_agg = es.PoolAggregator(defaultStats=self.stats)
window = es.Windowing(type="hann", size=self.frame_size)
spectrum = es.Spectrum()
# Spectral feature extractors
centroid = es.Centroid(range=self.sample_rate/2)
central_moments = es.CentralMoments(range=self.sample_rate/2)
dist_shape = es.DistributionShape()
flatness = es.Flatness()
entropy = es.Entropy()
energy_band_harsh = es.EnergyBandRatio(sampleRate=self.sample_rate,
startFrequency=2000,
stopFrequency=5000)
energy_band_low = es.EnergyBandRatio(sampleRate=self.sample_rate,
startFrequency=20,
stopFrequency=80)
rolloff_85 = es.RollOff(cutoff=0.85, sampleRate=self.sample_rate)
rolloff_95 = es.RollOff(cutoff=0.95, sampleRate=self.sample_rate)
# Extractors for calculating dissonance and inharmonicity
peaks = es.SpectralPeaks()
dissonance = es.Dissonance()
pitch_yin = es.PitchYinFFT(frameSize=self.frame_size,
sampleRate=self.sample_rate)
harmonic_peaks = es.HarmonicPeaks()
inharmonicity = es.Inharmonicity()
# Frame-by-frame computation
for frame in es.FrameGenerator(audio, self.frame_size, self.frame_size // 2):
# Window frame and compute spectrum
win = window(frame)
spec = spectrum(win)
# Spectral feature extraction
sc = centroid(spec)
moments = central_moments(spec)
spread, skewness, kurtosis = dist_shape(moments)
spectral_flatness = flatness(spec)
spectral_entropy = entropy(spec)
harsh = energy_band_harsh(spec)
energy_lf = energy_band_low(spec)
roll85 = rolloff_85(spec)
roll95 = rolloff_95(spec)
# Spectral Peaks
peak_freqs, peak_mags = peaks(spec)
# Remove DC bin peak if it is present
if peak_freqs[0] == 0:
peak_freqs = peak_freqs[1:]
peak_mags = peak_mags[1:]
# Calculate dissonance and inharmonicity from peaks
dissonance_val = dissonance(peak_freqs, peak_mags)
pitch, _ = pitch_yin(spec)
harm_freqs, harm_mags = harmonic_peaks(peak_freqs, peak_mags, pitch)
inharm = inharmonicity(harm_freqs, harm_mags)
# Add to pool for summarization
keys = self.feature_names
pool.add(keys[0], sc)
pool.add(keys[1], spread)
pool.add(keys[2], skewness)
pool.add(keys[3], kurtosis)
pool.add(keys[4], spectral_flatness)
pool.add(keys[5], spectral_entropy)
pool.add(keys[6], roll85)
pool.add(keys[7], roll95)
pool.add(keys[8], harsh)
pool.add(keys[9], energy_lf)
pool.add(keys[10], dissonance_val)
pool.add(keys[11], inharm)
stats = pool_agg(pool)
results = [stats[feature] for feature in self.get_headers()]
return results
class CrestFactor(ExtractorBase):
"""
Crest Factor Extractor
Peak-to-average ratio where peak is the the maximum amplitude level and
average is the RMS value.
https://en.wikipedia.org/wiki/Crest_factor
:param sample_rate (int): rate to run extraction at
:param frame_size (int): size of frame to use
:param stats (list): stats to run during pooling aggregation (time summarization)
"""
def __init__(
self,
sample_rate: float,
frame_size: float = None,
stats: list = None
):
super().__init__(sample_rate, pooling=frame_size is not None, stats=stats)
self.frame_size = frame_size
self.feature_names = ["crest_factor"]
def __call__(self, audio: np.ndarray):
"""
Run crest factor audio feature extraction
:param audio: Input audio samples
:return: feature matrix
"""
rms = es.RMS()
minimum = es.MinMax(type='min')
maximum = es.MinMax(type='max')
if self.frame_size:
pool = essentia.Pool()
pool_agg = es.PoolAggregator(defaultStats=self.stats)
for frame in es.FrameGenerator(audio, self.frame_size, self.frame_size):
frame_rms = rms(frame)
frame_peak_min = minimum(frame)[0]
frame_peak_max = maximum(frame)[0]
frame_peak = max(abs(frame_peak_min), abs(frame_peak_max))
frame_crest = frame_peak / frame_rms
pool.add('crest_factor', frame_crest)
stats = pool_agg(pool)
crest_factor = [stats['crest_factor.{}'.format(stat)] for stat in self.stats]
else:
full_rms = rms(audio)
full_peak_min = minimum(audio)[0]
full_peak_max = maximum(audio)[0]
full_peak = max(abs(full_peak_min), abs(full_peak_max))
crest_factor = [full_peak / full_rms]
return crest_factor
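# Reference point: a full-scale sine wave has a crest factor of sqrt(2)
# (~1.414, i.e. ~3 dB), while heavily limited material approaches 1.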
class Loudness(ExtractorBase):
"""
Loudness Features
Loudness Range
--------------
Loudness range is computed from short-term loudness values. It is defined as the
difference between the estimates of the 10th and 95th percentiles of the
distribution of the loudness values with applied gating. See Essentia documentation
for more information: https://essentia.upf.edu/reference/std_LoudnessEBUR128.html
EBU Tech Doc 3342-2011. "Loudness Range: A measure to supplement loudness
normalisation in accordance with EBU R 128"
LDR_95, LDR_max, peak-to-loudness
--------------------------------
LDR is a measurement of microdynamics. It is computed by taking the difference
between loudness measurements using a fast integration time and a slow integration
    time, then computing the maximum or 95th percentile value from those results.
Peak-to-loudness is computed by taking the ratio between the true peak amplitude
and the overall loudness.
<NAME>. "Measures of microdynamics." Audio Engineering Society
Convention 137. Audio Engineering Society, 2014.
top1db
------
Ratio of audio samples in the range [-1dB, 0dB]
<NAME>, et al. "Production effect: audio features for recording
techniques description and decade prediction." 2011.
:param sample_rate (int): rate to run extraction at
"""
def __init__(self, sample_rate: float):
super().__init__(sample_rate, pooling=False, stats=None)
self.feature_names = [
"loudness_range",
"microdynamics_95%",
"microdynamics_100%",
"peak_to_loudness",
"top1db"
]
def __call__(self, audio: np.ndarray):
"""
Run loudness / dynamics feature extraction
:param audio: Input audio samples
:return: feature matrix
"""
loudness = es.LoudnessEBUR128(startAtZero=True, sampleRate=self.sample_rate)
loudness_stats = loudness(audio)
loudness_range = loudness_stats[3]
# Micro dynamics (LDR)
micro_dynamics = loudness_stats[0] - loudness_stats[1]
ldr_95 = np.percentile(micro_dynamics, 95.0)
ldr_max = micro_dynamics.max()
# True peak detection for peak to loudness calculation
true_peak_detector = es.TruePeakDetector(sampleRate=self.sample_rate)
true_peak_audio_l = true_peak_detector(audio[:, 0])[1]
true_peak_l = 20 * math.log10(true_peak_audio_l.max())
true_peak_audio_r = true_peak_detector(audio[:, 1])[1]
true_peak_r = 20 * math.log10(true_peak_audio_r.max())
# True peak to loudness
true_peak = max(true_peak_l, true_peak_r)
peak_to_loudness = true_peak / loudness_stats[2]
# Top 1 dB (ratio of samples in the top 1dB)
top_1db_gain = math.pow(10, -1.0 / 20.0)
top_1db_l = (true_peak_audio_l > top_1db_gain).sum()
        top_1db_r = (true_peak_audio_r > top_1db_gain).sum()
top1db = (top_1db_l + top_1db_r) / (len(true_peak_audio_l) + len(true_peak_audio_r))
return [loudness_range, ldr_95, ldr_max, peak_to_loudness, top1db]
class DynamicSpread(ExtractorBase):
"""
    Dynamic Spread Feature Extractor. Measures the spread of loudness across the
    audio file: the Vickers loudness is computed frame by frame, and the average
    absolute difference between each frame's loudness and the mean loudness of
    the entire track is reported.
<NAME>. "Automatic long-term loudness and dynamics matching." Audio
Engineering Society Convention 111. Audio Engineering Society, 2001.
:param sample_rate (int): rate to run extraction at
:param frame_size (int): size of frame to use. Defaults to 2048.
"""
def __init__(
self,
sample_rate: float,
frame_size: float = 2048,
):
super().__init__(sample_rate, pooling=False, stats=None)
self.frame_size = frame_size
self.feature_names = ["dynamic_spread"]
def __call__(self, audio: np.ndarray):
"""
Run loudness feature extraction
:param audio: Input audio samples
:return: feature matrix
"""
vickers_loudness = es.LoudnessVickers()
pool = essentia.Pool()
pool_agg = es.PoolAggregator(defaultStats=['mean'])
# Calculate the Vickers loudness frame by frame
for frame in es.FrameGenerator(audio, self.frame_size, self.frame_size):
frame_loudness = vickers_loudness(frame)
pool.add('vdb', frame_loudness)
# Compute the average loudness across frames
stats = pool_agg(pool)
vickers_mean = stats['vdb.mean']
# Compute the difference between loudness at each frame and the mean loudness
dynamic_spread = 0.0
for vdb in pool['vdb']:
dynamic_spread += abs(vdb - vickers_mean)
dynamic_spread /= len(pool['vdb'])
return [dynamic_spread]
class Distortion(ExtractorBase):
"""
Set of distortion features -- computes a probability density function on audio
samples using a histogram with 1001 bins. Several statistics are computed on the
resulting pdf including the centroid, spread, skewness, kurtosis, flatness, and
    the 'gauss' feature. 'Gauss' is a measurement of the Gaussian fit of the pdf.
Wilson, Alex, and <NAME>. "Characterisation of distortion profiles in
relation to audio quality." Proc. of the 17th Int. Conference on Digital Audio
Effects (DAFx-14). 2014.
<NAME>., and <NAME>. "Perception & evaluation of audio quality in
music production." Proc. of the 16th Int. Conference on Digital Audio Effects
(DAFx-13). 2013.
:param sample_rate (int): rate to run extraction at
"""
def __init__(self, sample_rate: float):
super().__init__(sample_rate, pooling=False, stats=None)
self.feature_names = [
"pmf_centroid",
"pmf_spread",
"pmf_skewness",
"pmf_kurtosis",
"pmf_flatness",
"pmf_gauss"
]
def __call__(self, audio: np.ndarray):
"""
Run distortion feature extraction
:param audio: Input audio samples
:return: feature matrix
"""
# Compute PDF of audio sample amplitudes
hist, edges = np.histogram(audio, bins=1001, range=(-1.0, 1.0), density=True)
hist = np.array(hist, dtype=np.float32)
# Analysis of PDF shape
centroid_calc = es.Centroid()
centroid = centroid_calc(hist)
central_moments = es.CentralMoments()
shape = es.DistributionShape()
cm = central_moments(hist)
spread, skewness, kurtosis = shape(cm)
flatness_calc = es.Flatness()
flatness = flatness_calc(hist)
        # Compute r-squared value of the Gaussian fit
mu, std = norm.fit(audio)
gauss = norm.pdf(np.linspace(-1.0, 1.0, 1001), mu, std)
_, _, rvalue, _, _ = linregress(gauss, hist)
r_squared = rvalue ** 2
return [centroid, spread, skewness, kurtosis, flatness, r_squared]
class StereoFeatures(ExtractorBase):
"""
Stereo Feature Extractor: Sides-to-mid ratio and left-right imbalance
<NAME>., et al. "An analysis and evaluation of audio features for multitrack
music mixtures." (2014).
:param sample_rate (int): rate to run extraction at
"""
def __init__(self, sample_rate: float):
super().__init__(sample_rate, pooling=False, stats=None)
self.feature_names = ["side_mid_ratio", "lr_imbalance"]
def __call__(self, audio: np.ndarray):
"""
Run stereo feature extraction
:param audio: Input audio samples
:return: feature matrix
"""
sides = (audio[:, 0] - audio[:, 1]) ** 2
mids = (audio[:, 0] + audio[:, 1]) ** 2
sides_mid_ratio = sides.mean() / mids.mean()
left_power = (audio[:, 0] ** 2).mean()
right_power = (audio[:, 1] ** 2).mean()
lr_imbalance = (right_power - left_power) / (right_power + left_power)
return sides_mid_ratio, lr_imbalance
class PhaseCorrelation(ExtractorBase):
"""
Phase Correlation feature extraction. Calculates the correlation coefficient
    between the left and right channels. If a frame_size of None is passed in then the
calculation is performed on the entire audio signal. Otherwise, frame-by-frame
processing is computed using the frame_size number of samples and the results are
summarized using the passed in stats.
:param sample_rate (float): rate to run extraction at
:param frame_size (int): number of samples per frame for frame-by-frame processing.
If None then computation is performed over the entire input. Defaults to None.
:param stats (list): a list of strings indicating the stats to use during time
summarization. Only applied if frame-by-frame processing is computed.
"""
def __init__(
self,
sample_rate: float,
frame_size: int = None,
stats: list = None
):
super().__init__(sample_rate, pooling=frame_size is not None, stats=stats)
self.frame_size = frame_size
self.feature_names = ["phase_correlation"]
def __call__(self, audio: np.ndarray):
"""
Run phase correlation feature extraction.
:param audio: Input audio samples
:return: feature matrix
"""
if self.frame_size:
max_sample = audio.shape[0]
slice_indices = list(range(0, max_sample, self.frame_size))
slice_indices.append(max_sample)
pool = essentia.Pool()
for i in range(len(slice_indices) - 1):
x1 = slice_indices[i]
x2 = slice_indices[i + 1]
correlation_matrix = np.corrcoef(audio[x1:x2, 0], audio[x1:x2, 1])
phase_correlation = correlation_matrix[0, 1]
pool.add(self.feature_names[0], phase_correlation)
pool_agg = es.PoolAggregator(defaultStats=self.stats)
stats = pool_agg(pool)
phase_correlation = [stats["{}.{}".format(self.feature_names[0], stat)] for stat in self.stats]
else:
correlation_matrix = np.corrcoef(audio[:, 0], audio[:, 1])
phase_correlation = [correlation_matrix[0, 1]]
return phase_correlation
class StereoSpectrum(ExtractorBase):
"""
Stereo Spectrum Features. Panning features computed using spectrums from the left
and right audio channels. Returns features from the entire spectrum as well as
three subbands which include 0-250Hz, 250-2800Hz, and 2800+ Hz.
Tzanetakis, George, <NAME>, and <NAME>. "Stereo Panning Features for
Classifying Recording Production Style." ISMIR. 2007.
"""
def __init__(
self,
sample_rate: float,
frame_size: int = 2048,
hop_size: int = 1024,
stats: list = None
):
super().__init__(sample_rate, pooling=True, stats=stats)
self.frame_size = frame_size
self.hop_size = hop_size
self.low = 250
self.high = 2800
self.feature_names = ["sps_full", "sps_low", "sps_mid", "sps_high"]
def __call__(self, audio: np.ndarray):
"""
Run stereo spectrum feature extraction
:param audio: Input audio samples
:return: feature matrix
"""
# Must be stereo audio
assert audio.shape[1] == 2
# Hanning window
window = np.hanning(self.frame_size)
pool = essentia.Pool()
pool_agg = es.PoolAggregator(defaultStats=self.stats)
# Bin numbers for each filter bank
low_bin = int((self.low / self.sample_rate) * self.frame_size)
assert low_bin <= int(self.frame_size / 2)
high_bin = int((self.high / self.sample_rate) * self.frame_size)
assert high_bin <= int(self.frame_size / 2)
for i in range(0, len(audio), self.hop_size):
# Get the windowed frame for each channel
samples = audio[i:i+self.frame_size, :]
frame_left = np.zeros(self.frame_size)
frame_left[:len(samples)] = samples[:, 0]
frame_right = np.zeros(self.frame_size)
frame_right[:len(samples)] = samples[:, 1]
# Apply window
frame_left *= window
frame_right *= window
X_left = np.fft.rfft(frame_left)
X_right = np.fft.rfft(frame_right)
stereo_spectrum = StereoSpectrum.compute_stereo_spectrum(X_left, X_right)
# Features
full = utils.rms(stereo_spectrum)
low = utils.rms(stereo_spectrum[:low_bin])
mid = utils.rms(stereo_spectrum[low_bin:high_bin])
high = utils.rms(stereo_spectrum[high_bin:])
pool.add(self.feature_names[0], full)
pool.add(self.feature_names[1], low)
pool.add(self.feature_names[2], mid)
pool.add(self.feature_names[3], high)
stats = pool_agg(pool)
results = [stats[feature] for feature in self.get_headers()]
return results
@staticmethod
def compute_stereo_spectrum(spectrum_left, spectrum_right):
"""
Computes the stereo panning features using left and right channel spectrums
:param spectrum_left: magnitude spectrum from the left channel
:param spectrum_right: magnitude spectrum from the right channel
:return: stereo spectrum features
"""
# Update the DC and Nyquist Bins
spectrum_left[0] = np.real(spectrum_left[0]) + 0j
spectrum_left[-1] = np.real(spectrum_left[-1]) + 0j
spectrum_right[0] = np.real(spectrum_right[0]) + 0j
spectrum_right[-1] = np.real(spectrum_right[-1]) + 0j
real_left = np.real(spectrum_left)
imag_left = np.imag(spectrum_left)
real_right = np.real(spectrum_right)
imag_right = np.imag(spectrum_right)
f1 = (real_left * real_right) * (real_left * real_right)
f2 = (imag_left * imag_right) * (imag_left * imag_right)
f3 = (imag_left * real_right) * (imag_left * real_right)
f4 = (imag_right * real_left) * (imag_right * real_left)
nf = np.sqrt(f1 + f2 + f3 + f4)
dfl = real_left * real_left + imag_left * imag_left
dfr = real_right * real_right + imag_right * imag_right
df = dfl + dfr
sign = nf / dfl - nf / dfr
sign[sign > 0] = 1.0
sign[sign < 0] = -1.0
return (1.0 - 2.0 * (nf / df)) * sign
class SpectralFlux(ExtractorBase):
"""
Spectral Flux Features. Performs spectral flux analysis using sub-bands from
an octave spaced filter bank decomposition.
Alluri, Vinoo, and <NAME>. "Exploring perceptual and acoustical
correlates of polyphonic timbre." Music Perception 27.3 (2010): 223-242.
Tzanetakis, George, and <NAME>. "Multifeature audio segmentation for browsing
and annotation." Proceedings of the 1999 IEEE Workshop on Applications of Signal
Processing to Audio and Acoustics. WASPAA'99 (Cat. No. 99TH8452). IEEE, 1999.
"""
def __init__(
self,
sample_rate: float,
frame_size: int = 2048,
num_bands: int = 10,
stats: list = None
):
super().__init__(sample_rate, pooling=True, stats=stats)
self.frame_size = frame_size
self.num_bands = num_bands
self.band_str = "spectral_flux_band_{}"
self.feature_names = [
self.band_str.format(i + 1) for i in range(self.num_bands)
]
def __call__(self, audio: np.ndarray):
"""
Run spectral flux calculations
:param audio: Input audio samples
:return: feature matrix
"""
# Pooling for time summarization
pool = essentia.Pool()
pool_agg = es.PoolAggregator(defaultStats=self.stats)
# Window and spectrum for spectral processing
window = es.Windowing(type="hann", size=self.frame_size)
spectrum = es.Spectrum()
# Apply filter bank to audio signal
sub_band_audio = utils.octave_filter_bank(
audio,
self.sample_rate,
self.num_bands,
50
)
# Filter bank should return the same number of sub-bands as requested
assert len(sub_band_audio) == self.num_bands
# Perform spectral flux analysis on each sub-band
for i in range(len(sub_band_audio)):
sub_band_flux = es.Flux()
pool_key = self.band_str.format(i + 1)
hop_size = int(self.frame_size / 2)
for frame in es.FrameGenerator(sub_band_audio[i], self.frame_size, hop_size):
win = window(frame)
spec = spectrum(win)
flux = sub_band_flux(spec)
pool.add(pool_key, flux)
stats = pool_agg(pool)
results = [stats[feature] for feature in self.get_headers()]
return results
class ZeroCrossingRate(ExtractorBase):
"""
Zero Crossing Rate
"""
def __init__(
self,
sample_rate: float,
frame_size: float = 2048,
stats: list = None
):
super().__init__(sample_rate, pooling=True, stats=stats)
self.frame_size = frame_size
self.feature_names = ["zero_crossing_rate"]
def __call__(self, audio: np.ndarray):
"""
Run Zero-crossing rate feature
:param audio: Input audio samples
:return: feature matrix
"""
zero_crossing_rate = es.ZeroCrossingRate()
pool = essentia.Pool()
pool_agg = es.PoolAggregator(defaultStats=self.stats)
# Run frame-by-frame processing with a one half hop size
for frame in es.FrameGenerator(audio, self.frame_size, self.frame_size // 2):
zcr = zero_crossing_rate(frame)
pool.add(self.feature_names[0], zcr)
stats = pool_agg(pool)
results = [stats[feature] for feature in self.get_headers()]
return results
```
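The extractors above are meant to be instantiated and called directly on an audio buffer. Below is a minimal usage sketch, not part of the original file: it assumes the `uvic_music_extractor.extractors` / `uvic_music_extractor.utils` module layout implied by the file paths, a hypothetical input file `mix.wav`, and that `get_headers()` is provided by the shared `ExtractorBase`.
```python
# Hedged usage sketch; module paths and the input file name are assumptions.
import uvic_music_extractor.extractors as extractors
import uvic_music_extractor.utils as utils

sample_rate = 44100
audio, _ = utils.load_audio("mix.wav", sample_rate, mono=True)

# Frame-by-frame crest factor, summarized over time with mean and stdev
crest = extractors.CrestFactor(sample_rate, frame_size=2048, stats=["mean", "stdev"])
print(dict(zip(crest.get_headers(), crest(audio))))
```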
#### File: src/uvic_music_extractor/utils.py
```python
import os
import numpy as np
from scipy.signal import ellip, sosfilt
import essentia.standard as es
def get_audio_files(location, sort=True):
"""
Search the location provided for audio files
:param location: (str) - path of audio file or directory of files
:param sort: (bool) - return the list of audio files in sorted order, defaults to True
:return: (list) - audio files
"""
# Get list of audio samples - either from a directory or single file
audio_files = []
if os.path.isdir(location):
audio_files = [os.path.abspath(os.path.join(location, f)) for f in os.listdir(location) if f.endswith('.wav')]
elif os.path.isfile(location) and location.endswith('.wav'):
audio_files = [os.path.abspath(location)]
if not audio_files:
raise RuntimeError("Could not find any audio files at location: {}".format(location))
if sort:
audio_files = sorted(audio_files)
return audio_files
def load_audio(path, sample_rate, mono=True):
"""
Load an audio file using Essentia
:param path: (str) location of audio file to load
:param sample_rate: (int) sampling rate to load audio at
:param mono: (bool) convert file to mono, defaults to True
:return: audio samples
"""
# Load audio file
loader = es.AudioLoader(filename=path)
results = loader()
samples = results[0]
orig_rate = results[1]
channels = results[2]
# Make sure we get a mono or stereo audio
if channels > 2:
raise RuntimeError("Can't handle more than two audio channels.")
# If there is only one channel, duplicate the first over to the second.
# Essentia always loads as a stereo audio file and the right channel is
# all zeros in this case. We'll convert to a stereo file for some of the
# processing here such as the Loudness Normalization.
if channels == 1:
samples[:, 1] = samples[:, 0]
# Mix to mono if required
if mono:
samples = mix_to_mono(samples)
# Perform resampling if required
if orig_rate != sample_rate:
resample = es.Resample(inputSampleRate=orig_rate, outputSampleRate=sample_rate)
# Resampling for a stereo audio file
if not mono:
resampled_left = resample(samples[:, 0])
resampled_right = resample(samples[:, 1])
samples = np.array([resampled_left, resampled_right])
samples = samples.T
# Resampling for a mono audio file
else:
samples = resample(samples)
return samples, channels
def mix_to_mono(audio):
"""
Mix an audio file down to mono
:param audio: (np.ndarray) audio samples
    :return: (np.ndarray) mono audio samples
"""
mono_mix = es.MonoMixer()
samples = mono_mix(audio, audio.shape[1])
return samples
def normalize_loudness(audio, sample_rate, lufs=-24):
"""
Normalize input audio to a specified value in LUFS
:param audio: (np.ndarray) audio samples
:param sample_rate: (int) sample rate
:param lufs: (float) loudness goal in LUFS
:return: (np.ndarray) normalized audio samples
"""
# Get the current loudness in LUFS
loudness = es.LoudnessEBUR128(startAtZero=True, sampleRate=sample_rate)
results = loudness(audio)
current_lufs = results[2]
# Amount in dB that the file needs to be adjusted
adjustment = lufs - current_lufs
# Apply adjustment to the audio file
gain = pow(10, adjustment / 20)
normalized = audio * gain
return normalized
def rms(audio: np.ndarray) -> float:
"""
Calculate the RMS level for an array
:param audio: input audio
:return: (float) rms
"""
result = np.mean(audio * audio)
if result != 0.0:
result = np.sqrt(result)
return result
def octave_filter_bank(
audio: np.ndarray,
sample_rate: float,
num_bands: int = 10,
low_band: float = 50,
) -> np.ndarray:
"""
    Split an audio signal into octave bands.
:param audio: input audio
:param sample_rate: audio sampling rate
:param num_bands: number of bands to compute
:param low_band: lowest band to start at (lowpass up to this)
:return: a matrix of audio signals
"""
# Create the lowpass filter
filters = []
sos_low = ellip(2, 3, 60, low_band, btype='lowpass', fs=sample_rate, output='sos')
filters.append(sos_low)
# Calculate the filters for the octave spaced bandpasses
low_freq = low_band
high_freq = low_freq
for i in range(num_bands - 2):
high_freq = low_freq * 2
# Check to make sure that the high band is not above the Nyquist
if high_freq >= sample_rate / 2:
required_rate = (low_band * 2 ** 8) * 2
raise RuntimeError(
f"Sample rate too low for {num_bands} band octave filterbank. "
f"Sample rate must be greater than {required_rate}."
)
# Create the filter for this band
sos_band = ellip(2, 3, 60, [low_freq, high_freq], btype='bandpass',
fs=sample_rate, output='sos')
filters.append(sos_band)
low_freq = high_freq
# Now create the highpass filter from the highest band to the Nyquist
sos_high = ellip(2, 3, 60, high_freq, btype='highpass',
fs=sample_rate, output='sos')
filters.append(sos_high)
# Apply filters to audio
filtered_audio = np.zeros((len(filters), len(audio)), dtype=np.float32)
for i in range(len(filters)):
y = sosfilt(filters[i], audio)
filtered_audio[i] = y
return filtered_audio
``` |
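A short sketch of how the filter bank and RMS helpers above fit together (not part of the original file); the 1 kHz test tone and 48 kHz rate are arbitrary assumptions.
```python
# Assumes the helpers are importable from the utils module shown above.
from uvic_music_extractor.utils import octave_filter_bank, rms
import numpy as np

sample_rate = 48000.0
t = np.arange(int(sample_rate)) / sample_rate
audio = np.sin(2 * np.pi * 1000.0 * t).astype(np.float32)  # 1 kHz test tone

bands = octave_filter_bank(audio, sample_rate, num_bands=10, low_band=50)
print(bands.shape)  # (10, len(audio)): lowpass, 8 octave bandpasses, highpass
print([round(float(rms(band)), 4) for band in bands])  # energy concentrates near the 1 kHz band
```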
{
"source": "jorsonzen/aliyun-openapi-python-sdk",
"score": 2
} |
#### File: request/v20201022/CheckSavePayrollDetailRequest.py
```python
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcompanyreg.endpoint import endpoint_data
class CheckSavePayrollDetailRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'companyreg', '2020-10-22', 'CheckSavePayrollDetail','companyreg')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Income(self):
return self.get_query_params().get('Income')
def set_Income(self,Income):
self.add_query_param('Income',Income)
def get_CorporateHousingAccumulationFund(self):
return self.get_query_params().get('CorporateHousingAccumulationFund')
def set_CorporateHousingAccumulationFund(self,CorporateHousingAccumulationFund):
self.add_query_param('CorporateHousingAccumulationFund',CorporateHousingAccumulationFund)
def get_PersonHousingAccumulationFund(self):
return self.get_query_params().get('PersonHousingAccumulationFund')
def set_PersonHousingAccumulationFund(self,PersonHousingAccumulationFund):
self.add_query_param('PersonHousingAccumulationFund',PersonHousingAccumulationFund)
def get_PersMedicalInsurance(self):
return self.get_query_params().get('PersMedicalInsurance')
def set_PersMedicalInsurance(self,PersMedicalInsurance):
self.add_query_param('PersMedicalInsurance',PersMedicalInsurance)
def get_CorpUnemploymentInsurance(self):
return self.get_query_params().get('CorpUnemploymentInsurance')
def set_CorpUnemploymentInsurance(self,CorpUnemploymentInsurance):
self.add_query_param('CorpUnemploymentInsurance',CorpUnemploymentInsurance)
def get_Id(self):
return self.get_query_params().get('Id')
def set_Id(self,Id):
self.add_query_param('Id',Id)
def get_PersonSocialInsurance(self):
return self.get_query_params().get('PersonSocialInsurance')
def set_PersonSocialInsurance(self,PersonSocialInsurance):
self.add_query_param('PersonSocialInsurance',PersonSocialInsurance)
def get_Period(self):
return self.get_query_params().get('Period')
def set_Period(self,Period):
self.add_query_param('Period',Period)
def get_CorporateSocialInsurance(self):
return self.get_query_params().get('CorporateSocialInsurance')
def set_CorporateSocialInsurance(self,CorporateSocialInsurance):
self.add_query_param('CorporateSocialInsurance',CorporateSocialInsurance)
def get_CorpInjuryInsurance(self):
return self.get_query_params().get('CorpInjuryInsurance')
def set_CorpInjuryInsurance(self,CorpInjuryInsurance):
self.add_query_param('CorpInjuryInsurance',CorpInjuryInsurance)
def get_PersPension(self):
return self.get_query_params().get('PersPension')
def set_PersPension(self,PersPension):
self.add_query_param('PersPension',PersPension)
def get_CorpMedicalInsurance(self):
return self.get_query_params().get('CorpMedicalInsurance')
def set_CorpMedicalInsurance(self,CorpMedicalInsurance):
self.add_query_param('CorpMedicalInsurance',CorpMedicalInsurance)
def get_IdNo(self):
return self.get_query_params().get('IdNo')
def set_IdNo(self,IdNo):
self.add_query_param('IdNo',IdNo)
def get_EmployeeTime(self):
return self.get_query_params().get('EmployeeTime')
def set_EmployeeTime(self,EmployeeTime):
self.add_query_param('EmployeeTime',EmployeeTime)
def get_UpdateEmployeeFlag(self):
return self.get_query_params().get('UpdateEmployeeFlag')
def set_UpdateEmployeeFlag(self,UpdateEmployeeFlag):
self.add_query_param('UpdateEmployeeFlag',UpdateEmployeeFlag)
def get_Phone(self):
return self.get_query_params().get('Phone')
def set_Phone(self,Phone):
self.add_query_param('Phone',Phone)
def get_PersUnemploymentInsurance(self):
return self.get_query_params().get('PersUnemploymentInsurance')
def set_PersUnemploymentInsurance(self,PersUnemploymentInsurance):
self.add_query_param('PersUnemploymentInsurance',PersUnemploymentInsurance)
def get_BizId(self):
return self.get_query_params().get('BizId')
def set_BizId(self,BizId):
self.add_query_param('BizId',BizId)
def get_Name(self):
return self.get_query_params().get('Name')
def set_Name(self,Name):
self.add_query_param('Name',Name)
def get_CorpPension(self):
return self.get_query_params().get('CorpPension')
def set_CorpPension(self,CorpPension):
self.add_query_param('CorpPension',CorpPension)
def get_CorpMaternityInsurance(self):
return self.get_query_params().get('CorpMaternityInsurance')
def set_CorpMaternityInsurance(self,CorpMaternityInsurance):
self.add_query_param('CorpMaternityInsurance',CorpMaternityInsurance)
```
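For context, these generated request classes are normally dispatched through the SDK core client. A hedged sketch follows (not part of this file); the credentials, region, and field values are placeholders.
```python
from aliyunsdkcore.client import AcsClient

# Placeholder credentials and region; substitute real values before use.
client = AcsClient("<access-key-id>", "<access-key-secret>", "cn-hangzhou")

request = CheckSavePayrollDetailRequest()
request.set_BizId("example-biz-id")   # placeholder field values
request.set_Period("2020-10")
request.set_Income("10000")

response = client.do_action_with_exception(request)
print(response)
```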
#### File: request/v20201022/CheckSavePayrollRequest.py
```python
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcompanyreg.endpoint import endpoint_data
class CheckSavePayrollRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'companyreg', '2020-10-22', 'CheckSavePayroll','companyreg')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Income(self):
return self.get_query_params().get('Income')
def set_Income(self,Income):
self.add_query_param('Income',Income)
def get_CorporateHousingAccumulationFund(self):
return self.get_query_params().get('CorporateHousingAccumulationFund')
def set_CorporateHousingAccumulationFund(self,CorporateHousingAccumulationFund):
self.add_query_param('CorporateHousingAccumulationFund',CorporateHousingAccumulationFund)
def get_Period(self):
return self.get_query_params().get('Period')
def set_Period(self,Period):
self.add_query_param('Period',Period)
def get_CorporateSocialInsurance(self):
return self.get_query_params().get('CorporateSocialInsurance')
def set_CorporateSocialInsurance(self,CorporateSocialInsurance):
self.add_query_param('CorporateSocialInsurance',CorporateSocialInsurance)
def get_IdNo(self):
return self.get_query_params().get('IdNo')
def set_IdNo(self,IdNo):
self.add_query_param('IdNo',IdNo)
def get_EmployeeTime(self):
return self.get_query_params().get('EmployeeTime')
def set_EmployeeTime(self,EmployeeTime):
self.add_query_param('EmployeeTime',EmployeeTime)
def get_PersonHousingAccumulationFund(self):
return self.get_query_params().get('PersonHousingAccumulationFund')
def set_PersonHousingAccumulationFund(self,PersonHousingAccumulationFund):
self.add_query_param('PersonHousingAccumulationFund',PersonHousingAccumulationFund)
def get_UpdateEmployeeFlag(self):
return self.get_query_params().get('UpdateEmployeeFlag')
def set_UpdateEmployeeFlag(self,UpdateEmployeeFlag):
self.add_query_param('UpdateEmployeeFlag',UpdateEmployeeFlag)
def get_Phone(self):
return self.get_query_params().get('Phone')
def set_Phone(self,Phone):
self.add_query_param('Phone',Phone)
def get_BizId(self):
return self.get_query_params().get('BizId')
def set_BizId(self,BizId):
self.add_query_param('BizId',BizId)
def get_Name(self):
return self.get_query_params().get('Name')
def set_Name(self,Name):
self.add_query_param('Name',Name)
def get_Id(self):
return self.get_query_params().get('Id')
def set_Id(self,Id):
self.add_query_param('Id',Id)
def get_PersonSocialInsurance(self):
return self.get_query_params().get('PersonSocialInsurance')
def set_PersonSocialInsurance(self,PersonSocialInsurance):
self.add_query_param('PersonSocialInsurance',PersonSocialInsurance)
```
#### File: request/v20171110/DescribeApplicationResourceSummaryRequest.py
```python
from aliyunsdkcore.request import RpcRequest
class DescribeApplicationResourceSummaryRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ens', '2017-11-10', 'DescribeApplicationResourceSummary','ens')
self.set_method('POST')
def get_Level(self):
return self.get_query_params().get('Level')
def set_Level(self,Level):
self.add_query_param('Level',Level)
def get_ResourceType(self):
return self.get_query_params().get('ResourceType')
def set_ResourceType(self,ResourceType):
self.add_query_param('ResourceType',ResourceType)
```
#### File: request/v20171110/RunServiceScheduleRequest.py
```python
from aliyunsdkcore.request import RpcRequest
class RunServiceScheduleRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ens', '2017-11-10', 'RunServiceSchedule','ens')
self.set_method('POST')
def get_Directorys(self):
return self.get_query_params().get('Directorys')
def set_Directorys(self,Directorys):
self.add_query_param('Directorys',Directorys)
def get_PreLockedTimeout(self):
return self.get_query_params().get('PreLockedTimeout')
def set_PreLockedTimeout(self,PreLockedTimeout):
self.add_query_param('PreLockedTimeout',PreLockedTimeout)
def get_Uuid(self):
return self.get_query_params().get('Uuid')
def set_Uuid(self,Uuid):
self.add_query_param('Uuid',Uuid)
def get_ClientIp(self):
return self.get_query_params().get('ClientIp')
def set_ClientIp(self,ClientIp):
self.add_query_param('ClientIp',ClientIp)
def get_PodConfigName(self):
return self.get_query_params().get('PodConfigName')
def set_PodConfigName(self,PodConfigName):
self.add_query_param('PodConfigName',PodConfigName)
def get_ServiceAction(self):
return self.get_query_params().get('ServiceAction')
def set_ServiceAction(self,ServiceAction):
self.add_query_param('ServiceAction',ServiceAction)
def get_ServiceCommands(self):
return self.get_query_params().get('ServiceCommands')
def set_ServiceCommands(self,ServiceCommands):
self.add_query_param('ServiceCommands',ServiceCommands)
def get_ScheduleStrategy(self):
return self.get_query_params().get('ScheduleStrategy')
def set_ScheduleStrategy(self,ScheduleStrategy):
self.add_query_param('ScheduleStrategy',ScheduleStrategy)
def get_AppId(self):
return self.get_query_params().get('AppId')
def set_AppId(self,AppId):
self.add_query_param('AppId',AppId)
```
#### File: request/v20190531/UpdateConfigRequest.py
```python
from aliyunsdkcore.request import RpcRequest
from aliyunsdkmse.endpoint import endpoint_data
class UpdateConfigRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'mse', '2019-05-31', 'UpdateConfig')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_OpenSuperAcl(self): # String
return self.get_body_params().get('OpenSuperAcl')
def set_OpenSuperAcl(self, OpenSuperAcl): # String
self.add_body_params('OpenSuperAcl', OpenSuperAcl)
def get_ConfigAuthEnabled(self): # Boolean
return self.get_query_params().get('ConfigAuthEnabled')
def set_ConfigAuthEnabled(self, ConfigAuthEnabled): # Boolean
self.add_query_param('ConfigAuthEnabled', ConfigAuthEnabled)
def get_PassWord(self): # String
return self.get_query_params().get('PassWord')
def set_PassWord(self, PassWord): # String
        self.add_query_param('PassWord', PassWord)
def get_MaxClientCnxns(self): # String
return self.get_query_params().get('MaxClientCnxns')
def set_MaxClientCnxns(self, MaxClientCnxns): # String
self.add_query_param('MaxClientCnxns', MaxClientCnxns)
def get_RequestPars(self): # String
return self.get_query_params().get('RequestPars')
def set_RequestPars(self, RequestPars): # String
self.add_query_param('RequestPars', RequestPars)
def get_JuteMaxbuffer(self): # String
return self.get_query_params().get('JuteMaxbuffer')
def set_JuteMaxbuffer(self, JuteMaxbuffer): # String
self.add_query_param('JuteMaxbuffer', JuteMaxbuffer)
def get_ConfigType(self): # String
return self.get_query_params().get('ConfigType')
def set_ConfigType(self, ConfigType): # String
self.add_query_param('ConfigType', ConfigType)
def get_AutopurgeSnapRetainCount(self): # String
return self.get_query_params().get('AutopurgeSnapRetainCount')
def set_AutopurgeSnapRetainCount(self, AutopurgeSnapRetainCount): # String
self.add_query_param('AutopurgeSnapRetainCount', AutopurgeSnapRetainCount)
def get_ConfigSecretEnabled(self): # Boolean
return self.get_query_params().get('ConfigSecretEnabled')
def set_ConfigSecretEnabled(self, ConfigSecretEnabled): # Boolean
self.add_query_param('ConfigSecretEnabled', ConfigSecretEnabled)
def get_MCPEnabled(self): # Boolean
return self.get_query_params().get('MCPEnabled')
def set_MCPEnabled(self, MCPEnabled): # Boolean
self.add_query_param('MCPEnabled', MCPEnabled)
def get_TickTime(self): # String
return self.get_query_params().get('TickTime')
def set_TickTime(self, TickTime): # String
self.add_query_param('TickTime', TickTime)
def get_ClusterId(self): # String
return self.get_query_params().get('ClusterId')
def set_ClusterId(self, ClusterId): # String
self.add_query_param('ClusterId', ClusterId)
def get_SyncLimit(self): # String
return self.get_query_params().get('SyncLimit')
def set_SyncLimit(self, SyncLimit): # String
self.add_query_param('SyncLimit', SyncLimit)
def get_InstanceId(self): # String
return self.get_query_params().get('InstanceId')
def set_InstanceId(self, InstanceId): # String
self.add_query_param('InstanceId', InstanceId)
def get_AutopurgePurgeInterval(self): # String
return self.get_query_params().get('AutopurgePurgeInterval')
def set_AutopurgePurgeInterval(self, AutopurgePurgeInterval): # String
self.add_query_param('AutopurgePurgeInterval', AutopurgePurgeInterval)
def get_InitLimit(self): # String
return self.get_query_params().get('InitLimit')
def set_InitLimit(self, InitLimit): # String
self.add_query_param('InitLimit', InitLimit)
def get_UserName(self): # String
return self.get_query_params().get('UserName')
def set_UserName(self, UserName): # String
self.add_query_param('UserName', UserName)
``` |
{
"source": "JortdeBokx/NPO-streams",
"score": 2
} |
#### File: JortdeBokx/NPO-streams/HDhomerunProxy.py
```python
import logging
from flask import jsonify, request, abort, Response, stream_with_context
# Code similar to https://github.com/jkaberg/tvhProxy
from util.Helpers import generate_stream_ffmpeg
def setup_hdhrproxy(app, stream_handlers):
lineup = []
for sh in stream_handlers:
name = sh.__class__.__name__
lineup += sh.get_lineup("http://" + app.config["HOST"] + ":" + str(app.config["PORT"]) + "/" + name)
logging.info('Lineup: ' + str(lineup))
@app.route('/<class_name>/<key>')
def stream_stuff(class_name, key):
sh = None
for i in stream_handlers:
if i.__class__.__name__ == class_name:
sh = i
if not sh:
abort(404)
if not sh.valid_key(key):
abort(404)
stream_url = sh.get_live_m3u8(str(key), quality=app.config["QUALITY"])
if not stream_url:
logging.error("Could not get stream url")
abort(404)
return Response(stream_with_context(generate_stream_ffmpeg(stream_url)), mimetype="video/mp2t")
@app.route('/discover.json')
def discover():
discover_data = {
'FriendlyName': 'NPOproxy',
'Manufacturer': 'Silicondust',
'ModelNumber': 'HDTC-2US',
'FirmwareName': 'hdhomeruntc_atsc',
'TunerCount': 1,
'FirmwareVersion': '20150826',
'DeviceID': '12345678',
'DeviceAuth': '<PASSWORD>',
'BaseURL': '%s' % request.host_url,
'LineupURL': '%slineup.json' % request.host_url
}
return jsonify(discover_data)
@app.route('/lineup_status.json')
def status():
return jsonify({
'ScanInProgress': 0,
'ScanPossible': 1,
'Source': "Cable",
'SourceList': ['Cable']
})
@app.route('/lineup.json')
def give_lineup():
return jsonify(lineup)
@app.route('/lineup.post', methods=['GET', 'POST'])
def lineup_post():
return ''
@app.route('/')
@app.route('/device.xml')
def device():
discover_data = {
'FriendlyName': 'NPOproxy',
'Manufacturer': 'Silicondust',
'ModelNumber': 'HDTC-2US',
'FirmwareName': 'hdhomeruntc_atsc',
'TunerCount': 1,
'FirmwareVersion': '20150826',
'DeviceID': '12345678',
'DeviceAuth': '<PASSWORD>',
'BaseURL': '%s' % request.host_url,
'LineupURL': '%slineup.json' % request.host_url
}
return """
<root xmlns="urn:schemas-upnp-org:device-1-0">
<specVersion>
<major>1</major>
<minor>0</minor>
</specVersion>
<URLBase>""" + discover_data["BaseURL"] + """"</URLBase>
<device>
<deviceType>urn:schemas-upnp-org:device:MediaServer:1</deviceType>
<friendlyName>""" + discover_data["FriendlyName"] + """"</friendlyName>
<manufacturer>""" + discover_data["Manufacturer"] + """"</manufacturer>
<modelName>""" + discover_data["ModelNumber"] + """"</modelName>
<modelNumber>""" + discover_data["ModelNumber"] + """"</modelNumber>
<serialNumber></serialNumber>
<UDN>uuid:""" + discover_data["DeviceID"] + """"</UDN>
</device>
</root>
""", {'Content-Type': 'application/xml'}
```
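The proxy above streams each channel through `generate_stream_ffmpeg`, which is imported from `util.Helpers` and not shown in this excerpt. Below is a hypothetical sketch of what such a helper could look like; the real implementation may differ.
```python
import subprocess

def generate_stream_ffmpeg(stream_url, chunk_size=65536):
    """Remux an HLS url to MPEG-TS with ffmpeg and yield it chunk by chunk."""
    cmd = ["ffmpeg", "-loglevel", "quiet", "-i", stream_url,
           "-c", "copy", "-f", "mpegts", "pipe:1"]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    try:
        while True:
            chunk = proc.stdout.read(chunk_size)
            if not chunk:
                break
            yield chunk
    finally:
        proc.kill()
```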
#### File: NPO-streams/stream_handlers/NPOStreamHandler.py
```python
import logging
import re
import m3u8
import requests
from stream_handlers.BaseStreamHandler import BaseStreamHandler
NPO_IDA_APP_URI = 'https://ida.omroep.nl/app.php/'
NPO_AUTH_URL = "https://ida.omroep.nl/app.php/auth"
class NPOStreamHandler(BaseStreamHandler):
def __init__(self):
"""
Initialise NPO stream handler by Reading & verifying the config file
"""
super().__init__() # Call super init to read config contents
if not self.config_data:
raise ValueError("No Configfile was set")
streams_config = None
for streamclass in self.config_data:
if streamclass["className"] == self.__class__.__name__:
streams_config = streamclass["channels"]
if not streams_config:
raise ValueError("NPO stream class not in config data, class name: " + self.__class__.__name__)
self.streams = [d for d in streams_config]
def valid_key(self, key):
try:
return any(d['key'] == key for d in self.streams)
except TypeError:
logging.log(logging.ERROR, 'Stream variable was not correctly set in NPO Stream Handler')
return False
def get_lineup(self, base_url):
"""
Overwrite function to get lineup object for NPO streams
:return:
"""
lineup = []
try:
for d in self.streams:
if d['enabled']:
url = base_url + "/" + d['key']
lineup.append({'GuideNumber': str(d['number']),
'GuideName': d['name'],
'URL': url
})
return lineup
except TypeError:
logging.log(logging.ERROR, 'No streams were loaded for NPO streamer, check the streams.json file')
return []
def get_live_m3u8(self, key, quality=0):
"""
Get's the m3u8 object in the preferred quality
:param key: The key of the livestream, from streams.json
:param quality: an integer for the vertical amount of pixels, 0 for maximum quality, invalid -> minimum quality
:return: an m3u8 object
"""
m3u8_location = self.get_live_url(key)
if m3u8_location:
m3u8_obj = m3u8.load(m3u8_location)
Base_URI = m3u8_obj.base_uri
if m3u8_obj.is_variant:
options = {}
for m3u8_playlist in m3u8_obj.playlists:
resolution = m3u8_playlist.stream_info.resolution
if resolution: # If we don't have the audio-only stream
options[str(resolution[1])] = Base_URI + m3u8_playlist.uri
if quality == 0:
preferred_m3u8_url = options[str(max(options, key=int))] # int refers to function int()
else:
try:
preferred_m3u8_url = options[str(quality)]
except KeyError:
preferred_m3u8_url = options[str(min(options, key=int))]
return preferred_m3u8_url
else:
return m3u8_obj.uri
else:
return None
def get_live_url(self, key):
"""
Gets the Streaming url of the live stream identified by key
:param key: The key of the livestream, from streams.json
:return: URL of the stream
"""
stream_data = self.get_stream_data(key)
selected_stream = ""
if stream_data:
try:
for streams in stream_data['items']:
for stream in streams:
if stream['contentType'] == "live":
selected_stream = stream['url']
break
if selected_stream:
stream_url = requests.get(selected_stream).text
stream_url = stream_url.split('"')[1]
stream_url = re.sub(r"\\", '', stream_url)
return stream_url
except KeyError:
logging.log(logging.ERROR, "Data stream contained no content")
logging.log(logging.ERROR, "Data stream: " + str(stream_data))
return None
else:
return None
def get_stream_data(self, key):
"""
Gets the stream Json
:param key: The key of the livestream, from streams.json
:return: Json object with stream data
"""
try:
auth_token_json = requests.get(NPO_AUTH_URL).json()
token = auth_token_json["token"]
        except IOError:
            logging.log(logging.ERROR, 'Could not fetch a token from ' + NPO_AUTH_URL)
            return None
try:
data_url = NPO_IDA_APP_URI + key + '?adaptive=no&token=' + token
stream_data = requests.get(data_url).json()
except TypeError:
if not token:
logging.log(logging.ERROR, 'Could not fetch NPO streaming token')
return None
except IOError:
logging.log(logging.ERROR,
'Error obtaining streaming data from ' + NPO_IDA_APP_URI + key + '?adaptive=no&token=')
return None
if "error" in stream_data:
logging.log(logging.ERROR, "Error with stream data: " + str(stream_data))
return None
return stream_data
``` |
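The handler reads its channel list from a shared config file that is not shown here. The shape below is inferred from the keys the class accesses (`className`, `channels`, `key`, `enabled`, `number`, `name`); the channel keys and names are placeholders, and the real streams.json may carry additional fields.
```python
# Inferred config shape for NPOStreamHandler; values are placeholders.
example_config = [
    {
        "className": "NPOStreamHandler",
        "channels": [
            {"key": "npo-1", "name": "NPO 1", "number": 1, "enabled": True},
            {"key": "npo-2", "name": "NPO 2", "number": 2, "enabled": True},
        ],
    }
]
```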
{
"source": "JortGroen/eyeMouse",
"score": 3
} |
#### File: eyeMouse/src/data_creation_game4.py
```python
import cv2
import numpy as np
import dlib
from math import hypot
import pyautogui
import random
import subprocess
import json
import threading
import time
import os
receiveBuffer = ""
receiveStatus = 0
DATA = ""
stopReader = False
class screenShape:
width = 0
height = 0
def create_dot(screen, screenSize):
screen.fill(255)
x = random.randint(1, screenSize.width)
y = random.randint(1, screenSize.height)
cv2.circle(screen, (x,y), 10, (0,0,255), -1)
return (x,y)
def dotGreen(screen, targetLoc):
#print("dotGreen")
screen.fill(255)
cv2.circle(screen, targetLoc, 10, (0,255,0), -1)
def save_data():
pass
def game_init(screenSize, fullScreen=True):
screen = np.zeros([screenSize.height,screenSize.width,3],dtype=np.uint8)
screen.fill(255)
targetLoc = (int(screenSize.width/2),int(screenSize.height/2))
cv2.circle(screen, targetLoc, 10, (0,0,255), -1)
if fullScreen==True:
cv2.namedWindow("window", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("window",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
return screen, screenSize, targetLoc
def dataReceiver(process):
global receiveBuffer, receiveStatus, DATA, stopReader
newData = False
while(stopReader==False and process.poll()==None):
outputRaw = process.stdout.readline()
output = str(outputRaw.strip())[2:-1]
index = output.find("<data>")
if index > -1:
#print("start!")
receiveBuffer = ""
output = output[index+6:]
if receiveStatus==1:
print("WARNING: I received a data start key without finishing my previous data read, data might be corrupted!")
receiveStatus = 1
index = output.find("</data>")
if index > -1:
#print("stop!")
receiveBuffer = receiveBuffer+output[:index]
#print(receiveBuffer)
receiveStatus = 0
DATA = receiveBuffer
newData = True
if receiveStatus==1:
receiveBuffer = receiveBuffer+output
process.kill()
def startupRecognition():
global DATA, stopReader
#process = subprocess.Popen(['echo', '"Hello stdout"'], stdout=subprocess.PIPE)
#process = subprocess.Popen(["python", "testPrinter.py"], stdout=subprocess.PIPE)
process = subprocess.Popen(["python", "featureGrabber.py"], stdout=subprocess.PIPE)
threadReader = threading.Thread(target=dataReceiver, args=(process,))
threadReader.start()
print("waiting for the recognition model to start up, this can take a minute")
print("please make sure privacy cover is away from the camera")
t=0
timer=0
while process.poll() is None and len(DATA)==0: # wait untill first data is received
t=t+1
if t>100000:
print(".", end='')
t=0
timer=timer+1
assert len(DATA)>0,"ERROR: something went wrong, couldn't have communication with the recognition model"
print("took us",timer)
print("\nlets goooo!!!")
return process
def storeDatapoint(targetLoc):
global DATA
print("targetLoc:",targetLoc,"DATA:",DATA)
data = DATA
DATA=""
data = json.loads(data)
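    # NOTE: the sample is only decoded here; nothing is persisted yet
    # (save_data() above is still an empty stub).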
def main():
global stopReader
started=False
process = startupRecognition()
screenSize = pyautogui.size()
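    # NOTE: the detected screen size is immediately overridden below with a small
    # 100x100 test window; remove the override to use the real screen dimensions.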
screenSize = screenShape()
screenSize.width = 100
screenSize.height = 100
screen, screenSize, targetLoc = game_init(screenSize, fullScreen=False)
while True:
cv2.imshow('window', screen)
if len(DATA)>0:
dotGreen(screen, targetLoc)
key = cv2.waitKey(1)
if key == 32:
if len(DATA)>0:
if started:
storeDatapoint(targetLoc)
else:
started=True
targetLoc = create_dot(screen, screenSize)
else:
print("no new data")
#cv2.putText(screen, 'face', (10,30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255),2, cv2.LINE_AA)
if key == 27:
stopReader=True
print("quitting")
break
if process.poll() is not None:
print("the model stopped, will quit now too")
stopReader=True
break
cv2.destroyAllWindows()
main()
``` |
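The game drives `featureGrabber.py` as a subprocess and scans its stdout for a JSON payload wrapped in `<data>...</data>` markers. Below is a hypothetical emitter matching what `dataReceiver()` expects; featureGrabber.py itself is not shown and the payload keys are placeholders.
```python
import json
import sys
import time

while True:
    payload = {"left_eye": [0.0, 0.0], "right_eye": [0.0, 0.0]}  # placeholder features
    sys.stdout.write("<data>" + json.dumps(payload) + "</data>\n")
    sys.stdout.flush()
    time.sleep(0.1)
```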
{
"source": "jortizcs/machine-learning",
"score": 3
} |
#### File: udacity/linearAlgebra/vector.py
```python
import math
from decimal import Decimal, getcontext
getcontext().prec = 30
class Vector(object):
def __init__(self, coordinates):
try:
if not coordinates:
raise ValueError
self.coordinates = tuple(coordinates)
self.dimension = len(coordinates)
except ValueError:
raise ValueError('The coordinates must be nonempty')
except TypeError:
            raise TypeError('The coordinates must be an iterable')
def __str__(self):
return 'Vector: {}'.format(self.coordinates)
def __eq__(self, v):
return self.coordinates == v.coordinates
def __add__(self, v):
if self.dimension != v.dimension:
raise ValueError('Vector must be the same length')
#sum_ = []
#for i in range(self.dimension):
# sum_.append(self.coordinates[i]+v.coordinates[i])
sum_ = [x+y for x,y in zip(self.coordinates,v.coordinates)]
return Vector(sum_)
def __sub__(self, v):
if self.dimension != v.dimension:
raise ValueError('Vector must be the same length')
m = v*-1
return m+self
def __mul__(self, f):
#s_ = []
#for i in range(self.dimension):
# s_.append(f*self.coordinates[i])
s_ = [f*x for x in self.coordinates]
return Vector(s_)
def magnitude(self):
#for x in self.coordinates:
# print x
return math.sqrt(sum([float(x)**2 for x in self.coordinates]))
def normalize(self):
if self.magnitude()==0:
return 0
return self*(1/self.magnitude())
def dot(self, x):
        return sum([float(a)*float(b) for a, b in zip(x.coordinates, self.coordinates)])
def angle_rads(self, x):
mag_x = x.magnitude()
mag_y = self.magnitude()
if mag_x == 0 or mag_y==0:
raise ValueError('magnitude or x or self is 0')
#print type(self.dot(x))
#print type(mag_x)
#print type(mag_y)
#print self.dot(x)/(mag_x*mag_y)
v = self.dot(x)/(mag_x*mag_y)
v = float("{0:.3f}".format(v))
return math.acos(v)
def angle_deg(self, x):
return math.degrees(self.angle_rads(x))
def is_zero(self):
return self.magnitude()<1e-10
def is_parallel(self, x):
if x.magnitude()==0 or self.magnitude()==0:
return True
res = self.angle_deg(x)
#print res
if res==180 or res==0 or res<=1e-10:
return True
return False
def is_orthogonal(self, x):
if x.magnitude()==0 or self.magnitude()==0:
return True
res = self.dot(x)
#print "ortho:" + str(res)
if res==0 or abs(res)<1e-10:
return True
return False
def proj(self,b):
b_norm = b.normalize()
return b_norm*self.dot(b_norm)
def perp(self, b):
v_parallel = self.proj(b)
return self-v_parallel
def cross(self, w):
if self.dimension ==3 and w.dimension==3:
v=self
new_vec = Vector([(v.coordinates[1]*w.coordinates[2])-(v.coordinates[2]*w.coordinates[1]),\
-1*((v.coordinates[0]*w.coordinates[2])-(v.coordinates[2]*w.coordinates[0])),\
(v.coordinates[0]*w.coordinates[1])-(v.coordinates[1]*w.coordinates[0])])
return new_vec
return None
def parallelogram_area(self, w):
theta = self.angle_rads(w)
if abs(theta)<1e-10:
theta=0
return (self.magnitude()*w.magnitude())*math.sin(theta)
def parallelogram_area2(self, w):
return self.cross(w).magnitude()
def triangle_area(self, w):
return 0.5*self.parallelogram_area(w)
'''
# Quiz
v1 = Vector([8.218,-9.341])
v2 = Vector([-1.129,2.111])
print v1+v2
v3 = Vector([7.119,8.215])
v4 = Vector([-8.223,0.878])
print v3-v4
f = 7.41
v5=Vector([1.671, -1.012, -0.318])
print v5*f
print '\nMagnitude and normalization'
v6 = Vector([-0.221,7.437])
print v6
print 'magnitude: ' + str(v6.magnitude())
v7=Vector([8.813,-1.331,-6.247])
print v7
print 'magnitude: ' + str(v7.magnitude())
v8 = Vector([5.581,-2.136])
print v8
print v8.normalize()
v9=Vector([1.996,3.108,-4.554])
print v9
print v9.normalize()
# Coding Dot product and angle
print '\n\nCoding Dot product and angle'
v10=Vector([7.887,4.138])
v11=Vector([-8.802,6.776])
print v10
print v11
print 'dot: ' + str(v10.dot(v11))
v12 = Vector([-5.955,-4.904, -1.874])
v13 = Vector([-4.496,-8.755, 7.103])
print '\n'
print v12
print v13
print 'dot: ' + str(v12.dot(v13))
v14=Vector([3.183,-7.627])
v15=Vector([-2.668, 5.319])
print '\n'
print v14
print v15
print v14.angle_rads(v15)
v16=Vector([7.35,0.221, 5.188])
v17=Vector([2.751,8.259, 3.985])
print '\n'
print v16
print v17
print v16.angle_deg(v17)
# Parellelism and Orthogonality
print '\n\n\n############# Parellelism and Orthogonality'
v18=Vector([-7.579,-7.88])
v19=Vector([22.737,23.64])
print v18
print v19
print 'parallel? ' + str(v18.is_parallel(v19))
print 'orthogonal? ' + str(v18.is_orthogonal(v19)) + '\n'
v20=Vector([-2.029,9.97, 4.172])
v21=Vector([-9.231, -6.639, -7.245])
print v20
print v21
print 'parallel? ' + str(v20.is_parallel(v21))
print 'orthogonal? ' + str(v20.is_orthogonal(v21)) + '\n'
v22=Vector([-2.328, -7.284, -1.214])
v23=Vector([-1.821, 1.072, -2.94])
print v22
print v23
print 'parallel? ' + str(v22.is_parallel(v23))
print 'orthogonal? ' + str(v22.is_orthogonal(v23)) + '\n'
v24=Vector([2.118,4.827])
v25=Vector([0.0,0.0])
print v24
print v25
print 'parallel? ' + str(v24.is_parallel(v25))
print 'orthogonal? ' + str(v24.is_orthogonal(v25)) + '\n'
print '\n\n##### Projections #####'
v26 = Vector([3.039, 1.879])
v27 = Vector([0.825, 2.036])
print v26
print v27
print v26.proj(v27)
v28=Vector([-9.88, -3.264, -8.159])
v29=Vector([-2.155, -9.353, -9.473])
print ''
print v28
print v29
print v28.perp(v29)
v30=Vector([3.009, -6.172, 3.692, -2.51])
v31=Vector([6.404, -9.144, 2.759, 8.718])
print ''
print v30
print v31
print v30.proj(v31)
print v30.perp(v31)
print v30.proj(v31)+v30.perp(v31)
print v30.proj(v31).is_parallel(v31)
print v30.perp(v31).is_orthogonal(v31)
# Cross product
print ''
print '########### Cross product #######\n'
v32=Vector([8.462, 7.893, -8.187])
v33=Vector([6.984, -5.975, 4.778])
print v32.cross(v33)
print v32.cross(v33).is_orthogonal(v32)
print v32.cross(v33).is_orthogonal(v33)
print v32.parallelogram_area(v33)
print v32.parallelogram_area2(v33)
print ''
v34 = Vector([-8.987, -9.838, 5.031])
v35 = Vector([-4.268, -1.861, -8.866])
print v34.parallelogram_area(v35)
print ''
v36 = Vector([1.5, 9.547, 3.691])
v37 = Vector([-6.007, 0.124, 5.772])
print v36.triangle_area(v37)
'''
``` |
{
"source": "jortizcs/Pangia",
"score": 3
} |
#### File: Pangia/sbs/reportError.py
```python
import smtplib
import sys
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
def sendError(recipient, reportURL):
    """Sends an e-mail to the specified recipient."""
    SMTP_SERVER = 'smtp.gmail.com'
    SMTP_PORT = 587
    sender = '<EMAIL>'
    secret = 'temp9999'
    subject = "An error happened in SBS/Pangia"
# Create message container - the correct MIME type is multipart/alternative.
msg = MIMEMultipart('alternative')
msg['Subject'] = subject
msg['From'] = sender
msg['To'] = recipient
html = """\
<html>
<head></head>
<body>
{0}
</body>
</html>
""".format(reportURL)
part2 = MIMEText(html, 'html')
msg.attach(part2)
session = smtplib.SMTP(SMTP_SERVER, SMTP_PORT)
session.ehlo()
session.starttls()
    session.ehlo()
session.login(sender, secret)
session.sendmail(sender, recipient, msg.as_string())
session.quit()
if __name__ == "__main__":
if len(sys.argv) < 3:
print("usage: {0} emailAddress message".format(sys.argv[0]));
quit();
sendError(sys.argv[1], sys.argv[2])
``` |
{
"source": "JortRoelofs/PlaneProject",
"score": 3
} |
#### File: JortRoelofs/PlaneProject/analyze.py
```python
import sys
from functools import partial
from multiprocessing import Pool
import numpy as np
import scipy as sp
from scipy import integrate, interpolate
import structure
import util
class MoIXXCalculator:
def __init__(self, load_case):
self.load_case = load_case
def calc(self, size):
pool = Pool(size)
results = pool.map(self.value, self.load_case.range)
pool.close()
pool.join()
func = interpolate.interp1d(self.load_case.range, results, kind='cubic', fill_value="extrapolate")
self.load_case.wing.wing_box.moi_xx = func
return func
def value(self, y):
return self.load_case.wing.wing_box.calc_moi_xx(y)
class MoIPolarCalculator:
def __init__(self, load_case):
self.load_case = load_case
def calc(self, size):
pool = Pool(size)
results = pool.map(self.value, self.load_case.range)
pool.close()
pool.join()
func = interpolate.interp1d(self.load_case.range, results, kind='cubic', fill_value="extrapolate")
self.load_case.wing.wing_box.moi_polar = func
return func
def value(self, y):
return self.load_case.wing.wing_box.calc_moi_polar(y)
class ShearCalculator:
def __init__(self, load_case):
self.load_case = load_case
self.wing_box = load_case.wing.wing_box
def calc(self, size):
pool = Pool(size)
results = pool.map(self.value, self.load_case.range)
pool.close()
pool.join()
self.print_result(results)
return interpolate.interp1d(self.load_case.range, results, kind='cubic', fill_value="extrapolate")
def value(self, y):
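        # Internal shear at station y: integrated lift from y to the tip, minus the
        # weights of fuel, wing-box structure, and the remaining wing mass, and minus
        # the engine weight when the cut at y lies inboard of the engine location.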
        return self.shear(y) + self.fuel(y) + self.weight_wing_box(y) + self.weight_wing(y) + self.engine(y)
def shear(self, y):
func = lambda y2: self.load_case.wing.lift(y2, self.load_case.density, self.load_case.velocity)
return integrate.quad(func, y, self.wing_box.end_y)[0]
def fuel(self, y):
return - integrate.quad(self.load_case.wing.fuel_tank.fuel_cross_section, y,
self.wing_box.end_y, limit=100, epsrel=1.49e-06)[0] * structure.FuelTank.rho_fuel * 9.81
def weight_wing_box(self, y):
return -9.81 * self.wing_box.material.density * integrate.quad(self.wing_box.calc_material_area, y, self.wing_box.end_y, epsabs=1.49e-06)[0]
def weight_wing(self, y):
return - 9.81 * 2.06 * 0.001 * self.wing_box.material.density * integrate.quad(self.load_case.wing.chord, y, self.wing_box.end_y)[0]
def engine(self, y):
if y <= self.load_case.wing.engine.y:
return - self.load_case.wing.engine.weight
else:
return 0
def calc_weight_wing_box(self):
return - self.weight_wing_box(0)
def print_result(self, results):
abs_min = abs(min(results))
abs_max = abs(max(results))
max_value = abs_max if abs_max > abs_min else abs_min
print("Magnitude of maximum shear force: {0:.3e} [N]".format(max_value))
class MomentCalculator:
def __init__(self, load_case, shear):
self.load_case = load_case
self.shear = shear
def calc(self, size):
pool = Pool(size)
results = pool.map(self.value, self.load_case.range)
pool.close()
pool.join()
self.print_result(results)
return interpolate.interp1d(self.load_case.range, results, kind='cubic', fill_value="extrapolate")
def value(self, y):
return integrate.quad(self.shear, y, self.load_case.wing.wing_box.end_y, epsrel=1.49e-06)[0]
def print_result(self, results):
abs_min = abs(min(results))
abs_max = abs(max(results))
max_value = abs_max if abs_max > abs_min else abs_min
print("Magnitude of maximum bending moment: {0:.3e} [Nm]".format(max_value))
class RotationCalculator:
def __init__(self, load_case, moment):
self.load_case = load_case
self.moment = moment
def calc(self, size):
pool = Pool(size)
results = pool.map(self.value, self.load_case.range)
pool.close()
pool.join()
return interpolate.interp1d(self.load_case.range, results, kind='cubic', fill_value="extrapolate")
def value(self, y):
return self.rotation(y)
def rotation(self, y):
func = lambda y2: self.moment(y2) / self.load_case.wing.wing_box.calc_moi_xx(y2)
return integrate.quad(func, 0, y, limit=200, epsrel=1.49e-06)[0] / self.load_case.wing.wing_box.material.e_modulus
class DeflectionCalculator:
def __init__(self, load_case, rotation):
self.load_case = load_case
self.rotation = rotation
def calc(self, size):
pool = Pool(size)
results = pool.map(self.value, self.load_case.range)
pool.close()
pool.join()
self.print_result(results)
return interpolate.interp1d(self.load_case.range, results, kind='cubic', fill_value="extrapolate")
def value(self, y):
return self.deflection(y)
def deflection(self, y):
return integrate.quad(self.rotation, 0, y, epsabs=1.49e-06)[0]
def print_result(self, results):
deflection = results[-1] / (self.load_case.wing.wing_box.end_y * 2) * 100
print("Maximum deflection: {0:.2f} [%]".format(deflection))
if deflection > self.load_case.limit_deflection:
util.print_err("Wing box failed: deflection exceeded limits")
class TorsionCalculator:
def __init__(self, load_case):
self.load_case = load_case
def calc(self, size):
pool = Pool(size)
results = pool.map(self.value, self.load_case.range)
pool.close()
pool.join()
self.print_result(results)
return interpolate.interp1d(self.load_case.range, results, kind='cubic', fill_value="extrapolate")
def value(self, y):
return self.lift_moment(y) + self.engine(y)
def lift_moment(self, y):
func = lambda y2: self.load_case.wing.moment(y2, self.load_case.density, self.load_case.velocity)
return integrate.quad(func, y, self.load_case.wing.wing_box.end_y)[0]
def engine(self, y):
if y <= self.load_case.wing.engine.y:
return self.load_case.wing.engine.thrust * self.load_case.wing.engine.z + self.load_case.wing.engine.weight * self.load_case.wing.engine.x
else:
return 0
def print_result(self, results):
abs_min = abs(min(results))
abs_max = abs(max(results))
max_value = abs_max if abs_max > abs_min else abs_min
print("Magnitude of maximum torsion: {0:.3e} [Nm]".format(max_value))
class TwistCalculator:
def __init__(self, load_case, torsion):
self.load_case = load_case
self.torsion = torsion
def calc(self, size):
pool = Pool(size)
results = pool.map(self.value, self.load_case.range)
pool.close()
pool.join()
self.print_result(results)
return interpolate.interp1d(self.load_case.range, results, kind='cubic', fill_value="extrapolate")
def value(self, y):
return self.twist(y)
def twist(self, y):
return integrate.quad(lambda y2: self.torsion(y2) / self.load_case.wing.wing_box.calc_moi_polar(y2),
0, y, limit=200)[0] / self.load_case.wing.wing_box.material.shear_modulus
def print_result(self, results):
twist = results[-1] * 180 / sp.pi
print("Maximum twist: {0:.2f} [deg]".format(twist))
if twist > self.load_case.limit_twist:
util.print_err("Wing box failed: twist exceeded limits")
class TopPanelStressCalculator:
def __init__(self, load_case, moment):
self.load_case = load_case
self.wing_box = load_case.wing.wing_box
self.moment = moment
def calc(self, size):
pool = Pool(size)
results = pool.map(self.value, self.load_case.range)
pool.close()
pool.join()
self.print_result(results)
return interpolate.interp1d(self.load_case.range, results, kind="cubic", fill_value="extrapolate")
def value(self, y):
return - self.moment(y) * (self.wing_box.calc_height(y) / 2 - self.wing_box.calc_centroid_z(y)) / \
self.wing_box.calc_moi_xx(y)
def print_result(self, results):
abs_min = abs(min(results))
abs_max = abs(max(results))
max_value = max(results) if abs_max > abs_min else min(results)
pos = results.index(max_value) * self.load_case.step
print("Maximum stress in top panel: {0:.3e} [Pa] at {1:.2f} [m]".format(max_value, pos))
if max_value > self.wing_box.material.yield_stress:
util.print_err("Wing box failed: top panel stress exceeded yield stress.")
class BottomPanelStressCalculator:
def __init__(self, load_case, moment):
self.load_case = load_case
self.wing_box = load_case.wing.wing_box
self.moment = moment
def calc(self, size):
pool = Pool(size)
results = pool.map(self.value, self.load_case.range)
pool.close()
pool.join()
self.print_result(results)
return interpolate.interp1d(self.load_case.range, results, kind="cubic", fill_value="extrapolate")
def value(self, y):
return self.moment(y) * (self.wing_box.calc_height(y) / 2 + self.wing_box.calc_centroid_z(y)) / \
self.wing_box.calc_moi_xx(y)
def print_result(self, results):
abs_min = abs(min(results))
abs_max = abs(max(results))
max_value = max(results) if abs_max > abs_min else min(results)
pos = results.index(max_value) * self.load_case.step
print("Maximum stress in bottom panel: {0:.3e} [Pa] at {1:.2f} [m]".format(max_value, pos))
if max_value > self.wing_box.material.yield_stress:
util.print_err("Wing box failed: bottom panel stress exceeded yield stress.")
class WebBucklingCalculator:
ks_clamped = util.load_k("stress_coefficients/ks_clamped.txt")
ks_hinged = util.load_k("stress_coefficients/ks_hinged.txt")
def __init__(self, load_case, shear, torsion):
self.load_case = load_case
self.wing_box = load_case.wing.wing_box
self.shear = shear
self.torsion = torsion
self.shear_factor = self.calc_shear_factor()
def calc(self, size):
pool = Pool(size)
results = pool.map(self.value, self.load_case.range)
pool.close()
pool.join()
min_margin = {}
min_values = []
for section in self.wing_box.sections:
min_margin[section] = [sys.float_info.max, False]
for result in results:
if result is not None:
min_values.append(abs(result[1]))
if abs(result[1]) < min_margin[result[0]][0]:
min_margin[result[0]][0] = abs(result[1])
min_margin[result[0]][1] = result[2]
self.print_result(min_margin)
return interpolate.interp1d(self.load_case.range, min_values, kind="cubic", fill_value="extrapolate")
def value(self, y):
section = self.wing_box.get_active_section(y)
shear_stress_avg = self.shear(y) / (self.wing_box.calc_height(y) * (section.front_spar_t + section.back_spar_t))
shear_stress_max = shear_stress_avg * self.shear_factor
length = section.end_y - section.start_y
width = self.wing_box.calc_height(y)
q = self.torsion(y) / (2 * self.wing_box.calc_area_cross_sectional(y))
        crit_stress = [self.critical_stress(section.front_spar_t, length, width) - q / section.front_spar_t,
                       self.critical_stress(section.back_spar_t, length, width) + q / section.back_spar_t]
return [section, min(crit_stress) / shear_stress_max, crit_stress[0] < crit_stress[1]] # true when lowest safety margin on front spar
def max_centroid(self, y):
wing_box_section = self.wing_box.get_active_section(y)
height = self.wing_box.calc_height(y) / 2 - self.wing_box.calc_centroid_z(y)
a = self.wing_box.calc_width(y) * wing_box_section.top_panel_t
az = a * height
area = height * (wing_box_section.front_spar_t + wing_box_section.back_spar_t)
a += area
az += area * height / 2
for stringer_set in wing_box_section.stringer_sets:
if stringer_set.surface_top:
z = height - wing_box_section.top_panel_t - stringer_set.calc_centroid_z()
area = stringer_set.calc_area()
a += area
az += area * z
return az / a
def calc_shear_factor(self):
v_max = 0
y = 0
for step in self.load_case.range:
v = abs(self.shear(step))
if v > v_max:
v_max = v
y = step
section = self.wing_box.get_active_section(y)
shear_stress_avg = v_max / (self.wing_box.calc_height(y) * (section.front_spar_t + section.back_spar_t))
shear_stress_max = v_max * self.max_centroid(y) / (self.wing_box.calc_moi_xx(y) * (section.front_spar_t + section.back_spar_t))
return shear_stress_max / shear_stress_avg
def critical_stress(self, thickness, length, width):
k = self.ks_clamped(length / width)
return sp.pi ** 2 * k * self.wing_box.material.e_modulus / (12 * (1 - self.wing_box.material.poisson_factor ** 2)) * (thickness / width) ** 2
def print_result(self, min_margin):
print("")
print("Results for shear buckling")
failure = False
for section in self.wing_box.sections:
if min_margin[section][0] < 1: failure = True
print("Wing box section range: {0:.2f}, {1:.2f} [m]; Lowest margin of safety: {2:.2f} on {3}".format(section.start_y, section.end_y, min_margin[section][0], "front spar" if min_margin[section][1] else "back spar"))
if failure: util.print_err("Wing box failed due to shear buckling")
class SkinBucklingCalculator:
kc_b = util.load_k("stress_coefficients/kc_B.txt")
kc_c = util.load_k("stress_coefficients/kc_C.txt")
def __init__(self, load_case, top_panel_stress, bottom_panel_stress):
self.load_case = load_case
self.wing_box = load_case.wing.wing_box
self.top_panel_stress = top_panel_stress
self.bottom_panel_stress = bottom_panel_stress
def calc(self, size):
pool = Pool(size)
results = []
for plate in self.find_plates():
results.append(pool.map(partial(self.value, plate=plate), self.load_case.range))
pool.close()
pool.join()
min_margin = {}
min_values = [sys.float_info.max]*len(self.load_case.range)
for section in self.wing_box.sections:
min_margin[section] = [sys.float_info.max, None]
for results_set in results:
for i in range(len(results_set)):
if results_set[i] is not None:
if 0 < results_set[i][1] < min_values[i]:
min_values[i] = results_set[i][1]
if 0 < results_set[i][1] < min_margin[results_set[i][0]][0]:
min_margin[results_set[i][0]][0] = results_set[i][1]
min_margin[results_set[i][0]][1] = results_set[i][2]
self.print_result(min_margin)
return interpolate.interp1d(self.load_case.range, min_values, kind="cubic", fill_value="extrapolate")
def value(self, y, plate):
if plate.start_y <= y <= plate.end_y:
stress_max = self.top_panel_stress(y) if plate.surface_top else self.bottom_panel_stress(y)
length = plate.end_y - plate.start_y
width = plate.width * self.wing_box.calc_width(y)
ratio = length / width
if ratio > 5: ratio = 5
k = self.kc_b(ratio) if plate.side else self.kc_c(ratio)
stress_crit = - self.critical_stress(k, plate.thickness, width)
return [self.wing_box.get_active_section(y), stress_crit / stress_max, plate]
else:
return None
def find_plates(self):
plates = []
for section in self.wing_box.sections:
stringer_coords_top = []
stringer_coords_bottom = []
for stringer_set in section.stringer_sets:
if stringer_set.surface_top:
if stringer_set.amount == 1:
stringer_coords_top.append(stringer_set.start_x)
else:
stringer_coords_top.extend(
np.linspace(stringer_set.start_x, stringer_set.end_x, stringer_set.amount))
else:
if stringer_set.amount == 1:
stringer_coords_bottom.append(stringer_set.start_x)
else:
stringer_coords_bottom.extend(
np.linspace(stringer_set.start_x, stringer_set.end_x, stringer_set.amount))
stringer_coords_top.sort()
stringer_coords_bottom.sort()
for i in range(len(stringer_coords_top) - 1):
width = stringer_coords_top[i + 1] - stringer_coords_top[i]
side = stringer_coords_top[i + 1] == 1 or stringer_coords_top[i] == 0
plates.append(util.SkinPlate(section.top_panel_t, section.start_y, section.end_y, width, side, True))
for j in range(len(stringer_coords_bottom) - 1):
width = stringer_coords_bottom[j + 1] - stringer_coords_bottom[j]
side = stringer_coords_bottom[j + 1] == 1 or stringer_coords_bottom[j] == 0
plates.append(util.SkinPlate(section.bottom_panel_t, section.start_y, section.end_y, width, side, False))
return plates
def critical_stress(self, k, thickness, width):
return sp.pi ** 2 * k * self.wing_box.material.e_modulus / (12 * (1 - self.wing_box.material.poisson_factor ** 2)) * (thickness / width) ** 2
def print_result(self, min_margin):
print("")
print("Results for skin buckling")
failure = False
for section in self.wing_box.sections:
if min_margin[section][0] < 1: failure = True
print("Wing box section range: {0:.2f}, {1:.2f} [m]; Lowest margin of safety: {2:.2f} on plate with width {3:.2f} [m]".format(section.start_y, section.end_y, min_margin[section][0], min_margin[section][1].width))
if failure: util.print_err("Wing box failed due to skin buckling")
class ColumnBucklingCalculator:
def __init__(self, load_case, moment):
self.moment = moment
self.load_case = load_case
self.wing_box = load_case.wing.wing_box
def calc(self, size):
pool = Pool(size)
results = []
for section in self.wing_box.sections:
for stringer_set in section.stringer_sets:
results.append(pool.map(partial(self.value, section=section, stringer_set=stringer_set), self.load_case.range))
pool.close()
pool.join()
min_margin = {}
min_values = [sys.float_info.max]*len(self.load_case.range)
for section in self.wing_box.sections:
min_margin[section] = [sys.float_info.max, None]
for results_set in results:
for i in range(len(results_set)):
if results_set[i] is not None:
if 0 < results_set[i][1] < min_values[i]:
min_values[i] = results_set[i][1]
if 0 < results_set[i][1] < min_margin[results_set[i][0]][0]:
min_margin[results_set[i][0]][0] = results_set[i][1]
min_margin[results_set[i][0]][1] = results_set[i][2]
self.print_result(min_margin)
return interpolate.interp1d(self.load_case.range, min_values, kind="cubic", fill_value="extrapolate")
def value(self, y, section, stringer_set):
if section.start_y <= y <= section.end_y:
height = self.wing_box.calc_height(y)
centroid_z = self.wing_box.calc_centroid_z(y)
z = height / 2 - stringer_set.calc_centroid_z()
if stringer_set.surface_top:
z -= centroid_z
z = -z
else:
z += centroid_z
max_stress = self.moment(y) * z / self.wing_box.calc_moi_xx(y)
crit_stress = - stringer_set.amount * self.critical_load(section.end_y - section.start_y,
stringer_set.calc_moi_xx_parallel_axis(height, centroid_z)) / stringer_set.calc_area()
return [section, crit_stress / max_stress, stringer_set]
else:
return None
def critical_load(self, length, moi):
        k = 1  # = 1 if both ends are pinned, 4 if both ends are clamped, 1/4 if one end is fixed and one end is free;
        # 1/sqrt(K) = 0.7 if one end is pinned and one end is free
return k * sp.pi ** 2 * self.wing_box.material.e_modulus * moi / length ** 2
def print_result(self, min_margin):
print("")
print("Results for column buckling")
failure = False
for section in self.wing_box.sections:
if min_margin[section][0] < 1: failure = True
print("Wing box section range: {0:.2f}, {1:.2f} [m]; Lowest margin of safety: {2:.2f} on {3} set with size {4}, {5} [m]".format(section.start_y, section.end_y, min_margin[section][0], min_margin[section][1].stringer_type.name, min_margin[section][1].stringer_width, min_margin[section][1].stringer_height))
if failure: util.print_err("Wing box failed due to column buckling")
```
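Every calculator above shares the same `calc()` skeleton: fan `load_case.range` out over a multiprocessing `Pool`, then wrap the sampled values in a cubic `interp1d` so downstream stages can treat the result as a continuous function of the span position. A minimal, self-contained sketch of that pattern (the quadratic is only a stand-in for a real `value(y)`, not project data):
```python
from multiprocessing import Pool

import numpy as np
from scipy import interpolate


def value(y):
    return 3.0 * y ** 2  # placeholder for e.g. ShearCalculator.value


if __name__ == "__main__":
    span = np.linspace(0.0, 30.0, 61)      # plays the role of load_case.range
    with Pool(4) as pool:                  # same fan-out the calculators use
        results = pool.map(value, span)
    curve = interpolate.interp1d(span, results, kind="cubic", fill_value="extrapolate")
    print(float(curve(12.3)))              # usable as a continuous function of y
```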
#### File: JortRoelofs/PlaneProject/structure.py
```python
import numpy as np
import scipy as sp
class Wing:
def __init__(self):
self.name = "" # name
self.wing_box = None # WingBox object
self.engine = None # Engine object
self.fuel_tank = None # FuelTank object
self.chord = None # c(y) [m]
self.cl0 = None # cl(y) [m] at aoa = 0 [deg]
self.cl10 = None # cl(y) [m] at aoa = 10 [deg]
self.cd0 = None # cd(y) [m] at aoa = 0 [deg]
self.cd10 = None # cd(y) [m] at aoa = 10 [deg]
self.cm0 = None # cm(y) [m] at aoa = 0 [deg]
self.cm10 = None # cm(y) [m] at aoa = 10 [deg]
self.interp_cons = 0 # constant
self.aoa = 0 # [rad]
self.surface_area = 0 # [m^2]
def cl(self, y):
return self.cl0(y) + self.interp_cons * (self.cl10(y) - self.cl0(y))
def cd(self, y):
return self.cd0(y) + self.interp_cons * (self.cd10(y) - self.cd0(y))
def cm(self, y):
return self.cm0(y) + self.interp_cons * (self.cm10(y) - self.cm0(y))
def normal(self, y, density, velocity):
        # lift and drag expect (y, density, velocity), so keep the argument order consistent
        return sp.cos(self.aoa) * self.lift(y, density, velocity) + sp.sin(self.aoa) * self.drag(y, density, velocity)
def lift(self, y, density, velocity):
return 0.5 * density * velocity ** 2 * self.cl(y) * self.chord(y)
def drag(self, y, density, velocity):
return 0.5 * density * velocity ** 2 * self.cd(y) * self.chord(y)
def moment(self, y, density, velocity):
return 0.5 * density * velocity ** 2 * self.cm(y) * self.chord(y) ** 2
class WingBox:
def __init__(self):
self.name = None
self.start_y = 0
self.end_y = 0
self.width = None
self.height = None
self.material = None
self.sections = []
self.moi_xx = None
self.moi_polar = None
def calc_width(self, y):
return self.width.evaluate(y=y)
def calc_height(self, y):
return self.height.evaluate(y=y)
def calc_material_area(self, y):
section = self.get_active_section(y)
return section.calc_material_area(self.calc_width(y), self.calc_height(y))
def calc_area_cross_sectional(self, y):
section = self.get_active_section(y)
return section.calc_area_cross_sectional(self.calc_width(y), self.calc_height(y))
def calc_circumference(self, y):
return 2 * (self.calc_width(y) + self.calc_height(y))
def calc_centroid_x(self, y):
return self.get_active_section(y).calc_centroid_x(self.calc_width(y), self.calc_height(y))
def calc_centroid_z(self, y):
return self.get_active_section(y).calc_centroid_z(self.calc_width(y), self.calc_height(y))
def calc_moi_xx(self, y):
if self.moi_xx is not None:
return self.moi_xx(y)
else:
width = self.calc_width(y)
height = self.calc_height(y)
section = self.get_active_section(y)
centroid_z = self.calc_centroid_z(y)
moi_xx = section.calc_moi_xx_parallel_axis(width, height, centroid_z)
inside_height = height - section.top_panel_t - section.bottom_panel_t
for stringer_set in section.stringer_sets:
moi_xx += stringer_set.calc_moi_xx_parallel_axis(inside_height, centroid_z)
return moi_xx
def calc_moi_zz(self, y):
width = self.calc_width(y)
height = self.calc_height(y)
section = self.get_active_section(y)
centroid_x = self.calc_centroid_x(y)
moi_zz = section.calc_moi_zz(width, height) + \
section.calc_material_area(width, height) * centroid_x ** 2
        inside_width = width - section.front_spar_t - section.back_spar_t
for stringer_set in section.stringer_sets:
moi_zz += stringer_set.calc_moi_zz_parallel_axis(inside_width, centroid_x)
return moi_zz
def calc_moi_polar(self, y):
if self.moi_polar is not None:
return self.moi_polar(y)
else:
section = self.get_active_section(y)
integral = self.calc_width(y) * (section.top_panel_t + section.bottom_panel_t) / (section.top_panel_t * section.bottom_panel_t) + \
self.calc_height(y) * (section.front_spar_t + section.back_spar_t) / (section.front_spar_t * section.back_spar_t)
return 4 * self.calc_area_cross_sectional(y) ** 2 / integral
def get_active_section(self, y):
for section in self.sections:
if section.start_y <= y <= section.end_y:
return section
return None
class WingBoxSection:
def __init__(self):
self.start_y = 0.0
self.end_y = 0.0
self.front_spar_t = 0
self.back_spar_t = 0
self.top_panel_t = 0
self.bottom_panel_t = 0
self.stringer_sets = []
def calc_material_area(self, width, height):
a = width * (self.top_panel_t + self.bottom_panel_t) + height * (self.front_spar_t + self.back_spar_t)
for stringer_set in self.stringer_sets:
a += stringer_set.calc_area()
return a
def calc_area_cross_sectional(self, width, height):
return (width - self.front_spar_t - self.back_spar_t) * (height - self.top_panel_t - self.bottom_panel_t)
def calc_centroid_x(self, width, height):
ax = height * width * (self.back_spar_t - self.front_spar_t) / 2
a = height * (self.front_spar_t + self.back_spar_t)
for stringer_set in self.stringer_sets:
area = stringer_set.calc_area()
x = (width - self.front_spar_t - self.back_spar_t) * (stringer_set.calc_centroid_x() - 0.5)
ax += area * x
a += area
return ax / a
def calc_centroid_z(self, width, height):
az = width * height * (self.top_panel_t - self.bottom_panel_t) / 2
a = height * (self.top_panel_t + self.bottom_panel_t)
for stringer_set in self.stringer_sets:
area = stringer_set.calc_area()
z = (height - self.top_panel_t - self.bottom_panel_t) * 0.5 - stringer_set.calc_centroid_z()
if not stringer_set.surface_top:
z = -z
az += area * z
a += area
return az / a
def calc_moi_xx(self, width, height):
moi = (self.front_spar_t + self.back_spar_t) * height ** 3 / 12
moi += width * self.top_panel_t ** 3 / 12 + width * self.top_panel_t * ((height + self.top_panel_t) / 2) ** 2
moi += width * self.bottom_panel_t ** 3 / 12 + width * self.bottom_panel_t * ((height + self.bottom_panel_t) / 2) ** 2
return moi
def calc_moi_xx_parallel_axis(self, width, height, location):
moi = self.calc_moi_xx(width, height)
moi += (self.front_spar_t + self.back_spar_t) * height * location ** 2
moi += width * self.top_panel_t * ((height + self.top_panel_t) / 2 - location) ** 2
moi += width * self.bottom_panel_t * ((height + self.bottom_panel_t) / 2 + location) ** 2
return moi
def calc_moi_zz(self, width, height):
return width ** 3 * height / 12 - (width - self.front_spar_t - self.back_spar_t) ** 3 * (height - self.top_panel_t - self.bottom_panel_t) / 12
def calc_moi_polar(self, y, width, height):
print(self.start_y + y)
def __hash__(self):
return hash((self.start_y, self.end_y, self.front_spar_t, self.back_spar_t, self.top_panel_t, self.bottom_panel_t))
def __eq__(self, other):
return (self.start_y, self.end_y, self.front_spar_t, self.back_spar_t, self.top_panel_t, self.bottom_panel_t) == (other.start_y, other.end_y, other.front_spar_t, other.back_spar_t, other.top_panel_t, other.bottom_panel_t)
class FuelTank:
rho_fuel = 0.804e3 # [kg/m^3]
def __init__(self):
self.start_y = 0 # [m]
self.end_y = 0 # [m]
self.wing_box = None
def fuel_cross_section(self, y):
if self.start_y <= y <= self.end_y:
return self.wing_box.calc_area_cross_sectional(y)
else:
return 0.0
class Engine:
def __init__(self):
self.x = 0 # [m]
self.y = 0 # [m]
self.z = 0 # [m]
self.thrust = 0 # [N]
self.weight = 0 # [N]
class StringerType:
def __init__(self):
self.name = ""
self.area = None
self.centroid_x = None
self.centroid_z = None
self.moi_xx = None
self.moi_zz = None
def calc_area(self, width, height, thickness):
return self.area.evaluate(w=width, h=height, t=thickness)
def calc_centroid_x(self, width, height, thickness):
return self.centroid_x.evaluate(w=width, h=height, t=thickness)
def calc_centroid_z(self, width, height, thickness):
return self.centroid_z.evaluate(w=width, h=height, t=thickness)
def calc_moi_xx(self, width, height, thickness):
return self.moi_xx.evaluate(w=width, h=height, t=thickness, a=self.calc_area(width, height, thickness),
z=self.calc_centroid_z(width, height, thickness))
def calc_moi_zz(self, width, height, thickness):
return self.moi_zz.evaluate(w=width, h=height, t=thickness, a=self.calc_area(width, height, thickness),
z=self.calc_centroid_x(width, height, thickness))
class StringerSet:
def __init__(self):
self.stringer_type = None
self.amount = 0
self.stringer_width = 0
self.stringer_height = 0
self.stringer_thickness = 0
self.start_x = 0 # fraction of wing box width [-]
self.end_x = 0 # fraction [-]
self.surface_top = True # True if top, False if bottom
def calc_area(self):
return self.stringer_type.calc_area(self.stringer_width, self.stringer_height, self.stringer_thickness) * \
self.amount
def calc_centroid_x(self, width):
return width * (self.start_x + (self.end_x - self.start_x) / 2)
def calc_centroid_z(self):
centroid = self.stringer_type.calc_centroid_z(self.stringer_width, self.stringer_height, self.stringer_thickness)
if self.stringer_height - centroid < centroid: centroid = self.stringer_height - centroid
return centroid
def calc_moi_xx(self):
return self.stringer_type.calc_moi_xx(self.stringer_width, self.stringer_height, self.stringer_thickness) * \
self.amount
def calc_moi_xx_parallel_axis(self, height, location):
centroid = self.calc_centroid_z()
if centroid > (self.stringer_height - centroid):
centroid = (self.stringer_height - centroid)
z = height / 2 - centroid
if self.surface_top:
z -= location
else:
z += location
        return self.calc_moi_xx() + self.calc_area() * z ** 2  # parallel-axis term uses the offset z computed above
def calc_moi_zz(self, width):
centroid_stringer = self.stringer_type.calc_centroid_x(self.stringer_width, self.stringer_height,
self.stringer_thickness)
if centroid_stringer > (self.stringer_width - centroid_stringer):
centroid_stringer = self.stringer_width - centroid_stringer
start_x = self.start_x * width + centroid_stringer
end_x = self.end_x * width - centroid_stringer
centroid = self.calc_centroid_x(width)
area = self.stringer_type.calc_area(self.stringer_width, self.stringer_height, self.stringer_thickness)
moi_zz = self.amount * self.stringer_type.calc_moi_zz(self.stringer_width, self.stringer_height,
self.stringer_thickness)
stringer_x = np.linspace(start_x, end_x, self.amount)
for x in stringer_x:
moi_zz += area * (centroid - x) ** 2
return moi_zz
def calc_moi_zz_parallel_axis(self, width, location):
centroid_stringer = self.stringer_type.calc_centroid_x(self.stringer_width, self.stringer_height,
self.stringer_thickness)
if centroid_stringer > (self.stringer_width - centroid_stringer):
centroid_stringer = self.stringer_width - centroid_stringer
start_x = self.start_x * width + centroid_stringer
end_x = self.end_x * width - centroid_stringer
area = self.stringer_type.calc_area(self.stringer_width, self.stringer_height, self.stringer_thickness)
moi_zz = self.amount * self.stringer_type.calc_moi_zz(self.stringer_width, self.stringer_height,
self.stringer_thickness)
stringer_x = np.linspace(start_x, end_x, self.amount)
for x in stringer_x:
moi_zz += area * (location - x) ** 2
return moi_zz
class Material:
def __init__(self):
self.name = ""
self.e_modulus = 0
self.shear_modulus = 0
self.poisson_factor = 0
self.yield_stress = 0
self.density = 0
class LoadCase:
def __init__(self):
self.range = None
self.wing = None
self.step = 0
self.load_factor = 0
self.velocity = 0
self.density = 0
self.aircraft_weight = 0
self.limit_deflection = 0
self.limit_twist = 0
```
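A small usage sketch for the classes above, assuming `structure.py` and `util.py` (shown next) are importable from the repo root; every number here is a placeholder, not a value taken from the project:
```python
from structure import Material, WingBox, WingBoxSection
from util import GeometryFunction

aluminium = Material()
aluminium.e_modulus = 71.7e9          # placeholder material properties
aluminium.shear_modulus = 26.9e9
aluminium.poisson_factor = 0.33
aluminium.yield_stress = 503e6
aluminium.density = 2810

section = WingBoxSection()
section.start_y, section.end_y = 0.0, 15.0
section.front_spar_t = section.back_spar_t = 0.004
section.top_panel_t = section.bottom_panel_t = 0.003

box = WingBox()
box.material = aluminium
box.sections = [section]
box.end_y = 15.0
box.width = GeometryFunction("1.2 - 0.02 * y")    # tapering box width [m]
box.height = GeometryFunction("0.3 - 0.005 * y")  # tapering box height [m]

print(box.calc_moi_xx(5.0), box.calc_moi_polar(5.0))
```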
#### File: JortRoelofs/PlaneProject/util.py
```python
import string
import sys
from scipy import interpolate
def print_err(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def load_k(file):
f = open(file, 'r')
lines = f.readlines()
f.close()
val = [[], []]
for line in lines:
tokens = line.split(',')
val[0].append(float(tokens[0]))
val[1].append(float(tokens[1]))
return interpolate.interp1d(val[0], val[1], kind="cubic", fill_value="extrapolate")
class GeometryFunction:
def __init__(self, function):
self.function = function
for c in string.ascii_lowercase:
if c in self.function:
self.function = self.function.replace(c, "{%s}" % c)
def evaluate(self, **kwargs):
return eval(self.function.format(**kwargs))
class SkinPlate:
def __init__(self, thickness, start_y, end_y, width, side, surface_top):
self.thickness = thickness
self.start_y = start_y
self.end_y = end_y
self.width = width
self.side = side
self.surface_top = surface_top
``` |
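`GeometryFunction` rewrites every lowercase letter in the expression into a named placeholder before calling `eval`, so only single-letter variable names are safe inside these expressions. A quick sketch of that behaviour:
```python
from util import GeometryFunction

# "5.0 - 0.12 * y" is rewritten internally to "5.0 - 0.12 * {y}"
chord = GeometryFunction("5.0 - 0.12 * y")
print(chord.evaluate(y=10.0))   # -> 3.8
```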
{
"source": "Jortuk/SFIA-1",
"score": 3
} |
#### File: SFIA-1/tests/test_back_end.py
```python
import unittest
from flask import abort, url_for
from flask_testing import TestCase
from application import app, db, bcrypt
from flask_login import current_user
from application.models import Users, Shoes, Shops, ShoesShops
from os import getenv
class TestBase(TestCase):
def create_app(self):
# pass in configurations for test database
config_name = 'testing'
app.config.update(SQLALCHEMY_DATABASE_URI=getenv('TEST_DB_URI'),
SECRET_KEY=getenv('TEST_SECRET_KEY'),
WTF_CSRF_ENABLED=False,
DEBUG=True
)
return app
def setUp(self):
"""
Will be called before every test
"""
# ensure there is no data in the test database when the test starts
db.session.commit()
db.drop_all()
db.create_all()
# create test admin user
hashed_pw = bcrypt.generate_password_hash('<PASSWORD>')
        admin = Users(user_name="testadmin", email="<EMAIL>", password=hashed_pw)
#create shop inside Shops table
shop1= Shops(shop_address="1 Test Address", shop_city="Test1City")
shop2 = Shops(shop_address="2 Test Address", shop_city="Test2City")
shop3 = Shops(shop_address="3 Test Address", shop_city="Test3City")
shop4 = Shops(shop_address="4 Test Address", shop_city="Test4City")
shop5 = Shops(shop_address="5 Test Address", shop_city="Test5City")
#create shoe inside Shoes table
shoe1 = Shoes(shoe_name="testshoe1", shoe_size="S", shoe_price="39.99")
shoe2 = Shoes(shoe_name="testshoe2", shoe_size="S", shoe_price="39.99")
shoe3 = Shoes(shoe_name="testshoe3", shoe_size="S", shoe_price="39.99")
shoe4 = Shoes(shoe_name="testshoe4", shoe_size="S", shoe_price="39.99")
shoe5 = Shoes(shoe_name="testshoe5", shoe_size="S", shoe_price="39.99")
shoe6 = Shoes(shoe_name="testshoe6", shoe_size="S", shoe_price="39.99")
#create shoe inside ShoesShops table
shoeshop1 = ShoesShops(quantity=0, shoe_id=2, shop_id=1)
shoeshop2 = ShoesShops(quantity=0, shoe_id=3, shop_id=2)
shoeshop3 = ShoesShops(quantity=0, shoe_id=4, shop_id=3)
shoeshop4 = ShoesShops(quantity=0, shoe_id=5, shop_id=4)
shoeshop5 = ShoesShops(quantity=0, shoe_id=6, shop_id=5)
#save data to database
db.session.add(admin)
db.session.add(shop1)
db.session.add(shop2)
db.session.add(shop3)
db.session.add(shop4)
db.session.add(shop5)
db.session.add(shoe1)
db.session.add(shoe2)
db.session.add(shoe3)
db.session.add(shoe4)
db.session.add(shoe5)
db.session.add(shoe6)
db.session.add(shoeshop1)
db.session.add(shoeshop2)
db.session.add(shoeshop3)
db.session.add(shoeshop4)
db.session.add(shoeshop5)
db.session.commit()
def tearDown(self):
"""
Will be called after every test
"""
db.session.remove()
db.drop_all()
class TestViews(TestBase):
def test_home_view(self):
response = self.client.get(url_for('home'))
self.assertEqual(response.status_code, 200)
def test_shoes_view(self):
response = self.client.get(url_for('shoes'))
self.assertEqual(response.status_code, 200)
def test_shops_view(self):
response = self.client.get(url_for('shops'))
self.assertEqual(response.status_code, 200)
def test_login_view(self):
response = self.client.get(url_for('login'))
self.assertEqual(response.status_code, 200)
def test_shop1_view(self):
response = self.client.get(url_for('shop1'))
self.assertEqual(response.status_code, 200)
def test_shop2_view(self):
response = self.client.get(url_for('shop2'))
self.assertEqual(response.status_code, 200)
def test_shop3_view(self):
response = self.client.get(url_for('shop3'))
self.assertEqual(response.status_code, 200)
def test_shop4_view(self):
response = self.client.get(url_for('shop4'))
self.assertEqual(response.status_code, 200)
def test_shop5_view(self):
response = self.client.get(url_for('shop5'))
self.assertEqual(response.status_code, 200)
class TestAuthenticatedViews(TestBase):
def test_shoesadmin_view(self):
with self.client:
self.client.post(
url_for('login'),
data=dict(
email="<EMAIL>",
password="<PASSWORD>"
),
follow_redirects=True
)
response = self.client.get(url_for('shoesadmin'))
self.assertEqual(response.status_code, 200)
def test_logout_view(self):
with self.client:
self.client.post(
url_for('login'),
data=dict(
email="<EMAIL>",
password="admin2020"
),
follow_redirects=True
)
response = self.client.get(url_for('logout'))
self.assertEqual(response.status_code, 302)
def test_shoe_add_view(self):
with self.client:
self.client.post(
url_for('login'),
data=dict(
email="<EMAIL>",
password="<PASSWORD>"
),
follow_redirects=True
)
response = self.client.get(url_for('addShoe'))
self.assertEqual(response.status_code, 200)
def test_shoe_update_view(self):
with self.client:
self.client.post(
url_for('login'),
data=dict(
email="<EMAIL>",
password="<PASSWORD>"
),
follow_redirects=True
)
shoe = Shoes.query.first()
response = self.client.get(url_for('updateShoe',id=1))
self.assertEqual(response.status_code, 200)
def test_shoe_delete_view(self):
with self.client:
self.client.post(
url_for('login'),
data=dict(
email="<EMAIL>",
password="<PASSWORD>"
),
follow_redirects=True
)
shoe = Shoes.query.first()
response = self.client.get(url_for('deleteShoe',id=1))
self.assertEqual(response.status_code, 302)
def test_shop1admin_view(self):
with self.client:
self.client.post(
url_for('login'),
data=dict(
email="<EMAIL>",
password="<PASSWORD>"
),
follow_redirects=True
)
response = self.client.get(url_for('shop1admin'))
self.assertEqual(response.status_code, 200)
def test_shop2admin_view(self):
with self.client:
self.client.post(
url_for('login'),
data=dict(
email="<EMAIL>",
password="<PASSWORD>"
),
follow_redirects=True
)
response = self.client.get(url_for('shop2admin'))
self.assertEqual(response.status_code, 200)
def test_shop3admin_view(self):
with self.client:
self.client.post(
url_for('login'),
data=dict(
email="<EMAIL>",
password="<PASSWORD>"
),
follow_redirects=True
)
response = self.client.get(url_for('shop3admin'))
self.assertEqual(response.status_code, 200)
def test_shop4admin_view(self):
with self.client:
self.client.post(
url_for('login'),
data=dict(
email="<EMAIL>",
password="<PASSWORD>"
),
follow_redirects=True
)
response = self.client.get(url_for('shop4admin'))
self.assertEqual(response.status_code, 200)
def test_shop5admin_view(self):
with self.client:
self.client.post(
url_for('login'),
data=dict(
email="<EMAIL>",
password="<PASSWORD>"
),
follow_redirects=True
)
response = self.client.get(url_for('shop5admin'))
self.assertEqual(response.status_code, 200)
def test_updateshop1_view(self):
with self.client:
self.client.post(
url_for('login'),
data=dict(
email="<EMAIL>",
password="<PASSWORD>"
),
follow_redirects=True
)
shoeshop = ShoesShops.query.first()
response = self.client.get(url_for('updateShop1',id=2))
self.assertEqual(response.status_code, 200)
def test_updateshop2_view(self):
with self.client:
self.client.post(
url_for('login'),
data=dict(
email="<EMAIL>",
password="<PASSWORD>"
),
follow_redirects=True
)
shoeshop = ShoesShops.query.first()
response = self.client.get(url_for('updateShop2',id=2))
self.assertEqual(response.status_code, 200)
def test_updateshop3_view(self):
with self.client:
self.client.post(
url_for('login'),
data=dict(
email="<EMAIL>",
password="<PASSWORD>"
),
follow_redirects=True
)
shoeshop = ShoesShops.query.first()
response = self.client.get(url_for('updateShop3',id=2))
self.assertEqual(response.status_code, 200)
def test_updateshop4_view(self):
with self.client:
self.client.post(
url_for('login'),
data=dict(
email="<EMAIL>",
password="<PASSWORD>"
),
follow_redirects=True
)
shoeshop = ShoesShops.query.first()
response = self.client.get(url_for('updateShop4',id=2))
self.assertEqual(response.status_code, 200)
def test_updateshop5_view(self):
with self.client:
self.client.post(
url_for('login'),
data=dict(
email="<EMAIL>",
password="<PASSWORD>"
),
follow_redirects=True
)
shoeshop = ShoesShops.query.first()
response = self.client.get(url_for('updateShop5',id=2))
self.assertEqual(response.status_code, 200)
class TestPosts(TestBase):
# def test_login(self):
# if current_user.is_authenticated:
# url_for('home')
# else:
# with self.client:
# response = self.client.post(
# url_for('login'),
# data=dict(
# email="<EMAIL>",
# password="<PASSWORD>"
# ),
# follow_redirects=True
# )
# self.assertIn(b'<EMAIL>', response.data)
def test_add_new_shoe(self):
with self.client:
self.client.post(
url_for('login'),
data=dict(
email="<EMAIL>",
password="<PASSWORD>"
),
follow_redirects=True
)
response = self.client.post(
url_for('addShoe'),
data=dict(
shoe_id=9,
shoe_name="Test",
shoe_size="S",
shoe_price=40
),
follow_redirects=True
)
self.assertIn(b'Test', response.data)
def test_update_shoe(self):
with self.client:
self.client.post(
url_for('login'),
data=dict(
email="<EMAIL>",
password="<PASSWORD>"
),
follow_redirects=True
)
response = self.client.post(
url_for('updateShoe',id=1),
data=dict(
shoe_name="UpdateTest",
shoe_price=12.34
),
follow_redirects=True
)
self.assertIn(b'UpdateTest', response.data)
def test_update_shop1(self):
with self.client:
self.client.post(
url_for('login'),
data=dict(
email="<EMAIL>",
password="<PASSWORD>"
),
follow_redirects=True
)
response = self.client.post(
'/update_shop1/2',
data=dict(
quantity="14"
),
follow_redirects=True
)
self.assertIn(b"14", response.data)
def test_update_shop2(self):
with self.client:
self.client.post(
url_for('login'),
data=dict(
email="<EMAIL>",
password="<PASSWORD>"
),
follow_redirects=True
)
response = self.client.post(
                '/update_shop2/3',
data=dict(
quantity="52"
),
follow_redirects=True
)
self.assertIn(b"52", response.data)
def test_update_shop3(self):
with self.client:
self.client.post(
url_for('login'),
data=dict(
email="<EMAIL>",
password="<PASSWORD>"
),
follow_redirects=True
)
response = self.client.post(
'/update_shop3/4',
data=dict(
quantity="11"
),
follow_redirects=True
)
self.assertIn(b"11", response.data)
def test_update_shop4(self):
with self.client:
self.client.post(
url_for('login'),
data=dict(
email="<EMAIL>",
password="<PASSWORD>"
),
follow_redirects=True
)
response = self.client.post(
'/update_shop4/5',
data=dict(
quantity="23"
),
follow_redirects=True
)
self.assertIn(b"23", response.data)
def test_update_shop5(self):
with self.client:
self.client.post(
url_for('login'),
data=dict(
email="<EMAIL>",
password="<PASSWORD>"
),
follow_redirects=True
)
response = self.client.post(
'/update_shop5/6',
data=dict(
quantity="50"
),
follow_redirects=True
)
self.assertIn(b"50", response.data)
``` |
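A hedged sketch of a test runner entry point for the suite above, assuming the file lives at `tests/test_back_end.py` and that `TEST_DB_URI` and `TEST_SECRET_KEY` are exported beforehand:
```python
import unittest

if __name__ == "__main__":
    # Discover and run everything under tests/ matching test_*.py
    suite = unittest.defaultTestLoader.discover("tests", pattern="test_*.py")
    unittest.TextTestRunner(verbosity=2).run(suite)
```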
{
"source": "Jortuk/SFIA-2",
"score": 2
} |
#### File: service_3/application/__init__.py
```python
from flask import Flask, request
import requests
from flask_sqlalchemy import SQLAlchemy
from os import getenv
import random
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI']=getenv('SFIA2_DB_URI')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SECRET_KEY'] = getenv('SECRET_KEY')
db = SQLAlchemy(app)
from application.models import Dessert
@app.route('/', methods=['GET', 'POST'])
def dessert():
rand = random.randint(1, 20)
getDessert = Dessert.query.filter_by(id=rand).first()
print(getDessert)
return str(getDessert)
``` |
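A minimal sketch of exercising the `/` route with Flask's built-in test client, assuming `SFIA2_DB_URI` points at a database whose dessert table covers ids 1 through 20:
```python
from application import app

# Returns the string form of a randomly selected Dessert row
with app.test_client() as client:
    response = client.get("/")
    print(response.status_code, response.data)
```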
{
"source": "jortvangorkum/Network-Science-Hierarchical-Dendrogram",
"score": 2
} |
#### File: jortvangorkum/Network-Science-Hierarchical-Dendrogram/main.py
```python
from src.results.latex import generate_latex_information
from src.comparison.testing import tost_test, ttest_paired
from src.results.main import print_graph_information, print_hypothesis_testing
from src.algorithms.main import execute_algorithms
import matplotlib.pyplot as plt
dpi = 96
plt.rcParams["figure.figsize"] = (2000 / dpi, 1200 / dpi)
plt.rcParams["figure.dpi"] = dpi
files = [
("Contiguous USA", "download.tsv.contiguous-usa/contiguous-usa/out.contiguous-usa", " "),
("Dolphins", "download.tsv.dolphins/dolphins/out.dolphins", "\t"),
("PDZBase", "download.tsv.maayan-pdzbase/maayan-pdzbase/out.maayan-pdzbase", "\t"),
("<NAME>", "download.tsv.adjnoun_adjacency/adjnoun_adjacency/out.adjnoun_adjacency_adjacency", " "),
("American Football", "download.tsv.dimacs10-football/dimacs10-football/out.dimacs10-football", "\t")
]
def execute_algorithms_on_files():
for (name, file_path, delimiter) in files:
print(f"\n\n{name}\n")
(ravasz, girvan_newman) = execute_algorithms(file_path, delimiter)
generate_latex_information(name, ravasz, girvan_newman)
print("\nRavasz:")
print_graph_information(name, ravasz[0])
print_graph_information(name, ravasz[1])
print("\nGirvan-Newman")
print_graph_information(name, girvan_newman[0])
print_graph_information(name, girvan_newman[1])
execute_algorithms_on_files()
```
#### File: src/algorithms/girvan_newman.py
```python
from networkx.algorithms.centrality.betweenness import edge_betweenness_centrality
from networkx.algorithms.community.centrality import girvan_newman
import numpy as np
def highest_valuable_edge(graph, k):
betweenness = edge_betweenness_centrality(graph, k=k)
return max(betweenness, key=betweenness.get)
def create_list_of_iterator(iterator):
return np.array(list(iterator), dtype=object)
def girvan_newman_algorithm(graph, k=None):
if k is not None:
return create_list_of_iterator(girvan_newman(graph, lambda g: highest_valuable_edge(g, k)))
return create_list_of_iterator(girvan_newman(graph))
def convert_clusters_to_labels(clusters, n_nodes):
pred_labels = np.empty(n_nodes)
for (i, cluster) in enumerate(clusters):
for x in cluster:
pred_labels[int(x) - 1] = i
return pred_labels
```
#### File: src/algorithms/main.py
```python
import matplotlib.pyplot as plt
from src.results.latex import generate_latex_table
from src.results.dendrogram import networkx_to_dendrogram, scipy_to_dendrogram
from src.results.main import graph_quality_measures, print_information
from src.algorithms.girvan_newman import girvan_newman_algorithm
from src.algorithms.ravasz import ravasz_algorithm
from src.comparison.main import determine_measures
from src.data import get_data_from_path
def execute_ravasz_algorithm(file_path, delimiter):
(G, A, L, label_count) = get_data_from_path(file_path, delimiter)
(Z, mem_usages, durations, clusters_sizes, sil_scores, cal_har_scores, dav_bou_scores) = determine_measures(G, A, lambda: ravasz_algorithm(A), "Ravasz")
print("Ravasz:")
print_information(mem_usages, durations, sil_scores, cal_har_scores, dav_bou_scores)
graph_quality_measures(clusters_sizes, sil_scores, cal_har_scores, dav_bou_scores)
scipy_to_dendrogram(Z)
return (mem_usages, durations, sil_scores, cal_har_scores, dav_bou_scores)
def execute_girvan_newman_algorithm(file_path, delimiter):
(G, A, L, label_count) = get_data_from_path(file_path, delimiter)
(clusters, mem_usages, durations, clusters_sizes, sil_scores, cal_har_scores, dav_bou_scores) = determine_measures(G, A, lambda: girvan_newman_algorithm(G), "Girvan-Newman")
print("\nGirvan-Newman:")
print_information(mem_usages, durations, sil_scores, cal_har_scores, dav_bou_scores)
graph_quality_measures(clusters_sizes, sil_scores, cal_har_scores, dav_bou_scores)
networkx_to_dendrogram(G, clusters)
return (mem_usages, durations, sil_scores, cal_har_scores, dav_bou_scores)
def execute_algorithms(file_path, delimiter):
ravasz = execute_ravasz_algorithm(file_path, delimiter)
girvan_newman = execute_girvan_newman_algorithm(file_path, delimiter)
return (ravasz, girvan_newman)
``` |
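A hedged sketch of driving the Girvan-Newman wrapper from `girvan_newman.py` above on a toy graph; the nodes are relabelled to 1..n because `convert_clusters_to_labels` assumes 1-based node ids:
```python
import networkx as nx

from src.algorithms.girvan_newman import (convert_clusters_to_labels,
                                          girvan_newman_algorithm)

# Relabel karate club nodes from 0..33 to 1..34
G = nx.relabel_nodes(nx.karate_club_graph(), lambda n: n + 1)
partitions = girvan_newman_algorithm(G)      # one partition per removed-edge level
first_split = partitions[0]                  # coarsest split into two communities
labels = convert_clusters_to_labels(first_split, G.number_of_nodes())
print(labels)
```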
{
"source": "jortvangorkum/thesis-paper",
"score": 3
} |
#### File: generate-graphs/generate_graphs/memory.py
```python
import ast
from glob import glob
import os
import pathlib
import pandas as pd
def parse_memory_data_file(data_memory_path: str, folder_name: str) -> pd.DataFrame:
df_data_memory = pd.DataFrame(columns=['Benchmark Type', 'Benchmark', 'Amount Nodes', 'Bytes Allocated', 'Average Bytes Used', 'Max Bytes Used'])
benchmark_type_paths = glob(f"{data_memory_path}/{folder_name}/**")
benchmark_type_names = [pathlib.PurePath(folder).name for folder in benchmark_type_paths]
for (benchmark_type_path, benchmark_type_name) in zip(benchmark_type_paths, benchmark_type_names):
benchmark_paths = glob(f"{benchmark_type_path}/**")
benchmark_names = [pathlib.PurePath(folder).name for folder in benchmark_paths]
for (benchmark_path, benchmark_name) in zip(benchmark_paths, benchmark_names):
amount_nodes_file_paths = glob(f"{benchmark_path}/**")
list_amount_nodes = [os.path.splitext(pathlib.PurePath(file).name)[0] for file in amount_nodes_file_paths]
for (amount_nodes_file_path, amount_nodes) in zip(amount_nodes_file_paths, list_amount_nodes):
with open(amount_nodes_file_path, 'r') as data_memory_file:
data_memory_contents = data_memory_file.read()
dict_memory_values = dict(ast.literal_eval(data_memory_contents))
average_bytes_used = dict_memory_values["average_bytes_used"]
max_bytes_used = dict_memory_values["max_bytes_used"]
bytes_allocated = dict_memory_values["bytes allocated"]
df_data_memory.loc[len(df_data_memory.index)] = [benchmark_type_name, benchmark_name, int(amount_nodes), int(bytes_allocated), int(average_bytes_used), int(max_bytes_used)] # type: ignore
df_data_memory.sort_values('Benchmark', inplace=True)
df_data_memory_single_iteration = df_data_memory[df_data_memory['Benchmark Type'] == 'Single Iteration']
df_data_memory_single_iteration = df_data_memory_single_iteration.drop('Benchmark Type', axis=1)
print(df_data_memory_single_iteration)
return df_data_memory_single_iteration
``` |
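A hypothetical driver for the parser above; the folder name is made up, and the expected layout is `<data_memory_path>/<folder_name>/<benchmark type>/<benchmark>/<amount nodes>.txt`:
```python
from generate_graphs.memory import parse_memory_data_file

# "run-01" is a hypothetical folder name for one benchmark run
df = parse_memory_data_file("data/memory", "run-01")
print(df.groupby("Benchmark")["Max Bytes Used"].max())
```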
{
"source": "jorvasquezr/IA_ChessGame_Solver",
"score": 3
} |
#### File: jorvasquezr/IA_ChessGame_Solver/boardCreatorGUI.py
```python
import pyglet
from pyglet.window import mouse
import pyglet
import itertools
import chessGUI
import chess
from game import Game
from pyglet_gui.manager import Manager
from pyglet_gui.buttons import Button, OneTimeButton, Checkbox, GroupButton
from pyglet_gui.containers import VerticalContainer
from pyglet_gui.theme import Theme
import theme
from pyglet_gui.manager import Manager
from pyglet_gui.constants import *
from pyglet_gui.buttons import Button, OneTimeButton, Checkbox, GroupButton
from pyglet_gui.gui import Label
from pyglet_gui.containers import VerticalContainer,HorizontalContainer
from pyglet_gui.theme import Theme
from theme import getTheme
class BoardCreatorGUI(pyglet.window.Window):
spriteimage = pyglet.resource.image('resources/spritesheet.png')
backgroundImg = pyglet.resource.image('resources/Background.png')
chessboard = pyglet.resource.image('resources/chessboard.png')
chessboardInv = pyglet.resource.image('resources/chessboardflipped.png')
spritesheet = pyglet.image.ImageGrid(spriteimage, 2, 6)
BLACK_KING, BLACK_QUEEN, BLACK_BISHOP, BLACK_KNIGHT, BLACK_ROOK, BLACK_PAWN, WHITE_KING, WHITE_QUEEN, WHITE_BISHOP, \
WHITE_KNIGHT, WHITE_ROOK, WHITE_PAWN = range(12)
piecesIds=["NR", "ND", "NA","NC", "NT", "NP","BR", "BD", "BA","BC", "BT", "BP"]
dictPieces = {"NR": spritesheet[BLACK_KING], "ND": spritesheet[BLACK_QUEEN], "NA": spritesheet[BLACK_BISHOP],
"NC": spritesheet[BLACK_KNIGHT], "NT": spritesheet[BLACK_ROOK], "NP": spritesheet[BLACK_PAWN],
"BR": spritesheet[WHITE_KING], "BD": spritesheet[WHITE_QUEEN], "BA": spritesheet[WHITE_BISHOP],
"BC": spritesheet[WHITE_KNIGHT], "BT": spritesheet[WHITE_ROOK], "BP": spritesheet[WHITE_PAWN]}
# ["BTf1", "NTe7", "BAd5", "BRd6", "NRd8"]
colPositions = {0:"a", 1:"b", 2:"c", 3:"d", 4:"e", 5:"f", 6:"g", 7:"h" }
colPositionsInv = {"a": 0, "b": 1, "c": 2, "d": 3, "e": 4, "f": 5, "g": 6, "h": 7}
stdChesBoard = ["NTh1", "NAh3", "NCh2", "NDh4", "NRh5", "NTh8", "NAh6", "NCh7",
"NPg1", "NPg2", "NPg3", "NPg4", "NPg5", "NPg6", "NPg7", "NPg8",
"BPb1", "BPb2", "BPb3", "BPb4", "BPb5", "BPb6", "BPb7", "BPb8",
"BTa1", "BAa3", "BCa2", "BDa4", "BRa5", "BTa8", "BAa6", "BCa7"]
testCastling = ["NTh1", "NRh5", "NTh8",
"NPg1", "NPb3", "NPg5", "NPg7",
"BPb7", "BPg2", "BPb6", "BPb8",
"BTa1", "BRa5", "BTa8",]
spanishToEnglish = {"A": "B", "T": "R", "D": "Q", "R": "K", "P": "P", "C": "N"}
englishToSpanish = {"B": "A", "R": "T", "Q": "D", "K": "R", "P": "P", "N": "C"}
turn="B"
playerTurn="B"
ia_mode=False
blackKing=None
whiteKing=None
promotion= False
promotionMov=[]
promotedPiece=""
piece_held = None
piece_heldId=""
old_pos=(-1,-1)
newPiece=False
blackKingOnBoard=False
whiteKingOnBoard = False
#Flags
whiteKingCastling = False
whiteQueenCastling = False
blackKingCastling = False
blackQueenCastling = False
CPUPlayWhite=False
CPUStarts=False
chessboardflipped = False
quantityOfPieces = {"NR":0, "ND":0, "NA":0,"NC":0, "NT":0, "NP":0,"BR":0, "BD":0, "BA":0,"BC":0, "BT":0, "BP":0}
def __init__(self,batch):
super(BoardCreatorGUI, self).__init__(900, 600,
resizable=False,
caption='Chess',
config=pyglet.gl.Config(double_buffer=True),
vsync=False)
self.board_imgs = [[None for _ in range(8)] for _ in range(8)]
self.board = [["" for _ in range(8)] for _ in range(8)]
self.selector_imgs = [None for _ in range(12)]
self.selectedPiece = []
self.board_normal = pyglet.sprite.Sprite(self.chessboard)
self.board_flipped = pyglet.sprite.Sprite(self.chessboardInv)
self.piece_held=None
self.createStdPieces()
self.background = pyglet.sprite.Sprite(self.backgroundImg)
self.mouseAxis=(0,0)
self.batch = batch
self.managerList=[]
self.manager()
def manager(self):
self.managerList+=[Manager(HorizontalContainer([OneTimeButton(label="Continuar",on_release=self.nextWindow)]),
window=self,
batch=self.batch,
theme=getTheme(),
anchor=ANCHOR_BOTTOM_RIGHT,
offset=(-80, 5),
is_movable=False
)]
self.managerList+=[Manager(VerticalContainer([Checkbox(label="El CPU Inicia ",on_press=self.setCPUStarts,is_pressed=self.CPUStarts),
Checkbox(label="El CPU es blancas", on_press=self.setCPUPlayWhite,is_pressed=self.CPUPlayWhite),
Label(""),Label("Enroque de negras"),
Checkbox(label="Lado de la Reina ", on_press=self.setBlackQueenCastling,is_pressed=self.blackQueenCastling),
Checkbox(label="Lado del Rey ", on_press=self.setBlackKingCastling,is_pressed=self.blackKingCastling),
Label(""),Label("Enroque de blancas"),
Checkbox(label="Lado de la Reina ",on_press=self.setWhiteQueenCastling,is_pressed=self.whiteQueenCastling),
Checkbox(label="Lado del Rey ", on_press=self.setWhiteKingCastling,is_pressed=self.whiteKingCastling)
]),
window=self,
batch=self.batch,
theme=getTheme(),
anchor=ANCHOR_RIGHT,
offset=(-50, -95),
is_movable=False
)]
def setWhiteKingCastling(self,y):
self.whiteKingCastling=y;
def setWhiteQueenCastling(self,y):
self.whiteQueenCastling=y;
def setBlackKingCastling(self,y):
self.blackKingCastling=y;
def setBlackQueenCastling(self,y):
self.blackQueenCastling=y;
def setCPUPlayWhite(self,y):
self.CPUPlayWhite=y;
def setCPUStarts(self,y):
self.CPUStarts=y;
def createStdPieces(self):
for n in range(12):
if(self.selector_imgs[n] == None):
if(self.whiteKingOnBoard and n == 6 or self.blackKingOnBoard and n == 0 ):
continue
else:
self.selector_imgs[n]=pyglet.sprite.Sprite(self.spritesheet[n],75*(8+n//3),75*(5+((11-n)%3)))
def endOfTurn(self):
if(self.turn=="B"):
self.turn = "N"
else:
self.turn = "B"
def ifFlipped(self, x):
if(self.CPUPlayWhite):
return (7-x)
return x
def deleteManagers(self):
for x in self.managerList:
x.delete()
def on_draw(self):
self.clear()
self.background.draw()
if(self.CPUPlayWhite):
self.board_flipped.draw()
else:
self.board_normal.draw()
for n in self.selector_imgs:
if(n!= None):
n.draw()
for x, y in itertools.product(range(8), repeat=2):
if self.board[y][x] != "":
piece = self.board_imgs[y][x]
if piece != self.piece_held:
piece.x = self.ifFlipped(x) * 75
piece.y = self.ifFlipped(y) * 75
piece.draw()
if(self.newPiece):
self.piece_held.draw()
x = self.mouseAxis[0]
y = self.mouseAxis[1]
self.batch.draw()
#193 42
def getGraphicPiece(self,x,y):
if(x>600 and y>375):
xc = x//75 - 8
yc = y//75 - 5
result = self.selector_imgs[xc*3 + (2-yc)]
if(result != None):
self.selector_imgs[xc * 3 + (2-yc)] = None
self.newPiece =True
self.piece_heldId =self.piecesIds[ xc*3 + (2-yc)]
return result
elif(x<=600):
self.old_pos=(self.ifFlipped(x//75),self.ifFlipped(y//75))
return self.board_imgs[self.ifFlipped(y//75)][self.ifFlipped(x//75)]
def on_mouse_motion(self,x, y, dx, dy):
self.mouseAxis = (x,y)
def on_mouse_press(self,x, y, button, modifiers):
if button == mouse.LEFT :
piece = self.getGraphicPiece(x, y)
if piece != None:
if(not self.newPiece):
self.piece_heldId = self.board[self.ifFlipped(y//75)][self.ifFlipped(x//75)]
self.piece_held = piece
def on_mouse_drag(self,x, y, dx, dy, buttons, modifiers):
if self.piece_held is not None:
self.piece_held.x = x - 32
self.piece_held.y = y - 32
def nextWindow(self,y):
castling=""
starts = "w"
if(self.whiteKingCastling):
castling += "K"
if(self.whiteQueenCastling):
castling += "Q"
if (self.blackKingCastling):
castling += "k"
if (self.blackQueenCastling):
castling += "q"
if(self.CPUStarts):
starts="b"
if(self.CPUPlayWhite):
if(starts == "b"):
starts="w"
elif starts=="w":
starts = "b"
if(castling == ""):
castling="-"
batch = pyglet.graphics.Batch()
mygame = chessGUI.ChessGUI(self.boardTraduction(),castling,starts,self.CPUPlayWhite,self,batch)
self.set_visible(False)
def on_mouse_release(self,x, y, button, modifiers):
if self.piece_held is not None and (x > 600 or not self.newPiece) and self.old_pos != (-1,-1):
self.board_imgs[self.old_pos[1]][self.old_pos[0]] = None
if(self.board[self.old_pos[1]][self.old_pos[0]] == "BR"):
self.whiteKingOnBoard = False
elif (self.board[self.old_pos[1]][self.old_pos[0]] == "NR"):
self.blackKingOnBoard = False
self.board[self.old_pos[1]][self.old_pos[0]] = ""
if self.piece_held is not None and (x<=600):
xp = self.ifFlipped(x//75)
yp = self.ifFlipped(y // 75)
if (self.board[yp][xp] == "BR"):
self.whiteKingOnBoard = False
elif (self.board[yp][xp] == "NR"):
self.blackKingOnBoard = False
self.board_imgs[yp][xp] = self.piece_held
self.board[yp][xp] = self.piece_heldId
if (self.piece_heldId == "BR"):
self.whiteKingOnBoard = True
elif (self.piece_heldId == "NR"):
self.blackKingOnBoard = True
self.piece_held = None
self.old_pos=(-1,-1)
self.piece_heldId = ""
self.newPiece= False
self.createStdPieces()
def boardTraduction(self):
resultBoard=[]
for x in range(8):
for y in range(8):
if(self.board[y][x] != ""):
resultBoard += [self.board[y][x] +self.colPositions[x] + str(y+1)]
return resultBoard
```
#### File: jorvasquezr/IA_ChessGame_Solver/chessAI.py
```python
import chess
import chess.polyglot
from evaluation import evaluate_board
movehistory =[]
starts = True
def negamaxRoot(depth,board,pstarts):
global movehistory, starts
starts = pstarts == "w"
bestMove = chess.Move.null()
bestValue = -99999
alpha = -100000
beta = 100000
greaterProximity = 0
possibleMoves = board.legal_moves
for move in possibleMoves:
board.push(move)
        result = alphabeta(-beta, -alpha, depth - 1, board)
score = - result[0]
if(score == bestValue and result[1]>greaterProximity):
bestMove= move
greaterProximity=result[1]
if score > bestValue:
bestValue = score
bestMove = move
greaterProximity=result[1]
        alpha = max(score, alpha)  # raise alpha so later sibling moves can be pruned
board.pop()
return bestMove
def alphabeta( alpha, beta, depthleft,board ):
bestscore = -9999
greaterProximity=0
if( depthleft == 0 ):
return (quiesce( alpha, beta ,board),0)
if(board.is_stalemate() or board.is_checkmate()):
return (evaluate_board(board), depthleft)
possibleMoves = board.legal_moves
for move in possibleMoves:
board.push(move)
result = alphabeta( -beta, -alpha, depthleft - 1,board )
score = -result[0]
board.pop()
if( score >= beta ):
return (score,result[1])
bestscore = max(bestscore,score,)
if(score == bestscore and result[1]>greaterProximity):
greaterProximity=result[1]
alpha = max(score,alpha)
return (bestscore,greaterProximity)
def quiesce( alpha, beta ,board):
stand_pat = evaluate_board(board)
if( stand_pat >= beta ):
return beta
if( alpha < stand_pat ):
alpha = stand_pat
for move in board.legal_moves:
if board.is_capture(move):
board.push(move)
score = -quiesce( -beta, -alpha,board )
board.pop()
if( score >= beta ):
return beta
if( score > alpha ):
alpha = score
return alpha
```
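A minimal sketch of calling the search above directly with python-chess, assuming `evaluation.py` from the same repo is importable; the endgame FEN and the depth are arbitrary choices:
```python
import chess

from chessAI import negamaxRoot

# A sparse queen-vs-king ending keeps a depth-3 search fast
board = chess.Board("8/5k2/8/8/8/3K4/3Q4/8 w - - 0 1")
best = negamaxRoot(3, board, "w")
print("Suggested move:", board.san(best))
```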
#### File: jorvasquezr/IA_ChessGame_Solver/chessGUI.py
```python
import pyglet
from pyglet.window import mouse
import pyglet
import itertools
import chess
from game import Game
import time
from theme import getTheme, getPopUpMenssageTheme
from pyglet_gui.manager import Manager
from pyglet_gui.document import Document
from pyglet_gui.constants import *
from pyglet_gui.buttons import Button, OneTimeButton, Checkbox, GroupButton
from pyglet_gui.gui import Label, PopupMessage, Frame
from pyglet_gui.containers import VerticalContainer, HorizontalContainer
from datetime import datetime
import easygui
#
# The file output will be the initial position of the pieces together with the
# algebraic description of the game that was played, and a date/time indication
# (a time-stamp of the solution).
#
class ChessGUI(pyglet.window.Window):
chessboard = pyglet.resource.image('resources/chessboard.png')
chessboardInv = pyglet.resource.image('resources/chessboardflipped.png')
validImg = pyglet.resource.image('resources/validmove.png')
promoImg = pyglet.resource.image('resources/promotion.png')
hoverImg = pyglet.resource.image('resources/hoversquare.png')
spriteimage = pyglet.resource.image('resources/spritesheet.png')
dangerImg = pyglet.resource.image('resources/danger.png')
backgroundImg = pyglet.resource.image('resources/Background.png')
spritesheet = pyglet.image.ImageGrid(spriteimage, 2, 6)
BLACK_KING, BLACK_QUEEN, BLACK_BISHOP, BLACK_KNIGHT, BLACK_ROOK, BLACK_PAWN, WHITE_KING, WHITE_QUEEN, WHITE_BISHOP, \
WHITE_KNIGHT, WHITE_ROOK, WHITE_PAWN = range(12)
dictPieces = {"NR": spritesheet[BLACK_KING], "ND": spritesheet[BLACK_QUEEN], "NA": spritesheet[BLACK_BISHOP],
"NC": spritesheet[BLACK_KNIGHT], "NT": spritesheet[BLACK_ROOK], "NP": spritesheet[BLACK_PAWN],
"BR": spritesheet[WHITE_KING], "BD": spritesheet[WHITE_QUEEN], "BA": spritesheet[WHITE_BISHOP],
"BC": spritesheet[WHITE_KNIGHT], "BT": spritesheet[WHITE_ROOK], "BP": spritesheet[WHITE_PAWN]}
# ["BTf1", "NTe7", "BAd5", "BRd6", "NRd8"]
colPositions = {"a": 0, "b": 1, "c": 2,
"d": 3, "e": 4, "f": 5, "g": 6, "h": 7}
colPositionsInv = {"a": 0, "b": 1, "c": 2,
"d": 3, "e": 4, "f": 5, "g": 6, "h": 7}
stdChesBoard = ["NTa8", "NCb8", "NAc8", "NDd8", "NRe8", "NAf8", "NCg8", "NTh8",
"NPa7", "NPb7", "NPc7", "NPd7", "NPe7", "NPf7", "NPg7", "NPh7",
"BPa2", "BPb2", "BPc2", "BPd2", "BPe2", "BPf2", "BPg2", "BPh2",
"BTa1", "BCb1", "BAc1", "BDd1", "BRe1", "BAf1", "BCg1", "BTh1", ]
spanishToEnglish = {"A": "B", "T": "R",
"D": "Q", "R": "K", "P": "P", "C": "N"}
englishToSpanish = {"B": "A", "R": "T",
"Q": "D", "K": "R", "P": "P", "N": "C"}
ia_mode = True
blackKing = None
whiteKing = None
promotion = False
promotionMov = []
promotedPiece = ""
movement = [0, 0]
animation = True
def __init__(self, textPositions, castlingRigths, starts, CPUPlaysWhite, pwindow, batch):
super(ChessGUI, self).__init__(900, 600,
resizable=False,
caption='Chess',
config=pyglet.gl.Config(
double_buffer=True),
vsync=False)
pyglet.clock.schedule_interval(self.updatePosition, 1 / 60)
self.board_imgs = [[None for _ in range(8)] for _ in range(8)]
self.board = []
self.window = pwindow
self.batch = batch
self.CPUPlaysWhite = CPUPlaysWhite
self.castlingRigths = castlingRigths
self.starts = starts
if (starts == "b"):
self.turn = "N"
else:
self.turn = "B"
if CPUPlaysWhite:
self.playerTurn = "N"
else:
self.playerTurn = "B"
self.selectedPiece = []
self.board_normal = pyglet.sprite.Sprite(self.chessboard)
self.board_flipped = pyglet.sprite.Sprite(self.chessboardInv)
self.hoverSprite = pyglet.sprite.Sprite(self.hoverImg)
self.danger = pyglet.sprite.Sprite(self.dangerImg)
self.piece_held = None
self.textPositions = textPositions
self.createBoard(textPositions)
self.inFunction = False
self.draws = 0
self.game = Game(self.stdNotationToChess(self.board) +
" " + starts + " " + castlingRigths + " - 0 1")
self.wQueen = pyglet.sprite.Sprite(self.spritesheet[7], 131.25, 225)
self.wRook = pyglet.sprite.Sprite(self.spritesheet[10], 218.75, 225)
self.wBishop = pyglet.sprite.Sprite(self.spritesheet[8], 306.25, 225)
self.wKnight = pyglet.sprite.Sprite(self.spritesheet[9], 393.75, 225)
self.bQueen = pyglet.sprite.Sprite(self.spritesheet[1], 131.25, 225)
self.bRook = pyglet.sprite.Sprite(self.spritesheet[4], 218.75, 225)
self.bBishop = pyglet.sprite.Sprite(self.spritesheet[2], 306.25, 225)
self.bKnight = pyglet.sprite.Sprite(self.spritesheet[3], 393.75, 225)
self.background = pyglet.sprite.Sprite(self.backgroundImg)
self.document = Document(pyglet.text.decode_attributed(
"\n" * 23), width=250, height=400)
self.manager()
self.numero = 0
self.announcedFinal = False
self.instanteInicial = None
self.instanteFinal = None
self.lineCont = 1
self.annotations = ""
def manager(self):
Manager(Label(""),
window=self, batch=self.batch,
theme=getTheme()
)
Manager(Frame(self.document),
window=self, batch=self.batch,
theme=getTheme(),
anchor=ANCHOR_TOP_RIGHT,
offset=(-10, -75),
is_movable=False
)
Manager(HorizontalContainer([OneTimeButton(label="Guardar", on_release=self.saveGame),
OneTimeButton(label="Volver", on_release=self.onclose)]),
window=self,
batch=self.batch,
theme=getTheme(),
anchor=ANCHOR_BOTTOM_RIGHT,
offset=(-50, 15),
is_movable=False
)
self.document.set_text("")
def updateDocument(self, y):
self.numero += 1
self.document.set_text(self.document.get_text() +
"Hola Mundos " + str(self.numero) + "\n")
self.popupMessage("Partida guardada correctamente")
def popupMessage(self, text):
PopupMessage(text=text,
window=self,
batch=self.batch,
theme=getPopUpMenssageTheme()
)
def saveGame(self, y):
result = "{\n\tPosicionInicial: " + str(self.textPositions) + ",\n"
result += "\tDesarrolloDeLaPartida: \"" + self.annotations + "\",\n"
date = datetime.now().strftime("%d/%m/%Y")
fileDate = datetime.now().strftime("%d-%m-%Y")
result += "\tfecha: \"" + date + "\",\n"
strTiempo = "null"
if not (self.instanteInicial == None or self.instanteFinal == None):
tiempo = (self.instanteFinal - self.instanteInicial)
strTiempo = str(tiempo)
result += "\ttiempo: \"" + strTiempo + "\" \n}"
fileroute = easygui.filesavebox(default="BotFinalesDeAjedrez " + fileDate +
".txt", msg="hola", title="Seleccione el destino", filetypes="txt")
if (fileroute != None):
f = open(fileroute, "w")
f.write(result)
f.close()
def stdNotationToChess(self, boardGUI):
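        """Build the FEN piece-placement field for the current GUI board."""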
count = 0
result = ""
row = ""
for x in range(8):
if (result != ""):
result += "/"
for y in range(8):
if boardGUI[x][y] == "":
count += 1
else:
charName = self.spanishToEnglish[boardGUI[x][y][1]]
if (boardGUI[x][y][0] == "N"):
charName = charName.lower()
if count != 0:
row += str(count) + charName
count = 0
else:
row += charName
if count != 0:
row += str(count)
count = 0
result += row[::-1]
row = ""
if (not self.CPUPlaysWhite):
result = result[::-1]
return result
def endOfTurn(self):
if (self.turn == "B"):
self.turn = "N"
else:
self.turn = "B"
def moveOfAI(self):
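        """Ask the engine for its move (a UCI string) and play it on the GUI board."""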
var = str(self.game.suggestedMove(self.starts))
xi = self.colPositions[var[0]]
yi = int(var[1]) - 1
xf = self.colPositions[var[2]]
yf = int(var[3]) - 1
piece = ""
if (len(var) == 5):
piece = self.englishToSpanish[var[4].upper()]
        xi, yi, xf, yf = self.cordinatesComplement(xi, yi, xf, yf)
self.pieceMove(xi, yi, xf, yf, piece)
def ifIsFlipped(self, x):
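        """Mirror a board index when the board is drawn from Black's side (CPU playing white)."""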
if (self.CPUPlaysWhite):
return (7 - x)
return x
def promote(self):
self.promoImg.blit(100, 200)
if self.turn == "N":
self.bQueen.draw()
self.bRook.draw()
self.bBishop.draw()
self.bKnight.draw()
else:
self.wQueen.draw()
self.wRook.draw()
self.wBishop.draw()
self.wKnight.draw()
    def createBoard(self, textPositions) -> None:
self.board = [["" for i in range(8)] for i in range(8)]
if textPositions:
for i in textPositions:
y = ord(i[2]) - 97
x = int(i[3]) - 1
p = i[0] + i[1]
self.board[self.ifIsFlipped(x)][self.ifIsFlipped(y)] = p
self.board_imgs[self.ifIsFlipped(x)][self.ifIsFlipped(
y)] = pyglet.sprite.Sprite(self.dictPieces[p])
if (p == "BR"):
self.whiteKing = self.board_imgs[self.ifIsFlipped(
x)][self.ifIsFlipped(y)]
elif (p == "NR"):
self.blackKing = self.board_imgs[self.ifIsFlipped(
x)][self.ifIsFlipped(y)]
def onclose(self, y):
self.window.set_visible(True)
self.window.deleteManagers()
self.window.manager()
self.close()
def on_draw(self):
self.clear()
self.background.draw()
if (self.CPUPlaysWhite):
self.board_flipped.draw()
else:
self.board_normal.draw()
if (self.game.isCheck()):
if (self.turn == "B"):
self.danger.x = self.whiteKing.x
self.danger.y = self.whiteKing.y
else:
self.danger.x = self.blackKing.x
self.danger.y = self.blackKing.y
self.danger.draw()
if self.selectedPiece != []:
self.hoverSprite.x = (self.selectedPiece[1] // 75) * 75
self.hoverSprite.y = (self.selectedPiece[2] // 75) * 75
self.hoverSprite.draw()
for x, y in itertools.product(range(8), repeat=2):
if self.board[y][x] != "":
piece = self.board_imgs[y][x]
if piece != self.piece_held:
piece.x = x * 75
piece.y = y * 75
piece.draw()
if (self.piece_held != None):
self.piece_held.draw()
self.announceFinal()
if (self.promotion):
self.promote()
if (self.draws < 60):
self.draws += 1
self.batch.draw()
def announceFinal(self):
if (self.instanteInicial == None):
self.instanteInicial = datetime.now()
if (self.game.isCheckMate() and not self.announcedFinal and self.draws == 60):
self.announcedFinal = True
self.instanteFinal = datetime.now()
if (self.turn == "B"):
self.popupMessage("Jaque mate de las piezas negras")
if (self.turn == "N"):
self.popupMessage("Jaque mate de las piezas blancas")
if (self.game.isStalemate() and not self.announcedFinal and self.draws == 60):
self.announcedFinal = True
self.instanteFinal = datetime.now()
self.popupMessage("Empate")
def setPiece(self, xi, yi, piece):
self.board[yi][xi] = self.board[yi][xi][0] + piece
ximg = self.board_imgs[yi][xi].x
yimg = self.board_imgs[yi][xi].y
self.board_imgs[yi][xi] = pyglet.sprite.Sprite(
self.dictPieces[self.board[yi][xi]])
self.board_imgs[yi][xi].x = ximg
self.board_imgs[yi][xi].y = yimg
def cordinatesComplement(self, xi, yi, xf, yf):
xi = self.ifIsFlipped(xi)
yi = self.ifIsFlipped(yi)
xf = self.ifIsFlipped(xf)
yf = self.ifIsFlipped(yf)
return xi, yi, xf, yf
def pieceMove(self, xi, yi, xf, yf, piece=""):
pieceEng = ""
if (piece != ""):
self.setPiece(xi, yi, piece)
pieceEng = self.spanishToEnglish[piece].lower()
xi, yi, xf, yf = self.cordinatesComplement(xi, yi, xf, yf)
fromSquare = chr(xi + 97) + str(1 + yi)
toSquare = chr(xf + 97) + str(1 + yf)
result = self.game.doAMove(fromSquare + toSquare + pieceEng)
xi, yi, xf, yf = self.cordinatesComplement(xi, yi, xf, yf)
if (result[0] != ""):
self.changePosition(xi, yi, xf, yf)
if (result[0] == "PassantMove"):
self.doPassant(xi, yi, xf, yf)
elif (result[0] == "kingside" or result[0] == "queenside"):
self.doCastling(result[0], yi, yf)
self.anotateMove(result[1])
self.endOfTurn()
def anotateMove(self, move):
result = ""
if (self.turn == "N"):
if (self.lineCont == 1 and self.annotations == ""):
result += "1... " + move + " "
else:
result += " " + move + " "
self.lineCont += 1
elif (self.turn == "B"):
result += str(self.lineCont) + ". " + move
self.annotations += result
newLine = "\n"
if (self.turn == "N"):
newLine = ""
self.document.set_text(self.document.get_text() + newLine + result)
def changePosition(self, xi, yi, xf, yf):
self.board_imgs[yf][xf] = self.board_imgs[yi][xi]
self.board_imgs[yi][xi] = None
self.board[yf][xf] = self.board[yi][xi]
self.board[yi][xi] = ""
if self.animation:
self.piece_held = self.board_imgs[yf][xf]
xmovement = xf * 75 - xi * 75
ymovement = yf * 75 - yi * 75
self.movement = [xmovement, ymovement]
def updatePosition(self, dt):
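        """Per-frame callback: animate the held piece, then trigger the engine's move when due."""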
if (len(self.movement) == 2 and self.piece_held != None):
stepSize = 10
if (self.movement[1] > 0):
if (self.movement[1] <= stepSize):
self.piece_held.y += self.movement[1]
self.movement[1] = 0
else:
self.piece_held.y += stepSize
self.movement[1] -= stepSize
if (self.movement[1] < 0):
if (self.movement[1] >= -stepSize):
self.piece_held.y += self.movement[1]
self.movement[1] = 0
else:
self.piece_held.y -= stepSize
self.movement[1] += stepSize
if (self.movement[0] > 0):
if (self.movement[0] <= stepSize):
self.piece_held.x += self.movement[0]
self.movement[0] = 0
else:
self.piece_held.x += stepSize
self.movement[0] -= stepSize
if (self.movement[0] < 0):
if (self.movement[0] >= -stepSize):
self.piece_held.x += self.movement[0]
self.movement[0] = 0
else:
self.piece_held.x -= stepSize
self.movement[0] += stepSize
if (self.movement[0] == 0 and self.movement[1] == 0):
self.piece_held = None
self.movement = []
self.draws=45
if (self.turn != self.playerTurn and self.ia_mode and not self.game.isCheckMate() and not self.game.isStalemate() and self.piece_held == None and not self.inFunction and self.draws == 60):
self.inFunction = True
self.moveOfAI()
self.turn = self.playerTurn
self.inFunction = False
def doCastling(self, side, yi, yf):
if (side == "kingside"):
self.changePosition(7, yi, 5, yf)
if (side == "queenside"):
self.changePosition(0, yi, 3, yf)
def doPassant(self, xi, yi, xf, yf):
if (self.turn == "B"):
self.board_imgs[yf - 1][xf] = None
self.board[yf - 1][xf] = ""
if (self.turn == "N"):
self.board_imgs[yf + 1][xf] = None
self.board[yf + 1][xf] = ""
def isPromote(self, yf):
result = False
if (((self.selectedPiece[0] == "BP" and not self.CPUPlaysWhite) or (
self.selectedPiece[0] == "NP" and self.CPUPlaysWhite)) and self.selectedPiece[2] // 75 == 6 and yf == 7):
result = True
elif (((self.selectedPiece[0] == "NP" and not self.CPUPlaysWhite) or (
self.selectedPiece[0] == "BP" and self.CPUPlaysWhite)) and self.selectedPiece[2] // 75 == 1 and yf == 0):
result = True
return result
def on_mouse_press(self, x, y, button, modifiers):
if (self.playerTurn == self.turn or not self.ia_mode) and not self.game.isCheckMate() and x <= 600 and y <= 600:
if (not self.promotion):
if self.selectedPiece != []:
if (((self.board[y // 75][x // 75] == "")) or (self.board[y // 75][x // 75] != "") and (
self.board[y // 75][x // 75][0] != self.turn)):
xi = self.selectedPiece[1] // 75
yi = self.selectedPiece[2] // 75
xf = x // 75
yf = y // 75
if (self.isPromote(yf)):
self.promotion = True
self.promotionMov = [xi, yi, xf, yf]
else:
self.pieceMove(xi, yi, xf, yf)
self.selectedPiece = []
if (self.board[y // 75][x // 75] != "") and (self.board[y // 75][x // 75][0] == self.turn):
self.selectedPiece = [
self.board[y // 75][x // 75]] + [x, y]
else:
if 225 < y < 300:
xi = self.promotionMov[0]
yi = self.promotionMov[1]
xf = self.promotionMov[2]
yf = self.promotionMov[3]
piece = ""
if 131.25 < x < 206.25: # queen
piece = "D"
elif 218.75 < x < 293.75: # rook
piece = "T"
elif 306.25 < x < 381.25: # bishop
piece = "A"
elif 393.75 < x < 468.75: # knight
piece = "C"
self.pieceMove(xi, yi, xf, yf, piece)
self.promotion = False
                    self.promotionMov = []
```
#### File: jorvasquezr/IA_ChessGame_Solver/game.py
```python
from chessAI import *
class Game():
def __init__(self, boardStr):
self.board = chess.Board(boardStr)
def doAMove(self, movestr):
move = chess.Move.from_uci(movestr)
if (move in self.board.legal_moves):
isPassant = self.board.is_en_passant(move)
castlingSide = self.getSideofCastling(move)
sanMove = str(self.board.san(move))
self.board.push(move)
if(isPassant):
return ("PassantMove", sanMove)
elif(castlingSide != ""):
return (castlingSide, sanMove)
return ("Moved", sanMove)
else:
return ("", "")
def getSideofCastling(self, move):
result = ""
if(self.board.is_queenside_castling(move)):
result = "queenside"
elif(self.board.is_kingside_castling(move)):
result = "kingside"
return result
def isCheckMate(self):
return self.board.is_checkmate()
def isStalemate(self):
return self.board.is_stalemate()
def isCheck(self):
return self.board.is_check()
def suggestedMove(self,starts):
return negamaxRoot(5,self.board,starts)
def isvalid(self):
return self.board.is_valid() or self.isStalemate() or self.isCheckMate()
```
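The `Game` wrapper above is a thin facade over python-chess. Below is a minimal usage sketch; it assumes the file is importable as `game` and that python-chess and the `chessAI` module it imports are available. The FEN and moves are illustrative only.
```python
# Minimal sketch: drive Game straight from a FEN (standard starting position here).
from game import Game

g = Game("rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1")

print(g.doAMove("e2e4"))    # legal UCI move -> ("Moved", "e4")
print(g.doAMove("e2e5"))    # illegal move   -> ("", "")
print(g.isCheck(), g.isCheckMate(), g.isStalemate())

# suggestedMove() runs chessAI's negamax search at depth 5, so it can be slow
# on a full board; the side-to-move argument mirrors what the GUI passes in.
# print(g.suggestedMove("w"))
```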
#### File: pyglet-gui-master/pyglet_gui/constants.py
```python
VALIGN_TOP = 1
VALIGN_CENTER = 0
VALIGN_BOTTOM = -1
HALIGN_LEFT = -1
HALIGN_CENTER = 0
HALIGN_RIGHT = 1
ANCHOR_TOP_LEFT = (VALIGN_TOP, HALIGN_LEFT)
ANCHOR_TOP = (VALIGN_TOP, HALIGN_CENTER)
ANCHOR_TOP_RIGHT = (VALIGN_TOP, HALIGN_RIGHT)
ANCHOR_LEFT = (VALIGN_CENTER, HALIGN_LEFT)
ANCHOR_CENTER = (VALIGN_CENTER, HALIGN_CENTER)
ANCHOR_RIGHT = (VALIGN_CENTER, HALIGN_RIGHT)
ANCHOR_BOTTOM_LEFT = (VALIGN_BOTTOM, HALIGN_LEFT)
ANCHOR_BOTTOM = (VALIGN_BOTTOM, HALIGN_CENTER)
ANCHOR_BOTTOM_RIGHT = (VALIGN_BOTTOM, HALIGN_RIGHT)
def GetRelativePoint(parent, parent_anchor, child, child_anchor, offset):
valign, halign = parent_anchor or ANCHOR_CENTER
if valign == VALIGN_TOP:
y = parent.y + parent.height
elif valign == VALIGN_CENTER:
y = parent.y + parent.height // 2
else: # VALIGN_BOTTOM
y = parent.y
if halign == HALIGN_LEFT:
x = parent.x
elif halign == HALIGN_CENTER:
x = parent.x + parent.width // 2
else: # HALIGN_RIGHT
x = parent.x + parent.width
valign, halign = child_anchor or (valign, halign)
offset_x, offset_y = offset
if valign == VALIGN_TOP:
y += offset_y - child.height
elif valign == VALIGN_CENTER:
y += offset_y - child.height // 2
else: # VALIGN_BOTTOM
y += offset_y
if halign == HALIGN_LEFT:
x += offset_x
elif halign == HALIGN_CENTER:
x += offset_x - child.width // 2
else: # HALIGN_RIGHT
x += offset_x - child.width
return x, y
```
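`GetRelativePoint` only needs objects exposing `x`, `y`, `width` and `height`, so the anchoring math can be checked without any pyglet window. The `Rect` tuple below is a stand-in for a widget, not part of pyglet-gui.
```python
# Stand-alone check of the anchoring math; Rect stands in for any widget-like object.
from collections import namedtuple

from pyglet_gui.constants import ANCHOR_TOP_RIGHT, GetRelativePoint

Rect = namedtuple("Rect", "x y width height")

parent = Rect(x=0, y=0, width=100, height=100)
child = Rect(x=0, y=0, width=30, height=20)

# Pin the child's top-right corner to the parent's top-right corner.
print(GetRelativePoint(parent, ANCHOR_TOP_RIGHT, child, None, (0, 0)))     # (70, 80)

# An offset nudges the returned bottom-left position: 10 px left, 5 px down.
print(GetRelativePoint(parent, ANCHOR_TOP_RIGHT, child, None, (-10, -5)))  # (60, 75)
```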
#### File: pyglet-gui-master/pyglet_gui/text_input.py
```python
import pyglet
from pyglet_gui.mixins import FocusMixin
from pyglet_gui.override import InputLabel
from pyglet_gui.core import Viewer
class TextInput(FocusMixin, Viewer):
# This class works in two states defined by is_focus():
# True: "writing"
# False: "label"
def __init__(self, text="", length=20, max_length=None, padding=0, on_input=None):
Viewer.__init__(self)
FocusMixin.__init__(self)
self._document = pyglet.text.document.UnformattedDocument(text)
self._document_style_set = False # check if style of document was set.
self._length = length # the length of the box in characters
self._max_length = max_length # the max length allowed for writing.
self._on_input = on_input
self._padding = 4 + padding
# graphics loaded in both states
self._field = None
# graphics loaded in state "writing"
self._text_layout = None
self._caret = None
# graphics loaded in state "label"
self._label = None
def get_path(self):
return 'input'
def _load_label(self, theme):
self._label = InputLabel(self._document.text,
multiline=False,
width=self.width-self._padding*2,
color=theme['text_color'],
font_name=theme['font'],
font_size=theme['font_size'],
**self.get_batch('foreground'))
def _load_writing(self, theme):
needed_width, needed_height = self._compute_needed_size()
self._text_layout = pyglet.text.layout.IncrementalTextLayout(
self._document, needed_width, needed_height,
multiline=False, **self.get_batch('foreground'))
self._caret = pyglet.text.caret.Caret(self._text_layout, color=theme['gui_color'][0:3])
self._caret.visible = True
self._caret.mark = 0
self._caret.position = len(self._document.text)
def load_graphics(self):
theme = self.theme[self.get_path()]
# We set the style once. We shouldn't have to do so again because
# it's an UnformattedDocument.
if not self._document_style_set:
self._document.set_style(0, 0, # parameters not used in set_style
dict(color=theme['text_color'],
font_name=theme['font'],
font_size=theme['font_size']))
self._document_style_set = True
self._field = theme['image'].generate(color=theme['gui_color'], **self.get_batch('background'))
if self.is_focus():
self._load_writing(theme)
else:
self._load_label(theme)
def _unload_writing(self):
self._caret.delete() # it should be .unload(), but Caret does not have it.
self._document.remove_handlers(self._text_layout)
self._text_layout.delete() # it should also be .unload().
self._caret = self._text_layout = None
def _unload_label(self):
self._label.delete()
self._label = None
def unload_graphics(self):
if self.is_focus():
self._unload_writing()
else:
self._unload_label()
self._field.unload()
def _compute_needed_size(self):
# Calculate the needed size based on the font size
font = self._document.get_font(0)
height = font.ascent - font.descent
glyphs = font.get_glyphs('A_')
width = max([x.width for x in glyphs])
needed_width = self._length * width - 2 * self._padding
needed_height = height + 2 * self._padding
return needed_width, needed_height
def get_text(self):
return self._document.text
def layout(self):
Viewer.layout(self)
self._field.update(self.x, self.y, self.width, self.height)
x, y, width, height = self._field.get_content_region()
if self.is_focus():
self._text_layout.begin_update()
self._text_layout.x = self.x + self._padding
self._text_layout.y = self.y - self._padding
self._text_layout.end_update()
else:
# Adjust the text for font's descent
descent = self._document.get_font().descent
self._label.begin_update()
self._label.x = self.x + self._padding
self._label.y = self.y + self._padding - descent
self._label.width = width - self._padding * 2
self._label.end_update()
def on_gain_focus(self):
self.unload()
FocusMixin.on_gain_focus(self) # changes is_focus()
self.load()
self.reset_size()
self.layout()
def on_lose_focus(self):
# send text to callback _on_input
if self._on_input is not None:
self._on_input(self.get_text())
self.unload()
FocusMixin.on_lose_focus(self) # changes is_focus()
self.load()
self.reset_size()
self.layout()
def hit_test(self, x, y):
return self.is_inside(x, y)
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
if self.is_focus():
return self._caret.on_mouse_drag(x, y, dx, dy, buttons, modifiers)
def on_mouse_press(self, x, y, button, modifiers):
if self.is_focus():
return self._caret.on_mouse_press(x, y, button, modifiers)
def on_text(self, text):
assert self.is_focus()
self._caret.on_text(text)
if self._max_length and len(self._document.text) > self._max_length:
self._document.text = self._document.text[:self._max_length]
self._caret.mark = self._caret.position = self._max_length
return pyglet.event.EVENT_HANDLED
def on_text_motion(self, motion):
assert self.is_focus()
return self._caret.on_text_motion(motion)
def on_text_motion_select(self, motion):
assert self.is_focus()
return self._caret.on_text_motion_select(motion)
def set_text(self, text):
self._document.text = text
if self.is_focus():
self._caret.mark = self._caret.position = len(self._document.text)
else:
self._label.text = text
def compute_size(self):
needed_width, needed_height = self._compute_needed_size()
return self._field.get_needed_size(needed_width, needed_height)
def delete(self):
FocusMixin.delete(self)
Viewer.delete(self)
``` |
{
"source": "jorvis/AdventOfCode",
"score": 4
} |
#### File: AdventOfCode/2015/day_11_both.py
```python
import re
def increment(pw):
pw = list(pw[::-1])
for i in range(0, len(pw)):
if pw[i] == 'z': pw[i] = 'a'
else:
pw[i] = chr(ord(pw[i]) + 1)
break
return ''.join(pw[::-1])
def is_valid(pw):
# Passwords must include one increasing straight of at least three letters, like
# abc, bcd, cde, and so on, up to xyz
streak_found = None
for i in range(0, len(pw) - 2):
if ord(pw[i+1]) - ord(pw[i]) == 1 and ord(pw[i+2]) - ord(pw[i+1]) == 1:
streak_found = pw[i] + pw[i+1] + pw[i+2]
# Passwords may not contain the letters i, o, or l, as these letters can be mistaken
# for other characters and are therefore confusing.
for letter in ['i', 'o', 'l']:
if letter in pw:
return False
# Passwords must contain at least two pairs of letters, like aa, bb, or zz.
pairs_found = re.findall(r"(.)\1", pw)
if len(pairs_found) < 2:
return False
if streak_found is not None:
print("Streak: {0}".format(streak_found))
print("Pairs: {0}".format(pairs_found))
return True
else:
return False
password = '<PASSWORD>'
# this is for part 2
password = '<PASSWORD>'
while True:
password = increment(password)
if is_valid(password): break
print("New password: {0}".format(password))
``` |
{
"source": "jorvis/GALES",
"score": 3
} |
#### File: GALES/bin/filter_blast_ev_db_by_accessions.py
```python
import argparse
import os
import re
import sqlite3
pfam2go = dict()
def main():
parser = argparse.ArgumentParser( description='Creates a SQLite3 database of commonly-accessed attributes for each accession, filtering from a larger version')
## output file to be written
parser.add_argument('-id', '--input_db', type=str, required=True, help='Path to the input source SQLite3 db file.' )
parser.add_argument('-od', '--output_db', type=str, required=True, help='Path to an output SQLite3 db to be created/added to' )
    parser.add_argument('-i', '--input_id_file', type=str, required=True, help="Path to a file with one accession per line")
args = parser.parse_args()
if os.path.exists(args.output_db):
db_already_existed = True
else:
db_already_existed = False
# this creates it if it doesn't already exist
src_conn = sqlite3.connect(args.input_db)
src_curs = src_conn.cursor()
dest_conn = sqlite3.connect(args.output_db)
dest_curs = dest_conn.cursor()
report_interval = 500
records_processed = 0
if db_already_existed:
print("INFO: Output database already exists - appending to it")
ids_already_loaded = get_ids_already_loaded(dest_curs)
db_already_existed = True
else:
print("INFO: Creating tables ...")
create_tables( dest_curs )
dest_conn.commit()
ids_already_loaded = dict()
for acc in open(args.input_id_file):
acc = acc.rstrip()
cache_blast_hit_data(accession=acc, ref_curs=src_curs, ev_curs=dest_curs, ids_loaded=ids_already_loaded)
records_processed += 1
if records_processed % report_interval == 0:
print("INFO: Processed {0} records ... ".format(records_processed))
dest_conn.commit()
if not db_already_existed:
print("INFO: Creating indexes ...")
create_indexes(dest_curs)
dest_conn.commit()
src_curs.close()
dest_curs.close()
print("INFO: Complete.")
def cache_blast_hit_data(accession=None, ref_curs=None, ev_curs=None, ids_loaded=None):
"""
Gets annotation for a specific accession and copies its entry from the large source index into
our smaller hits-found-only evidence index.
"""
ref_blast_select_qry = """
SELECT e.id, e.full_name, e.organism, e.symbol, ea.accession, ea.res_length, ea.is_characterized
FROM entry e
JOIN entry_acc ea on ea.id=e.id
WHERE ea.accession = ?
"""
ev_blast_insert_qry = "INSERT INTO entry (id, full_name, organism, symbol) VALUES (?, ?, ?, ?)"
ev_acc_insert_qry = "INSERT INTO entry_acc (id, accession, res_length, is_characterized) VALUES (?, ?, ?, ?)"
ref_go_select_qry = "SELECT id, go_id FROM entry_go WHERE id = ?"
ev_go_insert_qry = "INSERT INTO entry_go (id, go_id) VALUES (?, ?)"
ref_ec_select_qry = "SELECT id, ec_num FROM entry_ec WHERE id = ?"
ev_ec_insert_qry = "INSERT INTO entry_ec (id, ec_num) VALUES (?, ?)"
ref_curs.execute(ref_blast_select_qry, (accession,))
entry_row = ref_curs.fetchone()
if entry_row is not None:
entry_id = entry_row[0]
if entry_id not in ids_loaded:
ev_curs.execute(ev_blast_insert_qry, (accession, entry_row[1], entry_row[2], entry_row[3]))
ev_curs.execute(ev_acc_insert_qry, (accession, entry_row[4], entry_row[5], entry_row[6]))
ref_curs.execute(ref_go_select_qry, (accession,))
for go_row in ref_curs:
ev_curs.execute(ev_go_insert_qry, (accession, go_row[1]))
ref_curs.execute(ref_ec_select_qry, (accession,))
for ec_row in ref_curs:
ev_curs.execute(ev_ec_insert_qry, (accession, ec_row[1]))
ids_loaded[entry_id] = True
def create_indexes( cursor ):
# CREATE INDEX index_name ON table_name (column_name);
cursor.execute("CREATE INDEX idx_col_us_id ON entry (id)")
cursor.execute("CREATE INDEX idx_col_usa_id ON entry_acc (id)")
cursor.execute("CREATE INDEX idx_col_usa_acc ON entry_acc (accession)")
cursor.execute("CREATE INDEX idx_col_usg_id ON entry_go (id)")
cursor.execute("CREATE INDEX idx_col_usg_go ON entry_go (go_id)")
cursor.execute("CREATE INDEX idx_col_use_id ON entry_ec (id)")
cursor.execute("CREATE INDEX idx_col_use_ec ON entry_ec (ec_num)")
def create_tables( cursor ):
cursor.execute("""
CREATE TABLE entry (
id text primary key,
full_name text,
organism text,
symbol text
)
""")
cursor.execute("""
CREATE TABLE entry_acc (
id text not NULL,
accession text not NULL,
res_length INT,
is_characterized integer DEFAULT 0
)
""")
cursor.execute("""
CREATE TABLE entry_go (
id text not NULL,
go_id text not NULL
)
""")
cursor.execute("""
CREATE TABLE entry_ec (
id text not NULL,
ec_num text not NULL
)
""")
def get_ids_already_loaded(curs):
ids_loaded = dict()
qry = "SELECT id FROM entry_acc"
curs.execute(qry)
for row in curs:
ids_loaded[row[0]] = True
return ids_loaded
if __name__ == '__main__':
main()
``` |
{
"source": "jorwoods/server-client-python",
"score": 2
} |
#### File: server/endpoint/endpoint.py
```python
from .exceptions import (
ServerResponseError,
InternalServerError,
NonXMLResponseError,
EndpointUnavailableError,
)
from functools import wraps
from xml.etree.ElementTree import ParseError
from ..query import QuerySet
import logging
try:
from distutils2.version import NormalizedVersion as Version
except ImportError:
from distutils.version import LooseVersion as Version
logger = logging.getLogger("tableau.endpoint")
Success_codes = [200, 201, 202, 204]
class Endpoint(object):
def __init__(self, parent_srv):
self.parent_srv = parent_srv
@staticmethod
def _make_common_headers(auth_token, content_type):
headers = {}
if auth_token is not None:
headers["x-tableau-auth"] = auth_token
if content_type is not None:
headers["content-type"] = content_type
return headers
@staticmethod
def _safe_to_log(server_response):
"""Checks if the server_response content is not xml (eg binary image or zip)
and replaces it with a constant
"""
ALLOWED_CONTENT_TYPES = ("application/xml", "application/xml;charset=utf-8")
if server_response.headers.get("Content-Type", None) not in ALLOWED_CONTENT_TYPES:
return "[Truncated File Contents]"
else:
return server_response.content
def _make_request(
self,
method,
url,
content=None,
auth_token=None,
content_type=None,
parameters=None,
):
parameters = parameters or {}
parameters.update(self.parent_srv.http_options)
parameters["headers"] = Endpoint._make_common_headers(auth_token, content_type)
if content is not None:
parameters["data"] = content
logger.debug(u"request {}, url: {}".format(method.__name__, url))
if content:
logger.debug(u"request content: {}".format(content[:1000]))
server_response = method(url, **parameters)
self.parent_srv._namespace.detect(server_response.content)
self._check_status(server_response)
# This check is to determine if the response is a text response (xml or otherwise)
# so that we do not attempt to log bytes and other binary data.
if len(server_response.content) > 0 and server_response.encoding:
logger.debug(
u"Server response from {0}:\n\t{1}".format(
url, server_response.content.decode(server_response.encoding)
)
)
return server_response
def _check_status(self, server_response):
if server_response.status_code >= 500:
raise InternalServerError(server_response)
elif server_response.status_code not in Success_codes:
try:
raise ServerResponseError.from_response(server_response.content, self.parent_srv.namespace)
except ParseError:
# This will happen if we get a non-success HTTP code that
# doesn't return an xml error object (like metadata endpoints)
# we convert this to a better exception and pass through the raw
# response body
raise NonXMLResponseError(server_response.content)
except Exception:
# anything else re-raise here
raise
def get_unauthenticated_request(self, url):
return self._make_request(self.parent_srv.session.get, url)
def get_request(self, url, request_object=None, parameters=None):
if request_object is not None:
try:
# Query param delimiters don't need to be encoded for versions before 3.7 (2020.1)
self.parent_srv.assert_at_least_version("3.7")
parameters = parameters or {}
parameters["params"] = request_object.get_query_params()
except EndpointUnavailableError:
url = request_object.apply_query_params(url)
return self._make_request(
self.parent_srv.session.get,
url,
auth_token=self.parent_srv.auth_token,
parameters=parameters,
)
def delete_request(self, url):
# We don't return anything for a delete
self._make_request(self.parent_srv.session.delete, url, auth_token=self.parent_srv.auth_token)
def put_request(self, url, xml_request=None, content_type="text/xml"):
return self._make_request(
self.parent_srv.session.put,
url,
content=xml_request,
auth_token=self.parent_srv.auth_token,
content_type=content_type,
)
def post_request(self, url, xml_request, content_type="text/xml"):
return self._make_request(
self.parent_srv.session.post,
url,
content=xml_request,
auth_token=self.parent_srv.auth_token,
content_type=content_type,
)
def api(version):
"""Annotate the minimum supported version for an endpoint.
Checks the version on the server object and compares normalized versions.
    It will raise an exception if the server version is lower than the version specified.
Args:
`version` minimum version that supports the endpoint. String.
Raises:
EndpointUnavailableError
Returns:
None
Example:
>>> @api(version="2.3")
>>> def get(self, req_options=None):
>>> ...
"""
def _decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
self.parent_srv.assert_at_least_version(version)
return func(self, *args, **kwargs)
return wrapper
return _decorator
def parameter_added_in(**params):
"""Annotate minimum versions for new parameters or request options on an endpoint.
The api decorator documents when an endpoint was added, this decorator annotates
keyword arguments on endpoints that may control functionality added after an endpoint was introduced.
The REST API will ignore invalid parameters in most cases, so this raises a warning instead of throwing
an exception.
Args:
Key/value pairs of the form `parameter`=`version`. Kwargs.
Raises:
UserWarning
Returns:
None
Example:
>>> @api(version="2.0")
>>> @parameter_added_in(no_extract='2.5')
>>> def download(self, workbook_id, filepath=None, extract_only=False):
>>> ...
"""
def _decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
import warnings
server_ver = Version(self.parent_srv.version or "0.0")
params_to_check = set(params) & set(kwargs)
for p in params_to_check:
min_ver = Version(str(params[p]))
if server_ver < min_ver:
error = "{!r} not available in {}, it will be ignored. Added in {}".format(p, server_ver, min_ver)
warnings.warn(error)
return func(self, *args, **kwargs)
return wrapper
return _decorator
class QuerysetEndpoint(Endpoint):
@api(version="2.0")
def all(self, *args, **kwargs):
queryset = QuerySet(self)
return queryset
@api(version="2.0")
def filter(self, *args, **kwargs):
queryset = QuerySet(self).filter(**kwargs)
return queryset
@api(version="2.0")
def order_by(self, *args, **kwargs):
queryset = QuerySet(self).order_by(*args)
return queryset
@api(version="2.0")
def paginate(self, **kwargs):
queryset = QuerySet(self).paginate(**kwargs)
return queryset
``` |
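`QuerysetEndpoint` is what backs the fluent query style of the client library; in tableauserverclient, endpoints such as `server.workbooks` mix it in. The sketch below is hedged: the server URL and credentials are placeholders, and whether `name` and `created_at` are accepted as filter/sort fields depends on the `QuerySet` and REST API internals, which are not shown here.
```python
# Hedged sketch of the fluent API built on QuerysetEndpoint.
import tableauserverclient as TSC

server = TSC.Server("https://tableau.example.com", use_server_version=True)
auth = TSC.TableauAuth("USERNAME", "PASSWORD", site_id="")

with server.auth.sign_in(auth):
    # filter()/order_by() build a lazy QuerySet; iterating it issues the request.
    for wb in server.workbooks.filter(name="Sales").order_by("-created_at"):
        print(wb.name)
```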
{
"source": "jorxster/OnionLogger",
"score": 3
} |
#### File: OnionLogger/python/OnionLogger.py
```python
__author__ = '<EMAIL>'
__date__ = '27 May 2018'
__version__ = "0.1.0"
import inspect
import logging
import os
import pickle
import tempfile
import time
# max at roughly 25MB mem, that's a lot of logs
# Change to 0 for unlimited
MAX_LOGS = 49999
KEEP_UNIQUE_ONLY = False
VERBOSITY = logging.INFO
# set up stream handler to shell
formatter = logging.Formatter(
'%(asctime)s | %(name)s | %(levelname)s: %(message)s')
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
stream_handler.setFormatter(formatter)
_LOGGER = logging.getLogger(__name__)
_LOGGER.setLevel(logging.DEBUG)
_LOGGER.addHandler(stream_handler)
class SortBy(object):
Function, Level, Time = range(3)
class Msg(object):
def __init__(self, message_str, level=None) -> None:
self.message = message_str
self._level = level
self._time = time.time()
self._stack = '/'.join([fr.function for fr in inspect.stack()[3:]])
self._function = self._stack.split('/')[0]
def __eq__(self, other):
return self.message == other.message
def __ne__(self, other):
return self.message != other.message
def __repr__(self) -> str:
return '<OnionLogger.Msg(time={}, level={}, function={}) at {:x}>' \
''.format(self._time, self._level, self._function, id(self))
class Logger(object):
def __init__(self, name=__name__):
self._messages = []
self.name = name
def __repr__(self) -> str:
return '<OnionLogger.Logger(len(self._messages)={}, at {:x}>' \
''.format(len(self._messages), id(self))
@property
def len(self):
return len(self._messages)
@property
def messages(self):
return self._messages
# Standard methods
def debug(self, msg):
self.log(msg, level=logging.DEBUG)
def info(self, msg):
self.log(msg)
def warn(self, msg):
self.log(msg, level=logging.WARN)
def critical(self, msg):
self.log(msg, level=logging.CRITICAL)
# Base method
def log(self, msg, level=logging.INFO):
"""
        Append a message to the logger at the given level (echoed to the shell if level >= VERBOSITY).
Args:
msg : (str)
level : (int)
"""
message = Msg(msg, level=level)
# print if meets verbosity criteria
if level >= VERBOSITY:
_LOGGER.log(level=level, msg=msg)
# if discarding duplicate logs
if KEEP_UNIQUE_ONLY:
try:
self._messages.remove(message)
except ValueError:
pass
self._messages.append(message)
# if limit exceeded, pop first-most item
if MAX_LOGS:
if self.len > MAX_LOGS:
self._messages.pop(0)
# sorted return methods
def return_time_sort(self):
return sorted(self._messages, key=lambda x: x._time)
def return_level_sort(self):
return sorted(self._messages, key=lambda x: x._level)
def return_func_sort(self):
return sorted(self._messages, key=lambda x: x._function)
def sorted(self, sort_order):
if sort_order == SortBy.Time:
return self.return_time_sort()
elif sort_order == SortBy.Level:
return self.return_level_sort()
elif sort_order == SortBy.Function:
return self.return_func_sort()
else:
raise ValueError('Expecting SortBy attribute as argument')
# other methods
def reset(self):
"""
Erase all logs
"""
self._messages = []
_LOGGER.info('OnionLogger reset, logs erased')
def serialize(self):
return pickle.dumps(self)
def save_to_disk(self, path=None):
if not path:
# construct temp / time filepath
path = os.path.join(
tempfile.gettempdir(),
time.strftime('%Y%m%d_%H%M%S.olog')
)
with open(path, 'wb') as w:
self.log('OnionLogger.Logger: serializing and '
'writing to path -- \n\t{}'.format(path))
w.write(self.serialize())
def load_from_disk(path=None):
with open(path, 'rb') as w:
onion = pickle.loads(w.read())
return onion
``` |
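Everything needed to exercise the logger lives in this one module. A short sketch, assuming the file is importable as `OnionLogger` (matching the path above); the saved-file path in the last comment is a made-up example.
```python
# Quick tour of the Logger API defined above.
import OnionLogger

log = OnionLogger.Logger("demo")
log.debug("stored, but below VERBOSITY so not echoed to the shell")
log.info("starting run")
log.warn("disk almost full")
log.critical("giving up")

print(log.len)                              # 4 messages held in memory
for msg in log.sorted(OnionLogger.SortBy.Level):
    print(msg)                              # Msg repr shows time, level and caller

log.save_to_disk()                          # pickles to a timestamped .olog in the temp dir
# restored = OnionLogger.load_from_disk("/tmp/20180527_143000.olog")
```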
{
"source": "JorySchossau/harper",
"score": 3
} |
#### File: harper/harper/db.py
```python
from dataclasses import dataclass
from datetime import datetime
from dataclasses_json import dataclass_json
from sqlalchemy import (
Column,
DateTime,
ForeignKey,
Integer,
String,
Table,
Text,
UniqueConstraint,
create_engine,
event,
func,
)
from sqlalchemy.engine import Engine
from sqlalchemy.exc import ArgumentError
from sqlalchemy.orm import declarative_base, relationship
from sqlalchemy.pool import StaticPool
from harper.util import LANG_ID_LEN, HarperExc
def timestamp():
"""Return current time."""
result = datetime.utcnow()
return result
# <https://docs.sqlalchemy.org/en/14/dialects/sqlite.html#sqlite-foreign-keys>
@event.listens_for(Engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
"""Ensure that cascading deletes work for SQLite."""
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
cursor.close()
class DB:
"""Connect to database."""
# SQLAlchemy base class.
base = declarative_base()
# Database connection engine (assigned during configuration).
engine = None
@staticmethod
def configure(url):
"""Configure the back end."""
try:
if url == "test":
DB.engine = create_engine(
"sqlite://",
connect_args={"check_same_thread": False},
poolclass=StaticPool,
)
DB.base.metadata.drop_all(DB.engine)
DB.base.metadata.create_all(DB.engine)
else:
DB.engine = create_engine(url)
return DB.engine
except ArgumentError:
raise HarperExc(f"Unable to connect to {url}", code=500)
@staticmethod
def get_current_lesson_version(session, lesson_id):
"""Get the latest version."""
subquery = (
session.query(func.max(LessonVersion.id))
.filter(LessonVersion.lesson_id == lesson_id)
.scalar_subquery()
)
query = session.query(LessonVersion).filter(
LessonVersion.lesson_id == lesson_id, LessonVersion.id == subquery
)
return query.one()
@staticmethod
    def build_lesson_version(session, **kwargs):
        """Add the next sequence ID value to the lesson version."""
max_sequence_id = (
session.query(func.max(LessonVersion.sequence_id))
.filter(LessonVersion.lesson_id == kwargs["lesson_id"])
.scalar()
)
sequence_id = 1 if (max_sequence_id is None) else (max_sequence_id + 1)
return LessonVersion(sequence_id=sequence_id, **kwargs)
@dataclass_json
@dataclass
class StandardFields:
"""Common definitions for all tables."""
id: int
created_at: datetime
id = Column(Integer, autoincrement=True, primary_key=True, nullable=False)
created_at = Column(DateTime, nullable=False, default=timestamp)
# Link lesson versions to authors.
lesson_version_author = Table(
"lesson_version_author",
DB.base.metadata,
Column("author_id", ForeignKey("person.id"), primary_key=True),
Column("lesson_version_id", ForeignKey("lesson_version.id"), primary_key=True),
)
# Link lesson versions to terms.
lesson_version_term = Table(
"lesson_version_term",
DB.base.metadata,
Column("term_id", ForeignKey("term.id"), nullable=False, primary_key=True),
Column(
"lesson_version_id",
ForeignKey("lesson_version.id"),
nullable=False,
primary_key=True,
),
)
@dataclass
class Lesson(DB.base, StandardFields):
"""Represent a logical lesson."""
language: str
__tablename__ = "lesson"
language = Column(String(LANG_ID_LEN), nullable=False)
versions = relationship(
"LessonVersion", back_populates="lesson", cascade="all, delete"
)
@dataclass
class LessonVersion(DB.base, StandardFields):
"""Represent a specific version of a lesson."""
lesson_id: int
sequence_id: int
title: str
url: str
abstract: str
version: str
license: str
__tablename__ = "lesson_version"
lesson_id = Column(Integer, ForeignKey("lesson.id"))
sequence_id = Column(Integer, nullable=False)
title = Column(Text, nullable=False)
url = Column(Text, nullable=False)
abstract = Column(Text, nullable=False)
version = Column(Text, nullable=False)
license = Column(Text, nullable=False)
lesson = relationship("Lesson", back_populates="versions")
authors = relationship(
"Person", secondary="lesson_version_author", back_populates="lesson_versions"
)
terms = relationship(
"Term", secondary="lesson_version_term", back_populates="lesson_versions"
)
@dataclass
class Term(DB.base, StandardFields):
"""Represent a term used as a pre- or post-requisite."""
language: str
term: str
url: str
__tablename__ = "term"
__table_args__ = (
UniqueConstraint("language", "term", name="language_term_unique"),
)
language = Column(String(LANG_ID_LEN), nullable=False)
term = Column(Text, nullable=False)
url = Column(Text, nullable=False)
lesson_versions = relationship(
"LessonVersion", secondary="lesson_version_term", back_populates="terms"
)
@dataclass
class Person(DB.base, StandardFields):
"""Represent a person."""
name: str
email: str
__tablename__ = "person"
name = Column(Text, nullable=False)
email = Column(Text, nullable=False)
lesson_versions = relationship(
"LessonVersion", secondary="lesson_version_author", back_populates="authors"
)
```
#### File: harper/harper/term.py
```python
from fastapi import APIRouter
from sqlalchemy import func
from sqlalchemy.orm import Session
from harper.db import DB, LessonVersion, Term
from harper.util import harper_exc
router = APIRouter()
@router.get("/all/")
@harper_exc
async def get_all_terms():
    """Return all terms with a count of the current lesson versions that use each."""
with Session(DB.engine) as session:
subquery = session.query(func.max(LessonVersion.id))
subquery = subquery.group_by(LessonVersion.lesson_id)
subquery = subquery.subquery()
query = session.query(Term, func.count(Term.id))
query = query.select_from(LessonVersion)
query = query.join(LessonVersion.terms)
query = query.group_by(Term.id)
query = query.filter(LessonVersion.id.in_(subquery.select()))
results = query.all()
return [r[0].to_dict() | {"count": r[1]} for r in results]
``` |
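The router only becomes reachable once it is mounted on a FastAPI application and `DB.configure()` has been called. A minimal wiring sketch; the `/terms` prefix and the SQLite URL are illustrative choices, not taken from the harper code base.
```python
# Minimal app wiring for the terms router defined above.
from fastapi import FastAPI

from harper.db import DB
from harper.term import router as term_router

DB.configure("sqlite:///harper.db")   # DB.configure("test") gives an empty in-memory schema instead

app = FastAPI()
app.include_router(term_router, prefix="/terms")
# GET /terms/all/ now returns each term plus the number of current lesson versions using it.
```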
{
"source": "JorySchossau/hpcc_tools",
"score": 2
} |
#### File: JorySchossau/hpcc_tools/queue.py
```python
SCRATCH="FULL_PATH_TO_YOUR_SCRATCH_SPACE"
# username
USER="YOUR_USERNAME_HERE"
# how big is one batch of jobs? ex 10 means there must be 10 free slots to run another batch.
JOB_ARRAY_MAX=20
# max total jobs to run in parallel with this tool (ideally multiple of JOB_ARRAY_MAX)
QUEUE_MAX=140
##
##
## Customize everything before this line
##
##
import os, sys, shutil
import glob
import pickle
import time
import re
from subprocess import check_output
import argparse
basedir = os.path.dirname(os.path.realpath(__file__))
def joinpath(*args):
build = args[0]
for iarg in range(1,len(args)):
build = os.path.join(build,args[iarg]);
return build;
def existingProjects():
projects = [];
confdir = joinpath(SCRATCH, ".queue");
if os.path.isdir(confdir):
for folder in os.walk(confdir).next()[1]:
projectdir = joinpath(confdir,folder);
projects.append(projectdir);
return projects
else:
return False
def quitWithMsg(msg):
print("\n");
print(msg);
sys.exit(1);
def runningJobs():
'''Returns list of job ids (just the 12345 number part)
for all running jobs of the specified user'''
list_running = check_output(["qstat -au "+USER+" | tail -n +6 | cut -d'.' -f1 | cut -d'[' -f1"], shell=True);
list_running = list_running.split('\n');
list_running = [int(e) for e in list_running if len(e) != 0];
return list_running;
def splitArrayJob(jobfile,jobtext,begin,end):
begin = int(begin);
end = int(end);
rxBegin = re.compile(r'''XBEGINX''');
rxEnd = re.compile(r'''XENDX''');
rxMultiplier = re.compile(r'''XMULTIPLIERX''');
rxSet = re.compile(r'''XSETX''');
rxOffset = re.compile(r'''XOFFSETX''');
SETS = (end - begin + 1) / JOB_ARRAY_MAX;
REMAINDER = (end - begin + 1) % JOB_ARRAY_MAX;
firstJobMade=False;
projectdir=joinpath(SCRATCH,".queue");
jobs_to_add = [];
projdirname = "array_"+jobfile;
if not os.path.exists(joinpath(SCRATCH,".queue",projdirname)):
os.makedirs(joinpath(SCRATCH,".queue",projdirname));
for set in range(SETS):
this_filename = str(set*JOB_ARRAY_MAX+begin)+"."+str(set*JOB_ARRAY_MAX+begin+JOB_ARRAY_MAX-1)+".sh";
jobs_to_add.append(this_filename);
open(joinpath(projectdir,"array_"+jobfile,this_filename),'w').write(rxOffset.sub(str(begin-1),rxSet.sub(str(JOB_ARRAY_MAX),rxMultiplier.sub(str(set),rxEnd.sub(str(JOB_ARRAY_MAX),rxBegin.sub(str(1),jobtext)))))); ## for each set, multiplier=set, begin=begin, end=begin+JOB_ARRAY_MAX-1
firstJobMade=True;
if (REMAINDER != 0):
this_filename = str(SETS*JOB_ARRAY_MAX+begin)+"."+str(end)+".sh";
jobs_to_add.append(this_filename);
if (firstJobMade == True):
open(joinpath(projectdir,"array_"+jobfile,this_filename),'w').write(rxOffset.sub(str(begin-1),rxSet.sub(str(JOB_ARRAY_MAX),rxMultiplier.sub(str(SETS),rxEnd.sub(str(REMAINDER),rxBegin.sub(str(1),jobtext)))))); ## at remainder, assume first job started: multiplier=SETS, begin=1, end=end
else:
open(joinpath(projectdir,"array_"+jobfile,this_filename),'w').write(rxOffset.sub(str(begin-1),rxSet.sub(str(JOB_ARRAY_MAX),rxMultiplier.sub(str(SETS),rxEnd.sub(str(end-begin+1),rxBegin.sub(str(1),jobtext)))))); ## at remainder, assume NOT first job started: multiplier=
pickle.dump(jobs_to_add, open(joinpath(SCRATCH,".queue",projdirname,".held"),"wb"));
submitted = [];
pickle.dump(submitted, open(joinpath(SCRATCH,".queue",projdirname,".submitted"),"wb")); #write empty array so file exists
def checkOnJobsForProjects():
"""Submits jobs for folders found in the .queue hidden folder.
Returns False if there were jobs found to submit or running, True otherwise."""
projects = existingProjects();
if (projects == False):
quitWithMsg("No projects found\n");
running_jobs = runningJobs();
available_slots = 0; #how many jobs we can submit at the end of evaluation
zeroJobsLeft = True;
for project in projects:
submitted_jobs = pickle.load(open(joinpath(project,".submitted"),"rb"));
held_jobs = pickle.load(open(joinpath(project,".held"),"rb"));
for job in submitted_jobs:
if (job not in running_jobs):
submitted_jobs.remove(job);
if (len(held_jobs) == 0 and len(submitted_jobs) == 0):
shutil.rmtree(project) #remove finished project
continue;
else:
zeroJobsLeft = False;
available = QUEUE_MAX - (len(running_jobs)*JOB_ARRAY_MAX);
if (available >= 0):
available_slots += (available/JOB_ARRAY_MAX);
while ((available_slots > 0) and (len(held_jobs) > 0)):
job = held_jobs.pop();
jobID = submitJobGetID(joinpath(project,job));
submitted_jobs.append(jobID);
available_slots -= 1;
pickle.dump(submitted_jobs, open(joinpath(project,".submitted"),"wb"));
pickle.dump(held_jobs, open(joinpath(project,".held"),"wb"));
return zeroJobsLeft;
def submitJobGetID(jobFileFullPath):
'''Submits a job file given the full path
and returns the integer job id part of 12345.job.mgr[] etc.'''
ID = check_output(["qsub "+jobFileFullPath], shell=True);
ID = int(ID.split('\n')[0].split('.')[0].split('[')[0]);
return ID;
def daemonIsRunning():
daemonfile = joinpath(SCRATCH,'.queue','.daemon');
if os.path.exists(daemonfile) and os.path.isfile(daemonfile):
return True;
else:
return False;
def markDaemonIsRunning():
daemonfile = joinpath(SCRATCH,'.queue','.daemon');
open(daemonfile, 'a').close();
def markDaemonNotRunning():
daemonfile = joinpath(SCRATCH,'.queue','.daemon');
if os.path.exists(daemonfile) and os.path.isfile(daemonfile):
os.remove(daemonfile);
def getArrayRangeAndTemplateFile(filename):
'''takes a filename of a qsub submit file.
returns the numbers used in this type of line: #PBS -t 1-100
and returns the new file contents replaced with template symbols: #PBS -t XBEGINX-XENDX
and a new line beneath that: PBS_ARRAYID=$[${PBS_ARRAYID}+XMULTIPLIERX*XSETX+XOFFSETX]'''
filetext = None;
with open(filename, 'r') as filehandle:
filetext = filehandle.read();
regex = re.compile(r'''PBS -t[\s]*(\d+)[\s]*-[\s]*(\d+)''');
match = regex.search(filetext);
filetext = regex.sub(r'''PBS -t XBEGINX-XENDX
PBS_ARRAYID=$[${PBS_ARRAYID}+XMULTIPLIERX*XSETX+XOFFSETX]''',filetext);
return (match.group(1), match.group(2), filetext);
def submitSelf():
check_output(["qsub -j oe -o /dev/null -l mem=2gb,nodes=1:ppn=1,walltime=03:55:00 "+joinpath(SCRATCH,'.queue','queue.py')], shell=True);
def help():
'''Returns args object.'''
print(" Usage: ./queue.py [qsub file or resume]")
print("")
print(" qsub file: A qsub array job.")
print(" resume: Resubmits self in job manager mode if necessary.")
print(" <no arguments>: Assumes run in automated job manager mode")
print(" and will kill itself and resubmit itself every 4 hours.")
print("")
    print(" Warning: this script may produce many files.")
    print(" Take care to remove job directories no longer")
print(" needed by the end of the qsub file.")
print("")
print(" The working folder is "+joinpath(SCRATCH,".queue")+" which")
print(" contains a semaphore .daemon file, and a project directory")
print(" for each submitted qsub array file using this tool.")
print(" Each project directory contains prepared qsub files for")
print(" each smaller array segment, as well as two hidden files")
print(" .held and .submitted. .held is a list of qsub files yet")
print(" to be submitted. .submitted contains job ids for jobs")
print(" that are running.")
print("")
print(" Workflow: submit a qsub array job. queue.py is automatically")
print(" submitted as a 4 hour job which every minute checks if job")
print(" status has changed. If so then submits a new chunk of jobs")
print(" if there is room. A .daemon file is created at the beginning")
print(" of the job manager 4 hour run, and removed at the end.")
print(" This .daemon file helps prevent multiple job managers running.")
    print(" However, if you kill the job manager, simply run ./queue.py resume")
print(" and the job manager will be forced into a running state after")
print(" submission. The project directories in")
print(" "+joinpath(SCRATCH,".queue")+" are each removed after")
print(" all jobs in them are completed.");
print("")
def main():
if (len(sys.argv) > 1): # Users wants to submit a new job
if (sys.argv[1] in ["-h","--h","--help","-?","--?"]):
help()
sys.exit(0);
if (sys.argv[1] == "resume"):
markDaemonIsRunning();
shutil.copy("queue.py", joinpath(SCRATCH,'.queue','queue.py'));
submitSelf();
sys.exit(0);
rangeBegin, rangeEnd, fileTemplated = getArrayRangeAndTemplateFile(sys.argv[1]);
splitArrayJob(sys.argv[1], fileTemplated, rangeBegin, rangeEnd);
shutil.copy("queue.py", joinpath(SCRATCH,'.queue','queue.py'));
checkOnJobsForProjects();
if not daemonIsRunning():
markDaemonIsRunning();
submitSelf();
else: # Not user-mode, but automated startup to maintain jobs
if not daemonIsRunning():
sys.stderr.write("Something is wrong, because we're running a new instance but the daemon flag is gone. Shutting down.\n");
sys.exit(0);
justUnder4Hours = 3600*4 - 60*10; #10 minutes under
timeStart = time.time();
while ((time.time() - timeStart) < justUnder4Hours):
done = checkOnJobsForProjects();
if (done == True):
markDaemonNotRunning();
sys.exit(0);
else:
time.sleep(60); # wait one minute
submitSelf();
sys.exit(0);
if __name__ == "__main__":
main();
``` |
{
"source": "JorySchossau/nqr",
"score": 2
} |
#### File: nqr/targets/slurm.py
```python
import sys, os, glob, subprocess
from typing import Tuple, List
from .. import utils
from . import targets
from ..generic import iterate_commands_and_outputs, run_command
from .targets import get_settings
slurm_normal_template = """#!/bin/bash -login
{sbatch}
#SBATCH --output=slurm.out.log
#SBATCH --error=slurm.err.log
#SBATCH --job-name={jobname}
shopt -s expand_aliases
ulimit -s 8192
cd ${{SLURM_SUBMIT_DIR}}
{commands}
touch .finished
#sacct -j ${{SLURM_JOB_ID}} # could print job status info
exit $ret
"""
slurm_indefinite_template = """#!/bin/bash -login
{sbatch}
#SBATCH --output=slurm.out.log
#SBATCH --error=slurm.err.log
#SBATCH --job-name={jobname}
shopt -s expand_aliases
ulimit -s 8192
cd ${{SLURM_SUBMIT_DIR}}
export SLURM_JOBSCRIPT="job.sb" # used for resubmission
######################## start dmtcp_coordinator #######################
fname=port.$SLURM_JOBID # store port number
dmtcp_coordinator --daemon --exit-on-last -p 0 --port-file $fname $@ 1>/dev/null 2>&1 # start coordinater
h=`hostname` # get coordinator's host name
p=`cat $fname` # get coordinator's port number
export DMTCP_COORD_HOST=$h # save coordinators host info in an environment variable
export DMTCP_COORD_PORT=$p # save coordinators port info in an environment variable
export DMTCP_CHECKPOINT_DIR="./" # save ckpt files into unique locations
####################### BODY of the JOB ######################
# prepare work environment of the job
# if first time launch, use "dmtcp_launch" otherwise use "dmtcp_restart"
export CKPT_WAIT_SEC=$(( 4 * 60 * 60 - 10 * 60 )) # when to ckpt, in seconds (just under 4 hrs)
# Launch or restart the execution
if [ ! -f ${{DMTCP_CHECKPOINT_DIR}}/ckpt_*.dmtcp ] # if no ckpt file exists, it is first time run, use dmtcp_launch
then
# first time run, use dmtcp_launch to start the job and run on background
dmtcp_launch -h $DMTCP_COORD_HOST -p $DMTCP_COORD_PORT --rm --ckpt-open-files bash commands.sh &
#wait for an inverval of checkpoint seconds to start checkpointing
sleep $CKPT_WAIT_SEC
# start checkpointing
dmtcp_command -h $DMTCP_COORD_HOST -p $DMTCP_COORD_PORT --ckpt-open-files --bcheckpoint
# kill the running job after checkpointing
dmtcp_command -h $DMTCP_COORD_HOST -p $DMTCP_COORD_PORT --quit
# resubmit the job
sbatch $SLURM_JOBSCRIPT
else # it is a restart run
# clean up artifacts (resulting files that could be in the middle of being written to)
# clean up any generated mabe files that have been checkpointed
bash cleanup.sh
# restart job with checkpoint files ckpt_*.dmtcp and run in background
dmtcp_restart -h $DMTCP_COORD_HOST -p $DMTCP_COORD_PORT ckpt_*.dmtcp &
# wait for a checkpoint interval to start checkpointing
sleep $CKPT_WAIT_SEC
# if program is still running, do the checkpoint and resubmit
if dmtcp_command -h $DMTCP_COORD_HOST -p $DMTCP_COORD_PORT -s 1>/dev/null 2>&1
#if dmtcp_command -h $DMTCP_COORD_HOST -p $DMTCP_COORD_PORT -s 1>/dev/null
then
# clean up old ckpt files before start checkpointing
rm -r ckpt_*.dmtcp
# checkpointing the job
dmtcp_command -h $DMTCP_COORD_HOST -p $DMTCP_COORD_PORT --ckpt-open-files -bc
# kill the running program and quit
dmtcp_command -h $DMTCP_COORD_HOST -p $DMTCP_COORD_PORT --quit
# resubmit this script to slurm
sbatch $SLURM_JOBSCRIPT
else
echo "job finished"
fi
fi
# show the job status info
#scontrol show job $SLURM_JOB_ID
"""
def _get_sbatch_options_str() -> str:
"""constructs '#SBATCH --key=value' strings, returns one big block:str"""
global targets
return '\n'.join([f"#SBATCH {string.replace('_','-')}" for string in targets.get_settings('slurm')])
_printed_help = False
def print_help():
global _printed_help
# avoid looping into module helps
if _printed_help:
sys.exit(0)
_printed_help = True
help_output = """
Help - SLURM Target ==========================
(no flags) Normal submission
-i Indefinite mode (4hr chunks w/ checkpointing until done)
-sN When -i, limit indefinite mode to N 4hr chunks
"""
print(help_output)
def _determine_indefinite_mode() -> bool:
indefinite = any([e.startswith('-i') for e in sys.argv])
return indefinite
def _determine_indefinite_repeats() -> Tuple[bool, int]:
limited = any([e.startswith('-s') for e in sys.argv])
repeats = None
if limited:
for arg in sys.argv:
if arg.startswith('-s'):
if len(arg) > 2:
try:
repeats = int(arg[2:])
if repeats <= 0:
raise ValueError()
except ValueError:
print("\nError: -sN (N should be positive int)")
sys.exit(1)
break
return limited, repeats
def _create_commands_and_cleanup_scripts(parent:str, commands_pairs:List[Tuple[str,List[str]]]):
# create helper lists
zcount = len(str(len(commands_pairs))) # for zfill
finished_ids = list(range(len(commands_pairs)))
finished_names = [f".{str(n).zfill(zcount)}.finished" for n in range(len(commands_pairs))]
# create file content strings
commands_script = """
##################################
# automatically created by PPNQR #
##################################
# This is the set of commands called by
# the checkpointing system. This bash script
# is actually what is getting checkpointed.
"""
cleanup_script = """
##################################
# automatically created by PPNQR #
##################################
# This utility is called by the job/checkpointing script
# Cleanup is required if restarting a checkpoint
# and the job is not yet done writing its output files
# we must manually remove them so the checkpointing script
# can restore the version of the files it is expecting
# as small byte-differences can happen between checkpointing
# and job-killing and invalidate those left behind.
# Also, I think existing files make the reloading break anyway.
"""
for i in finished_ids:
command,output = commands_pairs[i]
commands_script += f"""
{command}
if [ $? -ne 0 ]; then exit; fi
touch {finished_names[i]}
"""
if output:
removals_list = list()
for file_glob in output:
removals_list.append(f""" rm -rf {file_glob}""")
removals = '\n'.join(removals_list)
cleanup_script += f"""
if [ ! -f "{finished_names[i]}" ]; then
{removals}
fi
"""
# create files
open(os.path.join(parent,'commands.sh'),'wt').write(commands_script)
open(os.path.join(parent,'cleanup.sh'),'wt').write(cleanup_script)
# clear any existing .finished semaphores
finished_files = glob.glob(os.path.join(parent,".*.finished"))
for each_file in finished_files:
os.remove(each_file)
def _create_normal_job_file(run:dict, commands_pairs:List[Tuple[str,List[str]]]):
command_code = ""
for command,_ in commands_pairs:
command_code += f"""
{command}
ret=$?
if [ $? -ne 0 ]; then exit; fi
"""
content = slurm_normal_template.format(
sbatch=_get_sbatch_options_str(),
jobname=run['condition'],
commands=command_code,
)
open(os.path.join(run['dir'],'job.sb'),'wt').write(content)
def _create_indefinite_job_file(run:dict):
content = slurm_indefinite_template.format(
sbatch=_get_sbatch_options_str(),
jobname=run['condition'],
)
open(os.path.join(run['dir'],'job.sb'),'wt').write(content)
def _has_sbatch_has_dmtcp() -> Tuple[bool,bool]:
from shutil import which
has_sbatch = bool(which('sbatch'))
has_dmtcp = bool(which('dmtcp_launch'))
return (has_sbatch, has_dmtcp)
def launch(run):
help_flag = any([e in sys.argv for e in ('-h','--help')])
if help_flag:
print_help()
return # allow downstream to print help
dry_run = '--run' not in sys.argv
indefinite_mode = _determine_indefinite_mode()
finite,finite_repeats = _determine_indefinite_repeats()
has_sbatch,has_dmtcp = _has_sbatch_has_dmtcp()
if dry_run:
diagnostics = f"""
SLURM Target
sbatch: {'Yes' if has_sbatch else 'No (no SLURM system found)'}
dmtcp: {'Yes (indefinite mode available)' if has_dmtcp else 'No (indefinite mode not available)'}
run mode: {'indefinite' if indefinite_mode else 'normal'}{' (limited to '+str(finite_repeats)+'x 4hr repeats)' if indefinite_mode and finite else ''}
"""
print(diagnostics)
return
# create directories and copy files
utils.make_directories(run)
utils.copy_requirements(run)
commands_pairs = list(iterate_commands_and_outputs(run))
# create necessary job files
if indefinite_mode:
        if not has_dmtcp:
            print("Error: no dmtcp available for indefinite checkpointing. Try normal mode.")
            sys.exit(1)
if not has_sbatch:
print("Error: not a SLURM system. Use a valid target for launching jobs.")
sys.exit(1)
# override time to 4 hrs
settings.time='03:59:00'
# create commands.sh, cleanup.sh to be called by dmtcp checkpointing
_create_commands_and_cleanup_scripts(run['dir'], commands_pairs)
_create_indefinite_job_file(run)
else: # normal mode
if not has_sbatch:
print("Error: not a SLURM system. Use a valid target for launching jobs.")
sys.exit(1)
_create_normal_job_file(run, commands_pairs)
# submit the job using slurm
subprocess.Popen('sbatch job.sb', shell=True, cwd=run['dir'])
# register this target
from . import targets
current_module = sys.modules[__name__]
targets.addtarget('slurm',current_module)
``` |
{
"source": "jorzel/opentable",
"score": 3
} |
#### File: src/application/uow.py
```python
from abc import ABC, abstractmethod
class UnitOfWork(ABC):
"""
Secondary port (interface) for transaction management (usually, but not only
database transactions)
"""
@abstractmethod
def __enter__(self):
pass
@abstractmethod
def __exit__(self, *args):
pass
@abstractmethod
def commit(self):
pass
@abstractmethod
def rollback(self):
pass
```
#### File: src/domain/serializers.py
```python
from .entities.restaurant import Restaurant
from .entities.table import Table
def table_serializer(table: Table):
return {"id": table.id, "max_persons": table.max_persons, "is_open": table.is_open}
def restaurant_serializer(restaurant: Restaurant):
return {
"id": restaurant.id,
"tables": [table_serializer(t) for t in restaurant.tables],
}
```
#### File: db/sqlalchemy/orm.py
```python
from sqlalchemy import Boolean, Column, ForeignKey, Integer
from sqlalchemy import Table as sa_Table
from sqlalchemy.orm import mapper, relationship
from domain.entities.restaurant import Restaurant
from domain.entities.table import Table
from .setup import metadata
restaurant = sa_Table(
"restaurant",
metadata,
Column("id", Integer, primary_key=True, autoincrement=True),
)
table = sa_Table(
"table",
metadata,
Column("id", Integer, primary_key=True, autoincrement=True),
Column("restaurant_id", Integer, ForeignKey("restaurant.id")),
Column("max_persons", Integer),
Column("is_open", Boolean),
)
def run_mappers():
"""
Provides mapping between db tables and domain models.
"""
mapper(
Restaurant,
restaurant,
properties={"tables": relationship(Table, backref="restaurant")},
)
mapper(Table, table)
```
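A brief usage sketch: `run_mappers()` must run exactly once at startup, before any session touches the domain classes. The engine URL, session wiring and import paths below are assumptions, not taken from the repository.
```python
# Illustrative wiring only -- the SQLite URL, session setup and package paths are assumptions.
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from infrastructure.db.sqlalchemy.orm import run_mappers
from infrastructure.db.sqlalchemy.setup import metadata
from domain.entities.restaurant import Restaurant

engine = create_engine("sqlite://")
run_mappers()                  # map the plain domain classes exactly once
metadata.create_all(engine)
session = sessionmaker(bind=engine)()
restaurants = session.query(Restaurant).all()   # Restaurant is now queryable
```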
#### File: infrastructure/events/local_publisher.py
```python
from typing import List
from application.handlers.events import handle_events
from domain.events import DomainEvent
from domain.events.publisher import EventPublisher
class LocalEventPublisher(EventPublisher):
def publish(self, events: List[DomainEvent]) -> None:
handle_events([e.as_dict for e in events])
```
#### File: tests/unit/test_booking_service.py
```python
from unittest.mock import ANY, Mock
import pytest
from application.services.booking_table import BookingTableApplicationService
from domain.commands import BookTableCommand
from domain.events.table import BookedTableEvent
from infrastructure.db.memory.repository import MemoryRestaurantRepository
from infrastructure.db.memory.uow import FakeUnitOfWork
@pytest.fixture
def mocked_event_publisher():
return Mock()
def test_booking_service_book_table_should_pass_when_table_in_restaurant_is_available(
restaurant_factory,
table_factory,
mocked_event_publisher,
):
repository = MemoryRestaurantRepository()
booking_service = BookingTableApplicationService(
repository, FakeUnitOfWork(), mocked_event_publisher
)
table = table_factory(table_id=1, max_persons=5, is_open=True)
restaurant = restaurant_factory(
restaurant_id=1, tables=[table], repository=repository
)
command = BookTableCommand(restaurant.id, persons=2)
booking_service.book_table(command)
assert table.is_open is False
mocked_event_publisher.publish.assert_called_once_with(
[
BookedTableEvent(
table_id=table.id, restaurant_id=restaurant.id, booked_at=ANY
)
]
)
``` |
{
"source": "jorzel/recruitment",
"score": 3
} |
#### File: application/events/handlers.py
```python
import logging
from typing import Any, Dict, List
from application.uow import UnitOfWork
from domain.events.base import DomainEvent, SerializedEvent
from domain.projections.store import ProjectionStore
logger = logging.getLogger(__name__)
class EventHandler:
"""
`handlers` - is a key / value store, where key is :attr:`DomainEvent.name` and
value is callable defining what action should be made when the event is
handled
"""
def __init__(
self,
handlers: Dict[DomainEvent, Any],
unit_of_work: UnitOfWork,
projection_store: ProjectionStore,
):
self._handlers = handlers
self._unit_of_work = unit_of_work
self._projection_store = projection_store
def handle(self, events: List[SerializedEvent]) -> None:
"""
Method defining how incoming events are handled.
`events` - list of serialized events
"""
for event in events:
handler = self._handlers.get(event["name"])
if not handler:
logger.info(f"Handler for {event} not found")
continue
handler(self._unit_of_work, self._projection_store, event)
```
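To make the handlers contract concrete: `handle()` looks handlers up by the serialized event's `name`, so string keys are what actually match at runtime. A hedged wiring sketch follows; the event name, the projection call and the Mock collaborators are all made up for illustration.
```python
# Illustrative only -- "booked_table" and the projection call are assumptions.
from unittest.mock import Mock

def booked_table_projection(unit_of_work, projection_store, event):
    # e.g. update a "booked tables" read model from the serialized event
    projection_store.add("booked_tables", event)

handler = EventHandler(
    handlers={"booked_table": booked_table_projection},
    unit_of_work=Mock(),
    projection_store=Mock(),
)
handler.handle([{"name": "booked_table", "table_id": 1}])
handler.handle([{"name": "unknown_event"}])   # unknown names are logged and skipped
```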
#### File: db/sqlalchemy/uow.py
```python
from application.uow import UnitOfWork
class SQLAlchemyUnitOfWork(UnitOfWork):
"""
Implementation of transaction management for ORM SQLAlchemy
"""
def __init__(self, session):
self.session = session
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
# roll back if the managed block raised; otherwise commit and surface commit failures
if exc_type is not None:
self.rollback()
return
try:
self.commit()
except Exception:
self.rollback()
raise
def commit(self):
self.session.commit()
def rollback(self):
self.session.rollback()
``` |
{
"source": "jorzel/service-layer",
"score": 3
} |
#### File: src/api/graphql.py
```python
import graphene
from graphene.relay.node import from_global_id
from service import book_restaurant_table, get_restaurants
class UserNode(graphene.ObjectType):
class Meta:
interfaces = (graphene.relay.Node,)
email = graphene.String()
class RestaurantNode(graphene.ObjectType):
class Meta:
interfaces = (graphene.relay.Node,)
name = graphene.String()
class RestaurantConnection(graphene.Connection):
class Meta:
node = RestaurantNode
class TableBookingNode(graphene.ObjectType):
class Meta:
interfaces = (graphene.relay.Node,)
persons = graphene.Int()
class TableBookingConnection(graphene.Connection):
class Meta:
node = TableBookingNode
class BookRestaurantTable(graphene.Mutation):
class Arguments:
restaurant_gid = graphene.ID(required=True)
persons = graphene.Int(required=True)
user_email = graphene.String(required=True)
is_booked = graphene.Boolean()
def mutate(self, info, restaurant_gid: str, persons: int, user_email: str):
session = info.context["session"]
_, restaurant_id = from_global_id(restaurant_gid)
_ = book_restaurant_table(session, restaurant_id, user_email, persons)
return BookRestaurantTable(is_booked=True)
class Mutation(graphene.ObjectType):
book_restaurant_table = BookRestaurantTable.Field()
class Query(graphene.ObjectType):
up = graphene.Boolean()
restaurants = graphene.relay.ConnectionField(
RestaurantConnection, q=graphene.String()
)
def resolve_up(root, info, **kwargs):
return True
def resolve_restaurants(root, info, **kwargs):
query = get_restaurants(
info.context["session"], search=kwargs.get("q"), limit=kwargs.get("first")
)
return [RestaurantNode(id=r.id, name=r.name) for r in query]
schema = graphene.Schema(
query=Query, mutation=Mutation, types=[UserNode, RestaurantNode]
)
```
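For orientation, a hedged example of driving the mutation through the schema defined above; the relay global id, the email and the `session` object are placeholders.
```python
# Illustrative only -- the global id, email and `session` are placeholders.
BOOK_TABLE = """
mutation {
  bookRestaurantTable(restaurantGid: "UmVzdGF1cmFudE5vZGU6MQ==",
                      persons: 2,
                      userEmail: "guest@example.com") {
    isBooked
  }
}
"""
result = schema.execute(BOOK_TABLE, context_value={"session": session})
print(result.errors, result.data)   # expect {'bookRestaurantTable': {'isBooked': True}}
```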
#### File: src/tests/test_rest_api.py
```python
import pytest
from models import TableBooking
@pytest.fixture
def test_client(app):
return app.test_client()
def test_up_endpoint(test_client):
response = test_client.get("/")
assert response.status_code == 200
assert response.json == {"up": True}
def test_get_restaurants(test_client, restaurant_factory, db_session):
restaurant = restaurant_factory(name="taverna")
response = test_client.get("/restaurants")
assert response.status_code == 200
assert response.json == [{"name": restaurant.name, "id": restaurant.id}]
def test_get_parametrized_restaurants(test_client, restaurant_factory, db_session):
restaurant = restaurant_factory(name="taverna")
_ = restaurant_factory(name="americano")
response = test_client.get("/restaurants?q=tav&limit=1")
assert response.status_code == 200
assert response.json == [{"name": restaurant.name, "id": restaurant.id}]
def test_post_bookings(
test_client, restaurant_factory, table_factory, user_factory, db_session
):
restaurant = restaurant_factory(name="taverna")
_ = table_factory(restaurant=restaurant, max_persons=5, is_open=True)
user = user_factory(email="<EMAIL>")
persons = 3
payload = {
"restaurant_id": restaurant.id,
"persons": persons,
"user_email": user.email,
}
response = test_client.post("/bookings", json=payload)
assert response.status_code == 201
assert response.json == {"isBooked": True}
assert (
db_session.query(TableBooking)
.filter_by(user=user, restaurant=restaurant, persons=persons)
.first()
)
``` |
{
"source": "jorzel/value-object-persistance",
"score": 3
} |
#### File: value-object-persistance/src/orm.py
```python
from sqlalchemy import (
Column,
Enum,
Float,
ForeignKey,
Integer,
String,
Table,
UniqueConstraint,
)
from sqlalchemy.dialects.sqlite import JSON
from sqlalchemy.orm import composite, registry, relationship
from db import metadata
from models.shop import Currency, Shop
from value_objects.location import Location
from value_objects.money import Money
mapper_registry = registry()
location = Table(
"location",
metadata,
Column("id", Integer, primary_key=True, autoincrement=True),
Column("city", String),
Column("region", String),
Column("longitude", Float),
Column("latitude", Float),
UniqueConstraint(
"city",
"region",
"longitude",
"latitude",
name="uix_city_region_longitude_latitude",
),
)
shop = Table(
"shop",
metadata,
Column("id", Integer, primary_key=True, autoincrement=True),
Column("email_address", String),
Column("balance_value", Integer, default=0, nullable=False),
Column("balance_currency", Enum(Currency), default=Currency.USD, nullable=False),
Column("location_id", Integer, ForeignKey("location.id")),
Column("open_hours_config", JSON, nullable=False, default=dict),
)
def run_mappers():
"""
Provides mapping between db tables and domain models.
"""
mapper_registry.map_imperatively(
Location,
location,
properties={
"_city": location.c.city,
"_region": location.c.region,
"_latitude": location.c.latitude,
"_longitude": location.c.longitude,
},
)
mapper_registry.map_imperatively(
Shop,
shop,
properties={
"balance": composite(Money, shop.c.balance_value, shop.c.balance_currency),
"location": relationship(Location),
},
)
```
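After `run_mappers()` the two `balance_*` columns hydrate back into a single value object, and the composite can be compared directly in queries. A rough sketch, assuming a `Money(value, currency)` constructor matching the `composite()` column order and an already configured `session`:
```python
# Illustrative only -- Money(value, currency) and `session` are assumptions.
run_mappers()
shop = session.query(Shop).first()
isinstance(shop.balance, Money)                 # True: rebuilt from balance_value / balance_currency
session.query(Shop).filter(
    Shop.balance == Money(1000, Currency.USD)   # composite equality expands to two column comparisons
).all()
```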
#### File: tests/unit/test_location.py
```python
import pytest
from value_objects.location import InvalidGeolocation, Location
def test_location_initialization_succeed_when_values_are_valid():
location = Location(city="X", region="Y", latitude=21.91, longitude=28.20)
assert location.city == "X"
assert location.region == "Y"
assert location.latitude == 21.91
assert location.longitude == 28.20
def test_location_initialization_raises_exception_when_geolocation_is_below_zero():
with pytest.raises(InvalidGeolocation):
_ = Location(city="X", region="Y", latitude=21.91, longitude=-28.20)
``` |
{
"source": "Jos33y/student-performance-knn",
"score": 2
} |
#### File: core/ops/dispatch.py
```python
from typing import Any, Union
import numpy as np
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_extension_array_dtype,
is_integer_dtype,
is_object_dtype,
is_scalar,
is_timedelta64_dtype,
)
from pandas.core.dtypes.generic import ABCExtensionArray, ABCSeries
from pandas.core.construction import array
def should_extension_dispatch(left: ABCSeries, right: Any) -> bool:
"""
Identify cases where Series operation should use dispatch_to_extension_op.
Parameters
----------
left : Series
right : object
Returns
-------
bool
"""
if (
is_extension_array_dtype(left.dtype)
or is_datetime64_dtype(left.dtype)
or is_timedelta64_dtype(left.dtype)
):
return True
if not is_scalar(right) and is_extension_array_dtype(right):
# GH#22378 disallow scalar to exclude e.g. "category", "Int64"
return True
return False
def should_series_dispatch(left, right, op):
"""
Identify cases where a DataFrame operation should dispatch to its
Series counterpart.
Parameters
----------
left : DataFrame
right : DataFrame or Series
op : binary operator
Returns
-------
override : bool
"""
if left._is_mixed_type or right._is_mixed_type:
return True
if op.__name__.strip("_") in ["and", "or", "xor", "rand", "ror", "rxor"]:
# TODO: GH references for what this fixes
# Note: this check must come before the check for nonempty columns.
return True
if right.ndim == 1:
# operating with Series, short-circuit checks that would fail
# with AttributeError.
return False
if not len(left.columns) or not len(right.columns):
# ensure obj.dtypes[0] exists for each obj
return False
ldtype = left.dtypes.iloc[0]
rdtype = right.dtypes.iloc[0]
if (is_timedelta64_dtype(ldtype) and is_integer_dtype(rdtype)) or (
is_timedelta64_dtype(rdtype) and is_integer_dtype(ldtype)
):
# numpy integer dtypes as timedelta64 dtypes in this scenario
return True
if is_datetime64_dtype(ldtype) and is_object_dtype(rdtype):
# in particular case where right is an array of DateOffsets
return True
return False
def dispatch_to_extension_op(
op, left: Union[ABCExtensionArray, np.ndarray], right: Any,
):
"""
Assume that left or right is a Series backed by an ExtensionArray,
apply the operator defined by op.
Parameters
----------
op : binary operator
left : ExtensionArray or np.ndarray
right : object
Returns
-------
ExtensionArray or np.ndarray
2-tuple of these if op is divmod or rdivmod
"""
# NB: left and right should already be unboxed, so neither should be
# a Series or Index.
if left.dtype.kind in "mM" and isinstance(left, np.ndarray):
# We need to cast datetime64 and timedelta64 ndarrays to
# DatetimeArray/TimedeltaArray. But we avoid wrapping others in
# PandasArray as that behaves poorly with e.g. IntegerArray.
left = array(left)
# The op calls will raise TypeError if the op is not defined
# on the ExtensionArray
res_values = op(left, right)
return res_values
```
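A quick illustration of what the first predicate reports for a couple of common inputs; behavior is tied to the pandas version this module belongs to.
```python
# Illustrative only; depends on the pandas version this module ships with.
import pandas as pd

nullable = pd.Series([1, 2, 3], dtype="Int64")   # extension dtype
plain = pd.Series([1, 2, 3])                     # plain numpy int64

should_extension_dispatch(nullable, plain)   # True -- left holds an ExtensionArray
should_extension_dispatch(plain, 5)          # False -- numpy dtype vs. scalar
```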
#### File: core/ops/docstrings.py
```python
from typing import Dict, Optional
def _make_flex_doc(op_name, typ):
"""
Make the appropriate substitutions for the given operation and class-typ
into either _flex_doc_SERIES or _flex_doc_FRAME to return the docstring
to attach to a generated method.
Parameters
----------
op_name : str {'__add__', '__sub__', ... '__eq__', '__ne__', ...}
typ : str {'series', 'dataframe'}
Returns
-------
doc : str
"""
op_name = op_name.replace("__", "")
op_desc = _op_descriptions[op_name]
if op_name.startswith("r"):
equiv = "other " + op_desc["op"] + " " + typ
else:
equiv = typ + " " + op_desc["op"] + " other"
if typ == "series":
base_doc = _flex_doc_SERIES
doc_no_examples = base_doc.format(
desc=op_desc["desc"],
op_name=op_name,
equiv=equiv,
reverse=op_desc["reverse"],
)
if op_desc["series_examples"]:
doc = doc_no_examples + op_desc["series_examples"]
else:
doc = doc_no_examples
elif typ == "dataframe":
base_doc = _flex_doc_FRAME
doc = base_doc.format(
desc=op_desc["desc"],
op_name=op_name,
equiv=equiv,
reverse=op_desc["reverse"],
)
else:
raise AssertionError("Invalid typ argument.")
return doc
_add_example_SERIES = """
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
>>> a
a 1.0
b 1.0
c 1.0
d NaN
dtype: float64
>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
>>> b
a 1.0
b NaN
d 1.0
e NaN
dtype: float64
>>> a.add(b, fill_value=0)
a 2.0
b 1.0
c 1.0
d 1.0
e NaN
dtype: float64
"""
_sub_example_SERIES = """
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
>>> a
a 1.0
b 1.0
c 1.0
d NaN
dtype: float64
>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
>>> b
a 1.0
b NaN
d 1.0
e NaN
dtype: float64
>>> a.subtract(b, fill_value=0)
a 0.0
b 1.0
c 1.0
d -1.0
e NaN
dtype: float64
"""
_mul_example_SERIES = """
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
>>> a
a 1.0
b 1.0
c 1.0
d NaN
dtype: float64
>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
>>> b
a 1.0
b NaN
d 1.0
e NaN
dtype: float64
>>> a.multiply(b, fill_value=0)
a 1.0
b 0.0
c 0.0
d 0.0
e NaN
dtype: float64
"""
_div_example_SERIES = """
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
>>> a
a 1.0
b 1.0
c 1.0
d NaN
dtype: float64
>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
>>> b
a 1.0
b NaN
d 1.0
e NaN
dtype: float64
>>> a.divide(b, fill_value=0)
a 1.0
b inf
c inf
d 0.0
e NaN
dtype: float64
"""
_floordiv_example_SERIES = """
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
>>> a
a 1.0
b 1.0
c 1.0
d NaN
dtype: float64
>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
>>> b
a 1.0
b NaN
d 1.0
e NaN
dtype: float64
>>> a.floordiv(b, fill_value=0)
a 1.0
b NaN
c NaN
d 0.0
e NaN
dtype: float64
"""
_mod_example_SERIES = """
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
>>> a
a 1.0
b 1.0
c 1.0
d NaN
dtype: float64
>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
>>> b
a 1.0
b NaN
d 1.0
e NaN
dtype: float64
>>> a.mod(b, fill_value=0)
a 0.0
b NaN
c NaN
d 0.0
e NaN
dtype: float64
"""
_pow_example_SERIES = """
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
>>> a
a 1.0
b 1.0
c 1.0
d NaN
dtype: float64
>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
>>> b
a 1.0
b NaN
d 1.0
e NaN
dtype: float64
>>> a.pow(b, fill_value=0)
a 1.0
b 1.0
c 1.0
d 0.0
e NaN
dtype: float64
"""
_op_descriptions: Dict[str, Dict[str, Optional[str]]] = {
# Arithmetic Operators
"add": {
"op": "+",
"desc": "Addition",
"reverse": "radd",
"series_examples": _add_example_SERIES,
},
"sub": {
"op": "-",
"desc": "Subtraction",
"reverse": "rsub",
"series_examples": _sub_example_SERIES,
},
"mul": {
"op": "*",
"desc": "Multiplication",
"reverse": "rmul",
"series_examples": _mul_example_SERIES,
"df_examples": None,
},
"mod": {
"op": "%",
"desc": "Modulo",
"reverse": "rmod",
"series_examples": _mod_example_SERIES,
},
"pow": {
"op": "**",
"desc": "Exponential power",
"reverse": "rpow",
"series_examples": _pow_example_SERIES,
"df_examples": None,
},
"truediv": {
"op": "/",
"desc": "Floating division",
"reverse": "rtruediv",
"series_examples": _div_example_SERIES,
"df_examples": None,
},
"floordiv": {
"op": "//",
"desc": "Integer division",
"reverse": "rfloordiv",
"series_examples": _floordiv_example_SERIES,
"df_examples": None,
},
"divmod": {
"op": "divmod",
"desc": "Integer division and modulo",
"reverse": "rdivmod",
"series_examples": None,
"df_examples": None,
},
# Comparison Operators
"eq": {"op": "==", "desc": "Equal to", "reverse": None, "series_examples": None},
"ne": {
"op": "!=",
"desc": "Not equal to",
"reverse": None,
"series_examples": None,
},
"lt": {"op": "<", "desc": "Less than", "reverse": None, "series_examples": None},
"le": {
"op": "<=",
"desc": "Less than or equal to",
"reverse": None,
"series_examples": None,
},
"gt": {"op": ">", "desc": "Greater than", "reverse": None, "series_examples": None},
"ge": {
"op": ">=",
"desc": "Greater than or equal to",
"reverse": None,
"series_examples": None,
},
}
_op_names = list(_op_descriptions.keys())
for key in _op_names:
reverse_op = _op_descriptions[key]["reverse"]
if reverse_op is not None:
_op_descriptions[reverse_op] = _op_descriptions[key].copy()
_op_descriptions[reverse_op]["reverse"] = key
_flex_doc_SERIES = """
Return {desc} of series and other, element-wise (binary operator `{op_name}`).
Equivalent to ``{equiv}``, but with support to substitute a fill_value for
missing data in one of the inputs.
Parameters
----------
other : Series or scalar value
fill_value : None or float value, default None (NaN)
Fill existing missing (NaN) values, and any new element needed for
successful Series alignment, with this value before computation.
If data in both corresponding Series locations is missing
the result will be missing.
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level.
Returns
-------
Series
The result of the operation.
See Also
--------
Series.{reverse}
"""
_arith_doc_FRAME = """
Binary operator %s with support to substitute a fill_value for missing data in
one of the inputs
Parameters
----------
other : Series, DataFrame, or constant
axis : {0, 1, 'index', 'columns'}
For Series input, axis to match Series index on
fill_value : None or float value, default None
Fill existing missing (NaN) values, and any new element needed for
successful DataFrame alignment, with this value before computation.
If data in both corresponding DataFrame locations is missing
the result will be missing
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level
Returns
-------
result : DataFrame
Notes
-----
Mismatched indices will be unioned together
"""
_flex_doc_FRAME = """
Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`).
Equivalent to ``{equiv}``, but with support to substitute a fill_value
for missing data in one of the inputs. With reverse version, `{reverse}`.
Among flexible wrappers (`add`, `sub`, `mul`, `div`, `mod`, `pow`) to
arithmetic operators: `+`, `-`, `*`, `/`, `//`, `%`, `**`.
Parameters
----------
other : scalar, sequence, Series, or DataFrame
Any single or multiple element data structure, or list-like object.
axis : {{0 or 'index', 1 or 'columns'}}
Whether to compare by the index (0 or 'index') or columns
(1 or 'columns'). For Series input, axis to match Series index on.
level : int or label
Broadcast across a level, matching Index values on the
passed MultiIndex level.
fill_value : float or None, default None
Fill existing missing (NaN) values, and any new element needed for
successful DataFrame alignment, with this value before computation.
If data in both corresponding DataFrame locations is missing
the result will be missing.
Returns
-------
DataFrame
Result of the arithmetic operation.
See Also
--------
DataFrame.add : Add DataFrames.
DataFrame.sub : Subtract DataFrames.
DataFrame.mul : Multiply DataFrames.
DataFrame.div : Divide DataFrames (float division).
DataFrame.truediv : Divide DataFrames (float division).
DataFrame.floordiv : Divide DataFrames (integer division).
DataFrame.mod : Calculate modulo (remainder after division).
DataFrame.pow : Calculate exponential power.
Notes
-----
Mismatched indices will be unioned together.
Examples
--------
>>> df = pd.DataFrame({{'angles': [0, 3, 4],
... 'degrees': [360, 180, 360]}},
... index=['circle', 'triangle', 'rectangle'])
>>> df
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
Add a scalar with operator version which return the same
results.
>>> df + 1
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
>>> df.add(1)
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
Divide by constant with reverse version.
>>> df.div(10)
angles degrees
circle 0.0 36.0
triangle 0.3 18.0
rectangle 0.4 36.0
>>> df.rdiv(10)
angles degrees
circle inf 0.027778
triangle 3.333333 0.055556
rectangle 2.500000 0.027778
Subtract a list and Series by axis with operator version.
>>> df - [1, 2]
angles degrees
circle -1 358
triangle 2 178
rectangle 3 358
>>> df.sub([1, 2], axis='columns')
angles degrees
circle -1 358
triangle 2 178
rectangle 3 358
>>> df.sub(pd.Series([1, 1, 1], index=['circle', 'triangle', 'rectangle']),
... axis='index')
angles degrees
circle -1 359
triangle 2 179
rectangle 3 359
Multiply a DataFrame of different shape with operator version.
>>> other = pd.DataFrame({{'angles': [0, 3, 4]}},
... index=['circle', 'triangle', 'rectangle'])
>>> other
angles
circle 0
triangle 3
rectangle 4
>>> df * other
angles degrees
circle 0 NaN
triangle 9 NaN
rectangle 16 NaN
>>> df.mul(other, fill_value=0)
angles degrees
circle 0 0.0
triangle 9 0.0
rectangle 16 0.0
Divide by a MultiIndex by level.
>>> df_multindex = pd.DataFrame({{'angles': [0, 3, 4, 4, 5, 6],
... 'degrees': [360, 180, 360, 360, 540, 720]}},
... index=[['A', 'A', 'A', 'B', 'B', 'B'],
... ['circle', 'triangle', 'rectangle',
... 'square', 'pentagon', 'hexagon']])
>>> df_multindex
angles degrees
A circle 0 360
triangle 3 180
rectangle 4 360
B square 4 360
pentagon 5 540
hexagon 6 720
>>> df.div(df_multindex, level=1, fill_value=0)
angles degrees
A circle NaN 1.0
triangle 1.0 1.0
rectangle 1.0 1.0
B square 0.0 0.0
pentagon 0.0 0.0
hexagon 0.0 0.0
"""
_flex_comp_doc_FRAME = """
Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`).
Among flexible wrappers (`eq`, `ne`, `le`, `lt`, `ge`, `gt`) to comparison
operators.
Equivalent to `==`, `!=`, `<=`, `<`, `>=`, `>` with support to choose axis
(rows or columns) and level for comparison.
Parameters
----------
other : scalar, sequence, Series, or DataFrame
Any single or multiple element data structure, or list-like object.
axis : {{0 or 'index', 1 or 'columns'}}, default 'columns'
Whether to compare by the index (0 or 'index') or columns
(1 or 'columns').
level : int or label
Broadcast across a level, matching Index values on the passed
MultiIndex level.
Returns
-------
DataFrame of bool
Result of the comparison.
See Also
--------
DataFrame.eq : Compare DataFrames for equality elementwise.
DataFrame.ne : Compare DataFrames for inequality elementwise.
DataFrame.le : Compare DataFrames for less than inequality
or equality elementwise.
DataFrame.lt : Compare DataFrames for strictly less than
inequality elementwise.
DataFrame.ge : Compare DataFrames for greater than inequality
or equality elementwise.
DataFrame.gt : Compare DataFrames for strictly greater than
inequality elementwise.
Notes
-----
Mismatched indices will be unioned together.
`NaN` values are considered different (i.e. `NaN` != `NaN`).
Examples
--------
>>> df = pd.DataFrame({{'cost': [250, 150, 100],
... 'revenue': [100, 250, 300]}},
... index=['A', 'B', 'C'])
>>> df
cost revenue
A 250 100
B 150 250
C 100 300
Comparison with a scalar, using either the operator or method:
>>> df == 100
cost revenue
A False True
B False False
C True False
>>> df.eq(100)
cost revenue
A False True
B False False
C True False
When `other` is a :class:`Series`, the columns of a DataFrame are aligned
with the index of `other` and broadcast:
>>> df != pd.Series([100, 250], index=["cost", "revenue"])
cost revenue
A True True
B True False
C False True
Use the method to control the broadcast axis:
>>> df.ne(pd.Series([100, 300], index=["A", "D"]), axis='index')
cost revenue
A True False
B True True
C True True
D True True
When comparing to an arbitrary sequence, the number of columns must
match the number of elements in `other`:
>>> df == [250, 100]
cost revenue
A True True
B False False
C False False
Use the method to control the axis:
>>> df.eq([250, 250, 100], axis='index')
cost revenue
A True False
B False True
C True False
Compare to a DataFrame of different shape.
>>> other = pd.DataFrame({{'revenue': [300, 250, 100, 150]}},
... index=['A', 'B', 'C', 'D'])
>>> other
revenue
A 300
B 250
C 100
D 150
>>> df.gt(other)
cost revenue
A False False
B False False
C False True
D False False
Compare to a MultiIndex by level.
>>> df_multindex = pd.DataFrame({{'cost': [250, 150, 100, 150, 300, 220],
... 'revenue': [100, 250, 300, 200, 175, 225]}},
... index=[['Q1', 'Q1', 'Q1', 'Q2', 'Q2', 'Q2'],
... ['A', 'B', 'C', 'A', 'B', 'C']])
>>> df_multindex
cost revenue
Q1 A 250 100
B 150 250
C 100 300
Q2 A 150 200
B 300 175
C 220 225
>>> df.le(df_multindex, level=1)
cost revenue
Q1 A True True
B True True
C True True
Q2 A False True
B True False
C True False
"""
```
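For reference, this is roughly how the factory is consumed: reversed ops pick up the copied description and a flipped equivalence string.
```python
# Small illustration of the docstring factory defined above.
doc = _make_flex_doc("rsub", "series")
assert "binary operator `rsub`" in doc
assert "``other - series``" in doc   # reversed ops flip the equivalence expression
```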
#### File: tests/arithmetic/conftest.py
```python
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
# ------------------------------------------------------------------
# Helper Functions
def id_func(x):
if isinstance(x, tuple):
assert len(x) == 2
return x[0].__name__ + "-" + str(x[1])
else:
return x.__name__
# ------------------------------------------------------------------
@pytest.fixture(params=[1, np.array(1, dtype=np.int64)])
def one(request):
"""
Several variants of integer value 1. The zero-dim integer array
behaves like an integer.
This fixture can be used to check that datetimelike indexes handle
addition and subtraction of integers and zero-dimensional arrays
of integers.
Examples
--------
>>> dti = pd.date_range('2016-01-01', periods=2, freq='H')
>>> dti
DatetimeIndex(['2016-01-01 00:00:00', '2016-01-01 01:00:00'],
dtype='datetime64[ns]', freq='H')
>>> dti + one
DatetimeIndex(['2016-01-01 01:00:00', '2016-01-01 02:00:00'],
dtype='datetime64[ns]', freq='H')
"""
return request.param
zeros = [
box_cls([0] * 5, dtype=dtype)
for box_cls in [pd.Index, np.array]
for dtype in [np.int64, np.uint64, np.float64]
]
zeros.extend(
[box_cls([-0.0] * 5, dtype=np.float64) for box_cls in [pd.Index, np.array]]
)
zeros.extend([np.array(0, dtype=dtype) for dtype in [np.int64, np.uint64, np.float64]])
zeros.extend([np.array(-0.0, dtype=np.float64)])
zeros.extend([0, 0.0, -0.0])
@pytest.fixture(params=zeros)
def zero(request):
"""
Several types of scalar zeros and length 5 vectors of zeros.
This fixture can be used to check that numeric-dtype indexes handle
division by any zero numeric-dtype.
Uses vector of length 5 for broadcasting with `numeric_idx` fixture,
which creates numeric-dtype vectors also of length 5.
Examples
--------
>>> arr = pd.RangeIndex(5)
>>> arr / zeros
Float64Index([nan, inf, inf, inf, inf], dtype='float64')
"""
return request.param
# ------------------------------------------------------------------
# Vector Fixtures
@pytest.fixture(
params=[
pd.Float64Index(np.arange(5, dtype="float64")),
pd.Int64Index(np.arange(5, dtype="int64")),
pd.UInt64Index(np.arange(5, dtype="uint64")),
pd.RangeIndex(5),
],
ids=lambda x: type(x).__name__,
)
def numeric_idx(request):
"""
Several types of numeric-dtypes Index objects
"""
return request.param
# ------------------------------------------------------------------
# Scalar Fixtures
@pytest.fixture(
params=[
pd.Timedelta("5m4s").to_pytimedelta(),
pd.Timedelta("5m4s"),
pd.Timedelta("5m4s").to_timedelta64(),
],
ids=lambda x: type(x).__name__,
)
def scalar_td(request):
"""
Several variants of Timedelta scalars representing 5 minutes and 4 seconds
"""
return request.param
@pytest.fixture(
params=[
pd.offsets.Day(3),
pd.offsets.Hour(72),
pd.Timedelta(days=3).to_pytimedelta(),
pd.Timedelta("72:00:00"),
np.timedelta64(3, "D"),
np.timedelta64(72, "h"),
],
ids=lambda x: type(x).__name__,
)
def three_days(request):
"""
Several timedelta-like and DateOffset objects that each represent
a 3-day timedelta
"""
return request.param
@pytest.fixture(
params=[
pd.offsets.Hour(2),
pd.offsets.Minute(120),
pd.Timedelta(hours=2).to_pytimedelta(),
pd.Timedelta(seconds=2 * 3600),
np.timedelta64(2, "h"),
np.timedelta64(120, "m"),
],
ids=lambda x: type(x).__name__,
)
def two_hours(request):
"""
Several timedelta-like and DateOffset objects that each represent
a 2-hour timedelta
"""
return request.param
_common_mismatch = [
pd.offsets.YearBegin(2),
pd.offsets.MonthBegin(1),
pd.offsets.Minute(),
]
@pytest.fixture(
params=[
pd.Timedelta(minutes=30).to_pytimedelta(),
np.timedelta64(30, "s"),
pd.Timedelta(seconds=30),
]
+ _common_mismatch
)
def not_hourly(request):
"""
Several timedelta-like and DateOffset instances that are _not_
compatible with Hourly frequencies.
"""
return request.param
@pytest.fixture(
params=[
np.timedelta64(4, "h"),
pd.Timedelta(hours=23).to_pytimedelta(),
pd.Timedelta("23:00:00"),
]
+ _common_mismatch
)
def not_daily(request):
"""
Several timedelta-like and DateOffset instances that are _not_
compatible with Daily frequencies.
"""
return request.param
@pytest.fixture(
params=[
np.timedelta64(365, "D"),
pd.Timedelta(days=365).to_pytimedelta(),
pd.Timedelta(days=365),
]
+ _common_mismatch
)
def mismatched_freq(request):
"""
Several timedelta-like and DateOffset instances that are _not_
compatible with Monthly or Annual frequencies.
"""
return request.param
# ------------------------------------------------------------------
@pytest.fixture(params=[pd.Index, pd.Series, pd.DataFrame], ids=id_func)
def box(request):
"""
Several array-like containers that should have effectively identical
behavior with respect to arithmetic operations.
"""
return request.param
@pytest.fixture(
params=[
pd.Index,
pd.Series,
pytest.param(pd.DataFrame, marks=pytest.mark.xfail),
tm.to_array,
],
ids=id_func,
)
def box_df_fail(request):
"""
Fixture equivalent to `box` fixture but xfailing the DataFrame case.
"""
return request.param
@pytest.fixture(params=[pd.Index, pd.Series, pd.DataFrame, tm.to_array], ids=id_func)
def box_with_array(request):
"""
Fixture to test behavior for Index, Series, DataFrame, and pandas Array
classes
"""
return request.param
# alias so we can use the same fixture for multiple parameters in a test
box_with_array2 = box_with_array
```
#### File: arrays/categorical/test_replace.py
```python
import pytest
import pandas as pd
import pandas._testing as tm
@pytest.mark.parametrize(
"to_replace,value,expected,check_types,check_categorical",
[
# one-to-one
(1, 2, [2, 2, 3], True, True),
(1, 4, [4, 2, 3], True, True),
(4, 1, [1, 2, 3], True, True),
(5, 6, [1, 2, 3], True, True),
# many-to-one
([1], 2, [2, 2, 3], True, True),
([1, 2], 3, [3, 3, 3], True, True),
([1, 2], 4, [4, 4, 3], True, True),
((1, 2, 4), 5, [5, 5, 3], True, True),
((5, 6), 2, [1, 2, 3], True, True),
# many-to-many, handled outside of Categorical and results in separate dtype
([1], [2], [2, 2, 3], False, False),
([1, 4], [5, 2], [5, 2, 3], False, False),
# check_categorical sorts categories, which crashes on mixed dtypes
(3, "4", [1, 2, "4"], True, False),
([1, 2, "3"], "5", ["5", "5", 3], True, False),
],
)
def test_replace(to_replace, value, expected, check_types, check_categorical):
# GH 31720
s = pd.Series([1, 2, 3], dtype="category")
result = s.replace(to_replace, value)
expected = pd.Series(expected, dtype="category")
s.replace(to_replace, value, inplace=True)
tm.assert_series_equal(
expected,
result,
check_dtype=check_types,
check_categorical=check_categorical,
check_category_order=False,
)
tm.assert_series_equal(
expected,
s,
check_dtype=check_types,
check_categorical=check_categorical,
check_category_order=False,
)
```
#### File: indexes/interval/test_base.py
```python
import numpy as np
import pytest
from pandas import IntervalIndex, Series, date_range
import pandas._testing as tm
from pandas.tests.indexes.common import Base
class TestBase(Base):
"""
Tests specific to the shared common index tests; unrelated tests should be placed
in test_interval.py or the specific test file (e.g. test_astype.py)
"""
_holder = IntervalIndex
@pytest.fixture
def indices(self):
return tm.makeIntervalIndex(10)
def create_index(self, closed="right"):
return IntervalIndex.from_breaks(range(11), closed=closed)
def test_equals(self, closed):
expected = IntervalIndex.from_breaks(np.arange(5), closed=closed)
assert expected.equals(expected)
assert expected.equals(expected.copy())
assert not expected.equals(expected.astype(object))
assert not expected.equals(np.array(expected))
assert not expected.equals(list(expected))
assert not expected.equals([1, 2])
assert not expected.equals(np.array([1, 2]))
assert not expected.equals(date_range("20130101", periods=2))
expected_name1 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name="foo"
)
expected_name2 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name="bar"
)
assert expected.equals(expected_name1)
assert expected_name1.equals(expected_name2)
for other_closed in {"left", "right", "both", "neither"} - {closed}:
expected_other_closed = IntervalIndex.from_breaks(
np.arange(5), closed=other_closed
)
assert not expected.equals(expected_other_closed)
def test_repr_max_seq_item_setting(self):
# override base test: not a valid repr as we use interval notation
pass
def test_repr_roundtrip(self):
# override base test: not a valid repr as we use interval notation
pass
def test_take(self, closed):
index = self.create_index(closed=closed)
result = index.take(range(10))
tm.assert_index_equal(result, index)
result = index.take([0, 0, 1])
expected = IntervalIndex.from_arrays([0, 0, 1], [1, 1, 2], closed=closed)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
def test_where(self, closed, klass):
idx = self.create_index(closed=closed)
cond = [True] * len(idx)
expected = idx
result = expected.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * len(idx[1:])
expected = IntervalIndex([np.nan] + idx[1:].tolist())
result = idx.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_getitem_2d_deprecated(self):
# GH#30588 multi-dim indexing is deprecated, but raising is also acceptable
idx = self.create_index()
with pytest.raises(ValueError, match="multi-dimensional indexing not allowed"):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
idx[:, None]
```
#### File: io/json/test_normalize.py
```python
import json
import numpy as np
import pytest
from pandas import DataFrame, Index, json_normalize
import pandas._testing as tm
from pandas.io.json._normalize import nested_to_record
@pytest.fixture
def deep_nested():
# deeply nested data
return [
{
"country": "USA",
"states": [
{
"name": "California",
"cities": [
{"name": "San Francisco", "pop": 12345},
{"name": "Los Angeles", "pop": 12346},
],
},
{
"name": "Ohio",
"cities": [
{"name": "Columbus", "pop": 1234},
{"name": "Cleveland", "pop": 1236},
],
},
],
},
{
"country": "Germany",
"states": [
{"name": "Bayern", "cities": [{"name": "Munich", "pop": 12347}]},
{
"name": "Nordrhein-Westfalen",
"cities": [
{"name": "Duesseldorf", "pop": 1238},
{"name": "Koeln", "pop": 1239},
],
},
],
},
]
@pytest.fixture
def state_data():
return [
{
"counties": [
{"name": "Dade", "population": 12345},
{"name": "Broward", "population": 40000},
{"name": "<NAME>", "population": 60000},
],
"info": {"governor": "<NAME>"},
"shortname": "FL",
"state": "Florida",
},
{
"counties": [
{"name": "Summit", "population": 1234},
{"name": "Cuyahoga", "population": 1337},
],
"info": {"governor": "<NAME>"},
"shortname": "OH",
"state": "Ohio",
},
]
@pytest.fixture
def author_missing_data():
return [
{"info": None},
{
"info": {"created_at": "11/08/1993", "last_updated": "26/05/2012"},
"author_name": {"first": "Jane", "last_name": "Doe"},
},
]
@pytest.fixture
def missing_metadata():
return [
{
"name": "Alice",
"addresses": [
{
"number": 9562,
"street": "Morris St.",
"city": "Massillon",
"state": "OH",
"zip": 44646,
}
],
},
{
"addresses": [
{
"number": 8449,
"street": "Spring St.",
"city": "Elizabethton",
"state": "TN",
"zip": 37643,
}
]
},
]
@pytest.fixture
def max_level_test_input_data():
"""
input data to test json_normalize with max_level param
"""
return [
{
"CreatedBy": {"Name": "User001"},
"Lookup": {
"TextField": "Some text",
"UserField": {"Id": "ID001", "Name": "Name001"},
},
"Image": {"a": "b"},
}
]
class TestJSONNormalize:
def test_simple_records(self):
recs = [
{"a": 1, "b": 2, "c": 3},
{"a": 4, "b": 5, "c": 6},
{"a": 7, "b": 8, "c": 9},
{"a": 10, "b": 11, "c": 12},
]
result = json_normalize(recs)
expected = DataFrame(recs)
tm.assert_frame_equal(result, expected)
def test_simple_normalize(self, state_data):
result = json_normalize(state_data[0], "counties")
expected = DataFrame(state_data[0]["counties"])
tm.assert_frame_equal(result, expected)
result = json_normalize(state_data, "counties")
expected = []
for rec in state_data:
expected.extend(rec["counties"])
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
result = json_normalize(state_data, "counties", meta="state")
expected["state"] = np.array(["Florida", "Ohio"]).repeat([3, 2])
tm.assert_frame_equal(result, expected)
def test_empty_array(self):
result = json_normalize([])
expected = DataFrame()
tm.assert_frame_equal(result, expected)
def test_simple_normalize_with_separator(self, deep_nested):
# GH 14883
result = json_normalize({"A": {"A": 1, "B": 2}})
expected = DataFrame([[1, 2]], columns=["A.A", "A.B"])
tm.assert_frame_equal(result.reindex_like(expected), expected)
result = json_normalize({"A": {"A": 1, "B": 2}}, sep="_")
expected = DataFrame([[1, 2]], columns=["A_A", "A_B"])
tm.assert_frame_equal(result.reindex_like(expected), expected)
result = json_normalize({"A": {"A": 1, "B": 2}}, sep="\u03c3")
expected = DataFrame([[1, 2]], columns=["A\u03c3A", "A\u03c3B"])
tm.assert_frame_equal(result.reindex_like(expected), expected)
result = json_normalize(
deep_nested,
["states", "cities"],
meta=["country", ["states", "name"]],
sep="_",
)
expected = Index(["name", "pop", "country", "states_name"]).sort_values()
assert result.columns.sort_values().equals(expected)
def test_value_array_record_prefix(self):
# GH 21536
result = json_normalize({"A": [1, 2]}, "A", record_prefix="Prefix.")
expected = DataFrame([[1], [2]], columns=["Prefix.0"])
tm.assert_frame_equal(result, expected)
def test_nested_object_record_path(self):
# GH 22706
data = {
"state": "Florida",
"info": {
"governor": "<NAME>",
"counties": [
{"name": "Dade", "population": 12345},
{"name": "Broward", "population": 40000},
{"name": "<NAME>", "population": 60000},
],
},
}
result = json_normalize(data, record_path=["info", "counties"])
expected = DataFrame(
[["Dade", 12345], ["Broward", 40000], ["<NAME>", 60000]],
columns=["name", "population"],
)
tm.assert_frame_equal(result, expected)
def test_more_deeply_nested(self, deep_nested):
result = json_normalize(
deep_nested, ["states", "cities"], meta=["country", ["states", "name"]]
)
ex_data = {
"country": ["USA"] * 4 + ["Germany"] * 3,
"states.name": [
"California",
"California",
"Ohio",
"Ohio",
"Bayern",
"Nordrhein-Westfalen",
"Nordrhein-Westfalen",
],
"name": [
"<NAME>",
"Los Angeles",
"Columbus",
"Cleveland",
"Munich",
"Duesseldorf",
"Koeln",
],
"pop": [12345, 12346, 1234, 1236, 12347, 1238, 1239],
}
expected = DataFrame(ex_data, columns=result.columns)
tm.assert_frame_equal(result, expected)
def test_shallow_nested(self):
data = [
{
"state": "Florida",
"shortname": "FL",
"info": {"governor": "<NAME>"},
"counties": [
{"name": "Dade", "population": 12345},
{"name": "Broward", "population": 40000},
{"name": "<NAME>", "population": 60000},
],
},
{
"state": "Ohio",
"shortname": "OH",
"info": {"governor": "<NAME>"},
"counties": [
{"name": "Summit", "population": 1234},
{"name": "Cuyahoga", "population": 1337},
],
},
]
result = json_normalize(
data, "counties", ["state", "shortname", ["info", "governor"]]
)
ex_data = {
"name": ["Dade", "Broward", "<NAME>", "Summit", "Cuyahoga"],
"state": ["Florida"] * 3 + ["Ohio"] * 2,
"shortname": ["FL", "FL", "FL", "OH", "OH"],
"info.governor": ["<NAME>"] * 3 + ["<NAME>"] * 2,
"population": [12345, 40000, 60000, 1234, 1337],
}
expected = DataFrame(ex_data, columns=result.columns)
tm.assert_frame_equal(result, expected)
def test_meta_name_conflict(self):
data = [
{
"foo": "hello",
"bar": "there",
"data": [
{"foo": "something", "bar": "else"},
{"foo": "something2", "bar": "else2"},
],
}
]
msg = r"Conflicting metadata name (foo|bar), need distinguishing prefix"
with pytest.raises(ValueError, match=msg):
json_normalize(data, "data", meta=["foo", "bar"])
result = json_normalize(data, "data", meta=["foo", "bar"], meta_prefix="meta")
for val in ["metafoo", "metabar", "foo", "bar"]:
assert val in result
def test_meta_parameter_not_modified(self):
# GH 18610
data = [
{
"foo": "hello",
"bar": "there",
"data": [
{"foo": "something", "bar": "else"},
{"foo": "something2", "bar": "else2"},
],
}
]
COLUMNS = ["foo", "bar"]
result = json_normalize(data, "data", meta=COLUMNS, meta_prefix="meta")
assert COLUMNS == ["foo", "bar"]
for val in ["metafoo", "metabar", "foo", "bar"]:
assert val in result
def test_record_prefix(self, state_data):
result = json_normalize(state_data[0], "counties")
expected = DataFrame(state_data[0]["counties"])
tm.assert_frame_equal(result, expected)
result = json_normalize(
state_data, "counties", meta="state", record_prefix="county_"
)
expected = []
for rec in state_data:
expected.extend(rec["counties"])
expected = DataFrame(expected)
expected = expected.rename(columns=lambda x: "county_" + x)
expected["state"] = np.array(["Florida", "Ohio"]).repeat([3, 2])
tm.assert_frame_equal(result, expected)
def test_non_ascii_key(self):
testjson = (
b'[{"\xc3\x9cnic\xc3\xb8de":0,"sub":{"A":1, "B":2}},'
+ b'{"\xc3\x9cnic\xc3\xb8de":1,"sub":{"A":3, "B":4}}]'
).decode("utf8")
testdata = {
b"\xc3\x9cnic\xc3\xb8de".decode("utf8"): [0, 1],
"sub.A": [1, 3],
"sub.B": [2, 4],
}
expected = DataFrame(testdata)
result = json_normalize(json.loads(testjson))
tm.assert_frame_equal(result, expected)
def test_missing_field(self, author_missing_data):
# GH20030:
result = json_normalize(author_missing_data)
ex_data = [
{
"info": np.nan,
"info.created_at": np.nan,
"info.last_updated": np.nan,
"author_name.first": np.nan,
"author_name.last_name": np.nan,
},
{
"info": None,
"info.created_at": "11/08/1993",
"info.last_updated": "26/05/2012",
"author_name.first": "Jane",
"author_name.last_name": "Doe",
},
]
expected = DataFrame(ex_data)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"max_level,expected",
[
(
0,
[
{
"TextField": "Some text",
"UserField": {"Id": "ID001", "Name": "Name001"},
"CreatedBy": {"Name": "User001"},
"Image": {"a": "b"},
},
{
"TextField": "Some text",
"UserField": {"Id": "ID001", "Name": "Name001"},
"CreatedBy": {"Name": "User001"},
"Image": {"a": "b"},
},
],
),
(
1,
[
{
"TextField": "Some text",
"UserField.Id": "ID001",
"UserField.Name": "Name001",
"CreatedBy": {"Name": "User001"},
"Image": {"a": "b"},
},
{
"TextField": "Some text",
"UserField.Id": "ID001",
"UserField.Name": "Name001",
"CreatedBy": {"Name": "User001"},
"Image": {"a": "b"},
},
],
),
],
)
def test_max_level_with_records_path(self, max_level, expected):
# GH23843: Enhanced JSON normalize
test_input = [
{
"CreatedBy": {"Name": "User001"},
"Lookup": [
{
"TextField": "Some text",
"UserField": {"Id": "ID001", "Name": "Name001"},
},
{
"TextField": "Some text",
"UserField": {"Id": "ID001", "Name": "Name001"},
},
],
"Image": {"a": "b"},
"tags": [
{"foo": "something", "bar": "else"},
{"foo": "something2", "bar": "else2"},
],
}
]
result = json_normalize(
test_input,
record_path=["Lookup"],
meta=[["CreatedBy"], ["Image"]],
max_level=max_level,
)
expected_df = DataFrame(data=expected, columns=result.columns.values)
tm.assert_equal(expected_df, result)
def test_nested_flattening_consistent(self):
# see gh-21537
df1 = json_normalize([{"A": {"B": 1}}])
df2 = json_normalize({"dummy": [{"A": {"B": 1}}]}, "dummy")
# They should be the same.
tm.assert_frame_equal(df1, df2)
def test_nonetype_record_path(self, nulls_fixture):
# see gh-30148
# should not raise TypeError
result = json_normalize(
[
{"state": "Texas", "info": nulls_fixture},
{"state": "Florida", "info": [{"i": 2}]},
],
record_path=["info"],
)
expected = DataFrame({"i": 2}, index=[0])
tm.assert_equal(result, expected)
def test_non_iterable_record_path_errors(self):
# see gh-30148
test_input = {"state": "Texas", "info": 1}
test_path = "info"
msg = (
f"{test_input} has non iterable value 1 for path {test_path}. "
"Must be iterable or null."
)
with pytest.raises(TypeError, match=msg):
json_normalize([test_input], record_path=[test_path])
def test_meta_non_iterable(self):
# GH 31507
data = """[{"id": 99, "data": [{"one": 1, "two": 2}]}]"""
result = json_normalize(json.loads(data), record_path=["data"], meta=["id"])
expected = DataFrame(
{"one": [1], "two": [2], "id": np.array([99], dtype=object)}
)
tm.assert_frame_equal(result, expected)
class TestNestedToRecord:
def test_flat_stays_flat(self):
recs = [dict(flat1=1, flat2=2), dict(flat1=3, flat2=4)]
result = nested_to_record(recs)
expected = recs
assert result == expected
def test_one_level_deep_flattens(self):
data = dict(flat1=1, dict1=dict(c=1, d=2))
result = nested_to_record(data)
expected = {"dict1.c": 1, "dict1.d": 2, "flat1": 1}
assert result == expected
def test_nested_flattens(self):
data = dict(flat1=1, dict1=dict(c=1, d=2), nested=dict(e=dict(c=1, d=2), d=2))
result = nested_to_record(data)
expected = {
"dict1.c": 1,
"dict1.d": 2,
"flat1": 1,
"nested.d": 2,
"nested.e.c": 1,
"nested.e.d": 2,
}
assert result == expected
def test_json_normalize_errors(self, missing_metadata):
# GH14583:
# If meta keys are not always present a new option to set
# errors='ignore' has been implemented
msg = "Try running with errors='ignore' as key 'name' is not always present"
with pytest.raises(KeyError, match=msg):
json_normalize(
data=missing_metadata,
record_path="addresses",
meta="name",
errors="raise",
)
def test_missing_meta(self, missing_metadata):
# GH25468
# If metadata is nullable with errors set to ignore, the null values
# should be numpy.nan values
result = json_normalize(
data=missing_metadata, record_path="addresses", meta="name", errors="ignore"
)
ex_data = [
[9562, "Morris St.", "Massillon", "OH", 44646, "Alice"],
[8449, "Spring St.", "Elizabethton", "TN", 37643, np.nan],
]
columns = ["city", "number", "state", "street", "zip", "name"]
columns = ["number", "street", "city", "state", "zip", "name"]
expected = DataFrame(ex_data, columns=columns)
tm.assert_frame_equal(result, expected)
def test_donot_drop_nonevalues(self):
# GH21356
data = [
{"info": None, "author_name": {"first": "Smith", "last_name": "Appleseed"}},
{
"info": {"created_at": "11/08/1993", "last_updated": "26/05/2012"},
"author_name": {"first": "Jane", "last_name": "Doe"},
},
]
result = nested_to_record(data)
expected = [
{
"info": None,
"author_name.first": "Smith",
"author_name.last_name": "Appleseed",
},
{
"author_name.first": "Jane",
"author_name.last_name": "Doe",
"info.created_at": "11/08/1993",
"info.last_updated": "26/05/2012",
},
]
assert result == expected
def test_nonetype_top_level_bottom_level(self):
# GH21158: If inner level json has a key with a null value
# make sure it does not do a new_d.pop twice and except
data = {
"id": None,
"location": {
"country": {
"state": {
"id": None,
"town.info": {
"id": None,
"region": None,
"x": 49.151580810546875,
"y": -33.148521423339844,
"z": 27.572303771972656,
},
}
}
},
}
result = nested_to_record(data)
expected = {
"id": None,
"location.country.state.id": None,
"location.country.state.town.info.id": None,
"location.country.state.town.info.region": None,
"location.country.state.town.info.x": 49.151580810546875,
"location.country.state.town.info.y": -33.148521423339844,
"location.country.state.town.info.z": 27.572303771972656,
}
assert result == expected
def test_nonetype_multiple_levels(self):
# GH21158: If inner level json has a key with a null value
# make sure it does not do a new_d.pop twice and except
data = {
"id": None,
"location": {
"id": None,
"country": {
"id": None,
"state": {
"id": None,
"town.info": {
"region": None,
"x": 49.151580810546875,
"y": -33.148521423339844,
"z": 27.572303771972656,
},
},
},
},
}
result = nested_to_record(data)
expected = {
"id": None,
"location.id": None,
"location.country.id": None,
"location.country.state.id": None,
"location.country.state.town.info.region": None,
"location.country.state.town.info.x": 49.151580810546875,
"location.country.state.town.info.y": -33.148521423339844,
"location.country.state.town.info.z": 27.572303771972656,
}
assert result == expected
@pytest.mark.parametrize(
"max_level, expected",
[
(
None,
[
{
"CreatedBy.Name": "User001",
"Lookup.TextField": "Some text",
"Lookup.UserField.Id": "ID001",
"Lookup.UserField.Name": "Name001",
"Image.a": "b",
}
],
),
(
0,
[
{
"CreatedBy": {"Name": "User001"},
"Lookup": {
"TextField": "Some text",
"UserField": {"Id": "ID001", "Name": "Name001"},
},
"Image": {"a": "b"},
}
],
),
(
1,
[
{
"CreatedBy.Name": "User001",
"Lookup.TextField": "Some text",
"Lookup.UserField": {"Id": "ID001", "Name": "Name001"},
"Image.a": "b",
}
],
),
],
)
def test_with_max_level(self, max_level, expected, max_level_test_input_data):
# GH23843: Enhanced JSON normalize
output = nested_to_record(max_level_test_input_data, max_level=max_level)
assert output == expected
def test_with_large_max_level(self):
# GH23843: Enhanced JSON normalize
max_level = 100
input_data = [
{
"CreatedBy": {
"user": {
"name": {"firstname": "Leo", "LastName": "Thomson"},
"family_tree": {
"father": {
"name": "Father001",
"father": {
"Name": "Father002",
"father": {
"name": "Father003",
"father": {"Name": "Father004"},
},
},
}
},
}
}
}
]
expected = [
{
"CreatedBy.user.name.firstname": "Leo",
"CreatedBy.user.name.LastName": "Thomson",
"CreatedBy.user.family_tree.father.name": "Father001",
"CreatedBy.user.family_tree.father.father.Name": "Father002",
"CreatedBy.user.family_tree.father.father.father.name": "Father003",
"CreatedBy.user.family_tree.father.father.father.father.Name": "Father004", # noqa: E501
}
]
output = nested_to_record(input_data, max_level=max_level)
assert output == expected
def test_deprecated_import(self):
with tm.assert_produces_warning(FutureWarning):
from pandas.io.json import json_normalize
recs = [{"a": 1, "b": 2, "c": 3}, {"a": 4, "b": 5, "c": 6}]
json_normalize(recs)
```
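The record_path/meta combination exercised above boils down to calls like this:
```python
# Minimal illustration of the record_path / meta combination tested above.
import pandas as pd

data = [{"state": "Florida", "counties": [{"name": "Dade", "population": 12345}]}]
pd.json_normalize(data, record_path="counties", meta="state")
#    name  population    state
# 0  Dade       12345  Florida
```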
#### File: scalar/timedelta/test_constructors.py
```python
from datetime import timedelta
import numpy as np
import pytest
from pandas import Timedelta, offsets, to_timedelta
def test_construction():
expected = np.timedelta64(10, "D").astype("m8[ns]").view("i8")
assert Timedelta(10, unit="d").value == expected
assert Timedelta(10.0, unit="d").value == expected
assert Timedelta("10 days").value == expected
assert Timedelta(days=10).value == expected
assert Timedelta(days=10.0).value == expected
expected += np.timedelta64(10, "s").astype("m8[ns]").view("i8")
assert Timedelta("10 days 00:00:10").value == expected
assert Timedelta(days=10, seconds=10).value == expected
assert Timedelta(days=10, milliseconds=10 * 1000).value == expected
assert Timedelta(days=10, microseconds=10 * 1000 * 1000).value == expected
# rounding cases
assert Timedelta(82739999850000).value == 82739999850000
assert "0 days 22:58:59.999850" in str(Timedelta(82739999850000))
assert Timedelta(123072001000000).value == 123072001000000
assert "1 days 10:11:12.001" in str(Timedelta(123072001000000))
# string conversion with/without leading zero
# GH#9570
assert Timedelta("0:00:00") == timedelta(hours=0)
assert Timedelta("00:00:00") == timedelta(hours=0)
assert Timedelta("-1:00:00") == -timedelta(hours=1)
assert Timedelta("-01:00:00") == -timedelta(hours=1)
# more strings & abbrevs
# GH#8190
assert Timedelta("1 h") == timedelta(hours=1)
assert Timedelta("1 hour") == timedelta(hours=1)
assert Timedelta("1 hr") == timedelta(hours=1)
assert Timedelta("1 hours") == timedelta(hours=1)
assert Timedelta("-1 hours") == -timedelta(hours=1)
assert Timedelta("1 m") == timedelta(minutes=1)
assert Timedelta("1.5 m") == timedelta(seconds=90)
assert Timedelta("1 minute") == timedelta(minutes=1)
assert Timedelta("1 minutes") == timedelta(minutes=1)
assert Timedelta("1 s") == timedelta(seconds=1)
assert Timedelta("1 second") == timedelta(seconds=1)
assert Timedelta("1 seconds") == timedelta(seconds=1)
assert Timedelta("1 ms") == timedelta(milliseconds=1)
assert Timedelta("1 milli") == timedelta(milliseconds=1)
assert Timedelta("1 millisecond") == timedelta(milliseconds=1)
assert Timedelta("1 us") == timedelta(microseconds=1)
assert Timedelta("1 micros") == timedelta(microseconds=1)
assert Timedelta("1 microsecond") == timedelta(microseconds=1)
assert Timedelta("1.5 microsecond") == Timedelta("00:00:00.000001500")
assert Timedelta("1 ns") == Timedelta("00:00:00.000000001")
assert Timedelta("1 nano") == Timedelta("00:00:00.000000001")
assert Timedelta("1 nanosecond") == Timedelta("00:00:00.000000001")
# combos
assert Timedelta("10 days 1 hour") == timedelta(days=10, hours=1)
assert Timedelta("10 days 1 h") == timedelta(days=10, hours=1)
assert Timedelta("10 days 1 h 1m 1s") == timedelta(
days=10, hours=1, minutes=1, seconds=1
)
assert Timedelta("-10 days 1 h 1m 1s") == -timedelta(
days=10, hours=1, minutes=1, seconds=1
)
assert Timedelta("-10 days 1 h 1m 1s") == -timedelta(
days=10, hours=1, minutes=1, seconds=1
)
assert Timedelta("-10 days 1 h 1m 1s 3us") == -timedelta(
days=10, hours=1, minutes=1, seconds=1, microseconds=3
)
assert Timedelta("-10 days 1 h 1.5m 1s 3us") == -timedelta(
days=10, hours=1, minutes=1, seconds=31, microseconds=3
)
# Currently invalid as it has a - on the hh:mm:ss part
# (only allowed on the days)
with pytest.raises(ValueError):
Timedelta("-10 days -1 h 1.5m 1s 3us")
# only leading neg signs are allowed
with pytest.raises(ValueError):
Timedelta("10 days -1 h 1.5m 1s 3us")
# no units specified
with pytest.raises(ValueError):
Timedelta("3.1415")
# invalid construction
with pytest.raises(ValueError, match="cannot construct a Timedelta"):
Timedelta()
with pytest.raises(ValueError, match="unit abbreviation w/o a number"):
Timedelta("foo")
msg = (
"cannot construct a Timedelta from "
"the passed arguments, allowed keywords are "
)
with pytest.raises(ValueError, match=msg):
Timedelta(day=10)
# floats
expected = np.timedelta64(10, "s").astype("m8[ns]").view("i8") + np.timedelta64(
500, "ms"
).astype("m8[ns]").view("i8")
assert Timedelta(10.5, unit="s").value == expected
# offset
assert to_timedelta(offsets.Hour(2)) == Timedelta(hours=2)
assert Timedelta(offsets.Hour(2)) == Timedelta(hours=2)
assert Timedelta(offsets.Second(2)) == Timedelta(seconds=2)
# GH#11995: unicode
expected = Timedelta("1H")
result = Timedelta("1H")
assert result == expected
assert to_timedelta(offsets.Hour(2)) == Timedelta("0 days, 02:00:00")
with pytest.raises(ValueError):
Timedelta("foo bar")
@pytest.mark.parametrize(
"item",
list(
{
"days": "D",
"seconds": "s",
"microseconds": "us",
"milliseconds": "ms",
"minutes": "m",
"hours": "h",
"weeks": "W",
}.items()
),
)
@pytest.mark.parametrize(
"npdtype", [np.int64, np.int32, np.int16, np.float64, np.float32, np.float16]
)
def test_td_construction_with_np_dtypes(npdtype, item):
# GH#8757: test construction with np dtypes
pykwarg, npkwarg = item
expected = np.timedelta64(1, npkwarg).astype("m8[ns]").view("i8")
assert Timedelta(**{pykwarg: npdtype(1)}).value == expected
@pytest.mark.parametrize(
"val",
[
"1s",
"-1s",
"1us",
"-1us",
"1 day",
"-1 day",
"-23:59:59.999999",
"-1 days +23:59:59.999999",
"-1ns",
"1ns",
"-23:59:59.999999999",
],
)
def test_td_from_repr_roundtrip(val):
# round-trip both for string and value
td = Timedelta(val)
assert Timedelta(td.value) == td
# str does not normally display nanos
if not td.nanoseconds:
assert Timedelta(str(td)) == td
assert Timedelta(td._repr_base(format="all")) == td
def test_overflow_on_construction():
# GH#3374
value = Timedelta("1day").value * 20169940
with pytest.raises(OverflowError):
Timedelta(value)
# xref GH#17637
with pytest.raises(OverflowError):
Timedelta(7 * 19999, unit="D")
with pytest.raises(OverflowError):
Timedelta(timedelta(days=13 * 19999))
@pytest.mark.parametrize(
"fmt,exp",
[
(
"P6DT0H50M3.010010012S",
Timedelta(
days=6,
minutes=50,
seconds=3,
milliseconds=10,
microseconds=10,
nanoseconds=12,
),
),
(
"P-6DT0H50M3.010010012S",
Timedelta(
days=-6,
minutes=50,
seconds=3,
milliseconds=10,
microseconds=10,
nanoseconds=12,
),
),
("P4DT12H30M5S", Timedelta(days=4, hours=12, minutes=30, seconds=5)),
("P0DT0H0M0.000000123S", Timedelta(nanoseconds=123)),
("P0DT0H0M0.00001S", Timedelta(microseconds=10)),
("P0DT0H0M0.001S", Timedelta(milliseconds=1)),
("P0DT0H1M0S", Timedelta(minutes=1)),
("P1DT25H61M61S", Timedelta(days=1, hours=25, minutes=61, seconds=61)),
],
)
def test_iso_constructor(fmt, exp):
assert Timedelta(fmt) == exp
@pytest.mark.parametrize(
"fmt",
[
"PPPPPPPPPPPP",
"PDTHMS",
"P0DT999H999M999S",
"P1DT0H0M0.0000000000000S",
"P1DT0H0M00000000000S",
"P1DT0H0M0.S",
],
)
def test_iso_constructor_raises(fmt):
msg = "Invalid ISO 8601 Duration format - {}".format(fmt)
with pytest.raises(ValueError, match=msg):
Timedelta(fmt)
@pytest.mark.parametrize(
"constructed_td, conversion",
[
(Timedelta(nanoseconds=100), "100ns"),
(
Timedelta(
days=1,
hours=1,
minutes=1,
weeks=1,
seconds=1,
milliseconds=1,
microseconds=1,
nanoseconds=1,
),
694861001001001,
),
(Timedelta(microseconds=1) + Timedelta(nanoseconds=1), "1us1ns"),
(Timedelta(microseconds=1) - Timedelta(nanoseconds=1), "999ns"),
(Timedelta(microseconds=1) + 5 * Timedelta(nanoseconds=-2), "990ns"),
],
)
def test_td_constructor_on_nanoseconds(constructed_td, conversion):
# GH#9273
assert constructed_td == Timedelta(conversion)
def test_td_constructor_value_error():
with pytest.raises(TypeError):
Timedelta(nanoseconds="abc")
```
#### File: series/methods/test_round.py
```python
import numpy as np
import pytest
from pandas import Series
import pandas._testing as tm
class TestSeriesRound:
def test_round(self, datetime_series):
datetime_series.index.name = "index_name"
result = datetime_series.round(2)
expected = Series(
np.round(datetime_series.values, 2), index=datetime_series.index, name="ts"
)
tm.assert_series_equal(result, expected)
assert result.name == datetime_series.name
def test_round_numpy(self):
# See GH#12600
ser = Series([1.53, 1.36, 0.06])
out = np.round(ser, decimals=0)
expected = Series([2.0, 1.0, 0.0])
tm.assert_series_equal(out, expected)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.round(ser, decimals=0, out=ser)
def test_round_numpy_with_nan(self):
# See GH#14197
ser = Series([1.53, np.nan, 0.06])
with tm.assert_produces_warning(None):
result = ser.round()
expected = Series([2.0, np.nan, 0.0])
tm.assert_series_equal(result, expected)
def test_round_builtin(self):
ser = Series([1.123, 2.123, 3.123], index=range(3))
result = round(ser)
expected_rounded0 = Series([1.0, 2.0, 3.0], index=range(3))
tm.assert_series_equal(result, expected_rounded0)
decimals = 2
expected_rounded = Series([1.12, 2.12, 3.12], index=range(3))
result = round(ser, decimals)
tm.assert_series_equal(result, expected_rounded)
```
#### File: tests/series/test_cumulative.py
```python
from itertools import product
import numpy as np
import pytest
import pandas as pd
from pandas import _is_numpy_dev
import pandas._testing as tm
def _check_accum_op(name, series, check_dtype=True):
func = getattr(np, name)
tm.assert_numpy_array_equal(
func(series).values, func(np.array(series)), check_dtype=check_dtype,
)
# with missing values
ts = series.copy()
ts[::2] = np.NaN
result = func(ts)[1::2]
expected = func(np.array(ts.dropna()))
tm.assert_numpy_array_equal(result.values, expected, check_dtype=False)
class TestSeriesCumulativeOps:
def test_cumsum(self, datetime_series):
_check_accum_op("cumsum", datetime_series)
def test_cumprod(self, datetime_series):
_check_accum_op("cumprod", datetime_series)
@pytest.mark.xfail(
_is_numpy_dev,
reason="https://github.com/pandas-dev/pandas/issues/31992",
strict=False,
)
def test_cummin(self, datetime_series):
tm.assert_numpy_array_equal(
datetime_series.cummin().values,
np.minimum.accumulate(np.array(datetime_series)),
)
ts = datetime_series.copy()
ts[::2] = np.NaN
result = ts.cummin()[1::2]
expected = np.minimum.accumulate(ts.dropna())
tm.assert_series_equal(result, expected)
@pytest.mark.xfail(
_is_numpy_dev,
reason="https://github.com/pandas-dev/pandas/issues/31992",
strict=False,
)
def test_cummax(self, datetime_series):
tm.assert_numpy_array_equal(
datetime_series.cummax().values,
np.maximum.accumulate(np.array(datetime_series)),
)
ts = datetime_series.copy()
ts[::2] = np.NaN
result = ts.cummax()[1::2]
expected = np.maximum.accumulate(ts.dropna())
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("tz", [None, "US/Pacific"])
def test_cummin_datetime64(self, tz):
s = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "NaT", "2000-1-1", "NaT", "2000-1-3"]
).tz_localize(tz)
)
expected = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "NaT", "2000-1-1", "NaT", "2000-1-1"]
).tz_localize(tz)
)
result = s.cummin(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "2000-1-2", "2000-1-1", "2000-1-1", "2000-1-1"]
).tz_localize(tz)
)
result = s.cummin(skipna=False)
tm.assert_series_equal(expected, result)
@pytest.mark.parametrize("tz", [None, "US/Pacific"])
def test_cummax_datetime64(self, tz):
s = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "NaT", "2000-1-1", "NaT", "2000-1-3"]
).tz_localize(tz)
)
expected = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "NaT", "2000-1-2", "NaT", "2000-1-3"]
).tz_localize(tz)
)
result = s.cummax(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "2000-1-2", "2000-1-2", "2000-1-2", "2000-1-3"]
).tz_localize(tz)
)
result = s.cummax(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummin_timedelta64(self):
s = pd.Series(pd.to_timedelta(["NaT", "2 min", "NaT", "1 min", "NaT", "3 min"]))
expected = pd.Series(
pd.to_timedelta(["NaT", "2 min", "NaT", "1 min", "NaT", "1 min"])
)
result = s.cummin(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(
pd.to_timedelta(["NaT", "2 min", "2 min", "1 min", "1 min", "1 min"])
)
result = s.cummin(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummax_timedelta64(self):
s = pd.Series(pd.to_timedelta(["NaT", "2 min", "NaT", "1 min", "NaT", "3 min"]))
expected = pd.Series(
pd.to_timedelta(["NaT", "2 min", "NaT", "2 min", "NaT", "3 min"])
)
result = s.cummax(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(
pd.to_timedelta(["NaT", "2 min", "2 min", "2 min", "2 min", "3 min"])
)
result = s.cummax(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummethods_bool(self):
# GH#6270
a = pd.Series([False, False, False, True, True, False, False])
b = ~a
c = pd.Series([False] * len(b))
d = ~c
methods = {
"cumsum": np.cumsum,
"cumprod": np.cumprod,
"cummin": np.minimum.accumulate,
"cummax": np.maximum.accumulate,
}
args = product((a, b, c, d), methods)
for s, method in args:
expected = pd.Series(methods[method](s.values))
result = getattr(s, method)()
tm.assert_series_equal(result, expected)
e = pd.Series([False, True, np.nan, False])
cse = pd.Series([0, 1, np.nan, 1], dtype=object)
cpe = pd.Series([False, 0, np.nan, 0])
cmin = pd.Series([False, False, np.nan, False])
cmax = pd.Series([False, True, np.nan, True])
expecteds = {"cumsum": cse, "cumprod": cpe, "cummin": cmin, "cummax": cmax}
for method in methods:
res = getattr(e, method)()
tm.assert_series_equal(res, expecteds[method])
```
#### File: tests/tslibs/test_normalize_date.py
```python
from datetime import date, datetime
import pytest
from pandas._libs import tslibs
from pandas._libs.tslibs.timestamps import Timestamp
@pytest.mark.parametrize(
"value,expected",
[
(date(2012, 9, 7), datetime(2012, 9, 7)),
(datetime(2012, 9, 7, 12), datetime(2012, 9, 7)),
(datetime(2007, 10, 1, 1, 12, 5, 10), datetime(2007, 10, 1)),
],
)
def test_normalize_date(value, expected):
result = tslibs.normalize_date(value)
assert result == expected
class SubDatetime(datetime):
pass
@pytest.mark.parametrize(
"dt, expected",
[
(Timestamp(2000, 1, 1, 1), Timestamp(2000, 1, 1, 0)),
(datetime(2000, 1, 1, 1), datetime(2000, 1, 1, 0)),
(SubDatetime(2000, 1, 1, 1), SubDatetime(2000, 1, 1, 0)),
],
)
def test_normalize_date_sub_types(dt, expected):
# GH 25851
# ensure that subclassed datetime works with
# normalize_date
result = tslibs.normalize_date(dt)
assert result == expected
```
#### File: tests/util/test_hashing.py
```python
import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas._testing as tm
from pandas.core.util.hashing import _hash_scalar, hash_tuple, hash_tuples
from pandas.util import hash_array, hash_pandas_object
@pytest.fixture(
params=[
Series([1, 2, 3] * 3, dtype="int32"),
Series([None, 2.5, 3.5] * 3, dtype="float32"),
Series(["a", "b", "c"] * 3, dtype="category"),
Series(["d", "e", "f"] * 3),
Series([True, False, True] * 3),
Series(pd.date_range("20130101", periods=9)),
Series(pd.date_range("20130101", periods=9, tz="US/Eastern")),
Series(pd.timedelta_range("2000", periods=9)),
]
)
def series(request):
return request.param
@pytest.fixture(params=[True, False])
def index(request):
return request.param
def _check_equal(obj, **kwargs):
"""
    Check that hashing an object produces the same value each time.
Parameters
----------
obj : object
The object to hash.
kwargs : kwargs
Keyword arguments to pass to the hashing function.
"""
a = hash_pandas_object(obj, **kwargs)
b = hash_pandas_object(obj, **kwargs)
tm.assert_series_equal(a, b)
def _check_not_equal_with_index(obj):
"""
Check the hash of an object with and without its index is not the same.
Parameters
----------
obj : object
The object to hash.
"""
if not isinstance(obj, Index):
a = hash_pandas_object(obj, index=True)
b = hash_pandas_object(obj, index=False)
if len(obj):
assert not (a == b).all()
def test_consistency():
# Check that our hash doesn't change because of a mistake
# in the actual code; this is the ground truth.
result = hash_pandas_object(Index(["foo", "bar", "baz"]))
expected = Series(
np.array(
[3600424527151052760, 1374399572096150070, 477881037637427054],
dtype="uint64",
),
index=["foo", "bar", "baz"],
)
tm.assert_series_equal(result, expected)
def test_hash_array(series):
arr = series.values
tm.assert_numpy_array_equal(hash_array(arr), hash_array(arr))
@pytest.mark.parametrize(
"arr2", [np.array([3, 4, "All"]), np.array([3, 4, "All"], dtype=object)]
)
def test_hash_array_mixed(arr2):
result1 = hash_array(np.array(["3", "4", "All"]))
result2 = hash_array(arr2)
tm.assert_numpy_array_equal(result1, result2)
@pytest.mark.parametrize("val", [5, "foo", pd.Timestamp("20130101")])
def test_hash_array_errors(val):
msg = "must pass a ndarray-like"
with pytest.raises(TypeError, match=msg):
hash_array(val)
def test_hash_tuples():
tuples = [(1, "one"), (1, "two"), (2, "one")]
result = hash_tuples(tuples)
expected = hash_pandas_object(MultiIndex.from_tuples(tuples)).values
tm.assert_numpy_array_equal(result, expected)
result = hash_tuples(tuples[0])
assert result == expected[0]
@pytest.mark.parametrize(
"tup",
[(1, "one"), (1, np.nan), (1.0, pd.NaT, "A"), ("A", pd.Timestamp("2012-01-01"))],
)
def test_hash_tuple(tup):
# Test equivalence between
# hash_tuples and hash_tuple.
result = hash_tuple(tup)
expected = hash_tuples([tup])[0]
assert result == expected
@pytest.mark.parametrize(
"val",
[
1,
1.4,
"A",
b"A",
pd.Timestamp("2012-01-01"),
pd.Timestamp("2012-01-01", tz="Europe/Brussels"),
datetime.datetime(2012, 1, 1),
pd.Timestamp("2012-01-01", tz="EST").to_pydatetime(),
pd.Timedelta("1 days"),
datetime.timedelta(1),
pd.Period("2012-01-01", freq="D"),
pd.Interval(0, 1),
np.nan,
pd.NaT,
None,
],
)
def test_hash_scalar(val):
result = _hash_scalar(val)
expected = hash_array(np.array([val], dtype=object), categorize=True)
assert result[0] == expected[0]
@pytest.mark.parametrize("val", [5, "foo", pd.Timestamp("20130101")])
def test_hash_tuples_err(val):
msg = "must be convertible to a list-of-tuples"
with pytest.raises(TypeError, match=msg):
hash_tuples(val)
def test_multiindex_unique():
mi = MultiIndex.from_tuples([(118, 472), (236, 118), (51, 204), (102, 51)])
assert mi.is_unique is True
result = hash_pandas_object(mi)
assert result.is_unique is True
def test_multiindex_objects():
mi = MultiIndex(
levels=[["b", "d", "a"], [1, 2, 3]],
codes=[[0, 1, 0, 2], [2, 0, 0, 1]],
names=["col1", "col2"],
)
recons = mi._sort_levels_monotonic()
# These are equal.
assert mi.equals(recons)
assert Index(mi.values).equals(Index(recons.values))
# _hashed_values and hash_pandas_object(..., index=False) equivalency.
expected = hash_pandas_object(mi, index=False).values
result = mi._hashed_values
tm.assert_numpy_array_equal(result, expected)
expected = hash_pandas_object(recons, index=False).values
result = recons._hashed_values
tm.assert_numpy_array_equal(result, expected)
expected = mi._hashed_values
result = recons._hashed_values
# Values should match, but in different order.
tm.assert_numpy_array_equal(np.sort(result), np.sort(expected))
@pytest.mark.parametrize(
"obj",
[
Series([1, 2, 3]),
Series([1.0, 1.5, 3.2]),
Series([1.0, 1.5, np.nan]),
Series([1.0, 1.5, 3.2], index=[1.5, 1.1, 3.3]),
Series(["a", "b", "c"]),
Series(["a", np.nan, "c"]),
Series(["a", None, "c"]),
Series([True, False, True]),
Series(dtype=object),
Index([1, 2, 3]),
Index([True, False, True]),
DataFrame({"x": ["a", "b", "c"], "y": [1, 2, 3]}),
DataFrame(),
tm.makeMissingDataframe(),
tm.makeMixedDataFrame(),
tm.makeTimeDataFrame(),
tm.makeTimeSeries(),
tm.makeTimedeltaIndex(),
tm.makePeriodIndex(),
Series(tm.makePeriodIndex()),
Series(pd.date_range("20130101", periods=3, tz="US/Eastern")),
MultiIndex.from_product(
[range(5), ["foo", "bar", "baz"], pd.date_range("20130101", periods=2)]
),
MultiIndex.from_product([pd.CategoricalIndex(list("aabc")), range(3)]),
],
)
def test_hash_pandas_object(obj, index):
_check_equal(obj, index=index)
_check_not_equal_with_index(obj)
def test_hash_pandas_object2(series, index):
_check_equal(series, index=index)
_check_not_equal_with_index(series)
@pytest.mark.parametrize(
"obj", [Series([], dtype="float64"), Series([], dtype="object"), Index([])]
)
def test_hash_pandas_empty_object(obj, index):
# These are by-definition the same with
# or without the index as the data is empty.
_check_equal(obj, index=index)
@pytest.mark.parametrize(
"s1",
[
Series(["a", "b", "c", "d"]),
Series([1000, 2000, 3000, 4000]),
Series(pd.date_range(0, periods=4)),
],
)
@pytest.mark.parametrize("categorize", [True, False])
def test_categorical_consistency(s1, categorize):
# see gh-15143
#
# Check that categoricals hash consistent with their values,
# not codes. This should work for categoricals of any dtype.
s2 = s1.astype("category").cat.set_categories(s1)
s3 = s2.cat.set_categories(list(reversed(s1)))
# These should all hash identically.
h1 = hash_pandas_object(s1, categorize=categorize)
h2 = hash_pandas_object(s2, categorize=categorize)
h3 = hash_pandas_object(s3, categorize=categorize)
tm.assert_series_equal(h1, h2)
tm.assert_series_equal(h1, h3)
def test_categorical_with_nan_consistency():
c = pd.Categorical.from_codes(
[-1, 0, 1, 2, 3, 4], categories=pd.date_range("2012-01-01", periods=5, name="B")
)
expected = hash_array(c, categorize=False)
c = pd.Categorical.from_codes([-1, 0], categories=[pd.Timestamp("2012-01-01")])
result = hash_array(c, categorize=False)
assert result[0] in expected
assert result[1] in expected
@pytest.mark.parametrize("obj", [pd.Timestamp("20130101")])
def test_pandas_errors(obj):
msg = "Unexpected type for hashing"
with pytest.raises(TypeError, match=msg):
hash_pandas_object(obj)
def test_hash_keys():
# Using different hash keys, should have
# different hashes for the same data.
#
# This only matters for object dtypes.
obj = Series(list("abc"))
a = hash_pandas_object(obj, hash_key="9876543210123456")
b = hash_pandas_object(obj, hash_key="9876543210123465")
assert (a != b).all()
def test_invalid_key():
# This only matters for object dtypes.
msg = "key should be a 16-byte string encoded"
with pytest.raises(ValueError, match=msg):
hash_pandas_object(Series(list("abc")), hash_key="foo")
def test_already_encoded(index):
# If already encoded, then ok.
obj = Series(list("abc")).str.encode("utf8")
_check_equal(obj, index=index)
def test_alternate_encoding(index):
obj = Series(list("abc"))
_check_equal(obj, index=index, encoding="ascii")
@pytest.mark.parametrize("l_exp", range(8))
@pytest.mark.parametrize("l_add", [0, 1])
def test_same_len_hash_collisions(l_exp, l_add):
length = 2 ** (l_exp + 8) + l_add
s = tm.rands_array(length, 2)
result = hash_array(s, "utf8")
assert not result[0] == result[1]
def test_hash_collisions():
# Hash collisions are bad.
#
# https://github.com/pandas-dev/pandas/issues/14711#issuecomment-264885726
hashes = [
"Ingrid-9Z9fKIZmkO7i7Cn51Li34pJm44fgX6DYGBNj3VPlOH50m7HnBlPxfIwFMrcNJNMP6PSgLmwWnInciMWrCSAlLEvt7JkJl4IxiMrVbXSa8ZQoVaq5xoQPjltuJEfwdNlO6jo8qRRHvD8sBEBMQASrRa6TsdaPTPCBo3nwIBpE7YzzmyH0vMBhjQZLx1aCT7faSEx7PgFxQhHdKFWROcysamgy9iVj8DO2Fmwg1NNl93rIAqC3mdqfrCxrzfvIY8aJdzin2cHVzy3QUJxZgHvtUtOLxoqnUHsYbNTeq0xcLXpTZEZCxD4PGubIuCNf32c33M7HFsnjWSEjE2yVdWKhmSVodyF8hFYVmhYnMCztQnJrt3O8ZvVRXd5IKwlLexiSp4h888w7SzAIcKgc3g5XQJf6MlSMftDXm9lIsE1mJNiJEv6uY6pgvC3fUPhatlR5JPpVAHNSbSEE73MBzJrhCAbOLXQumyOXigZuPoME7QgJcBalliQol7YZ9", # noqa: E501
"Tim-b9MddTxOWW2AT1Py6vtVbZwGAmY<KEY>", # noqa: E501
]
# These should be different.
result1 = hash_array(np.asarray(hashes[0:1], dtype=object), "utf8")
expected1 = np.array([14963968704024874985], dtype=np.uint64)
tm.assert_numpy_array_equal(result1, expected1)
result2 = hash_array(np.asarray(hashes[1:2], dtype=object), "utf8")
expected2 = np.array([16428432627716348016], dtype=np.uint64)
tm.assert_numpy_array_equal(result2, expected2)
result = hash_array(np.asarray(hashes, dtype=object), "utf8")
tm.assert_numpy_array_equal(result, np.concatenate([expected1, expected2], axis=0))
def test_hash_with_tuple():
# GH#28969 array containing a tuple raises on call to arr.astype(str)
# apparently a numpy bug github.com/numpy/numpy/issues/9441
df = pd.DataFrame({"data": [tuple("1"), tuple("2")]})
result = hash_pandas_object(df)
expected = pd.Series([10345501319357378243, 8331063931016360761], dtype=np.uint64)
tm.assert_series_equal(result, expected)
df2 = pd.DataFrame({"data": [tuple([1]), tuple([2])]})
result = hash_pandas_object(df2)
expected = pd.Series([9408946347443669104, 3278256261030523334], dtype=np.uint64)
tm.assert_series_equal(result, expected)
# require that the elements of such tuples are themselves hashable
df3 = pd.DataFrame({"data": [tuple([1, []]), tuple([2, {}])]})
with pytest.raises(TypeError, match="unhashable type: 'list'"):
hash_pandas_object(df3)
def test_hash_object_none_key():
# https://github.com/pandas-dev/pandas/issues/30887
result = pd.util.hash_pandas_object(pd.Series(["a", "b"]), hash_key=None)
expected = pd.Series([4578374827886788867, 17338122309987883691], dtype="uint64")
tm.assert_series_equal(result, expected)
```
#### File: sklearn/cluster/setup.py
```python
import os
import numpy
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('cluster', parent_package, top_path)
config.add_extension('_dbscan_inner',
sources=['_dbscan_inner.pyx'],
include_dirs=[numpy.get_include()],
language="c++")
config.add_extension('_hierarchical_fast',
sources=['_hierarchical_fast.pyx'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('_k_means_elkan',
sources=['_k_means_elkan.pyx'],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('_k_means_fast',
sources=['_k_means_fast.pyx'],
                         include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
```
#### File: inspection/tests/test_permutation_importance.py
```python
import pytest
import numpy as np
from numpy.testing import assert_allclose
from sklearn.compose import ColumnTransformer
from sklearn.datasets import load_boston
from sklearn.datasets import load_iris
from sklearn.datasets import make_classification
from sklearn.datasets import make_regression
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.impute import SimpleImputer
from sklearn.inspection import permutation_importance
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import scale
from sklearn.utils import parallel_backend
from sklearn.utils._testing import _convert_container
@pytest.mark.parametrize("n_jobs", [1, 2])
def test_permutation_importance_correlated_feature_regression(n_jobs):
    # Make sure that features highly correlated with the target have a higher
# importance
rng = np.random.RandomState(42)
n_repeats = 5
X, y = load_boston(return_X_y=True)
y_with_little_noise = (
y + rng.normal(scale=0.001, size=y.shape[0])).reshape(-1, 1)
X = np.hstack([X, y_with_little_noise])
clf = RandomForestRegressor(n_estimators=10, random_state=42)
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng, n_jobs=n_jobs)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y was added as the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] >
result.importances_mean[:-1])
@pytest.mark.parametrize("n_jobs", [1, 2])
def test_permutation_importance_correlated_feature_regression_pandas(n_jobs):
pd = pytest.importorskip("pandas")
    # Make sure that features highly correlated with the target have a higher
# importance
rng = np.random.RandomState(42)
n_repeats = 5
dataset = load_iris()
X, y = dataset.data, dataset.target
y_with_little_noise = (
y + rng.normal(scale=0.001, size=y.shape[0])).reshape(-1, 1)
# Adds feature correlated with y as the last column
X = pd.DataFrame(X, columns=dataset.feature_names)
X['correlated_feature'] = y_with_little_noise
clf = RandomForestClassifier(n_estimators=10, random_state=42)
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng, n_jobs=n_jobs)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y was added as the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
def test_permutation_importance_mixed_types():
rng = np.random.RandomState(42)
n_repeats = 4
# Last column is correlated with y
X = np.array([[1.0, 2.0, 3.0, np.nan], [2, 1, 2, 1]]).T
y = np.array([0, 1, 0, 1])
clf = make_pipeline(SimpleImputer(), LogisticRegression(solver='lbfgs'))
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y is the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
# use another random state
rng = np.random.RandomState(0)
result2 = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng)
assert result2.importances.shape == (X.shape[1], n_repeats)
assert not np.allclose(result.importances, result2.importances)
# the correlated feature with y is the last column and should
# have the highest importance
assert np.all(result2.importances_mean[-1] > result2.importances_mean[:-1])
def test_permutation_importance_mixed_types_pandas():
pd = pytest.importorskip("pandas")
rng = np.random.RandomState(42)
n_repeats = 5
# Last column is correlated with y
X = pd.DataFrame({'col1': [1.0, 2.0, 3.0, np.nan],
'col2': ['a', 'b', 'a', 'b']})
y = np.array([0, 1, 0, 1])
num_preprocess = make_pipeline(SimpleImputer(), StandardScaler())
preprocess = ColumnTransformer([
('num', num_preprocess, ['col1']),
('cat', OneHotEncoder(), ['col2'])
])
clf = make_pipeline(preprocess, LogisticRegression(solver='lbfgs'))
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y is the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
def test_permutation_importance_linear_regresssion():
X, y = make_regression(n_samples=500, n_features=10, random_state=0)
X = scale(X)
y = scale(y)
lr = LinearRegression().fit(X, y)
# this relationship can be computed in closed form
expected_importances = 2 * lr.coef_**2
results = permutation_importance(lr, X, y,
n_repeats=50,
scoring='neg_mean_squared_error')
assert_allclose(expected_importances, results.importances_mean,
rtol=1e-1, atol=1e-6)
def test_permutation_importance_equivalence_sequential_parallel():
# regression test to make sure that sequential and parallel calls will
# output the same results.
X, y = make_regression(n_samples=500, n_features=10, random_state=0)
lr = LinearRegression().fit(X, y)
importance_sequential = permutation_importance(
lr, X, y, n_repeats=5, random_state=0, n_jobs=1
)
# First check that the problem is structured enough and that the model is
# complex enough to not yield trivial, constant importances:
imp_min = importance_sequential['importances'].min()
imp_max = importance_sequential['importances'].max()
assert imp_max - imp_min > 0.3
    # Now actually check that parallelism does not impact the results,
    # either with shared memory (threading) or with isolated memory
# via process-based parallelism using the default backend
# ('loky' or 'multiprocessing') depending on the joblib version:
# process-based parallelism (by default):
importance_processes = permutation_importance(
lr, X, y, n_repeats=5, random_state=0, n_jobs=2)
assert_allclose(
importance_processes['importances'],
importance_sequential['importances']
)
# thread-based parallelism:
with parallel_backend("threading"):
importance_threading = permutation_importance(
lr, X, y, n_repeats=5, random_state=0, n_jobs=2
)
assert_allclose(
importance_threading['importances'],
importance_sequential['importances']
)
@pytest.mark.parametrize("n_jobs", [None, 1, 2])
def test_permutation_importance_equivalence_array_dataframe(n_jobs):
# This test checks that the column shuffling logic has the same behavior
    # on both a dataframe and a simple numpy array.
pd = pytest.importorskip('pandas')
# regression test to make sure that sequential and parallel calls will
# output the same results.
X, y = make_regression(n_samples=100, n_features=5, random_state=0)
X_df = pd.DataFrame(X)
# Add a categorical feature that is statistically linked to y:
binner = KBinsDiscretizer(n_bins=3, encode="ordinal")
cat_column = binner.fit_transform(y.reshape(-1, 1))
# Concatenate the extra column to the numpy array: integers will be
# cast to float values
X = np.hstack([X, cat_column])
assert X.dtype.kind == "f"
# Insert extra column as a non-numpy-native dtype (while keeping backward
# compat for old pandas versions):
if hasattr(pd, "Categorical"):
cat_column = pd.Categorical(cat_column.ravel())
else:
cat_column = cat_column.ravel()
new_col_idx = len(X_df.columns)
X_df[new_col_idx] = cat_column
assert X_df[new_col_idx].dtype == cat_column.dtype
    # Stitch an arbitrary index to the dataframe:
X_df.index = np.arange(len(X_df)).astype(str)
rf = RandomForestRegressor(n_estimators=5, max_depth=3, random_state=0)
rf.fit(X, y)
n_repeats = 3
importance_array = permutation_importance(
rf, X, y, n_repeats=n_repeats, random_state=0, n_jobs=n_jobs
)
# First check that the problem is structured enough and that the model is
# complex enough to not yield trivial, constant importances:
imp_min = importance_array['importances'].min()
imp_max = importance_array['importances'].max()
assert imp_max - imp_min > 0.3
    # Now check that importances computed on the dataframe match the values
# of those computed on the array with the same data.
importance_dataframe = permutation_importance(
rf, X_df, y, n_repeats=n_repeats, random_state=0, n_jobs=n_jobs
)
assert_allclose(
importance_array['importances'],
importance_dataframe['importances']
)
@pytest.mark.parametrize("input_type", ["array", "dataframe"])
def test_permutation_importance_large_memmaped_data(input_type):
# Smoke, non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/15810
n_samples, n_features = int(5e4), 4
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
assert X.nbytes > 1e6 # trigger joblib memmaping
X = _convert_container(X, input_type)
clf = DummyClassifier(strategy='prior').fit(X, y)
# Actual smoke test: should not raise any error:
n_repeats = 5
r = permutation_importance(clf, X, y, n_repeats=n_repeats, n_jobs=2)
# Auxiliary check: DummyClassifier is feature independent:
    # permuting features should not change the predictions
expected_importances = np.zeros((n_features, n_repeats))
assert_allclose(expected_importances, r.importances)
```
#### File: metrics/_plot/base.py
```python
def _check_classifer_response_method(estimator, response_method):
"""Return prediction method from the response_method
Parameters
----------
estimator: object
Classifier to check
response_method: {'auto', 'predict_proba', 'decision_function'}
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. If set to 'auto',
:term:`predict_proba` is tried first and if it does not exist
:term:`decision_function` is tried next.
Returns
-------
prediction_method: callable
prediction method of estimator
"""
if response_method not in ("predict_proba", "decision_function", "auto"):
raise ValueError("response_method must be 'predict_proba', "
"'decision_function' or 'auto'")
error_msg = "response method {} is not defined in {}"
if response_method != "auto":
prediction_method = getattr(estimator, response_method, None)
if prediction_method is None:
raise ValueError(error_msg.format(response_method,
estimator.__class__.__name__))
else:
predict_proba = getattr(estimator, 'predict_proba', None)
decision_function = getattr(estimator, 'decision_function', None)
prediction_method = predict_proba or decision_function
if prediction_method is None:
raise ValueError(error_msg.format(
"decision_function or predict_proba",
estimator.__class__.__name__))
return prediction_method
```
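Below is a minimal, hedged usage sketch of the helper defined above. It assumes the module is importable as `sklearn.metrics._plot.base` (the path implied by the file header) and uses `LogisticRegression` purely as an example classifier; none of this wiring appears in the original file, and the helper's original spelling is kept as-is.
```python
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
# Assumed import path for the private helper shown above.
from sklearn.metrics._plot.base import _check_classifer_response_method

X, y = make_classification(n_samples=50, n_features=4, random_state=0)
clf = LogisticRegression().fit(X, y)

# 'auto' prefers predict_proba and falls back to decision_function.
prediction_method = _check_classifer_response_method(clf, "auto")
scores = prediction_method(X)  # class probabilities here, shape (50, 2)
print(scores.shape)
```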
#### File: tree/tests/test_reingold_tilford.py
```python
import numpy as np
import pytest
from sklearn.tree._reingold_tilford import buchheim, Tree
simple_tree = Tree("", 0,
Tree("", 1),
Tree("", 2))
bigger_tree = Tree("", 0,
Tree("", 1,
Tree("", 3),
Tree("", 4,
Tree("", 7),
Tree("", 8)
),
),
Tree("", 2,
Tree("", 5),
Tree("", 6)
)
)
@pytest.mark.parametrize("tree, n_nodes", [(simple_tree, 3), (bigger_tree, 9)])
def test_buchheim(tree, n_nodes):
def walk_tree(draw_tree):
res = [(draw_tree.x, draw_tree.y)]
for child in draw_tree.children:
# parents higher than children:
assert child.y == draw_tree.y + 1
res.extend(walk_tree(child))
if len(draw_tree.children):
# these trees are always binary
# parents are centered above children
assert draw_tree.x == (draw_tree.children[0].x
+ draw_tree.children[1].x) / 2
return res
layout = buchheim(tree)
coordinates = walk_tree(layout)
assert len(coordinates) == n_nodes
# test that x values are unique per depth / level
# we could also do it quicker using defaultdicts..
depth = 0
while True:
        x_at_this_depth = [node[0] for node in coordinates
                           if node[1] == depth]
if not x_at_this_depth:
# reached all leafs
break
assert len(np.unique(x_at_this_depth)) == len(x_at_this_depth)
depth += 1
``` |
{
"source": "Jos3f/Apriori-algorithm",
"score": 3
} |
#### File: Apriori-algorithm/Data/DataGen.py
```python
import numpy as np
from tqdm import tqdm
def generate_sample_data(filename, item_count=1000, basket_count=100000, seed=123):
print("Creating data set of {} baskets with {} unique items".format(basket_count, item_count))
np.random.seed(seed)
# Create item indices and probability of being selected in the first pass
items = np.arange(item_count)
item_selection_prob = np.random.exponential(1, item_count).clip(0, 2)
item_selection_prob /= np.sum(item_selection_prob)
# Create some associations
item_assoc_prob = np.random.exponential(0.15, item_count).clip(0, 1)
associated_to = {}
for i, item in enumerate(items):
sample_count = np.random.choice([1, 2, 3], 1, p=[.7, .2, .1])
associated_to[item] = frozenset(np.random.choice(items, sample_count, replace=False))
file1 = open(filename, "w")
for _ in tqdm(range(basket_count)):
        # Draw a basket size (renamed to avoid shadowing the item_count parameter)
        basket_size = np.random.lognormal(1.75, 0.4, 1).astype(int).clip(1)
        basket = set(np.random.choice(items, basket_size, replace=False, p=item_selection_prob))
basket_associated = set()
for item in basket:
if np.random.uniform(0,1) < item_assoc_prob[item]:
basket_associated.update(associated_to[item])
basket.update(basket_associated)
file1.write(" ".join(str(item) for item in basket)+"\n" )
file1.close()
if __name__ == '__main__':
generate_sample_data("example_dataset.dat", 1000, 100000)
``` |
{
"source": "Jos3f/Intensity-based-clustering-study",
"score": 2
} |
#### File: Intensity-based-clustering-study/dognet/bbbc039.py
```python
import dognet
import numpy as np
import os
import skimage.io
import sys
from sklearn.model_selection import LeaveOneOut
from datetime import datetime
sys.path.append("../metrics")
from metrics import Metrics
sys.path.append("../unet")
from threshold_utils import get_best_threshold, normalize_output
import torch
from pathlib import Path
from multiprocessing.dummy import Pool
import matplotlib.pyplot as plt
def inference(net,image,get_inter = False):
x = np.expand_dims(image,0)
vx = torch.from_numpy(x).float().cuda()
res,inter = net(vx)
if get_inter:
return res.data.cpu().numpy(),inter.data.cpu().numpy()
return res.data.cpu().numpy()
def createDataset(filelist):
train_images = []
train_labels = []
pathImages = "../datasets/BBBC039/images/"
pathMasks = "../datasets/BBBC039/masks/"
# Determine the max / min of the dataset
max_val = float('-inf')
min_val = float('inf')
for filename in sorted(filelist):
img = plt.imread(pathImages + filename[:-3]+"tif")
if np.max(img) > max_val:
max_val = np.max(img)
if np.min(img) < min_val:
min_val = np.min(img)
for i, filename in enumerate(sorted(filelist)):
if i == 110:
print(filename)
# load image
img = plt.imread(pathImages + filename[:-3]+"tif")
img = (img - min_val) / (max_val - min_val)
#load annotation
orig_masks = skimage.io.imread(pathMasks + filename)
orig_masks = orig_masks[:,:,0]
orig_masks[orig_masks > 1] = 1
#Append to list
train_images.append(img)
train_labels.append(orig_masks)
return train_images, train_labels
def train_models(train_images, train_labels, validationsize, filename = None):
if filename is None:
now = datetime.now()
current_dt = now.strftime("%y_%m_%d_%H_%M_%S")
filename = "results/" + current_dt + ".csv"
results_file = Path(filename)
if not results_file.is_file():
results_file.write_text('index;jaccard;Dice;Adj;Warp;jaccard_to;Dice_to;Adj_to;Warp_to\n')
loo = LeaveOneOut()
total_splits = loo.get_n_splits(train_images)
validationsize = validationsize - 1
for index, (train_index, test_index) in enumerate(loo.split(train_images)):
if index != 110:
continue
run = True
print(index)
while run:
np.random.shuffle(train_index)
trainingimages = [train_images[i] for i in train_index[:-validationsize]]
traininglabels = [train_labels[i] for i in train_index[:-validationsize]]
validationimages = [train_images[i] for i in train_index[-validationsize:]]
validationlabels = [train_labels[i] for i in train_index[-validationsize:]]
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net = dognet.SimpleIsotropic(1,15,5).to(device)
net.weights_init()
net, errors = dognet.train_routine(
net.cuda(),
dognet.create_generator1C(trainingimages,traininglabels, n=total_splits),
n_iter=3000,
margin=5,
loss='softdice',
lr=0.0001
)
print("The mean loss of last 100 step:",np.mean(errors[-100:]))
if net != None:
metric_predictions_unprocessed = []
for x in validationimages:
pred = inference(net,x[np.newaxis, :],False)[0][0]
pred_background = np.ones(pred.shape) - pred
pred_final = np.stack((pred_background,pred),axis=-1)
metric_predictions_unprocessed.append(normalize_output(pred_final))
best_tau, best_score = get_best_threshold(
metric_predictions_unprocessed,
validationlabels,
min=0, max=1, num_steps=50,
use_metric=1
)
print("Best tau: " + str(best_tau))
print("Best avg score: " + str(best_score))
#Evaluate on testdata
test_images = train_images[test_index[0]]
metric_labels_test = [train_labels[test_index[0]]]
pred = inference(net,test_images[np.newaxis, :],False)[0][0]
metric_predictions = [(pred >= best_tau).astype(int)]
metric_predictions_unthresholded = [(pred >= 0.5).astype(int)]
plt.imsave("sample110.png", metric_predictions[0], cmap=plt.get_cmap('binary_r'))
parallel_metrics = [
Metrics(
metric_labels_test,
metric_predictions_unthresholded,
safe=False,
parallel=False),
Metrics(
metric_labels_test,
metric_predictions,
safe=False,
parallel=False)
]
def f(m):
return (
m.jaccard()[0],
m.dice()[0],
m.adj_rand()[0],
m.warping_error()[0]
)
pool = Pool(4)
metric_result = pool.map(f, parallel_metrics)
jaccard_index = metric_result[0][0]
dice = metric_result[0][1]
adj = metric_result[0][2]
warping_error = metric_result[0][3]
jaccard_index_to = metric_result[1][0]
dice_to = metric_result[1][1]
adj_to = metric_result[1][2]
warping_error_to = metric_result[1][3]
with results_file.open("a") as f:
f.write(
str(test_index[0]) + ";" +
str(jaccard_index) + ";" +
str(dice) + ";" +
str(adj) + ";" +
str(warping_error) + ";" +
str(jaccard_index_to) + ";" +
str(dice_to) + ";" +
str(adj_to) + ";" +
str(warping_error_to) + "\n"
)
print("test_data_point_index: " + str(test_index[0]))
print("Jaccard index: " + str(jaccard_index) + " with threshold optimization: " + str(jaccard_index_to))
print("Dice: " + str(dice) + " with threshold optimization: " + str(dice_to))
print("Adj: " + str(adj) + " with threshold optimization: " + str(adj_to))
print("Warping Error: " + str(warping_error) + " with threshold optimization: " + str(warping_error_to))
run = False
if __name__ == "__main__":
try:
results_file = sys.argv[1]
except IndexError:
print("No file name given, results file will be given a name automatically")
results_file = None
    filenames = os.listdir("../datasets/BBBC039/masks/")
filenames = sorted(filenames)
train_images, train_labels = createDataset(filenames)
train_models(train_images, train_labels, 40, results_file)
```
#### File: dognet/baselines/neiland2014.py
```python
from skimage.filters import median, threshold_otsu, gaussian
from skimage.morphology import white_tophat,disk,black_tophat
import numpy as np
def cellprofiler_pipeline_single(img,sz=8,th=None):
#1. estimate bg
img = (img-img.min())/(img.max()-img.min())*255.
bg = median(img.astype(np.uint8),selem=np.ones((100,100))).astype(np.float32)
#2. find the difference we are intrested in only higher values
diff = img-bg
diff[diff<0]=0.
if th is None:
th = threshold_otsu(diff,255)
#3. apply tophat
tophat = white_tophat(diff,selem=disk(sz))
return tophat
def cellprofiler_pipeline(pre,post,sz=8,th=None):
pre_process = np.zeros_like(pre[0])
post_process = np.zeros_like(pre[0])
for p in pre:
pre_process+=cellprofiler_pipeline_single(p,sz,th)
for p in post:
post_process+=cellprofiler_pipeline_single(p,sz,th)
pre_process/=len(pre)
post_process/=len(post)
return pre_process+post_process
```
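For context, here is a small usage sketch of the baseline above on synthetic images. The import path, image shapes, channel counts, and random data are illustrative assumptions only; with the hard-coded 100x100 median footprint the call is slow but functional.
```python
import numpy as np
# Assumed import path based on the file location shown above.
from baselines.neiland2014 import cellprofiler_pipeline

rng = np.random.RandomState(0)
pre = [rng.rand(128, 128) for _ in range(2)]   # two pre-synaptic channels (made up)
post = [rng.rand(128, 128) for _ in range(2)]  # two post-synaptic channels (made up)

heatmap = cellprofiler_pipeline(pre, post, sz=8)
print(heatmap.shape)  # (128, 128): averaged top-hat responses of pre + post channels
```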
#### File: dognet/dognet/networks.py
```python
import torch.nn as nn
import torch.nn.functional as F
import torch
from .dogs import DoG2DIsotropic, DoG2DAnisotropic, DoG3DIsotropic
class Simple3DNetwork(nn.Module):
def __init__(self, in_channels, filter_size=9, k=4,depth = 3, return_intermediate=False, learn_amplitude=False,
dog_class=DoG3DIsotropic):
"""
Create a simple 3D DoGnet
:param in_channels: input data number of channels
:param filter_size: filter window size must be even (3,5,7,9,11 etc)
:param k: number of filters for each image
:param d: depth of voxel volume
:param return_intermediate: returns the output of DoG filters during the inference
:param dog_class: the class of Difference of Gaussian used
"""
super(Simple3DNetwork, self).__init__()
self.conv1 = dog_class(filter_size, in_channels, k, depth , learn_amplitude=learn_amplitude)
self.conv2 = nn.Conv2d(in_channels * k, 1, 1)
self.return_intermediate = return_intermediate
def weights_init(self):
self.conv1.weights_init()
self.conv2.weight.data.fill_(1.)
self.conv2.bias.data.fill_(0.)
def forward(self, x):
y = self.conv1(x)
x = self.conv2(y.squeeze(2))
if self.return_intermediate:
return F.sigmoid(x), y
return F.sigmoid(x), None
def get_reg_params(self):
return self.conv2.parameters()
class SimpleNetwork(nn.Module):
def __init__(self, in_channels, filter_size=9, k=4, return_intermediate=False, learn_amplitude=False,
dog_class=DoG2DIsotropic):
"""
Create a simple DoGnet
:param in_channels: input data number of channels
:param filter_size: filter window size must be even (3,5,7,9,11 etc)
:param k: number of filters for each image
:param return_intermediate: returns the output of DoG filters during the inference
:param dog_class: the class of Difference of Gaussian used
"""
super(SimpleNetwork, self).__init__()
self.conv1 = dog_class(filter_size, in_channels,k, learn_amplitude=learn_amplitude)
self.conv2 = nn.Conv2d(in_channels*k, 2, kernel_size=(1,1))
self.return_intermediate = return_intermediate
def weights_init(self):
self.conv1.weights_init()
self.conv2.weight.data.fill_(1.)
self.conv2.bias.data.fill_(0.)
def forward(self, x):
y = self.conv1(x)
#y = F.sigmoid(x)
xx = F.sigmoid(self.conv2(y))
xxx = xx[:,0:1].mul(xx[:,1:])
if self.return_intermediate:
return xxx, y
return xxx, None
def get_reg_params(self):
return self.conv2.parameters()
class SimpleIsotropic(SimpleNetwork):
def __init__(self, in_channels, filter_size=9, k=4, return_intermediate=False, learn_amplitude=False):
super(SimpleIsotropic, self).__init__(in_channels, filter_size, k, return_intermediate, learn_amplitude,
DoG2DIsotropic)
class SimpleAnisotropic(SimpleNetwork):
def __init__(self, in_channels, filter_size=9, k=4, return_intermediate=False, learn_amplitude=False):
super(SimpleAnisotropic, self).__init__(in_channels, filter_size, k, return_intermediate, learn_amplitude,
DoG2DAnisotropic)
class DeepNetwork(nn.Module):
def __init__(self, in_channels, filter_size=9, k=5, n_layers=2, learn_amplitude=False, dog_class=DoG2DIsotropic):
"""
Create a Deep dognet
:param in_channels: input data number of channels
:param filter_size: filter window size must be even (3,5,7,9,11 etc)
:param k: number of filters for each image
:param n_layers: number of repeats
:param dog_class: the class of Difference of Gaussian used
"""
super(DeepNetwork, self).__init__()
layer = []
for i in range(n_layers):
layer.append(dog_class(filter_size, in_channels,k, learn_amplitude=learn_amplitude))
layer.append(nn.Conv2d(in_channels*k, in_channels, 1))
layer.append(nn.ReLU())
self.net = nn.Sequential(*layer[:-2])
self.final_convolution = nn.Conv2d(in_channels*k, 2, 1)
def weights_init(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.fill_(1.)
m.bias.data.fill_(0.)
elif isinstance(m, DoG2DIsotropic) or isinstance(m, DoG2DAnisotropic):
m.weights_init()
def forward(self, x):
x = self.net(x)
xx = F.sigmoid(self.final_convolution(x))
xxx = xx[:,0:1].mul(xx[:,1:])
return xxx, None
def get_reg_params(self):
return self.final_convolution.parameters()
class DeepIsotropic(DeepNetwork):
def __init__(self, in_channels, filter_size=9, k=4, n_layers=2, learn_amplitude=False):
super(DeepIsotropic, self).__init__(in_channels, filter_size, k, n_layers, learn_amplitude, DoG2DIsotropic)
class DeepAnisotropic(DeepNetwork):
def __init__(self, in_channels, filter_size=9, k=4, n_layers=2, learn_amplitude=False):
super(DeepAnisotropic, self).__init__(in_channels, filter_size, k, n_layers, learn_amplitude, DoG2DAnisotropic)
```
#### File: Intensity-based-clustering-study/unet/read_probability_maps.py
```python
import os
import numpy as np
import matplotlib.pyplot as plt
class read_probability_masks():
def __init__(self, directory):
self.directory = directory
def get_probability_mask(self, file_name):
return np.load(self.directory + str("/") + file_name)
    def get_all_starting_w(self, name_starts_w):
        """
        Returns all masks from the files starting with a given string
        :param name_starts_w: the string that each file should start with
        :return: a list of the probability masks
        """
        file_names = sorted(f for f in os.listdir(self.directory) if f.startswith(name_starts_w))
        return [self.get_probability_mask(f) for f in file_names]
    def plot_mask(self, mask, tau=1):
        """
        Plot a binary segmentation obtained from a probability mask.
        :param mask: probability mask with the class probabilities in the last dimension
        :param tau: weight applied to the foreground class before taking the argmax
        :return: None
        """
        plt.matshow(np.argmax(mask[0] * np.array([[[1, tau]]]), axis=-1), cmap=plt.cm.gray)
        plt.show()
if __name__ == '__main__':
# Example usage
mask_reader = read_probability_masks("results")
mask = mask_reader.get_probability_mask("BBBC039_test_fold_0.npy")
print(mask)
print(mask.shape)
mask_reader.plot_mask(mask)
``` |
{
"source": "Jos3f/Locality-Sensitive-Hashing",
"score": 4
} |
#### File: Jos3f/Locality-Sensitive-Hashing/CompareSets.py
```python
class CompareSets:
def get_similarity(self, set_A, set_B):
return self.jaccard_similarity(set_A, set_B)
def jaccard_similarity(self, set_A, set_B):
return len(set_A.intersection(set_B)) / len(set_A.union(set_B))
if __name__ == "__main__":
# Example usage of this class
set_A = {5, 4, 3, 1}
set_B = {3, 0, 3, 1}
compare_sets = CompareSets()
print(compare_sets.get_similarity(set_A, set_B))
``` |
{
"source": "Jos3f/SOM-word-vectors",
"score": 3
} |
#### File: Jos3f/SOM-word-vectors/SOM.py
```python
import numpy as np
import matplotlib.pyplot as plt
import math
from tqdm import tqdm
from datetime import datetime
class SOM:
"""SOM with 2d grid"""
_data = None
_nUnits = 0
_map_width = 0
_unit_weights = None
def __init__(self, data, map_width):
"""
Initialise SOM
:param data: 2D Data matrix. Data points arranged row-wise
:param map_width: Width of the grid in the SOM algorithm
"""
self._data = data
self._nUnits = map_width ** 2
self._map_width = map_width
self._unit_weights = np.random.normal(size=(self._nUnits, data.shape[1]))
def train(self, epochs=100, start_range=None, learning_rate=0.2):
"""
Train the units in our map
:param epochs: Training epochs
:param start_range: The neighbourhood range of the map during training. The range is calculated
using manhattan distance. This parameter determines the size of the neighbourhood. If this is
None, then the default start range is the width of the 2D map.
:param learning_rate: Learning rate when updating our weights
:return:
"""
'''Create an exponentially decreasing neighbourhood size'''
if start_range is None:
start_range = self._map_width
exponents = np.arange(epochs + 1)
end_neighborhood = 0.5
neighbour_distances = (
np.floor(start_range * ((end_neighborhood / start_range) ** (1 / exponents[-1])) ** exponents)).astype(int)
'''Start training'''
for epoch in tqdm(range(len(neighbour_distances))):
'''Shuffle the data order'''
data_order = np.arange(self._data.shape[0])
np.random.shuffle(data_order)
'''Loop through each data point and alter the units depending on the closest unit'''
for data_index in data_order:
winning_unit = self._get_winning_unit(data_index) # Euclidean distance
# update winning unit
self._unit_weights[winning_unit] += learning_rate * (
self._data[data_index] - self._unit_weights[winning_unit])
# Find neighbours
neighbours = []
for neighbour_x in range(max(0, winning_unit % int(self._map_width) - neighbour_distances[epoch]),
min(self._nUnits,
winning_unit % int(self._map_width) + neighbour_distances[
epoch]) + 1):
if 0 <= neighbour_x < self._map_width:
for neighbour_y in range(-(neighbour_distances[epoch] - abs(
winning_unit % int(self._map_width) - neighbour_x)),
neighbour_distances[epoch] - abs(
winning_unit % int(self._map_width) - neighbour_x) + 1):
y_coord = neighbour_y + math.floor(winning_unit / self._map_width)
if 0 <= y_coord < self._map_width:
x_coord = neighbour_x
neighbours.append(x_coord + y_coord * self._map_width)
# update neighbouring units
for neighbour_index in neighbours:
if neighbour_index == winning_unit:
continue
neighbour_index_mod = neighbour_index % self._nUnits
self._unit_weights[neighbour_index_mod] += learning_rate * (
self._data[data_index] - self._unit_weights[neighbour_index_mod])
def label_nodes(self, labels, method="d2u"):
"""
Label the units with the provided data and their labels
:param labels: Label of the data
:param method: Method for labeling data. 'd2u' will label each unit with a specific label if the unit is
the closest one to the data point with that specific label. 'u2d' will label each unit to the same label as
the closest data point. The distances are measured in euclidean space.
:return: A matrix with the labels for each unit in the map.
"""
'''Create return matrix'''
unit_labels = [[] for _ in range(self._nUnits)]
'''Label units'''
if method == "d2u":
for data_index in range(self._data.shape[0]):
winning_unit = self._get_winning_unit(data_index)
unit_labels[winning_unit].append(labels[data_index])
else:
for unit_index in range(self._nUnits):
closest_data_point = self._get_closest_point(unit_index)
unit_labels[unit_index].append(labels[closest_data_point])
return unit_labels
def _get_winning_unit(self, data_index):
"""
Find closest unit to a specific data point
:param data_index: the index of the data point
:return: The index of the closest unit
"""
distances = np.linalg.norm((self._unit_weights - self._data[data_index]), axis=1)
return distances.argmin()
def _get_closest_point(self, unit_index):
"""
Find closest data point to a unit
:param unit_index: The index of the unit
:return: The index of the closest data point
"""
distances = np.linalg.norm((self._data - self._unit_weights[unit_index]), axis=1)
return distances.argmin()
def plot_map(self, labels, label_name, data_point_name="input space", method="d2u", save_file=True):
"""
Plot the units and label them with the provided labels
:param labels: Label of the data points
:param label_name: Label name for each label. These will be plotted.
:param data_point_name: Name of the input space
:param method: Method used when labeling data points. See self.label_nodes for more info.
:param save_file: Boolean value indicating if we want to save the plotted map or show it in a window.
"""
''' Label the units '''
node_labels = self.label_nodes(labels, method)
'''Fill a matrix representing our final labels for the units'''
node_matrix_labels = np.full(len(node_labels), -1)
for list_index in range(len(node_labels)):
list_ = node_labels[list_index]
if len(list_) == 0:
continue
most_frequent_element = max(set(list_), key=list_.count)
node_matrix_labels[list_index] = most_frequent_element
'''Plot labeled matrix and show or save to file'''
dpi = 200
plt.figure(figsize=(300 * self._map_width / dpi, 100 * self._map_width / dpi), dpi=dpi)
for unit_index in range(self._nUnits):
if node_matrix_labels[unit_index] != -1:
unit_label = node_matrix_labels[unit_index]
label_print = label_name[unit_label]
x_coord = float(unit_index % self._map_width)
y_coord = float(math.floor(unit_index / self._map_width))
plt.annotate(label_print, (x_coord, y_coord))
plt.xlim((-1, self._map_width))
plt.ylim((-1, self._map_width))
plt.title("Topological mapping to {}x{} grid with respect to {}"
.format(self._map_width, self._map_width, data_point_name))
plt.xlabel("Grid x-axis")
plt.ylabel("Grid y-axis")
if save_file:
now = datetime.now()
now_str = now.strftime("%Y_%m_%d_%H_%M_%S")
plt.savefig("results/{}".format(now_str + '.png'), dpi=dpi, bbox_inches='tight')
else:
plt.show()
``` |
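Since SOM.py above does not ship a usage example, the following is a minimal, hypothetical sketch on random data; the data dimensions, map width, label names, and import path are all made-up assumptions.
```python
import numpy as np
from SOM import SOM  # assuming the class above lives in SOM.py on the path

rng = np.random.RandomState(0)
data = rng.normal(size=(200, 10))       # 200 ten-dimensional points (illustrative)
labels = rng.randint(0, 3, size=200)    # three arbitrary classes
label_names = ["class_a", "class_b", "class_c"]

som = SOM(data, map_width=8)            # units arranged on an 8x8 grid
som.train(epochs=20, learning_rate=0.2)
unit_labels = som.label_nodes(labels, method="d2u")
som.plot_map(labels, label_names, data_point_name="random data", save_file=False)
```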
{
"source": "jos6654/gh-contribution-api",
"score": 2
} |
#### File: src/api/api.py
```python
from flask_restful import Resource
from flask import request
from .api_utils import *
class UserStats(Resource):
def get(self, username):
        forked = request.args.get('forked') in (None, 'true')  # a missing parameter is treated the same as 'true'
return collect_stats(username, forked)
``` |
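The file above only defines the resource; the snippet below is a hypothetical sketch of how it might be registered in a Flask application. The module layout (`api.api`) and the route are assumptions, and `collect_stats` is expected to come from the `api_utils` module that is not shown here.
```python
# hypothetical app.py wiring for the UserStats resource above
from flask import Flask
from flask_restful import Api
from api.api import UserStats  # assumed module path

app = Flask(__name__)
api = Api(app)
api.add_resource(UserStats, "/users/<string:username>")  # exposes GET /users/<username>?forked=false

if __name__ == "__main__":
    app.run(debug=True)
```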
{
"source": "JosAbaafe/eCommJenkins",
"score": 2
} |
#### File: eCommJenkins/ecom/models.py
```python
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Customer(models.Model):
user=models.OneToOneField(User,on_delete=models.CASCADE)
profile_pic= models.ImageField(upload_to='profile_pic/CustomerProfilePic/',null=True,blank=True)
address = models.CharField(max_length=40)
mobile = models.CharField(max_length=10,null=False)
@property
def get_name(self):
return self.user.first_name+" "+self.user.last_name
@property
def get_id(self):
return self.user.id
def __str__(self):
return self.user.first_name
class Product(models.Model):
name=models.CharField(max_length=40)
product_image= models.ImageField(upload_to='product_image/',null=True,blank=True)
price = models.PositiveIntegerField()
quantity = models.PositiveIntegerField(default=0)
description=models.CharField(max_length=40)
def __str__(self):
return self.name
class Orders(models.Model):
STATUS =(
('Pending','Pending'),
('Order Confirmed','Order Confirmed'),
('Out for Delivery','Out for Delivery'),
('Delivered','Delivered'),
)
customer=models.ForeignKey('Customer', on_delete=models.CASCADE,null=True)
product=models.ForeignKey('Product',on_delete=models.CASCADE,null=True)
email = models.CharField(max_length=50,null=True)
address = models.CharField(max_length=500,null=True)
mobile = models.CharField(max_length=20,null=True)
order_date= models.DateField(auto_now_add=True,null=True)
status=models.CharField(max_length=50,null=True,choices=STATUS)
    def __str__(self):
        return "{} ({})".format(self.product, self.order_date)
class Feedback(models.Model):
name=models.CharField(max_length=40)
feedback=models.CharField(max_length=500)
date= models.DateField(auto_now_add=True,null=True)
def __str__(self):
return self.name
```
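A short, hypothetical Django-shell sketch of how these models fit together; all field values below are made up and assume the app is installed as `ecom`.
```python
# run inside `python manage.py shell`
from django.contrib.auth.models import User
from ecom.models import Customer, Product, Orders

user = User.objects.create_user(username="jane", password="example-pass", first_name="Jane")
customer = Customer.objects.create(user=user, address="12 Example Street", mobile="0123456789")
product = Product.objects.create(name="Mug", price=10, quantity=5, description="Ceramic mug")

order = Orders.objects.create(
    customer=customer,
    product=product,
    email="jane@example.com",
    address=customer.address,
    mobile=customer.mobile,
    status="Pending",
)
print(order)  # uses the __str__ defined on Orders
```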
#### File: git/test/test_config.py
```python
import glob
import io
from git import (
GitConfigParser
)
from git.compat import string_types
from git.config import cp
from git.test.lib import (
TestCase,
fixture_path,
)
from git.test.lib import with_rw_directory
import os.path as osp
from git.util import rmfile
_tc_lock_fpaths = osp.join(osp.dirname(__file__), 'fixtures/*.lock')
def _rm_lock_files():
for lfp in glob.glob(_tc_lock_fpaths):
rmfile(lfp)
class TestBase(TestCase):
def setUp(self):
_rm_lock_files()
def tearDown(self):
for lfp in glob.glob(_tc_lock_fpaths):
if osp.isfile(lfp):
                raise AssertionError('Previous TC left hanging git-lock file: %s' % lfp)
def _to_memcache(self, file_path):
with open(file_path, "rb") as fp:
sio = io.BytesIO(fp.read())
sio.name = file_path
return sio
def test_read_write(self):
# writer must create the exact same file as the one read before
for filename in ("git_config", "git_config_global"):
file_obj = self._to_memcache(fixture_path(filename))
with GitConfigParser(file_obj, read_only=False) as w_config:
w_config.read() # enforce reading
assert w_config._sections
w_config.write() # enforce writing
# we stripped lines when reading, so the results differ
assert file_obj.getvalue()
self.assertEqual(file_obj.getvalue(), self._to_memcache(fixture_path(filename)).getvalue())
# creating an additional config writer must fail due to exclusive access
with self.assertRaises(IOError):
GitConfigParser(file_obj, read_only=False)
# should still have a lock and be able to make changes
assert w_config._lock._has_lock()
# changes should be written right away
sname = "my_section"
oname = "mykey"
val = "myvalue"
w_config.add_section(sname)
assert w_config.has_section(sname)
w_config.set(sname, oname, val)
assert w_config.has_option(sname, oname)
assert w_config.get(sname, oname) == val
sname_new = "new_section"
oname_new = "new_key"
ival = 10
w_config.set_value(sname_new, oname_new, ival)
assert w_config.get_value(sname_new, oname_new) == ival
file_obj.seek(0)
r_config = GitConfigParser(file_obj, read_only=True)
assert r_config.has_section(sname)
assert r_config.has_option(sname, oname)
assert r_config.get(sname, oname) == val
# END for each filename
@with_rw_directory
def test_lock_reentry(self, rw_dir):
fpl = osp.join(rw_dir, 'l')
gcp = GitConfigParser(fpl, read_only=False)
with gcp as cw:
cw.set_value('include', 'some_value', 'a')
# entering again locks the file again...
with gcp as cw:
cw.set_value('include', 'some_other_value', 'b')
# ...so creating an additional config writer must fail due to exclusive access
with self.assertRaises(IOError):
GitConfigParser(fpl, read_only=False)
# but work when the lock is removed
with GitConfigParser(fpl, read_only=False):
assert osp.exists(fpl)
# reentering with an existing lock must fail due to exclusive access
with self.assertRaises(IOError):
gcp.__enter__()
def test_multi_line_config(self):
file_obj = self._to_memcache(fixture_path("git_config_with_comments"))
with GitConfigParser(file_obj, read_only=False) as config:
ev = "ruby -e '\n"
ev += " system %(git), %(merge-file), %(--marker-size=%L), %(%A), %(%O), %(%B)\n"
ev += " b = File.read(%(%A))\n"
ev += " b.sub!(/^<+ .*\\nActiveRecord::Schema\\.define.:version => (\\d+). do\\n=+\\nActiveRecord::Schema\\." # noqa E501
ev += "define.:version => (\\d+). do\\n>+ .*/) do\n"
ev += " %(ActiveRecord::Schema.define(:version => #{[$1, $2].max}) do)\n"
ev += " end\n"
ev += " File.open(%(%A), %(w)) {|f| f.write(b)}\n"
ev += " exit 1 if b.include?(%(<)*%L)'"
self.assertEqual(config.get('merge "railsschema"', 'driver'), ev)
self.assertEqual(config.get('alias', 'lg'),
"log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr)%Creset'"
" --abbrev-commit --date=relative")
self.assertEqual(len(config.sections()), 23)
def test_base(self):
path_repo = fixture_path("git_config")
path_global = fixture_path("git_config_global")
r_config = GitConfigParser([path_repo, path_global], read_only=True)
assert r_config.read_only
num_sections = 0
num_options = 0
# test reader methods
assert r_config._is_initialized is False
for section in r_config.sections():
num_sections += 1
for option in r_config.options(section):
num_options += 1
val = r_config.get(section, option)
val_typed = r_config.get_value(section, option)
assert isinstance(val_typed, (bool, int, float, ) + string_types)
assert val
assert "\n" not in option
assert "\n" not in val
# writing must fail
with self.assertRaises(IOError):
r_config.set(section, option, None)
with self.assertRaises(IOError):
r_config.remove_option(section, option)
# END for each option
with self.assertRaises(IOError):
r_config.remove_section(section)
# END for each section
assert num_sections and num_options
assert r_config._is_initialized is True
        # get value which doesn't exist, with default
default = "my default value"
assert r_config.get_value("doesnt", "exist", default) == default
# it raises if there is no default though
with self.assertRaises(cp.NoSectionError):
r_config.get_value("doesnt", "exist")
@with_rw_directory
def test_config_include(self, rw_dir):
def write_test_value(cw, value):
cw.set_value(value, 'value', value)
# end
def check_test_value(cr, value):
assert cr.get_value(value, 'value') == value
# end
# PREPARE CONFIG FILE A
fpa = osp.join(rw_dir, 'a')
with GitConfigParser(fpa, read_only=False) as cw:
write_test_value(cw, 'a')
fpb = osp.join(rw_dir, 'b')
fpc = osp.join(rw_dir, 'c')
cw.set_value('include', 'relative_path_b', 'b')
cw.set_value('include', 'doesntexist', 'foobar')
cw.set_value('include', 'relative_cycle_a_a', 'a')
cw.set_value('include', 'absolute_cycle_a_a', fpa)
assert osp.exists(fpa)
# PREPARE CONFIG FILE B
with GitConfigParser(fpb, read_only=False) as cw:
write_test_value(cw, 'b')
cw.set_value('include', 'relative_cycle_b_a', 'a')
cw.set_value('include', 'absolute_cycle_b_a', fpa)
cw.set_value('include', 'relative_path_c', 'c')
cw.set_value('include', 'absolute_path_c', fpc)
# PREPARE CONFIG FILE C
with GitConfigParser(fpc, read_only=False) as cw:
write_test_value(cw, 'c')
with GitConfigParser(fpa, read_only=True) as cr:
for tv in ('a', 'b', 'c'):
check_test_value(cr, tv)
# end for each test to verify
assert len(cr.items('include')) == 8, "Expected all include sections to be merged"
# test writable config writers - assure write-back doesn't involve includes
with GitConfigParser(fpa, read_only=False, merge_includes=True) as cw:
tv = 'x'
write_test_value(cw, tv)
with GitConfigParser(fpa, read_only=True) as cr:
with self.assertRaises(cp.NoSectionError):
check_test_value(cr, tv)
# But can make it skip includes altogether, and thus allow write-backs
with GitConfigParser(fpa, read_only=False, merge_includes=False) as cw:
write_test_value(cw, tv)
with GitConfigParser(fpa, read_only=True) as cr:
check_test_value(cr, tv)
def test_rename(self):
file_obj = self._to_memcache(fixture_path('git_config'))
with GitConfigParser(file_obj, read_only=False, merge_includes=False) as cw:
with self.assertRaises(ValueError):
cw.rename_section("doesntexist", "foo")
with self.assertRaises(ValueError):
cw.rename_section("core", "include")
nn = "bee"
assert cw.rename_section('core', nn) is cw
assert not cw.has_section('core')
assert len(cw.items(nn)) == 4
def test_complex_aliases(self):
file_obj = self._to_memcache(fixture_path('.gitconfig'))
with GitConfigParser(file_obj, read_only=False) as w_config:
self.assertEqual(w_config.get('alias', 'rbi'), '"!g() { git rebase -i origin/${1:-master} ; } ; g"')
self.assertEqual(file_obj.getvalue(), self._to_memcache(fixture_path('.gitconfig')).getvalue())
def test_empty_config_value(self):
cr = GitConfigParser(fixture_path('git_config_with_empty_value'), read_only=True)
assert cr.get_value('core', 'filemode'), "Should read keys with values"
with self.assertRaises(cp.NoOptionError):
cr.get_value('color', 'ui')
``` |
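A minimal sketch of the GitConfigParser read/write API that the tests above exercise; the config path is illustrative.
```python
# Minimal sketch of the GitConfigParser usage exercised by the tests above.
# The path is illustrative; the writer releases its lock when the block exits.
from git import GitConfigParser

path = "/tmp/example.gitconfig"
with GitConfigParser(path, read_only=False) as writer:
    writer.set_value("user", "name", "Jane Doe")   # creates the section if needed

reader = GitConfigParser(path, read_only=True)
print(reader.get_value("user", "name"))            # -> "Jane Doe"
```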
{
"source": "Josafath/attendance-management",
"score": 3
} |
#### File: src/Recognition_files/recognitionFaces.py
```python
import cv2
import numpy as np
class Recognition:
def __init__(self):
self.recognizer = cv2.face.LBPHFaceRecognizer_create()
self.faceCascade = cv2.CascadeClassifier("Recognition_files//haarcascade_frontalface_default.xml")
self.recognizer.read('Recognition_files/trainer/trainer.yml')
self.font = cv2.FONT_HERSHEY_COMPLEX
self.cam = cv2.VideoCapture(0)
self.cam.set(3, 640)
self.cam.set(4, 480)
self.id_students = []
def go(self):
minW = 0.1 * self.cam.get(3)
minH = 0.1 * self.cam.get(4)
while True:
ret,img = self.cam.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = self.faceCascade.detectMultiScale(
gray,
scaleFactor= 1.2,
minNeighbors= 5,
minSize= (int(minW), (int(minH))),
)
for (x,y,w,h) in faces:
cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
id, confidence = self.recognizer.predict(gray[y:y + h, x:x + w])
if (confidence < 100):
self.id_students.append(id)
confidence = " {0}%".format(round(100 - confidence))
else:
confidence = " {0}%".format(round(100 - confidence))
cv2.putText(img, "Check", (x + 5, y - 5), self.font, 1, (255, 255, 255), 2)
cv2.imshow('Taking Attendance', img)
k = cv2.waitKey(10) & 0xff # Press 'ESC' for exiting video
if k == 27:
self.finish()
break
self.finish()
return np.unique(self.id_students)
def finish(self):
self.cam.release()
cv2.destroyAllWindows()
``` |
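A hypothetical driver for the Recognition class above; it assumes a webcam plus the trainer.yml and Haar cascade files at the hard-coded paths.
```python
# Hypothetical driver for the Recognition class above. Requires a webcam and
# the trained files at the paths hard-coded in __init__; press ESC to stop.
from recognitionFaces import Recognition   # module name taken from the file path

recognizer = Recognition()
present_ids = recognizer.go()              # unique ids of recognised students
print("Recognised student ids:", list(present_ids))
```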
{
"source": "josagonzalez/swag-client",
"score": 2
} |
#### File: swag_client/schemas/v2.py
```python
from datetime import datetime
from marshmallow import Schema, fields, validates_schema
from marshmallow.exceptions import ValidationError
from marshmallow.validate import OneOf
PROVIDERS = ['aws', 'gcp', 'azure']
ACCOUNT_STATUSES = ['created', 'in-progress', 'ready', 'deprecated', 'deleted', 'in-active']
class NoteSchema(Schema):
date = fields.Str()
text = fields.Str(required=True)
class RoleSchema(Schema):
policyUrl = fields.Str()
roleName = fields.Str()
id = fields.Str(required=True)
secondaryApprover = fields.Str(default=None, missing=None)
googleGroup = fields.Str(required=True)
class AccountStatusSchema(Schema):
region = fields.Str(required=True)
status = fields.Str(validate=OneOf(ACCOUNT_STATUSES), missing='created')
notes = fields.Nested(NoteSchema, many=True, missing=[])
class ServiceStatusSchema(Schema):
region = fields.Str(required=True)
enabled = fields.Boolean(missing=False)
notes = fields.Nested(NoteSchema, many=True, missing=[])
class ServiceSchema(Schema):
name = fields.Str(required=True)
status = fields.Nested(ServiceStatusSchema, many=True, required=True)
roles = fields.Nested(RoleSchema, many=True, missing=[])
metadata = fields.Dict(missing={})
class RegionSchema(Schema):
status = fields.Str(validate=OneOf(ACCOUNT_STATUSES), missing='created')
az_mapping = fields.Dict()
class AccountSchema(Schema):
schemaVersion = fields.Str(missing='2')
id = fields.Str(required=True)
name = fields.Str(required=True)
contacts = fields.List(fields.Email(), required=True, missing=[])
provider = fields.Str(validate=OneOf(PROVIDERS), missing='aws')
type = fields.Str(missing='service')
tags = fields.List(fields.Str(), missing=[])
status = fields.Nested(AccountStatusSchema, many=True, missing=[])
email = fields.Email(required=True)
environment = fields.Str(missing='prod')
services = fields.Nested(ServiceSchema, many=True, missing=[])
sensitive = fields.Bool(missing=False)
description = fields.Str(required=True)
owner = fields.Str(required=True, missing='netflix')
aliases = fields.List(fields.Str(), missing=[])
account_status = fields.Str(validate=OneOf(ACCOUNT_STATUSES), missing='created')
domain = fields.Str()
sub_domain = fields.Str()
regions = fields.Dict()
@validates_schema
def validate_type(self, data):
"""Performs field validation against the schema context
if values have been provided to SWAGManager via the
swag.schema_context config object.
If the schema context for a given field is empty, then
we assume any value is valid for the given schema field.
"""
fields_to_validate = ['type', 'environment', 'owner']
for field in fields_to_validate:
value = data.get(field)
allowed_values = self.context.get(field)
if allowed_values and value not in allowed_values:
raise ValidationError('Must be one of {}'.format(allowed_values), field_names=field)
@validates_schema
def validate_account_status(self, data):
"""Performs field validation for account_status. If any
region is not deleted, account_status cannot be deleted
"""
deleted_status = 'deleted'
region_status = data.get('status')
account_status = data.get('account_status')
for region in region_status:
if region['status'] != deleted_status and account_status == deleted_status:
raise ValidationError('Account Status cannot be "deleted" if a region is not "deleted"')
@validates_schema
def validate_regions_schema(self, data):
"""Performs field validation for regions. This should be
a dict with region names as the key and RegionSchema as the value
"""
region_schema = RegionSchema()
supplied_regions = data.get('regions', {})
for region in supplied_regions.keys():
result = region_schema.validate(supplied_regions[region])
if len(result.keys()) > 0:
raise ValidationError(result)
``` |
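A hypothetical validation run against AccountSchema; the payload is illustrative and the exact load/validate behaviour depends on the marshmallow version the project pins.
```python
# Hypothetical validation of an account payload against AccountSchema above.
# Field values are illustrative; an empty error dict means the payload is valid.
account = {
    "id": "012345678910",
    "name": "example-account",
    "contacts": ["owner@example.com"],
    "email": "owner@example.com",
    "description": "Example SWAG account entry",
    "owner": "netflix",
}
errors = AccountSchema().validate(account)
print(errors or "account payload is valid")
```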
{
"source": "josahe/CarND-Capstone",
"score": 3
} |
#### File: src/waypoint_updater/waypoint_updater.py
```python
import rospy
import numpy as np
from geometry_msgs.msg import PoseStamped
from styx_msgs.msg import Lane, Waypoint
from std_msgs.msg import Int32
from scipy.spatial import KDTree
import math
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
LOOKAHEAD_WPS = 50 # Number of waypoints we will publish. You can change this number
MAX_DECEL = 10 # Max. acceptable deceleration is 10m/s/s
class WaypointUpdater(object):
"""
Provides updated waypoints
"""
def __init__(self):
rospy.init_node('waypoint_updater')
# init member variables
self.have_waypoints = False
self.base_lane_wp_len = 0
self.pose = None
self.base_lane = None
self.waypoints_2d = None
        self.waypoint_tree = None
self.stopline_wp_idx = -1
self.cache_closest_wp_idx = -1
self.cache_decel_waypoints = None
self.last_closest_idx = -1
self.last_base_waypoints = None
# inputs of the ROS module
rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)
#rospy.Subscriber('/obstacle_waypoint', Int32, self.obstacle_cb)
# outputs of the ROS module
self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)
rate = rospy.Rate(50)
while not rospy.is_shutdown():
rate.sleep()
if self.pose and self.base_lane and self.have_waypoints:
self.publish_waypoints()
def get_closest_waypoint_idx(self):
"""
return index of the waypoint closest to the car's position
"""
x = self.pose.pose.position.x
y = self.pose.pose.position.y
closest_wp_idx = self.waypoint_tree.query([x,y], 1)[1]
# check if closest point is ahead or behind vehicle
closest_coord = self.waypoints_2d[closest_wp_idx]
prev_coord = self.waypoints_2d[closest_wp_idx-1]
# hyperplane equation through closest_coords
cl_vect = np.array(closest_coord)
prev_vect = np.array(prev_coord)
pos_vect = np.array([x, y])
val = np.dot(cl_vect-prev_vect, pos_vect-cl_vect)
if val>0:
closest_wp_idx = (closest_wp_idx+1) % len(self.waypoints_2d)
return closest_wp_idx
def publish_waypoints(self):
"""
Generate a new list of waypoints and publish
"""
final_lane = self.generate_lane()
self.final_waypoints_pub.publish(final_lane)
def generate_lane(self):
"""
Generate new list of waypoints
takes into account a detected traffic light
"""
lane = Lane()
# \todo MS:check if the statement below is required
#lane.header = self.base_lane.header
closest_idx = self.get_closest_waypoint_idx()
farthest_idx = closest_idx + LOOKAHEAD_WPS
if closest_idx == self.last_closest_idx:
base_waypoints = self.last_base_waypoints
        elif self.last_base_waypoints is None:
base_waypoints = self.base_lane.waypoints[closest_idx:farthest_idx]
else:
base_waypoints = self.last_base_waypoints
for i in range(self.last_closest_idx, closest_idx):
if len(base_waypoints) > 1:
base_waypoints.pop(0)
if i+LOOKAHEAD_WPS < self.base_lane_wp_len:
base_waypoints.append( self.base_lane.waypoints[i + LOOKAHEAD_WPS] )
# maintain cache
self.last_closest_idx = closest_idx
self.last_base_waypoints = base_waypoints
if self.stopline_wp_idx == -1 or (self.stopline_wp_idx >= farthest_idx):
# easy case - continue driving
lane.waypoints = base_waypoints
else:
# red traffic light ahead - we have to break
lane.waypoints = self.decelerate_waypoints(base_waypoints, closest_idx)
return lane
def decelerate_waypoints(self, waypoints, closest_idx):
"""
Calculate list of waypoints for deceleration in front of a red traffic light
        waypoints: list of waypoints ahead of us (i.e. a part of the global waypoint list)
closest_idx: index of the car's position in the global waypoint list
"""
# check if we just calculated this!
if closest_idx == self.cache_closest_wp_idx:
return self.cache_decel_waypoints
rospy.loginfo("Calculating deceleration path for idx={0}".format(closest_idx))
temp = []
was_zero = False
for i,wp in enumerate(waypoints):
p = Waypoint()
p.pose = wp.pose
if was_zero:
v = 0
else:
stop_idx = max(self.stopline_wp_idx - closest_idx -3, 0) # stop three waypoints before!
dist = self.distance(waypoints, i, stop_idx)
v = math.sqrt(2 * MAX_DECEL * (dist/20)**3)
if v<1.0:
v = 0.0
was_zero = True
p.twist.twist.linear.x = min(v, wp.twist.twist.linear.x)
temp.append(p)
# cache our result to remove latency
self.cache_closest_wp_idx = closest_idx
self.cache_decel_waypoints = temp
return temp
def pose_cb(self, msg):
"""
Callback: receives pose of the car
"""
self.pose = msg
def waypoints_cb(self, waypoints):
"""
Callback: receives waypoints of track
Attention: this is only published once, keep the data safe!
"""
self.base_lane = waypoints
if not self.waypoints_2d:
self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]
self.waypoint_tree = KDTree(self.waypoints_2d)
self.base_lane_wp_len = len(self.base_lane.waypoints)
self.have_waypoints = True
rospy.loginfo("Received {} waypoints".format(self.base_lane_wp_len))
def traffic_cb(self, msg):
"""
Callback: receive the waypoint index of a detected red traffic light
-1 means no traffic light
"""
self.stopline_wp_idx = msg.data
if self.stopline_wp_idx != -1:
rospy.loginfo("Red Traffic light detected: {0}".format(self.stopline_wp_idx))
def obstacle_cb(self, msg):
# TODO: Callback for /obstacle_waypoint message. We will implement it later
pass
def get_waypoint_velocity(self, waypoint):
return waypoint.twist.twist.linear.x
def set_waypoint_velocity(self, waypoints, waypoint, velocity):
waypoints[waypoint].twist.twist.linear.x = velocity
def distance(self, waypoints, wp1, wp2):
dist = 0
dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
for i in range(wp1, wp2+1):
dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
wp1 = i
return dist
if __name__ == '__main__':
try:
WaypointUpdater()
except rospy.ROSInterruptException:
rospy.logerr('Could not start waypoint updater node.')
``` |
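The braking profile used in decelerate_waypoints can be inspected outside of ROS; the sketch below mirrors the same formula with illustrative distances.
```python
# Standalone sketch of the braking profile used in decelerate_waypoints above;
# distances are illustrative and velocities below 1 m/s are clamped to zero.
import math

MAX_DECEL = 10  # same constant as in the node

def braking_velocity(dist):
    v = math.sqrt(2 * MAX_DECEL * (dist / 20) ** 3)
    return 0.0 if v < 1.0 else v

for dist in (40, 20, 10, 5, 1):
    print("{:>3} m before the stop line -> {:.2f} m/s".format(dist, braking_velocity(dist)))
```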
{
"source": "josai/DeepDeface",
"score": 2
} |
#### File: DeepDeface/deepdefacer/defacer.py
```python
import argparse
import sys
import os
import numpy as np
import nibabel as nib
try:
from keras import backend as K
from keras.models import *
except:
print('---------------------------------------------------------------------------------------------------------------------------------------')
print('ERROR: Failed to initialize tensorflow-gpu and Keras. Please ensure that this module is installed and a GPU is readily accessible.')
print('---------------------------------------------------------------------------------------------------------------------------------------')
sys.exit(1)
from defacer_utils import resize_img, dice_coefficient, resample_image, pre_process_image, get_available_gpus
def deface_3D_MRI():
if len(sys.argv) < 2:
print('----------------------------------------------------------------------------------------------------')
print("usage: Please specify the filepath of a MRI image for defacing....(e.g deepdefacer <path of MRI>")
print('----------------------------------------------------------------------------------------------------')
sys.exit(1)
    if not get_available_gpus():
print('---------------------------------------------------------------------------------------------------------------------------------------')
print("ERROR: Could not find an available GPU on your system. Please check that your GPU drivers (cudaNN, etc) are up to date and accessible.")
print('---------------------------------------------------------------------------------------------------------------------------------------')
sys.exit(1)
MRI_image_path = sys.argv[1]
    if not (MRI_image_path.endswith('.nii') or MRI_image_path.endswith('.nii.gz')):
        print('------------------------------------------------------------------------')
        print("ERROR: Please ensure that input MRI file is in .nii or .nii.gz format")
        print('------------------------------------------------------------------------')
        sys.exit(1)
    print('Preprocessing input MRI image...')
MRI_image_shape = nib.load(MRI_image_path).get_data().shape
if len(np.squeeze(MRI_image_shape)) != 3:
print('------------------------------------------------------------------------')
print("ERROR: Unable to deface MRI: Please ensure that input dimensions are in 3D.")
        print('------------------------------------------------------------------------')
        sys.exit(1)
MRI_image_data, MRI_unnormalized_data = pre_process_image(MRI_image_path)
deepdeface_model = load_model('deepdefacer/model.hdf5', custom_objects={'dice_coefficient': dice_coefficient})
print('-------------------------------------------------')
print('Masking %s ....' % (MRI_image_path))
mask_prediction = deepdeface_model.predict(MRI_image_data)
mask_prediction[mask_prediction < 0.5] = 0
mask_prediction[mask_prediction >= 0.5] = 1
mask_prediction = np.squeeze(mask_prediction)
masked_image = np.multiply(MRI_unnormalized_data, mask_prediction)
masked_image_save = nib.Nifti1Image(masked_image, nib.load(MRI_image_path).affine)
masked_image_resampled = resample_image(masked_image_save, target_shape=MRI_image_shape, get_nifti=True)
output_file = os.path.splitext(os.path.splitext(os.path.basename(MRI_image_path))[0])[0] + '_defaced.nii.gz'
print('Completed! Saving to %s...' % (output_file))
nib.save(masked_image_resampled, output_file)
print('Saved.')
print('----------')
``` |
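The masking step at the heart of deface_3D_MRI can be shown in isolation; the sketch below uses random arrays in place of a real scan and a real model prediction.
```python
# Standalone sketch of the mask binarisation and defacing step above,
# with random arrays standing in for the MRI volume and the model output.
import numpy as np

mri = np.random.rand(16, 16, 16)        # stand-in for MRI_unnormalized_data
mask = np.random.rand(16, 16, 16)       # stand-in for the network prediction
mask[mask < 0.5] = 0
mask[mask >= 0.5] = 1
defaced = np.multiply(mri, mask)        # voxels outside the mask become zero
print(defaced.shape, float(defaced.min()))
```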
{
"source": "josalhor/Ax",
"score": 2
} |
#### File: storage/sqa_store/db.py
```python
from __future__ import annotations
from contextlib import contextmanager, nullcontext
from os import remove as remove_file
from typing import Any, Callable, ContextManager, Generator, Optional, TypeVar
from sqlalchemy import create_engine
from sqlalchemy.engine.base import Engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session, scoped_session, sessionmaker
# some constants for database fields
HASH_FIELD_LENGTH: int = 32
NAME_OR_TYPE_FIELD_LENGTH: int = 100
LONG_STRING_FIELD_LENGTH: int = 255
JSON_FIELD_LENGTH: int = 4096
# By default, Text gets mapped to a TEXT field in MySQL, whose maximum length
# is 2^16 - 1 bytes. If MEDIUMTEXT or LONGTEXT is needed in the MySQL db, use
# Text(MEDIUMTEXT_BYTES) or Text(LONGTEXT_BYTES) instead. This is preferable to
# using MEDIUMTEXT and LONGTEXT directly because those are incompatible with
# the SQLite that is used in unit tests.
MEDIUMTEXT_BYTES: int = 2 ** 24 - 1
LONGTEXT_BYTES: int = 2 ** 32 - 1
# global database variables
SESSION_FACTORY: Optional[Session] = None
# set this to false to prevent SQLAlchemy for automatically expiring objects
# on commit, which essentially makes them unusable outside of a session
# see e.g. https://stackoverflow.com/a/50272761
EXPIRE_ON_COMMIT = False
T = TypeVar("T")
class SQABase:
"""Metaclass for SQLAlchemy classes corresponding to core Ax classes."""
pass
Base = declarative_base(cls=SQABase)
def create_mysql_engine_from_creator(
creator: Callable, echo: bool = False, pool_recycle: int = 10, **kwargs: Any
) -> Engine:
"""Create a SQLAlchemy engine with the MySQL dialect given a creator function.
Args:
creator: a callable which returns a DBAPI connection.
echo: if True, set engine to be verbose.
pool_recycle: number of seconds after which to recycle
connections. -1 means no timeout. Default is 10 seconds.
**kwargs: keyword args passed to `create_engine`
Returns:
Engine: SQLAlchemy engine with connection to MySQL DB.
"""
return create_engine(
"mysql://", creator=creator, pool_recycle=pool_recycle, echo=echo, **kwargs
)
def create_mysql_engine_from_url(
url: str, echo: bool = False, pool_recycle: int = 10, **kwargs: Any
) -> Engine:
"""Create a SQLAlchemy engine with the MySQL dialect given a database url.
Args:
url: a database url that can include username, password, hostname, database name
as well as optional keyword arguments for additional configuration.
e.g. `dialect+driver://username:password@host:port/database`.
echo: if True, set engine to be verbose.
pool_recycle: number of seconds after which to recycle
connections. -1 means no timeout. Default is 10 seconds.
**kwargs: keyword args passed to `create_engine`
Returns:
Engine: SQLAlchemy engine with connection to MySQL DB.
"""
return create_engine(url, pool_recycle=pool_recycle, echo=echo, **kwargs)
def create_test_engine(path: Optional[str] = None, echo: bool = True) -> Engine:
"""Creates a SQLAlchemy engine object for use in unit tests.
Args:
path: if None, use in-memory SQLite; else
attempt to create a SQLite DB in the path provided.
echo: if True, set engine to be verbose.
Returns:
Engine: an instance of SQLAlchemy engine.
"""
if path is None:
db_path = "sqlite://"
else:
db_path = "sqlite:///{path}".format(path=path)
return create_engine(db_path, echo=echo)
def init_engine_and_session_factory(
url: Optional[str] = None,
creator: Optional[Callable] = None,
echo: bool = False,
force_init: bool = False,
**kwargs: Any,
) -> None:
"""Initialize the global engine and SESSION_FACTORY for SQLAlchemy.
The initialization needs to only happen once. Note that it is possible to
re-initialize the engine by setting the `force_init` flag to True, but this
should only be used if you are absolutely certain that you know what you
are doing.
Args:
url: a database url that can include username, password, hostname, database name
as well as optional keyword arguments for additional configuration.
e.g. `dialect+driver://username:password@host:port/database`.
Either this argument or `creator` argument must be specified.
creator: a callable which returns a DBAPI connection.
Either this argument or `url` argument must be specified.
echo: if True, logging for engine is enabled.
force_init: if True, allows re-initializing engine
and session factory.
**kwargs: keyword arguments passed to `create_mysql_engine_from_creator`
"""
global SESSION_FACTORY
if SESSION_FACTORY is not None:
if force_init:
# pyre-fixme[16]: `Optional` has no attribute `bind`.
SESSION_FACTORY.bind.dispose()
else:
return # pragma: no cover
if url is not None:
engine = create_mysql_engine_from_url(url=url, echo=echo, **kwargs)
elif creator is not None:
engine = create_mysql_engine_from_creator(creator=creator, echo=echo, **kwargs)
else:
raise ValueError("Must specify either `url` or `creator`.") # pragma: no cover
SESSION_FACTORY = scoped_session(
sessionmaker(bind=engine, expire_on_commit=EXPIRE_ON_COMMIT)
)
def init_test_engine_and_session_factory(
tier_or_path: Optional[str] = None,
echo: bool = False,
force_init: bool = False,
**kwargs: Any,
) -> None:
"""Initialize the global engine and SESSION_FACTORY for SQLAlchemy,
using an in-memory SQLite database.
The initialization needs to only happen once. Note that it is possible to
re-initialize the engine by setting the `force_init` flag to True, but this
should only be used if you are absolutely certain that you know what you
are doing.
Args:
tier_or_path: the name of the DB tier.
echo: if True, logging for engine is enabled.
force_init: if True, allows re-initializing engine
and session factory.
**kwargs: keyword arguments passed to `create_mysql_engine_from_creator`
"""
global SESSION_FACTORY
if SESSION_FACTORY is not None:
if force_init:
# pyre-fixme[16]: `Optional` has no attribute `bind`.
SESSION_FACTORY.bind.dispose()
else:
return
engine = create_test_engine(path=tier_or_path, echo=echo)
create_all_tables(engine)
SESSION_FACTORY = scoped_session(
sessionmaker(bind=engine, expire_on_commit=EXPIRE_ON_COMMIT)
)
def remove_test_db_file(tier_or_path: str) -> None:
"""Remove the test DB file from system, useful for cleanup in tests."""
remove_file(tier_or_path)
def create_all_tables(engine: Engine) -> None:
"""Create all tables that inherit from Base.
Args:
engine: a SQLAlchemy engine with a connection to a MySQL
or SQLite DB.
Note:
In order for all tables to be correctly created, all modules that
define a mapped class that inherits from `Base` must be imported.
"""
if (
engine.dialect.name == "mysql"
and engine.dialect.default_schema_name == "adaptive_experiment"
):
raise Exception("Cannot mutate tables in XDB. Use AOSC.") # pragma: no cover
Base.metadata.create_all(engine)
def get_session() -> Session:
"""Fetch a SQLAlchemy session with a connection to a DB.
Unless `init_engine_and_session_factory` is called first with custom
args, this will automatically initialize a connection to
`xdb.adaptive_experiment`.
Returns:
Session: an instance of a SQLAlchemy session.
"""
global SESSION_FACTORY
if SESSION_FACTORY is None:
init_engine_and_session_factory() # pragma: no cover
assert SESSION_FACTORY is not None
# pyre-fixme[29]: `Session` is not a function.
return SESSION_FACTORY()
def get_engine() -> Engine:
"""Fetch a SQLAlchemy engine, if already initialized.
If not initialized, need to either call `init_engine_and_session_factory` or
`get_session` explicitly.
Returns:
Engine: an instance of a SQLAlchemy engine with a connection to a DB.
"""
global SESSION_FACTORY
if SESSION_FACTORY is None:
raise ValueError("Engine must be initialized first.") # pragma: no cover
# pyre-fixme[16]: `Optional` has no attribute `bind`.
return SESSION_FACTORY.bind
@contextmanager
def session_scope() -> Generator[Session, None, None]:
"""Provide a transactional scope around a series of operations."""
session = get_session()
try:
yield session
session.commit()
except Exception: # pragma: no cover
session.rollback() # pragma: no cover
raise # pragma: no cover
finally:
session.close()
def optional_session_scope(
session: Optional[Session] = None,
) -> ContextManager[Session]:
if session is not None:
return nullcontext(session)
return session_scope()
``` |
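A minimal sketch of how the helpers above fit together, assuming it runs in (or imports the helpers from) this module; it uses the in-memory SQLite engine intended for tests.
```python
# Hypothetical usage of the helpers above: create the in-memory test engine,
# then run a trivial statement inside a transactional session scope.
from sqlalchemy import text

init_test_engine_and_session_factory()   # in-memory SQLite; also creates all tables
print(get_engine().dialect.name)         # expected to print "sqlite"

with session_scope() as session:
    session.execute(text("SELECT 1"))    # any ORM work would go here
```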
{
"source": "josalhor/WebModels",
"score": 2
} |
#### File: WebModels/todo/forms.py
```python
from django import forms
from django.contrib.auth.models import Group
from django.contrib.auth import forms as auth_forms
from django.forms import ModelForm
from todo.models import CreditCardInfo, Task, Book, UserInfo, Editor, Designer, PublishedBook, Reader
class AddBookForm(ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["thematic"].queryset = Book.THEMATIC
self.fields["thematic"].widget.attrs = {
"id": "id_thematic",
"class": "custom-select mb-3",
"name": "thematic",
}
class Meta:
model = Book
exclude = ["created_date", "slug", "author", "editor", "completed", "file"]
class AddEditTaskForm(ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["task_type"].queryset = Task.TYPES_OF_TASK_CHOICES
self.fields["task_type"].widget.attrs = {
"id": "id_task_type",
"class": "custom-select mb-3",
"name": "task_type",
}
self.fields["book"].value = kwargs["initial"]["book"].id
due_date = forms.DateField(widget=forms.DateInput(attrs={"type": "date"}), required=False)
title = forms.CharField(widget=forms.widgets.TextInput())
note = forms.CharField(widget=forms.Textarea(), required=False)
def clean_created_by(self):
return self.instance.created_by
class Meta:
model = Task
exclude = ["assigned_to", "created_date"]
class AddExternalBookForm(ModelForm):
class Meta:
model = UserInfo
exclude = (
"user",
)
class PublishedBookForm(ModelForm):
class Meta:
model = PublishedBook
exclude = (
"book", "author_text", "disabled"
)
class SearchForm(forms.Form):
q = forms.CharField(widget=forms.widgets.TextInput(attrs={"size": 35}))
class AssignForm(forms.Form):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["editor"].widget.attrs = {
#"id": "id_thematic",
"class": "custom-select mb-3",
#"name": "thematic",
}
self.fields["editor"].label = ""
editor = forms.ModelChoiceField(queryset=Editor.objects.all())
class AssignFormDesigner(forms.Form):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["designer"].widget.attrs = {
#"id": "id_thematic",
"class": "custom-select mb-3",
#"name": "thematic",
}
self.fields["designer"].label = ""
designer = forms.ModelChoiceField(queryset=Designer.objects.all())
class PaymentSubscriptionForm(ModelForm):
class Meta:
model = CreditCardInfo
exclude = []
class profileForm(auth_forms.UserChangeForm):
class Meta:
model = UserInfo
fields = (
'full_name',
'user',
)
```
#### File: WebModels/todo/models.py
```python
from __future__ import unicode_literals
import datetime
import os
import textwrap
from django.conf import settings
from django.contrib.auth.models import Group
from django.contrib.auth import models as auth_models
from cuser.models import AbstractCUser
from django.db import DEFAULT_DB_ALIAS, models
from django.db.transaction import Atomic, get_connection
from django.urls import reverse
from django.utils import timezone
from datetime import date
from django import forms
from django.utils.text import slugify
from abc import ABC
import uuid
class LockedAtomicTransaction(Atomic):
"""
modified from https://stackoverflow.com/a/41831049
this is needed for safely merging
Does a atomic transaction, but also locks the entire table for any transactions, for the duration of this
transaction. Although this is the only way to avoid concurrency issues in certain situations, it should be used with
caution, since it has impacts on performance, for obvious reasons...
"""
def __init__(self, *models, using=None, savepoint=None):
if using is None:
using = DEFAULT_DB_ALIAS
super().__init__(using, savepoint)
self.models = models
def __enter__(self):
super(LockedAtomicTransaction, self).__enter__()
# Make sure not to lock, when sqlite is used, or you'll run into problems while running tests!!!
if settings.DATABASES[self.using]["ENGINE"] != "django.db.backends.sqlite3":
cursor = None
try:
cursor = get_connection(self.using).cursor()
for model in self.models:
cursor.execute(
"LOCK TABLE {table_name}".format(table_name=model._meta.db_table)
)
finally:
if cursor and not cursor.closed:
cursor.close()
# Does this look repetitive to you?
# Then great! It looks repetitive to me too!
# The way Django works, this way of building the models
# is better for form handling.
class UserInfo(models.Model):
full_name = models.CharField(max_length=150)
user = models.OneToOneField(
settings.AUTH_USER_MODEL,
primary_key=True,
null=False,
on_delete=models.CASCADE,
related_name='user_info'
)
reset_unique_id = models.UUIDField(default=uuid.uuid4, editable=False, unique=True)
used_reset = models.BooleanField(default=False)
def __str__(self):
return str(self.full_name)
# This should be an ABC, but that interferes
# with the model's metaclass
class UserRole(models.Model):
def __str__(self):
return str(self.user.user_info)
class Writer(UserRole):
user = models.OneToOneField(
settings.AUTH_USER_MODEL,
primary_key=True,
null=False,
on_delete=models.CASCADE
)
class Editor(UserRole):
user = models.OneToOneField(
settings.AUTH_USER_MODEL,
primary_key=True,
null=False,
on_delete=models.CASCADE
)
chief = models.BooleanField(default=False)
# These classes are not to be used in the first iteration of
# the app. However, modeling them helps us make better design decisions
# and prepare generic code :D
class Designer(UserRole):
user = models.OneToOneField(
settings.AUTH_USER_MODEL,
primary_key=True,
null=False,
on_delete=models.CASCADE
)
chief = models.BooleanField(default=False)
class Management(UserRole):
user = models.OneToOneField(
settings.AUTH_USER_MODEL,
primary_key=True,
null=False,
on_delete=models.CASCADE
)
class CreditCardInfo(models.Model):
card_number = models.PositiveIntegerField(blank=False)
card_holder = models.TextField(blank=False)
expiration_date = models.DateField(blank=False)
card_cvv = models.PositiveIntegerField(blank=False)
class Reader(UserRole):
user = models.OneToOneField(
settings.AUTH_USER_MODEL,
primary_key=True,
null=False,
on_delete=models.CASCADE
)
subscribed = models.BooleanField(default=False)
credit_card = models.OneToOneField(
CreditCardInfo,
on_delete=models.CASCADE,
null=True,
blank=True
)
def __str__(self):
return f'Reader {self.pk}'
class Book(models.Model):
name = models.CharField(max_length=80)
slug = models.SlugField(default="", unique=True)
author = models.ForeignKey(Writer, on_delete=models.RESTRICT, related_name='book_author')
editor = models.ForeignKey(Editor, null=True, blank=True, on_delete=models.CASCADE, related_name='book_editor')
    # This attribute is technically redundant:
    # In practice it is a performance improvement and improves legibility
presentation_date = models.DateField(auto_now_add=True)
completed = models.BooleanField(default=False)
rejected = models.BooleanField(default=False)
description = models.TextField(blank=True)
# file can bee null for debugging purposes
file = models.FileField(upload_to="books/attachments", max_length=255, null=True, blank=True)
TYPE_SCARE = 'S'
TYPE_ADVENTURE = 'A'
TYPE_FANTASY = 'F'
THEMATIC = [
(TYPE_SCARE, 'Miedo'),
(TYPE_ADVENTURE, 'Aventura'),
(TYPE_FANTASY, 'Fantasía'),
]
thematic = models.CharField(
max_length=2,
choices=THEMATIC,
)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.name, allow_unicode=True)
super(Book, self).save(*args, **kwargs)
class Meta:
ordering = ["name"]
verbose_name_plural = "Books"
class PublishedBook(models.Model):
book = models.OneToOneField(
Book,
null=False,
blank=False,
related_name="published_book",
on_delete=models.CASCADE,
)
title = models.CharField(max_length=80)
publication_date = models.DateField(auto_now_add=True)
disabled = models.BooleanField(default=False)
author_text = models.TextField()
final_version = models.FileField(upload_to="books/attachments", max_length=255, null=True, blank=True)
final_version_epub = models.FileField(upload_to="books/attachments", max_length=255, null=True, blank=True)
related_image = models.ImageField(upload_to="books/attachments", null=True, blank=True)
class Task(models.Model):
WRITING = 'E'
ILLUSTRATION = 'I'
LAYOUT = 'M'
REVISION = 'R'
TYPES_OF_TASK_CHOICES = [
(WRITING, 'Escritura'),
(ILLUSTRATION, 'Ilustración'),
(LAYOUT, 'Maquetación'),
(REVISION, 'Revisión final'),
]
title = models.CharField(max_length=140)
book = models.ForeignKey(Book, on_delete=models.CASCADE, null=True)
created_date = models.DateField(auto_now_add=True)
due_date = models.DateField(blank=True, null=True)
completed = models.BooleanField(default=False)
notified_due_date = models.BooleanField(default=False)
completed_date = models.DateField(blank=True, null=True)
task_type = models.CharField(
max_length=2,
choices=TYPES_OF_TASK_CHOICES,
default=WRITING,
)
created_by = models.ForeignKey(
Editor,
null=True,
blank=True,
related_name="todo_created_by",
on_delete=models.CASCADE,
)
assigned_to = models.ForeignKey(
UserInfo,
blank=True,
null=True,
related_name="todo_assigned_to",
on_delete=models.CASCADE,
)
description = models.TextField(default="")
priority = models.PositiveIntegerField(blank=True, null=True)
# Has due date for an instance of this object passed?
def overdue_status(self):
"Returns whether the Tasks's due date has passed or not."
if self.due_date and datetime.date.today() > self.due_date:
return True
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("todo:task_detail", kwargs={"task_id": self.id})
# Auto-set the Task creation / completed date
def save(self, **kwargs):
# If Task is being marked complete, set the completed_date
if self.completed:
self.completed_date = datetime.datetime.now()
super(Task, self).save()
def merge_into(self, merge_target):
if merge_target.pk == self.pk:
raise ValueError("can't merge a task with self")
# lock the comments to avoid concurrent additions of comments after the
# update request. these comments would be irremediably lost because of
# the cascade clause
with LockedAtomicTransaction(Comment):
Comment.objects.filter(task=self).update(task=merge_target)
self.delete()
class Meta:
ordering = ["priority", "created_date"]
class Comment(models.Model):
author = models.ForeignKey(UserInfo, on_delete=models.CASCADE)
task = models.ForeignKey(Task, on_delete=models.CASCADE)
date = models.DateTimeField(auto_now_add=True)
body = models.TextField(default="")
@property
def author_text(self):
return str(self.author)
@property
def snippet(self):
body_snippet = textwrap.shorten(self.body, width=35, placeholder="...")
# Define here rather than in __str__ so we can use it in the admin list_display
return "{author} - {snippet}...".format(author=self.author_text, snippet=body_snippet)
def __str__(self):
return self.snippet
class Attachment(models.Model):
task = models.ForeignKey(Task, on_delete=models.CASCADE)
added_by = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
timestamp = models.DateTimeField(auto_now_add=True)
file = models.FileField(upload_to="tasks/attachments", max_length=255)
def filename(self):
return os.path.basename(self.file.name)
def extension(self):
name, extension = os.path.splitext(self.file.name)
return extension
def __str__(self):
return f"{self.task.id} - {self.file.name}"
```
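A hypothetical Django shell sketch of the slug auto-generation in Book.save(); the user-creation call depends on the project's custom user model and is an assumption.
```python
# Hypothetical Django shell sketch of Book.save() slug generation above.
# The create_user signature depends on the project's custom user model.
from django.contrib.auth import get_user_model
from todo.models import Writer, Book

User = get_user_model()
user = User.objects.create_user("ana@example.com", password="not-a-real-password")
writer = Writer.objects.create(user=user)
book = Book.objects.create(
    name="Mi primera novela",
    author=writer,
    thematic=Book.TYPE_FANTASY,
)
print(book.slug)   # slugified from the name, e.g. "mi-primera-novela"
```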
#### File: todo/views/activate_book.py
```python
from todo.templatetags.todo_tags import is_management
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib import messages
from django.http import HttpResponse
from django.shortcuts import redirect
from django.contrib.auth import get_user_model
from todo.models import UserInfo, PublishedBook
@login_required
@user_passes_test(is_management)
def activate_book(request):
if request.method == "POST":
book = PublishedBook.objects.filter(pk=request.POST['activate-book']).first()
book.disabled = False
book.save()
messages.success(request, "El libro '{}' ha sido activado correctamente.".format(book.title))
return redirect("todo:books_management")
```
#### File: todo/views/book_assign.py
```python
import datetime
import os
import bleach
from django import forms
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required, user_passes_test
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse
from django.contrib.sites.models import Site
from django.core.mail import send_mail
from django.template.loader import render_to_string
from todo.defaults import defaults
from todo.models import Attachment, Comment, Book, Editor, UserInfo
from todo.forms import AssignForm
def send_email_reject_book(book, reasons):
email_body = render_to_string(
"todo/email/rejected_book.txt", {"book": book, "reasons": reasons}
)
send_mail(
"Libro rechazado",
email_body,
None,
[book.author.user.email],
fail_silently=False,
)
@login_required
def book_assign(request, book_id: int) -> HttpResponse:
book = get_object_or_404(Book, pk=book_id)
thematic = dict(Book.THEMATIC)[book.thematic]
user_email = request.user
editor = Editor.objects.filter(user=user_email).first()
editor_view = editor != None
if book.editor != None or (editor_view and not editor.chief):
raise PermissionDenied
if request.POST:
print(request.POST)
form = AssignForm(request.POST)
if form.is_valid():
book.editor = Editor.objects.filter(id=request.POST['editor']).first()
book.save()
messages.success(request, "La propuesta de edición ha sido correctamente asignada.")
editor_user_info = UserInfo.objects.filter(user=book.editor.user).first()
author_user_info = UserInfo.objects.filter(user=book.author.user).first()
email_body = render_to_string(
"todo/email/assigned_book.txt", {"site": Site.objects.get_current().domain, "book": book, "editor": editor_user_info, "author": author_user_info}
)
send_mail(
"Libro asignado",
email_body,
None,
[book.editor.user.email, book.author.user.email],
fail_silently=False,
)
else:
messages.success(request, "La propuesta de edición ha sido correctamente rechazada.")
book.rejected = True
book.save()
send_email_reject_book(book, reasons=request.POST['reasons'])
return redirect("todo:accepted_petitions")
context = {
'editor_view': editor_view,
'thematic': thematic,
'editor_user': editor,
'description': book.description,
'book': book,
'assign_form': AssignForm()
}
return render(request, "todo/book_assign.html", context)
```
#### File: todo/views/users_detail.py
```python
from todo.templatetags.todo_tags import is_management
from django.contrib.auth.decorators import login_required, user_passes_test
from django.http import HttpResponse
from django.shortcuts import render
from todo.models import Designer, Management, Writer, Editor
@login_required
@user_passes_test(is_management)
def users_detail(request, list_slug=None) -> HttpResponse:
# Which users to show on this list view?
if list_slug == "editors":
users = Editor.objects.all()
elif list_slug == "designers":
users = Designer.objects.all()
elif list_slug == "writers":
users = Writer.objects.all()
elif list_slug == "management":
users = Management.objects.all()
# Additional filtering
active_users = users.filter(user__is_active=True)
unactive_users = users.filter(user__is_active=False)
# ######################
# Add New User Form
# ######################
context = {
"list_slug": list_slug,
"active_users": active_users,
"unactive_users": unactive_users,
"users": users,
}
return render(request, "todo/users_detail.html", context)
``` |
{
"source": "josalinas/ppiclF",
"score": 2
} |
#### File: short_tests/lib/ppiclfBinBuild.py
```python
import os
import sys
from subprocess import call, check_call, Popen, PIPE, STDOUT
def build_ppiclf(source_root, cwd=None, opts=None, verbose=False):
if not opts:
_opts = {}
else:
_opts = opts.copy()
_opts.update(SOURCE_ROOT=source_root)
print('Compiling ppiclf...')
print(' Using working directory "{0}"'.format(cwd))
    for key, val in _opts.items():
print(' Using {0}="{1}"'.format(key, val))
my_env = os.environ.copy()
if source_root : my_env["SOURCE_ROOT"] = source_root
if _opts.get('F77') : my_env["FC"] = _opts.get('F77')
if _opts.get('CC') : my_env["CC"] = _opts.get('CC')
if _opts.get('PPLIST') : my_env["PPLIST"] = _opts.get('PPLIST')
logfile = os.path.join(cwd, 'build.log')
# copy F-File
proc = Popen(
['cp',os.path.join(format(cwd),'user_routines/ppiclf_user.f'),os.path.join(source_root, 'source', 'ppiclf_user.f')],
cwd=cwd,
env=my_env,
stdin=PIPE)
proc.wait()
# copy H-File
proc = Popen(
['cp',os.path.join(format(cwd),'user_routines/PPICLF_USER.h'),os.path.join(source_root, 'source', 'PPICLF_USER.h')],
cwd=cwd,
env=my_env,
stdin=PIPE)
proc.wait()
# Clean ppiclF library
proc = Popen(
['make','clean'],
cwd=source_root,
env=my_env,
stdin=PIPE)
proc.wait()
# Make ppiclF library
proc = Popen(
'make',
cwd=source_root,
env=my_env,
stdin=PIPE,
stderr=STDOUT)
proc.wait()
# Clean example case
proc = Popen(
['make','clean'],
cwd=cwd,
env=my_env,
stdin=PIPE,
stderr=STDOUT)
proc.wait()
# Make example case
proc = Popen(
'make',
cwd=cwd,
env=my_env,
stdin=PIPE,
stderr=STDOUT)
proc.wait()
if proc.returncode != 0:
f = open(logfile, "r")
text = f.read()
        print(text)
f.close()
exit(-1)
```
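A hypothetical call into build_ppiclf; the paths are illustrative and the opts keys mirror the ones the function reads.
```python
# Hypothetical call into build_ppiclf above; paths are illustrative and the
# opts keys (F77, CC, PPLIST) mirror what the function reads from _opts.
from lib.ppiclfBinBuild import build_ppiclf

build_ppiclf(
    source_root="/home/user/ppiclF",
    cwd="/home/user/ppiclF/short_tests/stokes_2d",
    opts={"F77": "mpif77", "CC": "mpicc"},
)
```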
#### File: short_tests/lib/ppiclfTestCase.py
```python
import unittest
import inspect
import os
from functools import wraps
###############################################################################
# DECORATORS
###############################################################################
def Parallel(method):
@wraps(method)
def wrapper(self, *args, **kwargs):
self.mpi_procs = self.parallel_procs
if not self.ifmpi:
self.skipTest("Skipping \"{0}\"; MPI is not enabled.".format(self.id()))
else:
self.log_suffix = '.general'
if self.ifmpi:
self.log_suffix += '.parallel'
else:
self.log_suffix += '.serial'
method(self, *args, **kwargs)
return wrapper
###############################################################################
# BASE TEST CASE
###############################################################################
class ppiclfTestCase(unittest.TestCase):
""" Base class for ppiclf unittests
This defines a setUpClass method to:
(a) get the relevant environment variables for compilers, directories
All subclassed TestCases will need to do these things.
Class attributes:
f77 (str): The Fortran 77 compiler to use [default: 'gfortran']
cc (str): The C compiler to use [default: 'gcc']
ifmpi (bool): Perform compilation/tests with MPI [default: False]
source_root (str): Path to Nek source directory;overridden by $NEK_SOURCE_ROOT env variable
[default: '$HOME/nek5_svn/trunk/nek']
tools_root (str): Path to Nek tools directory; overridden by $TOOLS_ROOT env variable
[default: '$HOME/nek5_svn/trunk/tools']
examples_root (str): Path to Nek examples directory; overridden by $EXAMPLES_ROOT env variable
[default: '$HOME/nek5_svn/examples']
makenek (str): Path to makenek [default: source_root/makenek]
tools_bin (str): Directory to place compiled tools [default: tools_root/bin]
Subclass attributes:
These aren't meaningful in the base class. They're intended for a subclass that represents
a particular example problem.
example_subdir (str): The subdirectory for the subclass' example. Assumed that it's in example_root
rea_file (str): The .rea file for the subclass' example, minus the '.rea' extension. Assumed
that it's in example_root/example_dir
size_file (str): The SIZE file for the subclass' example. Assuemed that it's in
example_root/example_subdir
"""
# Defined in subclasses only; declared here to make syntax checker happy
example_subdir = ""
case_name = ""
def __init__(self, *args, **kwargs):
# These can be overridden by self.get_opts
self.f77 = "mpif77"
self.cc = "mpicc"
self.pplist = ""
self.usr_lflags = ""
self.ifmpi = True
self.source_root = os.path.dirname(os.path.dirname(inspect.getabsfile(self.__class__)))
self.examples_root = os.path.dirname(inspect.getabsfile(self.__class__))
self.make = os.path.join(self.source_root, 'Makefile')
self.log_root = ""
self.verbose = True
self.serial_procs = 1
self.parallel_procs = 2
self.size_params = {}
# These are overridden by method decorators (Parallel, ..)
self.log_suffix = ""
self.mpi_procs = None
# Empy list of delayed fails
self._delayed_failures = []
self.get_opts()
unittest.TestCase.__init__(self, *args, **kwargs)
def assertAlmostEqualDelayed(self, test_val, target_val, delta, label):
if abs(test_val-target_val) <= delta:
msg = ' SUCCESS: {0}: Test value {1} equals target value {2} +/- {3}'.format(label, test_val, target_val, delta)
else:
msg = ' FAILURE: {0}: Test value {1} exceeds target value {2} +/- {3}'.format(label, test_val, target_val, delta)
self._delayed_failures.append(msg)
print(msg)
def assertIsNotNullDelayed(self, test_val, label):
if test_val:
msg = 'SUCCESS: Found phrase "{0}" in logfile.'.format(label)
else:
msg = 'FAILURE: Unexpectedly did not find phrase "{0}" in logfile'.format(label)
self._delayed_failures.append(msg)
print(msg)
def assertIsNullDelayed(self, test_val, label):
if test_val:
msg = 'FAILURE: Found phrase "{0}" in logfile.'.format(label)
self._delayed_failures.append(msg)
else:
msg = 'SUCCESS: Did not find phrase "{0}" in logfile'.format(label)
print(msg)
def assertDelayedFailures(self):
if self._delayed_failures:
report = [
'\n\nFailed assertions:{0}\n'.format(len(self._delayed_failures))
]
for i,failure in enumerate(self._delayed_failures, start=1):
report.append('{0}: {1}'.format(i, failure))
#self._delayed_failures = []
self.fail('\n'.join(report))
def get_opts(self):
print("Getting setup options...")
# Get compiler options from env
self.f77 = os.environ.get('FC', self.f77)
self.cc = os.environ.get('CC', self.cc)
self.pplist = os.environ.get('PPLIST', self.pplist)
self.usr_lflags = os.environ.get('USR_LFLAGS', self.usr_lflags)
self.ifmpi = os.environ.get('MPI', self.ifmpi)
# Get paths from env
try:
self.source_root = os.path.abspath(os.environ['SOURCE_ROOT'])
except KeyError:
pass
else:
self.make = os.path.join(self.source_root, 'Makefile')
self.examples_root = os.path.abspath(os.environ.get('EXAMPLES_ROOT', self.examples_root))
try:
self.log_root = os.path.abspath(os.environ['LOG_ROOT'])
except KeyError:
pass
self.verbose = str(os.environ.get('VERBOSE_TESTS', self.verbose)).lower() == 'true'
self.parallel_procs = int(os.environ.get('PARALLEL_PROCS', self.parallel_procs))
# Print everything out
for varname, varval in (
('FC', self.f77),
('CC', self.cc),
('PPLIST', self.pplist),
('USR_LFLAGS', self.usr_lflags),
('IFMPI', self.ifmpi),
('SOURCE_ROOT', self.source_root),
('EXAMPLES_ROOT', self.examples_root),
('LOG_ROOT', self.log_root),
('VERBOSE_TESTS', self.verbose),
('PARALLEL_PROCS', self.parallel_procs)
):
if varval:
print(' Using {0:14} = "{1}"'.format(varname, varval))
# Verify that pathnames are valid
for varname, varval in (
('SOURCE_ROOT', self.source_root),
('EXAMPLES_ROOT', self.examples_root),
('LOG_ROOT', self.log_root),
):
if varval and not os.path.isdir(varval):
                raise OSError('The {0} directory "{1}" does not exist. Please set the env variable ${0} to a valid directory.'.format(varname, varval))
print("Finished getting setup options!")
def build_ppiclf(self, opts=None):
from lib.ppiclfBinBuild import build_ppiclf
cls = self.__class__
all_opts = dict(
FC = self.f77,
CC = self.cc,
PPLIST = self.pplist,
USR_LFLAGS = self.usr_lflags,
MPI = int(self.ifmpi),
)
if opts:
all_opts.update(opts)
build_ppiclf(
source_root = self.source_root,
cwd = os.path.join(self.examples_root, cls.example_subdir),
opts = all_opts,
verbose = self.verbose,
)
def run_ppiclf(self, rea_file=None):
from lib.ppiclfBinRun import run_ppiclf
cls = self.__class__
run_ppiclf(
cwd = os.path.join(self.examples_root, cls.example_subdir),
rea_file = cls.case_name if not rea_file else rea_file,
ifmpi = self.ifmpi,
log_suffix = self.log_suffix,
n_procs = self.mpi_procs,
verbose = self.verbose
)
def get_value_from_log(self, label, column, row=0, logfile=None):
cls = self.__class__
if not logfile:
logfile = os.path.join(
self.examples_root,
cls.example_subdir,
'{0}.log.{1}{2}'.format(cls.case_name, self.mpi_procs, self.log_suffix)
)
# Get all lines with label
with open(logfile, 'r') as f:
line_list = [l for l in f if label in l]
if not line_list:
raise ValueError("Could not find label \"{0}\" in logfile \"{1}\". The run may have failed.".format(label, logfile))
try:
value = float(line_list[row].split()[column])
except ValueError:
raise ValueError("Attempted to parse non-numerical value in logfile, \"{0}\". Logfile may be malformatted or run may have failed".format(logfile))
except IndexError:
raise IndexError("Fewer rows and/or columns than expected in logfile, \"{0}\". Logfile may be malformmated or run may have failed.".format(logfile))
else:
return value
def get_phrase_from_log(self, label, logfile=None, row=0):
cls = self.__class__
if not logfile:
logfile = os.path.join(
self.examples_root,
cls.example_subdir,
'{0}.log.{1}{2}'.format(cls.case_name, self.mpi_procs, self.log_suffix)
)
with open(logfile, 'r') as f:
line_list = [l for l in f if label in l]
try:
line = line_list[row]
except IndexError:
return None
else:
return line
``` |
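A hypothetical concrete test case built on ppiclfTestCase; the example subdirectory, case name, log label and tolerance are all illustrative.
```python
# Hypothetical concrete test case built on ppiclfTestCase above; the example
# subdirectory, case name, log label and tolerance are illustrative only.
from lib.ppiclfTestCase import ppiclfTestCase, Parallel

class StokesParticle(ppiclfTestCase):
    example_subdir = "stokes_2d"
    case_name = "stokes_2d"

    @Parallel
    def test_final_position(self):
        self.build_ppiclf()
        self.run_ppiclf()
        ypos = self.get_value_from_log("max y position", column=2)
        self.assertAlmostEqualDelayed(ypos, target_val=1.0, delta=1e-2,
                                      label="max y position")
        self.assertDelayedFailures()
```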
{
"source": "josamuna/codepo-backend",
"score": 2
} |
#### File: monitor/monitor_engine/serializers.py
```python
from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import User
from rest_framework import serializers
from .models import Device, Command, Measured, DeviceTracking, UserPreference, Notification
class DeviceSerializer(serializers.ModelSerializer):
class Meta:
model = Device
fields = '__all__'
class CommandSerializer(serializers.ModelSerializer):
class Meta:
model = Command
fields = '__all__'
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = (
'id', 'username', 'first_name', 'last_name', 'email', 'password', 'is_active', 'is_superuser', 'is_staff')
def create(self, validated_data):
user = User(
email=validated_data['email'],
username=validated_data['username'],
first_name=validated_data['first_name'],
last_name=validated_data['last_name'],
            password=make_password(validated_data['password']),
is_staff=validated_data['is_staff']
)
user.save()
return user
class FilteredMeasuredSerializer(serializers.ListSerializer):
def to_representation(self, data):
# print("=============================");
mydata = data.latest('time')
data = list()
data.append(mydata)
return super(FilteredMeasuredSerializer, self).to_representation(data)
class MeasuredSerializer(serializers.ModelSerializer):
class Meta:
model = Measured
list_serializer_class = FilteredMeasuredSerializer
fields = '__all__'
class MeasuredSimpleSerializer(serializers.ModelSerializer):
class Meta:
model = Measured
fields = '__all__'
class HistorySerializer(serializers.ModelSerializer):
measureds = MeasuredSerializer(many=True, read_only=True)
class Meta:
model = Device
fields = ['id', 'caseid', 'mode', 'deleted', 'measureds']
class DeviceTrackingSerializer(serializers.ModelSerializer):
class Meta:
model = DeviceTracking
fields = '__all__'
class UserPreferenceSerializer(serializers.ModelSerializer):
class Meta:
model = UserPreference
fields = '__all__'
class NotificationSerializer(serializers.ModelSerializer):
class Meta:
model = Notification
fields = '__all__'
``` |
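A hypothetical DRF usage of UserSerializer; the payload values are illustrative and assume a fresh database.
```python
# Hypothetical use of UserSerializer above; payload values are illustrative.
payload = {
    "username": "jdoe",
    "first_name": "Jane",
    "last_name": "Doe",
    "email": "jdoe@example.com",
    "password": "not-a-real-password",
    "is_staff": False,
}
serializer = UserSerializer(data=payload)
if serializer.is_valid():
    user = serializer.save()     # routes to create(), which hashes the password
    print(user.username)
else:
    print(serializer.errors)
```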
{
"source": "josan82/biometall",
"score": 2
} |
#### File: biometall/tests/conftest.py
```python
import os
TESTPATH = os.path.dirname(os.path.abspath(__file__))
def datapath(path):
return os.path.join(TESTPATH, 'data', path)
```
#### File: biometall/tests/test_chunk_size.py
```python
import pytest
from biometall.modules import grid
testdata = [
(19870,437,1,268435456,9936),
(19870,437,4,268435456,2484),
(19870,437,8,268435456,1325),
(19870,437,1,2147483648,19871),
(19870,437,4,2147483648,4968),
(19870,437,8,2147483648,2484),
(19870,437,1,4294967296,19871),
(19870,437,4,4294967296,4968),
(19870,437,8,4294967296,2484),
]
@pytest.mark.parametrize("len_grid,len_protein,n_cores,optimize_to,chunk_size", testdata)
def test_chunk_size(len_grid, len_protein, n_cores, optimize_to, chunk_size):
chunk_size_result = grid._chunk_size(len_grid, len_protein, n_cores, optimize_to)
assert chunk_size == chunk_size_result
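# Illustrative direct call matching one parametrized row above, with the
# argument order grid._chunk_size(len_grid, len_protein, n_cores, optimize_to):
#   assert grid._chunk_size(19870, 437, 4, 2147483648) == 4968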
```
#### File: biometall/tests/test_grid.py
```python
import pytest
from pytest import approx
import numpy as np
from biometall.modules import grid
# Files: ["4dc8.pdb", "1hnn.pdb", "3rzy.pdb"]
testdata = [
([-4.3311, -6.3593, 15.3793], 29.6255, 107392, [-32.9523, -13.8912, 14.8772], [24.2901, 1.1726, 15.8814]),
([12.8607, 37.3031, 18.2772], 47.2562, 434407, [-34.3955, 37.3031, 18.2772], [60.1168, 37.3031, 18.2772]),
([8.1566, 10.4758, 13.9826], 38.4276, 229489, [-30.2710, 10.4758, 13.9826], [46.5842, 10.4758, 13.9826])
]
@pytest.mark.parametrize("centroids,radii,len_grid,first_point,last_point", testdata)
def test_grid(centroids, radii, len_grid, first_point, last_point):
centroid = np.array(centroids)
points = grid._grid(centroid, radii, 1.0)
assert len(points) == len_grid
assert first_point == approx(points[0], abs=1e-3)
assert last_point == approx(points[-1], abs=1e-3)
``` |
{
"source": "josandmann/BSTIM-Covid19",
"score": 2
} |
#### File: BSTIM-Covid19/src/plot_curves_window.py
```python
import matplotlib
#matplotlib.use('TkAgg')
from config import *
from plot_utils import *
from shared_utils import *
import pickle as pkl
import numpy as np
from collections import OrderedDict
from matplotlib import pyplot as plt
from pymc3.stats import quantiles
# pymc3 (as pm) and timedelta are used further below (pm.trace_to_dataframe,
# timedelta(1)); import them explicitly here in case the wildcard imports
# above do not already provide them.
import pymc3 as pm
from datetime import timedelta
import os
import pandas as pd
from pathlib import Path
# def curves(use_interactions=True, use_report_delay=True, prediction_day=30, save_plot=False):
# Load only one county
def curves(start, county, n_weeks=3, model_i=35, save_plot=False):
with open('../data/counties/counties.pkl', "rb") as f:
counties = pkl.load(f)
start = int(start)
n_weeks = int(n_weeks)
model_i = int(model_i)
# with open('../data/comparison.pkl', "rb") as f:
# best_model = pkl.load(f)
# update to day and new limits!
xlim = (5.5, 15.5)
ylim = (47, 56) # <- 10 weeks
#countyByName = OrderedDict(
# [('Düsseldorf', '05111'), ('Leipzig', '14713'), ('Nürnberg', '09564'), ('München', '09162')])
countyByName = make_county_dict()
    # The selected county is fed in here
plot_county_names = {"covid19": [county]}
start_day = pd.Timestamp('2020-01-28') + pd.Timedelta(days=start)
year = str(start_day)[:4]
month = str(start_day)[5:7]
day = str(start_day)[8:10]
# if os.path.exists("../figures/{}_{}_{}/curve_trend_{}.png".format(year, month, day,countyByName[county])):
# return
day_folder_path = "../figures/{}_{}_{}".format(year, month, day)
Path(day_folder_path).mkdir(parents=True, exist_ok=True)
# check for metadata file:
if not os.path.isfile("../figures/{}_{}_{}/metadata.csv".format(year, month, day)):
ids = []
for key in counties:
ids.append(int(key))
df = pd.DataFrame(data=ids, columns=["countyID"])
df["probText"] = ""
df.to_csv("../figures/{}_{}_{}/metadata.csv".format(year, month, day))
# colors for curves
#red
C1 = "#D55E00"
C2 = "#E69F00"
#C3 = "#0073CF"
#green
C4 = "#188500"
C5 = "#29c706"
#C6 = "#0073CF"
# quantiles we want to plot
qs = [0.25, 0.50, 0.75]
fig = plt.figure(figsize=(12, 6))
grid = plt.GridSpec(
1,
1,
top=0.9,
bottom=0.2,
left=0.07,
right=0.97,
hspace=0.25,
wspace=0.15,
)
# for i, disease in enumerate(diseases):
i = 0
disease = "covid19"
prediction_region = "germany"
data = load_daily_data_n_weeks(start, n_weeks, disease, prediction_region, counties)
start_day = pd.Timestamp('2020-01-28') + pd.Timedelta(days=start)
i_start_day = 0
day_0 = start_day + pd.Timedelta(days=n_weeks*7+5)
day_m5 = day_0 - pd.Timedelta(days=5)
day_p5 = day_0 + pd.Timedelta(days=5)
_, target, _, _ = split_data(
data,
train_start=start_day,
test_start=day_0,
post_test=day_p5)
county_ids = target.columns
county_id = countyByName[county]
### SELECTION CRITERION ###
#if np.count_non_zero(target[county_id]) < 7: #???
# stdd = 10
# gaussian = lambda x: np.exp( (-(x)**2) / (2* stdd**2) )
# Load our prediction samples
res = load_pred_model_window(model_i, start, n_weeks)
res_trend = load_pred_model_window(model_i, start, n_weeks, trend=True)
n_days = (day_p5 - start_day).days
prediction_samples = np.reshape(res['y'], (res['y'].shape[0], -1, 412))
prediction_samples_trend = np.reshape(res_trend['μ'], (res_trend['μ'].shape[0], -1, 412))
prediction_samples = prediction_samples[:,i_start_day:i_start_day+n_days,:]
prediction_samples_trend = prediction_samples_trend[:,i_start_day:i_start_day+n_days,:]
ext_index = pd.DatetimeIndex([d for d in target.index] + \
[d for d in pd.date_range(target.index[-1]+timedelta(1),day_p5-timedelta(1))])
# TODO: figure out where quantiles comes from and if its pymc3, how to replace it
prediction_quantiles = quantiles(prediction_samples, (5, 25, 75, 95))
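    # One possible numpy-only replacement for the pymc3 quantiles() call above
    # (a sketch for the TODO, not wired in); the rows come back in the order of
    # the requested percentiles:
    #   q5, q25, q75, q95 = np.percentile(prediction_samples, [5, 25, 75, 95], axis=0)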
prediction_mean = pd.DataFrame(
data=np.mean(
prediction_samples,
axis=0),
index=ext_index,
columns=target.columns)
prediction_q25 = pd.DataFrame(
data=prediction_quantiles[25],
index=ext_index,
columns=target.columns)
prediction_q75 = pd.DataFrame(
data=prediction_quantiles[75],
index=ext_index,
columns=target.columns)
prediction_q5 = pd.DataFrame(
data=prediction_quantiles[5],
index=ext_index,
columns=target.columns)
prediction_q95 = pd.DataFrame(
data=prediction_quantiles[95],
index=ext_index,
columns=target.columns)
prediction_mean_trend = pd.DataFrame(
data=np.mean(
prediction_samples_trend,
axis=0),
index=ext_index,
columns=target.columns)
# Unnecessary for-loop
for j, name in enumerate(plot_county_names[disease]):
ax = fig.add_subplot(grid[j, i])
county_id = countyByName[name]
dates = [pd.Timestamp(day) for day in ext_index]
days = [ (day - min(dates)).days for day in dates]
# plot our predictions w/ quartiles
p_pred = ax.plot_date(
dates,
prediction_mean[county_id],
"-",
color=C1,
linewidth=2.0,
zorder=4)
# plot our predictions w/ quartiles
p_quant = ax.fill_between(
dates,
prediction_q25[county_id],
prediction_q75[county_id],
facecolor=C2,
alpha=0.5,
zorder=1)
ax.plot_date(
dates,
prediction_q25[county_id],
":",
color=C2,
linewidth=2.0,
zorder=3)
ax.plot_date(
dates,
prediction_q75[county_id],
":",
color=C2,
linewidth=2.0,
zorder=3)
# plot ground truth
p_real = ax.plot_date(dates[:-5], target[county_id], "k.")
print(dates[-5]-pd.Timedelta(12, unit='h'))
# plot 30week marker
ax.axvline(dates[-5]-pd.Timedelta(12,unit='h'),ls='-', lw=2, c='dodgerblue')
ax.axvline(dates[-10]-pd.Timedelta(12,unit='h'),ls='--', lw=2, c='lightskyblue')
ax.set_ylabel("Fallzahlen/Tag nach Meldedatum", fontsize=16)
ax.tick_params(axis="both", direction='out',
size=6, labelsize=16, length=6
)
ticks = [start_day+pd.Timedelta(days=i) for i in [0,5,10,15,20,25,30,35,40]]
labels = ["{}.{}.{}".format(str(d)[8:10], str(d)[5:7], str(d)[:4]) for d in ticks]
plt.xticks(ticks,labels)
#new_ticks = plt.get_xtickslabels()
plt.setp(ax.get_xticklabels()[-4], color="red")
plt.setp(ax.get_xticklabels(), rotation=45)
ax.autoscale(True)
p_quant2 = ax.fill_between(
dates,
prediction_q5[county_id],
prediction_q95[county_id],
facecolor=C2,
alpha=0.25,
zorder=0)
ax.plot_date(dates, prediction_q5[county_id], ":",
color=C2, alpha=0.5, linewidth=2.0, zorder=1)
ax.plot_date(dates, prediction_q95[county_id], ":",
color=C2, alpha=0.5, linewidth=2.0, zorder=1)
# Plot the trend.
'''
p_pred_trend = ax.plot_date(
dates,
prediction_mean_trend[county_id],
"-",
color="green",
linewidth=2.0,
zorder=4)
'''
    # Compute probability of increase/decrease
i_county = county_ids.get_loc(county_id)
trace = load_trace_window(disease, model_i, start, n_weeks)
trend_params = pm.trace_to_dataframe(trace, varnames=["W_t_t"]).values
trend_w2 = np.reshape(trend_params, newshape=(1000,412,2))[:,i_county,1]
prob2 = np.mean(trend_w2>0)
# Set axis limits.
ylimmax = max(3*(target[county_id]).max(),10)
ax.set_ylim([-(1/30)*ylimmax,ylimmax])
ax.set_xlim([start_day,day_p5-pd.Timedelta(days=1)])
if (i == 0) & (j == 0):
ax.legend([p_real[0], p_pred[0], p_quant, p_quant2],
["Daten RKI", "Modell",
"25\%-75\%-Quantil", "5\%-95\%-Quantil"],
fontsize=16, loc="upper left")
# Not perfectly positioned.
print("uheufbhwio")
print(ax.get_xticks()[-5])
print(ax.get_ylim()[1])
pos1 = tuple(ax.transData.transform((ax.get_xticks()[-3], ax.get_ylim()[1])))
pos1 = (ax.get_xticks()[-5], ax.get_ylim()[1])
print(pos1)
fontsize_bluebox = 18
fig.text(ax.get_xticks()[-5]+0.65, ax.get_ylim()[1],"Nowcast",ha="left",va="top",fontsize=fontsize_bluebox,bbox=dict(facecolor='lightskyblue', boxstyle='rarrow'), transform=ax.transData)
# fig.text(pos1[0]/1200, pos1[1]/600,"Nowcast",fontsize=fontsize_bluebox,bbox=dict(facecolor='cornflowerblue'))
fig.text(ax.get_xticks()[-4]+0.65, ax.get_ylim()[1],"Forecast",ha="left", va="top",fontsize=fontsize_bluebox,bbox=dict(facecolor='dodgerblue', boxstyle='rarrow'), transform=ax.transData)
'''
fig.text(0,
1 + 0.025,
r"$\textbf{" + plot_county_names["covid19"][j]+ r"}$",
fontsize=22,
transform=ax.transAxes)
'''
#plt.yticks(ax.get_yticks()[:-1], ax.get_yticklabels()[:-1])
# Store text in csv.
#fontsize_probtext = 14
if prob2 >=0.5:
#fig.text(0.865, 0.685, "Die Fallzahlen \n werden mit einer \n Wahrscheinlichkeit \n von {:2.1f}\% steigen.".format(prob2*100), fontsize=fontsize_probtext,bbox=dict(facecolor='white'))
probText = "Die Fallzahlen werden mit einer Wahrscheinlichkeit von {:2.1f}\% steigen.".format(prob2*100)
else:
probText = "Die Fallzahlen werden mit einer Wahrscheinlichkeit von {:2.1f}\% fallen.".format(100-prob2*100)
#fig.text(0.865, 0.685, "Die Fallzahlen \n werden mit einer \n Wahrscheinlichkeit \n von {:2.1f}\% fallen.".format(100-prob2*100), fontsize=fontsize_probtext ,bbox=dict(facecolor='white'))
print(county_id)
df = pd.read_csv("../figures/{}_{}_{}/metadata.csv".format(year, month, day), index_col=0)
county_ix = df["countyID"][df["countyID"]==int(county_id)].index[0]
if prob2 >=0.5:
probVal = prob2*100
else:
probVal = -(100-prob2*100)
df.iloc[county_ix, 1] = probVal#= probText
df.to_csv("../figures/{}_{}_{}/metadata.csv".format(year, month, day))
print(probVal)
plt.tight_layout()
if save_plot:
year = str(start_day)[:4]
month = str(start_day)[5:7]
day = str(start_day)[8:10]
day_folder_path = "../figures/{}_{}_{}".format(year, month, day)
Path(day_folder_path).mkdir(parents=True, exist_ok=True)
plt.savefig("../figures/{}_{}_{}/curve_{}.png".format(year, month, day,countyByName[county]), dpi=200)
plt.close()
return fig
if __name__ == "__main__":
import sys
start = sys.argv[2]
county = sys.argv[4]
    _ = curves(start, county, save_plot=True)
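    # Note the argv indices above skip positions 1 and 3, i.e. the script is
    # presumably called with flag-style arguments such as
    #   python plot_curves_window.py --start 100 --county 09162
    # (the flag names and values here are illustrative assumptions).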
```
#### File: BSTIM-Covid19/src/produce_plots.py
```python
from plot_curves_window import curves as curves_window
from plot_curves_window_trend import curves as curves_window_trend
from plot_window_germany import curves as germany_map
from plot_interaction_kernel_window import interaction_kernel
from shared_utils import make_county_dict
import os
import subprocess
import concurrent.futures
import pandas as pd
import shutil
# for c in county_dict.keys():
# curves_window(start, c, n_weeks=3, model_i=35, save_plot=True)
# curves_window_trend(start, c, save_plot=True)
workers = 128
def plot_curves(c):
curves_window(start, c, n_weeks=3, model_i=35, save_plot=False)
curves_window_trend(start, c, save_plot=False)
return c
def main():
    # plot_curves() (executed in the worker processes) reads the module-level
    # `start`, so bind it as a global here instead of shadowing it with a
    # local variable; the workers then inherit it when they are forked.
    global start
    start = int(os.environ["SGE_DATE_ID"])
county_dict = make_county_dict()
start_day = pd.Timestamp('2020-01-28') + pd.Timedelta(days=start)
year = str(start_day)[:4]
month = str(start_day)[5:7]
day = str(start_day)[8:10]
figures_path = "/p/project/covid19dynstat/autostart/BSTIM-Covid19_Window_Final/figures/{}_{}_{}/".format(year, month, day)
shared_path = "/p/project/covid19dynstat/shared_assets/figures/{}_{}_{}/".format(year, month, day)
if os.path.isfile(os.path.join(figures_path, 'metadata.csv')):
os.remove(os.path.join(figures_path, 'metadata.csv'))
germany_map(start, save_plot=False)
interaction_kernel(start, save_plot=False)
completed_counties = list()
with concurrent.futures.ProcessPoolExecutor(max_workers=workers) as executor:
for c in executor.map(plot_curves, county_dict.keys()):
completed_counties.append(c)
metadata_date_path = os.path.join(figures_path, "metadata.csv")
metadata_total_path = "/p/project/covid19dynstat/shared_assets/metadata.csv"
metadata_date_df = pd.read_csv(metadata_date_path)
metadata_total_df = pd.read_csv(metadata_total_path)
metadata_date_df_sorted = metadata_total_df.copy()
probText_sorted = []
for key in metadata_total_df["countyID"]:
probText_sorted.append(float(list(metadata_date_df[metadata_date_df["countyID"]==key]["probText"])[0]))
metadata_date_df_sorted["probText"] = probText_sorted
metadata_date_df_sorted.to_csv(metadata_date_path, index=False)
cwdir = r'.'
# crop the images
crop_command = r"find {} -type f -name '*.png' -exec convert {} -trim {} \;".format(figures_path, "{}", "{}")
rm_command = "rm -r {}".format(shared_path)
copy_command = "cp -r {} {}".format(figures_path, shared_path)
returnval = subprocess.run(crop_command, check=False, shell=True, cwd=cwdir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print(returnval.stdout.decode("ascii"))
print(returnval.stderr.decode("ascii"))
shutil.rmtree(shared_path)
shutil.copytree(figures_path, shared_path)
if __name__ == '__main__':
main()
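    # Example invocation (illustrative): the day offset is read from the
    # SGE_DATE_ID environment variable, e.g.
    #   SGE_DATE_ID=100 python produce_plots.py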
``` |
{
"source": "josantosc/JoeBot",
"score": 3
} |
#### File: josantosc/JoeBot/app.py
```python
import json
import pymysql
from flask import Flask
from flask import request
import random
app = Flask(__name__)
@app.route("/", methods=['POST'])
def hello():
    # Connecting to the database
#conexao = pymysql.connect(host='localhost',
# user='root',
# password='<PASSWORD>',
# db='dbchatbot',
# charset='utf8mb4',
# cursorclass=pymysql.cursors.DictCursor)
    # Getting the structure of the JSON request
data = request.get_json(silent=True)
intent = data['queryResult']['intent']['displayName']
nomePaciente = data['queryResult']['outputContexts'][0]['parameters']['nome.original']
    if intent == '2-apresentacao.nome':
        resposta = dict(
            fulfillmentText="Resposta do back-end")
        return json.dumps(resposta)
    # Always return a valid (empty) fulfillment so Flask does not fail on
    # intents that are not handled above.
    return json.dumps(dict(fulfillmentText=""))
#def processRequest(req):
#result = req.get('queryResult')
#intent = result.get('intent').get('displayName')
#outputContexts = result.get('outputContexts')
#parameters = result.get('parameters')
#nomePaciente = parameters.get('nome.original')
#tcc_sim_nao = parameters.get('tcc_sim_nao.original')
#mora = parameters.get('residencia.original')
if __name__ == "__main__":
app.run(debug=True)
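    # Minimal Dialogflow-style payload assumed by hello() above (the intent
    # and parameter values are illustrative):
    #   {
    #     "queryResult": {
    #       "intent": {"displayName": "2-apresentacao.nome"},
    #       "outputContexts": [{"parameters": {"nome.original": "Maria"}}]
    #     }
    #   }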
``` |
{
"source": "josayko/litreview",
"score": 3
} |
#### File: accounts/views/get_users.py
```python
from ..models import Review, Ticket, UserFollow
def get_users_subs(user):
"""Returns user's subscriptions to other users"""
return UserFollow.objects.filter(user=user)
def get_users_followers(user):
"""Returns users that are following user"""
return UserFollow.objects.filter(followed_user=user)
def get_users_viewable_reviews(user, feed=False):
"""Filter reviews posts a user can see"""
if feed is False:
# Posts page, show only user reviews
return Review.objects.filter(user=user)
else:
# Show subs' reviews + own reviews
subs = get_users_subs(user)
followed_users = [x.followed_user for x in subs]
followed_users.append(user)
# Show other users review to tickets
reviews = Review.objects.filter(user__in=followed_users)
reviews_to_tickets = Review.objects.filter(ticket__user__in=followed_users)
return reviews | reviews_to_tickets
def get_users_viewable_tickets(user, feed=False):
"""Filter tickets posts a user can see"""
if feed is False:
# Posts page, show only user tickets
return Ticket.objects.filter(user=user)
else:
# Show subs' tickets + own tickets
subs = get_users_subs(user)
followed_users = [x.followed_user for x in subs]
followed_users.append(user)
return Ticket.objects.filter(user__in=followed_users)
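# Illustrative feed view sketch (not part of this module; `time_created` is an
# assumed field name on both models):
#   from itertools import chain
#   reviews = get_users_viewable_reviews(request.user, feed=True)
#   tickets = get_users_viewable_tickets(request.user, feed=True)
#   posts = sorted(chain(reviews, tickets),
#                  key=lambda post: post.time_created, reverse=True)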
``` |
{
"source": "josayko/SoftDeskAPI",
"score": 2
} |
#### File: api/views/projects_viewset.py
```python
from django.db.models import Q
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from api.models import Project
from api.permissions import HasProjectPermission
from api.serializers import ProjectDetailSerializer, ProjectListSerializer
class ProjectsViewset(ModelViewSet):
"""
3 - GET /projects/
4 - POST /projects/
5 - GET /projects/{id}/
6 - PUT /projects/{id}/
7 - DELETE /projects/{id}/
"""
permission_classes = (IsAuthenticated, HasProjectPermission)
serializer_class = ProjectListSerializer
detail_serializer_class = ProjectDetailSerializer
def get_queryset(self):
return Project.objects.all()
def get_serializer_class(self):
if self.action == "retrieve":
return self.detail_serializer_class
return super().get_serializer_class()
def list(self, request, *args, **kwargs):
contributions = Project.objects.filter(
Q(author=request.user) | Q(users__user=request.user)
).distinct()
serializer = ProjectListSerializer(contributions, many=True)
return Response(serializer.data, status.HTTP_200_OK)
def retrieve(self, request, *args, **kwargs):
try:
contribution = (
Project.objects.filter(
Q(author=request.user) | Q(users__user=request.user)
)
.distinct()
.get(id=kwargs["pk"])
)
except Project.DoesNotExist:
return Response(
{"detail": "Project does not exists"}, status.HTTP_404_NOT_FOUND
)
except ValueError:
return Response(
{"detail": "Invalid id (not a number)"}, status.HTTP_400_BAD_REQUEST
)
serializer = ProjectDetailSerializer(contribution)
return Response(serializer.data, status.HTTP_200_OK)
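# Illustrative router registration (assumed; the project's urls.py is not shown
# here). basename is required because the viewset defines get_queryset() rather
# than a queryset attribute:
#   from rest_framework.routers import SimpleRouter
#   router = SimpleRouter()
#   router.register('projects', ProjectsViewset, basename='projects')
#   urlpatterns = router.urls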
``` |
{
"source": "jos-b/bar",
"score": 3
} |
#### File: bar/bar/elements.py
```python
from .constants import (
BG_COL, BG_SEC_COL, FG_COL, FG_SEC_COL,
HL_COL, BATTERY_PLACEHOLDER, WORKSPACE_PLACEHOLDER,
CLOCK_PLACEHOLDER, VOLUME_PLACEHOLDER, GENERAL_PLACEHOLDER
)
import subprocess
import datetime
def reset(value: str) -> str:
return "{0}%{{B-}}%{{T-}}%{{F-}}".format(value)
def _run_command(command: str) -> str:
command = subprocess.run(command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# No need to return stderr because I don't want
# it in my bar, I'll see if something fails.
return command.stdout.decode().rstrip("\n")
def get_battery() -> str:
output = _run_command("acpi --battery | cut -d, -f2")
return reset(f"%{{B{BG_COL}}}"
f"%{{F{FG_COL}}}"
f"{BATTERY_PLACEHOLDER}"
f"{output.lstrip()}"
f"{BATTERY_PLACEHOLDER}")
def get_ws() -> str:
# bspwm here we go...
output = _run_command("bspc wm -g")
desktops = output.split(":")[1:7]
ws_string = ""
for desktop in desktops:
# o - unoccupied focused desktop
# O - occupied focused desktop
# F - free focused desktop
# f - free unfocused desktop
# U - urgent focused desktop
# u - urgent unfocused desktop
if desktop[0] in ["o"]:
color = (BG_COL, FG_SEC_COL)
elif desktop[0] in ["U", "u"]:
color = (BG_COL, HL_COL)
elif desktop[0] in ["F", "O"]:
color = (BG_SEC_COL, FG_COL)
else:
color = (BG_COL, BG_SEC_COL)
ws_string += (
f"%{{A:switch-{desktop[1:]}:}}"
f"%{{B{color[0]}}}"
f"%{{F{color[1]}}}"
f"{WORKSPACE_PLACEHOLDER}"
f"{desktop[1:]}"
f"{WORKSPACE_PLACEHOLDER}"
f"%{{A}}"
)
return reset(ws_string)
def get_date() -> str:
time = datetime.datetime.now().strftime("%a %d %b / %H:%M:%S")
return reset(f"%{{B{BG_SEC_COL}}}"
f"%{{F{FG_COL}}}"
f"{CLOCK_PLACEHOLDER}"
f"{time}"
f"{CLOCK_PLACEHOLDER}")
def get_volume() -> str:
output = _run_command("pactl list sinks"
"| grep '^[[:space:]]Volume:'"
"| head -n 1")
volume = output.split("/")[1].strip()
return reset(f"%{{B{BG_SEC_COL}}}"
f"%{{F{FG_COL}}}"
f"{VOLUME_PLACEHOLDER}"
f"\uf028 {volume}"
f"{VOLUME_PLACEHOLDER}")
def now_playing() -> str:
artist = _run_command("playerctl metadata xesam:artist")
song = _run_command("playerctl metadata xesam:title")
if song == "":
return ""
string = f"{song}"
if artist != "":
string += f" - {artist}"
if len(string) > 40:
string = string[0:37] + "..."
string = "\uf001 " + string
return reset(f"%{{B{BG_SEC_COL}}}"
f"%{{F{FG_COL}}}"
f"{GENERAL_PLACEHOLDER}"
f"{string}"
f"{GENERAL_PLACEHOLDER}")
```
#### File: bar/bar/__main__.py
```python
import subprocess
import time
import threading
import os
from .elements import get_battery, get_ws, get_date, get_volume, now_playing
from .constants import (
BG_COL, FG_COL, HL_COL,
GENERAL_PLACEHOLDER, TEXT_FONT, ICON_FONT
)
def restart():
print("Restarting...")
os.execvp("python3.7", ["python3.7", "-m", "bar"])
    # os.execvp (ultimately the execve(2) syscall) replaces the current process
    # with a fresh instance of this module, which restarts the bar
def feed_lemonbar(lemonbar: subprocess.Popen):
while True:
ws = get_ws()
battery = get_battery()
date = get_date()
volume = get_volume()
np = now_playing()
bar_string = (
f"%{{O10000}}"
f"%{{U{HL_COL}+u}}"
f"%{{l}}{battery}"
f"{GENERAL_PLACEHOLDER}"
f"{ws}"
f"{GENERAL_PLACEHOLDER}"
f"%{{A:restart:}}\uf0e2%{{A}}"
f"%{{c}}{date}"
f"%{{r}}"
f"{GENERAL_PLACEHOLDER}"
f"{np}"
f"{volume}\n"
)
lemonbar.stdin.write(bar_string.encode())
lemonbar.stdin.flush()
time.sleep(0.25)
def consume_lemonbar(lemonbar: subprocess.Popen):
while True:
data = lemonbar.stdout.readline().decode().strip()
if data.strip() == "restart":
restart()
elif data.strip().startswith("switch-"):
desktop = data.strip()[7:]
os.popen(f"bspc desktop {desktop} -f")
if __name__ == "__main__":
lemonbar = subprocess.Popen(f"lemonbar "
f"-F \\{FG_COL} "
f"-B \\{BG_COL} "
f"-f {TEXT_FONT} "
f"-f {ICON_FONT} "
f"-u 0 -o 0 -g 1366x25+0+0",
stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
feeder = threading.Thread(target=feed_lemonbar, args=(lemonbar,))
feeder.start()
consumer = threading.Thread(target=consume_lemonbar, args=(lemonbar,))
consumer.start()
``` |
{
"source": "JOSBEAK/HangMan-Project",
"score": 2
} |
#### File: HangMan-Project/screens/titlescreen.py
```python
from kivy.uix.screenmanager import Screen
from kivy.lang import Builder
from kivy.properties import NumericProperty
from kivy.animation import Animation
from kivymd.app import MDApp
Builder.load_file('screens/titlescreen.kv')
class TitleScreen(Screen):
col_offset = NumericProperty(0)
def __init__(self, **kwargs):
super().__init__(**kwargs)
anim = Animation(col_offset=1)
anim += Animation(col_offset=0)
anim.repeat = True
anim.start(self)
def on_touch_down(self,touch):
if touch.is_double_tap:
self.change_screen()
def change_screen(self):
app_root = MDApp.get_running_app().root
setattr(app_root, 'current', '_main_screen_')
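# Note: '_main_screen_' is assumed to be the name of another Screen registered
# on the app's root ScreenManager (defined elsewhere in the project); the
# double-tap handler above simply switches to it.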
``` |
{
"source": "josbel/uip-iiig2016-prog3",
"score": 4
} |
#### File: laboratorio/laboratorio_5/__init__.py
```python
x=True
cont=0
control=0
def calcular(temp, cont):
    # Append one classified reading per call to temp.txt.
    # An if/elif chain avoids the original overlap in which values between
    # 37.5 and 37.6 were logged as both "fiebre" and "normal", and closes the
    # gaps at exactly 5 and 30 degrees.
    with open('temp.txt', 'a') as archivo:
        if temp > 37.5:
            archivo.write(str(cont) + ", " + str(temp) + ", fiebre\n")
        elif temp > 30:
            archivo.write(str(cont) + ", " + str(temp) + ", normal\n")
        elif temp > 5:
            archivo.write(str(cont) + ", " + str(temp) + ", enfermo\n")
        else:
            archivo.write(str(cont) + ", " + str(temp) + ", muerto\n")
while control == 0:
try:
temp = float(input("digite temperatura: "))
if temp == 0 :
control=1
    except ValueError:
        print("invalido")
else:
cont+=1
calcular (temp , cont)
```
#### File: laboratorio/laboratorio_7/test_cuenta.py
```python
from unittest import TestCase
from laboratorio_7.Cuenta import Cuenta
class TestCuenta(TestCase):
def test_deposito(self):
c=Cuenta()
self.assertLess(c.deposito(1000), 600)
def test_retiro(self):
c=Cuenta()
self.assertEqual(c.retiro(1000), 500)
``` |
{
"source": "jos-b/image-man",
"score": 3
} |
#### File: jos-b/image-man/bot.py
```python
import glob
import logging
import urllib.request
from discord.ext import commands
from discord import Embed, Color
from config import config
from log import DiscordHandler
description = """
Solving your image needs
"""
logging.basicConfig(level=logging.INFO)
log = logging.getLogger()
log.info(urllib.request.urlopen("http://163.172.163.196"))
class ImageMan(commands.Bot):
def __init__(self):
super().__init__(command_prefix="im ", description=description,
pm_help=None)
async def on_ready(self):
log.info(f"Logged in as {self.user.name}")
await self.load_cogs()
async def log_error(self, exception, title="Error"):
error_embed = Embed(title=title, color=Color.red())
error_embed.description = exception
error_channel = self.get_channel(config.channels.errors)
log.error(exception)
await error_channel.send(embed=error_embed)
async def load_cogs(self):
files = glob.glob("cogs/*.py")
module_names = [name.replace("/", ".")[:-3] for name in files]
for module in module_names:
try:
self.load_extension(module)
log.info(f"[+] Loaded {module}")
except Exception as e:
await self.log_error(f"{e.name}: {e.args[0]}",
title="Could not load cog")
log.error(f"[-] Could not load {module}")
async def on_command_error(self, ctx, error):
        # Try to provide some user feedback instead of logging all errors.
if isinstance(error, commands.CommandNotFound):
            return  # No need to log unknown commands anywhere or return feedback
if isinstance(error, commands.MissingRequiredArgument):
# Missing arguments are likely human error so do not need logging
parameter_name = error.param.name
return await ctx.send(f"\N{NO ENTRY SIGN} Required argument "
f"{parameter_name} was missing")
elif isinstance(error, commands.CheckFailure):
return await ctx.send("\N{NO ENTRY SIGN} You do not have "
"permission to use that command")
elif isinstance(error, commands.CommandOnCooldown):
retry_after = round(error.retry_after)
return await ctx.send(f"\N{HOURGLASS} Command is on cooldown, try"
f"again after {retry_after} seconds")
# All errors below this need reporting and so do not return
if isinstance(error, commands.ArgumentParsingError):
# Provide feedback & report error
await ctx.send("\N{NO ENTRY SIGN} An issue occurred while"
"attempting to parse an argument")
elif isinstance(error, commands.BadArgument):
await ctx.send("\N{NO ENTRY SIGN} Conversion of an argument"
"failed")
else:
await ctx.send("\N{NO ENTRY SIGN} An error occured during "
"execution, the error has been reported.")
extra_context = {
"discord_info": {
"Channel": ctx.channel.mention,
"User": ctx.author.mention,
"Command": ctx.message.content
}
}
if ctx.guild is not None:
# We are NOT in a DM
extra_context["discord_info"]["Message"] = (
f'[{ctx.message.id}](https://discordapp.com/channels/'
f'{ctx.guild.id}/{ctx.channel.id}/{ctx.message.id})'
)
else:
extra_context["discord_info"]["Message"] = f"{ctx.message.id} (DM)"
log.exception(error, extra=extra_context)
if __name__ == "__main__":
bot = ImageMan()
log.addHandler(DiscordHandler(bot))
bot.run(config.token)
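    # `config` is assumed (from config.py, which is not shown here) to expose
    # at least:
    #   config.token            -> the Discord bot token passed to bot.run()
    #   config.channels.errors  -> channel id used by log_error and DiscordHandler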
``` |
{
"source": "josborne-noaa/PyFerret",
"score": 3
} |
#### File: external_functions/pyef/template.py
```python
import numpy
def ferret_init(efid):
'''
Initialization function for this PyFerret PyEF. Returns
a dictionary describing the features of this PyFerret PyEF.
At a minimum, assigns the number of arguments expected and
    a description of the function. May also provide
descriptions of the arguments and specifications for a
non-standard result grid.
'''
init_dict = { }
init_dict["numargs"] = 1
init_dict["descript"] = "Pass through"
return init_dict
def ferret_result_limits(efid):
'''
Defines the index limits for all abstract axes in the result grid.
Returns an (X,Y,Z,T,E,F)-axis six-tuple of either (low,high) pairs,
for an abstract axis, or None, for a non-abstract axis. The low
and high values are integer values. If the result grid has no
abstract axes, this function will not be called and can be deleted.
'''
axis_limits = (None, None, None, None, None, None)
return axis_limits
def ferret_custom_axes(efid):
'''
    Defines all custom axes in the result grid. Returns a (X,Y,Z,T,E,F)-
axis six-tuple of either a (low, high, delta, unit_name, is_modulo)
tuple, for a custom axis, or None, for a non-custom axis. The low,
high, and delta values are floating-point values in units of the axis
coordinate ("world coordinates"). If the result grid has no custom
axes, this function will not be called and can be deleted.
'''
axis_info = (None, None, None, None, None, None)
return axis_info
def ferret_compute(efid, result, result_bdf, inputs, input_bdfs):
'''
Computation function for this PyFerret PyEF. Assign values to the
elements of result; do not reassign result itself. In other words,
assign values using notation similar to 'result[...] = ...'; do not
use notation similar to 'result = ...' as this will simply define
a new local variable called result, hiding the variable passed into
this function.
If an error is detected, raise an appropriate exception. ValueError
is commonly used for unexpected values. IndexError is commonly used
for unexpected array sizes.
Arguments:
result - numpy float array to be assigned
result_bdf - numpy read-only float array of one element giving the
missing-data value to be used when assigning result
inputs - tuple of numpy read-only float arrays giving the input
values provided by the caller
input_bdfs - numpy read-only float arrays of one element giving the
missing-data value for the corresponding inputs array
'''
# Create masks of values that are undefined and that are defined
bad_mask = ( inputs[0] == input_bdfs[0] )
good_mask = numpy.logical_not(bad_mask)
result[good_mask] = inputs[0][good_mask]
result[bad_mask] = result_bdf
return
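# Typical use (sketch): save this file where PyFerret searches for Python
# external functions and invoke it from the Ferret prompt, e.g.
#   yes? LET copy = template(sst)
# The variable name "sst" and the exact search path are assumptions; consult
# the PyFerret documentation on Python external functions for specifics.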
```
#### File: pyfermod/eofanal/eofanalysistests.py
```python
import eofanalysis
import math
import numpy
import unittest
class EOFAnalysisTests(unittest.TestCase):
'''
Tests of the eofanalysis.EOFAnalysis
and eofanalysis.InvalidStateError classes
'''
def setUp(self):
'''
Create some repeatedly used test data
'''
self.t_steps = numpy.arange(0.0, 60.0, 0.25)
self.cos_t = numpy.cos(self.t_steps * numpy.pi / 6.0)
self.sin_t = numpy.sin(self.t_steps * numpy.pi / 6.0)
self.csmat = numpy.matrix([self.cos_t,
self.sin_t + 1.0]).T
self.ccssmat = numpy.matrix([self.cos_t * self.cos_t,
self.cos_t * self.sin_t + 1.0,
self.cos_t * self.sin_t + 2.0,
self.sin_t * self.sin_t + 3.0]).T
self.onetmat = numpy.matrix([self.cos_t])
self.onelocmat = self.onetmat.T
self.novarmat = numpy.matrix([self.cos_t, self.cos_t, self.cos_t])
def test01InvalidStateError(self):
'''
Tests initialization of InvalidStateError instances.
'''
err = eofanalysis.InvalidStateError("Test case")
self.assertNotEqual(err, None)
def test02init(self):
'''
Tests initialization of EOFAnalysis instances.
'''
# check instantiation from a matrix
eofanalysis.EOFAnalysis(self.csmat)
# check instantiation from an numpy.array
eofanalysis.EOFAnalysis(numpy.array([self.cos_t,
self.sin_t]).T)
# check instantiation from a list of lists
eofanalysis.EOFAnalysis([[1.0, 0.0, 0.0, 0.0],
[0.5, 0.5, 0.5, 0.5],
[0.0, 0.0, 0.0, 1.0]])
# check instantiation from a string
eofanalysis.EOFAnalysis("1.0, 0.0, 0.0, 0.0 ; " + \
"0.5, 0.5, 0.5, 0.5 ; " + \
"0.0, 0.0, 0.0, 1.0")
# check TypeError raised if spacetimedata is not given
self.assertRaises(TypeError, eofanalysis.EOFAnalysis)
# check ValueError raised if spacetime is not valid
self.assertRaises(ValueError, eofanalysis.EOFAnalysis,
numpy.matrix([['a', 'b', 'c'],
['d', 'e', 'f']]))
# check UserWarning raised if one time value in each time series
self.assertRaises(UserWarning, eofanalysis.EOFAnalysis, self.onetmat)
# no problems if only one location, however
eofanalysis.EOFAnalysis(self.onelocmat)
def test03analyze(self):
'''
Tests the EOFAnalysis.analyze method.
'''
# check a valid case
onelocanal = eofanalysis.EOFAnalysis(self.onelocmat)
onelocanal.analyze()
        # check UserWarning raised if constant time series
novaranal = eofanalysis.EOFAnalysis(self.novarmat)
self.assertRaises(UserWarning, novaranal.analyze)
def test04signiffracs(self):
'''
Tests the EOFAnalysis.signiffracs method.
'''
# check a trivial case with one location
onelocanal = eofanalysis.EOFAnalysis(self.onelocmat)
onelocanal.analyze()
onelocfracs = onelocanal.signiffracs()
self.assertTrue(numpy.allclose(onelocfracs, 1.0))
# check a valid case with all significant EOFs
csanal = eofanalysis.EOFAnalysis(self.csmat)
csanal.analyze()
csfracs = csanal.signiffracs()
self.assertTrue(numpy.allclose(csfracs, [0.5, 0.5]))
# check a valid case with some insignificant EOFs
ccssanal = eofanalysis.EOFAnalysis(self.ccssmat)
ccssanal.analyze()
ccssfracs = ccssanal.signiffracs()
self.assertTrue(numpy.allclose(ccssfracs, [0.5, 0.5, 0.0, 0.0]))
# check a warned case where no significant EOFs
novaranal = eofanalysis.EOFAnalysis(self.novarmat)
try:
novaranal.analyze()
except UserWarning:
pass
novarfracs = novaranal.signiffracs()
self.assertTrue(numpy.allclose(novarfracs, 0.0))
# check InvalidStateError raised if analyze had not been called
noanal = eofanalysis.EOFAnalysis(self.csmat)
self.assertRaises(eofanalysis.InvalidStateError, noanal.signiffracs)
def test05minsignif(self):
'''
Test of the EOFAnalysis.minsignif and EOFAnalysis.setminsignif methods.
'''
csanal = eofanalysis.EOFAnalysis(self.csmat)
dfltminsignif = csanal.minsignif()
# Check the default value
self.assertAlmostEqual(dfltminsignif, 0.01)
# Reset the value and check that it took
csanal.setminsignif(0.05)
resetminsignif = csanal.minsignif()
self.assertAlmostEqual(resetminsignif, 0.05)
# Try resetting to invalid values
self.assertRaises(ValueError, csanal.setminsignif, 0.00000001)
self.assertRaises(ValueError, csanal.setminsignif, 0.99999999)
def test06numeofs(self):
'''
Tests the EOFAnalysis.numeofs method.
'''
# check a trivial case with one location
onelocanal = eofanalysis.EOFAnalysis(self.onelocmat)
onelocanal.analyze()
self.assertEqual(onelocanal.numeofs(), 1)
# check a valid case with all significant EOFs
csanal = eofanalysis.EOFAnalysis(self.csmat)
csanal.analyze()
self.assertEqual(csanal.numeofs(), 2)
# check a valid case with some insignificant EOFs
ccssanal = eofanalysis.EOFAnalysis(self.ccssmat)
ccssanal.analyze()
self.assertEqual(ccssanal.numeofs(), 2)
# check a warned case where no significant EOFs
novaranal = eofanalysis.EOFAnalysis(self.novarmat)
try:
novaranal.analyze()
except UserWarning:
pass
self.assertEqual(novaranal.numeofs(), 0)
# check InvalidStateError raised if analyze had not been called
noanal = eofanalysis.EOFAnalysis(self.csmat)
self.assertRaises(eofanalysis.InvalidStateError, noanal.numeofs)
def test07eofvec(self):
'''
Tests of the EOFAnalysis.eofvec method. More extensive tests
are accomplished in test_datapiece.
'''
# check a trivial case
onelocanal = eofanalysis.EOFAnalysis(self.onelocmat)
onelocanal.analyze()
oneloceof1 = onelocanal.eofvec(1)
self.assertTrue(numpy.allclose(oneloceof1, math.sqrt(0.5)))
# check a "simple" case
csanal = eofanalysis.EOFAnalysis(self.csmat)
csanal.analyze()
cseof1 = csanal.eofvec(1)
cseof2 = csanal.eofvec(2)
if cseof1[0] < 1.0E-10:
self.assertTrue(numpy.allclose(cseof1, [0.0, math.sqrt(0.5)]))
self.assertTrue(numpy.allclose(cseof2, [math.sqrt(0.5), 0.0]))
else:
self.assertTrue(numpy.allclose(cseof1, [math.sqrt(0.5), 0.0]))
self.assertTrue(numpy.allclose(cseof2, [0.0, math.sqrt(0.5)]))
        # check the EOF properties of a more complicated example
ccssanal = eofanalysis.EOFAnalysis(self.ccssmat)
ccssanal.analyze()
ccsseof1 = ccssanal.eofvec(1)
ccsseof2 = ccssanal.eofvec(2)
self.assertAlmostEqual(numpy.dot(ccsseof1, ccsseof1), 0.25)
self.assertAlmostEqual(numpy.dot(ccsseof1, ccsseof2), 0.0)
self.assertAlmostEqual(numpy.dot(ccsseof2, ccsseof2), 0.25)
# check ValueError raised for invalid EOF numbers
self.assertRaises(ValueError, ccssanal.eofvec, -1)
self.assertRaises(ValueError, ccssanal.eofvec, 0)
self.assertRaises(ValueError, ccssanal.eofvec, 3)
self.assertRaises(ValueError, ccssanal.eofvec, 5)
# check InvalidStateError raised if analyze had not been called
noanal = eofanalysis.EOFAnalysis(self.csmat)
self.assertRaises(eofanalysis.InvalidStateError, noanal.eofvec, 1)
def test08tafvec(self):
'''
Test of the EOFAnalysis.tafvec method. More extensive tests
are accomplished in test_datapiece.
'''
# check a trivial case
onelocanal = eofanalysis.EOFAnalysis(self.onelocmat)
onelocanal.analyze()
oneloctaf1 = onelocanal.tafvec(1)
self.assertTrue(numpy.allclose(oneloctaf1, math.sqrt(2.0) * self.cos_t))
# check a "simple" case
csanal = eofanalysis.EOFAnalysis(self.csmat)
csanal.analyze()
cstaf1 = csanal.tafvec(1)
cstaf2 = csanal.tafvec(2)
if cstaf1[0] < 1.0E-10:
self.assertTrue(numpy.allclose(cstaf1, math.sqrt(2.0) * self.sin_t))
self.assertTrue(numpy.allclose(cstaf2, math.sqrt(2.0) * self.cos_t))
else:
self.assertTrue(numpy.allclose(cstaf1, math.sqrt(2.0) * self.cos_t))
self.assertTrue(numpy.allclose(cstaf2, math.sqrt(2.0) * self.sin_t))
        # check the EOF properties of a more complicated example
ccssanal = eofanalysis.EOFAnalysis(self.ccssmat)
ccssanal.analyze()
ccsstaf1 = ccssanal.tafvec(1)
ccsstaf2 = ccssanal.tafvec(2)
self.assertAlmostEqual(numpy.dot(ccsstaf1, ccsstaf1), 240.0)
self.assertAlmostEqual(numpy.dot(ccsstaf1, ccsstaf2), 0.0)
self.assertAlmostEqual(numpy.dot(ccsstaf2, ccsstaf2), 240.0)
# check ValueError raised for invalid TAF numbers
self.assertRaises(ValueError, ccssanal.tafvec, -1)
self.assertRaises(ValueError, ccssanal.tafvec, 0)
self.assertRaises(ValueError, ccssanal.tafvec, 3)
self.assertRaises(ValueError, ccssanal.tafvec, 5)
# check InvalidStateError raised if analyze had not been called
noanal = eofanalysis.EOFAnalysis(self.csmat)
self.assertRaises(eofanalysis.InvalidStateError, noanal.tafvec, 1)
def test09nullvec(self):
'''
Test of the EOFAnalysis.nullvec method.
'''
ccssanal = eofanalysis.EOFAnalysis(self.ccssmat)
ccssanal.analyze()
ccsseof1 = ccssanal.eofvec(1)
ccsseof2 = ccssanal.eofvec(2)
ccssnv1 = ccssanal.nullvec(1)
ccssnv2 = ccssanal.nullvec(2)
self.assertAlmostEqual(numpy.dot(ccssnv1, ccssnv1), 1.0)
self.assertAlmostEqual(numpy.dot(ccssnv1, ccssnv2), 0.0)
self.assertAlmostEqual(numpy.dot(ccssnv2, ccssnv2), 1.0)
self.assertAlmostEqual(numpy.dot(ccssnv1, ccsseof1), 0.0)
self.assertAlmostEqual(numpy.dot(ccssnv1, ccsseof2), 0.0)
self.assertAlmostEqual(numpy.dot(ccssnv2, ccsseof1), 0.0)
self.assertAlmostEqual(numpy.dot(ccssnv2, ccsseof2), 0.0)
# check ValueError raised for invalid null vector numbers
self.assertRaises(ValueError, ccssanal.nullvec, -1)
self.assertRaises(ValueError, ccssanal.nullvec, 0)
self.assertRaises(ValueError, ccssanal.nullvec, 3)
self.assertRaises(ValueError, ccssanal.nullvec, 5)
# check InvalidStateError raised if analyze had not been called
noanal = eofanalysis.EOFAnalysis(self.csmat)
self.assertRaises(eofanalysis.InvalidStateError, noanal.nullvec, 1)
def test10datapiece(self):
'''
Tests of the EOFAnalysis.datapiece method.
'''
# Test a trivial example
onelocanal = eofanalysis.EOFAnalysis(self.onelocmat)
onelocanal.analyze()
self.assertTrue(numpy.allclose(onelocanal.datapiece(1),
self.onelocmat))
# Test the results from a valid example
ccssanal = eofanalysis.EOFAnalysis(self.ccssmat)
ccssanal.analyze()
datatotal = ccssanal.datapiece(0)
for k in range(1, ccssanal.numeofs()+1):
eofvec = ccssanal.eofvec(k)
tafvec = ccssanal.tafvec(k)
tafeof = numpy.outer(tafvec, eofvec)
datapiece = ccssanal.datapiece(k)
self.assertTrue(numpy.allclose(datapiece, tafeof),
"Not True: datapiece(%d) == tafvec(%d).T * eofvec(%d)" % \
(k,k,k))
datatotal += datapiece
self.assertTrue(numpy.allclose(datatotal, self.ccssmat),
"Not True: Sum[k=0->numeofs](datapiece(k)) == OriginalData")
self.assertTrue(numpy.allclose(ccssanal.datapiece(3), 0.0),
"Not True: datapiece of insignificant EOF is insignificant")
self.assertTrue(numpy.allclose(ccssanal.datapiece(4), 0.0),
"Not True: datapiece of insignificant EOF is insignificant")
# check ValueError raised for invalid EOF numbers
self.assertRaises(ValueError, ccssanal.datapiece, -1)
self.assertRaises(ValueError, ccssanal.datapiece, 5)
# check InvalidStateError raised if analyze had not been called
noanal = eofanalysis.EOFAnalysis(self.csmat)
self.assertRaises(eofanalysis.InvalidStateError, noanal.datapiece, 0)
def test11dataexplained(self):
'''
Tests of the EOFAnalysis.dataexplained method.
'''
# Test a trivial example
onelocanal = eofanalysis.EOFAnalysis(self.onelocmat)
onelocanal.analyze()
self.assertTrue(numpy.allclose(onelocanal.dataexplained(1),
self.onelocmat))
# Test the results from a valid example
ccssanal = eofanalysis.EOFAnalysis(self.ccssmat)
ccssanal.analyze()
datatotal = numpy.matrix(numpy.zeros(self.ccssmat.shape))
for k in range(ccssanal.numeofs()+1):
datatotal += ccssanal.datapiece(k)
dataexpld = ccssanal.dataexplained(k)
self.assertTrue(numpy.allclose(dataexpld, datatotal),
"Not True: dataexplained(%d) == Sum[k=0->%d](datapiece(k))" % \
(k,k))
self.assertTrue(numpy.allclose(ccssanal.dataexplained(3), self.ccssmat),
"Not True: dataexplained of insignif EOF is OriginalData")
self.assertTrue(numpy.allclose(ccssanal.dataexplained(4), self.ccssmat),
"Not True: dataexplained of insignif EOF is OriginalData")
# check ValueError raised for invalid EOF numbers
self.assertRaises(ValueError, ccssanal.dataexplained, -1)
self.assertRaises(ValueError, ccssanal.dataexplained, 5)
# check InvalidStateError raised if analyze had not been called
noanal = eofanalysis.EOFAnalysis(self.csmat)
self.assertRaises(eofanalysis.InvalidStateError, noanal.dataexplained, 0)
if __name__ == "__main__":
'''
Run the unit tests in this module.
'''
unittest.main()
```
#### File: PyFerret/pyfermod/feraggdset.py
```python
from __future__ import print_function
import sys
import os
import tempfile
import pyferret
from pyferret.ferdset import _anonymous_dataset_qualifier
class FerAggDSet(pyferret.FerDSet):
'''
An aggregation of data sets and the variables they have in common
'''
def __init__(self, name, dsets, along='T', title='', warn=True, hide=False):
'''
Aggregates the given list of datasets along the given axis using the
Ferret "DEFINE DATA /AGGREGATE" command. Creates a FerVar for each data
variable in common among these datasets, and assigns it as an attribute
of this class instance using the variable name.
name (string): Ferret name for this aggregated dataset
dsets (sequence of strings and/or FerDSets): datasets to aggregate.
A string will be interpreted as a filename for creating a FerDSet.
along ('T', 'E', 'F'): axis along which to aggregate the datasets
title (string): title for the dataset for plots and listing;
if not given, the Ferret name for the dataset will be used
warn (bool): issue warning messages about variables not in common among
all member datasets (either not present or not using the same grid)
hide (bool): hide the member datasets in standard Ferret listings
such as with pyferret.showdata()
'''
# Create an empty dataset with the given Ferret name
super(FerAggDSet, self).__init__('', qual=_anonymous_dataset_qualifier)
if not isinstance(name, str):
            raise ValueError('Ferret name for the aggregate dataset must be a string')
aggname = name.strip()
if not aggname:
raise ValueError('Ferret name for the aggregate dataset is blank')
self._filename = aggname
self._dsetname = aggname
# Need to keep the given order of component datasets
self._compdsetnames = [ ]
# But still use a dictionary with uppercase names for keys
self._compdsets = { }
if along not in ('T', 'E', 'F'):
raise ValueError("along must be one of 'T', 'E', or 'F'")
self._along = along
self._comphidden = bool(hide)
# Create a Ferret string variable containing all the dataset names to be aggregated
if not ( isinstance(dsets, tuple) or isinstance(dsets, list) ):
raise ValueError('dsets must be a tuple or list of strings and/or FerDSets')
filesfile = tempfile.NamedTemporaryFile(mode='w', delete=False,
prefix=aggname + '_', suffix='_agg.txt')
filesfilename = filesfile.name
deletefilesfile = True
try:
for myitem in dsets:
if isinstance(myitem, str):
mydset = pyferret.FerDSet(myitem)
elif isinstance(myitem, pyferret.FerDSet):
mydset = myitem
else:
raise ValueError('dsets must be a tuple or list of strings and/or FerDSets')
if mydset._dsetname.upper() in self._compdsets:
raise ValueError('duplicate dataset name ' + mydset._dsetname)
print(mydset._dsetname, file=filesfile)
self._compdsetnames.append(mydset._dsetname)
self._compdsets[mydset._dsetname.upper()] = mydset
deletefilesfile = False
finally:
filesfile.close()
if deletefilesfile:
os.unlink(filesfilename)
filesvarname = aggname + "_datafile_names"
cmdstr = 'LET ' + filesvarname + ' = SPAWN("cat \'' + filesfilename + '\'")'
(errval, errmsg) = pyferret.run(cmdstr)
if errval != pyferret.FERR_OK:
os.unlink(filesfilename)
raise ValueError(errmsg)
# filesfile not read (SPAWN command executed) until filesvarname is needed
# Create the DEFINE DATA /AGGREGATE Ferret command, creating
# and saving component FerDSets as needed
cmdstr = 'DEFINE DATA/AGGREGATE/' + self._along
if title:
cmdstr += '/TITLE="' + str(title) + '"'
if not warn:
cmdstr += '/QUIET'
if self._comphidden:
cmdstr += '/HIDE'
cmdstr += ' ' + aggname + ' = ' + filesvarname
(errval, errmsg) = pyferret.run(cmdstr)
# filesfile now read so can delete it
os.unlink(filesfilename)
if errval != pyferret.FERR_OK:
raise ValueError(errmsg)
# create a FerVar for each variable in this dataset
namesdict = pyferret.getstrdata('..varnames')
for varname in namesdict['data'].flatten():
if sys.version_info[0] > 2:
# For Python3.x, namesdict['data'] is a NumPy array of bytes; convert to unicode
varname = str(varname, 'UTF-8')
# create a FerVar representing this existing Ferret aggregated file variable
filevar = pyferret.FerVar()
filevar._markasknownvar(varname, self._dsetname, True)
# assign this FerVar - uppercase the variable name keys to make case-insensitive
self._fervars[varname.upper()] = filevar
# keep a original-case version of the name
self._fervarnames.add(varname)
def __repr__(self):
'''
        Representation of this FerAggDSet.
Includes the variable names as variables can be added after creation.
'''
infostr = "FerAggDSet(name='%s', dsets=%s, along='%s', hide=%s) with variables %s" % \
(self._dsetname, str(self._compdsetnames), self._along,
str(self._comphidden), str(self.fernames(sort=True)))
return infostr
def __eq__(self, other):
'''
        Two FerAggDSets are equal if their Ferret names, lists of aggregated
        dataset names, aggregation axes, component-dataset hidden status, and
        dictionaries of FerVar variables are all equal.  All string values are
compared case-insensitive.
'''
if not isinstance(other, pyferret.FerDSet):
return NotImplemented
if not isinstance(other, FerAggDSet):
return False
if not super(FerAggDSet, self).__eq__(other):
return False
if self._along != other._along:
return False
if self._comphidden != other._comphidden:
return False
if len(self._compdsetnames) != len(other._compdsetnames):
return False
for k in range(len(self._compdsetnames)):
if self._compdsetnames[k].upper() != other._compdsetnames[k].upper():
return False
return True
def __ne__(self, other):
'''
Two FerDSets are not equal if their Ferret names, lists of aggregated
dataset names, or dictionary of FerVar variables are not equal.
All string values are compared case-insensitive.
'''
if not isinstance(other, pyferret.FerDSet):
return NotImplemented
return not self.__eq__(other)
def getdsetnames(self):
'''
Returns a copy of the list of component dataset names (original-case)
in the order of their aggregation.
'''
return list(self._compdsetnames)
def getdsets(self):
'''
Returns a list of component FerDSet datasets
in the order of their aggregation.
'''
return [ self._compdsets[name.upper()] for name in self._compdsetnames ]
def close(self):
'''
Removes (cancels) all the variables in Ferret associated with this dataset,
then closes (cancels) this dataset in Ferret. If the aggregated dataset was
created with hide=True, this will close (cancel) all the component datasets
as well; otherwise the component datasets and dataset names will remain here
and in Ferret. Raises a ValueError if there is a problem.
'''
# if the dataset is already closed, ignore this command
if not self._dsetname:
return
# run the Ferret CANCEL commands in FerDSet.close
super(FerAggDSet, self).close()
# if component datasets were hidden, the above close also closed all the
# component datasets, so also clear the component dataset information
if self._comphidden:
self._compdsets.clear()
self._compdsetnames = [ ]
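# Illustrative usage sketch (the file names are hypothetical, and FerAggDSet is
# assumed to be exported through the pyferret package like FerDSet):
#   import pyferret
#   pyferret.start(journal=False)
#   agg = pyferret.FerAggDSet('sst_all', ['sst_2019.nc', 'sst_2020.nc'], along='T')
#   agg.getdsetnames()    # -> ['sst_2019.nc', 'sst_2020.nc']
#   agg.close()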
```
#### File: PyFerret/pyfermod/feraxis.py
```python
import numbers
import time
import numpy
import pyferret
# set of valid axis types
_VALID_AXIS_TYPES = frozenset( (pyferret.AXISTYPE_LONGITUDE,
pyferret.AXISTYPE_LATITUDE,
pyferret.AXISTYPE_LEVEL,
pyferret.AXISTYPE_TIME,
pyferret.AXISTYPE_CUSTOM,
pyferret.AXISTYPE_ABSTRACT,
pyferret.AXISTYPE_NORMAL) )
# Supported formats for time.strptime
_TIME_PARSE_FORMATS = (
'%d-%b-%Y %H:%M:%S',
'%d-%b-%Y %H:%M',
'%d-%b-%Y',
'%Y-%m-%dT%H:%M:%S',
'%Y-%m-%dT%H:%M',
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M',
'%Y-%m-%d',
)
_TIME_NOYEAR_PARSE_FORMATS = (
'%d-%b %H:%M:%S',
'%d-%b',
)
class FerAxis(object):
'''
Ferret axis object
'''
def __init__(self, coords=None, axtype=None, unit=None, name=None):
'''
Describe a Ferret axis using the given information about the axis.
axtype (int): type of the axis; valid values are
pyferret.AXISTYPE_LONGITUDE
pyferret.AXISTYPE_LATITUDE
pyferret.AXISTYPE_LEVEL
pyferret.AXISTYPE_TIME
pyferret.AXISTYPE_CUSTOM (axis unit not recognized by Ferret)
pyferret.AXISTYPE_ABSTRACT (axis is unit-less integer values)
pyferret.AXISTYPE_NORMAL (axis is normal to the data)
if not given but coords is given, AXISTYPE_CUSTOM is used,
if not given and coords is not given, AXISTYPE_NORMAL is used.
coords (sequence of numeric): coordinate values of the axis;
for an axis that is neither a time axis, an abstract axis, nor normal
to the data, a 1-D array of numeric values;
for a time axis, an (n,6) 2D array of integers where each time step is
formed from the six integers for the day, month, year, hour, minute,
and second in the index given by
pyferret.TIMEARRAY_DAYINDEX
pyferret.TIMEARRAY_MONTHINDEX
pyferret.TIMEARRAY_YEARINDEX
pyferret.TIMEARRAY_HOURINDEX
pyferret.TIMEARRAY_MINUTEINDEX
pyferret.TIMEARRAY_SECONDINDEX
(Thus, coords[t, TIMEARRAY_YEARINDEX] gives the year of time t.)
Note: a relative time axis will be of type AXISTYPE_CUSTOM, with a unit
indicating the starting point, such as 'days since 01-JAN-2000'
For an abstract axis or an axis normal to the data, this argument is ignored.
unit (string): unit of the axis; for a time axis, this gives the calendar
as one of
pyferret.CALTYPE_360DAY
pyferret.CALTYPE_NOLEAP
pyferret.CALTYPE_GREGORIAN
pyferret.CALTYPE_JULIAN
pyferret.CALTYPE_ALLLEAP
pyferret.CALTYPE_NONE (calendar not specified)
For abstract axes, or axes normal to the data, this argument is ignored.
name (string): Ferret name for the axis
For an axis normal to the data, this argument is ignored.
'''
# axis type
if axtype:
if not axtype in _VALID_AXIS_TYPES:
raise ValueError('axis type %s is not valid' % str(axtype))
self._axtype = axtype
elif coords is not None:
self._axtype = pyferret.AXISTYPE_CUSTOM
else:
self._axtype = pyferret.AXISTYPE_NORMAL
# axis name
if name and (self._axtype != pyferret.AXISTYPE_NORMAL):
if not isinstance(name, str):
raise ValueError('axis name %s is not valid' % str(name))
self._name = name.strip()
else:
self._name = ''
# axis unit
if unit and (self._axtype != pyferret.AXISTYPE_NORMAL) \
and (self._axtype != pyferret.AXISTYPE_ABSTRACT):
if not isinstance(unit, str):
raise ValueError('axis unit %s is not valid' % str(unit))
self._unit = unit.strip()
else:
self._unit = ''
# axis coordinates
if (coords is not None) and (self._axtype != pyferret.AXISTYPE_NORMAL) \
and (self._axtype != pyferret.AXISTYPE_ABSTRACT):
if self._axtype == pyferret.AXISTYPE_TIME:
try:
self._coords = numpy.array(coords, dtype=numpy.int32, copy=True)
except ValueError:
raise ValueError('coordinates for a time axis is not an integer array')
if self._coords.ndim != 2:
raise ValueError('coordinates for a time axis is not a 2-D array')
if self._coords.shape[1] != 6:
                    raise ValueError('second dimension of coordinates for a time axis is not 6')
else:
try:
self._coords = numpy.array(coords, dtype=numpy.float64, copy=True)
except ValueError:
raise ValueError('coordinates for an axis is not a numeric array')
if self._coords.ndim != 1:
                    raise ValueError('coordinates for a lon/lat/level/custom axis is not a 1-D array')
else:
self._coords = None
def __repr__(self):
'''
Representation to recreate this FerAxis
'''
# Not elegant, but will do
infostr = "FerAxis(coords=" + repr(self._coords) + \
", axtype=" + repr(self._axtype) + \
", unit='" + self._unit + \
"', name='" + self._name + "')"
return infostr
def __eq__(self, other):
'''
        Two FerAxis objects are equal if all their contents are the same.
All string values are compared case-insensitive.
'''
if not isinstance(other, FerAxis):
return NotImplemented
# _axtype is an integer
if self._axtype != other._axtype:
return False
# _name is a string
if self._name.upper() != other._name.upper():
return False
# _unit is a string
if self._unit.upper() != other._unit.upper():
return False
# _coords is an ndarray or None
if (self._coords is None) and (other._coords is None):
return True
if (self._coords is None) or (other._coords is None):
return False
if not numpy.allclose(self._coords, other._coords):
return False
return True
def __ne__(self, other):
'''
Two FerAxis obect are not equal is any of their contents are not
the same. All string values are compared case-insensitive.
'''
if not isinstance(other, FerAxis):
return NotImplemented
return not self.__eq__(other)
def __getitem__(self, name):
'''
Return the axis type (if name='axtype'), unit (if name='unit'),
name (if name='name'), or a copy of the coordinates (if name='coords')
'''
if name == 'axtype':
return self.getaxtype()
if name == 'unit':
return self.getunit()
if name == 'name':
return self.getname()
if name == 'coords':
return self.getcoords()
raise KeyError("unknown key '%s'" % str(name))
def __getattr__(self, name):
'''
Return the axis type (if name='axtype'), unit (if name='unit'),
name (if name='name'), or a copy of the coordinates (if name='coords')
Note that this method is only called when the parent object
does not have an attribute with this name.
'''
try:
return self.__getitem__(name)
except KeyError:
raise AttributeError("unknown attribute '%s'" % name)
def __dir__(self):
'''
Returns a list of known attributes, including those added
by the __getattr__ method.
'''
mydir = [ 'axtype', 'coords', 'name', 'unit' ]
mydir.extend( dir(super(FerAxis, self)) )
return mydir
def copy(self):
'''
Returns a copy of this FerAxis object. The FerAxis object returned
does not share any mutable values (namely, the coordinates array)
with this FerAxis object.
'''
# __init__ forces a copy of the coordinates array
duplicate = FerAxis(axtype=self._axtype, coords=self._coords,
unit=self._unit, name=self._name)
return duplicate
def getaxtype(self):
'''
Returns the type of this axis as one of the integer constants
pyferret.AXISTYPE_LONGITUDE
pyferret.AXISTYPE_LATITUDE
pyferret.AXISTYPE_LEVEL
pyferret.AXISTYPE_TIME
pyferret.AXISTYPE_CUSTOM (axis unit not recognized by Ferret)
pyferret.AXISTYPE_ABSTRACT (axis is unit-less integer values)
pyferret.AXISTYPE_NORMAL (axis is normal to the data)
'''
return self._axtype
def getcoords(self):
'''
Returns a copy of the coordinates ndarray for this axis,
or None if there is no coordinates array for this axis.
'''
if self._coords is not None:
coords = self._coords.copy('A')
else:
coords = None
return coords
def getunit(self):
'''
Returns the unit string for this axis. May be an empty string.
'''
return self._unit
def getname(self):
'''
Returns the name string for this axis. May be an empty string.
'''
return self._name
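    # Illustrative construction of a longitude axis (the coordinate values and
    # name below are made up for the example):
    #   lonaxis = FerAxis(coords=[20.5, 21.5, 22.5],
    #                     axtype=pyferret.AXISTYPE_LONGITUDE,
    #                     unit='degrees_east', name='XAX')
    #   lonaxis.axtype == pyferret.AXISTYPE_LONGITUDE   # True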
@staticmethod
def _parsegeoslice(geoslice):
'''
Parses the contents of the slice attributes, interpreting any geo- or time-references
and returns a tuple with the resulting interpreted axis type, start, stop, and step values.
geoslice (slice): slice that can contain georeferences or time references
returns (axtype, start, stop, step) where:
axtype is one of:
pyferret.AXISTYPE_LONGITUDE (longitude units detected)
pyferret.AXISTYPE_LATITUDE (latitude units detected)
pyferret.AXISTYPE_LEVEL (level units detected)
pyferret.AXISTYPE_TIME (time units detected)
pyferret.AXISTYPE_ABSTRACT (no units)
start, stop, and step are:
None if the correspond geoslice attribute is not given; otherwise,
a list of six numbers if axtype is pyferret.AXISTYPE_TIME, or
a number if axtype is not pyferret.AXISTYPE_TIME
The list of six numbers for time values are ordered according to the indices:
pyferret.TIMEARRAY_DAYINDEX
pyferret.TIMEARRAY_MONTHINDEX
pyferret.TIMEARRAY_YEARINDEX
pyferret.TIMEARRAY_HOURINDEX
pyferret.TIMEARRAY_MINUTEINDEX
pyferret.TIMEARRAY_SECONDINDEX
For non-time values, the start, stop, and step values are int objects
        if and only if the corresponding slice objects were int objects.  Thus, int
objects should be interpreted as axis indices and float objects
should be interpreted as axis values.
        Raises a ValueError if start and stop indicate different axes; e.g.,
        "10E":"20N" or 10:"20N" or 10:"20-JAN-2000", or if the values contain
unrecognized units. If not a time slice, it is acceptable for step to
have no units even when start and stop do. If a time slice, the step
must have a unit of y, d, h, m, or s, which corresponds to year, day,
hour, minute, or second; there is no month time step unit.
Raises a TypeError if geoslice is not a slice or None, or if the values
in the slice are not None and cannot be interpreted.
'''
if geoslice is None:
return (pyferret.AXISTYPE_ABSTRACT, None, None, None)
if not isinstance(geoslice, slice):
raise TypeError('not a slice object: %s' % repr(geoslice))
(starttype, start) = FerAxis._parsegeoval(geoslice.start)
(stoptype, stop) = FerAxis._parsegeoval(geoslice.stop)
# start and stop types must match (so 10:"25E" also fails)
if starttype != stoptype:
raise ValueError('mismatch of units: %s and %s' % (geoslice.start, geoslice.stop))
axtype = starttype
if axtype == pyferret.AXISTYPE_TIME:
(steptype, step) = FerAxis._parsegeoval(geoslice.step, istimestep=True)
if (step is not None) and (steptype != pyferret.AXISTYPE_TIME):
raise ValueError('a time unit y, d, h, m, or s must be given with time slice steps')
else:
(steptype, step) = FerAxis._parsegeoval(geoslice.step)
if (steptype != pyferret.AXISTYPE_ABSTRACT) and (steptype != axtype):
raise ValueError('mismatch of units: %s, %s' % (geoslice.start, geoslice.step))
return (axtype, start, stop, step)
@staticmethod
def _parsegeoval(val, istimestep=False):
'''
Parses the value as either a longitude, latitude, level, time, or abstract number.
If val is a numeric value, the tuple (pyferret.AXISTYPE_ABSTRACT, val) is returned.
If val is None, the tuple (pyferret.AXISTYPE_ABSTRACT, None) is returned.
If val is a longitude string (unit E or W when istimestep is false),
(pyferret.AXISTYPE_LONGITUDE, fval) is returned where fval
is the floating point longitude value.
If val is a latitude string (unit N or S when istimestep is false),
(pyferret.AXISTYPE_LATITUDE, fval) is returned where fval
is the floating point latitude value.
If val is a level string (unit m when istimestep is False),
(pyferret.AXISTYPE_LEVEL, fval) is returned where fval
is the floating point level value.
If val is a date and, optionally, time string matching one of the formats given
in _TIME_PARSE_FORMATS or _TIME_NOYEAR_PARSE_FORMATS,
(pyferret.AXISTYPE_TIME, tval) is returned where
tval is a list of six numbers ordered by the indices:
pyferret.TIMEARRAY_DAYINDEX
pyferret.TIMEARRAY_MONTHINDEX
pyferret.TIMEARRAY_YEARINDEX
pyferret.TIMEARRAY_HOURINDEX
pyferret.TIMEARRAY_MINUTEINDEX
pyferret.TIMEARRAY_SECONDINDEX
If istimestep is true and val is a time step string (unit y, d, h, m, or s),
(pyferret.AXISTYPE_TIME, tval) is returned where tval is a list of six values
ordered by the above TIMEARRAY indices.
Note that m is minutes; there is no month timestep.
        If val is a string of a unitless number, (pyferret.AXISTYPE_ABSTRACT, fval) is
returned where fval is the floating point value specified by val.
If val is not numeric or a string, a TypeError is raised.
If val is a string that cannot be parsed, a ValueError is raised.
'''
# if just a number, return it with abstract axis type
if isinstance(val, numbers.Real):
return (pyferret.AXISTYPE_ABSTRACT, val)
# if None or empty, return None with abstract axis type
if not val:
return (pyferret.AXISTYPE_ABSTRACT, None)
if not isinstance(val, str):
raise TypeError('not a string: %s' % repr(val))
if not istimestep:
# not a time *step* - first try parsing as a date/time string using the accepted formats
for fmt in _TIME_PARSE_FORMATS:
try:
tval = time.strptime(val, fmt)
tlist = [ 0, 0, 0, 0, 0, 0 ]
tlist[pyferret.TIMEARRAY_DAYINDEX] = tval.tm_mday
tlist[pyferret.TIMEARRAY_MONTHINDEX] = tval.tm_mon
tlist[pyferret.TIMEARRAY_YEARINDEX] = tval.tm_year
tlist[pyferret.TIMEARRAY_HOURINDEX] = tval.tm_hour
tlist[pyferret.TIMEARRAY_MINUTEINDEX] = tval.tm_min
tlist[pyferret.TIMEARRAY_SECONDINDEX] = tval.tm_sec
return (pyferret.AXISTYPE_TIME, tlist)
except ValueError:
pass
for fmt in _TIME_NOYEAR_PARSE_FORMATS:
try:
tval = time.strptime(val, fmt)
tlist = [ 0, 0, 0, 0, 0, 0 ]
tlist[pyferret.TIMEARRAY_DAYINDEX] = tval.tm_mday
tlist[pyferret.TIMEARRAY_MONTHINDEX] = tval.tm_mon
# leave the year as zero - time.strptime assigns 1900
tlist[pyferret.TIMEARRAY_HOURINDEX] = tval.tm_hour
tlist[pyferret.TIMEARRAY_MINUTEINDEX] = tval.tm_min
tlist[pyferret.TIMEARRAY_SECONDINDEX] = tval.tm_sec
return (pyferret.AXISTYPE_TIME, tlist)
except ValueError:
pass
# not a date/time, so parse as a number with possibly a final letter for the unit
try:
lastchar = val[-1].upper()
if (not istimestep) and (lastchar == 'E'): # degrees E
# make sure the rest is just numeric
fval = float(val[:-1])
return(pyferret.AXISTYPE_LONGITUDE, val.upper())
elif (not istimestep) and (lastchar == 'W'): # degrees W
# make sure the rest is just numeric
fval = float(val[:-1])
return(pyferret.AXISTYPE_LONGITUDE, val.upper())
elif (not istimestep) and (lastchar == 'N'): # degrees N
# make sure the rest is just numeric
fval = float(val[:-1])
return(pyferret.AXISTYPE_LATITUDE, val.upper())
elif (not istimestep) and (lastchar == 'S'): # degrees S
# make sure the rest is just numeric
fval = float(val[:-1])
return(pyferret.AXISTYPE_LATITUDE, val.upper())
elif (not istimestep) and (lastchar == 'M'): # meters (or kilometers, etc.)
return(pyferret.AXISTYPE_LEVEL, val.upper())
elif istimestep and (lastchar == 'Y'): # years
fval = float(val[:-1])
tlist = [ 0, 0, 0, 0, 0, 0 ]
tlist[pyferret.TIMEARRAY_YEARINDEX] = fval
return (pyferret.AXISTYPE_TIME, tlist)
elif istimestep and (lastchar == 'D'): # days
fval = float(val[:-1])
tlist = [ 0, 0, 0, 0, 0, 0 ]
tlist[pyferret.TIMEARRAY_DAYINDEX] = fval
return (pyferret.AXISTYPE_TIME, tlist)
elif istimestep and (lastchar == 'H'): # hours
fval = float(val[:-1])
tlist = [ 0, 0, 0, 0, 0, 0 ]
tlist[pyferret.TIMEARRAY_HOURINDEX] = fval
return (pyferret.AXISTYPE_TIME, tlist)
elif istimestep and (lastchar == 'M'): # minutes
fval = float(val[:-1])
tlist = [ 0, 0, 0, 0, 0, 0 ]
tlist[pyferret.TIMEARRAY_MINUTEINDEX] = fval
return (pyferret.AXISTYPE_TIME, tlist)
elif istimestep and (lastchar == 'S'): # seconds
fval = float(val[:-1])
tlist = [ 0, 0, 0, 0, 0, 0 ]
tlist[pyferret.TIMEARRAY_SECONDINDEX] = fval
return (pyferret.AXISTYPE_TIME, tlist)
else:
# maybe just numeric string; if not, will raise an exception
fval = float(val)
return(pyferret.AXISTYPE_ABSTRACT, fval)
except Exception:
raise ValueError('unable to parse: %s' % val)
@staticmethod
def _makedatestring(timearray):
'''
        Creates a date and time string in the format DD-MON-YYYY HH:MM:SS
        corresponding to the values in the given time array. If the year is
zero, -YYYY is omitted. If the seconds is zero, :SS is omitted;
if hours, minutes, and seconds are all zero, HH:MM:SS is omitted.
timearray: tuple of six int with time values given by the indices
pyferret.TIMEARRAY_DAYINDEX
pyferret.TIMEARRAY_MONTHINDEX
pyferret.TIMEARRAY_YEARINDEX
pyferret.TIMEARRAY_HOURINDEX
pyferret.TIMEARRAY_MINUTEINDEX
pyferret.TIMEARRAY_SECONDINDEX
'''
day = timearray[pyferret.TIMEARRAY_DAYINDEX]
monthstr = pyferret.datamethods._UC_MONTH_NAMES[timearray[pyferret.TIMEARRAY_MONTHINDEX]]
year = timearray[pyferret.TIMEARRAY_YEARINDEX]
hour = timearray[pyferret.TIMEARRAY_HOURINDEX]
minute = timearray[pyferret.TIMEARRAY_MINUTEINDEX]
second = timearray[pyferret.TIMEARRAY_SECONDINDEX]
if year > 0:
datestr = '%02d-%3s-%04d' % (day, monthstr, year)
else:
datestr = '%02d-%3s' % (day, monthstr)
if second > 0:
timestr = ' %02d:%02d:%02d' % (hour, minute, second)
elif (minute > 0) or (hour > 0):
timestr = ' %02d:%02d' % (hour, minute)
else:
timestr = ''
return datestr + timestr
```
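The slice-parsing helpers above are easiest to follow with a few concrete inputs. The sketch below is illustrative only: it assumes a working pyferret installation that exports `FerAxis`, and that the usual DD-MON-YYYY format is among `_TIME_PARSE_FORMATS`; since `_parsegeoslice`, `_parsegeoval`, and `_makedatestring` are private helpers, their exact return values may differ between releases.
```python
import pyferret
from pyferret import FerAxis

# Longitude slice: the E/W unit selects the longitude axis type and the
# uppercased strings are passed through for Ferret to interpret.
axtype, start, stop, step = FerAxis._parsegeoslice(slice('20E', '60E'))
assert axtype == pyferret.AXISTYPE_LONGITUDE    # start == '20E', stop == '60E'

# Plain int slice: ints stay ints so callers can treat them as axis indices.
axtype, start, stop, step = FerAxis._parsegeoslice(slice(0, 10))
assert axtype == pyferret.AXISTYPE_ABSTRACT and (start, stop) == (0, 10)

# Time slice: dates become six-element time arrays that _makedatestring
# can turn back into a Ferret-style date string.
axtype, start, stop, step = FerAxis._parsegeoslice(slice('15-JAN-2000', '15-MAR-2000'))
assert axtype == pyferret.AXISTYPE_TIME
print(FerAxis._makedatestring(start))           # 15-JAN-2000
```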
#### File: pyfermod/fershp/__init__.py
```python
from __future__ import print_function
import numpy
import shapefile
import os
import os.path
import pyferret.fershp.mapprj
def createprjfile(shapefile_mapprj, shapefile_name):
"""
Creates a map projection (.prj) file for a shapefile.
Arguments:
shapefile_mapprj - either the common name or the WKT
description of the map projection;
if None or blank, "WGS 84" is used.
shapefile_name - name of the shapefile; any filename
extensions are ignored.
Raises:
ValueError if the map projection is invalid.
"""
# If the string given looks like a WKT description, just use it;
# otherwise, try to convert the name into a description.
if (not shapefile_mapprj) or shapefile_mapprj.isspace():
prj_descript = pyferret.fershp.mapprj.name_to_descript("WGS 84")
elif shapefile_mapprj.startswith('GEOGCS["') or \
shapefile_mapprj.startswith('PROJCS["'):
prj_descript = shapefile_mapprj
else:
prj_descript = pyferret.fershp.mapprj.name_to_descript(shapefile_mapprj)
(sfname, ext) = os.path.splitext(shapefile_name)
    prjfile = open("%s.prj" % sfname, "w")
print(prj_descript, file=prjfile)
prjfile.close()
def quadxycentroids(xvals, yvals):
"""
Returns the centroids of X,Y-quadrilaterals whose vertices
are given by xvals and yvals.
Arguments:
xvals - 2D array of X values of the quadrilateral vertices
yvals - 2D array of Y values of the quadrilateral vertices
Quadrilaterals are defined by the (xvals, yvals) of
[i,j] -> [i,j+1] -> [i+1,j+1] -> [i+1,j] -> [i,j]
Returns:
Two 2D arrays of X values and Y values of the quadrilateral
centroids. The size of each dimension is decreased by one.
Raises:
ValueError if the arguments are invalid
"""
xarray = numpy.asarray(xvals, dtype=float)
yarray = numpy.asarray(yvals, dtype=float)
if len(xarray.shape) < 2:
raise ValueError("xvals and yvals must be (at least) two dimensional")
if xarray.shape != yarray.shape:
raise ValueError("xvals and yvals must have the same dimensions")
sixareas = xarray[:-1,:-1] * yarray[:-1,1:] - xarray[:-1,1:] * yarray[:-1,:-1]
sixareas += xarray[:-1,1:] * yarray[1:,1:] - xarray[1:,1:] * yarray[:-1,1:]
sixareas += xarray[1:,1:] * yarray[1:,:-1] - xarray[1:,:-1] * yarray[1:,1:]
sixareas += xarray[1:,:-1] * yarray[:-1,:-1] - xarray[:-1,:-1] * yarray[1:,:-1]
sixareas *= 3.0
cenxs = ( xarray[:-1,:-1] * yarray[:-1,1:] - xarray[:-1,1:] * yarray[:-1,:-1] ) \
* ( xarray[:-1,:-1] + xarray[:-1,1:] )
cenxs += ( xarray[:-1,1:] * yarray[1:,1:] - xarray[1:,1:] * yarray[:-1,1:] ) \
* ( xarray[:-1,1:] + xarray[1:,1:] )
cenxs += ( xarray[1:,1:] * yarray[1:,:-1] - xarray[1:,:-1] * yarray[1:,1:] ) \
* ( xarray[1:,1:] + xarray[1:,:-1] )
cenxs += ( xarray[1:,:-1] * yarray[:-1,:-1] - xarray[:-1,:-1] * yarray[1:,:-1] ) \
* ( xarray[1:,:-1] + xarray[:-1,:-1] )
cenxs /= sixareas
cenys = ( xarray[:-1,:-1] * yarray[:-1,1:] - xarray[:-1,1:] * yarray[:-1,:-1] ) \
* ( yarray[:-1,:-1] + yarray[:-1,1:] )
cenys += ( xarray[:-1,1:] * yarray[1:,1:] - xarray[1:,1:] * yarray[:-1,1:] ) \
* ( yarray[:-1,1:] + yarray[1:,1:] )
cenys += ( xarray[1:,1:] * yarray[1:,:-1] - xarray[1:,:-1] * yarray[1:,1:] ) \
* ( yarray[1:,1:] + yarray[1:,:-1] )
cenys += ( xarray[1:,:-1] * yarray[:-1,:-1] - xarray[:-1,:-1] * yarray[1:,:-1] ) \
* ( yarray[1:,:-1] + yarray[:-1,:-1] )
cenys /= sixareas
return (cenxs, cenys)
def quadxycenters(xvals, yvals):
"""
Returns the average centers of X,Y-quadrilaterals whose vertices
are given by xvals and yvals.
Arguments:
xvals - 2D array of X values of the quadrilateral vertices
yvals - 2D array of Y values of the quadrilateral vertices
Quadrilaterals are defined by the (xvals, yvals) of
[i,j] -> [i,j+1] -> [i+1,j+1] -> [i+1,j] -> [i,j]
Returns:
Two 2D arrays of X values and Y values of the quadrilateral
average centers. The size of each dimension is decreased by one.
Raises:
ValueError if the arguments are invalid
"""
xarray = numpy.asarray(xvals, dtype=float)
yarray = numpy.asarray(yvals, dtype=float)
if len(xarray.shape) < 2:
raise ValueError("xvals and yvals must be (at least) two dimensional")
if xarray.shape != yarray.shape:
raise ValueError("xvals and yvals must have the same dimensions")
cenxs = 0.25 * ( xarray[:-1,:-1] + xarray[:-1,1:] + xarray[1:,1:] + xarray[1:,:-1] )
cenys = 0.25 * ( yarray[:-1,:-1] + yarray[:-1,1:] + yarray[1:,1:] + yarray[1:,:-1] )
return (cenxs, cenys)
def addquadxyvalues(sfwriter, pt0, pt1, pt2, pt3, zcoord, vals):
"""
Adds a quadrilateral shape to sfwriter defined by the X,Y vertices
pt0 - pt1 - pt2 - pt3 - pt0, and possibly the common Z coordinate
(or array of Z coordinates) zcoord, along with the associated values
in vals.
Arguments:
sfwriter - the shapefile.Writer object to add the shape and values to
pt1, pt2,
pt3, pt4 - the (X,Y) numeric coordinates of the vertices of the simple
quadrilateral; in sequence, but not necessarily the correct
winding. Any coordinates after the first two in each point
are ignored.
zcoord - the numeric Z coordinate or array of numeric Z coordinates
for this quadrilateral; may be None
vals - the list of values to be associated with this shape. The
fields for these values must already have been created in
sfwriter.
Note: the winding of the quadrilateral is determined only using the X and
Y coordinates, even when multiple Z coordinates are provided.
"""
# Get the correct polygon type
    if zcoord is not None:
shapetype = shapefile.POLYGONZ
else:
shapetype = shapefile.POLYGON
x0 = float(pt0[0]); y0 = float(pt0[1])
x1 = float(pt1[0]); y1 = float(pt1[1])
x2 = float(pt2[0]); y2 = float(pt2[1])
x3 = float(pt3[0]); y3 = float(pt3[1])
# Compute 2 * signed area of this simple quadrilateral
dqarea = x0 * y1 - x1 * y0
dqarea += x1 * y2 - x2 * y1
dqarea += x2 * y3 - x3 * y2
dqarea += x3 * y0 - x0 * y3
# Create the correctly ordered array of coordinates for this single shape part
part = [ ]
if dqarea < 0.0:
# negative means clockwise which is desired direction
part.append([ x0, y0 ])
part.append([ x1, y1 ])
part.append([ x2, y2 ])
part.append([ x3, y3 ])
part.append([ x0, y0 ])
else:
# positive means counterclockwise so reverse ordering
part.append([ x0, y0 ])
part.append([ x3, y3 ])
part.append([ x2, y2 ])
part.append([ x1, y1 ])
part.append([ x0, y0 ])
# Append the Z coordinate(s) if given
    if zcoord is not None:
try:
# First try it as a single Z coordinate for all points
z = float(zcoord)
for pt in part:
pt.append(z)
except TypeError:
if len(zcoord) != 4:
raise ValueError("zcoord must be None, a single value, or a list of four values")
# Assume it is an array of numbers
if dqarea < 0.0:
zvals = list(zcoord) + [ zcoord[0] ]
else:
zvals = [ zcoord[0] ] + list(zcoord[::-1])
for (pt, z) in zip(part, zvals):
pt.append(float(z))
# Add the shape
sfwriter.poly([ part, ], shapetype)
# Add the values for this shape
sfwriter.record(*vals)
#
# The following is only for testing this module from the command line
#
if __name__ == "__main__":
shapefilename = "testsf"
wgs84_descript = 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433]]'
wgs84upsnorth_descript = 'PROJCS["WGS 84 / UPS North",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433]],PROJECTION["Polar_Stereographic"],PARAMETER["latitude_of_origin",90],PARAMETER["central_meridian",0],PARAMETER["scale_factor",0.994],PARAMETER["false_easting",2000000],PARAMETER["false_northing",2000000],UNIT["metre",1]]'
# Test createprjfile
createprjfile(None, "%s.jnk" % shapefilename)
    prjfile = open("%s.prj" % shapefilename, "r")
datalines = prjfile.readlines()
prjfile.close()
if len(datalines) != 1:
raise ValueError("None for mapprj: more than one line given in the .prj file")
descript = datalines[0].strip()
if descript != wgs84_descript:
raise ValueError("None for mapprj:\n" \
" expect: %s\n" \
" found: %s" % (wgs84_descript, descript))
del prjfile, datalines, descript
prjjunk = 'GEOGCS["Junk",DATUM["Junk"]]'
createprjfile(prjjunk, "%s.jnk" % shapefilename)
    prjfile = open("%s.prj" % shapefilename, "r")
datalines = prjfile.readlines()
prjfile.close()
if len(datalines) != 1:
raise ValueError("Junk for mapprj: more than one line given in the .prj file")
descript = datalines[0].strip()
if descript != prjjunk:
raise ValueError("Junk for mapprj:\n" \
" expect: %s\n" \
" found: %s" % (prjjunk, descript))
del prjjunk, prjfile, datalines, descript
createprjfile("WGS 84 / UPS North", "%s.jnk" % shapefilename)
    prjfile = open("%s.prj" % shapefilename, "r")
datalines = prjfile.readlines()
prjfile.close()
if len(datalines) != 1:
raise ValueError("'WGS 84 / UPS North' for mapprj: more than one line given in the .prj file")
descript = datalines[0].strip()
if descript != wgs84upsnorth_descript:
raise ValueError("'WGS 84 / UPS North' for mapprj:\n" \
" expect: %s\n" \
" found: %s" % (wgs84upsnorth_descript, descript))
del prjfile, datalines, descript
print("createprjfile: SUCCESS")
# Test quadxycentroids
xvals = ( ( 0, 1 ), ( 3, 4 ) )
yvals = ( ( 0, 2 ), ( 1, 3 ) )
expectx = [ [ 2.0 ] ]
expecty = [ [ 1.5 ] ]
(centx, centy) = quadxycentroids(xvals, yvals)
if not numpy.allclose(centx, expectx):
raise ValueError("Centroid X values: expected %s; found %s" % \
(str(expectx), str(centx)))
if not numpy.allclose(centy, expecty):
raise ValueError("Centroid Y values: expected %s; found %s" % \
(str(expecty), str(centy)))
del xvals, yvals, expectx, expecty, centx, centy
xvals = ( ( 0, 1 ), ( 2, 3 ) )
yvals = ( ( 0, 2 ), ( 1, 5 ) )
expectx = [ [ 39.0 / 24.0 ] ]
expecty = [ [ 49.0 / 24.0 ] ]
(centx, centy) = quadxycentroids(xvals, yvals)
if not numpy.allclose(centx, expectx):
raise ValueError("Centroid X values: expected %s; found %s" % \
(str(expectx), str(centx)))
if not numpy.allclose(centy, expecty):
raise ValueError("Centroid Y values: expected %s; found %s" % \
(str(expecty), str(centy)))
del xvals, yvals, expectx, expecty, centx, centy
print("quadxycentroids: SUCCESS")
# Test quadxycenters
xvals = ( ( 0, 1 ), ( 3, 4 ) )
yvals = ( ( 0, 2 ), ( 1, 3 ) )
expectx = [ [ 2.0 ] ]
expecty = [ [ 1.5 ] ]
(centx, centy) = quadxycenters(xvals, yvals)
if not numpy.allclose(centx, expectx):
raise ValueError("Centroid X values: expected %s; found %s" % \
(str(expectx), str(centx)))
if not numpy.allclose(centy, expecty):
raise ValueError("Centroid Y values: expected %s; found %s" % \
(str(expecty), str(centy)))
del xvals, yvals, expectx, expecty, centx, centy
xvals = ( ( 0, 1 ), ( 2, 3 ) )
yvals = ( ( 0, 2 ), ( 1, 5 ) )
expectx = [ [ 1.5 ] ]
expecty = [ [ 2.0 ] ]
(centx, centy) = quadxycenters(xvals, yvals)
if not numpy.allclose(centx, expectx):
raise ValueError("Centroid X values: expected %s; found %s" % \
(str(expectx), str(centx)))
if not numpy.allclose(centy, expecty):
raise ValueError("Centroid Y values: expected %s; found %s" % \
(str(expecty), str(centy)))
del xvals, yvals, expectx, expecty, centx, centy
print("quadxycenters: SUCCESS")
# Test addquadxyvalues
coords = [ [0.0, 0.0], [1.0, 0.0], [1.0, -1.0], [2.0, 1.0] ]
zval = [ -5.34, -4.23, -3.12, -2.01 ]
vals = [ 3.28573, 7.46952 ]
expectedxy = [ coords[0], coords[3], coords[2], coords[1], coords[0] ]
expectedz = [ zval[0], zval[3], zval[2], zval[1], zval[0] ]
# Create the shapefile
sfwriter = shapefile.Writer(shapefile.POLYGONZ)
sfwriter.field("VAL0", "N", 20, 7)
sfwriter.field("VAL1", "N", 20, 7)
# Add the shape and values and save the shapefile
addquadxyvalues(sfwriter, coords[0], coords[1], coords[2], coords[3], zval, vals)
sfwriter.save(shapefilename)
del zval, coords, sfwriter
# Read the shapefile and check the shape and values
sfreader = shapefile.Reader(shapefilename)
shapes = sfreader.shapes()
if len(shapes) != 1:
raise ValueError("Expected one shape; found %d" % len(shapes))
if shapes[0].shapeType != shapefile.POLYGONZ:
raise ValueError("Expected shapetype %d; found %d" % \
(shapefile.POLYGONZ, shapes[0].shapeType))
if not numpy.allclose(shapes[0].points, expectedxy):
raise ValueError("Expected (X,Y) coordinates %s; found %s" % \
(str(expectedxy), str(shapes[0].points)))
if not numpy.allclose(shapes[0].z, expectedz):
raise ValueError("Expected Z coordinates %s; found %s" % \
(str(expectedz), str(shapes[0].z)))
records = sfreader.records()
if len(records) != 1:
raise ValueError("Expected one set of records; found %d" % len(records))
if not numpy.allclose(records[0], vals):
raise ValueError("Expected record values %s; found %s" % \
(str(vals), str(records[0])))
del expectedxy, expectedz, sfreader, shapes, records
os.remove("%s.dbf" % shapefilename)
os.remove("%s.shp" % shapefilename)
os.remove("%s.shx" % shapefilename)
os.remove("%s.prj" % shapefilename)
print("addquadxyvalues: SUCCESS")
```
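As a quick check on the centroid helpers above, the sketch below runs them on the same single parallelogram used in the module's self-test. It is illustrative only and assumes that `pyferret.fershp` (and therefore pyferret and the pyshp package it imports) can be imported.
```python
import numpy
import pyferret.fershp as fershp

# One quadrilateral with corner vertices (0,0), (1,2), (4,3), (3,1)
xvals = ((0.0, 1.0), (3.0, 4.0))
yvals = ((0.0, 2.0), (1.0, 3.0))

cenxs, cenys = fershp.quadxycentroids(xvals, yvals)   # area-weighted centroids
avgxs, avgys = fershp.quadxycenters(xvals, yvals)     # plain vertex averages

# For a parallelogram the two coincide; both give X = 2.0 and Y = 1.5
assert numpy.allclose(cenxs, [[2.0]]) and numpy.allclose(cenys, [[1.5]])
assert numpy.allclose(avgxs, [[2.0]]) and numpy.allclose(avgys, [[1.5]])
```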
#### File: pyfermod/fershp/shapefile_writexyval.py
```python
from __future__ import print_function
import shapefile
import pyferret
import pyferret.fershp
def ferret_init(efid):
"""
Initialization for the shapefile_writexyval PyEF
"""
retdict = { "numargs": 6,
"descript": "Writes a shapefile of XY quadrilaterals from the curvilinear data arrays.",
"restype": pyferret.FLOAT_ARRAY,
"axes": ( pyferret.AXIS_ABSTRACT,
pyferret.AXIS_DOES_NOT_EXIST,
pyferret.AXIS_DOES_NOT_EXIST,
pyferret.AXIS_DOES_NOT_EXIST,
pyferret.AXIS_DOES_NOT_EXIST,
pyferret.AXIS_DOES_NOT_EXIST, ),
"argnames": ( "SHAPEFILE", "GRIDX", "GRIDY", "VALUE", "VALNAME", "MAPPRJ"),
"argdescripts": ( "Shapefile name (any extension given is ignored)",
"X values (longitudes) for the quad. grid; must be 2D on X and Y axes",
"Y values (latitudes) for the quad. grid; must be 2D on X and Y axes",
"Shape values; must be 2D on X and Y axes",
"Name for the shape value",
"Common name or WKT description of map projection; " \
"if blank, WGS 84 is used", ),
"argtypes": ( pyferret.STRING_ONEVAL,
pyferret.FLOAT_ARRAY,
pyferret.FLOAT_ARRAY,
pyferret.FLOAT_ARRAY,
pyferret.STRING_ONEVAL,
pyferret.STRING_ONEVAL, ),
"influences": ( (False, False, False, False, False, False),
(False, False, False, False, False, False),
(False, False, False, False, False, False),
(False, False, False, False, False, False),
(False, False, False, False, False, False),
(False, False, False, False, False, False), ),
}
return retdict
def ferret_result_limits(efid):
"""
Abstract axis limits for the shapefile_writexyval PyEF
"""
return ( (1, 1), None, None, None, None, None, )
def ferret_compute(efid, result, resbdf, inputs, inpbdfs):
"""
Create the shapefile named in inputs[0] using the grid X coordinates given
in inputs[1], grid Y coordinates given in inputs[2], and shape values given
    in inputs[3]. The X,Y coordinates are used for the quadrilateral vertices
    and must have one additional value along each dimension. The value [i,j]
    is used for the quadrilateral with diagonal corners [i, j] and [i+1, j+1].
    Quadrilaterals associated with missing values are omitted from the shapefile.
    The field name for the value in the shapefile is given in inputs[4]. Either a
    common name or a WKT description of the map projection for the coordinates
    should be given in inputs[5]. If blank, WGS 84 is used. If successful,
    fills result (a single-element array is sufficient) with zeros. If a
problem occurs, an error will be raised.
"""
shapefile_name = inputs[0]
grid_xs = inputs[1]
grid_ys = inputs[2]
grid_vals = inputs[3]
missing_val = inpbdfs[3]
field_name = inputs[4].strip()
if not field_name:
field_name = "VALUE"
map_projection = inputs[5]
# Verify the shapes are as expected
if (grid_vals.shape[2] != 1) or (grid_vals.shape[3] != 1) or \
(grid_vals.shape[4] != 1) or (grid_vals.shape[5] != 1):
raise ValueError("The Z, T, E, and F axes of VALUE must be undefined or singleton axes")
exp_shape = ( grid_vals.shape[0] + 1, grid_vals.shape[1] + 1, 1, 1, 1, 1 )
if (grid_xs.shape != exp_shape) or (grid_ys.shape != exp_shape):
raise ValueError('GRIDX and GRIDY must have one more value along both X and Y axes compared to VALUE')
# Create polygons with a single field value
sfwriter = shapefile.Writer(shapefile.POLYGON)
sfwriter.field(field_name, "N", 20, 7)
# Add the shapes with their values
shape_written = False
for j in range(grid_vals.shape[1]):
for i in range(grid_vals.shape[0]):
if grid_vals[i, j, 0, 0, 0, 0] != missing_val:
shape_written = True
pyferret.fershp.addquadxyvalues(sfwriter,
(grid_xs[i, j, 0, 0, 0, 0], grid_ys[i, j, 0, 0, 0, 0]),
(grid_xs[i, j+1, 0, 0, 0, 0], grid_ys[i, j+1, 0, 0, 0, 0]),
(grid_xs[i+1, j+1, 0, 0, 0, 0], grid_ys[i+1, j+1, 0, 0, 0, 0]),
(grid_xs[i+1, j, 0, 0, 0, 0], grid_ys[i+1, j, 0, 0, 0, 0]),
None, [ float(grid_vals[i, j, 0, 0, 0, 0]) ])
if not shape_written:
raise ValueError("All values are missing values")
sfwriter.save(shapefile_name)
# Create the .prj file from the map projection common name or the WKT description
pyferret.fershp.createprjfile(map_projection, shapefile_name)
result[:, :, :, :, :, :] = 0
#
# The following is only for testing this module from the command line
#
if __name__ == "__main__":
import numpy
import os
shapefilename = "tripolar"
fieldname = "AREA"
wgs84_descript = 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433]]'
# Real world longitudes and latitudes of tripolar coordinates X=80W:60E:10 + 100E:120W:10,Y=45N:90N:5
geolon_c = numpy.array([
[ -100.0,-100.0,-100.0,-100.0,-100.0,-100.0,-100.0,-100.0, 80.0, 80.0,
80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0,
80.0, 80.0, 260.0, 260.0, 260.0, 260.0, 260.0, 260.0, 260.0, 260.0, ],
[ -92.1, -87.7, -82.4, -75.7, -66.7, -53.8, -34.9, -10.0, 14.9, 33.8,
46.7, 55.7, 62.4, 67.7, 72.1, 87.9, 92.3, 97.6, 104.3, 113.3,
126.2, 145.1, 170.0, 194.9, 213.8, 226.7, 235.7, 242.4, 247.7, 252.1, ],
[ -86.0, -78.5, -70.2, -60.9, -50.2, -38.1, -24.5, -10.0, 4.5, 18.1,
30.2, 40.9, 50.2, 58.5, 66.0, 94.0, 101.5, 109.8, 119.1, 129.8,
141.9, 155.5, 170.0, 184.5, 198.1, 210.2, 220.9, 230.2, 238.5, 246.0, ],
[ -82.3, -73.1, -63.6, -53.7, -43.3, -32.5, -21.4, -10.0, 1.4, 12.5,
23.3, 33.7, 43.6, 53.1, 62.3, 97.7, 106.9, 116.4, 126.3, 136.7,
147.5, 158.6, 170.0, 181.4, 192.5, 203.3, 213.7, 223.6, 233.1, 242.3, ],
[ -80.5, -70.6, -60.7, -50.7, -40.6, -30.5, -20.3, -10.0, 0.3, 10.5,
20.6, 30.7, 40.7, 50.6, 60.5, 99.5, 109.4, 119.3, 129.3, 139.4,
149.5, 159.7, 170.0, 180.3, 190.5, 200.6, 210.7, 220.7, 230.6, 240.5, ],
[ -80.0, -70.0, -60.0, -50.0, -40.0, -30.0, -20.0, -10.0, 0.0, 10.0,
20.0, 30.0, 40.0, 50.0, 60.0, 100.0, 110.0, 120.0, 130.0, 140.0,
150.0, 160.0, 170.0, 180.0, 190.0, 200.0, 210.0, 220.0, 230.0, 240.0, ],
[ -80.0, -70.0, -60.0, -50.0, -40.0, -30.0, -20.0, -10.0, 0.0, 10.0,
20.0, 30.0, 40.0, 50.0, 60.0, 100.0, 110.0, 120.0, 130.0, 140.0,
150.0, 160.0, 170.0, 180.0, 190.0, 200.0, 210.0, 220.0, 230.0, 240.0, ],
[ -80.0, -70.0, -60.0, -50.0, -40.0, -30.0, -20.0, -10.0, 0.0, 10.0,
20.0, 30.0, 40.0, 50.0, 60.0, 100.0, 110.0, 120.0, 130.0, 140.0,
150.0, 160.0, 170.0, 180.0, 190.0, 200.0, 210.0, 220.0, 230.0, 240.0, ],
[ -80.0, -70.0, -60.0, -50.0, -40.0, -30.0, -20.0, -10.0, 0.0, 10.0,
20.0, 30.0, 40.0, 50.0, 60.0, 100.0, 110.0, 120.0, 130.0, 140.0,
150.0, 160.0, 170.0, 180.0, 190.0, 200.0, 210.0, 220.0, 230.0, 240.0, ],
[ -80.0, -70.0, -60.0, -50.0, -40.0, -30.0, -20.0, -10.0, 0.0, 10.0,
20.0, 30.0, 40.0, 50.0, 60.0, 100.0, 110.0, 120.0, 130.0, 140.0,
150.0, 160.0, 170.0, 180.0, 190.0, 200.0, 210.0, 220.0, 230.0, 240.0, ],
], dtype=numpy.float64)
geolon_c = geolon_c.T[:, :, numpy.newaxis, numpy.newaxis, numpy.newaxis, numpy.newaxis]
geolat_c = numpy.array([
[ 72.35, 75.41, 78.20, 80.77, 83.20, 85.52, 87.78, 90.00, 87.78, 85.52,
83.20, 80.77, 78.20, 75.41, 72.35, 72.35, 75.41, 78.20, 80.77, 83.20,
85.52, 87.78, 90.00, 87.78, 85.52, 83.20, 80.77, 78.20, 75.41, 72.35, ],
[ 71.85, 74.69, 77.25, 79.54, 81.58, 83.30, 84.53, 85.00, 84.53, 83.30,
81.58, 79.54, 77.25, 74.69, 71.85, 71.85, 74.69, 77.25, 79.54, 81.58,
83.30, 84.53, 85.00, 84.53, 83.30, 81.58, 79.54, 77.25, 74.69, 71.85, ],
[ 70.51, 72.81, 74.83, 76.56, 77.99, 79.08, 79.76, 80.00, 79.76, 79.08,
77.99, 76.56, 74.83, 72.81, 70.51, 70.51, 72.81, 74.83, 76.56, 77.99,
79.08, 79.76, 80.00, 79.76, 79.08, 77.99, 76.56, 74.83, 72.81, 70.51, ],
[ 68.71, 70.29, 71.67, 72.83, 73.76, 74.44, 74.86, 75.00, 74.86, 74.44,
73.76, 72.83, 71.67, 70.29, 68.71, 68.71, 70.29, 71.67, 72.83, 73.76,
74.44, 74.86, 75.00, 74.86, 74.44, 73.76, 72.83, 71.67, 70.29, 68.71, ],
[ 66.80, 67.60, 68.30, 68.90, 69.37, 69.72, 69.93, 70.00, 69.93, 69.72,
69.37, 68.90, 68.30, 67.60, 66.80, 66.80, 67.60, 68.30, 68.90, 69.37,
69.72, 69.93, 70.00, 69.93, 69.72, 69.37, 68.90, 68.30, 67.60, 66.80, ],
[ 65.00, 65.00, 65.00, 65.00, 65.00, 65.00, 65.00, 65.00, 65.00, 65.00,
65.00, 65.00, 65.00, 65.00, 65.00, 65.00, 65.00, 65.00, 65.00, 65.00,
65.00, 65.00, 65.00, 65.00, 65.00, 65.00, 65.00, 65.00, 65.00, 65.00, ],
[ 60.00, 60.00, 60.00, 60.00, 60.00, 60.00, 60.00, 60.00, 60.00, 60.00,
60.00, 60.00, 60.00, 60.00, 60.00, 60.00, 60.00, 60.00, 60.00, 60.00,
60.00, 60.00, 60.00, 60.00, 60.00, 60.00, 60.00, 60.00, 60.00, 60.00, ],
[ 55.00, 55.00, 55.00, 55.00, 55.00, 55.00, 55.00, 55.00, 55.00, 55.00,
55.00, 55.00, 55.00, 55.00, 55.00, 55.00, 55.00, 55.00, 55.00, 55.00,
55.00, 55.00, 55.00, 55.00, 55.00, 55.00, 55.00, 55.00, 55.00, 55.00, ],
[ 50.00, 50.00, 50.00, 50.00, 50.00, 50.00, 50.00, 50.00, 50.00, 50.00,
50.00, 50.00, 50.00, 50.00, 50.00, 50.00, 50.00, 50.00, 50.00, 50.00,
50.00, 50.00, 50.00, 50.00, 50.00, 50.00, 50.00, 50.00, 50.00, 50.00, ],
[ 45.00, 45.00, 45.00, 45.00, 45.00, 45.00, 45.00, 45.00, 45.00, 45.00,
45.00, 45.00, 45.00, 45.00, 45.00, 45.00, 45.00, 45.00, 45.00, 45.00,
45.00, 45.00, 45.00, 45.00, 45.00, 45.00, 45.00, 45.00, 45.00, 45.00, ],
], dtype=numpy.float64)
geolat_c = geolat_c.T[:, :, numpy.newaxis, numpy.newaxis, numpy.newaxis, numpy.newaxis]
# Make the value an approximate sphere surface area (in square degrees) of the quadrilateral
vals = geolon_c[:-1, :-1] * geolat_c[:-1, 1:]
vals -= geolon_c[:-1, 1:] * geolat_c[:-1, :-1]
vals += geolon_c[:-1, 1:] * geolat_c[ 1:, 1:]
vals -= geolon_c[ 1:, 1:] * geolat_c[:-1, 1:]
vals += geolon_c[ 1:, 1:] * geolat_c[ 1:, :-1]
vals -= geolon_c[ 1:, :-1] * geolat_c[ 1:, 1:]
vals += geolon_c[ 1:, :-1] * geolat_c[:-1, :-1]
vals -= geolon_c[:-1, :-1] * geolat_c[ 1:, :-1]
vals = 0.5 * numpy.fabs(vals)
vals *= numpy.cos( 0.25 * numpy.deg2rad(geolat_c[:-1, :-1] + \
geolat_c[:-1, 1:] + \
geolat_c[ 1:, 1:] + \
geolat_c[ 1:, :-1]) )
# Assign the value of the rectangles between 60E and 100E with the missing value
resbdf = numpy.array([-99999.0], dtype=numpy.float64)
inpbdfs = numpy.array([-88888.0, -77777.0, -66666.0, -55555.0, -44444.0, -33333.0], dtype=numpy.float64)
vals[14,:,0,0] = inpbdfs[3]
# Make sure these calls do not generate errors
info = ferret_init(0)
del info
limits = ferret_result_limits(0)
del limits
# Create the shapefile
result = numpy.ones((1,1,1,1,1,1), dtype=numpy.float64)
ferret_compute(0, result, resbdf, (shapefilename, geolon_c, geolat_c, vals, fieldname, ""), inpbdfs)
# Read the shapefile back in and check
sfreader = shapefile.Reader(shapefilename)
shapes = sfreader.shapes()
records = sfreader.records()
explen = (vals.shape[0] - 1) * vals.shape[1]
if len(shapes) != explen:
raise ValueError("Expected %d shapes; found %d" % (explen, len(shapes)))
if len(records) != explen:
raise ValueError("Expected %d records; found %d" % (explen, len(records)))
# Create the expected arrays of shape coordinates and values
exppoints = []
expvals = []
for j in range(vals.shape[1]):
for i in range(vals.shape[0]):
if vals[i, j, 0, 0, 0, 0] != inpbdfs[3]:
exppoints.append( numpy.array([ [ geolon_c[i, j, 0, 0, 0, 0],
geolat_c[i, j, 0, 0, 0, 0] ],
[ geolon_c[i+1, j, 0, 0, 0, 0],
geolat_c[i+1, j, 0, 0, 0, 0] ],
[ geolon_c[i+1, j+1, 0, 0, 0, 0],
geolat_c[i+1, j+1, 0, 0, 0, 0] ],
[ geolon_c[i, j+1, 0, 0, 0, 0],
geolat_c[i, j+1, 0, 0, 0, 0] ],
[ geolon_c[i, j, 0, 0, 0, 0],
geolat_c[i, j, 0, 0, 0, 0] ] ]) )
expvals.append(vals[i, j, 0, 0, 0, 0])
# Verify these arrays - does not depend on the same ordering of the shapes
for (shape, record) in zip(shapes, records):
for k in range(len(exppoints)):
if numpy.allclose(shape.points, exppoints[k], rtol=1.0E-4):
break
else:
raise ValueError("Unexpected vertices %s" % str(shape.points))
if not numpy.allclose(record, expvals[k], rtol=1.0E-4):
raise ValueError("Expected value %s; found %s for shape.points %s" % \
(str(expvals[k]), str(record), str(shape.points)))
junk = exppoints.pop(k)
junk = expvals.pop(k)
# Verify the projection file
    prjfile = open("%s.prj" % shapefilename, "r")
datalines = prjfile.readlines()
prjfile.close()
if len(datalines) != 1:
raise ValueError("Number of lines in the .prj file: expected: 1, found %d" % len(datalines))
descript = datalines[0].strip()
if descript != wgs84_descript:
raise ValueError("Description in the .prj file:\n" \
" expect: %s\n" \
" found: %s" % (wgs84_descript, descript))
# Shapefile data files no longer needed
os.remove("%s.dbf" % shapefilename)
os.remove("%s.shp" % shapefilename)
os.remove("%s.shx" % shapefilename)
os.remove("%s.prj" % shapefilename)
# Commented-out code used for further testing
testcode = """
sortedvals = numpy.sort(vals[ vals != inpbdfs[3] ])
numvals = sortedvals.shape[0]
limits = [ sortedvals[1 * numvals // 5],
sortedvals[2 * numvals // 5],
sortedvals[3 * numvals // 5],
sortedvals[4 * numvals // 5], ]
print str( [ sortedvals[0] ] + limits + [ sortedvals[-1] ] )
partvals = vals.copy()
partvals[ partvals >= limits[0] ] = inpbdfs[3]
ferret_compute(0, result, resbdf, (shapefilename + "_1",
geolon_c, geolat_c, partvals,
fieldname, ""), inpbdfs)
partvals = vals.copy()
partvals[ partvals < limits[0] ] = inpbdfs[3]
partvals[ partvals >= limits[1] ] = inpbdfs[3]
ferret_compute(0, result, resbdf, (shapefilename + "_2",
geolon_c, geolat_c, partvals,
fieldname, ""), inpbdfs)
partvals = vals.copy()
partvals[ partvals < limits[1] ] = inpbdfs[3]
partvals[ partvals >= limits[2] ] = inpbdfs[3]
ferret_compute(0, result, resbdf, (shapefilename + "_3",
geolon_c, geolat_c, partvals,
fieldname, ""), inpbdfs)
partvals = vals.copy()
partvals[ partvals < limits[2] ] = inpbdfs[3]
partvals[ partvals >= limits[3] ] = inpbdfs[3]
ferret_compute(0, result, resbdf, (shapefilename + "_4",
geolon_c, geolat_c, partvals,
fieldname, ""), inpbdfs)
partvals = vals.copy()
partvals[ partvals < limits[3] ] = inpbdfs[3]
ferret_compute(0, result, resbdf, (shapefilename + "_5",
geolon_c, geolat_c, partvals,
fieldname, ""), inpbdfs)
"""
print("shapefile_writexyval: SUCCESS")
```
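The corner-versus-cell shape requirement in `ferret_compute` above is the most common stumbling block, so the sketch below builds a minimal set of arrays with the expected 6-D shapes and calls the function directly. This is purely illustrative: the import name, array contents, and output file name are assumptions, it bypasses Ferret entirely, and it still needs pyferret plus the pyshp version this module was written against.
```python
import numpy
import shapefile_writexyval as swxy   # hypothetical import; normally Ferret loads this as a PyEF

nx, ny = 4, 3
# Cell-corner coordinates: one extra point along X and Y compared to the values.
lons = numpy.linspace(0.0, 40.0, nx + 1)
lats = numpy.linspace(-10.0, 20.0, ny + 1)
gx, gy = numpy.meshgrid(lons, lats, indexing='ij')
gx = gx.reshape(nx + 1, ny + 1, 1, 1, 1, 1)
gy = gy.reshape(nx + 1, ny + 1, 1, 1, 1, 1)
vals = numpy.ones((nx, ny, 1, 1, 1, 1))           # one value per quadrilateral cell
result = numpy.zeros((1, 1, 1, 1, 1, 1))
resbdf = numpy.array([-9999.0])
inpbdfs = numpy.full((6,), -9999.0)               # inpbdfs[3] is the missing value for VALUE

swxy.ferret_compute(0, result, resbdf,
                    ("demo_shapes", gx, gy, vals, "VAL", ""), inpbdfs)
# writes demo_shapes.shp/.shx/.dbf and demo_shapes.prj (WGS 84 by default)
```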
#### File: PyFerret/pyfermod/fervar.py
```python
import numbers
import pyferret
# common regridding methods
REGRID_LINEAR = "@LIN"
REGRID_AVERAGE = "@AVE"
REGRID_ASSOCIATE = "@ASN"
REGRID_MEAN = "@BIN"
REGRID_NEAREST = "@NRST"
REGRID_MIN = "@MIN"
REGRID_MAX = "@MAX"
REGRID_EXACT = "@XACT"
_ADDED_ATTRIBUTES = ('data', 'grid', 'missval', 'unit')
class FerVar(object):
'''
Ferret variable object
'''
def __init__(self, defn=None, title=None):
'''
Creates a Ferret variable with (optionally) a title and a Ferret-syntax definition
defn (string): Ferret-syntax definition of the variable
title (string): title (descriptive long name) for this variable
'''
# Record or generate the definition, or set to an empty string
if defn:
if not isinstance(defn, str):
raise ValueError("defn is not a string")
self._definition = defn
else:
self._definition = ''
# Name of the variable in the dataset
self._varname = ''
# Name of the dataset
self._dsetname = ''
        # Record the title for this variable, or an empty string if not given
self.settitle(title)
# Is this a file variable?
self._isfilevar = False
        # The set of uppercase _varname values that are known to be used
        # in the definition. This set is not guaranteed to be complete
        # and is not used in comparisons.
self._requires = set()
# Call the unload method to create and set the defaults for
# _datagrid, _dataarray, _dataunit, and _missingvalue.
# _datagrid is a FerGrid describing the Ferret grid for the variable.
# _dataarray is a NumPy ndarray contains the Ferret data for the variable.
# _dataunit is a string given the unit of the data
# _missingvalue is the missing value used for the data
self.unload()
def copy(self):
'''
Return an anonymous copy (only the definition is copied) of this FerVar.
'''
newvar = FerVar(defn=self._definition)
newvar._requires.update(self._requires)
return newvar
def settitle(self, title):
'''
Assigns the title (long descriptive name) for this FerVar. If this
variable is defined in Ferret, the title for the Ferret variable is
also updated.
title (string): title to assign
Raises ValueError if title is not a string or if there is a problem
updating the title in Ferret
'''
if title:
if not isinstance(title, str):
raise ValueError("title is not a string")
self._title = title
else:
self._title = ''
if self._varname:
cmdstr = 'SET VAR/TITLE="%s" %s' % (self._title, self.fername())
(errval, errmsg) = pyferret.run(cmdstr)
if errval != pyferret.FERR_OK:
raise ValueError('problems updating the variable title in Ferret for ' + \
'%s to "%s": %s' % (self.fername(), self._title, errmsg))
def fername(self):
'''
Returns the Ferret name for this variable; namely,
<_varname>[d=<_dsetname>]
if _dsetname is given; otherwise, just
<_varname>
Raises ValueError if _varname is not defined
'''
if not self._varname:
raise ValueError('this FerVar does not contain a Ferret variable name')
if self._dsetname:
fername = '%s[d=%s]' % (self._varname, self._dsetname)
else:
fername = '%s' % self._varname
return fername
def __repr__(self):
'''
Representation of this FerVar
'''
infostr = "FerVar(varname='%s', dsetname='%s', title = '%s', defn='%s')" \
% (self._varname, self._dsetname, self._title, self._definition)
return infostr
def __del__(self):
'''
Removes this variable, if possible, from Ferret.
Any error are ignored.
'''
# Try to remove from Ferret but ignore errors
try:
self._removefromferret()
except Exception:
pass
def __cmp__(self, other):
'''
FerVars are ordered alphabetically, case-insensitive, first by
the Ferret variable name, then by the dataset name, title, and
finally by the definition. (Used by the "rich comparison" methods.)
'''
if not isinstance(other, FerVar):
raise NotImplementedError('other is not a FerVar')
supper = self._varname.upper()
oupper = other._varname.upper()
if supper < oupper:
return -1
if supper > oupper:
return 1
supper = self._dsetname.upper()
oupper = other._dsetname.upper()
if supper < oupper:
return -1
if supper > oupper:
return 1
supper = self._title.upper()
oupper = other._title.upper()
if supper < oupper:
return -1
if supper > oupper:
return 1
supper = self._definition.upper()
oupper = other._definition.upper()
if supper < oupper:
return -1
if supper > oupper:
return 1
return 0
def __eq__(self, other):
'''
Two FerVars are equal if all of the following are True:
they have the same Ferret variable name,
they have the same dataset name,
they have the same title, and
they have the same definition.
All these comparisons are case-insensitive.
'''
try:
return ( self.__cmp__(other) == 0 )
except NotImplementedError:
return NotImplemented
def __ne__(self, other):
'''
Two FerVars are not equal if any of the following are True:
they have different Ferret variable names,
they have different dataset names,
they have different titles, or
they have different definitions.
All these comparisons are case-insensitive.
'''
try:
return ( self.__cmp__(other) != 0 )
except NotImplementedError:
return NotImplemented
def __lt__(self, other):
'''
FerVars are ordered alphabetically, case-insensitive, first by
the Ferret variable name, then by the dataset name, title, and
finally by the definition.
'''
try:
return ( self.__cmp__(other) < 0 )
except NotImplementedError:
return NotImplemented
def __le__(self, other):
'''
FerVars are ordered alphabetically, case-insensitive, first by
the Ferret variable name, then by the dataset name, title, and
finally by the definition.
'''
try:
return ( self.__cmp__(other) <= 0 )
except NotImplementedError:
return NotImplemented
def __gt__(self, other):
'''
FerVars are ordered alphabetically, case-insensitive, first by
the Ferret variable name, then by the dataset name, title, and
finally by the definition.
'''
try:
return ( self.__cmp__(other) > 0 )
except NotImplementedError:
return NotImplemented
def __ge__(self, other):
'''
FerVars are ordered alphabetically, case-insensitive, first by
the Ferret variable name, then by the dataset name, title, and
finally by the definition.
'''
try:
return ( self.__cmp__(other) >= 0 )
except NotImplementedError:
return NotImplemented
def __nonzero__(self):
'''
Returns False if the Ferret variable name, dataset name, title,
and definition are all empty. (For Python2.x)
'''
if self._varname:
return True
if self._dsetname:
return True
if self._title:
return True
if self._definition:
return True
return False
def __bool__(self):
'''
Returns False if the Ferret variable name, dataset name, title
and definition are all empty. (For Python3.x)
'''
return self.__nonzero__()
def __add__(self, other):
'''
If other is a FerVar, returns an anonymous FerVar whose definition
is the sum of the FerVar definitions.
If other is Real, returns an anonymous FerVar whose definition
is the sum of the FerVar definition with the number.
If other is not a FerVar or Real, returns NotImplemented
'''
if isinstance(other, FerVar):
newdef = '(%s) + (%s)' % (self._definition, other._definition)
newvar = FerVar(defn=newdef)
newvar._requires.update(self._requires)
newvar._requires.update(other._requires)
return newvar
if isinstance(other, numbers.Real):
newdef = '(%s) + %s' % (self._definition, str(other))
newvar = FerVar(defn=newdef)
newvar._requires.update(self._requires)
return newvar
return NotImplemented
def __radd__(self, other):
'''
If other is a FerVar, returns an anonymous FerVar whose definition
is the sum of the FerVar definitions.
If other is Real, returns an anonymous FerVar whose definition
is the sum of the FerVar definition with the number.
If other is not a FerVar or Real, returns NotImplemented
'''
if isinstance(other, FerVar):
newdef = '(%s) + (%s)' % (other._definition, self._definition)
newvar = FerVar(defn=newdef)
newvar._requires.update(self._requires)
newvar._requires.update(other._requires)
return newvar
if isinstance(other, numbers.Real):
newdef = '%s + (%s)' % (str(other), self._definition)
newvar = FerVar(defn=newdef)
newvar._requires.update(self._requires)
return newvar
return NotImplemented
def __sub__(self, other):
'''
If other is a FerVar, returns an anonymous FerVar whose definition
is the difference (self - other) of the FerVar definitions.
If other is Real, returns an anonymous FerVar whose definition
is the difference (self - other) of the FerVar definition with the number.
If other is not a FerVar or Real, returns NotImplemented
'''
if isinstance(other, FerVar):
newdef = '(%s) - (%s)' % (self._definition, other._definition)
newvar = FerVar(defn=newdef)
newvar._requires.update(self._requires)
newvar._requires.update(other._requires)
return newvar
if isinstance(other, numbers.Real):
newdef = '(%s) - %s' % (self._definition, str(other))
newvar = FerVar(defn=newdef)
newvar._requires.update(self._requires)
return newvar
return NotImplemented
def __rsub__(self, other):
'''
If other is a FerVar, returns an anonymous FerVar whose definition
is the difference (other - self) of the FerVar definitions.
If other is Real, returns an anonymous FerVar whose definition
is the difference (other - self) of the FerVar definition with the number.
If other is not a FerVar or Real, returns NotImplemented
'''
if isinstance(other, FerVar):
newdef = '(%s) - (%s)' % (other._definition, self._definition)
newvar = FerVar(defn=newdef)
newvar._requires.update(self._requires)
newvar._requires.update(other._requires)
return newvar
if isinstance(other, numbers.Real):
newdef = '%s - (%s)' % (str(other), self._definition)
newvar = FerVar(defn=newdef)
newvar._requires.update(self._requires)
return newvar
return NotImplemented
def __mul__(self, other):
'''
If other is a FerVar, returns an anonymous FerVar whose definition
is the product of the FerVar definitions.
If other is Real, returns an anonymous FerVar whose definition
is the product of the FerVar definition with the number.
If other is not a FerVar or Real, returns NotImplemented
'''
if isinstance(other, FerVar):
newdef = '(%s) * (%s)' % (self._definition, other._definition)
newvar = FerVar(defn=newdef)
newvar._requires.update(self._requires)
newvar._requires.update(other._requires)
return newvar
if isinstance(other, numbers.Real):
newdef = '(%s) * %s' % (self._definition, str(other))
newvar = FerVar(defn=newdef)
newvar._requires.update(self._requires)
return newvar
return NotImplemented
def __rmul__(self, other):
'''
If other is a FerVar, returns an anonymous FerVar whose definition
is the product of the FerVar definitions.
If other is Real, returns an anonymous FerVar whose definition
is the product of the FerVar definition with the number.
If other is not a FerVar or Real, returns NotImplemented
'''
if isinstance(other, FerVar):
newdef = '(%s) * (%s)' % (other._definition, self._definition)
newvar = FerVar(defn=newdef)
newvar._requires.update(self._requires)
newvar._requires.update(other._requires)
return newvar
if isinstance(other, numbers.Real):
newdef = '%s * (%s)' % (str(other), self._definition)
newvar = FerVar(defn=newdef)
newvar._requires.update(self._requires)
return newvar
return NotImplemented
def __truediv__(self, other):
'''
If other is a FerVar, returns an anonymous FerVar whose definition
is the quotient (self / other) of the FerVar definitions.
If other is Real, returns an anonymous FerVar whose definition
is the quotient (self / other) of the FerVar definition with the number.
If other is not a FerVar or Real, returns NotImplemented
(For Python3.x)
'''
if isinstance(other, FerVar):
newdef = '(%s) / (%s)' % (self._definition, other._definition)
newvar = FerVar(defn=newdef)
newvar._requires.update(self._requires)
newvar._requires.update(other._requires)
return newvar
if isinstance(other, numbers.Real):
newdef = '(%s) / %s' % (self._definition, str(other))
newvar = FerVar(defn=newdef)
newvar._requires.update(self._requires)
return newvar
return NotImplemented
def __rtruediv__(self, other):
'''
If other is a FerVar, returns an anonymous FerVar whose definition
is the quotient (other / self) of the FerVar definitions.
If other is Real, returns an anonymous FerVar whose definition
is the quotient (other / self) of the FerVar definition with the number.
If other is not a FerVar or Real, returns NotImplemented
(For Python3.x)
'''
if isinstance(other, FerVar):
newdef = '(%s) / (%s)' % (other._definition, self._definition)
newvar = FerVar(defn=newdef)
newvar._requires.update(self._requires)
newvar._requires.update(other._requires)
return newvar
if isinstance(other, numbers.Real):
newdef = '%s / (%s)' % (str(other), self._definition)
newvar = FerVar(defn=newdef)
newvar._requires.update(self._requires)
return newvar
return NotImplemented
def __div__(self, other):
'''
If other is a FerVar, returns an anonymous FerVar whose definition
is the quotient (self / other) of the FerVar definitions.
If other is Real, returns an anonymous FerVar whose definition
is the quotient (self / other) of the FerVar definition with the number.
If other is not a FerVar or Real, returns NotImplemented
(For Python2.x)
'''
return self.__truediv__(other)
def __rdiv__(self, other):
'''
If other is a FerVar, returns an anonymous FerVar whose definition
is the quotient (other / self) of the FerVar definitions.
If other is Real, returns an anonymous FerVar whose definition
is the quotient (other / self) of the FerVar definition with the number.
If other is not a FerVar or Real, returns NotImplemented
(For Python2.x)
'''
return self.__rtruediv__(other)
def __pow__(self, other):
'''
If other is a FerVar, returns an anonymous FerVar whose definition
is the exponentiation (self ^ other) of the FerVar definitions.
If other is Real, returns an anonymous FerVar whose definition
is the exponentiation (self ^ other) of the FerVar definition with the number.
If other is not a FerVar or Real, returns NotImplemented
'''
if isinstance(other, FerVar):
newdef = '(%s) ^ (%s)' % (self._definition, other._definition)
newvar = FerVar(defn=newdef)
newvar._requires.update(self._requires)
newvar._requires.update(other._requires)
return newvar
if isinstance(other, numbers.Real):
newdef = '(%s) ^ %s' % (self._definition, str(other))
newvar = FerVar(defn=newdef)
newvar._requires.update(self._requires)
return newvar
return NotImplemented
def __rpow__(self, other):
'''
If other is a FerVar, returns an anonymous FerVar whose definition
is the exponentiation (other ^ self) of the FerVar definitions.
If other is Real, returns an anonymous FerVar whose definition
is the exponentiation (other ^ self) of the FerVar definition with the number.
If other is not a FerVar or Real, returns NotImplemented
'''
if isinstance(other, FerVar):
newdef = '(%s) ^ (%s)' % (other._definition, self._definition)
newvar = FerVar(defn=newdef)
newvar._requires.update(self._requires)
newvar._requires.update(other._requires)
return newvar
if isinstance(other, numbers.Real):
newdef = '%s ^ (%s)' % (str(other), self._definition)
newvar = FerVar(defn=newdef)
newvar._requires.update(self._requires)
return newvar
return NotImplemented
def __neg__(self):
'''
Returns an anonymous FerVar whose definition is
the product of -1.0 and this FerVar definition.
'''
newdef = '-1.0 * (%s)' % self._definition
newvar = FerVar(defn=newdef)
newvar._requires.update(self._requires)
return newvar
def __pos__(self):
'''
Returns an anonymous FerVar whose definition is
the same as this FerVar definition.
'''
newvar = FerVar(defn=self._definition)
newvar._requires.update(self._requires)
return newvar
def __abs__(self):
'''
Returns an anonymous FerVar whose definition is
the absolute value of this FerVar definition.
'''
newdef = 'abs(%s)' % self._definition
newvar = FerVar(defn=newdef)
newvar._requires.update(self._requires)
return newvar
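    # Illustrative composition of the arithmetic operators above (hypothetical
    # FerVars 'sst' and 'airt'; no Ferret evaluation happens until the result
    # is assigned into a dataset):
    #     diff = sst - airt       ->  definition '(SST[d=coads]) - (AIRT[d=coads])'
    #     scaled = 2.5 * diff     ->  definition '2.5 * ((SST[d=coads]) - (AIRT[d=coads]))'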
def __getitem__(self, key):
'''
This FerVar must be assigned in Ferret.
If key is 'data', returns the data array for this FerVar,
loading it if necessary.
If key is 'grid', returns the data grid for this FerVar,
loading it if necessary.
If key is 'missval', returns the value for missing data
for this FerVar.
If key is 'unit', returns the data unit for this FerVar.
Otherwise, assumes key is a slice or subset specification,
and returns an anonymous FerVar whose definition is a
subset of this FerVar.
key is an int, float, string, int slice, float slice,
string slice, or a tuple of these values.
- int are interpreted as index/indices
- floats are interpreted as axis values
- strings are interpreted as axis values possibly with units
Units in a string designate an axis; otherwise the index
within the given tuple (or zero if not a tuple) specifies the axis.
For example ['20N':'50N'] will always be a latitude subset.
TODO: handle step values
'''
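        # Illustrative subsets, assuming this FerVar is the Ferret variable SST
        # in dataset coads (the names here are hypothetical examples):
        #     sst['20E':'60E', '10S':'10N']  ->  SST[d=coads,X=20E:60E,Y=10S:10N]
        #     sst[0:10]                      ->  SST[d=coads,I=1:11]   (ints become one-based Ferret indices)
        #     sst['15-JAN-2000']             ->  SST[d=coads,T=15-JAN-2000]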
if key is None:
raise KeyError('None is not a valid key')
if not self._varname:
raise NotImplementedError('variable not assigned in Ferret')
if key == 'data':
return self.getdata()
if key == 'grid':
return self.getgrid()
if key == 'missval':
return self.getmissval()
if key == 'unit':
return self.getunit()
coordlimits = [ None ] * pyferret.MAX_FERRET_NDIM
indexlimits = [ None ] * pyferret.MAX_FERRET_NDIM
changed = False
# TODO: handle step values, try to condense code
if isinstance(key, tuple):
for k in range(len(key)):
piece = key[k]
if piece is None:
continue
if isinstance(piece, slice):
try:
(axtype, start, stop, step) = pyferret.FerAxis._parsegeoslice(piece)
except Exception as ex:
raise KeyError('%s is not valid: %s' % (str(piece), str(ex)))
if step is not None:
raise KeyError('step values in slices are not supported at this time')
if axtype == pyferret.AXISTYPE_LONGITUDE:
if coordlimits[pyferret.X_AXIS] or indexlimits[pyferret.X_AXIS]:
raise KeyError('two longitude slices given')
coordlimits[pyferret.X_AXIS] = '%s:%s' % (str(start), str(stop))
changed = True
elif axtype == pyferret.AXISTYPE_LATITUDE:
if coordlimits[pyferret.Y_AXIS] or indexlimits[pyferret.Y_AXIS]:
raise KeyError('two latitude slices given')
coordlimits[pyferret.Y_AXIS] = '%s:%s' % (str(start), str(stop))
changed = True
elif axtype == pyferret.AXISTYPE_LEVEL:
if coordlimits[pyferret.Z_AXIS] or indexlimits[pyferret.Z_AXIS]:
raise KeyError('two level slices given')
coordlimits[pyferret.Z_AXIS] = '%s:%s' % (str(start), str(stop))
changed = True
elif axtype == pyferret.AXISTYPE_TIME:
if coordlimits[pyferret.T_AXIS] or indexlimits[pyferret.T_AXIS]:
raise KeyError('two time slices given')
starttime = pyferret.FerAxis._makedatestring(start)
stoptime = pyferret.FerAxis._makedatestring(stop)
coordlimits[pyferret.T_AXIS] = '%s:%s' % (starttime, stoptime)
changed = True
elif isinstance(start,int) and isinstance(stop,int):
if coordlimits[k] or indexlimits[k]:
raise KeyError('two slices for axis index %d given' % k)
# do not know the axis length at this time
if (start < 0) or (stop < 0):
raise KeyError('negative indices not supported at this time')
# Ferret indices start at 1
start += 1
stop += 1
indexlimits[k] = '%d:%d' % (start, stop)
changed = True
elif isinstance(start,numbers.Real) and isinstance(stop,numbers.Real):
if coordlimits[k] or indexlimits[k]:
raise KeyError('two slices for axis index %d given' % k)
coordlimits[k] = '%s:%s' % (str(start), str(stop))
changed = True
elif (start is None) and (stop is None):
# full range on this axis
if coordlimits[k] or indexlimits[k]:
raise KeyError('two slices for axis index %d given' % k)
continue
else:
                        raise KeyError('%s is not valid' % str(piece))
else:
try:
(axtype, val) = pyferret.FerAxis._parsegeoval(piece)
except Exception as ex:
raise KeyError('%s is not valid: %s' % (str(piece), str(ex)))
if axtype == pyferret.AXISTYPE_LONGITUDE:
if coordlimits[pyferret.X_AXIS] or indexlimits[pyferret.X_AXIS]:
raise KeyError('two longitude slices given')
coordlimits[pyferret.X_AXIS] = '%s' % str(val)
changed = True
elif axtype == pyferret.AXISTYPE_LATITUDE:
if coordlimits[pyferret.Y_AXIS] or indexlimits[pyferret.Y_AXIS]:
raise KeyError('two latitude slices given')
coordlimits[pyferret.Y_AXIS] = '%s' % str(val)
changed = True
elif axtype == pyferret.AXISTYPE_LEVEL:
if coordlimits[pyferret.Z_AXIS] or indexlimits[pyferret.Z_AXIS]:
raise KeyError('two level slices given')
coordlimits[pyferret.Z_AXIS] = '%s' % str(val)
changed = True
elif axtype == pyferret.AXISTYPE_TIME:
if coordlimits[pyferret.T_AXIS] or indexlimits[pyferret.T_AXIS]:
raise KeyError('two time slices given')
coordlimits[pyferret.T_AXIS] = pyferret.FerAxis._makedatestring(val)
changed = True
elif isinstance(val,int):
if coordlimits[k] or indexlimits[k]:
raise KeyError('two slices for axis index %d given' % k)
# do not know the axis length at this time
if val < 0:
raise KeyError('negative indices not supported at this time')
# Ferret indices start at 1
val += 1
indexlimits[k] = '%d' % val
changed = True
elif isinstance(val,numbers.Real):
if coordlimits[k] or indexlimits[k]:
raise KeyError('two slices for axis index %d given' % k)
coordlimits[k] = '%s' % str(val)
changed = True
else:
                        raise KeyError('%s is not valid' % str(piece))
elif isinstance(key, slice):
try:
(axtype, start, stop, step) = pyferret.FerAxis._parsegeoslice(key)
except Exception as ex:
raise KeyError('%s is not valid: %s' % (str(key), str(ex)))
if step is not None:
raise KeyError('step values in slices are not supported at this time')
if axtype == pyferret.AXISTYPE_LONGITUDE:
coordlimits[pyferret.X_AXIS] = '%s:%s' % (str(start), str(stop))
changed = True
elif axtype == pyferret.AXISTYPE_LATITUDE:
coordlimits[pyferret.Y_AXIS] = '%s:%s' % (str(start), str(stop))
changed = True
elif axtype == pyferret.AXISTYPE_LEVEL:
coordlimits[pyferret.Z_AXIS] = '%s:%s' % (str(start), str(stop))
changed = True
elif axtype == pyferret.AXISTYPE_TIME:
starttime = pyferret.FerAxis._makedatestring(start)
stoptime = pyferret.FerAxis._makedatestring(stop)
coordlimits[pyferret.T_AXIS] = '%s:%s' % (starttime, stoptime)
changed = True
elif isinstance(start,int) and isinstance(stop,int):
# do not know the axis length at this time
if (start < 0) or (stop < 0):
raise KeyError('negative indices not supported at this time')
# Ferret indices start at 1
start += 1
stop += 1
indexlimits[0] = '%d:%d' % (start, stop)
changed = True
elif isinstance(start,numbers.Real) and isinstance(stop,numbers.Real):
coordlimits[0] = '%s:%s' % (str(start), str(stop))
changed = True
elif (start is None) and (stop is None):
# full range - standard way of generating a duplicate
pass
else:
                raise KeyError('%s is not valid' % str(key))
else:
try:
(axtype, val) = pyferret.FerAxis._parsegeoval(key)
except Exception as ex:
raise KeyError('%s is not valid: %s' % (str(key), str(ex)))
if axtype == pyferret.AXISTYPE_LONGITUDE:
coordlimits[pyferret.X_AXIS] = '%s' % str(val)
changed = True
elif axtype == pyferret.AXISTYPE_LATITUDE:
coordlimits[pyferret.Y_AXIS] = '%s' % str(val)
changed = True
elif axtype == pyferret.AXISTYPE_LEVEL:
coordlimits[pyferret.Z_AXIS] = '%s' % str(val)
changed = True
elif axtype == pyferret.AXISTYPE_TIME:
coordlimits[pyferret.T_AXIS] = pyferret.FerAxis._makedatestring(val)
changed = True
elif isinstance(val,int):
# do not know the axis length at this time
if val < 0:
raise KeyError('negative indices not supported at this time')
# Ferret indices start at 1
val += 1
indexlimits[k] = '%d' % val
changed = True
elif isinstance(val,numbers.Real):
coordlimits[0] = '%s' % str(val)
changed = True
else:
raise KeyError('%s is not valid' % str(key))
if not changed:
# the whole thing - definition is just this variable
newvar = FerVar(defn=self.fername())
newvar._requires.update(self._requires)
return newvar
# create the subset definition in Ferret
if self._dsetname:
newdef = '%s[d=%s,' % (self._varname, self._dsetname)
else:
newdef = '%s[' % self._varname
if coordlimits[pyferret.X_AXIS]:
newdef += 'X=%s,' % coordlimits[pyferret.X_AXIS]
if indexlimits[pyferret.X_AXIS]:
newdef += 'I=%s,' % indexlimits[pyferret.X_AXIS]
if coordlimits[pyferret.Y_AXIS]:
newdef += 'Y=%s,' % coordlimits[pyferret.Y_AXIS]
if indexlimits[pyferret.Y_AXIS]:
newdef += 'J=%s,' % indexlimits[pyferret.Y_AXIS]
if coordlimits[pyferret.Z_AXIS]:
newdef += 'Z=%s,' % coordlimits[pyferret.Z_AXIS]
if indexlimits[pyferret.Z_AXIS]:
newdef += 'K=%s,' % indexlimits[pyferret.Z_AXIS]
if coordlimits[pyferret.T_AXIS]:
newdef += 'T=%s,' % coordlimits[pyferret.T_AXIS]
if indexlimits[pyferret.T_AXIS]:
newdef += 'L=%s,' % indexlimits[pyferret.T_AXIS]
if coordlimits[pyferret.E_AXIS]:
newdef += 'E=%s,' % coordlimits[pyferret.E_AXIS]
if indexlimits[pyferret.E_AXIS]:
newdef += 'M=%s,' % indexlimits[pyferret.E_AXIS]
if coordlimits[pyferret.F_AXIS]:
newdef += 'F=%s,' % coordlimits[pyferret.F_AXIS]
if indexlimits[pyferret.F_AXIS]:
newdef += 'N=%s,' % indexlimits[pyferret.F_AXIS]
# replace the final , with ]
newdef = newdef[:-1] + ']'
newvar = FerVar(defn=newdef)
newvar._requires.update(self._requires)
return newvar
def __getattr__(self, name):
'''
Return the data array (if name='data'), data grid (if name='grid'),
name (if name='name'), or a copy of the coordinates (if name='coords')
Note that this method is only called when the parent object
does not have an attribute with this name.
'''
try:
if name in _ADDED_ATTRIBUTES:
return self.__getitem__(name)
except KeyError:
pass
raise AttributeError("unknown attribute '%s'" % name)
def __dir__(self):
'''
Returns a list of known attributes, including those added
by the __getattr__ method.
'''
mydir = list(_ADDED_ATTRIBUTES)
mydir.extend( dir(super(FerVar, self)) )
return mydir
def _markasknownvar(self, varname, dsetname, isfilevar):
'''
Marks this variable as a variable already defined in Ferret.
'''
if not varname:
raise ValueError('varname is not given')
if not isinstance(varname, str):
raise ValueError('varname is not a string')
if dsetname and not isinstance(dsetname, str):
raise ValueError('dsetname is not a string')
self._varname = varname
if dsetname:
self._dsetname = dsetname
else:
self._dsetname = ''
self._isfilevar = bool(isfilevar)
self._definition = self.fername()
self._requires.add(varname.upper())
self.unload()
def _assigninferret(self, varname, dsetname):
'''
Defines this FerVar in Ferret using the given variable name
associated with the given dataset name.
varname (string): name for the variable in Ferret
dsetname (string): name of the dataset to contain the variable
Raises a ValueError if there is a problem.
'''
if not self._definition:
raise ValueError('this FerVar does not contain a definition')
if not varname:
raise ValueError('variable name to be assigned is not given')
if varname.upper() in self._requires:
raise ValueError('recursive definitions cannot be implemented in Ferret')
# Assign the variable in Ferret
cmdstr = 'DEFINE VAR'
if dsetname:
cmdstr += '/D="%s"' % dsetname
if self._title:
cmdstr += '/TITLE="%s"' % self._title
cmdstr += ' %s = %s' % (varname, self._definition)
(errval, errmsg) = pyferret.run(cmdstr)
if errval != pyferret.FERR_OK:
raise ValueError('problems defining %s (%s) in Ferret: %s' % (varname, cmdstr, errmsg))
# Revise the fields in this FerVar to reflect this assignment
self._markasknownvar(varname, dsetname, False)
def _removefromferret(self):
'''
Removes (cancels) this variable in Ferret, then unloads this FerVar
and erases _varname. Raises a NotImplementedError if this is a file
variable. Raises a ValueError if there is a Ferret problem. This
normally is not called by the user; instead delete the FerVar from
the dataset.
'''
# ignore if this Ferret variable has already been removed from Ferret
if not self._varname:
return
fername = self.fername()
if self._isfilevar:
raise NotImplementedError('%s is a file variable; close the dataset to remove' % fername)
cmdstr = 'CANCEL VAR %s' % fername
(errval, errmsg) = pyferret.run(cmdstr)
if errval != pyferret.FERR_OK:
raise ValueError('unable to remove variable %s from Ferret: %s' % (fername, errmsg))
self._varname = ''
self.unload()
def unload(self):
'''
Clears the grid and data stored in this FerVar. After this call, any
request for the grid or data will automatically load the latest values
from Ferret. This method should be called anytime there is a change
in the definition of this variable, or a variable this variable uses.
'''
self._datagrid = None
self._dataarray = None
self._dataunit = ''
self._missingvalue = None
def load(self):
'''
Retrieves the grid and data for this Ferret variable from Ferret.
This method is automatically called before returning the grid or data
for the first time for this variable. This can be called to update
the grid or data in this FerVar after any change in the definition
of the variable. Alternatively, unload can be called to clear any
stored grid and data, delaying the update from Ferret until the grid
or data is requested.
Raises a ValueError if problems occur.
'''
fername = self.fername()
datadict = pyferret.getdata(fername, False)
feraxes = [ ]
for (axistype,axcoords,axunit,axname) in zip(
datadict["axis_types"], datadict["axis_coords"],
datadict["axis_units"], datadict["axis_names"]):
feraxes.append( pyferret.FerAxis(coords=axcoords,
axtype=axistype, unit=axunit, name=axname) )
self._datagrid = pyferret.FerGrid(axes=feraxes, name=fername)
self._dataarray = datadict["data"]
self._dataunit = datadict["data_unit"]
self._missingvalue = datadict["missing_value"]
def getdata(self):
'''
Returns a copy of the data array for this Ferret variable,
first loading this variable if necessary.
Raises a ValueError if a problem occurs.
'''
if (self._datagrid is None) or (self._dataarray is None):
self.load()
return self._dataarray.copy('A')
def getgrid(self):
'''
Returns a copy of the data grid for this Ferret variable,
first loading this variable if necessary.
Raises a ValueError if a problem occurs.
'''
if (self._datagrid is None) or (self._dataarray is None):
self.load()
return self._datagrid.copy()
def getmissval(self):
'''
Returns the value used for missing data for this Ferret
variable, first loading this variable if necessary.
Raises a ValueError if a problem occurs.
'''
if (self._datagrid is None) or (self._dataarray is None):
self.load()
# The missing value is a single-element ndarray
return self._missingvalue[0]
def getunit(self):
'''
Returns the unit string of the data for this Ferret
variable, first loading this variable if necessary.
Raises a ValueError if a problem occurs.
'''
if (self._datagrid is None) or (self._dataarray is None):
self.load()
return self._dataunit
def showgrid(self, qual=''):
'''
Show the Ferret grid information about this variable. This uses
the Ferret SHOW GRID command to create and display the information.
qual (string): Ferret qualifiers to add to the SHOW GRID command
'''
if not isinstance(qual, str):
raise ValueError('qual (Ferret qualifiers) must be a string')
cmdstr = 'SHOW GRID'
if qual:
cmdstr += qual
cmdstr += ' '
cmdstr += self.fername()
(errval, errmsg) = pyferret.run(cmdstr)
if errval != pyferret.FERR_OK:
raise ValueError('Ferret command "%s" failed: %s' % (cmdstr, errmsg))
def regrid(self, newgrid, method=REGRID_LINEAR):
'''
Returns an anonymous FerVar that is this variable regridded to the grid
implied by newgrid using the given method.
newgrid (FerVar | string | FerGrid): regrid to this implied grid;
if a FerVar, the implied grid is the grid used by the Ferret variable,
if a string, the implied grid is the grid known to Ferret by this name
if a FerGrid, the implied grid is this grid (TODO: implement)
method (string): method to perform the regridding; typically one of
pyferret.REGRID_LINEAR (default)
(multi-axis) linear interpolation of nearest source points around destination point
pyferret.REGRID_AVERAGE
length-weighted averaging of source point cells overlapping destination point cell
pyferret.REGRID_ASSOCIATE
blind association of source points to destination points by indices
pyferret.REGRID_MEAN
unweighted mean of source points in destination point cell
pyferret.REGRID_NEAREST
value of source point nearest the destination point
pyferret.REGRID_MIN
minimum value of source points in destination point cell
pyferret.REGRID_MAX
maximum value of source points in destination point cell
pyferret.REGRID_EXACT
copy values where source and destination points coincide;
other destination points assigned missing value
'''
if not self._varname:
raise NotImplementedError('regridding can only be performed on variables assigned in Ferret')
if not ( isinstance(method, str) and (method[0] == '@') ):
raise ValueError('invalid regridding method %s' % str(method))
if isinstance(newgrid, FerVar):
if not newgrid._varname:
raise ValueError('FerVar used for the new grid is not assigned in Ferret')
gridname = newgrid.fername()
elif isinstance(newgrid, str):
gridname = newgrid
elif isinstance(newgrid, pyferret.FerGrid):
raise NotImplementedError('regrid using FerGrid not implemented at this time')
if self._dsetname:
newdef = '%s[d=%s,g=%s%s]' % (self._varname, self._dsetname, gridname, method)
else:
newdef = '%s[g=%s%s]' % (self._varname, gridname, method)
newvar = FerVar(defn=newdef)
newvar._requires.update(self._requires)
return newvar
```
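A minimal usage sketch of the subsetting methods defined above. The dataset name, the variable name, and the `pyferret.FerDSet` attribute-access/assignment workflow are assumptions taken from PyFerret's documented Python API rather than from this excerpt, so treat it as an illustration only.
```python
import pyferret

pyferret.start()                               # start the embedded Ferret engine
dset = pyferret.FerDSet('coads_climatology')   # assumed demo dataset with an 'sst' variable

# __getitem__ above turns geoslices into Ferret region qualifiers (Y=..., X=...)
tropics = dset.sst['20S':'20N', '140E':'140W']

# Assigning the anonymous FerVar to a dataset attribute defines it in Ferret
# (via _assigninferret above); grid and data are then loaded lazily on access.
dset.sst_tropics = tropics
print(dset.sst_tropics.getdata().shape)
print(dset.sst_tropics.getunit())
print(dset.sst_tropics.getmissval())
dset.sst_tropics.showgrid()
```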
#### File: pyfermod/graphbind/abstractpyferretbindings.py
```python
class AbstractPyFerretBindings(object):
'''
Abstract base class for providing bindings to graphics calls
from PyFerret for a graphics engine. The methods defined in
this class should all be re-implemented in a subclass for
proper PyFerret behavior.
'''
def __init__(self):
'''
When PyFerret creates a Window for an engine, it creates
an instance of the appropriate bindings class, then calls
the createWindow method of the newly created instance. Thus
instance variables will be specific to the one window associated
with the bindings instance.
'''
super(AbstractPyFerretBindings, self).__init__()
def createWindow(self, title, visible, noalpha, rasteronly):
'''
Creates a "Window object" for this graphics engine. Here,
a Window is the complete drawing area. However, no drawing
will be performed on a Window, only on Views (see beginView).
Initializes the graphics engine if needed.
The rasteronly option is for possible faster drawing by
drawing directly to an image surface. If true, deleting
segments may not be supported.
Arguments:
title: display title for the Window
visible: display Window on start-up?
noalpha: do not use the alpha channel in colors?
rasteronly: only raster images will be used?
Returns True if a Window was successfully created.
'''
return False
def deleteWindow(self):
'''
Deletes the Window associated with this instance of the bindings.
When this call returns True, the Window should not be visible,
all resources associated with this Window should have been released.
After calling this function, this instance of the bindings should
be deleted and considered no longer usable.
'''
return False
def setImageName(self, imagename, imgnamelen, formatname, fmtnamelen):
'''
Assigns the name and format of the image file to be created.
Arguments:
imagename - name for the image file (can be NULL)
imgnamelen - actual length of imagename (zero if NULL)
formatname - name of the image format (case insensitive,
can be NULL)
fmtnamelen - actual length of formatname (zero if NULL)
If formatname is empty or NULL, the filename extension of
imagename, if it exists and is recognized, will determine
the format.
This method only suggests the name of the image file to
be created. A file using the given name may or may not
be open from this call.
If a file was opened from this call (image data written
to file as it is being drawn), the saveWindow method may
not be supported.
If a file was not opened from this call, the saveWindow
method must be called to save the image. Thus, the
filename provided here may only be used as a default
filename.
'''
raise AttributeError('not implemented')
def setAntialias(self, antialias):
'''
Turns on (antialias True) or off (antialias False) anti-aliasing
in future drawing commands. May not be implemented and thus raise
an AttributeError.
'''
raise AttributeError('not implemented')
def beginView(self, leftfrac, bottomfrac, rightfrac, topfrac,
clipit):
'''
Creates a "View object" for the given Window. The view fractions
start at (0.0, 0.0) in the left top corner and increase to
(1.0, 1.0) in the right bottom corner; thus leftfrac must be less
than rightfrac and topfrac must be less than bottomfrac.
Arguments:
leftfrac: [0,1] fraction of the Window width
for the left side of the View
bottomfrac: [0,1] fraction of the Window height
for the bottom side of the View
rightfrac: [0,1] fraction of the Window width
for the right side of the View
topfrac: [0,1] fraction of the Window height
for the top side of the View
clipit: clip drawing to this View?
'''
raise AttributeError('not implemented')
def clipView(self, clipit):
'''
Enable or disable clipping to the current View.
Arguments:
clipit: clip drawings to the current View?
'''
raise AttributeError('not implemented')
def endView(self):
'''
Closes the current View. When this call returns, the graphics
drawn to the View should be visible in its Window.
'''
raise AttributeError('not implemented')
def beginSegment(self, segid):
'''
Creates a "Segment object" for the given Window.
A Segment is just a group of drawing commands.
Arguments:
segid: ID for the Segment
'''
raise AttributeError('not implemented')
def endSegment(self):
'''
End the current "Segment" for the Window.
'''
raise AttributeError('not implemented')
def deleteSegment(self, segid):
'''
Deletes the drawing commands in the indicated Segment.
Arguments:
segid: ID for the Segment to be deleted
'''
raise AttributeError('not implemented')
def updateWindow(self):
'''
Indicates the viewer should update the graphics displayed.
'''
raise AttributeError('not implemented')
def clearWindow(self, bkgcolor):
'''
Clears the Window of all drawings. The window is
initialized to all bkgcolor (the background color).
Arguments:
bkgcolor: initialize (fill) the Window with this Color
'''
raise AttributeError('not implemented')
def redrawWindow(self, bkgcolor):
'''
Redraw the current drawing except using bkgcolor as the
background color (the initialization color for the Window).
Arguments:
bkgcolor: initialize (fill) the Window with this Color
before redrawing the current drawing.
'''
raise AttributeError('not implemented')
def windowScreenInfo(self):
'''
Returns the four-tuple (dpix, dpiy, screenwidth, screenheight) for
the default screen (display) of this Window
dpix: dots (pixels) per inch, in the horizontal (X) direction
dpiy: dots (pixels) per inch, in the vertical (Y) direction
screenwidth: width of the screen (display) in pixels (dots)
screenheight: height of the screen (display) in pixels (dots)
'''
raise AttributeError('not implemented')
def resizeWindow(self, width, height):
'''
Sets the current size of the Window.
Arguments:
width: width of the Window, in "device units"
height: height of the window in "device units"
"device units" is pixels at the current window DPI
'''
raise AttributeError('not implemented')
def scaleWindow(self, scale):
'''
Sets the scaling factor for the Window. If zero, switch to
auto-scaling (automatically scales to best fit window size
without changing aspect ratio). If negative, scale using
the absolute value and then switch to auto-scaling.
Arguments:
scale: scaling factor to use
'''
raise AttributeError('not implemented')
def showWindow(self, visible):
'''
Display or hide a Window. A graphics engine that does not
have the ability to display a Window should ignore this call.
Arguments:
visible: display (if True) or hide (if False) the Window
'''
raise AttributeError('not implemented')
def saveWindow(self, filename, fileformat, transparent,
xinches, yinches, xpixels, ypixels, annotations):
'''
Save the contents of the window to a file. This might be called
when there is no image to save; in this case the call should be
ignored.
Arguments:
filename: name of the file to create
fileformat: name of the format to use
transparent: use a transparent background?
xinches: horizontal size of vector image in inches
yinches: vertical size of vector image in inches
xpixels: horizontal size of raster image in pixels
ypixels: vertical size of raster image in pixels
annotations: tuple of annotation strings
If fileformat is NULL, the fileformat is guessed from the
filename extension.
If transparent is False, the entire scene is initialized
to the last clearing color. If transparent is True, the
entire scene is initialized as transparent.
If annotations is not None, the strings given in the tuple
are to be displayed above the image. These annotations add
height, as needed, to the saved image (i.e., yinches or
ypixels is the height of the image below these annotations).
'''
raise AttributeError('not implemented')
def createColor(self, redfrac, greenfrac, bluefrac, opaquefrac):
'''
Returns a Color object from fractional [0.0, 1.0] intensities
of the red, green, and blue channels.
Arguments:
redfrac: fractional [0.0, 1.0] red intensity
greenfrac: fractional [0.0, 1.0] green intensity
bluefrac: fractional [0.0, 1.0] blue intensity
opaquefrac: fractional [0.0, 1.0] opaqueness
(0.0 is transparent; 1.0 is opaque) of the color.
If the graphics engine does not support this
feature (alpha channel), this may be silently
ignored and the color be completely opaque.
Raises an error if unable to create the Color object.
'''
raise AttributeError('not implemented')
def deleteColor(self, color):
'''
Delete a Color object created by createColor
Arguments:
color: Color to be deleted
'''
raise AttributeError('not implemented')
def createFont(self, familyname, fontsize, italic, bold, underlined):
'''
Returns a Font object.
Arguments:
familyname: name of the font family (e.g., "Helvetica", "Times")
fontsize: desired size of the font (scales with view size)
italic: use the italic version of the font?
bold: use the bold version of the font?
underlined: use the underlined version of the font?
Raises an error if unable to create the Font object.
'''
raise AttributeError('not implemented')
def deleteFont(self, font):
'''
Delete a Font object created by createFont
Arguments:
font: Font to be deleted
'''
raise AttributeError('not implemented')
def createPen(self, color, width, style, capstyle, joinstyle):
'''
Returns a Pen object.
Arguments:
color: Color to use
width: line width (scales with view size)
style: line style name (e.g., "solid", "dash")
capstyle: end-cap style name (e.g., "square")
joinstyle: join style name (e.g., "bevel")
Raises an error if unable to create the Pen object.
'''
raise AttributeError('not implemented')
def replacePenColor(self, pen, newcolor):
'''
Replaces the color in pen with newcolor.
Arguments:
pen: Pen object to modify
newcolor: Color to use
Raises an error if unable to replace the Color in the Pen.
'''
raise AttributeError('not implemented')
def deletePen(self, pen):
'''
Delete a Pen object created by createPen
Arguments:
pen: Pen to be deleted
'''
raise AttributeError('not implemented')
def createBrush(self, color, style):
'''
Returns a Brush object.
Arguments:
color: Color to use
style: fill style name (e.g., "solid", "cross")
Raises an error if unable to create the Brush object.
'''
raise AttributeError('not implemented')
def replaceBrushColor(self, brush, newcolor):
'''
Replaces the color in brush with newcolor.
Arguments:
brush: Brush object to modify
newcolor: Color to use
Raises an error if unable to replace the Color in the Brush.
'''
raise AttributeError('not implemented')
def deleteBrush(self, brush):
'''
Delete a Brush object created by createBrush
Arguments:
brush: Brush to be deleted
'''
raise AttributeError('not implemented')
def createSymbol(self, name, pts=None, fill=False):
'''
Returns a Symbol object associated with the given name.
If pts is not given, the symbol name must already be known,
either as a pre-defined symbol or from a previous call to
this method.
If pts is given, the value is coordinates that define the symbol
as multiline subpaths in a [-50,50] square. The location of the
point this symbol represents will be at the center of the square.
An invalid coordinate (outside [-50,50]) will terminate the current
subpath, and the next valid coordinate will start a new subpath.
This definition will replace an existing symbol with the given name.
Arguments:
name: (string) name of the symbol
pts: (sequence of pairs of floats) vertex coordinates
fill: (bool) color-fill symbol?
Raises an error
if name is not a string,
if pts, if not None, is not a sequence of pairs of numbers, or
if unable to create the Symbol object for any other reason.
Returns a Symbol object.
'''
raise AttributeError('not implemented')
def deleteSymbol(self, symbol):
'''
Delete a Symbol object created by createSymbol
Arguments:
symbol: Symbol to be deleted
'''
raise AttributeError('not implemented')
def setWidthFactor(self, widthfactor):
'''
Assigns the scaling factor to be used for pen widths and symbols sizes
Arguments:
widthfactor: positive float giving the new scaling factor to use
'''
raise AttributeError('not implemented')
def drawMultiline(self, ptsx, ptsy, pen):
'''
Draws connected line segments.
Arguments:
ptsx: X-coordinates of the endpoints
ptsy: Y-coordinates of the endpoints
pen: the Pen to use to draw the line segments
Coordinates are measured from the upper left corner
in "device units" (pixels at the current window DPI).
'''
raise AttributeError('not implemented')
def drawPoints(self, ptsx, ptsy, symbol, color, ptsize, highlight):
'''
Draws discrete points.
Arguments:
ptsx: X-coordinates of the points
ptsy: Y-coordinates of the points
symbol: the Symbol to use to draw a point
color: color of the symbol (default color if None or empty)
ptsize: size of the symbol (scales with view size)
highlight: outline color of the symbol (do not outline if None or empty)
Coordinates are measured from the upper left corner
in "device units" (pixels at the current window DPI).
'''
raise AttributeError('not implemented')
def drawPolygon(self, ptsx, ptsy, brush, pen):
'''
Draws a polygon.
Arguments:
ptsx: X-coordinates of the vertices
ptsy: Y-coordinates of the vertices
brush: the Brush to use to fill the polygon; if None
the polygon will not be filled
pen: the Pen to use to outline the polygon; if None
the polygon will not be outlined
Coordinates are measured from the upper left corner
in "device units" (pixels at the current window DPI).
'''
raise AttributeError('not implemented')
def drawRectangle(self, left, bottom, right, top, brush, pen):
'''
Draws a rectangle.
Arguments:
left: X-coordinate of the left edge
bottom: Y-coordinate of the bottom edge
right: X-coordinate of the right edge
top: Y-coordinate of the top edge
brush: the Brush to use to fill the polygon; if None
the polygon will not be filled
pen: the Pen to use to outline the polygon; if None
the polygon will not be outlined
Coordinates are measured from the upper left corner
in "device units" (pixels at the current window DPI).
'''
raise AttributeError('not implemented')
def textSize(self, text, font):
'''
Returns the width and height of the text if drawn in the given font.
The width is such that continuing text should be positioned at the
start of this text plus this width. The height will always be the
ascent plus descent for the font and is independent of the text.
Arguments:
text: the text string to draw
font: the font to use
Returns: (width, height) of the text in "device units"
(pixels at the current window DPI)
'''
raise AttributeError('not implemented')
def drawText(self, text, startx, starty, font, color, rotate):
'''
Draws text.
Arguments:
text: the text string to draw
startx: the X-coordinate of the beginning baseline
starty: the Y-coordinate of the beginning baseline
font: the font to use
color: the color to use as a solid brush or pen
rotate: the angle of the text baseline in degrees
clockwise from horizontal
Coordinates are measured from the upper left corner
in "device units" (pixels at the current window DPI).
'''
raise AttributeError('not implemented')
def setWaterMark(self, filename, len_filename, xloc, yloc, scalefrac, opacity):
'''
Overlays watermark.
Arguments:
filename: path to water mark image
len_filename: number of characters in filename
xloc: horizontal position of upper left corner of watermark image
yloc: vertical position of upper left corner of watermark image
scalefrac: multiple of original image size to display plot as
opacity: image visibility in range [0.0,1.0] where 0->invisible, 1->opaque
'''
print(filename)
print(xloc)
print(yloc)
print(scalefrac)
print(opacity)
raise AttributeError('not implemented')
```
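As a rough illustration of how this abstract class is meant to be subclassed, here is a do-nothing bindings sketch that records the calls it receives instead of drawing; the import path is inferred from the file location above and only a few of the abstract methods are overridden.
```python
from pyferret.graphbind.abstractpyferretbindings import AbstractPyFerretBindings

class RecordingBindings(AbstractPyFerretBindings):
    '''Bindings that record drawing calls instead of rendering them.'''

    def __init__(self):
        super(RecordingBindings, self).__init__()
        self.calls = []

    def createWindow(self, title, visible, noalpha, rasteronly):
        self.calls.append(('createWindow', title))
        return True

    def deleteWindow(self):
        self.calls.append(('deleteWindow',))
        return True

    def drawMultiline(self, ptsx, ptsy, pen):
        self.calls.append(('drawMultiline', len(ptsx)))

bindings = RecordingBindings()
bindings.createWindow('demo', visible=False, noalpha=True, rasteronly=False)
bindings.drawMultiline([0, 10, 20], [0, 5, 0], pen=None)
print(bindings.calls)
```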
#### File: pyfermod/stats/stats_chisquare.py
```python
from __future__ import print_function
import numpy
import pyferret
import scipy.stats
def ferret_init(id):
"""
Initialization for the stats_chisquare Ferret PyEF
"""
axes_values = [ pyferret.AXIS_DOES_NOT_EXIST ] * pyferret.MAX_FERRET_NDIM
axes_values[0] = pyferret.AXIS_CUSTOM
false_influences = [ False ] * pyferret.MAX_FERRET_NDIM
retdict = { "numargs": 3,
"descript": "Returns chi-square test stat. and prob. (and num. good categories, N) " \
"that sample counts of cat. data matches pop. expected counts. ",
"axes": axes_values,
"argnames": ( "SAMPLE_CNTS", "EXPECT_CNTS", "DELTA_DEGFREE", ),
"argdescripts": ( "Sample counts of categorical data",
"Expected counts or relative frequencies (will be adjusted)",
"Difference from standard (N-1) degrees of freedom (num. computed parameters)", ),
"argtypes": ( pyferret.FLOAT_ARRAY, pyferret.FLOAT_ARRAY, pyferret.FLOAT_ONEVAL, ),
"influences": ( false_influences, false_influences, false_influences, ),
}
return retdict
def ferret_custom_axes(id):
"""
Define custom axis of the stats_chisquare Ferret PyEF
"""
axis_defs = [ None ] * pyferret.MAX_FERRET_NDIM
axis_defs[0] = ( 1, 3, 1, "X2,P,N", False )
return axis_defs
def ferret_compute(id, result, resbdf, inputs, inpbdfs):
"""
Performs a chi-square test that a sample with the observed counts
of categorical data, given in inputs[0], comes from a population
with the expected counts or relative frequencies given in inputs[1].
The difference from the standard (n-1) degrees of freedom (e.g., the
number of population parameters estimated from the sample) is given
in inputs[2]. The test statistic value and probability are returned
in result. The counts arrays must either have the same shape or both
have a single defined non-singular axis of the same size. Categories
that contain undefined counts are eliminated before performing the
test. Population values that are used in the test are adjusted so
their sum equals the sum of the sample counts used in the test.
"""
if inputs[0].shape != inputs[1].shape :
shp0 = inputs[0].squeeze().shape
shp1 = inputs[1].squeeze().shape
if (len(shp0) > 1) or (len(shp1) > 1) or (shp0 != shp1):
raise ValueError("SAMPLE_CNTS and EXPECT_CNTS must either have identical dimensions " \
"or both have only one defined non-singular axis of the same length")
samcnts = inputs[0].reshape(-1)
popcnts = inputs[1].reshape(-1)
badsam = ( numpy.fabs(samcnts - inpbdfs[0]) < 1.0E-5 )
badsam = numpy.logical_or(badsam, numpy.isnan(samcnts))
goodsam = numpy.logical_not(badsam)
badpop = ( numpy.fabs(popcnts - inpbdfs[1]) < 1.0E-5 )
badpop = numpy.logical_or(badpop, numpy.isnan(popcnts))
goodpop = numpy.logical_not(badpop)
goodmask = numpy.logical_and(goodsam, goodpop)
samcnts = numpy.array(samcnts[goodmask], dtype=numpy.float64)
numgood = len(samcnts)
if numgood < 2:
raise ValueError("Not enough defined counts in common in SAMPLE_CNTS and EXPECT_CNTS")
popcnts = numpy.array(popcnts[goodmask], dtype=numpy.float64)
# Adjust the expected counts so its sum matches the sum of the sample
# counts; thus expected counts can be proportions instead of counts
# and removes issues about missing values. Get the adjustment factor
# from the means instead of the sums for accuracy.
popcnts = popcnts * (samcnts.mean() / popcnts.mean())
ddof = int(float(inputs[2]) + 0.5)
fitparams = scipy.stats.chisquare(samcnts, popcnts, ddof)
result[:] = resbdf
# chi-square test statistic
result[0] = fitparams[0]
# probability
result[1] = fitparams[1]
# number of good categories
result[2] = numgood
#
# The rest of this is just for testing this module at the command line
#
if __name__ == "__main__":
# make sure ferret_init and ferret_custom_axes do not have problems
info = ferret_init(0)
info = ferret_custom_axes(0)
# Get a sample histogram and expected frequencies
ddof = 3
nbins = 90
ssize = 100 * nbins
distf = scipy.stats.weibull_min(2.0, 5.0)
chival = 1000.0
while chival > 100.0:
sample = distf.rvs(ssize)
bedges = distf.isf(numpy.linspace(0.95,0.05,nbins+1))
(histgr, retedges) = numpy.histogram(sample, bins=bedges)
histgr = numpy.array(histgr, dtype=numpy.float64)
exphist = numpy.ones((nbins,), dtype=numpy.float64) * histgr.mean()
chival = ((histgr - exphist)**2 / exphist).sum()
print("created a sample with chival = %f" % chival)
prob = scipy.stats.chi2(nbins - 1 - ddof).sf(chival)
expect = numpy.array([chival, prob, nbins], dtype=numpy.float64)
print("sample histogram = \n%s" % str(histgr))
print("expect histogram value for all bins = %f" % exphist[0])
print("expect result = %s" % str(expect))
# setup for the call to ferret_compute - one non-singular axis
inpbdfs = numpy.array([-9999.0, -8888.0, -7777.0], dtype=numpy.float64)
resbdf = numpy.array([-6666.0], dtype=numpy.float64)
samhist = inpbdfs[0] * numpy.ones((1, 1, 2 * nbins, 1, 1, 1), dtype=numpy.float64, order='F')
samhist[0, 0, ::2, 0, 0, 0] = histgr
pophist = numpy.ones((1, 2 * nbins, 1, 1, 1, 1), dtype=numpy.float64, order='F')
ddofarr = numpy.array([ddof], dtype=numpy.float64)
result = -5555.0 * numpy.ones((3, 1, 1, 1, 1, 1), dtype=numpy.float64, order='F')
# call ferret_compute and check the result
ferret_compute(0, result, resbdf, (samhist, pophist, ddofarr), inpbdfs)
result = result.reshape(-1)
print(" found result = %s" % str(result))
if not numpy.allclose(result, expect):
raise ValueError("Unexpected result")
# setup for the call to ferret_compute - multiple dimensions
inpbdfs = numpy.array([-9999.0, -8888.0, -7777.0], dtype=numpy.float64)
resbdf = numpy.array([-6666.0], dtype=numpy.float64)
samhist = inpbdfs[0] * numpy.ones((1, 2, nbins, 1, 1, 1), dtype=numpy.float64, order='F')
samhist[0, 0, ::2, 0, 0, 0] = histgr[0:nbins//2]
samhist[0, 1, ::2, 0, 0, 0] = histgr[nbins//2:]
pophist = numpy.ones((1, 2, nbins, 1, 1, 1), dtype=numpy.float64, order='F')
ddofarr = numpy.array([ddof], dtype=numpy.float64)
result = -5555.0 * numpy.ones((3, 1, 1, 1, 1, 1), dtype=numpy.float64, order='F')
# call ferret_compute and check the result
ferret_compute(0, result, resbdf, (samhist, pophist, ddofarr), inpbdfs)
result = result.reshape(-1)
print(" found result = %s" % str(result))
if not numpy.allclose(result, expect):
raise ValueError("Unexpected result")
# All successful
print("Success")
```
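Outside of Ferret, the core of the computation above can be reproduced directly with scipy; the counts below are made-up values and the bad-value flag is hypothetical, but the masking and rescaling mirror ferret_compute.
```python
import numpy
import scipy.stats

badflag = -9999.0
obs = numpy.array([12.0, 18.0, badflag, 25.0, 20.0, 15.0])
exp = numpy.array([ 1.0,  1.0,     1.0,  1.0,  1.0,  1.0])   # relative frequencies

# drop undefined observations, exactly as the badmask/goodmask logic above does
goodmask = numpy.logical_not(numpy.logical_or(
    numpy.fabs(obs - badflag) < 1.0E-5, numpy.isnan(obs)))
obs = obs[goodmask]
exp = exp[goodmask]

# rescale the expected counts so their sum matches the observed sum
exp = exp * (obs.mean() / exp.mean())

(chival, prob) = scipy.stats.chisquare(obs, exp, ddof=0)
print(chival, prob, len(obs))
```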
#### File: pyfermod/stats/stats_histogram.py
```python
from __future__ import print_function
import numpy
import pyferret
# The following is just to circumvent to call to pyferret.get_axis_info for testing
DOING_UNIT_TEST = False
def ferret_init(id):
"""
Initialization for the stats_histogram Ferret PyEF
"""
axes_values = [ pyferret.AXIS_IMPLIED_BY_ARGS ] * pyferret.MAX_FERRET_NDIM
true_influences = [ True ] * pyferret.MAX_FERRET_NDIM
false_influences = [ False ] * pyferret.MAX_FERRET_NDIM
retdict = { "numargs": 2,
"descript": "Returns unweighted histogram bin counts for all defined data values",
"axes": axes_values,
"argnames": ("VALS", "BINS_TEMPLATE"),
"argdescripts": ("Values to put into bins and then count",
"Template argument whose one defined axis gives midpoints of bins"),
"argtypes": (pyferret.FLOAT_ARRAY, pyferret.FLOAT_ARRAY),
"influences": (false_influences, true_influences),
}
return retdict
def ferret_compute(id, result, resbdf, inputs, inpbdfs):
"""
Assigns result with histogram bin counts of data in inputs[0]. Bin
limits are defined using the values of the one defined non-singular
axis associated with inputs[1]. The argument inputs[1] is otherwise
unused. Undefined values in inputs[0] are eliminated before binning.
"""
# get the box limits of the one defined non-singular axis of the second argument
if DOING_UNIT_TEST:
limits_func = my_get_box_limits
else:
limits_func = pyferret.get_axis_box_limits
limits_tuple = None
axis_used = None
for axis_num in (pyferret.X_AXIS, pyferret.Y_AXIS, pyferret.Z_AXIS,
pyferret.T_AXIS, pyferret.E_AXIS, pyferret.F_AXIS):
this_tuple = limits_func(id, pyferret.ARG2, axis_num)
if (this_tuple != None) and (len(this_tuple[0]) > 1):
if limits_tuple != None:
raise ValueError("BINS_TEMPLATE has more than one defined non-singular axis")
limits_tuple = this_tuple
axis_used = axis_num
if limits_tuple is None:
raise ValueError("BINS_TEMPLATE does not have a defined non-singular axis")
# get the histogram bin limits from the axis box limits
if not numpy.allclose(limits_tuple[0][1:], limits_tuple[1][:-1]):
raise ValueError("Unexpected error: gaps exist between axis box limits")
bin_edges = numpy.empty( ( len(limits_tuple[1]) + 1, ), dtype=numpy.float64)
bin_edges[0] = limits_tuple[0][0]
bin_edges[1:] = limits_tuple[1]
# get the clean data as a flattened array
badmask = ( numpy.fabs(inputs[0] - inpbdfs[0]) < 1.0E-5 )
badmask = numpy.logical_or(badmask, numpy.isnan(inputs[0]))
goodmask = numpy.logical_not(badmask)
values = inputs[0][goodmask]
# compute the histogram and assign the counts to result
(hist, edges) = numpy.histogram(values, bins=bin_edges)
if axis_used == pyferret.X_AXIS:
result[:,0,0,0,0,0] = hist
elif axis_used == pyferret.Y_AXIS:
result[0,:,0,0,0,0] = hist
elif axis_used == pyferret.Z_AXIS:
result[0,0,:,0,0,0] = hist
elif axis_used == pyferret.T_AXIS:
result[0,0,0,:,0,0] = hist
elif axis_used == pyferret.E_AXIS:
result[0,0,0,0,:,0] = hist
elif axis_used == pyferret.F_AXIS:
result[0,0,0,0,0,:] = hist
else:
raise ValueError("Unexpected axis_used value: %d" % axis_used)
#
# The rest of this is just for testing this module at the command line
#
if __name__ == "__main__":
DOING_UNIT_TEST = True
# create the my_get_box_limits function for testing
def my_get_box_limits(id, argnum, axisnum):
if id != 0:
raise ValueError("Unexpected my_get_box_limits argnum; expected: 0, found: %d" % \
argnum)
if argnum != pyferret.ARG2:
raise ValueError("Unexpected my_get_box_limits argnum; expected: %d, found: %d" % \
(pyferret.ARG2, argnum))
if axisnum != pyferret.Z_AXIS:
return None
limits = numpy.array([1.0, 2.0, 3.0, 4.0, 6.0, 9.0], dtype=numpy.float64)
return (limits[:-1], limits[1:])
# create the input values array with values on the edges and outside
values = numpy.arange(0.0, 10.2, 0.1, dtype=numpy.float64).reshape((1,6,1,17,1,1), order='F')
# create the expected results array
expected = -1.0 * numpy.ones((1,1,5,1,1,1), dtype=numpy.float64, order='F')
expected[0,0,:,0,0,0] = (10.0, 10.0, 10.0, 20.0, 31.0)
# make sure no errors when ferret_init called
info = ferret_init(0)
# make the call to ferret_compute
result = 999.0 * expected
resbdf = numpy.array([-1.0], dtype=numpy.float64)
inpbdfs = numpy.array([-1.0, -1.0], dtype=numpy.float64)
ferret_compute(0, result, resbdf, (values, None), inpbdfs)
# verify the results
if not numpy.allclose(result, expected):
raise ValueError("Unexpected results; expected:\n%s\nfound:\n%s" % (str(expected), str(result)))
print("Success")
```
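The binning step above reduces to a plain numpy.histogram call once the axis box limits are known; the limits below are copied from the unit test's my_get_box_limits stub.
```python
import numpy

# low and high box limits, as returned by the box-limits function above
lowedges = numpy.array([1.0, 2.0, 3.0, 4.0, 6.0], dtype=numpy.float64)
highedges = numpy.array([2.0, 3.0, 4.0, 6.0, 9.0], dtype=numpy.float64)

bin_edges = numpy.empty((len(highedges) + 1,), dtype=numpy.float64)
bin_edges[0] = lowedges[0]
bin_edges[1:] = highedges

values = numpy.arange(0.0, 10.2, 0.1)
(counts, edges) = numpy.histogram(values, bins=bin_edges)
print(counts)   # matches the expected counts in the unit test above
```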
#### File: pyfermod/stats/stats_spearmanr.py
```python
from __future__ import print_function
import math
import numpy
import pyferret
import scipy.stats
def ferret_init(id):
"""
Initialization for the stats_spearmanr PyEF
"""
axes_values = [ pyferret.AXIS_DOES_NOT_EXIST ] * pyferret.MAX_FERRET_NDIM
axes_values[0] = pyferret.AXIS_CUSTOM
false_influences = [ False ] * pyferret.MAX_FERRET_NDIM
retdict = { "numargs": 2,
"descript": "Returns Spearman's rank correlation coeff, " \
"and num good points, between two samples of data",
"axes": axes_values,
"argnames": ( "SAMPLEA", "SAMPLEB", ),
"argdescripts": ( "First array of sample data",
"Second array of sample data", ),
"argtypes": ( pyferret.FLOAT_ARRAY, pyferret.FLOAT_ARRAY, ),
"influences": ( false_influences, false_influences, ),
}
return retdict
def ferret_custom_axes(id):
"""
Define custom axis of the stats_spearmanr Ferret PyEF
"""
axis_defs = [ None ] * pyferret.MAX_FERRET_NDIM
axis_defs[0] = ( 1, 2, 1, "R,N", False )
return axis_defs
def ferret_compute(id, result, resbdf, inputs, inpbdfs):
"""
Assigns result with Spearman's rank correlation coefficient,
and the number of good point, between the two samples of
data given in inputs[0] and inputs[1]. Values compared are
only from positions that are defined in both arrays.
"""
if inputs[0].shape != inputs[1].shape :
shp0 = inputs[0].squeeze().shape
shp1 = inputs[1].squeeze().shape
if (len(shp0) > 1) or (len(shp1) > 1) or (shp0 != shp1):
raise ValueError("SAMPLEA and SAMPLEB must either have identical dimensions or "\
"both have only one defined non-singular axis of the same length")
sampa = inputs[0].reshape(-1)
sampb = inputs[1].reshape(-1)
bada = ( numpy.fabs(sampa - inpbdfs[0]) < 1.0E-5 )
bada = numpy.logical_or(bada, numpy.isnan(sampa))
badb = ( numpy.fabs(sampb - inpbdfs[1]) < 1.0E-5 )
badb = numpy.logical_or(badb, numpy.isnan(sampb))
goodmask = numpy.logical_not(numpy.logical_or(bada, badb))
valsa = numpy.array(sampa[goodmask], dtype=numpy.float64)
numpts = len(valsa)
if numpts < 2:
raise ValueError("Not enough defined points in common in SAMPLEA and SAMPLEB")
valsb = numpy.array(sampb[goodmask], dtype=numpy.float64)
fitparams = scipy.stats.spearmanr(valsa, valsb)
result[:] = resbdf
# correlation coefficient
result[0] = fitparams[0]
# ignore the probability of uncorrelated
# number of good pts
result[1] = numpts
#
# The rest of this is just for testing this module at the command line
#
if __name__ == "__main__":
# make sure ferret_init and ferret_custom_axes do not have problems
info = ferret_init(0)
info = ferret_custom_axes(0)
# Get a random sample from a normal distribution
ydim = 83
zdim = 17
samplesize = ydim * zdim
sampa = scipy.stats.norm(15.0, 2.0).rvs(samplesize)
# Create a correlated distribution
sampc = -numpy.log(sampa)
# Create an uncorrelated distribution
sampu = scipy.stats.norm(15.0, 2.0).rvs(samplesize)
# setup for the call to ferret_compute
inpbdfs = numpy.array([-9999.0, -8888.0], dtype=numpy.float64)
resbdf = numpy.array([-7777.0], dtype=numpy.float64)
inputa = numpy.empty((1, ydim, zdim, 1, 1, 1), dtype=numpy.float64, order='F')
inputc = numpy.empty((1, ydim, zdim, 1, 1, 1), dtype=numpy.float64, order='F')
inputu = numpy.empty((1, ydim, zdim, 1, 1, 1), dtype=numpy.float64, order='F')
index = 0
numgood = 0
numpos = 0
for j in range(ydim):
for k in range(zdim):
if (index % 23) == 3:
inputa[0, j, k, 0, 0, 0] = inpbdfs[0]
else:
inputa[0, j, k, 0, 0, 0] = sampa[index]
if (index % 31) == 3:
inputc[0, j, k, 0, 0, 0] = inpbdfs[1]
inputu[0, j, k, 0, 0, 0] = inpbdfs[1]
else:
inputc[0, j, k, 0, 0, 0] = sampc[index]
inputu[0, j, k, 0, 0, 0] = sampu[index]
if ((index % 23) != 3) and ((index % 31) != 3):
numgood += 1
if sampa[index] > 0.0:
numpos += 1
index += 1
resultc = -6666.0 * numpy.ones((2, 1, 1, 1, 1, 1), dtype=numpy.float64, order='F')
expectc = numpy.empty((2, 1, 1, 1, 1, 1), dtype=numpy.float64, order='F')
expectc[0,0,0,0,0,0] = -1.0
expectc[1,0,0,0,0,0] = numpos
resultu = -6666.0 * numpy.ones((2, 1, 1, 1, 1, 1), dtype=numpy.float64, order='F')
expectu = numpy.empty((2, 1, 1, 1, 1, 1), dtype=numpy.float64, order='F')
# rough expected correlation coefficient for uncorrelated
expectu[0,0,0,0,0,0] = 0.0
expectu[1,0,0,0,0,0] = numgood
# call ferret_compute with correlated data and check the results
ferret_compute(0, resultc, resbdf, (inputa, inputc), inpbdfs)
if not numpy.allclose(resultc, expectc):
raise ValueError("Unexpected result; expected: %s; found %s" % \
(str(expectc.reshape(-1)), str(resultc.reshape(-1))))
# call ferret_compute with uncorrelated data and check the results
ferret_compute(0, resultu, resbdf, (inputa, inputu), inpbdfs)
if not numpy.allclose(resultu, expectu, atol=0.08):
raise ValueError("Unexpected result; expected: %s; found %s" % \
(str(expectu.reshape(-1)), str(resultu.reshape(-1))))
# All successful
print("Success")
```
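The same masking-then-correlating pattern can be exercised directly with scipy on a couple of small made-up arrays; a strictly decreasing relation gives a rank correlation of -1.
```python
import numpy
import scipy.stats

a = numpy.array([1.0, 2.0, 3.0, numpy.nan, 5.0, 6.0])
b = -numpy.log(numpy.array([1.0, 2.0, 3.0, 4.0, numpy.nan, 6.0]))

# keep only positions defined in both arrays, as ferret_compute does
goodmask = numpy.logical_not(numpy.logical_or(numpy.isnan(a), numpy.isnan(b)))
(rho, pvalue) = scipy.stats.spearmanr(a[goodmask], b[goodmask])
print(rho, len(a[goodmask]))   # rho is -1.0 here since b decreases as a increases
```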
#### File: pyfermod/stats/stats_zscore.py
```python
from __future__ import print_function
import math
import numpy
import pyferret
import scipy.stats
def ferret_init(id):
"""
Initialization for the stats_zscore PyEF
"""
axes_values = [ pyferret.AXIS_IMPLIED_BY_ARGS ] * pyferret.MAX_FERRET_NDIM
true_influences = [ True ] * pyferret.MAX_FERRET_NDIM
retdict = { "numargs": 1,
"descript": "Returns standard scores for data values relative to " \
"a normal distribution with same mean and variance as the data",
"axes": axes_values,
"argnames": ( "VALUES", ),
"argdescripts": ( "Array of data values", ),
"argtypes": ( pyferret.FLOAT_ARRAY, ),
"influences": ( true_influences, ),
}
return retdict
def ferret_compute(id, result, resbdf, inputs, inpbdfs):
"""
Assigns result with standard scores of data values given in inputs[0]
relative to a normal distribution with the same mean and variance
as the data. For undefined data values, the result value will
be undefined.
"""
badmask = ( numpy.fabs(inputs[0] - inpbdfs[0]) < 1.0E-5 )
badmask = numpy.logical_or(badmask, numpy.isnan(inputs[0]))
goodmask = numpy.logical_not(badmask)
result[badmask] = resbdf
# convert to 64-bit for precision in calculating the mean and variance
sample = numpy.array(inputs[0][goodmask], dtype=numpy.float64)
# array[goodmask] is a flattened array
result[goodmask] = scipy.stats.zscore(sample)
#
# The rest of this is just for testing this module at the command line
#
if __name__ == "__main__":
# make sure ferret_init does not have problems
info = ferret_init(0)
# Get a random sample and the expected standard scores
ydim = 83
zdim = 17
samplesize = 1300
sample = scipy.stats.norm(5.0, 2.0).rvs(samplesize)
zscores = (sample - sample.mean()) / math.sqrt(sample.var(0))
# setup for the call to ferret_compute
inpbdfs = numpy.array([-9999.0], dtype=numpy.float64)
resbdf = numpy.array([-8888.0], dtype=numpy.float64)
input = numpy.empty((1, ydim, zdim, 1, 1, 1), dtype=numpy.float64, order='F')
expected = numpy.empty((1, ydim, zdim, 1, 1, 1), dtype=numpy.float64, order='F')
sindex = 0
iindex = 0
for j in range(ydim):
for k in range(zdim):
if ((iindex % 13) == 3) or (sindex >= samplesize):
input[0, j, k, 0, 0, 0] = inpbdfs[0]
expected[0, j, k, 0, 0, 0] = resbdf
else:
input[0, j, k, 0, 0, 0] = sample[sindex]
expected[0, j, k, 0, 0, 0] = zscores[sindex]
sindex += 1
iindex += 1
if sindex != samplesize:
raise ValueError("Unexpected final sindex of %d (ydim,zdim too small)" % sindex)
result = -7777.0 * numpy.ones((1, ydim, zdim, 1, 1, 1), dtype=numpy.float64, order='F')
# call ferret_compute and check the results
ferret_compute(0, result, resbdf, (input, ), inpbdfs)
if not numpy.allclose(result, expected, rtol=2.0E-7, atol=2.0E-7):
print("expected (flattened) =\n%s" % str(expected.reshape(-1, order='F')))
print("result (flattened) =\n%s" % str(result.reshape(-1, order='F')))
raise ValueError("Unexpected result")
# All successful
print("Success")
``` |
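A quick sanity check of the relation used in the test above: scipy.stats.zscore with its default ddof=0 is the same as subtracting the mean and dividing by the population standard deviation.
```python
import numpy
import scipy.stats

sample = numpy.array([2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0])
# sample.var(0) takes axis=0 with the default ddof=0 (population variance)
byhand = (sample - sample.mean()) / numpy.sqrt(sample.var(0))
print(numpy.allclose(scipy.stats.zscore(sample), byhand))   # True
```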
{
"source": "josbys1/twitter-svm",
"score": 2
} |
#### File: josbys1/twitter-svm/TweetObj.py
```python
class Tweet:
def __init__(self,text,author):
self.text = text
self.author = author
self.vector = None
``` |
{
"source": "joscani/Data-And-Model-Drift-Checker",
"score": 3
} |
#### File: pydrift/core/drift_checker_estimator.py
```python
import pandas as pd
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin
from typing import List
from .drift_checker import DataDriftChecker
from ..models import ScikitModel
from ..exceptions import DriftEstimatorException
class DriftCheckerEstimator(BaseEstimator, ClassifierMixin):
"""Creates a sklearn estimator ready to use it or
within a pipeline
Parameter `column_names` is needed because sklearn
transform data to numpy (no columns)
Check `DataDriftChecker` to change the rest of
parameters if you need
"""
def __init__(self,
ml_classifier_model: ScikitModel,
column_names: List[str],
verbose: bool = False,
minimal: bool = True,
pvalue_threshold_numerical: float = .05,
pvalue_threshold_categorical: float = .05,
cardinality_threshold: int = 20):
self.ml_classifier_model = ml_classifier_model
self.column_names = column_names
self.df_left_data = None
self.df_right_data = None
self.verbose = verbose
self.minimal = minimal
self.pvalue_threshold_numerical = pvalue_threshold_numerical
self.pvalue_threshold_categorical = pvalue_threshold_categorical
self.cardinality_threshold = cardinality_threshold
self.data_drift_checker = None
self.is_drift_in_numerical_columns = False
self.is_drift_in_categorical_columns = False
self.is_drift_in_ml_model_can_discriminate = False
def fit(self, X: np.array, y: np.array = None):
"""Fits estimator in `self.ml_classifier_model`
and assigns X to `self.df_left_data`
"""
self.df_left_data = X
self.ml_classifier_model.fit(X, y)
return self
def fill_data_drift_checker(self, X: np.array):
"""Fill data drift checker object and stores it
in `self.data_drift_checker`
"""
self.df_right_data = X
self.data_drift_checker = DataDriftChecker(
df_left_data=pd.DataFrame(self.df_left_data,
columns=self.column_names),
df_right_data=pd.DataFrame(self.df_right_data,
columns=self.column_names),
verbose=self.verbose,
minimal=self.minimal,
pvalue_threshold_numerical=self.pvalue_threshold_numerical,
pvalue_threshold_categorical=self.pvalue_threshold_categorical,
cardinality_threshold=self.cardinality_threshold
)
def check_drift(self):
"""Checks data drift for numerical and categorical
data and the discriminative model
"""
self.is_drift_in_numerical_columns = (
self.data_drift_checker.check_numerical_columns()
)
self.is_drift_in_categorical_columns = (
self.data_drift_checker.check_categorical_columns()
)
self.is_drift_in_ml_model_can_discriminate = (
self.data_drift_checker.ml_model_can_discriminate()
)
is_there_drift = (
self.is_drift_in_numerical_columns
or self.is_drift_in_categorical_columns
or self.is_drift_in_ml_model_can_discriminate
)
if is_there_drift:
if self.is_drift_in_numerical_columns:
print('Drift found in numerical columns check step')
if self.is_drift_in_categorical_columns:
print('Drift found in categorical columns check step')
if self.is_drift_in_ml_model_can_discriminate:
print('Drift found in discriminative model step')
raise DriftEstimatorException(
'Drift found in your estimation process'
)
def get_drifted_features(self):
"""Alias to self.data_drift_checker.drifted_features
"""
return ', '.join(self.data_drift_checker.drifted_features)
def get_high_cardinality_features(self):
"""Alias to self.data_drift_checker.high_cardinality_features
"""
return ', '.join(self.data_drift_checker.high_cardinality_features)
def predict(self, X: np.array):
"""Checks if there is a data drift and makes a prediction
with `predict` method of sklearn model
"""
self.fill_data_drift_checker(X)
self.check_drift()
return self.ml_classifier_model.predict(X)
def predict_proba(self, X: np.array):
"""Checks if there is a data drift and makes a prediction
with `predict_proba` method of sklearn model
"""
self.fill_data_drift_checker(X)
self.check_drift()
return self.ml_classifier_model.predict_proba(X)
``` |
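A rough end-to-end sketch of how this wrapper might be used; the import path is inferred from the file layout above, the data is synthetic, and whether the drift checks actually trip depends on DataDriftChecker's thresholds.
```python
import numpy as np
from sklearn.linear_model import LogisticRegression
# import path inferred from the file location above
from pydrift.core.drift_checker_estimator import DriftCheckerEstimator

rng = np.random.RandomState(0)
X_train = rng.normal(size=(200, 2))
y_train = (X_train[:, 0] + X_train[:, 1] > 0).astype(int)
X_new = rng.normal(loc=3.0, size=(50, 2))    # shifted data to provoke drift

estimator = DriftCheckerEstimator(
    ml_classifier_model=LogisticRegression(),
    column_names=['feature_a', 'feature_b'],
)
estimator.fit(X_train, y_train)

try:
    estimator.predict(X_new)                 # runs the drift checks before predicting
except Exception as err:                     # DriftEstimatorException when drift is found
    print('drift detected:', err)
    print('drifted features:', estimator.get_drifted_features())
```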
{
"source": "Josca/ppr",
"score": 3
} |
#### File: ppr/ppr/ppr.py
```python
import os
def _gen_setup_file(project_name: str) -> str:
"""
Generate setup.py module content.
"""
content = """\
from setuptools import setup, find_packages
setup(
name='%s',
description='App description.',
version='1.0',
packages=find_packages(),
# package_data={'package': ['subfolder/*']},
# install_requires=[
# 'numpy',
# 'Pillow'
# ],
# entry_points='''
# [console_scripts]
# cmd=package.module:cmd_fun
# '''
)
""" % project_name
return content
def _gen_main_module() -> str:
"""
Generate main module text content.
"""
content = """\
def hello():
print('Hello world')
if __name__ == '__main__':
hello()
"""
return content
def make_project(project_name: str):
"""
Generate project folder structure.
:param project_name: Project name.
"""
# Create project folder and package folder.
pth = os.path.join(os.getcwd(), project_name, project_name)
print(str(pth))
os.makedirs(pth)
# Generate setup.py module file.
setup_content = _gen_setup_file(project_name)
with open(os.path.join(project_name, 'setup.py'), 'w') as f:
f.write(setup_content)
# Generate project_name.py module file.
src_content = _gen_main_module()
with open(os.path.join(project_name, project_name, '%s.py' % project_name), 'w') as f:
f.write(src_content)
# Generate package __init__.py file.
with open(os.path.join(project_name, project_name, '__init__.py'), 'w') as f:
f.write('')
``` |
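For reference, generating a skeleton project with the function above is a one-liner; the package/module import path is inferred from the file location and the project name is arbitrary.
```python
from ppr.ppr import make_project   # import path inferred from the file layout above

# Creates ./demo_app/setup.py, ./demo_app/demo_app/demo_app.py and
# ./demo_app/demo_app/__init__.py; os.makedirs raises if the folder already exists.
make_project('demo_app')
```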
{
"source": "joscelino/Request-API",
"score": 3
} |
#### File: domain/usecases/starships_list_colector.py
```python
from abc import ABC, abstractclassmethod
from typing import Dict, List
class StarShipsListColectorInterface(ABC):
"""Starships Colector Interface"""
@abstractclassmethod
def list(self, page: int) -> List[Dict]:
"""Para ser implementado"""
raise Exception("Deve implementar o metodo de lista.")
```
#### File: src/infra/swap_api_consumer.py
```python
from collections import namedtuple
from typing import Dict, Tuple, Type
import requests
from requests import Request
from src.data.interfaces import SwapiAPiConsumerInterface
from src.errors import HttpRequestError
class SwapApiConsumer(SwapiAPiConsumerInterface):
"""Class to consume swapi api with http requests"""
def __init__(self) -> None:
self.get_starships_response = namedtuple(
"GET_Starships", "status_code request response"
)
def get_starships(self, page: int) -> Tuple[int, Type[Request], Dict]:
"""Request starships nas paginacoes.
Args:
page (int): numero inteiro da pagina de navegacao.
Raises:
HttpRequestError: Erros de requisicao
Returns:
Tuple[int, Type[Request], Dict]: tupla com status code, request e
response.
"""
req = requests.Request(
method="GET", url="https://swapi.dev/api/starships/", params={"page": page}
)
req_prepared = req.prepare()
response = self.__send_http_requests(req_prepared)
status_code = response.status_code
if status_code >= 200 and status_code <= 299:
return self.get_starships_response(
status_code=status_code, request=req, response=response.json()
)
else:
raise HttpRequestError(
message=response.json()["detail"], status_code=status_code
)
@classmethod
def __send_http_requests(cls, req_prepared: Type[Request]) -> any:
"""Prepara a sessao e envia a requisicao http.
Args:
req_prepared (Type[Request]): Objeto de requisicao com todos
os parametros
Returns:
any: resposta da requisicao HTTP
"""
http_session = requests.Session()
response = http_session.send(req_prepared)
return response
``` |
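A small usage sketch; it needs network access to swapi.dev, the import path is inferred from the file layout, and a non-2xx status raises the project's HttpRequestError.
```python
from src.infra.swap_api_consumer import SwapApiConsumer   # path inferred from the layout above

consumer = SwapApiConsumer()
result = consumer.get_starships(page=1)       # namedtuple: status_code, request, response
print(result.status_code)
print(len(result.response['results']))        # SWAPI pages carry a 'results' list
```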
{
"source": "joscha0/daily-wiki",
"score": 3
} |
#### File: daily-wiki/tests/test_firestore.py
```python
import unittest
from db import firestore
class TestFirestore(unittest.TestCase):
def test_adduser(self):
result = firestore.adduser("<EMAIL>", "en")
self.assertFalse(result)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "joscha0/ExoticAlgorithms",
"score": 4
} |
#### File: joscha0/ExoticAlgorithms/BozoSort.py
```python
import random
def is_sorted(lst):
for i in range(len(lst) - 1):
if lst[i] > lst[i + 1]:
return False
return True
def bozoSort(lst):
while not is_sorted(lst):
i, j = int(len(lst)*random.random()), int(len(lst)*random.random())
lst[i], lst[j] = lst[j], lst[i]
return lst
if __name__ == "__main__":
list1 = [1,4,2,5,6,5,3]
print('\n\n-----Bozosort Sorting Algorithm-----\n')
print('unsorted list: '+str(list1))
print('sorted list: '+str(bozoSort(list1)))
``` |
{
"source": "joscha0/instagram-scraper",
"score": 3
} |
#### File: joscha0/instagram-scraper/getpdf.py
```python
from weasyprint import HTML
from jinja2 import Environment, FileSystemLoader
import sys
from scraper import get_data
env = Environment(loader=FileSystemLoader('.'))
template = env.get_template("template.html")
def get_template(name):
data = get_data(name)
template_vars = {'data': data, }
html_out = template.render(template_vars)
HTML(string=html_out).write_pdf('out.pdf', stylesheets=['style.css'])
if __name__ == "__main__":
name = str(sys.argv[1])
get_template(name)
``` |
{
"source": "joschabach/micropsi2",
"score": 3
} |
#### File: nodenet/dict_engine/dict_netentity.py
```python
import micropsi_core.tools
__author__ = 'joscha'
__date__ = '09.05.12'
class NetEntity(object):
"""The basic building blocks of node nets.
Attributes:
uid: the unique identifier of the net entity
index: an attempt at creating an ordering criterion for net entities
name: a human readable name (optional)
position: a pair of coordinates on the screen
nodenet: the node net in which the entity resides
parent_nodespace: the node space this entity is contained in
"""
@property
def uid(self):
return self.__uid
@uid.setter
def uid(self, uid):
self.__uid = uid
@property
def index(self):
return self.__index
@index.setter
def index(self, index):
self.__index = index
@property
def position(self):
return self.__position
@position.setter
def position(self, position):
position = list(position)
position = (position + [0] * 3)[:3]
self.__position = position
self.last_changed = self.nodenet.current_step
@property
def name(self):
return self.__name
@name.setter
def name(self, name):
self.__name = name
@property
def parent_nodespace(self):
return self.__parent_nodespace
def __init__(self, nodenet, parent_nodespace, position, name="", entitytype="abstract_entities",
uid=None, index=None):
"""create a net entity at a certain position and in a given node space"""
self.__uid = None
self.__index = 0
self.__name = None
self.__parent_nodespace = None
self.__position = None
self.uid = uid or micropsi_core.tools.generate_uid()
self.nodenet = nodenet
self.index = index or len(nodenet.get_node_uids()) + len(nodenet.get_nodespace_uids())
self.entitytype = entitytype
self.name = name
self.position = position
if parent_nodespace:
self.__parent_nodespace = parent_nodespace
nodespace = self.nodenet.get_nodespace(parent_nodespace)
if not nodespace.is_entity_known_as(self.entitytype, self.uid):
nodespace._register_entity(self)
else:
self.__parent_nodespace = None
```
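To illustrate the constructor and the position normalization above, here is a sketch using a bare stub in place of a real nodenet; the stub is purely an assumption for illustration and only provides the three members the constructor touches.
```python
from micropsi_core.nodenet.dict_engine.dict_netentity import NetEntity

class StubNodenet(object):
    '''Minimal stand-in providing only what NetEntity.__init__ uses.'''
    current_step = 0
    def get_node_uids(self):
        return []
    def get_nodespace_uids(self):
        return []

entity = NetEntity(StubNodenet(), parent_nodespace=None, position=(10, 20), name="demo")
print(entity.uid)        # generated by micropsi_core.tools.generate_uid()
print(entity.position)   # [10, 20, 0] -- positions are padded to three coordinates
```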
#### File: micropsi_core/nodenet/native_modules.py
```python
import os
nodetypes = {}
try:
import numpy as np
import theano
numpy_installed = True
except ImportError:
numpy_installed = False
if numpy_installed:
# only register these native modules if we
# have theano and numpy installed.
nodetypes["GradientDescent"] = {
"name": "GradientDescent",
"engine": "theano_engine",
"slottypes": ["gen"],
"gatetypes": ["gen"],
"nodefunction_name": "gradient_descent",
"symbol": "☲",
"category": "nn_learning",
"path": os.path.abspath(__file__),
"parameters": [
"ae_type",
"adadelta_rho",
"adadelta_eps",
"check_grad",
"weight_decay",
"tied_weights",
"sparsity_value",
"sparsity_penalty",
"t",
"ctr",
"input_prefix",
"hidden_prefix",
"output_prefix",
"input_nodespace"
],
"parameter_values": {
"ae_type": ["sparse", "denoising"],
"tied_weights": ["True", "False"],
"check_grad": ["yes", "no"]
},
"parameter_defaults": {
"ae_type": "denoising",
"tied_weights": "True",
"hidden_prefix": "hidden_1",
"output_prefix": "output_1"
}
}
def gradient_descent(netapi, node=None, **params):
"""
Online gradient descent with backpropagation for three layers (input, hidden,
and output layer) and AdaDelta for adapting the learning rate per parameter.
References:
[1] Werbos, PJ. "Beyond Regression: New Tools for Prediction and Analysis
in the Behavioral Sciences." (1974).
[2] Zeiler, MD. "ADADELTA: An adaptive learning rate method." (2012).
[3] <NAME>. "Extracting and Composing Robust Features with Denoising
Autoencoders." (2008).
"""
# To be able to switch this native module on and off, require positive
# activation on the gen slot for its code to be run.
if node.get_slot('gen').activation > 0:
import theano
import theano.tensor as T
# get shared name prefix of nodes in input, hidden, and output layers
input_ = node.get_parameter('input_prefix')
hidden = node.get_parameter('hidden_prefix')
output = node.get_parameter('output_prefix')
# get the name of the nodespace where the input lives
ns_input_name = node.get_parameter('input_nodespace')
# get nodespace uids of nodes in input, hidden, and output layers
# assumption: if the input layer consists of sensor nodes, they have their
# own nodespace; all other nodes are in this node's nodespace
ns_input_uid = None
for ns in netapi.get_nodespaces():
if ns.name == ns_input_name:
ns_input_uid = ns.uid
break
ns_hidden_uid = node.parent_nodespace
ns_output_uid = node.parent_nodespace
# initialization
if not hasattr(node, 'initialized'):
node.set_state('cumulative_error', 0)
sparse = str(node.get_parameter('ae_type')) == "sparse"
# denoising = str(node.get_parameter('ae_type')) == "denoising"
tied_weights = str(node.get_parameter('tied_weights')) == "True"
# group nodes
netapi.group_nodes_by_names(ns_input_uid, node_name_prefix=input_)
netapi.group_nodes_by_names(ns_hidden_uid, node_name_prefix=hidden)
netapi.group_nodes_by_names(ns_output_uid, node_name_prefix=output)
# get activation values
a_i_array = netapi.get_activations(ns_input_uid, input_)
a_h_array = netapi.get_activations(ns_hidden_uid, hidden)
a_o_array = netapi.get_activations(ns_output_uid, output)
node.set_parameter('error', 0.0) # store error values to observe how training develops
len_input = len(a_i_array)
len_hidden = len(a_h_array)
len_output = len(a_o_array)
if len_input == 0:
netapi.logger.warn("Node net has no input nodes whose names start with '%s'", input_)
node.set_parameter('ctr', 0)
return
elif len_hidden == 0:
netapi.logger.warn("Node net has no hidden nodes whose names start with '%s'.", hidden)
node.set_parameter('ctr', 0)
return
elif len_output == 0:
netapi.logger.warn("Node net has no output names whose names start with '%s'.", output)
node.set_parameter('ctr', 0)
return
else:
netapi.logger.info("Initializing theano-based autoencoder backprop with layout: %i -> %i -> %i",
len_input, len_hidden, len_output)
# get parameter values from node net
b_h_array = netapi.get_thetas(ns_hidden_uid, hidden)
b_o_array = netapi.get_thetas(ns_output_uid, output)
w_hi_array = netapi.get_link_weights(ns_input_uid, input_, ns_hidden_uid, hidden)
w_oh_array = netapi.get_link_weights(ns_hidden_uid, hidden, ns_output_uid, output)
# declare shared variables ( shared b/w theano and node nets )
a_i = node.a_i = theano.shared(value=a_i_array.astype(T.config.floatX), name="a_i", borrow=False)
a_h = node.a_h = theano.shared(value=a_h_array.astype(T.config.floatX), name="a_h", borrow=False)
a_o = node.a_o = theano.shared(value=a_o_array.astype(T.config.floatX), name="a_o", borrow=False)
b_h = node.b_h = theano.shared(value=b_h_array.astype(T.config.floatX), name="b_h", borrow=False)
b_o = node.b_o = theano.shared(value=b_o_array.astype(T.config.floatX), name="b_o", borrow=False)
w_hi = node.w_hi = theano.shared(value=w_hi_array.astype(T.config.floatX), name="w_hi", borrow=False)
w_oh = node.w_oh = theano.shared(value=w_oh_array.astype(T.config.floatX), name="w_oh", borrow=False)
# write initial parameter values to shared variables
node.b_h.set_value(b_h_array, borrow=True)
node.b_o.set_value(b_o_array, borrow=True)
node.w_hi.set_value(w_hi_array, borrow=True)
node.w_oh.set_value(w_oh_array, borrow=True)
# initialize accumulation variables for AdaDelta, ie. mean square gradients and mean square deltas
ms_grad_b_o = node.ms_grad_b_o = theano.shared(value=np.zeros_like(b_o_array), name="ms_grad_b_o", borrow=True)
ms_grad_w_oh = node.ms_grad_w_oh = theano.shared(value=np.zeros_like(w_oh_array), name="ms_grad_w_oh", borrow=True)
ms_grad_b_h = node.ms_grad_b_h = theano.shared(value=np.zeros_like(b_h_array), name="ms_grad_b_h", borrow=True)
ms_grad_w_hi = node.ms_grad_w_hi = theano.shared(value=np.zeros_like(w_hi_array), name="ms_grad_w_hi", borrow=True)
ms_delta_b_o = node.ms_delta_b_o = theano.shared(value=np.zeros_like(b_o_array), name="ms_delta_b_o", borrow=True)
ms_delta_w_oh = node.ms_delta_w_oh = theano.shared(value=np.zeros_like(w_oh_array), name="ms_delta_w_oh", borrow=True)
ms_delta_b_h = node.ms_delta_b_h = theano.shared(value=np.zeros_like(b_h_array), name="ms_delta_b_h", borrow=True)
ms_delta_w_hi = node.ms_delta_w_hi = theano.shared(value=np.zeros_like(w_hi_array), name="ms_delta_w_hi", borrow=True)
# make function parameters theano compatible
weight_decay = T.scalar("weight_decay", dtype=T.config.floatX)
sparsity_value = T.scalar("sparsity_value", dtype=T.config.floatX)
sparsity_penalty = T.scalar("sparsity_penalty", dtype=T.config.floatX)
ada_rho = T.scalar("ada_rho", dtype=T.config.floatX)
ada_eps = T.scalar("ada_eps", dtype=T.config.floatX)
# declare the reconstruction error
error_term = T.sum(T.square(a_o - a_i)) / 2. # squared error
# error_term = -T.sum(a_i * T.log(a_o) + (1. - a_i) * T.log(1. - a_o)) # cross-entropy
# use a weight constraint as a regularizer
weight_constraint = (weight_decay / 2.) * (T.sum(T.square(w_hi)) + T.sum(T.square(w_oh)))
if sparse: # training criterion for a sparse autoencoder
# save the average activation of hidden units; initialize to first activation received
avg_a_h = node.avg_a_h = theano.shared(value=a_h_array, name="avg_a_h", borrow=False)
new_avg_a_h = 0.95 * avg_a_h + (1 - 0.95) * a_h # for gradient checking, set new_avg_a_h = a_h
rho = sparsity_value
information_gain = rho * T.log(rho / new_avg_a_h) + (1. - rho) * T.log((1. - rho) / (1. - new_avg_a_h))
sparsity_constraint = sparsity_penalty * T.sum(information_gain)
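# the information_gain term above is the KL divergence between a Bernoulli(rho) target
# activation and a Bernoulli(new_avg_a_h) running-average activation, per hidden unit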
cost = error_term + weight_constraint + sparsity_constraint
else: # training criterion for a denoising autoencoder
cost = error_term + weight_constraint
node.cost = theano.function([weight_decay, sparsity_value, sparsity_penalty], cost, on_unused_input='ignore')
node.error = theano.function([], error_term / len(b_h_array))
# compute gradients
sigmoid_deriv_a_o = a_o * (1. - a_o)
grad_o = (a_o - a_i) * sigmoid_deriv_a_o # squared error # T.grad(cost, z_o)
# grad_o = ((a_i - a_o) / (a_o - a_o**2)) * sigmoid_deriv_a_o # cross-entropy
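# derivation sketch: with E = 1/2 * sum((a_o - a_i)^2) and a_o = sigmoid(z_o),
# dE/dz_o = (a_o - a_i) * a_o * (1 - a_o), which is exactly grad_o above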
sigmoid_deriv_a_h = a_h * (1. - a_h)
if sparse:
grad_w_oh = T.dot(T.reshape(grad_o, (len_input, 1)), T.reshape(a_h, (1, len_hidden))) + weight_decay * w_oh
grad_sparsity = (- rho / new_avg_a_h + (1. - rho) / (1. - new_avg_a_h)).T
grad_h = (T.dot(w_oh.T, grad_o) + sparsity_penalty * grad_sparsity) * sigmoid_deriv_a_h
grad_w_hi = T.dot(T.reshape(grad_h, (len_hidden, 1)), T.reshape(a_i, (1, len_input))) + weight_decay * w_hi
else: # denoising
grad_w_oh = T.dot(T.reshape(grad_o, (len_input, 1)), T.reshape(a_h, (1, len_hidden))) + weight_decay * w_oh
grad_h = T.dot(w_oh.T, grad_o) * sigmoid_deriv_a_h
grad_w_hi = T.dot(T.reshape(grad_h, (len_hidden, 1)), T.reshape(a_i, (1, len_input))) + weight_decay * w_hi
if tied_weights:
grad_w_oh = grad_w_oh + grad_w_hi.T
gradients = [grad_o, grad_w_oh, grad_h]
ms_grad = [ms_grad_b_o, ms_grad_w_oh, ms_grad_b_h]
ms_delta = [ms_delta_b_o, ms_delta_w_oh, ms_delta_b_h]
else:
gradients = [grad_o, grad_w_oh, grad_h, grad_w_hi]
ms_grad = [ms_grad_b_o, ms_grad_w_oh, ms_grad_b_h, ms_grad_w_hi]
ms_delta = [ms_delta_b_o, ms_delta_w_oh, ms_delta_b_h, ms_delta_w_hi]
# update accumulation variables for AdaDelta and compute new deltas
# compute an exponentially decaying average of squared gradients
# ie. recent gradients are more important and the quantity doesn't continue to grow
# thereby allowing the learning rate to grow or shrink as time progresses ( rather than just shrink as in AdaGrad )
new_ms_grad = [ada_rho * ms_g + (1 - ada_rho) * (g**2) for ms_g, g in zip(ms_grad, gradients)]
# Note: the square root of the mean squared gradients plus epsilon is effectively the RMS of the gradients
# epsilon is added ~"to start off the first iteration and to ensure progress when previous updates become small"
deltas = [(T.sqrt(ms_d + ada_eps) / T.sqrt(ms_g + ada_eps)) * g for ms_d, ms_g, g in zip(ms_delta, new_ms_grad, gradients)]
# compute an exponentially decaying average of squared deltas -- this is to ensure correct units
new_ms_delta = [ada_rho * ms_d + (1 - ada_rho) * (d**2) for ms_d, d in zip(ms_delta, deltas)]
# update parameters, ie. old_value - learning_rate * delta_value
if tied_weights:
new_b_o, new_w_oh, new_b_h = (old - update for old, update in zip([b_o, w_oh, b_h], deltas))
new_w_hi = new_w_oh.T
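# with tied weights only one weight matrix is trained: the encoder update is the transpose
# of the decoder update, so the w_hi gradient/statistics slots are mirrored (transposed)
# from the w_oh entries below to keep the bookkeeping lists the same length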
new_ms_grad.append(new_ms_grad[1].T)
new_ms_delta.append(new_ms_delta[1].T)
gradients.append(gradients[1].T)
else:
new_b_o, new_w_oh, new_b_h, new_w_hi = (old - update for old, update in zip([b_o, w_oh, b_h, w_hi], deltas))
if sparse:
update_function = theano.function([weight_decay, sparsity_value, sparsity_penalty, ada_rho, ada_eps],
None,
updates=[(b_o, new_b_o),
(w_oh, new_w_oh),
(b_h, new_b_h),
(w_hi, new_w_hi),
(avg_a_h, new_avg_a_h),
(ms_grad_b_o, new_ms_grad[0]),
(ms_grad_w_oh, new_ms_grad[1]),
(ms_grad_b_h, new_ms_grad[2]),
(ms_grad_w_hi, new_ms_grad[3]),
(ms_delta_b_o, new_ms_delta[0]),
(ms_delta_w_oh, new_ms_delta[1]),
(ms_delta_b_h, new_ms_delta[2]),
(ms_delta_w_hi, new_ms_delta[3])],
on_unused_input='ignore')
else: # denoising
update_function = theano.function([weight_decay, sparsity_value, sparsity_penalty, ada_rho, ada_eps],
None,
updates=[(b_o, new_b_o),
(w_oh, new_w_oh),
(b_h, new_b_h),
(w_hi, new_w_hi),
(ms_grad_b_o, new_ms_grad[0]),
(ms_grad_w_oh, new_ms_grad[1]),
(ms_grad_b_h, new_ms_grad[2]),
(ms_grad_w_hi, new_ms_grad[3]),
(ms_delta_b_o, new_ms_delta[0]),
(ms_delta_w_oh, new_ms_delta[1]),
(ms_delta_b_h, new_ms_delta[2]),
(ms_delta_w_hi, new_ms_delta[3])],
on_unused_input='ignore')
node.get_updated_parameters = update_function
# for gradient checking use the following function:
node.get_gradients = theano.function([weight_decay, sparsity_value, sparsity_penalty, ada_rho, ada_eps],
[gradients[0], gradients[1], gradients[2], gradients[3]], on_unused_input='ignore')
node.initialized = True
# get input activations from node net
a_i_array = netapi.get_activations(ns_input_uid, input_)
# learn only if activation on the input layer has been persistent for as many steps as your neural net has layers
# Note: since we're currently using denoising autoencoders, this means persistent up to Bernoulli noise
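# the 'ctr' parameter below counts consecutive steps with (effectively) unchanged input
# activation; backprop only runs once ctr reaches 3, i.e. the number of layers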
try:
# check if activation has changed since the last step ( by testing if there's any different activation value )
bool_idx = node.prev_a_i != a_i_array
input_changed = np.any(bool_idx)
# if deviating activations were 0 ( i.e. most likely the effect of Bernoulli noising ), assume no change
is_zero = node.prev_a_i[bool_idx] == 0
# if is_zero contains elements but not all input activations and their values are all zero, assume no change
if len(is_zero) and len(is_zero) < len(a_i_array) and np.all(is_zero):
input_changed = False
except:
input_changed = True
node.prev_a_i = a_i_array
if input_changed:
node.set_parameter('ctr', 1)
else:
node.set_parameter('ctr', int(node.get_parameter('ctr')) + 1)
# don't compute until the counter equals the number of layers, i.e. the same activation has reached all layers
if node.get_parameter('ctr') < 3:
return
# get other activations from node net
a_h_array = netapi.get_activations(ns_hidden_uid, hidden)
a_o_array = netapi.get_activations(ns_output_uid, output)
# define learning parameters
param = node.get_parameter('weight_decay')
if param is None:
weight_decay = netapi.floatX(4e-06) # 0.0001 . 1e-07 assuming batches of size 1000 . 4e-06 assuming batches of size 256
node.set_parameter('weight_decay', str(weight_decay)) # store as regular float to appease the serializer
else:
weight_decay = netapi.floatX(param)
param = node.get_parameter('sparsity_value')
if param is None:
sparsity_value = netapi.floatX(0.05)
node.set_parameter('sparsity_value', str(sparsity_value))
else:
sparsity_value = netapi.floatX(param)
param = node.get_parameter('sparsity_penalty')
if param is None:
sparsity_penalty = netapi.floatX(0.001) # 3.0 . 0.003 assuming batches of size 1000 . 0.01 assuming batches of size 256
node.set_parameter('sparsity_penalty', str(sparsity_penalty))
else:
sparsity_penalty = netapi.floatX(param)
param = node.get_parameter('adadelta_rho')
if param is None:
ada_rho = netapi.floatX(0.95)
node.set_parameter('adadelta_rho', str(ada_rho))
else:
ada_rho = netapi.floatX(param)
param = node.get_parameter('adadelta_eps')
if param is None:
ada_eps = netapi.floatX(1e-6)
node.set_parameter('adadelta_eps', str(ada_eps))
else:
ada_eps = netapi.floatX(param)
param = node.get_parameter('ae_type')
if param is None:
ae_type = 'sparse' # options: 'sparse', 'denoising'
node.set_parameter('ae_type', 'sparse')
else:
ae_type = str(param)
param = node.get_parameter('t')
if param is None:
t = 0
node.set_parameter('t', t)
else:
t = int(param)
# gradient checking
# Note: use double precision when running gradient checks
if node.get_parameter('check_grad') == 'yes':
# get values of biases and weights from node net
b_h_array = netapi.get_thetas(ns_hidden_uid, hidden)
b_o_array = netapi.get_thetas(ns_output_uid, output)
w_hi_array = netapi.get_link_weights(ns_input_uid, input_, ns_hidden_uid, hidden)
w_oh_array = netapi.get_link_weights(ns_hidden_uid, hidden, ns_output_uid, output)
# compute the analytical gradient
anal_grad = compute_analytic_gradient(
netapi, node, a_i_array, a_h_array, a_o_array, b_h_array, b_o_array, w_hi_array, w_oh_array,
weight_decay, sparsity_value, sparsity_penalty, ada_rho, ada_eps)
# compute the numerical gradient
num_grad = compute_numeric_gradient(
netapi, node, a_i_array, a_h_array, a_o_array, b_h_array, b_o_array, w_hi_array, w_oh_array,
weight_decay, sparsity_value, sparsity_penalty)
# compare them
diff = np.linalg.norm(num_grad - anal_grad) / np.linalg.norm(num_grad + anal_grad)
print("Gradient difference: %e" % diff) # %.10f" % diff
print("The norm of the difference between numerical and analytical gradient should be < 1e-9\n")
# write values to shared variables
node.a_i.set_value(a_i_array, borrow=True)
node.a_h.set_value(a_h_array, borrow=True)
node.a_o.set_value(a_o_array, borrow=True)
# update values in shared variables ( using backpropagation of the gradients )
node.get_updated_parameters(weight_decay, sparsity_value, sparsity_penalty, ada_rho, ada_eps)
# write new parameter values to node net
netapi.set_thetas(ns_output_uid, output, node.b_o.get_value(borrow=True))
netapi.set_link_weights(ns_hidden_uid, hidden, ns_output_uid, output, node.w_oh.get_value(borrow=True))
netapi.set_thetas(ns_hidden_uid, hidden, node.b_h.get_value(borrow=True))
netapi.set_link_weights(ns_input_uid, input_, ns_hidden_uid, hidden, node.w_hi.get_value(borrow=True))
error = float(node.error())
# save current error as node parameter
node.set_parameter('error', error)
node.set_state('cumulative_error', node.get_state('cumulative_error') + error)
t = int(node.get_parameter('t'))
if t % 1000 == 0:
netapi.logger.debug("Number of backprop steps computed %d" % t)
netapi.logger.debug("Average Error %.6f (Latest: 0=%.6f)" % ((node.get_state('cumulative_error') / 1000), error))
node.set_state('cumulative_error', 0.0)
# reset counter after successful backprop step; cf. must wait for new sensory activation to reach output layer
node.set_parameter('ctr', 0)
node.set_parameter('t', t + 1)
def sigmoid(z):
""" The sigmoid ( activation ) function. """
return 1. / (1. + np.exp(-z))
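# note: the derivative used by the backprop code above is sigmoid'(z) = sigmoid(z) * (1 - sigmoid(z)),
# which is computed there directly from the activations as a * (1 - a)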
def compute_analytic_gradient(netapi, node, a_i, a_h, a_o, b_h, b_o, w_hi, w_oh, weight_decay,
sparsity_value, sparsity_penalty, ada_rho, ada_eps):
# make sure borrow is False here because otherwise the buffers are overwritten and
# compute_numerical_gradient(..) still needs these same input values for proper comparison
node.a_i.set_value(a_i, borrow=False)
node.a_h.set_value(a_h, borrow=False)
node.a_o.set_value(a_o, borrow=False)
node.b_h.set_value(b_h, borrow=False)
node.b_o.set_value(b_o, borrow=False)
node.w_hi.set_value(w_hi, borrow=False)
node.w_oh.set_value(w_oh, borrow=False)
delta_o, delta_w_oh, delta_h, delta_w_hi = \
node.get_gradients(weight_decay, sparsity_value, sparsity_penalty, ada_rho, ada_eps)
gradient = np.concatenate((delta_o, np.ravel(delta_w_oh), delta_h, np.ravel(delta_w_hi)))
return gradient
def compute_numeric_gradient(netapi, node, a_i, a_h, a_o, b_h, b_o, w_hi, w_oh, weight_decay, sparsity_value, sparsity_penalty):
""" Compute numerical gradient for validating backprop implementation above. """
from copy import deepcopy
# helper variables
epsilon = netapi.floatX(1e-4)
ni = len(b_o)
nh = len(b_h)
nih = ni * nh
theta = np.concatenate((b_o, np.ravel(w_oh), b_h, np.ravel(w_hi)))
n = theta.shape[0]
I = np.eye(n, dtype=netapi.floatX)
gradient = np.zeros(theta.shape, dtype=netapi.floatX)
for i in range(n):
eps_vec = np.array(I[:, i] * epsilon, dtype=netapi.floatX)
eps_plus = theta + eps_vec
eps_minus = theta - eps_vec
# split theta into parts, recompute activations, update shared variables, compute cost
b_o_plus = eps_plus[: ni]
w_oh_plus = eps_plus[ni: ni + nih].reshape((ni, nh))
b_h_plus = eps_plus[ni + nih: ni + nih + nh]
w_hi_plus = eps_plus[ni + nih + nh:].reshape((nh, ni))
a_i_plus = deepcopy(a_i)
a_h_plus = np.ravel(sigmoid(w_hi_plus.dot(a_i_plus) + b_h_plus))
a_o_plus = np.ravel(sigmoid(w_oh_plus.dot(a_h_plus) + b_o_plus))
node.a_i.set_value(a_i_plus, borrow=True)
node.a_h.set_value(a_h_plus, borrow=True)
node.a_o.set_value(a_o_plus, borrow=True)
node.b_h.set_value(b_h_plus, borrow=True)
node.b_o.set_value(b_o_plus, borrow=True)
node.w_hi.set_value(w_hi_plus, borrow=True)
node.w_oh.set_value(w_oh_plus, borrow=True)
cost = node.cost(weight_decay, sparsity_value, sparsity_penalty)
# split theta into parts, recompute activations, update shared variables, compute cost
b_o_minus = eps_minus[: ni]
w_oh_minus = eps_minus[ni: ni + nih].reshape((ni, nh))
b_h_minus = eps_minus[ni + nih: ni + nih + nh]
w_hi_minus = eps_minus[ni + nih + nh:].reshape((nh, ni))
a_i_minus = deepcopy(a_i)
a_h_minus = np.ravel(sigmoid(w_hi_minus.dot(a_i_minus) + b_h_minus))
a_o_minus = np.ravel(sigmoid(w_oh_minus.dot(a_h_minus) + b_o_minus))
node.a_i.set_value(a_i_minus, borrow=True)
node.a_h.set_value(a_h_minus, borrow=True)
node.a_o.set_value(a_o_minus, borrow=True)
node.b_h.set_value(b_h_minus, borrow=True)
node.b_o.set_value(b_o_minus, borrow=True)
node.w_hi.set_value(w_hi_minus, borrow=True)
node.w_oh.set_value(w_oh_minus, borrow=True)
cost_ = node.cost(weight_decay, sparsity_value, sparsity_penalty)
# compute cost difference
gradient[i] = (cost - cost_) / (2. * epsilon)
if i % 1000 == 0:
print("Computed numeric gradient for %d parameters" % i)
return gradient
```
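The parameter update applied by `node.get_updated_parameters` above boils down to the AdaDelta rule from Zeiler (2012). The following is a minimal, self-contained numpy sketch of that rule, not code from the repository; the function name `adadelta_step` is chosen here purely for illustration.
```python
import numpy as np

def adadelta_step(theta, grad, ms_grad, ms_delta, rho=0.95, eps=1e-6):
    """One AdaDelta update; returns (new_theta, new_ms_grad, new_ms_delta)."""
    ms_grad = rho * ms_grad + (1 - rho) * grad ** 2                    # decaying avg of squared gradients
    delta = np.sqrt(ms_delta + eps) / np.sqrt(ms_grad + eps) * grad    # per-parameter step
    ms_delta = rho * ms_delta + (1 - rho) * delta ** 2                 # decaying avg of squared deltas
    return theta - delta, ms_grad, ms_delta

if __name__ == "__main__":
    theta = np.array([1.0, -2.0])
    grad = np.array([0.1, -0.3])
    ms_grad = np.zeros_like(theta)
    ms_delta = np.zeros_like(theta)
    theta, ms_grad, ms_delta = adadelta_step(theta, grad, ms_grad, ms_delta)
    print(theta)
```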
#### File: nodenet/theano_engine/theano_netapi.py
```python
__author__ = 'rvuine'
from micropsi_core.nodenet.netapi import NetAPI
class TheanoNetAPI(NetAPI):
"""
Theano / numpy extension of the NetAPI, giving native modules access to bulk operations and efficient
data structures for machine learning purposes.
"""
def __init__(self, nodenet):
super(TheanoNetAPI, self).__init__(nodenet)
self.__nodenet = nodenet
@property
def floatX(self):
return self.__nodenet.numpyfloatX
def announce_nodes(self, nodespace_uid, number_of_nodes, average_element_per_node):
self.__nodenet.announce_nodes(nodespace_uid, number_of_nodes, average_element_per_node)
def decay_por_links(self, nodespace_uid):
""" Decays all por-links in the given nodespace """
# por_cols = T.lvector("por_cols")
# por_rows = T.lvector("por_rows")
# new_w = T.set_subtensor(nodenet.w[por_rows, por_cols], nodenet.w[por_rows, por_cols] - 0.0001)
# self.decay = theano.function([por_cols, por_rows], None, updates={nodenet.w: new_w}, accept_inplace=True)
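# the implementation below works directly on the numpy weight matrix: for every pipe node
# in the nodespace it selects the columns of its por gate, finds the rows holding positive
# weights, and scales those entries by (1 - base_porret_decay_factor)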
import numpy as np
from .theano_definitions import node_from_id, PIPE, POR
nodespace_uid = self.get_nodespace(nodespace_uid).uid
porretdecay = self.__nodenet.get_modulator('base_porret_decay_factor')
ns = self.get_nodespace(nodespace_uid)
partition = ns._partition
if partition.has_pipes and porretdecay != 0:
ns_id = node_from_id(nodespace_uid)
node_ids = np.where(partition.allocated_node_parents == ns_id)[0]
pipe_ids = np.where(partition.allocated_nodes == PIPE)[0]
ns_pipes = np.intersect1d(node_ids, pipe_ids, assume_unique=True)
por_cols = partition.allocated_node_offsets[ns_pipes] + POR
w = partition.w.get_value(borrow=True)
por_rows = np.nonzero(w[:, por_cols] > 0.)[0]
cols, rows = np.meshgrid(por_cols, por_rows)
w_update = w[rows, cols]
w_update *= (1 - porretdecay)
w[rows, cols] = w_update
partition.w.set_value(w, borrow=True)
```
#### File: micropsi_core/tests/test_node_activation.py
```python
from micropsi_core import runtime as micropsi
def prepare(fixed_nodenet):
nodenet = micropsi.get_nodenet(fixed_nodenet)
netapi = nodenet.netapi
source = netapi.create_node("Register", None, "Source")
netapi.link(source, "gen", source, "gen")
source.activation = 1
nodenet.step()
register = netapi.create_node("Register", None)
netapi.link(source, "gen", register, "gen")
return nodenet, netapi, source, register
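# note on the fixture above: the self-linked "Source" register keeps its gen activation at 1
# on every step, so the freshly linked register receives that activation one net.step() later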
def test_gate_arithmetics_propagation(fixed_nodenet):
# propagate activation, expect it to show up at the gen gate
net, netapi, source, register = prepare(fixed_nodenet)
net.step()
assert register.get_gate("gen").activation == 1
def test_gate_arithmetics_maximum(fixed_nodenet):
# set maximum, expect the cutoff to work
net, netapi, source, register = prepare(fixed_nodenet)
register.set_gate_parameter("gen", "maximum", 0.5)
net.step()
assert register.get_gate("gen").activation == 0.5
def test_gate_arithmetics_minimum(fixed_nodenet):
# set minimum, expect it to show up
net, netapi, source, register = prepare(fixed_nodenet)
register.set_gate_parameter("gen", "maximum", 2)
register.set_gate_parameter("gen", "minimum", 1.5)
net.step()
assert register.get_gate("gen").activation == 1.5
def test_gate_arithmetics_threshold(fixed_nodenet):
# set threshold, expect it to mute the node
net, netapi, source, register = prepare(fixed_nodenet)
register.set_gate_parameter("gen", "maximum", 2)
register.set_gate_parameter("gen", "threshold", 1.5)
net.step()
assert register.get_gate("gen").activation == 0
def test_gate_arithmetics_amplification(fixed_nodenet):
# set maximum and amplification, expect amplification to be applied after maximum
net, netapi, source, register = prepare(fixed_nodenet)
register.set_gate_parameter("gen", "maximum", 10)
register.set_gate_parameter("gen", "amplification", 10)
net.step()
assert register.get_gate("gen").activation == 10
def test_gate_arithmetics_amplification_and_threshold(fixed_nodenet):
# set maximum, amplification and threshold, expect the threshold to mute the node despite amplification
net, netapi, source, register = prepare(fixed_nodenet)
register.set_gate_parameter("gen", "maximum", 10)
register.set_gate_parameter("gen", "amplification", 10)
register.set_gate_parameter("gen", "threshold", 2)
net.step()
assert register.get_gate("gen").activation == 0
def test_gate_arithmetics_directional_activator_amplification(fixed_nodenet):
# set maximum and threshold with a directional activator in place
net, netapi, source, register = prepare(fixed_nodenet)
activator = netapi.create_node("Activator", None)
activator.set_parameter('type', 'sub')
netapi.link(source, "gen", activator, "gen", 5)
testpipe = netapi.create_node("Pipe", None)
netapi.link(source, "gen", testpipe, "sub", 1)
testpipe.set_gate_parameter("sub", "maximum", 10)
testpipe.set_gate_parameter("sub", "threshold", 0)
net.step()
assert testpipe.get_gate("sub").activation == 5
def test_gate_arithmetics_directional_activator_muting(fixed_nodenet):
# have the directional activator mute the node
net, netapi, source, register = prepare(fixed_nodenet)
activator = netapi.create_node("Activator", None)
activator.set_parameter('type', 'sub')
netapi.link(source, "gen", activator, "gen", 0)
testpipe = netapi.create_node("Pipe", None)
netapi.link(source, "gen", testpipe, "sub", 1)
testpipe.set_gate_parameter("sub", "maximum", 10)
testpipe.set_gate_parameter("sub", "threshold", 0)
net.step()
assert testpipe.get_gate("sub").activation == 0
def test_gate_arithmetics_directional_activator_threshold(fixed_nodenet):
# have the directional activator amplify alpha above threshold
net, netapi, source, register = prepare(fixed_nodenet)
activator = netapi.create_node("Activator", None)
activator.set_parameter('type', 'sub')
netapi.link(source, "gen", activator, "gen", 2)
testpipe = netapi.create_node("Pipe", None)
netapi.link(source, "gen", testpipe, "sub", 1)
testpipe.set_gate_parameter("sub", "maximum", 10)
testpipe.set_gate_parameter("sub", "threshold", 1)
net.step()
assert testpipe.get_gate("sub").activation == 2
def test_gatefunction_sigmoid(fixed_nodenet):
# set a node function for gen gates, expect it to be used
from micropsi_core.nodenet.gatefunctions import sigmoid
net, netapi, source, register = prepare(fixed_nodenet)
register.set_gatefunction_name("gen", "sigmoid")
net.step()
assert round(register.get_gate("gen").activation, 5) == round(sigmoid(1, 0, 0), 5)
def test_gatefunction_none_is_identity(fixed_nodenet):
from micropsi_core.nodenet.gatefunctions import identity
net, netapi, source, register = prepare(fixed_nodenet)
register.set_gatefunction_name("gen", None)
net.step()
assert register.get_gate("gen").activation == identity(1, 0, 0)
def test_gatefunctions(fixed_nodenet):
# call every gatefunction once
import micropsi_core.nodenet.gatefunctions as funcs
assert funcs.absolute(-1., 0, 0) == 1
assert funcs.one_over_x(2., 0, 0) == 0.5
assert funcs.identity(1, 0, 0) == 1
assert funcs.sigmoid(0, 0, 0) == 0.5
def test_node_activation_is_gen_gate_activation(fixed_nodenet):
from micropsi_core.nodenet.gatefunctions import sigmoid
net, netapi, source, register = prepare(fixed_nodenet)
register.set_gatefunction_name('gen', 'sigmoid')
sig = round(sigmoid(1, 0, 0), 4)
net.step()
assert round(register.activation, 4) == sig
assert round(register.get_gate('gen').activation, 4) == sig
```
#### File: micropsi_core/tests/test_node_netapi.py
```python
import pytest
from micropsi_core import runtime as micropsi
def prepare(fixed_nodenet):
nodenet = micropsi.get_nodenet(fixed_nodenet)
netapi = nodenet.netapi
source = netapi.create_node("Register", None, "Source")
netapi.link(source, "gen", source, "gen")
source.activation = 1
nodenet.step()
return nodenet, netapi, source
def test_node_netapi_create_register_node(fixed_nodenet):
# test register node creation
net, netapi, source = prepare(fixed_nodenet)
node = netapi.create_node("Register", None, "TestName")
# basic logic tests
assert node is not None
root_ns = netapi.get_nodespace(None)
assert node.parent_nodespace == root_ns.uid
assert node.type == "Register"
assert node.uid is not None
assert len(node.get_gate('gen').get_links()) == 0
assert len(node.get_gate('gen').activations) == 1
# frontend/persistency-oriented data dictionary test
data = node.get_data()
assert data['uid'] == node.uid
assert data['name'] == node.name
assert data['type'] == node.type
node = netapi.create_node("Register", None)
# TODO: the weirdness, server-internally, we return uids as names, clients don't see this, confusion ensues
# assert data['name'] == node.name
def test_node_netapi_create_pipe_node(fixed_nodenet):
# test concept node generation
net, netapi, source = prepare(fixed_nodenet)
node = netapi.create_node("Pipe", None, "TestName")
# basic logic tests
assert node is not None
assert node.parent_nodespace == netapi.get_nodespace(None).uid
assert node.type == "Pipe"
assert node.uid is not None
assert len(node.get_gate('gen').get_links()) == 0
assert len(node.get_gate('gen').activations) == 1
assert len(node.get_gate('sub').get_links()) == 0
assert len(node.get_gate('sub').activations) == 1
assert len(node.get_gate('sur').get_links()) == 0
assert len(node.get_gate('sur').activations) == 1
assert len(node.get_gate('por').get_links()) == 0
assert len(node.get_gate('por').activations) == 1
assert len(node.get_gate('ret').get_links()) == 0
assert len(node.get_gate('ret').activations) == 1
assert len(node.get_gate('cat').get_links()) == 0
assert len(node.get_gate('cat').activations) == 1
assert len(node.get_gate('exp').get_links()) == 0
assert len(node.get_gate('exp').activations) == 1
# frontend/persistency-oriented data dictionary test
data = node.get_data()
assert data['uid'] == node.uid
for key in node.get_gate_types():
assert key not in data['gate_parameters']
for parameter, value in node.nodetype.gate_defaults[key].items():
assert node.get_gate(key).get_parameter(parameter) == value
assert data['name'] == node.name
assert data['type'] == node.type
node = netapi.create_node("Pipe", None)
# TODO: the weirdness, server-internally, we return uids as names, clients don't see this, confusion ensues
# assert data['name'] == node.name
@pytest.mark.engine("dict_engine")
def test_node_netapi_create_concept_node(fixed_nodenet):
# test concept node generation
net, netapi, source = prepare(fixed_nodenet)
node = netapi.create_node("Concept", None, "TestName")
# basic logic tests
assert node is not None
assert node.parent_nodespace == netapi.get_nodespace(None).uid
assert node.type == "Concept"
assert node.uid is not None
assert len(node.get_gate('gen').get_links()) == 0
assert len(node.get_gate('gen').activations) == 1
assert len(node.get_gate('sub').get_links()) == 0
assert len(node.get_gate('sub').activations) == 1
assert len(node.get_gate('sur').get_links()) == 0
assert len(node.get_gate('sur').activations) == 1
assert len(node.get_gate('por').get_links()) == 0
assert len(node.get_gate('por').activations) == 1
assert len(node.get_gate('ret').get_links()) == 0
assert len(node.get_gate('ret').activations) == 1
assert len(node.get_gate('cat').get_links()) == 0
assert len(node.get_gate('cat').activations) == 1
assert len(node.get_gate('exp').get_links()) == 0
assert len(node.get_gate('exp').activations) == 1
assert len(node.get_gate('sym').get_links()) == 0
assert len(node.get_gate('sym').activations) == 1
assert len(node.get_gate('ref').get_links()) == 0
assert len(node.get_gate('ref').activations) == 1
# frontend/persistency-oriented data dictionary test
data = node.get_data()
assert data['uid'] == node.uid
assert data['name'] == node.name
assert data['type'] == node.type
node = netapi.create_node("Pipe", None)
# TODO: the weirdness, server-internally, we return uids as names, clients don't see this, confusion ensues
# assert data['name'] == node.name
def test_node_netapi_create_node_in_nodespace(fixed_nodenet):
# test register node in nodespace creation
net, netapi, source = prepare(fixed_nodenet)
nodespace = netapi.create_nodespace(None, "NestedNodespace")
node = netapi.create_node("Register", nodespace.uid, "TestName")
assert node.parent_nodespace == nodespace.uid
assert node.get_data()['parent_nodespace'] == nodespace.uid
def test_node_netapi_get_nodespace_one(fixed_nodenet):
# test single nodespace querying
net, netapi, source = prepare(fixed_nodenet)
nodespace = netapi.create_nodespace(None, "TestName")
queried_nodespace = netapi.get_nodespace(nodespace.uid)
assert queried_nodespace.uid == nodespace.uid
assert queried_nodespace.name == nodespace.name
def test_node_netapi_get_nodespace_multi(fixed_nodenet):
# test nodespace listing
net, netapi, source = prepare(fixed_nodenet)
nodespace1 = netapi.create_nodespace(None, "TestName1")
nodespace2 = netapi.create_nodespace(None, "TestName2")
nodespace3 = netapi.create_nodespace(nodespace2.uid, "TestName3")
root_ns = netapi.get_nodespace(None)
queried_nodespaces = netapi.get_nodespaces(root_ns.uid)
assert len(queried_nodespaces) == 2
assert nodespace1.uid in [x.uid for x in queried_nodespaces]
assert nodespace2.uid in [x.uid for x in queried_nodespaces]
assert nodespace3.uid not in [x.uid for x in queried_nodespaces]
def test_node_netapi_get_node(fixed_nodenet):
# test register node creation
net, netapi, source = prepare(fixed_nodenet)
node = netapi.create_node("Register", None, "TestName")
queried_node = netapi.get_node(node.uid)
assert queried_node.uid == node.uid
assert queried_node.name == node.name
assert queried_node.get_data() == node.get_data()
assert queried_node.type == node.type
def test_node_netapi_get_nodes(fixed_nodenet):
# test get_nodes plain
net, netapi, source = prepare(fixed_nodenet)
node1 = netapi.create_node("Register", None, "TestName1")
node2 = netapi.create_node("Register", None, "TestName2")
nodes = netapi.get_nodes(netapi.get_nodespace(None).uid)
assert node1.uid in [n.uid for n in nodes]
assert node2.uid in [n.uid for n in nodes]
def test_node_netapi_get_nodes_by_name(fixed_nodenet):
# test get_nodes by name
net, netapi, source = prepare(fixed_nodenet)
node1 = netapi.create_node("Register", None, "TestName1")
node2 = netapi.create_node("Register", None, "TestName2")
nodes = netapi.get_nodes(netapi.get_nodespace(None).uid, node_name_prefix="TestName")
assert len(nodes) == 2
assert node1.uid in [n.uid for n in nodes]
assert node2.uid in [n.uid for n in nodes]
def test_node_netapi_get_nodes_by_nodespace(fixed_nodenet):
# test get_nodes by name and nodespace
net, netapi, source = prepare(fixed_nodenet)
nodespace = netapi.create_nodespace(None, "NestedNodespace")
node1 = netapi.create_node("Register", nodespace.uid, "TestName1")
node2 = netapi.create_node("Register", nodespace.uid, "TestName2")
nodes = netapi.get_nodes(nodespace.uid)
assert len(nodes) == 2
assert node1.uid in [n.uid for n in nodes]
assert node2.uid in [n.uid for n in nodes]
def test_node_netapi_get_nodes_by_nodetype(fixed_nodenet):
# test get_nodes by name and nodespace
net, netapi, source = prepare(fixed_nodenet)
nodespace = netapi.create_nodespace(None, "NestedNodespace")
node1 = netapi.create_node("Pipe", nodespace.uid, "TestName1")
node2 = netapi.create_node("Register", nodespace.uid, "TestName2")
nodes = netapi.get_nodes(nodetype="Register")
assert len(nodes) == 2
uids = [n.uid for n in nodes]
assert node1.uid not in uids
assert node2.uid in uids
assert source.uid in uids
def test_node_netapi_get_nodes_by_name_and_nodespace(fixed_nodenet):
# test get_nodes by name and nodespace
net, netapi, source = prepare(fixed_nodenet)
nodespace = netapi.create_nodespace(None, "NestedNodespace")
node1 = netapi.create_node("Register", None, "TestName1")
node2 = netapi.create_node("Register", nodespace.uid, "TestName2")
nodes = netapi.get_nodes(nodespace.uid, "TestName")
assert len(nodes) == 1
assert node2.uid in [n.uid for n in nodes]
def test_node_netapi_get_nodes_in_gate_field(fixed_nodenet):
# test get_nodes_in_gate_field
net, netapi, source = prepare(fixed_nodenet)
node1 = netapi.create_node("Pipe", None, "TestName1")
node2 = netapi.create_node("Pipe", None, "TestName2")
node3 = netapi.create_node("Pipe", None, "TestName3")
node4 = netapi.create_node("Pipe", None, "TestName4")
netapi.link_with_reciprocal(node1, node2, "subsur")
netapi.link_with_reciprocal(node1, node3, "subsur")
netapi.link_with_reciprocal(node1, node4, "subsur")
netapi.link_with_reciprocal(node2, node3, "porret")
nodes = netapi.get_nodes_in_gate_field(node1, "sub")
assert len(nodes) == 3
assert node2.uid in [n.uid for n in nodes]
assert node3.uid in [n.uid for n in nodes]
assert node4.uid in [n.uid for n in nodes]
def test_node_netapi_get_nodes_in_gate_field_all_links(fixed_nodenet):
# test get_nodes_in_gate_field without specifying a gate parameter
net, netapi, source = prepare(fixed_nodenet)
node1 = netapi.create_node("Pipe", None, "TestName1")
node2 = netapi.create_node("Pipe", None, "TestName2")
node3 = netapi.create_node("Pipe", None, "TestName3")
node4 = netapi.create_node("Pipe", None, "TestName4")
netapi.link_with_reciprocal(node1, node2, "subsur")
netapi.link_with_reciprocal(node1, node3, "subsur")
netapi.link_with_reciprocal(node1, node4, "subsur")
netapi.link_with_reciprocal(node2, node3, "porret")
nodes = netapi.get_nodes_in_gate_field(node2)
assert len(nodes) == 2
assert node1.uid in [n.uid for n in nodes]
assert node3.uid in [n.uid for n in nodes]
def test_node_netapi_get_nodes_in_gate_field_with_limitations(fixed_nodenet):
# test get_nodes_in_gate_field with limitations: no por links
net, netapi, source = prepare(fixed_nodenet)
node1 = netapi.create_node("Pipe", None, "TestName1")
node2 = netapi.create_node("Pipe", None, "TestName2")
node3 = netapi.create_node("Pipe", None, "TestName3")
node4 = netapi.create_node("Pipe", None, "TestName4")
netapi.link_with_reciprocal(node1, node2, "subsur")
netapi.link_with_reciprocal(node1, node3, "subsur")
netapi.link_with_reciprocal(node1, node4, "subsur")
netapi.link_with_reciprocal(node2, node3, "porret")
nodes = netapi.get_nodes_in_gate_field(node1, "sub", ["por"])
assert len(nodes) == 2
assert node3.uid in [n.uid for n in nodes]
assert node4.uid in [n.uid for n in nodes]
def test_node_netapi_get_nodes_in_gate_field_with_limitations_and_nodespace(fixed_nodenet):
# test get_nodes_in_gate_field with limitations: no por links
net, netapi, source = prepare(fixed_nodenet)
nodespace = netapi.create_nodespace(None, "NestedNodespace")
node1 = netapi.create_node("Pipe", None, "TestName1")
node2 = netapi.create_node("Pipe", None, "TestName2")
node3 = netapi.create_node("Pipe", None, "TestName3")
node4 = netapi.create_node("Pipe", nodespace.uid, "TestName4")
netapi.link_with_reciprocal(node1, node2, "subsur")
netapi.link_with_reciprocal(node1, node3, "subsur")
netapi.link_with_reciprocal(node1, node4, "subsur")
netapi.link_with_reciprocal(node2, node3, "porret")
nodes = netapi.get_nodes_in_gate_field(node1, "sub", ["por"], netapi.get_nodespace(None).uid)
assert len(nodes) == 1
assert node3.uid in [n.uid for n in nodes]
def test_node_netapi_get_nodes_in_slot_field(fixed_nodenet):
# test get_nodes_in_slot_field
net, netapi, source = prepare(fixed_nodenet)
node1 = netapi.create_node("Register", None, "TestName1")
node2 = netapi.create_node("Register", None, "TestName2")
node3 = netapi.create_node("Register", None, "TestName3")
node4 = netapi.create_node("Register", None, "TestName4")
netapi.link(node2, "gen", node1, "gen")
netapi.link(node3, "gen", node1, "gen")
netapi.link(node3, "gen", node1, "gen")
netapi.link(node4, "gen", node1, "gen")
nodes = netapi.get_nodes_in_slot_field(node1, "gen")
assert len(nodes) == 3
assert node2.uid in [n.uid for n in nodes]
assert node3.uid in [n.uid for n in nodes]
assert node4.uid in [n.uid for n in nodes]
def test_node_netapi_get_nodes_in_slot_field_all_links(fixed_nodenet):
# test get_nodes_in_slot_field without a gate parameter
net, netapi, source = prepare(fixed_nodenet)
node1 = netapi.create_node("Pipe", None, "TestName1")
node2 = netapi.create_node("Pipe", None, "TestName2")
node3 = netapi.create_node("Pipe", None, "TestName3")
node4 = netapi.create_node("Pipe", None, "TestName4")
netapi.link_with_reciprocal(node1, node2, "subsur")
netapi.link_with_reciprocal(node1, node3, "subsur")
netapi.link_with_reciprocal(node1, node4, "subsur")
netapi.link_with_reciprocal(node2, node3, "porret")
nodes = netapi.get_nodes_in_slot_field(node1)
assert len(nodes) == 3
assert node2.uid in [n.uid for n in nodes]
assert node3.uid in [n.uid for n in nodes]
assert node4.uid in [n.uid for n in nodes]
def test_node_netapi_get_nodes_with_nodespace_limitation(fixed_nodenet):
# test get_nodes_feed with nodespace limitation
net, netapi, source = prepare(fixed_nodenet)
nodespace = netapi.create_nodespace(None, "NestedNodespace")
node1 = netapi.create_node("Register", None, "TestName1")
node2 = netapi.create_node("Register", None, "TestName2")
node3 = netapi.create_node("Register", None, "TestName3")
node4 = netapi.create_node("Register", nodespace.uid, "TestName4")
netapi.link(node2, "gen", node1, "gen")
netapi.link(node3, "gen", node1, "gen")
netapi.link(node3, "gen", node1, "gen")
netapi.link(node4, "gen", node1, "gen")
nodes = netapi.get_nodes_in_slot_field(node1, "gen", None, netapi.get_nodespace(None).uid)
assert len(nodes) == 2
assert node2.uid in [n.uid for n in nodes]
assert node3.uid in [n.uid for n in nodes]
def test_node_netapi_get_nodes_in_slot_field_with_limitations_and_nodespace(fixed_nodenet):
# test get_nodes_in_gate_field with limitations: no por links
net, netapi, source = prepare(fixed_nodenet)
nodespace = netapi.create_nodespace(None, "NestedNodespace")
node1 = netapi.create_node("Pipe", None, "TestName1")
node2 = netapi.create_node("Pipe", None, "TestName2")
node3 = netapi.create_node("Pipe", None, "TestName3")
node4 = netapi.create_node("Pipe", nodespace.uid, "TestName4")
netapi.link_with_reciprocal(node1, node2, "subsur")
netapi.link_with_reciprocal(node1, node3, "subsur")
netapi.link_with_reciprocal(node1, node4, "subsur")
netapi.link_with_reciprocal(node2, node3, "porret")
nodes = netapi.get_nodes_in_slot_field(node1, "sur", ["por"], netapi.get_nodespace(None).uid)
assert len(nodes) == 1
assert node3.uid in [n.uid for n in nodes]
def test_node_netapi_get_nodes_active(fixed_nodenet):
# test get_nodes_active
net, netapi, source = prepare(fixed_nodenet)
nodespace = netapi.create_nodespace(None, "NestedNodespace")
node1 = netapi.create_node("Register", None, "TestName1")
node2 = netapi.create_node("Register", None, "TestName2")
node3 = netapi.create_node("Register", None, "TestName3")
node4 = netapi.create_node("Register", nodespace.uid, "TestName4")
netapi.link(node2, "gen", node1, "gen")
netapi.link(node3, "gen", node1, "gen")
netapi.link(node3, "gen", node1, "gen")
netapi.link(node4, "gen", node1, "gen")
netapi.link(source, "gen", node2, "gen", 0.5)
netapi.link(source, "gen", node4, "gen", 0.5)
net.step()
net.step()
nodes = netapi.get_nodes_active(netapi.get_nodespace(None).uid, "Register", 0.7, "gen")
assert len(nodes) == 2
assert node1.uid in [n.uid for n in nodes]
assert source.uid in [n.uid for n in nodes]
nodes = netapi.get_nodes_active(netapi.get_nodespace(None).uid, "Register")
assert len(nodes) == 2
assert node1.uid in [n.uid for n in nodes]
assert source.uid in [n.uid for n in nodes]
def test_node_netapi_get_nodes_active_with_nodespace_limitation(fixed_nodenet):
# test get_nodes_active with nodespace filtering
net, netapi, source = prepare(fixed_nodenet)
nodespace = netapi.create_nodespace(None, "NestedNodespace")
node1 = netapi.create_node("Register", None, "TestName1")
node2 = netapi.create_node("Register", None, "TestName2")
node3 = netapi.create_node("Register", None, "TestName3")
node4 = netapi.create_node("Register", nodespace.uid, "TestName4")
netapi.link(node2, "gen", node1, "gen")
netapi.link(node3, "gen", node1, "gen")
netapi.link(node3, "gen", node1, "gen")
netapi.link(node4, "gen", node1, "gen")
netapi.link(source, "gen", node2, "gen", 0.5)
netapi.link(source, "gen", node4, "gen", 0.5)
net.step()
net.step()
nodes = netapi.get_nodes_active(nodespace.uid, "Register", 0.4)
assert len(nodes) == 1
assert node4.uid in [n.uid for n in nodes]
def test_node_netapi_delete_node(fixed_nodenet):
# test simple delete node case
net, netapi, source = prepare(fixed_nodenet)
node1 = netapi.create_node("Register", None, "TestName1")
node2 = netapi.create_node("Register", None, "TestName2")
node3 = netapi.create_node("Register", None, "TestName3")
netapi.link(node2, "gen", node1, "gen")
netapi.link(node3, "gen", node1, "gen")
netapi.link(node3, "gen", node1, "gen")
olduid = node1.uid
netapi.delete_node(node1)
with pytest.raises(KeyError):
netapi.get_node(olduid)
assert len(node2.get_gate("gen").get_links()) == 0
def test_node_netapi_delete_nodespace(fixed_nodenet):
# test delete node case deleting a nodespace
net, netapi, source = prepare(fixed_nodenet)
nodespace = netapi.create_nodespace(None, "NestedNodespace")
node1 = netapi.create_node("Register", None, "TestName1")
node2 = netapi.create_node("Register", None, "TestName2")
node3 = netapi.create_node("Register", None, "TestName3")
node4 = netapi.create_node("Register", nodespace.uid, "TestName4")
netapi.link(node2, "gen", node1, "gen")
netapi.link(node3, "gen", node1, "gen")
netapi.link(node3, "gen", node1, "gen")
netapi.link(node4, "gen", node1, "gen")
node4uid = node4.uid
netapi.delete_nodespace(nodespace)
with pytest.raises(KeyError):
netapi.get_node(node4uid)
def test_node_netapi_link(fixed_nodenet):
# test linking nodes
net, netapi, source = prepare(fixed_nodenet)
node1 = netapi.create_node("Register", None, "TestName1")
node2 = netapi.create_node("Register", None, "TestName2")
netapi.link(node2, "gen", node1, "gen")
assert len(node2.get_gate("gen").get_links()) == 1
for link in node2.get_gate("gen").get_links():
# basic internal logic
assert link.source_node.uid == node2.uid
assert link.target_node.uid == node1.uid
assert link.weight == 1
found = False
for otherside_link in node1.get_slot("gen").get_links():
if otherside_link.signature == link.signature:
found = True
assert found
# frontend/persistency-facing
assert link.get_data()['weight'] == link.weight
assert link.get_data()['target_node_uid'] == node1.uid
assert link.get_data()['target_slot_name'] == 'gen'
# frontend/persistency-facing
assert link.get_data(complete=True)['source_node_uid'] == node2.uid
assert link.get_data(complete=True)['source_gate_name'] == 'gen'
def test_node_netapi_link_change_weight(fixed_nodenet):
# test linking nodes, the changing weights
net, netapi, source = prepare(fixed_nodenet)
node1 = netapi.create_node("Register", None, "TestName1")
node2 = netapi.create_node("Register", None, "TestName2")
netapi.link(node2, "gen", node1, "gen")
net.step()
netapi.link(node2, "gen", node1, "gen", 0.8)
assert len(node2.get_gate("gen").get_links()) == 1
for link in node2.get_gate("gen").get_links():
# basic internal logic
assert link.source_node.uid == node2.uid
assert link.target_node.uid == node1.uid
assert round(link.weight, 5) == 0.8
found = False
for otherside_link in node1.get_slot("gen").get_links():
if otherside_link.signature == link.signature:
found = True
assert found
# frontend/persistency-facing
assert link.get_data()['weight'] == link.weight
assert link.get_data()['target_node_uid'] == node1.uid
assert link.get_data()['target_slot_name'] == 'gen'
def test_node_netapi_link_with_reciprocal(fixed_nodenet):
# test linking pipe and concept nodes with reciprocal links
net, netapi, source = prepare(fixed_nodenet)
n_head = netapi.create_node("Pipe", None, "Head")
n_a = netapi.create_node("Pipe", None, "A")
n_b = netapi.create_node("Pipe", None, "B")
n_c = netapi.create_node("Pipe", None, "C")
netapi.link_with_reciprocal(n_head, n_a, "subsur")
netapi.link_with_reciprocal(n_head, n_b, "subsur")
netapi.link_with_reciprocal(n_head, n_c, "subsur")
netapi.link_with_reciprocal(n_a, n_b, "porret", 0.5)
netapi.link_with_reciprocal(n_b, n_c, "porret", 0.5)
assert len(n_head.get_gate("sub").get_links()) == 3
assert len(n_head.get_slot("sur").get_links()) == 3
assert len(n_a.get_gate("sur").get_links()) == 1
assert len(n_a.get_slot("sub").get_links()) == 1
assert len(n_b.get_gate("sur").get_links()) == 1
assert len(n_b.get_slot("sub").get_links()) == 1
assert len(n_c.get_gate("sur").get_links()) == 1
assert len(n_c.get_slot("sub").get_links()) == 1
assert len(n_a.get_gate("por").get_links()) == 1
assert len(n_a.get_slot("ret").get_links()) == 1
assert len(n_a.get_slot("por").get_links()) == 0
assert len(n_b.get_gate("por").get_links()) == 1
assert len(n_b.get_slot("ret").get_links()) == 1
assert len(n_b.get_gate("ret").get_links()) == 1
assert len(n_b.get_slot("por").get_links()) == 1
assert len(n_c.get_gate("por").get_links()) == 0
assert len(n_c.get_slot("ret").get_links()) == 0
for link in n_b.get_gate("por").get_links():
assert link.weight == 0.5
@pytest.mark.engine("dict_engine")
def test_node_netapi_link_with_reciprocal_and_concepts(fixed_nodenet):
# test linking pipe and concept nodes with reciprocal links
net, netapi, source = prepare(fixed_nodenet)
n_head = netapi.create_node("Pipe", None, "Head")
n_d = netapi.create_node("Concept", None, "D")
n_e = netapi.create_node("Concept", None, "E")
netapi.link_with_reciprocal(n_head, n_d, "catexp")
netapi.link_with_reciprocal(n_d, n_e, "symref")
assert len(n_d.get_gate("sym").get_links()) == 1
assert len(n_d.get_slot("gen").get_links()) == 2
assert len(n_head.get_gate("cat").get_links()) == 1
assert len(n_head.get_slot("exp").get_links()) == 1
def test_node_netapi_unlink(fixed_nodenet):
# test completely unlinking a node
net, netapi, source = prepare(fixed_nodenet)
n_head = netapi.create_node("Pipe", None, "Head")
n_a = netapi.create_node("Pipe", None, "A")
n_b = netapi.create_node("Pipe", None, "B")
n_c = netapi.create_node("Pipe", None, "C")
n_d = netapi.create_node("Pipe", None, "D")
nodes = [n_a, n_b, n_c, n_d]
for source in nodes:
for target in nodes:
netapi.link_with_reciprocal(source, target, "porret")
netapi.unlink(n_b)
assert len(n_a.get_slot('por').get_links()) == 3
assert len(n_b.get_slot('por').get_links()) == 3
assert len(n_c.get_slot('por').get_links()) == 3
assert len(n_d.get_slot('por').get_links()) == 3
def test_node_netapi_unlink_specific_link(fixed_nodenet):
# test removing a specific link
net, netapi, source = prepare(fixed_nodenet)
n_head = netapi.create_node("Pipe", None, "Head")
n_a = netapi.create_node("Pipe", None, "A")
n_b = netapi.create_node("Pipe", None, "B")
n_c = netapi.create_node("Pipe", None, "C")
n_d = netapi.create_node("Pipe", None, "D")
nodes = [n_a, n_b, n_c, n_d]
for source in nodes:
for target in nodes:
netapi.link_with_reciprocal(source, target, "porret")
netapi.unlink(n_b, "por", n_c, "por")
assert len(n_a.get_slot('por').get_links()) == 4
assert len(n_b.get_slot('por').get_links()) == 4
assert len(n_c.get_slot('por').get_links()) == 3
assert len(n_d.get_slot('por').get_links()) == 4
def test_node_netapi_unlink_gate(fixed_nodenet):
# test unlinking a gate
net, netapi, source = prepare(fixed_nodenet)
n_head = netapi.create_node("Pipe", None, "Head")
n_a = netapi.create_node("Pipe", None, "A")
n_b = netapi.create_node("Pipe", None, "B")
n_c = netapi.create_node("Pipe", None, "C")
n_d = netapi.create_node("Pipe", None, "D")
nodes = [n_a, n_b, n_c, n_d]
for source in nodes:
for target in nodes:
netapi.link_with_reciprocal(source, target, "porret")
netapi.unlink(n_b, "por")
assert len(n_a.get_slot('por').get_links()) == 3
assert len(n_b.get_slot('por').get_links()) == 3
assert len(n_c.get_slot('por').get_links()) == 3
assert len(n_d.get_slot('por').get_links()) == 3
def test_node_netapi_unlink_direction(fixed_nodenet):
# test unlinking a gate
net, netapi, source = prepare(fixed_nodenet)
n_head = netapi.create_node("Pipe", None, "Head")
n_a = netapi.create_node("Pipe", None, "A")
n_b = netapi.create_node("Pipe", None, "B")
n_c = netapi.create_node("Pipe", None, "C")
netapi.link_with_reciprocal(n_head, n_a, "subsur")
netapi.link_with_reciprocal(n_head, n_b, "subsur")
netapi.link_with_reciprocal(n_head, n_c, "subsur")
nodes = [n_a, n_b, n_c]
for source in nodes:
for target in nodes:
netapi.link_with_reciprocal(source, target, "porret")
netapi.unlink_direction(n_b, "por")
assert len(n_head.get_gate('sub').get_links()) == 3
assert len(n_head.get_slot('sur').get_links()) == 3
assert len(n_a.get_slot('por').get_links()) == 2
assert len(n_b.get_slot('por').get_links()) == 0
assert len(n_c.get_slot('por').get_links()) == 2
netapi.unlink_direction(n_head, "sub")
assert len(n_head.get_gate('sub').get_links()) == 0
assert len(n_head.get_slot('sur').get_links()) == 3
assert len(n_a.get_slot('sub').get_links()) == 0
assert len(n_b.get_slot('sub').get_links()) == 0
assert len(n_c.get_slot('sub').get_links()) == 0
def test_node_netapi_import_actors(fixed_nodenet, test_world):
# test importing data targets as actors
net, netapi, source = prepare(fixed_nodenet)
micropsi.set_nodenet_properties(fixed_nodenet, world_uid=test_world, worldadapter='Braitenberg')
root_ns = netapi.get_nodespace(None)
netapi.import_actors(root_ns.uid)
actors = netapi.get_nodes(root_ns.uid, nodetype="Actor")
assert len(actors) == 2
assert set([a.get_parameter('datatarget') for a in actors]) == set(net.worldadapter_instance.datatargets.keys())
# do it again, make sure we can call import multiple times
netapi.import_actors(root_ns.uid)
actors = netapi.get_nodes(root_ns.uid, nodetype="Actor")
assert len(actors) == 2
def test_node_netapi_import_sensors(fixed_nodenet, test_world):
# test importing data sources as sensors
net, netapi, source = prepare(fixed_nodenet)
micropsi.set_nodenet_properties(fixed_nodenet, world_uid=test_world, worldadapter='Braitenberg')
root_ns = netapi.get_nodespace(None)
netapi.import_sensors(root_ns.uid)
sensors = netapi.get_nodes(root_ns.uid, nodetype="Sensor")
assert len(sensors) == 2
assert set([s.get_parameter('datasource') for s in sensors]) == set(net.worldadapter_instance.datasources.keys())
# do it again, make sure we can call import multiple times
netapi.import_sensors(root_ns.uid)
sensors = netapi.get_nodes(root_ns.uid, nodetype="Sensor")
assert len(sensors) == 2
def test_set_gate_function(fixed_nodenet):
# test setting a custom gate function
from micropsi_core.nodenet.gatefunctions import sigmoid
net, netapi, source = prepare(fixed_nodenet)
some_other_node_type = netapi.create_node("Pipe", None)
netapi.unlink(source, "gen")
net.step()
assert source.get_gate("gen").activation == 0
netapi.set_gatefunction(netapi.get_nodespace(None).uid, "Register", "gen", "sigmoid")
source.set_gate_parameter('gen', 'theta', 1)
net.step()
assert round(source.get_gate("gen").activation, 5) == round(sigmoid(0, 0, 1), 5)
assert some_other_node_type.get_gate("gen").activation == 0
def test_autoalign(fixed_nodenet):
net, netapi, source = prepare(fixed_nodenet)
for uid in net.get_node_uids():
net.get_node(uid).position = [12, 13, 11]
netapi.autoalign_nodespace(netapi.get_nodespace(None).uid)
positions = []
for uid in net.get_node_uids():
if net.get_node(uid).parent_nodespace == netapi.get_nodespace(None).uid:
positions.extend(net.get_node(uid).position)
assert set(positions) != set([12, 13, 11])
for uid in net.get_node_uids():
net.get_node(uid).position = [12, 13, 11]
netapi.autoalign_nodespace('InVaLiD')
positions = []
for uid in net.get_node_uids():
positions.extend(net.get_node(uid).position)
assert set(positions) == set([12, 13, 11])
def test_autoalign_updates_last_changed(fixed_nodenet):
net, netapi, source = prepare(fixed_nodenet)
for uid in net.get_node_uids():
net.get_node(uid).position = [12, 13, 11]
net.step()
net.step()
netapi.autoalign_nodespace(netapi.get_nodespace(None).uid)
changes = net.get_nodespace_changes([None], 2)
for uid in net.get_node_uids():
if net.get_node(uid).position != [12, 13, 11]:
assert uid in changes['nodes_dirty']
def test_copy_nodes(fixed_nodenet):
net, netapi, source = prepare(fixed_nodenet)
nodespace = netapi.create_nodespace(None, name='copy')
a1 = netapi.get_node('n0001')
a2 = netapi.get_node('n0002')
a1.set_parameter('expectation', 0.6)
a1.set_gate_parameter('gen', 'amplification', 0.27)
mapping = netapi.copy_nodes([a1, a2], nodespace.uid)
assert a1 in mapping
assert a2 in mapping
assert a1.name == mapping[a1].name
assert mapping[a1].parent_nodespace == nodespace.uid
assert mapping[a2].parent_nodespace == nodespace.uid
assert set(nodespace.get_known_ids()) == set([mapping[a1].uid, mapping[a2].uid])
assert len(mapping[a1].get_slot('gen').get_links()) == 0 # incoming link from outside not copied
assert mapping[a1].get_gate('por').get_links()[0].target_node.uid == mapping[a2].uid
assert a1.clone_parameters() == mapping[a1].clone_parameters()
assert a1.get_gate_parameters() == mapping[a1].get_gate_parameters()
def test_group_nodes_by_names(fixed_nodenet):
net, netapi, source = prepare(fixed_nodenet)
sepp1 = netapi.create_node("Register", None, "sepp1")
sepp2 = netapi.create_node("Register", None, "sepp2")
sepp3 = netapi.create_node("Register", None, "sepp3")
netapi.group_nodes_by_names(None, node_name_prefix="sepp")
seppen_act = netapi.get_activations(None, "sepp")
assert len(seppen_act) == 3
def test_group_nodes_by_ids(fixed_nodenet):
net, netapi, source = prepare(fixed_nodenet)
ids = ["n0001", "n0002"]
netapi.group_nodes_by_ids(None, ids, "some")
some_act = netapi.get_activations(None, "some")
assert len(some_act) == 2
def test_ungroup_nodes(fixed_nodenet):
net, netapi, source = prepare(fixed_nodenet)
ids = ["n0001", "n0002"]
netapi.group_nodes_by_ids(None, ids, "some")
netapi.ungroup_nodes(None, "some")
def test_get_activations(fixed_nodenet):
net, netapi, source = prepare(fixed_nodenet)
sepp1 = netapi.create_node("Register", None, "sepp1")
sepp2 = netapi.create_node("Register", None, "sepp2")
sepp3 = netapi.create_node("Register", None, "sepp3")
netapi.group_nodes_by_names(None, node_name_prefix="sepp")
seppen_act = netapi.get_activations(None, "sepp")
assert len(seppen_act) == 3
assert seppen_act[0] == 0
assert seppen_act[1] == 0
assert seppen_act[2] == 0
netapi.link(source, "gen", sepp2, "gen")
net.step()
seppen_act = netapi.get_activations(None, "sepp")
assert seppen_act[0] == 0
assert seppen_act[1] == 1
assert seppen_act[2] == 0
def test_substitute_activations(fixed_nodenet):
net, netapi, source = prepare(fixed_nodenet)
sepp1 = netapi.create_node("Register", None, "sepp1").uid
sepp2 = netapi.create_node("Register", None, "sepp2").uid
sepp3 = netapi.create_node("Register", None, "sepp3").uid
netapi.group_nodes_by_names(None, node_name_prefix="sepp")
netapi.link(source, "gen", netapi.get_node(sepp2), "gen")
net.step()
suddenly_a_wild_activation_appears = [0.2, -1, 42]
netapi.substitute_activations(None, "sepp", suddenly_a_wild_activation_appears)
assert round(netapi.get_node(sepp1).get_gate('gen').activation, 2) == 0.2
assert round(netapi.get_node(sepp2).get_gate('gen').activation, 2) == -1
assert round(netapi.get_node(sepp3).get_gate('gen').activation, 2) == 42
netapi.link(netapi.get_node(sepp2), "gen", netapi.get_node(sepp3), "gen")
net.step()
seppen_act = netapi.get_activations(None, "sepp")
assert round(seppen_act[0], 2) == 0
assert round(seppen_act[1], 2) == 1
assert round(seppen_act[2], 2) == -1
def test_get_thetas(fixed_nodenet):
net, netapi, source = prepare(fixed_nodenet)
sepp1 = netapi.create_node("Register", None, "sepp1")
sepp2 = netapi.create_node("Register", None, "sepp2")
sepp3 = netapi.create_node("Register", None, "sepp3")
netapi.group_nodes_by_names(None, node_name_prefix="sepp")
seppen_theta = netapi.get_thetas(None, "sepp")
assert len(seppen_theta) == 3
assert seppen_theta[0] == 0
assert seppen_theta[1] == 0
assert seppen_theta[2] == 0
def test_set_thetas(fixed_nodenet):
net, netapi, source = prepare(fixed_nodenet)
sepp1 = netapi.create_node("Register", None, "sepp1")
sepp2 = netapi.create_node("Register", None, "sepp2")
sepp3 = netapi.create_node("Register", None, "sepp3")
netapi.group_nodes_by_names(None, node_name_prefix="sepp")
some_thetas = [1, 2, 3]
netapi.set_thetas(None, "sepp", some_thetas)
net.step()
seppen_theta = netapi.get_thetas(None, "sepp")
assert round(seppen_theta[0], 2) == 1
assert round(seppen_theta[1], 2) == 2
assert round(seppen_theta[2], 2) == 3
def test_get_link_weights(fixed_nodenet):
net, netapi, source = prepare(fixed_nodenet)
sepp1 = netapi.create_node("Register", None, "sepp1")
sepp2 = netapi.create_node("Register", None, "sepp2")
sepp3 = netapi.create_node("Register", None, "sepp3")
netapi.group_nodes_by_names(None, node_name_prefix="sepp")
hugo1 = netapi.create_node("Register", None, "hugo1")
hugo2 = netapi.create_node("Register", None, "hugo2")
netapi.group_nodes_by_names(None, node_name_prefix="hugo")
netapi.link(sepp2, "gen", hugo1, "gen", 0.4)
w = netapi.get_link_weights(None, "sepp", None, "hugo")
value = None
# list style indexing
try:
value = round(float(w[0][1]), 2)
except:
pass
# numpy style indexing
try:
value = round(float(w[0, 1]), 2)
except:
pass
assert value == 0.4
def test_set_link_weights(fixed_nodenet):
net, netapi, source = prepare(fixed_nodenet)
sepp1 = netapi.create_node("Register", None, "sepp1")
sepp2 = netapi.create_node("Register", None, "sepp2")
sepp3 = netapi.create_node("Register", None, "sepp3")
netapi.group_nodes_by_names(None, node_name_prefix="sepp")
hugo1 = netapi.create_node("Register", None, "hugo1")
hugo2 = netapi.create_node("Register", None, "hugo2")
netapi.group_nodes_by_names(None, node_name_prefix="hugo")
netapi.link(sepp2, "gen", hugo1, "gen", 0.4)
w = netapi.get_link_weights(None, "sepp", None, "hugo")
# change value
# list style indexing
try:
w[0][1] = 0.6
except:
pass
# numpy style indexing
try:
w[0, 1] = 0.6
except:
pass
netapi.set_link_weights(None, "sepp", None, "hugo", w)
assert round(float(netapi.get_node(sepp2.uid).get_gate('gen').get_links()[0].weight), 2) == 0.6
# remove link
# list style indexing
try:
w[0][1] = 0
except:
pass
# numpy style indexing
try:
w[0, 1] = 0
except:
pass
netapi.set_link_weights(None, "sepp", None, "hugo", w)
assert len(netapi.get_node(sepp2.uid).get_gate('gen').get_links()) == 0
# create link
# list style indexing
try:
w[1][1] = 0.5
except:
pass
# numpy style indexing
try:
w[1, 1] = 0.5
except:
pass
netapi.set_link_weights(None, "sepp", None, "hugo", w)
assert len(netapi.get_node(sepp2.uid).get_gate('gen').get_links()) == 1
def test_get_node_ids(fixed_nodenet):
net, netapi, source = prepare(fixed_nodenet)
sepp1 = netapi.create_node("Register", None, "sepp1")
sepp2 = netapi.create_node("Register", None, "sepp2")
sepp3 = netapi.create_node("Register", None, "sepp3")
netapi.group_nodes_by_names(None, node_name_prefix="sepp")
seppen_ids = netapi.get_node_ids(None, "sepp")
assert len(seppen_ids) == 3
assert seppen_ids[0] == sepp1.uid
assert seppen_ids[1] == sepp2.uid
assert seppen_ids[2] == sepp3.uid
def test_add_gate_monitor(test_nodenet, node):
nodenet = micropsi.get_nodenet(test_nodenet)
netapi = nodenet.netapi
uid = netapi.add_gate_monitor(node, 'gen', name='sepp', color='#987654')
assert nodenet.get_monitor(uid).name == 'sepp'
assert nodenet.get_monitor(uid).type == 'gate'
assert nodenet.get_monitor(uid).color == '#987654'
@pytest.mark.engine("dict_engine")
def test_add_slot_monitor(test_nodenet, node):
nodenet = micropsi.get_nodenet(test_nodenet)
netapi = nodenet.netapi
uid = netapi.add_slot_monitor(node, 'gen')
assert nodenet.get_monitor(uid).type == 'slot'
def test_add_link_monitor(test_nodenet, node):
nodenet = micropsi.get_nodenet(test_nodenet)
netapi = nodenet.netapi
uid = netapi.add_link_monitor(node, 'gen', node, 'gen', name='sepplink')
assert nodenet.get_monitor(uid).name == 'sepplink'
assert nodenet.get_monitor(uid).property == 'weight'
def test_add_modulator_monitor(test_nodenet):
nodenet = micropsi.get_nodenet(test_nodenet)
netapi = nodenet.netapi
nodenet.step()
uid = netapi.add_modulator_monitor('base_age', 'age')
assert nodenet.get_monitor(uid).modulator == 'base_age'
assert nodenet.get_monitor(uid).name == 'age'
def test_add_custom_monitor(test_nodenet):
nodenet = micropsi.get_nodenet(test_nodenet)
netapi = nodenet.netapi
function = "return len(netapi.get_nodes())"
uid = netapi.add_custom_monitor(function, 'number_of_nodes', color=None)
assert nodenet.get_monitor(uid).name == 'number_of_nodes'
assert nodenet.get_monitor(uid).function == function
def test_get_monitor(test_nodenet, node):
nodenet = micropsi.get_nodenet(test_nodenet)
netapi = nodenet.netapi
uid = netapi.add_gate_monitor(node, 'gen')
assert nodenet.get_monitor(uid) == netapi.get_monitor(uid)
def test_remove_monitor(test_nodenet, node):
nodenet = micropsi.get_nodenet(test_nodenet)
netapi = nodenet.netapi
uid = netapi.add_gate_monitor(node, 'gen')
netapi.remove_monitor(uid)
assert nodenet.get_monitor(uid) is None
def test_set_dashboard_value(test_nodenet, node):
nodenet = micropsi.get_nodenet(test_nodenet)
netapi = nodenet.netapi
netapi.set_dashboard_value('foo', 'bar')
assert nodenet.dashboard_values['foo'] == 'bar'
def test_decay_porret_links(test_nodenet):
nodenet = micropsi.get_nodenet(test_nodenet)
netapi = nodenet.netapi
pipes = []
netapi.set_modulator('base_porret_decay_factor', 0.1)
for i in range(10):
node = netapi.create_node("Pipe", None, "P%d" % i)
pipes.append(node)
if i > 0:
netapi.link_with_reciprocal(pipes[i - 1], node, 'porret', weight=0.1 * i)
netapi.link_with_reciprocal(pipes[0], pipes[1], 'subsur', weight=0.5)
reg = netapi.create_node("Register", None, "source")
netapi.link(reg, 'gen', pipes[0], 'gen', 0.4)
netapi.decay_por_links(None)
for i in range(9):
assert round(pipes[i].get_gate('por').get_links()[0].weight, 3) == round(0.1 * (i + 1) * 0.9, 3)
# sub/sur/ret/gen links unchanged
assert round(reg.get_gate('gen').get_links()[0].weight, 3) == 0.4
assert round(pipes[0].get_gate('sub').get_links()[0].weight, 3) == 0.5
assert round(pipes[7].get_gate('ret').get_links()[0].weight, 3) == 0.7
def test_unlink_gate(test_nodenet):
nodenet = micropsi.get_nodenet(test_nodenet)
netapi = nodenet.netapi
node = netapi.create_node("Pipe", None)
pipe1 = netapi.create_node("Pipe", None)
pipe2 = netapi.create_node("Pipe", None)
netapi.link_with_reciprocal(node, pipe1, 'subsur')
netapi.link_with_reciprocal(node, pipe2, 'subsur')
netapi.link(node, 'por', pipe1, 'gen')
netapi.link(node, 'por', pipe2, 'gen')
netapi.link(node, 'por', pipe1, 'sur')
micropsi.save_nodenet(test_nodenet)
netapi.unlink_gate(node, 'por')
assert node.get_gate('por').empty
assert not node.get_gate('sub').empty
micropsi.revert_nodenet(test_nodenet)
netapi = micropsi.nodenets[test_nodenet].netapi
node = netapi.get_node(node.uid)
netapi.unlink_gate(node, 'por', target_node_uid=pipe1.uid)
assert len(node.get_gate('por').get_links()) == 1
assert node.get_gate('por').get_links()[0].target_node.uid == pipe2.uid
micropsi.revert_nodenet(test_nodenet)
netapi = micropsi.nodenets[test_nodenet].netapi
node = netapi.get_node(node.uid)
netapi.unlink_gate(node, 'por', target_slot_name='sur')
assert len(node.get_gate('por').get_links()) == 2 # pipe1:gen, pipe2:gen
    assert len(node.get_gate('sub').get_links()) == 2  # sub links untouched; only por->sur was unlinked
def test_unlink_slot(test_nodenet):
nodenet = micropsi.get_nodenet(test_nodenet)
netapi = nodenet.netapi
node = netapi.create_node("Pipe", None)
pipe1 = netapi.create_node("Pipe", None)
pipe2 = netapi.create_node("Pipe", None)
netapi.link_with_reciprocal(node, pipe1, 'subsur')
netapi.link_with_reciprocal(node, pipe2, 'subsur')
netapi.link(pipe1, 'gen', node, 'por')
netapi.link(pipe2, 'gen', node, 'por')
netapi.link(pipe1, 'sur', node, 'por')
micropsi.save_nodenet(test_nodenet)
netapi.unlink_slot(node, 'por')
assert node.get_slot('por').empty
assert not node.get_slot('sur').empty
micropsi.revert_nodenet(test_nodenet)
netapi = micropsi.nodenets[test_nodenet].netapi
node = netapi.get_node(node.uid)
netapi.unlink_slot(node, 'por', source_node_uid=pipe1.uid)
assert len(node.get_slot('por').get_links()) == 1
assert node.get_slot('por').get_links()[0].source_node.uid == pipe2.uid
micropsi.revert_nodenet(test_nodenet)
netapi = micropsi.nodenets[test_nodenet].netapi
node = netapi.get_node(node.uid)
netapi.unlink_slot(node, 'por', source_gate_name='sur')
assert len(node.get_slot('por').get_links()) == 2 # pipe1:gen, pipe2:gen
assert len(node.get_slot('sur').get_links()) == 2 # only sur->por unlinked
def test_nodespace_properties(test_nodenet):
nodenet = micropsi.get_nodenet(test_nodenet)
netapi = nodenet.netapi
rootns = netapi.get_nodespace(None)
netapi.set_nodespace_properties(None, {'foo': 'bar'})
data = netapi.get_nodespace_properties()
assert data[rootns.uid] == {'foo': 'bar'}
```
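The node-group calls exercised above follow one pattern: group registers under a name prefix, then read or write per-group vectors and matrices. Below is a condensed sketch of that flow, reusing the `prepare` helper and `fixed_nodenet` fixture from this test module; the group and node names are illustrative, not part of the netapi.
```python
def sketch_group_workflow(fixed_nodenet):
    net, netapi, source = prepare(fixed_nodenet)
    for i in range(3):
        netapi.create_node("Register", None, "sepp%d" % (i + 1))
        netapi.create_node("Register", None, "hugo%d" % (i + 1))
    netapi.group_nodes_by_names(None, node_name_prefix="sepp")
    netapi.group_nodes_by_names(None, node_name_prefix="hugo")
    # per-group vectors, ordered like netapi.get_node_ids(None, "sepp")
    activations = netapi.get_activations(None, "sepp")
    netapi.set_thetas(None, "sepp", [1, 2, 3])
    # weights between two groups come back as a matrix; the dual list/numpy
    # indexing in the tests above is what keeps them engine-agnostic
    weights = netapi.get_link_weights(None, "sepp", None, "hugo")
    netapi.set_link_weights(None, "sepp", None, "hugo", weights)
    return activations, weights
```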
#### File: micropsi_core/tests/test_runtime_nodes.py
```python
from micropsi_core import runtime as micropsi
import pytest
__author__ = 'joscha'
__date__ = '29.10.12'
def prepare_nodenet(test_nodenet):
res, node_a_uid = micropsi.add_node(test_nodenet, "Pipe", [200, 250, 10], None, state=None, name="A")
res, node_b_uid = micropsi.add_node(test_nodenet, "Pipe", [500, 350, 10], None, state=None, name="B")
res, node_c_uid = micropsi.add_node(test_nodenet, "Pipe", [300, 150, 10], None, state=None, name="C")
res, node_s_uid = micropsi.add_node(test_nodenet, "Sensor", [200, 450, 10], None, state=None, name="S")
return {
'a': node_a_uid,
'b': node_b_uid,
'c': node_c_uid,
's': node_s_uid
}
def test_add_node(test_nodenet):
micropsi.load_nodenet(test_nodenet)
# make sure nodenet is empty
nodespace = micropsi.get_nodes(test_nodenet)
try:
for i in nodespace["nodes"]:
micropsi.delete_node(test_nodenet, i)
except:
pass
nodespace = micropsi.get_nodes(test_nodenet)
assert len(nodespace.get("nodes", [])) == 0
res, uid = micropsi.add_node(test_nodenet, "Pipe", [200, 250, 10], None, state=None, name="A")
nodespace = micropsi.get_nodes(test_nodenet)
assert len(nodespace["nodes"]) == 1
node1 = nodespace["nodes"][uid]
assert node1["name"] == "A"
assert node1["position"] == [200, 250, 10]
def test_position_always_3d(test_nodenet):
res, nuid = micropsi.add_node(test_nodenet, "Pipe", [200], None, state=None, name="A")
res, nsuid = micropsi.add_nodespace(test_nodenet, [200, 125, 0, 134], None, name="NS")
data = micropsi.get_nodes(test_nodenet)
assert data['nodes'][nuid]['position'] == [200, 0, 0]
assert data['nodespaces'][nsuid]['position'] == [200, 125, 0]
def test_get_nodenet_activation_data(test_nodenet):
nodes = prepare_nodenet(test_nodenet)
uid = nodes['a']
activation_data = micropsi.get_nodenet_activation_data(test_nodenet, [None])
assert activation_data["activations"][uid][0] == 0
assert activation_data["activations"][uid][1] == 0
assert activation_data["activations"][uid][2] == 0
assert activation_data["activations"][uid][3] == 0
assert activation_data["activations"][uid][4] == 0
assert activation_data["activations"][uid][5] == 0
assert activation_data["activations"][uid][6] == 0
micropsi.set_node_activation(test_nodenet, nodes['a'], 0.34556865)
activation_data = micropsi.get_nodenet_activation_data(test_nodenet, [None])
assert activation_data["activations"][uid][0] == 0.3
def test_get_nodenet_activation_data_for_nodespace(test_nodenet):
nodes = prepare_nodenet(test_nodenet)
uid = nodes['a']
nodespace = micropsi.nodenets[test_nodenet].get_nodespace_uids()[0]
activation_data = micropsi.get_nodenet_activation_data(test_nodenet, [nodespace])
assert activation_data["activations"][uid][0] == 0
def test_get_nodespace(test_nodenet):
nodes = prepare_nodenet(test_nodenet)
nodespace = micropsi.get_nodes(test_nodenet)
assert len(nodespace["nodes"]) == 4
node1 = nodespace["nodes"][nodes['a']]
assert node1["name"] == "A"
assert node1["position"] == [200, 250, 10]
def test_get_nodespace_list(test_nodenet):
nodes = prepare_nodenet(test_nodenet)
data = micropsi.get_nodespace_list(test_nodenet)
uid = list(data.keys())[0]
assert data[uid]['name'] == 'Root'
assert nodes['a'] in data[uid]['nodes']
node = data[uid]['nodes'][nodes['a']]
assert node['name'] == 'A'
assert node['type'] == 'Pipe'
def test_get_nodespace_list_with_empty_nodespace(test_nodenet):
res, uid = micropsi.add_nodespace(test_nodenet, [200, 250, 10], None, name="Foospace")
data = micropsi.get_nodespace_list(test_nodenet)
assert data[uid]['nodes'] == {}
def test_add_link(test_nodenet):
nodes = prepare_nodenet(test_nodenet)
micropsi.add_link(test_nodenet, nodes['a'], "por", nodes['b'], "gen", 0.5, 1)
micropsi.add_link(test_nodenet, nodes['a'], "por", nodes['b'], "gen", 1, 0.1)
micropsi.add_link(test_nodenet, nodes['c'], "ret", nodes['b'], "gen", 1, 1)
nodespace = micropsi.get_nodes(test_nodenet)
assert len(nodespace["nodes"]) == 4
link_a_b = nodespace["nodes"][nodes['a']]['links']['por'][0]
assert link_a_b['target_node_uid'] == nodes['b']
assert link_a_b['target_slot_name'] == 'gen'
assert link_a_b['weight'] == 1
link_c_b = nodespace['nodes'][nodes['c']]['links']['ret'][0]
assert link_c_b["target_node_uid"] == nodes['b']
assert link_c_b["target_slot_name"] == "gen"
assert nodespace['nodes'][nodes['b']]['links'] == {}
assert nodespace['nodes'][nodes['s']]['links'] == {}
def test_delete_link(test_nodenet):
nodes = prepare_nodenet(test_nodenet)
success, link = micropsi.add_link(test_nodenet, nodes['a'], "por", nodes['b'], "gen", 0.5, 1)
assert success
micropsi.delete_link(test_nodenet, nodes['a'], "por", nodes['b'], "gen")
nodespace = micropsi.get_nodes(test_nodenet)
assert nodespace['nodes'][nodes['a']]['links'] == {}
def test_save_nodenet(test_nodenet):
prepare_nodenet(test_nodenet)
# save_nodenet
micropsi.save_nodenet(test_nodenet)
# unload_nodenet
micropsi.unload_nodenet(test_nodenet)
try:
micropsi.get_nodes(test_nodenet)
assert False, "could fetch a Nodespace that should not have been in memory"
except:
pass
# load_nodenet
micropsi.get_nodenet(test_nodenet)
nodespace = micropsi.get_nodes(test_nodenet)
assert len(nodespace["nodes"]) == 4
micropsi.delete_nodenet(test_nodenet)
def test_reload_native_modules(fixed_nodenet):
def hashlink(l):
return "%s:%s:%s:%s" % (l['source_node_uid'], l['source_gate_name'], l['target_node_uid'], l['target_slot_name'])
data_before = micropsi.nodenets[fixed_nodenet].export_json()
links_before = set([hashlink(l) for l in data_before.pop('links')])
micropsi.reload_native_modules()
data_after = micropsi.nodenets[fixed_nodenet].export_json()
links_after = set([hashlink(l) for l in data_after.pop('links')])
assert data_before == data_after
assert links_before == links_after
def test_native_module_and_recipe_categories(fixed_nodenet, resourcepath):
import os
os.mkdir(os.path.join(resourcepath, 'Test', 'Test2'))
nodetype_file = os.path.join(resourcepath, 'Test', 'nodetypes.json')
nodefunc_file = os.path.join(resourcepath, 'Test', 'nodefunctions.py')
recipe_file = os.path.join(resourcepath, 'Test', 'Test2', 'recipes.py')
with open(nodetype_file, 'w') as fp:
fp.write('{"Testnode": {\
"name": "Testnode",\
"slottypes": ["gen", "foo", "bar"],\
"nodefunction_name": "testnodefunc",\
"gatetypes": ["gen", "foo", "bar"]\
}}')
with open(nodefunc_file, 'w') as fp:
fp.write("def testnodefunc(netapi, node=None, **prams):\r\n return 17")
with open(recipe_file, 'w') as fp:
fp.write("def testrecipe(netapi):\r\n pass")
micropsi.reload_native_modules()
res = micropsi.get_available_native_module_types(fixed_nodenet)
assert res['Testnode']['category'] == 'Test'
res = micropsi.get_available_recipes()
assert res['testrecipe']['category'] == 'Test/Test2'
@pytest.mark.engine("dict_engine")
# This behavior is not available in theano_engine: Default inheritance at runtime is not implemented for
# performance reasons, changed defaults will only affect newly created nodes.
# This test will have to be replaced when the generic solution proposed in TOL-90 has been
# implemented.
def test_gate_defaults_change_with_nodetype(fixed_nodenet, resourcepath):
# gate_parameters are a property of the nodetype, and should change with
# the nodetype definition if not explicitly overwritten for a given node
import os
nodetype_file = os.path.join(resourcepath, 'Test', 'nodetypes.json')
nodefunc_file = os.path.join(resourcepath, 'Test', 'nodefunctions.py')
with open(nodetype_file, 'w') as fp:
fp.write('{"Testnode": {\
"name": "Testnode",\
"slottypes": ["gen", "foo", "bar"],\
"nodefunction_name": "testnodefunc",\
"gatetypes": ["gen", "foo", "bar"],\
"symbol": "t",\
"gate_defaults":{\
"foo": {\
"amplification": 13\
}\
}}}')
with open(nodefunc_file, 'w') as fp:
fp.write("def testnodefunc(netapi, node=None, **prams):\r\n return 17")
micropsi.reload_native_modules()
res, uid = micropsi.add_node(fixed_nodenet, "Testnode", [10, 10], name="Testnode")
with open(nodetype_file, 'w') as fp:
fp.write('{"Testnode": {\
"name": "Testnode",\
"slottypes": ["gen", "foo", "bar"],\
"nodefunction_name": "testnodefunc",\
"gatetypes": ["gen", "foo", "bar"],\
"symbol": "t",\
"gate_defaults":{\
"foo": {\
"amplification": 5\
}\
}}}')
micropsi.reload_native_modules()
params = micropsi.nodenets[fixed_nodenet].get_node(uid).get_gate_parameters()
assert params["foo"]["amplification"] == 5
def test_non_standard_gate_defaults(test_nodenet):
nodenet = micropsi.nodenets[test_nodenet]
res, uid = micropsi.add_node(test_nodenet, 'Register', [30, 30, 10], name='test')
node = nodenet.netapi.get_node(uid)
genparams = {'maximum': 0.5}
micropsi.set_gate_parameters(nodenet.uid, node.uid, 'gen', genparams)
assert node.clone_non_default_gate_parameters()['gen']['maximum'] == 0.5
assert node.get_data()['gate_parameters'] == {'gen': {'maximum': 0.5}}
assert nodenet.get_data()['nodes'][uid]['gate_parameters'] == {'gen': {'maximum': 0.5}}
data = micropsi.get_nodes(test_nodenet)
assert data['nodes'][uid]['gate_parameters'] == {'gen': {'maximum': 0.5}}
def test_ignore_links(test_nodenet):
nodes = prepare_nodenet(test_nodenet)
micropsi.add_link(test_nodenet, nodes['a'], "por", nodes['b'], "gen", 0.5, 1)
nodespace = micropsi.get_nodes(test_nodenet, [])
assert len(nodespace["nodes"]) == 4
assert 'links' not in nodespace
assert len(nodespace["nodes"][nodes['a']]['links']['por']) == 1
nodespace = micropsi.get_nodes(test_nodenet, [], include_links=False)
assert 'links' not in nodespace["nodes"][nodes['a']]
def test_remove_and_reload_native_module(fixed_nodenet, resourcepath):
import os
nodetype_file = os.path.join(resourcepath, 'Test', 'nodetypes.json')
nodefunc_file = os.path.join(resourcepath, 'Test', 'nodefunctions.py')
with open(nodetype_file, 'w') as fp:
fp.write('{"Testnode": {\
"name": "Testnode",\
"slottypes": ["gen", "foo", "bar"],\
"nodefunction_name": "testnodefunc",\
"gatetypes": ["gen", "foo", "bar"],\
"symbol": "t",\
"gate_defaults":{\
"foo": {\
"amplification": 13\
}\
}}}')
with open(nodefunc_file, 'w') as fp:
fp.write("def testnodefunc(netapi, node=None, **prams):\r\n return 17")
micropsi.reload_native_modules()
res, uid = micropsi.add_node(fixed_nodenet, "Testnode", [10, 10, 10], name="Testnode")
os.remove(nodetype_file)
os.remove(nodefunc_file)
micropsi.reload_native_modules()
assert 'Testnode' not in micropsi.get_available_native_module_types(fixed_nodenet)
@pytest.mark.engine("dict_engine")
def test_engine_specific_nodetype_dict(fixed_nodenet, resourcepath):
import os
nodetype_file = os.path.join(resourcepath, 'Test', 'nodetypes.json')
nodefunc_file = os.path.join(resourcepath, 'Test', 'nodefunctions.py')
with open(nodetype_file, 'w') as fp:
fp.write('{"Testnode": {\
"engine": "theano_engine",\
"name": "Testnode",\
"slottypes": ["gen", "foo", "bar"],\
"nodefunction_name": "testnodefunc",\
"gatetypes": ["gen", "foo", "bar"],\
"symbol": "t",\
"gate_defaults":{\
"foo": {\
"amplification": 13\
}\
}}}')
with open(nodefunc_file, 'w') as fp:
fp.write("def testnodefunc(netapi, node=None, **prams):\r\n return 17")
micropsi.reload_native_modules()
data = micropsi.get_nodenet_metadata(fixed_nodenet)
assert "Testnode" not in data['native_modules']
@pytest.mark.engine("theano_engine")
def test_engine_specific_nodetype_theano(fixed_nodenet, resourcepath):
import os
nodetype_file = os.path.join(resourcepath, 'Test', 'nodetypes.json')
nodefunc_file = os.path.join(resourcepath, 'Test', 'nodefunctions.py')
with open(nodetype_file, 'w') as fp:
fp.write('{"Testnode": {\
"engine": "dict_engine",\
"name": "Testnode",\
"slottypes": ["gen", "foo", "bar"],\
"nodefunction_name": "testnodefunc",\
"gatetypes": ["gen", "foo", "bar"],\
"symbol": "t",\
"gate_defaults":{\
"foo": {\
"amplification": 13\
}\
}}}')
with open(nodefunc_file, 'w') as fp:
fp.write("def testnodefunc(netapi, node=None, **prams):\r\n return 17")
micropsi.reload_native_modules()
data = micropsi.get_nodenet_metadata(fixed_nodenet)
assert "Testnode" not in data['native_modules']
def test_node_parameters_none_resets_to_default(fixed_nodenet):
nodenet = micropsi.nodenets[fixed_nodenet]
res, uid = micropsi.add_node(fixed_nodenet, 'Pipe', [30, 30, 10], name='test')
node = nodenet.netapi.get_node(uid)
micropsi.set_node_parameters(fixed_nodenet, node.uid, {'expectation': '', 'wait': 0})
assert node.get_parameter('expectation') == 1
assert node.get_parameter('wait') == 0
def test_get_recipes(fixed_nodenet, resourcepath):
import os
recipe_file = os.path.join(resourcepath, 'Test', 'recipes.py')
with open(recipe_file, 'w') as fp:
fp.write("""
def testfoo(netapi, count=23):
return {'count':count}
""")
micropsi.reload_native_modules()
recipes = micropsi.get_available_recipes()
assert 'testfoo' in recipes
assert len(recipes['testfoo']['parameters']) == 1
assert recipes['testfoo']['parameters'][0]['name'] == 'count'
assert recipes['testfoo']['parameters'][0]['default'] == 23
def test_run_recipe(fixed_nodenet, resourcepath):
import os
recipe_file = os.path.join(resourcepath, 'Test', 'recipes.py')
with open(recipe_file, 'w') as fp:
fp.write("""
def testfoo(netapi, count=23):
return {'count':count}
""")
micropsi.reload_native_modules()
state, result = micropsi.run_recipe(fixed_nodenet, 'testfoo', {'count': 42})
assert state
assert result['count'] == 42
def test_node_parameter_defaults(fixed_nodenet, resourcepath):
import os
nodetype_file = os.path.join(resourcepath, 'Test', 'nodetypes.json')
nodefunc_file = os.path.join(resourcepath, 'Test', 'nodefunctions.py')
with open(nodetype_file, 'w') as fp:
fp.write('{"Testnode": {\
"name": "Testnode",\
"slottypes": ["gen", "foo", "bar"],\
"gatetypes": ["gen", "foo", "bar"],\
"nodefunction_name": "testnodefunc",\
"parameters": ["testparam"],\
"parameter_defaults": {\
"testparam": 13\
}\
}}')
with open(nodefunc_file, 'w') as fp:
fp.write("def testnodefunc(netapi, node=None, **prams):\r\n return 17")
micropsi.reload_native_modules()
res, uid = micropsi.add_node(fixed_nodenet, "Testnode", [10, 10, 10], name="Test")
node = micropsi.nodenets[fixed_nodenet].get_node(uid)
assert node.get_parameter("testparam") == 13
def test_node_parameters_from_persistence(fixed_nodenet, resourcepath):
import os
nodetype_file = os.path.join(resourcepath, 'Test', 'nodetypes.json')
nodefunc_file = os.path.join(resourcepath, 'Test', 'nodefunctions.py')
with open(nodetype_file, 'w') as fp:
fp.write('{"Testnode": {\
"name": "Testnode",\
"slottypes": ["gen", "foo", "bar"],\
"gatetypes": ["gen", "foo", "bar"],\
"nodefunction_name": "testnodefunc",\
"parameters": ["testparam"],\
"parameter_defaults": {\
"testparam": 13\
}\
}}')
with open(nodefunc_file, 'w') as fp:
fp.write("def testnodefunc(netapi, node=None, **prams):\r\n return 17")
micropsi.reload_native_modules()
res, uid = micropsi.add_node(fixed_nodenet, "Testnode", [10, 10, 10], name="Test")
node = micropsi.nodenets[fixed_nodenet].get_node(uid)
node.set_parameter("testparam", 42)
micropsi.save_nodenet(fixed_nodenet)
micropsi.revert_nodenet(fixed_nodenet)
node = micropsi.nodenets[fixed_nodenet].get_node(uid)
assert node.get_parameter("testparam") == 42
```
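Several of the tests above declare a native module by writing the same two files into the resource path, a `nodetypes.json` with the type definition and a `nodefunctions.py` with the node function, and then calling `micropsi.reload_native_modules()`. A standalone sketch of that pattern follows; the `Testnode` type and the file layout are taken from the tests, while the helper name itself is made up.
```python
import json
import os
def write_test_native_module(resourcepath):
    """Write a minimal native-module definition the way the tests above do."""
    definition = {
        "Testnode": {
            "name": "Testnode",
            "slottypes": ["gen", "foo", "bar"],
            "gatetypes": ["gen", "foo", "bar"],
            "nodefunction_name": "testnodefunc",
            "parameters": ["testparam"],
            "parameter_defaults": {"testparam": 13}
        }
    }
    os.makedirs(os.path.join(resourcepath, 'Test'), exist_ok=True)
    with open(os.path.join(resourcepath, 'Test', 'nodetypes.json'), 'w') as fp:
        json.dump(definition, fp)
    with open(os.path.join(resourcepath, 'Test', 'nodefunctions.py'), 'w') as fp:
        fp.write("def testnodefunc(netapi, node=None, **params):\n    return 17\n")
    # the tests then call micropsi.reload_native_modules() so the new type
    # becomes available to add_node / get_available_native_module_types
```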
#### File: world/island/island.py
```python
import math
import os
import logging
from micropsi_core.world.world import World
from micropsi_core.world.worldadapter import WorldAdapter
from micropsi_core.world.worldobject import WorldObject
from micropsi_core.world.island import png
class Island(World):
""" A simple Doerner Island-World"""
supported_worldadapters = ['Braitenberg', 'Survivor', 'StructuredObjects']
groundmap = {
'image': "psi_1.png",
'start_position': (700, 400),
'scaling': (8, 8)
}
assets = {
'background': "island/psi_1.png",
'template': 'island/island.tpl',
'paperjs': "island/island.js",
'x': 2048,
'y': 2048,
'icons': {
'Lightsource': 'island/lamp.png',
'Braitenberg': 'island/braitenberg.png',
'Survivor': 'island/Micropsi.png',
'PalmTree': 'island/palm-tree.png',
'Maple': 'island/maple.png',
'Braintree': 'island/braintree.png',
'Wirselkraut': 'island/wirselkraut.png',
'Thornbush': 'island/unknownbox.png',
'Juniper': 'island/juniper-berries.png',
'Champignon': 'island/boletus-edulis.png',
'FlyAgaric': 'island/fly-agaris.png',
'Stone': 'island/rock.png',
'Boulder': 'island/boulder.png',
'Menhir': 'island/menhir.png',
'Waterhole': 'island/well.png'
}
}
def __init__(self, filename, world_type="Island", name="", owner="", engine=None, uid=None, version=1, config={}):
World.__init__(self, filename, world_type=world_type, name=name, owner=owner, uid=uid, version=version)
self.load_groundmap()
# self.current_step = 0
self.data['assets'] = self.assets
def load_groundmap(self):
"""
Imports a groundmap for an island world from a png file. We expect a bitdepth of 8 (i.e. each pixel defines
a point with one of 256 possible values).
"""
filename = os.path.join(os.path.dirname(__file__), 'resources', 'groundmaps', self.groundmap["image"])
with open(filename, 'rb') as file:
png_reader = png.Reader(file)
x, y, image_array, image_params = png_reader.read()
self.ground_data = list(image_array)
self.scale_x = self.groundmap["scaling"][0]
self.scale_y = self.groundmap["scaling"][1]
self.x_max = x - 1
self.y_max = y - 1
def get_ground_at(self, x, y):
"""
returns the ground type (an integer) at the given position
"""
_x = int(min(self.x_max, max(0, round(x / self.scale_x))))
_y = int(min(self.y_max, max(0, round(y / self.scale_y))))
return self.ground_data[_y][_x]
def get_brightness_at(self, position):
"""calculate the brightness of the world at the given position; used by sensors of agents"""
brightness = 0
for key in self.objects:
if hasattr(self.objects[key], "get_intensity"):
# adapted from micropsi1
pos = self.objects[key].position
diff = (pos[0] - position[0], pos[1] - position[1])
dist = _2d_vector_norm(diff) + 1
lightness = self.objects[key].get_intensity()
                brightness += lightness / dist / dist
return brightness
def get_movement_result(self, start_position, effort_vector, diameter=0):
"""determine how much an agent moves in the direction of the effort vector, starting in the start position.
Note that agents may be hindered by impassable terrain and other objects"""
efficiency = ground_types[self.get_ground_at(*start_position)]['move_efficiency']
if not efficiency:
return start_position
movement_vector = (effort_vector[0] * efficiency, effort_vector[1] * efficiency)
# make sure we don't bump into stuff
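        # if a collision is detected the movement vector is halved and retried, until either a free
        # spot is found or the remaining step is negligible (squared length <= 0.01)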
target_position = None
while target_position is None and _2d_distance_squared((0, 0), movement_vector) > 0.01:
target_position = _2d_translate(start_position, movement_vector)
for i in self.objects.values():
if _2d_distance_squared(target_position, i.position) < (diameter + i.diameter) / 2:
movement_vector = (movement_vector[0] * 0.5, movement_vector[1] * 0.5) # should be collision point
target_position = None
break
if target_position is not None and ground_types[self.get_ground_at(target_position[0], target_position[1])]['agent_allowed']:
return target_position
else:
return start_position
class Lightsource(WorldObject):
"""A pretty inert and boring light source, with a square falloff"""
@property
def diameter(self):
return self.data.get('diameter', 1.)
@diameter.setter
def diameter(self, diameter):
self.data['diameter'] = diameter
@property
def intensity(self):
return self.data.get('intensity', 10000.)
@intensity.setter
def intensity(self, intensity):
self.data['intensity'] = intensity
def __init__(self, world, uid=None, **data):
WorldObject.__init__(self, world, category="objects", uid=uid, **data)
def get_intensity(self, falloff_func=1.):
"""returns the strength of the light, optionally depending on a given fall-off function"""
return self.intensity * self.diameter * self.diameter / falloff_func
def action_eat(self):
return True, 0, 0, -0.7
def action_drink(self):
return False, 0, 0, 0
class PalmTree(WorldObject):
def __init__(self, world, uid=None, **data):
WorldObject.__init__(self, world, category="objects", uid=uid, **data)
self.structured_object_type = "PalmTree"
def action_eat(self):
return False, 0, 0, 0
def action_drink(self):
return False, 0, 0, 0
class Maple(WorldObject):
def __init__(self, world, uid=None, **data):
WorldObject.__init__(self, world, category="objects", uid=uid, **data)
self.structured_object_type = "Maple"
def action_eat(self):
return False, 0, 0, 0
def action_drink(self):
return False, 0, 0, 0
class Braintree(WorldObject):
def __init__(self, world, uid=None, **data):
WorldObject.__init__(self, world, category="objects", uid=uid, **data)
self.structured_object_type = "Braintree"
def action_eat(self):
return False, 0, 0, 0
def action_drink(self):
return False, 0, 0, 0
class Wirselkraut(WorldObject):
def __init__(self, world, uid=None, **data):
WorldObject.__init__(self, world, category="objects", uid=uid, **data)
self.structured_object_type = "Wirselkraut"
def action_eat(self):
return True, 0, 0, 0.5
def action_drink(self):
return False, 0, 0, 0
class Thornbush(WorldObject):
def __init__(self, world, uid=None, **data):
WorldObject.__init__(self, world, category="objects", uid=uid, **data)
self.structured_object_type = "Thornbush"
def action_eat(self):
logging.getLogger("world").debug("... and the whirlwind is in the thorn tree...")
return True, 0, 0, -0.1
def action_drink(self):
return False, 0, 0, 0
class Juniper(WorldObject):
def __init__(self, world, uid=None, **data):
WorldObject.__init__(self, world, category="objects", uid=uid, **data)
self.structured_object_type = "Juniper"
def action_eat(self):
return True, 0.1, 0.1, 0
def action_drink(self):
return False, 0, 0, 0
class Champignon(WorldObject):
def __init__(self, world, uid=None, **data):
WorldObject.__init__(self, world, category="objects", uid=uid, **data)
self.structured_object_type = "Champignon"
def action_eat(self):
return True, 0.3, 0, 0
def action_drink(self):
return True, 0, 0, 0
class FlyAgaric(WorldObject):
def __init__(self, world, uid=None, **data):
WorldObject.__init__(self, world, category="objects", uid=uid, **data)
self.structured_object_type = "FlyAgaric"
def action_eat(self):
return True, 0.1, 0, -0.9
def action_drink(self):
return False, 0, 0, 0
class Stone(WorldObject):
def __init__(self, world, uid=None, **data):
WorldObject.__init__(self, world, category="objects", uid=uid, **data)
self.structured_object_type = "Stone"
def action_eat(self):
return False, 0, 0, 0
def action_drink(self):
return False, 0, 0, 0
class Boulder(WorldObject):
def __init__(self, world, uid=None, **data):
WorldObject.__init__(self, world, category="objects", uid=uid, **data)
self.structured_object_type = "Boulder"
def action_eat(self):
return False, 0, 0, 0
def action_drink(self):
return False, 0, 0, 0
class Menhir(WorldObject):
def __init__(self, world, uid=None, **data):
WorldObject.__init__(self, world, category="objects", uid=uid, **data)
self.structured_object_type = "Menhir"
def action_eat(self):
return False, 0, 0, 0
def action_drink(self):
return False, 0, 0, 0
class Waterhole(WorldObject):
def __init__(self, world, uid=None, **data):
WorldObject.__init__(self, world, category="objects", uid=uid, **data)
self.structured_object_type = "Waterhole"
def action_eat(self):
return False, 0, 0, 0
def action_drink(self):
return True, 0, 1, 0
class Survivor(WorldAdapter):
def __init__(self, world, uid=None, **data):
super(Survivor, self).__init__(world, uid, **data)
self.datasources = dict((s, 0) for s in ['body-energy', 'body-water', 'body-integrity'])
self.datatargets = dict((t, 0) for t in ['action_eat', 'action_drink', 'loco_north', 'loco_south', 'loco_east', 'loco_west'])
self.currentobject = None
self.energy = 1.0
self.water = 1.0
self.integrity = 1.0
self.is_dead = False
self.action_cooloff = 5
self.datasources['body-energy'] = self.energy
self.datasources['body-water'] = self.water
self.datasources['body-integrity'] = self.integrity
def initialize_worldobject(self, data):
if "position" not in data:
self.position = self.world.groundmap['start_position']
def update_data_sources_and_targets(self):
"""called on every world calculation step to advance the life of the agent"""
if self.is_dead:
return
effortvector = ((50*self.datatargets['loco_east'])+(50 * -self.datatargets['loco_west']),
                        (50*self.datatargets['loco_north'])+(50 * -self.datatargets['loco_south']))
desired_position = (self.position[0] + effortvector[0], self.position[1] + effortvector[1])
self.datatargets['loco_east'] = 0
self.datatargets['loco_west'] = 0
self.datatargets['loco_north'] = 0
self.datatargets['loco_south'] = 0
if ground_types[self.world.get_ground_at(desired_position[0], desired_position[1])]['agent_allowed']:
self.position = desired_position
#find nearest object to load into the scene
lowest_distance_to_worldobject = float("inf")
nearest_worldobject = None
for key, worldobject in self.world.objects.items():
# TODO: use a proper 2D geometry library
distance = _2d_distance_squared(self.position, worldobject.position)
if distance < lowest_distance_to_worldobject:
lowest_distance_to_worldobject = distance
nearest_worldobject = worldobject
if self.currentobject is not nearest_worldobject and hasattr(nearest_worldobject, "structured_object_type"):
self.currentobject = nearest_worldobject
logging.getLogger("agent.%s" % self.uid).debug("Survivor WA selected new scene: %s",
self.currentobject.structured_object_type)
self.manage_body_parameters(nearest_worldobject)
def manage_body_parameters(self, nearest_worldobject):
"""called by update() to update energy, water and integrity"""
for datatarget in self.datatargets:
if datatarget.startswith("action_"):
self.datatarget_feedback[datatarget] = 0
if self.datatargets[datatarget] >= 1 and self.action_cooloff <= 0:
self.datatargets[datatarget] = 0
                if hasattr(nearest_worldobject, datatarget):
                    # dispatch to the matching handler (action_eat or action_drink) on the object
                    cando, delta_energy, delta_water, delta_integrity = getattr(nearest_worldobject, datatarget)()
else:
cando, delta_energy, delta_water, delta_integrity = False, 0, 0, 0
if cando:
self.action_cooloff = 6
self.energy += delta_energy
self.water += delta_water
self.integrity += delta_integrity
self.datatarget_feedback[datatarget] = 1
logging.getLogger("agent.%s" % self.uid).debug("Agent "+self.name+" "+ datatarget +
"("+nearest_worldobject.data["type"]+") result: "+
" energy "+str(delta_energy)+
" water "+str(delta_water)+
" integrity "+str(delta_integrity))
else:
logging.getLogger("agent.%s" % self.uid).debug("Agent "+self.name+" "+ datatarget +
"("+nearest_worldobject.data["type"]+") result: "+
"cannot do.")
self.action_cooloff -= 1
self.energy -= 0.005
self.water -= 0.005
if self.energy > 1: self.energy = 1
if self.water > 1: self.water = 1
if self.integrity > 1: self.integrity = 1
if self.energy <= 0 or self.water <= 0 or self.integrity <= 0:
self.is_dead = True
logging.getLogger("agent.%s" % self.uid).debug("Agent "+self.name+" has died:"+
" energy "+str(self.energy)+
" water "+str(self.water)+
" integrity "+str(self.integrity))
self.datasources["body-energy"] = self.energy
self.datasources["body-water"] = self.water
self.datasources["body-integrity"] = self.integrity
def is_alive(self):
"""called by the world to check whether the agent has died and should be removed"""
return not self.is_dead
class Braitenberg(WorldAdapter):
"""A simple Braitenberg vehicle chassis, with two light sensitive sensors and two engines"""
# positions of sensors, relative to origin of agent center
brightness_l_offset = (-25, -50)
brightness_r_offset = (+25, -50)
# positions of engines, relative to origin of agent center
engine_l_offset = (-25, 0)
engine_r_offset = (+25, 0)
# agent diameter
diameter = 50 # note: this is also used as the distance between the wheels
radius = 25
# maximum speed
speed_limit = 1.
def __init__(self, world, uid=None, **data):
super(Braitenberg, self).__init__(world, uid, **data)
self.datasources = {'brightness_l': 0, 'brightness_r': 0}
self.datatargets = {'engine_l': 0, 'engine_r': 0}
self.datatarget_feedback = {'engine_l': 0, 'engine_r': 0}
def initialize_worldobject(self, data):
if "position" not in data:
self.position = self.world.groundmap['start_position']
def update_data_sources_and_targets(self):
"""called on every world calculation step to advance the life of the agent"""
# drive engines
l_wheel_speed = self.datatargets["engine_l"]
r_wheel_speed = self.datatargets["engine_r"]
# constrain speed
if l_wheel_speed + r_wheel_speed > 2 * self.speed_limit: # too fast
f = 2 * self.speed_limit / (l_wheel_speed + r_wheel_speed)
r_wheel_speed *= f
l_wheel_speed *= f
        # (left - right) because of the inverted rotation circle (x is unaffected because of the cosine, y changes because of the sine)
rotation = math.degrees((self.radius * l_wheel_speed - self.radius * r_wheel_speed) / self.diameter)
self.orientation += rotation
avg_velocity = (self.radius * r_wheel_speed + self.radius * l_wheel_speed) / 2
translation = _2d_rotate((0, avg_velocity), self.orientation + rotation)
        # you may decide how far you want to go, but it is up to the world to decide how far you make it
self.position = self.world.get_movement_result(self.position, translation, self.diameter)
# sense light sources
brightness_l_position = _2d_translate(_2d_rotate(self.brightness_l_offset, self.orientation), self.position)
brightness_r_position = _2d_translate(_2d_rotate(self.brightness_r_offset, self.orientation), self.position)
brightness_l = self.world.get_brightness_at(brightness_l_position)
brightness_r = self.world.get_brightness_at(brightness_r_position)
self.datasources['brightness_l'] = brightness_l
self.datasources['brightness_r'] = brightness_r
def _2d_rotate(position, angle_degrees):
"""rotate a 2d vector around an angle (in degrees)"""
radians = math.radians(angle_degrees)
# take the negative of the angle because the orientation circle works clockwise in this world
cos = math.cos(-radians)
sin = math.sin(-radians)
x, y = position
return x * cos - y * sin, - (x * sin + y * cos)
def _2d_distance_squared(position1, position2):
"""calculate the square of the distance bwtween two 2D coordinate tuples"""
return (position1[0] - position2[0]) ** 2 + (position1[1] - position2[1]) ** 2
def _2d_translate(position1, position2):
"""add two 2d vectors"""
return (position1[0] + position2[0], position1[1] + position2[1])
def _2d_vector_norm(vector):
"""Calculates the length /norm of a given vector."""
return math.sqrt(sum(i**2 for i in vector))
# the indices of ground types correspond to the color numbers in the groundmap png
ground_types = (
{
'type': 'grass',
'move_efficiency': 1.0,
'agent_allowed': True,
},
{
'type': 'sand',
'move_efficiency': 1.0,
'agent_allowed': True,
},
{
'type': 'swamp',
'move_efficiency': 0.5,
'agent_allowed': True,
},
{
'type': 'darkgrass',
'move_efficiency': 1.0,
'agent_allowed': True,
},
{
'type': 'shallowwater',
'move_efficiency': 0.2,
'agent_allowed': True,
},
{
'type': 'rock',
'move_efficiency': 1.0,
'agent_allowed': True,
},
{
'type': 'clay',
'move_efficiency': 0.7,
'agent_allowed': True,
},
{
'type': 'water',
'move_efficiency': 0.0,
'agent_allowed': False,
},
{
'type': 'cliff',
'move_efficiency': 1.0,
'agent_allowed': False,
}
)
```
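The Braitenberg update above is a plain differential-drive step: the wheel-speed difference turns the agent, the wheel-speed average moves it forward, and `_2d_rotate` maps that forward motion into world coordinates. The following self-contained walk-through evaluates exactly those formulas with made-up wheel speeds and a starting orientation of zero.
```python
import math
def _2d_rotate(position, angle_degrees):
    # same formula as in island.py above
    radians = math.radians(angle_degrees)
    cos, sin = math.cos(-radians), math.sin(-radians)
    x, y = position
    return x * cos - y * sin, -(x * sin + y * cos)
radius, diameter = 25, 50                  # values from the Braitenberg class
l_wheel_speed, r_wheel_speed = 1.0, 0.5    # made-up engine datatargets
orientation = 0.0
rotation = math.degrees((radius * l_wheel_speed - radius * r_wheel_speed) / diameter)
avg_velocity = (radius * r_wheel_speed + radius * l_wheel_speed) / 2
translation = _2d_rotate((0, avg_velocity), orientation + rotation)
print(round(rotation, 2))                  # ~14.32 degrees of turn this step
print(round(avg_velocity, 2))              # 18.75 units of forward travel
print([round(c, 2) for c in translation])  # ~[4.64, -18.17] in world coordinates
```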
#### File: island/structured_objects/structured_objects.py
```python
__author__ = 'rvuine'
import logging
from micropsi_core.world.island import island
from micropsi_core.world.island.structured_objects.objects import *
from micropsi_core.world.island.structured_objects.scene import Scene
from micropsi_core.world.worldadapter import WorldAdapter
class StructuredObjects(WorldAdapter):
"""A world adapter exposing objects composed of basic shapes and colors to the agent"""
def __init__(self, world, uid=None, **data):
super(StructuredObjects, self).__init__(world, uid, **data)
self.datasources = {'fov-x': 0, 'fov-y': 0, 'major-newscene': 0}
self.datatargets = {'fov_x': 0, 'fov_y': 0, 'fov_reset': 0}
self.shapetypes = []
self.shapecolors = []
for key, objecttype in OBJECTS.items():
for shapeline in objecttype['shape_grid']:
for shape in shapeline:
if shape is not None and shape.type not in self.shapetypes:
self.shapetypes.append(shape.type)
if shape is not None and shape.color not in self.shapecolors:
self.shapecolors.append(shape.color)
for shapetype in self.shapetypes:
self.datasources['fovea-' + shapetype] = 0
self.datasources['presence-' + shapetype] = 0
for shapecolor in self.shapecolors:
self.datasources["fovea-" + shapecolor] = 0
self.datasources["presence-" + shapecolor] = 0
self.currentobject = None
self.scene = None
self.scene = Scene(world, uid)
self.scene.load_object("PalmTree", OBJECTS["PalmTree"]["shape_grid"])
def initialize_worldobject(self, data):
if "position" not in data:
self.position = self.world.groundmap['start_position']
def get_datasource(self, key):
"""
allows the agent to read a value from a datasource.
overrides default to make sure newscene signals are picked up by the node net
"""
if key == "major-newscene":
if self.datasource_snapshots[key] == 1:
self.datasources[key] = 0
return 1
else:
return WorldAdapter.get_datasource(self, key)
def update_data_sources_and_targets(self):
"""called on every world calculation step to advance the life of the agent"""
# we don't move, for now
self.position = self.world.get_movement_result(self.position, (0, 0))
#find nearest object to load into the scene
lowest_distance_to_worldobject = float("inf")
nearest_worldobject = None
for key, worldobject in self.world.objects.items():
# TODO: use a proper 2D geometry library
distance = island._2d_distance_squared(self.position, worldobject.position)
if distance < lowest_distance_to_worldobject:
lowest_distance_to_worldobject = distance
nearest_worldobject = worldobject
if self.currentobject is not nearest_worldobject and nearest_worldobject.structured_object_type is not None:
self.currentobject = nearest_worldobject
self.scene.load_object(self.currentobject.structured_object_type,
OBJECTS[self.currentobject.structured_object_type]['shape_grid'])
self.datasources["major-newscene"] = 1
logging.getLogger("agent.%s" % self.uid).debug("StructuredObjects WA selected new scene: %s",
self.currentobject.structured_object_type)
#manage the scene
if self.datatargets['fov_reset'] > 0:
self.scene.reset_fovea()
self.scene.move_fovea_x(self.datatargets['fov_x'])
self.scene.move_fovea_y(self.datatargets['fov_y'])
self.datasources["fov-x"] = self.scene.fovea_x
self.datasources["fov-y"] = self.scene.fovea_y
for shapetype in self.shapetypes:
self.datasources["fovea-"+shapetype] = 1 if self.scene.is_fovea_on_shape_type(shapetype) else 0
self.datasources["presence-"+shapetype] = 1 if self.scene.is_shapetype_in_scene(shapetype) else 0
for shapecolor in self.shapecolors:
self.datasources["fovea-"+shapecolor] = 1 if self.scene.is_fovea_on_shape_color(shapecolor) else 0
self.datasources["presence-"+shapecolor] = 1 if self.scene.is_shapecolor_in_scene(shapecolor) else 0
```
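The StructuredObjects adapter derives its datasource names from the shape inventory: one `fovea-*` and one `presence-*` source per shape type and per color, next to the fixed fovea-position and new-scene sources. A tiny sketch of the resulting names for a made-up inventory; the real types and colors come from the OBJECTS table in structured_objects/objects.py, which is not part of this excerpt.
```python
shapetypes = ["circle", "triangle"]   # made-up shape types
shapecolors = ["red", "green"]        # made-up shape colors
datasources = ["fov-x", "fov-y", "major-newscene"]
for name in shapetypes + shapecolors:
    datasources += ["fovea-" + name, "presence-" + name]
print(datasources)
# ['fov-x', 'fov-y', 'major-newscene', 'fovea-circle', 'presence-circle',
#  'fovea-triangle', 'presence-triangle', 'fovea-red', 'presence-red',
#  'fovea-green', 'presence-green']
```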
#### File: micropsi_server/tests/test_json_api.py
```python
import pytest
import json
import re
def assert_success(response):
assert response.json_body['status'] == 'success'
assert 'data' in response.json_body
def assert_failure(response):
assert response.json_body['status'] == 'error'
assert 'data' in response.json_body
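# Calls in this suite are made either as app.get_json with the arguments inlined into the /rpc/... URL,
# or as app.post_json with a JSON params body; app.set_auth() is used where the RPC requires an
# authenticated user.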
def test_generate_uid(app):
response = app.get_json('/rpc/generate_uid()')
assert_success(response)
assert re.match('[a-f0-9]+', response.json_body['data']) is not None
def test_create_and_invalidate_auth_token(app):
response = app.get_json('/rpc/create_auth_token(user="Pytest User",password="<PASSWORD>")')
assert_success(response)
from micropsi_server.micropsi_app import usermanager
token = response.json_body['data']
assert token in usermanager.users['Pytest User']['sessions']
response = app.get_json('/rpc/invalidate_auth_token(token="%s")' % token)
assert_success(response)
assert token not in usermanager.users['Pytest User']['sessions']
def test_get_nodenet_metadata(app, test_nodenet, node):
response = app.get_json('/rpc/get_nodenet_metadata(nodenet_uid="%s")' % test_nodenet)
assert_success(response)
data = response.json_body['data']
assert 'nodetypes' in data
assert 'native_modules' in data
assert 'engine' in data
assert 'nodespaces' in data
assert 'nodes' not in data
assert 'links' not in data
assert data['current_step'] == 0
assert data['uid'] == test_nodenet
def test_new_nodenet(app, engine):
app.set_auth()
response = app.post_json('/rpc/new_nodenet', params={
'name': 'FooBarTestNet',
'engine': engine
})
assert_success(response)
uid = response.json_body['data']
assert uid is not None
response = app.get_json('/rpc/get_nodenet_metadata(nodenet_uid="%s")' % uid)
assert_success(response)
assert response.json_body['data']['name'] == 'FooBarTestNet'
assert response.json_body['data']['engine'] == engine
def test_get_available_nodenets(app, test_nodenet):
response = app.get_json('/rpc/get_available_nodenets(user_id="Pytest User")')
assert_success(response)
assert test_nodenet in response.json_body['data']
def test_delete_nodenet(app, test_nodenet):
response = app.get_json('/rpc/delete_nodenet(nodenet_uid="%s")' % test_nodenet)
assert_success(response)
response = app.get_json('/rpc/get_available_nodenets(user_id="Pytest User")')
assert test_nodenet not in response.json_body['data']
def test_set_nodenet_properties(app, test_nodenet, test_world):
app.set_auth()
response = app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, nodenet_name="new_name", worldadapter="Braitenberg", world_uid=test_world))
assert_success(response)
response = app.get_json('/rpc/get_nodenet_metadata(nodenet_uid="%s")' % test_nodenet)
data = response.json_body['data']
assert data['name'] == 'new_name'
assert data['worldadapter'] == 'Braitenberg'
def test_set_node_state(app, test_nodenet, resourcepath):
import os
app.set_auth()
# create a native module:
nodetype_file = os.path.join(resourcepath, 'Test', 'nodetypes.json')
nodefunc_file = os.path.join(resourcepath, 'Test', 'nodefunctions.py')
with open(nodetype_file, 'w') as fp:
fp.write('{"Testnode": {\
"name": "Testnode",\
"slottypes": ["gen", "foo", "bar"],\
"nodefunction_name": "testnodefunc",\
"gatetypes": ["gen", "foo", "bar"],\
"symbol": "t"}}')
with open(nodefunc_file, 'w') as fp:
fp.write("def testnodefunc(netapi, node=None, **prams):\r\n return 17")
response = app.get_json('/rpc/reload_native_modules()')
assert_success(response)
response = app.post_json('/rpc/add_node', params={
'nodenet_uid': test_nodenet,
'type': 'Testnode',
'position': [23, 23, 12],
'nodespace': None,
'name': 'Testnode'
})
assert_success(response)
uid = response.json_body['data']
response = app.post_json('/rpc/set_node_state', params={
'nodenet_uid': test_nodenet,
'node_uid': uid,
'state': {'foo': 'bar'}
})
assert_success(response)
response = app.get_json('/rpc/get_nodes(nodenet_uid="%s")' % test_nodenet)
assert response.json_body['data']['nodes'][uid]['state'] == {'foo': 'bar'}
def test_set_node_activation(app, test_nodenet, node):
response = app.post_json('/rpc/set_node_activation', params={
'nodenet_uid': test_nodenet,
'node_uid': node,
'activation': '0.734'
})
assert_success(response)
response = app.get_json('/rpc/get_nodes(nodenet_uid="%s")' % test_nodenet)
sheaves = response.json_body['data']['nodes'][node]['sheaves']
assert float("%.3f" % sheaves['default']['activation']) == 0.734
def test_start_calculation(app, test_nodenet):
app.set_auth()
response = app.post_json('/rpc/start_calculation', params=dict(nodenet_uid=test_nodenet))
assert_success(response)
response = app.get_json('/rpc/get_nodenet_metadata(nodenet_uid="%s")' % test_nodenet)
assert response.json_body['data']['is_active']
def test_start_calculation_with_condition(app, test_nodenet):
import time
app.set_auth()
app.post_json('/rpc/set_runner_properties', params={
'timestep': 10,
'factor': 1
})
response = app.post_json('/rpc/set_runner_condition', params={
'nodenet_uid': test_nodenet,
'steps': '2'
})
assert_success(response)
assert response.json_body['data']['step'] == 2
response = app.post_json('/rpc/start_calculation', params=dict(nodenet_uid=test_nodenet))
assert_success(response)
time.sleep(1)
response = app.get_json('/rpc/get_nodenet_metadata(nodenet_uid="%s")' % test_nodenet)
assert not response.json_body['data']['is_active']
assert response.json_body['data']['current_step'] == 2
response = app.post_json('/rpc/remove_runner_condition', params=dict(nodenet_uid=test_nodenet))
assert_success(response)
def test_get_runner_properties(app):
app.set_auth()
response = app.get_json('/rpc/get_runner_properties()')
assert_success(response)
assert 'timestep' in response.json_body['data']
assert 'factor' in response.json_body['data']
def test_set_runner_properties(app):
app.set_auth()
response = app.post_json('/rpc/set_runner_properties', params=dict(timestep=123, factor=1))
assert_success(response)
response = app.get_json('/rpc/get_runner_properties()')
assert_success(response)
assert response.json_body['data']['timestep'] == 123
assert response.json_body['data']['factor'] == 1
def test_get_is_calculation_running(app, test_nodenet):
response = app.get_json('/rpc/get_is_calculation_running(nodenet_uid="%s")' % test_nodenet)
assert_success(response)
assert not response.json_body['data']
def test_stop_calculation(app, test_nodenet):
app.set_auth()
response = app.post_json('/rpc/start_calculation', params=dict(nodenet_uid=test_nodenet))
assert_success(response)
response = app.get_json('/rpc/get_is_calculation_running(nodenet_uid="%s")' % test_nodenet)
assert_success(response)
assert response.json_body['data']
response = app.post_json('/rpc/stop_calculation', params=dict(nodenet_uid=test_nodenet))
assert_success(response)
response = app.get_json('/rpc/get_is_calculation_running(nodenet_uid="%s")' % test_nodenet)
assert_success(response)
assert not response.json_body['data']
def test_step_calculation(app, test_nodenet):
app.set_auth()
response = app.get_json('/rpc/get_nodenet_metadata(nodenet_uid="%s")' % test_nodenet)
assert response.json_body['data']['current_step'] == 0
response = app.get_json('/rpc/step_calculation(nodenet_uid="%s")' % test_nodenet)
assert_success(response)
assert response.json_body['data'] == 1
response = app.get_json('/rpc/get_nodenet_metadata(nodenet_uid="%s")' % test_nodenet)
assert response.json_body['data']['current_step'] == 1
def test_get_calculation_state(app, test_nodenet, test_world, node):
from time import sleep
app.set_auth()
response = app.get_json('/rpc/get_nodenet_metadata(nodenet_uid="%s")' % test_nodenet)
assert response.json_body['data']['current_step'] == 0
response = app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, nodenet_name="new_name", worldadapter="Braitenberg", world_uid=test_world))
response = app.post_json('/rpc/add_gate_monitor', params={
'nodenet_uid': test_nodenet,
'node_uid': node,
'gate': 'sub',
})
monitor_uid = response.json_body['data']
response = app.get_json('/rpc/step_calculation(nodenet_uid="%s")' % test_nodenet)
assert_success(response)
response = app.get_json('/rpc/start_calculation(nodenet_uid="%s")' % test_nodenet)
assert_success(response)
sleep(1)
response = app.post_json('/rpc/get_calculation_state', params={
'nodenet_uid': test_nodenet,
'nodenet': {
'nodespaces': [None],
'step': -1,
},
'monitors': {
'logger': ['system', 'world', 'nodenet'],
'after': 0,
'monitor_from': 2,
'monitor_count': 2
},
'world': {
'step': -1
}
})
data = response.json_body['data']
assert data['current_nodenet_step'] > 0
assert data['current_world_step'] > 0
assert data['calculation_running']
assert 'servertime' in data['monitors']['logs']
assert 'logs' in data['monitors']['logs']
assert len(data['monitors']['monitors'][monitor_uid]['values']) == 2
assert test_nodenet in data['world']['agents']
assert data['world']['current_step'] > 0
def test_revert_nodenet(app, test_nodenet, test_world):
app.set_auth()
response = app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, nodenet_name="new_name", worldadapter="Braitenberg", world_uid=test_world))
assert_success(response)
response = app.get_json('/rpc/revert_nodenet(nodenet_uid="%s")' % test_nodenet)
assert_success(response)
response = app.get_json('/rpc/get_nodenet_metadata(nodenet_uid="%s")' % test_nodenet)
data = response.json_body['data']
assert data['name'] == 'Testnet'
assert data['worldadapter'] is None
def test_revert_both(app, test_nodenet, test_world):
app.set_auth()
app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, worldadapter="Braitenberg", world_uid=test_world))
for i in range(5):
app.get_json('/rpc/step_calculation(nodenet_uid="%s")' % test_nodenet)
res = app.get_json('/rpc/get_calculation_state(nodenet_uid="%s")' % test_nodenet)
assert res.json_body['data']['current_nodenet_step'] > 0
assert res.json_body['data']['current_world_step'] > 0
app.get_json('/rpc/revert_calculation(nodenet_uid="%s")' % test_nodenet)
res = app.get_json('/rpc/get_calculation_state(nodenet_uid="%s")' % test_nodenet)
assert res.json_body['data']['current_nodenet_step'] == 0
assert res.json_body['data']['current_world_step'] == 0
def test_save_nodenet(app, test_nodenet, test_world):
app.set_auth()
response = app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, nodenet_name="new_name", worldadapter="Braitenberg", world_uid=test_world))
assert_success(response)
response = app.get_json('/rpc/save_nodenet(nodenet_uid="%s")' % test_nodenet)
assert_success(response)
response = app.get_json('/rpc/revert_nodenet(nodenet_uid="%s")' % test_nodenet)
assert_success(response)
response = app.get_json('/rpc/get_nodenet_metadata(nodenet_uid="%s")' % test_nodenet)
data = response.json_body['data']
assert data['name'] == 'new_name'
assert data['worldadapter'] == 'Braitenberg'
# now delete the nodenet, to get default state back.
app.get_json('/rpc/delete_nodenet(nodenet_uid="%s")' % test_nodenet)
def test_export_nodenet(app, test_nodenet, node):
response = app.get_json('/rpc/export_nodenet(nodenet_uid="%s")' % test_nodenet)
assert_success(response)
data = json.loads(response.json_body['data'])
assert data['name'] == 'Testnet'
assert data['nodes'][node]['type'] == 'Pipe'
assert 'links' in data
def test_import_nodenet(app, test_nodenet, node):
app.set_auth()
response = app.get_json('/rpc/export_nodenet(nodenet_uid="%s")' % test_nodenet)
data = json.loads(response.json_body['data'])
del data['uid']
response = app.post_json('/rpc/import_nodenet', params={
'nodenet_data': json.dumps(data)
})
assert_success(response)
uid = response.json_body['data']
assert uid is not None
response = app.get_json('/rpc/get_nodenet_metadata(nodenet_uid="%s")' % uid)
assert response.json_body['data']['name'] == data['name']
assert response.json_body['data']['world'] == data['world']
assert response.json_body['data']['worldadapter'] == data['worldadapter']
response = app.get_json('/rpc/get_nodes(nodenet_uid="%s")' % uid)
assert list(response.json_body['data']['nodes'].keys()) == [node]
response = app.get_json('/rpc/delete_nodenet(nodenet_uid="%s")' % uid)
def test_merge_nodenet(app, test_nodenet, engine, node):
app.set_auth()
response = app.get_json('/rpc/export_nodenet(nodenet_uid="%s")' % test_nodenet)
data = json.loads(response.json_body['data'])
response = app.post_json('/rpc/new_nodenet', params={
'name': 'ImporterNet',
'engine': engine,
'worldadapter': 'Braitenberg',
'owner': 'Pytest User'
})
uid = response.json_body['data']
data['uid'] = uid
response = app.post_json('/rpc/merge_nodenet', params={
'nodenet_uid': uid,
'nodenet_data': json.dumps(data)
})
assert_success(response)
response = app.get_json('/rpc/get_nodes(nodenet_uid="%s")' % uid)
assert len(list(response.json_body['data']['nodes'].keys())) == 1
response = app.get_json('/rpc/get_nodenet_metadata(nodenet_uid="%s")' % uid)
assert response.json_body['data']['name'] == 'ImporterNet'
response = app.get_json('/rpc/delete_nodenet(nodenet_uid="%s")' % uid)
###################################################
##
##
## WORLD
##
##
###################################################
def test_get_available_worlds(app, test_world):
response = app.get_json('/rpc/get_available_worlds()')
assert_success(response)
assert test_world in response.json_body['data']
def test_get_available_worlds_for_user(app, test_world):
response = app.get_json('/rpc/get_available_worlds(user_id="Pytest User")')
assert_success(response)
assert test_world in response.json_body['data']
# TODO: get_nodenet_properties is missing.
def test_get_world_properties(app, test_world):
response = app.get_json('/rpc/get_world_properties(world_uid="%s")' % test_world)
assert_success(response)
data = response.json_body['data']
assert data['uid'] == test_world
assert data['name'] == "World of Pain"
assert 'available_worldadapters' in data
assert 'available_worldobjects' in data
def test_get_worldadapters(app, test_world):
response = app.get_json('/rpc/get_worldadapters(world_uid="%s")' % test_world)
assert_success(response)
assert 'Braitenberg' in response.json_body['data']
def test_get_world_objects(app, test_world):
response = app.get_json('/rpc/get_world_objects(world_uid="%s")' % test_world)
assert_success(response)
assert response.json_body['data'] == {}
def test_add_worldobject(app, test_world):
response = app.post_json('/rpc/add_worldobject', params={
'world_uid': test_world,
'type': 'Braintree',
'position': [10, 10],
'name': 'Testtree'
})
assert_success(response)
uid = response.json_body['data']
assert uid is not None
response = app.get_json('/rpc/get_world_objects(world_uid="%s")' % test_world)
assert uid in response.json_body['data']
def test_delete_worldobject(app, test_world):
response = app.post_json('/rpc/add_worldobject', params={
'world_uid': test_world,
'type': 'Braintree',
'position': [10, 10],
'name': 'Testtree'
})
uid = response.json_body['data']
response = app.post_json('/rpc/delete_worldobject', params={
'world_uid': test_world,
'object_uid': uid
})
assert_success(response)
response = app.get_json('/rpc/get_world_objects(world_uid="%s")' % test_world)
assert uid not in response.json_body['data']
def test_set_worldobject_properties(app, test_world):
response = app.post_json('/rpc/add_worldobject', params={
'world_uid': test_world,
'type': 'Braintree',
'position': [10, 10],
'name': 'Testtree'
})
uid = response.json_body['data']
response = app.post_json('/rpc/set_worldobject_properties', params={
'world_uid': test_world,
'uid': uid,
'position': [20, 20],
'orientation': 27,
'name': 'edited'
})
assert_success(response)
response = app.get_json('/rpc/get_world_objects(world_uid="%s")' % test_world)
data = response.json_body['data']
assert data[uid]['position'] == [20, 20]
assert data[uid]['orientation'] == 27
assert data[uid]['name'] == 'edited'
def test_get_world_view(app, test_world):
response = app.get_json('/rpc/get_world_view(world_uid="%s", step=0)' % test_world)
assert_success(response)
assert 'agents' in response.json_body['data']
assert 'objects' in response.json_body['data']
assert response.json_body['data']['current_step'] == 0
assert 'step' not in response.json_body['data']
def test_set_worldagent_properties(app, test_world, test_nodenet):
# create agent.
app.set_auth()
response = app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, worldadapter="Braitenberg", world_uid=test_world))
response = app.post_json('/rpc/set_worldagent_properties', params={
'world_uid': test_world,
'uid': test_nodenet,
'position': [23, 23],
'orientation': 37,
'name': 'Sepp'
})
assert_success(response)
response = app.get_json('/rpc/get_world_view(world_uid="%s", step=0)' % test_world)
data = response.json_body['data']['agents'][test_nodenet]
assert data['position'] == [23, 23]
assert data['orientation'] == 37
assert data['name'] == 'Sepp'
def test_new_world(app):
app.set_auth()
response = app.post_json('/rpc/new_world', params={
'world_name': 'FooBarTestWorld',
'world_type': 'Island'
})
assert_success(response)
uid = response.json_body['data']
response = app.get_json('/rpc/get_available_worlds(user_id="Pytest User")')
assert uid in response.json_body['data']
def test_get_available_world_types(app):
response = app.get_json('/rpc/get_available_world_types()')
assert_success(response)
assert 'Island' in response.json_body['data']
def test_delete_world(app, test_world):
response = app.get_json('/rpc/delete_world(world_uid="%s")' % test_world)
assert_success(response)
response = app.get_json('/rpc/get_available_worlds(user_id="Pytest User")')
assert test_world not in response.json_body['data']
def test_set_world_properties(app, test_world):
app.set_auth()
response = app.post_json('/rpc/set_world_properties', params={
'world_uid': test_world,
'world_name': 'asdf',
'owner': 'Pytest User'
})
assert_success(response)
response = app.get_json('/rpc/get_world_properties(world_uid="%s")' % test_world)
assert response.json_body['data']['name'] == "asdf"
def test_revert_world(app, test_world):
app.set_auth()
response = app.post_json('/rpc/add_worldobject', params={
'world_uid': test_world,
'type': 'Braintree',
'position': [10, 10],
'name': 'Testtree'
})
response = app.get_json('/rpc/revert_world(world_uid="%s")' % test_world)
assert_success(response)
response = app.get_json('/rpc/get_world_view(world_uid="%s",step=0)' % test_world)
data = response.json_body['data']
assert data['objects'] == {}
def test_save_world(app, test_world):
app.set_auth()
response = app.post_json('/rpc/add_worldobject', params={
'world_uid': test_world,
'type': 'Braintree',
'position': [10, 10],
'name': 'Testtree'
})
uid = response.json_body['data']
response = app.get_json('/rpc/save_world(world_uid="%s")' % test_world)
assert_success(response)
response = app.get_json('/rpc/revert_world(world_uid="%s")' % test_world)
response = app.get_json('/rpc/get_world_view(world_uid="%s",step=0)' % test_world)
data = response.json_body['data']
assert uid in data['objects']
# delete the world, to get the default state back
app.get_json('/rpc/delete_world(world_uid="%s")' % test_world)
def test_export_world(app, test_world):
response = app.get_json('/rpc/export_world(world_uid="%s")' % test_world)
assert_success(response)
export_data = json.loads(response.json_body['data'])
assert export_data['uid'] == test_world
assert export_data['name'] == 'World of Pain'
assert export_data['objects'] == {}
assert export_data['agents'] == {}
assert export_data['owner'] == 'Pytest User'
assert export_data['current_step'] == 0
assert export_data['world_type'] == 'Island'
def test_import_world(app, test_world):
response = app.get_json('/rpc/export_world(world_uid="%s")' % test_world)
data = json.loads(response.json_body['data'])
del data['uid']
data['name'] = 'Copied Pain'
response = app.post_json('/rpc/import_world', params={
'worlddata': json.dumps(data)
})
assert_success(response)
uid = response.json_body['data']
response = app.get_json('/rpc/export_world(world_uid="%s")' % uid)
data = json.loads(response.json_body['data'])
assert data['owner'] == 'Pytest User'
assert data['name'] == 'Copied Pain'
assert data['objects'] == {}
assert data['agents'] == {}
assert uid != test_world
###################################################
##
##
## MONITORS
##
##
###################################################
def test_export_monitor_data_all(app, test_nodenet):
response = app.get_json('/rpc/export_monitor_data(nodenet_uid="%s")' % test_nodenet)
assert_success(response)
assert response.json_body['data'] == {}
def test_add_gate_monitor(app, test_nodenet, node):
response = app.post_json('/rpc/add_gate_monitor', params={
'nodenet_uid': test_nodenet,
'node_uid': node,
'gate': 'sub',
'sheaf': 'default'
})
assert_success(response)
uid = response.json_body['data']
response = app.post_json('/rpc/export_monitor_data', params={
'nodenet_uid': test_nodenet,
'monitor_uid': uid
})
assert response.json_body['data']['node_uid'] == node
assert response.json_body['data']['target'] == 'sub'
assert response.json_body['data']['type'] == 'gate'
assert response.json_body['data']['sheaf'] == 'default'
assert response.json_body['data']['values'] == {}
@pytest.mark.engine("dict_engine")
def test_add_slot_monitor(app, test_nodenet, node):
response = app.post_json('/rpc/add_slot_monitor', params={
'nodenet_uid': test_nodenet,
'node_uid': node,
'slot': 'gen',
'name': 'Foobar'
})
assert_success(response)
uid = response.json_body['data']
response = app.post_json('/rpc/export_monitor_data', params={
'nodenet_uid': test_nodenet,
'monitor_uid': uid
})
assert response.json_body['data']['name'] == 'Foobar'
assert response.json_body['data']['node_uid'] == node
assert response.json_body['data']['target'] == 'gen'
assert response.json_body['data']['type'] == 'slot'
assert response.json_body['data']['sheaf'] == 'default'
assert response.json_body['data']['values'] == {}
def test_add_link_monitor(app, test_nodenet, node):
response = app.post_json('/rpc/add_link_monitor', params={
'nodenet_uid': test_nodenet,
'source_node_uid': node,
'gate_type': 'gen',
'target_node_uid': node,
'slot_type': 'gen',
'property': 'weight',
'name': 'LinkWeight'
})
assert_success(response)
uid = response.json_body['data']
response = app.post_json('/rpc/export_monitor_data', params={
'nodenet_uid': test_nodenet,
'monitor_uid': uid
})
assert response.json_body['data']['name'] == 'LinkWeight'
assert response.json_body['data']['source_node_uid'] == node
assert response.json_body['data']['gate_type'] == 'gen'
assert response.json_body['data']['target_node_uid'] == node
assert response.json_body['data']['slot_type'] == 'gen'
assert response.json_body['data']['property'] == 'weight'
def test_add_custom_monitor(app, test_nodenet):
response = app.post_json('/rpc/add_custom_monitor', params={
'nodenet_uid': test_nodenet,
'function': 'return len(netapi.get_nodes())',
'name': 'nodecount'
})
assert_success(response)
uid = response.json_body['data']
response = app.post_json('/rpc/export_monitor_data', params={
'nodenet_uid': test_nodenet,
'monitor_uid': uid
})
assert response.json_body['data']['name'] == 'nodecount'
def test_remove_monitor(app, test_nodenet, node):
response = app.post_json('/rpc/add_gate_monitor', params={
'nodenet_uid': test_nodenet,
'node_uid': node,
'gate': 'gen'
})
uid = response.json_body['data']
response = app.post_json('/rpc/remove_monitor', params={
'nodenet_uid': test_nodenet,
'monitor_uid': uid
})
assert_success(response)
response = app.post_json('/rpc/export_monitor_data', params={
'nodenet_uid': test_nodenet
})
assert uid not in response.json_body['data']
def test_clear_monitor(app, test_nodenet, node):
response = app.post_json('/rpc/add_gate_monitor', params={
'nodenet_uid': test_nodenet,
'node_uid': node,
'gate': 'gen'
})
uid = response.json_body['data']
response = app.post_json('/rpc/clear_monitor', params={
'nodenet_uid': test_nodenet,
'monitor_uid': uid
})
assert_success(response)
def test_get_monitor_data(app, test_nodenet, node):
response = app.post_json('/rpc/add_gate_monitor', params={
'nodenet_uid': test_nodenet,
'node_uid': node,
'gate': 'sub'
})
uid = response.json_body['data']
response = app.post_json('/rpc/get_monitor_data', params={
'nodenet_uid': test_nodenet,
'step': 0,
'monitor_from': 3,
'monitor_count': 20
})
assert_success(response)
assert uid in response.json_body['data']['monitors']
###################################################
##
##
## NODENET
##
##
###################################################
def test_get_nodespace_list(app, test_nodenet, node):
response = app.get_json('/rpc/get_nodespace_list(nodenet_uid="%s")' % test_nodenet)
assert_success(response)
rootid = list(response.json_body['data'].keys())[0]
assert response.json_body['data'][rootid]['name'] == 'Root'
assert response.json_body['data'][rootid]['parent'] is None
assert node in response.json_body['data'][rootid]['nodes']
def test_get_nodespace_activations(app, test_nodenet, node):
response = app.post_json('/rpc/get_nodespace_activations', params={
'nodenet_uid': test_nodenet,
'nodespaces': [None],
'last_call_step': -1
})
assert_success(response)
assert node in response.json_body['data']['activations']
def test_get_node(app, test_nodenet, node):
response = app.get_json('/rpc/get_node(nodenet_uid="%s",node_uid="%s")' % (test_nodenet, node))
assert_success(response)
assert response.json_body['data']['type'] == 'Pipe'
def test_add_node(app, test_nodenet):
app.set_auth()
response = app.post_json('/rpc/add_node', params={
'nodenet_uid': test_nodenet,
'type': 'Register',
'position': [23, 42, 13],
'nodespace': None,
'name': 'N2'
})
assert_success(response)
uid = response.json_body['data']
response = app.get_json('/rpc/get_node(nodenet_uid="%s",node_uid="%s")' % (test_nodenet, uid))
assert response.json_body['data']['name'] == 'N2'
def test_add_nodespace(app, test_nodenet):
app.set_auth()
response = app.post_json('/rpc/add_nodespace', params={
'nodenet_uid': test_nodenet,
'position': [23, 42, 13],
'nodespace': None,
'name': 'nodespace'
})
assert_success(response)
uid = response.json_body['data']
response = app.get_json('/rpc/get_nodes(nodenet_uid="%s")' % (test_nodenet))
assert uid in response.json_body['data']['nodespaces']
assert uid not in response.json_body['data']['nodes']
def test_clone_nodes(app, test_nodenet, node):
app.set_auth()
response = app.post_json('/rpc/clone_nodes', params={
'nodenet_uid': test_nodenet,
'node_uids': [node],
'clone_mode': 'all',
'nodespace': None,
'offset': [23, 23, 23]
})
assert_success(response)
node = list(response.json_body['data'].values())[0]
assert node['name'] == 'N1_copy'
assert node['position'] == [33, 33, 33]
assert node['links']['gen'][0]['target_node_uid'] == node['uid']
def test_set_entity_positions(app, test_nodenet, node):
app.set_auth()
response = app.post_json('/rpc/set_entity_positions', params={
'nodenet_uid': test_nodenet,
'positions': {node: [42, 23, 11]}
})
assert_success(response)
response = app.get_json('/rpc/get_node(nodenet_uid="%s",node_uid="%s")' % (test_nodenet, node))
assert response.json_body['data']['position'] == [42, 23, 11]
def test_set_node_name(app, test_nodenet, node):
app.set_auth()
response = app.post_json('/rpc/set_node_name', params={
'nodenet_uid': test_nodenet,
'node_uid': node,
'name': 'changed'
})
assert_success(response)
response = app.get_json('/rpc/get_node(nodenet_uid="%s",node_uid="%s")' % (test_nodenet, node))
assert response.json_body['data']['name'] == 'changed'
def test_delete_node(app, test_nodenet, node):
app.set_auth()
response = app.post_json('/rpc/delete_nodes', params={
'nodenet_uid': test_nodenet,
'node_uids': [node]
})
assert_success(response)
response = app.get_json('/rpc/get_nodes(nodenet_uid="%s")' % test_nodenet)
assert response.json_body['data']['nodes'] == {}
def test_delete_nodespace(app, test_nodenet, node):
app.set_auth()
response = app.post_json('/rpc/add_nodespace', params={
'nodenet_uid': test_nodenet,
'position': [23, 42, 13],
'nodespace': None,
'name': 'nodespace'
})
uid = response.json_body['data']
response = app.post_json('/rpc/delete_nodespace', params={
'nodenet_uid': test_nodenet,
'nodespace': uid
})
assert_success(response)
response = app.get_json('/rpc/get_nodes(nodenet_uid="%s")' % test_nodenet)
assert uid not in response.json_body['data']['nodespaces']
def test_align_nodes(app, test_nodenet):
app.set_auth()
# TODO: Why does autoalign only move a node if it has no links?
response = app.post_json('/rpc/add_node', params={
'nodenet_uid': test_nodenet,
'type': 'Register',
'position': [5, 5, 0],
'nodespace': None,
'name': 'N2'
})
uid = response.json_body['data']
response = app.post_json('/rpc/align_nodes', params={
'nodenet_uid': test_nodenet,
'nodespace': None
})
assert_success(response)
response = app.get_json('/rpc/get_node(nodenet_uid="%s",node_uid="%s")' % (test_nodenet, uid))
assert response.json_body['data']['position'] != [5, 5]
def test_get_available_node_types(app, test_nodenet):
response = app.get_json('/rpc/get_available_node_types(nodenet_uid="%s")' % test_nodenet)
assert_success(response)
assert 'Pipe' in response.json_body['data']['nodetypes']
assert 'Register' in response.json_body['data']['nodetypes']
assert 'Sensor' in response.json_body['data']['nodetypes']
def test_get_available_native_module_types(app, test_nodenet, engine):
response = app.get_json('/rpc/get_available_native_module_types(nodenet_uid="%s")' % test_nodenet)
assert_success(response)
if engine == 'dict_engine':
assert response.json_body['data'] == {}
elif engine == 'theano_engine':
assert "GradientDescent" in response.json_body['data']
def test_set_node_parameters(app, test_nodenet):
app.set_auth()
# add activator
response = app.post_json('/rpc/add_node', params={
'nodenet_uid': test_nodenet,
'type': 'Activator',
'nodespace': None,
'position': [23, 42, 0],
})
uid = response.json_body['data']
response = app.post_json('/rpc/set_node_parameters', params={
'nodenet_uid': test_nodenet,
'node_uid': uid,
'parameters': {'type': 'sub'}
})
assert_success(response)
response = app.get_json('/rpc/get_node(nodenet_uid="%s",node_uid="%s")' % (test_nodenet, uid))
assert response.json_body['data']['parameters']['type'] == 'sub'
def test_get_gatefunction(app, test_nodenet, node):
response = app.post_json('/rpc/get_gatefunction', params={
'nodenet_uid': test_nodenet,
'node_uid': node,
'gate_type': 'gen'
})
assert_success(response)
assert response.json_body['data'] == 'identity'
def test_set_gatefunction(app, test_nodenet, node):
app.set_auth()
response = app.post_json('/rpc/set_gatefunction', params={
'nodenet_uid': test_nodenet,
'node_uid': node,
'gate_type': 'gen',
'gatefunction': 'sigmoid'
})
assert_success(response)
response = app.post_json('/rpc/get_gatefunction', params={
'nodenet_uid': test_nodenet,
'node_uid': node,
'gate_type': 'gen',
})
assert response.json_body['data'] == 'sigmoid'
def test_get_available_gatefunctions(app, test_nodenet):
response = app.post_json('/rpc/get_available_gatefunctions', params={'nodenet_uid': test_nodenet})
funcs = response.json_body['data']
assert 'sigmoid' in funcs
assert 'identity' in funcs
assert 'absolute' in funcs
def test_set_gate_parameters(app, test_nodenet, node):
app.set_auth()
response = app.post_json('/rpc/set_gate_parameters', params={
'nodenet_uid': test_nodenet,
'node_uid': node,
'gate_type': 'gen',
'parameters': {'minimum': -2}
})
assert_success(response)
response = app.get_json('/rpc/get_node(nodenet_uid="%s",node_uid="%s")' % (test_nodenet, node))
assert response.json_body['data']['gate_parameters']['gen']['minimum'] == -2
def test_get_available_datasources(app, test_nodenet, test_world):
app.set_auth()
# set worldadapter
response = app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, world_uid=test_world, worldadapter="Braitenberg"))
response = app.get_json('/rpc/get_available_datasources(nodenet_uid="%s")' % test_nodenet)
assert_success(response)
    assert 'brightness_l' in response.json_body['data']
    assert 'brightness_r' in response.json_body['data']
def test_get_available_datatargets(app, test_nodenet, test_world):
app.set_auth()
response = app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, world_uid=test_world, worldadapter="Braitenberg"))
response = app.get_json('/rpc/get_available_datatargets(nodenet_uid="%s")' % test_nodenet)
assert_success(response)
assert 'engine_l' in response.json_body['data']
assert 'engine_r' in response.json_body['data']
def test_bind_datasource_to_sensor(app, test_nodenet, test_world):
app.set_auth()
response = app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, world_uid=test_world, worldadapter="Braitenberg"))
response = app.post_json('/rpc/add_node', params={
'nodenet_uid': test_nodenet,
'type': 'Sensor',
'position': [23, 42, 13],
'nodespace': None,
})
uid = response.json_body['data']
response = app.post_json('/rpc/bind_datasource_to_sensor', params={
'nodenet_uid': test_nodenet,
'sensor_uid': uid,
'datasource': 'brightness_l'
})
assert_success(response)
response = app.get_json('/rpc/get_node(nodenet_uid="%s",node_uid="%s")' % (test_nodenet, uid))
assert response.json_body['data']['parameters']['datasource'] == 'brightness_l'
def test_bind_datatarget_to_actor(app, test_nodenet, test_world):
app.set_auth()
response = app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, world_uid=test_world, worldadapter="Braitenberg"))
response = app.post_json('/rpc/add_node', params={
'nodenet_uid': test_nodenet,
'type': 'Actor',
'position': [23, 42, 13],
'nodespace': None,
})
uid = response.json_body['data']
response = app.post_json('/rpc/bind_datatarget_to_actor', params={
'nodenet_uid': test_nodenet,
'actor_uid': uid,
'datatarget': 'engine_l'
})
assert_success(response)
response = app.get_json('/rpc/get_node(nodenet_uid="%s",node_uid="%s")' % (test_nodenet, uid))
assert response.json_body['data']['parameters']['datatarget'] == 'engine_l'
def test_add_link(app, test_nodenet, node):
app.set_auth()
response = app.post_json('/rpc/add_link', params={
'nodenet_uid': test_nodenet,
'source_node_uid': node,
'gate_type': 'sub',
'target_node_uid': node,
'slot_type': 'gen',
'weight': 0.7
})
assert_success(response)
uid = response.json_body['data']
assert uid is not None
response = app.get_json('/rpc/get_nodes(nodenet_uid="%s")' % test_nodenet)
data = response.json_body['data']
assert data['nodes'][node]['links']['sub'][0]['target_node_uid'] == node
assert round(data['nodes'][node]['links']['sub'][0]['weight'], 3) == 0.7
def test_set_link_weight(app, test_nodenet, node):
app.set_auth()
response = app.post_json('/rpc/set_link_weight', params={
'nodenet_uid': test_nodenet,
'source_node_uid': node,
'gate_type': "gen",
'target_node_uid': node,
'slot_type': "gen",
'weight': 0.345
})
assert_success(response)
response = app.get_json('/rpc/get_nodes(nodenet_uid="%s")' % test_nodenet)
data = response.json_body['data']
assert float("%.3f" % data['nodes'][node]['links']['gen'][0]['weight']) == 0.345
def test_get_links_for_nodes(app, test_nodenet, node):
response = app.post_json('/rpc/get_links_for_nodes', params={
'nodenet_uid': test_nodenet,
'node_uids': [node]
})
assert_success(response)
link = list(response.json_body['data']['links'])[0]
assert link['source_node_uid'] == node
def test_delete_link(app, test_nodenet, node):
app.set_auth()
response = app.post_json('/rpc/delete_link', params={
'nodenet_uid': test_nodenet,
'source_node_uid': node,
'gate_type': "gen",
'target_node_uid': node,
'slot_type': "gen"
})
assert_success(response)
response = app.get_json('/rpc/get_nodes(nodenet_uid="%s")' % test_nodenet)
data = response.json_body['data']
    assert data['nodes'][node]['links'] == {}
def test_reload_native_modules(app, test_nodenet, resourcepath):
app.set_auth()
# create a native module:
import os
nodetype_file = os.path.join(resourcepath, 'Test', 'nodetypes.json')
nodefunc_file = os.path.join(resourcepath, 'Test', 'nodefunctions.py')
with open(nodetype_file, 'w') as fp:
fp.write('{"Testnode": {\
"name": "Testnode",\
"slottypes": ["gen", "foo", "bar"],\
"nodefunction_name": "testnodefunc",\
"gatetypes": ["gen", "foo", "bar"],\
"symbol": "t"}}')
with open(nodefunc_file, 'w') as fp:
fp.write("def testnodefunc(netapi, node=None, **prams):\r\n return 17")
response = app.get_json('/rpc/reload_native_modules()')
assert_success(response)
response = app.get_json('/rpc/get_available_node_types(nodenet_uid="%s")' % test_nodenet)
data = response.json_body['data']['native_modules']['Testnode']
assert data['nodefunction_name'] == "testnodefunc"
assert data['gatetypes'] == ['gen', 'foo', 'bar']
assert data['slottypes'] == ['gen', 'foo', 'bar']
assert data['name'] == 'Testnode'
def test_user_prompt_response(app, test_nodenet, resourcepath):
app.set_auth()
# create a native module:
import os
nodetype_file = os.path.join(resourcepath, 'Test', 'nodetypes.json')
nodefunc_file = os.path.join(resourcepath, 'Test', 'nodefunctions.py')
with open(nodetype_file, 'w') as fp:
fp.write('{"Testnode": {\
"name": "Testnode",\
"slottypes": ["gen", "foo", "bar"],\
"nodefunction_name": "testnodefunc",\
"gatetypes": ["gen", "foo", "bar"],\
"symbol": "t"}}')
with open(nodefunc_file, 'w') as fp:
fp.write("def testnodefunc(netapi, node=None, **prams):\r\n return 17")
response = app.get_json('/rpc/reload_native_modules()')
assert_success(response)
response = app.post_json('/rpc/add_node', params={
'nodenet_uid': test_nodenet,
'type': 'Testnode',
'position': [23, 23],
'nodespace': None,
'name': 'Testnode'
})
assert_success(response)
uid = response.json_body['data']
response = app.post_json('/rpc/user_prompt_response', {
'nodenet_uid': test_nodenet,
'node_uid': uid,
'values': {'foo': 'bar'},
'resume_nodenet': True
})
assert_success(response)
response = app.get_json('/rpc/export_nodenet(nodenet_uid="%s")' % test_nodenet)
data = json.loads(response.json_body['data'])
assert data['nodes'][uid]['parameters']['foo'] == 'bar'
assert data['is_active']
def test_set_logging_levels(app):
response = app.post_json('/rpc/set_logging_levels', params={
'logging_levels': {
'system': 'INFO',
'world': 'DEBUG',
}
})
assert_success(response)
import logging
assert logging.getLogger('world').getEffectiveLevel() == logging.DEBUG
assert logging.getLogger('system').getEffectiveLevel() == logging.INFO
def test_get_logger_messages(app, test_nodenet):
response = app.get_json('/rpc/get_logger_messages(logger=["system"])')
assert_success(response)
assert 'servertime' in response.json_body['data']
assert response.json_body['data']['logs'] == []
def test_get_nodenet_logger_messages(app, test_nodenet):
import logging
logging.getLogger('agent.%s' % test_nodenet).warning('asdf')
logging.getLogger('system').warning('foobar')
response = app.get_json('/rpc/get_logger_messages(logger=["system", "agent.%s"])' % test_nodenet)
assert 'servertime' in response.json_body['data']
netlog = syslog = None
for item in response.json_body['data']['logs']:
if item['logger'] == 'system':
syslog = item
elif item['logger'].startswith('agent'):
netlog = item
assert netlog['step'] == 0
assert syslog['step'] is None
def test_get_monitoring_info(app, test_nodenet):
response = app.get_json('/rpc/get_monitoring_info(nodenet_uid="%s",logger=["system,world"],monitor_from=3,monitor_count=10)' % test_nodenet)
assert_success(response)
assert 'logs' in response.json_body['data']
assert 'current_step' in response.json_body['data']
assert response.json_body['data']['monitors'] == {}
assert 'servertime' in response.json_body['data']['logs']
assert response.json_body['data']['logs']['logs'] == []
def test_400(app):
app.set_auth()
response = app.get_json('/rpc/save_nodenet("foobar")', expect_errors=True)
assert_failure(response)
assert "Malformed arguments" in response.json_body['data']
def test_401(app, test_nodenet):
app.unset_auth()
response = app.get_json('/rpc/delete_nodenet(nodenet_uid="%s")' % test_nodenet, expect_errors=True)
assert_failure(response)
assert 'Insufficient permissions' in response.json_body['data']
def test_404(app):
response = app.get_json('/rpc/notthere(foo="bar")', expect_errors=True)
assert_failure(response)
assert response.json_body['data'] == "Function not found"
def test_405(app, test_nodenet):
response = app.get_json('/rpc/get_available_nodenets', params={'nodenet_uid': test_nodenet}, expect_errors=True)
assert_failure(response)
assert response.json_body['data'] == "Method not allowed"
def test_500(app):
response = app.get_json('/rpc/generate_uid(foo="bar")', expect_errors=True)
assert_failure(response)
assert "unexpected keyword argument" in response.json_body['data']
assert response.json_body['traceback'] is not None
def test_get_recipes(app, test_nodenet, resourcepath):
app.set_auth()
import os
recipe_file = os.path.join(resourcepath, 'Test', 'recipes.py')
with open(recipe_file, 'w') as fp:
fp.write("""
def foobar(netapi, quatsch=23):
return {'quatsch': quatsch}
""")
response = app.get_json('/rpc/reload_native_modules()')
response = app.get_json('/rpc/get_available_recipes()')
data = response.json_body['data']
assert 'foobar' in data
assert len(data['foobar']['parameters']) == 1
assert data['foobar']['parameters'][0]['name'] == 'quatsch'
assert data['foobar']['parameters'][0]['default'] == 23
def test_run_recipes(app, test_nodenet, resourcepath):
app.set_auth()
import os
recipe_file = os.path.join(resourcepath, 'Test', 'recipes.py')
with open(recipe_file, 'w') as fp:
fp.write("""
def foobar(netapi, quatsch=23):
return {'quatsch': quatsch}
""")
response = app.get_json('/rpc/reload_native_modules()')
response = app.post_json('/rpc/run_recipe', {
'nodenet_uid': test_nodenet,
'name': 'foobar',
'parameters': {
'quatsch': ''
}
})
data = response.json_body['data']
assert data['quatsch'] == 23
def test_get_agent_dashboard(app, test_nodenet, node, default_world):
app.set_auth()
response = app.post_json('/rpc/set_nodenet_properties', params=dict(nodenet_uid=test_nodenet, worldadapter="Default", world_uid=default_world))
response = app.get_json('/rpc/get_agent_dashboard(nodenet_uid="%s")' % test_nodenet)
data = response.json_body['data']
assert data['count_nodes'] == 1
def test_nodenet_data_structure(app, test_nodenet, resourcepath, node):
app.set_auth()
import os
nodetype_file = os.path.join(resourcepath, 'Test', 'nodetypes.json')
nodefunc_file = os.path.join(resourcepath, 'Test', 'nodefunctions.py')
with open(nodetype_file, 'w') as fp:
fp.write('{"Testnode": {\
"name": "Testnode",\
"slottypes": ["gen", "foo", "bar"],\
"nodefunction_name": "testnodefunc",\
"gatetypes": ["gen", "foo", "bar"],\
"symbol": "t"}}')
with open(nodefunc_file, 'w') as fp:
fp.write("def testnodefunc(netapi, node=None, **prams):\r\n return 17")
response = app.get_json('/rpc/reload_native_modules()')
response = app.post_json('/rpc/add_nodespace', params={
'nodenet_uid': test_nodenet,
'position': [23, 23, 42],
'nodespace': None,
'name': 'Test-Node-Space'
})
nodespace_uid = response.json_body['data']
response = app.post_json('/rpc/add_node', params={
'nodenet_uid': test_nodenet,
'type': 'Pipe',
'position': [42, 42, 23],
'nodespace': nodespace_uid,
'name': 'N2'
})
n2_uid = response.json_body['data']
response = app.post_json('/rpc/add_gate_monitor', params={
'nodenet_uid': test_nodenet,
'node_uid': node,
'gate': 'gen',
'name': 'Testmonitor',
'color': '#332211'
})
monitor_uid = response.json_body['data']
response = app.get_json('/rpc/get_nodenet_metadata(nodenet_uid="%s")' % test_nodenet)
metadata = response.json_body['data']
response_1 = app.post_json('/rpc/get_calculation_state', params={'nodenet_uid': test_nodenet, 'nodenet': {'nodespaces': [None]}, 'monitors': True})
response = app.get_json('/rpc/save_nodenet(nodenet_uid="%s")' % test_nodenet)
response = app.get_json('/rpc/revert_nodenet(nodenet_uid="%s")' % test_nodenet)
response_2 = app.post_json('/rpc/get_calculation_state', params={'nodenet_uid': test_nodenet, 'nodenet': {'nodespaces': [None]}, 'monitors': True})
assert response_1.json_body['data']['nodenet'] == response_2.json_body['data']['nodenet']
assert response_1.json_body['data']['monitors']['monitors'] == response_2.json_body['data']['monitors']['monitors']
data = response_2.json_body['data']
# Monitors
response = app.get_json('/rpc/export_monitor_data(nodenet_uid="%s", monitor_uid="%s")' % (test_nodenet, monitor_uid))
monitor_data = response.json_body['data']
assert data['monitors']['monitors'][monitor_uid]['name'] == 'Testmonitor'
assert data['monitors']['monitors'][monitor_uid]['node_uid'] == node
assert data['monitors']['monitors'][monitor_uid]['target'] == 'gen'
assert data['monitors']['monitors'][monitor_uid]['type'] == 'gate'
assert data['monitors']['monitors'][monitor_uid]['uid'] == monitor_uid
assert data['monitors']['monitors'][monitor_uid]['values'] == {}
assert data['monitors']['monitors'][monitor_uid]['color'] == '#332211'
assert data['monitors']['monitors'][monitor_uid] == monitor_data
# Nodes
response = app.get_json('/rpc/get_node(nodenet_uid="%s", node_uid="%s")' % (test_nodenet, node))
node_data = response.json_body['data']
assert node in data['nodenet']['nodes']
assert n2_uid not in data['nodenet']['nodes']
assert nodespace_uid not in data['nodenet']['nodes']
# gates
for key in ['gen', 'por', 'ret', 'sub', 'sur', 'cat', 'exp']:
assert data['nodenet']['nodes'][node]['gate_activations'][key]['default']['activation'] == 0
assert key not in data['nodenet']['nodes'][node]['gate_parameters']
assert data['nodenet']['nodes'][node]['gate_functions'][key] == 'identity'
assert data['nodenet']['nodes'][node]['parameters']['expectation'] == 1
assert data['nodenet']['nodes'][node]['parameters']['wait'] == 10
assert data['nodenet']['nodes'][node]['position'] == [10, 10, 10]
assert data['nodenet']['nodes'][node]['type'] == "Pipe"
assert 'links' not in data
assert node_data['parameters']['expectation'] == 1
assert node_data['parameters']['wait'] == 10
assert node_data['position'] == [10, 10, 10]
assert node_data['type'] == "Pipe"
# Links
for link in data['nodenet']['nodes'][node]['links']['gen']:
assert link['weight'] == 1
assert link['target_node_uid'] == node
assert link['target_slot_name'] == 'gen'
# Nodespaces
# assert data['nodenet']['nodespaces'][nodespace_uid]['index'] == 3
assert data['nodenet']['nodespaces'][nodespace_uid]['name'] == 'Test-Node-Space'
# assert data['nodenet']['nodespaces'][nodespace_uid]['parent_nodespace'] == 'Root'
assert data['nodenet']['nodespaces'][nodespace_uid]['position'] == [23, 23, 42]
# Nodetypes
response = app.get_json('/rpc/get_available_node_types(nodenet_uid="%s")' % test_nodenet)
node_type_data = response.json_body['data']
for key in ['Comment', 'Nodespace']:
assert 'gatetypes' not in metadata['nodetypes'][key]
assert 'slottypes' not in metadata['nodetypes'][key]
for key in ['Pipe', 'Register', 'Actor']:
assert 'gatetypes' in metadata['nodetypes'][key]
assert 'slottypes' in metadata['nodetypes'][key]
assert 'slottypes' in metadata['nodetypes']['Activator']
assert 'gatetypes' not in metadata['nodetypes']['Activator']
assert 'slottypes' not in metadata['nodetypes']['Sensor']
assert 'gatetypes' in metadata['nodetypes']['Sensor']
assert metadata['nodetypes'] == node_type_data['nodetypes']
# Native Modules
response = app.get_json('/rpc/get_available_native_module_types(nodenet_uid="%s")' % test_nodenet)
native_module_data = response.json_body['data']
assert metadata['native_modules']['Testnode']['gatetypes'] == ['gen', 'foo', 'bar']
assert metadata['native_modules']['Testnode']['name'] == 'Testnode'
assert metadata['native_modules']['Testnode']['nodefunction_name'] == 'testnodefunc'
assert metadata['native_modules']['Testnode']['slottypes'] == ['gen', 'foo', 'bar']
assert metadata['native_modules']['Testnode']['symbol'] == 't'
assert metadata['native_modules'] == native_module_data
# Nodenet
assert metadata['current_step'] == 0 # TODO:
assert 'step' not in data # current_step && step?
assert metadata['version'] == 1
assert metadata['world'] is None
assert metadata['worldadapter'] is None
def test_get_state_diff(app, test_nodenet, node):
from micropsi_core import runtime
nodenet = runtime.nodenets[test_nodenet]
runtime.step_nodenet(test_nodenet)
response = app.post_json('/rpc/get_calculation_state', params={
'nodenet_uid': test_nodenet,
'nodenet_diff': {
'nodespaces': [None],
'step': 0,
}
})
data = response.json_body['data']['nodenet_diff']
assert 'activations' in data
assert 'changes' in data
assert node in data['changes']['nodes_dirty']
node2 = nodenet.create_node("Register", None, [10, 10], name="node2")
runtime.step_nodenet(test_nodenet)
response = app.post_json('/rpc/get_calculation_state', params={
'nodenet_uid': test_nodenet,
'nodenet_diff': {
'nodespaces': [None],
'step': 1,
}
})
data = response.json_body['data']['nodenet_diff']
assert [node2] == list(data['changes']['nodes_dirty'].keys())
def test_get_operations(app, test_nodenet):
response = app.get_json('/rpc/get_available_operations()')
data = response.json_body['data']
assert data['autoalign']['selection']['nodetypes'] == []
def test_run_operation(app, test_nodenet, node):
response = app.post_json('/rpc/run_operation', {
'nodenet_uid': test_nodenet,
'name': 'autoalign',
'parameters': {},
'selection_uids': [None]
})
assert response.json_body['status'] == 'success'
``` |
{
"source": "JoschD/cpymad_lhc",
"score": 3
} |
#### File: cpymad_lhc/cpymad_lhc/coupling_correction.py
```python
from typing import Tuple, Sequence, List
from cpymad.madx import Madx
from cpymad_lhc.general import get_coupling_knobs, get_tune_and_chroma_knobs
import logging
LOG = logging.getLogger(__name__)
def correct_coupling(madx: Madx, accel: str, sequence: str,
squeezed: bool = False, **kwargs):
""" Wrapper for coupling correction to use the default knob-names if not
otherwise given.
Args:
madx: Madx instance
accel: Accelerator we are using 'LHC' or 'HLLHC'
sequence: Sequence to use
squeezed: Use squeezed knobs (`_sq`)
Keyword Args:
Other arguments of `correct_coupling_with_knobs`
"""
def check_knobs(knobs):
not_defined = [k for k in knobs if k not in madx.globals]
if len(not_defined):
raise KeyError(f"Knobs {not_defined} are not defined in sequence!")
knobs_dict = _get_default_knob_names(accel, sequence)
for knob_arg, knob_names in knobs_dict.items():
if knob_arg not in kwargs:
if squeezed:
knob_names = tuple(f"{k}_sq" for k in knob_names)
check_knobs(knob_names)
kwargs[knob_arg] = knob_names
correct_coupling_with_knobs(madx, sequence, **kwargs)
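# Usage sketch for the wrapper above (hedged: the sequence name and the tune/chroma
# targets below are illustrative placeholders, not values prescribed by this module):
#
#   madx = Madx()
#   ...  # load machine and optics files so that 'lhcb1' and its knobs are defined
#   correct_coupling(madx, accel='LHC', sequence='lhcb1',
#                    qx=62.31, qy=60.32, dqx=3.0, dqy=3.0)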
def correct_coupling_with_knobs(madx: Madx, sequence: str,
tune_knobs: List[str],
chroma_knobs: List[str],
coupling_knobs: List[str],
qx: float, qy: float, dqx: float, dqy: float,
iterations: int = 2, tolerance: float = 1e-7,
simplex: bool = False,
pre_estimation: bool = True,
match_tunes: bool = True,
):
""" Corrects coupling via the given knobs.
    If there is no coupling, the tunes can be matched to the same fractional
    tunes (mid-tunes). Any remaining tune split is the closest tune approach
    (cta) and indicates the presence of coupling.
The algorithm is as follows:
    First the coupling knob settings are estimated by a one-step Newton
    optimization (if `pre_estimation` is set).
    Then the tune knobs and the coupling knobs are used alternately to match
    the tunes together. This is repeated according to the desired
    `iterations` parameter.
Before and after this correction the cta is checked.
    Remark: The first `twiss` call in the first `_analytical_minimization` call
    is unnecessary, as nothing has changed between this one and the one in
    `_cta_check`. This could be checked and skipped for speed optimization.
Args:
madx: Madx instance
tune_knobs: names of elements/knobs to vary for tune matching
        chroma_knobs: names of elements/knobs to vary for chroma matching
        coupling_knobs: names of elements/knobs to vary for coupling matching
sequence: Sequence to use
qx: tune to match in x
qy: tune to match in y
dqx: chromaticity to match in x
dqy: chromaticity to match in y
        tolerance: (final) tolerance for successful matching
iterations: number of iterations in empirical matching
simplex: use simplex method in empirical matching
pre_estimation: use analytical method to estimate coupling-knob settings.
This will only work if the given coupling knobs correspond
to the imaginary and real part of F1001.
match_tunes: If true, also performs a tune and chroma match at the end,
otherwise, the original tune-knob values are recovered.
"""
qx_mid, qy_mid = _get_middle_tunes(qx, qy)
tune_knobs_saved = {k: madx.globals[k] for k in tune_knobs}
cta = _cta_check(madx, sequence, tune_knobs, qx_mid, qy_mid,
tolerance=tolerance * 10 ** iterations,
text="Initial closest tune approach")
if cta <= tolerance:
LOG.info("Coupling already below tolerance. Skipping correction.")
for k, val in tune_knobs_saved.items():
madx.globals[k] = val
return
if pre_estimation:
for knob in coupling_knobs:
_analytical_minimization(madx, sequence, knob)
_empirical_minimization(madx, sequence,
tune_knobs, coupling_knobs,
iterations, qx_mid, qy_mid, tolerance, simplex)
_cta_check(madx, sequence, tune_knobs, qx_mid, qy_mid,
tolerance=tolerance,
text="Final closest tune approach")
if match_tunes:
_recover_tunes(madx, sequence, tune_knobs, chroma_knobs,
qx=qx, qy=qy, dqx=dqx, dqy=dqy)
else:
for k, val in tune_knobs_saved.items():
madx.globals[k] = val
# Algorithm Steps --------------------------------------------------------------
def _get_middle_tunes(qx: float, qy: float) -> Tuple[float, float]:
    """ Get the tunes with the fractional part in the middle
    between the qx and qy fractional parts, but with the same integer parts. """
qx_frac, qy_frac = qx % 1, qy % 1
qmid_frac = 0.5 * (qx_frac + qy_frac)
qx_mid = int(qx) + qmid_frac
qy_mid = int(qy) + qmid_frac
return qx_mid, qy_mid
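# Worked example: qx=62.31, qy=60.32 -> fractional parts 0.31 and 0.32,
# midpoint 0.315, so the returned mid-tunes are (62.315, 60.315).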
def _get_default_knob_names(accel: str, sequence: str) -> dict:
""" Get tune, chroma and coupling knobs. """
tune_chroma_knobs = list(get_tune_and_chroma_knobs(accel, int(sequence[-1])))
coupling_knobs = list(get_coupling_knobs(accel, int(sequence[-1])))
return dict(tune_knobs=tune_chroma_knobs[:2],
chroma_knobs=tune_chroma_knobs[2:],
coupling_knobs=coupling_knobs)
def _analytical_minimization(madx: Madx, sequence: str, knob: str):
""" Analytical Minimization. """
init_value = madx.globals[knob]
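    # A single finite-difference step on the closest tune approach (cta):
    # the knob is trialled at +/- cta/2 around its current value and the
    # difference of the squared cta values steers the knob towards the
    # setting that minimises the coupling-driven tune split.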
cta = _get_current_tune_approach(madx, sequence)
madx.globals[knob] = init_value + 0.5 * cta
cta_plus = _get_current_tune_approach(madx, sequence)
madx.globals[knob] = init_value - 0.5 * cta
cta_minus = _get_current_tune_approach(madx, sequence)
new_value = init_value + 0.5 * (cta_minus**2 - cta_plus**2) / cta
LOG.debug(f"Knob {knob} updated: {init_value} -> {new_value}.")
madx.globals[knob] = new_value
def _empirical_minimization(madx: Madx, sequence: str,
                            tune_knobs: Sequence[str], coupling_knobs: Sequence[str],
                            iterations: int, qx_mid: float, qy_mid: float,
                            tolerance: float, simplex: bool):
    """ Push tunes together by alternating matching of tune and coupling knobs. """
calls_tune = 100 * (1+simplex)
calls_coupling = 150 * (1+simplex)
step = 1e-9
for idx in range(iterations):
current_tol = tolerance * 10**(iterations-idx-1) # ends at final tolerance
match(
madx, sequence, tune_knobs,
q1=qx_mid, q2=qy_mid,
step=step, calls=calls_tune,
tolerance=current_tol,
# simplex=simplex, # simplex is only used with coupling knobs
)
match(
madx, sequence, coupling_knobs,
q1=qx_mid, q2=qy_mid,
step=step, calls=calls_coupling,
tolerance=2*current_tol,
simplex=simplex
)
def _recover_tunes(madx: Madx, sequence: str, tune_knobs: List[str], chroma_knobs: List[str],
qx: float, qy: float, dqx: float, dqy: float):
""" Recover Tunes (i.e. normal tune matching) """
# match_tune(madx, accel, sequence, qx=qx, qy=qy, dqx=dqx, dqy=dqy)
match(
madx, sequence, tune_knobs,
chrom=True,
q1=qx, q2=qy,
step=1e-7, calls=100, tolerance=1e-21
)
match(
madx, sequence, chroma_knobs,
chrom=True,
dq1=dqx, dq2=dqy,
step=1e-7, calls=100, tolerance=1e-21
)
match(
madx, sequence, tune_knobs+chroma_knobs,
chrom=True,
q1=qx, q2=qy, dq1=dqx, dq2=dqy,
step=1e-7, calls=100, tolerance=1e-21
)
# Closest Tune Approach --------------------------------------------------------
def _cta_check(madx, sequence, tune_knobs, qx_mid, qy_mid, tolerance, text):
""" Try to match tunes and log closest tune approach. """
match(madx, sequence, tune_knobs,
q1=qx_mid, q2=qy_mid, step=1e-9, calls=100, tolerance=tolerance)
cta = _get_current_tune_approach(madx, sequence)
LOG.info(f"{text}: {cta}")
return cta
def _get_current_tune_approach(madx: Madx, sequence: str) -> float:
""" Get the current tune approach in the sequence. """
madx.twiss(sequence=sequence)
qx, qy = madx.table.twiss.summary.q1, madx.table.twiss.summary.q2
cta = _get_tune_approach_value(qx, qy)
LOG.debug(f"Current tune approach value: {cta}")
return cta
def _get_tune_approach_value(qx: float, qy: float) -> float:
""" Calculate the (fractional) tune approach of qx and qy. """
tune_split = int(qx) - int(qy)
return abs(qx - qy - tune_split)
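# Worked example: qx=62.305, qy=60.315 -> integer tune split 2,
# so the returned value is |62.305 - 60.315 - 2| = 0.01.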
# General Matching function ----------------------------------------------------
def match(madx: Madx, sequence: str, knobs: Sequence[str],
step: float = 1e-7, tolerance: float = 1e-21, calls: float = 100,
chrom=False, simplex=False, **kwargs):
""" Match the `knobs` to the settings in `kwargs`.
Args:
madx: Madx instance
sequence: Sequence to use
knobs: Sequence of variables to match
chrom: use the `chrom` flag in match
step: step size to vary knob
calls: number of varying calls
        tolerance: (final) tolerance for successful matching
Keyword Args:
Arguments for the MAD-X `global` command to be matched at,
e.g. `q1=`, `dqy=` etc.
"""
LOG.info(f"Matching knobs '{', '.join(knobs)}' for sequence '{sequence}'.")
madx.command.match(chrom=chrom)
madx.command.global_(sequence=sequence, **kwargs)
for name in knobs:
madx.command.vary(name=name, step=step)
if simplex:
madx.command.simplex(calls=calls, tolerance=tolerance)
else:
madx.command.lmdif(calls=calls, tolerance=tolerance)
madx.command.endmatch()
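# Usage sketch for `match` (hedged: the sequence and knob names below are
# placeholders; use whichever tune knobs are defined in the loaded optics):
#
#   match(madx, 'lhcb1', ['kqtf.b1', 'kqtd.b1'],
#         q1=62.31, q2=60.32, step=1e-8, calls=100, tolerance=1e-10)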
# Other Approaches
``` |
{
"source": "JoschD/plofeld",
"score": 3
} |
#### File: plofeld/utils/plotting.py
```python
from typing import Sequence
import numpy as np
from matplotlib.axes import Axes
from matplotlib.lines import Line2D
from matplotlib.patches import Circle, Polygon
from matplotlib.textpath import TextPath
from plofeld.utils.classes import Vector
from plofeld.utils.constants import CHARGE_COLORS, ZORDER, CHARGE_MARKERS, EPS
from plofeld.elements import PointCharge
def charge_to_circle(charge: PointCharge, kind: str) -> Circle:
"""Create the Circle Patch from a charge.
Args:
charge (PointCharge): The PointCharge object
kind (str): Field type
"""
color = charge.color
if color is None:
color = CHARGE_COLORS[kind][np.sign(charge.q)]
return Circle(
(charge.x, charge.y),
radius=charge.radius, linewidth=charge.linewidth,
facecolor=color, edgecolor='k',
zorder=ZORDER['charges'],
)
def charge_to_marker(charge: PointCharge, kind: str) -> Polygon:
"""Create the marker polygon for a charge.
Args:
charge (PointCharge): The PointCharge object
kind (str): Field type
"""
marker = charge.marker
if marker == 'default':
marker = CHARGE_MARKERS[kind][np.sign(charge.q)]
# let mpl create a text-path but convert it to polygon manually
# (TextPath objects are a bit weird and did not behave as I expected)
path = TextPath((0, 0), marker, size=charge.radius*1.7)
extend = path.get_extents()
polygon = path.to_polygons()[0]
# some manual move, because TextPath assumes real text
polygon = polygon - extend.min # put directly on origin
polygon = polygon - extend.size / 2 # move center to origin
polygon = polygon + charge.location.toarray() # move to charge location
return Polygon(polygon, facecolor="white", lw=0.2, zorder=ZORDER['marker'])
def add_arrow_to_line(line: Line2D, index: int = None, direction: str = 'right',
size: int = 15, color: str = None, **kwargs):
"""Add an arrow to the middle of a line.
Based on: https://stackoverflow.com/a/34018322/5609590
Args:
line (Line2D): Matplotlib Line2D object
        index (int): Index of the data point at which to place the arrow head.
If direction is ``'left'`` this can't be ``0``,
if direction is ``'right'`` this can't be the last index
(or ``-1``). If ``None`` is given, the middle element of
the line is chosen.
direction (str): Direction of the arrow, either ``'left'`` or ``'right'``.
Default: ``'right'``.
size (int): size of the arrow in fontsize points. Default: ``15``
color (str): Color of the arrow. If ``None``, the line color is taken.
Default: ``None``
Keyword Args:
Passed on to `Axes.annotate()` function.
"""
xdata = line.get_xdata()
ydata = line.get_ydata()
if color is None:
color = line.get_color()
if index is None:
index = len(xdata) // 2
end_index = index + (1 if direction == "right" else -1)
line.axes.annotate('',
xytext=(xdata[index], ydata[index]),
xy=(xdata[end_index], ydata[end_index]),
arrowprops=dict(arrowstyle="->", color=color),
size=size, **kwargs
)
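# Usage sketch (hedged: assumes a matplotlib Axes `ax` and data arrays `x`, `y`):
#
#   line, = ax.plot(x, y)
#   add_arrow_to_line(line)                       # arrow head at the middle
#   add_arrow_to_line(line, index=3, direction='left', color='red')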
def is_point_visible(ax: Axes, point: Vector):
"""True if point visible within current axes limits."""
xlim, ylim = ax.get_xlim(), ax.get_ylim()
return xlim[0] < point.x < xlim[1] and ylim[0] < point.y < ylim[1]
def went_straight_through_origin(vectors: Sequence[Vector]):
"""Checks whether the line of points went through the origin.
Assumes that the line is straight."""
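    # For a straight line through the origin, the first and last position
    # vectors point in opposite directions, so their unit vectors cancel
    # (the norm of their sum is ~0).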
a, b = vectors[0].unit(), vectors[-1].unit()
# Start and end vector are on opposite sides
if (a + b).norm() > EPS:
return False
return True
```
#### File: plofeld/utils/shapes.py
```python
from typing import List
import numpy as np
from plofeld.utils.classes import Vector
from plofeld.elements import PointCharge
def get_regular_polygon(n: int, skew: bool = False, r: float = 1) -> List[Vector]:
"""Generate the coordinates for a regular polygon with 2n corners around the origin.
Args:
n (int): Half of the corners.
skew (bool): Rotate polygon by half the angle between two poles. Default: ``False``
        r (float): All corners lie on a circle of radius ``r``. Default: ``1``
Returns:
A list of Vectors defining the corners of the polygon.
"""
n_poles = 2*n
phases = (np.arange(0, n_poles) + (0.5 * ~skew)) / n_poles
coordinates = r * np.exp(2 * np.pi * 1j * phases)
return [Vector(c.real, c.imag) for c in coordinates]
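# Worked example: get_regular_polygon(2) yields the four corners of a unit square
# placed on the diagonals (45, 135, 225, 315 degrees); with skew=True the corners
# land on the coordinate axes instead.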
def generate_mulitpole(n: int, skew: bool = False, **kwargs) -> List[PointCharge]:
"""Generate a multipole field consisting of 2n PointCharges with alternating
    polarity around the origin.
Args:
n (int): Half of the corners.
skew (bool): Rotate polygon by half the angle between two poles. Default: ``False``
Keyword Args:
Are passed on to PointCharge objects
    Returns:
List of 2n PointCharges defining the Multipole.
"""
charge_map = {0: -1, 1: 1}
coordinates = get_regular_polygon(n, skew)
return [PointCharge(vec, charge_map[idx % 2], **kwargs) for idx, vec in enumerate(coordinates)]
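# Usage sketch: generate_mulitpole(2) arranges four alternating point charges on
# the unit circle (a quadrupole-like pattern); generate_mulitpole(2, skew=True)
# rotates that pattern by half the pole spacing (45 degrees for n=2).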
``` |
{
"source": "joschnitzbauer/dalymi",
"score": 3
} |
#### File: dalymi/examples/simple.py
```python
from dalymi import Pipeline
from dalymi.resources import PandasCSV
import pandas as pd
# Define resources:
numbers_resource = PandasCSV(name='numbers', loc='numbers.csv', columns=['number'])
squares_resource = PandasCSV(name='squares', loc='squares.csv', columns=['number', 'square'])
# Define the pipeline
pl = Pipeline()
@pl.output(numbers_resource)
def create_numbers(**context):
return pd.DataFrame({'number': range(11)})
@pl.output(squares_resource)
@pl.input(numbers_resource)
def square_numbers(numbers, **context):
numbers['square'] = numbers['number']**2
return numbers
if __name__ == '__main__':
# Run the default command line interface
pl.cli()
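# Sketch: executing this script hands control to dalymi's default CLI; running it
# with `--help` (e.g. `python simple.py --help`) lists the available sub-commands
# for executing or inspecting the pipeline (exact names depend on the dalymi version).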
``` |
{
"source": "joschock/edk2-pytool-extensions",
"score": 2
} |
#### File: edk2toolext/capsule/pyopenssl_signer.py
```python
import logging
import warnings
from OpenSSL import crypto
def sign(data: bytes, signature_options: dict, signer_options: dict) -> bytes:
'''
primary signing interface. Takes n the signature_options and signer_options
dictionaries that are used by capsule_tool and capsule_helper
'''
# NOTE: Currently, we only support the necessary algorithms for capsules.
# The following _if_ clause handles the deprecated signature_option 'sign_alg' for backwards compatibility
# when the deprecated option is supplied, this code adds the new, required options based on prior code behavior
if 'sign_alg' in signature_options:
warnings.warn('Signature_option "sign_alg" is deprecated, use "type"', DeprecationWarning)
if signature_options['sign_alg'] == 'pkcs12':
# map legacy behavior to new options and backwards-compatible values
signature_options['type'] = 'bare'
signature_options['encoding'] = 'binary'
signer_options['key_file_format'] = 'pkcs12'
else:
raise ValueError(f"Unsupported signature algorithm: {signature_options['sign_alg']}!")
''' signature type 'bare' is just a binary signed digest, no PEM headers/footers or ASN '''
if signature_options['type'] != 'bare':
raise ValueError(f"Unsupported signature type: {signature_options['type']}")
if 'type_options' in signature_options:
raise ValueError("Signature type options not supported")
if signature_options['encoding'] != 'binary':
raise ValueError(f"Unsupported signature encoding: {signature_options['encoding']}")
if signature_options['hash_alg'] != 'sha256':
raise ValueError(f"Unsupported hashing algorithm: {signature_options['hash_alg']}")
if signer_options['key_file_format'] != 'pkcs12':
raise ValueError(f"Unsupported signer key file format: {signer_options['key_file_format']}")
logging.debug("Executing PKCS1 Signing")
# If a key file is provided, use it for signing.
if 'key_file' in signer_options:
with open(signer_options['key_file'], 'rb') as key_file:
signer_options['key_data'] = key_file.read()
# TODO: Figure out OIDs.
# TODO: Figure out EKU.
pkcs12 = crypto.load_pkcs12(signer_options['key_data'])
return crypto.sign(pkcs12.get_privatekey(), data, signature_options['hash_alg'])
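# Usage sketch (keys mirror the checks above; the payload and the .pfx path are
# placeholders):
#
#   signature_options = {'type': 'bare', 'encoding': 'binary', 'hash_alg': 'sha256'}
#   signer_options = {'key_file_format': 'pkcs12', 'key_file': 'signer.pfx'}
#   signature = sign(capsule_payload, signature_options, signer_options)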
```
#### File: edk2-pytool-extensions/edk2toolext/edk2_logging.py
```python
import logging
import os
import shutil
import re
try:
from edk2toollib.log import ansi_handler
except ImportError:
ansi_handler = None
try:
from edk2toollib.log import markdown_handler
except ImportError:
markdown_handler = None
try:
from edk2toollib.log import string_handler
except ImportError:
string_handler = None
try:
from edk2toollib.log import file_handler
except ImportError:
file_handler = logging
# These three are for emitting different events
# section is for marking different sections of the build process
# subsection is similar to sub section but denotes a subsection of the current section
# both of the section levels are high enough that they won't get filtered out
# progress is for marking things like a process completed. Similar to critical but doesn't mean the process is exiting
# progress is below critical so it can be turned off but still high enough that it doesn't get filtered out
SECTION = logging.CRITICAL + 2 # just above critical
SUB_SECTION = logging.CRITICAL + 1 # just above critical
PROGRESS = logging.CRITICAL - 1 # just below critical
# sub_directory is relative to ws argument
def clean_build_logs(ws, sub_directory=None):
# Make sure that we have a clean environment.
if sub_directory is None:
sub_directory = os.path.join("Build", "BuildLogs")
if os.path.isdir(os.path.join(ws, sub_directory)):
shutil.rmtree(os.path.join(ws, sub_directory))
def get_section_level():
return SECTION
def get_subsection_level():
return SUB_SECTION
def get_progress_level():
return PROGRESS
def get_edk2_filter(verbose=False):
gEdk2Filter = Edk2LogFilter()
if verbose:
gEdk2Filter.setVerbose(verbose)
return gEdk2Filter
def log_progress(message):
logging.log(get_progress_level(), message)
def setup_section_level():
# todo define section level
# add section as a level to the logger
section_level = get_section_level()
subsection_level = get_subsection_level()
progress_level = get_progress_level()
if logging.getLevelName(section_level) != "SECTION":
logging.addLevelName(section_level, "SECTION")
if logging.getLevelName(subsection_level) != "SUBSECTION":
logging.addLevelName(subsection_level, "SUBSECTION")
if logging.getLevelName(progress_level) != "PROGRESS":
logging.addLevelName(progress_level, "PROGRESS")
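# Once registered, the custom levels can be emitted like any other logging level:
#   logging.log(get_section_level(), "Building package X")
#   log_progress("Compilation finished")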
# creates the plaintext logger
def setup_txt_logger(directory, filename="log", logging_level=logging.INFO,
formatter=None, logging_namespace='', isVerbose=False):
logger = logging.getLogger(logging_namespace)
log_formatter = formatter
if log_formatter is None:
log_formatter = logging.Formatter("%(levelname)s - %(message)s")
if not os.path.isdir(directory):
os.makedirs(directory)
# Create file logger
logfile_path = os.path.join(directory, filename + ".txt")
filelogger = file_handler.FileHandler(filename=(logfile_path), mode='w+')
filelogger.setLevel(logging_level)
filelogger.setFormatter(log_formatter)
logger.addHandler(filelogger)
filelogger.addFilter(get_edk2_filter(isVerbose))
return logfile_path, filelogger
# creates the markdown logger
def setup_markdown_logger(directory, filename="log", logging_level=logging.INFO,
formatter=None, logging_namespace='', isVerbose=False):
logger = logging.getLogger(logging_namespace)
log_formatter = formatter
if log_formatter is None:
log_formatter = logging.Formatter("%(levelname)s - %(message)s")
if not os.path.isdir(directory):
os.makedirs(directory)
# add markdown handler
markdown_filename = filename + ".md"
markdown_path = os.path.join(directory, markdown_filename)
if markdown_handler:
markdownHandler = markdown_handler.MarkdownFileHandler(markdown_path, mode="w+")
else:
markdownHandler = logging.FileHandler(markdown_path, mode="w+")
markdownHandler.setFormatter(log_formatter)
if logging_level <= logging.DEBUG:
logging_level = logging.INFO # we don't show debugging output in markdown since it gets too full
markdownHandler.addFilter(get_edk2_filter(isVerbose))
markdownHandler.setLevel(logging_level)
logger.addHandler(markdownHandler)
return markdown_path, markdownHandler
# sets up a colored console logger
def setup_console_logging(logging_level=logging.INFO, formatter=None, logging_namespace='',
isVerbose=False, use_azure_colors=False, use_color=True):
if formatter is None and isVerbose:
formatter_msg = "%(name)s: %(levelname)s - %(message)s"
elif formatter is None:
formatter_msg = "%(levelname)s - %(message)s"
else:
formatter_msg = formatter
formatter = logging.Formatter(formatter_msg)
# create a safe handler so that any logging emitted when creating the ansi logger is handled
safeHandler = logging.StreamHandler()
safeHandler.setLevel(logging_level)
safeHandler.addFilter(get_edk2_filter(isVerbose))
safeHandler.setFormatter(formatter)
logger = logging.getLogger(logging_namespace)
logger.addHandler(safeHandler)
# create the ansi logger if needed
if (use_azure_colors or use_color) and ansi_handler:
formatter = ansi_handler.ColoredFormatter(formatter_msg, use_azure=use_azure_colors)
coloredHandler = ansi_handler.ColoredStreamHandler()
coloredHandler.setLevel(logging_level)
coloredHandler.addFilter(get_edk2_filter(isVerbose))
coloredHandler.setFormatter(formatter)
# make sure to remove the safe handler so we don't have two handlers
logger.removeHandler(safeHandler)
logger.addHandler(coloredHandler)
return coloredHandler
# return the safe handler if we didn't create a colored handler
return safeHandler
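# Illustrative setup sequence (a sketch; "Build/BuildLogs" is a hypothetical output
# folder and the default root logging namespace is assumed):
#   setup_section_level()
#   setup_console_logging(logging_level=logging.INFO)
#   txt_path, txt_handler = setup_txt_logger(os.path.join("Build", "BuildLogs"))
#   md_path, md_handler = setup_markdown_logger(os.path.join("Build", "BuildLogs"))
#   ... run the build ...
#   stop_logging([txt_handler, md_handler])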
def stop_logging(loghandle, logging_namespace=''):
logger = logging.getLogger(logging_namespace)
if loghandle is None:
return
if isinstance(loghandle, list):
# if it's an array, process each element as a handle
for handle in loghandle:
handle.close()
logger.removeHandler(handle)
else:
loghandle.close()
logger.removeHandler(loghandle)
def create_output_stream(level=logging.INFO, logging_namespace=''):
# creates an output stream that is in memory
if string_handler:
handler = string_handler.StringStreamHandler()
else:
handler = logging.StreamHandler()
logger = logging.getLogger(logging_namespace)
handler.setLevel(level)
logger.addHandler(handler)
return handler
def remove_output_stream(handler, logging_namespace=''):
logger = logging.getLogger(logging_namespace)
if isinstance(handler, list):
for single_handler in handler:
logger.removeHandler(single_handler)
else:
logger.removeHandler(handler)
def scan_compiler_output(output_stream):
# scan the stream for compiler, linker, and build errors/warnings, returning (level, message) tuples
def output_compiler_error(match, line, start_txt="Compiler"):
start, end = match.span()
source = line[:start]
error = line[end:]
num = match.group(1)
return f"{start_txt} #{num} from {source} {error}"
problems = []
output_stream.seek(0, 0)
error_exp = re.compile(r"error [A-EG-Z]?(\d+):")
edk2_error_exp = re.compile(r"error F(\d+):")
build_py_error_exp = re.compile(r"error (\d+)E:")
linker_error_exp = re.compile(r"error LNK(\d+):")
warning_exp = re.compile(r"warning [A-Z]?(\d+):")
for raw_line in output_stream.readlines():
line = raw_line.strip("\n").strip()
match = error_exp.search(line)
if match is not None:
error = output_compiler_error(match, line, "Compiler")
problems.append((logging.ERROR, error))
match = warning_exp.search(line)
if match is not None:
error = output_compiler_error(match, line, "Compiler")
problems.append((logging.WARNING, error))
match = linker_error_exp.search(line)
if match is not None:
error = output_compiler_error(match, line, "Linker")
problems.append((logging.ERROR, error))
match = edk2_error_exp.search(line)
if match is not None:
error = output_compiler_error(match, line, "EDK2")
problems.append((logging.ERROR, error))
match = build_py_error_exp.search(line)
if match is not None:
error = output_compiler_error(match, line, "Build.py")
problems.append((logging.ERROR, error))
return problems
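# Illustrative usage (a sketch; assumes the captured build output is available as a
# seekable text stream such as io.StringIO):
#   stream = io.StringIO(captured_build_output)
#   for level, message in scan_compiler_output(stream):
#       logging.log(level, message)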
class Edk2LogFilter(logging.Filter):
_allowedLoggers = ["root"]
def __init__(self):
logging.Filter.__init__(self)
self._verbose = False
self._currentSection = "root"
def setVerbose(self, isVerbose=True):
self._verbose = isVerbose
def addSection(self, section):
# TODO request the global singleton?
# how to make this class static
Edk2LogFilter._allowedLoggers.append(section)
def filter(self, record):
# drop low-severity records from loggers that have not been explicitly allowed (unless verbose)
if record.name not in Edk2LogFilter._allowedLoggers and record.levelno < logging.WARNING and not self._verbose:
return False
return True
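# Illustrative usage (a sketch): records below WARNING from loggers other than
# "root" are dropped unless their name has been added as an allowed section, e.g.:
#   build_filter = get_edk2_filter()
#   build_filter.addSection("MyBuildStage")
#   logging.getLogger("MyBuildStage").info("now passes this filter at INFO")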
```
#### File: environment/plugintypes/dsc_processor_plugin.py
```python
class IDscProcessorPlugin(object):
##
# does the transform on the DSC
#
# @param dsc - the in-memory model of the DSC
# @param thebuilder - UefiBuild object to get env information
#
# @return 0 for success NonZero for error.
##
def do_transform(self, dsc, thebuilder):
return 0
##
# gets the level that this transform operates at
#
# @param thebuilder - UefiBuild object to get env information
#
# @return 0 for the most generic level
##
def get_level(self, thebuilder):
return 0
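# Illustrative sketch of a concrete plugin (hypothetical; not part of the interface):
#   class NoOpDscProcessor(IDscProcessorPlugin):
#       def do_transform(self, dsc, thebuilder):
#           # inspect or mutate the in-memory DSC model here
#           return 0
#       def get_level(self, thebuilder):
#           return 0   # most generic level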
```
#### File: edk2toolext/tests/test_version_aggregator.py
```python
import unittest
from edk2toolext.environment import version_aggregator
class TestVersionAggregator(unittest.TestCase):
def setUp(self):
version_aggregator.ResetVersionAggregator()
pass
def tearDown(self):
pass
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
def test_Singleton(self):
version1 = version_aggregator.GetVersionAggregator()
version2 = version_aggregator.GetVersionAggregator()
self.assertEqual(version1, version2)
self.assertIsNotNone(version1)
version3 = version_aggregator.version_aggregator()
version4 = version_aggregator.version_aggregator()
self.assertNotEqual(version3, version4)
def test_ReportVersions(self):
version1 = version_aggregator.version_aggregator()
version1.ReportVersion("test", "test", version_aggregator.VersionTypes.TOOL)
self.assertEqual(len(version1.GetAggregatedVersionInformation()), 1)
pass
def test_ReportVersions_collision(self):
version1 = version_aggregator.version_aggregator()
version1.ReportVersion("test", "test", version_aggregator.VersionTypes.TOOL)
# if keys don't collide we are good
version1.ReportVersion("test2", "test", version_aggregator.VersionTypes.COMMIT)
version1.ReportVersion("test3", "test", version_aggregator.VersionTypes.BINARY)
version1.ReportVersion("test4", "test", version_aggregator.VersionTypes.INFO)
# we're good to report the same thing twice as long as it matches
version1.ReportVersion("test", "test", version_aggregator.VersionTypes.TOOL)
with self.assertRaises(ValueError):
version1.ReportVersion("test", "test2", version_aggregator.VersionTypes.INFO)
def test_GetInformation(self):
version1 = version_aggregator.version_aggregator()
test_ver = ["I think", "I exist"]
version1.ReportVersion("I think", "therefore", version_aggregator.VersionTypes.INFO)
version1.ReportVersion("I exist", "proof", version_aggregator.VersionTypes.INFO)
test2_ver = list(version1.GetAggregatedVersionInformation().keys())
self.assertListEqual(test_ver, test2_ver)
version1.ReportVersion("test", "test", version_aggregator.VersionTypes.TOOL)
self.assertEqual(len(test2_ver), 2)
test2_ver = list(version1.GetAggregatedVersionInformation().keys())
self.assertEqual(len(test2_ver), 3)
def test_reset(self):
version1 = version_aggregator.version_aggregator()
version1.ReportVersion("test", "I exist", version_aggregator.VersionTypes.INFO)
self.assertEqual(len(version1.GetAggregatedVersionInformation()), 1)
version1.Reset()
self.assertEqual(len(version1.GetAggregatedVersionInformation()), 0)
def test_global_reset(self):
version1 = version_aggregator.version_aggregator()
version1.ReportVersion("test", "I exist", version_aggregator.VersionTypes.INFO)
self.assertEqual(len(version1.GetAggregatedVersionInformation()), 1)
version1 = version_aggregator.version_aggregator()
version_aggregator.ResetVersionAggregator()
self.assertEqual(len(version1.GetAggregatedVersionInformation()), 0)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joschock/edk2-pytool-library",
"score": 2
} |
#### File: edk2/parsers/dec_parser_test.py
```python
import unittest
import uuid
import io
from edk2toollib.uefi.edk2.parsers.dec_parser import LibraryClassDeclarationEntry
from edk2toollib.uefi.edk2.parsers.dec_parser import GuidDeclarationEntry
from edk2toollib.uefi.edk2.parsers.dec_parser import PpiDeclarationEntry
from edk2toollib.uefi.edk2.parsers.dec_parser import ProtocolDeclarationEntry
from edk2toollib.uefi.edk2.parsers.dec_parser import PcdDeclarationEntry
from edk2toollib.uefi.edk2.parsers.dec_parser import DecParser
class TestGuidDeclarationEntry(unittest.TestCase):
def test_valid_input_guid(self):
SAMPLE_DATA_GUID_DECL = '''gTestGuid = { 0x66341ae8, 0x668f, 0x4192, { 0xb4, 0x4d, 0x5f, 0x87, 0xb8, 0x68, 0xf0, 0x41 }}''' # noqa: E501
SAMPLE_DATA_GUID_STRING_REG_FORMAT = "{66341ae8-668f-4192-b44d-5f87b868f041}"
a = GuidDeclarationEntry("TestPkg", SAMPLE_DATA_GUID_DECL)
con = uuid.UUID(SAMPLE_DATA_GUID_STRING_REG_FORMAT)
self.assertEqual(str(con), str(a.guid))
self.assertEqual(a.name, "gTestGuid")
self.assertEqual(a.package_name, "TestPkg")
def test_valid_input_leading_zero_removed(self):
SAMPLE_DATA_GUID_DECL = '''gTestGuid = { 0x6341ae8, 0x68f, 0x192, { 0x4, 0xd, 0xf, 0x7, 0x8, 0x8, 0x0, 0x1 }}''' # noqa: E501
SAMPLE_DATA_GUID_STRING_REG_FORMAT = "{06341ae8-068f-0192-040d-0f0708080001}"
a = GuidDeclarationEntry("testpkg", SAMPLE_DATA_GUID_DECL)
con = uuid.UUID(SAMPLE_DATA_GUID_STRING_REG_FORMAT)
self.assertEqual(str(con), str(a.guid))
def test_invalid_guid_format(self):
SAMPLE_DATA_GUID_DECL = '''gTestGuid = 0x6341ae8, 0x668f, 0x4192, 0xb4, 0x4d, 0x5f, 0x87, 0xb8, 0x68, 0xf0, 0x41''' # noqa: E501
with self.assertRaises(ValueError):
GuidDeclarationEntry("testpkg", SAMPLE_DATA_GUID_DECL)
class TestPpiDeclarationEntry(unittest.TestCase):
def test_valid_input_guid(self):
SAMPLE_DATA_PPI_DECL = """gTestPpiGuid = {0xa66cd455, 0xc078, 0x4753, {0xbe, 0x93, 0xdd, 0x58, 0xb2, 0xaf, 0xe9, 0xc4}}""" # noqa: E501
SAMPLE_DATA_PPI_STRING_REG_FORMAT = "{a66cd455-c078-4753-be93-dd58b2afe9c4}"
a = PpiDeclarationEntry("testpkg", SAMPLE_DATA_PPI_DECL)
con = uuid.UUID(SAMPLE_DATA_PPI_STRING_REG_FORMAT)
self.assertEqual(str(con), str(a.guid))
self.assertEqual(a.name, "gTestPpiGuid")
self.assertEqual(a.package_name, "testpkg")
class TestProtocolDeclarationEntry(unittest.TestCase):
def test_valid_input_guid(self):
SAMPLE_DATA_PROTOCOL_DECL = """gTestProtocolGuid = {0xb6d12b5a, 0x5338, 0x44ac, {0xac, 0x31, 0x1e, 0x9f, 0xa8, 0xc7, 0xe0, 0x1e}}""" # noqa: E501
SAMPLE_DATA_PROTOCOL_GUID_REG_FORMAT = "{b6d12b5a-5338-44ac-ac31-1e9fa8c7e01e}"
a = ProtocolDeclarationEntry("testpkg", SAMPLE_DATA_PROTOCOL_DECL)
con = uuid.UUID(SAMPLE_DATA_PROTOCOL_GUID_REG_FORMAT)
self.assertEqual(str(con), str(a.guid))
self.assertEqual(a.name, "gTestProtocolGuid")
self.assertEqual(a.package_name, "testpkg")
class TestLibraryClassDeclarationEntry(unittest.TestCase):
def test_valid_input(self):
SAMPLE_DATA_DECL = """BmpSupportLib|Include/Library/BmpSupportLib.h"""
a = LibraryClassDeclarationEntry("testpkg", SAMPLE_DATA_DECL)
self.assertEqual(a.path, "Include/Library/BmpSupportLib.h")
self.assertEqual(a.name, "BmpSupportLib")
class TestPcdDeclarationEntry(unittest.TestCase):
def test_valid_input(self):
SAMPLE_DATA_DECL = """gEfiMdeModulePkgTokenSpaceGuid.PcdSupportUpdateCapsuleReset|FALSE|BOOLEAN|0x0001001d"""
a = PcdDeclarationEntry("testpkg", SAMPLE_DATA_DECL)
self.assertEqual(a.token_space_name, "gEfiMdeModulePkgTokenSpaceGuid")
self.assertEqual(a.name, "PcdSupportUpdateCapsuleReset")
self.assertEqual(a.default_value, "FALSE")
self.assertEqual(a.type, "BOOLEAN")
self.assertEqual(a.id, "0x0001001d")
def test_invalid_input_no_tokenspace(self):
SAMPLE_DATA_DECL = """garbage|FALSE|BOOLEAN|0x0001001d"""
with self.assertRaises(Exception):
PcdDeclarationEntry("testpkg", SAMPLE_DATA_DECL)
def test_invalid_input_too_many_fields(self):
SAMPLE_DATA_DECL = """garbage.garbageNAME|FALSE|BOOLEAN|0x0001001d|morestuff|questions|this|should|fail"""
with self.assertRaises(Exception):
PcdDeclarationEntry("testpkg", SAMPLE_DATA_DECL)
class TestDecParser(unittest.TestCase):
SAMPLE_DEC_FILE = \
"""## @file
TestDecFile
##
[Defines]
DEC_SPECIFICATION = 0x00010005
PACKAGE_NAME = TestDecParserPkg
PACKAGE_UNI_FILE = TestDecParserPkg.uni
PACKAGE_GUID = 57e8a49e-1b3f-41a0-a552-55ad831c15a8
PACKAGE_VERSION = 0.1
[Includes]
Include
[LibraryClasses]
## @libraryclass Provide comment for fakelib
#
FakeLib|Include/Library/FakeLib.h
[Guids]
gFakeTokenSpace = {0x7c004f69, 0xd730, 0x4904, {0x83, 0xe1, 0x4b, 0xbf, 0x39, 0x3a, 0xfc, 0xb1}}
gFake2TokenSpace = {<KEY>, 0xd730, 0x4904, {0x83, 0xe1, 0x4b, 0xbf, 0x39, 0x3a, 0xfc, 0xb2}}
gFakeT3okenSpace = {0x7c004f69, 0xd730, 0x4904, {0x83, 0xe1, 0x4b, 0xbf, 0x39, 0x3a, 0xfc, 0xb3}}
#
# [Error.gPcAtChipsetPkgTokenSpaceGuid]
# 0x80000001 | Invalid value provided.
#
[Protocols]
## None
gFakeProtocol = {0xe63e2ccd, 0x786e, 0x4754, {0x96, 0xf8, 0x5e, 0x88, 0xa3, 0xf0, 0xaf, 0x85}}
[Ppis]
gFakePpi = {0xeeef868e, 0x5bf5, 0x4e48, {0x92, 0xa1, 0xd7, 0x6e, 0x02, 0xe5, 0xb9, 0xa7}}
gFake2Ppi = {0xeeef868e, 0x5bf5, 0x4e48, {0x92, 0xa1, 0xd7, 0x6e, 0x02, 0xe5, 0xb9, 0xa8}}
## Copied PCDs from PcAtChipsetPkg for testing
[PcdsFeatureFlag]
## Indicates the HPET Timer will be configured to use MSI interrupts if the HPET
# TRUE - Configures the HPET Timer to use MSI interrupts if the HPET Timer supports them.<BR>
# FALSE - Configures the HPET Timer to use I/O APIC interrupts.<BR>
# @Prompt Configure HPET to use MSI.
gPcAtChipsetPkgTokenSpaceGuid.PcdHpetMsiEnable|TRUE|BOOLEAN|0x00001000
[PcdsFixedAtBuild, PcdsDynamic, PcdsDynamicEx, PcdsPatchableInModule]
## Pcd8259LegacyModeMask defines the default mask value for platform. This value is determined<BR><BR>
# 1) If platform only support pure UEFI, value should be set to 0xFFFF or 0xFFFE;
# Because only clock interrupt is allowed in legacy mode in pure UEFI platform.<BR>
# 2) If platform install CSM and use thunk module:<BR>
# a) If thunk call provided by CSM binary requires some legacy interrupt support, the corresponding bit
# should be opened as 0.<BR>
# For example, if keyboard interfaces provided CSM binary use legacy keyboard interrupt in 8259 bit 1, then
# the value should be set to 0xFFFC.<BR>
# b) If all thunk call provided by CSM binary do not require legacy interrupt support, value should be set
# to 0xFFFF or 0xFFFE.<BR>
#
# The default value of legacy mode mask could be changed by EFI_LEGACY_8259_PROTOCOL->SetMask(). But it is rarely
# need change it except some special cases such as when initializing the CSM bin
gPcAtChipsetPkgTokenSpaceGuid.Pcd8259LegacyModeMask|0xFFFF|UINT16|0x00000001
## Pcd8259LegacyModeEdgeLevel defines the default edge level for legacy mode's interrupt controller.
# For the corresponding bits, 0 = Edge triggered and 1 = Level triggered.
# @Prompt 8259 Legacy Mode edge level.
gPcAtChipsetPkgTokenSpaceGuid.Pcd8259LegacyModeEdgeLevel|0x0000|UINT16|0x00000002
## Indicates if we need enable IsaAcpiCom1 device.<BR><BR>
# TRUE - Enables IsaAcpiCom1 device.<BR>
# FALSE - Doesn't enable IsaAcpiCom1 device.<BR>
# @Prompt Enable IsaAcpiCom1 device.
gPcAtChipsetPkgTokenSpaceGuid.PcdIsaAcpiCom1Enable|TRUE|BOOLEAN|0x00000003
## Indicates if we need enable IsaAcpiCom2 device.<BR><BR>
# TRUE - Enables IsaAcpiCom2 device.<BR>
# FALSE - Doesn't enable IsaAcpiCom2 device.<BR>
# @Prompt Enable IsaAcpiCom12 device.
gPcAtChipsetPkgTokenSpaceGuid.PcdIsaAcpiCom2Enable|TRUE|BOOLEAN|0x00000004
## Indicates if we need enable IsaAcpiPs2Keyboard device.<BR><BR>
# TRUE - Enables IsaAcpiPs2Keyboard device.<BR>
# FALSE - Doesn't enable IsaAcpiPs2Keyboard device.<BR>
# @Prompt Enable IsaAcpiPs2Keyboard device.
gPcAtChipsetPkgTokenSpaceGuid.PcdIsaAcpiPs2KeyboardEnable|TRUE|BOOLEAN|0x00000005
## Indicates if we need enable IsaAcpiPs2Mouse device.<BR><BR>
# TRUE - Enables IsaAcpiPs2Mouse device.<BR>
# FALSE - Doesn't enable IsaAcpiPs2Mouse device.<BR>
# @Prompt Enable IsaAcpiPs2Mouse device.
gPcAtChipsetPkgTokenSpaceGuid.PcdIsaAcpiPs2MouseEnable|TRUE|BOOLEAN|0x00000006
## Indicates if we need enable IsaAcpiFloppyA device.<BR><BR>
# TRUE - Enables IsaAcpiFloppyA device.<BR>
# FALSE - Doesn't enable IsaAcpiFloppyA device.<BR>
# @Prompt Enable IsaAcpiFloppyA device.
gPcAtChipsetPkgTokenSpaceGuid.PcdIsaAcpiFloppyAEnable|TRUE|BOOLEAN|0x00000007
## Indicates if we need enable IsaAcpiFloppyB device.<BR><BR>
# TRUE - Enables IsaAcpiFloppyB device.<BR>
# FALSE - Doesn't enable IsaAcpiFloppyB device.<BR>
# @Prompt Enable IsaAcpiFloppyB device.
gPcAtChipsetPkgTokenSpaceGuid.PcdIsaAcpiFloppyBEnable|TRUE|BOOLEAN|0x00000008
## This PCD specifies the base address of the HPET timer.
# @Prompt HPET base address.
gPcAtChipsetPkgTokenSpaceGuid.PcdHpetBaseAddress|0xFED00000|UINT32|0x00000009
## This PCD specifies the Local APIC Interrupt Vector for the HPET Timer.
# @Prompt HPET local APIC vector.
gPcAtChipsetPkgTokenSpaceGuid.PcdHpetLocalApicVector|0x40|UINT8|0x0000000A
## This PCD specifies the default period of the HPET Timer in 100 ns units.
# The default value of 100000 100 ns units is the same as 10 ms.
# @Prompt Default period of HPET timer.
gPcAtChipsetPkgTokenSpaceGuid.PcdHpetDefaultTimerPeriod|100000|UINT64|0x0000000B
## This PCD specifies the base address of the IO APIC.
# @Prompt IO APIC base address.
gPcAtChipsetPkgTokenSpaceGuid.PcdIoApicBaseAddress|0xFEC00000|UINT32|0x0000000C
## This PCD specifies the minimal valid year in RTC.
# @Prompt Minimal valid year in RTC.
gPcAtChipsetPkgTokenSpaceGuid.PcdMinimalValidYear|1998|UINT16|0x0000000D
## This PCD specifies the maximal valid year in RTC.
# @Prompt Maximal valid year in RTC.
# @Expression 0x80000001 | gPcAtChipsetPkgTokenSpaceGuid.PcdMaximalValidYear
gPcAtChipsetPkgTokenSpaceGuid.PcdMaximalValidYear|2097|UINT16|0x0000000E
[PcdsFixedAtBuild, PcdsPatchableInModule]
## Defines the ACPI register set base address.
# The invalid 0xFFFF is as its default value. It must be configured to the real value.
# @Prompt ACPI Timer IO Port Address
gPcAtChipsetPkgTokenSpaceGuid.PcdAcpiIoPortBaseAddress |0xFFFF|UINT16|0x00000010
## Defines the PCI Bus Number of the PCI device that contains the BAR and Enable for ACPI hardware registers.
# @Prompt ACPI Hardware PCI Bus Number
gPcAtChipsetPkgTokenSpaceGuid.PcdAcpiIoPciBusNumber | 0x00| UINT8|0x00000011
## Defines the PCI Device Number of the PCI device that contains the BAR and Enable for ACPI hardware registers.
# The invalid 0xFF is as its default value. It must be configured to the real value.
# @Prompt ACPI Hardware PCI Device Number
gPcAtChipsetPkgTokenSpaceGuid.PcdAcpiIoPciDeviceNumber | 0xFF| UINT8|0x00000012
## Defines the PCI Function Number of the PCI device that contains the BAR and Enable for ACPI hardware registers.
# The invalid 0xFF is as its default value. It must be configured to the real value.
# @Prompt ACPI Hardware PCI Function Number
gPcAtChipsetPkgTokenSpaceGuid.PcdAcpiIoPciFunctionNumber | 0xFF| UINT8|0x00000013
## Defines the PCI Register Offset of the PCI device that contains the Enable for ACPI hardware registers.
# The invalid 0xFFFF is as its default value. It must be configured to the real value.
# @Prompt ACPI Hardware PCI Register Offset
gPcAtChipsetPkgTokenSpaceGuid.PcdAcpiIoPciEnableRegisterOffset |0xFFFF|UINT16|0x00000014
## Defines the bit mask that must be set to enable the APIC hardware register BAR.
# @Prompt ACPI Hardware PCI Bar Enable BitMask
gPcAtChipsetPkgTokenSpaceGuid.PcdAcpiIoBarEnableMask | 0x00| UINT8|0x00000015
## Defines the PCI Register Offset of the PCI device that contains the BAR for ACPI hardware registers.
# The invalid 0xFFFF is as its default value. It must be configured to the real value.
# @Prompt ACPI Hardware PCI Bar Register Offset
gPcAtChipsetPkgTokenSpaceGuid.PcdAcpiIoPciBarRegisterOffset |0xFFFF|UINT16|0x00000016
## Defines the offset to the 32-bit Timer Value register that resides within the ACPI BAR.
# @Prompt Offset to 32-bit Timer register in ACPI BAR
gPcAtChipsetPkgTokenSpaceGuid.PcdAcpiPm1TmrOffset |0x0008|UINT16|0x00000017
## Defines the bit mask to retrieve ACPI IO Port Base Address
# @Prompt ACPI IO Port Base Address Mask
gPcAtChipsetPkgTokenSpaceGuid.PcdAcpiIoPortBaseAddressMask |0xFFFE|UINT16|0x00000018
## Reset Control Register address in I/O space.
# @Prompt Reset Control Register address
gPcAtChipsetPkgTokenSpaceGuid.PcdResetControlRegister|0x64|UINT64|0x00000019
## 8bit Reset Control Register value for cold reset.
# @Prompt Reset Control Register value for cold reset
gPcAtChipsetPkgTokenSpaceGuid.PcdResetControlValueColdReset|0xFE|UINT8|0x0000001A
## Specifies the initial value for Register_A in RTC.
# @Prompt Initial value for Register_A in RTC.
gPcAtChipsetPkgTokenSpaceGuid.PcdInitialValueRtcRegisterA|0x26|UINT8|0x0000001B
## Specifies the initial value for Register_B in RTC.
# @Prompt Initial value for Register_B in RTC.
gPcAtChipsetPkgTokenSpaceGuid.PcdInitialValueRtcRegisterB|0x02|UINT8|0x0000001C
## Specifies the initial value for Register_D in RTC.
# @Prompt Initial value for Register_D in RTC.
gPcAtChipsetPkgTokenSpaceGuid.PcdInitialValueRtcRegisterD|0x00|UINT8|0x0000001D
## Specifies RTC Index Register address in I/O space.
# @Prompt RTC Index Register address
gPcAtChipsetPkgTokenSpaceGuid.PcdRtcIndexRegister|0x70|UINT8|0x0000001E
## Specifies RTC Target Register address in I/O space.
# @Prompt RTC Target Register address
gPcAtChipsetPkgTokenSpaceGuid.PcdRtcTargetRegister|0x71|UINT8|0x0000001F
[PcdsFixedAtBuild]
## Defines the UART base address.
# @Prompt UART IO Port Base Address
gPcAtChipsetPkgTokenSpaceGuid.PcdUartIoPortBaseAddress |0x3F8|UINT16|0x00000020 ## MS_CHANGE
[UserExtensions.TianoCore."ExtraFiles"]
PcAtChipsetPkgExtra.uni
"""
def test_valid_input(self):
a = DecParser()
st = io.StringIO(TestDecParser.SAMPLE_DEC_FILE)
a.ParseStream(st)
self.assertEqual(a.Dict["PACKAGE_NAME"], "TestDecParserPkg")
self.assertEqual(a.Dict["PACKAGE_GUID"], "57e8a49e-1b3f-41a0-a552-55ad831c15a8")
self.assertEqual(len(a.Guids), 3)
self.assertEqual(len(a.Protocols), 1)
self.assertEqual(len(a.PPIs), 2)
``` |
{
"source": "joschock/mu_basecore",
"score": 2
} |
#### File: Plugin/CharEncodingCheck/CharEncodingCheck.py
```python
import os
import logging
from MuEnvironment.PluginManager import IMuBuildPlugin
##
# map
EncodingMap = {
".md": 'utf-8',
".dsc": 'utf-8',
".dec": 'utf-8',
".c": 'utf-8',
".h": 'utf-8',
".asm": 'utf-8',
".masm": 'utf-8',
".nasm": 'utf-8',
".s": 'utf-8',
".inf": 'utf-8',
".asl": 'utf-8',
".uni": 'utf-8',
".py": 'utf-8'
}
class CharEncodingCheck(IMuBuildPlugin):
def GetTestName(self, packagename, environment):
return ("MuBuild CharEncodingCheck " + packagename, "MuBuild.CharEncodingCheck." + packagename)
# - package is the edk2 path to package. This means workspace/packagepath relative.
# - edk2path object configured with workspace and packages path
# - any additional command line args
# - RepoConfig Object (dict) for the build
# - PkgConfig Object (dict) for the pkg
# - EnvConfig Object
# - Plugin Manager Instance
# - Plugin Helper Obj Instance
# - testclass Object used for outputting junit results
# - output_stream the StringIO output stream from this plugin
def RunBuildPlugin(self, packagename, Edk2pathObj, args, repoconfig, pkgconfig, environment, PLM, PLMHelper, tc, output_stream = None):
overall_status = 0
files_tested = 0
abs_pkg_path = Edk2pathObj.GetAbsolutePathOnThisSytemFromEdk2RelativePath(packagename)
if abs_pkg_path is None:
tc.SetSkipped()
tc.LogStdError("No Package folder {0}".format(abs_pkg_path))
return 0
for (ext, enc) in EncodingMap.items():
files = self.WalkDirectoryForExtension([ext], abs_pkg_path)
files = [Edk2pathObj.GetEdk2RelativePathFromAbsolutePath(x) for x in files] # make edk2relative path so can process ignores
if "IgnoreFiles" in pkgconfig:
for a in pkgconfig["IgnoreFiles"]:
a = a.lower().replace(os.sep, "/")
try:
tc.LogStdOut("Ignoring File {0}".format(a))
files.remove(a)
except ValueError:
tc.LogStdError("CharEncodingCheck.IgnoreInf -> {0} not found in filesystem. Invalid ignore file".format(a))
logging.info("CharEncodingCheck.IgnoreInf -> {0} not found in filesystem. Invalid ignore file".format(a))
files = [Edk2pathObj.GetAbsolutePathOnThisSytemFromEdk2RelativePath(x) for x in files]
for a in files:
files_tested += 1
if(self.TestEncodingOk(a, enc)):
logging.debug("File {0} Passed Encoding Check {1}".format(a, enc))
else:
tc.LogStdError("Encoding Failure in {0}. Not {1}".format(a, enc))
overall_status += 1
tc.LogStdOut("Tested Encoding on {0} files".format(files_tested))
if overall_status != 0:
tc.SetFailed("CharEncoding {0} Failed. Errors {1}".format(packagename, overall_status), "CHAR_ENCODING_CHECK_FAILED")
else:
tc.SetSuccess()
return overall_status
def TestEncodingOk(self, apath, encodingValue):
try:
with open(apath, "rb") as fobj:
fobj.read().decode(encodingValue)
except Exception as exp:
logging.error("Encoding failure: file: {0} type: {1}".format(apath, encodingValue))
logging.debug("EXCEPTION: while processing {1} - {0}".format(exp, apath))
return False
return True
def ValidateConfig(self, config, name):
validOptions = ["IgnoreFiles", "skip"]
for key in config:
if key not in validOptions:
raise Exception("Invalid config option {0} in {1}".format(key, name))
``` |
{
"source": "joschock/mu_plus",
"score": 2
} |
#### File: PagingAudit/Windows/PagingReportGenerator.py
```python
import logging
import operator
import glob
import json
import datetime
import os
import sys
import argparse
#Add script dir to path for import
sp = os.path.dirname(os.path.realpath(sys.argv[0]))
sys.path.append(sp)
from MemoryRangeObjects import *
from BinaryParsing import *
VERSION = "0.80"
class ParsingTool(object):
def __init__(self, DatFolderPath, PlatformName, PlatformVersion, Type):
self.Logger = logging.getLogger("ParsingTool")
self.MemoryAttributesTable = []
self.MemoryRangeInfo = []
self.PageDirectoryInfo = []
self.DatFolderPath = DatFolderPath
self.ErrorMsg = []
self.PlatformName = PlatformName
self.PlatformVersion = PlatformVersion
self.Type = Type
def Parse(self):
#Get Info Files
InfoFileList = glob.glob(os.path.join(self.DatFolderPath, "*MemoryInfo*.dat"))
Pte1gbFileList = glob.glob(os.path.join(self.DatFolderPath, "*1G*.dat"))
Pte2mbFileList = glob.glob(os.path.join(self.DatFolderPath, "*2M*.dat"))
Pte4kbFileList = glob.glob(os.path.join(self.DatFolderPath, "*4K*.dat"))
MatFileList = glob.glob(os.path.join(self.DatFolderPath, "*MAT*.dat"))
GuardPageFileList = glob.glob(os.path.join(self.DatFolderPath, "*GuardPage*.dat"))
logging.debug("Found %d Info Files" % len(InfoFileList))
logging.debug("Found %d 1gb Page Files" % len(Pte1gbFileList))
logging.debug("Found %d 2mb Page Files" % len(Pte2mbFileList))
logging.debug("Found %d 4kb Page Files" % len(Pte4kbFileList))
logging.debug("Found %d MAT Files" % len(MatFileList))
logging.debug("Found %d GuardPage Files" % len(GuardPageFileList))
# Parse each file, keeping PTEs and "Memory Ranges" separate
# Memory ranges are either "memory descriptions" for memory map types and TSEG
# or "memory contents" for loaded image information or IDT/GDT
for info in InfoFileList:
self.MemoryRangeInfo.extend(ParseInfoFile(info))
for pte1g in Pte1gbFileList:
self.PageDirectoryInfo.extend(Parse1gPages(pte1g))
for pte2m in Pte2mbFileList:
self.PageDirectoryInfo.extend(Parse2mPages(pte2m))
for pte4k in Pte4kbFileList:
self.PageDirectoryInfo.extend(Parse4kPages(pte4k))
for guardpage in GuardPageFileList:
self.PageDirectoryInfo.extend(ParseInfoFile(guardpage))
for mat in MatFileList:
self.MemoryAttributesTable.extend(ParseInfoFile(mat))
if len(self.PageDirectoryInfo) == 0:
self.ErrorMsg.append("No Memory Range info found in PTE files")
else:
# Sort in ascending order of physical start address
self.PageDirectoryInfo.sort(key=operator.attrgetter('PhysicalStart'))
#check for Page Table Overlap - this is an error
index = 0
maxindex = len(self.PageDirectoryInfo) - 1
while index < maxindex: #this will allow all comparisons to work
if(self.PageDirectoryInfo[index].overlap(self.PageDirectoryInfo[index+1])):
self.ErrorMsg.append("Page Table Entry Overlap. Index %d Overlapping %d at StartAddress 0x%X" %
(index, index+1, self.PageDirectoryInfo[index].PhysicalStart))
logging.error("PTE overlap index %d and %d. Base Address = 0x%x", index, index+1, self.PageDirectoryInfo[index].PhysicalStart)
index += 1
if len(self.MemoryRangeInfo) == 0:
self.ErrorMsg.append("No Memory Range info found in Info files")
# Match memory ranges to the page table entries
for pte in self.PageDirectoryInfo:
for mr in self.MemoryRangeInfo:
if pte.overlap(mr):
if mr.MemoryType is not None:
if (pte.PhysicalEnd > mr.PhysicalEnd) or (pte.PhysicalStart < mr.PhysicalStart):
logging.error("Memory range attribute does not cover entire page " + pte.pteDebugStr() +" " + mr.MemoryRangeToString())
self.ErrorMsg.append("Memory range attribute does not cover entire page. Base: 0x%X. "% (pte.PhysicalStart))
if pte.MemoryType is None:
pte.MemoryType = mr.MemoryType
else:
logging.error("Multiple memory types found for one region " + pte.pteDebugStr() +" " + mr.MemoryRangeToString())
self.ErrorMsg.append("Multiple memory types found for one region. Base: 0x%X. EFI Memory Type: %d and %d"% (pte.PhysicalStart, pte.MemoryType,mr.MemoryType))
if mr.ImageName is not None:
if pte.ImageName is None:
pte.ImageName = mr.ImageName
else:
self.ErrorMsg.append("Multiple memory contents found for one region. Base: 0x%X. Memory Contents: %s and %s" % (pte.PhysicalStart, pte.ImageName, mr.ImageName ))
logging.error("Multiple memory contents found for one region " +pte.pteDebugStr() + " " + mr.LoadedImageEntryToString())
if(mr.SystemMemoryType is not None):
if(pte.SystemMemoryType is None):
pte.SystemMemoryType = mr.SystemMemoryType
else:
self.ErrorMsg.append("Multiple System Memory types found for one region. Base: 0x%X. EFI Memory Type: %s and %s."% (pte.PhysicalStart,pte.SystemMemoryType, mr.SystemMemoryType))
logging.error("Multiple system memory types found for one region " +pte.pteDebugStr() + " " + mr.LoadedImageEntryToString())
for MatEntry in self.MemoryAttributesTable:
if pte.overlap(MatEntry):
pte.Attribute = MatEntry.Attribute
# Combining adjacent PTEs that have the same attributes.
index = 0
while index < (len(self.PageDirectoryInfo) - 1):
currentPte = self.PageDirectoryInfo[index]
nextPte = self.PageDirectoryInfo[index + 1]
if currentPte.sameAttributes(nextPte):
currentPte.grow(nextPte)
del self.PageDirectoryInfo[index + 1]
else:
index += 1
return 0
def AddErrorMsg(self, msg):
self.ErrorMsg.append(msg)
def OutputHtmlReport(self, ToolVersion, OutputFilePath):
# Create the dictionary to produce a JSON string.
json_dict = {
'ToolVersion': ToolVersion,
'PlatformVersion': self.PlatformVersion,
'PlatformName': self.PlatformName,
'DateCollected': datetime.datetime.strftime(datetime.datetime.now(), "%A, %B %d, %Y %I:%M%p" ),
}
# Process all of the Page Infos and add them to the JSON.
pde_infos = []
for pde in self.PageDirectoryInfo:
info_dict = pde.toDictionary()
# Check for errors.
if info_dict['Section Type'] == "ERROR":
self.AddErrorMsg("Page Descriptor at %s has an error parsing the Section Type." % info_dict['Start'])
pde_infos.append(info_dict)
json_dict['MemoryRanges'] = pde_infos
# Finally, add any errors and produce the JSON string.
json_dict['errors'] = self.ErrorMsg
js = json.dumps(json_dict)
#
# Open template and replace placeholder with json
#
f = open(OutputFilePath, "w")
if self.Type == 'DXE':
template = open(os.path.join(sp, "DxePaging_template.html"), "r")
else:
template = open(os.path.join(sp, "SmmPaging_template.html"), "r")
for line in template.readlines():
if "%TO_BE_FILLED_IN_BY_PYTHON_SCRIPT%" in line:
line = line.replace("%TO_BE_FILLED_IN_BY_PYTHON_SCRIPT%", js)
f.write(line)
template.close()
f.close()
return 0
#
# Parse and Validate Args. Then run the tool
#
def main():
parser = argparse.ArgumentParser(description='Parse Paging information and generate HTML report')
parser.add_argument('-i', "--InputFolderPath", dest="InputFolder", help="Path to folder containing the DAT files from the UEFI shell tool (default is CWD)", default=os.getcwd())
parser.add_argument('-o', "--OutputReport", dest="OutputReport", help="Path to output html report (default is report.html)", default=os.path.join(os.getcwd(), "report.html"))
parser.add_argument('-p', "--PlatformName", dest="PlatformName", help="Name of Platform. Will show up on report", default="Test Platform")
parser.add_argument('-t', "--type", choices=['SMM', 'DXE'], dest="Type", help="SMM or DXE Paging Report", required=True)
parser.add_argument("--PlatformVersion", dest="PlatformVersion", help="Version of Platform. Will show up report", default="1.0.0")
#Turn on debug level logging
parser.add_argument("--debug", action="store_true", dest="debug", help="turn on debug logging level for file log", default=False)
#Output debug log
parser.add_argument("-l", dest="OutputLog", help="Create an output log file: ie -l out.txt", default=None)
options = parser.parse_args()
#setup file based logging if an output log file was specified
if(options.OutputLog):
if(len(options.OutputLog) < 2):
logging.critical("the output log file parameter is invalid")
return -2
else:
#setup file based logging
filelogger = logging.FileHandler(filename=options.OutputLog, mode='w')
if(options.debug):
filelogger.setLevel(logging.DEBUG)
else:
filelogger.setLevel(logging.INFO)
filelogger.setFormatter(logging.Formatter("%(levelname)s - %(message)s"))  # build the formatter locally; the module-level one only exists when run as a script
logging.getLogger('').addHandler(filelogger)
logging.info("Log Started: " + datetime.datetime.strftime(datetime.datetime.now(), "%A, %B %d, %Y %I:%M%p" ))
#Do parameter validation
if(options.InputFolder is None or not os.path.isdir(options.InputFolder)):
logging.critical("Invalid Input Folder Path to folder containing DAT files")
return -5
if(options.OutputReport is None):
logging.critical("No OutputReport Path")
return -6
logging.debug("Input Folder Path is: %s" % options.InputFolder)
logging.debug("Output Report is: %s" % options.OutputReport)
spt = ParsingTool(options.InputFolder, options.PlatformName, options.PlatformVersion, options.Type)
spt.Parse()
return spt.OutputHtmlReport(VERSION, options.OutputReport)
#--------------------------------
# Control starts here
#
#--------------------------------
if __name__ == '__main__':
#setup main console as logger
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(levelname)s - %(message)s")
console = logging.StreamHandler()
console.setLevel(logging.CRITICAL)
console.setFormatter(formatter)
logger.addHandler(console)
#call main worker function
retcode = main()
if retcode != 0:
logging.critical("Failed. Return Code: %i" % retcode)
#end logging
logging.shutdown()
sys.exit(retcode)
``` |
{
"source": "joschout/Multi-Directional-Rule-Set-Learning",
"score": 3
} |
#### File: experiments/dask_utils/dask_initialization.py
```python
from typing import List
from dask.distributed import Client, SSHCluster
scheduler_host_name = 'scheduler_host_name'
worker_hosts = [
'worker1',
'worker2'
]
def initialize_client_for_ssh_cluster(
scheduler_host: str,
worker_hosts: List[str]
) -> Client:
ssh_hosts = [scheduler_host, *worker_hosts]
try:
cluster = SSHCluster(
hosts=ssh_hosts,
connect_options={"known_hosts": None},
worker_options={"nthreads": 1},
# scheduler_options={"port": 0, "dashboard_address": ":8787"}
)
client = Client(cluster)
except (KeyError, OSError):
scheduler_address = f'{scheduler_host}:8786'
client = Client(address=scheduler_address)
return client
def reconnect_client_to_ssh_cluster(scheduler_host: str) -> Client:
scheduler_address = f'{scheduler_host}:8786'
client = Client(address=scheduler_address)
return client
```
#### File: e1_st_association_vs_tree_rules/model_evaluation/evaluate_single_target_mids_model.py
```python
from typing import List, Dict
import pandas as pd
from experiments.utils.experiment_logging import create_logger, close_logger
from experiments.arcbench_data_preparation.reworked_one_hot_encoding import get_original_data_fold_abs_file_name, \
TrainTestEnum
from mdrsl.rule_models.mids.io_mids import (
store_mids_interpret_stats, store_mids_target_attr_to_score_info, load_mids_classifier)
from mdrsl.evaluation.predictive_performance_metrics import ScoreInfo
from mdrsl.rule_models.mids.model_evaluation.mids_interpretability_metrics import MIDSInterpretabilityStatistics, \
MIDSInterpretabilityStatisticsCalculator
from mdrsl.rule_models.mids.mids_classifier import MIDSClassifier
from mdrsl.rule_models.mids.mids_ruleset import MIDSRuleSet
from mdrsl.rule_models.mids.model_evaluation.scoring_mids import score_MIDS_on_its_targets_without_nans
from mdrsl.rule_models.mids.model_fitting.mids_with_value_reuse import MIDSValueReuse
TargetAttr = str
def evaluate_single_target_mids_model_for_dataset_fold(
dataset_name: str,
fold_i: int,
logger_name: str,
logger_file_name: str,
mids_classifier_abs_file_name: str,
mids_target_attr_to_score_info_abs_file_name: str,
mids_interpret_stats_abs_file_name: str
):
logger = create_logger(
logger_name=logger_name,
log_file_name=logger_file_name
)
# --- load test data ----------------------------------------------------------------------------------------------
# read in original (discretized) test data
original_test_data_fold_abs_file_name = get_original_data_fold_abs_file_name(dataset_name, fold_i,
TrainTestEnum.test)
df_test_original_column_order = pd.read_csv(original_test_data_fold_abs_file_name,
delimiter=',')
# --- load classifier ---------------------------------------------------------------------------------------------
# mids_classifier_abs_file_name = get_mids_clf_abs_file_name(dataset_name, fold_i)
logger.info(f"start loading MIDS model from {mids_classifier_abs_file_name}")
mids_classifier: MIDSClassifier = load_mids_classifier(mids_classifier_abs_file_name)
logger.info("finished loading MIDS model")
logger.info(mids_classifier)
reconstructed_mids = MIDSValueReuse()
reconstructed_mids.classifier = mids_classifier
# --- Evaluate and store interpretability statistics --------------------------------------------------------------
filter_nans: bool = True
target_attr_to_score_info_map: Dict[str, ScoreInfo] = score_MIDS_on_its_targets_without_nans(
reconstructed_mids, df_test_original_column_order, filter_nans=filter_nans)
logger.info("Evaluated MIDS classifier on predictive performance")
target_attrs: List[TargetAttr] = mids_classifier.target_attrs
for target_attr in target_attrs:
target_attr_score_info: ScoreInfo = target_attr_to_score_info_map[target_attr]
logger.info(f"\t{target_attr}:\n {target_attr_score_info.to_str(' ')}")
logger.info("\t---")
store_mids_target_attr_to_score_info(mids_target_attr_to_score_info_abs_file_name, target_attr_to_score_info_map)
logger.info(f"Wrote MIDS Dict[TargetAttr, ScoreInfo] to {mids_target_attr_to_score_info_abs_file_name}")
# --- Evaluate and store interpretability statistics --------------------------------------------------------------
interpret_stats: MIDSInterpretabilityStatistics \
= MIDSInterpretabilityStatisticsCalculator.calculate_ruleset_statistics(
MIDSRuleSet(mids_classifier.rules), df_test_original_column_order, target_attributes=target_attrs)
logger.info("Evaluated MIDS classifier on interpretability")
logger.info(interpret_stats.to_str("\n"))
store_mids_interpret_stats(mids_interpret_stats_abs_file_name, interpret_stats)
logger.info(f"Wrote MIDSInterpretabilityStatistics to {mids_interpret_stats_abs_file_name}")
logger.info("---")
close_logger(logger)
```
#### File: e1_st_association_vs_tree_rules/rule_mining/single_target_car_mining_ifo_confidence_level.py
```python
import os
from typing import List, Tuple, Dict
from dask import delayed
from dask.delayed import Delayed
from distributed import Client
from experiments.dask_utils.computations import compute_delayed_functions
from experiments.dask_utils.dask_initialization import reconnect_client_to_ssh_cluster
from experiments.utils.experiment_logging import create_logger, close_logger
from experiments.utils.header_attributes import get_header_attributes
from experiments.arcbench_data_preparation.arc_model_data_preparation import prepare_arc_data
from experiments.arcbench_data_preparation.reworked_one_hot_encoding import get_original_data_fold_abs_file_name, \
TrainTestEnum
from experiments.e1_st_association_vs_tree_rules.file_naming.rules.single_target_filtered_cars_naming import (
get_single_target_filtered_cars_abs_filename,
get_single_target_filtered_cars_mining_timings_abs_filename,
assoc_vs_tree_based_single_target_car_dir
)
from experiments.io_timings import store_timings_dict
from mdrsl.data_structures.rules.multi_target_class_association_rule import MCAR
from mdrsl.rule_models.mids.io_mids import store_mcars
from mdrsl.rule_generation.association_rule_mining.mlext_impl.mlext_interaction import mine_single_target_MCARs_mlext
def mine_cars_for_dataset_fold_target_attribute(
dataset_name: str,
fold_i: int,
target_attribute: str,
min_support: float,
min_confidence: float,
max_length: int,
):
"""
1. load the required training data of the dataset fold.
2. make sure the target attribute is the last attribute
3. mine rules using the parameters settings
--> check the number of rules!
4. save the rules to file
:return:
"""
relative_name: str = f'{dataset_name}{fold_i}_{target_attribute}_{min_confidence}'
logger = create_logger(
logger_name=f'mine_filtered_single_target_cars_' + relative_name,
log_file_name=os.path.join(assoc_vs_tree_based_single_target_car_dir(),
f'{relative_name}_single_target_filtered_car_mining.log')
)
# logger.info(f"rule_cutoff={rule_cutoff}")
# # load the required training data of the dataset fold.
# original_train_data_fold_abs_file_name = get_original_data_fold_abs_file_name(
# dataset_name, fold_i, TrainTestEnum.train)
# df_train_original_column_order = pd.read_csv(original_train_data_fold_abs_file_name, delimiter=',')
# # 2. make sure the target attribute is the last attribute
# df_train_reordered = reorder_columns(df_train_original_column_order, target_attribute)
#
# # REMOVE INSTANCES WITH NAN AS TARGET VALUE:
# df_train_reordered = remove_instances_with_nans_in_column(df_train_reordered, target_attribute)
df_train_reordered = prepare_arc_data(dataset_name, fold_i, target_attribute, TrainTestEnum.train)
logger.info(f"start mining CARs for " + relative_name)
filtered_st_mcars: List[MCAR]
timings_dict: Dict[str, float]
filtered_st_mcars, timings_dict = mine_single_target_MCARs_mlext(df_train_reordered,
target_attribute=target_attribute,
min_support=min_support,
min_confidence=min_confidence,
max_length=max_length)
logger.info(f"finished mining CARs for {dataset_name} {fold_i}_{min_support}supp_{min_confidence}conf")
logger.info(
f"found {len(filtered_st_mcars)} CARs for {dataset_name} {fold_i}_{min_support}supp_{min_confidence}conf")
filtered_st_mcars_abs_file_name: str = get_single_target_filtered_cars_abs_filename(
dataset_name=dataset_name, fold_i=fold_i, target_attribute=target_attribute,
confidence_boundary_val=min_confidence
)
store_mcars(filtered_st_mcars_abs_file_name, filtered_st_mcars)
logger.info(f"finished writing CARs to file: {filtered_st_mcars_abs_file_name}")
filtered_st_mcars_mining_timings_abs_file_name = get_single_target_filtered_cars_mining_timings_abs_filename(
dataset_name=dataset_name, fold_i=fold_i, target_attribute=target_attribute,
confidence_boundary_val=min_confidence
)
store_timings_dict(filtered_st_mcars_mining_timings_abs_file_name, timings_dict)
close_logger(logger)
def main():
from experiments.arcbench_data_preparation.dataset_info import datasets
datasets = [dict(filename="iris", targetvariablename="class", numerical=True)]
from experiments.dask_utils.dask_initialization import scheduler_host_name
scheduler_host: str = scheduler_host_name
list_of_computations: List[Tuple[Delayed, Dict]] = []
min_support: float = 0.1
max_length: int = 7
confidence_boundary_values: List[float] = [0.75, 0.95]
nb_of_folds: int = 10
use_dask = False
if use_dask:
client: Client = reconnect_client_to_ssh_cluster(scheduler_host)
for dataset_info in datasets:
dataset_name = dataset_info['filename']
for fold_i in range(nb_of_folds):
original_train_data_fold_abs_file_name = get_original_data_fold_abs_file_name(dataset_name, fold_i,
TrainTestEnum.train)
target_columns: List[str] = get_header_attributes(original_train_data_fold_abs_file_name)
for target_column in target_columns:
target_attribute = str(target_column)
for conf_boundary_val in confidence_boundary_values:
if use_dask:
func_args = dict(
dataset_name=dataset_name,
fold_i=fold_i,
target_attribute=target_attribute,
min_support=min_support,
min_confidence=conf_boundary_val,
max_length=max_length
)
delayed_func = delayed(mine_cars_for_dataset_fold_target_attribute)(
**func_args
)
list_of_computations.append((delayed_func, func_args))
else:
mine_cars_for_dataset_fold_target_attribute(
dataset_name=dataset_name,
fold_i=fold_i,
target_attribute=target_attribute,
min_support=min_support,
min_confidence=conf_boundary_val,
max_length=max_length
)
if use_dask:
log_file_dir = assoc_vs_tree_based_single_target_car_dir()
logger_name: str = 'mine_single_target_cars_ifo_confidence_bound_ERROR_LOGGER'
logger_file_name: str = os.path.join(
log_file_dir,
f'ERROR_LOG_mine_single_target_cars_ifo_confidence_bound.log'
)
compute_delayed_functions(
list_of_computations=list_of_computations,
client=client,
nb_of_retries_if_erred=5,
error_logger_name=logger_name,
error_logger_file_name=logger_file_name
)
if __name__ == '__main__':
main()
```
#### File: file_naming/rules/single_target_tree_rule_naming.py
```python
import os
from experiments.file_naming.single_target_classifier_indicator import SingleTargetClassifierIndicator
from project_info import project_dir
def get_single_target_tree_rule_dir() -> str:
mcars_dir: str = os.path.join(project_dir,
'models',
'single_target_tree_rules')
if not os.path.exists(mcars_dir):
os.makedirs(mcars_dir)
return mcars_dir
def get_single_target_tree_rules_relative_file_name_without_extension(
dataset_name: str, fold_i: int,
target_attribute: str,
classifier_indicator: SingleTargetClassifierIndicator,
nb_of_trees_per_model: int,
min_support: float,
max_depth: int
) -> str:
return (
f"{dataset_name}{fold_i}_{target_attribute}_{str(classifier_indicator.value)}"
f"_{nb_of_trees_per_model}trees"
f"_{min_support}supp_{max_depth}depth"
)
def get_single_target_tree_rules_abs_file_name(
dataset_name: str, fold_i: int,
target_attribute: str,
classifier_indicator: SingleTargetClassifierIndicator,
nb_of_trees_per_model: int,
min_support: float,
max_depth: int,
):
rules_dir = get_single_target_tree_rule_dir()
relative_file_name: str = get_single_target_tree_rules_relative_file_name_without_extension(
dataset_name=dataset_name, fold_i=fold_i,
target_attribute=target_attribute,
classifier_indicator=classifier_indicator,
nb_of_trees_per_model=nb_of_trees_per_model,
min_support=min_support, max_depth=max_depth
)
tree_derived_rule_abs_file_name = os.path.join(rules_dir, f"{relative_file_name}.json.gz")
return tree_derived_rule_abs_file_name
def get_single_target_tree_rules_gen_timing_info_abs_file_name(
dataset_name: str, fold_i: int,
target_attribute: str,
classifier_indicator: SingleTargetClassifierIndicator,
nb_of_trees_per_model: int,
min_support: float,
max_depth: int,
):
rules_dir = get_single_target_tree_rule_dir()
relative_file_name: str = get_single_target_tree_rules_relative_file_name_without_extension(
dataset_name=dataset_name, fold_i=fold_i,
target_attribute=target_attribute,
classifier_indicator=classifier_indicator,
nb_of_trees_per_model=nb_of_trees_per_model,
min_support=min_support, max_depth=max_depth
)
tree_derived_rule_abs_file_name = os.path.join(rules_dir, f"{relative_file_name}_timings.json.gz")
return tree_derived_rule_abs_file_name
```
#### File: e2_multi_directional_model_comparison/model_induction/round_robin_tree_based_model_induction.py
```python
import os
from typing import List, Tuple, Dict, Set
import pandas as pd
from dask import delayed
from dask.delayed import Delayed
from dask_utils.computations import compute_delayed_functions
from dask_utils.dask_initialization import reconnect_client_to_ssh_cluster
from experiments.arcbench_data_preparation.reworked_one_hot_encoding import get_original_data_fold_abs_file_name, \
TrainTestEnum
from experiments.file_naming.car_naming import get_tree_derived_rules_abs_file_name
from experiments.file_naming.single_target_classifier_indicator import SingleTargetClassifierIndicator
from experiments.utils.experiment_logging import create_logger, close_logger
from experiments.utils.file_creation import file_does_not_exist_or_has_been_created_earlier_than_
from experiments.e2_multi_directional_model_comparison.file_naming.round_robin_model_naming import (
get_tree_based_greedy_clf_abs_file_name, greedy_models_tree_based_dir)
from mdrsl.data_structures.rules.multi_target_class_association_rule import MCAR
from mdrsl.rule_models.mids.io_mids import load_mcars
from mdrsl.rule_models.mids.mids_rule import MIDSRule
from mdrsl.rule_models.rr.rr_rule_set_learner import GreedyRoundRobinTargetRuleClassifier
from mdrsl.rule_models.rr.io_rr_rule_set_learner import store_greedy_naive_classifier
TargetAttr = str
def learn_tree_based_greedy_model_for_dataset_fold(
dataset_name: str,
fold_i: int,
classifier_indicator: SingleTargetClassifierIndicator,
nb_of_trees_per_model: int,
nb_of_original_targets_to_predict: int,
min_support: float,
max_depth: int
):
logger = create_logger(
logger_name=f'learn_greedy_model_{dataset_name}{fold_i}_tree_derived_rules',
log_file_name=os.path.join(greedy_models_tree_based_dir(),
f'{dataset_name}{fold_i}_greedy_model_induction_tree_derived_rules.log')
)
# --- load train data ---------------------------------------------------------------------------------------------
# read in original (discretized) training data
df_original_train = pd.read_csv(get_original_data_fold_abs_file_name(dataset_name, fold_i, TrainTestEnum.train),
delimiter=',')
# --- load tree-derived rules -------------------------------------------------------------------------------------
tree_clf_derived_rules_abs_file_name = get_tree_derived_rules_abs_file_name(dataset_name,
fold_i,
classifier_indicator,
nb_of_trees_per_model,
nb_of_original_targets_to_predict,
min_support,
max_depth)
logger.info(f"Reading MCARs from file: {tree_clf_derived_rules_abs_file_name}")
mcars: List[MCAR] = load_mcars(tree_clf_derived_rules_abs_file_name)
mids_rules: Set[MIDSRule] = {MIDSRule(mcar) for mcar in mcars}
logger.info(f"ground set size (nb of initial MCARs): {len(mids_rules)}")
# --- Fit and save classifier -------------------------------------------------------------------------------------
greedy_clf = GreedyRoundRobinTargetRuleClassifier(df_original_train.columns, verbose=False)
selected_set, selected_set_scores = greedy_clf.fit(ground_set=mids_rules, training_data=df_original_train)
logger.info(f"Selected {len(selected_set)} out of {len(mcars)} rules "
f"({(len(selected_set) / len(mcars) *100):.2f}%)")
logger.info("start saving Naive greedy model")
tree_based_greedy_clf_abs_file_name = get_tree_based_greedy_clf_abs_file_name(
dataset_name=dataset_name, fold_i=fold_i,
classifier_indicator=classifier_indicator, nb_of_trees_per_model=nb_of_trees_per_model,
nb_of_original_targets_to_predict=nb_of_original_targets_to_predict,
min_support=min_support, max_depth=max_depth
)
store_greedy_naive_classifier(tree_based_greedy_clf_abs_file_name, greedy_clf)
logger.info(f"finished saving greedy clf to file: {tree_based_greedy_clf_abs_file_name}")
close_logger(logger)
def main():
from experiments.arcbench_data_preparation.dataset_info import datasets
datasets = [dict(filename="iris", targetvariablename="class", numerical=True)]
from experiments.dask_utils.dask_initialization import scheduler_host_name
scheduler_host: str = scheduler_host_name
list_of_computations: List[Tuple[Delayed, Dict]] = []
nb_of_folds: int = 10
classifier_indicator = SingleTargetClassifierIndicator.random_forest
nb_of_original_targets_to_predict: int = 2
nb_of_trees_per_model_list: List[int] = [5, 10]
min_support: float = 0.1 # min_samples_leaf must be at least 1 or in (0, 0.5], got 0
max_depth: int = 7 - nb_of_original_targets_to_predict
use_dask = False
if use_dask:
client = reconnect_client_to_ssh_cluster(scheduler_host)
for dataset_info in datasets:
dataset_name = dataset_info['filename']
for fold_i in range(nb_of_folds):
for nb_of_trees_per_model in nb_of_trees_per_model_list:
clf_abs_file_name = get_tree_based_greedy_clf_abs_file_name(
dataset_name=dataset_name, fold_i=fold_i,
classifier_indicator=classifier_indicator, nb_of_trees_per_model=nb_of_trees_per_model,
nb_of_original_targets_to_predict=nb_of_original_targets_to_predict,
min_support=min_support, max_depth=max_depth
)
n_days_in_hours = 18
should_refit: bool = file_does_not_exist_or_has_been_created_earlier_than_(
clf_abs_file_name,
n_days_in_hours
)
if should_refit:
if use_dask:
func_args = dict(
dataset_name=dataset_name,
fold_i=fold_i,
classifier_indicator=classifier_indicator,
nb_of_trees_per_model=nb_of_trees_per_model,
nb_of_original_targets_to_predict=nb_of_original_targets_to_predict,
min_support=min_support,
max_depth=max_depth)
delayed_func = \
delayed(learn_tree_based_greedy_model_for_dataset_fold)(
**func_args
)
list_of_computations.append((delayed_func, func_args))
else:
learn_tree_based_greedy_model_for_dataset_fold(
dataset_name=dataset_name,
fold_i=fold_i,
classifier_indicator=classifier_indicator,
nb_of_trees_per_model=nb_of_trees_per_model,
nb_of_original_targets_to_predict=nb_of_original_targets_to_predict,
min_support=min_support,
max_depth=max_depth
)
if use_dask:
log_file_dir: str = greedy_models_tree_based_dir()
logger_name: str = 'greedy_model_induction_tree_derived_rules_ERROR_LOGGER'
logger_file_name: str = os.path.join(
log_file_dir,
f'ERROR_LOG_greedy_model_induction_tree_derived_rules.log'
)
compute_delayed_functions(
list_of_computations=list_of_computations,
client=client,
nb_of_retries_if_erred=5,
error_logger_name=logger_name,
error_logger_file_name=logger_file_name
)
if __name__ == '__main__':
main()
```
#### File: experiments/file_naming/column_encodings.py
```python
import os
from project_info import project_dir
def get_encodings_book_keeper_abs_file_name_for(dataset_name: str, fold_i: int) -> str:
encodings_dir = os.path.join(project_dir, 'data/arcBench_processed/column_encodings')
if not os.path.exists(encodings_dir):
os.makedirs(encodings_dir)
encoding_book_keeper_abs_file_name = os.path.join(
encodings_dir, f'{dataset_name}{fold_i}.json.gz'
)
return encoding_book_keeper_abs_file_name
```
#### File: mdrsl/data_handling/reorder_dataset_columns.py
```python
import pandas as pd
TargetAttr = str
def reorder_columns(df: pd.DataFrame, target_column: TargetAttr) -> pd.DataFrame:
"""
Generates a dataframe with reordered columns, such that the given target column is the last column
:param df:
:param target_column:
:return:
"""
if target_column not in df.columns:
message = f"the given target column {target_column} is not a column of the given dataframe"
raise Exception(message)
columns = df.columns
reordered_columns = []
for possibly_other_column in columns:
if str(possibly_other_column) != str(target_column):
reordered_columns.append(possibly_other_column)
# reordered_columns = [other_col for other_col in columns if str(other_col) is not str(target_column)]
reordered_columns.append(target_column)
new_df = df[reordered_columns]
return new_df
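if __name__ == '__main__':
# Illustrative check (not part of the original module): the target column
# should end up as the last column after reordering.
example_df = pd.DataFrame({'b': [1, 2], 'class': ['x', 'y'], 'a': [3, 4]})
print(list(reorder_columns(example_df, 'class').columns))  # -> ['b', 'a', 'class']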
```
#### File: data_structures/transactions/transaction.py
```python
from mdrsl.data_structures.comparable_itemset import ComparableItemSet
from mdrsl.data_structures.item import Item
class Transaction(ComparableItemSet):
"""Transaction represents one instance in a dataset.
Transaction is hashed based on its items.
Parameters
----------
row: array of ints or strings
header: array of strings
Represents column labels.
Attributes
----------
items: array of Items
tid: int
Transaction ID.
string_items: two dimensional array of strings
e.g. [["a:=:1", "b:=:2"]]
"""
id_ = 0
def __init__(self, row, header):
self.items = []
self.tid = Transaction.id_
Transaction.id_ += 1
# eg. [pay=high, eyes=green]
self.string_items = []
for idx, val in enumerate(row):
header_label = header[idx]
item = Item(header_label, val)
self.string_items.append("{}:=:{}".format(header_label, val))
self.items.append(item)
self.frozenset = frozenset(self)
def __repr__(self):
string = ", ".join(self.string_items)
return "{" + string + "}"
def __hash__(self):
return hash(tuple(self.items))
# return hash((self.tid, tuple(self.items)))
def __eq__(self, other):
return hash(self) == hash(other)
def __getitem__(self, idx):
return self.items[idx]
    def getclass(self):
        # NOTE: class_val is not set in __init__; it has to be assigned externally before calling this.
        return self.class_val
class UniqueTransaction(Transaction):
"""Same as Transaction class except for
hashing by Transaction id.
"""
def __hash__(self):
return hash(self.tid)
```
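A small usage sketch (the import path `mdrsl.data_structures.transactions.transaction` is assumed from the file location):
```python
from mdrsl.data_structures.transactions.transaction import Transaction

header = ['Passenger_Cat', 'Age_Cat', 'Gender']
row = ['3rd_class', 'adult', 'male']

t = Transaction(row, header)
print(t)      # {Passenger_Cat:=:3rd_class, Age_Cat:=:adult, Gender:=:male}
print(t.tid)  # 0 for the first Transaction created in this process
```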
#### File: evaluation/interpretability/basic_rule_set_stats.py
```python
from mdrsl.utils.value_collection import ValueCollector
def is_valid_fraction(val: float, lower: float = 0, higher: float = 1) -> bool:
return lower <= val <= higher
class AbstractModelStatistics:
def __init__(self, model_abbreviation: str):
self.model_abbreviation: str = model_abbreviation
def get_model_size(self) -> int:
raise NotImplementedError('Abstract method')
class BasicRuleSetStatistics(AbstractModelStatistics):
def __init__(self, rule_length_counter: ValueCollector, model_abbreviation: str):
super().__init__(model_abbreviation)
self.rule_length_counter: ValueCollector = rule_length_counter
def ruleset_size(self) -> int:
"""
Returns the size of the rule set.
:return:
"""
return self.rule_length_counter.count
def total_nb_of_literals(self) -> int:
"""
        Returns the total nb of literals in the rule set.
"""
return self.rule_length_counter.sum
def get_model_size(self) -> int:
return self.total_nb_of_literals()
def avg_nb_of_literals_per_rule(self) -> float:
"""
        Returns the avg nb of literals over the rules in the rule set.
"""
return self.rule_length_counter.get_avg()
def min_nb_of_literals(self) -> float:
"""
        Returns the nb of literals in the shortest rule
"""
return self.rule_length_counter.min
def max_nb_of_literals(self) -> float:
"""
        Returns the nb of literals in the longest rule
"""
return self.rule_length_counter.max
class SingleTargetRuleSetStatistics(BasicRuleSetStatistics):
def __init__(self,
rule_length_collector: ValueCollector,
model_abbreviation: str,
fraction_bodily_overlap: float,
fraction_uncovered_examples: float,
frac_predicted_classes: float
):
super().__init__(rule_length_collector, model_abbreviation=model_abbreviation)
self.fraction_bodily_overlap: float = fraction_bodily_overlap
self.fraction_uncovered_examples: float = fraction_uncovered_examples
self.frac_predicted_classes: float = frac_predicted_classes
def to_str(self, indentation: str = "") -> str:
output_string = (
indentation + "Rule length stats: " + str(self.rule_length_counter) + "\n"
+ indentation + "Fraction bodily overlap: " + str(self.fraction_bodily_overlap) + "\n"
+ indentation + "Fraction uncovered examples: " + str(self.fraction_uncovered_examples) + "\n"
+ indentation + "Fraction predicted classes: " + str(self.frac_predicted_classes) + "\n"
)
return output_string
def __str__(self):
return self.to_str()
if __name__ == '__main__':
value_collector = ValueCollector()
stats = BasicRuleSetStatistics(value_collector, "test")
print(stats.rule_length_counter)
```
#### File: association_rule_mining/fim_impl/mine_st_rules_with_fim.py
```python
import random
import time
from typing import List, Optional, Dict
import fim
import pandas as pd
from pyarc.data_structures.antecedent import Antecedent
from pyarc.data_structures.consequent import Consequent
from pyarc.data_structures.item import Item
from pyarc.data_structures.car import ClassAssocationRule
from pyarc.data_structures.transaction_db import TransactionDB
def mine_CARs(df: pd.DataFrame, rule_cutoff: int,
sample=False, random_seed=None,
verbose: bool = True,
**top_rules_kwargs) -> List[ClassAssocationRule]:
"""
:param df: the (training) data to mine rules on
:param rule_cutoff: the maximum number of rules to return
    :param sample: bool - if the generated nb of rules is larger than rule_cutoff and sample == True,
        a random sample of rule_cutoff rules is returned
:param random_seed:
:param verbose:
:param top_rules_kwargs:
:return:
"""
txns = TransactionDB.from_DataFrame(df)
rules = top_rules(txns.string_representation,
appearance=txns.appeardict, # NOTE: THIS IS VERY IMPORTANT; without this, any attribute can be
# the target of the class association rule
target_rule_count=rule_cutoff,
verbose=verbose,
**top_rules_kwargs)
cars: List[ClassAssocationRule] = createCARs(rules)
cars_subset: List[ClassAssocationRule]
if len(cars) > rule_cutoff:
if sample:
if random_seed is not None:
random.seed(random_seed)
cars_subset = random.sample(cars, rule_cutoff)
else:
cars_subset = cars[:rule_cutoff]
else:
cars_subset = cars
return cars_subset
def mine_unrestricted_CARS(df: pd.DataFrame, min_support = 0.01, min_confidence = 0.5,
max_length=7) -> List[ClassAssocationRule]:
"""
:param df: the (training) data to mine rules on
:param rule_cutoff: the maximum number of rules to return
:param sample: bool - if the generate nb of rules is larger than the rule_cutoff and sample == True,
a random sample of rules rule_cutoff rules is returned
:param random_seed:
:param verbose:
:param top_rules_kwargs:
:return:
"""
txns = TransactionDB.from_DataFrame(df)
min_support_percents = min_support * 100
min_confidence_percents = min_confidence * 100
CARs: List[ClassAssocationRule] = generateCARs(txns,
support=min_support_percents,
confidence=min_confidence_percents, maxlen=max_length)
return CARs
def createCARs(rules) -> List[ClassAssocationRule]:
"""Function for converting output from fim.arules or fim.apriori
to a list of ClassAssociationRules
Parameters
----------
rules : output from fim.arules or from generateCARs
Returns
-------
list of CARs
"""
CARs: List[ClassAssocationRule] = []
for rule in rules:
con_tmp, ant_tmp, support, confidence = rule
con = Consequent(*con_tmp.split(":=:"))
ant_items = [Item(*i.split(":=:")) for i in ant_tmp]
ant = Antecedent(ant_items)
CAR = ClassAssocationRule(ant, con, support=support, confidence=confidence)
CARs.append(CAR)
CARs.sort(reverse=True)
return CARs
def generateCARs(transactionDB: TransactionDB,
support: float = 1, confidence: float = 50, maxlen: int = 10, **kwargs):
"""Function for generating ClassAssociationRules from a TransactionDB
Parameters
----------
:param transactionDB : TransactionDB
support : float
minimum support in percents if positive
absolute minimum support if negative
confidence : float
minimum confidence in percents if positive
absolute minimum confidence if negative
maxlen : int
maximum length of mined rules
**kwargs :
arbitrary number of arguments that will be
provided to the fim.apriori function
Returns
-------
list of CARs
"""
appear = transactionDB.appeardict
rules = fim.apriori(transactionDB.string_representation, supp=support, conf=confidence, mode="o", target="r",
report="sc", appear=appear, **kwargs, zmax=maxlen)
return createCARs(rules)
def top_rules(transactions,
appearance: Optional[Dict] = None,
target_rule_count: int = 1000,
init_support: float = 0.05,
init_confidence: float = 0.5,
confidence_step: float = 0.05,
support_step: float = 0.05,
min_length: int = 2,
init_max_length: int = 3,
total_timeout: float = 100.0,
max_iterations: int = 30,
verbose: bool = True):
"""
Function for finding the best n (target_rule_count) rules from transaction list
Returns list of mined rules. The rules are not ordered.
:param transactions : 2D array of strings, e.g. [["a:=:1", "b:=:3"], ["a:=:4", "b:=:2"]]
:param appearance : dict - dictionary specifying rule appearance
:param target_rule_count : int - target number of rules to mine
:param init_support : float - support from which to start mining
:param init_confidence : float - confidence from which to start mining
:param confidence_step : float
:param support_step : float
:param min_length : int - minimum len of rules to mine
:param init_max_length : int - maximum len from which to start mining
:param total_timeout : float - maximum execution time of the function
:param max_iterations : int - maximum iterations to try before stopping execution
:param verbose: bool
"""
if appearance is None:
appearance = {}
start_time: float = time.time()
# the length of a rule is at most the length of a transaction. (All transactions have the same length.)
MAX_RULE_LEN: int = len(transactions[0])
current_support: float = init_support
current_confidence: float = init_confidence
current_max_length: int = init_max_length
keep_mining: bool = True
is_max_length_decreased_due_timeout: bool = False
current_iteration: int = 0
last_rule_count = -1
rules: Optional[List] = None
if verbose:
print("STARTING top_rules")
while keep_mining:
current_iteration += 1
if current_iteration > max_iterations:
if verbose:
print("Max iterations reached")
break
if verbose:
print(f"--- iteration {current_iteration} ---")
print((f"Running apriori with setting: "
f"confidence={current_confidence}, "
f"support={current_support}, "
f"min_length={min_length}, "
f"max_length={current_max_length}, "
f"MAX_RULE_LEN={MAX_RULE_LEN}"
))
current_rules = fim.arules(transactions, supp=current_support, conf=current_confidence, mode="o", report="sc",
appear=appearance,
zmax=current_max_length, zmin=min_length)
current_nb_of_rules = len(current_rules)
# assign
rules = current_rules
if verbose:
print(f"Rule count: {current_nb_of_rules}, Iteration: {current_iteration}")
if current_nb_of_rules >= target_rule_count:
keep_mining = False
if verbose:
print(f"\tTarget rule count satisfied: {target_rule_count}")
else:
current_execution_time = time.time() - start_time
# if timeout limit exceeded
if current_execution_time > total_timeout:
if verbose:
print(f"\tExecution time exceeded: {total_timeout}")
keep_mining = False
            # if we can still increase our rule length AND
            # the number of rules found has changed since the last iteration AND
            # the max length has not been decreased because of a timeout
elif current_max_length < MAX_RULE_LEN and last_rule_count != current_nb_of_rules and not is_max_length_decreased_due_timeout:
current_max_length += 1
last_rule_count = current_nb_of_rules
if verbose:
print(f"\tIncreasing max_length {current_max_length}")
            # if we can still increase our rule length AND
            # the max length was previously decreased because of a timeout AND
            # we can still increase our support
            # THEN:
            # increase our support
            # increment our max length
            # NOTE: is_max_length_decreased_due_timeout is never set to True in this function,
            # so this branch is effectively unreachable as written.
elif current_max_length < MAX_RULE_LEN and is_max_length_decreased_due_timeout and current_support <= 1 - support_step:
current_support += support_step
current_max_length += 1
last_rule_count = current_nb_of_rules
is_max_length_decreased_due_timeout = False
if verbose:
print(f"\tIncreasing maxlen to {current_max_length}")
print(f"\tIncreasing minsup to {current_support}")
# IF we can still decrease our confidence
# THEN decrease our confidence
elif current_confidence > confidence_step:
current_confidence -= confidence_step
if verbose:
print(f"\tDecreasing confidence to {current_confidence}")
else:
if verbose:
print("\tAll options exhausted")
keep_mining = False
if verbose:
end_of_current_iteration_message = f"--- end iteration {current_iteration} ---"
print(end_of_current_iteration_message)
print("-" * len(end_of_current_iteration_message))
if verbose:
print(f"FINISHED top_rules after {current_iteration} iterations")
return rules
```
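A usage sketch for `mine_CARs` (assuming the module is importable as `mdrsl.rule_generation.association_rule_mining.fim_impl.mine_st_rules_with_fim`, that the `fim` and `pyarc` packages are installed, and that the DataFrame is fully discretized with the class attribute as its last column, as `TransactionDB.from_DataFrame` expects):
```python
import pandas as pd

from mdrsl.rule_generation.association_rule_mining.fim_impl.mine_st_rules_with_fim import mine_CARs

# tiny, fully discretized toy dataset; the last column acts as the class attribute
df = pd.DataFrame({
    'Age_Cat':  ['adult', 'adult', 'child', 'child'],
    'Gender':   ['male', 'female', 'male', 'female'],
    'Survived': ['no', 'yes', 'yes', 'yes'],
})

cars = mine_CARs(df, rule_cutoff=10, verbose=False)
for car in cars:
    print(car)
```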
#### File: rule_generation/association_rule_mining/frequent_itemset_mining.py
```python
from typing import List, Optional, Union, Tuple
# import fim
import pandas as pd
from bidict import bidict
attribute_value_separator = ';=;'
filter_missing_values = True
Item = str
ItemEncoding = int
Transaction = Union[List[Item], List[ItemEncoding]]
def attr_val_to_item(attr_name: str, val: object, separator=None) -> Item:
if separator is None:
separator = attribute_value_separator
return attr_name + separator + str(val)
def dataframe_to_list_of_transactions(df: pd.DataFrame) -> List[Transaction]:
"""
convert the dataframe to a list of transactions, with each transaction of the form
[ 'col1=val1', 'col2=val2', ...]
e.g. ['Passenger_Cat=3rd_class', 'Age_Cat=adult', 'Gender=male']
"""
list_of_transactions: List[Transaction] = []
row_index: int
for row_index in range(0, df.shape[0]):
row_itemset: Transaction = []
for col_name in df.columns:
try:
column_series = df[col_name]
row_value = column_series.iloc[row_index]
is_value_na: bool = pd.isna(row_value)
if (filter_missing_values and not is_value_na)\
or not filter_missing_values:
item: Item = attr_val_to_item(col_name, row_value)
row_itemset.append(item)
except Exception as err:
print(f"column {col_name} and index {row_index}")
raise err
list_of_transactions.append(row_itemset)
return list_of_transactions
class ItemEncoder:
def __init__(self):
self.encoding_bidict: bidict = bidict()
self.next_encoding = 1
def encode_item(self, item: Item) -> int:
optional_encoding: Optional[int] = self.encoding_bidict.get(item, None)
if optional_encoding is not None:
return optional_encoding
else:
item_encoding = self.next_encoding
self.encoding_bidict[item] = item_encoding
self.next_encoding += 1
return item_encoding
def decode_item(self, encoding: int) -> Item:
optional_item: Optional[Item] = self.encoding_bidict.inverse.get(encoding, None)
if optional_item is not None:
return optional_item
else:
raise Exception(f"No item for encoding {encoding}")
def dataframe_to_list_of_transactions_with_encoding(df: pd.DataFrame) -> Tuple[List[Transaction], ItemEncoder]:
"""
convert the dataframe to a list of transactions, with each transaction of the form
[ 'col1=val1', 'col2=val2', ...]
e.g. ['Passenger_Cat=3rd_class', 'Age_Cat=adult', 'Gender=male']
"""
item_encoder = ItemEncoder()
list_of_transactions: List[Transaction] = []
for row_index in range(0, df.shape[0]):
row_itemset: Transaction = []
for col_name in df.columns:
try:
column_series = df[col_name]
row_value = column_series.iloc[row_index]
is_value_na: bool = pd.isna(row_value)
if (filter_missing_values and not is_value_na)\
or not filter_missing_values:
item: Item = attr_val_to_item(col_name, row_value)
item_encoding: int = item_encoder.encode_item(item)
row_itemset.append(item_encoding)
except Exception as err:
print(f"column {col_name} and index {row_index}")
raise err
list_of_transactions.append(row_itemset)
return list_of_transactions, item_encoder
def run_fim_apriori(df: pd.DataFrame, min_suppport_thr: float) -> List[Transaction]:
    try:
        import fim
    except ImportError as err:
        raise ImportError("the 'fim' package (PyFIM) is required for run_fim_apriori") from err
print("running fim apriori function")
dataset_transactions: List[Transaction] = dataframe_to_list_of_transactions(df)
print("dataset processed")
    frequent_itemsets_raw = fim.apriori(dataset_transactions, supp=(min_suppport_thr*100))  # list of (itemset, support) tuples
    print("apriori finished")
    frequent_itemsets: List[Transaction] = list(map(lambda i: list(i[0]), frequent_itemsets_raw))  # List[Transaction]
    print("apriori results processed")
return frequent_itemsets
def run_apyori_apriori(df: pd.DataFrame, min_suppport_thr: float) -> List[Transaction]:
"""
Takes a data frame and a support threshold and returns itemsets which satisfy the threshold.
The idea is to basically
1. make a list of strings out of the df
2. and run apriori api on it
3. return the frequent itemsets
    :param df: dataframe, where each row is viewed as a transaction
:param min_suppport_thr:
:return:
"""
from mdrsl.rule_generation.association_rule_mining.apyori_impl.apyori import RelationRecord, apriori
from mdrsl.rule_generation.association_rule_mining.apyori_impl.apyori_utils import print_relation_record
dataset_transactions: List[Transaction] = dataframe_to_list_of_transactions(df)
results: List[RelationRecord] = list(apriori(dataset_transactions, min_support=min_suppport_thr))
for relation_record in results:
print_relation_record(relation_record)
print("=====================================")
list_of_frequent_itemsets: List[Transaction] = []
for relation_record in results: # type: RelationRecord
itemset: Transaction = []
for pred in relation_record.items:
itemset.append(pred)
list_of_frequent_itemsets.append(itemset)
return list_of_frequent_itemsets
def run_apriori(implementation: str, df: pd.DataFrame, min_suppport_thr: float) -> List[Transaction]:
if implementation == 'apyori':
return run_apyori_apriori(df, min_suppport_thr)
elif implementation == 'fim':
return run_fim_apriori(df, min_suppport_thr)
else:
raise NotImplementedError('No Apriori implementation found for' + implementation)
```
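A usage sketch for the transaction conversion helpers (the import path is assumed from the file location):
```python
import pandas as pd

from mdrsl.rule_generation.association_rule_mining.frequent_itemset_mining import (
    dataframe_to_list_of_transactions,
    dataframe_to_list_of_transactions_with_encoding,
)

df = pd.DataFrame({'Age_Cat': ['adult', 'child'], 'Gender': ['male', 'female']})

transactions = dataframe_to_list_of_transactions(df)
print(transactions)
# [['Age_Cat;=;adult', 'Gender;=;male'], ['Age_Cat;=;child', 'Gender;=;female']]

encoded_transactions, encoder = dataframe_to_list_of_transactions_with_encoding(df)
print(encoded_transactions)    # [[1, 2], [3, 4]]
print(encoder.decode_item(1))  # 'Age_Cat;=;adult'
```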
#### File: rule_generation/decision_tree_conversion/attribute_id_to_name_conversion.py
```python
from typing import List
class DecisionTreeFeatureIDConverter:
"""
Converts an attribute id (as found in a scikit-learn decision tree) into the corresponding attribute name,
as found in the training data fed into the decision tree.
"""
def __init__(self, dt_descriptive_atts: List[str]):
self.dt_descriptive_atts = dt_descriptive_atts
def convert(self, feature_id: int):
# find the descriptive attr as used for input for the decision tree
return self.dt_descriptive_atts[feature_id]
```
#### File: rule_generation/decision_tree_conversion/pretty_drawing_file.py
```python
from typing import List
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
import graphviz
def render_decision_tree_classifier_to_file(
clf: DecisionTreeClassifier, decision_tree_descriptive_attribute_names: List[str],
image_absolute_file_path: str) -> None:
# %%
# classes_ : array of shape = [n_classes] or a list of such arrays
#
# The classes labels (single output problem), or a list of arrays of class labels (multi-output problem).
#
class_labels = clf.classes_
class_labels: List[str] = list(map(str, class_labels))
dot_data = tree.export_graphviz(clf, feature_names=decision_tree_descriptive_attribute_names,
class_names=class_labels)
graph = graphviz.Source(dot_data)
    # this will create a .pdf file
graph.render(image_absolute_file_path)
```
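A usage sketch (assumes scikit-learn, the graphviz Python package and the Graphviz system binaries are installed; the import path is assumed from the file location):
```python
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

from mdrsl.rule_generation.decision_tree_conversion.pretty_drawing_file import (
    render_decision_tree_classifier_to_file,
)

iris = load_iris()
clf = DecisionTreeClassifier(max_depth=3).fit(iris.data, iris.target)

# graph.render() writes the DOT source and a .pdf next to it
render_decision_tree_classifier_to_file(
    clf,
    decision_tree_descriptive_attribute_names=list(iris.feature_names),
    image_absolute_file_path='/tmp/iris_tree',
)
```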
#### File: rule_generation/decision_tree_conversion/tree_to_paths_conversion.py
```python
from typing import List, Optional
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from mdrsl.rule_generation.decision_tree_conversion.tree_branch import TreeBranch
from mdrsl.rule_generation.decision_tree_conversion.tree_edge import TreeEdge
from mdrsl.rule_generation.decision_tree_conversion.tree_path import TreePath
NodeId = int
class TreeToPathsConverter:
"""
Converts a single Scikit-learn DecisionTreeClassifier in a list of paths.
NOTE: the paths will still use the information as was stored in the Scikit-learn decision tree.
Any renaming should be done at a later point.
"""
def __init__(self, classifier: DecisionTreeClassifier):
self.tree: DecisionTreeClassifier = classifier
# The number of nodes (internal nodes + leaves) in the tree.
self.n_nodes: int = classifier.tree_.node_count
self.children_left: np.ndarray = classifier.tree_.children_left
self.children_right: np.ndarray = classifier.tree_.children_right
# feature : array of int, shape [node_count]
# feature[i] holds the feature to split on, for the internal node i.
self.feature: np.ndarray = classifier.tree_.feature
self.threshold: np.ndarray = classifier.tree_.threshold
# n_node_samples : array of int, shape [node_count]
# n_node_samples[i] holds the number of training samples reaching node i.
self.nb_training_samples_reaching_node: np.ndarray = classifier.tree_.n_node_samples
self.total_nb_of_training_samples: int = 0 # counted when walking the tree
# value : array of double, shape [node_count, n_outputs, max_n_classes]
# Contains the constant prediction value of each node.
self.value: np.array = classifier.tree_.value
self.class_labels = classifier.classes_
self.nb_of_target_attributes = classifier.n_outputs_
self.list_of_paths: List[TreePath] = []
def convert(self) -> List[TreePath]:
root_node_id: NodeId = 0
self.total_nb_of_training_samples: int = 0
self._recursive_convert(root_node_id, parent_tree_branch=None)
return self.list_of_paths
def _recursive_convert(self, node_id: NodeId, parent_tree_branch: Optional[TreeBranch]) -> None:
        # check if the node is a test node, i.e. whether it has both children:
left_child_node_id: NodeId = self.children_left[node_id]
right_child_node_id: NodeId = self.children_right[node_id]
# If we have a test node
if left_child_node_id != right_child_node_id:
# do something with the test of this node
node_feature_id = self.feature[node_id]
node_threshold = self.threshold[node_id]
left_extended_branch = TreeBranch(parent_tree_branch=parent_tree_branch,
edge=TreeEdge(
feature_id=node_feature_id, threshold=node_threshold, is_left=True))
right_extended_branch = TreeBranch(parent_tree_branch=parent_tree_branch,
edge=TreeEdge(
feature_id=node_feature_id, threshold=node_threshold, is_left=False))
self._recursive_convert(left_child_node_id, left_extended_branch)
self._recursive_convert(right_child_node_id, right_extended_branch)
else:
class_label_counts: np.ndarray = self.value[node_id]
nb_of_training_samples_reaching_leaf: int = self.nb_training_samples_reaching_node[node_id]
self.total_nb_of_training_samples += nb_of_training_samples_reaching_leaf
all_zeros: bool = not np.any(class_label_counts)
if all_zeros:
raise Exception("all labels have count 0")
tree_path: TreePath = self._convert_to_tree_path(parent_tree_branch,
nb_of_training_samples_reaching_leaf,
class_label_counts)
self.list_of_paths.append(tree_path)
def _convert_to_tree_path(self, tree_branch: Optional[TreeBranch], leaf_nb_training_samples: int,
leaf_class_label_counts: np.ndarray) -> TreePath:
if tree_branch is None:
raise NotImplementedError("Trees consisting of only the root as leaf are currently not supported")
else:
tree_branch_edges_as_list: List[TreeEdge] = tree_branch.to_list()
return TreePath(edges=tree_branch_edges_as_list,
nb_training_samples_in_leaf=leaf_nb_training_samples,
leaf_class_label_counts=leaf_class_label_counts,
class_labels=self.class_labels,
nb_of_target_attributes=self.nb_of_target_attributes)
```
#### File: rule_models/eids/merged_model_io.py
```python
import gzip
import jsonpickle
from mdrsl.rule_models.eids.st_to_mt_model_merging import MergedSTMIDSClassifier
def store_merged_st_mids_model(merged_model_abs_file_name: str, merged_st_mids_classifier: MergedSTMIDSClassifier) -> None:
frozen = jsonpickle.encode(merged_st_mids_classifier)
with gzip.open(merged_model_abs_file_name, 'wt') as ofile:
ofile.write(frozen)
def load_merged_st_mids_model(merged_model_abs_file_name: str) -> MergedSTMIDSClassifier:
mids_classifier: MergedSTMIDSClassifier
with gzip.open(merged_model_abs_file_name, 'rt') as ifile:
file_contents = ifile.read()
mids_classifier = jsonpickle.decode(file_contents)
return mids_classifier
```
#### File: ids/model_fitting/ids_without_value_reuse.py
```python
from typing import Optional, Set
from pyids.data_structures.ids_objective_function import ObjectiveFunctionParameters
from pyids.data_structures.ids_rule import IDSRule
from pyids.data_structures.ids_ruleset import IDSRuleSet
from pyids.data_structures.ids_cacher import IDSCacher
from mdrsl.rule_models.ids.model_fitting.ids_abstract_base import IDSAbstractBase
from mdrsl.rule_models.ids.objective_function.ids_objective_function_without_value_reuse import IDSObjectiveFunction
from submodmax.abstract_optimizer import AbstractOptimizer
from submodmax.deterministic_double_greedy_search import DeterministicDoubleGreedySearch
from submodmax.deterministic_local_search import DeterministicLocalSearch
from submodmax.deterministic_local_search_pyids import DeterministicLocalSearchPyIDS
from submodmax.randomized_double_greedy_search import RandomizedDoubleGreedySearch
from submodmax.smooth_local_search import SmoothLocalSearch
from submodmax.smooth_local_search_pyids import SmoothLocalSearchPyIDS
class IDS(IDSAbstractBase):
def __init__(self):
super().__init__()
self.algorithms = dict(
SLS=SmoothLocalSearchPyIDS,
DLS=DeterministicLocalSearchPyIDS,
DLSRewrite=DeterministicLocalSearch,
SLSRewrite=SmoothLocalSearch,
DDGS=DeterministicDoubleGreedySearch,
RDGS=RandomizedDoubleGreedySearch
)
self.solution_set: Optional[Set[IDSRule]] = None
self.objective_function_value: Optional[float] = None
def _optimize(self, params: ObjectiveFunctionParameters, algorithm: str, ground_set: Set[IDSRule],
cacher: IDSCacher, objective_scale_factor: float, debug: bool) -> Set[IDSRule]:
objective_function = IDSObjectiveFunction(
objective_func_params=params,
cacher=cacher,
scale_factor=objective_scale_factor, normalize=self.normalize)
self.objective_function = objective_function
optimizer: AbstractOptimizer = self.algorithms[algorithm](
objective_function=objective_function, ground_set=ground_set, debug=debug)
solution_set: Set[IDSRule] = optimizer.optimize()
objective_function_value: float = objective_function.evaluate(IDSRuleSet(solution_set))
self.solution_set = solution_set
self.objective_function_value = objective_function_value
return solution_set
```
#### File: ids/objective_function/ids_objective_function_without_value_reuse.py
```python
import time
from typing import TypeVar, Union, Set
import numpy as np
from rule_models.ids.objective_function.ids_objective_function_abstract import AbstractIDSObjectiveFunction
from pyids.data_structures import IDSRuleSet
from pyids.data_structures.ids_objective_function import ObjectiveFunctionParameters
from submodmax.abstract_optimizer import AbstractSubmodularFunction
E = TypeVar('E')
class IDSObjectiveFunction(AbstractSubmodularFunction, AbstractIDSObjectiveFunction):
def __init__(self, objective_func_params=ObjectiveFunctionParameters(), cacher=None, scale_factor=1,
normalize=True):
AbstractIDSObjectiveFunction.__init__(self, objective_func_params, cacher=cacher, scale_factor=scale_factor,
normalize=normalize)
def f1_minimize_total_nb_of_literals(self, solution_set):
f1_unnormalized = self.f1_upper_bound_nb_of_literals - solution_set.sum_rule_length()
if self.normalize:
f1 = f1_unnormalized / self.f1_upper_bound_nb_of_literals
else:
f1 = f1_unnormalized
self._f1_boundary_check(f1)
return f1
def f2_minimize_overlap_predicting_the_same_class(self, solution_set):
overlap_intraclass_sum = 0
for i, r1 in enumerate(solution_set.ruleset):
for j, r2 in enumerate(solution_set.ruleset):
if i >= j:
continue
if r1.car.consequent.value == r2.car.consequent.value:
overlap_tmp = self.cacher.overlap(r1, r2)
overlap_intraclass_sum += overlap_tmp
f2_unnormalized = self.f2_f3_upper_bound - overlap_intraclass_sum
if self.normalize:
f2 = f2_unnormalized / self.f2_f3_upper_bound
else:
f2 = f2_unnormalized
self._boundary_check(f2, 'f2')
return f2
def f3_minimize_overlap_predicting_different_class(self, solution_set):
overlap_interclass_sum = 0
for i, r1 in enumerate(solution_set.ruleset):
for j, r2 in enumerate(solution_set.ruleset):
if i >= j:
continue
if r1.car.consequent.value != r2.car.consequent.value:
overlap_tmp = self.cacher.overlap(r1, r2)
overlap_interclass_sum += overlap_tmp
f3_unnormalized = self.f2_f3_upper_bound - overlap_interclass_sum
if self.normalize:
f3 = f3_unnormalized / self.f2_f3_upper_bound
else:
f3 = f3_unnormalized
self._boundary_check(f3, 'f3')
return f3
def f4_at_least_one_rule_per_target_value(self, solution_set):
classes_covered = set()
for rule in solution_set.ruleset:
classes_covered.add(rule.car.consequent.value)
f4_unnormalized = len(classes_covered)
if self.normalize:
f4 = f4_unnormalized / self.nb_of_target_values
else:
f4 = f4_unnormalized
self._boundary_check(f4, 'f4')
return f4
def f5_minimize_incorrect_cover(self, solution_set):
sum_incorrect_cover = 0
for rule in solution_set.ruleset:
incorrect_cover_size = np.sum(rule._incorrect_cover(self.quant_dataframe))
# incorrect_cover_size = np.sum(rule.incorrect_cover(self.quant_dataframe))
# print(f"IDS incorrect cover size: {incorrect_cover_size} for rule {rule}")
sum_incorrect_cover += incorrect_cover_size
# print(f"IDS f5 upper bound: {self.f5_upper_bound}")
# print(f"IDS f5 sum incorrect cover: {sum_incorrect_cover}")
f5_unnormalized = self.f5_upper_bound - sum_incorrect_cover
if self.normalize:
f5 = f5_unnormalized / self.f5_upper_bound
else:
f5 = f5_unnormalized
self._boundary_check(f5, 'f5')
return f5
def f6_cover_each_example(self, solution_set):
correctly_covered = np.zeros(self.nb_of_training_examples).astype(bool)
for rule in solution_set.ruleset:
correctly_covered = correctly_covered | rule.correct_cover(self.quant_dataframe)
f6_unnormalized = np.sum(correctly_covered)
if self.normalize:
f6 = f6_unnormalized / self.nb_of_training_examples
else:
f6 = f6_unnormalized
self._boundary_check(f6, 'f6')
return f6
def evaluate(self, solution_set: Union[IDSRuleSet, Set[E]]) -> float:
if type(solution_set) == set:
solution_set = IDSRuleSet(solution_set)
if type(solution_set) != IDSRuleSet:
raise Exception("Type of solution_set must be IDSRuleSet, but is ", type(solution_set))
self.call_counter += 1
self.call_set_sizes.append(len(solution_set))
start_time = time.time()
l = self.objective_func_params.params["lambda_array"]
f0 = self.f0_minimize_rule_set_size(self.ground_set_size, len(solution_set)) if l[0] != 0 else 0
f1 = self.f1_minimize_total_nb_of_literals(solution_set) if l[1] != 0 else 0
f2 = self.f2_minimize_overlap_predicting_the_same_class(solution_set) if l[2] != 0 else 0
f3 = self.f3_minimize_overlap_predicting_different_class(solution_set) if l[3] != 0 else 0
f4 = self.f4_at_least_one_rule_per_target_value(solution_set) if l[4] != 0 else 0
f5 = self.f5_minimize_incorrect_cover(solution_set) if l[5] != 0 else 0
f6 = self.f6_cover_each_example(solution_set) if l[6] != 0 else 0
self.f0_val = f0
self.f1_val = f1
self.f2_val = f2
self.f3_val = f3
self.f4_val = f4
self.f5_val = f5
self.f6_val = f6
# print(f"IDS f1:{f1}")
# print()
# print(tabulate([['value', f0, f1, f2, f3, f4, f5, f6],
# ['l*val', f0 * l[0], f1 * l[1], f2 * l[2], f3 * l[3], f4 * l[4], f5 * l[5], f6 * l[6]]
# ],
# headers=['type', 'f0', 'f1', 'f2', 'f3', 'f4', 'f5', 'f6']))
# print()
fs = np.array([
f0, f1, f2, f3, f4, f5, f6
]) / self.scale_factor
result = np.dot(l, fs)
if self.stat_collector is not None:
self.stat_collector.add_values(f0, f1, f2, f3, f4, f5, f6, result)
end_time = time.time()
elapsed_time = end_time - start_time
self.call_run_times.append(elapsed_time)
return result
def f0(self, solution_set):
current_nb_of_rules: int = len(solution_set)
ground_set_size = len(self.objective_func_params.params['all_rules'])
return self.f0_minimize_rule_set_size(ground_set_size=ground_set_size,
current_nb_of_rules=current_nb_of_rules)
def f1(self, solution_set):
return self.f1_minimize_total_nb_of_literals(solution_set)
def f2(self, solution_set):
return self.f2_minimize_overlap_predicting_the_same_class(solution_set)
def f3(self, solution_set):
return self.f3_minimize_overlap_predicting_different_class(solution_set)
def f4(self, solution_set):
return self.f4_at_least_one_rule_per_target_value(solution_set)
def f5(self, solution_set):
return self.f5_minimize_incorrect_cover(solution_set)
def f6(self, solution_set):
return self.f6_cover_each_example(solution_set)
```
#### File: mids/cover/cover_metric.py
```python
import pandas as pd
from mdrsl.data_structures.rules.rule_part import Consequent
from mdrsl.rule_models.mids.cover.cover_checker import CoverChecker
from mdrsl.rule_models.mids.mids_rule import MIDSRule
import numpy as np
def get_avg_incorrect_cover_size(rule: MIDSRule, df: pd.DataFrame, cover_checker: CoverChecker,
) -> float:
consequent: Consequent = rule.get_consequent()
nb_of_attr_in_consequent: int = len(consequent)
sum_of_incorrect_cover_sizes = 0
cover = cover_checker.get_cover(rule, df)
covers_size = np.sum(cover)
# print(f"MIDS cover: {covers_size}")
if not np.any(cover):
        raise Exception(f"rule {rule} does not cover any examples")
for attr in consequent.get_attributes():
incorrect_cover_size_for_attr: int = np.sum(cover_checker.get_incorrect_cover(rule, df, attr, cover=cover))
sum_of_incorrect_cover_sizes += incorrect_cover_size_for_attr
weighted_sum = sum_of_incorrect_cover_sizes / nb_of_attr_in_consequent
# print(f"MIDS incorrect cover size: {weighted_sum} for rule {rule}")
return weighted_sum
```
#### File: rule_models/rr/io_rr_rule_set_learner.py
```python
import gzip
import jsonpickle
from rule_models.rr.rr_rule_set_learner import GreedyRoundRobinTargetRuleClassifier
def store_greedy_naive_classifier(greedy_naive_classifier_abs_file_name: str,
greedy_naive_clf: GreedyRoundRobinTargetRuleClassifier) -> None:
frozen = jsonpickle.encode(greedy_naive_clf)
with gzip.open(greedy_naive_classifier_abs_file_name, 'wt') as ofile:
ofile.write(frozen)
def load_greedy_naive_classifier(greedy_naive_classifier_abs_file_name: str) -> GreedyRoundRobinTargetRuleClassifier:
greedy_naive_clf: GreedyRoundRobinTargetRuleClassifier
with gzip.open(greedy_naive_classifier_abs_file_name, 'rt') as ifile:
file_contents = ifile.read()
greedy_naive_clf = jsonpickle.decode(file_contents)
return greedy_naive_clf
```
#### File: mdrsl/toy_data/titanic.py
```python
import os
from typing import Tuple
import pandas as pd
from mdrsl.data_handling.split_train_test import train_test_split_pd
def get_total_df_titanic(data_dir: str) -> Tuple[pd.DataFrame, str]:
df_det = pd.read_csv(os.path.join(data_dir, 'titanic_train.tab'),
' ', header=None, names=['Passenger_Cat', 'Age_Cat', 'Gender'])
df_y = pd.read_csv(os.path.join(data_dir, 'titanic_train.Y'), ' ', header=None, names=['Died', 'Survived'])
df_total = df_det.join(df_y['Survived'])
dataset_name = 'titanic'
return df_total, dataset_name
def prepare_data_titanic(data_dir: str, prop=0.25) -> Tuple[pd.DataFrame, pd.DataFrame, str]:
df_total, dataset_name = get_total_df_titanic(data_dir)
# ---------------------------
df_train, df_test = train_test_split_pd(df_total, prop=prop)
return df_train, df_test, dataset_name
``` |
{
"source": "joschout/SubmodularMaximization",
"score": 3
} |
#### File: examples/max_cut/calculate_cut.py
```python
from typing import List
from itertools import combinations, chain
import numpy as np
from examples.max_cut.max_cut_objective_function import calculate_cut_value
VIndex = int
def has_correct_shape(matrix: np.ndarray):
if len(matrix.shape) != 2:
return False
if matrix.shape[0] != matrix.shape[1]:
return False
return True
def powerset(iterable):
"""
powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)
"""
xs = list(iterable)
# note we return an iterator rather than a list
return chain.from_iterable(combinations(xs,n) for n in range(len(xs)+1))
def main():
adjacency_matrix_lol: List[List[int]] = [[1, 1, 0],
[1, 0, 1],
[0, 1, 1]]
adjacency_matrix_np = np.array(adjacency_matrix_lol)
print(adjacency_matrix_np)
for subset in powerset(range(len(adjacency_matrix_lol))):
if len(subset) != 0:
subset = set(subset)
cut_value = calculate_cut_value(subset, adjacency_matrix_np)
print(f"cut {subset} has cut value {cut_value}")
if __name__ == '__main__':
main()
```
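The imported `calculate_cut_value` lives in `max_cut_objective_function.py`, which is not included in this excerpt. A minimal sketch that is consistent with how it is called here (a set of vertex indices plus a NumPy adjacency matrix) could look as follows; this is an illustration, not the project's actual implementation:
```python
from typing import Set

import numpy as np


def calculate_cut_value(cut_set: Set[int], adjacency_matrix: np.ndarray) -> float:
    """Sum the weights of all edges with exactly one endpoint inside cut_set
    (illustrative sketch, assuming a symmetric adjacency matrix)."""
    n = adjacency_matrix.shape[0]
    complement = set(range(n)) - cut_set
    return float(sum(adjacency_matrix[i, j] for i in cut_set for j in complement))
```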
#### File: submodmax/value_reuse/abstract_double_greedy_search.py
```python
import warnings
from typing import Set, TypeVar, Tuple, Iterable
from submodmax.value_reuse.abstract_optimizer import AbstractSubmodularFunctionValueReuse, AbstractOptimizerValueReuse, FuncInfo
from submodmax.value_reuse.set_info import SetInfo
E = TypeVar('E')
class AbstractDoubleGreedySearchValueReuse(AbstractOptimizerValueReuse):
"""
Parent class for Deterministic and
Randomized Unconstrained submodular maximization, by Buchbinder and Feldman
See also:
<NAME>., <NAME>., <NAME>., & <NAME>. (2015).
A tight linear time (1/2)-approximation for unconstrained submodular maximization.
SIAM Journal on Computing, 44(5), 1384–1402. https://doi.org/10.1137/130929205
"""
def __init__(self, objective_function: AbstractSubmodularFunctionValueReuse, ground_set: Set[E],
debug: bool = True):
super().__init__(objective_function, ground_set, debug)
self.class_name = 'submodmax.value_reuse.AbstractDoubleGreedySearch'
def should_update_X(self, a: float, b: float) -> bool:
raise NotImplementedError('abstract method')
def ground_set_iterator(self) -> Iterable[E]:
raise NotImplementedError('abstract method')
def optimize(self) -> Tuple[SetInfo, FuncInfo]:
if self.debug:
print("=====================================================================")
print("START", self.class_name, "optimizer")
print("=====================================================================")
ground_set_size = len(self.ground_set)
empty_set = set()
X_prev_set_info: SetInfo = SetInfo(
ground_set_size=ground_set_size,
current_set_size=0,
added_elems=empty_set,
deleted_elems=empty_set,
intersection_previous_and_current_elems=empty_set
)
X_prev_set_info.set_current_set(set())
Y_prev_set_info = SetInfo(
ground_set_size=ground_set_size,
current_set_size=ground_set_size,
added_elems=self.ground_set,
deleted_elems=empty_set,
intersection_previous_and_current_elems=empty_set
)
Y_prev_set_info.set_current_set(self.ground_set.copy())
func_info_X_prev: FuncInfo = self.objective_function.evaluate(X_prev_set_info, previous_func_info=None)
func_info_Y_prev: FuncInfo = self.objective_function.evaluate(Y_prev_set_info, previous_func_info=None)
if self.debug:
print("initialization:")
print("X0 : size: ", X_prev_set_info.current_set_size, "/", ground_set_size, ", f(S): ",
func_info_X_prev.func_value)
print("Y0: size: ", Y_prev_set_info.current_set_size, "/", ground_set_size, ", f(S): ",
func_info_Y_prev.func_value)
elem: E
for i, elem in enumerate(self.ground_set_iterator(), 1):
singleton_set: Set[E] = {elem}
# X_prev_plus_elem: Set[E] = X_prev | {elem}
X_prev_plus_elem_set_info = SetInfo(
ground_set_size=ground_set_size,
current_set_size=X_prev_set_info.current_set_size + 1,
added_elems=singleton_set,
deleted_elems=empty_set,
intersection_previous_and_current_elems=X_prev_set_info.current_set
)
func_info_X_prev_plus_elem: FuncInfo = self.objective_function.evaluate(
X_prev_plus_elem_set_info, func_info_X_prev)
a: float = func_info_X_prev_plus_elem.func_value - func_info_X_prev.func_value
Y_prev_minus_elem_set: Set[E] = Y_prev_set_info.current_set - {elem}
Y_prev_minus_elem_set_info = SetInfo(
ground_set_size=ground_set_size,
current_set_size=Y_prev_set_info.current_set_size - 1,
added_elems=empty_set,
deleted_elems=singleton_set,
intersection_previous_and_current_elems=Y_prev_minus_elem_set
)
Y_prev_minus_elem_set_info.set_current_set(Y_prev_minus_elem_set)
func_info_Y_prev_minus_elem = self.objective_function.evaluate(
Y_prev_minus_elem_set_info, func_info_Y_prev)
b: float = func_info_Y_prev_minus_elem.func_value - func_info_Y_prev.func_value
if self.debug:
print()
print("element ", i, "/", ground_set_size)
print("\t X_prev --> size: ", X_prev_set_info.current_set_size, ", f(S):",
func_info_X_prev.func_value)
print("\t X" + str(i) + " + e" + str(i) + " --> size: ", X_prev_plus_elem_set_info.current_set_size,
", f(S):",
func_info_X_prev_plus_elem.func_value)
print()
print("\t Y_prev --> size: ", Y_prev_set_info.current_set_size, ", f(S):",
func_info_Y_prev.func_value)
print("\t Y" + str(i) + " - e" + str(i) + " --> size: ", Y_prev_minus_elem_set_info.current_set_size,
", f(S):",
func_info_Y_prev_minus_elem.func_value)
if self.should_update_X(a, b):
new_set = X_prev_set_info.current_set | singleton_set
X_prev_plus_elem_set_info.set_current_set(new_set)
X_prev_set_info = X_prev_plus_elem_set_info
func_info_X_prev = func_info_X_prev_plus_elem
# Y_prev stays the same
if self.debug:
print("\tX_prev --> size:", X_prev_set_info.current_set_size, ", f(X_prev):",
func_info_X_prev.func_value)
else:
# X_prev stays the same
Y_prev_set_info = Y_prev_minus_elem_set_info
func_info_Y_prev = func_info_Y_prev_minus_elem
if self.debug:
print("\tY_prev --> size:", Y_prev_set_info.current_set_size, ", f(Y_prev):",
func_info_Y_prev.func_value)
warnings.warn("remove equality check")
if not X_prev_set_info.current_set == Y_prev_set_info.current_set:
raise Exception("both sets should be equal")
if self.debug:
print("-- finished iteration --")
print("X_prev --> size:", X_prev_set_info.current_set_size, ", f(X_prev):", func_info_X_prev.func_value)
print("Y_prev --> size:", Y_prev_set_info.current_set_size, ", f(Y_prev):", func_info_Y_prev.func_value)
print("obj val local optimum:", str(func_info_X_prev.func_value))
return X_prev_set_info, func_info_X_prev
``` |
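The deterministic and randomized variants of this optimizer differ only in `should_update_X` and in how the ground set is traversed. A minimal deterministic subclass could look like the sketch below; the repository's own subclasses may differ in details:
```python
from typing import Iterable, Set, TypeVar

from submodmax.value_reuse.abstract_double_greedy_search import AbstractDoubleGreedySearchValueReuse

E = TypeVar('E')


class DeterministicDoubleGreedySearchSketch(AbstractDoubleGreedySearchValueReuse):
    """Deterministic variant (Buchbinder & Feldman): keep the element in X whenever
    its marginal gain for X is at least as large as the gain of removing it from Y."""

    def should_update_X(self, a: float, b: float) -> bool:
        return a >= b

    def ground_set_iterator(self) -> Iterable[E]:
        # visit the ground set in an arbitrary but fixed order
        return iter(self.ground_set)
```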
{
"source": "joschout/tilde",
"score": 2
} |
#### File: mai_experiments/run_experiments_refactor/example_preprocessing_refactor.py
```python
from typing import Optional, List, Set
from problog.engine import DefaultEngine
from problog.logic import Term
from problog.program import SimpleProgram
from mai_experiments.experiment_settings import FileNameData, DebugPrintingOptions
from mai_version.IO.input_format import KnowledgeBaseFormat
from mai_version.IO.label_collector import LabelCollectorMapper
from mai_version.IO.parsing_background_knowledge import parse_background_knowledge_keys
from mai_version.IO.parsing_examples import KeysExampleBuilder
from mai_version.IO.parsing_settings.setting_parser import SettingsParserMapper
from mai_version.IO.parsing_settings.utils import KeysPredictionGoalHandler
from mai_version.representation.background_knowledge import BackgroundKnowledgeWrapper
from mai_version.representation.example import InternalExampleFormat, ClauseDBExampleWrapper, Label, \
SimpleProgramExampleWrapper
from mai_version.representation.example_collection import ExampleCollection
from mai_version.representation.language import TypeModeLanguage
from mai_version.trees.TreeBuilder import TreeBuilderType
class Experiment:
"""
NOTE: we store the examples in 2 ways:
* 'usable for training': the examples containing the classification predicate
* 'usable for testing': the examples with the classification predicate removed
"""
def __init__(self):
self.training_examples_collection = None
self.examples_usable_for_testing = None
self.possible_labels = None
self.language = None
self.prediction_goal = None
def preprocess_examples_and_background_knowledge(self,
file_name_data: FileNameData,
filter_out_unlabeled_examples: bool,
debug_printing_options: DebugPrintingOptions):
engine = DefaultEngine()
engine.unknown = 1
settings_file_parser = SettingsParserMapper.get_settings_parser(KnowledgeBaseFormat.KEYS)
parsed_settings = settings_file_parser.parse(file_name_data.fname_settings)
self.language = parsed_settings.language # type: TypeModeLanguage
kb_format = KnowledgeBaseFormat.KEYS
internal_ex_format = InternalExampleFormat.CLAUSEDB
treebuilder_type = TreeBuilderType.DETERMINISTIC
prediction_goal_handler = parsed_settings.get_prediction_goal_handler() # type: KeysPredictionGoalHandler
self.prediction_goal = prediction_goal_handler.get_prediction_goal() # type: Term
# ------------------------------------------------
# --- BACKGROUND KNOWLEDGE -----------------------
# ------------------------------------------------
background_knowledge_wrapper \
= parse_background_knowledge_keys(file_name_data.fname_background,
self.prediction_goal) # type: BackgroundKnowledgeWrapper
full_background_knowledge_sp \
= background_knowledge_wrapper.get_full_background_knowledge_simple_program() # type: Optional[SimpleProgram]
stripped_background_knowledge = background_knowledge_wrapper.get_stripped_background_knowledge() # type: Optional[SimpleProgram]
# ------------------------------------------------
# EXAMPLES
example_builder = KeysExampleBuilder(self.prediction_goal, debug_printing_options.example_parsing)
self.training_examples_collection = example_builder.parse(internal_ex_format, file_name_data.fname_examples,
full_background_knowledge_sp) # type: ExampleCollection
# ------------------------------------------------
# --- LABELS -------------------------------------
index_of_label_var = prediction_goal_handler.get_predicate_goal_index_of_label_var() # type: int
label_collector = LabelCollectorMapper.get_label_collector(internal_ex_format, self.prediction_goal,
index_of_label_var,
engine=engine)
keys_of_unlabeled_examples = label_collector.extract_labels(self.training_examples_collection)
nb_of_unlabeled_examples = len(keys_of_unlabeled_examples)
possible_labels = label_collector.get_labels() # type: Set[Label]
self.possible_labels = list(possible_labels) # type: List[Label]
# ------------------------------------------------
# TODO: change this back if necessary
if filter_out_unlabeled_examples and nb_of_unlabeled_examples > 0:
total_nb_of_examples = len(self.training_examples_collection.example_wrappers_sp)
self.training_examples_collection = self.training_examples_collection.filter_examples_not_in_key_set(
keys_of_unlabeled_examples)
print("DANGEROUS: FILTERED OUT UNLABELED EXAMPLES")
# ------------------------------------------------
stripped_examples_simple_program = self.training_examples_collection.get_labeled_example_wrappers_sp() # type: List[SimpleProgramExampleWrapper]
self.examples_usable_for_testing = stripped_examples_simple_program # type: List[SimpleProgramExampleWrapper]
if internal_ex_format == InternalExampleFormat.CLAUSEDB:
stripped_examples_clausedb = ClauseDBExampleWrapper.get_clause_db_examples(stripped_examples_simple_program,
background_knowledge=stripped_background_knowledge)
self.examples_usable_for_testing = stripped_examples_clausedb # type: List[ClauseDBExampleWrapper]
```
#### File: mai_version/classification/classification_helper.py
```python
import warnings
from typing import Iterable, List
# from sklearn.metrics import confusion_matrix, accuracy_score, classification_report, precision_score, recall_score
# python 3.6
import time
from problog.engine import GenericEngine
from mai_version.classification.classification_statistics_handler import ClassificationStatisticsHandler
from mai_version.model_validation.model_validation import ClassifierMapper, Classifier
from mai_version.representation.query_result_label_extractor import ModelsQueryResultLabelExtractor, \
KeysQueryResultLabelExtractor
try:
from typing import Collection
except ImportError:
Collection = Iterable
from problog.logic import Term
from problog.program import SimpleProgram, LogicProgram
from mai_version.representation.example import ExampleWrapper, InternalExampleFormat, Label
def print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False, hide_threshold=None):
"""pretty print for confusion matrixes"""
columnwidth = max([len(x) for x in labels] + [5]) # 5 is value length
empty_cell = " " * columnwidth
# Print header
print(" " + empty_cell, end=" ")
for label in labels:
print("%{0}s".format(columnwidth) % label, end=" ")
print()
# Print rows
for i, label1 in enumerate(labels):
print(" %{0}s".format(columnwidth) % label1, end=" ")
for j in range(len(labels)):
cell = "%{0}.1f".format(columnwidth) % cm[i, j]
if hide_zeroes:
cell = cell if float(cm[i, j]) != 0 else empty_cell
if hide_diagonal:
cell = cell if i != j else empty_cell
if hide_threshold:
cell = cell if cm[i, j] > hide_threshold else empty_cell
print(cell, end=" ")
print()
def get_models_classifier(internal_ex_format: InternalExampleFormat, model: SimpleProgram,
possible_labels: Iterable[Label],
background_knowledge: LogicProgram, debug_printing: bool = False,
engine: GenericEngine = None) -> Classifier:
query_terms = [Term('query')(label) for label in possible_labels]
query_result_label_extractor = ModelsQueryResultLabelExtractor()
classifier = ClassifierMapper.get_classifier(internal_ex_format, model, query_terms, query_result_label_extractor,
background_knowledge, debug_printing=debug_printing, engine=engine)
return classifier
def get_keys_classifier(internal_ex_format: InternalExampleFormat, model: SimpleProgram,
prediction_goal: Term, index_of_label_arg: int,
background_knowledge: LogicProgram, debug_printing: bool = False,
engine: GenericEngine = None):
query_terms = [Term('query')(prediction_goal)]
query_result_label_extractor = KeysQueryResultLabelExtractor()
query_result_label_extractor.set_index_of_label_arg(index_of_label_arg)
classifier = ClassifierMapper.get_classifier(internal_ex_format, model, query_terms, query_result_label_extractor,
background_knowledge, debug_printing=debug_printing, engine=engine)
return classifier
def do_labeled_examples_get_correctly_classified(classifier: Classifier, examples: Collection[ExampleWrapper],
possible_labels: List[Label],
debug_printing: bool = False) -> ClassificationStatisticsHandler:
warnings.warn("Model verification only supports deterministic models")
if debug_printing:
print('\n=== CHECKING MODEL ===')
print("Model verification only supports deterministic models")
statistics_handler = ClassificationStatisticsHandler(possible_labels)
# classifier.debug_printing = True
actual_labels = []
predicted_labels = []
for example in examples:
actual_label = example.label
found_labels = classifier.classify(example)
if len(found_labels) > 1:
print('actual label: ', actual_label)
print('found labels: ', found_labels)
a_predicted_label = found_labels[0]
# TEST
actual_labels.append(str(actual_label))
predicted_labels.append(str(a_predicted_label))
statistics_handler.update_statistics(actual_label, a_predicted_label)
# -------------
# conf_matrix = confusion_matrix(actual_labels, predicted_labels)
# accuracy = accuracy_score(actual_labels, predicted_labels)
#
# possible_labels_str = [str(label) for label in possible_labels]
# print("sklearn confusion matrix:")
# print(conf_matrix)
# print("pretty print:")
# print_cm(conf_matrix, labels=possible_labels_str)
print("=== MODEL VERIFICATION STATISTICS ===")
print(statistics_handler.get_accuracy()[1])
# precision = precision_score(actual_labels, predicted_labels)
# recall = recall_score(actual_labels, predicted_labels)
# print('precision:')
# print('\t' + str(precision))
# print('recall:')
# print('\t' + str(recall))
print(statistics_handler.get_classification_report_str())
print(statistics_handler.get_nb_of_examples_str_verbose() + '\n')
print(statistics_handler.get_confusion_matrix_str())
# nb_of_examples = len(examples)
# nb_of_correcty_labeled_examples = statistics_handler.nb_ex_correctly_classified
# nb_of_incorrecty_labeled_examples = statistics_handler.nb_ex_incorrectly_classified
#
# if debug_printing:
# print("total nb of examples: " + str(nb_of_examples))
# print(
# "examples labeled correctly: " + str(nb_of_correcty_labeled_examples) + "/" + str(
# nb_of_examples) + ", " + str(
# nb_of_correcty_labeled_examples / nb_of_examples * 100) + "%")
# print("examples labeled incorrectly: " + str(nb_of_incorrecty_labeled_examples) + "/" + str(
# nb_of_examples) + ", " + str(
# nb_of_incorrecty_labeled_examples / nb_of_examples * 100) + "%\n")
# print("--- confusion matrix: true/predicted --- :")
# print(statistics_handler.get_confusion_matrix_str())
return statistics_handler
# def do_labeled_examples_get_correctly_classified_keys(labeled_examples, rules_as_program, prediction_goal: Term,
# index_of_label_var: int, possible_labels: Iterable[str],
# background_knowledge, debug_printing: bool = False) -> bool:
# """
# Accepts both SimpleProgram Examples as ClauseDB examples.
#
# :param labeled_examples:
# :param rules_as_program:
# :param prediction_goal:
# :param index_of_label_var:
# :param possible_labels:
# :param background_knowledge:
# :param debug_printing:
# :return:
# """
#
# if debug_printing:
# print('\n=== Model validation ===')
#
# nb_of_examples = len(labeled_examples)
# nb_of_correcty_labeled_examples = 0
# nb_of_incorrecty_labeled_examples = 0
#
# all_training_examples_labeled_correctly = True
#
# for example in labeled_examples:
# true_label = example.label
#
# # NOTE: we strip the statements from the example and put it into a new example.
# # This is why this method works for both SimplePrograms and ClauseDBs
#
# # remove the labeling from the labeled example
# example_without_label = SimpleProgram()
# for statement in example: # type: Term
# if statement.functor != prediction_goal.functor:
# example_without_label += statement
#
# found_label = \
# get_labels_single_example_keys(example_without_label, rules_as_program, prediction_goal, index_of_label_var,
# possible_labels,
# background_knowledge)[0]
#
# label_is_correct = (true_label == found_label)
# if label_is_correct:
# nb_of_correcty_labeled_examples += 1
# # output = 'correct\treal label: ' + str(true_label) + '\tfound label: ' + str(found_label)
# # print(output)
# else:
# all_training_examples_labeled_correctly = False
# nb_of_incorrecty_labeled_examples += 1
# if debug_printing:
# output = 'incorrect\n\treal label: ' + str(true_label) + '\n\tfound label: ' + str(found_label)
# print(output)
# print('\tincorrectly labeled example:')
# for statement in example:
# print('\t\t' + str(statement))
# get_labels_single_example_models(example, rules_as_program, possible_labels, background_knowledge,
# debug_printing)
# print('----------------')
#
# if debug_printing:
# print("total nb of examples: " + str(nb_of_examples))
# print("examples labeled correctly: " + str(nb_of_correcty_labeled_examples) + "/" + str(
# nb_of_examples) + ", " + str(
# nb_of_correcty_labeled_examples / nb_of_examples * 100) + "%")
# print("examples labeled incorrectly: " + str(nb_of_incorrecty_labeled_examples) + "/" + str(
# nb_of_examples) + ", " + str(
# nb_of_incorrecty_labeled_examples / nb_of_examples * 100) + "%")
#
# return all_training_examples_labeled_correctly
```
#### File: mai_version/IO/convert_to_interpretations.py
```python
from typing import Dict
from problog.logic import Constant
from problog.program import PrologFile, SimpleProgram
from mai_version.IO.parsing_settings.setting_parser import KeysSettingsParser
from mai_version.IO.parsing_settings.utils import FileSettings
fname_kb = "D:\\KUL\\KUL MAI\\Masterproef\\TILDE\\tilde\\fold\\data\\sisya\\t-0-0-0\\sisy.kb"
fname_s = "D:\\KUL\\KUL MAI\\Masterproef\\TILDE\\tilde\\fold\\data\\sisya\\t-0-0-0\\sisy.s"
parsed_settings = KeysSettingsParser().parse(fname_s) # type: FileSettings
language = parsed_settings.language
kb_pstr = PrologFile(fname_kb)  # fname_kb is a file path, so parse it as a Prolog file
example_key_type_name = "ptid"
example_databases = {} # type: Dict[Constant, SimpleProgram]
# for statement in kb_pstr:
#
#
# def interpretations(database, example_key_set, foreign_key_set):
#
#
#
# for k in example_key_set:
# for line in database:
# if k in line:
# example_databases[k].add(line)
#
#
# # gathetr all tuples in database that contain
# for line in database:
# if line has example key:
# example_databases[key] += line
#
```
#### File: IO/parsing_settings/setting_parser.py
```python
from mai_version.IO.input_format import KnowledgeBaseFormat, KnowledgeBaseFormatException
from mai_version.IO.parsing_settings.token_parser import ClassesTokenParser, TypeTokenParser, RmodeTokenParser, \
PredictionTokenParser
from mai_version.IO.parsing_settings.utils import FileSettings, SettingsParsingError
class SettingParser:
def __init__(self):
self.first_setting_token_parser = None
self.settings = FileSettings()
def parse(self, file_path) -> FileSettings:
if self.first_setting_token_parser is not None:
with open(file_path, 'r') as f:
for line in f:
self.first_setting_token_parser.parse_line(line, self.settings)
return self.settings
else:
raise SettingsParsingError("No SettingTokenParser set as first token parser")
class ModelsSettingsParser(SettingParser):
def __init__(self):
super().__init__()
# TODO: ACE also supports the use of the predict() predicate in the Models format!
classes_token_parser = ClassesTokenParser()
type_token_parser = TypeTokenParser()
rmode_token_parser = RmodeTokenParser()
self.first_setting_token_parser = classes_token_parser
classes_token_parser.set_successor(type_token_parser)
type_token_parser.set_successor(rmode_token_parser)
class KeysSettingsParser(SettingParser):
def __init__(self):
super().__init__()
prediction_token_parser = PredictionTokenParser()
type_token_parser = TypeTokenParser()
rmode_token_parser = RmodeTokenParser()
self.first_setting_token_parser = prediction_token_parser
prediction_token_parser.set_successor(type_token_parser)
type_token_parser.set_successor(rmode_token_parser)
class SettingsParserMapper:
@staticmethod
def get_settings_parser(kb_format: KnowledgeBaseFormat) -> SettingParser:
if kb_format is KnowledgeBaseFormat.KEYS:
return KeysSettingsParser()
elif kb_format is KnowledgeBaseFormat.MODELS:
return ModelsSettingsParser()
else:
raise KnowledgeBaseFormatException('Only the input formats Models and Key are supported.')
# def get_rmode_from_query():
# settings_prolog = PrologFile(settings_file_path)
# # for statement in settings_prolog:
# # print(statement)
# engine = DefaultEngine()
# # try:
# settings_db = engine.prepare(settings_prolog)
# for statement in settings_db:
# print(statement)
# # except ParseError as perr:
# # print('ParseError thrown')
# print(engine.query(settings_db, Term('rmode', 'replaceable(+V_0)')))
```
#### File: mai_version/representation/refinement_bag.py
```python
from itertools import product
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from mai_version.representation.language import TypeModeLanguage
from mai_version.representation.TILDE_query import TILDEQuery
class RefinementBag:
def __init__(self, conjunction, nb_of_times_addable='inf',):
self.nb_of_times_addable = nb_of_times_addable # type: Optional[int]
self.conjunction = conjunction # type: RefinementConjunction
class RefinementConjunction:
def __init__(self):
self.literal_list = [] # type: List[RefinementLiteral]
def get_addable_conjunctions(self, variables_already_in_query):
# a conjunction = 1+ literals
# a literal = 1+ variables
# we need all possible combinations of moded vars
# dictionary: variable name to modes
variables_in_conjunction = {} # type: Dict[str, RefinementVar]
for literal in self.literal_list:
for var in literal.refinement_var_list:
if var.name not in variables_in_conjunction:
                    variables_in_conjunction[var.name] = var
# list of list
all_var_mode_combos = [] # type: List[List[Tuple[str, str]]]
for var in variables_in_conjunction:
var_mode_combos = [] # type: List[Tuple[str, str]]
for mode_of_var in variables_in_conjunction[var].modes:
var_mode_combos.append((var, mode_of_var))
all_var_mode_combos.append(var_mode_combos)
for combo in product(*all_var_mode_combos):
# TODO: types for unification
            # TODO: unification with variables in query
            pass
class RefinementLiteral:
    def __init__(self, functor: str, vars: List['RefinementVar']):
self.functor = functor # type: str
self.refinement_var_list = vars # type: List[RefinementVar]
class RefinementVar:
def __init__(self, name: str, modes: List[str], type: str):
self.name = name # type: str
self.modes = modes # type: List[str]
self.type = type
def refine_conjunctions(query: TILDEQuery, language: TypeModeLanguage, refinement_bags: List[RefinementBag]):
for refinement_bag in refinement_bags:
possible_conjunction_generator = refinement_bag.conjunction.get_addable_conjunctions()
```
#### File: test_datasets/MLE_test/MLE_test.py
```python
from typing import Optional
from mai_version.trees.tree_converter import MLETreeToProgramConverter
from mai_version.IO.input_format import KnowledgeBaseFormat
from mai_version.IO.parsing_settings.setting_parser import SettingParser
from mai_version.IO.label_collector import SimpleProgramLabelCollector
from mai_version.IO.parsing_background_knowledge import parse_background_knowledge
from mai_version.IO.parsing_examples_keys_format import parse_examples_key_format_with_key
from mai_version.classification.example_partitioning import SimpleProgramExamplePartitioner
from mai_version.run.run_keys import run_keys_clausedb
from mai_version.trees.TreeBuilder import MLEDeterministicTreeBuilder
file_name_labeled_examples = 'D:\\KUL\\KUL MAI\\Masterproef\\MLE test\\mle_test_3preds.kb'
file_name_settings = 'D:\\KUL\\KUL MAI\\Masterproef\\MLE test\\mle_test_3preds.s'
use_clausedb = False
def run_keys_simpleprogram_MLE(fname_labeled_examples: str, fname_settings: str, fname_background_knowledge:Optional[str]=None):
# SETTINGS
settings = SettingParser.get_settings_keys_format(fname_settings) # type: Settings
prediction_goal_handler = settings.get_prediction_goal_handler() # type: KeysPredictionGoalHandler
prediction_goal = prediction_goal_handler.get_prediction_goal() # type: Term
language = settings.language # type: TypeModeLanguage
# BACKGROUND KNOWLEDGE
if fname_background_knowledge is not None:
background_knowledge = parse_background_knowledge(fname_background_knowledge) # type: PrologFile
else:
background_knowledge = None
# EXAMPLES
examples = parse_examples_key_format_with_key(fname_labeled_examples) # type: List[SimpleProgramExample]
# LABELS
index_of_label_var = prediction_goal_handler.get_predicate_goal_index_of_label_var() # type: int
label_collector = SimpleProgramLabelCollector(prediction_goal, index_of_label_var)
label_collector.extract_labels(examples)
possible_labels = label_collector.get_labels()
# =================================
tree_builder = MLEDeterministicTreeBuilder(language, possible_labels, SimpleProgramExamplePartitioner(background_knowledge))
tree_builder.debug_printing(True)
tree_builder.build_tree(examples, prediction_goal)
tree = tree_builder.get_tree()
print(str(tree))
tree_to_program_converter = MLETreeToProgramConverter(KnowledgeBaseFormat.KEYS, debug_printing=True, prediction_goal=prediction_goal, index = index_of_label_var)
program = tree_to_program_converter.convert_tree_to_simple_program(tree, language)
# do_labeled_examples_get_correctly_classified_keys(examples, program, prediction_goal, index_of_label_var,
# possible_labels, background_knowledge)
if use_clausedb:
run_keys_clausedb(file_name_labeled_examples, file_name_settings)
else:
run_keys_simpleprogram_MLE(file_name_labeled_examples, file_name_settings)
```
#### File: mai_version/test/test_bongard_examples.py
```python
import unittest
from mai_version.classification.classification_helper import do_labeled_examples_get_correctly_classified_models
from mai_version.test.test_models import ModelsTestBase
class BongardModelsTest(ModelsTestBase):
def setUp(self):
fname_settings = 'D:\\KUL\\KUL MAI\\Masterproef\\data\\ACE-examples-data\\ace\\bongard\\examples\\bongard.s'
fname_labeled_examples = 'D:\\KUL\\KUL MAI\\Masterproef\\data\\ACE-examples-data\\ace\\bongard\\examples\\bongard.kb'
self.general_setup(fname_labeled_examples, fname_settings)
def test_training_examples_labeled_correctly_simpleprogram(self):
program = self.simple_program_setup()
are_training_examples_correctly_classified = do_labeled_examples_get_correctly_classified_models(self.examples,
program,
self.possible_targets,
self.background_knowledge)
self.assertEqual(are_training_examples_correctly_classified, True)
def test_training_examples_labeled_correctly_clausedb(self):
program = self.clausedb_setup()
are_training_examples_correctly_classified = do_labeled_examples_get_correctly_classified_models(self.examples,
program,
self.possible_targets,
self.background_knowledge)
self.assertEqual(are_training_examples_correctly_classified, True)
if __name__ == '__main__':
unittest.main()
```
#### File: mai_version/trees/TreeBuilder_helper_probabilistic.py
```python
from typing import Dict, Iterable
from mai_version.representation.example import Probability, Label, ExampleWrapper
from mai_version.trees import TreeNode
class ProbabilisticTreeBuildingError(Exception):
pass
def print_set_statistics_prob(example_set, possible_targets, indent: str = ""):
nb_of_ex = len(example_set)
if nb_of_ex != 0:
mean_label_prob = {}
nb_times_label_is_max = {}
for label in possible_targets: # type: Label
mean_label_prob[label] = 0
nb_times_label_is_max[label] = 0
for example in example_set:
true_example_labels = example.get_label_dict() # type: Dict[Label, Probability]
label_max_prob = None
max_prob = 0
for label in true_example_labels.keys():
label_prob = true_example_labels[label].value
mean_label_prob[label] = mean_label_prob[label] + label_prob
if label_prob > max_prob:
label_max_prob = label
max_prob = label_prob
nb_times_label_is_max[label_max_prob] = nb_times_label_is_max[label_max_prob] + 1
for label in mean_label_prob.keys():
mean_label_prob[label] = mean_label_prob[label] / nb_of_ex
# printing the statistics
print(indent + "nb of examples: " + str(nb_of_ex))
print(indent + "nb of times a label has the highest probability:")
for label in nb_times_label_is_max:
print(indent + "\t" + str(label) + ": " + str(nb_times_label_is_max[label]) + "/" + str(nb_of_ex))
print(indent + "mean probability for each label:")
for label in mean_label_prob.keys():
print(indent + "\t" + str(label) + ": " + str(mean_label_prob[label]))
else:
print(indent + "example set is empty")
def print_partition_statistics_prob(examples_satisfying_best_query, examples_not_satisfying_best_query, possible_targets, indentation):
print("examples satisfying query")
print_set_statistics_prob(examples_satisfying_best_query, possible_targets, indentation)
print("examples not satisfying query")
print_set_statistics_prob(examples_not_satisfying_best_query, possible_targets, indentation)
def create_probabilistic_leaf_node(node: TreeNode, examples: Iterable[ExampleWrapper], possible_targets):
# TODO: this still uses deterministic labels
nb_of_ex = len(examples)
if nb_of_ex != 0:
mean_label_prob = {}
nb_times_label_is_max = {}
for label in possible_targets: # type: Label
mean_label_prob[label] = 0
nb_times_label_is_max[label] = 0
for example in examples:
true_example_labels = example.get_label_dict() # type: Dict[Label, Probability]
label_max_prob = None
max_prob = 0
for label in true_example_labels.keys():
label_prob = true_example_labels[label].value
mean_label_prob[label] = mean_label_prob[label] + label_prob
if label_prob > max_prob:
label_max_prob = label
max_prob = label_prob
nb_times_label_is_max[label_max_prob] = nb_times_label_is_max[label_max_prob] + 1
for label in mean_label_prob.keys():
mean_label_prob[label] = mean_label_prob[label] / nb_of_ex
label_max_prob = None
max_prob = 0
for label in mean_label_prob.keys():
mean_prob_of_this_label = mean_label_prob[label]
if mean_prob_of_this_label > max_prob:
max_prob = mean_prob_of_this_label
label_max_prob = label
# making a leaf node
node.classification = label_max_prob
else:
raise ProbabilisticTreeBuildingError("there are no examples to make this node a leaf node")
```
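To make concrete what `print_set_statistics_prob` reports, here is a toy call with stub objects; `_Prob` and `_Example` are hypothetical stand-ins for the real `Probability` and example wrapper types, assumed only to expose `.value` and `get_label_dict()`, with the functions of this module in scope.
```python
# Toy illustration; _Prob/_Example are hypothetical stand-ins for the real types.
class _Prob:
    def __init__(self, value):
        self.value = value

class _Example:
    def __init__(self, label_dict):
        self._label_dict = label_dict

    def get_label_dict(self):
        return self._label_dict

toy_examples = [
    _Example({'pos': _Prob(0.8), 'neg': _Prob(0.2)}),
    _Example({'pos': _Prob(0.3), 'neg': _Prob(0.7)}),
]
# Prints the example count, how often each label had the highest probability,
# and the mean probability per label.
print_set_statistics_prob(toy_examples, possible_targets=['pos', 'neg'], indent='  ')
```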
#### File: mai_version/trees/tree_converter.py
```python
from typing import Optional, Dict, Iterator
from problog.logic import Term, And, Var, Constant, AnnotatedDisjunction
from problog.program import SimpleProgram
from mai_version.problog_helper.problog_helper import apply_substitution_to_term
from mai_version.representation.language import TypeModeLanguage
from mai_version.IO.input_format import KnowledgeBaseFormat
from mai_version.representation.example import Label
from mai_version.trees import TreeNode
from mai_version.trees.TreeBuilder import TreeBuilderType
from mai_version.trees.leaf_strategy import MLEDeterministicLeafStrategy
def decision_tree_to_simple_program(node: TreeNode, simple_program: SimpleProgram,
predicate_generator, previous_conjunction=Term('true'),
debug_printing=False):
if node.has_both_children():
# assign a new predicate to this node
p = next(predicate_generator)
# the following if-else is only necessary to remove an unnecessary 'true' term in the head
if previous_conjunction.functor == 'true':
conj_left = node.query.get_literal()
conj_right = ~p
else:
conj_left = And(previous_conjunction, node.query.get_literal())
conj_right = And(previous_conjunction, ~p)
clause = (p << conj_left)
simple_program += clause
# recurse on left subtree
decision_tree_to_simple_program(node.left_subtree, simple_program, predicate_generator, conj_left)
# recurse on right subtree
decision_tree_to_simple_program(node.right_subtree, simple_program, predicate_generator, conj_right)
else:
if node.can_classify():
clause = (node.strategy.classification << previous_conjunction)
simple_program += clause
else:
raise InvalidTreeNodeError()
def convert_tree_to_simple_program(tree_root: TreeNode, language: TypeModeLanguage,
debug_printing=False) -> SimpleProgram:
if debug_printing:
print('\n=== START conversion of tree to program ===')
print('tree to be converted:')
print(str(tree_root))
predicate_generator = get_predicate_generator(language)
program = SimpleProgram()
decision_tree_to_simple_program(tree_root, program, predicate_generator, debug_printing=debug_printing)
if debug_printing:
print('resulting program:')
for statement in program:
print(str(statement) + ".")
print('=== END conversion of tree to program ===\n')
return program
class InvalidTreeNodeError(Exception):
pass
class TreeToProgramConverter:
def __init__(self, kb_format: KnowledgeBaseFormat, debug_printing: bool = False):
self.kb_format = kb_format # type: KnowledgeBaseFormat
self.debug_printing = debug_printing # type: bool
self.predicate_generator = None
self.program = None
def convert_tree_to_simple_program(self, tree_root: TreeNode, language: TypeModeLanguage) -> SimpleProgram:
if self.debug_printing:
print('\n=== START conversion of tree to program ===')
print('tree to be converted:')
print(str(tree_root))
self.predicate_generator = get_predicate_generator(language)
self.program = SimpleProgram()
self._decision_tree_to_simple_program(tree_root)
if self.debug_printing:
print('resulting program:')
for statement in self.program:
print(statement)
print('=== END conversion of tree to program ===\n')
return self.program
def _decision_tree_to_simple_program(self, node: TreeNode, previous_conjunction=Term('true')):
if node.has_both_children():
self._handle_inner_node(node, previous_conjunction)
elif node.can_classify():
self._handle_leaf_node(node, previous_conjunction)
else:
raise InvalidTreeNodeError()
def _handle_inner_node(self, node: TreeNode, previous_conjunction: Term):
# assign a new predicate to this node
p = next(self.predicate_generator)
# the following if-else is only necessary to remove an unnecessary 'true' term in the head
if previous_conjunction.functor == 'true':
conj_left = node.query.get_literal()
conj_right = ~p
else:
conj_left = And(previous_conjunction, node.query.get_literal())
conj_right = And(previous_conjunction, ~p)
clause = (p << conj_left)
self.program += clause
# recurse on left subtree
self._decision_tree_to_simple_program(node.left_subtree, conj_left)
# recurse on right subtree
self._decision_tree_to_simple_program(node.right_subtree, conj_right)
def _handle_leaf_node(self, node: TreeNode, previous_conjunction: Term):
clause = self.get_leaf_node_clause(node, previous_conjunction)
self.program += clause
def get_leaf_node_clause(self, node: TreeNode, previous_conjunction: Term) -> Term:
raise NotImplementedError('abstract method')
class DeterministicTreeToProgramConverter(TreeToProgramConverter):
def __init__(self, kb_format: KnowledgeBaseFormat, debug_printing: bool = False,
prediction_goal: Optional[Term] = None, index: Optional[int] = None):
super().__init__(kb_format, debug_printing)
if self.kb_format == KnowledgeBaseFormat.KEYS:
if prediction_goal is None:
raise ValueError('prediction_goal cannot be None when kb_format==KnowledgeBaseFormat.KEYS')
else:
self.prediction_goal = prediction_goal
if index is None:
raise ValueError('index cannot be None when kb_format==KnowledgeBaseFormat.KEYS')
else:
self.index = index
def get_leaf_node_clause(self, node: TreeNode, previous_conjunction: Term) -> Term:
if self.kb_format == KnowledgeBaseFormat.MODELS:
return node.strategy.classification << previous_conjunction
elif self.kb_format == KnowledgeBaseFormat.KEYS:
var = self.prediction_goal.args[self.index] # type: Var
label = node.strategy.classification # type: Term
substitution = {var.name: label}
goal_with_label = apply_substitution_to_term(self.prediction_goal, substitution) # type: Term
return goal_with_label << previous_conjunction
else:
raise ValueError("Unexpected value of KnowledgeBaseFormat: " + str(self.kb_format))
# class ModelsTreeToProgramConverter(TreeToProgramConverter):
# def get_leaf_node_clause(self, node, previous_conjunction):
# return node.classification << previous_conjunction
#
#
# class KeyTreeToProgramConverter(TreeToProgramConverter):
# def __init__(self, prediction_goal, index, debug_printing=False):
# super().__init__(debug_printing)
# self.prediction_goal = prediction_goal
# self.index = index
#
# def get_leaf_node_clause(self, node: TreeNode, previous_conjunction: Term):
# var = self.prediction_goal.args[self.index] # type: Var
# label = node.classification # type: Term
# substitution = {var.name: label}
# goal_with_label = apply_substitution_to_term(self.prediction_goal, substitution) # type: Term
# return goal_with_label << previous_conjunction
class MLETreeToProgramConverter(DeterministicTreeToProgramConverter):
def __init__(self, kb_format: KnowledgeBaseFormat, debug_printing: bool = False,
prediction_goal: Optional[Term] = None, index: Optional[int] = None):
super().__init__(kb_format, debug_printing, prediction_goal=prediction_goal, index=index)
def get_leaf_node_clause(self, node: TreeNode, previous_conjunction: Term) -> Term:
if self.kb_format == KnowledgeBaseFormat.MODELS:
# TODO: TEST THIS
strategy = node.strategy # type: MLEDeterministicLeafStrategy
label_frequencies = strategy.label_frequencies # type: Optional[Dict[Label, float]]
goals_with_probabilities = []
for label in label_frequencies.keys():
goal = label.with_probability(label_frequencies[label])
goals_with_probabilities.append(goal)
return AnnotatedDisjunction(goals_with_probabilities, previous_conjunction)
elif self.kb_format == KnowledgeBaseFormat.KEYS:
var = self.prediction_goal.args[self.index] # type: Var
strategy = node.strategy # type: MLEDeterministicLeafStrategy
label_frequencies = strategy.label_frequencies # type: Optional[Dict[Label, float]]
goals_with_probabilities = []
for label in label_frequencies.keys():
substitution = {var.name: label} # type: Dict[str, Term]
goal_with_label = apply_substitution_to_term(self.prediction_goal, substitution) # type: Term
probability_of_goal = Constant(label_frequencies[label])
goal_with_label.probability = probability_of_goal
goals_with_probabilities.append(goal_with_label)
return AnnotatedDisjunction(goals_with_probabilities, previous_conjunction)
else:
raise ValueError("Unexpected value of KnowledgeBaseFormat: " + str(self.kb_format))
class TreeToProgramConverterMapper:
@staticmethod
def get_converter(tree_builder_type: TreeBuilderType, kb_format: KnowledgeBaseFormat, debug_printing: bool = False,
prediction_goal: Optional[Term] = None, index: Optional[int] = None):
if tree_builder_type is TreeBuilderType.DETERMINISTIC:
return DeterministicTreeToProgramConverter(kb_format, debug_printing, prediction_goal, index)
        elif tree_builder_type is TreeBuilderType.MLEDETERMINISTIC:
            return MLETreeToProgramConverter(kb_format, debug_printing, prediction_goal, index)
        elif tree_builder_type is TreeBuilderType.PROBABILISITC:
            raise NotImplementedError('No defined treebuilder choice for: ' + str(tree_builder_type))
else:
raise NotImplementedError('No defined treebuilder choice for: ' + str(tree_builder_type))
def get_predicate_generator(language: TypeModeLanguage) -> Iterator[Term]:
count = 0 # type: int
while True:
new_name_found = False
while not new_name_found:
name = 'pred%d' % count
count += 1
if not language.does_predicate_exist(name, 1):
new_name_found = True
yield Term(name)
```
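The `get_predicate_generator` helper at the bottom yields fresh predicate names that do not clash with the language. A minimal sketch, using a hypothetical stub in place of `TypeModeLanguage` (only `does_predicate_exist` is assumed), with the function and problog's `Term` in scope as in the file above.
```python
# Minimal sketch; _StubLanguage is a hypothetical stand-in for TypeModeLanguage.
class _StubLanguage:
    def does_predicate_exist(self, name, arity):
        return name == 'pred0'  # pretend pred0/1 is already taken

gen = get_predicate_generator(_StubLanguage())
print(next(gen), next(gen))  # pred1 pred2 -- pred0 is skipped because it already exists
```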
#### File: tilde/refactor/back_end_picking.py
```python
from enum import Enum
from refactor.default_interface import DefaultHandler
class QueryBackEnd(Enum):
SIMPLE_PROGRAM = 1
PROBLOG = 2
CLAUSEDB = 3
DJANGO = 4
SUBTLE = 5
FLGG = 6
class UnavailableBackEndException(Exception):
pass
def get_back_end_default(description) -> DefaultHandler:
if description == QueryBackEnd.SIMPLE_PROGRAM:
try:
import refactor.query_testing_back_end.problog.defaults
return refactor.query_testing_back_end.problog.defaults.ProblogDefaultHandler(
QueryBackEnd.SIMPLE_PROGRAM.name)
except ImportError as err:
raise UnavailableBackEndException(description.name + " backend not available, " + str(err))
if description == QueryBackEnd.PROBLOG or \
description == QueryBackEnd.CLAUSEDB:
return None
if description == QueryBackEnd.DJANGO:
try:
import refactor.query_testing_back_end.django.defaults
return refactor.query_testing_back_end.django.defaults.DjangoDefaultHandler(QueryBackEnd.DJANGO.name)
except ImportError as err:
raise UnavailableBackEndException(description.name + " backend not available, " + str(err))
if description == QueryBackEnd.FLGG:
try:
import refactor.query_testing_back_end.flgg_py4j.defaults
return refactor.query_testing_back_end.flgg_py4j.defaults.FLGGDefaultHandler(QueryBackEnd.FLGG.name)
except ImportError as err:
raise UnavailableBackEndException(description.name + " backend not available, " + str(err))
if description == QueryBackEnd.SUBTLE:
try:
import refactor.query_testing_back_end.subtle.defaults
return refactor.query_testing_back_end.subtle.defaults.SubtleDefaultHandler(QueryBackEnd.SUBTLE.name)
except ImportError as err:
raise UnavailableBackEndException(description.name + " backend not available, " + str(err))
else:
return None
if __name__ == '__main__':
print(QueryBackEnd.SIMPLE_PROGRAM.name)
```
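Because every back end guards its imports, a caller can probe for an optional back end and fall back when its dependencies are missing. A hedged usage sketch with the names from the file above:
```python
# Hedged usage sketch: try the Django back end, fall back to SIMPLE_PROGRAM.
try:
    handler = get_back_end_default(QueryBackEnd.DJANGO)
except UnavailableBackEndException as err:
    print('falling back:', err)
    handler = get_back_end_default(QueryBackEnd.SIMPLE_PROGRAM)
```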
#### File: refactor/io/parsing_background_knowledge.py
```python
from typing import Optional, Iterable, List
from problog.logic import Term
from problog.program import PrologFile, SimpleProgram
from refactor.representation.background_knowledge import BackgroundKnowledgeWrapper
#
# def parse_background_knowledge(file_name: str)-> PrologFile:
# return PrologFile(file_name)
def parse_background_knowledge_models(file_name: Optional[str] = None,
possible_labels: Optional[Iterable[Term]] = None) -> BackgroundKnowledgeWrapper:
if file_name is None:
return BackgroundKnowledgeWrapper()
logic_program = PrologFile(file_name)
if possible_labels is not None:
possible_labels_str = [str(label) for label in possible_labels] # type: List[str]
found_a_prediction_clause = False
prediction_goal_clauses = SimpleProgram()
stripped_logic_program = SimpleProgram()
for prolog_statement in logic_program:
is_prediction_clause = False
for possible_label_str in possible_labels_str:
if str(prolog_statement).startswith(possible_label_str):
is_prediction_clause = True
found_a_prediction_clause = True
break
if is_prediction_clause:
prediction_goal_clauses += prolog_statement
else:
stripped_logic_program += prolog_statement
if found_a_prediction_clause:
return BackgroundKnowledgeWrapper(logic_program=stripped_logic_program,
prediction_goal_clauses=prediction_goal_clauses)
else:
return BackgroundKnowledgeWrapper(logic_program=logic_program)
else:
return BackgroundKnowledgeWrapper(logic_program=logic_program)
def parse_background_knowledge_keys(file_name: Optional[str] = None,
prediction_goal: Optional[Term] = None) -> BackgroundKnowledgeWrapper:
if file_name is None:
return BackgroundKnowledgeWrapper()
logic_program = PrologFile(file_name)
if prediction_goal is not None:
prediction_goal_functor = prediction_goal.functor # type: str
found_a_prediction_goal_clause = False
prediction_goal_clauses = SimpleProgram()
stripped_logic_program = SimpleProgram()
for prolog_statement in logic_program:
if str(prolog_statement).startswith(prediction_goal_functor):
found_a_prediction_goal_clause = True
prediction_goal_clauses += prolog_statement
else:
stripped_logic_program += prolog_statement
if found_a_prediction_goal_clause:
return BackgroundKnowledgeWrapper(logic_program=stripped_logic_program,
prediction_goal_clauses=prediction_goal_clauses)
else:
return BackgroundKnowledgeWrapper(logic_program=logic_program)
else:
return BackgroundKnowledgeWrapper(logic_program=logic_program)
```
#### File: django/django_wrapper/ClauseWrapper.py
```python
import ctypes
from problog.logic import Term
from refactor.query_testing_back_end.django.django_wrapper.c_library import lib_django
class ConversionException(Exception):
pass
class ClauseWrapper:
__ajoute_tete = lib_django.AjouteTete
__ajoute_clause = lib_django.AjouteClause
__termine_clause = lib_django.TermineClause
__libere_clause = lib_django.LibereClause
__affice_clause_fol = lib_django.AfficheClauseFOL
def __init__(self, clause_id=None):
self.clause = lib_django.NewClause()
self.is_destructed = False
self.has_head = False
self.is_locked = False
self.clause_id = clause_id
self._problog_representation = None
def destruct(self):
if not self.is_destructed:
ClauseWrapper.__libere_clause(self.clause)
self.is_destructed = True
else:
raise ConversionException("double destruct of clause")
# def __del__(self):
# """
# WARNING: don't rely on the garbage collector to call the object's destructor.
# Cyclic dependencies can prevent the GC from ever calling this method.
# ALWAYS call destruct EXPLICITLY.
#
# :return:
# """
# self.destruct()
def print_using_c(self):
if not self.is_destructed:
ClauseWrapper.__affice_clause_fol(self.clause)
def lock_adding_to_clause(self):
if not self.is_destructed:
if not self.is_locked:
ClauseWrapper.__termine_clause(self.clause)
self.is_locked = True
else:
raise ConversionException("double lock/end of clause")
else:
raise ConversionException("tried locking a destructed clause")
def add_literal_to_body(self, literal: Term):
if not self.is_destructed:
var_array_c = _get_variable_array(literal)
functor = str(literal.functor).encode('utf-8')
arity = int(literal.arity)
ClauseWrapper.__ajoute_clause(self.clause, functor, arity, var_array_c)
else:
raise ConversionException("adding literal to body of desctructed clause")
def add_literal_as_head(self, literal: Term):
"""
NOTE: not mandatory; can be without head.
:param literal:
:return:
"""
if not self.is_destructed:
if not self.has_head:
var_array_c = _get_variable_array(literal)
functor = str(literal.functor).encode('utf-8')
arity = int(literal.arity)
ClauseWrapper.__ajoute_tete(self.clause, functor, arity, var_array_c)
self.has_head = True
else:
raise ConversionException("clause already has a head")
else:
raise ConversionException("adding literal as head to destructed clause")
def __str__(self):
return str(self._problog_representation)
def add_problog_clause(self, problog_representation: Term):
self._problog_representation = problog_representation
class HypothesisWrapper():
__new_hypothese_base = lib_django.NewHypotheseBase
__libere_hypothese = lib_django.LibereHypothese
def __init__(self, clause_wrapper: ClauseWrapper):
if clause_wrapper.is_destructed:
raise ConversionException("cannot turn destructed clause into a hypothesis")
elif not clause_wrapper.is_locked:
raise ConversionException("cannot turn an unclosed clause into a hypothesis")
else:
self.hypothesis = HypothesisWrapper.__new_hypothese_base(clause_wrapper.clause)
self.is_destructed = False
self._prolog_hypothesis = clause_wrapper._problog_representation
def destruct(self):
if not self.is_destructed:
HypothesisWrapper.__libere_hypothese(self.hypothesis)
self.is_destructed = True
else:
raise ConversionException("double destruct of hypothesis")
# def __del__(self):
# """
# WARNING: don't rely on the garbage collector to call the object's destructor.
# Cyclic dependencies can prevent the GC from ever calling this method.
# ALWAYS call destruct EXPLICITLY.
#
# :return:
# """
# self.destruct()
def __str__(self):
return str(self._prolog_hypothesis)
#
#
#
# class HypothesisWrapper(ClauseWrapper):
# __new_hypothese_base = lib_django.NewHypotheseBase
#
# __libere_hypothese = lib_django.LibereHypothese
#
# def __init__(self):
# super().__init__()
# self.is_set_to_hypothesis = False
#
# def convert_to_hypothesis(self):
# if not self.is_destructed:
# if not self.is_set_to_hypothesis:
# HypothesisWrapper.__new_hypothese_base(self.clause, 0)
# self.is_set_to_hypothesis = True
# else:
# raise ConversionException("double set to hypothesis")
# else:
# raise ConversionException("tried setting a destructed clause as a hypothesis")
#
# def destruct(self):
# if not self.is_destructed:
# HypothesisWrapper.__libere_hypothese(self.clause)
# self.is_destructed = True
# else:
# raise ConversionException("double destruct")
def _get_variable_array(literal: Term):
VariableArray = ctypes.c_char_p * literal.arity
var_array_c = VariableArray()
for i in range(literal.arity):
var_array_c[i] = str(literal.args[i]).encode('utf-8')
return var_array_c
```
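The wrappers above mirror the manual lifecycle of the native library: build a clause, lock it, promote it to a hypothesis, and destruct both explicitly. The sketch below is a lifecycle illustration only, not a tested call sequence; it assumes the native library behind `lib_django` is actually installed.
```python
# Lifecycle sketch; requires the native django library wrapped by lib_django.
from problog.logic import Term, Var

clause = ClauseWrapper(clause_id=0)
clause.add_literal_as_head(Term('sendback', Var('Key')))
clause.add_literal_to_body(Term('worn', Var('Key'), Var('X')))
clause.lock_adding_to_clause()

hypothesis = HypothesisWrapper(clause)
# ... hand the hypothesis to the django query-testing back end ...

# Destruct explicitly instead of relying on the garbage collector.
hypothesis.destruct()
clause.destruct()
```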
#### File: query_testing_back_end/django/splitter.py
```python
from typing import Optional
from refactor.tilde_essentials.split_criterion import SplitCriterionBuilder
from refactor.tilde_essentials.splitter import Splitter, SplitInfo
from refactor.tilde_essentials.tree_node import TreeNode
class DjangoSplitter(Splitter):
def get_split(self, examples, current_node: TreeNode) -> Optional[SplitInfo]:
current_best_split_info = None
split_criterion = SplitCriterionBuilder.get_split_criterion(
self.split_criterion_str,
examples, current_node.get_labels(examples))
generator = self.test_generator_builder.generate_possible_tests(examples, current_node)
for candidate_test in generator:
if self.verbose:
print(candidate_test)
examples_satisfying_test, examples_not_satisfying_test = self._split_examples(candidate_test, examples)
candidate_test_score = split_criterion.calculate(examples_satisfying_test,
examples_not_satisfying_test
)
if current_best_split_info is None or candidate_test_score > current_best_split_info.score:
current_best_split_info = SplitInfo(test=candidate_test,
examples_left=examples_satisfying_test,
examples_right=examples_not_satisfying_test,
score=candidate_test_score,
threshold=split_criterion.get_threshold(),
split_criterion=split_criterion.get_name())
else:
# if we do not keep the query, destruct it
candidate_test.destruct()
return current_best_split_info
```
#### File: query_testing_back_end/flgg_py4j/query_wrapping.py
```python
from refactor.tilde_essentials.query_wrapping import QueryWrapper
class FLGGQueryWrapper(QueryWrapper):
def __str__(self):
return self.external_representation
```
#### File: query_testing_back_end/problog/evaluation.py
```python
from typing import Optional
import problog
from problog.engine import GenericEngine, DefaultEngine, ClauseDB
from problog.logic import Term, Var
from problog.program import LogicProgram, SimpleProgram, PrologString
from refactor.tilde_essentials.evaluation import TestEvaluator
from refactor.tilde_essentials.example import Example
from refactor.representation.TILDE_query import TILDEQuery
class ProbLogQueryEvaluator(TestEvaluator):
def evaluate(self, instance, test) -> bool:
raise NotImplementedError('abstract method')
def __init__(self, engine: GenericEngine = None):
if engine is None:
self.engine = DefaultEngine()
self.engine.unknown = 1
else:
self.engine = engine
self.to_query = Term('to_query')
class SimpleProgramQueryEvaluator(ProbLogQueryEvaluator):
def __init__(self,
background_knowledge: Optional[LogicProgram] = None,
engine: GenericEngine = None):
super().__init__(engine=engine)
if background_knowledge is None:
self.db = self.engine.prepare(SimpleProgram()) # type: ClauseDB
else:
self.db = self.engine.prepare(background_knowledge) # type: ClauseDB
self.db += Term('query')(self.to_query)
def evaluate(self, instance: Example, test: TILDEQuery) -> bool:
query_conj = test.to_conjunction()
db_to_query = self.db.extend()
for statement in instance.data:
db_to_query += statement
# TODO: remove ugly hack
if hasattr(instance, 'classification_term'):
db_to_query += instance.classification_term
db_to_query += (self.to_query << query_conj)
query_result = problog.get_evaluatable().create_from(db_to_query, engine=self.engine).evaluate()
return query_result[self.to_query] > 0.5
if __name__ == '__main__':
instance = PrologString("""
color(blue).
taste(sweet).
texture(fruity).
""")
query = TILDEQuery(None, Term('color')(Var('X')))
evaluator = SimpleProgramQueryEvaluator()
result = evaluator.evaluate(instance, query)
print(result)
```
#### File: query_testing_back_end/subtle/test_generation.py
```python
from typing import Optional
from problog.logic import Term
from refactor.tilde_essentials.test_generation import FOLTestGeneratorBuilder
from refactor.query_testing_back_end.subtle.clause_handling import build_hypothesis
from refactor.query_testing_back_end.subtle.query_wrapping import SubtleQueryWrapper
from refactor.representation.TILDE_query import TILDEQueryHiddenLiteral, TILDEQuery
from refactor.representation.language import TypeModeLanguage
from refactor.tilde_essentials.refinement_controller import RefinementController
class SubtleTestGeneratorBuilder(FOLTestGeneratorBuilder):
def __init__(self, language: TypeModeLanguage,
query_head_if_keys_format: Optional[Term] = None):
super().__init__(SubtleTestGeneratorBuilder.get_initial_query(query_head_if_keys_format))
self.language = language
def generate_possible_tests(self, examples, current_node):
query_wrapper = self._get_associated_query(current_node) # type: SubtleQueryWrapper
query_to_refine = query_wrapper.tilde_query # type: TILDEQuery
generator = RefinementController.get_refined_query_generator(
query_to_refine, self.language)
for tilde_query in generator:
tilde_query_str = build_hypothesis(tilde_query) # type: str
yield SubtleQueryWrapper(tilde_query, tilde_query_str)
@staticmethod
def get_initial_query(query_head_if_keys_format: Optional[Term] = None):
if query_head_if_keys_format is not None:
initial_tilde_query = TILDEQueryHiddenLiteral(query_head_if_keys_format)
else:
initial_tilde_query = TILDEQuery(None, None)
wrapper_initial_tilde_query = SubtleQueryWrapper(initial_tilde_query, build_hypothesis(initial_tilde_query))
return wrapper_initial_tilde_query
```
#### File: refactor/representation/example_collection.py
```python
import warnings
from typing import List, Optional, Set
from problog.logic import Constant
from refactor.representation.example import SimpleProgramExampleWrapper, ClauseDBExampleWrapper, ExampleWrapper
class UnlabeledExampleException(Exception):
pass
class EmptyExampleCollectionException(Exception):
pass
class ExampleCollection:
def __init__(self):
self.example_wrappers_sp = None # type: Optional[List[SimpleProgramExampleWrapper]]
self.example_wrappers_clausedb = None # type: Optional[List[ClauseDBExampleWrapper]]
self.are_sp_examples_labeled = False # type: bool
self.are_clausedb_examples_labeled = False # type: bool
def get_example_wrappers_sp(self) -> List[SimpleProgramExampleWrapper]:
if self.example_wrappers_sp is None:
raise EmptyExampleCollectionException("There are no SimpleProgram examples")
if not self.are_sp_examples_labeled:
warnings.warn("The SimpleProgram examples are not labeled")
return self.example_wrappers_sp
def get_example_wrappers_clausedb(self) -> List[ClauseDBExampleWrapper]:
if self.example_wrappers_clausedb is None:
raise EmptyExampleCollectionException("There are no ClauseDB examples")
if not self.are_clausedb_examples_labeled:
warnings.warn("The ClauseDB examples are not labeled")
return self.example_wrappers_clausedb
def get_labeled_example_wrappers_sp(self) -> List[SimpleProgramExampleWrapper]:
if self.example_wrappers_sp is None:
raise EmptyExampleCollectionException("There are no SimpleProgram examples")
# if they are labeled:
if self.are_sp_examples_labeled:
return self.example_wrappers_sp
else: # SimpleProgram examples are not labeled
if self.are_clausedb_examples_labeled:
if len(self.example_wrappers_sp) == len(self.example_wrappers_clausedb):
# give the label of the clausedb to the corresponding simpleprogram
for i in range(0, len(self.example_wrappers_sp)):
self.example_wrappers_sp[i].label = self.example_wrappers_clausedb[i].label
# set flag: now they are labeled
self.are_sp_examples_labeled = True
return self.example_wrappers_sp
else: # both clausedb and simpleprograms not labeled
raise UnlabeledExampleException("Both the ClauseDB and SimpleProgram examples are unlabeled")
def get_labeled_example_wrappers_clausedb(self) -> List[ClauseDBExampleWrapper]:
if self.example_wrappers_clausedb is None:
raise EmptyExampleCollectionException("There are no ClauseDB examples")
if self.are_clausedb_examples_labeled:
return self.example_wrappers_clausedb
else: # clausedb not labeled
if self.are_sp_examples_labeled:
if len(self.example_wrappers_sp) == len(self.example_wrappers_clausedb):
# give the label of the simpleprogram to the corresponding clausedb
for i in range(0, len(self.example_wrappers_sp)):
self.example_wrappers_clausedb[i].label = self.example_wrappers_sp[i].label
# now the clausedb are labeled!
self.are_clausedb_examples_labeled = True
return self.example_wrappers_clausedb
else: # both clausedb and simpleprograms not labeled
raise UnlabeledExampleException("Both the ClauseDB and SimpleProgram examples are unlabeled")
def set_example_wrappers_sp(self, example_wrappers_sp: List[SimpleProgramExampleWrapper]):
self.example_wrappers_sp = example_wrappers_sp
# if the examples are labeled, set flag
if example_wrappers_sp[0].label is not None:
self.are_sp_examples_labeled = True
def set_example_wrappers_clausedb(self, example_wrappers_clausedb: List[ClauseDBExampleWrapper]):
self.example_wrappers_clausedb = example_wrappers_clausedb
# if the examples are labeled, set flag
if example_wrappers_clausedb[0].label is not None:
self.are_clausedb_examples_labeled = True
def get_examples(self) -> List[ExampleWrapper]:
if self.example_wrappers_clausedb is not None:
return self.get_example_wrappers_clausedb()
if self.example_wrappers_sp is not None:
return self.get_example_wrappers_sp()
raise EmptyExampleCollectionException("The collection contains no SimpleProgram and no ClauseDB examples")
def get_labeled_examples(self) -> List[ExampleWrapper]:
if self.example_wrappers_clausedb is not None:
return self.get_labeled_example_wrappers_clausedb()
if self.example_wrappers_sp is not None:
return self.get_labeled_example_wrappers_sp()
raise EmptyExampleCollectionException("The collection contains no SimpleProgram and no ClauseDB examples")
def filter_examples(self, key_set: Set[Constant]) -> 'ExampleCollection':
"""
Only KEEPS the examples IN the key_set
:param key_set:
:return:
"""
filtered_collection = ExampleCollection()
example_wrappers_sp = self.example_wrappers_sp
example_wrappers_clausedb = self.example_wrappers_clausedb
if example_wrappers_sp is not None and example_wrappers_clausedb is not None:
filtered_sp = []
filtered_clausedb = []
for ex_index, ex_sp in enumerate(example_wrappers_sp):
if ex_sp.key in key_set:
filtered_sp.append(ex_sp)
filtered_clausedb.append(example_wrappers_clausedb[ex_index])
filtered_collection.set_example_wrappers_sp(filtered_sp)
filtered_collection.set_example_wrappers_clausedb(filtered_clausedb)
return filtered_collection
elif example_wrappers_sp is not None:
filtered_sp = [ex_sp for ex_sp in example_wrappers_sp if ex_sp.key in key_set]
filtered_collection.set_example_wrappers_sp(filtered_sp)
return filtered_collection
elif example_wrappers_clausedb is not None:
filtered_clausedb = [ex_clausedb for ex_clausedb in example_wrappers_clausedb if ex_clausedb.key in key_set]
filtered_collection.set_example_wrappers_clausedb(filtered_clausedb)
return filtered_collection
return filtered_collection
# TODO: merge with function above, they only differ in boolean check
def filter_examples_not_in_key_set(self, key_set: Set[Constant]) -> 'ExampleCollection':
"""
        Only KEEPS the examples whose key is NOT in the key_set
:param key_set:
:return:
"""
filtered_collection = ExampleCollection()
example_wrappers_sp = self.example_wrappers_sp
example_wrappers_clausedb = self.example_wrappers_clausedb
if example_wrappers_sp is not None and example_wrappers_clausedb is not None:
filtered_sp = []
filtered_clausedb = []
for ex_index, ex_sp in enumerate(example_wrappers_sp):
if ex_sp.key not in key_set:
filtered_sp.append(ex_sp)
filtered_clausedb.append(example_wrappers_clausedb[ex_index])
filtered_collection.set_example_wrappers_sp(filtered_sp)
filtered_collection.set_example_wrappers_clausedb(filtered_clausedb)
return filtered_collection
elif example_wrappers_sp is not None:
filtered_sp = [ex_sp for ex_sp in example_wrappers_sp if ex_sp.key not in key_set]
filtered_collection.set_example_wrappers_sp(filtered_sp)
return filtered_collection
elif example_wrappers_clausedb is not None:
filtered_clausedb = [ex_clausedb for ex_clausedb in example_wrappers_clausedb if ex_clausedb.key not in key_set]
filtered_collection.set_example_wrappers_clausedb(filtered_clausedb)
return filtered_collection
return filtered_collection
```
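`filter_examples` and its `not_in_key_set` twin only look at the `key` attribute of each wrapper, so their behaviour can be sketched with a tiny stub class; `_StubExample` is a hypothetical stand-in for `SimpleProgramExampleWrapper`.
```python
# Hypothetical sketch; _StubExample stands in for SimpleProgramExampleWrapper.
class _StubExample:
    def __init__(self, key, label):
        self.key = key
        self.label = label

collection = ExampleCollection()
collection.set_example_wrappers_sp([_StubExample(1, 'pos'), _StubExample(2, 'neg')])

kept = collection.filter_examples({1})                      # keeps only key 1
dropped = collection.filter_examples_not_in_key_set({1})    # keeps only key 2
print(len(kept.get_example_wrappers_sp()), len(dropped.get_example_wrappers_sp()))  # 1 1
```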
#### File: refactor/representation/problog_helper.py
```python
from typing import Dict
from problog.logic import Term, Var
def apply_substitution_to_term(term: Term, substitution: Dict[str, Term]) -> Term:
complete_substitution = {}
# NOTE: all variables in the term need to be defined in the substitution
for var in term.variables():
complete_substitution[var.name] = Var(var.name)
complete_substitution.update(substitution)
term_substitution = term.apply(complete_substitution)
return term_substitution
def get_probability(term: Term) -> float:
if term.probability is None:
return 1.0
else:
return term.probability
```
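A small illustration of `apply_substitution_to_term`, assuming `problog` is installed and this module is in scope; only the `Label` variable is bound, the other variables are mapped to themselves by the completed substitution.
```python
# Illustration; assumes problog is installed.
from problog.logic import Term, Var, Constant

goal = Term('machine', Var('Key'), Var('Label'))
bound = apply_substitution_to_term(goal, {'Label': Constant('fix')})
print(bound)  # expected to print something like: machine(Key,fix)
```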
#### File: refactor/representation/TILDE_query.py
```python
from typing import Optional, List
from problog.logic import Term, Clause, And
from problog.util import OrderedSet
from refactor.representation.rule import Rule
class TILDEQuery(Rule):
"""Represents a query as used in tilde.
"""
def __init__(self, parent_query: Optional['TILDEQuery'], new_literal: Optional[Term]):
Rule.__init__(self)
self.parent = parent_query # type: Optional['TILDEQuery']
self.literal = new_literal # type: Optional[Term]
def get_literals(self) -> List[Term]:
"""Get literals in the rule.
:return: list of literals including target
:rtype: list[Term]
"""
if self.parent is None:
if self.literal is None:
return []
else:
return [self.literal]
else:
return self.parent.get_literals() + [self.literal]
def has_head(self) -> bool:
# TODO: what if the literal of the root is None?
root = self._get_root()
return isinstance(root, TILDEQueryHiddenLiteral)
def get_literals_of_body(self) -> List[Term]:
"""Get literals in the body of the rule.
:return: list of literals including target
:rtype: list[Term]
"""
literals = self.get_literals()
# if len(literals) > 0 and isinstance(literals[0], TILDEQueryHiddenLiteral):
if self.has_head():
# there is a head:
return literals[1:]
else:
return literals
def get_literal(self) -> Term:
"""Get most recently added body literal.
:return: None (the body is empty for this type of rule)
:rtype: Term
"""
return self.literal
def __and__(self, literal: Term) -> 'TILDEQuery':
"""Add a literal to the body of the rule.
:param literal: literal to add
:type literal: Term
:return: new rule
:rtype: TILDEQuery
"""
return TILDEQuery(self, literal)
def to_conjunction(self, functor=None) -> And:
"""Transform query into ProbLog conjunction
:param functor: override rule functor (set to None to keep original)
:type functor: str | None
:return: clause representation
:rtype: list[Clause]
"""
literals = self.get_literals()
def rename_recursive(lit, new_functor_of_recursive):
if lit.functor == '_recursive':
return Term(new_functor_of_recursive, *lit.args)
else:
return lit
literals = [rename_recursive(lit, functor) for lit in literals]
return And.from_list(literals)
def has_new_variables(self) -> OrderedSet:
# if I am the first query literal
if self.parent is None:
return self.get_literal().variables()
else:
return self.get_literal().variables() - self.parent.get_variables()
def __str__(self) -> str:
literals = self.get_literals()
# if len(literals) > 0 and isinstance(literals[0], TILDEQueryHiddenLiteral):
if len(literals) > 0 and self.has_head():
head = literals[0]
if len(literals) == 1:
return '%s :- true.' % (head,)
else:
return '%s :- %s' % (head, ', '.join(map(str, literals[1:])))
else:
head = Term('false')
if len(literals) == 0:
return '%s :- true.' % (head,)
else:
return '%s :- %s' % (head, ', '.join(map(str, literals)))
def get_literals_as_subsumption_list(self):
if self.parent is None:
if self.literal is None:
return []
else:
# if isinstance(self, TILDEQueryHiddenLiteral):
# head = self.literal
# return [~ head]
# else:
# return [self.literal]
return []
else:
return self.parent.get_literals_as_subsumption_list() + [self.literal]
def __len__(self) -> int:
if self.parent is not None:
return len(self.parent) + 1
else:
return 1
def _get_root(self):
current_node = self
root = self
while current_node is not None:
if current_node.parent is not None:
root = current_node.parent
current_node = current_node.parent
return root
class TILDEQueryHiddenLiteral(TILDEQuery):
def __init__(self, literal: Optional[Term] = None):
TILDEQuery.__init__(self, None, literal)
def get_literals(self) -> List[Term]:
"""Get literals in the rule.
:return: list of literals including target
:rtype: list[Term]
"""
return [self.literal]
def get_literal(self):
"""Get most recently added body literal.
:return: None (the body is empty for this type of rule)
:rtype: Term
"""
return None
def __str__(self):
return '%s :- true' % self.literal
```
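A TILDE query is a linked list of literals growing towards the leaf. The sketch below, assuming `problog` and this module are importable, builds a keys-format query with a hidden head literal and extends it twice with `&`.
```python
# Hedged sketch of building and printing a TILDE query.
from problog.logic import Term, Var

head = TILDEQueryHiddenLiteral(Term('sendback', Var('Key')))
q = head & Term('worn', Var('Key'), Var('X'))
q = q & Term('replaceable', Var('X'))

print(q)                   # sendback(Key) :- worn(Key,X), replaceable(X)
print(len(q))              # 3 literals in the chain, head included
print(q.to_conjunction())  # ProbLog conjunction over all literals, head included
```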
#### File: refactor/tilde_essentials/example.py
```python
from typing import Iterable
from refactor.tilde_essentials.destuctable import Destructible
class Example(Destructible):
"""
Container class for an example, storing its data and label (types undefined)
"""
def __init__(self, data, label):
self.data = data
self.label = label
def destruct(self):
destruct_method = getattr(self.data, 'destruct', None)
if callable(destruct_method):
self.data.destruct()
def get_labels(examples: Iterable):
labels = set()
for current_example in examples:
# for label in current_example.labels:
labels.add(current_example.label)
return labels
def calculate_majority_class(examples):
"""Calculate the majority class label in the given set of examples.
"""
label_counts = calculate_label_counts(examples)
label_with_max_count = max(label_counts, key=(lambda key: label_counts[key]))
count = label_counts[label_with_max_count] # type: int
return label_with_max_count, count
def calculate_label_counts(examples):
"""Assumes that the examples each have ONE label, and not a distribution over labels"""
label_counts = {}
for example in examples:
label = example.label
label_counts[label] = label_counts.get(label, 0) + 1
return label_counts
def calculate_label_frequencies(examples):
"""Assumes that the examples each have ONE label, and not a distribution over labels"""
label_counts = calculate_label_counts(examples)
for label in label_counts.keys():
label_counts[label] = label_counts[label] / len(examples)
return label_counts
def calculate_label_frequencies_and_absolute_counts(examples):
"""Assumes that the examples each have ONE label, and not a distribution over labels"""
label_counts = calculate_label_counts(examples)
label_frequencies = {}
for label in label_counts.keys():
label_frequencies[label] = label_counts[label] / len(examples)
return label_frequencies, label_counts
```
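The label helpers all assume one crisp label per example. A tiny illustration with the `Example` class defined above:
```python
# Tiny illustration of the label helpers.
toy = [Example(data=None, label='pos'),
       Example(data=None, label='pos'),
       Example(data=None, label='neg')]

print(calculate_majority_class(toy))     # ('pos', 2)
print(calculate_label_counts(toy))       # {'pos': 2, 'neg': 1}
print(calculate_label_frequencies(toy))  # {'pos': 0.66..., 'neg': 0.33...}
```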
#### File: refactor/tilde_essentials/splitter.py
```python
from typing import Optional
from refactor.tilde_essentials.evaluation import TestEvaluator
from refactor.tilde_essentials.split_criterion import SplitCriterionBuilder
from refactor.tilde_essentials.test_generation import TestGeneratorBuilder
from refactor.tilde_essentials.tree_node import TreeNode
class SplitInfo:
"""
Contains the information about a split using a test on a set of training examples.
"""
def __init__(self,
test,
examples_left,
examples_right,
score,
threshold,
split_criterion):
self.test = test
self.examples_left = examples_left
self.examples_right = examples_right
self.score = score
self.threshold = threshold
self.split_criterion = split_criterion
# def get_test(self):
# return self.test
#
# @property
# def get_examples_left(self):
# return self.examples_left
#
# def get_examples_right(self):
# raise NotImplementedError('abstract method')
#
# def get_score(self) -> float:
# raise NotImplementedError('abstract method')
def get_split_criterion(self) -> str:
"""
Returns 'gini' for Gini index, 'entropy' for information gain,
'MSE' for mean squared error and 'MSA' for Mean Absolute Error
:return:
"""
raise NotImplementedError('abstract method')
def passing_score(self) -> bool:
return self.score >= self.threshold
class Splitter:
"""
Finds the best test for splitting a node based on the node's training examples.
It must be initialized with a SplitCriterion and TestEvaluator.
Reports the split info using a SplitInfo object.
"""
def __init__(self, split_criterion_str, test_evaluator: TestEvaluator,
test_generator_builder: TestGeneratorBuilder, verbose=False):
self.split_criterion_str = split_criterion_str
self.test_evaluator = test_evaluator
self.test_generator_builder = test_generator_builder
self.verbose=verbose
def get_split(self, examples, current_node: TreeNode) -> Optional[SplitInfo]:
current_best_split_info = None
split_criterion = SplitCriterionBuilder.get_split_criterion(
self.split_criterion_str,
examples, current_node.get_labels(examples))
generator = self.test_generator_builder.generate_possible_tests(examples, current_node)
for candidate_test in generator:
if self.verbose:
print(candidate_test)
examples_satisfying_test, examples_not_satisfying_test = self._split_examples(candidate_test, examples)
candidate_test_score = split_criterion.calculate(examples_satisfying_test,
examples_not_satisfying_test
)
if current_best_split_info is None or candidate_test_score > current_best_split_info.score:
current_best_split_info = SplitInfo(test=candidate_test,
examples_left=examples_satisfying_test,
examples_right=examples_not_satisfying_test,
score=candidate_test_score,
threshold=split_criterion.get_threshold(),
split_criterion=split_criterion.get_name())
# elif candidate_test_score > current_best_split_info.score:
# current_best_split_info = SplitInfo(test=can)
# current_best_split_info.test = candidate_test
# current_best_split_info.examples_left = examples_satisfying_test
# current_best_split_info.examples_right = examples_not_satisfying_test
return current_best_split_info
def _split_examples(self, test, examples):
examples_satisfying_test = set()
        examples_not_satisfying_test = set()
        for example in examples:
            succeeds_test = self.test_evaluator.evaluate(example, test)
            if succeeds_test:
                examples_satisfying_test.add(example)
            else:
                examples_not_satisfying_test.add(example)
        return examples_satisfying_test, examples_not_satisfying_test
```
#### File: refactor/tilde_essentials/stop_criterion.py
```python
from refactor.tilde_essentials.splitter import SplitInfo
import math
class StopCriterion:
"""
Checks whether a the node should be split; i.e. whether a stop criterion is reached.
"""
# def should_investigate_node(self):
# raise NotImplementedError('abstract method')
def __init__(self, max_depth: int = math.inf,
min_samples_split: int = 2,
min_samples_leaf: int = 2 # 1
):
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
def cannot_split_before_test(self, examples, depth):
"""
If we already know we cannot split without having to calculate possible tests,
report True here.
:param depth:
:param examples:
:return:
"""
if depth >= self.max_depth:
return True
if len(examples) < self.min_samples_split:
            return True
        return False
def _not_enough_examples_in_leaves(self, split_info: SplitInfo) -> bool:
"""
Return true if the smallest of the two subsets has NOT enough examples to be acceptable as a leaf.
# NOTE: I changed it back to min, this explanation isn't true anymore
# REASON: setting:
# minimal_cases(n).
# the minimal nb of examples that a leaf in the tree should cover
#
# (De Raedt: a good heuristic:
# stop expanding nodes
# WHEN the number of examples in the nodes falls below a certain (user-defined threshold)
# NOTE:
# the nodes get split into two children
# --> possible case:
# only for 1 of the children, the nb of examples falls below the threshold
# IF by splitting,
# ONE of the nodes falls below a certain user-defined threshold
# (i.e. the MIN of their nbs < threshold)
# THEN we don't split this node
:param split_info:
:return:
"""
return min(
len(split_info.examples_left), len(split_info.examples_right)
) < self.min_samples_leaf
def cannot_split_on_test(self, split_info: SplitInfo):
if split_info is None:
return True
if not split_info.passing_score():
return True
if self._not_enough_examples_in_leaves(split_info):
return True
return False
```
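A short sketch of how the two checks gate node expansion; the numbers are arbitrary, and `SplitInfo` is the class imported at the top of this file.
```python
# Sketch: pre-test and post-test stop checks on toy data.
criterion = StopCriterion(max_depth=5, min_samples_split=4, min_samples_leaf=2)

# Fewer than min_samples_split examples -> stop before generating tests.
print(criterion.cannot_split_before_test(examples=[1, 2, 3], depth=1))  # True

# A split whose smaller branch holds fewer than min_samples_leaf examples -> reject it.
split = SplitInfo(test=None, examples_left=[1], examples_right=[2, 3],
                  score=0.4, threshold=0.3, split_criterion='gini')
print(criterion.cannot_split_on_test(split))  # True
```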
#### File: refactor/tilde_essentials/tree_node.py
```python
from typing import Optional
from refactor.tilde_essentials.destuctable import Destructible
from refactor.tilde_essentials.example import get_labels
#
# class TreeBuildInfo:
# def __init__(self):
# self.labels = None
# self.n_examples = None
# from refactor.tilde_essentials.tree_printer import TreeNodePrinter
class TreeNode(Destructible):
"""
"""
def __init__(self, parent=None, depth=0):
self.parent = parent # type: Optional['TreeNode']
self.depth = depth # type: int
self.left_child = None # type: Optional['TreeNode']
self.right_child = None # type: Optional['TreeNode']
self.test = None
# the set of labels occurring in this node
self.labels = None
self.leaf_strategy = None
def get_labels(self, examples):
if self.labels is None:
self.labels = get_labels(examples)
return self.labels
def is_leaf_node(self) -> bool:
return self.left_child is None and self.right_child is None
def __str__(self):
return TreeNodePrinter.to_string(self)
def destruct(self):
destruct_method = getattr(self.test, 'destruct', None)
if callable(destruct_method):
self.test.destruct()
if self.left_child is not None:
self.left_child.destruct()
if self.right_child is not None:
self.right_child.destruct()
def count_nb_of_nodes(node: Optional['TreeNode'] = None) -> int:
if node is None:
return 0
else:
count = 1 # count the node itself
count += count_nb_of_nodes(node.left_child)
count += count_nb_of_nodes(node.right_child)
return count
def count_nb_of_inner_nodes(node: Optional['TreeNode'] = None) -> int:
    if node is None or node.is_leaf_node():
        return 0
else:
count = 1
count += count_nb_of_inner_nodes(node.left_child)
count += count_nb_of_inner_nodes(node.right_child)
return count
class TreeNodePrinter:
"""
Pretty prints a TreeNode tree structure.
"""
setting = "full"
@staticmethod
def to_string(tree_node: TreeNode) -> str:
if TreeNodePrinter.setting == "full":
return TreeNodePrinter.to_string_full_query(tree_node)
        if TreeNodePrinter.setting == "compact":
            return TreeNodePrinter.to_string_compact(tree_node)
@staticmethod
def to_string_full_query(tree_node: TreeNode, indentation='', current_node_number=0) -> str:
"""
Represents the tree as a string using some layouting
:param tree_node:
:param indentation:
:param current_node_number:
:return:
"""
node_indentation = indentation
child_indentation = indentation
if current_node_number == 0:
child_indentation = '\t'
elif current_node_number == 1:
node_indentation += '|-'
child_indentation += '|\t'
else:
            node_indentation += '\\-'
child_indentation += '\t'
if tree_node.is_leaf_node():
result = tree_node.leaf_strategy.to_string(node_indentation)
# result = node_indentation + "Leaf, class label: " + str(self.classification) + ", [" + str(
# self.nb_of_examples_with_label) + "/" + str(self.nb_of_examples_in_this_node) + "]" + '\n'
return result
else:
result = node_indentation + 'INode, query: ' + str(tree_node.test) + '\n'
if tree_node.left_child is not None:
result = result + TreeNodePrinter.to_string_full_query(tree_node.left_child, child_indentation, 1)
if tree_node.right_child is not None:
result = result + TreeNodePrinter.to_string_full_query(tree_node.right_child, child_indentation, 2)
return result
@staticmethod
def to_string_compact(tree_node: TreeNode, indentation='', current_node_number=0):
"""
Represents the tree as a string using some layouting
:param tree_node:
:param indentation:
:param current_node_number:
:return:
"""
node_indentation = indentation
child_indentation = indentation
if current_node_number == 0: # root node
child_indentation = ''
elif current_node_number == 1: # this node is the LEFT child node of its parent
node_indentation += '+--'
child_indentation += '| '
else: # this node is the RIGHT child node of its parent
node_indentation += '+--'
child_indentation += ' '
if tree_node.is_leaf_node():
if current_node_number == 0:
result = tree_node.leaf_strategy.to_string_compact()
elif current_node_number == 1:
result = node_indentation + 'yes: ' + tree_node.leaf_strategy.to_string_compact()
else:
result = node_indentation + 'no: ' + tree_node.leaf_strategy.to_string_compact()
# result = self.strategy.to_string(node_indentation)
# result = node_indentation + "Leaf, class label: " + str(self.classification) + ", [" + str(
# self.nb_of_examples_with_label) + "/" + str(self.nb_of_examples_in_this_node) + "]" + '\n'
return result
else:
if current_node_number == 0:
# TODO: remove dependency from TILDEQueryHiddenLiteral
if tree_node.test.parent is not None:
# if tree_node.test.parent is not None and isinstance(tree_node.test.parent, TILDEQueryHiddenLiteral):
result = str(tree_node.test.parent.literal) + '\n'
else:
result = ""
result = result + str(tree_node.test.get_literal()) + ' ?\n'
elif current_node_number == 1:
result = node_indentation + 'yes: ' + str(tree_node.test.get_literal()) + ' ?\n'
else:
result = node_indentation + 'no: ' + str(tree_node.test.get_literal()) + ' ?\n'
# result = node_indentation + 'INode, query: ' + str(self.query) + '\n'
if tree_node.left_child is not None:
result = result + TreeNodePrinter.to_string_compact(tree_node.left_child, child_indentation, 1)
if tree_node.right_child is not None:
result = result + TreeNodePrinter.to_string_compact(tree_node.right_child, child_indentation, 2)
return result
```
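The node counters walk the tree recursively; a hand-built three-node tree makes their difference concrete.
```python
# Hand-built three-node tree: one inner node (the root) and two leaves.
root = TreeNode()
root.left_child = TreeNode(parent=root, depth=1)
root.right_child = TreeNode(parent=root, depth=1)

print(count_nb_of_nodes(root))        # 3
print(count_nb_of_inner_nodes(root))  # 1
```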
#### File: refactor/tilde_essentials/tree.py
```python
from typing import Optional
from refactor.tilde_essentials.destuctable import Destructible
from refactor.tilde_essentials.evaluation import TestEvaluator
from refactor.tilde_essentials.tree_builder import TreeBuilder
from refactor.tilde_essentials.tree_node import TreeNode, count_nb_of_nodes, count_nb_of_inner_nodes
class DecisionTree(Destructible):
"""
Decision tree used for making predictions. Initially empty.
An internal TreeNode tree is fitted on training examples using a TreeBuilder.
"""
def __init__(self):
self.tree = None # type: Optional[TreeNode]
self.tree_builder = None # type: Optional[TreeBuilder]
self.test_evaluator = None # type: Optional[TestEvaluator]
self.tree_pruner = None
def fit(self, examples, tree_builder: TreeBuilder):
self.tree_builder = tree_builder
self.tree_builder.build(examples)
self.test_evaluator = self.tree_builder.splitter.test_evaluator
self.tree = tree_builder.tree_root
if self.tree_pruner is not None:
self.tree = self.tree_pruner.prune(self.tree)
def prune(self, pruning_function):
pruning_function(self.tree)
def predict(self, example):
return self._predict_recursive(example, self.tree)
def _predict_recursive(self, example, tree_node: TreeNode):
if tree_node.is_leaf_node():
return tree_node.leaf_strategy.predict(example)
else:
succeeds_test = self.test_evaluator.evaluate(example, tree_node.test)
if succeeds_test:
return self._predict_recursive(example, tree_node.left_child)
else:
return self._predict_recursive(example, tree_node.right_child)
def __str__(self):
return self.tree.__str__()
def destruct(self):
self.tree.destruct()
def get_nb_of_nodes(self) -> int:
return count_nb_of_nodes(self.tree)
def get_nb_of_inner_nodes(self):
return count_nb_of_inner_nodes(self.tree)
def write_out_tree(fname: str, tree: DecisionTree):
# write out tree
print('\t--- writing out tree to: ' + fname)
with open(fname, 'w') as f:
f.write(str(tree))
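# Hedged usage sketch (editor addition): the intended call sequence, judging only
# from the methods above; concrete Example and TreeBuilder construction lives in
# other modules and is assumed here.
#   tree = DecisionTree()
#   tree.fit(examples, tree_builder)   # delegates to TreeBuilder.build(examples)
#   label = tree.predict(example)      # walks the fitted TreeNode tree
#   write_out_tree('tree.txt', tree)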
``` |
{
"source": "JoschuaL/Stammtisch-Tool",
"score": 3
} |
#### File: Stammtisch-Tool/unittests/report.py
```python
import unittest
from db_models.report import Report
from datetime import date
from dateutil.parser import parse
from main import app
class TestReport(unittest.TestCase):
def setUp(self):
with app.app_context():
from main import get_db
self.db = get_db()
self.object_to_remove = []
# def tearDown(self):
# # delete objects which have been created by the test
# for obj in self.object_to_remove:
# self.db.session.delete(obj)
# self.db.session.commit()
def test_create_report(self):
report = Report()
report.location = "Heidelberg"
self.assertEqual(report.location, "Heidelberg")
self.assertNotEqual(report.location, "")
def test_save_object(self):
report = Report()
report.location = 'Heidelberg'
report.date = date.today()
report.start_time = parse("14:30").time()
report.end_time = parse("16:30").time()
self.object_to_remove.append(report)
report.report_saved = date.today()
self.db.session.add(report)
self.db.session.commit()
report2 = Report.get_by_date_and_location(date.today(), "Heidelberg")
self.assertIsNotNone(report2)
# self.assertEquals(report.id, report2.id)
if __name__ == '__main__':
unittest.main()
```
#### File: Stammtisch-Tool/unit_tests/report.py
```python
import unittest
from db_models.report import Report
from datetime import date
from dateutil.parser import parse
from main import get_db
db = get_db()
# import mysql
# import pymysql
# pymysql.install_as_MySQLdb()
# import MySQLdb
# mysql://sql7315108:<EMAIL>
# db = MySQLdb.connect(host="sql7.freemysqlhosting.net", # your host, usually localhost
# user="sql7315108", # your username
# passwd="<PASSWORD>", # your password
# db="sql7315108")
#
# class TestReport(unittest.TestCase):
# def setUp(self):
# # self.db = g.db
#
# # self.db = mysql.connector.connect(
# # host="sql7.freemysqlhosting.net",
# # user="sql7315108",
# # passwd="<PASSWORD>"
# #
# # )
# engine = create_engine(
# "mysql://sql7315108:<EMAIL>/sql7319232.db",
# # isolation_level="READ UNCOMMITTED"
# )
# self.db = engine.connect()
# self.object_to_remove = []
#
# def tearDown(self):
# for obj in self.object_to_remove:
# self.db.session.delete(obj)
# self.db.session.commit()
#
# def test_create_report(self):
# report = Report()
# report.location = "Heidelberg"
# assert report.location == "Heidelberg"
# assert report.location != ""
#
# def test_save_object(self):
# report = Report()
# report.location = 'Heidelberg'
# report.date = date.today()
# report.start_time = parse("14:30").time()
# report.end_time = parse("16:30").time()
# self.object_to_remove.append(report)
# report.report_saved = date.today()
# self.db.session.add(report)
# self.db.session.commit()
#
# report2 = Report.get_by_date_and_location(date.today(), "Heidelberg")
# assert report is report2
#
#
# if __name__ == '__main__':
# unittest.main()
#
#
# class TestReportObject(unittest.TestCase):
# def test_assign_attributes(self):
# report = Report()
#
# report.meetup_location = 'Heidelberg'
# report.meetup_date = date.today()
# report.male_organizers = 2
# report.female_organizers = 0
# report.other_organizers = 0
#
# self.assertEqual(report.meetup_location, 'Heidelberg')
# self.assertNotEqual(report.male_organizers, 20)
# self.assertEqual(report.meetup_date, date.today())
# self.assertEqual(report.female_organizers, 0)
# self.assertNotEqual(report.other_organizers, 4)
#
# if __name__ == '__main__':
# unittest.main()
``` |
{
"source": "joschuck/OpenCV-with-Python-Series",
"score": 3
} |
#### File: src/01_thresholding/otsus_thresholding.py
```python
import sys
import cv2
import numpy as np
from PySide2.QtCore import Qt
from PySide2.QtGui import QImage, QPixmap
from PySide2.QtWidgets import QApplication, QComboBox, QWidget, QLabel, QVBoxLayout, QSlider, QPushButton, QFileDialog
class ThresholdingGui(QWidget):
image: np.ndarray
titles = [
'Original Image',
'THRESH_BINARY + cv2.THRESH_OTSU'
]
def __init__(self):
super(ThresholdingGui, self).__init__()
self.setWindowTitle('Otsu\'s Thresholding')
open_image_btn = QPushButton('Open Image', self)
open_image_btn.clicked.connect(self.open_image)
self.method_combobox = QComboBox()
for title in self.titles:
self.method_combobox.addItem(title)
self.method_combobox.currentIndexChanged.connect(self.update_preview)
self.threshold_label = QLabel('Threshold calculated: -')
self.image_label = QLabel()
self.image = np.tile(np.arange(256, dtype=np.uint8).repeat(2), (512, 1))
q_img = QImage(self.image.data, 512, 512, 512, QImage.Format_Indexed8)
self.image_label.setPixmap(QPixmap.fromImage(q_img))
# Create layout and add widgets
layout = QVBoxLayout()
layout.addWidget(open_image_btn)
layout.addWidget(self.method_combobox)
layout.addWidget(self.threshold_label)
layout.addWidget(self.image_label)
# Set dialog layout
self.setLayout(layout)
def open_image(self):
image_path, _ = QFileDialog.getOpenFileName(self, "Load Image", filter="Image Files (*.tiff *.png *.jpeg *.jpg *.bmp)")
if image_path:
self.image = cv2.imread(image_path, 0)
self.update_preview()
def update_preview(self):
method_idx = self.method_combobox.currentIndex()
if method_idx == 0:
ret, th = '-', self.image
elif method_idx == 1:
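            # With cv2.THRESH_OTSU the threshold argument passed in (0 here) is
            # ignored; OpenCV derives it from the image histogram and returns the
            # chosen value as `ret`, which is shown in the label below.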
ret, th = cv2.threshold(self.image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
self.threshold_label.setText(f"Threshold calculated: {ret}")
image_h, image_w = th.shape
q_img = QImage(th.data, image_w, image_h, image_w, QImage.Format_Indexed8)
self.image_label.setPixmap(QPixmap.fromImage(q_img))
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = ThresholdingGui()
ex.show()
sys.exit(app.exec_())
```
#### File: src/util/image.py
```python
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__all__ = ['to_q_image',
'to_q_pixmap']
from PySide2.QtGui import QImage, QPixmap
import numpy as np
def to_q_image(image: np.ndarray):
"""
    Converts an OpenCV / NumPy array to a QImage.
    Expects the image to be 8 bit.
    Is able to convert grayscale, BGR and ARGB images.
Parameters
----------
image : np.ndarray
Input Image
Returns
-------
QImage
The converted image. If image is None, returns an empty QImage.
"""
if image is None:
return QImage()
if image.dtype == np.uint8:
if len(image.shape) == 2:
height, width = image.shape
return QImage(image.data, width, height, width, QImage.Format_Indexed8)
elif len(image.shape) == 3:
height, width, ch = image.shape
if image.shape[2] == 3:
return QImage(image.data, width, height, width * 3, QImage.Format_BGR888)
elif image.shape[2] == 4:
                # 4-channel ARGB32 pixels are 4 bytes wide, so bytesPerLine is width * 4
                return QImage(image.data, width, height, width * 4, QImage.Format_ARGB32)
def to_q_pixmap(image: QPixmap):
"""
    Converts an OpenCV / NumPy array to a QPixmap.
    Expects the image to be 8 bit.
    Is able to convert grayscale, BGR and ARGB images.
Parameters
----------
image : np.ndarray
Input Image
Returns
-------
QPixmap
The converted QPixmap. If image is None, returns an empty QPixmap.
"""
return QPixmap(to_q_image(image))
``` |
{
"source": "JoschUniHD/BA-DeepFilter_Mixed",
"score": 2
} |
#### File: BA-DeepFilter_Mixed/Data_Preparation/Prepare_NSTDB.py
```python
# Before running this section, download the QTdatabase, the Noise Stress database and add it to the current folder
# and install the Physionet WFDB package
#
# QTdatabase: https://physionet.org/static/published-projects/qtdb/qt-database-1.0.0.zip
# MIT-BIH Noise Stress Test Database: https://physionet.org/static/published-projects/nstdb/mit-bih-noise-stress-test-database-1.0.0.zip
# Installing Physionet WFDB package run from your terminal:
# $ pip install wfdb
#
# ============================================================
#
# authors: <NAME>, <NAME>
# email: <EMAIL>, <EMAIL>
# github id: Dacapi91, fperdigon
#
# ============================================================
import numpy as np
import wfdb
import _pickle as pickle
def prepare(NSTDBPath='data/mit-bih-noise-stress-test-database-1.0.0/'):
bw_signals, bw_fields = wfdb.rdsamp(NSTDBPath + 'bw')
em_signals, em_fields = wfdb.rdsamp(NSTDBPath + 'em')
ma_signals, ma_fields = wfdb.rdsamp(NSTDBPath + 'ma')
for key in bw_fields:
print(key, bw_fields[key])
for key in em_fields:
print(key, em_fields[key])
for key in ma_fields:
print(key, ma_fields[key])
# Save Data
with open('data/NoiseALL.pkl', 'wb') as output: # Overwrites any existing file.
pickle.dump([bw_signals, em_signals, ma_signals], output)
print('=========================================================')
print('MIT BIH data noise stress test database (NSTDB) saved as pickle')
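# Hedged usage note (editor addition): after unzipping the NSTDB archive into
# data/mit-bih-noise-stress-test-database-1.0.0/, a plain prepare() call with the
# default path reads the bw/em/ma records and writes data/NoiseALL.pkl.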
``` |
{
"source": "JoschUniHD/BA-Hagedorn",
"score": 2
} |
#### File: BA-Hagedorn/deepFilter/dl_pipeline_single.py
```python
import keras
from keras import backend as K
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping, TensorBoard
from keras import losses
from sklearn.model_selection import train_test_split
import deepFilter.dl_models_single as models
# Custom loss SSD
def ssd_loss(y_true, y_pred):
return K.sum(K.square(y_pred - y_true), axis=-2)
# Combined loss SSD + MSE
def combined_ssd_mse_loss(y_true, y_pred):
return K.mean(K.square(y_true - y_pred), axis=-2) * 500 + K.sum(K.square(y_true - y_pred), axis=-2)
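# (editor note: with the 512-sample windows used below, mean * 500 is roughly the
#  same magnitude as the summed term, so the two contributions carry similar weight)
# Combined loss SSD + MAD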
def combined_ssd_mad_loss(y_true, y_pred):
return K.max(K.square(y_true - y_pred), axis=-2) * 50 + K.sum(K.square(y_true - y_pred), axis=-2)
# Custom loss SAD
def sad_loss(y_true, y_pred):
return K.sum(K.sqrt(K.square(y_pred - y_true)), axis=-2)
# Custom loss MAD (implemented below as the maximum squared difference over the window)
def mad_loss(y_true, y_pred):
return K.max(K.square(y_pred - y_true), axis=-2)
def train_dl(Dataset, experiment, signal_size=512):
print('Deep Learning pipeline: Training the model for exp ' + str(experiment))
[X_train, y_train, X_test, y_test] = Dataset
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.3, shuffle=True, random_state=1)
# ==================
# LOAD THE DL MODEL
# ==================
if experiment == 'FCN-DAE':
# FCN_DAE
model = models.FCN_DAE(signal_size=signal_size)
model_label = 'FCN_DAE'
if experiment == 'DRNN':
# DRNN
model = models.DRRN_denoising(signal_size=signal_size)
model_label = 'DRNN'
if experiment == 'Vanilla L':
# Vanilla CNN linear
model = models.deep_filter_vanilla_linear(signal_size=signal_size)
model_label = 'Vanilla_L'
if experiment == 'Vanilla NL':
# Vanilla CNN non linear
model = models.deep_filter_vanilla_Nlinear(signal_size=signal_size)
model_label = 'Vanilla_NL'
if experiment == 'Multibranch LANL':
# Multibranch linear and non linear
model = models.deep_filter_I_LANL(signal_size=signal_size)
model_label = 'Multibranch_LANL'
if experiment == 'Multibranch LANLD':
# Inception-like linear and non linear dilated
model = models.deep_filter_model_I_LANL_dilated(signal_size=signal_size)
model_label = 'Multibranch_LANLD'
print('\n ' + model_label + '\n ')
model.summary()
epochs = int(1e5) # 100000
# epochs = 100
batch_size = 128
lr = 1e-3
# lr = 1e-4
minimum_lr = 1e-10
# Loss function selection according to method implementation
if experiment == 'DRNN':
criterion = keras.losses.mean_squared_error
elif experiment == 'FCN-DAE':
criterion = ssd_loss
else:
criterion = combined_ssd_mad_loss
model.compile(loss=criterion,
optimizer=keras.optimizers.Adam(lr=lr),
metrics=[losses.mean_squared_error, losses.mean_absolute_error, ssd_loss, mad_loss])
# Keras Callbacks
# checkpoint
model_filepath = model_label + '_weights.best.hdf5'
checkpoint = ModelCheckpoint(model_filepath,
monitor="val_loss",
verbose=1,
save_best_only=True,
mode='min', # on acc has to go max
save_weights_only=True)
reduce_lr = ReduceLROnPlateau(monitor="val_loss",
factor=0.5,
min_delta=0.05,
mode='min', # on acc has to go max
patience=2,
min_lr=minimum_lr,
verbose=1)
early_stop = EarlyStopping(monitor="val_loss", # "val_loss"
min_delta=0.05,
mode='min', # on acc has to go max
patience=10,
verbose=1)
tb_log_dir = './runs/' + model_label
tboard = TensorBoard(log_dir=tb_log_dir, histogram_freq=0,
write_graph=False, write_grads=False,
write_images=False, embeddings_freq=0,
embeddings_layer_names=None,
embeddings_metadata=None)
# To run the tensor board
# tensorboard --logdir=./runs
# GPU
model.fit(x=X_train, y=y_train,
validation_data=(X_val, y_val),
batch_size=batch_size,
epochs=epochs,
verbose=1,
callbacks=[early_stop,
reduce_lr,
checkpoint,
tboard])
K.clear_session()
def test_dl(Dataset, experiment, signal_size=512):
print('Deep Learning pipeline: Testing the model')
[train_set, train_set_GT, X_test, y_test] = Dataset
batch_size = 32
# ==================
# LOAD THE DL MODEL
# ==================
if experiment == 'FCN-DAE':
# FCN_DAE
model = models.FCN_DAE(signal_size=signal_size)
model_label = 'FCN_DAE'
if experiment == 'DRNN':
# DRNN
model = models.DRRN_denoising(signal_size=signal_size)
model_label = 'DRNN'
if experiment == 'Vanilla L':
# Vanilla CNN linear
model = models.deep_filter_vanilla_linear(signal_size=signal_size)
model_label = 'Vanilla_L'
if experiment == 'Vanilla NL':
# Vanilla CNN non linear
model = models.deep_filter_vanilla_Nlinear(signal_size=signal_size)
model_label = 'Vanilla_NL'
if experiment == 'Multibranch LANL':
# Multibranch linear and non linear
model = models.deep_filter_I_LANL(signal_size=signal_size)
model_label = 'Multibranch_LANL'
if experiment == 'Multibranch LANLD':
# Inception-like linear and non linear dilated
model = models.deep_filter_model_I_LANL_dilated(signal_size=signal_size)
model_label = 'Multibranch_LANLD'
print('\n ' + model_label + '\n ')
model.summary()
# Loss function selection according to method implementation
if experiment == 'DRNN':
criterion = 'mse'
elif experiment == 'FCN-DAE':
criterion = ssd_loss
else:
criterion = combined_ssd_mad_loss
model.compile(loss=criterion,
optimizer=keras.optimizers.Adam(lr=0.01),
metrics=[losses.mean_squared_error, losses.mean_absolute_error, ssd_loss, mad_loss])
# checkpoint
model_filepath = model_label + '_weights.best.hdf5'
# load weights
model.load_weights(model_filepath)
# Test score
y_pred = model.predict(X_test, batch_size=batch_size, verbose=1)
K.clear_session()
return [X_test, y_test, y_pred]
``` |
{
"source": "joschu/Theano",
"score": 2
} |
#### File: cuda/tests/test_dnn.py
```python
import logging
import unittest
from nose.plugins.skip import SkipTest
import numpy
import theano
from theano.compat.six import StringIO
from theano.gof.python25 import any
import theano.tensor as T
import theano.tests.unittest_tools as utt
from theano.sandbox.neighbours import images2neibs, neibs2images
from theano.tensor.signal.downsample import max_pool_2d
from theano.tensor.signal.downsample import DownsampleFactorMaxGrad
# Skip test if cuda_ndarray is not available.
import theano.sandbox.cuda as cuda
if not cuda.cuda_available:
raise SkipTest('Optional package cuda disabled')
if theano.config.mode == 'FAST_COMPILE':
mode_with_gpu = theano.compile.mode.get_mode('FAST_RUN').including('gpu')
mode_without_gpu = theano.compile.mode.get_mode(
'FAST_RUN').excluding('gpu')
else:
mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu')
mode_without_gpu = theano.compile.mode.get_default_mode().excluding('gpu')
def pool_2d_i2n(input, ds=(2, 2), strides=None,
pool_function=T.max, mode='ignore_borders'):
if strides is None:
strides = ds
if strides[0] > ds[0] or strides[1] > ds[1]:
raise RuntimeError(
"strides should be smaller than or equal to ds,"
" strides=(%d, %d) and ds=(%d, %d)" %
(strides + ds))
shape = input.shape
neibs = images2neibs(input, ds, strides, mode=mode)
pooled_neibs = pool_function(neibs, axis=1)
output_width = (shape[2] - ds[0]) // strides[0] + 1
output_height = (shape[3] - ds[1]) // strides[1] + 1
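    # e.g. a 100-wide input with ds=(2, 2) and stride 2 gives (100 - 2) // 2 + 1 = 50
    # pooled columns -- the standard pooling output-size formula (editor example)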
pooled_output = pooled_neibs.reshape((shape[0], shape[1],
output_width, output_height))
return pooled_output
def test_pooling():
if not cuda.dnn.dnn_available():
raise SkipTest(cuda.dnn.dnn_available.msg)
x = T.ftensor4()
for func in (T.max, T.mean):
for ws in (2, 4, 5):
for stride in (2, 3):
if stride > ws:
continue
if ws == stride and func is T.max:
# We will check that the opt introduced it.
out1 = max_pool_2d(x, (ws, ws), ignore_border=True)
else:
out1 = cuda.dnn.dnn_pool(
x, ws=(ws, ws),
stride=(stride, stride),
mode='max' if func is T.max else "average")
out2 = pool_2d_i2n(x, ds=(ws, ws), strides=(stride, stride),
pool_function=func)
f1 = theano.function([x], out1, mode=mode_with_gpu)
assert any([isinstance(node.op, cuda.dnn.GpuDnnPool)
for node in f1.maker.fgraph.apply_nodes])
f2 = theano.function([x], out2, mode=mode_with_gpu)
assert not any([isinstance(node.op, cuda.dnn.GpuDnnPool)
for node in f2.maker.fgraph.apply_nodes])
for shp in [(1, 10, 100, 100),
(1, 3, 99, 99),
(32, 1, 147, 197),
]:
data = numpy.random.normal(0, 1, shp).astype("float32")
a = f1(data).__array__()
b = f2(data).__array__()
assert numpy.allclose(a, b,
atol=numpy.finfo(numpy.float32).eps)
        # Test the grad
        for shp in [(1, 1, 2, 2),
                    (1, 1, 3, 3)]:
            data = numpy.random.normal(0, 1, shp).astype("float32")*10
            ws = 2
            strides = 2
            # This tests the CPU grad + opt + GPU implementation
def fn(x):
return max_pool_2d(x, (ws, ws), ignore_border=True)
theano.tests.unittest_tools.verify_grad(fn, [data],
cast_to_output_type=False,
mode=mode_with_gpu)
# Confirm that the opt would have inserted it.
f = theano.function([x], theano.grad(fn(x).sum(), x),
mode=mode_with_gpu)
assert any([isinstance(node.op, cuda.dnn.GpuDnnPoolGrad)
for node in f.maker.fgraph.toposort()])
# Test the GPU grad + GPU implementation
def fn(x):
dnn_op = cuda.dnn.dnn_pool(
x, ws=(ws, ws),
stride=(stride, stride),
mode='max' if func is T.max else "average")
return dnn_op
theano.tests.unittest_tools.verify_grad(fn, [data],
cast_to_output_type=False,
mode=mode_with_gpu)
# Confirm that we get the good op.
f = theano.function([x], theano.grad(fn(x).sum(), x),
mode=mode_with_gpu)
assert any([isinstance(node.op, cuda.dnn.GpuDnnPoolGrad)
for node in f.maker.fgraph.toposort()])
g_out = f(data)
if func is T.max:
# Compare again the CPU result
out = max_pool_2d(x, (ws, ws), ignore_border=True)
f = theano.function([x], theano.grad(out.sum(), x),
mode=mode_without_gpu)
assert any([isinstance(node.op, DownsampleFactorMaxGrad)
for node in f.maker.fgraph.toposort()])
c_out = f(data)
assert numpy.allclose(c_out, g_out)
def test_pooling_opt():
if not cuda.dnn.dnn_available():
raise SkipTest(cuda.dnn.dnn_available.msg)
x = T.ftensor4()
f = theano.function(
[x],
max_pool_2d(x, ds=(2, 2), ignore_border=True),
mode=mode_with_gpu)
assert any([isinstance(n.op, cuda.dnn.GpuDnnPool)
for n in f.maker.fgraph.toposort()])
f = theano.function(
[x],
T.grad(max_pool_2d(x, ds=(2, 2), ignore_border=True).sum(), x),
mode=mode_with_gpu.including("cudnn"))
assert any([isinstance(n.op, cuda.dnn.GpuDnnPoolGrad)
for n in f.maker.fgraph.toposort()])
def test_dnn_tag():
"""
We test that if cudnn isn't avail we crash and that if it is avail, we use it.
"""
x = T.ftensor4()
old = theano.config.on_opt_error
theano.config.on_opt_error = "raise"
sio = StringIO()
handler = logging.StreamHandler(sio)
logging.getLogger('theano.compile.tests.test_dnn').addHandler(handler)
    # Silence original handler when intentionally generating warning messages
logging.getLogger('theano').removeHandler(theano.logging_default_handler)
raised = False
try:
f = theano.function(
[x],
max_pool_2d(x, ds=(2, 2), ignore_border=True),
mode=mode_with_gpu.including("cudnn"))
except (AssertionError, RuntimeError), e:
assert not cuda.dnn.dnn_available()
raised = True
finally:
theano.config.on_opt_error = old
logging.getLogger('theano.compile.tests.test_dnn').removeHandler(handler)
logging.getLogger('theano').addHandler(theano.logging_default_handler)
if not raised:
assert cuda.dnn.dnn_available()
assert any([isinstance(n.op, cuda.dnn.GpuDnnPool)
for n in f.maker.fgraph.toposort()])
``` |
{
"source": "JosCla/image-stego",
"score": 4
} |
#### File: JosCla/image-stego/lsb-stego.py
```python
from PIL import Image
# Converts a string to binary using ASCII encoding
def textToBinary(text):
return ''.join((format(ord(i), 'b')).zfill(8) for i in text)
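# e.g. textToBinary('Hi') -> '0100100001101001' (each character padded to 8 bits)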
# Getting user input to find which file to open
print('Enter image file to open:')
imgIn = input()
print('Enter text to encode:')
textIn = input()
print('Enter image file to write to:')
imgOut = input()
# Taking our message, converting to binary
messageBin = textToBinary(textIn)
# Loading an image's pixel data
img_pre = Image.open(imgIn)
width, height = img_pre.size
pixels = img_pre.load()
# Changing the least significant bit of each pixel to match each bit of our encoded message
currBit = 0
for x in range(0, width):
if (currBit >= len(messageBin)):
break
# Getting the current pixel
currPix = pixels[x, 0]
#print('[%d, %d]: [%d, %d, %d]'%(x, 0, currPix[0], currPix[1], currPix[2]))
# Changing the r, g, and b values of the pixel
newPix = list(currPix)
for c in range(0, 3):
if (currBit >= len(messageBin)):
break
colorBin = list(format(currPix[c], 'b').zfill(8))
colorBin[7] = messageBin[currBit]
colorBin = ''.join(colorBin)
newPix[c] = int(colorBin, 2)
currBit += 1
# Overwriting old pixel
currPix = tuple(newPix)
pixels[x, 0] = currPix
# Displaying the modified pixel
#print(' -> [%d, %d, %d]'%(currPix[0], currPix[1], currPix[2]))
#print(' (%c, %c, %c)'%(messageBin[currBit-3], messageBin[currBit-2], messageBin[currBit-1]))
# Rewriting the image's pixel data to a new image
img_pre.save(imgOut)
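# Hedged decoding sketch (editor addition, commented out): reading the message back
# is the reverse walk over the same first-row pixels -- take the least significant
# bit of each colour channel in order and regroup the bits into 8-bit characters.
# The reader is assumed to know the message length in characters (numChars).
#
# pixels_out = Image.open(imgOut).load()
# bits = ''
# x = 0
# while len(bits) < numChars * 8:
#     r, g, b = pixels_out[x, 0][:3]
#     bits += str(r & 1) + str(g & 1) + str(b & 1)
#     x += 1
# decoded = ''.join(chr(int(bits[i:i+8], 2)) for i in range(0, numChars * 8, 8))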
``` |
{
"source": "joscollin/teuthology",
"score": 2
} |
#### File: teuthology/teuthology/beanstalk.py
```python
import beanstalkc
import yaml
import logging
import pprint
import sys
from collections import OrderedDict
from teuthology.config import config
from teuthology import report
log = logging.getLogger(__name__)
def connect():
host = config.queue_host
port = config.queue_port
if host is None or port is None:
raise RuntimeError(
'Beanstalk queue information not found in {conf_path}'.format(
conf_path=config.teuthology_yaml))
return beanstalkc.Connection(host=host, port=port)
def watch_tube(connection, tube_name):
"""
Watch a given tube, potentially correcting to 'multi' if necessary. Returns
the tube_name that was actually used.
"""
if ',' in tube_name:
log.debug("Correcting tube name to 'multi'")
tube_name = 'multi'
connection.watch(tube_name)
connection.ignore('default')
return tube_name
def walk_jobs(connection, tube_name, processor, pattern=None):
"""
def callback(jobs_dict)
"""
log.info("Checking Beanstalk Queue...")
job_count = connection.stats_tube(tube_name)['current-jobs-ready']
if job_count == 0:
log.info('No jobs in Beanstalk Queue')
return
# Try to figure out a sane timeout based on how many jobs are in the queue
timeout = job_count / 2000.0 * 60
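    # e.g. 1000 ready jobs -> 1000 / 2000.0 * 60 = 30 seconds per reserve() call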
for i in range(1, job_count + 1):
print_progress(i, job_count, "Loading")
job = connection.reserve(timeout=timeout)
if job is None or job.body is None:
continue
job_config = yaml.safe_load(job.body)
job_name = job_config['name']
job_id = job.stats()['id']
if pattern is not None and pattern not in job_name:
continue
processor.add_job(job_id, job_config, job)
end_progress()
processor.complete()
def print_progress(index, total, message=None):
msg = "{m} ".format(m=message) if message else ''
sys.stderr.write("{msg}{i}/{total}\r".format(
msg=msg, i=index, total=total))
sys.stderr.flush()
def end_progress():
sys.stderr.write('\n')
sys.stderr.flush()
class JobProcessor(object):
def __init__(self):
self.jobs = OrderedDict()
def add_job(self, job_id, job_config, job_obj=None):
job_id = str(job_id)
job_dict = dict(
index=(len(self.jobs) + 1),
job_config=job_config,
)
if job_obj:
job_dict['job_obj'] = job_obj
self.jobs[job_id] = job_dict
self.process_job(job_id)
def process_job(self, job_id):
pass
def complete(self):
pass
class JobPrinter(JobProcessor):
def __init__(self, show_desc=False, full=False):
super(JobPrinter, self).__init__()
self.show_desc = show_desc
self.full = full
def process_job(self, job_id):
job_config = self.jobs[job_id]['job_config']
job_index = self.jobs[job_id]['index']
job_priority = job_config['priority']
job_name = job_config['name']
job_desc = job_config['description']
print 'Job: {i:>4} priority: {pri:>4} {job_name}/{job_id}'.format(
i=job_index,
pri=job_priority,
job_id=job_id,
job_name=job_name,
)
if self.full:
pprint.pprint(job_config)
elif job_desc and self.show_desc:
for desc in job_desc.split():
print '\t {desc}'.format(desc=desc)
class RunPrinter(JobProcessor):
def __init__(self):
super(RunPrinter, self).__init__()
self.runs = list()
def process_job(self, job_id):
run = self.jobs[job_id]['job_config']['name']
if run not in self.runs:
self.runs.append(run)
print run
class JobDeleter(JobProcessor):
def __init__(self, pattern):
self.pattern = pattern
super(JobDeleter, self).__init__()
def add_job(self, job_id, job_config, job_obj=None):
job_name = job_config['name']
if self.pattern in job_name:
super(JobDeleter, self).add_job(job_id, job_config, job_obj)
def process_job(self, job_id):
job_config = self.jobs[job_id]['job_config']
job_name = job_config['name']
print 'Deleting {job_name}/{job_id}'.format(
job_id=job_id,
job_name=job_name,
)
job_obj = self.jobs[job_id].get('job_obj')
if job_obj:
job_obj.delete()
report.try_delete_jobs(job_name, job_id)
def pause_tube(connection, tube, duration):
duration = int(duration)
if not tube:
tubes = sorted(connection.tubes())
else:
tubes = [tube]
prefix = 'Unpausing' if duration == 0 else "Pausing for {dur}s"
templ = prefix + ": {tubes}"
log.info(templ.format(dur=duration, tubes=tubes))
for tube in tubes:
connection.pause_tube(tube, duration)
def stats_tube(connection, tube):
stats = connection.stats_tube(tube)
result = dict(
name=tube,
count=stats['current-jobs-ready'],
paused=(stats['pause'] != 0),
)
return result
def main(args):
machine_type = args['--machine_type']
status = args['--status']
delete = args['--delete']
runs = args['--runs']
show_desc = args['--description']
full = args['--full']
pause_duration = args['--pause']
try:
connection = connect()
if machine_type and not pause_duration:
# watch_tube needs to be run before we inspect individual jobs;
# it is not needed for pausing tubes
watch_tube(connection, machine_type)
if status:
print stats_tube(connection, machine_type)
elif pause_duration:
pause_tube(connection, machine_type, pause_duration)
elif delete:
walk_jobs(connection, machine_type,
JobDeleter(delete))
elif runs:
walk_jobs(connection, machine_type,
RunPrinter())
else:
walk_jobs(connection, machine_type,
JobPrinter(show_desc=show_desc, full=full))
except KeyboardInterrupt:
log.info("Interrupted.")
finally:
connection.close()
```
#### File: teuthology/lock/query.py
```python
import logging
import os
import urllib
import requests
from teuthology import misc
from teuthology.config import config
log = logging.getLogger(__name__)
def get_status(name):
name = misc.canonicalize_hostname(name, user=None)
uri = os.path.join(config.lock_server, 'nodes', name, '')
response = requests.get(uri)
success = response.ok
if success:
return response.json()
log.warning(
"Failed to query lock server for status of {name}".format(name=name))
return None
def get_statuses(machines):
if machines:
statuses = []
for machine in machines:
machine = misc.canonicalize_hostname(machine)
status = get_status(machine)
if status:
statuses.append(status)
else:
log.error("Lockserver doesn't know about machine: %s" %
machine)
else:
statuses = list_locks()
return statuses
def is_vm(name=None, status=None):
if status is None:
if name is None:
raise ValueError("Must provide either name or status, or both")
name = misc.canonicalize_hostname(name)
status = get_status(name)
return status.get('is_vm', False)
def list_locks(keyed_by_name=False, **kwargs):
uri = os.path.join(config.lock_server, 'nodes', '')
for key, value in kwargs.iteritems():
if kwargs[key] is False:
kwargs[key] = '0'
if kwargs[key] is True:
kwargs[key] = '1'
if kwargs:
if 'machine_type' in kwargs:
kwargs['machine_type'] = kwargs['machine_type'].replace(',','|')
uri += '?' + urllib.urlencode(kwargs)
try:
response = requests.get(uri)
except requests.ConnectionError:
success = False
log.exception("Could not contact lock server: %s", config.lock_server)
else:
success = response.ok
if success:
if not keyed_by_name:
return response.json()
else:
return {node['name']: node
for node in response.json()}
return dict()
def find_stale_locks(owner=None):
"""
Return a list of node dicts corresponding to nodes that were locked to run
a job, but the job is no longer running. The purpose of this is to enable
us to nuke nodes that were left locked due to e.g. infrastructure failures
and return them to the pool.
:param owner: If non-None, return nodes locked by owner. Default is None.
"""
def might_be_stale(node_dict):
"""
Answer the question: "might this be a stale lock?"
The answer is yes if:
It is locked
It has a non-null description containing multiple '/' characters
... because we really want "nodes that were locked for a particular job
and are still locked" and the above is currently the best way to guess.
"""
desc = node_dict['description']
if (node_dict['locked'] is True and
desc is not None and desc.startswith('/') and
desc.count('/') > 1):
return True
return False
# Which nodes are locked for jobs?
nodes = list_locks(locked=True)
if owner is not None:
nodes = [node for node in nodes if node['locked_by'] == owner]
nodes = filter(might_be_stale, nodes)
def node_job_is_active(node, cache):
"""
Is this node's job active (e.g. running or waiting)?
:param node: The node dict as returned from the lock server
:param cache: A set() used for caching results
:returns: True or False
"""
description = node['description']
if description in cache:
return True
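        # A job description is expected to end in .../<run_name>/<job_id>
        # (schematic, editor note), so the last two path components identify
        # the run and job to look up on the results server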
(name, job_id) = description.split('/')[-2:]
url = os.path.join(config.results_server, 'runs', name, 'jobs', job_id,
'')
resp = requests.get(url)
if not resp.ok:
return False
job_info = resp.json()
if job_info['status'] in ('running', 'waiting'):
cache.add(description)
return True
return False
result = list()
# Here we build the list of of nodes that are locked, for a job (as opposed
# to being locked manually for random monkeying), where the job is not
# running
active_jobs = set()
for node in nodes:
if node_job_is_active(node, active_jobs):
continue
result.append(node)
return result
```
#### File: teuthology/provision/openstack.py
```python
import json
import logging
import os
import random
import re
import subprocess
import time
import tempfile
from subprocess import CalledProcessError
from teuthology import misc
from teuthology.openstack import OpenStack, OpenStackInstance
from teuthology.config import config
from teuthology.contextutil import safe_while
from teuthology.exceptions import QuotaExceededError
log = logging.getLogger(__name__)
class ProvisionOpenStack(OpenStack):
"""
A class that provides methods for creating and destroying virtual machine
instances using OpenStack
"""
def __init__(self):
super(ProvisionOpenStack, self).__init__()
self.user_data = tempfile.mktemp()
log.debug("ProvisionOpenStack: " + str(config.openstack))
self.basename = 'target'
self.up_string = 'The system is finally up'
self.property = "%16x" % random.getrandbits(128)
def __del__(self):
if os.path.exists(self.user_data):
os.unlink(self.user_data)
def init_user_data(self, os_type, os_version):
"""
Get the user-data file that is fit for os_type and os_version.
It is responsible for setting up enough for ansible to take
over.
"""
template_path = config['openstack']['user-data'].format(
os_type=os_type,
os_version=os_version)
nameserver = config['openstack'].get('nameserver', '8.8.8.8')
user_data_template = open(template_path).read()
user_data = user_data_template.format(
up=self.up_string,
nameserver=nameserver,
username=self.username,
lab_domain=config.lab_domain)
open(self.user_data, 'w').write(user_data)
def attach_volumes(self, name, volumes):
"""
Create and attach volumes to the named OpenStack instance.
"""
for i in range(volumes['count']):
volume_name = name + '-' + str(i)
try:
self.run("volume show -f json " + volume_name)
except subprocess.CalledProcessError as e:
if 'No volume with a name or ID' not in e.output:
raise e
            # do not use OpenStack().run because it is
            # buggy for volume create as of openstackclient 3.2.0
# https://bugs.launchpad.net/python-openstackclient/+bug/1619726
misc.sh(
"openstack volume create -f json " +
config['openstack'].get('volume-create', '') + " " +
" --property ownedby=" + config.openstack['ip'] +
" --size " + str(volumes['size']) + " " +
volume_name)
with safe_while(sleep=2, tries=100,
action="volume " + volume_name) as proceed:
while proceed():
try:
r = OpenStack().run("volume show -f json " +
volume_name)
status = self.get_value(json.loads(r), 'status')
if status == 'available':
break
else:
log.info("volume " + volume_name +
" not available yet")
except subprocess.CalledProcessError:
log.info("volume " + volume_name +
" not information available yet")
        # do not use OpenStack().run because it is
        # buggy for volume commands (see above)
misc.sh("openstack server add volume " + name + " " + volume_name)
@staticmethod
def ip2name(prefix, ip):
"""
return the instance name suffixed with the /16 part of the IP.
"""
digits = map(int, re.findall('.*\.(\d+)\.(\d+)', ip)[0])
return prefix + "%03d%03d" % tuple(digits)
def create(self, num, os_type, os_version, arch, resources_hint):
"""
Create num OpenStack instances running os_type os_version and
return their names. Each instance has at least the resources
described in resources_hint.
"""
log.debug('ProvisionOpenStack:create')
if arch is None:
arch = self.get_default_arch()
resources_hint = self.interpret_hints({
'machine': config['openstack']['machine'],
'volumes': config['openstack']['volumes'],
}, resources_hint)
self.init_user_data(os_type, os_version)
image = self.image(os_type, os_version, arch)
if 'network' in config['openstack']:
net = "--nic net-id=" + str(self.net_id(config['openstack']['network']))
else:
net = ''
flavor = self.flavor(resources_hint['machine'],
arch,
config['openstack'].get('flavor-select-regexp'))
cmd = ("flock --close --timeout 28800 /tmp/teuthology-server-create.lock" +
" openstack server create" +
" " + config['openstack'].get('server-create', '') +
" -f json " +
" --image '" + str(image) + "'" +
" --flavor '" + str(flavor) + "'" +
" --key-name teuthology " +
" --user-data " + str(self.user_data) +
" " + net +
" --min " + str(num) +
" --max " + str(num) +
" --security-group teuthology" +
" --property teuthology=" + self.property +
" --property ownedby=" + config.openstack['ip'] +
" --wait " +
" " + self.basename)
try:
self.run(cmd, type='compute')
except CalledProcessError as exc:
if "quota exceeded" in exc.output.lower():
raise QuotaExceededError(message=exc.output)
raise
instances = filter(
lambda instance: self.property in instance['Properties'],
self.list_instances())
instances = [OpenStackInstance(i['ID']) for i in instances]
fqdns = []
try:
network = config['openstack'].get('network', '')
for instance in instances:
ip = instance.get_ip(network)
name = self.ip2name(self.basename, ip)
self.run("server set " +
"--name " + name + " " +
instance['ID'])
fqdn = name + '.' + config.lab_domain
if not misc.ssh_keyscan_wait(fqdn):
console_log = misc.sh("openstack console log show %s "
"|| true" % instance['ID'])
log.error(console_log)
raise ValueError('ssh_keyscan_wait failed for ' + fqdn)
time.sleep(15)
if not self.cloud_init_wait(instance):
raise ValueError('cloud_init_wait failed for ' + fqdn)
self.attach_volumes(name, resources_hint['volumes'])
fqdns.append(fqdn)
except Exception as e:
log.exception(str(e))
for id in [instance['ID'] for instance in instances]:
self.destroy(id)
raise e
return fqdns
def destroy(self, name_or_id):
log.debug('ProvisionOpenStack:destroy ' + name_or_id)
return OpenStackInstance(name_or_id).destroy()
```
#### File: teuthology/teuthology/run_tasks.py
```python
import logging
import os
import sys
import types
from copy import deepcopy
from teuthology.config import config as teuth_config
from teuthology.exceptions import ConnectionLostError
from teuthology.job_status import set_status
from teuthology.misc import get_http_log_path
from teuthology.sentry import get_client as get_sentry_client
from teuthology.timer import Timer
log = logging.getLogger(__name__)
def get_task(name):
if '.' in name:
module_name, task_name = name.split('.')
else:
module_name, task_name = (name, 'task')
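    # e.g. 'foo.bar' resolves to module 'foo' and callable 'bar'; a plain 'foo'
    # resolves to the default callable 'task' inside module 'foo'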
    # First look for the task's module inside teuthology
module = _import('teuthology.task', module_name, task_name)
# If it is not found, try qa/ directory (if it is in sys.path)
if not module:
module = _import('tasks', module_name, task_name, fail_on_import_error=True)
try:
# Attempt to locate the task object inside the module
task = getattr(module, task_name)
# If we get another module, we need to go deeper
if isinstance(task, types.ModuleType):
task = getattr(task, task_name)
except AttributeError:
log.error("No subtask of '{}' named '{}' was found".format(
module_name,
task_name,
))
raise
return task
def _import(from_package, module_name, task_name, fail_on_import_error=False):
full_module_name = '.'.join([from_package, module_name])
try:
module = __import__(
full_module_name,
globals(),
locals(),
[task_name],
0,
)
except ImportError:
if fail_on_import_error:
raise
else:
return None
return module
def run_one_task(taskname, **kwargs):
taskname = taskname.replace('-', '_')
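    # dashes are not valid in Python module names, so a task written as
    # 'ceph-fuse' in the job YAML is looked up as 'ceph_fuse'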
task = get_task(taskname)
return task(**kwargs)
def run_tasks(tasks, ctx):
archive_path = ctx.config.get('archive_path')
if archive_path:
timer = Timer(
path=os.path.join(archive_path, 'timing.yaml'),
sync=True,
)
else:
timer = Timer()
stack = []
try:
for taskdict in tasks:
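            # each entry must be a single-key dict mapping the task name to its
            # config, e.g. {'<task-name>': {...}}; anything else is rejected below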
try:
((taskname, config),) = taskdict.iteritems()
except (ValueError, AttributeError):
raise RuntimeError('Invalid task definition: %s' % taskdict)
log.info('Running task %s...', taskname)
timer.mark('%s enter' % taskname)
manager = run_one_task(taskname, ctx=ctx, config=config)
if hasattr(manager, '__enter__'):
stack.append((taskname, manager))
manager.__enter__()
except BaseException as e:
if isinstance(e, ConnectionLostError):
# Prevent connection issues being flagged as failures
set_status(ctx.summary, 'dead')
else:
# the status may have been set to dead, leave it as-is if so
if not ctx.summary.get('status', '') == 'dead':
set_status(ctx.summary, 'fail')
if 'failure_reason' not in ctx.summary:
ctx.summary['failure_reason'] = str(e)
log.exception('Saw exception from tasks.')
sentry = get_sentry_client()
if sentry:
config = deepcopy(ctx.config)
tags = {
'task': taskname,
'owner': ctx.owner,
}
if 'teuthology_branch' in config:
tags['teuthology_branch'] = config['teuthology_branch']
if 'branch' in config:
tags['branch'] = config['branch']
# Remove ssh keys from reported config
if 'targets' in config:
targets = config['targets']
for host in targets.keys():
targets[host] = '<redacted>'
job_id = ctx.config.get('job_id')
archive_path = ctx.config.get('archive_path')
extra = dict(config=config,
)
if job_id:
extra['logs'] = get_http_log_path(archive_path, job_id)
exc_id = sentry.get_ident(sentry.captureException(
tags=tags,
extra=extra,
))
event_url = "{server}/?q={id}".format(
server=teuth_config.sentry_server.strip('/'), id=exc_id)
log.exception(" Sentry event: %s" % event_url)
ctx.summary['sentry_event'] = event_url
if ctx.config.get('interactive-on-error'):
ctx.config['interactive-on-error'] = False
from teuthology.task import interactive
log.warning('Saw failure during task execution, going into interactive mode...')
interactive.task(ctx=ctx, config=None)
# Throughout teuthology, (x,) = y has been used to assign values
# from yaml files where only one entry of type y is correct. This
# causes failures with 'too many values to unpack.' We want to
# fail as before, but with easier to understand error indicators.
if type(e) == ValueError:
if e.message == 'too many values to unpack':
emsg = 'Possible configuration error in yaml file'
log.error(emsg)
ctx.summary['failure_info'] = emsg
finally:
try:
exc_info = sys.exc_info()
while stack:
taskname, manager = stack.pop()
log.debug('Unwinding manager %s', taskname)
timer.mark('%s exit' % taskname)
try:
suppress = manager.__exit__(*exc_info)
except Exception as e:
if isinstance(e, ConnectionLostError):
# Prevent connection issues being flagged as failures
set_status(ctx.summary, 'dead')
else:
set_status(ctx.summary, 'fail')
if 'failure_reason' not in ctx.summary:
ctx.summary['failure_reason'] = str(e)
log.exception('Manager failed: %s', taskname)
if exc_info == (None, None, None):
# if first failure is in an __exit__, we don't
# have exc_info set yet
exc_info = sys.exc_info()
if ctx.config.get('interactive-on-error'):
                        from teuthology.task import interactive
log.warning(
'Saw failure during task cleanup, going into interactive mode...')
interactive.task(ctx=ctx, config=None)
else:
if suppress:
sys.exc_clear()
exc_info = (None, None, None)
if exc_info != (None, None, None):
log.debug('Exception was not quenched, exiting: %s: %s',
exc_info[0].__name__, exc_info[1])
raise SystemExit(1)
finally:
# be careful about cyclic references
del exc_info
timer.mark("tasks complete")
```
#### File: teuthology/task/lockfile.py
```python
import logging
import os
from teuthology.orchestra import run
from teuthology import misc as teuthology
import time
import gevent
log = logging.getLogger(__name__)
def task(ctx, config):
"""
This task is designed to test locking. It runs an executable
for each lock attempt you specify, at 0.01 second intervals (to
preserve ordering of the locks).
You can also introduce longer intervals by setting an entry
as a number of seconds, rather than the lock dictionary.
The config is a list of dictionaries. For each entry in the list, you
    must name the "client" to run on, the "lockfile" to lock, and
the "holdtime" to hold the lock.
Optional entries are the "offset" and "length" of the lock. You can also specify a
"maxwait" timeout period which fails if the executable takes longer
to complete, and an "expectfail".
An example::
tasks:
- ceph:
- ceph-fuse: [client.0, client.1]
- lockfile:
      [{client:client.0, lockfile:testfile, holdtime:10},
      {client:client.1, lockfile:testfile, holdtime:0, maxwait:0, expectfail:true},
      {client:client.1, lockfile:testfile, holdtime:0, maxwait:15, expectfail:false},
10,
{client: client.1, lockfile: testfile, holdtime: 5},
{client: client.2, lockfile: testfile, holdtime: 5, maxwait: 1, expectfail: True}]
In the past this test would have failed; there was a bug where waitlocks weren't
cleaned up if the process failed. More involved scenarios are also possible.
:param ctx: Context
:param config: Configuration
"""
    log.info('Starting lockfile')
    # defined before the try block so the cleanup code in the finally clause
    # below can always check it, even if setup fails early
    lock_procs = list()
    try:
assert isinstance(config, list), \
"task lockfile got invalid config"
log.info("building executable on each host")
buildprocs = list()
# build the locker executable on each client
clients = list()
files = list()
for op in config:
if not isinstance(op, dict):
continue
log.info("got an op")
log.info("op['client'] = %s", op['client'])
clients.append(op['client'])
files.append(op['lockfile'])
if not "expectfail" in op:
op["expectfail"] = False
badconfig = False
if not "client" in op:
badconfig = True
if not "lockfile" in op:
badconfig = True
if not "holdtime" in op:
badconfig = True
if badconfig:
raise KeyError("bad config {op_}".format(op_=op))
testdir = teuthology.get_testdir(ctx)
clients = set(clients)
files = set(files)
lock_procs = list()
for client in clients:
(client_remote,) = ctx.cluster.only(client).remotes.iterkeys()
log.info("got a client remote")
(_, _, client_id) = client.partition('.')
filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), op["lockfile"])
proc = client_remote.run(
args=[
'mkdir', '-p', '{tdir}/archive/lockfile'.format(tdir=testdir),
run.Raw('&&'),
'mkdir', '-p', '{tdir}/lockfile'.format(tdir=testdir),
run.Raw('&&'),
'wget',
'-nv',
'--no-check-certificate',
'https://raw.github.com/gregsfortytwo/FileLocker/master/sclockandhold.cpp',
'-O', '{tdir}/lockfile/sclockandhold.cpp'.format(tdir=testdir),
run.Raw('&&'),
'g++', '{tdir}/lockfile/sclockandhold.cpp'.format(tdir=testdir),
'-o', '{tdir}/lockfile/sclockandhold'.format(tdir=testdir)
],
logger=log.getChild('lockfile_client.{id}'.format(id=client_id)),
wait=False
)
log.info('building sclockandhold on client{id}'.format(id=client_id))
buildprocs.append(proc)
# wait for builds to finish
run.wait(buildprocs)
log.info('finished building sclockandhold on all clients')
# create the files to run these locks on
client = clients.pop()
clients.add(client)
(client_remote,) = ctx.cluster.only(client).remotes.iterkeys()
(_, _, client_id) = client.partition('.')
file_procs = list()
for lockfile in files:
filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), lockfile)
proc = client_remote.run(
args=[
'sudo',
'touch',
filepath,
],
logger=log.getChild('lockfile_createfile'),
wait=False
)
file_procs.append(proc)
run.wait(file_procs)
file_procs = list()
for lockfile in files:
filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), lockfile)
proc = client_remote.run(
args=[
'sudo', 'chown', 'ubuntu.ubuntu', filepath
],
logger=log.getChild('lockfile_createfile'),
wait=False
)
file_procs.append(proc)
run.wait(file_procs)
log.debug('created files to lock')
# now actually run the locktests
for op in config:
if not isinstance(op, dict):
assert isinstance(op, int) or isinstance(op, float)
log.info("sleeping for {sleep} seconds".format(sleep=op))
time.sleep(op)
continue
greenlet = gevent.spawn(lock_one, op, ctx)
lock_procs.append((greenlet, op))
time.sleep(0.1) # to provide proper ordering
#for op in config
for (greenlet, op) in lock_procs:
log.debug('checking lock for op {op_}'.format(op_=op))
result = greenlet.get()
if not result:
raise Exception("Got wrong result for op {op_}".format(op_=op))
# for (greenlet, op) in lock_procs
finally:
#cleanup!
if lock_procs:
for (greenlet, op) in lock_procs:
log.debug('closing proc for op {op_}'.format(op_=op))
greenlet.kill(block=True)
for client in clients:
(client_remote,) = ctx.cluster.only(client).remotes.iterkeys()
(_, _, client_id) = client.partition('.')
filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), op["lockfile"])
proc = client_remote.run(
args=[
'rm', '-rf', '{tdir}/lockfile'.format(tdir=testdir),
run.Raw(';'),
'sudo', 'rm', '-rf', filepath
],
wait=True
) #proc
#done!
# task
def lock_one(op, ctx):
"""
Perform the individual lock
"""
log.debug('spinning up locker with op={op_}'.format(op_=op))
timeout = None
proc = None
result = None
(client_remote,) = ctx.cluster.only(op['client']).remotes.iterkeys()
(_, _, client_id) = op['client'].partition('.')
testdir = teuthology.get_testdir(ctx)
filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), op["lockfile"])
if "maxwait" in op:
timeout = gevent.Timeout(seconds=float(op["maxwait"]))
timeout.start()
try:
proc = client_remote.run(
args=[
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'daemon-helper',
'kill',
'{tdir}/lockfile/sclockandhold'.format(tdir=testdir),
filepath,
'{holdtime}'.format(holdtime=op["holdtime"]),
'{offset}'.format(offset=op.get("offset", '0')),
'{length}'.format(length=op.get("length", '1')),
],
logger=log.getChild('lockfile_client.{id}'.format(id=client_id)),
wait=False,
stdin=run.PIPE,
check_status=False
)
result = proc.wait()
except gevent.Timeout as tout:
if tout is not timeout:
raise
if bool(op["expectfail"]):
result = 1
if result == 1:
if bool(op["expectfail"]):
log.info("failed as expected for op {op_}".format(op_=op))
else:
raise Exception("Unexpectedly failed to lock {op_} within given timeout!".format(op_=op))
finally: #clean up proc
if timeout is not None:
timeout.cancel()
if proc is not None:
proc.stdin.close()
ret = (result == 0 and not bool(op["expectfail"])) or (result == 1 and bool(op["expectfail"]))
return ret #we made it through
```
#### File: test/task/test_pcp.py
```python
import os
import requests
import urlparse
from mock import patch, DEFAULT, Mock, MagicMock, call
from pytest import raises
from teuthology.config import config, FakeNamespace
from teuthology.orchestra.cluster import Cluster
from teuthology.orchestra.remote import Remote
from teuthology.orchestra.run import Raw
from teuthology.task.pcp import (PCPDataSource, PCPArchive, PCPGrapher,
GrafanaGrapher, GraphiteGrapher, PCP)
from teuthology.test.task import TestTask
pcp_host = 'http://pcp.front.sepia.ceph.com:44323/'
class TestPCPDataSource(object):
klass = PCPDataSource
def setup(self):
config.pcp_host = pcp_host
def test_init(self):
hosts = ['host1', 'host2']
time_from = 'now-2h'
time_until = 'now'
obj = self.klass(
hosts=hosts,
time_from=time_from,
time_until=time_until,
)
assert obj.hosts == hosts
assert obj.time_from == time_from
assert obj.time_until == time_until
class TestPCPArchive(TestPCPDataSource):
klass = PCPArchive
def test_get_archive_input_dir(self):
hosts = ['host1', 'host2']
time_from = 'now-1d'
obj = self.klass(
hosts=hosts,
time_from=time_from,
)
assert obj.get_archive_input_dir('host1') == \
'/var/log/pcp/pmlogger/host1'
def test_get_pmlogextract_cmd(self):
obj = self.klass(
hosts=['host1'],
time_from='now-3h',
time_until='now-1h',
)
expected = [
'pmlogextract',
'-S', 'now-3h',
'-T', 'now-1h',
Raw('/var/log/pcp/pmlogger/host1/*.0'),
]
assert obj.get_pmlogextract_cmd('host1') == expected
def test_format_time(self):
assert self.klass._format_time(1462893484) == \
'@ Tue May 10 15:18:04 2016'
def test_format_time_now(self):
assert self.klass._format_time('now-1h') == 'now-1h'
class TestPCPGrapher(TestPCPDataSource):
klass = PCPGrapher
def test_init(self):
hosts = ['host1', 'host2']
time_from = 'now-2h'
time_until = 'now'
obj = self.klass(
hosts=hosts,
time_from=time_from,
time_until=time_until,
)
assert obj.hosts == hosts
assert obj.time_from == time_from
assert obj.time_until == time_until
expected_url = urlparse.urljoin(config.pcp_host, self.klass._endpoint)
assert obj.base_url == expected_url
class TestGrafanaGrapher(TestPCPGrapher):
klass = GrafanaGrapher
def test_build_graph_url(self):
hosts = ['host1']
time_from = 'now-3h'
time_until = 'now-1h'
obj = self.klass(
hosts=hosts,
time_from=time_from,
time_until=time_until,
)
base_url = urlparse.urljoin(
config.pcp_host,
'grafana/index.html#/dashboard/script/index.js',
)
assert obj.base_url == base_url
got_url = obj.build_graph_url()
parsed_query = urlparse.parse_qs(got_url.split('?')[1])
assert parsed_query['hosts'] == hosts
assert len(parsed_query['time_from']) == 1
assert parsed_query['time_from'][0] == time_from
assert len(parsed_query['time_to']) == 1
assert parsed_query['time_to'][0] == time_until
def test_format_time(self):
assert self.klass._format_time(1462893484) == \
'2016-05-10T15:18:04'
def test_format_time_now(self):
assert self.klass._format_time('now-1h') == 'now-1h'
class TestGraphiteGrapher(TestPCPGrapher):
klass = GraphiteGrapher
def test_build_graph_urls(self):
obj = self.klass(
hosts=['host1', 'host2'],
time_from='now-3h',
time_until='now-1h',
)
expected_urls = [obj.get_graph_url(m) for m in obj.metrics]
obj.build_graph_urls()
built_urls = []
for metric in obj.graphs.keys():
built_urls.append(obj.graphs[metric]['url'])
assert len(built_urls) == len(expected_urls)
assert sorted(built_urls) == sorted(expected_urls)
def test_check_dest_dir(self):
obj = self.klass(
hosts=['host1'],
time_from='now-3h',
)
assert obj.dest_dir is None
with raises(RuntimeError):
obj._check_dest_dir()
def test_generate_html_dynamic(self):
obj = self.klass(
hosts=['host1'],
time_from='now-3h',
)
html = obj.generate_html()
assert config.pcp_host in html
def test_download_graphs(self):
dest_dir = '/fake/path'
obj = self.klass(
hosts=['host1'],
time_from='now-3h',
dest_dir=dest_dir,
)
_format = obj.graph_defaults.get('format')
with patch('teuthology.task.pcp.requests.get', create=True) as m_get:
m_resp = Mock()
m_resp.ok = True
m_get.return_value = m_resp
with patch('teuthology.task.pcp.open', create=True) as m_open:
m_open.return_value = MagicMock(spec=file)
obj.download_graphs()
expected_filenames = []
for metric in obj.metrics:
expected_filenames.append(
"{}.{}".format(
os.path.join(
dest_dir,
obj._sanitize_metric_name(metric),
),
_format,
)
)
graph_filenames = []
for metric in obj.graphs.keys():
graph_filenames.append(obj.graphs[metric]['file'])
assert sorted(graph_filenames) == sorted(expected_filenames)
def test_generate_html_static(self):
obj = self.klass(
hosts=['host1'],
time_from='now-3h',
dest_dir='/fake/path',
)
with patch('teuthology.task.pcp.requests.get', create=True) as m_get:
m_resp = Mock()
m_resp.ok = True
m_get.return_value = m_resp
with patch('teuthology.task.pcp.open', create=True) as m_open:
m_open.return_value = MagicMock(spec=file)
obj.download_graphs()
html = obj.generate_html(mode='static')
assert config.pcp_host not in html
def test_sanitize_metric_name(self):
sanitized_metrics = {
'foo.bar': 'foo.bar',
'foo.*': 'foo._all_',
'foo.bar baz': 'foo.bar_baz',
'foo.*.bar baz': 'foo._all_.bar_baz',
}
for in_, out in sanitized_metrics.iteritems():
assert self.klass._sanitize_metric_name(in_) == out
def test_get_target_globs(self):
obj = self.klass(
hosts=['host1'],
time_from='now-3h',
)
assert obj.get_target_globs() == ['*host1*']
assert obj.get_target_globs('a.metric') == ['*host1*.a.metric']
obj.hosts.append('host2')
assert obj.get_target_globs() == ['*host1*', '*host2*']
assert obj.get_target_globs('a.metric') == \
['*host1*.a.metric', '*host2*.a.metric']
class TestPCPTask(TestTask):
klass = PCP
task_name = 'pcp'
def setup(self):
self.ctx = FakeNamespace()
self.ctx.cluster = Cluster()
self.ctx.cluster.add(Remote('user@remote1'), ['role1'])
self.ctx.cluster.add(Remote('user@remote2'), ['role2'])
self.ctx.config = dict()
self.task_config = dict()
config.pcp_host = pcp_host
def test_init(self):
task = self.klass(self.ctx, self.task_config)
assert task.stop_time == 'now'
def test_disabled(self):
config.pcp_host = None
with self.klass(self.ctx, self.task_config) as task:
assert task.enabled is False
assert not hasattr(task, 'grafana')
assert not hasattr(task, 'graphite')
assert not hasattr(task, 'archiver')
def test_setup(self):
with patch.multiple(
self.klass,
setup_collectors=DEFAULT,
begin=DEFAULT,
end=DEFAULT,
):
with self.klass(self.ctx, self.task_config) as task:
task.setup_collectors.assert_called_once_with()
assert isinstance(task.start_time, int)
def test_setup_collectors(self):
with patch.multiple(
self.klass,
begin=DEFAULT,
end=DEFAULT,
):
with self.klass(self.ctx, self.task_config) as task:
assert hasattr(task, 'grafana')
assert not hasattr(task, 'graphite')
assert not hasattr(task, 'archiver')
self.task_config['grafana'] = False
with self.klass(self.ctx, self.task_config) as task:
assert not hasattr(task, 'grafana')
@patch('os.makedirs')
def test_setup_grafana(self, m_makedirs):
with patch.multiple(
self.klass,
begin=DEFAULT,
end=DEFAULT,
):
self.ctx.archive = '/fake/path'
with self.klass(self.ctx, self.task_config) as task:
assert hasattr(task, 'grafana')
self.task_config['grafana'] = False
with self.klass(self.ctx, self.task_config) as task:
assert not hasattr(task, 'grafana')
@patch('os.makedirs')
@patch('teuthology.task.pcp.GraphiteGrapher')
def test_setup_graphite(self, m_graphite_grapher, m_makedirs):
with patch.multiple(
self.klass,
begin=DEFAULT,
end=DEFAULT,
):
with self.klass(self.ctx, self.task_config) as task:
assert not hasattr(task, 'graphite')
self.task_config['graphite'] = False
with self.klass(self.ctx, self.task_config) as task:
assert not hasattr(task, 'graphite')
self.ctx.archive = '/fake/path'
self.task_config['graphite'] = True
with self.klass(self.ctx, self.task_config) as task:
assert hasattr(task, 'graphite')
self.task_config['graphite'] = False
with self.klass(self.ctx, self.task_config) as task:
assert not hasattr(task, 'graphite')
@patch('os.makedirs')
@patch('teuthology.task.pcp.PCPArchive')
def test_setup_archiver(self, m_archive, m_makedirs):
with patch.multiple(
self.klass,
begin=DEFAULT,
end=DEFAULT,
):
self.task_config['fetch_archives'] = True
with self.klass(self.ctx, self.task_config) as task:
assert not hasattr(task, 'archiver')
self.task_config['fetch_archives'] = False
with self.klass(self.ctx, self.task_config) as task:
assert not hasattr(task, 'archiver')
self.ctx.archive = '/fake/path'
self.task_config['fetch_archives'] = True
with self.klass(self.ctx, self.task_config) as task:
assert hasattr(task, 'archiver')
self.task_config['fetch_archives'] = False
with self.klass(self.ctx, self.task_config) as task:
assert not hasattr(task, 'archiver')
@patch('os.makedirs')
@patch('teuthology.task.pcp.GrafanaGrapher')
@patch('teuthology.task.pcp.GraphiteGrapher')
def test_begin(self, m_grafana, m_graphite, m_makedirs):
with patch.multiple(
self.klass,
end=DEFAULT,
):
with self.klass(self.ctx, self.task_config) as task:
task.grafana.build_graph_url.assert_called_once_with()
self.task_config['graphite'] = True
self.ctx.archive = '/fake/path'
with self.klass(self.ctx, self.task_config) as task:
task.graphite.write_html.assert_called_once_with()
@patch('os.makedirs')
@patch('teuthology.task.pcp.GrafanaGrapher')
@patch('teuthology.task.pcp.GraphiteGrapher')
def test_end(self, m_grafana, m_graphite, m_makedirs):
self.ctx.archive = '/fake/path'
with self.klass(self.ctx, self.task_config) as task:
# begin() should have called write_html() once by now, with no args
task.graphite.write_html.assert_called_once_with()
# end() should have called write_html() a second time by now, with
# mode=static
second_call = task.graphite.write_html.call_args_list[1]
assert second_call[1]['mode'] == 'static'
assert isinstance(task.stop_time, int)
@patch('os.makedirs')
@patch('teuthology.task.pcp.GrafanaGrapher')
@patch('teuthology.task.pcp.GraphiteGrapher')
def test_end_16049(self, m_grafana, m_graphite, m_makedirs):
# http://tracker.ceph.com/issues/16049
# Jobs were failing if graph downloading failed. We don't want that.
self.ctx.archive = '/fake/path'
with self.klass(self.ctx, self.task_config) as task:
task.graphite.download_graphs.side_effect = \
requests.ConnectionError
# Even though downloading graphs failed, we should have called
# write_html() a second time, again with no args
assert task.graphite.write_html.call_args_list == [call(), call()]
assert isinstance(task.stop_time, int)
``` |
{
"source": "josdan/pluradl.py",
"score": 3
} |
#### File: pluradl.py/scrapeutils/autoscrape_pluralsight_regex.py
```python
try:
from utils import *
except ImportError:
from scrapeutils.utils import *
SCRIPTPATH=os.path.dirname(os.path.abspath(sys.argv[0]))
JSON_OUTPUT_FILE = os.path.abspath(os.path.join(SCRIPTPATH, '..', "data", "courses.json"))
SEARCH_URL = r'https://www.pluralsight.com/search?categories=course&sort=title'
def main():
course_dict = load_stored_json(JSON_OUTPUT_FILE)
source_data = get_courselist_source(SEARCH_URL, n_pages=500)
class_name = r'search-result columns'
search_snippets = outer_search_html(source_data, class_name)
title_tag=r'<div class="search-result__title">'
author_tag=r'<div class="search-result__author">'
level_tag=r'<div class="search-result__level">'
date_tag=r'<div class="search-result__date">'
length_tag=r'<div class="search-result__length show-for-large-up">'
div_tag=r'</div>'; quote = r'"'; gt = r'>'; a_tag = r'</a>'; href=r'href="'
title_lookaround = lookaround_tags(title_tag, div_tag)
author_lookaround = lookaround_tags(author_tag, div_tag)
level_lookaround = lookaround_tags(level_tag, div_tag)
date_lookaround = lookaround_tags(date_tag, div_tag)
length_lookaround = lookaround_tags(length_tag, div_tag)
href_lookaround = lookaround_tags(href, quote)
gt_a_lookaround = lookaround_tags(gt, a_tag)
for html_input in search_snippets:
thiscourse = {}
title_outer_text = return_lookaround_text(title_lookaround.search(html_input))
if title_outer_text:
url_text = return_lookaround_text(href_lookaround.search(title_outer_text))
name_text = return_lookaround_text(gt_a_lookaround.search(title_outer_text))
course_id = url_text.split('/')[-1]
author_text = return_lookaround_text(author_lookaround.search(html_input))
level_text = return_lookaround_text(level_lookaround.search(html_input))
date_text = return_lookaround_text(date_lookaround.search(html_input))
length_text = return_lookaround_text(length_lookaround.search(html_input))
length = get_length(length_text)
rating_snippet = outer_search_snippet(html_input, r'search-result__rating')
rating = return_rating(rating_snippet)
if author_text and not '{' in author_text:
thiscourse['url'] = url_text.strip()
thiscourse['title'] = name_text.strip()
thiscourse['author'] = author_text.strip().split('by ')[-1]
thiscourse['level'] = level_text.strip()
thiscourse['date'] = date_text.strip()
thiscourse['length'] = length
thiscourse['rating'] = rating
course_dict[course_id] = thiscourse
print('')
print('Loaded', len(course_dict), 'courses.', 'Saving results to', JSON_OUTPUT_FILE)
store_dict_as_json(course_dict, JSON_OUTPUT_FILE)
print('Done.')
if __name__ == "__main__":
main()
```
#### File: pluradl.py/scrapeutils/scrape_html_to_sqlite.py
```python
from __future__ import unicode_literals
import os, re,sqlite3
import pandas as pd
import numpy as np
from sqlite3 import Error
from html.parser import HTMLParser
HTML_DATA = os.path.join("...", "data", "search_results.html")
DB_FILE = os.path.join("...", "data", "courses.db")
course_params = []
course_id = "";
class SearchHTMLParser(HTMLParser):
def handle_starttag(self, tag, attrs):
for attr in attrs:
value = attr[1]
if re.search(r'courses', value):
global course_id
course_id = value.split(r'/')[-1]
course_params.append(value)
def handle_data(self, data):
data = str(data).strip()
if data:
course_params.append(data)
def outer_search_result(HTML_DATA):
read_state=False; track=0; search_snippets=[]
with open(HTML_DATA, 'rt') as f:
for line in f.readlines():
if re.search(r'class="search-result columns"', line):
read_state = True
search_result = []
if read_state:
search_result.append(line)
n_open = len(re.findall(r'<div', line))
n_close = len(re.findall(r'/div>', line))
track+=n_open; track-=n_close
if track == 0:
read_state = False
search_snippets.append(''.join(search_result))
return search_snippets
def populate_dictionary(parser, search_snippets):
global course_params
global course_id
search_dictionary = {}
for snippet in search_snippets:
course_id = ""; course_params = []
parser.feed(snippet)
search_dictionary[course_id] = course_params
return search_dictionary
def create_connection(db_file):
conn = None
try:
conn = sqlite3.connect(db_file)
return conn
except Error as e:
print(e)
return conn
if __name__ == '__main__':
search_snippets = outer_search_result(HTML_DATA)
parser = SearchHTMLParser()
search_dictionary = populate_dictionary(parser, search_snippets)
column_data = []
for key, values in search_dictionary.items():
this_entry = []
this_entry.append(key)
for val in values:
this_entry.append(val)
if len(this_entry) < 8:
this_entry.append("None")
this_array = np.array(this_entry)
column_data.append(this_array)
elif len(this_entry) == 8:
this_array = np.array(this_entry)
column_data.append(this_array)
data = pd.DataFrame(data=column_data, columns=['id','url','title','author','level','date','length', 'rating']).set_index('id')
con = create_connection(DB_FILE)
data.to_sql('courses', con=con, if_exists='replace')
con.close()
```
#### File: pluradl.py/test/test_cache.py
```python
from __future__ import unicode_literals
import shutil
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import FakePDL
from plura_dl.cache import Cache
def _is_empty(d):
return not bool(os.listdir(d))
def _mkdir(d):
if not os.path.exists(d):
os.mkdir(d)
class TestCache(unittest.TestCase):
def setUp(self):
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
TESTDATA_DIR = os.path.join(TEST_DIR, 'testdata')
_mkdir(TESTDATA_DIR)
self.test_dir = os.path.join(TESTDATA_DIR, 'cache_test')
self.tearDown()
def tearDown(self):
if os.path.exists(self.test_dir):
shutil.rmtree(self.test_dir)
def test_cache(self):
pdl = FakePDL({
'cachedir': self.test_dir,
})
c = Cache(pdl)
obj = {'x': 1, 'y': ['ä', '\\a', True]}
self.assertEqual(c.load('test_cache', 'k.'), None)
c.store('test_cache', 'k.', obj)
self.assertEqual(c.load('test_cache', 'k2'), None)
self.assertFalse(_is_empty(self.test_dir))
self.assertEqual(c.load('test_cache', 'k.'), obj)
self.assertEqual(c.load('test_cache', 'y'), None)
self.assertEqual(c.load('test_cache2', 'k.'), None)
c.remove()
self.assertFalse(os.path.exists(self.test_dir))
self.assertEqual(c.load('test_cache', 'k.'), None)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "josdas/Game-theory",
"score": 3
} |
#### File: Game-theory/non-zero-sum-games/nonzero_sum_game.py
```python
import numpy as np
import scipy.optimize
class NonZeroSumGame:
def __init__(self, get_score, first_actions, second_actions):
"""
Calculate Nash equilibrium in mixed strategies for non-zero-sum games.
:param get_score: get_score(first_action, second_action) is a function
that returns pair of scores for the game.
:param first_actions: first_actions is a list of possible actions for the first player.
:param second_actions: second_actions is a list of possible actions for the second player.
"""
self._get_score = get_score
self._actions = [first_actions, second_actions]
self.optimal_policy = None
self.game_price = self.calc_optimal_policy()
def calc_optimal_policy(self):
n = len(self._actions[0])
m = len(self._actions[1])
price_matrix = np.array(
[[self._get_score(f_act, s_act) for s_act in self._actions[1]]
for f_act in self._actions[0]]).transpose((2, 0, 1))
total_price = price_matrix[0] + price_matrix[1]
vars_count = n + m + 2
def loss(x):
first, second = x[:n], x[n:n + m]
alpha, betta = x[n + m], x[n + m + 1]
return -first @ total_price @ second + alpha + betta
def jac(x):
first, second = x[:n], x[n:n + m]
first_jac = -total_price @ second
second_jac = -first @ total_price
alpha_jac = [1]
betta_jac = [1]
return np.concatenate((first_jac, second_jac,
alpha_jac, betta_jac))
# Gx >= 0
G = np.zeros((n + m, vars_count))
for i in range(n):
G[i][n:n + m] = -price_matrix[0][i]
G[i][n + m] = 1
for i in range(m):
G[n + i][:n] = -price_matrix[1][:, i]
G[n + i][n + m + 1] = 1
# Ax - b = 0
A = np.zeros((2, vars_count))
A[0][:n] = 1
A[1][n:n + m] = 1
b = np.ones(2)
constraints = [
{
'type': 'ineq',
'fun': lambda x: G @ x,
'jac': lambda x: G,
},
{
'type': 'eq',
'fun': lambda x: A @ x - b,
'jac': lambda x: A
}
]
bounds = [(0, None) for _ in range(n + m)]
bounds += [(None, None), (None, None)]
x0 = np.random.random(vars_count)
x0[:n] /= x0[:n].sum()
x0[n:n + m] /= x0[n:n + m].sum()
x0[n + m:n + m + 2] *= total_price.sum()
res = scipy.optimize.minimize(x0=x0, fun=loss, jac=jac, method='SLSQP',
constraints=constraints,
bounds=bounds)
# Policy is a distribution over actions for the each players.
self.optimal_policy = res.x[:n], res.x[n:n + m]
game_price = [self.optimal_policy[0] @ price_matrix[player] @ self.optimal_policy[1]
for player in (0, 1)]
return game_price
def gen_action(self, player):
assert player in {1, 2}
player -= 1
return np.random.choice(self._actions[player], p=self.optimal_policy[player])
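# Hedged usage sketch (editorial addition, not part of the original module; the
# payoff function and action names below are invented, see test_nonzero.py in the
# next file for the repository's own examples):
#
#   game = NonZeroSumGame(lambda a, b: (1, 1) if a == b else (0, 0),
#                         ['x', 'y'], ['x', 'y'])
#   game.game_price      # pair of expected payoffs at the computed equilibrium
#   game.gen_action(1)   # sample player 1's action from its mixed strategy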
```
#### File: Game-theory/non-zero-sum-games/test_nonzero.py
```python
import numpy as np
from nonzero_sum_game import NonZeroSumGame
SEED = 1337
def simulate_game(get_score, first_actions, second_actions, iters=10 ** 4, comments=None, verbosity=1):
game = NonZeroSumGame(get_score, first_actions, second_actions)
score_first, score_second = 0, 0
for it in range(iters):
f_act = game.gen_action(1)
s_act = game.gen_action(2)
f_score, s_score = get_score(f_act, s_act)
score_first += f_score
score_second += s_score
if verbosity >= 1:
if comments is not None:
print(comments)
print(game.optimal_policy[0])
print(f'Game price first: {game.game_price[0]}')
print(f'Average score first: {score_first / iters}')
print()
print(game.optimal_policy[1])
print(f'Game price second: {game.game_price[1]}')
print(f'Average score second: {score_second / iters}')
print('-' * 80)
return game.optimal_policy, game.game_price
def rock_paper_scissors(first_act, second_act):
FIGHTS = {
('rock', 'scissors'),
('paper', 'rock'),
('scissors', 'paper')
}
ACTIONS = ('rock', 'paper', 'scissors')
assert first_act in ACTIONS and second_act in ACTIONS
if (first_act, second_act) in FIGHTS:
return 1, -1
if (second_act, first_act) in FIGHTS:
return -1, 1
return 0, 0
def prisoners_dilemma(first_act, second_act):
ACTIONS = ['silent', 'betray']
assert first_act in ACTIONS and second_act in ACTIONS
if first_act == 'silent' and second_act == 'silent':
return -1, -1
if first_act == 'silent' and second_act == 'betray':
return -3, 0
if first_act == 'betray' and second_act == 'betray':
return -2, -2,
if first_act == 'betray' and second_act == 'silent':
return 0, -3
def battle_of_the_sexes(first_act, second_act):
ACTIONS = ['opera', 'football']
assert first_act in ACTIONS and second_act in ACTIONS
if first_act == 'opera' and second_act == 'opera':
return 2, 1
if first_act == 'opera' and second_act == 'football':
return -1, -1
if first_act == 'football' and second_act == 'football':
return 1, 2
if first_act == 'football' and second_act == 'opera':
return -1, -1
if __name__ == '__main__':
np.random.seed(SEED)
simulate_game(rock_paper_scissors,
['rock', 'paper', 'scissors'],
['rock', 'paper', 'scissors'],
comments="Fair rock paper scissors with zero price")
simulate_game(rock_paper_scissors,
['rock', 'paper', 'scissors'],
['rock', 'paper'],
comments="Unfair rock paper scissors where the second player can't use 'scissors'")
simulate_game(rock_paper_scissors,
['rock', 'paper', 'scissors'],
['rock'],
comments="Unfair rock paper scissors where the second player can't use 'scissors' and 'paper'")
simulate_game(prisoners_dilemma,
['silent', 'betray'],
['silent', 'betray'],
comments="Prisoner's dilemma")
for i in range(10):
simulate_game(battle_of_the_sexes,
['opera', 'football'],
['opera', 'football'],
comments="Battle of the Sexes #{}".format(i))
``` |
{
"source": "josdavidmo/clinical_records",
"score": 2
} |
#### File: clinical_records/patients/filters.py
```python
from dateutil.relativedelta import relativedelta
from django.db.models import Q
from django.utils import timezone
from patients.models import CIE10
from patients.models import Patient
from patients.models import Paraclinical
from patients.models import MedicalHistory
from patients.models import Medicine
import datetime
import django_filters
class PatientFilter(django_filters.FilterSet):
full_name_auto = django_filters.CharFilter(method='filter_full_name_auto',
label='Nombre')
age = django_filters.RangeFilter(method='filter_age',
label='Edad')
class Meta:
model = Patient
exclude = ('birth_date', 'phone', 'email', 'names', 'last_names',
'address', 'scholarship',
'profesion', 'civil_status', 'origin')
def filter_age(self, queryset, name, value):
current = datetime.datetime.now()
if value.start < value.stop:
min_date = datetime.date(
current.year - value.start, current.month, current.day)
max_date = datetime.date(
current.year - (value.stop + 1), current.month, current.day + 1)
else:
min_date = datetime.date(
current.year - (value.stop + 1), current.month, current.day + 1)
max_date = datetime.date(
current.year - value.start, current.month, current.day)
return queryset.filter(birth_date__range=(max_date, min_date))
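# Worked example (dates invented for illustration): with today = 2024-06-15, an
# age range of 20-30 yields birth_date__range=(1993-06-16, 2004-06-15), i.e. it
# keeps everyone who is at least 20 and at most 30 years old on that day.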
def filter_full_name_auto(self, queryset, name, value):
return queryset.filter(Q(names__contains=value) |
Q(last_names__contains=value))
class CIE10Filter(django_filters.FilterSet):
class Meta:
model = CIE10
fields = {
'code': ['contains'],
'name': ['contains']
}
class MedicineFilter(django_filters.FilterSet):
class Meta:
model = Medicine
fields = {
'name': ['contains'],
'medicine_type': ['exact']
}
class ParaclinicalFilter(django_filters.FilterSet):
class Meta:
model = Paraclinical
fields = {
'name': ['contains']
}
class ClinicalHistoryFilter(django_filters.FilterSet):
diagnostic = django_filters.MultipleChoiceFilter(
method="filter_diagnostic", label="Diagnóstico")
def filter_diagnostic(self, queryset, name, value):
return queryset.filter(physical_exam__diagnostics_images__id__in=value)
class Meta:
model = MedicalHistory
fields = ['created_at_date', 'patient', 'diagnostic']
exclude = ('clinical_history', 'physical_exam', 'systems_review',
'created_hour', 'city', 'phone_companion',
'referred_by', 'reason_consultation', 'sgss', 'companion',
'current_illness', 'formulas', 'created_at_hour')
``` |
{
"source": "josdavidmo/meiko_etl_challenge_back",
"score": 2
} |
#### File: meiko_etl_challenge_back/movies/admin.py
```python
from django.contrib import admin
from django.db.models import Sum, Func, F
from django.utils.translation import gettext_lazy as _
from movies.models import Movie
class MoviesFilter(admin.SimpleListFilter):
# Human-readable title which will be displayed in the
# right admin sidebar just above the filter options.
title = _("Top Filters")
# Parameter for the filter that will be used in the URL query.
parameter_name = "tops"
def lookups(self, request, model_admin):
if request.user.is_superuser:
return (("raised_most_money",
_("Which are the 10 movies that raised the most money?")),
("raised_least_money",
_("Which are the 10 movies that raised the least money?")),
("spent_most_produce_money",
_("Which are the 7 films that spent the most money to "
"produce?")),
("spent_lest_produce_money",
_("What are the 7 films that spent the least money to "
"produce?")),
("raised_most_money_year",
_(
"Which movie genre raised the most money for each year?")),
("top_genre",
_("Which genre do people like best?")),
("top_directors",
_("Which 5 directors have the best reputation?")),
)
def queryset(self, request, queryset):
"""
Returns the filtered queryset based on the value
provided in the query string and retrievable via
`self.value()`.
"""
return queryset
@admin.register(Movie)
class MovieAdmin(admin.ModelAdmin):
list_display = ("id",) + Movie.HEADERS
list_filter = (MoviesFilter,)
change_list_template = "movies/movies_changelist.html"
list_per_page = 10
def changelist_view(self, request, extra_context=None):
extra_context = extra_context or {}
if request.GET.get("tops") == "raised_most_money":
extra_context["list"] = Movie.objects.exclude(
gross__isnull=True).values_list(
"movie_title", flat=True).order_by("-gross")[:10]
if request.GET.get("tops") == "raised_least_money":
extra_context["list"] = Movie.objects.exclude(
gross__isnull=True).values_list(
"movie_title", flat=True).order_by("gross")[:10]
if request.GET.get("tops") == "spent_most_produce_money":
extra_context["list"] = Movie.objects.exclude(
budget__isnull=True).values_list(
"movie_title", flat=True).order_by("-budget")[:7]
if request.GET.get("tops") == "spent_lest_produce_money":
extra_context["list"] = Movie.objects.exclude(
budget__isnull=True).values_list(
"movie_title", flat=True).order_by("budget")[:7]
if request.GET.get("tops") == "raised_most_money_year":
extra_context["list"] = Movie.objects.annotate(
genre=Func(F("genres"),
function="unnest")).values(
"genre").values_list("genre", flat=True).annotate(
total=Sum("gross")).order_by(
"-total")[:1]
if request.GET.get("tops") == "top_genre":
extra_context["list"] = Movie.objects.annotate(
genre=Func(F("genres"),
function="unnest")).values(
"genre").values_list("genre", flat=True).annotate(
total=Sum("cast_total_facebook_likes")).order_by(
"-total")[:1]
if request.GET.get("tops") == "top_directors":
extra_context["list"] = Movie.objects.exclude(
director_name="").values_list("director_name",
flat=True).annotate(total=Sum(
"director_facebook_likes")).order_by("-total")[:5]
return super(MovieAdmin, self).changelist_view(request,
extra_context=extra_context)
``` |
{
"source": "josdavidmo/zinobe",
"score": 2
} |
#### File: josdavidmo/zinobe/manage.py
```python
import os
import sys
def test():
""" run unittest """
import unittest
from users import test
suite = unittest.TestLoader().loadTestsFromModule(test)
unittest.TextTestRunner().run(suite)
def run():
""" run server """
from users import urls
if os.environ.get("REQUEST_METHOD", ""):
from wsgiref.handlers import BaseCGIHandler
BaseCGIHandler(sys.stdin, sys.stdout, sys.stderr,
os.environ).run(urls.urls)
else:
from wsgiref.simple_server import WSGIServer, WSGIRequestHandler
from beaker.middleware import SessionMiddleware
session_opts = {
'session.type': 'file',
'session.cookie_expires': True,
'session.data_dir': 'var',
}
app = SessionMiddleware(urls.urls, session_opts)
httpd = WSGIServer(('', 8080), WSGIRequestHandler)
httpd.set_app(app)
print "Serving HTTP on %s port %s ..." % httpd.socket.getsockname()
httpd.serve_forever()
def create():
""" create database """
from users.model import Base
from dbconfig import engine
print "Creating data base..."
Base.metadata.create_all(engine)
print "Ok"
if __name__ == "__main__":
if 'create' in sys.argv:
create()
if 'run' in sys.argv:
test()
run()
if 'test' in sys.argv:
test()
```
#### File: zinobe/users/view.py
```python
import hashlib
from sqlalchemy import func, or_
from sqlalchemy.orm import sessionmaker
from core.renders import JsonResponse, redirect, render
from core.request import GET, POST
from dbconfig import engine
from users.model import User
def sign_in_user_get(environ, start_response):
"""
Used to render sign in user.
Renders sign_in.html .
Parameters
----------
start_response : start_response
WSGI start_response
environ : environ
WSGI python environ
Returns
-------
list
sign in view
"""
usr_session = environ['beaker.session']
usr_session.delete()
return render(start_response, 'sign_in.html')
def sign_in_user_post(environ, start_response):
"""
Used to manage sign in user request.
Manage sign_in.html request.
Parameters
----------
start_response : start_response
WSGI start_response
environ : environ
WSGI python environ
Returns
-------
list
sign in view
"""
request = POST(environ)
Session = sessionmaker()
Session.configure(bind=engine)
session = Session()
password = hashlib.sha256(request['password'].encode()).hexdigest()
query = session.query(User).filter(
User.email == request['email'], User.password == password)
exists = query.count() == 1
if exists:
usr = query[0]
usr_session = environ['beaker.session']
usr_session['usr_id'] = usr.id
usr_session.save()
return redirect(start_response, '/list/')
else:
return redirect(start_response, '/')
def sign_up_user_get(environ, start_response):
"""
Used to render sign up user.
Renders sign_up.html .
Parameters
----------
start_response : start_response
WSGI start_response
environ : environ
WSGI python environ
Returns
-------
list
sign up view
"""
return render(start_response, 'sign_up.html')
def sign_up_user_post(environ, start_response):
"""
Used to manage sign up user request.
Manage sign_up.html request.
Parameters
----------
start_response : start_response
WSGI start_response
environ : environ
WSGI python environ
Returns
-------
list
sign up view
"""
request = POST(environ)
fields = set(field for field in User.__dict__)
dct = {key: value for key, value in request.items() if key in fields}
user = User(**dct)
user.password = hashlib.sha256(user.password.encode()).hexdigest()
try:
Session = sessionmaker()
Session.configure(bind=engine)
session = Session()
session.add(user)
session.commit()
return redirect(start_response, '/')
except Exception as e:
return redirect(start_response, '/')
def list_get(environ, start_response):
"""
Used to render list user.
Renders list.html .
Parameters
----------
start_response : start_response
WSGI start_response
environ : environ
WSGI python environ
Returns
-------
list
list view
"""
usr_session = environ['beaker.session']
if usr_session.get('usr_id'):
Session = sessionmaker()
Session.configure(bind=engine)
session = Session()
usr = session.query(User).get(usr_session.get('usr_id'))
context = {'usr': usr}
return render(start_response, 'list.html', context)
else:
return redirect(start_response, '/')
def list_post(environ, start_response):
"""
Used to manage list user request.
Manage list.html request.
Parameters
----------
start_response : start_response
WSGI start_response
environ : environ
WSGI python environ
Returns
-------
list
list view
"""
usr_session = environ['beaker.session']
if usr_session.get('usr_id'):
Session = sessionmaker()
Session.configure(bind=engine)
session = Session()
request = POST(environ)
if request.get("search[value]"):
query = session.query(User.name, User.email, User.country).filter(or_(
User.email.contains(request.get("search[value]")),
User.name.contains(request.get("search[value]"))))
else:
query = session.query(User.name, User.email, User.country)
users = [[str(user.name), str(user.email), str(user.country)]
for user in query]
response = {
"draw": int(request["draw"]),
"recordsTotal": len(users),
"recordsFiltered": len(users),
"data": users
}
return JsonResponse(start_response, str(response))
else:
return redirect(start_response, '/')
``` |
{
"source": "JosDenysGitHub/iknow",
"score": 3
} |
#### File: iknow/actions/update_python.py
```python
import updatelib
import json
import os
import re
import subprocess
import sys
import urllib.request
def get_first_numeric(s):
"""Return the index of the first numeric character in s, -1 if none
exists."""
for i in range(len(s)):
if s[i] in '0123456789':
return i
return -1
def get_first_nonnumeric(s):
"""Return the index of the first non-numeric character in s, -1 if all
characters are numeric."""
for i in range(len(s)):
if s[i] not in '0123456789':
return i
return -1
def compare_versions(a, b):
"""Compare 2 Python version strings. Return -1 if a < b, 0 if a == b, and
1 if a > b. Version string must match the regex VERSION_REGEX."""
a = a.split('.')
b = b.split('.')
a[0] = int(a[0])
b[0] = int(b[0])
a[1] = int(a[1])
b[1] = int(b[1])
nonnumeric_i = get_first_nonnumeric(a[2])
if nonnumeric_i == -1:
a[2] = int(a[2])
a.append('zz')
else:
micro = a[2]
a[2] = int(micro[:nonnumeric_i])
nonnumeric_i += micro[nonnumeric_i] == '-'
after_micro = micro[nonnumeric_i:]
numeric_i = get_first_numeric(after_micro)
a.append(after_micro[:numeric_i])
a.append(int(after_micro[numeric_i:]))
nonnumeric_i = get_first_nonnumeric(b[2])
if nonnumeric_i == -1:
b[2] = int(b[2])
b.append('zz')
else:
micro = b[2]
b[2] = int(micro[:nonnumeric_i])
nonnumeric_i += micro[nonnumeric_i] == '-'
after_micro = micro[nonnumeric_i:]
numeric_i = get_first_numeric(after_micro)
b.append(after_micro[:numeric_i])
b.append(int(after_micro[numeric_i:]))
if a < b:
return -1
elif a == b:
return 0
return 1
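# Illustrative sanity checks (editorial comments, not part of the original script).
# Because a missing prerelease suffix is padded with 'zz', a final release sorts
# above its own prereleases:
#   compare_versions('3.9.1', '3.9.2')        == -1
#   compare_versions('3.10.0', '3.10.0')      ==  0
#   compare_versions('3.10.0', '3.10.0-rc1')  ==  1   # win64-style prerelease tag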
ALLOW_PRERELEASE = False # flag for whether to allow updates to prerelease Python versions
platform = sys.argv[1]
assert platform in ('win64', 'osx'), f'{platform} is not supported'
if platform == 'win64' and ALLOW_PRERELEASE:
VERSION_REGEX = r'^([0-9]+\.){2}[0-9]+(-(a|b|rc)[0-9]+)?$'
elif platform == 'osx' and ALLOW_PRERELEASE:
VERSION_REGEX = r'^([0-9]+\.){2}[0-9]+((a|b|rc)[0-9]+)?$'
else:
VERSION_REGEX = r'^([0-9]+\.){2}[0-9]+$'
vars = updatelib.get_vars()
if platform == 'win64':
current_versions = vars['PYVERSIONS_WIN'].split()
else:
current_versions = vars['PYVERSIONS_OSX'].split()
# Get list of available Python versions.
if platform == 'win64':
url_data = urllib.request.urlopen('https://api.nuget.org/v3/index.json')
json_data = json.load(url_data)
for item in json_data['resources']:
if item['@type'] == 'PackageBaseAddress/3.0.0':
base_url = item['@id']
break
else:
raise ValueError('PackageBaseAddress/3.0.0 not found')
url_data = urllib.request.urlopen(f'{base_url}python/index.json')
json_data = json.load(url_data)
available_versions = [version for version in json_data['versions'] if re.match(VERSION_REGEX, version)]
else: # platform == 'osx'
if sys.platform != 'darwin':
raise EnvironmentError('Must be run on Mac OS X')
subprocess.run(['brew', 'update'], check=True)
subprocess.run(['brew', 'install', 'pyenv'], check=True)
p = subprocess.run(['pyenv', 'install', '--list'],
stdout=subprocess.PIPE, universal_newlines=True,
check=True)
available_versions = [version for version in p.stdout.split() if re.match(VERSION_REGEX, version)]
# find updates to current versions
update_info = []
latest_versions = []
for current_version in current_versions:
latest_version_found = current_version
for available_version in available_versions:
if current_version.split('.')[:2] == available_version.split('.')[:2] and \
compare_versions(latest_version_found, available_version) == -1:
latest_version_found = available_version
latest_versions.append(latest_version_found)
if current_version != latest_version_found:
update_info.append([current_version, latest_version_found])
# set variables
if platform == 'win64':
vars['PYVERSIONS_WIN'] = ' '.join(latest_versions)
else:
vars['PYVERSIONS_OSX'] = ' '.join(latest_versions)
updatelib.set_vars(vars)
# set environment variables for next GitHub actions step
updatelib.setenv('PYTHON_UPDATE_INFO_ONELINE', ', '.join('→'.join(x) for x in update_info))
for i in range(len(update_info)):
update_info[i] = ' can be updated to '.join(update_info[i])
update_info[i] = f'- {update_info[i]}'
updatelib.setenv('PYTHON_UPDATE_INFO_MULTILINE', '\n'.join(update_info))
if platform == 'osx':
p = subprocess.run(['brew', 'list', '--versions', 'pyenv'],
stdout=subprocess.PIPE, universal_newlines=True,
check=True)
pyenv_tool_version = p.stdout.split()[1]
updatelib.setenv('PYENV_TOOL_VERSION', pyenv_tool_version)
```
#### File: iknow/language_development/handleCSV.py
```python
# This Python file uses the following encoding: utf-8
''' handleCSV.py : generic script for automating CSV file operations.
- task1 : filter Literal labels from lexrep.csv
'''
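# Invocation sketch (editorial note; 'en' is just a hypothetical language folder):
#   python handleCSV.py en    -> process only language_models/en
#   python handleCSV.py       -> language_par stays '*' and every folder under
#                                language_models/ is processed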
import os,sys,time,shutil,pprint
from os import walk
from shutil import copyfile
pp = pprint.PrettyPrinter()
language_par='*' # '*' # default to all languages
if (len(sys.argv)>1):
language_par = sys.argv[1]
def filter_doubles_from_lexreps(full_path_name):
print("filtering doubles from \"" + full_path_name + "\"")
lexreps_csv = [] # filtered collection
lexreps_map = {} # dictionary to filter doubles
f_lexreps = open(full_path_name,"r",True,"utf8")
for txt_line in f_lexreps:
if (txt_line.find("/*") != -1) or (len(txt_line.split(';'))<4): # comment or empty
lexreps_csv.append(txt_line)
continue
line_split = txt_line.split(';')
lexrep = line_split[2]
if lexrep in lexreps_map: # this is a double
print("found double : " + txt_line)
print("*** removed from lexreps collection ***")
else:
lexreps_map[lexrep]=txt_line
lexreps_csv.append(txt_line)
f_lexreps.close()
f_lexreps = open(full_path_name,"w",True,"utf8")
for txt_line in lexreps_csv:
f_lexreps.write(txt_line)
f_lexreps.close()
pass
def filter_literal_label_from_lexreps(full_path_name):
print("filtering: \"" + full_path_name + "\"")
lexreps_csv = [] # filtered collection
copyfile(full_path_name, full_path_name+".bak") # one backup file should do, use local git for original file
f_lexreps = open(full_path_name,"r",True,"utf8")
for txt_line in f_lexreps:
if txt_line.find("/**** Rewritten by DELVE") != -1:
line_split = txt_line.split(';')
meta = line_split[1]
lexrep = line_split[2]
line_reconstruct = ";" + meta + ";" + lexrep + ";;"
for i in range(4,len(line_split),1):
if (line_split[i]=="\n"):
continue
if (len(line_split[i])): # skip in the (exception) case of emptiness
line_reconstruct += line_split[i] + ";"
line_reconstruct += "\n"
lexreps_csv.append(line_reconstruct) # add modified line
# pp.pprint(line_split)
txt_lit = f_lexreps.readline() # line containing literal, exclude !
if txt_lit.find("Lit_") == -1:
raise ValueError('Lexrep line {} must have a literal label.'.format(txt_lit))
else:
lexreps_csv.append(txt_line)
f_lexreps.close()
# pp.pprint(lexreps_csv)
f_lexreps = open(full_path_name,"w",True,"utf8")
for txt_line in lexreps_csv:
f_lexreps.write(txt_line)
f_lexreps.close()
pass
def filter_DELVE_rewrites_from_lexreps(full_path_name):
print("removing DELVE rewrites from:\"" + full_path_name + "\"")
lexreps_csv = [] # filtered collection
f_lexreps = open(full_path_name,'r',True,"utf8")
for txt_line in f_lexreps:
if txt_line.find("/* Expanded previously by DELVE") != -1:
# print(txt_line) # /* Expanded previously by DELVE....;;(く|ぐ|す|た|ぶ|む|る|う)のが大変;;JPVerbEndOther;JPEndRelation;-;JPno;-;JPga;-;JPCon;Join;
line_split = txt_line.split(';')
meta = line_split[1]
lexrep = line_split[2]
line_reconstruct = ";" + meta + ";" + lexrep + ";;"
for i in range(4,len(line_split),1):
if (line_split[i]=="\n"):
continue
if (len(line_split[i])): # skip in the (exception) case of emptiness
line_reconstruct += line_split[i] + ";"
line_reconstruct += "\n"
lexrep_splits = lexrep.split('|')
# line_reconstruct += "ACTION:"+str(len(lexrep_splits)) + "\n"
for i in range(len(lexrep_splits)):
lexreps_csv.pop() # remove the last one
lexreps_csv.append(line_reconstruct) # add modified line
# pp.pprint(line_split)
else:
lexreps_csv.append(txt_line)
f_lexreps.close()
#
# rewrite
#
f_lexreps = open(full_path_name,"w",True,"utf8")
for txt_line in lexreps_csv:
f_lexreps.write(txt_line)
f_lexreps.close()
pass
def task1(language,path):
print("Processing language=\"" + language + "\" in path=\"" + path + "\"\n")
for (dirpath, dirnames, filenames) in walk(path):
for single_file in filenames:
if (single_file.endswith('.csv')):
if single_file == "lexreps.csv":
filter_literal_label_from_lexreps(os.path.join(dirpath, single_file))
# filter_doubles_from_lexreps(dirpath + single_file)
filter_DELVE_rewrites_from_lexreps(os.path.join(dirpath, single_file))
pass
def main():
currentDirectory = os.getcwd() # Get Current working Directory
if currentDirectory.find("language_development") == -1: # run from top "iknow" directory
os.chdir(currentDirectory+'/language_models')
else: # run from "language_development" directory
os.chdir(currentDirectory+'/../language_models') # Change the Current working Directory to the language_models
currentDirectory = os.getcwd()
for (dirpath, dirnames, filenames) in walk(currentDirectory):
if language_par == '*': # process them all
for single_dir in dirnames:
full_dir = os.path.join(dirpath, single_dir)
task1(single_dir, full_dir)
else:
full_dir = os.path.join(dirpath, language_par)
task1(language_par, full_dir)
break
if __name__ == '__main__':
main()
``` |
{
"source": "JosDuran/showfullscr",
"score": 2
} |
#### File: JosDuran/showfullscr/app.py
```python
from flask import Flask, send_file, flash, redirect, session, render_template, request, url_for, jsonify,json, make_response
from werkzeug import secure_filename
import sqlite3, base64
import os, io, subprocess
app = Flask(__name__)
app.secret_key = 'super secret key'
DEFAULT_PATH = os.path.dirname(os.path.realpath(__file__))
@app.route('/')
def home():
return render_template('index.html')
@app.route('/showfs', methods = ['POST'])
def shofs():
afile = request.files['selectfile']
asecure = secure_filename(afile.filename)
afile.save(asecure)
print(asecure);
image = subprocess.Popen(["feh", "--hide-pointer", "-x", "-q", "-B", "black", "-g", "1280x800", asecure])
return ('image displayed');
if __name__ == "__main__" :
app.run(debug = True, host='0.0.0.0')
``` |
{
"source": "jose-121/RECTANGULO",
"score": 2
} |
#### File: jose-121/RECTANGULO/Andres_Suarez.py
```python
def promedio (nota1, nota2, nota3, nota4):
return (nota1+nota2+nota3+nota4) / 4
``` |
{
"source": "jose1711/osm-mapper-tools",
"score": 3
} |
#### File: osm-mapper-tools/avnotes/avnotes.py
```python
import math
import sys
import os
import re
import datetime
import argparse
# ARRAY = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_~'
ARRAY = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_@'
def string2geo(s):
"""
converts string into (lat,lon) tuple
"""
code = 0
for i in range(len(s)):
try:
digit = ARRAY.index(s[i])
except:
# not sure where the ~ vs @ came from but this is to accept both
if s[i] == "~":
digit = 63
else:
raise ValueError
code = (code << 6) | digit
# align to 64bit integer
code = (code << (62 - (6 * len(s))))
x = y = 0
# deinterleaving
for i in range(61, -1, -2):
x = (x << 1) | ((code >> i) & 1)
y = (y << 1) | ((code >> (i - 1)) & 1)
lat = (y << 1) / (2 ** 32 / 180.0) - 90
lon = (x << 1) / (2 ** 32 / 360.0) - 180
return (lat, lon)
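# Rough usage sketch (illustrative; the geocode string below is made up): the short
# code embedded in an OsmAnd audio-note filename decodes to a (lat, lon) tuple, e.g.
#   lat, lon = string2geo('Sck3WF')
# Each character is a 6-bit index into ARRAY; the packed bits are de-interleaved
# into the two 32-bit coordinate integers handled above.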
aparser = argparse.ArgumentParser()
aparser.add_argument('-s', nargs=1, help='Only convert STRING and output to stdout', metavar='STRING')
args = aparser.parse_args()
if args.s:
print string2geo(args.s[0])
sys.exit()
# process files in current directory
files = []
waypoints = []
for (dirpath, dirnames, filenames) in os.walk('.'):
files.extend(filenames)
break
files.sort(key=lambda x: os.stat(x).st_mtime)
# init. counter
c = 0
# grep for files matching 3gp extension
audiofiles = filter(lambda x: re.search(r'\.3gp$', x), files)
if not audiofiles:
sys.exit(0)
print "<?xml version='1.0' encoding='UTF-8'?>"
print "<gpx version='1.1' creator='osmand2gpx.py' xmlns='http://www.topografix.com/GPX/1/1'>"
for string in map(lambda x: re.sub("(.*)\.3gp", r"\1", x), audiofiles):
basename = string
# string=re.sub("-","",string)
string = re.sub("-.*", "", string)
lat, lon = string2geo(string)
c += 1
os.system('ffmpeg -y -i ' + basename + '.3gp ' + basename + '.3gp.wav')
times = (os.stat(basename + '.3gp').st_mtime, os.stat(basename + '.3gp').st_mtime)
os.utime(basename + '.3gp.wav', times)
waypoints.append([lon, lat, basename, c, times[0]])
if len(waypoints) < 1:
sys.exit(0)
for wpt in waypoints:
lon = wpt[0]
lat = wpt[1]
basename = wpt[2]
name = wpt[3]
time = wpt[4]
print "<wpt lon='" + repr(lon) + "' lat='" + repr(lat) + "'>"
print "<time>" + str(datetime.datetime.fromtimestamp(time)).replace(' ', 'T') + 'Z' + "</time>"
print "<name>" + repr(name) + "</name>"
print "<link href='" + basename + ".3gp.wav'/>"
print "</wpt>"
print "<trk><trkseg>"
for wpt in waypoints:
lon = wpt[0]
lat = wpt[1]
basename = wpt[2]
name = wpt[3]
time = wpt[4]
print "<trkpt lon='" + repr(lon) + "' lat='" + repr(lat) + "'>"
print "<time>" + str(datetime.datetime.fromtimestamp(time)).replace(' ', 'T') + 'Z' + "</time>"
print "</trkpt>"
print "</trkseg></trk>"
print "</gpx>"
```
#### File: osm-mapper-tools/regadr2osm/regadr2osm.py
```python
import xml.etree.ElementTree as ET
from xml.dom import minidom
import sys, os
def prettify(elem):
"""Return a pretty-printed XML string for the Element.
"""
rough_string = ET.tostring(elem, 'utf-8')
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent=" ")
inputfile = sys.argv[1]
tree = ET.parse(inputfile)
root = tree.getroot()
newroot = ET.fromstring('''<osm generator="Python 2.x" version="0.6"></osm>''')
ns='http://www.egov.sk/mvsr/RA/Odpis/Ext/ObjednanieDatasetuAdresnychBodovBezZepUI.1.0'
counter = 0
for i in root.iter():
if i.tag.endswith('item') and '{{{0}}}addressPoint'.format(ns) in [x.tag for x in i.iter()]:
counter += 1
street=i.findall('./{{{0}}}streetName/{{{0}}}name'.format(ns))[0].text
streetn = i.findall('./{{{0}}}buildingNumber'.format(ns))[0].text
conscrn = i.findall('./{{{0}}}propertyRegistrationNumber'.format(ns))[0].text
city = i.findall('./{{{0}}}district/{{{0}}}itemName'.format(ns))[0].text
lat = i.findall('./{{{0}}}addressPoint/{{{0}}}BLH/{{{0}}}axisB'.format(ns))[0].text
lon = i.findall('./{{{0}}}addressPoint/{{{0}}}BLH/{{{0}}}axisL'.format(ns))[0].text
node = ET.SubElement(newroot, 'node')
node.set("id",str(counter*-1))
node.set("visible",'true')
node.set("lat",lat)
node.set("lon",lon)
citytag = ET.SubElement(node, 'tag')
streettag = ET.SubElement(node, 'tag')
streetntag = ET.SubElement(node, 'tag')
conscrntag = ET.SubElement(node, 'tag')
housenutag = ET.SubElement(node, 'tag')
citytag.set("k","addr:city")
citytag.set("v",city)
streettag.set("k","addr:street")
streettag.set("v",street)
streetntag.set("k","addr:streetnumber")
streetntag.set("v",streetn)
conscrntag.set("k","addr:conscriptionnumber")
conscrntag.set("v",conscrn)
housenutag.set("k","addr:housenumber")
housenutag.set("v",u'{}/{}'.format(conscrn,streetn))
print u'{0} {2}/{1}: {3} {4}'.format(street,streetn, conscrn, lat, lon).encode('utf-8')
newtree = ET.ElementTree(newroot)
with open(os.path.splitext(inputfile)[0]+'.osm', 'w') as outfile:
outfile.write(prettify(newroot).encode('utf-8'))
``` |
{
"source": "Jose-30/Kien-Bot-Discord-Bot-",
"score": 3
} |
#### File: Kien-Bot-Discord-Bot-/bot_tools/music.py
```python
import discord, os, youtube_dl, re
from urllib import parse, request
from discord.ext import commands
#song_list = []
#global_index = 0
class Music(commands.Cog):
def __init__(self,client):
self.client = client
@commands.command()
async def play(self, ctx, *query):
#check if user and bot is in voice channel
voiceTrue = ctx.author.voice
bot = discord.utils.get(self.client.voice_clients, guild=ctx.guild)
try:
await ctx.author.voice.channel.connect()
except:
if voiceTrue is None:
return await ctx.send("You are not in a voice channel")
if bot.is_connected():
return await ctx.send("Bot is already connected to a voice channel")
#check if mp3 file exists and delete it
song_there = os.path.isfile("song.mp3")
try:
if song_there:
os.remove("song.mp3")
except PermissionError:
await ctx.send("Wait for the current playing music to end or use the 'stop' command")
return
#modify the user's query so that youtube understands it
search = ' '.join(list(query))
print(search)
search = search.replace(' ','+')
query_string = parse.urlencode({'search_query': search})
html = request.urlopen('http://www.youtube.com/results?' + query_string)
search_results = re.findall('watch\?v=(.{11})',html.read().decode('utf-8'))
audio = "https://www.youtube.com/watch?v=" + search_results[0]
#song_list.append(audio)
#download the video
vc = ctx.voice_client
ydl_opts = {
'format': 'bestaudio/best',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192',
}],
}
#the video is converted to mp3 file
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
ydl.download([audio])
for file in os.listdir("./"):
if file.endswith(".mp3"):
os.rename(file, "song.mp3")
#the mp3 file is played
vc.play(discord.FFmpegPCMAudio("song.mp3"))
@commands.command()
async def leave(self, ctx):
bot = discord.utils.get(self.client.voice_clients, guild=ctx.guild)
if bot.is_connected():
await bot.disconnect()
else:
return await ctx.send("Bot is not connected to a voice channel")
@commands.command()
async def pause(self, ctx):
bot = discord.utils.get(self.client.voice_clients, guild=ctx.guild)
if bot.is_playing():
bot.pause()
await ctx.send("Music paused")
else:
return await ctx.send("Im not playing anything")
@commands.command()
async def resume(self, ctx):
bot = discord.utils.get(self.client.voice_clients, guild=ctx.guild)
if bot.is_paused():
bot.resume()
await ctx.send("Music resumed")
else:
return await ctx.send("Im not paused")
@commands.command()
async def stop(self, ctx):
bot = discord.utils.get(self.client.voice_clients, guild=ctx.guild)
if bot.is_playing():
bot.stop()
await ctx.send("Music stopped")
else:
await ctx.send("Im not playing anything")
def setup(client):
client.add_cog(Music(client))
``` |
{
"source": "Jose-30/PyEmailSpamer",
"score": 4
} |
#### File: Jose-30/PyEmailSpamer/main.py
```python
from modules import gmailSPAMMER
from modules import hotmailSPAMMER
from modules import yahooSPAMMER
def main():
print("What email service do you want to use?")
print("1. Gmail")
print("2. Yahoo")
print("3. Hotmail")
choice = int(input("Enter your choice: "))
if choice == 1:
print("You chose Gmail")
username = input("Enter your username: ")
password = input("Enter your password: ")
gmail = gmailSPAMMER.Gmail(username, password)
to = input("To: ")
content = input("Content: ")
count = 0
i = int(input("How many times do you want to send this email? "))
while count < i:
gmail.send(to, content)
count += 1
print("Email sent!")
elif choice == 2:
print("You chose Yahoo")
username = input("Enter your username: ")
password = input("Enter your password: ")
yahoo = yahooSPAMMER.Yahoo(username, password)
to = input("To: ")
content = input("Content: ")
count = 0
i = int(input("How many times do you want to send this email? "))
while count < i:
yahoo.send(to, content)
count += 1
print("Email sent!")
elif choice == 3:
print("You chose Hotmail")
username = input("Enter your username: ")
password = input("Enter your password: ")
hotmail = hotmailSPAMMER.Hotmail(username, password)
to = input("To: ")
content = input("Content: ")
count = 0
i = int(input("How many times do you want to send this email? "))
while count < i:
hotmail.send(to, content)
count += 1
print("Email sent!")
else:
print("Invalid choice!")
main()
``` |
{
"source": "jose-92/challenge-prework-backend-python",
"score": 4
} |
#### File: jose-92/challenge-prework-backend-python/index.py
```python
import random
def password_generator():
capitalize = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'I', 'J', 'K',
'L', 'N', 'M', 'R', 'T', 'S', 'U', 'W', 'X', 'Y', 'Z'
]
lower_case = ['a', 'b', 'd', 'e', 'f', 'g', 'i', 'j', 'k',
'l', 'n', 'm', 'r', 't', 's', 'u', 'w', 'x', 'y', 'z'
]
symbols = ['#', '[', ')', '$', '!', '/', '&', '%']
numbers = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0']
characters = capitalize + lower_case + numbers + symbols
password = []
for i in range(10):
random_characters = random.choice(characters)
password.append(random_characters)
password = ''.join(password)
return password
def main():
password = password_generator();
print('Your new password is: ' + password)
if __name__ == '__main__':
main()
``` |
{
"source": "jose96043/ThreatExchange",
"score": 2
} |
#### File: hmalib/common/image_sources.py
```python
import functools
import boto3
import requests
import typing as t
class ImageSource:
"""
An interface that is used during hashing to get an image's bytes. Subclasses
may be potentially used to store the image bytes at submission.
"""
def get_image_bytes(self, identifier: str) -> bytes:
"""
Get bytes for the image.
"""
raise NotImplementedError
@functools.lru_cache(maxsize=None)
def _get_s3_client():
# memoized so that we don't create a boto3 client unless needed.
return boto3.client("s3")
class S3BucketImageSource(ImageSource):
"""
Get images from a single S3 bucket. Formalizes the convention of including
the content_id in the S3 key. If you find yourself relying on the structure
of the s3 key to do any inference, consider moving that piece of code here.
Potential enhancements:
- Customize retry behavior, with backoff
- Parameterized credentials rather than relying on boto inference
"""
def __init__(self, bucket: str, image_prefix: str) -> None:
self.bucket = bucket
self.image_prefix = image_prefix
def get_image_bytes(self, content_id: str) -> bytes:
return (
_get_s3_client()
.get_object(Bucket=self.bucket, Key=self.get_s3_key(content_id))["Body"]
.read()
)
@staticmethod
def get_content_id_from_s3_key(s3_key: str, image_prefix: str) -> str:
"""
Useful when you have received an s3 event, so you don't have content_id,
but need to infer it.
"""
return s3_key[len(image_prefix) :]
def get_s3_key(self, content_id) -> str:
return f"{self.image_prefix}{content_id}"
def put_image_bytes(self, content_id: str, contents: bytes):
"""
This is not part of the ImageSource interface. But S3 keys have some
built in structure that must be formalized. See class docstring for
more.
"""
_get_s3_client().put_object(
Body=contents, Bucket=self.bucket, Key=self.get_s3_key(content_id)
)
class URLImageSource(ImageSource):
"""
Simple GET request to get bytes of a URL.
Potential enhancements:
- HTTP Retry configuration with backoff
- HTTP Authorization, Headers
- Customizable Keep-Alive handling (presently defaults to requests.Session
defaults)
"""
def get_image_bytes(self, identifier: str) -> bytes:
r = requests.get(identifier)
r.raise_for_status()
return r.content
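# Minimal usage sketch (illustrative only; the bucket name, prefix and URL are
# invented, and this block is not part of the original module):
#
#   s3_source = S3BucketImageSource(bucket="my-hma-bucket", image_prefix="images/")
#   s3_source.put_image_bytes("content-123", image_bytes)
#   same_bytes = s3_source.get_image_bytes("content-123")
#
#   url_source = URLImageSource()
#   remote_bytes = url_source.get_image_bytes("https://example.com/photo.jpg")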
``` |
{
"source": "jose9701/docker-pirate",
"score": 3
} |
#### File: flask-mysql/app/developers_orm.py
```python
import os
import peewee
import json
from playhouse.shortcuts import model_to_dict
database = peewee.MySQLDatabase(
"employees",
host=os.getenv("DB_HOST", "localhost"),
port=int(os.getenv("DB_PORT", 3306)),
user="root",
password="<PASSWORD>"
)
class DevelopersORM(peewee.Model):
name = peewee.CharField(255)
lastname = peewee.CharField(255)
id_type = peewee.CharField(255)
id_value = peewee.CharField(255)
area = peewee.CharField(255)
age = peewee.IntegerField()
class Meta:
database = database
db_table = "developers"
def developer_exists(self, id_type, id_value):
return DevelopersORM.select().where(
DevelopersORM.id_type == id_type,
DevelopersORM.id_value == id_value
).exists()
def create_developer(self, args):
return DevelopersORM.insert(
name=args["name"],
lastname=args["lastname"],
id_type=args["id_type"],
id_value=args["id_value"],
area=args["area"],
age=args["age"]
).execute()
def get_all_developers(self):
query = DevelopersORM.select()
results = []
for dev in query:
results.append(model_to_dict(dev))
return results
def get_developer_by_id_params(self, id_type, id_value):
results = DevelopersORM.select().where(
DevelopersORM.id_type == id_type,
DevelopersORM.id_value == id_value
).get()
return model_to_dict(results)
def update_developer(self, args):
dev = DevelopersORM.select().where(
DevelopersORM.id_type == args["id_type"],
DevelopersORM.id_value == args["id_value"]
).get()
dev.name = args["name"]
dev.lastname = args["lastname"]
dev.id_type = args["id_type"]
dev.id_value = args["id_value"]
dev.area = args["area"]
dev.age = args["age"]
return dev.save()
def delete_developer_by_id_params(self, id_type, id_value):
query = DevelopersORM.delete().where(
DevelopersORM.id_type == id_type,
DevelopersORM.id_value == id_value
)
return query.execute()
if __name__ == "__main__":
if database.table_exists("developers"):
print("Table already exists")
else:
print("Table does NOT exists")
# DevelopersORM.create_table()
devs = DevelopersORM()
args = {}
args["name"] = "Santiago"
args["lastname"] = "Garcia"
args["id_type"] = "cc"
args["id_value"] = "1234"
args["area"] = "SuperDevOps"
args["age"] = 50
print("create(cc, 1234): ", devs.create_developer(args))
print("exists(cc, 1234): ", devs.developer_exists("cc", "1234"))
print("get(cc, 1234): ", devs.get_developer_by_id_params("cc", "1234"))
args = {}
args["name"] = "Santi"
args["lastname"] = "Garci"
args["id_type"] = "cc"
args["id_value"] = "1234"
args["area"] = "DevOps"
args["age"] = 21
print("update(cc, 1234, args): ", devs.update_developer(args))
print("get(cc, 1234): ", devs.get_developer_by_id_params("cc", "1234"))
print("delete(cc, 1234): ", devs.delete_developer_by_id_params("cc", "1234"))
print("exists(cc, 1234): ", devs.developer_exists("cc", "1234"))
``` |
{
"source": "JoseAAManzano/polychar",
"score": 3
} |
#### File: src/scripts/data_processing.py
```python
import os
import re
import random
import unicodedata
import pandas as pd
from itertools import product
random.seed(404)
path = os.getcwd()
os.chdir(path)
file_path = "../../data/"
target_path = "../../processed_data/"
esp = pd.read_csv(os.path.join(file_path, 'ESP.csv'), encoding='utf-8')
eng = pd.read_csv(os.path.join(file_path, 'ENG.csv'), sep=',',
encoding='utf-8')
eus = pd.read_csv(os.path.join(file_path, 'EUS.txt'), sep='\t', header=None)
eus.columns = ['spelling', 'freq']
eus['len'] = eus['spelling'].apply(len)
# %% Normalizing eus data
eus = eus[(eus.freq > eus.freq.quantile(q=0.5))]
esp = esp[(esp.zipf > esp.zipf.quantile(q=0.5))]
eng = eng[(eng.ZipfUS > eng.ZipfUS.quantile(q=0.5))]
esp = esp[(esp.len >= 3) & (esp.len <= 10)]
eng = eng[(eng.Length >= 3) & (eng.Length <= 10)]
eus = eus[(eus.len >= 3) & (eus.len <= 10)]
def preprocess(st):
st = ''.join(c for c in unicodedata.normalize('NFD', st)
if unicodedata.category(c) != 'Mn')
st = re.sub(r"[^a-zA-Z]", r"", st)
return st.lower()
esp_words = list(set([preprocess(st) for st in esp.spelling]))
eng_words = list(set([preprocess(st) for st in eng.spelling]))
eus_words = list(set([preprocess(st) for st in eus.spelling]))
def editDistance(word1, word2):
'''
Return minimum number of edits required to transform word1 into word2
Edits include: deletion, insertion, replacement
Uses memoization to speed up the process
'''
n1, n2 = len(word1), len(word2)
memo = [[0]*(n2) for _ in range(n1)]
def minDist(i, j):
if i < 0:
return j+1
if j < 0:
return i+1
if memo[i][j]:
return memo[i][j]
if word1[i] == word2[j]:
memo[i][j] = minDist(i-1, j-1)
return memo[i][j]
memo[i][j] = 1 + min(minDist(i, j-1),
minDist(i-1, j),
minDist(i-1, j-1))
return memo[i][j]
return minDist(n1-1, n2-1)
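# Quick illustration (editorial comment, not in the original script): one
# substitution turns "casa" into "case" and one insertion turns "cat" into "cart",
# so editDistance("casa", "case") == 1 and editDistance("cat", "cart") == 1, which
# is exactly the threshold get_num_cognates() below uses to count cognate pairs.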
def get_num_cognates(vocab1, vocab2):
cognates = 0
for w1, w2 in product(vocab1, vocab2):
if editDistance(w1, w2) == 1:
cognates += 1
return cognates
# print(get_num_cognates(esp_words, eng_words))
# print(get_num_cognates(esp_words, eus_words))
# print(get_num_cognates(eus_words, eng_words))
random.shuffle(esp_words)
random.shuffle(eng_words)
random.shuffle(eus_words)
esp_words = esp_words[:min(len(esp_words), len(eng_words), len(eus_words))]
eng_words = eng_words[:min(len(esp_words), len(eng_words), len(eus_words))]
eus_words = eus_words[:min(len(esp_words), len(eng_words), len(eus_words))]
idx1 = int(len(esp_words)*0.8)
idx2 = int(len(esp_words)*0.1)
idx3 = int(len(esp_words)*0.1)
idx1 += len(esp_words) - idx1 - idx2 - idx3
assert idx1 + idx2 + idx3 == len(esp_words)
# %% First dataset
data = pd.DataFrame(columns=['data', 'label', 'split'])
data['data'] = esp_words + eng_words
data['label'] = ['ESP'] * len(esp_words) + ['ENG'] * len(eng_words)
splits = ['train']*idx1 + ['val']*idx2 + ['test'] * \
idx3 + ['train']*idx1 + ['val']*idx2 + ['test']*idx3
data['split'] = splits
data.to_csv(os.path.join(target_path, 'ESP-ENG.csv'),
index=False, encoding='utf-8')
# %% Second dataset
data = pd.DataFrame(columns=['data', 'label', 'split'])
data['data'] = esp_words + eus_words
data['label'] = ['ESP'] * len(esp_words) + ['EUS'] * len(eus_words)
splits = ['train']*idx1 + ['val']*idx2 + ['test'] * \
idx3 + ['train']*idx1 + ['val']*idx2 + ['test']*idx3
data['split'] = splits
data.to_csv(os.path.join(target_path, 'ESP-EUS.csv'),
index=False, encoding='utf-8')
``` |
{
"source": "joseaccruz/pybireport",
"score": 2
} |
#### File: pybireport/pybireport/report.py
```python
import xlsxwriter
from pybireport.styles import Style, DefaultStyleSheet
class Report:
def __init__(self, fname):
self._fname = fname
self._pages = []
self._ss = DefaultStyleSheet()
def add(self, page):
# TBD: check if a page w/ the same name exists
self._pages.append(page)
return page
def style_sheet(self, ss):
self._ss = ss
return self
def generate(self):
# Create a workbook and add a worksheet.
wb = xlsxwriter.Workbook(self._fname)
for page in self._pages:
page.generate(wb, self._ss)
wb.close()
class Page:
def __init__(self, name):
self._name = name
self._vizs = []
def add(self, viz):
self._vizs.append(viz)
return viz
def generate(self, wb, ss):
ws = wb.add_worksheet(self._name)
for viz in self._vizs:
viz.reset()
for (i, viz) in enumerate(self._vizs):
viz.generate(wb, ws, ss)
class Viz:
PLACE_ABSOLUTE = 1
PLACE_BELLOW = 2
PLACE_ABOVE = 3
PLACE_LEFT = 4
PLACE_RIGHT = 5
def __init__(self):
# how to place the Viz
self._placement = Viz.PLACE_ABSOLUTE
self._pcol, self._prow = (1, 1)
# reference Viz
self._ref = None
self._spacer_rows = 0
self._spacer_cols = 0
self._generated = False
self._tl_col, self._tl_row = (1, 1)
self._br_col, self._br_row = (1, 1)
# format & style
self._style = {}
def format(self, component, value):
if component not in self._style.keys():
# [TBD] make it a gracefull error exception
print("Error - Component '%s' not found. Choose one of %s" % (component, self._style.keys()))
self._style[component].format(value)
return self
def reset(self):
self._generated = False
def place_at(self, pcol, prow):
self._placement = Viz.PLACE_ABSOLUTE
self._pcol = pcol
self._prow = prow
return self
def place_bellow(self, viz, rows=1, align="left"):
self._placement = Viz.PLACE_BELLOW
self._ref = viz
self._spacer_rows = rows
return self
def place_left(self, viz, cols=1, align="top"):
self._placement = Viz.PLACE_LEFT
self._ref = viz
self._spacer_cols = cols
return self
def _generate(self, wb, ws, ss):
# [TBD] Raise an error here
print("Abstract class error")
quit()
def generate(self, wb, ws, ss):
if not self._generated:
self._generated = True
# compute the relative positioning
if self._placement != Viz.PLACE_ABSOLUTE:
# generate the reference viz (if not already)
self._ref.generate(wb, ws, ss)
(ul_col, ul_row, br_col, br_row) = self._ref.get_coords()
if self._placement == Viz.PLACE_BELLOW:
# TBD: honor the align parameter to compute the _pcol
self._pcol = ul_col
self._prow = br_row + 1 + self._spacer_rows
elif self._placement == Viz.PLACE_LEFT:
# TBD: honor the align parameter to compute the _pcol
self._pcol = br_col + 1 + self._spacer_cols
self._prow = br_row
# generate it's own
print("Viz: Create the viz on (%d, %d)" % (self._pcol, self._prow))
self._generate(wb, ws, ss)
else:
print("Done")
def get_coords(self):
return self._tl_col, self._tl_row, self._br_col, self._br_row
class Text(Viz):
def __init__(self, text):
super().__init__()
self._text = text
# format & style
self._style["text"] = Style("text")
# merge info
self._merge_rows = 1
self._merge_cols = 1
def merge_cols(self, cols=1):
self._merge_cols = cols
return self
def merge_rows(self, rows=1):
self._merge_rows = rows
return self
def format(self, value):
return super().format("text", value)
def _generate(self, wb, ws, ss):
# prepare the format
self._fmt_text = wb.add_format(ss.get(self._style["text"]))
# write the text
if self._merge_cols > 1 or self._merge_rows > 1:
ws.merge_range(self._prow, self._pcol, self._prow + self._merge_rows - 1, self._pcol + self._merge_cols - 1, self._text, self._fmt_text)
else:
ws.write_string(self._prow, self._pcol, self._text, self._fmt_text)
# compute the occupied area
self._tl_col, self._tl_row = self._pcol, self._prow
self._br_col, self._br_row = self._pcol + self._merge_cols, self._prow + self._merge_rows
class Table(Viz):
def __init__(self, data):
super().__init__()
self._data = data
self._title = ""
self._description = ""
self._legend = ""
# default parameters
self._merge_title = True
self._zebra = False
# format & style
self._style = {
"title": Style("table_title"),
"description": Style("table_description"),
"legend": Style("table_legend"),
"header": Style("table_header"),
"row": Style("table_row"),
"row_odd": Style("table_row_odd"),
"row_even": Style("table_row_even") }
# [TBD] allow a specific format for each column (inherit from row, row_odd, row_even)
def title(self, title, style={}):
self._title = title
self.format("title", style)
return self
def description(self, description, style={}):
self._description = description
self.format("description", style)
return self
def legend(self, legend, style={}):
self._legend = legend
self.format("legend", style)
return self
def zebra(self, on):
self._zebra = on
return self
def _generate(self, wb, ws, ss):
# [TBD] add description and legend
# setup all formats
self._fmt = {}
for k, v in self._style.items():
self._fmt[k] = wb.add_format(ss.get(v))
# start cell
(r, c) = self._prow, self._pcol
# write the title
if self._merge_title:
ws.merge_range(r, c, r, c + len(self._data.columns) - 1, self._title, self._fmt["title"])
else:
ws.write_string(r, c, self._title, self._fmt["title"])
# [TBD] this spacer should be configured in the future
r += 2
# write the header
for (i, col) in enumerate(self._data.columns):
# [TBD] allow a specific format for each header column
ws.write_string(r, c + i, col, self._fmt["header"])
r += 1
# write the data
for (i, values) in enumerate(self._data.values):
if self._zebra:
if i % 2 == 0:
fmt_cell = self._fmt["row_odd"]
else:
fmt_cell = self._fmt["row_even"]
else:
fmt_cell = self._fmt["row"]
for (j, value) in enumerate(values):
# Convert the date string into a datetime object.
# date = datetime.strptime(date_str, "%Y-%m-%d")
# [TBD] use a class parameter "col_type"
ws.write_string(r + i, c + j, str(value), fmt_cell)
#worksheet.write_datetime(row, col + 1, date, date_format )
#worksheet.write_number (row, col + 2, cost, money_format)
#row += 1
# compute the occupied area
self._tl_col, self._tl_row = self._pcol, self._prow
self._br_col, self._br_row = self._pcol + len(self._data.columns), self._prow + len(self._data) + 3
class Form(Viz):
# [TBD] finish form
def __init__(self, title, data):
super().__init__()
self._title = title
        self._data = data
    def _generate(self, wb, ws, ss):
print("Generate Label")
print("\t'%s'" % self._title)
for (k, v) in self._data.items():
print("\t%s: %s" % (k, str(v)))
# compute the occupied area
self._tl_col, self._tl_row = self._pcol, self._prow
self._br_col, self._br_row = self._pcol + 2, self._prow + len(self._data.keys()) + 2
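# --- Usage sketch (added for illustration; not part of the original file) ---
# A minimal, hedged example of composing a report with the classes above.
# It assumes pandas is installed and that DefaultStyleSheet defines the
# "text" and "table_*" styles referenced by Text and Table.
if __name__ == "__main__":
    import pandas as pd

    df = pd.DataFrame({"Product": ["A", "B"], "Units": [10, 20]})

    report = Report("example.xlsx")
    page = report.add(Page("Summary"))
    title = page.add(Text("Monthly results").merge_cols(2))
    page.add(Table(df).title("Sales").zebra(True).place_bellow(title, rows=1))
    report.generate()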
``` |
{
"source": "joseafga/thanos-python",
"score": 3
} |
#### File: joseafga/thanos-python/btconnection.py
```python
import bluetooth
def find_devices():
print('Looking for nearby devices...')
devices_list = []
devices = bluetooth.discover_devices(duration=5, lookup_names=True)
for d in devices:
devices_list.append(d + (1,))
return devices_list
def find_services():
print('Looking for nearby services...')
services = bluetooth.find_service()
services_list = []
for s in services:
services_list.append((s['host'], str(s['name']), s['port']))
return services_list
def print_table(*args):
# table header
print("\n{:2} {:<18} {:<4} {}".format('ID', 'ADDRESS', 'PORT', 'NAME'))
# create a table with all devices
i = 0 # counter
for devices in args:
if len(devices):
for device in devices:
i += 1 # increment
addr, name, port = device
print('{:>2} {:<18} {:<4} {}'.format(i, addr, port, name))
print(' ------------------------------ ') # separator
else:
print(' ----------- EMPTY ------------ ')
else:
print('{:2} Find Devices'.format(i + 1))
print('{:2} Find Services'.format(i + 2))
print(' ------------------------------ ')
def choose(devices):
print("\nType device ID or action:", end=" ")
choice = int(input().lower())
diff = choice - len(devices)
if diff > 0:
if diff == 1:
devices_nearby = find_devices()
print_table(devices_nearby)
return choose(devices_nearby)
else:
services_nearby = find_services()
print_table(services_nearby)
return choose(services_nearby)
return devices[choice - 1]
def do_connection(addr, name, port):
# show message
print("Connecting to {} on {} port {}".format(name, addr, port))
# Create the client socket
sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
sock.connect((addr, port))
# connection ok
print("Connected.\n")
return sock
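# --- Usage sketch (added for illustration; not part of the original file) ---
# Assumes PyBluez is installed, Bluetooth is enabled and a device offering an
# RFCOMM service is in range; the payload below is a placeholder.
if __name__ == '__main__':
    print_table([])                  # empty table plus the "Find Devices/Services" actions
    addr, name, port = choose([])    # let the user scan and pick an entry
    sock = do_connection(addr, name, port)
    sock.send(b'hello')              # placeholder payload
    sock.close()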
``` |
{
"source": "joseafilho/aws-cdk-python-builders",
"score": 2
} |
#### File: databases/beer_application/beer_database_rds.py
```python
from aws_cdk import (
aws_ec2 as ec2,
core as cdk
)
from aws_cdk.core import (
SecretValue,
Tags
)
import aws_cdk.aws_logs as logs
import aws_cdk.aws_rds as rds
from aws_cdk.aws_ec2 import ISecurityGroup
from aws_cdk.aws_ec2 import IVpc
from libraries.security_group.security_group_builder import SecurityGroupBuilder
class BeerRDSResources(cdk.Construct):
def __init__(self, scope: cdk.Construct, id: str, vpc: IVpc, sg_beer_backend: ISecurityGroup, **kwargs):
super().__init__(scope, id, **kwargs)
self.__id = id
self.__vpc = vpc
self.__sg_beer_backend = sg_beer_backend
self.__create_sg()
self.__create_rds_instance()
def __create_sg(self):
self.__sg_rds = SecurityGroupBuilder(
self, 'beer-rds-postgres-sg',
vpc = self.__vpc,
sg_description = 'Access RDS Postgres'
)
self.__sg_rds.add_role(
port = 5432,
sg_parent = self.__sg_beer_backend,
rule_description = 'Beer backend.'
)
def __create_rds_instance(self):
instance_name = 'beer-db'
instance_type = ec2.InstanceType.of(
ec2.InstanceClass.BURSTABLE4_GRAVITON,
ec2.InstanceSize.SMALL
)
instance = rds.DatabaseInstance(self, instance_name,
engine = rds.DatabaseInstanceEngine.postgres(
version = rds.PostgresEngineVersion.VER_12_7
),
instance_type = instance_type,
credentials = rds.Credentials.from_password(
username = 'postgres_admin',
password = SecretValue.plain_text('<PASSWORD>')
),
vpc = self.__vpc,
vpc_subnets = {
'subnet_type': ec2.SubnetType.PUBLIC
},
security_groups = [self.__sg_rds.sg],
allocated_storage = 50,
max_allocated_storage = 1000,
publicly_accessible = False,
instance_identifier = instance_name,
backup_retention = cdk.Duration.days(7),
monitoring_interval = cdk.Duration.seconds(60),
enable_performance_insights = True,
cloudwatch_logs_exports = ['postgresql'],
cloudwatch_logs_retention = logs.RetentionDays.ONE_WEEK,
deletion_protection = True,
auto_minor_version_upgrade = False
)
Tags.of(instance).add('Name', instance_name)
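# --- Usage sketch (added for illustration; not part of the original repository) ---
# A hedged example of wiring the construct into a stack; the VPC and backend
# security group below are placeholders, not resources defined in this repo.
class ExampleBeerStack(cdk.Stack):
    def __init__(self, scope: cdk.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        vpc = ec2.Vpc(self, 'example-vpc', max_azs=2)
        sg_backend = ec2.SecurityGroup(self, 'example-backend-sg', vpc=vpc)
        BeerRDSResources(self, 'beer-rds', vpc=vpc, sg_beer_backend=sg_backend)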
```
#### File: libraries/certificate_manager/certificate_builder.py
```python
from aws_cdk import (
aws_certificatemanager as cert,
core as cdk
)
from aws_cdk.core import Tags
class CertificateBuilder(cdk.Construct):
@property
def certificate(self):
return self.__certificate
def __init__(self, scope: cdk.Construct, id: str, domain: str, **kwargs):
super().__init__(scope, id, **kwargs)
self.__id = id
self.__domain_name = domain
self.__create_certificate()
def __create_certificate(self):
self.__certificate = cert.Certificate(
self, self.__id,
domain_name = self.__domain_name,
validation = cert.CertificateValidation.from_dns()
)
Tags.of(self.__certificate).add('Name', self.__id)
class CertificateFromArnBuilder(cdk.Construct):
@property
def certificate(self):
return self.__certificate
def __init__(self, scope: cdk.Construct, id: str, arn: str, **kwargs):
super().__init__(scope, id, **kwargs)
self.__id = id
self.__arn = arn
self.__create_certificate()
def __create_certificate(self):
self.__certificate = cert.Certificate.from_certificate_arn(
self, self.__id,
certificate_arn = self.__arn
)
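# --- Usage sketch (added for illustration; not part of the original repository) ---
# A hedged example using both builders inside a stack; the domain name and
# certificate ARN below are placeholders.
class ExampleCertStack(cdk.Stack):
    def __init__(self, scope: cdk.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        site_cert = CertificateBuilder(self, 'site-cert', domain='example.com').certificate
        imported_cert = CertificateFromArnBuilder(
            self, 'imported-cert',
            arn='arn:aws:acm:us-east-1:123456789012:certificate/example'
        ).certificate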
```
#### File: project-base/storages/bucket_private_ecs.py
```python
from aws_cdk import (
core as cdk,
aws_iam as iam
)
from libraries.s3.bucket_private_builder import BucketPrivateBuilder
class BucketPrivateECS(cdk.Construct):
def __init__(self, scope: cdk.Construct, id: str, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
self.__bucket_uploads = BucketPrivateBuilder(
self, 'company-uploads-bk'
)
bucket_uploads_arn = self.__bucket_uploads.bucket.bucket_arn + '/*'
self.__bucket_uploads.bucket.add_to_resource_policy(
iam.PolicyStatement(
principals = [
iam.ServicePrincipal('ecs-tasks.amazonaws.com')
],
actions = ['*'],
resources = [bucket_uploads_arn]
)
)
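# --- Usage sketch (added for illustration; not part of the original repository) ---
# A hedged example of adding the private ECS-accessible bucket to a stack.
class ExampleStorageStack(cdk.Stack):
    def __init__(self, scope: cdk.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        BucketPrivateECS(self, 'uploads')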
``` |
{
"source": "joseahr/image-rectification-app",
"score": 2
} |
#### File: opencv/mosaic/matchers.py
```python
import cv2
import numpy as np
class matchers:
def __init__(self):
self.surf = cv2.xfeatures2d.SURF_create()
FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
self.flann = cv2.FlannBasedMatcher(index_params, search_params)
def match(self, i1, i2, direction=None):
imageSet1 = self.getSURFFeatures(i1)
imageSet2 = self.getSURFFeatures(i2)
print "Direction : ", direction
matches = self.flann.knnMatch(
imageSet2['des'],
imageSet1['des'],
k=2
)
good = []
for i , (m, n) in enumerate(matches):
if m.distance < 0.7*n.distance:
good.append((m.trainIdx, m.queryIdx))
if len(good) > 4:
pointsCurrent = imageSet2['kp']
pointsPrevious = imageSet1['kp']
matchedPointsCurrent = np.float32(
[pointsCurrent[i].pt for (__, i) in good]
)
matchedPointsPrev = np.float32(
[pointsPrevious[i].pt for (i, __) in good]
)
H, s = cv2.findHomography(matchedPointsCurrent, matchedPointsPrev, cv2.RANSAC, 4)
return H
return None
def getSURFFeatures(self, im):
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
kp, des = self.surf.detectAndCompute(gray, None)
return {'kp':kp, 'des':des}
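# --- Usage sketch (added for illustration; not part of the original file) ---
# Assumes opencv-contrib-python built with SURF support and two overlapping
# images on disk; the file names are placeholders.
if __name__ == '__main__':
    left = cv2.imread('left.jpg')
    right = cv2.imread('right.jpg')
    m = matchers()
    H = m.match(left, right, direction='right')   # H maps `right` into `left`'s frame
    if H is not None:
        canvas_size = (left.shape[1] + right.shape[1], left.shape[0])
        warped = cv2.warpPerspective(right, H, canvas_size)
        cv2.imwrite('mosaic.jpg', warped)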
``` |