id (string, lengths 2–8) | text (string, lengths 16–264k) | dataset_id (string, 1 class)
---|---|---
6419438
|
from ptree.generators.tree_generator import TreeGenerator
from ptree.rptree import DirectoryTree
import argparse
import pathlib
import sys
from termcolor import colored

ERROR_BANNER = colored("ERROR:", "red")

def parse_args():
    parser = argparse.ArgumentParser(
        prog="ptree",
        description="print tree representation of dir",
        epilog="Thanks for using P Tree!",
    )
    parser.add_argument(
        "dir",
        metavar="ROOT_DIR",
        default=".",
        nargs="?",
        help="Generate a full directory tree starting at ROOT_DIR.",
    )
    parser.add_argument(
        "--max_depth",
        metavar="MAX_DEPTH",
        type=int,
        required=False,
        default=3,
        help="Max depth till where to show the tree. Valid values are > 0.",
    )
    return parser.parse_args()

def print_error_message(msg):
    message = colored(msg, "yellow")
    print(f"{ERROR_BANNER} {message}")
    sys.exit()

def main():
    args = parse_args()
    root_dir = args.dir
    if not pathlib.Path(root_dir).is_dir():
        print_error_message("The specified dir doesn't exist")
    max_depth = args.max_depth
    if max_depth <= 0:
        print_error_message(f"invalid value of max_depth: {max_depth}")
    DirectoryTree(TreeGenerator(root_dir, max_depth)).generate()
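Note that the sample defines main() but never calls it, so running the file does nothing. A typical entry-point guard, not present in the sample and added here only as an illustration, would be:

if __name__ == "__main__":
    main()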
|
StarcoderdataPython
|
6443717
|
import cv2

def change_brightness(img, value=30):
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)
    v = cv2.add(v, value)
    v[v > 255] = 255
    v[v < 0] = 0
    final_hsv = cv2.merge((h, s, v))
    img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)
    return img

for datax in range(0, 1):
    # Opens the built-in laptop camera to capture video.
    cap = cv2.VideoCapture(0)
    i = 0
    while cap.isOpened():
        ret, frame = cap.read()
        # This condition prevents infinite looping in case the video ends.
        if ret == False:
            break
        if i != 64:
            # Save frame by frame to disk using the imwrite method.
            dim = (28, 28)
            img = cv2.resize(frame, dim)
            # cv2.imwrite(str(i) + '.jpg', img)
            # img = change_brightness(img, value=-100)  # decreases brightness
            cv2.imwrite("images/" + str(i) + ".jpg", img)
            i += 1
            print(i)
        else:
            break
    cap.release()
    cv2.destroyAllWindows()

import os
import random
from PIL import Image, ImageOps
import numpy as np

for j in range(0, 8):
    for i in range(0, 8):
        # Read the images.
        img1 = cv2.imread('images/' + str(j * 8 + i) + ".jpg")
        if i != 0:
            # Horizontally concatenate images of the same height into one row.
            im_v = cv2.hconcat([im_v, img1])
        else:
            im_v = img1
    if j != 0:
        # Vertically stack the completed rows.
        im_h = cv2.vconcat([im_h, im_v])
    else:
        im_h = im_v
# Write the assembled 8x8 grid image.
cv2.imwrite('img.jpg', im_h)
|
StarcoderdataPython
|
1728331
|
<reponame>Anirban166/tstl<filename>tstl/replay.py
from __future__ import print_function
import sys
import traceback
import os
import time
# Appending current working directory to sys.path
# So that user can run randomtester from the directory where sut.py is located
current_working_dir = os.getcwd()
sys.path.append(current_working_dir)
if "--help" not in sys.argv:
import sut as SUT
def trace_lines(frame, event, arg):
if event != 'line':
return
co = frame.f_code
func_name = co.co_name
line_no = frame.f_lineno
print(' %s line %s' % (func_name, line_no))
sys.stdout.flush()
def trace_calls(frame, event, arg):
if event != 'call':
return
co = frame.f_code
func_name = co.co_name
if func_name == 'write':
# Ignore write() calls from print statements
return
line_no = frame.f_lineno
filename = co.co_filename
print('Call to %s on line %s of %s' % (func_name, line_no, filename))
sys.stdout.flush()
return trace_lines
def main():
if "--help" in sys.argv:
print(
"Usage: tstl_replay <test file> [--noCheck] [--logging loglevel] [--verbose] [--showActions] [--coverage]"
+ " [--internal] [--html directory] [--delay secs] [--trace] [--afl] [--aflswarm]")
print("Options:")
print("--noCheck: do not run property checks")
print("--logging: set the logging level for the test")
print("--verbose: run with verbose action output")
print("--hideOpaque: hide opaque values in verbose actions")
print("--showActions: show all actions")
print("--coverage: report code coverage")
print("--internal: report detailed code coverage information")
print("--html: produce HTML report on coverage")
print("--delay: delay to inject between steps")
print("--trace: trace lines executed (does not work with SUTs compiled with coverage)")
print("--afl: test is in afl format")
print("--aflswarm: test is in afl swarm format")
sys.exit(0)
sut = SUT.sut()
if not (("--coverage" in sys.argv) or ("--internal" in sys.argv)):
try:
sut.stopCoverage()
except BaseException:
pass
if ("--trace" in sys.argv):
goodToTrace = False
try:
sut.stopCoverage()
except BaseException:
goodToTrace = True
if not goodToTrace:
print(
"CANNOT TRACE WHEN SUT IS COMPILED WITH COVERAGE. REBUILD WITH --noCover")
sys.exit(1)
rout = open("replay.out", 'w')
file = sys.argv[1]
nocheck = "--noCheck" in sys.argv
verbose = "--verbose" in sys.argv
logLevel = None
if "--logging" in sys.argv:
lastWasLogging = False
for line in sys.argv:
if lastWasLogging:
logLevel = int(line)
if line == "--logging":
lastWasLogging = True
else:
lastWasLogging = False
delay = None
if "--delay" in sys.argv:
lastWasDelay = False
for line in sys.argv:
if lastWasDelay:
delay = float(line)
if line == "--delay":
lastWasDelay = True
else:
lastWasDelay = False
htmlOut = None
lastWasHtml = False
for f in sys.argv[1:]:
if lastWasHtml:
htmlOut = f
lastWasHtml = False
elif f == "--html":
lastWasHtml = True
else:
lastWasHtml = False
sut.restart()
if logLevel is not None:
sut.setLog(logLevel)
i = 0
if verbose:
sut.verbose(True)
if "--hideOpaque" in sys.argv:
sut.verboseOpaque(False)
if "--afl" not in sys.argv:
with open(file, 'r') as f:
theTest = f.readlines()
else:
readTest = sut.loadTest(
file, afl=True, swarm=("--aflswarm" in sys.argv))
theTest = list(map(lambda x: x[0] + "\n", readTest))
for line in theTest:
name = line[:-1]
if name == "<<RESTART>>":
if "--showActions" in sys.argv:
print("<<RESTART>>")
# print "RESTART"
rout.write("<<RESTART>>\n")
rout.flush()
sut.restart()
else:
if verbose:
print("STEP #" + str(i) + ":", end=' ')
rout.write(line)
rout.flush()
action = sut.playable(name)
if "--showActions" in sys.argv:
print(sut.prettyName(action[0]))
if action[1](): # check the guard
if "--trace" in sys.argv:
sys.settrace(trace_calls)
stepOk = sut.safely(action)
if "--trace" in sys.argv:
sys.settrace(None)
if not stepOk:
print("FAILED STEP")
print(sut.failure())
traceback.print_tb(sut.failure()[2], file=sys.stdout)
if "--internal" in sys.argv:
sut.internalReport()
if "--coverage" in sys.argv:
print(sut.report("coverage.out"), "PERCENT COVERED")
if htmlOut is not None:
sut.htmlReport(htmlOut)
sys.exit(255)
if not nocheck:
checkResult = sut.check()
if not checkResult:
print("FAILED PROPERTY")
print(sut.failure())
traceback.print_tb(sut.failure()[2], file=sys.stdout)
if "--internal" in sys.argv:
sut.internalReport()
if "--coverage" in sys.argv:
print(sut.report("coverage.out"), "PERCENT COVERED")
if htmlOut is not None:
sut.htmlReport(htmlOut)
sys.exit(255)
if delay is not None:
time.sleep(delay)
i += 1
rout.write("TEST REPLAYED SUCCESSFULLY\n")
rout.close()
if "--internal" in sys.argv:
sut.internalReport()
if "--coverage" in sys.argv:
print(sut.report("coverage.out"), "PERCENT COVERED")
if htmlOut is not None:
sut.htmlReport(htmlOut)
sys.exit(0)
|
StarcoderdataPython
|
4960863
|
<reponame>radmirnovii/databend
#!/usr/bin/env python3
import os
import sys
import signal
CURDIR = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(CURDIR, '../../helpers'))
from client import client
log = None
# uncomment the line below for debugging
log = sys.stdout
client1 = client(name='client1>', log=log)
sqls = """
DROP DATABASE IF EXISTS db1;
CREATE DATABASE db1;
USE db1;
CREATE TABLE IF NOT EXISTS t1(a String, b String, c String, d String, e String, f String, g String, h String) Engine = Memory;
INSERT INTO t1 (a,b,c,d,e,f,g,h) VALUES('1','2','3','4','2021-08-15', '2021-09-15', '2021-08-15 10:00:00', 'string1234'),
('5','6','7','8','2021-10-15', '2021-11-15', '2021-11-15 10:00:00', 'string5678');
INSERT INTO t1(a,b,c,d,e,f,g,h) select * from t1;
SELECT COUNT(1) = 4 from t1;
DROP DATABASE db1;
"""
client1.run(sqls)
stdout, stderr = client1.run_with_output("select * from system.metrics")
assert stdout is not None
assert stderr is None
|
StarcoderdataPython
|
166003
|
<gh_stars>1-10
# -*- encoding: utf-8 -*-
"""
Copyright (c) 2019 - present AppSeed.us
License: https://github.com/app-generator/license-eula
"""
class COMMON:
NULL = None # not set
NA = -1 # not set
OK = 0 # all ok
ERR = 1 # not ok
NOT_FOUND = 2 # file or directory not found
INPUT_ERR = 3 # input error
# Recover errors for COMMON class
def errorinfo( aErrorCode ):
if COMMON.NA == aErrorCode: return 'Not Set'
if COMMON.ERR == aErrorCode: return 'Error Generic'
if COMMON.OK == aErrorCode: return 'OK'
if COMMON.NOT_FOUND == aErrorCode: return 'Not Found'
if COMMON.INPUT_ERR == aErrorCode: return 'Input error'
return 'UNKN Err ' + str( aErrorCode )
'''
Order is important - start with long paths
'''
ASSETS_MAP = {
'../../../assets' : '/static/assets',
'../../assets' : '/static/assets',
'../assets' : '/static/assets',
'./assets' : '/static/assets',
'../../../css' : '/static/assets/css',
'../../css' : '/static/assets/css',
'../css' : '/static/assets/css',
'./css' : '/static/assets/css',
'../../../vendor' : '/static/assets/vendor',
'../../vendor' : '/static/assets/vendor',
'../vendor' : '/static/assets/vendor',
'./vendor' : '/static/assets/vendor',
'../../../lib' : '/static/assets/lib',
'../../lib' : '/static/assets/lib',
'../lib' : '/static/assets/lib',
'./lib' : '/static/assets/lib',
'lib/' : '/static/assets/lib/',
'../../js' : '/static/assets/js',
'../js' : '/static/assets/js',
'./js' : '/static/assets/js',
'../../../img' : '/static/assets/img',
'../../img' : '/static/assets/img',
'../img' : '/static/assets/img',
'./img' : '/static/assets/img',
'../../node_modules' : '/static/assets/node_modules',
'../node_modules' : '/static/assets/node_modules',
'./node_modules' : '/static/assets/node_modules'
}
class BS_TAG:
def __init__(self, aTag):
self._tag = aTag
self.href = ''
self.rel = ''
self.type = ''
self.is_local = True
self.is_link = False
self.is_canonical = False
self.is_css = False
self.is_img = False
self.is_js = False
self.is_icon = False
self.is_manifest = False
self.path = ''
self.file = ''
self.ext = ''
self.data = {}
def printme(self):
print('>>> TAG Info')
if self.is_css:
print(' - Type = CSS' )
if self.is_img:
print(' - Type = IMAGE' )
if self.is_js:
print(' - Type = JS' )
if self.is_icon:
print(' - Type = ICON' )
if self.is_manifest:
print(' - Type = MANIFEST' )
if self.is_local:
print(' - Storage = LOCAL' )
else:
print(' - Storage = REMOTE' )
print(' - HREF = ' + self.href )
print(' - PATH = ' + self.path )
print(' - FILE = ' + self.file )
print(' - EXT = ' + self.ext )
def info(self):
ret = 'UNKN '
if self.is_css:
ret = 'CSS '
if self.is_img:
ret = 'IMAGE '
if self.is_js:
ret = 'JS '
if self.is_link:
ret = 'LINK '
if self.is_canonical:
ret = 'CANNONICAL '
if self.is_icon:
ret = 'ICON '
if self.is_manifest:
ret = 'MANIFEST '
if self.is_local:
ret += '(local) '
else:
ret += '(www) '
if self.is_canonical or self.is_link:
ret += self.href
else:
ret += '[' + self.file + '] ' + self.href + ' -> [' + str(self.path) + '] '
return ret
class TMPL:
def __init__(self, aFile=''):
self.file = aFile
self.title = ''
self.css = []
self.js = []
self.img = []
self.links = []
self.all_files = [] # used to fix the file internal links
self.err = [] # used to report missing assets
self.err_links = [] # used to report missing assets
def __repr__(self):
errors = len( self.err) + len( self.err_links)
return "" + self.file + ' | css=' + str(len( self.css )) + ' / js=' + str(len( self.js )) + ' / img=' + str(len( self.img )) + ' - ('+str(errors)+') ERRORS'
|
StarcoderdataPython
|
3583059
|
<filename>system_test_progress_tracking/tm_api/admin.py
from django.contrib import admin
from .models import (
Machine,
Test,
Scenario,
MasterScenario,
DryRunData,
)
admin.site.register(Machine)
admin.site.register(Test)
admin.site.register(Scenario)
admin.site.register(MasterScenario)
admin.site.register(DryRunData)
|
StarcoderdataPython
|
3436071
|
<reponame>ydong08/PythonCode
#!/usr/bin/python
# encoding=utf-8
from SocketServer import TCPServer, ForkingMixIn, StreamRequestHandler
import time

class Server(ForkingMixIn, TCPServer):  # custom Server class
    pass

class MyHandler(StreamRequestHandler):
    def handle(self):  # override the handle method
        addr = self.request.getpeername()
        print 'Get connection from', addr  # print the client address
        time.sleep(5)  # sleep for 5 seconds
        self.wfile.write('This is a ForkingMixIn tcp socket server')  # send the reply

host = ''
port = 1234
server = Server((host, port), MyHandler)
server.serve_forever()  # start listening and handling connections
|
StarcoderdataPython
|
9764066
|
<gh_stars>1-10
import copy
import torch
import torch.nn as nn
from others.transformers import BertModel, BertConfig
from others.transformers import RobertaModel, RobertaConfig
from torch.nn.init import xavier_uniform_
from models.decoder import TransformerDecoder
from models.encoder import Classifier, ExtTransformerEncoder
from models.optimizers import Optimizer
def build_optim(args, model, checkpoint):
""" Build optimizer """
#if checkpoint is not None:
if False:
optim = checkpoint['optim'][0]
saved_optimizer_state_dict = optim.optimizer.state_dict()
optim.optimizer.load_state_dict(saved_optimizer_state_dict)
if args.visible_gpus != '-1':
for state in optim.optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = v.cuda()
if (optim.method == 'adam') and (len(optim.optimizer.state) < 1):
raise RuntimeError(
"Error: loaded Adam optimizer from existing model" +
" but optimizer state is empty")
else:
optim = Optimizer(
args.optim, args.lr, args.max_grad_norm,
beta1=args.beta1, beta2=args.beta2,
decay_method='noam',
warmup_steps=args.warmup_steps)
optim.set_parameters(list(model.named_parameters()))
return optim
def build_optim_bert(args, model, checkpoint):
""" Build optimizer """
#if checkpoint is not None:
if False:
optim = checkpoint['optims'][0]
saved_optimizer_state_dict = optim.optimizer.state_dict()
optim.optimizer.load_state_dict(saved_optimizer_state_dict)
if args.visible_gpus != '-1':
for state in optim.optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = v.cuda()
if (optim.method == 'adam') and (len(optim.optimizer.state) < 1):
raise RuntimeError(
"Error: loaded Adam optimizer from existing model" +
" but optimizer state is empty")
else:
optim = Optimizer(
args.optim, args.lr_bert, args.max_grad_norm,
beta1=args.beta1, beta2=args.beta2,
decay_method='noam',
warmup_steps=args.warmup_steps_bert)
params = [(n, p) for n, p in list(model.named_parameters()) if n.startswith('bert.model')]
optim.set_parameters(params)
return optim
def build_optim_dec(args, model, checkpoint):
""" Build optimizer """
#if checkpoint is not None:
if False:
optim = checkpoint['optims'][1]
saved_optimizer_state_dict = optim.optimizer.state_dict()
optim.optimizer.load_state_dict(saved_optimizer_state_dict)
if args.visible_gpus != '-1':
for state in optim.optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = v.cuda()
if (optim.method == 'adam') and (len(optim.optimizer.state) < 1):
raise RuntimeError(
"Error: loaded Adam optimizer from existing model" +
" but optimizer state is empty")
else:
optim = Optimizer(
args.optim, args.lr_dec, args.max_grad_norm,
beta1=args.beta1, beta2=args.beta2,
decay_method='noam',
warmup_steps=args.warmup_steps_dec)
params = [(n, p) for n, p in list(model.named_parameters()) if not n.startswith('bert.model')]
optim.set_parameters(params)
return optim
class SpanPositionalEncoding():
def __init__(self, itos, padding_idx):
self.itos = itos
self.padding_idx = padding_idx
def __call__(self, x):
# print(x.shape)
seq_len, batch_size = x.shape
pos_enc = torch.zeros_like(x)
for b in range(batch_size):
for i in range(seq_len):
if '##' not in self.itos[x[i, b].item()] or i == 0:
pos_enc[i, b] = 0
else:
if i > 0 and x[i, b].item() != self.padding_idx and '##' in self.itos[x[i, b].item()]:
pos_enc[i, b] = 1 + pos_enc[i-1, b]
else:
pos_enc[i, b] = 0
return pos_enc
@staticmethod
def decay(pos, prev, curr, decay_rate=0.1):
"""Copy rate decaying for current step.
Arguments:
pos {[type]} -- [S, B]
prev {[type]} -- copy rate for last step, [T-1, B, S]
curr {[type]} -- copy rate for current step, [T, B, S]
Keyword Arguments:
decay_rate {float} -- [description] (default: {0.1})
Returns:
[type] -- new copy rate for current step, [T, B, S]
"""
steps = curr.size(0)
print ('curr_shape: ', curr.shape)
print ('prev_shape: ', prev.shape)
print ('pos_shape: ', pos.shape)
residual = torch.zeros_like(curr) # [T, B, S]
mask = torch.zeros_like(curr) # [T, B, S]
residual[1:, ..., 1:] += prev[..., :-1] * decay_rate # [T, B, S]
# Only if the current step is within the same span of the last step.
flag = (pos[1:] > pos[:-1]).float() # [S-1, B]
mask[-1:, ..., 1:] += flag.transpose(0, 1).unsqueeze(0).repeat([1, 1, 1])
mask = (mask == 1.0)
new = residual + (1 - decay_rate) * curr
ans = torch.where(mask, new, curr)
return torch.softmax(ans, dim=-1)
def get_generator(vocab_size, dec_hidden_size, device):
gen_func = nn.LogSoftmax(dim=-1)
generator = nn.Sequential(
nn.Linear(dec_hidden_size, vocab_size),
gen_func
)
generator.to(device)
return generator
class CopyGenerator(nn.Module):
def __init__(self, vocab_size, d_model):
super(CopyGenerator, self).__init__()
self.vocab_size = vocab_size
self.gen_proj = nn.Linear(d_model, vocab_size)
self.prob_proj = nn.Linear(d_model*2, 1)
self.sig_proj = nn.Sigmoid()
self.gen_softmax = nn.Softmax(dim=-1)
self.copy_softmax = nn.Softmax(dim=-1)
def forward(self, src, decode_output, decode_attn, memory):
decode_attn = torch.mean(decode_attn, dim=1)
batch_size, steps, seq = decode_attn.size()
src = src.unsqueeze(1).repeat([1, steps, 1])
# vocab
gen_logits = self.gen_proj(decode_output)
copy_logits = torch.zeros_like(gen_logits)
context = torch.matmul(decode_attn, memory)
copy_logits = copy_logits.scatter_add(2, src, decode_attn)
prob = self.sig_proj(self.prob_proj(torch.cat([context, decode_output], -1)))
gen_logits = prob * self.gen_softmax(gen_logits)
copy_logits = (1 - prob) * self.copy_softmax(copy_logits)
final_logits = gen_logits + copy_logits
return torch.log(final_logits.squeeze(1).contiguous().view(-1, self.vocab_size))
class Bert(nn.Module):
def __init__(self, large, temp_dir, model_pth=None, finetune=False):
super(Bert, self).__init__()
if(large):
self.model = BertModel.from_pretrained(model_pth, cache_dir=temp_dir)
else:
self.model = BertModel.from_pretrained(model_pth, cache_dir=temp_dir)
self.finetune = finetune
def forward(self, x, segs, mask):
if(self.finetune):
top_vec, _ = self.model(x, mask)
else:
self.eval()
with torch.no_grad():
top_vec, _ = self.model(x, mask)
return top_vec
class Roberta(nn.Module):
def __init__(self, large, temp_dir, model_pth=None, finetune=False):
super(Roberta, self).__init__()
if(large):
self.model = RobertaModel.from_pretrained(model_pth, cache_dir=temp_dir)
else:
self.model = RobertaModel.from_pretrained(model_pth, cache_dir=temp_dir)
self.finetune = finetune
def forward(self, x, segs, mask):
if(self.finetune):
top_vec, _ = self.model(x, attention_mask=mask)
else:
self.eval()
with torch.no_grad():
top_vec, _ = self.model(x, attention_mask=mask)
return top_vec
class ExtSummarizer(nn.Module):
def __init__(self, args, device, checkpoint):
super(ExtSummarizer, self).__init__()
self.args = args
self.device = device
self.bert = Bert(args.large, args.temp_dir, args.finetune_bert)
self.ext_layer = ExtTransformerEncoder(self.bert.model.config.hidden_size, args.ext_ff_size, args.ext_heads,
args.ext_dropout, args.ext_layers)
if (args.encoder == 'baseline'):
bert_config = BertConfig(self.bert.model.config.vocab_size, hidden_size=args.ext_hidden_size,
num_hidden_layers=args.ext_layers, num_attention_heads=args.ext_heads, intermediate_size=args.ext_ff_size)
self.bert.model = BertModel(bert_config)
self.ext_layer = Classifier(self.bert.model.config.hidden_size)
if(args.max_pos>512):
my_pos_embeddings = nn.Embedding(args.max_pos, self.bert.model.config.hidden_size)
my_pos_embeddings.weight.data[:512] = self.bert.model.embeddings.position_embeddings.weight.data
my_pos_embeddings.weight.data[512:] = self.bert.model.embeddings.position_embeddings.weight.data[-1][None,:].repeat(args.max_pos-512,1)
self.bert.model.embeddings.position_embeddings = my_pos_embeddings
if checkpoint is not None:
self.load_state_dict(checkpoint['model'], strict=True)
else:
if args.param_init != 0.0:
for p in self.ext_layer.parameters():
p.data.uniform_(-args.param_init, args.param_init)
if args.param_init_glorot:
for p in self.ext_layer.parameters():
if p.dim() > 1:
xavier_uniform_(p)
self.to(device)
def forward(self, src, segs, clss, mask_src, mask_cls):
top_vec = self.bert(src, segs, mask_src)
sents_vec = top_vec[torch.arange(top_vec.size(0)).unsqueeze(1), clss]
sents_vec = sents_vec * mask_cls[:, :, None].float()
sent_scores = self.ext_layer(sents_vec, mask_cls).squeeze(-1)
return sent_scores, mask_cls
class AbsSummarizer(nn.Module):
def __init__(self, args, device, checkpoint=None, bert_from_extractive=None, ids_to_tokens=None):
super(AbsSummarizer, self).__init__()
self.args = args
self.device = device
if args.train_from == "bart":
self.bert = RobertaModel(RobertaConfig("/home/lcl193798/PreRobertaSummMaro/src/config.json"))
elif args.encoder == 'bert' or args.encoder == 'zh_bert':
self.bert = Bert(args.large, args.temp_dir, args.model_pth, args.finetune_bert)
elif args.encoder == 'roberta':
self.bert = Roberta(args.large, args.temp_dir, args.model_pth, args.finetune_bert)
if (args.encoder == 'baseline'):
bert_config = BertConfig(self.bert.model.config.vocab_size, hidden_size=args.enc_hidden_size,
num_hidden_layers=args.enc_layers, num_attention_heads=8,
intermediate_size=args.enc_ff_size,
hidden_dropout_prob=args.enc_dropout,
attention_probs_dropout_prob=args.enc_dropout)
self.bert.model = BertModel(bert_config)
if(args.max_pos>512):
my_pos_embeddings = nn.Embedding(args.max_pos, self.bert.model.config.hidden_size)
my_pos_embeddings.weight.data[:512] = self.bert.model.embeddings.position_embeddings.weight.data
my_pos_embeddings.weight.data[512:] = self.bert.model.embeddings.position_embeddings.weight.data[-1][None,:].repeat(args.max_pos-512,1)
self.bert.model.embeddings.position_embeddings = my_pos_embeddings
if args.train_from == "bart":
self.vocab_size = self.bert.config.vocab_size
else:
self.vocab_size = self.bert.model.config.vocab_size
if args.encoder == 'roberta':
if args.train_from == "bart":
tgt_embeddings = nn.Embedding(self.vocab_size, self.bert.config.hidden_size, padding_idx=1)
else:
tgt_embeddings = nn.Embedding(self.vocab_size, self.bert.model.config.hidden_size, padding_idx=1)
else:
tgt_embeddings = nn.Embedding(self.vocab_size, self.bert.model.config.hidden_size, padding_idx=0)
if (self.args.share_emb) and self.args.train_from != 'bart':
tgt_embeddings.weight = copy.deepcopy(self.bert.model.embeddings.word_embeddings.weight)
self.decoder = TransformerDecoder(
self.args.dec_layers,
self.args.dec_hidden_size, heads=self.args.dec_heads,
d_ff=self.args.dec_ff_size, dropout=self.args.dec_dropout, embeddings=tgt_embeddings, train_from=self.args.train_from)
'''
else:
args_bart = checkpoint['args_bart']
tgt_embeddings = nn.Embedding(self.vocab_size, self.bert.model.config.hidden_size, padding_idx=1)
dictionary = [0]*self.vocab_size
self.decoder = TransformerBartDecoder(args_bart, dictionary, tgt_embeddings)
'''
if self.args.p_gen:
self.generator = CopyGenerator(self.vocab_size, self.args.dec_hidden_size)
#print (self.generator.gen_proj)
self.generator.gen_proj.weight = self.decoder.embeddings.weight
else:
self.generator = get_generator(self.vocab_size, self.args.dec_hidden_size, device)
#print (self.generator)
#print (self.generator[0])
self.generator[0].weight = self.decoder.embeddings.weight
if checkpoint is not None:
self.load_state_dict(checkpoint['model'], strict=False)
else:
for module in self.decoder.modules():
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
for p in self.generator.parameters():
if p.dim() > 1:
xavier_uniform_(p)
else:
p.data.zero_()
if(args.use_bert_emb) and args.train_from != 'bart':
if args.encoder == "roberta":
tgt_embeddings = nn.Embedding(self.vocab_size, self.bert.model.config.hidden_size, padding_idx=1)
else:
tgt_embeddings = nn.Embedding(self.vocab_size, self.bert.model.config.hidden_size, padding_idx=0)
tgt_embeddings.weight = copy.deepcopy(self.bert.model.embeddings.word_embeddings.weight)
self.decoder.embeddings = tgt_embeddings
if self.args.p_gen:
self.generator.gen_proj.weight = self.decoder.embeddings.weight
else:
self.generator[0].weight = self.decoder.embeddings.weight
if bert_from_extractive is not None:
#print ([n for n, p in bert_from_extractive.items()])
self.bert.model.load_state_dict(
dict([(n[5:], p) for n, p in bert_from_extractive.items() if n.startswith('bert')]), strict=True)
self.to(device)
def forward(self, src, tgt, mask_src, mask_tgt):
top_vec = self.bert(src, None, mask_src)
dec_state = self.decoder.init_decoder_state(src, top_vec)
decoder_outputs, attns, state = self.decoder(tgt[:, :-1], top_vec, dec_state)
return decoder_outputs, attns[-1], top_vec, None
|
StarcoderdataPython
|
4864987
|
<gh_stars>0
from microbit import *
import utime
import machine
import music
class Robit:
PRESCALE_REG = 0xFE
MODE_1_REG = 0x00
SRV_REG_BASE = 0x08
MOT_REG_BASE = 0x28
REG_OFFSET = 4
SERVO_MULTIPLIER = 226
SERVO_ZERO_OFFSET = 0x66
chipAddress = 0x40
initialised = False
stepInit = False
stepStage = 0
stepper1Steps = 200
stepper2Steps = 200
lineFollowLeftPin = pin13
lineFollowRightPin = pin14
Jpin = {'J1': (pin13, pin14), 'J2': (pin15, pin16), 'J3': (pin1, pin2), 'J4': (pin3, pin4)}
def __init__(self):
buf = bytearray(2)
buf[0] = self.PRESCALE_REG
buf[1] = 0x85 # 50Hz
i2c.write(self.chipAddress, buf, False)
for blockReg in range(0xFA, 0xFE, 1):
buf[0] = blockReg
buf[1] = 0x00
i2c.write(self.chipAddress, buf, False)
buf[0] = self.MODE_1_REG
buf[1] = 0x01
i2c.write(self.chipAddress, buf, False)
self.initialised = True
def servoWrite(self, servo, degrees):
if self.initialised is False:
self.__init__()
buf = bytearray(2)
calcServo = self.SRV_REG_BASE + ((servo - 1) * self.REG_OFFSET)
HighByte = False
PWMVal = (degrees * 100 * self.SERVO_MULTIPLIER) / (10000 + self.SERVO_ZERO_OFFSET)
if (PWMVal > 0xFF):
HighByte = True
buf[0] = calcServo
buf[1] = int(PWMVal)
i2c.write(self.chipAddress, buf, False)
buf[0] = calcServo + 1
if (HighByte):
buf[1] = 0x01
else:
buf[1] = 0x00
i2c.write(self.chipAddress, buf, False)
def motorOn(self, motor, direction, speed):
if self.initialised is False:
self.__init__()
buf = bytearray(2)
motorReg = self.MOT_REG_BASE + (2 * (motor - 1) * self.REG_OFFSET)
HighByte = False
OutputVal = speed * 40
if direction == "forward":
if OutputVal > 0xFF:
HighByte = True
HighOutputVal = int(OutputVal/256)
buf[0] = motorReg
buf[1] = int(OutputVal)
i2c.write(self.chipAddress, buf, False)
buf[0] = motorReg + 1
if HighByte:
buf[1] = HighOutputVal
else:
buf[1] = 0x00
i2c.write(self.chipAddress, buf, False)
for offset in range(4, 6, 1):
buf[0] = motorReg + offset
buf[1] = 0x00
i2c.write(self.chipAddress, buf, False)
elif direction == "reverse":
if OutputVal > 0xFF:
HighByte = True
HighOutputVal = int(OutputVal/256)
buf[0] = motorReg + 4
buf[1] = int(OutputVal)
i2c.write(self.chipAddress, buf, False)
buf[0] = motorReg + 5
if HighByte:
buf[1] = HighOutputVal
else:
buf[1] = 0x00
i2c.write(self.chipAddress, buf, False)
for offset2 in range(0, 2, 1):
buf[0] = motorReg + offset2
buf[1] = 0x00
i2c.write(self.chipAddress, buf, False)
def motorOff(self, motor):
buf = bytearray(2)
motorReg = self.MOT_REG_BASE + (2 * (motor - 1) * self.REG_OFFSET)
for offset3 in range(0, 2, 1):
buf[0] = motorReg + offset3
buf[1] = 0x00
i2c.write(self.chipAddress, buf, False)
for offset4 in range(4, 6, 1):
buf[0] = motorReg + offset4
buf[1] = 0x00
i2c.write(self.chipAddress, buf, False)
def allOff(self):
buf = bytearray(2)
servoOffCount = 0
servoRegCount = 0
for motors in range(1, 5, 1):
self.motorOff(motors)
while servoOffCount < 8:
for offset5 in range(0, 2, 1):
buf[0] = self.SRV_REG_BASE + servoRegCount + offset5
buf[1] = 0x00
i2c.write(self.chipAddress, buf, False)
servoRegCount += 4
servoOffCount += 1
def Ultrasonic(self, jpin):
pin = self.Jpin.get(jpin)[1]
# send pulse
pin.write_digital(0)
utime.sleep_us(2)
pin.write_digital(1)
utime.sleep_us(10)
pin.write_digital(0)
# Get the duration, in microseconds, of a pulse high from one of the pin
distance = machine.time_pulse_us(pin, 1, 23000) / 58
return distance
def init_line_follow(self, jpin):
self.lineFollowLeftPin = self.Jpin.get(jpin)[0]
self.lineFollowRightPin = self.Jpin.get(jpin)[1]
def left_line_follow(self):
return self.lineFollowLeftPin.read_digital()
def right_line_follow(self):
return self.lineFollowRightPin.read_digital()
def sound_r2d2(self):
tune = ["A7:0", "G7:0", "E7:0", "C7:0",
"D7:0", "B7:0", "F7:0", "C8:0",
"A7:0", "G7:0", "E7:0", "C7:0",
"D7:0", "B7:0", "F7:0", "C8:0"]
music.play(tune)
def sound_bip(self):
for i in range(2):
freq = 2000
while freq > 1000:
music.pitch(int(freq), 10)
freq *= 0.95
freq = 1000
while freq < 3000:
music.pitch(int(freq), 10)
freq *= 1.05
def light_level(self):
level = pin10.read_analog()
return level
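A rough usage sketch of the Robit class above, assuming the code runs on a micro:bit with the matching expansion board wired up; the sketch itself is illustrative and not part of the sample:

robot = Robit()
robot.servoWrite(1, 90)          # centre servo 1
robot.motorOn(1, "forward", 5)   # run motor 1 forward at low speed
sleep(1000)                      # microbit.sleep() works in milliseconds
robot.motorOff(1)
print(robot.Ultrasonic('J1'))    # distance in cm from a sensor on header J1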
|
StarcoderdataPython
|
3417560
|
print "Hello, Pyhons!"
|
StarcoderdataPython
|
371037
|
import json
import cassandra
import sys
from cassandra.cluster import Cluster
import os
def main():
#create database connection
cluster = Cluster()
session = cluster.connect()
#CREATE KEYSPACE IF NOT EXISTS hash WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor':1};
session.execute("""
CREATE KEYSPACE IF NOT EXISTS hash WITH REPLICATION = { 'class': 'SimpleStrategy', 'replication_factor': '1'}
""")
#use keyspace 'hash' so the tables below are created inside it
session.set_keyspace('hash')
session.execute('''
CREATE TABLE public_recipe (
id text,
name text,
time int,
imageurl text,
ingredients list<text>,
numberofserving int,
flavor text,
instruction text,
PRIMARY KEY(id) )
''')
session.execute('''
CREATE TABLE user (
id uuid,
username text,
email text,
password <PASSWORD>,
favorite text,
ingredients text,
PRIMARY KEY(username) )
''')
session.execute('''
CREATE custom index fn_contains on public_recipe(name) using 'org.apache.cassandra.index.sasi.SASIIndex' with OPTIONS = {
'mode': 'CONTAINS',
'analyzer_class': 'org.apache.cassandra.index.sasi.analyzer.NonTokenizingAnalyzer',
'case_sensitive': 'false'}
''')
#close database connection
cluster.shutdown()
if __name__ == '__main__':
main()
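As a hedged illustration of how the created schema might be exercised afterwards (not part of the original script; it assumes the keyspace above exists and uses the same cassandra-driver session API):

from cassandra.cluster import Cluster
cluster = Cluster()
session = cluster.connect('hash')
session.execute(
    "INSERT INTO public_recipe (id, name, time) VALUES (%s, %s, %s)",
    ('recipe-1', 'Pancakes', 20),
)
row = session.execute("SELECT name, time FROM public_recipe WHERE id = %s", ('recipe-1',)).one()
print(row.name, row.time)
cluster.shutdown()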
|
StarcoderdataPython
|
3556586
|
from django.conf import settings as django_setting
__all__ = (
'LIKES_MODELS',
'LIKES_REST_PAGINATION_CLASS'
)
LIKES_MODELS = getattr(
django_setting,
'LIKES_MODELS',
{}
)
LIKES_REST_PAGINATION_CLASS = getattr(
django_setting,
'LIKES_REST_PAGINATION_CLASS',
None
)
|
StarcoderdataPython
|
320175
|
<gh_stars>0
import easyocr
import os
reader = easyocr.Reader(['en'])
images=os.listdir(r'NIC_Images')
def get_data(data:list):
Name=Father_Name=Gender=Id_Number=D_O_Birth=D_O_Issue=D_O_Expiry='-'
gaurdian='Father Name'
#father name
if list(filter(lambda x: 'Father' in x, data)):
Father_Name=data[data.index(list(filter(lambda x: 'Father' in x, data))[0])+1]
elif list(filter(lambda x: 'Husband' in x, data)):
Father_Name=data[data.index((list(filter(lambda x: 'Husband' in x, data)))[0])+1]
gaurdian="Husband Name"
Gender='F'
#name
if "Name" in data:
Name=data[data.index("Name")+1]
if Name == "-":
_name=list(filter(lambda x: 'Nam' in x, data))
if _name and not bool(list(filter(lambda x: 'Father' in x, _name))):
Name=data[data.index(list(filter(lambda x: 'Nam' in x, data))[0])+1]
elif Father_Name != "-":
Name=data[data.index(list(filter(lambda x: 'Father' in x, data))[0])-1]
#gender
if "F" in data:
Gender='F'
elif "M" in data:
Gender='M'
#id_number
Id_Number=list(filter(lambda x: '-' in x, data))[0]
#dates
try:
D_O_Birth,D_O_Issue,D_O_Expiry=list(filter(lambda x: '.' in x, data))
except:pass
#assign data
data_dic={
'Name':Name,
gaurdian:Father_Name,
'Gender':Gender,
'Identity Number':Id_Number,
'Date Of Birth':D_O_Birth,
'Date Of Issue':D_O_Issue,
'Date Of Expiry':D_O_Expiry}
return data_dic
if not os.path.exists("output"):
os.mkdir('output')
for i,j in enumerate(images):
print(f"========== Image {j} Data fetching... ==========")
output = reader.readtext(rf'NIC_Images/{j}')
data=[]
for a,b,c in output:
data.append(b)
# print(b)
print(get_data(data))
f=open(f"output/{j.split(sep='.')[0]}.txt",'w')
f.write("================data from image================\n")
f.write(str(data))
f.write("\n================extracted from data================\n")
for i in get_data(data).items():
f.write(str(i))
f.write('\n')
f.close()
|
StarcoderdataPython
|
3398722
|
<reponame>realsifocopypaste333/sifo-player-binary
# Generated by Django 3.0 on 2019-12-03 12:36
from django.db import migrations, models
class Migration(migrations.Migration):

    dependencies = [
        ('music', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='album',
            name='album_logo',
        ),
        migrations.AddField(
            model_name='album',
            name='album_image',
            field=models.FileField(default='', upload_to='album_logo/'),
        ),
    ]
|
StarcoderdataPython
|
6486814
|
<gh_stars>1000+
from cookies.resources.helpers import makeDropCookie, setNoCacheAndCORSHeaders
def main(request, response):
    """Respond to `/cookies/resources/dropSameSiteMultiAttribute.py` by dropping
    the cookies set by setSameSiteMultiAttribute.py"""
    headers = setNoCacheAndCORSHeaders(request, response)
    # Expire the cookies, and return a JSON-encoded success code.
    headers.append(makeDropCookie(b"samesite_unsupported", True))
    headers.append(makeDropCookie(b"samesite_unsupported_none", True))
    headers.append(makeDropCookie(b"samesite_unsupported_lax", False))
    headers.append(makeDropCookie(b"samesite_unsupported_strict", False))
    headers.append(makeDropCookie(b"samesite_none_unsupported", True))
    headers.append(makeDropCookie(b"samesite_lax_unsupported", True))
    headers.append(makeDropCookie(b"samesite_strict_unsupported", True))
    headers.append(makeDropCookie(b"samesite_lax_none", True))
    return headers, b'{"success": true}'
|
StarcoderdataPython
|
8141114
|
import numpy as np
class Space:
"""A space is a general concept, where it can be discrete or continuous."""
def __init__(self, check):
"""Defines a space, by having a check lambda.
:param check A lambda verifying if the element is in the space.
"""
self.check = np.vectorize(check)
def contains(self, x):
"""This method checks whether the space contains the list of samples.
:param x The list of points.
:returns True, if all elements inside.
"""
if isinstance(x, set):
x = list(x)
elif not isinstance(x, list):
x = [x]
return self.check(x)
def cut(self, space):
"""Check if the current space intersects with the passed space.
:return Passes back the result as a set.
"""
pass
class ContinuousSpace(Space):
"""A continuous space [a,b(."""
def __init__(self, a, b, open_brackets = True):
"""Defines a continuous interval, using the boundaries.
:param a The left side of interval.
:param b The right side of interval.
:param open_brackets True if brackets shall be open.
"""
self.a = a
self.b = b
self.open_brackets = open_brackets
# make check lambda
fn = np.greater if open_brackets else np.greater_equal
def f(x): return np.logical_and(fn(x, a), fn(b, x))
super().__init__(f)
def cut(self, space):
"""Check if the current space intersects with the passed space.
:return Passes back the result as a set.
"""
if isinstance(space, ContinuousSpace):
if space.b < self.a or self.b < space.a:
return NullSpace()
term_s = np.maximum(self.a, space.a)
term_e = np.minimum(self.b, space.b)
return ContinuousSpace(term_s, term_e, open_brackets=self.open_brackets)
# twist the plot
return space.cut(self)
class DiscreteSpace(Space):
"""A discrete space which holds a set of elements."""
def __init__(self, s, e):
"""Defines a discrete set.
:param s Start index
:param e End index
"""
self.s = s
self.e = e
# create lambda and pass to super
def f(x): return np.logical_and(np.greater_equal(x, s), np.greater(e, x))
super().__init__(f)
def cut(self, space):
"""Check if the current space intersects with the passed space.
:return Passes back the result as a set.
"""
la = isinstance(space, DiscreteSpace)
lc = isinstance(space, ContinuousSpace)
# set correctly
lim = None
if lc: lim = [space.a, space.b]
elif la: lim = [space.s, space.e]
# if it is a discrete set cut
if la or lc:
term_s = np.maximum(self.s, lim[0])
term_e = np.minimum(self.e, lim[1])
return DiscreteSpace(term_s, term_e + 1)
return space.cut(self)
class NullSpace(Space):
"""A space which holds nothing."""
def __init__(self):
"""Simply reject all training inputs."""
# make check lambda
f = lambda x: False
check = np.vectorize(f)
super().__init__(check)
def cut(self, space):
"""Empty set cut with any space is empty.
:return Always the empty set.
"""
return self
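A brief usage sketch of these space classes (illustrative only, assuming the definitions above):

interval = ContinuousSpace(0.0, 1.0)              # open interval (0, 1)
print(interval.contains([0.5, 1.5]))              # -> [ True False]
digits = DiscreteSpace(0, 10)                     # integers 0..9
overlap = digits.cut(ContinuousSpace(3.0, 20.0))  # -> DiscreteSpace(3, 11)
print(overlap.contains(5))                        # -> [ True]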
|
StarcoderdataPython
|
11326564
|
<reponame>alex-oleshkevich/malanka<filename>malanka/sockets.py<gh_stars>1-10
import json
import typing as t
from starlette import status
from starlette.concurrency import run_until_first_complete
from starlette.types import Receive, Scope, Send
from starlette.websockets import WebSocket
from malanka.backends import Event
from malanka.pubsub import PubSub
class Socket:
pubsub: PubSub
channel_name: str
websocket: WebSocket
encoding: t.Optional[str] = None
def __init__(self, pubsub: PubSub, channel_name: str = None) -> None:
self.pubsub = pubsub
self.channel_name = channel_name or self.channel_name
assert self.channel_name, 'Channel name is not set.'
async def connected(self, ws: WebSocket) -> None:
"""Called on successful connection."""
await ws.accept()
async def disconnected(self, ws: WebSocket, close_code: int) -> None:
"""Called after disconnection."""
async def received(self, websocket: WebSocket, data: t.Any) -> None:
"""Called when a message from client has been received."""
async def decode(self, ws: WebSocket, data: t.Mapping) -> t.Any:
if self.encoding == 'text':
if 'text' not in data:
await ws.close(status.WS_1003_UNSUPPORTED_DATA)
raise RuntimeError("Expected text websocket messages, but got bytes.")
return data['text']
elif self.encoding == 'bytes':
if 'bytes' not in data:
await ws.close(status.WS_1003_UNSUPPORTED_DATA)
raise RuntimeError("Expected bytes websocket messages, but got text.")
elif self.encoding == 'json':
text = data['text'] if data.get('text') is not None else data['bytes'].decode('utf-8')
try:
return self.load_json(text)
except json.decoder.JSONDecodeError:
await ws.close(status.WS_1003_UNSUPPORTED_DATA)
raise RuntimeError('Malformed JSON data received.')
return self.decode_fallback(ws, data)
def decode_fallback(self, ws: WebSocket, data: t.Mapping) -> t.Any:
"""Handle unsupported encoding by overriding this method.
Override this method to support custom data encodings.
Example:
import msgpack
class MsgpackSocket:
def decode_fallback(self, ws, data):
return msgpack.loads(data["text"])
"""
return data["text"] if data.get("text") else data["bytes"]
async def broadcast(self, data: t.Any) -> None:
"""Send message to all channel members including this connection."""
await self.pubsub.publish(self.get_channel_name(), data)
def get_channel_name(self) -> str:
"""Return current name. Override this function to set a dynamic channel name.
Example:
def get_channel_name(self) -> str:
return 'user.%s' % self.scope['user_id']
"""
return self.channel_name
def load_json(self, raw_data: str) -> t.Any:
"""Parse JSON encoded raw data. Override this method to customize JSON parsing.
Example:
import ujson
def load_json(self, raw_data):
return ujson.loads(raw_data)
"""
return json.loads(raw_data)
@classmethod
def as_asgi(cls, pubsub: PubSub, channel_name: str = None) -> t.Callable:
"""Create a new ASGI compatible class with stream and channel name properly set up.
This is the primary way to use Socket class.
Example:
from starlette.routes import WebsocketRoute
from malanka.streams import RedisStream
routes = [
WebSocketRoute('/ws', MySocket.as_asgi(RedisStream('redis://'))),
]
"""
class ASGIAppWrapper:
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
instance = cls(pubsub, channel_name)
await instance(scope, receive, send)
return ASGIAppWrapper()
async def _receive_from_client(self, ws: WebSocket) -> None:
"""Listens socket for client generated events."""
close_code = status.WS_1000_NORMAL_CLOSURE
try:
while True:
message = await ws.receive()
if message['type'] == 'websocket.receive':
data = await self.decode(ws, message)
await self.received(ws, data)
elif message['type'] == 'websocket.disconnect':
close_code = int(message.get('code', status.WS_1000_NORMAL_CLOSURE))
break
except Exception:
close_code = status.WS_1011_INTERNAL_ERROR
raise
finally:
await self.disconnected(ws, close_code)
async def _receive_from_pubsub(self, ws: WebSocket, subscriber: t.AsyncGenerator[Event, None]) -> None:
"""Listens stream for data generated by another members."""
async for event in subscriber:
data = event.message
if self.encoding == 'text':
await ws.send_text(data)
elif self.encoding == 'bytes':
await ws.send_bytes(data.encode('utf-8'))
elif self.encoding == 'json':
await ws.send_json(data)
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
"""ASGI application entry point."""
self.websocket = WebSocket(scope, receive, send)
async with self.pubsub:
async with self.pubsub.subscribe(self.get_channel_name()) as subscriber:
await self.connected(self.websocket)
# connection closes if at least one of these functions returns
await run_until_first_complete(
(self._receive_from_client, dict(ws=self.websocket)),
(self._receive_from_pubsub, dict(ws=self.websocket, subscriber=subscriber)),
)
class TextSocket(Socket):
encoding = 'text'
class BinarySocket(Socket):
encoding = 'bytes'
class JSONSocket(Socket):
encoding = 'json'
|
StarcoderdataPython
|
6530523
|
# The MIT License (MIT)
#
# Copyright (c) 2016 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import ipywidgets as widgets
import math
from traitlets import Unicode, Bool, Int, Float
# https://code.activestate.com/recipes/577659-decorators-for-adding-aliases-to-methods-in-a-clas/
class alias(object):
def __init__(self, *aliases):
self.aliases = set(aliases)
def __call__(self, f):
f._aliases = self.aliases
return f
def aliased(aliased_class):
original_methods = aliased_class.__dict__.copy()
for name, method in original_methods.items():
if hasattr(method, '_aliases'):
for alias in method._aliases - set(original_methods):
setattr(aliased_class, alias, method)
return aliased_class
@aliased
@widgets.register
class Turtle(widgets.DOMWidget):
""""""
_view_name = Unicode('TurtleView').tag(sync=True)
_model_name = Unicode('TurtleModel').tag(sync=True)
_view_module = Unicode('ipyturtle').tag(sync=True)
_model_module = Unicode('ipyturtle').tag(sync=True)
_view_module_version = Unicode('^0.2.4').tag(sync=True)
_model_module_version = Unicode('^0.2.4').tag(sync=True)
_canvas_fixed = Bool(True).tag(sync=True)
_canvas_width = Int(320).tag(sync=True)
_canvas_height = Int(320).tag(sync=True)
_turtle_on = Bool(True).tag(sync=True)
_pen_on = True
_turtle_height = Int(20).tag(sync=True)
_turtle_width = Int(10).tag(sync=True)
_turtle_location_x = Float(0.0).tag(sync=True)
_turtle_location_y = Float(0.0).tag(sync=True)
_turtle_heading = Float(00.0).tag(sync=True)
_turtle_heading_x = Float(1).tag(sync=True)
_turtle_heading_y = Float(0).tag(sync=True)
_line = Unicode('').tag(sync=True)
_current_color = "Black"
_current_color_rgb = None
def __init__(self, width=320, height=320, fixed=True):
widgets.DOMWidget.__init__(self)
self._canvas_width = width
self._canvas_height = height
self._canvas_fixed = fixed
self._reset()
def _reset(self):
self._turtle_on = True
self._pen_on = True
self._turtle_location_x = 0
self._turtle_location_y = 0
self._turtle_heading = 90.0
self._turtle_heading_x = 1.0
self._turtle_heading_y = 0.0
def position(self):
return self._turtle_location_x, self._turtle_location_y
def forward(self, length):
precision = 4
start = "{} {}".format(round(self._turtle_location_x,precision),
round(self._turtle_location_y,precision))
self._turtle_location_x += length * self._turtle_heading_x
self._turtle_location_y += length * self._turtle_heading_y
end = " {} {}".format(round(self._turtle_location_x, precision),
round(self._turtle_location_y, precision))
#print(start, end)
if self._pen_on:
color = self._current_color
if len(self._current_color)==0:
color = "rgb({},{},{})".format(self._current_color_rgb[0],
self._current_color_rgb[1],
self._current_color_rgb[2])
self._line = start + end + " " + color
def back(self, length):
self.forward(-length)
def heading(self):
return self._turtle_heading
@alias('seth')
def setheading(self, angle):
self._turtle_heading = angle % 360
self._turtle_heading_x = math.cos(math.radians(self._turtle_heading))
self._turtle_heading_y = math.sin(math.radians(self._turtle_heading))
@alias('setposition', 'setpos')
def goto(self, x, y):
start_heading = self._turtle_heading
delta_x = x - self._turtle_location_x
delta_y = y - self._turtle_location_y
degrees = math.degrees(math.atan2(delta_y, delta_x))
distance = math.sqrt((delta_x)**2 + (delta_y)**2)
self.setheading(degrees)
self.forward(distance)
self.setheading(start_heading)
def circle(self, radius):
steps = 45
speed = ((360/steps) / 60) * radius + 1/steps
rotate = 360/steps
for i in range(steps):
self.forward(speed)
self.left(rotate)
def setpos(self, x, y=None):
return self.goto(x, y)
def setposition(self, x, y=None):
return self.goto(x, y)
def left(self, degree=None):
if degree is None:
degree = 90
self._turtle_heading += degree
self._turtle_heading = self._turtle_heading % 360
hx = math.cos(math.radians(self._turtle_heading))
hy = math.sin(math.radians(self._turtle_heading))
self._turtle_heading_x = hx
self._turtle_heading_y = hy
def right(self, degree=None):
if degree is None:
degree = 90
self.left(-degree)
def penup(self):
self._pen_on = False
def pendown(self):
self._pen_on = True
def isdown(self):
return self._pen_on
def hideturtle(self):
self._turtle_on = False
def showturtle(self):
self._turtle_on = True
def isvisible(self):
return self._turtle_on
def reset(self):
self._reset()
self.pencolor(0, 0, 0)
self.forward(0)
self._line = 'clear'
def pencolor(self,r=-1,g=-1,b=-1):
if r == -1:
if len(self._current_color)==0:
return self._current_color_rgb
else:
return self._current_color
elif type(r) == str:
self._current_color = r
self._current_color_rgb = None
elif type(r) == tuple:
self._current_color = ""
self._current_color_rgb = r
else:
self._current_color = ""
self._current_color_rgb = (r,g,b)
self.forward(0)
def reset_notebook():
t.reset()
t.seth(0)
return t
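For orientation, a minimal notebook usage sketch (illustrative; it assumes the ipyturtle widget is installed and that the Turtle instance is displayed as a cell output):

t = Turtle(width=320, height=320)
t                      # evaluating the widget as the last expression renders the canvas
t.pencolor("Blue")
for _ in range(4):     # draw a square
    t.forward(80)
    t.left(90)
t.penup()
t.goto(-100, -100)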
|
StarcoderdataPython
|
3535550
|
<filename>Chapter_9/try_9.13.py<gh_stars>0
#Done by <NAME> in 09/07/2020
"""
Make a class Die with one attribute called sides , which has a default
value of 6. Write a method called roll_die() that prints a random number
between 1 and the number of sides the die has. Make a 6-sided die and roll it
10 times.
Make a 10-sided die and a 20-sided die. Roll each die 10 times.
"""
#Dice
#6-sided Die.
from random import randint
class Die():
"""A simple attempt to model a dice."""
def __init__(self, sides=6):
"""Initialize die attributes."""
self.sides = sides
def roll_die(self):
"""Print a random number between 1 and 6."""
print(randint(1, self.sides))
print("6 sided Die")
#Build a 6-sided Die
d6 = Die(sides=6)
results = ''
#Make it roll 10 times
for roll in range(10):
result = d6.roll_die()
print(results)
print("\n")
print("10 sided Die")
#10-sided Die.
from random import randint
class Die():
"""A simple attempt to represent a Dice."""
def __init__(self, sides=6):
self.sides = sides
def roll_die(self):
print(randint(1, self.sides))
#Make a 10 sided die.
d = Die(sides=10)
result = ''
#Make it roll 10 times
for roll in range(10):
result = d.roll_die()
print(results)
print("\n")
print("20 sided Dice")
from random import randint
class Die():
"""A simple attempt to model a Dice."""
def __init__(self, sides=6):
self.sides = sides
def roll_die(self):
print(randint(1, self.sides))
#Build a 20 sided Die.
d20 = Die (sides=20)
#Make it roll 10 times
result = ''
for roll in range(10):
result = d20.roll_die()
print(results)
|
StarcoderdataPython
|
67280
|
import bisect
import collections
import os
import queue
import random
import subprocess
import threading
import time
import traceback
from hydrus.core import HydrusData
from hydrus.core import HydrusExceptions
from hydrus.core import HydrusGlobals as HG
NEXT_THREAD_CLEAROUT = 0
THREADS_TO_THREAD_INFO = {}
THREAD_INFO_LOCK = threading.Lock()
def CheckIfThreadShuttingDown():
if IsThreadShuttingDown():
raise HydrusExceptions.ShutdownException( 'Thread is shutting down!' )
def ClearOutDeadThreads():
with THREAD_INFO_LOCK:
all_threads = list( THREADS_TO_THREAD_INFO.keys() )
for thread in all_threads:
if not thread.is_alive():
del THREADS_TO_THREAD_INFO[ thread ]
def GetThreadInfo( thread = None ):
global NEXT_THREAD_CLEAROUT
if HydrusData.TimeHasPassed( NEXT_THREAD_CLEAROUT ):
ClearOutDeadThreads()
NEXT_THREAD_CLEAROUT = HydrusData.GetNow() + 600
if thread is None:
thread = threading.current_thread()
with THREAD_INFO_LOCK:
if thread not in THREADS_TO_THREAD_INFO:
thread_info = {}
thread_info[ 'shutting_down' ] = False
THREADS_TO_THREAD_INFO[ thread ] = thread_info
return THREADS_TO_THREAD_INFO[ thread ]
def IsThreadShuttingDown():
if HG.controller.DoingFastExit():
return True
me = threading.current_thread()
if isinstance( me, DAEMON ):
if HG.started_shutdown:
return True
else:
if HG.model_shutdown:
return True
thread_info = GetThreadInfo()
return thread_info[ 'shutting_down' ]
def ShutdownThread( thread ):
thread_info = GetThreadInfo( thread )
thread_info[ 'shutting_down' ] = True
def SubprocessCommunicate( process: subprocess.Popen ):
def do_test():
if HG.model_shutdown:
try:
process.kill()
except:
pass
raise HydrusExceptions.ShutdownException( 'Application is shutting down!' )
do_test()
while True:
try:
return process.communicate( timeout = 10 )
except subprocess.TimeoutExpired:
do_test()
class DAEMON( threading.Thread ):
def __init__( self, controller, name ):
threading.Thread.__init__( self, name = name )
self._controller = controller
self._name = name
self._event = threading.Event()
self._controller.sub( self, 'wake', 'wake_daemons' )
self._controller.sub( self, 'shutdown', 'shutdown' )
def _DoPreCall( self ):
if HG.daemon_report_mode:
HydrusData.ShowText( self._name + ' doing a job.' )
def GetCurrentJobSummary( self ):
return 'unknown job'
def GetName( self ):
return self._name
def shutdown( self ):
ShutdownThread( self )
self.wake()
def wake( self ):
self._event.set()
class DAEMONWorker( DAEMON ):
def __init__( self, controller, name, callable, topics = None, period = 3600, init_wait = 3, pre_call_wait = 0 ):
if topics is None:
topics = []
DAEMON.__init__( self, controller, name )
self._callable = callable
self._topics = topics
self._period = period
self._init_wait = init_wait
self._pre_call_wait = pre_call_wait
for topic in topics:
self._controller.sub( self, 'set', topic )
self.start()
def _CanStart( self ):
return self._ControllerIsOKWithIt()
def _ControllerIsOKWithIt( self ):
return True
def _DoAWait( self, wait_time, event_can_wake = True ):
time_to_start = HydrusData.GetNow() + wait_time
while not HydrusData.TimeHasPassed( time_to_start ):
if event_can_wake:
event_was_set = self._event.wait( 1.0 )
if event_was_set:
self._event.clear()
return
else:
time.sleep( 1.0 )
CheckIfThreadShuttingDown()
def _WaitUntilCanStart( self ):
while not self._CanStart():
time.sleep( 1.0 )
CheckIfThreadShuttingDown()
def GetCurrentJobSummary( self ):
return self._callable
def run( self ):
try:
self._DoAWait( self._init_wait )
while True:
CheckIfThreadShuttingDown()
self._DoAWait( self._pre_call_wait, event_can_wake = False )
CheckIfThreadShuttingDown()
self._WaitUntilCanStart()
CheckIfThreadShuttingDown()
self._DoPreCall()
try:
self._callable( self._controller )
except HydrusExceptions.ShutdownException:
return
except Exception as e:
HydrusData.ShowText( 'Daemon ' + self._name + ' encountered an exception:' )
HydrusData.ShowException( e )
self._DoAWait( self._period )
except HydrusExceptions.ShutdownException:
return
def set( self, *args, **kwargs ):
self._event.set()
# Big stuff like DB maintenance that we don't want to run while other important stuff is going on, like user interaction or vidya on another process
class DAEMONBackgroundWorker( DAEMONWorker ):
def _ControllerIsOKWithIt( self ):
return self._controller.GoodTimeToStartBackgroundWork()
# Big stuff that we want to run when the user sees, but not at the expense of something else, like laggy session load
class DAEMONForegroundWorker( DAEMONWorker ):
def _ControllerIsOKWithIt( self ):
return self._controller.GoodTimeToStartForegroundWork()
class THREADCallToThread( DAEMON ):
def __init__( self, controller, name ):
DAEMON.__init__( self, controller, name )
self._callable = None
self._queue = queue.Queue()
self._currently_working = True # start off true so new threads aren't used twice by two quick successive calls
def CurrentlyWorking( self ):
return self._currently_working
def GetCurrentJobSummary( self ):
return self._callable
def put( self, callable, *args, **kwargs ):
self._currently_working = True
self._queue.put( ( callable, args, kwargs ) )
self._event.set()
def run( self ):
try:
while True:
while self._queue.empty():
CheckIfThreadShuttingDown()
self._event.wait( 10.0 )
self._event.clear()
CheckIfThreadShuttingDown()
try:
try:
( callable, args, kwargs ) = self._queue.get( 1.0 )
except queue.Empty:
# https://github.com/hydrusnetwork/hydrus/issues/750
# this shouldn't happen, but...
# even if we assume we'll never get this, we don't want to make a business of hanging forever on things
continue
self._DoPreCall()
self._callable = ( callable, args, kwargs )
if HG.profile_mode:
summary = 'Profiling CallTo Job: {}'.format( callable )
HydrusData.Profile( summary, 'callable( *args, **kwargs )', globals(), locals(), min_duration_ms = HG.callto_profile_min_job_time_ms )
else:
callable( *args, **kwargs )
self._callable = None
del callable
except HydrusExceptions.ShutdownException:
return
except Exception as e:
HydrusData.Print( traceback.format_exc() )
HydrusData.ShowException( e )
finally:
self._currently_working = False
time.sleep( 0.00001 )
except HydrusExceptions.ShutdownException:
return
class JobScheduler( threading.Thread ):
def __init__( self, controller ):
threading.Thread.__init__( self, name = 'Job Scheduler' )
self._controller = controller
self._waiting = []
self._waiting_lock = threading.Lock()
self._new_job_arrived = threading.Event()
self._current_job = None
self._cancel_filter_needed = threading.Event()
self._sort_needed = threading.Event()
self._controller.sub( self, 'shutdown', 'shutdown' )
def _FilterCancelled( self ):
with self._waiting_lock:
self._waiting = [ job for job in self._waiting if not job.IsCancelled() ]
def _GetLoopWaitTime( self ):
with self._waiting_lock:
if len( self._waiting ) == 0:
return 0.2
next_job = self._waiting[0]
time_delta_until_due = next_job.GetTimeDeltaUntilDue()
return min( 1.0, time_delta_until_due )
def _NoWorkToStart( self ):
with self._waiting_lock:
if len( self._waiting ) == 0:
return True
next_job = self._waiting[0]
if next_job.IsDue():
return False
else:
return True
def _SortWaiting( self ):
# sort the waiting jobs in ascending order of expected work time
with self._waiting_lock: # this uses __lt__ to sort
self._waiting.sort()
def _StartWork( self ):
jobs_started = 0
while True:
with self._waiting_lock:
if len( self._waiting ) == 0:
break
if jobs_started >= 10: # try to avoid spikes
break
next_job = self._waiting[0]
if not next_job.IsDue():
# front is not due, so nor is the rest of the list
break
next_job = self._waiting.pop( 0 )
if next_job.IsCancelled():
continue
if next_job.SlotOK():
# important this happens outside of the waiting lock lmao!
next_job.StartWork()
jobs_started += 1
else:
# delay is automatically set by SlotOK
with self._waiting_lock:
bisect.insort( self._waiting, next_job )
def AddJob( self, job ):
with self._waiting_lock:
bisect.insort( self._waiting, job )
self._new_job_arrived.set()
def ClearOutDead( self ):
with self._waiting_lock:
self._waiting = [ job for job in self._waiting if not job.IsDead() ]
def GetName( self ):
return 'Job Scheduler'
def GetCurrentJobSummary( self ):
with self._waiting_lock:
return HydrusData.ToHumanInt( len( self._waiting ) ) + ' jobs'
def GetJobs( self ):
with self._waiting_lock:
return list( self._waiting )
def GetPrettyJobSummary( self ):
with self._waiting_lock:
num_jobs = len( self._waiting )
job_lines = [ repr( job ) for job in self._waiting ]
lines = [ HydrusData.ToHumanInt( num_jobs ) + ' jobs:' ] + job_lines
text = os.linesep.join( lines )
return text
def JobCancelled( self ):
self._cancel_filter_needed.set()
def shutdown( self ):
ShutdownThread( self )
self._new_job_arrived.set()
def WorkTimesHaveChanged( self ):
self._sort_needed.set()
def run( self ):
while True:
try:
while self._NoWorkToStart():
if IsThreadShuttingDown():
return
#
if self._cancel_filter_needed.is_set():
self._FilterCancelled()
self._cancel_filter_needed.clear()
if self._sort_needed.is_set():
self._SortWaiting()
self._sort_needed.clear()
continue # if some work is now due, let's do it!
#
wait_time = self._GetLoopWaitTime()
self._new_job_arrived.wait( wait_time )
self._new_job_arrived.clear()
self._StartWork()
except HydrusExceptions.ShutdownException:
return
except Exception as e:
HydrusData.Print( traceback.format_exc() )
HydrusData.ShowException( e )
time.sleep( 0.00001 )
class SchedulableJob( object ):
PRETTY_CLASS_NAME = 'job base'
def __init__( self, controller, scheduler: JobScheduler, initial_delay, work_callable ):
self._controller = controller
self._scheduler = scheduler
self._work_callable = work_callable
self._should_delay_on_wakeup = False
self._next_work_time = HydrusData.GetNowFloat() + initial_delay
self._thread_slot_type = None
self._work_lock = threading.Lock()
self._currently_working = threading.Event()
self._is_cancelled = threading.Event()
def __lt__( self, other ): # for the scheduler to do bisect.insort noice
return self._next_work_time < other._next_work_time
def __repr__( self ):
return '{}: {} {}'.format( self.PRETTY_CLASS_NAME, self.GetPrettyJob(), self.GetDueString() )
def _BootWorker( self ):
self._controller.CallToThread( self.Work )
def Cancel( self ):
self._is_cancelled.set()
self._scheduler.JobCancelled()
def CurrentlyWorking( self ):
return self._currently_working.is_set()
def GetDueString( self ):
due_delta = self._next_work_time - HydrusData.GetNowFloat()
due_string = HydrusData.TimeDeltaToPrettyTimeDelta( due_delta )
if due_delta < 0:
due_string = 'was due {} ago'.format( due_string )
else:
due_string = 'due in {}'.format( due_string )
return due_string
def GetNextWorkTime( self ):
return self._next_work_time
def GetPrettyJob( self ):
return repr( self._work_callable )
def GetTimeDeltaUntilDue( self ):
return HydrusData.GetTimeDeltaUntilTimeFloat( self._next_work_time )
def IsCancelled( self ):
return self._is_cancelled.is_set()
def IsDead( self ):
return False
def IsDue( self ):
return HydrusData.TimeHasPassedFloat( self._next_work_time )
def PubSubWake( self, *args, **kwargs ):
self.Wake()
def SetThreadSlotType( self, thread_type ):
self._thread_slot_type = thread_type
def ShouldDelayOnWakeup( self, value ):
self._should_delay_on_wakeup = value
def SlotOK( self ):
if self._thread_slot_type is not None:
if HG.controller.AcquireThreadSlot( self._thread_slot_type ):
return True
else:
self._next_work_time = HydrusData.GetNowFloat() + 10 + random.random()
return False
return True
def StartWork( self ):
if self._is_cancelled.is_set():
return
self._currently_working.set()
self._BootWorker()
def Wake( self, next_work_time = None ):
if next_work_time is None:
next_work_time = HydrusData.GetNowFloat()
self._next_work_time = next_work_time
self._scheduler.WorkTimesHaveChanged()
def WakeOnPubSub( self, topic ):
HG.controller.sub( self, 'PubSubWake', topic )
def Work( self ):
try:
if self._should_delay_on_wakeup:
while HG.controller.JustWokeFromSleep():
if IsThreadShuttingDown():
return
time.sleep( 1 )
with self._work_lock:
self._work_callable()
finally:
if self._thread_slot_type is not None:
HG.controller.ReleaseThreadSlot( self._thread_slot_type )
self._currently_working.clear()
class SingleJob( SchedulableJob ):
PRETTY_CLASS_NAME = 'single job'
def __init__( self, controller, scheduler: JobScheduler, initial_delay, work_callable ):
SchedulableJob.__init__( self, controller, scheduler, initial_delay, work_callable )
self._work_complete = threading.Event()
def IsWorkComplete( self ):
return self._work_complete.is_set()
def Work( self ):
SchedulableJob.Work( self )
self._work_complete.set()
class RepeatingJob( SchedulableJob ):
PRETTY_CLASS_NAME = 'repeating job'
def __init__( self, controller, scheduler: JobScheduler, initial_delay, period, work_callable ):
SchedulableJob.__init__( self, controller, scheduler, initial_delay, work_callable )
self._period = period
self._stop_repeating = threading.Event()
def Cancel( self ):
SchedulableJob.Cancel( self )
self._stop_repeating.set()
def Delay( self, delay ):
self._next_work_time = HydrusData.GetNowFloat() + delay
self._scheduler.WorkTimesHaveChanged()
def IsRepeatingWorkFinished( self ):
return self._stop_repeating.is_set()
def StartWork( self ):
if self._stop_repeating.is_set():
return
SchedulableJob.StartWork( self )
def Work( self ):
SchedulableJob.Work( self )
if not self._stop_repeating.is_set():
self._next_work_time = HydrusData.GetNowFloat() + self._period
self._scheduler.AddJob( self )
|
StarcoderdataPython
|
5056358
|
from django.apps import AppConfig
class WagtailFaviconConfig(AppConfig):
name = 'wagtail_favicon'
|
StarcoderdataPython
|
11357002
|
<filename>src/keyedtensor/_repr_utils.py
from typing import List
def format_field(kstr: str, rows: List[str], prefix: str = '') -> str:
kprefix = f'{prefix}{kstr}'
pad = ' ' * (len(kprefix))
tensorstr = '\n'.join(f'{pad if i > 0 else ""}{v}' for i, v in enumerate(rows))
return f'{kprefix}{tensorstr}'
def keyedtensor_str(kt) -> str:
prefix = f'{kt.__class__.__name__}('
str_data = [(f'{k}=', f'{t!r}'.split('\n')) for k, t in kt.items()]
if all(len(t) == 1 for _, t in str_data):
return f'{prefix}{", ".join(f"{k}{v[0]}" for k, v in str_data)})'
return (
',\n'.join(
format_field(kstr, rows, prefix=' ' * len(prefix) if i else prefix)
for i, (kstr, rows) in enumerate(str_data)
)
+ ')'
)
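

# Hedged usage sketch (not part of the original module; the small dict subclass
# below only stands in for the real keyedtensor.KeyedTensor, which exposes the
# same .items() interface): it shows how keyedtensor_str aligns multi-line
# tensor reprs under their keys.
if __name__ == '__main__':
    import torch

    class _DemoKT(dict):
        """Stand-in with dict semantics, used purely for illustration."""

    kt = _DemoKT(x=torch.zeros(2, 2), y=torch.ones(3))
    print(keyedtensor_str(kt))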
|
StarcoderdataPython
|
3510987
|
<filename>benchmark/algorithms/puck_t1.py<gh_stars>0
#-*- coding:utf-8 -*-
################################################################################
#
# Copyright (c) 2021 Baidu.com, Inc. All Rights Reserved
#
################################################################################
"""
@file: puck_t1.py
@author: yinjie06(<EMAIL>)
@date: 2021-10-06 13:44
@brief:
"""
from benchmark.algorithms.base import BaseANN
from benchmark.datasets import DATASETS, download_accelerated
from puck import py_puck_api
import os
import numpy as np
import time
swig_ptr = py_puck_api.swig_ptr
class Puck(BaseANN):
def __init__(self, metric, index_params):
self._index_params = index_params
self._metric = metric
self._query_bs = -1
self.indexkey = index_params.get("indexkey", "NA")
if 'query_bs' in index_params:
self._query_bs = index_params['query_bs']
self.index = py_puck_api.PySearcher()
self.topk = 10
self.n = 0
def track(self):
return "T1"
def fit(self, dataset):
print("Puck provide the index-data and the Docker image for search. We will open Puck to open source community at the end of this year.")
def index_name(self, name):
return f"data/{name}.{self.indexkey}.puckindex"
def index_tag_name(self, name):
return f"{name}.{self.indexkey}.puckindex"
def load_index(self, dataset):
index_components = ["filer_data.dat","GNOIMI_coarse.dat","GNOIMI_fine.dat","index.dat","learn_assign.dat"]
############ download index && update links
print(self.index_name(dataset))
if not os.path.exists(self.index_name(dataset)):
if 'url' not in self._index_params:
return False
#5 index files will be downloaded in this lib
index_dir = os.path.join(os.getcwd(), self.index_name(dataset))
print(index_dir)
os.makedirs(index_dir, mode=0o777, exist_ok=True)
print('Downloading index in background. This can take a while.')
for component in index_components:
download_accelerated(self._index_params['url']+"_"+component, self.index_name(dataset)+"/"+component, quiet=True)
time.sleep(60)
print("Loading index")
index_tag = self.index_tag_name(dataset)
cmd = " ln -s %s ./puck_index"%(self.index_name(dataset))
print(cmd)
os.system(cmd)
cmd = " ls -al puck_index/"
os.system(cmd)
self.index.init()
self.index.show()
ds = DATASETS[dataset]()
self.n = ds.nq
return True
def set_query_arguments(self, query_args):
query_args_list = query_args.strip().split(',')
self.index.update_params(int(query_args_list[0]), int(query_args_list[1]), int(query_args_list[2]),int(query_args_list[3]))
self.topk = int(query_args_list[0])
self.res = (np.empty((self.n, self.topk), dtype='float32'), np.empty((self.n, self.topk), dtype='uint32'))
self.qas = query_args
print(type(self.res[0]), len(self.res[0]))
def query(self, X, topK):
n, d = X.shape
self.index.search(n, swig_ptr(X), swig_ptr(self.res[0]), swig_ptr(self.res[1]))
def get_results(self):
return self.res[1]
def __str__(self):
return f'Puck({self.qas})'
|
StarcoderdataPython
|
11335563
|
# file: xpand.py
# vim:fileencoding=utf-8:fdm=marker:ft=python
#
# Copyright © 2013-2017 <NAME> <<EMAIL>>.
# SPDX-License-Identifier: MIT
# Created: 2013-08-13T23:13:48+0200
# Last modified: 2018-04-17T20:45:41+0200
"""Function to expand filename globs."""
import glob
def xpand(args): # {{{1
"""Expand command line arguments for operating systems incapable of doing
so.
Arguments:
args: String or list of strings.
Returns:
Expanded argument list of strings.
"""
if isinstance(args, str):
args = [args]
xa = []
for a in args:
g = glob.glob(a)
if g:
xa += g
else:
xa += [a]
return xa
|
StarcoderdataPython
|
6551502
|
<filename>kapitel-8_Funktionen_Methoden_und_Attribute/main.py
wert = max([3,6,3,5,8,6,9,10])
liste = [2,5,7,2,9,8,6,7,2]
var = 12
referenz.methode(var, "<NAME>!")  # 'referenz' is assumed to be an object defined elsewhere in the chapter that provides methode()
print(wert/2)
liste.sort()
print(liste)
|
StarcoderdataPython
|
1967453
|
<reponame>lhj940825/FDA_Integration_to_INTRA_DA
import numpy as np
import torch
import torch.nn as nn
from advent.utils.loss import cross_entropy_2d
def bce_loss(y_pred, y_label):
y_truth_tensor = torch.FloatTensor(y_pred.size())
y_truth_tensor.fill_(y_label)
y_truth_tensor = y_truth_tensor.to(y_pred.get_device())
return nn.BCEWithLogitsLoss()(y_pred, y_truth_tensor)
def loss_calc(pred, label, device):
"""
This function returns cross entropy loss for semantic segmentation
"""
# out shape batch_size x channels x h x w -> batch_size x channels x h x w
# label shape h x w x 1 x batch_size -> batch_size x 1 x h x w
label = label.long().to(device)
return cross_entropy_2d(pred, label)
def lr_poly(base_lr, iter, max_iter, power):
""" Poly_LR scheduler
"""
return base_lr * ((1 - float(iter) / max_iter) ** power)
def _adjust_learning_rate(optimizer, i_iter, cfg, learning_rate):
lr = lr_poly(learning_rate, i_iter, cfg.TRAIN.MAX_ITERS, cfg.TRAIN.POWER)
optimizer.param_groups[0]['lr'] = lr
if len(optimizer.param_groups) > 1:
optimizer.param_groups[1]['lr'] = lr * 10
def adjust_learning_rate(optimizer, i_iter, cfg):
""" adject learning rate for main segnet
"""
_adjust_learning_rate(optimizer, i_iter, cfg, cfg.TRAIN.LEARNING_RATE)
def adjust_learning_rate_discriminator(optimizer, i_iter, cfg):
_adjust_learning_rate(optimizer, i_iter, cfg, cfg.TRAIN.LEARNING_RATE_D)
def prob_2_entropy(prob):
""" convert probabilistic prediction maps to weighted self-information maps
"""
n, c, h, w = prob.size()
return -torch.mul(prob, torch.log2(prob + 1e-30)) / np.log2(c)
def fast_hist(a, b, n):
k = (a >= 0) & (a < n)
return np.bincount(n * a[k].astype(int) + b[k], minlength=n ** 2).reshape(n, n)
def per_class_iu(hist):
return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
def FDA_source_to_target(src_img, trg_img, L=0.1):
# exchange magnitude
# input: src_img, trg_img
# get fft of both source and target
fft_src = torch.rfft( src_img.clone(), signal_ndim=2, onesided=False )
fft_trg = torch.rfft( trg_img.clone(), signal_ndim=2, onesided=False )
# extract amplitude and phase of both ffts
amp_src, pha_src = extract_ampl_phase( fft_src.clone())
amp_trg, pha_trg = extract_ampl_phase( fft_trg.clone())
# replace the low frequency amplitude part of source with that from target
amp_src_ = low_freq_mutate( amp_src.clone(), amp_trg.clone(), L=L )
# recompose fft of source
fft_src_ = torch.zeros( fft_src.size(), dtype=torch.float )
fft_src_[:,:,:,:,0] = torch.cos(pha_src.clone()) * amp_src_.clone()
fft_src_[:,:,:,:,1] = torch.sin(pha_src.clone()) * amp_src_.clone()
# get the recomposed image: source content, target style
_, _, imgH, imgW = src_img.size()
src_in_trg = torch.irfft( fft_src_, signal_ndim=2, onesided=False, signal_sizes=[imgH,imgW] )
return src_in_trg
def extract_ampl_phase(fft_im):
# fft_im: size should be bx3xhxwx2
fft_amp = fft_im[:,:,:,:,0]**2 + fft_im[:,:,:,:,1]**2
fft_amp = torch.sqrt(fft_amp)
fft_pha = torch.atan2( fft_im[:,:,:,:,1], fft_im[:,:,:,:,0] )
return fft_amp, fft_pha
def low_freq_mutate( amp_src, amp_trg, L=0.1 ):
_, _, h, w = amp_src.size()
b = ( np.floor(np.amin((h,w))*L) ).astype(int) # get b
amp_src[:,:,0:b,0:b] = amp_trg[:,:,0:b,0:b] # top left
amp_src[:,:,0:b,w-b:w] = amp_trg[:,:,0:b,w-b:w] # top right
amp_src[:,:,h-b:h,0:b] = amp_trg[:,:,h-b:h,0:b] # bottom left
amp_src[:,:,h-b:h,w-b:w] = amp_trg[:,:,h-b:h,w-b:w] # bottom right
return amp_src
def preprocess(image, mean):
image = image[:,:,::-1] # change to BGR
image -= mean
return image.transpose((2, 0, 1))
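

# Hedged usage sketch (not part of the original project): swap the low-frequency
# amplitude of a random "source" batch with that of a random "target" batch via
# FDA_source_to_target above. Note this relies on torch.rfft/torch.irfft, so it
# assumes an older PyTorch release (< 1.8) that still ships those APIs.
if __name__ == '__main__':
    src = torch.rand(1, 3, 64, 64)
    trg = torch.rand(1, 3, 64, 64)
    src_in_trg = FDA_source_to_target(src, trg, L=0.1)
    print(src_in_trg.shape)  # torch.Size([1, 3, 64, 64])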
|
StarcoderdataPython
|
396619
|
<gh_stars>0
#Need to import our own stuff!
import constants
from level_manager import *
from title_screen import *
from music import *
import pygame
# Initialize Pygame
pygame.init()
screen = pygame.display.set_mode([constants.SCREEN_WIDTH,
constants.SCREEN_HEIGHT])
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
level_manager = LevelManager()
level_manager.load_level(TitleScreen())
pygame.display.set_caption('Quantum Leaper: LEVEL 1')
done = False
# -------- Main Program Loop -----------
while not done:
current_level = level_manager.get_current_level()
#We've left the TitleScreen - Exit the game
if current_level == None:
break
#Needs Game Logic
#Update and Draw are very common delinations in game logic
#Update is involved in changing game state
#Draw simply draws objects on the screen
current_level.draw(screen)
current_level.update()
# Update the screen with what we've drawn.
pygame.display.flip()
# Limit to 60 frames per second
clock.tick(60)
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
break
#Needs keyboard logic too!
#Handle base keyboard event, plus any inherited from the level
current_level.handle_keyboard_event(event)
pygame.quit()
|
StarcoderdataPython
|
6402189
|
<reponame>mythus/python-designateclient
from __future__ import print_function
import logging
from keystoneauth1.identity import generic
from keystoneauth1 import session as keystone_session
from designateclient import shell
from designateclient.v2 import client
logging.basicConfig(level='DEBUG')
auth = generic.Password(
auth_url=shell.env('OS_AUTH_URL'),
username=shell.env('OS_USERNAME'),
password=<PASSWORD>('<PASSWORD>'),
project_name=shell.env('OS_PROJECT_NAME'),
project_domain_id='default',
user_domain_id='default')
session = keystone_session.Session(auth=auth, timeout=10)
client = client.Client(session=session)
pages = []
fetch = 1
while fetch:
kw = {'limit': 3}
if pages:
# marker is the latest page with the last item.
kw['marker'] = pages[-1][-1]['id']
page = client.zones.list(**kw)
if not page:
break
pages.append(page)
for page in pages:
print(page)
|
StarcoderdataPython
|
5064882
|
from datetimex import getTime
from datetimex.error import WrongfulError
from datetimex.formatTime import FormatTime
from datetimex.formatChinese import FormatChinese
from datetimex.legalizationTime import LegalizationTime
def getTimeString(strTime):
"""
    Convert the incoming Chinese time string into a string whose Chinese numerals are replaced by Arabic digits.
    For example: input "三年之后" ("three years later") returns "3年之后".
    If anything goes wrong during conversion, raise an illegal-string exception to the caller.
"""
formatStrTimeObject = FormatChinese()
try:
timeStr = formatStrTimeObject.format(strTime)
except Exception as e:
raise WrongfulError('The incoming string is illegal and cannot be converted to digital time')
return timeStr
def getTimeData():
"""
    Get the preliminary parsed time. The return value is a tuple whose items are themselves tuples,
    e.g. ((None, None), (None, None), ('天', -2), (None, None), (None, None), ('早上', 0), ('点', -3), ('半', 0.5), (None, None)).
    The items are the preliminary results for (year, month, day, week number, weekday, AM/PM, hour, minute, second).
    If every item is None, raise an illegal-string exception.
"""
year = getTime.GetYear().get()
month = getTime.GetMonth().get()
day = getTime.GetDay().get()
weekNum = getTime.GetWeekNum().get()
week = getTime.GetWeek().get()
ampm = getTime.GetAmPm().get()
hour = getTime.GetHour().get()
minute = getTime.GetMinute().get()
second = getTime.GetSecond().get()
if (year[0] == None and year[1] == None and month[0] == None and month[1] == None and day[0] == None and day[1] == None
        and weekNum[0] == None and weekNum[1] == None and week[0] == None and week[1] == None and ampm[0] == None and ampm[1] == None
and hour[0] == None and hour[1] == None and minute[0] == None and minute[1] == None and second[0] == None and second[1] == None):
raise WrongfulError('The incoming string is illegal and cannot be converted to digital time')
return (year,month,day,weekNum,week,ampm,hour,minute,second)
def getTimeFormatData(timeData,now):
"""
    Get the second-stage time result. The input is the tuple from the preliminary stage and the return type is a string,
    e.g. 2018|12|11|X|X|AM|07|30|29.
    The '|' separators make later splitting easier. Results at this stage may still contain negative numbers,
    and 'X' marks parts that were not resolved, usually the week number and the weekday.
    When the week number and weekday are 'X', they need further processing later.
"""
formatTimeObject = FormatTime(timeData,now)
try:
time = formatTimeObject.format()
except Exception as e:
raise WrongfulError('The incoming string is illegal and cannot be converted to digital time')
return time
def getLegalizationTime(time,timeNow):
"""
    Get the third-stage time result. The input is the string from the second stage and the return value is a string,
    e.g. 20181211492AM073029,
    where 2018 is the year, 12 the month, 11 the day, 49 the week of the year (00 is the first week),
    2 the weekday (0 is Sunday, the first day of the week), AM the AM/PM marker, 07 the hour, 30 the minute and 29 the second.
    Normally the result of this method is already legal; an illegal result means the program has hit an unknown bug.
"""
LegalizationTimeObject = LegalizationTime()
try:
time = LegalizationTimeObject.legalization(time,timeNow)
except Exception as e:
raise WrongfulError('The incoming string is illegal and cannot be converted to digital time')
return time
def main(timeStr,now):
    strTime = getTimeString(timeStr) # first convert the Chinese numerals in the incoming string to Arabic digits
getTime.timeString = strTime
getTime.now = now
    getFirstTimeData = getTimeData() # get the preliminary result
    formatTimeData = getTimeFormatData(getFirstTimeData,now) # get the second-stage result
    legalizationTimeData = getLegalizationTime(formatTimeData,now) # get the time after legalization
return legalizationTimeData
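

# Hedged usage sketch (not part of the original module): run the whole pipeline
# on one Chinese time expression. Passing datetime.datetime.now() as `now` is an
# assumption here; the library may expect a different representation of the
# current time.
if __name__ == '__main__':
    import datetime
    print(main('三年之后', datetime.datetime.now()))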
|
StarcoderdataPython
|
3305973
|
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.utils.data import Dataset
import torch.utils.data
import os
from os import listdir
from torchvision import transforms
from numpy import clip
from skimage import io
from skimage.color import rgb2gray
from skimage.util import img_as_float, img_as_ubyte
from skimage.transform import resize
import pandas as pd
import nibabel as nib
import json
import random
from skimage.filters import unsharp_mask
from skimage.transform import resize
import albumentations as A
from MGimage import detect_calcifications_whole_image
def standard_image_preprocess(
img, *args,
new_size=None,
expand_dim=False,
adjust_label=False,
normalize=False,
img_transforms=None,
**kwargs):
h, w = img.shape
if new_size:
img_new = np.zeros(new_size)
img_new[0:h,0:w]=img
img = img_new.copy()
h, w = img.shape
del(img_new)
# Expand dimensions for image if specified
if expand_dim is True:
if len(img.shape) == 2:
img = np.expand_dims(img, axis=0)
if normalize:
if img.max() > 1:
# Remove white color
img = img*(img<=2**16-2)
img = img/(2**16-1)
if img_transforms is not None:
img = img_transforms(img)
return img
def unsharp_transform(img):
return unsharp_mask(img, radius=10., amount=1.5)
def remove_calcs(img, file_name):
save_dir = '/workspace/temp/'
os.makedirs(save_dir,exist_ok=True)
# Find or generate mask
mask_path = os.path.join(save_dir,file_name+'.nii.gz')
try:
mask = nib.load(mask_path).get_data()
# print('Mask loaded from file')
except:
if len(img.shape)==3:
img2 =img.squeeze()
else:
img2=img
    h, w = img2.shape  # use the squeezed image so 3D inputs with a singleton dim do not break the unpack
mask = detect_calcifications_whole_image(img2,
erosion=0,
method='Ciecholwski',
thr=25).astype(np.int0)
nifti_mask = nib.Nifti1Image(mask, affine=np.eye(4))
nib.save(nifti_mask,mask_path)
# print('Mask generated and saved')
img = img*(1-mask)
return img
class DataProcessor(Dataset):
def __init__(self, img_dir, annot_dir, csv,*args,
resize =(2457, 1996),
transformations=None,
resize_img=False,
give_annot=True,
only_positive=False,
get_class=False,
give_actual_label=False,
give_filename=False,
preprocess=standard_image_preprocess,
augmentations=None, **kwargs):
self.img_dir = img_dir
self.annot_dir = annot_dir
self.df = pd.read_csv(csv)
self.resize = resize
self.transformations = transformations
self.give_annot = give_annot
self.preprocess = preprocess
self.give_actual_label = give_actual_label
self.augmentations=augmentations
self.give_filename=give_filename
if only_positive:
# retain only cases where there are annotations:
self._keep_positive()
if get_class:
self._get_class()
def _augment(self,img,annot):
if self.augmentations is not None:
# the augmentations
bbox_params = A.BboxParams(format='pascal_voc') if annot is not None else None
transform = A.Compose([
A.HorizontalFlip(p=0.5),
A.VerticalFlip(p=0.5),# A.Rotate(p=0.5,border_mode=0,limit=20),
A.OneOf([ A.RandomBrightness(p=1),
A.RandomGamma(p=1),],p=0.9,),
], bbox_params=bbox_params)
boxes_labels = [list(b)+[l] for b,l in zip(annot['boxes'], annot['labels'])]
if len(img.shape)==3:
if (img.shape[0]==1) or (img.shape[0]==3):
img = img.transpose(1,2,0)
img = img.astype(np.float32)
data = transform(image=img, bboxes=boxes_labels)
boxes, labels = [b[:-1] for b in data['bboxes']], [b[-1] for b in data['bboxes']]
img, annot['boxes'], annot['labels'] = data['image'],boxes, labels
if len(annot['labels'])==0:
annot['boxes']=np.zeros((0,4))
annot['labels']=np.zeros((0,))
return img,annot
def _to_tensor(self,img,annot):
if len(img.shape)==3:
if (img.shape[2]==1) or (img.shape[2]==3):
img = img.transpose(2,0,1)
elif len(img.shape)==2:
            img = np.expand_dims(img, axis=0)  # numpy arrays have no unsqueeze; add the channel axis here
img = torch.as_tensor(img, dtype=torch.float32)
if (annot is not None) and ('labels' in annot.keys()):
if len(annot['labels'])==0:
annot['boxes']=np.zeros((0,4))
annot['labels']=np.zeros((0,))
annot['boxes'] = torch.as_tensor(annot['boxes'], dtype=torch.float32)
annot['labels'] = torch.as_tensor(annot['labels'], dtype=torch.int64)
return img,annot
def _get_image_and_annot(self,file_name):
img_path = os.path.join(self.img_dir, file_name+'.nii.gz')
# Load nifti image
img = nib.load(os.path.join(img_path)).get_data()
annot = self._get_annot(file_name)
return img,annot
def __getitem__(self, i):
file_name = self.df.iloc[i]['file_name']
img,annot = self._get_image_and_annot(file_name)
img_path = os.path.join(self.img_dir, file_name+'.nii.gz')
if img.shape[0]==3:
img_list = list()
for i in range(3):
img_list.append(self.preprocess(img[i], new_size = self.resize, expand_dim=True,
adjust_label=False, normalize=True,
img_transforms=self.transformations))
img = np.array(img_list).squeeze()
else:
img = self.preprocess(img, new_size = self.resize, expand_dim=True,
adjust_label=False, normalize=True,
img_transforms=self.transformations)
img, annot = self._augment(img,annot)
img, annot = self._to_tensor(img,annot)
if self.give_annot or self.give_filename:
return img, annot
return img
def __len__(self):
return len(self.df)
def _get_annot(self, file_name):
if self.give_annot:
annot_path = os.path.join(self.annot_dir, file_name+'.json')
annot = json.load(open(annot_path,'r'))
num_objs = len(annot['boxes'])
# treat no boxes case
if len(annot['boxes'])==0:
annot['boxes']=np.zeros((0,4))
annot['labels']=np.zeros((0,))
annot['boxes'] = list(annot['boxes'])
labels = [0]*num_objs
if self.give_actual_label:
labels = list(annot['labels'])
labels = [l+1 for l in labels] # Labels should be 1 and above, 0 corresponds to background
annot["labels"] = labels
return annot
return None
def _get_class(self):
print('Reading annotations')
self.df['annot_exists'] = True
from tqdm import tqdm
for idx in tqdm(range(len(self.df))):
file_name = self.df.iloc[idx]['file_name']
annot = self._get_annot(file_name)
if len(annot['labels'])==0:
self.df['annot_exists'].iloc[idx] = False
def _keep_positive(self):
self._get_class()
self.df = self.df[self.df.annot_exists]
self.df.reset_index(inplace=True)
def threedpreprocess(
img, *args,
new_size=None,
expand_dim=False,
adjust_label=False,
normalize=False,
img_transforms=None,
n_splits = 3,
**kwargs):
d,h,w = img.shape
zvalues = np.linspace(0,d-1,n_splits+1).astype(int)
img_splits = list()
for i in range(len(zvalues)-1):
z0, z1 = zvalues[i], zvalues[i+1]
img_splits.append(img[z0:z1])
# Find mip
img_splits = [arr.max(0) for arr in img_splits]
if new_size is not None:
for i in range(len(img_splits)):
im = img_splits[i]
h, w = im.shape
img_new = np.zeros(new_size)
img_new[0:h,0:w]=im
img_splits[i] = img_new.copy()
img = np.array(img_splits)
if normalize:
if img.max() > 1:
img = img/(2**16-1)
return img
def change_slice(file_name):
list_fname = file_name.split('_')
slice_id = int(list_fname[-1])
# shift one of -2,-1,0,1,2 slices
slice_id = slice_id+random.randint(-2,2)
list_fname[-1]=str(slice_id)
return '_'.join(list_fname)
class ModDataProcessor(DataProcessor):
def _get_image_and_annot(self,file_name):
        orig_file_name = file_name  # keep the unshifted name so retries below can re-randomise from it
        file_name = change_slice(file_name)
img_path = os.path.join(self.img_dir, file_name+'.nii.gz')
while not os.path.exists(img_path):
            file_name = orig_file_name
file_name = change_slice(file_name)
img_path = os.path.join(self.img_dir, file_name+'.nii.gz')
# Load image
try:
img = nib.load(os.path.join(img_path)).get_data()
except:
            file_name = orig_file_name
img_path = os.path.join(self.img_dir, file_name+'.nii.gz')
img = nib.load(os.path.join(img_path)).get_data()
annot = self._get_annot(file_name)
return img,annot
def shift_slice(file_name,ds):
list_fname = file_name.split('_')
slice_id = int(list_fname[-1])
# shift one of -2,-1,0,1,2 slices
slice_id = slice_id+ds
list_fname[-1]=str(slice_id)
return '_'.join(list_fname)
class ModDataProcessor3slices(DataProcessor):
def __init__(self, *args, random_slice=False, **kwargs):
self.random_slice = random_slice
self.give_filename = kwargs['give_filename'] if ('give_filename' \
in kwargs.keys()) else False
super().__init__(*args, **kwargs)
def _get_image_and_annot(self,file_name):
        orig_file_name = file_name  # keep the unshifted name so retries can re-randomise from it
        if self.random_slice:
            file_name = change_slice(file_name)
img_path = os.path.join(self.img_dir, file_name+'.nii.gz')
while not os.path.exists(img_path):
                file_name = orig_file_name
file_name = change_slice(file_name)
img_path = os.path.join(self.img_dir, file_name+'.nii.gz')
# Getting the other two slices
behind_slice = shift_slice(file_name,-1)
front_slice = shift_slice(file_name,+1)
img_path_behind = os.path.join(self.img_dir, behind_slice+'.nii.gz')
img_path_front = os.path.join(self.img_dir, front_slice+'.nii.gz')
if os.path.exists(img_path_behind) and os.path.exists(img_path_front):
slices_names = [behind_slice, file_name, front_slice]
elif (not os.path.exists(img_path_behind)) and os.path.exists(img_path_front):
slices_names = [file_name, front_slice, shift_slice(file_name,+2)]
else:
slices_names = [shift_slice(file_name,-2), behind_slice, file_name]
img_list = list()
for f in slices_names:
img_list.append(nib.load(os.path.join(self.img_dir, f+'.nii.gz')).get_data())
img = np.array(img_list, dtype=np.float32)
annot = self._get_annot(file_name)
if self.give_filename:
return img, {'file_name':file_name}
return img, annot
class ModDataProcessor3slicesNoCalcs(DataProcessor):
def __init__(self, *args, random_slice=False, **kwargs):
self.random_slice = random_slice
self.give_filename = kwargs['give_filename'] if ('give_filename' \
in kwargs.keys()) else False
super().__init__(*args, **kwargs)
def _get_image_and_annot(self,file_name):
        orig_file_name = file_name  # keep the unshifted name so retries can re-randomise from it
        if self.random_slice:
            file_name = change_slice(file_name)
img_path = os.path.join(self.img_dir, file_name+'.nii.gz')
while not os.path.exists(img_path):
                file_name = orig_file_name
file_name = change_slice(file_name)
img_path = os.path.join(self.img_dir, file_name+'.nii.gz')
# Getting the other two slices
behind_slice = shift_slice(file_name,-1)
front_slice = shift_slice(file_name,+1)
img_path_behind = os.path.join(self.img_dir, behind_slice+'.nii.gz')
img_path_front = os.path.join(self.img_dir, front_slice+'.nii.gz')
if os.path.exists(img_path_behind) and os.path.exists(img_path_front):
slices_names = [behind_slice, file_name, front_slice]
elif (not os.path.exists(img_path_behind)) and os.path.exists(img_path_front):
slices_names = [file_name, front_slice, shift_slice(file_name,+2)]
else:
slices_names = [shift_slice(file_name,-2), behind_slice, file_name]
img_list = list()
for f in slices_names:
img_list.append(nib.load(os.path.join(self.img_dir, f+'.nii.gz')).get_data())
# Remove calcs
for i in range(len(slices_names)):
fname = slices_names[i]
img_list[i]= remove_calcs(img_list[i],fname)
img = np.array(img_list, dtype=np.float32)
annot = self._get_annot(file_name)
if self.give_filename:
return img, {'file_name':file_name}
return img, annot
## SAMPLER
# From https://github.com/ufoym/imbalanced-dataset-sampler/
class ImbalancedDatasetSampler(torch.utils.data.sampler.Sampler):
"""Samples elements randomly from a given list of indices for imbalanced dataset
Arguments:
indices (list, optional): a list of indices
num_samples (int, optional): number of samples to draw
callback_get_label func: a callback-like function which takes two arguments - dataset and index
"""
def __init__(self, dataset, indices=None, num_samples=None, callback_get_label=None):
# if indices is not provided,
# all elements in the dataset will be considered
self.indices = list(range(len(dataset))) \
if indices is None else indices
# define custom callback
self.callback_get_label = callback_get_label
# if num_samples is not provided,
# draw `len(indices)` samples in each iteration
self.num_samples = len(self.indices) \
if num_samples is None else num_samples
# distribution of classes in the dataset
label_to_count = {}
for idx in self.indices:
label = self._get_label(dataset, idx)
if label in label_to_count:
label_to_count[label] += 1
else:
label_to_count[label] = 1
# weight for each sample
weights = [1.0 / label_to_count[self._get_label(dataset, idx)]
for idx in self.indices]
self.weights = torch.DoubleTensor(weights)
def _get_label(self, dataset, idx):
return dataset.df.annot_exists.iloc[idx]
def __iter__(self):
return (self.indices[i] for i in torch.multinomial(
self.weights, self.num_samples, replacement=True))
def __len__(self):
return self.num_samples
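

# Hedged usage sketch (directory layout, CSV path and batch size below are
# hypothetical, not taken from the original project): build the dataset with
# per-image class flags, oversample positive cases with the sampler above, and
# wrap everything in a DataLoader for a detection-style training loop.
if __name__ == '__main__':
    dataset = DataProcessor(
        img_dir='/data/images',
        annot_dir='/data/annotations',
        csv='/data/train_split.csv',
        get_class=True)
    sampler = ImbalancedDatasetSampler(dataset)
    loader = torch.utils.data.DataLoader(
        dataset, batch_size=2, sampler=sampler,
        collate_fn=lambda batch: tuple(zip(*batch)))
    images, annots = next(iter(loader))
    print(len(images), annots[0].keys())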
|
StarcoderdataPython
|
1996788
|
import datetime
import pytest
import pytz
from .....product.models import Collection, CollectionChannelListing
from ....tests.utils import assert_graphql_error_with_message, get_graphql_content
@pytest.fixture
def collections_for_sorting_with_channels(channel_USD, channel_PLN):
collections = Collection.objects.bulk_create(
[
Collection(name="Collection1", slug="collection1"),
Collection(name="Collection2", slug="collection2"),
Collection(name="Collection3", slug="collection3"),
Collection(name="Collection4", slug="collection4"),
Collection(name="Collection5", slug="collection5"),
]
)
CollectionChannelListing.objects.bulk_create(
[
CollectionChannelListing(
collection=collections[0],
published_at=None,
is_published=True,
channel=channel_USD,
),
CollectionChannelListing(
collection=collections[1],
published_at=None,
is_published=False,
channel=channel_USD,
),
CollectionChannelListing(
collection=collections[2],
published_at=datetime.datetime(2004, 1, 1, tzinfo=pytz.UTC),
is_published=False,
channel=channel_USD,
),
CollectionChannelListing(
collection=collections[3],
published_at=datetime.datetime(2003, 1, 1, tzinfo=pytz.UTC),
is_published=False,
channel=channel_USD,
),
# second channel
CollectionChannelListing(
collection=collections[0],
published_at=None,
is_published=False,
channel=channel_PLN,
),
CollectionChannelListing(
collection=collections[1],
published_at=None,
is_published=True,
channel=channel_PLN,
),
CollectionChannelListing(
collection=collections[2],
published_at=datetime.datetime(2002, 1, 1, tzinfo=pytz.UTC),
is_published=False,
channel=channel_PLN,
),
CollectionChannelListing(
collection=collections[4],
published_at=datetime.datetime(2001, 1, 1, tzinfo=pytz.UTC),
is_published=False,
channel=channel_PLN,
),
]
)
QUERY_COLLECTIONS_WITH_SORTING_AND_FILTERING = """
query (
$sortBy: CollectionSortingInput,
$filter: CollectionFilterInput, $channel: String
){
collections (
first: 10, sortBy: $sortBy, filter: $filter, channel: $channel
) {
edges {
node {
name
slug
}
}
}
}
"""
@pytest.mark.parametrize(
"sort_by",
[
{"field": "AVAILABILITY", "direction": "ASC"},
{"field": "PUBLICATION_DATE", "direction": "DESC"},
],
)
def test_collections_with_sorting_and_without_channel(
sort_by,
staff_api_client,
permission_manage_products,
):
# given
variables = {"sortBy": sort_by}
# when
response = staff_api_client.post_graphql(
QUERY_COLLECTIONS_WITH_SORTING_AND_FILTERING,
variables,
permissions=[permission_manage_products],
check_no_permissions=False,
)
# then
assert_graphql_error_with_message(response, "A default channel does not exist.")
@pytest.mark.parametrize(
"sort_by, collections_order",
[
(
{"field": "PUBLICATION_DATE", "direction": "ASC"},
["Collection4", "Collection3", "Collection1", "Collection2"],
),
(
{"field": "PUBLICATION_DATE", "direction": "DESC"},
["Collection2", "Collection1", "Collection3", "Collection4"],
),
],
)
def test_collections_with_sorting_and_channel_USD(
sort_by,
collections_order,
staff_api_client,
permission_manage_products,
collections_for_sorting_with_channels,
channel_USD,
):
# given
variables = {"sortBy": sort_by, "channel": channel_USD.slug}
# when
response = staff_api_client.post_graphql(
QUERY_COLLECTIONS_WITH_SORTING_AND_FILTERING,
variables,
permissions=[permission_manage_products],
check_no_permissions=False,
)
# then
content = get_graphql_content(response)
collections_nodes = content["data"]["collections"]["edges"]
for index, collection_name in enumerate(collections_order):
assert collection_name == collections_nodes[index]["node"]["name"]
@pytest.mark.parametrize(
"sort_by, collections_order",
[
(
{"field": "PUBLICATION_DATE", "direction": "ASC"},
["Collection5", "Collection3", "Collection1", "Collection2"],
),
(
{"field": "PUBLICATION_DATE", "direction": "DESC"},
["Collection2", "Collection1", "Collection3", "Collection5"],
),
],
)
def test_collections_with_sorting_and_channel_PLN(
sort_by,
collections_order,
staff_api_client,
permission_manage_products,
collections_for_sorting_with_channels,
channel_PLN,
):
# given
variables = {"sortBy": sort_by, "channel": channel_PLN.slug}
# when
response = staff_api_client.post_graphql(
QUERY_COLLECTIONS_WITH_SORTING_AND_FILTERING,
variables,
permissions=[permission_manage_products],
check_no_permissions=False,
)
# then
content = get_graphql_content(response)
collections_nodes = content["data"]["collections"]["edges"]
for index, collection_name in enumerate(collections_order):
assert collection_name == collections_nodes[index]["node"]["name"]
@pytest.mark.parametrize(
"sort_by",
[
{"field": "AVAILABILITY", "direction": "ASC"},
{"field": "PUBLICATION_DATE", "direction": "ASC"},
],
)
def test_collections_with_sorting_and_not_existing_channel_asc(
sort_by,
staff_api_client,
permission_manage_products,
collections_for_sorting_with_channels,
channel_USD,
):
# given
variables = {"sortBy": sort_by, "channel": "Not-existing"}
# when
response = staff_api_client.post_graphql(
QUERY_COLLECTIONS_WITH_SORTING_AND_FILTERING,
variables,
permissions=[permission_manage_products],
check_no_permissions=False,
)
# then
content = get_graphql_content(response)
assert not content["data"]["collections"]["edges"]
@pytest.mark.parametrize(
"sort_by",
[
{"field": "AVAILABILITY", "direction": "DESC"},
{"field": "PUBLICATION_DATE", "direction": "DESC"},
],
)
def test_collections_with_sorting_and_not_existing_channel_desc(
sort_by,
staff_api_client,
permission_manage_products,
collections_for_sorting_with_channels,
channel_USD,
):
# given
variables = {"sortBy": sort_by, "channel": "Not-existing"}
# when
response = staff_api_client.post_graphql(
QUERY_COLLECTIONS_WITH_SORTING_AND_FILTERING,
variables,
permissions=[permission_manage_products],
check_no_permissions=False,
)
# then
content = get_graphql_content(response)
assert not content["data"]["collections"]["edges"]
|
StarcoderdataPython
|
6503965
|
"""
The chemml.chem module includes (please click on links adjacent to function names for more information):
- Molecule: :func:`~chemml.chem.Molecule`
- XYZ: :func:`~chemml.chem.XYZ`
- CoulombMatrix: :func:`~chemml.chem.CoulombMatrix`
- BagofBonds: :func:`~chemml.chem.BagofBonds`
- RDKitFingerprint: :func:`~chemml.chem.RDKitFingerprint`
- atom_features: :func:`~chemml.chem.atom_features`
- bond_features: :func:`~chemml.chem.bond_features`
- tensorise_molecules: :func:`~chemml.chem.tensorise_molecules`
- Dragon: :func:`~chemml.chem.Dragon`
"""
from .molecule import Molecule
from .molecule import XYZ
from .CoulMat import CoulombMatrix
from .CoulMat import BagofBonds
from .RDKFP import RDKitFingerprint
from .Dragon import Dragon
from .local_features import atom_features
from .local_features import bond_features
from .local_features import num_atom_features
from .local_features import num_bond_features
from .local_features import tensorise_molecules
__all__ = [
'Molecule',
'XYZ',
'CoulombMatrix',
'BagofBonds',
'RDKitFingerprint',
'Dragon',
'atom_features',
'bond_features',
'num_atom_features',
'num_bond_features',
'tensorise_molecules'
]
|
StarcoderdataPython
|
4997189
|
<reponame>UKGovernmentBEIS/BRE_DigitalRegulationNavigator_Alpha
from django import forms
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from crispy_forms.helper import FormHelper
User = get_user_model()
class RegistrationForm(forms.ModelForm):
email = forms.EmailField(
label="Enter your email address",
max_length=254,
required=True,
widget=forms.EmailInput(attrs={"autocomplete": "email"}),
)
class Meta:
model = User
fields = ["email"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.labels_small = True # custom
def save(self, commit=True):
user = super().save(commit=False)
user.username = self.cleaned_data["email"]
user.set_unusable_password()
if commit:
user.save()
return user
class LoginForm(forms.Form):
""" See: django.contrib.auth.forms.AuthenticationForm """
email = forms.EmailField(
label="Enter your email address",
max_length=254,
required=True,
widget=forms.EmailInput(attrs={"autocomplete": "email"}),
)
error_messages = {
"invalid_login": "Please enter a correct email address.",
"inactive": "This account is inactive.",
}
def __init__(self, *args, **kwargs):
self.user_cache = None
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.labels_small = True # custom
def clean(self):
cleaned_data = super().clean()
email = self.cleaned_data.get("email")
if email is not None:
try:
self.user_cache = User.objects.get(email=email)
if not self.user_cache.is_active:
raise ValidationError(
self.error_messages["inactive"],
code="inactive",
)
except User.DoesNotExist:
raise ValidationError(
self.error_messages["invalid_login"],
code="invalid_login",
)
return cleaned_data
def get_user(self):
return self.user_cache
|
StarcoderdataPython
|
8178673
|
<gh_stars>0
"""
a)
State Representation: (matrix, (zero_pos_row, zero_pos_col))
Initial State: (randomized matrix, random position in matrix)
(matrix should have the numbers from 0 to rows*cols)
    Operators: (zero_pos_row - 1 >= 0)   moveup    -> (matrix, (zero_pos_row, zero_pos_col)) => (upt_matrix, (zero_pos_row - 1, zero_pos_col))
               (zero_pos_row + 1 < rows) movedown  -> (matrix, (zero_pos_row, zero_pos_col)) => (upt_matrix, (zero_pos_row + 1, zero_pos_col))
               (zero_pos_col - 1 >= 0)   moveleft  -> (matrix, (zero_pos_row, zero_pos_col)) => (upt_matrix, (zero_pos_row, zero_pos_col - 1))
               (zero_pos_col + 1 < cols) moveright -> (matrix, (zero_pos_row, zero_pos_col)) => (upt_matrix, (zero_pos_row, zero_pos_col + 1))
    upt_matrix is the same matrix with the element at the new zero position swapped into the previous zero position.
All operators have a cost of 1.
Objective Test: Check if the matrix is ordered and the zero is positioned in the last cell
"""
from algorithms import *
from functools import reduce
import operator
import numpy as np
import copy
class NPuzzle:
@staticmethod
def flatten(matrix):
return reduce(operator.concat, matrix)
def __init__(self, matrix, zero_pos, previousNode=None, distance=0):
all_elems_list = sorted(NPuzzle.flatten(matrix))
if (all_elems_list != list(range(len(matrix) * len(matrix[0])))):
raise ValueError('Invalid Input Matrix')
x, y = zero_pos
if (matrix[x][y] != 0):
raise ValueError('Zero position is not correct')
self.matrix = matrix
self.zero_pos = zero_pos
self.distance = distance
self.previousNode = previousNode
def __eq__(self, other):
if isinstance(other, self.__class__):
return NPuzzle.flatten(self.matrix) == NPuzzle.flatten(
other.matrix) and self.zero_pos == other.zero_pos
return False
def __repr__(self):
matrix_repr = np.array(self.matrix)
return f'\n{matrix_repr}\n'
def __str__(self):
matrix_repr = np.array(self.matrix)
return f'\n{matrix_repr}\n'
def __lt__(self, other):
return self.distance < other.distance
def edgeNodes(self, distance=0):
edgeNodesList = []
x, y = self.zero_pos
if (x - 1 >= 0):
matrixcopy = copy.deepcopy(self.matrix)
matrixcopy[x][y] = matrixcopy[x - 1][y]
matrixcopy[x - 1][y] = 0
edgeNodesList.append(
NPuzzle(matrixcopy, (x - 1, y), self, distance))
if (x + 1 < len(self.matrix)):
matrixcopy = copy.deepcopy(self.matrix)
matrixcopy[x][y] = matrixcopy[x + 1][y]
matrixcopy[x + 1][y] = 0
edgeNodesList.append(
NPuzzle(matrixcopy, (x + 1, y), self, distance))
if (y - 1 >= 0):
matrixcopy = copy.deepcopy(self.matrix)
matrixcopy[x][y] = matrixcopy[x][y - 1]
matrixcopy[x][y - 1] = 0
edgeNodesList.append(
NPuzzle(matrixcopy, (x, y - 1), self, distance))
if (y + 1 < len(self.matrix[0])):
matrixcopy = copy.deepcopy(self.matrix)
matrixcopy[x][y] = matrixcopy[x][y + 1]
matrixcopy[x][y + 1] = 0
edgeNodesList.append(
NPuzzle(matrixcopy, (x, y + 1), self, distance))
return edgeNodesList
def condition(node):
return NPuzzle.flatten(node.matrix) == list(
range(1,
len(node.matrix[0]) * len(node.matrix))) + [0]
def heuristic1(node):
row_length = len(node.matrix[0])
sum_h = 0
for row, row_val in enumerate(node.matrix):
for col, item in enumerate(row_val):
sum_h += 1 if item != row * row_length + col + 1 else 0
return sum_h
def heuristic2(node):
curr_pos = {}
correct_pos = {}
row_length = len(node.matrix)
col_length = len(node.matrix[0])
for row, row_val in enumerate(node.matrix):
for col, item in enumerate(row_val):
curr_pos[item] = (row, col)
correct_pos[row * row_length + col + 1] = (row, col)
sum_manhattan = 0
for i in range(1, row_length * col_length):
        sum_manhattan += abs(curr_pos[i][0] -
                             correct_pos[i][0]) + abs(curr_pos[i][1] -
                                                      correct_pos[i][1])
return sum_manhattan
initial = NPuzzle(
[[5, 1, 3, 4], [2, 0, 7, 8], [10, 6, 11, 12], [9, 13, 14, 15]], (1, 1))
print(ucost(initial, condition))
print('-------H1----------')
print(greedy(initial, condition, heuristic1))
print('-------H1----------')
print(astar(initial, condition, heuristic1))
print('-------H2----------')
print(greedy(initial, condition, heuristic2))
print('-------H2----------')
print(astar(initial, condition, heuristic2))
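
# Small illustrative check (added for clarity, not in the original exercise):
# a 2x2 board with the blank in the last cell already satisfies the objective
# test, and expanding it shows the move operators described at the top.
example = NPuzzle([[1, 2], [3, 0]], (1, 1))
print(condition(example))    # True - board is ordered with 0 in the last cell
print(example.edgeNodes())   # neighbour states reached by sliding the blank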
|
StarcoderdataPython
|
5071328
|
#!/usr/bin/env python
import numpy as np
import cv2
import rospy
from std_msgs.msg import String
from sensor_msgs.msg import Image
from sensor_msgs.msg import PointCloud2
from cv_bridge import CvBridge, CvBridgeError
class Kinect:
def __init__(self):
self.bridge = CvBridge()
self.image=Image()
self.image_depth=Image()
self.cloud=PointCloud2()
self.has_image=0
self.has_depth=0
self.has_cloud=0
def set_image(self,data):
try:
self.image = self.bridge.imgmsg_to_cv2(data, "bgr8")
self.has_image=1
except CvBridgeError as e:
print(e)
def set_image_depth(self,data):
try:
            self.image_depth = self.bridge.imgmsg_to_cv2(data, "mono16")
self.has_depth=1
except CvBridgeError as e:
print(e)
def set_point(self,data):
self.cloud.header=data.header
self.cloud.height=data.height
self.cloud.width=data.width
self.cloud.is_bigendian=data.is_bigendian
self.cloud.point_step=data.point_step
self.cloud.row_step=data.row_step
self.cloud.is_dense=data.is_dense
self.cloud.data= data.data;
self.cloud.fields = data.fields
        self.has_cloud=1
def show_image(self):
if self.has_image == 1:
cv2.imshow('image',self.image)
k=cv2.waitKey(1)
def show_image_depth(self):
if self.has_depth == 1:
cv2.imshow('image3D',self.image_depth)
k=cv2.waitKey(1)
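
# Hedged usage sketch (topic names are assumptions and depend on the camera
# driver in use): subscribe the Kinect helpers above to RGB, depth and point
# cloud topics, then display frames as they arrive.
if __name__ == '__main__':
    rospy.init_node('kinect_viewer')
    kin = Kinect()
    rospy.Subscriber('/camera/rgb/image_color', Image, kin.set_image)
    rospy.Subscriber('/camera/depth/image_raw', Image, kin.set_image_depth)
    rospy.Subscriber('/camera/depth_registered/points', PointCloud2, kin.set_point)
    rate = rospy.Rate(30)
    while not rospy.is_shutdown():
        kin.show_image()
        kin.show_image_depth()
        rate.sleep()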
|
StarcoderdataPython
|
9648803
|
<gh_stars>0
#!/usr/bin/env python
import sys
#import math
from scipy import spatial # Cosine similarity calculation
#import average_vector
import filter_vocab_words
import string_util
def getInternalSet( arr_words, model):
    """
    Get IntS (internal, kernel) words for the words in the array arr_words.
    A word w belongs to IntS when, for every split of the remaining words:
    (1) S1 and S2 partition arr_words without w,
(2) subsets S1 and S2 are not empty,
(3) dist(S1, S2) > dist(S1 + w, S2),
(4) dist(S1, S2) > dist(S1 , S2 + w),
(where "dist" is Cosine similarity between average vectors of S1 and S2 - subsets of vectors),
i.e. the word w makes close any subsets S1 and S2 after adding.
Parameters
----------
arr_words : array of Strings
Array of source words, for example a synonym set or a sentence' words.
model : object
Word2Vec model.
Returns
-------
array of Strings
Internal subset for the source list of words.
Empty array if there are no such words.
"""
result_int_s = []
DEBUG_PRINT = False #True
arr_words = filter_vocab_words.filterVocabWords( arr_words, model.vocab )
#print string_util.joinUtf8( ",", arr_words ) # after filter, now there are only words with vectors
len_words = len(arr_words)
#print "len_words = {}".format( len_words )
if len_words < 3:
return [] # it is possible calculate IntS only when there are >= 3 words
# current_synset = lib.synset.Synset()
# current_synset.headword = arr_words[0] # let's first element in synset is a headword (? target word)
# current_synset.line = line
# syn_rank = dict() # integer
# syn_centrality = dict() # float
# syn_internal = dict() # boolean (true for IntS, for synonyms, which always make subsets more nearer)
# let's take all subsets for every 'out' element
i=0
while (i < len_words):
gr = arr_words[:]
# extract the element 'out' which is under consideration
test_word = gr.pop(i)
#test_word_counter_int = 0
#test_word_counter_float = 0
sim12_greater_sim0_always = True
for j in range(0, len(gr)):
for l in range(j, len(gr)-1):
gr1 = gr[j:l+1]
gr2 = gr[0:j]+gr[l+1:len(gr)]
if DEBUG_PRINT:
print u"{} | gr1={} | gr2={}".format( test_word, string_util.joinUtf8( ",", gr1 ),
string_util.joinUtf8( ",", gr2 ) )
gr1_and_test_word = gr1[:]
gr1_and_test_word.append( test_word )
gr2_and_test_word = gr2[:]
gr2_and_test_word.append( test_word )
sim0 = model.n_similarity(gr1, gr2)
sim1 = model.n_similarity(gr1_and_test_word, gr2)
sim2 = model.n_similarity(gr1, gr2_and_test_word)
if DEBUG_PRINT:
print "sim0 = {:5.3f}".format( sim0 )
print "sim1 = {:5.3f}".format( sim1 )
print "sim2 = {:5.3f}".format( sim2 )
if sim0 > sim1 or sim0 > sim2:
sim12_greater_sim0_always = False
if DEBUG_PRINT:
a = 1 if sim1 > sim0 else -1
b = 1 if sim2 > sim0 else -1
#test_word_counter_int += (a + b)/2
#test_word_counter_float += (sim1 - sim0) + (sim2 - sim0)
#print "test_word_counter_int = {}".format( test_word_counter_int )
#print "test_word_counter_float = {}".format( test_word_counter_float )
if DEBUG_PRINT:
print ("---")
#syn_rank [test_word] = test_word_counter_int;
#syn_centrality[test_word] = test_word_counter_float;
#syn_internal [test_word] = sim12_greater_sim0_always;
if sim12_greater_sim0_always:
result_int_s.append( test_word )
if DEBUG_PRINT:
print ("+++++++")
print
i += 1
return result_int_s
# This function should be called if the function getInternalSet failed (returned empty IntS).
def getInternalSetWithReducing( arr_words, target_word, model):
"""
Get IntS (internal, kernel) words for words in the array arr_words.
If |IntS (arr_words)| == 0 then try reduce arr_words,
(1) until |IntS (reduced arr_words)| > 0
(2) and set (reduced arr_words) contains target_word
Parameters
----------
arr_words : array of Strings
Array of source words, for example a synonym set or a sentence' words.
target_word : String
(1) arr_words contain target_word,
(2) the result IntS should contain target_word too.
model : object
Word2Vec model.
Returns
-------
array of Strings
Internal subset for the source list of words (arr_words) or for subset of arr_words.
Empty array if there are no such words.
"""
#result_int_s = []
arr_words = filter_vocab_words.filterVocabWords( arr_words, model.vocab )
#print string_util.joinUtf8( ",", arr_words ) # after filter, now there are only words with vectors
#if len(arr_words) < 3:
# return []
while len(arr_words) >= 3: # it is possible calculate IntS only when there are >= 3 words
int_s = getInternalSet (arr_words, model)
if len( int_s ) > 0:
return int_s
# then now: len (int_s) == 0
# let's find word_remote (1) within arr_words, (2) the most distant word to the target word
target_vector = model [ target_word ]
word_remote = ""
#arr_new = []
sim_min = 1.0
for word in arr_words:
if word == target_word:
continue # let's skip and do not delete target word itself
vector = model [ word ]
sim = 1 - spatial.distance.cosine( target_vector, vector )
#print u"sim({}, {}) = {}".format( target_word, word, sim )
if sim < sim_min:
#print u"UPDATE: new sim {} < sim_min {}, word_remote old = {}, new = {}".format( sim, sim_min, word_remote, word )
sim_min = sim
word_remote = word
#print
if len( word_remote ) == 0: # it is very strange that we did not find any word!
return []
arr_words.remove( word_remote )
print string_util.joinUtf8( ",", arr_words )
return []
#return result_int_s
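
# Hedged usage sketch (model path and vocabulary are illustrative only, and the
# code assumes the pre-4.0 gensim API that exposes model.vocab/n_similarity):
# load word vectors and extract the internal subset around a target word.
if __name__ == '__main__':
    import gensim
    model = gensim.models.KeyedVectors.load_word2vec_format('vectors.bin', binary=True)
    words = [u'car', u'automobile', u'vehicle', u'banana']
    print(getInternalSetWithReducing(words, u'car', model))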
|
StarcoderdataPython
|
8073534
|
<filename>sympy/calculus/tests/test_singularities.py
from sympy import Symbol, exp, log
from sympy.calculus.singularities import (singularities, is_increasing,
is_strictly_increasing, is_decreasing,
is_strictly_decreasing, is_monotonic)
from sympy.sets import Interval
from sympy import oo, S
from sympy.utilities.pytest import XFAIL
x = Symbol('x')
def test_singularities():
x = Symbol('x', real=True)
assert singularities(x**2, x) == ()
assert singularities(x/(x**2 + 3*x + 2), x) == (-2, -1)
@XFAIL
def test_singularities_non_rational():
x = Symbol('x', real=True)
assert singularities(exp(1/x), x) == (0)
assert singularities(log((x - 2)**2), x) == (2)
def test_is_increasing():
assert is_increasing(x**3 - 3*x**2 + 4*x, S.Reals)
assert is_increasing(-x**2, Interval(-oo, 0))
assert is_increasing(-x**2, Interval(0, oo)) is False
assert is_increasing(4*x**3 - 6*x**2 - 72*x + 30, Interval(-2, 3)) is False
def test_is_strictly_increasing():
assert is_strictly_increasing(4*x**3 - 6*x**2 - 72*x + 30, Interval.Ropen(-oo, -2))
assert is_strictly_increasing(4*x**3 - 6*x**2 - 72*x + 30, Interval.Lopen(3, oo))
assert is_strictly_increasing(4*x**3 - 6*x**2 - 72*x + 30, Interval.open(-2, 3)) is False
assert is_strictly_increasing(-x**2, Interval(0, oo)) is False
def test_is_decreasing():
assert is_decreasing(1/(x**2 - 3*x), Interval.open(1.5, 3))
assert is_decreasing(1/(x**2 - 3*x), Interval.Lopen(3, oo))
assert is_decreasing(1/(x**2 - 3*x), Interval.Ropen(-oo, S(3)/2)) is False
assert is_decreasing(-x**2, Interval(-oo, 0)) is False
def test_is_strictly_decreasing():
assert is_decreasing(1/(x**2 - 3*x), Interval.open(1.5, 3))
assert is_decreasing(1/(x**2 - 3*x), Interval.Lopen(3, oo))
assert is_decreasing(1/(x**2 - 3*x), Interval.Ropen(-oo, S(3)/2)) is False
assert is_decreasing(-x**2, Interval(-oo, 0)) is False
def test_is_monotonic():
assert is_monotonic(1/(x**2 - 3*x), Interval.open(1.5, 3))
assert is_monotonic(1/(x**2 - 3*x), Interval.Lopen(3, oo))
assert is_monotonic(x**3 - 3*x**2 + 4*x, S.Reals)
assert is_monotonic(-x**2, S.Reals) is False
|
StarcoderdataPython
|
8190025
|
from genie import parsergen
def show_ip_eigrp_neighbors(uut):
"""
Parsing show ip eigrp neigbors using parsergen
sample output
EIGRP-IPv4 Neighbors for AS(100)
H Address Interface Hold Uptime SRTT RTO Q Seq
(sec) (ms) Cnt Num
1 10.1.1.3 Tu1 10 02:51:11 26 156 0 3
0 10.1.1.4 Tu1 12 02:51:12 5 100 0 4
""" # noqa
# Use connect method to initiate connection to the device under test
if not uut.is_connected():
uut.connect()
# collect show command output
command = 'show ip eigrp neighbors'
output = uut.execute(command)
try:
headers = ["H", "Address", "Interface", "Hold",
"Uptime", "SRTT", "RTO", " Q ", "Seq"]
label_fields = ["H", "Address", "Interface", "Uptime",
"SRTT", "RTO", "QCnt", "SeqNum"]
eigrp_dict = parsergen.oper_fill_tabular(device_output=output,
device_os='iosxe',
header_fields=headers,
label_fields=label_fields,
index=[1])
if '' in eigrp_dict.entries:
del eigrp_dict.entries['']
return eigrp_dict.entries
except Exception as e:
pass
|
StarcoderdataPython
|
247631
|
from typing import List  # needed when running outside the LeetCode harness


class Solution:
def findKthPositive(self, arr: List[int], k: int) -> int:
missing = []
limit = arr[-1]
nums = set(arr)
for i in range(1,limit):
if i not in nums:
missing.append(i)
if len(missing) >= k:
return missing[k-1]
else:
return limit + k - len(missing)
|
StarcoderdataPython
|
6557993
|
"""CUCM AXL Device APIs."""
from .._internal_utils import flatten_signature_kwargs
from .base import DeviceAXLAPI
from .base import SimpleAXLAPI
class CommonDeviceConfig(DeviceAXLAPI):
_factory_descriptor = "common_device_config"
supported_methods = ["model", "create", "add", "get", "list", "update", "remove", "apply", "reset"]
def add(self, name, softkeyTemplateName=None, userLocale=None, **kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class CommonPhoneConfig(DeviceAXLAPI):
_factory_descriptor = "common_phone_profile"
supported_methods = ["model", "create", "add", "get", "list", "update", "remove", "apply", "reset"]
def add(self, name, unlockPwd=None, featureControlPolicy=None, **kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class CtiRoutePoint(DeviceAXLAPI):
_factory_descriptor = "cti_route_point"
def add(self, name, devicePoolName, product="CTI Route Point", protocol="SCCP", **kwargs):
if "class" not in kwargs: # workaround for restricted 'class' attribute
kwargs["class"] = "CTI Route Point"
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class DefaultDeviceProfile(SimpleAXLAPI):
_factory_descriptor = "default_device_profile"
supported_methods = ["model", "create", "add", "get", "update", "list", "remove", "options"]
def add(self, name, product, phoneButtonTemplate="Universal Device Template Button Layout", softkeyTemplate=None,
protocol="SIP", protocolSide="User", **kwargs):
# the name is not obvious in the UI. It appears to default to a concat of product and protocol.
# it may be useful to log a warning for this...
if "class" not in kwargs: # workaround for restricted 'class' attribute
kwargs["class"] = "Device Profile"
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class DeviceProfile(SimpleAXLAPI):
_factory_descriptor = "udp"
supported_methods = ["model", "create", "add", "get", "update", "list", "remove", "options"]
def add(self, name, product, phoneTemplateName,
protocol="SIP",
**kwargs):
if "class" not in kwargs: # workaround for restricted 'class' attribute
kwargs["class"] = "Device Profile"
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class FeatureControlPolicy(SimpleAXLAPI):
_factory_descriptor = "feature_control_policy"
def add(self, name,
features=None,
**kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class IpPhoneServices(SimpleAXLAPI):
_factory_descriptor = "ip_phone_service"
def add(self, serviceName, asciiServiceName, serviceUrl, secureServiceUrl=None, serviceCategory="XML Service",
serviceType="Standard IP Phone Service", enabled=True, enterpriseSubscription=False, parameters=None,
**kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class Line(DeviceAXLAPI):
_factory_descriptor = "line"
supported_methods = [
"model", "create", "add", "get", "update", "list", "remove", "options", "apply", "restart", "reset"
]
def __init__(self, connector, object_factory):
super().__init__(connector, object_factory)
def add(self, pattern, routePartitionName,
usage="Device",
**kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class NetworkAccessProfile(SimpleAXLAPI):
_factory_descriptor = "network_access_profile"
def add(self, name, vpnRequired="Default", proxySettings="None", proxyHostname="", **kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class Phone(DeviceAXLAPI):
_factory_descriptor = "phone"
supported_methods = [
"model", "create", "add", "get", "list", "update", "remove",
"options", "wipe", "lock",
"apply", "restart", "reset",
]
@staticmethod
def _check_confidential_access(confidentialAccess):
"""Workaround for AXL defect not accepting None for 'confidentialAccessMode'"""
if not confidentialAccess['confidentialAccessMode']:
confidentialAccess['confidentialAccessMode'] = ''
return confidentialAccess
def add(self, name, product, devicePoolName, locationName="Hub_None", protocol="SIP",
commonPhoneConfigName="Standard Common Phone Profile", **kwargs):
if "class" not in kwargs: # workaround for restricted 'class' attribute
kwargs["class"] = "Phone"
add_kwargs = flatten_signature_kwargs(self.add, locals())
try:
add_kwargs['confidentialAccess'] = self._check_confidential_access(add_kwargs['confidentialAccess'])
except KeyError:
pass
return super().add(**add_kwargs)
def update(self, **kwargs):
try:
kwargs['confidentialAccess'] = self._check_confidential_access(kwargs['confidentialAccess'])
except KeyError:
pass
return super().update(**kwargs)
def wipe(self, **kwargs):
"""Allows Cisco's newer Android-based devices, like the Cisco DX650,
to be remotely reset to factory defaults, removing user specific settings and data.
:param kwargs: phone name or uuid
:return: None
"""
# check_identifiers(self._wsdl_objects["name_and_guid_model"], **kwargs)
return self._serialize_axl_object("wipe", **kwargs)
def lock(self, **kwargs):
return self._serialize_axl_object("lock", **kwargs)
class PhoneButtonTemplate(DeviceAXLAPI):
_factory_descriptor = "phone_button_template"
supported_methods = ["model", "create", "add", "get", "update", "list", "remove", "apply", "restart"]
def add(self, name, basePhoneTemplateName, **kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class PhoneSecurityProfile(DeviceAXLAPI):
_factory_descriptor = "phone_security_profile"
supported_methods = ["model", "create", "add", "get", "update", "list", "remove", "apply", "restart"]
def add(self, name, phoneType="Universal Device Template", protocol="Protocol Not Specified",
deviceSecurityMode=None, authenticationMode="By Null String", keySize=1024, transportType="TCP+UDP",
sipPhonePort=5060, **kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class RecordingProfile(SimpleAXLAPI):
_factory_descriptor = "recording_profile"
def add(self, name, recorderDestination, recordingCssName=None, **kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class RemoteDestination(SimpleAXLAPI):
_factory_descriptor = "remote_destination"
def add(self, destination, ownerUserId, name=None, enableUnifiedMobility=True, enableMobileConnect=True,
isMobilePhone=True, remoteDestinationProfileName=None, dualModeDeviceName=None, lineAssociations=None,
**kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class RemoteDestinationProfile(SimpleAXLAPI):
_factory_descriptor = "rdp"
def add(self, name, devicePoolName, userId, rerouteCallingSearchSpaceName=None, callingSearchSpaceName=None,
lines=None, product="Remote Destination Profile", protocol="Remote Destination", protocolSide="User",
**kwargs):
if "class" not in kwargs: # workaround for restricted 'class' attribute
kwargs["class"] = "Remote Destination Profile"
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class SdpTransparencyProfile(SimpleAXLAPI):
_factory_descriptor = "sdp_transparency_profile"
def add(self, name, attributeSet, **kwargs):
if "class" not in kwargs:
kwargs["class"] = "Trunk"
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class SipTrunk(DeviceAXLAPI):
_factory_descriptor = "sip_trunk"
def add(self, name, devicePoolName, destinations, product="SIP Trunk", locationName="Hub_None", protocol="SIP",
securityProfileName="Non Secure SIP Trunk Profile", sipProfileName="Standard SIP Profile",
presenceGroupName="Standard Presence Group", protocolSide="Network", **kwargs):
if "class" not in kwargs:
kwargs["class"] = "Trunk"
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class SipProfile(DeviceAXLAPI):
_factory_descriptor = "sip_profile"
supported_methods = ["model", "create", "add", "get", "update", "list", "remove", "options", "apply", "restart"]
def add(self, name, sdpTransparency="Pass all unknown SDP attributes", **kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class SipTrunkSecurityProfile(DeviceAXLAPI):
_factory_descriptor = "sip_trunk_security_profile"
supported_methods = ["model", "create", "add", "get", "update", "list", "remove", "apply", "reset"]
def add(self, name, acceptPresenceSubscription=False, acceptOutOfDialogRefer=False,
acceptUnsolicitedNotification=False, allowReplaceHeader=False, transmitSecurityStatus=False, **kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class SoftKeyTemplate(DeviceAXLAPI):
_factory_descriptor = "softkey_template"
supported_methods = ["model", "create", "add", "get", "update", "list", "remove", "apply", "restart"]
def add(self, name, description,
baseSoftkeyTemplateName="Standard User",
**kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class SoftKeySet(SimpleAXLAPI):
_factory_descriptor = "softkey_set"
supported_methods = ["get", "update"]
class UniversalDeviceTemplate(SimpleAXLAPI):
_factory_descriptor = "udt"
def add(self, name, devicePool, directoryNumber=None, lineLabel=None, displayCallerId=None, callingSearchSpace=None,
sipProfile="Standard SIP Profile", commonPhoneProfile="Standard Common Phone Profile",
phoneButtonTemplate="Universal Device Template Button Layout",
deviceSecurityProfile="Universal Device Template - Model-independent Security Profile",
blfPresenceGroup="Standard Presence group", location="Hub_None", **kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class UniversalLineTemplate(SimpleAXLAPI):
_factory_descriptor = "ult"
def add(self, name, routePartition=None, lineDescription=None, callingSearchSpace=None, voiceMailProfile=None,
alertingName=None, rejectAnonymousCall=False, # override inconsistency between normal line add and ULT
blfPresenceGroup="Standard Presence group", **kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class WifiHotspot(SimpleAXLAPI):
_factory_descriptor = "wifi_hotspot"
def add(self, name, ssidPrefix, frequencyBand="Auto", userModifiable="Allowed", authenticationMethod="None",
**kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class WLANProfile(SimpleAXLAPI):
_factory_descriptor = "wlan_profile"
def add(self, name, ssid, frequencyBand="Auto", userModifiable="Allowed", authMethod="EAP-FAST",
networkAccessProfile=None, userName="", password="", **kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class WlanProfileGroup(SimpleAXLAPI):
_factory_descriptor = "wlan_profile_group"
def add(self, name,
members=None,
**kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
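# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). It assumes an AXL
# `connector` and `object_factory` have already been built elsewhere in the
# package; those two names and all the values below are placeholders.
#
#     phone_api = Phone(connector, object_factory)
#     phone_api.add(name="SEP0011223344AA",
#                   product="Cisco 8861",
#                   devicePoolName="Default")
#
# add() only fills in the defaults declared in its signature (protocol,
# common phone config, the restricted 'class' attribute, ...) and forwards
# everything else to the base class.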
|
StarcoderdataPython
|
5009176
|
<reponame>TrainingByPackt/Intelligent-Projects-Using-Python-eLearning<filename>Lesson06/rbm.py
import numpy as np
import pandas as pd
import tensorflow as tf
import os
print(tf.__version__)
import fire
from elapsedtimer import ElapsedTimer
class recommender:
def __init__(self,mode,train_file,outdir,test_file=None,
user_info_file=None,movie_info_file=None,
batch_size=32,epochs=500,
learning_rate=1e-3,num_hidden=50,
display_step=5):
self.mode = mode
self.train_file = train_file
self.outdir = outdir
self.test_file = test_file
self.batch_size = batch_size
self.learning_rate = learning_rate
self.num_hidden = num_hidden
self.epochs = epochs
self.display_step = display_step
self.user_info_file = user_info_file
self.movie_info_file = movie_info_file
def read_data(self):
if self.mode == 'train':
self.train_data = np.load(self.train_file)
self.num_ranks = self.train_data.shape[2]
self.num_movies = self.train_data.shape[1]
self.users = self.train_data.shape[0]
else:
self.train_df = pd.read_csv(self.train_file)
self.test_data = np.load(self.test_file)
self.test_df = pd.DataFrame(self.test_data,columns=['userid','movieid','rating'])
if self.user_info_file != None:
self.user_info_df = pd.read_csv(self.user_info_file,sep='|',header=None)
self.user_info_df.columns=['userid','age','gender','occupation','zipcode']
if self.movie_info_file != None:
self.movie_info_df = pd.read_csv(self.movie_info_file,sep='|',encoding='latin-1',header=None)
self.movie_info_df = self.movie_info_df[[0,1]]
self.movie_info_df.columns = ['movieid','movie Title']
def next_batch(self):
while True:
ix = np.random.choice(np.arange(self.train_data.shape[0]),self.batch_size)
train_X = self.train_data[ix,:,:]
yield train_X
def __network(self):
self.x = tf.placeholder(tf.float32, [None,self.num_movies,self.num_ranks], name="x")
self.xr = tf.reshape(self.x, [-1,self.num_movies*self.num_ranks], name="xr")
self.W = tf.Variable(tf.random_normal([self.num_movies*self.num_ranks,self.num_hidden], 0.01), name="W")
self.b_h = tf.Variable(tf.zeros([1,self.num_hidden], tf.float32, name="b_h"))
self.b_v = tf.Variable(tf.zeros([1,self.num_movies*self.num_ranks],tf.float32, name="b_v"))
self.k = 2
## Converts the probability into discrete binary states i.e. 0 and 1
def sample_hidden(probs):
return tf.floor(probs + tf.random_uniform(tf.shape(probs), 0, 1))
def sample_visible(logits):
logits = tf.reshape(logits,[-1,self.num_ranks])
sampled_logits = tf.multinomial(logits,1)
sampled_logits = tf.one_hot(sampled_logits,depth = 5)
# reshape the sampled one-hot visible states back into one flat vector per user
logits = tf.reshape(sampled_logits,[-1,self.num_movies*self.num_ranks])
return logits
## Gibbs sampling step
def gibbs_step(x_k):
# x_k = tf.reshape(x_k,[-1,self.num_movies*self.num_ranks])
h_k = sample_hidden(tf.sigmoid(tf.matmul(x_k,self.W) + self.b_h))
x_k = sample_visible(tf.add(tf.matmul(h_k,tf.transpose(self.W)),self.b_v))
return x_k
## Run multiple Gibbs sampling steps starting from an initial point
def gibbs_sample(k,x_k):
for i in range(k):
x_k = gibbs_step(x_k)
# Returns the gibbs sample after k iterations
return x_k
# Contrastive Divergence algorithm
# 1. Through Gibbs sampling locate a new visible state x_sample based on the current visible state x
# 2. Based on the new x, sample a new h as h_sample
self.x_s = gibbs_sample(self.k,self.xr)
self.h_s = sample_hidden(tf.sigmoid(tf.matmul(self.x_s,self.W) + self.b_h))
# Sample hidden states given the visible states
self.h = sample_hidden(tf.sigmoid(tf.matmul(self.xr,self.W) + self.b_h))
# Sample visible states given the hidden states
self.x_ = sample_visible(tf.matmul(self.h,tf.transpose(self.W)) + self.b_v)
# The weight updated based on gradient descent
#self.size_batch = tf.cast(tf.shape(x)[0], tf.float32)
self.W_add = tf.multiply(self.learning_rate/self.batch_size,tf.subtract(tf.matmul(tf.transpose(self.xr),self.h),tf.matmul(tf.transpose(self.x_s),self.h_s)))
self.bv_add = tf.multiply(self.learning_rate/self.batch_size, tf.reduce_sum(tf.subtract(self.xr,self.x_s), 0, True))
self.bh_add = tf.multiply(self.learning_rate/self.batch_size, tf.reduce_sum(tf.subtract(self.h,self.h_s), 0, True))
self.updt = [self.W.assign_add(self.W_add), self.b_v.assign_add(self.bv_add), self.b_h.assign_add(self.bh_add)]
def _train(self):
self.__network()
# TensorFlow graph execution
with tf.Session() as sess:
self.saver = tf.train.Saver()
#saver = tf.train.Saver(write_version=tf.train.SaverDef.V2)
# Initialize the variables of the Model
init = tf.global_variables_initializer()
sess.run(init)
total_batches = self.train_data.shape[0]//self.batch_size
batch_gen = self.next_batch()
# Start the training
for epoch in range(self.epochs):
if epoch < 150:
self.k = 2
if (epoch > 150) & (epoch < 250):
self.k = 3
if (epoch > 250) & (epoch < 350):
self.k = 5
if (epoch > 350) & (epoch < 500):
self.k = 9
# Loop over all batches
for i in range(total_batches):
self.X_train = next(batch_gen)
# Run the weight update
#batch_xs = (batch_xs > 0)*1
_ = sess.run([self.updt],feed_dict={self.x:self.X_train})
# Display the running step
if epoch % self.display_step == 0:
print("Epoch:", '%04d' % (epoch+1))
self.saver.save(sess,os.path.join(self.outdir,'model'), global_step=epoch)
# Do the prediction for all users and all items irrespective of whether they have been rated
self.logits_pred = tf.reshape(self.x_,[self.users,self.num_movies,self.num_ranks])
self.probs = tf.nn.softmax(self.logits_pred,axis=2)
out = sess.run(self.probs,feed_dict={self.x:self.train_data})
recs = []
for i in range(self.users):
for j in range(self.num_movies):
rec = [i,j,np.argmax(out[i,j,:]) +1]
recs.append(rec)
recs = np.array(recs)
df_pred = pd.DataFrame(recs,columns=['userid','movieid','predicted_rating'])
df_pred.to_csv(self.outdir + 'pred_all_recs.csv',index=False)
print("RBM training Completed !")
def inference(self):
self.df_result = self.test_df.merge(self.train_df,on=['userid','movieid'])
# in order to get the original ids we just need to add 1
self.df_result['userid'] = self.df_result['userid'] + 1
self.df_result['movieid'] = self.df_result['movieid'] + 1
if self.user_info_file != None:
self.df_result = self.df_result.merge(self.user_info_df,on=['userid'])
if self.movie_info_file != None:
self.df_result = self.df_result.merge(self.movie_info_df,on=['movieid'])
self.df_result.to_csv(self.outdir + 'test_results.csv',index=False)
print(f'output written to {self.outdir}test_results.csv')
test_rmse = (np.mean((self.df_result['rating'].values - self.df_result['predicted_rating'].values)**2))**0.5
print(f'test RMSE : {test_rmse}')
def main_process(self):
self.read_data()
if self.mode == 'train':
self._train()
else:
self.inference()
if __name__ == '__main__':
with ElapsedTimer('process RBM'):
fire.Fire(recommender)
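# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original script). Because the class is
# exposed through python-fire, training and inference can be driven from the
# command line; file names are placeholders and the exact flag syntax may
# differ between python-fire versions.
#
#     python rbm.py --mode=train --train_file=train_data.npy \
#         --outdir=./rbm_out/ main_process
#     python rbm.py --mode=test --train_file=./rbm_out/pred_all_recs.csv \
#         --test_file=test_data.npy --outdir=./rbm_out/ main_process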
|
StarcoderdataPython
|
12825516
|
import json
file_to_read_from = "toy_data/verb_only/constrained_training_data.txt"
file_to_write_to = "toy_data/verb_only/constrained_predicate_list.txt"
all_preds = set()
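# Each input line is expected to be a JSON object with (at least) the keys
# 'fact1_pred' and 'fact2_pred', each holding a list of predicate strings,
# e.g. {"fact1_pred": ["cause"], "fact2_pred": ["require", "use"]}
# (the example values are illustrative, not taken from the real data).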
with open(file_to_read_from, 'r') as in_file, open(file_to_write_to, "w") as out_file:
for sample in in_file:
row = json.loads(sample)
for pred in row['fact1_pred']:
all_preds.add(pred)
for pred in row['fact2_pred']:
all_preds.add(pred)
preds_sorted = list(all_preds)
preds_sorted.sort()
for pred in preds_sorted:
out_file.write(f"{pred.strip()}\n")
|
StarcoderdataPython
|
5068384
|
from checker_functions import *
# check where we are running
# check the license server exists
# check the license server is a license server
# check all the paths in the thinclient.xml
check_file("./thinclient.xml","thinclient.xml")
validate_xml("./thinclient.xml")
# check for pit file
check_file("./var/opt/Autodesk/Adlm/.config/ProductInformation.pit","pit file")
# check for license.env file and sensible content
check_file("./usr/autodesk/maya2018/bin/License.env","License.env")
validate_file("./usr/autodesk/maya2018/bin/License.env",[["MAYA_LICENSE_METHOD",1,["network","standalone"]]])
|
StarcoderdataPython
|
11233189
|
import _pickle as pickle
import copy
import sys
import numpy as np
import os
import timeit
import torch
import torch.multiprocessing as mp
import shutil
from collections import namedtuple
from functools import partial
from pprint import pprint
import rl_sandbox.constants as c
from rl_sandbox.envs.utils import make_env
from rl_sandbox.envs.fake_env import FakeEnv
from rl_sandbox.utils import DummySummaryWriter, EpochSummary
from rl_sandbox.algorithms.sac_x.schedulers import RecycleScheduler
from rl_sandbox.agents.hrl_agents import SACXAgent
from rl_sandbox.envs.wrappers.absorbing_state import AbsorbingStateWrapper
def buffer_warmup(agent,
env,
buffer,
buffer_preprocess,
transition_preprocess,
experiment_settings,
render=False):
clip_action = experiment_settings.get(
c.CLIP_ACTION, c.DEFAULT_TRAIN_PARAMS[c.CLIP_ACTION])
min_action = experiment_settings.get(c.MIN_ACTION, None)
max_action = experiment_settings.get(c.MAX_ACTION, None)
num_steps = experiment_settings.get(c.NUM_STEPS, 0)
num_episodes = experiment_settings.get(c.NUM_EPISODES, 0)
buffer_preprocess.reset()
curr_obs = env.reset()
curr_obs = buffer_preprocess(curr_obs)
curr_h_state = agent.reset()
curr_step = 0
curr_episode = 0
while True:
if hasattr(env, c.RENDER) and render:
env.render()
action, next_h_state, act_info = agent.compute_action(
obs=curr_obs, hidden_state=curr_h_state)
env_action = action
if clip_action:
env_action = np.clip(
action, a_min=min_action, a_max=max_action)
next_obs, reward, done, env_info = env.step(env_action)
next_obs = buffer_preprocess(next_obs)
info = dict()
info[c.DISCOUNTING] = env_info.get(c.DISCOUNTING, 1)
info.update(act_info)
buffer.push(**transition_preprocess(curr_obs,
curr_h_state,
action,
reward,
done,
info,
next_obs=next_obs,
next_h_state=next_h_state))
curr_obs = next_obs
curr_h_state = next_h_state
curr_step += 1
if curr_step >= num_steps:
break
if done:
buffer_preprocess.reset()
curr_obs = env.reset()
curr_obs = buffer_preprocess(curr_obs)
curr_h_state = agent.reset()
curr_episode += 1
if curr_episode >= num_episodes:
break
def train(agent,
evaluation_agent,
train_env,
evaluation_env,
buffer_preprocess,
experiment_settings,
auxiliary_reward=lambda reward, **kwargs: np.array([reward]),
summary_writer=DummySummaryWriter(),
save_path=None):
if auxiliary_reward is None:
auxiliary_reward = lambda reward, **kwargs: np.array([reward])
# Training Setting
clip_action = experiment_settings.get(
c.CLIP_ACTION, c.DEFAULT_TRAIN_PARAMS[c.CLIP_ACTION])
min_action = experiment_settings.get(c.MIN_ACTION, None)
max_action = experiment_settings.get(c.MAX_ACTION, None)
max_total_steps = experiment_settings.get(
c.MAX_TOTAL_STEPS, c.DEFAULT_TRAIN_PARAMS[c.MAX_TOTAL_STEPS])
# Progress Tracking
curr_episode = experiment_settings.get(
c.CURR_EPISODE, c.DEFAULT_TRAIN_PARAMS[c.CURR_EPISODE])
num_updates = experiment_settings.get(
c.NUM_UPDATES, c.DEFAULT_TRAIN_PARAMS[c.NUM_UPDATES])
returns = experiment_settings.get(
c.RETURNS, c.DEFAULT_TRAIN_PARAMS[c.RETURNS])
cum_episode_lengths = experiment_settings.get(
c.CUM_EPISODE_LENGTHS, c.DEFAULT_TRAIN_PARAMS[c.CUM_EPISODE_LENGTHS])
# Logging
print_interval = experiment_settings.get(
c.PRINT_INTERVAL, c.DEFAULT_TRAIN_PARAMS[c.PRINT_INTERVAL])
save_interval = experiment_settings.get(
c.SAVE_INTERVAL, c.DEFAULT_TRAIN_PARAMS[c.SAVE_INTERVAL])
log_interval = experiment_settings.get(
c.LOG_INTERVAL, c.DEFAULT_TRAIN_PARAMS[c.LOG_INTERVAL])
train_render = experiment_settings.get(
c.TRAIN_RENDER, c.DEFAULT_TRAIN_PARAMS[c.TRAIN_RENDER])
# Evaluation
evaluation_frequency = experiment_settings.get(
c.EVALUATION_FREQUENCY, c.DEFAULT_TRAIN_PARAMS[c.EVALUATION_FREQUENCY])
evaluation_returns = experiment_settings.get(
c.EVALUATION_RETURNS, c.DEFAULT_TRAIN_PARAMS[c.EVALUATION_RETURNS])
evaluation_successes_all_tasks = experiment_settings.get(
c.EVALUATION_SUCCESSES_ALL_TASKS, c.DEFAULT_TRAIN_PARAMS[c.EVALUATION_SUCCESSES_ALL_TASKS])
evaluation_successes = experiment_settings.get(
c.EVALUATION_SUCCESSES, c.DEFAULT_TRAIN_PARAMS[c.EVALUATION_SUCCESSES])
num_evaluation_episodes = experiment_settings.get(
c.NUM_EVALUATION_EPISODES, c.DEFAULT_TRAIN_PARAMS[c.NUM_EVALUATION_EPISODES])
evaluation_render = experiment_settings.get(
c.EVALUATION_RENDER, c.DEFAULT_TRAIN_PARAMS[c.EVALUATION_RENDER])
assert save_path is None or os.path.isdir(save_path)
num_tasks = experiment_settings.get(c.NUM_TASKS, 1)
eps_per_task = int(num_evaluation_episodes / num_tasks)
multitask_returns = np.zeros([num_tasks, eps_per_task])
multitask_successes = np.zeros([num_tasks, eps_per_task])
if hasattr(evaluation_env, 'get_task_successes') and c.AUXILIARY_REWARDS in experiment_settings and \
hasattr(experiment_settings[c.AUXILIARY_REWARDS], '_aux_rewards_str'):
auxiliary_success = partial(
evaluation_env.get_task_successes, tasks=experiment_settings[c.AUXILIARY_REWARDS]._aux_rewards_str)
elif hasattr(evaluation_env, 'get_task_successes') and hasattr(evaluation_env, 'VALID_AUX_TASKS') and \
(auxiliary_reward.__qualname__ in evaluation_env.VALID_AUX_TASKS or
auxiliary_reward.__qualname__ == 'train.<locals>.<lambda>'):
if auxiliary_reward.__qualname__ == 'train.<locals>.<lambda>':
auxiliary_success = partial(evaluation_env.get_task_successes, tasks=['main'])
else:
auxiliary_success = partial(evaluation_env.get_task_successes, tasks=[auxiliary_reward.__qualname__])
else:
auxiliary_success = None
eval = partial(evaluate_policy,
agent=evaluation_agent,
env=evaluation_env,
buffer_preprocess=buffer_preprocess,
num_episodes=num_evaluation_episodes,
clip_action=clip_action,
min_action=min_action,
max_action=max_action,
render=evaluation_render,
auxiliary_reward=auxiliary_reward,
auxiliary_success=auxiliary_success)
parallel_eval_process = None
parallel_eval_q = mp.Queue()
policy_eval_q = mp.Queue()
exploration_strategy = experiment_settings.get(c.EXPLORATION_STRATEGY, None)
done = False
if isinstance(train_env, FakeEnv):
auxiliary_reward = lambda reward, **kwargs: np.array([reward])
try:
returns.append(0)
cum_episode_lengths.append(cum_episode_lengths[-1])
curr_h_state = agent.reset()
curr_obs = train_env.reset()
buffer_preprocess.reset()
curr_obs = buffer_preprocess(curr_obs)
tic = timeit.default_timer()
last_buf_save_path = None
epoch_summary = EpochSummary()
epoch_summary.new_epoch()
for timestep_i in range(cum_episode_lengths[-1], max_total_steps):
if hasattr(train_env, c.RENDER) and train_render:
train_env.render()
action, next_h_state, act_info = agent.compute_action(
obs=curr_obs, hidden_state=curr_h_state)
if timestep_i < experiment_settings.get(c.EXPLORATION_STEPS, 0) and exploration_strategy is not None:
action, _, act_info = exploration_strategy.compute_action(
obs=curr_obs, hidden_state=curr_h_state)
if timestep_i % print_interval == 0:
pprint(f"Action: {action}")
pprint(act_info)
env_action = action
if clip_action:
env_action = np.clip(action,
a_min=min_action,
a_max=max_action)
next_obs, reward, done, env_info = train_env.step(env_action)
next_obs = buffer_preprocess(next_obs)
reward = np.atleast_1d(auxiliary_reward(observation=curr_obs,
action=env_action,
reward=reward,
done=done,
next_observation=next_obs,
info=env_info))
info = dict()
info[c.DISCOUNTING] = env_info.get(c.DISCOUNTING, np.array([1]))
info.update(act_info)
updated, update_info = agent.update(curr_obs,
curr_h_state,
action,
reward,
done,
info,
next_obs,
next_h_state)
curr_obs = next_obs
curr_h_state = next_h_state
returns[-1] += reward
cum_episode_lengths[-1] += 1
if updated:
num_updates += 1
for update_key, update_value in update_info.items():
update_value_mean = update_value
if isinstance(update_value, (list, tuple, np.ndarray)):
if len(update_value) == 0:
continue
update_value_mean = np.mean(update_value)
epoch_summary.log(f"{c.UPDATE_INFO}/{update_key}", update_value, track_min_max=False)
# Tensorboard is slow sometimes, use this log interval to gate amount of information
if num_updates % log_interval == 0:
summary_writer.add_scalar(
f"{c.UPDATE_INFO}/{update_key}", update_value_mean, num_updates)
else:
for update_key, update_value in update_info.items():
epoch_summary.log(f"{c.UPDATE_INFO}/{update_key}", update_value, track_min_max=False)
if done:
curr_h_state = agent.reset()
curr_obs = train_env.reset()
buffer_preprocess.reset()
curr_obs = buffer_preprocess(curr_obs)
# Logging
episode_length = cum_episode_lengths[-1] if curr_episode == 0 else cum_episode_lengths[-1] - \
cum_episode_lengths[-2]
for task_i, task_i_ret in enumerate(returns[-1]):
summary_writer.add_scalar(
f"{c.INTERACTION_INFO}/task_{task_i}/{c.RETURNS}", task_i_ret, timestep_i)
summary_writer.add_scalar(
f"{c.INTERACTION_INFO}/{c.EPISODE_LENGTHS}", episode_length, curr_episode)
if type(agent) == SACXAgent:
summary_writer.add_text(
f"{c.INTERACTION_INFO}/{c.SCHEDULER_TRAJ}", str(update_info[c.SCHEDULER_TRAJ]), curr_episode)
summary_writer.add_text(
f"{c.INTERACTION_INFO}/{c.SCHEDULER_TRAJ_VALUE}", str(update_info[c.SCHEDULER_TRAJ_VALUE]), curr_episode)
epoch_summary.log(f"{c.INTERACTION_INFO}/{c.RETURNS}", returns[-1], axis=0)
epoch_summary.log(f"{c.INTERACTION_INFO}/{c.EPISODE_LENGTHS}", episode_length)
returns.append(0)
cum_episode_lengths.append(cum_episode_lengths[-1])
curr_episode += 1
curr_timestep = timestep_i + 1
if evaluation_frequency > 0 and curr_timestep % evaluation_frequency == 0:
if experiment_settings.get(c.EVALUATION_IN_PARALLEL, False):
if parallel_eval_process is None:
parallel_eval_process = mp.Process(target=parallel_evaluate_policy, args=(
evaluation_agent, eval, parallel_eval_q, policy_eval_q))
parallel_eval_process.start()
print("Parallel eval process started.")
else:
# eval process should be shorter than training, but this will make sure we block until the latest results are available
print("Waiting for latest eval results...")
evaluation_return, evaluation_success, evaluation_success_all_tasks = parallel_eval_q.get()
print("Grabbed latest eval results")
evaluation_returns.append(evaluation_return)
evaluation_successes.append(evaluation_success)
evaluation_successes_all_tasks.append(evaluation_success_all_tasks)
for task_i, task_i_ret in enumerate(evaluation_returns[-1]):
rets_slice = slice(task_i * eps_per_task, task_i * eps_per_task + eps_per_task)
task_i_ret = task_i_ret[rets_slice]
task_i_success = evaluation_success_all_tasks[task_i, rets_slice]
summary_writer.add_scalar(
f"{c.EVALUATION_INFO}/task_{task_i}/{c.AVERAGE_RETURNS}", np.mean(task_i_ret),
timestep_i)
summary_writer.add_scalar(
f"{c.EVALUATION_INFO}/task_{task_i}/{c.EVALUATION_SUCCESSES}", np.mean(task_i_success),
timestep_i)
multitask_returns[task_i] = task_i_ret
multitask_successes[task_i] = task_i_success
epoch_summary.log(f"{c.EVALUATION_INFO}/{c.AVERAGE_RETURNS}", multitask_returns, axis=(0, 2))
epoch_summary.log(f"{c.EVALUATION_INFO}/{c.EVALUATION_SUCCESSES_ALL_TASKS}",
multitask_successes, axis=(0, 2))
epoch_summary.log(f"{c.EVALUATION_INFO}/{c.EVALUATION_SUCCESSES}", evaluation_success)
latest_model = copy.deepcopy(agent.model)
latest_model.to('cpu')
latest_model.device = 'cpu'
policy_eval_q.put(latest_model)
else: # no parallel eval
evaluation_return, evaluation_success, evaluation_success_all_tasks = eval()
evaluation_returns.append(evaluation_return)
evaluation_successes.append(evaluation_success)
evaluation_successes_all_tasks.append(evaluation_success_all_tasks)
for task_i, task_i_ret in enumerate(evaluation_returns[-1]):
rets_slice = slice(task_i * eps_per_task, task_i * eps_per_task + eps_per_task)
task_i_ret = task_i_ret[rets_slice]
task_i_success = evaluation_success_all_tasks[task_i, rets_slice]
summary_writer.add_scalar(
f"{c.EVALUATION_INFO}/task_{task_i}/{c.AVERAGE_RETURNS}", np.mean(task_i_ret), timestep_i)
summary_writer.add_scalar(
f"{c.EVALUATION_INFO}/task_{task_i}/{c.EVALUATION_SUCCESSES}", np.mean(task_i_success), timestep_i)
multitask_returns[task_i] = task_i_ret
multitask_successes[task_i] = task_i_success
epoch_summary.log(f"{c.EVALUATION_INFO}/{c.AVERAGE_RETURNS}", multitask_returns, axis=(0, 2))
epoch_summary.log(f"{c.EVALUATION_INFO}/{c.EVALUATION_SUCCESSES_ALL_TASKS}", multitask_successes, axis=(0, 2))
epoch_summary.log(f"{c.EVALUATION_INFO}/{c.EVALUATION_SUCCESSES}", evaluation_success)
if curr_timestep % print_interval == 0:
epoch_summary.print_summary()
epoch_summary.new_epoch()
if save_path is not None and curr_timestep % save_interval == 0:
curr_save_path = f"{save_path}/{timestep_i}.pt"
print(f"Saving model to {curr_save_path}")
torch.save(agent.learning_algorithm.state_dict(), curr_save_path)
pickle.dump({c.RETURNS: returns if done else returns[:-1],
c.CUM_EPISODE_LENGTHS: cum_episode_lengths if done else cum_episode_lengths[:-1],
c.EVALUATION_RETURNS: evaluation_returns,
c.EVALUATION_SUCCESSES_ALL_TASKS: evaluation_successes_all_tasks,
c.EVALUATION_SUCCESSES: evaluation_successes,},
open(f'{save_path}/{c.TRAIN_FILE}', 'wb'))
if hasattr(agent, c.LEARNING_ALGORITHM) and hasattr(agent.learning_algorithm, c.BUFFER):
buf_save_path = f"{save_path}/{timestep_i}_buffer.pkl"
has_absorbing_wrapper = False
for wrap_dict in experiment_settings[c.ENV_SETTING][c.ENV_WRAPPERS]:
if wrap_dict[c.WRAPPER] == AbsorbingStateWrapper:
has_absorbing_wrapper = True
break
agent.learning_algorithm.buffer.save(buf_save_path, end_with_done=not has_absorbing_wrapper)
if last_buf_save_path is not None and \
os.path.isfile(last_buf_save_path) and os.path.isfile(buf_save_path):
os.remove(last_buf_save_path)
last_buf_save_path = buf_save_path
finally:
if save_path is not None:
torch.save(agent.learning_algorithm.state_dict(),
f"{save_path}/{c.TERMINATION_STATE_DICT_FILE}")
if not done:
returns = returns[:-1]
cum_episode_lengths = cum_episode_lengths[:-1]
pickle.dump(
{c.RETURNS: returns, c.CUM_EPISODE_LENGTHS: cum_episode_lengths,
c.EVALUATION_RETURNS: evaluation_returns},
open(f'{save_path}/{c.TERMINATION_TRAIN_FILE}', 'wb')
)
if hasattr(agent, c.LEARNING_ALGORITHM) and hasattr(agent.learning_algorithm, c.BUFFER):
if save_path is not None:
agent.learning_algorithm.buffer.save(f"{save_path}/{c.TERMINATION_BUFFER_FILE}")
agent.learning_algorithm.buffer.close()
toc = timeit.default_timer()
print(f"Training took: {toc - tic}s")
def parallel_evaluate_policy(evaluation_agent, partial_eval_func, results_q, policy_q):
while True:
print("Eval process waiting for new policy parameters...")
new_model = policy_q.get()
evaluation_agent.model = new_model
print("Latest policy params read, starting evaluation")
evaluation_return, evaluation_success, evaluation_success_all_tasks = partial_eval_func(agent=evaluation_agent)
results_q.put([evaluation_return, evaluation_success, evaluation_success_all_tasks])
print("Parallel eval iteration complete, results loaded to queue")
def evaluate_policy(agent,
env,
buffer_preprocess,
num_episodes,
clip_action,
min_action,
max_action,
render,
auxiliary_reward=lambda reward, **kwargs: np.array([reward]),
auxiliary_success=None,
verbose=False,
forced_schedule=None,
stochastic_policy=False):
# example forced schedule: {0: 2, 90: 0}
eval_returns = []
done_successes = []
aux_successes = []
for _ in range(num_episodes):
eval_returns.append(0)
curr_obs = env.reset()
buffer_preprocess.reset()
curr_obs = buffer_preprocess(curr_obs)
h_state = agent.reset()
done = False
done_successes.append(0)
aux_successes.append([0])
ts = 0
while not done:
if hasattr(env, c.RENDER) and render:
env.render()
if forced_schedule is not None:
for t_key in forced_schedule.keys():
if ts == t_key:
print(f"switching to intention {forced_schedule[ts]}")
agent.high_level_model._intention_i = forced_schedule[ts]
agent.curr_high_level_act = forced_schedule[ts]
if stochastic_policy:
action, h_state, act_info = agent.compute_action(
obs=curr_obs, hidden_state=h_state)
else:
action, h_state, act_info = agent.deterministic_action(
obs=curr_obs, hidden_state=h_state)
if clip_action:
action = np.clip(action, a_min=min_action, a_max=max_action)
next_obs, reward, done, env_info = env.step(action)
next_obs = buffer_preprocess(next_obs)
if env_info.get(c.DONE_SUCCESS, False) or (env_info.get(c.INFOS, [{}])[0].get(c.DONE_SUCCESS, False)):
done_successes[-1] += 1
eval_returns[-1] += np.atleast_1d(auxiliary_reward(observation=curr_obs,
action=action,
reward=reward,
done=done,
next_observation=next_obs,
info=env_info))
if auxiliary_success is not None:
aux_successes[-1] = np.array(auxiliary_success(observation=curr_obs,
action=action,
env_info=env_info['infos'][-1])).astype(int).tolist()
else:
aux_successes[-1] = np.zeros(eval_returns[-1].shape)
curr_obs = next_obs
ts += 1
if verbose:
print(eval_returns[-1], done_successes[-1])
return np.array(eval_returns).T, done_successes, np.array(aux_successes).T
|
StarcoderdataPython
|
6546781
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2015,掌阅科技
All rights reserved.
Summary: test_key.py
Author: WangLichao
Created: 2015-08-18
"""
import sys
import os
sys.path.append(os.path.dirname(os.path.split(os.path.realpath(__file__))[0]))
import unittest
from zyredis.key import Key
class TestKeyModel(unittest.TestCase):
"""测试key生成逻辑
"""
def test_key_with_prefix(self):
"""测试带前缀情况
"""
key = Key("prefix") # 有前缀
mykey = key["abc"]["def"]["ddd"]
assert mykey == "prefix_abc_def_ddd"
def test_key_no_prefix(self):
"""测试没有前缀的情况
"""
key = Key()  # without a prefix
key1 = Key("")
mykey = key["abc"]["def"]
mykey1 = key1["abc"]["def"]
assert mykey == "abc_def"
assert mykey1 == "abc_def"
key2 = Key("")
mykey2 = key2["ttt"]["xxx"]
assert mykey2 == "ttt_xxx"
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
8106049
|
<reponame>bogdancarpusor/flight-price-predictions
from sqlalchemy import Column, Integer, String, TIMESTAMP, Numeric
from .database import db
class Flight(db.Model):
__tablename__ = 'flights'
id = Column(Integer, primary_key=True)
city_from = Column(String(64), index=True)
city_to = Column(String(64), index=True)
distance = Column(Numeric)
airport_from = Column(String(64))
airport_to = Column(String(64))
kiwi_flight_id = Column(String(64))
price = Column(Numeric, index=True)
departure_time = Column(TIMESTAMP(timezone=True))
arrival_time = Column(TIMESTAMP(timezone=True))
flight_number = Column(String(64))
cabin_class = Column(String(64))
airline_code = Column(String(64))
def __repr__(self):
return '<Flight {0} from {1} to {2}>'.format(self.flight_number, self.city_from, self.city_to)
@property
def serialize(self):
return {
'id': self.id,
'city_from': self.city_from,
'city_to': self.city_to,
'distance': float(self.distance),
'airport_from': self.airport_from,
'airport_to': self.airport_to,
'kiwi_flight_id': self.kiwi_flight_id,
'price': float(self.price),
'departure_time': self.departure_time.isoformat(),
'arrival_time': self.arrival_time.isoformat(),
'flight_number': self.flight_number,
'cabin_class': self.cabin_class,
'airline_code': self.airline_code
}
class FindFlightsJob(db.Model):
__tablename__ = 'find_flights_jobs'
id = Column(Integer, primary_key=True)
city_from = Column(String(64), index=True)
city_to = Column(String(64))
departure_date = Column(TIMESTAMP(timezone=True))
status = Column(String(64))
class CronJob(db.Model):
__tablename__ = 'cron_jobs'
id = Column(Integer, primary_key=True)
day_of_week = Column(String(64))
month = Column(String(64))
day_of_month = Column(String(64))
hour = Column(String(64))
minute = Column(String(64))
|
StarcoderdataPython
|
5089919
|
<gh_stars>1-10
# Copyright 2010 by <NAME>. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Code to parse BIG GenePop files.
The difference between this class and the standard Bio.PopGen.GenePop.Record
class is that this one does not read the whole file to memory.
It provides an iterator interface: slower, but consuming much less memory.
Should be used with big files (Thousands of markers and individuals).
See http://wbiomed.curtin.edu.au/genepop/ , the format is documented
here: http://wbiomed.curtin.edu.au/genepop/help_input.html .
Classes:
- FileRecord Holds GenePop data.
Functions:
"""
from Bio.PopGen.GenePop import get_indiv
def read(fname):
"""Parse a file containing a GenePop file.
fname is a file name that contains a GenePop record.
"""
record = FileRecord(fname)
return record
class FileRecord(object):
"""Hold information from a GenePop record.
Attributes:
- marker_len The marker length (2 or 3 digit code per allele).
- comment_line Comment line.
- loci_list List of loci names.
Methods:
- get_individual Returns the next individual of the current population.
- skip_population Skips the current population.
skip_population skips the individuals of the current population, returns
True if there are more populations.
get_individual returns an individual of the current population (or None
if the list ended).
Each individual is a pair composed by individual name and a list of alleles
(2 per marker or 1 for haploid data). Examples::
('Ind1', [(1,2), (3,3), (200,201)])
('Ind2', [(2,None), (3,3), (None,None)])
('Other1', [(1,1), (4,3), (200,200)])
"""
def __init__(self, fname):
"""Initialize the class."""
self.comment_line = ""
self.loci_list = []
self.fname = fname
self.start_read()
def __str__(self):
"""Return (reconstructs) a GenePop textual representation.
This might take a lot of memory.
Marker length will be 3.
"""
marker_len = 3
rep = [self.comment_line + '\n']
rep.append('\n'.join(self.loci_list) + '\n')
current_pop = self.current_pop
current_ind = self.current_ind
self._handle.seek(0)
self.skip_header()
rep.append('Pop\n')
more = True
while more:
res = self.get_individual()
if res is True:
rep.append('Pop\n')
elif res is False:
more = False
else:
name, markers = res
rep.append(name)
rep.append(',')
for marker in markers:
rep.append(' ')
for al in marker:
if al is None:
al = '0'
aStr = str(al)
while len(aStr) < marker_len:
aStr = "".join(['0', aStr])
rep.append(aStr)
rep.append('\n')
self.seek_position(current_pop, current_ind)
return "".join(rep)
def start_read(self):
"""Start parsing a file containing a GenePop file."""
self._handle = open(self.fname)
self.comment_line = self._handle.readline().rstrip()
# We can now have one loci per line or all loci in a single line
# separated by either space or comma+space...
# We will remove all commas on loci... that should not be a problem
sample_loci_line = self._handle.readline().rstrip().replace(',', '')
all_loci = sample_loci_line.split(' ')
self.loci_list.extend(all_loci)
for line in self._handle:
line = line.rstrip()
if line.upper() == 'POP':
break
self.loci_list.append(line)
else:
raise ValueError('No population data found, file probably not GenePop related')
# self._after_pop = True
self.current_pop = 0
self.current_ind = 0
def skip_header(self):
"""Skip the Header. To be done after a re-open."""
self.current_pop = 0
self.current_ind = 0
for line in self._handle:
if line.rstrip().upper() == "POP":
return
def seek_position(self, pop, indiv):
"""Seek a certain position in the file.
Arguments:
- pop - pop position (0 is first)
- indiv - individual in pop
"""
self._handle.seek(0)
self.skip_header()
while pop > 0:
self.skip_population()
pop -= 1
while indiv > 0:
self.get_individual()
indiv -= 1
def skip_population(self):
"""Skip the current population. Returns true if there is another pop."""
for line in self._handle:
if line == "":
return False
line = line.rstrip()
if line.upper() == 'POP':
self.current_pop += 1
self.current_ind = 0
return True
def get_individual(self):
"""Get the next individual.
Returns individual information if there are more individuals
in the current population.
Returns True if there are no more individuals in the current
population, but there are more populations. Next read will
be of the following pop.
Returns False if at end of file.
"""
for line in self._handle:
line = line.rstrip()
if line.upper() == 'POP':
self.current_pop += 1
self.current_ind = 0
return True
else:
self.current_ind += 1
indiv_name, allele_list, ignore = get_indiv(line)
return indiv_name, allele_list
return False
def remove_population(self, pos, fname):
"""Remove a population (by position).
Arguments:
- pos - position
- fname - file to be created with population removed
"""
old_rec = read(self.fname)
with open(fname, "w") as f:
f.write(self.comment_line + "\n")
for locus in old_rec.loci_list:
f.write(locus + "\n")
curr_pop = 0
l_parser = old_rec.get_individual()
start_pop = True
while l_parser:
if curr_pop == pos:
old_rec.skip_population()
curr_pop += 1
else:
if l_parser is True:
curr_pop += 1
start_pop = True
else:
if start_pop:
f.write("POP\n")
start_pop = False
name, markers = l_parser
f.write(name + ",")
for marker in markers:
f.write(' ')
for al in marker:
if al is None:
al = '0'
aStr = str(al)
while len(aStr) < 3:
aStr = "".join(['0', aStr])
f.write(aStr)
f.write('\n')
l_parser = old_rec.get_individual()
def remove_locus_by_position(self, pos, fname):
"""Remove a locus by position.
Arguments:
- pos - position
- fname - file to be created with locus removed
"""
old_rec = read(self.fname)
with open(fname, "w") as f:
f.write(self.comment_line + "\n")
loci_list = old_rec.loci_list
del loci_list[pos]
for locus in loci_list:
f.write(locus + "\n")
l_parser = old_rec.get_individual()
f.write("POP\n")
while l_parser:
if l_parser is True:
f.write("POP\n")
else:
name, markers = l_parser
f.write(name + ",")
marker_pos = 0
for marker in markers:
if marker_pos == pos:
marker_pos += 1
continue
marker_pos += 1
f.write(' ')
for al in marker:
if al is None:
al = '0'
aStr = str(al)
while len(aStr) < 3:
aStr = "".join(['0', aStr])
f.write(aStr)
f.write('\n')
l_parser = old_rec.get_individual()
def remove_loci_by_position(self, positions, fname):
"""Remove a set of loci by position.
Arguments:
- positions - positions
- fname - file to be created with locus removed
"""
old_rec = read(self.fname)
with open(fname, "w") as f:
f.write(self.comment_line + "\n")
loci_list = old_rec.loci_list
positions.sort()
positions.reverse()
posSet = set()
for pos in positions:
del loci_list[pos]
posSet.add(pos)
for locus in loci_list:
f.write(locus + "\n")
l_parser = old_rec.get_individual()
f.write("POP\n")
while l_parser:
if l_parser is True:
f.write("POP\n")
else:
name, markers = l_parser
f.write(name + ",")
marker_pos = 0
for marker in markers:
if marker_pos in posSet:
marker_pos += 1
continue
marker_pos += 1
f.write(' ')
for al in marker:
if al is None:
al = '0'
aStr = str(al)
while len(aStr) < 3:
aStr = "".join(['0', aStr])
f.write(aStr)
f.write('\n')
l_parser = old_rec.get_individual()
def remove_locus_by_name(self, name, fname):
"""Remove a locus by name.
Arguments:
- name - name
- fname - file to be created with locus removed
"""
for i in range(len(self.loci_list)):
if self.loci_list[i] == name:
self.remove_locus_by_position(i, fname)
return
# If we get here the locus does not exist... Maybe raise an exception?
# Although it should be OK... Just a boolean return, maybe?
def remove_loci_by_name(self, names, fname):
"""Remove a loci list (by name).
Arguments:
- names - names
- fname - file to be created with loci removed
"""
positions = []
for i in range(len(self.loci_list)):
if self.loci_list[i] in names:
positions.append(i)
self.remove_loci_by_position(positions, fname)
# If we get here the locus does not exist... Maybe raise an exception?
# Although it should be OK... Just a boolean return, maybe?
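# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): assumes a GenePop
# file named "example.gen" exists. It walks every individual of every
# population using the documented get_individual() protocol (a pair for an
# individual, True at the start of the next population, False at EOF).
#
#     rec = read("example.gen")
#     pop = 0
#     while True:
#         res = rec.get_individual()
#         if res is False:
#             break
#         if res is True:
#             pop += 1
#             continue
#         name, alleles = res
#         print(pop, name, alleles)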
|
StarcoderdataPython
|
1639729
|
#!/usr/bin/python -u
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import json, os, sys, hmac, hashlib, subprocess, time
def getenv_default(env_name, default_value):
if env_name in os.environ and os.environ[env_name]:
return os.environ[env_name]
else:
return default_value
class GitAutoDeploy(BaseHTTPRequestHandler):
def do_POST(self):
try:
secret_url = self.path[1:].split("/")[-1]
if secret_url == os.environ['SECRET_URL']:
sig_received = self.headers.getheader('X-Hub-Signature')[5:]
length = int(self.headers.getheader('content-length'))
body = self.rfile.read(length)
sig_calculated = hmac.new(os.environ['SECRET_KEY'], msg=body, digestmod=hashlib.sha1).hexdigest()
if sig_calculated == sig_received:
print("Valid hook received, rebuilding page")
sys.stdout.flush()
if subprocess.call(['su', '-s', '/bin/sh', '-c', '/usr/sbin/deploy_app.sh', getenv_default('DEPLOY_USER', 'app')]) == 0:
retval=200
else:
retval=500
else:
retval=403
else:
retval=403
except Exception:
retval=500
self.send_response(retval)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write('OHAI! STATUS: %d' % retval)
def log_message(self, format, *args):
return
def main():
if not 'SECRET_KEY' in os.environ or not 'SECRET_URL' in os.environ:
os.system("supervisorctl stop autodeploy")
while True:
time.sleep(1)
try:
server = None
print('GitHub Autodeploy Service Thing v1.0.2-ultrastable started.')
server = HTTPServer(('', 8888), GitAutoDeploy)
server.serve_forever()
except(KeyboardInterrupt, SystemExit) as e:
if server:
server.socket.close()
if __name__ == '__main__':
main()
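# ---------------------------------------------------------------------------
# Hedged test sketch (not part of the original service): it builds the same
# HMAC-SHA1 signature the handler checks and posts a dummy body to the secret
# URL. The key/URL values are placeholders and `requests` is an extra
# dependency used only for this sketch.
#
#     import hmac, hashlib, requests
#     body = b'{"ref": "refs/heads/master"}'
#     sig = 'sha1=' + hmac.new(b'my-secret-key', body, hashlib.sha1).hexdigest()
#     requests.post('http://localhost:8888/my-secret-url', data=body,
#                   headers={'X-Hub-Signature': sig})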
|
StarcoderdataPython
|
173102
|
<reponame>alainivars/utils2devops<filename>utils2devops/aws/security_group.py
import boto3
from utils2devops.aws import SecurityGroup, Gress
"""
AWS configuration files should be present:
~/.aws/credentials
~/.aws/config
"""
def _make_gress(ports):
gress = Gress()
gress.protocol = ports['IpProtocol']
if 'PortRange' in ports:
gress.from_port = ports['FromPort']
gress.to_port = ports['ToPort']
else:
gress.from_port = 0
gress.to_port = 0
gress.cidr_block = ports['IpRanges'][0]['CidrIp'] if len(ports['IpRanges']) else ''
return gress
def list_security_groups(name, state=None):
"""This function does something in aws security_groups. TODO DOC
:param name: The name to use.
:type name: str.
:param state: Current state to be in.
:type state: bool.
:returns: int -- the return code.
:raises: AttributeError, KeyError
"""
session = boto3.Session(profile_name='terraform')
client = session.client(service_name='ec2', region_name='us-east-2')
elements = client.describe_security_groups()
for element in elements['SecurityGroups']:
x = SecurityGroup(element['GroupId'])
x.name = element['GroupName']
x.description = element['Description']
x.vpc_id = element['VpcId']
for ports in element['IpPermissions']:
gress = _make_gress(ports)
x.ingress.append(gress)
for ports in element['IpPermissionsEgress']:
gress = _make_gress(ports)
x.egress.append(gress)
print(x)
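# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): it requires a
# 'terraform' profile in ~/.aws/credentials with access to us-east-2, as
# described in the module docstring. 'name' and 'state' are accepted but
# currently unused by the function.
#
#     if __name__ == '__main__':
#         list_security_groups('ignored')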
|
StarcoderdataPython
|
8151691
|
<filename>accessApp/models.py
from .entities.VehicleEntity import Vehicle
from .entities.AccessEntity import Access
|
StarcoderdataPython
|
11233364
|
from unidef.languages.common.ir_model import *
from unidef.utils.typing_ext import *
def walk_nodes(
node: IrNode,
foreach: Callable[[IrNode, List[Any], Dict[str, Any]], bool],
*args,
**kwargs
):
stop_cond = foreach(node, *args, **kwargs)
if not stop_cond:
for key in node.keys():
value = node.get_field(Attribute(key=key, ty=Any))
if isinstance(value, list):
for v in value:
if isinstance(v, IrNode):
walk_nodes(v, foreach, *args, **kwargs)
elif isinstance(value, IrNode):
walk_nodes(value, foreach, *args, **kwargs)
|
StarcoderdataPython
|
252027
|
<reponame>doyaguillo1997/Data2Gether
from colour import Color
from app.external_sources.csv.services.csv_service import get_load_properties
from app.external_sources.idealista.services.idealista_service import get_last_info
from app.external_sources.idealista.services.idealista_service import get_predictions
from app.geo.services.geo_service import get_zone_childrens
from app.geo.services.geo_service import get_zone_containing_point
def get_geo_elements(parent_id: int):
"""Load web info on geo elements
Args:
parent_id (int): parent geo id
Returns:
list: list of geo zones with color info and historic info
"""
geo_elements = list(get_zone_childrens(parent_id))
# Load the last historic entry for each geo zone
prices = []
for geo_element in geo_elements:
geo_element.historic = get_last_info(geo_element.pk)
geo_element.best_moment = None
if geo_element.historic:
prices.append(geo_element.historic.price)
if geo_element.level.id < 3:
best_moment = get_predictions(geo_element.pk).order_by("-price").first()
if int(best_moment.price) > geo_element.historic.price:
geo_element.best_moment = best_moment
# Range of colors
colors = list(
Color("#b6d148").range_to(Color("green"), int(len(geo_elements) * 100))
)
min_price = min(prices)
max_price = max(prices)
# Unit of color for each increase in price
unit_color = (len(colors) - 1) / (max_price - min_price)
for geo_element in geo_elements:
if geo_element.historic: # There are zones without historic
geo_element.color = str(
colors[int((geo_element.historic.price - min_price) * unit_color)]
)
return geo_elements
def get_wallet(load_id: int):
properties = list()
geo_resume = {"neighborhoods": {}, "districts": {}}
# Get Properties
dirty_properties = list(get_load_properties(load_id))
future_predicions_dates = list(get_predictions(1).values("date"))
# Prepare data structure
future_estimation_resume = {
"date": [date["date"] for date in future_predicions_dates],
"price": [0] * len(future_predicions_dates),
"conf_up": [0] * len(future_predicions_dates),
"conf_low": [0] * len(future_predicions_dates),
}
wallet = {
"account_id": dirty_properties[0].load.account.id,
"load": {
"id": dirty_properties[0].load.id,
"name": dirty_properties[0].load.name,
},
}
for dirty_property in dirty_properties:
property = {}
property["cadastre"] = {
"builded_surface": dirty_property.property.cadastre.private_builded_surface,
"year_built": dirty_property.property.cadastre.year_built,
"cadastral_reference": dirty_property.property.cadastre.cadastral_reference,
}
property["buyed_price"] = dirty_property.property.buyed_price
property["estimated_price"] = dirty_property.property.estimated_price
neighborhood = get_zone_containing_point(
dirty_property.property.cadastre.location, 3
)
property["geo"] = {
"neighborhood": {"id": neighborhood.id, "name": neighborhood.name},
"district": {
"id": neighborhood.parent.id,
"name": neighborhood.parent.name,
},
"location": {
"latitude": dirty_property.property.cadastre.latitude,
"longitude": dirty_property.property.cadastre.longitude,
},
}
# Get Idealista historic info
district_historic = get_predictions(neighborhood.parent.id)
actual_district_price = get_last_info(neighborhood.parent.id).price
index = 0
for historic in list(district_historic):
future_estimation_resume["price"][index] = (
future_estimation_resume["price"][index]
+ (historic.price / actual_district_price)
* dirty_property.property.estimated_price
)
future_estimation_resume["conf_up"][index] = (
future_estimation_resume["conf_up"][index]
+ (historic.conf_up / actual_district_price)
* dirty_property.property.estimated_price
)
future_estimation_resume["conf_low"][index] = (
future_estimation_resume["conf_low"][index]
+ (historic.conf_low / actual_district_price)
* dirty_property.property.estimated_price
)
index = index + 1
# Check best moment to sell
best_moment = district_historic.order_by("-price").first()
if (
(best_moment.price / actual_district_price)
* dirty_property.property.estimated_price
> dirty_property.property.estimated_price
):
property["best_moment"] = {
"date": best_moment.date,
"estimated_price": (best_moment.price / actual_district_price)
* dirty_property.property.estimated_price,
}
properties.append(property)
addPropertyGeoResume(
geo_resume, dirty_property.property, neighborhood.name, "neighborhoods"
)
addPropertyGeoResume(
geo_resume, dirty_property.property, neighborhood.parent.name, "districts"
)
wallet["properties"] = properties
wallet["future_estimations"] = future_estimation_resume
geo_resume["neighborhoods"] = {
k: v
for k, v in sorted(
geo_resume["neighborhoods"].items(),
key=lambda item: item[1]["total_estimated"],
reverse=True,
)
}
geo_resume["districts"] = {
k: v
for k, v in sorted(
geo_resume["districts"].items(),
key=lambda item: item[1]["total_estimated"],
reverse=True,
)
}
wallet["geo_resume"] = geo_resume
return wallet
def addPropertyGeoResume(geo_resume, property, geo_name, key):
"""Add Geo Resume
Args:
geo_resume ([type]): Datastructure to append new info
property ([type]): Property
geo_name ([type]): Name of zone
key ([type]): Districts or neighborhoods
"""
if geo_name in geo_resume[key]:
geo_resume[key][geo_name]["total_estimated"] = (
geo_resume[key][geo_name]["total_estimated"] + property.estimated_price
)
geo_resume[key][geo_name]["total_buyed"] = (
geo_resume[key][geo_name]["total_buyed"] + property.buyed_price
)
geo_resume[key][geo_name]["total_properties"] = (
geo_resume[key][geo_name]["total_properties"] + 1
)
else:
geo_resume[key][geo_name] = {
"total_estimated": property.estimated_price,
"total_buyed": property.buyed_price,
"total_properties": 1,
}
|
StarcoderdataPython
|
6466256
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: apemodefb
class EInterpolationModeFb(object):
Const = 0
Linear = 1
Cubic = 2
|
StarcoderdataPython
|
6450227
|
<gh_stars>100-1000
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
VERSION = open(os.path.join(here, 'lwp/version')).read()
setup(
name='lwp',
version=VERSION,
description='LXC Web Panel',
long_description=README,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/claudyus/LXC-Web-Panel',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=[
'flask>=0.10',
'jinja2>=2.7.2',
'python-ldap',
'PyOpenSSL',
],
scripts=['bin/lwp'],
)
|
StarcoderdataPython
|
11372712
|
def add(a, b):
return a + b
def sub(a, b):
return a - b
def prod(a, b):
return a * b
def div(a, b):
return a / b
def file_add(file_name):
f = open(file_name)
s = 0
for l in f:
s += int(l)
f.close()
return s
def file_prod(file_name):
f = open(file_name)
s = 1
for l in f:
s *= int(l)
f.close()
return s
|
StarcoderdataPython
|
3423386
|
<reponame>jbjjbjjbj/eittek651
# Copyright 2021 <NAME> <<EMAIL>>, <NAME>, <NAME>, <NAME>, <NAME> <<EMAIL>>
#
# SPDX-License-Identifier: Beerware OR MIT
import numpy as np
import pandas as pd
import ad_path
import antenna_diversity as ad
ad_path.nop()
nr_packets = 2000
snrs_db = np.arange(-5, 25, 1)
x_crc_error_ratios = []
z_crc_error_ratios = []
a_crc_error_ratios = []
bers = []
nr_undetected_errors = []
for snr in snrs_db:
print(f"Starting run for SNR {snr}")
packets = [ad.protocols.dect.Full.with_random_payload()
for _ in range(nr_packets)]
packets_bytes = [p.to_bytes() for p in packets]
modulator = ad.modulation.GFSK()
encoder = ad.encoding.SymbolEncoder(2)
symbols = [encoder.encode_msb(pb) for pb in packets_bytes]
modulated = [modulator.modulate(s) for s in symbols]
some_channel = ad.channel.channel_models.RayleighAWGNChannel(2, snr)
rs_and_hss = []
for i, m in enumerate(modulated):
rs_and_hss.append(some_channel.run(m))
some_channel.frame_sent()
# god forsaken return tuples
demodulated = [
modulator.demodulate(
ad.diversity_technique.selection_from_power(r)[0]) for r,
hs in rs_and_hss]
received_packets = [
ad.protocols.dect.Full.from_bytes(
encoder.decode_msb(d)) for d in demodulated]
received_packets_bytes = [rp.to_bytes() for rp in received_packets]
a_crc_detecteds = [rp.a_field_crc_error_detected()
for rp in received_packets]
a_crc_error_ratios.append(sum(a_crc_detecteds) / len(a_crc_detecteds))
x_crc_detecteds = [rp.x_crc_error_detected() for rp in received_packets]
x_crc_error_ratios.append(sum(x_crc_detecteds) / len(x_crc_detecteds))
z_crc_error_detecteds = [rp.z_crc_error_detected()
for rp in received_packets]
z_crc_error_ratios.append(
sum(z_crc_error_detecteds) /
len(z_crc_error_detecteds))
nr_undetected_error = 0
packet_bers = []
for i in range(
ad.common.shared_length(
packets_bytes,
received_packets_bytes)):
faults, total = ad.common.count_bit_errors(packets_bytes[i],
received_packets_bytes[i])
packet_bers.append(faults / total)
if ((not received_packets[i].any_crc_error_detected()) and
packets_bytes[i] != received_packets_bytes[i]):
nr_undetected_error += 1
nr_undetected_errors.append(nr_undetected_error / nr_packets)
mean_ber = sum(packet_bers) / len(packet_bers)
bers.append(mean_ber)
df = pd.DataFrame(
index=snrs_db,
data={"Ratio of Packets with X-CRC Error Detected": x_crc_error_ratios,
"Ratio of Packets with R-CRC Error Detected": a_crc_error_ratios,
f"Mean Bit Error Rate ({nr_packets} packets)": bers,
"Ratio of Undetected Packet Errors": nr_undetected_errors,
"Ratio of Packets with Z-CRC Error Detected": z_crc_error_ratios,
}
)
df.plot(ylabel="Ratio [-]", xlabel="SNR [dB]", figsize=(8.0, 4.8), grid=True).get_figure().savefig("lol2.pdf")
|
StarcoderdataPython
|
144654
|
<filename>jigls/jeditor/jdantic.py
from os import name
from typing import Any, Callable, List, Dict, Optional
from pydantic import BaseModel, create_model, ValidationError, validator
from uuid import UUID
from pprint import pprint
from pydantic import BaseModel as PydanticBaseModel
# from pydantic.types import UUID4
class JBaseModel(PydanticBaseModel):
__slots__ = "__weakref__"
# class Config:
# json_ecoders = {UUID4: lambda v: v.hex}
class JSocketModel(JBaseModel):
name: str
uid: str
type: int
multiConnect: bool
dataType: str
default: Optional[Any]
exec: bool
execOnChange: bool
execOnConnect: bool
monitorOnChange: bool
traceback: bool
dirty: bool = True
class JNodeModel(JBaseModel):
name: str
uid: str
socketList: List[JSocketModel]
class JGraphNodeModel(JBaseModel):
# node: JNodeModel
nodeType: str
posX: float
posY: float
node: JNodeModel
dataContent: Dict
class JGraphEdgeModel(JBaseModel):
uid: str
startSocket: str
destnSocket: str
pathType: int
# class JGrEdgeModel(JBaseModel):
# edge: JEdgeModel
class JGraphModel(BaseModel):
nodes: List[Optional[JGraphNodeModel]]
edges: List[Optional[JGraphEdgeModel]]
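# Hedged usage sketch (added for illustration): building and inspecting a single
# socket model with the classes above; every field value below is an assumption
# made up for the example only.
if __name__ == "__main__":
    sock = JSocketModel(
        name="input0", uid="sock-0001", type=0, multiConnect=False,
        dataType="float", default=None, exec=False, execOnChange=False,
        execOnConnect=False, monitorOnChange=False, traceback=False,
    )
    pprint(sock.dict())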
|
StarcoderdataPython
|
3454745
|
from .base import Attr, Context, Load, AST, ACO, Expression, CC
from syn.base_utils import quote_string, setitem
from syn.type.a import List
from syn.five import PY2, STR
#-------------------------------------------------------------------------------
# Base Class
class Literal(Expression):
_opts = dict(max_len = 0)
#-------------------------------------------------------------------------------
# Num
if PY2:
n_type = (int, long, float, complex)
else:
n_type = (int, float, complex)
class Num(Literal):
_opts = dict(args = ('n',))
_attrs = dict(n = Attr(n_type, doc='The numerical value', group=AST))
def emit(self, **kwargs):
ret = self._indent(**kwargs)
ret += str(self.n)
return ret
#-------------------------------------------------------------------------------
# Str
class Str(Literal):
_opts = dict(args = ('s',))
_attrs = dict(s = Attr(STR, doc='The string contents', group=AST))
def emit(self, **kwargs):
ret = self._indent(**kwargs)
ret += quote_string(self.s)
return ret
#-------------------------------------------------------------------------------
# Bytes
class Bytes(Literal):
minver = '3'
_opts = dict(args = ('s',))
_attrs = dict(s = Attr(bytes, group=AST))
def emit(self, **kwargs):
ret = self._indent(**kwargs)
if PY2:
ret += quote_string(self.s)
else:
ret += str(self.s)
return ret
#-------------------------------------------------------------------------------
# Sequence
class Sequence(Literal):
bounds = ('[', ']')
delim = ', '
_opts = dict(args = ('elts',))
_attrs = dict(elts = Attr(List(Expression), groups=(AST, ACO, CC)))
def emit(self, **kwargs):
with setitem(kwargs, 'indent_level', 0):
cs = [c.emit(**kwargs) for c in self.elts]
ret = self.delim.join(cs)
if len(cs) == 1 and isinstance(self, Tuple):
ret += ','
ret = self.bounds[0] + ret + self.bounds[1]
ret = self._indent(**kwargs) + ret
return ret
#-------------------------------------------------------------------------------
# List
class List(Sequence):
_attrs = dict(ctx = Attr(Context, Load(), groups=(AST, ACO)))
#-------------------------------------------------------------------------------
# Tuple
class Tuple(List):
bounds = ('(', ')')
#-------------------------------------------------------------------------------
# Set
class Set(Sequence):
bounds = ('{', '}')
#-------------------------------------------------------------------------------
# NameConstant
class NameConstant(Literal):
minver = '3.4'
_opts = dict(args = ('value',))
_attrs = dict(value = Attr([True, False, None], group=AST))
def emit(self, **kwargs):
ret = self._indent(**kwargs)
ret += str(self.value)
return ret
#-------------------------------------------------------------------------------
# __all__
__all__ = ('Literal', 'Num', 'Str', 'Bytes',
'Sequence', 'List', 'Tuple', 'Set',
'NameConstant')
#-------------------------------------------------------------------------------
|
StarcoderdataPython
|
6557445
|
from .source import get_source, source_info, read_sources as sources
|
StarcoderdataPython
|
3487847
|
<gh_stars>1-10
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: MIT
#
# ----------------------------------------------------------------------------
#
# spack install py-tpot
#
# You can edit this file again by typing:
#
# spack edit py-tpot
#
# See the Spack documentation for more information on packaging.
# ----------------------------------------------------------------------------
from spack import *
class PyTpot(PythonPackage):
"""A Python Automated Machine Learning tool that optimizes machine learning pipelines using genetic programming."""
homepage = "http://epistasislab.github.io/tpot/"
url = "https://github.com/EpistasisLab/tpot/archive/v0.10.2.tar.gz"
version('0.10.2', sha256='a35c4b7ff1927168a440327004f71d7dd09b6540ab38a951dc0748df5aa91b30')
depends_on('py-deap', type=('build', 'run'))
depends_on('py-nose', type=('build', 'run'))
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-scikit-learn', type=('build', 'run'))
depends_on('py-scipy', type=('build', 'run'))
depends_on('py-tqdm', type=('build', 'run'))
depends_on('py-stopit', type=('build', 'run'))
depends_on('py-pandas', type=('build', 'run'))
depends_on('py-joblib', type=('build', 'run'))
|
StarcoderdataPython
|
190906
|
def multiply(a,b):
return a*b
def divide(a,b):
return a/b
|
StarcoderdataPython
|
5134831
|
import asyncio
_EVENTS = {}
def get(name):
if name not in _EVENTS:
_EVENTS[name] = asyncio.Event()
return _EVENTS[name]
def clear():
_EVENTS.clear()
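# Hedged usage sketch (added for illustration): a waiter coroutine blocks on a
# named event until another coroutine sets it; the event name "ready" and the
# helper below are assumptions for the example, not part of the original module.
async def _example_usage():
    async def waiter():
        await get("ready").wait()
        return "done"
    task = asyncio.ensure_future(waiter())
    get("ready").set()
    return await task
# asyncio.get_event_loop().run_until_complete(_example_usage())  # -> "done"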
|
StarcoderdataPython
|
3370596
|
<filename>neurofire/inference/blending.py
import numpy as np
# Blending similar to what is done in
# 'Superhuman accuracy on SNEMI'
# However, the function in there probably contains a
# typo and doesn't make sense
# -> for now we use a linear ramp
# TODO more blending modes ?!
# -> this would be the sub-class `LinearBlending`
# -> if so make a top class and inherit from it for each mode
class Blending(object):
epsilon = 0.001
def __init__(self, dim, ramp_size):
assert dim in (2, 3), str(dim)
self.dim = dim
assert isinstance(ramp_size, (int, list, tuple))
if isinstance(ramp_size, int):
self.ramp_size = [ramp_size] * self.dim
else:
self.ramp_size = list(ramp_size)
assert len(self.ramp_size) == self.dim
@staticmethod
def blending_profile_1d(x, dim_size, ramp_size):
# In 1-D case, x must resemble an arange
ramp = np.logical_and(x >= ramp_size, x < (dim_size - ramp_size))
ramp = np.where(ramp, 1., 0.)
ramp_up = np.where(x < ramp_size, 1., 0.)
ramp_down = np.where(x >= (dim_size - ramp_size), 1., 0.)
profile = (ramp +
(1 - ramp) * ramp_up * (x / ramp_size) +
(1 - ramp) * ramp_down * ((dim_size - x - 1) / ramp_size))
return profile
def _get_blending_mask_2d(self, shape):
yy, xx = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')
        profile_y = self.blending_profile_1d(yy, shape[0], self.ramp_size[0])
        profile_x = self.blending_profile_1d(xx, shape[1], self.ramp_size[1])
return 0.5 * (profile_x + profile_y)
def _get_blending_mask_3d(self, shape):
zz, yy, xx = np.meshgrid(np.arange(shape[0]),
np.arange(shape[1]),
np.arange(shape[2]),
indexing='ij')
profile_z = self.blending_profile_1d(zz, shape[0], self.ramp_size[0])
profile_y = self.blending_profile_1d(yy, shape[1], self.ramp_size[1])
profile_x = self.blending_profile_1d(xx, shape[2], self.ramp_size[2])
return (profile_x + profile_y + profile_z) / 3.
def get_blending_mask(self, shape):
blend_mask = self._get_blending_mask_2d(shape) if self.dim == 2 else \
self._get_blending_mask_3d(shape)
# we clip to avoid division by zero later
return np.clip(blend_mask, self.epsilon, 1.)
def __call__(self, input_):
assert input_.ndim in (self.dim, self.dim + 1), '%i, %i' % (input_.ndim, self.dim)
shape = input_.shape if input_.ndim == self.dim else input_.shape[1:]
blend_mask = self.get_blending_mask(shape)
return (blend_mask * input_).astype(input_.dtype), blend_mask
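# Hedged usage sketch (added for illustration, not part of the original module):
# accumulating overlapping 2D patch predictions with the linear-ramp mask above.
# The patch size, ramp size and array names are assumptions for the example only.
def _blending_usage_example():
    blending = Blending(dim=2, ramp_size=8)
    full = np.zeros((64, 64), dtype='float32')
    weights = np.zeros((64, 64), dtype='float32')
    for y0 in (0, 32):
        for x0 in (0, 32):
            patch = np.random.rand(32, 32).astype('float32')
            blended, mask = blending(patch)
            full[y0:y0 + 32, x0:x0 + 32] += blended
            weights[y0:y0 + 32, x0:x0 + 32] += mask
    # divide the accumulated predictions by the accumulated weights
    return full / weights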
|
StarcoderdataPython
|
1927536
|
<filename>task3/merge_sort.py
"""
Merge Sort
Contains two procedure:
1. half hte sequence until each sequence only has 1 element, which means each sequence
has already been sorted;
2. merge those sequences, keep there original order.
"""
def merge(left, right):
"""Merge two sorted list to one sorted list"""
res = []
p1, p2 = 0, 0
while True:
if p1 == len(left):
res.extend(right[p2:])
break
elif p2 == len(right):
res.extend(left[p1:])
break
elif left[p1] < right[p2]:
res.append(left[p1])
p1 += 1
else:
res.append(right[p2])
p2 += 1
return res
def merge_sort(l):
if len(l) == 1:
return l
mid = len(l) // 2
left = merge_sort(l[:mid])
right = merge_sort(l[mid:])
return merge(left, right)
if __name__ == '__main__':
a = [1, 5, 2, 8, 0, 2, 10, 9, 20]
print(merge_sort(a))
|
StarcoderdataPython
|
3468920
|
<filename>examples/demo.py
from opencage.geocoder import OpenCageGeocode
key = 'your-key-here'
geocoder = OpenCageGeocode(key)
query = '182 Clerkenwell Road, London'
ret = geocoder.geocode(query)
print(ret._content)
|
StarcoderdataPython
|
1991351
|
import gym
env = gym.make('CartPole-v0')
for i_episode in range(20):  # one episode ends each time the cartpole falls
    observation = env.reset()
    for timestep in range(1000):
        env.render()  # render every timestep
        print(observation)  # [cart position, cart velocity, pole angle, pole angular velocity]
        action = env.action_space.sample()  # sample a random action from the action space
        observation, reward, done, info = env.step(action)
        # step() applies the action and returns four values:
        # observation - the new environment state
        # reward - scalar reward for this step
        # done - boolean, True when the episode is over; info - diagnostics
        if done:
            print("episode finished")
            break
env.close()
|
StarcoderdataPython
|
1762975
|
<reponame>juaoantonio/curso_video_python
n = int(input('Enter an integer to see its predecessor and successor: '))
print(f'The predecessor of {n} is {n-1} and its successor is {n+1}')
|
StarcoderdataPython
|
4900214
|
<reponame>whart222/cctbx_project<filename>xfel/merging/command_line/dev_cxi_mpi_merge_refltable.py
from __future__ import absolute_import, division, print_function
# -*- mode: python; coding: utf-8; indent-tabs-mode: nil; python-indent: 2 -*-
#
# LIBTBX_SET_DISPATCHER_NAME dev.cxi.mpi_merge_refltable
#
# $Id$
from xfel.merging.command_line.dev_mpi_cluster_two_merge import scaling_manager_mpi, Script
from xfel.merging.command_line.dev_cxi_merge_refltable import refltable_scaling_manager
class refltable_scaling_manager_mpi(scaling_manager_mpi, refltable_scaling_manager):
pass
if (__name__ == "__main__"):
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
script = Script(refltable_scaling_manager_mpi)
result = script.run(comm=comm,timing=False)
if rank == 0:
script.show_plot(result)
print("DONE")
|
StarcoderdataPython
|
6685353
|
<filename>__scraping__/cnbc.com - requests/main.py
#
# https://stackoverflow.com/a/47744797/1832058
#
from bs4 import BeautifulSoup
import requests
html = requests.get("https://www.cnbc.com/2017/12/07/pinterest-hires-former-facebook-exec-gary-johnson-to-run-corporate-dev.html").text
soup = BeautifulSoup(html, 'html5lib')
all_paragraphs = soup.find_all('p')
for p in all_paragraphs:
#print(p) # all HTML
print(p.get_text()) # p.get_text(strip=True)
# or
print(p.text)
|
StarcoderdataPython
|
11257745
|
<filename>iwant/__init__.py<gh_stars>100-1000
__version__ = '1.0.14' # version number
|
StarcoderdataPython
|
6675126
|
<reponame>Programming-The-Next-Step-2021/RecipesProject
import unittest
from Recipe_Finder import find_recipes, recipe_link
class TestRecipeFinder(unittest.TestCase):
def test_find_recipes(self):
recipes = find_recipes('eggs,bacon,cheese')
self.assertIsNotNone(recipes,'no recipes')
def test_recipe_link(self):
link = recipe_link('716429')
self.assertIn('html', link)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
11353361
|
<gh_stars>1-10
from libsim.models import (
SimulationStep
)
from libsim.features import (
PausedBy,
)
from libsim.util import (
latest_sample,
)
from tests.helpers import (
create_flip_flop_model
)
def never(*args):
return False
def id(x):
return x
model = create_flip_flop_model(start=0, flip=0, flop=1)
def test_process_interface():
"""
Should behave like a variable
"""
var = model & PausedBy(never)
step = SimulationStep(x0=1, inputs=(0,), duration=1)
assert var.name == model.name
assert var.start == model.start
assert latest_sample(var(step)) == 0
def test_pauses():
"""
Should not flip when paused
"""
var = model & PausedBy(id)
step = SimulationStep(x0=1, inputs=(1,), duration=1)
assert latest_sample(var(step)) == 1
def test_should_pass_down_input_tail():
def assert_inputs(sim_step):
v1, v2 = sim_step.inputs
assert v1 == 1
assert v2 == 2
var = PausedBy(never)(assert_inputs)
step = SimulationStep(x0=1, inputs=(0, 1, 2), duration=1)
var(step)
def test_should_pass_first_input_var_to_predicate():
def pred(x):
assert x == 0
return False
var = model & PausedBy(pred)
step = SimulationStep(x0=1, inputs=(0, 1, 2), duration=1)
var(step)
|
StarcoderdataPython
|
4858455
|
<reponame>rupenp/lin-train
__author__ = '<NAME>'
"""
A multiprocessor version of the trainer that distributes potential feature sets to processes. Each process then
performs the linear regression across the k-folds.
Note that this can be very memory intensive as each process must have a copy of the data.
"""
from trainer import Trainer
|
StarcoderdataPython
|
6567694
|
<gh_stars>1-10
from argparse import ArgumentParser
from os.path import join
from .constants import (CFG_TEMP_FILE, MD_TEMP_DIR, MD_CURRENT_FILES, FMT,
LATEX_FORMATS, LATEX, EPUB, HTML, OUTPUT_FILE)
from .utils import yaml_load, yaml_dump
class FlightScript:
def __init__(self, func, flight_type):
setattr(self, "fligh_script", func.__get__(self))
parser = ArgumentParser(description="")
parser.add_argument('--cfg', nargs='?', default="",
help='The cfg from pandoc_styles')
args = parser.parse_args()
self.cfg = yaml_load(args.cfg)
self.fmt = self.cfg[FMT]
self.real_fmt = self.fmt
if self.fmt in LATEX_FORMATS:
self.fmt = LATEX
elif self.fmt == EPUB:
self.fmt = HTML
if flight_type == "preflight":
self.files = self.cfg[MD_CURRENT_FILES]
else:
self.file = self.cfg[OUTPUT_FILE]
def fligh_script(self):
pass
def save_cfg(self):
yaml_dump(self.cfg, join(self.cfg.get(MD_TEMP_DIR), CFG_TEMP_FILE))
def run_preflight_script(func):
script = FlightScript(func, "preflight")
script.fligh_script()
def run_postflight_script(func):
script = FlightScript(func, "postflight")
script.fligh_script()
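# Hedged usage sketch (added for illustration): a preflight hook might reorder the
# current file list and persist the cfg; the function body is an assumption for the
# example and is kept commented because FlightScript parses CLI arguments on creation.
# def sort_current_files(self):
#     self.files.sort()
#     self.save_cfg()
#
# run_preflight_script(sort_current_files)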
|
StarcoderdataPython
|
9730690
|
<reponame>diberry/AzureStorageSnippets<filename>blobs/quickstarts/python/V12/app_framework.py
import os, uuid
from azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient, __version__
try:
print("Azure Blob Storage v" + __version__ + " - Python quickstart sample")
# Quick start code goes here
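    # Hedged sketch (added for illustration, not part of the original quickstart):
    # a typical v12 flow builds a client from a connection string and creates a
    # container; the environment variable name is the one used in the Microsoft
    # quickstart docs and is included here as an assumption.
    # connect_str = os.getenv("AZURE_STORAGE_CONNECTION_STRING")
    # blob_service_client = BlobServiceClient.from_connection_string(connect_str)
    # container_client = blob_service_client.create_container("quickstart" + str(uuid.uuid4()))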
except Exception as ex:
print('Exception:')
print(ex)
|
StarcoderdataPython
|
1695580
|
#!/usr/bin/env python3
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import dash_table
import plotly.graph_objs as go
import pandas as pd
import numpy as np
import os
import math
from scipy.stats import mannwhitneyu, ttest_ind
from nutris import nutris
BASEPATH = "/data"
app = dash.Dash(__name__)
app.config['suppress_callback_exceptions']=True
def combine_all_data():
print("getting new data")
survey_df = pd.DataFrame()
#tracking_files = {}
machineLayouts = pd.DataFrame()
timings = pd.DataFrame()
for filename in os.listdir(BASEPATH):
if ".csv" in filename:
if "machineLayout" in filename:
user_id = filename.split("_")[0]
task = filename.split("_")[1]
#the machinelayout is the same for all tasks no need to store it multiple times
#extract the machine layout
machinelayout_df_tmp = pd.read_csv(os.path.join(BASEPATH, filename), sep=';')
machinelayout_df_tmp["user_id"] = user_id
machinelayout_df_tmp["task"] = task
machineLayouts = machineLayouts.append(machinelayout_df_tmp, ignore_index=True)
if "_trackings_" in filename:
user_id = filename.split("_")[0]
task = filename.split("_")[1]
timings_df_tmp = pd.read_csv(os.path.join(BASEPATH, filename), sep=',')
timings = timings.append({"user_id":user_id, "task":task, "time":timings_df_tmp.iloc[-1]["timestamp"] / 1000}, ignore_index=True)
for filename in os.listdir(BASEPATH):
if ".csv" in filename and not "BAK" in filename:
if "_evaluation_" in filename:
survey_df_tmp = pd.read_csv(os.path.join(BASEPATH, filename), index_col="user_id", sep=';')
survey_df = survey_df_tmp.combine_first(survey_df)
elif "_basic_" in filename:
survey_df_tmp = pd.read_csv(os.path.join(BASEPATH, filename), index_col="user_id", sep=';')
survey_df = survey_df_tmp.combine_first(survey_df)
elif "_guess_" in filename:
survey_df_tmp = pd.read_csv(os.path.join(BASEPATH, filename), index_col="user_id", sep=';')
survey_df = survey_df_tmp.combine_first(survey_df)
elif "_task_" in filename:
#extract the nutriscore & label from machine layout if available
survey_df_tmp = pd.read_csv(os.path.join(BASEPATH, filename), index_col="user_id", sep=';')
user_id = str(survey_df_tmp.index[0])
#assuming there is only one row in the survey_task.csv, which is fine if data from typeform
for taskNr in range(1,5):
try:
product = machineLayouts[ (machineLayouts["user_id"] == user_id) & \
(machineLayouts["BoxNr"] == int(survey_df_tmp["t_{}".format(taskNr)].iloc[0]))
].iloc[0]
survey_df_tmp["nutri_label_{}".format(taskNr)] = product["ProductNutriLabel"]
survey_df_tmp["nutri_score_{}".format(taskNr)] = product["ProductNutriScore"]
survey_df_tmp["energy_{}".format(taskNr)] = nutris[product["ProductId"]]["energy"]
survey_df_tmp["sugar_{}".format(taskNr)] = nutris[product["ProductId"]]["sugar"]
survey_df_tmp["sat_fat_{}".format(taskNr)] = nutris[product["ProductId"]]["sat_fat"]
survey_df_tmp["natrium_{}".format(taskNr)] = nutris[product["ProductId"]]["natrium"]
survey_df_tmp["protein_{}".format(taskNr)] = nutris[product["ProductId"]]["protein"]
survey_df_tmp["fiber_{}".format(taskNr)]= nutris[product["ProductId"]]["fiber"]
survey_df_tmp["health_percentage_{}".format(taskNr)] = nutris[product["ProductId"]]["health_percentage"]
survey_df_tmp["time_{}".format(taskNr)] = timings.loc[(timings["user_id"]==user_id) & (timings["task"]==str(taskNr)),"time"].iloc[0]
except:
survey_df_tmp["nutri_label_{}".format(taskNr)] = None
survey_df_tmp["nutri_score_{}".format(taskNr)] = None
survey_df_tmp["energy_{}".format(taskNr)] = None
survey_df_tmp["sugar_{}".format(taskNr)] = None
survey_df_tmp["sat_fat_{}".format(taskNr)] = None
survey_df_tmp["natrium_{}".format(taskNr)] = None
survey_df_tmp["protein_{}".format(taskNr)] = None
survey_df_tmp["fiber_{}".format(taskNr)]= None
survey_df_tmp["health_percentage_{}".format(taskNr)] = None
survey_df_tmp["time_{}".format(taskNr)] = None
survey_df = survey_df_tmp.combine_first(survey_df)
age_classes = {
0: "0.) < 19yrs",
1: "1.) 20 - 29 yrs",
2: "2.) 30 - 49 yrs",
3: "2.) 30 - 49 yrs",
4: "3.) 50 - 65 yrs",
5: "4.) > 65 yrs",
6: "4.) > 65 yrs"}
survey_df["age_class"] = survey_df["age"].apply(lambda x: safe_dict(x, age_classes))
    ages = {
        0: 18,
        1: 25,
        2: 35,
        3: 45,
        4: 57,
        5: 72,
        6: 85
    }
survey_df["age"] = survey_df["age"].apply(lambda x: safe_dict(x, ages))
weights = {
"39-": 35,
"40-49": 45,
"50-59": 55,
"60-69": 65,
"70-79": 75,
"80-89": 85,
"90-99": 95,
"100-109": 105,
"110-119": 115,
"120-129": 125,
"130-139": 135,
"140-149": 145,
"150+": 155
}
survey_df["weight"] = survey_df["weight"].apply(lambda x: safe_dict(x, weights, False))
heights = {
"139-": 1.35,
"140-149": 1.45,
"150-159": 1.55,
"160-169": 1.65,
"170-179": 1.75,
"180-189": 1.85,
"190-199": 1.95,
"200-209": 2.05,
"210+": 2.15
}
survey_df["height"] = survey_df["height"].apply(lambda x: safe_dict(x, heights, False))
genders = {
"male": "0.) Male",
"female": "1.)Female"
}
survey_df["gender"] = survey_df["gender"].apply(lambda x: safe_dict(x, genders, False))
survey_df["bmi"] = survey_df["weight"] / (survey_df["height"] * survey_df["height"])
survey_df["bmi_class"] = survey_df["bmi"].apply(bmi_class)
diets = {
"No I don't follow a certain diet": "None",
"Nein, ich folge keiner bestimmten Diät": "None",
"I avoid certain foods because of an allergy or food intolerance": "Allergy / Intolerance",
"Ich vermeide bestimmte Lebensmittel wegen Allergie oder Unverträglichkeit": "Allergy / Intolerance",
"I eat vegetarian": "Vegiatrian / Vegan",
"Ich esse vegetarisch (ovo-lacto-vegetarisch, lacto-vegetarisch)": "Vegiatrian / Vegan",
"I eat vegan": "Vegiatrian / Vegan",
"Ich esse vegan": "Vegiatrian / Vegan",
"I avoid certain foods for ethical/cultural/religious reasons": "Cultural / Ethnical",
"Ich vermeide bestimmte Lebensmittel aus ethischen, kulturellen oder religiösen Gründen": "Cultural / Ethnical",
"I follow a high carbohydrate diet": "High Carb",
"Ich esse kohlenhydratreich": "High Carb",
"I follow a diet low in carbohydrates": "Low Carb",
"Ich esse kohlenhydrat-arm": "Low Carb",
"I follow a low fat or cholosterol diet": "Low Fat",
"Ich esse fettarm oder cholesterin-arm": "Low Fat",
"I follow a diet with reduced salt consumption": "Low Salt",
"Ich esse salz-reduziert": "Low Salt",
"I follow a diet low in protein": "Low Protein",
"Ich esse protein-arm": "Low Protein",
"I follow a diet rich in protein": "High Protein",
"Ich esse protein-reich": "High Protein",
"I follow an environmentally friendly / sustainable diet": "Sustainable",
"Ich ernähre mich umweltreundlich und nachhaltig": "Sustainable",
}
survey_df["diet"] = survey_df["diet"].apply(lambda x: safe_dict(x, diets, False))
educations = {
"Manditory School": "0:) primary education",
"Middle school": "0:) primary education",
"High school": "1.) secondary education",
"Vocational school": "1.) secondary education",
"master's diploma": "2.) tertiary education",
"College / University": "2.) tertiary education",
"Obligatorische Schule": "0:) primary education",
"Weiterführende Schule": "0:) primary education",
"Matura": "1.) secondary education",
"Berufsschule": "1.) secondary education",
"Meister- / eidg. Diplom": "2.) tertiary education",
"Studium": "2.) tertiary education",
}
survey_df["education"] = survey_df["education"].apply(lambda x: safe_dict(x, educations, False))
snack_frequencies = {
"sehr selten bis nie": "0.) never",
"never":"0.) never",
"once or twice per year":"0.) never",
"ca. monatlich":"1.) monthly",
"monthly":"1.) monthly",
"ca. wöchentlich":"2.) weekly",
"weekly":"2.) weekly",
"ca. 2-3 mal pro Woche":"2.) weekly",
"ca. 4-5 mal pro Woche":"3.) almost daily",
"daily":"3.) almost daily",
"ca. täglich":"3.) almost daily",
}
snack_frequencies_int = {
"sehr selten bis nie": 0,
"never":0,
"once or twice per year":0,
"ca. monatlich":1,
"monthly":1,
"ca. wöchentlich":4,
"weekly":4,
"ca. 2-3 mal pro Woche":10,
"ca. 4-5 mal pro Woche":20,
"daily":31,
"ca. täglich":31,
}
survey_df["snack_frequency_int"] = survey_df["snack_frequency"].apply(lambda x: safe_dict(x, snack_frequencies_int, False))
survey_df["snack_frequency"] = survey_df["snack_frequency"].apply(lambda x: safe_dict(x, snack_frequencies, False))
ar_frequencies = {
"Never used":"0.) Never",
"Noch nie benutz":"0.) Never",
"Tried once or twice":"1.) Few Times",
"Schon ein oder zwei Mal benutzt":"1.) Few Times",
"I use it sometimes":"2.) Sometimes",
"Ich benutze es hin und wieder privat":"2.) Sometimes",
"I worked with it on a project":"3.) Regularly",
"Ich habe an einem Projekt damit gearbeitet":"3.) Regularly",
"I use it regularly for private purpose":"3.) Regularly",
"Ich benutze es regelmäßig privat":"3.) Regularly",
"It is part of my job on a regular basis":"3.) Regularly",
"Ich komme auf der Arbeit regelmäßig damit in Kontakt":"3.) Regularly",
"I am an expert / developer in the field":"4.) Expert",
"Ich bin ein Experte / Entwickler auf dem Feld":"4.) Expert",
}
ar_frequencies_int = {
"Never used":0,
"Noch nie benutz":0,
"Tried once or twice":1,
"Schon ein oder zwei Mal benutzt":1,
"I use it sometimes":2,
"Ich benutze es hin und wieder privat":2,
"I worked with it on a project":3,
"Ich habe an einem Projekt damit gearbeitet":3,
"I use it regularly for private purpose":3,
"Ich benutze es regelmäßig privat":3,
"It is part of my job on a regular basis":3,
"Ich komme auf der Arbeit regelmäßig damit in Kontakt":3,
"I am an expert / developer in the field":4,
"Ich bin ein Experte / Entwickler auf dem Feld":4,
}
survey_df["ar_frequency_int"] = survey_df["ar_frequency"].apply(lambda x: safe_dict(x, ar_frequencies_int, False))
survey_df["ar_frequency"] = survey_df["ar_frequency"].apply(lambda x: safe_dict(x, ar_frequencies, False))
survey_df["BI_avg"] = survey_df[["BI1", "BI2","BI3"]].mean(axis=1, numeric_only=True)
survey_df["EE_avg"] = survey_df[["EE1", "EE2","EE3"]].mean(axis=1, numeric_only=True)
survey_df["FL_avg"] = survey_df[["FL2","FL3"]].mean(axis=1, numeric_only=True)
survey_df["HM_avg"] = survey_df[["HM1", "HM2"]].mean(axis=1, numeric_only=True)
survey_df["IE_avg"] = survey_df[["IE1", "IE2"]].mean(axis=1, numeric_only=True)
survey_df["PE_avg"] = survey_df[["PE1", "PE2","PE3"]].mean(axis=1, numeric_only=True)
survey_df["PI_avg"] = survey_df[["PI1", "PI2","PI3"]].mean(axis=1, numeric_only=True)
survey_df["SI_avg"] = survey_df[["SI1", "SI2","SI3"]].mean(axis=1, numeric_only=True)
    survey_df.fillna(value=np.nan, inplace=True)
return survey_df
def render_box_per_col(col, survey_df):
is_test = survey_df["group"] == "Test"
is_control = survey_df["group"] == "Control"
data = []
data.append(go.Box(
x = survey_df[col][is_test],
name="test",
marker = dict(
color = 'rgb(7,40,89)'),
line = dict(
color = 'rgb(7,40,89)')
))
data.append(go.Box(
x = survey_df[col][is_control],
name="control",
marker = dict(
color = 'rgb(107,174,214)'),
line = dict(
color = 'rgb(107,174,214)')
))
graph = dcc.Graph(
figure = go.Figure(
data = data,
layout = go.Layout(
showlegend=True,
legend=go.layout.Legend(
x=0,
y=1.0
),
margin=go.layout.Margin(l=40, r=0, t=40, b=30)
)
),
style={'height': 150},
id='box_{}'.format(col)
)
graph_div = html.Div([graph],
style={'padding-top': '20',
'padding-bottom': '20'})
return graph_div
def data_per_col(col, survey_df):
is_test = survey_df["group"] == "Test"
is_control = survey_df["group"] == "Control"
data = [
go.Histogram(
x = survey_df[col][is_test].sort_values(),
name="test",
opacity=0.75,
marker = dict(
color = 'rgb(7,40,89)'),
),
go.Histogram(
x = survey_df[col][is_control].sort_values(),
name="control",
opacity=0.75,
marker = dict(
color = 'rgb(107,174,214)'),
)
]
return data
def render_hist_per_col(col, survey_df):
data = data_per_col(col, survey_df)
graph = dcc.Graph(
figure = go.Figure(
data = data,
layout = go.Layout(
showlegend=True,
legend=go.layout.Legend(
x=0,
y=1.0
),
margin=go.layout.Margin(l=40, r=0, t=40, b=30)
)
),
style={'height': 300}
)
graph_div = html.Div([graph],
style={'padding-top': '20',
'padding-bottom': '20'})
return graph_div
def render_table(survey_df):
table = dash_table.DataTable(
id='table',
columns=[{"name": i, "id": i} for i in survey_df.columns],
data=survey_df.to_dict("rows"),
)
return table
#def load_user_tracking(user_id, task_id):
# filename = tracking_files[user_id][task_id]
def calc_p_whitney(col, s, ns):
    """Mann-Whitney U test p-value for col between the two boolean-selected groups."""
    Rg = col.rank()
    if (Rg == Rg.iloc[0]).all():
        # all ranks are identical, so there is no detectable difference
        return 0.5
    u, p = mannwhitneyu(Rg[s], Rg[ns])
    return p
# def calc_p_whitney(colname, survey_df):
# col = survey_df[colname]
# istest = survey_df["group"]=="Test"
# iscontrol = survey_df["group"]=="Control"
# Rg = col.rank()
#
# nt = col[istest].count()
# nc = col[iscontrol].count()
#
# if (Rg == Rg.iloc[0]).all():
# return Rg[istest].sum() - nt * (nt + 1) / 2, 0.5, nt, nc
#
# u, p = mannwhitneyu(Rg[istest], Rg[iscontrol])
# return u, p, nt, nc
def calc_p_t(colname, survey_df):
col = survey_df[colname]
istest = survey_df["group"]=="Test"
iscontrol = survey_df["group"]=="Control"
t, p = ttest_ind(col[istest].values, col[iscontrol].values, axis=0, nan_policy='omit')
return t, p
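# Hedged usage sketch (added for illustration): how the two p-value helpers above
# could be exercised on a tiny synthetic frame; the column names and values are
# assumptions for the example only (kept commented so the Dash app is unaffected).
# _demo = pd.DataFrame({"group": ["Test"] * 3 + ["Control"] * 3,
#                       "score": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]})
# print(calc_p_whitney(_demo["score"], _demo["group"] == "Test", _demo["group"] == "Control"))
# print(calc_p_t("score", _demo))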
def table_group(task_nr, survey_df, header):
istest = survey_df["group"] == "Test"
iscontrol = survey_df["group"] == "Control"
isoverweight = survey_df["bmi"] > 25
isnormal = survey_df["bmi"] <= 25
iseducated = survey_df["bmi"] > 25
isliterate = survey_df["FL_avg"] > 4.5
isilliterate = survey_df["FL_avg"] <= 4.5
cols = ["nutri_score",
"energy",
"sat_fat",
"sugar",
"natrium",
"protein",
"fiber",
"health_percentage",
"time"]
data = pd.DataFrame()
    for col in cols:
        col_name = "{}_{}".format(col, task_nr)
        data.loc[col, "N Total"] = "[{}]".format(int(survey_df[col_name].count()))
        data.loc[col, "mean Total"] = "{:.2f}".format(survey_df[col_name].mean())
        data.loc[col, "SD Total"] = "({:.2f})".format(survey_df[col_name].std())
        p = calc_p_whitney(survey_df[col_name], istest, iscontrol)
        data.loc[col, "p group"] = "{:.4f}".format(p)
        data.loc[col, "N Test"] = "[{}]".format(int(len(survey_df[istest])))
        data.loc[col, "mean Test"] = "{:.2f}".format(survey_df[col_name][istest].mean())
        data.loc[col, "SD Test"] = "({:.2f})".format(survey_df[col_name][istest].std())
        data.loc[col, "N Control"] = "[{}]".format(int(len(survey_df[iscontrol])))
        data.loc[col, "mean Control"] = "{:.2f}".format(survey_df[col_name][iscontrol].mean())
        data.loc[col, "SD Control"] = "({:.2f})".format(survey_df[col_name][iscontrol].std())
        p = calc_p_whitney(survey_df[col_name], isliterate, isilliterate)
        data.loc[col, "p FL"] = "{:.4f}".format(p)
        data.loc[col, "N FL>4.5"] = "[{}]".format(int(len(survey_df[isliterate])))
        data.loc[col, "mean FL>4.5"] = "{:.2f}".format(survey_df[col_name][isliterate].mean())
        data.loc[col, "SD FL>4.5"] = "({:.2f})".format(survey_df[col_name][isliterate].std())
        data.loc[col, "N FL<=4.5"] = "[{}]".format(int(len(survey_df[isilliterate])))
        data.loc[col, "mean FL<=4.5"] = "{:.2f}".format(survey_df[col_name][isilliterate].mean())
        data.loc[col, "SD FL<=4.5"] = "({:.2f})".format(survey_df[col_name][isilliterate].std())
p = calc_p_whitney(survey_df["FL_avg"], isliterate, isilliterate)
data.loc[col, "p FL"] = "{:.4f}".format(p)
data.loc[col, "N FL>4.5"] = "[{}]".format(int(len(survey_df[isliterate])))
data.loc[col, "mean FL>4.5"] = "{:.2f}".format(survey_df[col_name][isliterate].mean())
data.loc[col, "SD FL>4.5"] = "({:.2f})".format(survey_df[col_name][isliterate].std())
data.loc[col, "N FL<=4.5"] = "[{}]".format(int(len(survey_df[isilliterate])))
data.loc[col, "mean FL<=4.5"] = "{:.2f}".format(survey_df[col_name][isilliterate].mean())
data.loc[col, "SD FL<=4.5"] = "({:.2f})".format(survey_df[col_name][isilliterate].std())
data["index"] = data.index
data_dict = data.to_dict("rows")
table = dash_table.DataTable(
id='table',
columns=[ {"name": "", "id": "index"},
{"name": "u", "id": "u"},
{"name": "p", "id": "p"},
{"name": "Total mean", "id": "mean Total"},
{"name": "(SD)", "id": "SD Total"},
{"name": "[N]", "id": "Total N"},
{"name": "Test mean", "id": "mean Test"},
{"name": "(SD)", "id": "SD Test"},
{"name": "[N]", "id": "Test N"},
{"name": "Control mean", "id": "mean Control"},
{"name": "(SD)", "id": "SD Control"},
{"name": "[N]", "id": "Control N"}],
data=data_dict,
style_as_list_view=True,
style_cell={'padding': '5px'},
style_header={
'backgroundColor': 'white',
'fontWeight': 'bold'
},
style_cell_conditional=[
{
'if': {'column_id': c},
'textAlign': 'left'
            } for c in ['index', 'SD Total', 'SD Test', 'SD Control', 'N Total', 'N Test', 'N Control']
],
)
ret_div = html.Div([
html.H1("Task {}".format(task_nr)),
html.H2(header),
html.Div( [table],
style={ 'padding-top': '10',
'padding-bottom': '30',
'padding-left': '30',
'padding-right': '5'}),
render_box_per_col("nutri_score_{}".format(task_nr), survey_df),
render_hist_per_col("nutri_label_{}".format(task_nr), survey_df.sort_values(by="nutri_label_{}".format(task_nr)))
])
return ret_div
def creat_mean_desc(col, survey_df, header = None):
data = pd.DataFrame()
istest = survey_df["group"] == "Test"
iscontrol = survey_df["group"] == "Control"
if isinstance(header, str):
title = html.H3(header)
else:
title = html.H3(col)
ret_div = html.Div([title,
html.P("Total mean (SD) \t\t {:.2f} ({:.2f})".format(survey_df[col].mean(), survey_df[col].std())),
html.P("Test mean (SD) \t\t {:.2f} ({:.2f})".format(survey_df[col][istest].mean(), survey_df[col][istest].std())),
html.P("Control mean (SD) \t\t {:.2f} ({:.2f})".format(survey_df[col][iscontrol].mean(), survey_df[col][iscontrol].std())),
render_box_per_col(col, survey_df)])
return ret_div
def create_count_desc(col, survey_df, header=None):
data = pd.DataFrame()
istest = survey_df["group"] == "Test"
iscontrol = survey_df["group"] == "Control"
survey_df.loc[survey_df[col].isna(),col] = "Missing"
data["count Total"] = survey_df[col].value_counts()
data["% Total"] = (data["count Total"] / data["count Total"].sum() * 100).apply(lambda x : "({:.1f}%)".format(x))
data.loc["Total", "count Total"] = data["count Total"].sum()
data["count Test"] = survey_df[col][istest].value_counts()
data["% Test"] = (data["count Test"] / data["count Test"].sum() * 100).apply(lambda x : "({:.1f}%)".format(x))
data.loc["Total", "count Test"] = data["count Test"].sum()
data["count Control"] = survey_df[col][iscontrol].value_counts()
data["% Control"] = (data["count Control"] / data["count Control"].sum() * 100).apply(lambda x : "({:.1f}%)".format(x))
data.loc["Total", "count Control"] = data["count Control"].sum()
data.loc["Total", ["% Total","% Test","% Control"]] = ""
data["index"] = data.index
data = data.sort_index()
data_dict = data.to_dict("rows")
table = dash_table.DataTable(
id='table',
columns=[ {"name": "", "id": "index"},
{"name": "Total N", "id": "count Total"},
{"name": "(%)", "id": "% Total"},
{"name": "Test N", "id": "count Test"},
{"name": "(%)", "id": "% Test"},
{"name": "Control N", "id": "count Control"},
{"name": "(%)", "id": "% Control"},],
data=data_dict,
style_as_list_view=True,
style_cell={'padding': '5px'},
style_header={
'backgroundColor': 'white',
'fontWeight': 'bold'
},
style_cell_conditional=[
{
'if': {'column_id': c},
'textAlign': 'left'
} for c in ['index', '% Total', '% Test', '% Control']
],
)
if isinstance(header, str):
title = html.H3(header)
else:
title = html.H3(col)
ret_div = html.Div([title,
html.Div( [table],
style={ 'padding-top': '10',
'padding-bottom': '30',
'padding-left': '30',
'padding-right': '5'}),
render_hist_per_col(col, survey_df),
])
return ret_div
def get_question_text_save(col, questions_df, question_ids):
try:
question_text = questions_df[" question.text,"][question_ids[col]]
except:
question_text = "Error: Question wasn't found"
return question_text
def create_survey(cols, survey_df, header):
questionsfile = os.path.join(BASEPATH, "questionlayout-evaluation.csv")
questions_df = pd.read_csv(questionsfile, sep=";", index_col="question.id")
questions_df["time_1"] = "task 1"
questions_df["time_2"] = "task 2"
questions_df["time_3"] = "task 3"
questions_df["time_4"] = "task 4"
question_ids = {
"IE1":"jcruLQD1jtsb",
"IE2":"eaTgLd8mTqIl",
"PE1":"q0mA3PRRFjx7",
"PE2":"sBItcnzLbeab",
"PE3":"HNBvOMYBB0aG",
"EE1":"MEMNKBeL1Yx1",
"EE2":"erPaRi4mPyPG",
"EE3":"QVMeswBQSWAi",
"SI1":"xdCMMXgxnem1",
"SI2":"wfA9uqPz8cRt",
"SI3":"xUlfUW6JGEav",
"HM1":"JYEh0RF8Fm8b",
"HM2":"DuGG9VdyhxCd",
"PI1":"Y4v77TAeZzKs",
"PI2":"QVzNIkgWgGxB",
"PI3":"BQXqCdJgdxle",
"BI1":"b4YNQSqEHFaE",
"BI2":"GfV0SwI2TmuK",
"BI3":"PEWOeMEEayNA",
"FL1":"Wiq2wP97n7RO",
"FL2":"zDVqi1Ti9Nwq",
"FL3":"WeELc4DWjE6P",
"time_1":"time_1",
"time_2":"time_2",
"time_3":"time_3",
"time_4":"time_4",
}
question_texts = {col: get_question_text_save(col, questions_df, question_ids) for col in cols}
question_texts["Average"] = "--- Average ---"
survey_df_tmp = survey_df.loc[:,cols]
survey_df_tmp.loc[:,"Average"] = survey_df_tmp.mean(axis=1,numeric_only=True)
survey_df_tmp.loc[:,"group"] = survey_df.loc[:,"group"]
cols.append("Average")
data = pd.DataFrame()
istest = survey_df["group"] == "Test"
iscontrol = survey_df["group"] == "Control"
data["mean Total"] = survey_df_tmp[cols].mean().apply(lambda x : "{:.2f}".format(x))
data["SD Total"] = survey_df_tmp[cols].std().apply(lambda x : "({:.2f})".format(x))
data["mean Test"] = survey_df_tmp[cols][istest].mean().apply(lambda x : "{:.2f}".format(x))
data["SD Test"] = survey_df_tmp[cols][istest].std().apply(lambda x : "({:.2f})".format(x))
data["mean Control"] = survey_df_tmp[cols][iscontrol].mean().apply(lambda x : "{:.2f}".format(x))
data["SD Control"] = survey_df_tmp[cols][iscontrol].std().apply(lambda x : "({:.2f})".format(x))
data["question"] = pd.Series(question_texts)
for col in cols:
_, data.loc[col, "p (rank)"], _, _ = calc_p_whitney(col, survey_df_tmp)
_, data.loc[col, "p (t)"] = calc_p_t(col, survey_df_tmp)
data["p (rank)"] = data["p (rank)"].apply(lambda x : "{:.4f}".format(x))
data["p (t)"] = data["p (t)"].apply(lambda x : "{:.4f}".format(x))
data_dict = data.to_dict("rows")
table = dash_table.DataTable(
id='table',
columns=[ {"name": "", "id": "question"},
{"name": "Total mean", "id": "mean Total"},
{"name": "(SD)", "id": "SD Total"},
{"name": "Test mean", "id": "mean Test"},
{"name": "(SD)", "id": "SD Test"},
{"name": "Control mean", "id": "mean Control"},
{"name": "(SD)", "id": "SD Control"},
{"name": "p (rank)", "id": "p (rank)"},
{"name": "p (t)", "id": "p (t)"}],
data=data_dict,
style_as_list_view=True,
style_cell={'padding': '5px'},
style_header={
'backgroundColor': 'white',
'fontWeight': 'bold'
},
style_cell_conditional=[
{
'if': {'column_id': c},
'textAlign': 'left'
} for c in ['question', 'SD Total', 'SD Test', 'SD Control', "u"]
],
)
ret_div = html.Div([html.H3(header),
html.Div( [table],
style={ 'padding-top': '10',
'padding-bottom': '30',
'padding-left': '30',
'padding-right': '5'})])
return ret_div
def bmi_class(bmi):
if bmi < 18.5:
return "0:) Underweight (BMI < 18.5)"
elif bmi < 25:
return "1.) Normal (18.5 ≤ BMI < 25)"
elif bmi < 30:
return "2.) Overweight (25 ≤ BMI < 30)"
else:
return "3.) Obese (30 ≤ BMI"
def safe_dict(_key, _dict, _int=True):
try:
if _int:
val = _dict[int(_key)]
else:
val = _dict[_key]
except:
val = None
return val
app.layout = html.Div([
html.Button("Refresh", id="refresh"),
html.Div([],
id="graphs",
style={'width':'70%',
'padding-top': '40',
'padding-bottom': '10',
'padding-left': '50',
'padding-right': '50'}),
])
@app.callback(Output("graphs", "children"),
[Input("refresh", "n_clicks")])
def update_survey(_):
survey_df = combine_all_data()
print("printing new data")
return [creat_mean_desc("age", survey_df, "Age"),
create_count_desc("age_class", survey_df, "Age"),
creat_mean_desc("bmi", survey_df, "Body Mass Index"),
create_count_desc("bmi_class", survey_df, "Weight"),
create_count_desc("gender", survey_df, "Gender"),
create_count_desc("education", survey_df, "Education"),
create_count_desc("snack_frequency", survey_df, "Machine Usage Frequency"),
creat_mean_desc("snack_frequency_int", survey_df, "Machine Usage Frequency"),
create_count_desc("ar_frequency", survey_df, "AR Usage Frequency"),
create_survey(["ar_frequency_int"],
survey_df,
"AR Frequency"),
html.Hr(),
table_group(1, survey_df, "Choose a snack of your choice"),
html.Hr(),
table_group(2, survey_df,"Choose a drink of your choice"),
html.Hr(),
table_group(3, survey_df,"Choose the healthiest snack"),
html.Hr(),
table_group(4, survey_df,"Choose the healthiest drink"),
html.Hr(),
create_survey(["time_1", "time_2","time_3","time_4"],
survey_df,
"Time Taken per Task"),
html.Hr(),
create_survey(["IE1", "IE2"],
survey_df,
"Intervention Effect"),
create_survey(["PE1", "PE2", "PE3"],
survey_df,
"Performance Expectancy"),
create_survey(["EE1", "EE2", "EE3"],
survey_df,
"Effort Expectancy"),
create_survey(["SI2", "SI3"],
survey_df,
"Social Influence"),
create_survey(["HM1", "HM2"],
survey_df,
"Hedonic Motivations"),
create_survey(["PI1", "PI2", "PI3"],
survey_df,
"Personal Innovativeness"),
create_survey(["BI1", "BI2", "BI3"],
survey_df,
"Behavioural Intention"),
create_survey(["FL2", "FL3"],
survey_df,
"Food Literacy (ohne FL1)"),
create_survey(["FL1", "FL2", "FL3"],
survey_df,
"Food Literacy"),
create_survey(["SI1"],
survey_df,
"Observation Bias"),
#render_table(survey_df)
]
if __name__ == '__main__':
app.run_server(debug=True, host="0.0.0.0", port=80)
|
StarcoderdataPython
|
1699690
|
<filename>webapp/utils/request_params.py
def param2type(request, method, param, data_type, defval=None):
""" get http request paramter
Args:
request: HttpRequest instance
method: string of HTTP method
param: string of parameter
data_type: type of parameter
defval: default value if parameter do not exist in request method
Returns:
val: parameter value
"""
method = method.upper()
try:
val = data_type(getattr(request, method).get(param, defval))
except ValueError:
val = defval
except Exception as e:
val = defval
return val
def param2int(request, method, param, defval=0):
return param2type(request, method, param, int, defval=defval)
def param2str(request, method, param, defval=""):
return param2type(request, method, param, str, defval=defval)
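# Hedged usage sketch (added for illustration): DummyRequest below is a stand-in
# for a framework HttpRequest object and is an assumption for the example only.
if __name__ == "__main__":
    class DummyRequest:
        GET = {"page": "3", "q": "search term"}
    req = DummyRequest()
    print(param2int(req, "GET", "page", defval=1))     # -> 3
    print(param2str(req, "GET", "q"))                  # -> 'search term'
    print(param2int(req, "GET", "missing", defval=0))  # -> 0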
|
StarcoderdataPython
|
6421381
|
<filename>test/test_ami_line_tool.py
import unittest
import context
from pyamiimage.ami_plot import POLYGON, POLYLINE, AmiLine, AmiLineTool, X, Y
class TestAmiLineTool:
"""test AmilineTool"""
def test_empty_polyline(self):
line_tool = AmiLineTool()
assert line_tool.line_points_list == []
assert line_tool.polygons == []
assert line_tool.mode == POLYLINE
def test_single_segment(self):
# creates fresh polyline
line_tool = AmiLineTool()
line_tool.add_segment([[1, 2], [2, 3]])
assert line_tool.line_points_list == [
[[1, 2], [2, 3]]
], f"found {line_tool.line_points_list}"
def test_multiple_segments(self):
line_tool = AmiLineTool(xy_flag=X)
segments = [[[1, 2], [2, 3]], [[2, 3], [5, 7]]]
line_tool.add_segments(segments)
assert line_tool.line_points_list == [
[[1, 2], [2, 3], [5, 7]]
], f"found {line_tool.line_points_list}"
def test_multiple_non_overlapping_segments(self):
line_tool = AmiLineTool(xy_flag=X)
# already sorted
line_tool.add_segments([[[1, 2], [2, 3]], [[5, 8], [5, 7]]])
assert line_tool.line_points_list == [[[1, 2], [2, 3]], [[5, 8], [5, 7]]]
# not sorted
line_tool = AmiLineTool(xy_flag=X)
line_tool.add_segments([[[5, 8], [5, 7]], [[1, 2], [2, 3]]])
assert line_tool.line_points_list == [[[5, 8], [5, 7]], [[1, 2], [2, 3]]]
def test_add_right_segment(self):
line_tool = AmiLineTool(xy_flag=X)
line_tool.add_segments([[[1, 2], [2, 3]], [[2, 3], [5, 7]]])
assert line_tool.line_points_list == [[[1, 2], [2, 3], [5, 7]]]
line_tool.add_segment([[5, 7], [9, 5]])
assert line_tool.line_points_list == [[[1, 2], [2, 3], [5, 7], [9, 5]]]
def test_fail_right_segment(self):
line_tool = AmiLineTool(xy_flag=X)
line_tool.add_segments([[[1, 2], [2, 3]], [[2, 3], [5, 7]]])
try:
line_tool.add_segment([[15, 7], [9, 5]])
raise ValueError("should raise error as points don't overlap")
except ValueError:
pass
def test_make_unclosed_box_and_close(self):
line_tool = AmiLineTool(xy_flag=X, mode=POLYGON)
line_tool.add_segments(
[[[10, 20], [10, 30]], [[10, 30], [40, 30]], [[40, 30], [40, 20]]]
)
assert line_tool.line_points_list == [[[10, 20], [10, 30], [40, 30], [40, 20]]]
# close box
line_tool.add_segments([[[10, 20], [40, 20]]])
assert line_tool.polygons == [[[10, 20], [10, 30], [40, 30], [40, 20]]]
def test_make_closed_box(self):
line_tool = AmiLineTool(xy_flag=X, mode=POLYGON)
line_tool.add_merge_polyline_to_poly_list(
[[10, 20], [10, 30], [40, 30], [40, 20]]
)
assert line_tool.line_points_list == [[[10, 20], [10, 30], [40, 30], [40, 20]]]
line_tool.add_segments([[[10, 20], [40, 20]]])
assert line_tool.polygons == [[[10, 20], [10, 30], [40, 30], [40, 20]]]
def test_joining_lines_head_tail(self):
"""head-tail"""
line_tool = AmiLineTool()
polyline0 = [[0, 10], [0, 20], [10, 20]]
line_tool.add_merge_polyline_to_poly_list(polyline0)
polyline1 = [[10, 20], [10, 30], [20, 30]]
line_tool.add_merge_polyline_to_poly_list(polyline1)
assert line_tool.line_points_list == [
[[0, 10], [0, 20], [10, 20], [10, 30], [20, 30]]
]
def test_joining_lines_head_head(self):
"""head-head"""
line_tool = AmiLineTool()
polyline0 = [[0, 10], [0, 20], [10, 20]]
line_tool.add_merge_polyline_to_poly_list(polyline0)
polyline1 = [[20, 30], [10, 30], [10, 20]]
line_tool.add_merge_polyline_to_poly_list(polyline1)
assert line_tool.line_points_list == [
[[0, 10], [0, 20], [10, 20], [10, 30], [20, 30]]
]
def test_joining_lines_tail_tail(self):
"""tail-tail"""
line_tool = AmiLineTool()
polyline0 = [[10, 20], [0, 20], [0, 10]]
line_tool.add_merge_polyline_to_poly_list(polyline0)
polyline1 = [[10, 20], [10, 30], [20, 30]]
line_tool.add_merge_polyline_to_poly_list(polyline1)
assert line_tool.line_points_list == [
[[20, 30], [10, 30], [10, 20], [0, 20], [0, 10]]
]
def test_joining_lines_tail_head(self):
"""tail-head"""
line_tool = AmiLineTool()
polyline0 = [[10, 20], [0, 20], [0, 10]]
line_tool.add_merge_polyline_to_poly_list(polyline0)
polyline1 = [[20, 30], [10, 30], [10, 20]]
line_tool.add_merge_polyline_to_poly_list(polyline1)
assert line_tool.line_points_list == [
[[20, 30], [10, 30], [10, 20], [0, 20], [0, 10]]
]
|
StarcoderdataPython
|
6445049
|
# Copyright (c) 2020, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from abc import abstractmethod
import numpy as np
import os
import subprocess
from finn.custom_op import CustomOp
from finn.util.basic import (
CppBuilder,
make_build_dir,
roundup_to_integer_multiple,
get_rtlsim_trace_depth,
)
from finn.util.fpgadataflow import (
IPGenBuilder,
pyverilate_get_liveness_threshold_cycles,
rtlsim_multi_io,
)
from . import templates
try:
from pyverilator import PyVerilator
except ModuleNotFoundError:
PyVerilator = None
class HLSCustomOp(CustomOp):
"""HLSCustomOp class all custom ops that correspond to a finn-hlslib
function are based on. Contains different functions every fpgadataflow
custom node should have. Some as abstract methods, these have to be filled
when writing a new fpgadataflow custom op node."""
def __init__(self, onnx_node):
super().__init__(onnx_node)
self.code_gen_dict = {}
# getting templates from templates.py
# template for single node execution
self.docompute_template = templates.docompute_template
# templates for single node ip generation
# cpp file
self.ipgen_template = templates.ipgen_template
# tcl script
self.ipgentcl_template = templates.ipgentcl_template
def get_nodeattr_types(self):
return {
"backend": ("s", True, "fpgadataflow"),
"code_gen_dir_cppsim": ("s", False, ""),
"code_gen_dir_ipgen": ("s", False, ""),
"executable_path": ("s", False, ""),
"ipgen_path": ("s", False, ""),
"ip_path": ("s", False, ""),
"ip_vlnv": ("s", False, ""),
"exec_mode": ("s", False, ""),
"cycles_rtlsim": ("i", False, 0),
"cycles_estimate": ("i", False, 0),
"rtlsim_trace": ("s", False, ""),
"res_estimate": ("s", False, ""),
"res_hls": ("s", False, ""),
"res_synth": ("s", False, ""),
"rtlsim_so": ("s", False, ""),
# partitioning info
"partition_id": ("i", False, 0),
# input and output FIFO depths
"inFIFODepth": ("i", False, 2),
"outFIFODepth": ("i", False, 2),
}
def get_verilog_top_module_name(self):
"Return the Verilog top module name for this node."
node = self.onnx_node
prefixed_top_name = "%s_%s" % (node.name, node.name)
return prefixed_top_name
def get_verilog_top_module_intf_names(self):
"""Return a dict of names of input and output interfaces.
The keys reflect the protocols each interface implements:
'clk', 'rst', 'm_axis', 's_axis', 'aximm', 'axilite'.
Values are lists of names:
's_axis' names correspond to the list of node inputs in order,
        'm_axis' names correspond to the list of node outputs in order.
Each block must have at most one aximm and one axilite."""
intf_names = {}
intf_names["clk"] = ["ap_clk"]
intf_names["rst"] = ["ap_rst_n"]
intf_names["s_axis"] = ["in0_V_V"]
intf_names["m_axis"] = ["out_V_V"]
intf_names["aximm"] = []
intf_names["axilite"] = []
return intf_names
def get_verilog_top_filename(self):
"Return the Verilog top module filename for this node."
verilog_file = "{}/project_{}/sol1/impl/verilog/{}.v".format(
self.get_nodeattr("code_gen_dir_ipgen"),
self.onnx_node.name,
self.get_verilog_top_module_name(),
)
return verilog_file
def get_all_verilog_paths(self):
"Return list of all folders containing Verilog code for this node."
code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen")
assert (
code_gen_dir != ""
), """Node attribute "code_gen_dir_ipgen" is
not set. Please run HLSSynthIP first."""
verilog_path = "{}/project_{}/sol1/impl/verilog/".format(
code_gen_dir, self.onnx_node.name
)
# default impl only returns the HLS verilog codegen dir
return [verilog_path]
def get_all_verilog_filenames(self):
"Return list of all Verilog files used for this node."
verilog_files = []
verilog_paths = self.get_all_verilog_paths()
for verilog_path in verilog_paths:
for f in os.listdir(verilog_path):
if f.endswith(".v"):
verilog_files += [f]
return verilog_files
def prepare_rtlsim(self):
"""Creates a Verilator emulation library for the RTL code generated
for this node, sets the rtlsim_so attribute to its path and returns
a PyVerilator wrapper around it."""
if PyVerilator is None:
raise ImportError("Installation of PyVerilator is required.")
verilog_paths = self.get_all_verilog_paths()
verilog_files = self.get_all_verilog_filenames()
# build the Verilator emu library
sim = PyVerilator.build(
verilog_files,
build_dir=make_build_dir("pyverilator_" + self.onnx_node.name + "_"),
verilog_path=verilog_paths,
trace_depth=get_rtlsim_trace_depth(),
top_module_name=self.get_verilog_top_module_name(),
)
# save generated lib filename in attribute
self.set_nodeattr("rtlsim_so", sim.lib._name)
return sim
def get_rtlsim(self):
"""Return a PyVerilator wrapper for the Verilator emulation library
for this node."""
rtlsim_so = self.get_nodeattr("rtlsim_so")
assert os.path.isfile(rtlsim_so), "Cannot find rtlsim library."
# create PyVerilator wrapper
sim = PyVerilator(rtlsim_so)
return sim
def node_res_estimation(self):
"""Returns summarized resource estimation of BRAMs and LUTs
of the node as a dictionary."""
ret = dict()
ret["BRAM_18K"] = self.bram_estimation()
ret["BRAM_efficiency"] = self.bram_efficiency_estimation()
ret["LUT"] = self.lut_estimation()
return ret
def bram_efficiency_estimation(self):
"""Function for BRAM efficiency estimation: actual parameter storage
needed divided by the allocated BRAM storage (from estimation)"""
return 1
def bram_estimation(self):
"""Function for BRAM resource estimation, is member function of
HLSCustomOp class but has to be filled by every node"""
return 0
def lut_estimation(self):
"""Function for LUT resource estimation, is member function of
HLSCustomOp class but has to be filled by every node"""
return 0
def get_exp_cycles(self):
"""Function for estimation of expected cycles for set folding,
is member function of HLSCustomOp class but has to be filled
by every node"""
return 0
def code_generation_ipgen(self, model, fpgapart, clk):
"""Generates c++ code and tcl script for ip generation."""
node = self.onnx_node
# generate top cpp file for ip generation
path = self.get_nodeattr("code_gen_dir_ipgen")
self.code_gen_dict["$AP_INT_MAX_W$"] = [str(self.get_ap_int_max_w())]
self.generate_params(model, path)
self.global_includes()
self.defines("ipgen")
self.blackboxfunction()
self.pragmas()
self.docompute()
template = self.ipgen_template
for key in self.code_gen_dict:
# transform list into long string separated by '\n'
code_gen_line = "\n".join(self.code_gen_dict[key])
template = template.replace(key, code_gen_line)
code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen")
f = open(os.path.join(code_gen_dir, "top_{}.cpp".format(node.name)), "w")
f.write(template)
f.close()
self.code_gen_dict.clear()
# generate tcl script for ip generation
self.code_gen_dict["$PROJECTNAME$"] = ["project_{}".format(node.name)]
self.code_gen_dict["$HWSRCDIR$"] = [code_gen_dir]
self.code_gen_dict["$FPGAPART$"] = [fpgapart]
self.code_gen_dict["$FINNHLSLIBDIR$"] = ["/workspace/finn-hlslib"]
self.code_gen_dict["$TOPFXN$"] = [node.name]
self.code_gen_dict["$CLKPERIOD$"] = [str(clk)]
self.code_gen_dict["$EXTRA_DIRECTIVES$"] = self.ipgen_extra_directives()
template = self.ipgentcl_template
for key in self.code_gen_dict:
# transform list into long string separated by '\n'
code_gen_line = "\n".join(self.code_gen_dict[key])
template = template.replace(key, code_gen_line)
code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen")
f = open(os.path.join(code_gen_dir, "hls_syn_{}.tcl".format(node.name)), "w")
f.write(template)
f.close()
self.code_gen_dict.clear()
def ipgen_extra_directives(self):
"Return a list of extra tcl directives for HLS synthesis."
return []
def ipgen_singlenode_code(self):
"""Builds the bash script for ip generation using the IPGenBuilder from
finn.util.fpgadataflow."""
node = self.onnx_node
code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen")
builder = IPGenBuilder()
builder.append_tcl(code_gen_dir + "/hls_syn_{}.tcl".format(node.name))
builder.set_ipgen_path(code_gen_dir + "/project_{}".format(node.name))
builder.build(code_gen_dir)
self.set_nodeattr("ipgen_path", builder.ipgen_path)
self.set_nodeattr("ip_path", builder.ipgen_path + "/sol1/impl/ip")
vlnv = "xilinx.com:hls:%s:1.0" % node.name
self.set_nodeattr("ip_vlnv", vlnv)
def code_generation_cppsim(self, model):
"""Generates c++ code for simulation (cppsim)."""
node = self.onnx_node
path = self.get_nodeattr("code_gen_dir_cppsim")
self.code_gen_dict["$AP_INT_MAX_W$"] = [str(self.get_ap_int_max_w())]
self.generate_params(model, path)
self.global_includes()
self.defines("cppsim")
self.read_npy_data()
self.strm_decl()
self.pragmas()
self.docompute()
self.dataoutstrm()
self.save_as_npy()
template = self.docompute_template
for key in self.code_gen_dict:
# transform list into long string separated by '\n'
code_gen_line = "\n".join(self.code_gen_dict[key])
template = template.replace(key, code_gen_line)
code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim")
f = open(os.path.join(code_gen_dir, "execute_{}.cpp".format(node.op_type)), "w")
f.write(template)
f.close()
self.code_gen_dict.clear()
def code_generation_ipi(self):
"""Constructs and returns the TCL for node instantiation in Vivado IPI."""
vlnv = self.get_nodeattr("ip_vlnv")
cmd = ["create_bd_cell -type ip -vlnv %s %s" % (vlnv, self.onnx_node.name)]
return cmd
def compile_singlenode_code(self):
"""Builds the bash script for compilation using the CppBuilder from
finn.util.basic and executes the script to produce the executable."""
code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim")
builder = CppBuilder()
        # to enable additional debug features please uncomment the next line
# builder.append_includes("-DDEBUG")
builder.append_includes("-I/workspace/finn/src/finn/data/cpp")
builder.append_includes("-I/workspace/cnpy/")
builder.append_includes("-I/workspace/finn-hlslib")
builder.append_includes("-I{}/include".format(os.environ["VIVADO_PATH"]))
builder.append_includes("--std=c++11")
builder.append_includes("-O3")
builder.append_sources(code_gen_dir + "/*.cpp")
builder.append_sources("/workspace/cnpy/cnpy.cpp")
builder.append_includes("-lz")
builder.set_executable_path(code_gen_dir + "/node_model")
builder.build(code_gen_dir)
self.set_nodeattr("executable_path", builder.executable_path)
def dynamic_input_to_npy(self, context, count):
"""Saves input (given context) into .npy files.
Count indicates the number of inputs that have to be saved."""
node = self.onnx_node
code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim")
if code_gen_dir == "":
raise Exception(
"""
Found no codegen dir for this node, did you run the prepare_cppsim transformation?
"""
)
# create a npy file for each input of the node (in_ind is input index)
# assuming dynamic inputs start from 0
for in_ind in range(count):
current_input_name = node.input[in_ind]
# make copy before saving array
input_array = context[current_input_name].copy()
np.save(
os.path.join(code_gen_dir, "input_{}.npy".format(in_ind)), input_array
)
def npy_to_dynamic_output(self, context):
"""Reads the output from an output.npy file generated from cppsim and
places its content into the context dictionary."""
node = self.onnx_node
code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim")
output = np.load("{}/output.npy".format(code_gen_dir))
context[node.output[0]] = output
def npy_to_dynamic_outputs(self, context, npy_list):
"""Reads the output from .npy files generated from cppsim and places
their content into the context dictionary.
npy_list is a list specifying which files to read, and its order must
match the order of node outputs."""
node = self.onnx_node
code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim")
for i in range(len(npy_list)):
output = np.load("{}/{}".format(code_gen_dir, npy_list[i]))
context[node.output[i]] = output
def exec_precompiled_singlenode_model(self):
"""Executes precompiled executable."""
executable_path = self.get_nodeattr("executable_path")
if executable_path == "":
raise Exception(
"""
Found no executable for this node, did you run the codegen and
compilation transformations?
"""
)
process_execute = subprocess.Popen(executable_path, stdout=subprocess.PIPE)
process_execute.communicate()
    def reset_rtlsim(self, sim):
        """Sets the reset input in pyverilator to zero, toggles the clock and sets the
        reset back to one"""
sim.io.ap_rst_n = 0
sim.io.ap_clk = 1
sim.io.ap_clk = 0
sim.io.ap_rst_n = 1
def toggle_clk(self, sim):
"""Toggles the clock input in pyverilator once."""
sim.io.ap_clk = 1
sim.io.ap_clk = 0
    def rtlsim(self, sim, inp, inp2=None):
        """Runs the pyverilator simulation by passing the input values to the simulation,
        toggling the clock and observing the execution time. The function also contains an
        observation loop that aborts the simulation if no output value is produced
        after 100 cycles."""
trace_file = self.get_nodeattr("rtlsim_trace")
if trace_file != "":
if trace_file == "default":
trace_file = self.onnx_node.name + ".vcd"
sim.start_vcd_trace(trace_file)
inputs = inp
outputs = []
sim.io.out_V_V_TREADY = 1
# observe if output is completely calculated
# observation_count will contain the number of cycles the calculation ran
num_out_values = self.get_number_output_values()
output_observed = False
observation_count = 0
# avoid infinite looping of simulation by aborting when there is no change in
# output values after 100 cycles
no_change_count = 0
old_outputs = outputs
liveness_threshold = pyverilate_get_liveness_threshold_cycles()
while not (output_observed):
sim.io.in0_V_V_TVALID = 1 if len(inputs) > 0 else 0
sim.io.in0_V_V_TDATA = inputs[0] if len(inputs) > 0 else 0
if sim.io.in0_V_V_TREADY == 1 and sim.io.in0_V_V_TVALID == 1:
inputs = inputs[1:]
if inp2 is not None:
sim.io.in1_V_V_TVALID = 1 if len(inp2) > 0 else 0
sim.io.in1_V_V_TDATA = inp2[0] if len(inp2) > 0 else 0
if sim.io.in1_V_V_TREADY == 1 and sim.io.in1_V_V_TVALID == 1:
inp2 = inp2[1:]
if sim.io.out_V_V_TVALID == 1 and sim.io.out_V_V_TREADY == 1:
outputs = outputs + [sim.io.out_V_V_TDATA]
sim.io.ap_clk = 1
sim.io.ap_clk = 0
observation_count = observation_count + 1
no_change_count = no_change_count + 1
if len(outputs) == num_out_values:
self.set_nodeattr("cycles_rtlsim", observation_count)
output_observed = True
if no_change_count == liveness_threshold:
if old_outputs == outputs:
if trace_file != "":
sim.flush_vcd_trace()
sim.stop_vcd_trace()
raise Exception(
"Error in simulation! Takes too long to produce output. "
"Consider setting the LIVENESS_THRESHOLD env.var. to a "
"larger value."
)
else:
no_change_count = 0
old_outputs = outputs
if trace_file != "":
sim.flush_vcd_trace()
sim.stop_vcd_trace()
return outputs
def rtlsim_multi_io(self, sim, io_dict):
"Run rtlsim for this node, supports multiple i/o streams."
trace_file = self.get_nodeattr("rtlsim_trace")
if trace_file == "default":
trace_file = self.onnx_node.name + ".vcd"
num_out_values = self.get_number_output_values()
total_cycle_count = rtlsim_multi_io(sim, io_dict, num_out_values, trace_file)
self.set_nodeattr("cycles_rtlsim", total_cycle_count)
def execute_node(self, context, graph):
"""Executes single node using cppsim or rtlsim."""
mode = self.get_nodeattr("exec_mode")
if mode == "cppsim":
# save input(s)
self.dynamic_input_to_npy(context, 1)
# execute the precompiled model
self.exec_precompiled_singlenode_model()
# load output npy file
self.npy_to_dynamic_output(context)
elif mode == "rtlsim":
pass
else:
raise Exception(
"""Invalid value for attribute exec_mode! Is currently set to: {}
has to be set to one of the following value ("cppsim", "rtlsim")""".format(
mode
)
)
def generate_params(self, model, path):
"""Function to generate parameters (i.e. weights and thresholds),
is member function of HLSCustomOp class but has to be filled
by every node."""
pass
@abstractmethod
def get_number_output_values(self):
"""Function to get the number of expected output values,
is member function of HLSCustomOp class but has to be filled
by every node."""
pass
@abstractmethod
def global_includes(self):
"""Function to set the global includes for c++ code that has to be generated
for cppsim or rtlsim, is member function of HLSCustomOp class but has to
be filled by every node."""
pass
@abstractmethod
def defines(self, var):
"""Function to set the define commands for c++ code that has to be generated
for cppsim or rtlsim, is member function of HLSCustomOp class but has to
be filled by every node.
var: makes it possible to reuse the function for different c++ code generation.
I.e. if set to "ipgen" in StreamingFCLayer_Batch additional PRAGMA defines are
added."""
pass
@abstractmethod
def read_npy_data(self):
"""Function to generate the commands for reading data from .npy file in c++,
is member function of HLSCustomOp class but has to be filled by every node."""
pass
@abstractmethod
def strm_decl(self):
"""Function to generate the commands for the stream declaration in c++,
is member function of HLSCustomOp class but has to be filled
by every node."""
pass
@abstractmethod
def docompute(self):
"""Function to generate the commands for the computational part of the
c++ code, is member function of HLSCustomOp class but has to be filled
by every node."""
pass
@abstractmethod
def dataoutstrm(self):
"""Function to generate the commands for reading out data from c++ and convert
into npy format, is member function of HLSCustomOp class but has to be filled
by every node."""
pass
@abstractmethod
def save_as_npy(self):
"""Function to generate the commands for saving data in .npy file in c++,
is member function of HLSCustomOp class but has to be filled by every node."""
pass
@abstractmethod
def blackboxfunction(self):
"""Function to generate a blackbock function in c++ from which an IP block
will be generated, is member function of HLSCustomOp class but has to be filled
by every node."""
pass
@abstractmethod
def pragmas(self):
"""Function to generate the pragma commands in c++, is member function of
HLSCustomOp class but has to be filled by every node."""
pass
def get_normal_input_shape(self):
"""Returns normal input shape if implemented."""
raise Exception("get_normal_input_shape not implemented for this op")
def get_normal_output_shape(self):
"""Returns folded output shape if implemented."""
raise Exception("get_normal_output_shape not implemented for this op")
def get_folded_input_shape(self):
"""Returns folded input shape (according to synapse folding), if implemented."""
raise Exception("get_folded_input_shape not implemented for this op")
def get_folded_output_shape(self):
"""Returns folded output shape (according to neuron folding), if implemented."""
raise Exception("get_folded_output_shape not implemented for this op")
def get_instream_width(self):
"""Returns input stream width, if implemented."""
raise Exception("get_instream_width not implemented for this op")
def get_outstream_width(self):
"""Returns output stream width, if implemented."""
raise Exception("get_outstream_width not implemented for this op")
def get_instream_width_padded(self):
"""Returns input stream width padded to a multiple of 8. This is required
by the AXI Stream spec."""
in_width = self.get_instream_width()
return roundup_to_integer_multiple(in_width, 8)
def get_outstream_width_padded(self):
"""Returns output stream width padded to a multiple of 8. This is required
by the AXI Stream spec."""
out_width = self.get_outstream_width()
return roundup_to_integer_multiple(out_width, 8)
def get_ap_int_max_w(self):
"Return the maximum width of any ap_int used in this module."
instream = self.get_instream_width()
outstream = self.get_outstream_width()
return max([instream, outstream])
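# Hedged usage sketch (not part of the original FINN file): concrete HLS ops derived from
# HLSCustomOp are normally driven through FINN's transformation flow, which roughly does:
#
#   op = getCustomOp(onnx_node)            # resolve the HLSCustomOp subclass instance
#   op.set_nodeattr("exec_mode", "cppsim")
#   op.code_generation_cppsim(model)       # write execute_<op>.cpp into the codegen dir
#   op.compile_singlenode_code()           # build ./node_model via CppBuilder
#   op.execute_node(context, model.graph)  # dump inputs as .npy, run, read output.npy back
#
# getCustomOp and the surrounding transformations live elsewhere in the FINN code base
# and are assumed here for illustration only.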
|
StarcoderdataPython
|
36726
|
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2019 Lorenzo
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from .abc import BaseObject
from .common import Effect, Name, NamedAPIObject, VerboseEffect
__all__ = (
"Ability",
"AbilityEffectChange",
"AbilityPokemon",
"AbilityFlavorText",
)
class Ability(BaseObject):
"""Represents an ability object from the API.
.. versionadded:: 0.1.2a
.. container:: operations
.. describe:: str(x)
Returns the Pokémon's name.
.. describe:: x[y]
Returns a Pokémon's y attribute.
.. describe:: x == y
Check if two Pokémons are the same.
.. describe:: x != y
Check if two Pokémons are *not* the same.
Attributes
----------
id: :class:`int`
The identifier for the ability.
name: :class:`str`
The name for the ability.
is_main_series: :class:`bool`
Whether or not the ability originated in the main series of the video games.
generation: :class:`NamedAPIObject`
The generation the ability originated in.
names: List[:class:`Name`]
The name of the ability listed in different languages.
effect_entries: List[:class:`VerboseEffect`]
The effect of the ability listed in different languages.
effect_changes: List[:class:`AbilityEffectChange`]
The list of previous effects the ability has had across version groups.
flavor_text_entries: List[:class:`AbilityFlavorText`]
The flavor text of the ability listed in different languages.
pokemon: List[:class:`AbilityPokemon`]
A list of Pokémon that could potentially have the ability."""
__slots__ = (
"is_main_series", "generation", "names", "effect_entries", "effect_changes", "flavor_text_entries", "pokemon"
)
def __init__(self, data: dict):
super().__init__(data)
self.is_main_series = data["is_main_series"]
self.generation = NamedAPIObject(data["generation"])
self.names = [Name(d) for d in data["names"]]
self.effect_entries = [VerboseEffect(d) for d in data["effect_entries"]]
self.effect_changes = [AbilityEffectChange(d) for d in data["effect_changes"]]
self.flavor_text_entries = [AbilityFlavorText(d) for d in data["flavor_text_entries"]]
self.pokemon = [AbilityPokemon(d) for d in data["pokemon"]]
class AbilityEffectChange:
"""Represents a past change of the effect of a move in a version group.
.. versionadded:: 0.1.2a
Attributes
----------
effect_entries: List[:class:`Effect`]
The previous effect of the ability listed in different languages.
version_group: :class:`NamedAPIObject`
The version group in which the previous effect of the ability originated."""
__slots__ = ("effect_entries", "version_group")
def __init__(self, data: dict):
self.effect_entries = [Effect(d) for d in data["effect_entries"]]
self.version_group = NamedAPIObject(data["version_group"])
def __repr__(self) -> str:
return "<AbilityEffectChange version_group='{0.version_group}'>".format(self)
class AbilityPokemon:
    """Represents a Pokémon of an :class:`Ability`.
Attributes
----------
is_hidden: :class:`bool`
Whether or not this a hidden ability for the Pokémon.
slot: :class:`int`
The slot of the ability for the pokemon.
pokemon: :class:`NamedAPIObject`
The Pokémon this ability could belong to."""
__slots__ = ("is_hidden", "slot", "pokemon")
def __init__(self, data: dict):
self.is_hidden = data["is_hidden"]
self.slot = data["slot"]
self.pokemon = NamedAPIObject(data["pokemon"])
def __repr__(self) -> str:
return "<AbilityPokemon is_hidden={0.is_hidden} slot={0.slot} pokemon='{0.pokemon}'>".format(self)
class AbilityFlavorText:
    """Represents the flavor text for an ability, with a language and a version group.
    .. container:: operations
.. describe:: str(x)
Returns the actual flavor text.
Attributes
----------
flavor_text: :class:`str`
The actual text.
language: :class:`NamedAPIObject`
The language in which the text is in.
version_group: :class:`NamedAPIObject`
The version group that uses this text."""
__slots__ = ("flavor_text", "language", "version_group")
def __init__(self, data: dict):
self.flavor_text = data["flavor_text"]
self.language = NamedAPIObject(data["language"])
self.version_group = NamedAPIObject(data["version_group"])
def __str__(self) -> str:
return self.flavor_text
def __repr__(self) -> str:
return "<AbilityFlavorText language='{0.language}' version_group='{0.version_group}'>".format(self)
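# Hedged usage sketch (assumption, not part of this module): given a raw PokeAPI
# /ability/<id> payload already parsed into a dict named `data`, the wrapper is built
# directly from it, e.g.
#
#   ability = Ability(data)
#   print(str(ability), ability.is_main_series)
#   hidden_for = [ap.pokemon for ap in ability.pokemon if ap.is_hidden]
#   flavor_texts = [str(ft) for ft in ability.flavor_text_entries]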
|
StarcoderdataPython
|
9681057
|
<reponame>63Shivani/Python-BootCamp
age1 = int(input("enter age\n"))
age2 = int(input("enter age\n"))
age3 = int(input("enter age\n"))
age4 = int(input("enter age\n"))
if(age1>age2 and age1>age3 and age1>age4):
print(age1,"year old is oldest")
elif(age2>age1 and age2>age3 and age2>age4):
print(age2,"year old is oldest")
elif(age3>age1 and age3>age2 and age3>age4):
print(age3,"year old is oldest")
else:
print(age4,"year old is oldest")
|
StarcoderdataPython
|
157415
|
#!/usr/bin/python3
import re
import nltk
from nltk import pos_tag,word_tokenize,sent_tokenize
from textblob import TextBlob
import pandas as pd
def remove_punc(sentence):
return re.sub(r'[^\w\s]',' ',sentence).lower()
def get_sentiment(sentence):
a = TextBlob(sentence)
return round(a.sentiment[0],4)
def get_diversity(sentence):
t = remove_punc(sentence)
tt = pos_tag(word_tokenize(t))
tags = [t[1] for t in tt]
sent_len = len(tags)
uniques = list(set(tags))
if len(uniques) / sent_len > 0.45:
return 'high'
else:
return 'low'
def get_typology(sentence):
"""
    takes a sentence string as input
    returns "high" or "low" depending on whether the word order looks subject-before-verb
"""
def index_sent_tokens(sent):
t = remove_punc(sentence)
tt = pos_tag(word_tokenize(t))
index_list = [tt.index(item) for item in tt]
tag_list = [tag[1] for tag in tt]
return zip(index_list,tag_list)
sent_idx = list(index_sent_tokens(sentence))
verb_tags = ['VB','VBD','VBZ','VBG','VBN']
noun_tags = ['NN','NNS','PRP','NNP','PRP']
typology = "low"
try:
subject_ = [item[0] for item in sent_idx if item[1] in noun_tags][0]
object_ = [item[0] for item in sent_idx if item[1] in noun_tags][-1]
verb_ = [item[0] for item in sent_idx if item[1] in verb_tags][0]
if subject_ < verb_:
typology = "high"
else:
typology = "low"
# if subject_ < object_:
# if object_ < verb_:
# typology = "SOV"
except:
typology = "low"
return typology
def categorize_length(word_count):
if 0 < word_count < 14:
return "low"
if word_count >= 16:
return "high"
else:
return "low"
def categorize_verbs(verb_count):
if verb_count <= 3:
return "low"
else:
return "high"
def categorize_nouns(noun_count):
if noun_count <= 4:
return "low"
else:
return "high"
class ProcessedText():
def __init__(self):
pass
def get_word_tokens(self,g):
return word_tokenize(g)
def get_noun_count(self,g):
nn = ['NN','NNS','PRP','NNP','PRP']
c = [item[1] for item in g if item[1] in nn]
return len(c)
def get_verb_count(self,g):
vb = ['VB','VBD','VBZ','VBG','VBN']
v = [item[1] for item in g if item[1] in vb]
return len(v)
def result(self,text):
grammar = pos_tag(word_tokenize(remove_punc(text)))
words = self.get_word_tokens(remove_punc(text))
op = {}
attributes = []
att1 = categorize_length(len(words))
attributes.append(att1)
op['Word Count'] = att1
att2 = categorize_nouns(self.get_noun_count(grammar))
attributes.append(att2)
op['Noun Count'] = att2
att3 = categorize_verbs(self.get_verb_count(grammar))
attributes.append(att3)
op["Verb Count"] = att3
att4 = get_typology(text)
attributes.append(att4)
op["SVO Chance"] = att4
att5 = get_diversity(text)
attributes.append(att5)
op["Diversity"] = att5
label = 'bad'
high_count = [att for att in attributes if att=='high']
if len(high_count) >= 3:
label = 'good'
op["Label"] = label
#op["Sentiment"] = get_sentiment(text)
return op
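# Hedged usage sketch (illustrative only): ProcessedText().result(sentence) returns a dict
# of "high"/"low" attributes plus a "Label" that is "good" when at least three attributes
# come out "high", e.g.
#
#   sut = ProcessedText()
#   sut.result("The committee carefully reviewed the proposal before approving the budget.")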
def create_dataset(file_path,output_name="summary"):
with open(file_path, "r") as file:
text = file.read()
sentences = sent_tokenize(text)
s_length = len(sentences)
rows = []
count = 0
sut = ProcessedText()
for s in sentences:
print ("processing row {}".format(count))
count += 1
rows.append(sut.result(s))
pass
df = pd.DataFrame(rows)
df.to_csv('csvs/{}-nlp-data.csv'.format(output_name), index=False, header=True)
print (df.head(10))
return
print(create_dataset('summary.txt'))
|
StarcoderdataPython
|
212256
|
<filename>secedgar/utils/__init__.py
import datetime
import errno
import os
def sanitize_date(date):
"""Sanitizes date to be in acceptable format for EDGAR.
Args:
date (Union[datetime.datetime, str]): Date to be sanitized for request.
Returns:
date (str): Properly formatted date in 'YYYYMMDD' format.
Raises:
TypeError: If date is not in format YYYYMMDD as str or int.
"""
if isinstance(date, datetime.datetime):
return date.strftime("%Y%m%d")
elif isinstance(date, str):
if len(date) != 8:
raise TypeError('Date must be of the form YYYYMMDD')
elif isinstance(date, int):
if date < 10 ** 7 or date > 10 ** 8:
raise TypeError('Date must be of the form YYYYMMDD')
return date
def make_path(path, **kwargs):
"""Make directory based on filing info.
Args:
path (str): Path to be made if it doesn't exist.
Raises:
OSError: If there is a problem making the path.
Returns:
None
"""
if not os.path.exists(path):
try:
os.makedirs(path, **kwargs)
except OSError as e:
if e.errno != errno.EEXIST:
raise OSError
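# Hedged usage sketch (not part of the original secedgar module): exercise the two helpers
# above with representative inputs; names below are local to this demo only.
if __name__ == "__main__":
    import tempfile
    print(sanitize_date(datetime.datetime(2020, 1, 31)))  # -> "20200131"
    print(sanitize_date("20200131"))                       # -> "20200131"
    print(sanitize_date(20200131))                         # -> 20200131
    demo_dir = os.path.join(tempfile.gettempdir(), "secedgar_demo", "filings")
    make_path(demo_dir)  # creates the nested directories if missing
    make_path(demo_dir)  # second call is a no-op because the path already exists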
|
StarcoderdataPython
|
3499840
|
""" The Application's Entry Point"""
import os
from code import create_app
app = create_app()
if __name__ == '__main__':
app.run(debug=True)
|
StarcoderdataPython
|
3328717
|
<filename>face_tracking.py<gh_stars>1-10
# coding: utf-8
import picamera
import picamera.array
import cv2
import pigpio
import time
xsv = 25    # GPIO port number of the X-axis servo
ysv = 24    # GPIO port number of the Y-axis servo
span = 300  # movable range (duty value) on each side of the servo center
xct = 1550  # center duty value of the X-axis servo
yct = 1490  # center duty value of the Y-axis servo
dly = 0.01  # wait time between steps while driving a servo
stp = 2     # duty step size while driving a servo
xsize = 320 # RGB horizontal size
ysize = 240 # RGB vertical size
# servo drive range
xmin = xct - span
xmax = xct + span
ymin = yct - span
ymax = yct + span
# global variables
xpos = xct
ypos = yct
xpos0 = xpos
ypos0 = ypos
sv = pigpio.pi()
def move(svn,in0,in1,step):
if in1 > in0:
for duty in range(in0,in1,step):
sv.set_servo_pulsewidth(svn,duty)
time.sleep(dly)
if in1 < in0:
for duty in range(in0,in1,-step):
sv.set_servo_pulsewidth(svn,duty)
time.sleep(dly)
# move the camera to the center position
move(xsv,sv.get_servo_pulsewidth(xsv),xpos,stp)
move(ysv,sv.get_servo_pulsewidth(ysv),ypos,stp)
cascade_file = "./haarcascade_frontalface_default.xml"
with picamera.PiCamera() as camera:
with picamera.array.PiRGBArray(camera) as stream:
camera.resolution = (xsize, ysize)
camera.vflip = True
camera.hflip = True
while True:
            # capture a frame into stream.array (the capture format here is 'bgr')
camera.capture(stream, 'bgr', use_video_port=True)
            # convert to grayscale
gray = cv2.cvtColor(stream.array, cv2.COLOR_BGR2GRAY)
            # locate faces using the Haar cascade file
cascade = cv2.CascadeClassifier(cascade_file)
face_list = cascade.detectMultiScale(gray, minSize=(100, 100))
if len(face_list):
for (x, y, w, h) in face_list:
print("face_position:",x, y, w, h)
color = (0, 0, 255)
pen_w = 5
cv2.rectangle(stream.array, (x, y), (x+w, y+h), color, thickness = pen_w)
                    # move the camera toward the detected face
xdf = (x + w/2) - xsize/2
ydf = (y + h/2) - ysize/2
xpos = int(xpos0 - xdf*0.2)
ypos = int(ypos0 + ydf*0.2)
if xpos > xmax:
xpos = xmax
if xpos < xmin:
xpos = xmin
if ypos > ymax:
ypos = ymax
if ypos < ymin:
ypos = ymin
move(xsv,xpos0,xpos,stp)
move(ysv,ypos0,ypos,stp)
xpos0 = xpos
ypos0 = ypos
            # display stream.array in a window
            cv2.imshow('frame', stream.array)
            # close the window when "q" is pressed
if cv2.waitKey(1) & 0xFF == ord("q"):
break
            # reset the stream for the next capture
stream.seek(0)
stream.truncate()
cv2.destroyAllWindows()
|
StarcoderdataPython
|
5122799
|
"""
Class for managing our data.
"""
import csv
import numpy as np
import os.path
import random
import threading
from keras.utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator
import cv2
class DataSet():
def __init__(self, class_limit=None, image_shape=(224, 224), original_image_shape=(341, 256), batch_size=16):
"""Constructor.
opt_flow_len = (int) the number of optical flow frames to consider
class_limit = (int) number of classes to limit the data to.
None = no limit.
"""
self.class_limit = class_limit
self.image_shape = image_shape
self.original_image_shape = original_image_shape
self.batch_size = batch_size
self.static_frame_path = os.path.join('./data','test')
self.opt_flow_path = os.path.join('./data', 'opt_flow')
self.pose_path = os.path.join('./data', 'pose')
# Get the data.
self.data_list = self.get_data_list()
# Get the classes.
self.classes = self.get_classes()
# Now do some minor data cleaning
self.data_list = self.clean_data_list()
        # number of batches in 1 epoch
        # (assumption: derived from the full cleaned data list, since no separate
        # train/test split lists are built inside this class)
        self.n_batch_train = len(self.data_list) // self.batch_size
        self.n_batch_test = len(self.data_list) // self.batch_size
@staticmethod
def get_data_list():
"""Load our data list from file."""
with open(os.path.join('./data', 'data_list.csv'), 'r') as fin:
reader = csv.reader(fin)
data_list = list(reader)
return data_list
def clean_data_list(self):
data_list_clean = []
for item in self.data_list:
if item[1] in self.classes:
data_list_clean.append(item)
return data_list_clean
    def get_classes(self):
        """Extract the classes from our data. If we want to limit them,
only return the classes we need."""
classes = []
for item in self.data_list:
if item[1] not in classes:
classes.append(item[1])
# Sort them.
classes = sorted(classes)
# Return.
if self.class_limit is not None:
return classes[:self.class_limit]
else:
return classes
    @staticmethod
    def format_gen_outputs(gen1, gen2, gen3):
x1 = gen1[0]
x2 = gen2[0]
x3 = gen3[0]
y1 = gen1[1]
return [x1, x2, x3], y1
def get_generators(self):
train_datagen = ImageDataGenerator(
preprocessing_function=self.normalize,
rotation_range=30,
rescale=1./255,
shear_range=0.2,
horizontal_flip=True
# width_shift_range=0.2,
# height_shift_range=0.2)
)
valid_datagen = ImageDataGenerator(rescale=1./255, preprocessing_function=self.normalize)
valid_dir1=os.path.join('./data', 'valid')
valid_dir2=os.path.join('./data/opt_flow', 'valid')
valid_dir3=os.path.join('./data/pose', 'valid')
train_dir1=os.path.join('./data', 'train')
train_dir2=os.path.join('./data/opt_flow', 'train')
train_dir3=os.path.join('./data/pose', 'train')
seed = random.randint(1,1001)
valid_genX1 = valid_datagen.flow_from_directory(valid_dir1,
target_size=self.image_shape,
batch_size=self.batch_size,
#classes=data.classes,
class_mode='categorical',
shuffle=True,
seed=seed)
valid_genX2 = valid_datagen.flow_from_directory(valid_dir2,
target_size=self.image_shape,
batch_size=self.batch_size,
#classes=data.classes,
class_mode='categorical',
shuffle=True,
seed=seed)
valid_genX3 = valid_datagen.flow_from_directory(valid_dir3,
target_size=self.image_shape,
batch_size=self.batch_size,
#classes=data.classes,
class_mode='categorical',
shuffle=True,
seed=seed)
validation_generator = map(self.format_gen_outputs, valid_genX1, valid_genX2, valid_genX3)
seed = random.randint(1,1001)
train_genX1 = train_datagen.flow_from_directory(train_dir1,
target_size=self.image_shape,
batch_size=self.batch_size,
#classes=data.classes,
class_mode='categorical',
shuffle=True,
seed=seed)
train_genX2 = train_datagen.flow_from_directory(train_dir2,
target_size=self.image_shape,
batch_size=self.batch_size,
#classes=data.classes,
class_mode='categorical',
shuffle=True,
seed=seed)
train_genX3 = train_datagen.flow_from_directory(train_dir3,
target_size=self.image_shape,
batch_size=self.batch_size,
#classes=data.classes,
class_mode='categorical',
shuffle=True,
seed=seed)
train_generator = map(self.format_gen_outputs, train_genX1, train_genX2, train_genX3)
return train_generator, validation_generator
def normalize(self, x):
x[1] = (x[1]-0.458)/0.229
x[2] = (x[2]-0.456)/0.224
x[3] = (x[3]-0.406)/0.225
return x
|
StarcoderdataPython
|
3426256
|
import pandas as pd
import numpy as np
import copy as cp
import sys
import pickle
import hashlib as hs
import base64 as b64
import os
import random as rd
import datetime as dt
import threading
import time
from difflib import SequenceMatcher
from darc_core.metrics import Metrics
from darc_core.preprocessing import round1_preprocessing
from darc_core.utils import check_format_trans_file
def partition(df,y,m,u):
return df[(df["id_user"]==u) & (df["month"]==m) & (df["year"]==y)]["id_item"]
def extractListForMonth(df,y,m):
users = df[(df["month"]==m) & (df["year"]==y)]["id_user"].unique().tolist()
months = list(range(1,13))
key = []
hashed = []
for u in users:
key=[]
partitioned = partition(df,y,m,u)
key=partitioned.unique().tolist()
hashed.append([u,key,len(key)])
hashed=sorted(hashed, key = lambda x:x[2])
return hashed
def generateLists(df):
lists_all=[]
lists_all.append(extractListForMonth(df,2010,12))
for m in range(1,13):
lists_all.append(extractListForMonth(df,2011,m))
return lists_all
if __name__ == "__main__":
df = pd.read_csv("./ground_truth.csv", parse_dates=["date"])
df["id_user"]=df["id_user"].astype(str)
df["month"]=df["date"].dt.month
df["year"]=df["date"].dt.year
dx = pd.read_csv("./atxf.csv", parse_dates=["date"])
dx["month"]=dx["date"].dt.month
dx["year"]=dx["date"].dt.year
all_anon = generateLists(dx)
with open('all_clear', 'wb') as fp:
        pickle.dump(all_anon, fp)  # assumption: the list built above is what should be persisted here
|
StarcoderdataPython
|
238098
|
#!/usr/bin/env python
import argparse
import logging
import sys
from autoscale import MesosReporter, MesosDecider, AwsAsgScaler
def main():
parser = argparse.ArgumentParser()
    parser.add_argument('-l', '--log-level', default="warn", help='Log level (debug, info, [default] warn, error)')
parser.add_argument('-u', '--mesos-url', help='Mesos cluster URL', required=True)
parser.add_argument('-c', '--cpus', help='Comma-delimited CPU thresholds (lower,upper)')
parser.add_argument('-d', '--disk', help='Comma-delimited disk thresholds (lower,upper)')
parser.add_argument('-m', '--mem', help='Comma-delimited memory thresholds (lower,upper)')
parser.add_argument('-r', '--region', help='AWS region', required=True)
parser.add_argument('-a', '--asg', help='AWS auto scaling group name', required=True)
args = parser.parse_args()
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stderr, level=getattr(logging, args.log_level.upper()))
thresholds = {}
if args.cpus:
lower, upper = args.cpus.split(',')
thresholds['cpus'] = dict(lower=int(lower), upper=int(upper))
if args.disk:
lower, upper = args.disk.split(',')
thresholds['disk'] = dict(lower=int(lower), upper=int(upper))
if args.mem:
lower, upper = args.mem.split(',')
thresholds['mem'] = dict(lower=int(lower), upper=int(upper))
reporter = MesosReporter(args.mesos_url)
decider = MesosDecider(thresholds)
scaler = AwsAsgScaler(args.region, args.asg)
delta = decider.should_scale(reporter)
if delta:
print('Scaling {asg} in {region} by {delta}'.format(asg=args.asg, region=args.region, delta=delta))
scaler.scale(delta)
else:
print('No change needed for {asg} in {region}'.format(asg=args.asg, region=args.region))
if __name__ == '__main__':
main()
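# Example invocation (a hedged sketch; thresholds are given as "lower,upper" values):
#   python autoscale.py --mesos-url http://mesos-master:5050 \
#       --cpus 20,80 --mem 20,85 --region us-east-1 --asg my-mesos-agents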
|
StarcoderdataPython
|
3328235
|
# Insertion sort: grow the sorted prefix a[0..i-1] and insert a[i] into it on each pass.
a = [11, 6, 4, 5, 9, 2, 7, 0, 1, -1, -9]
n = len(a)
for i in range(1, n):
    temp = a[i]
    j = i - 1
    # shift elements of the sorted prefix that are greater than temp one slot to the right
    while j >= 0:
        if temp >= a[j]:
            break
        a[j + 1] = a[j]
        j -= 1
    # place temp into the gap left behind
    a[j + 1] = temp
print(a)
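# Expected output of the run above (sorted ascending):
#   [-9, -1, 0, 1, 2, 4, 5, 6, 7, 9, 11]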
|
StarcoderdataPython
|
4851594
|
from starling_sim.basemodel.trace.events import *
from starling_sim.basemodel.agent.requests import UserStop, StopPoint, StationRequest
from starling_sim.utils.constants import PUBLIC_TRANSPORT_TYPE
class KPI:
"""
Generic structure of a KPI class
Its sub-classes compute and update specific indicator
from given events
"""
#: **agentId**: id of the agent
KEY_ID = "agentId"
def __init__(self):
"""
The indicator_dict associates values to the indicators names
The keys attribute correspond to the keys of the indicator_dict
"""
self.indicator_dict = None
self.keys = []
self.new_indicator_dict()
def setup(self, simulation_model):
"""
Setup method called during simulation setup.
:param simulation_model:
:return:
"""
pass
def new_indicator_dict(self):
"""
Reset the indicator_dict, when computing the indicator
for a new target
:return: None, resets directly the indicator_dict attribute
"""
pass
def update(self, event, agent):
"""
Update the kpi values according to the event content and the agent.
:param event: processed event
:param agent: subject of the event
:return:
"""
if isinstance(event, InputEvent):
self.indicator_dict[self.KEY_ID] = agent.id
class MoveKPI(KPI):
"""
    This KPI evaluates the distance travelled and time spent for each of the simulation modes
"""
#: **{mode}Distance**: distance travelled in <mode> [meters]
SUFFIX_KEY_DISTANCE = "{mode}Distance"
#: **{mode}Time**: time travelled in <mode> [seconds]
SUFFIX_KEY_TIME = "{mode}Time"
def __init__(self):
self.modes = []
# init of indicator dict
super().__init__()
def setup(self, simulation_model):
self.modes = list(simulation_model.environment.topologies.keys())
self.new_indicator_dict()
def new_indicator_dict(self):
"""
Initialize the time and distance values at 0
for the considered modes
:return:
"""
base_dict = {}
for mode in self.modes:
key = self.SUFFIX_KEY_DISTANCE.format(mode=mode)
base_dict[key] = 0
self.keys += [key]
key = self.SUFFIX_KEY_TIME.format(mode=mode)
base_dict[key] = 0
self.keys += [key]
self.indicator_dict = base_dict
def update(self, event, agent):
"""
Add travelled distances and durations
:param agent:
:param event:
:return:
"""
super().update(event, agent)
if isinstance(event, MoveEvent):
self.indicator_dict[self.SUFFIX_KEY_DISTANCE.format(mode=event.mode)] += event.distance
self.indicator_dict[self.SUFFIX_KEY_TIME.format(mode=event.mode)] += event.duration
class WaitKPI(KPI):
"""
This KPI evaluates the time spent waiting
"""
#: **waitTime**: total traced wait time [seconds]
KEY_WAIT = "waitTime"
def __init__(self):
super().__init__()
self.keys = [self.KEY_WAIT]
def new_indicator_dict(self):
self.indicator_dict = {self.KEY_WAIT: 0}
def update(self, event, agent):
"""
Add total wait duration of the request
:param agent:
:param event:
:return:
"""
super().update(event, agent)
if isinstance(event, RequestEvent):
self.indicator_dict[self.KEY_WAIT] += sum(event.request.waitSequence)
if isinstance(event, WaitEvent):
self.indicator_dict[self.KEY_WAIT] += event.waiting_time
class OdtWaitsKPI(KPI):
"""
This KPI evaluates the lateness in ODT requests.
"""
#: **odtPickupWait**: series of wait times at ODT pickups [seconds]
KEY_PICKUP_WAIT = "odtPickupWait"
#: **odtDetour**: series of ODT detour times [seconds]
KEY_DETOUR = "odtDetour"
#: **odtDirectTrip**: series of ODT direct trip times [seconds]
KEY_DIRECT_TRIP = "odtDirectTrip"
def __init__(self):
super().__init__()
self.keys = [self.KEY_PICKUP_WAIT, self.KEY_DETOUR, self.KEY_DIRECT_TRIP]
def new_indicator_dict(self):
self.indicator_dict = {self.KEY_PICKUP_WAIT: "", self.KEY_DETOUR: "", self.KEY_DIRECT_TRIP: ""}
def update(self, event, agent):
"""
Add wait durations of ODT requests to KPIs.
:param event:
:param agent:
:return:
"""
# TODO : find a better condition
if isinstance(event, StopEvent) and event.serviceVehicle.type != PUBLIC_TRANSPORT_TYPE:
dropoff_agents = [request.agent.id for request in event.dropoffs]
pickup_agents = [request.agent.id for request in event.pickups]
if agent.id in dropoff_agents:
request = event.dropoffs[dropoff_agents.index(agent.id)]
if len(request.waitSequence) > 1:
if self.indicator_dict[self.KEY_DETOUR] != "":
self.indicator_dict[self.KEY_DETOUR] += "-"
self.indicator_dict[self.KEY_DIRECT_TRIP] += "-"
self.indicator_dict[self.KEY_DETOUR] += str(request.waitSequence[1])
self.indicator_dict[self.KEY_DIRECT_TRIP] += str(request.directTravelTime)
elif agent.id in pickup_agents:
request = event.pickups[pickup_agents.index(agent.id)]
if len(request.waitSequence) > 0:
if self.indicator_dict[self.KEY_PICKUP_WAIT] != "":
self.indicator_dict[self.KEY_PICKUP_WAIT] += "-"
self.indicator_dict[self.KEY_PICKUP_WAIT] += str(request.waitSequence[0])
class GetVehicleKPI(KPI):
"""
This KPI evaluates the number of vehicle uses
"""
#: **nbGetVehicle**: number of uses of the vehicle
KEY_GET_VEHICLE = "nbGetVehicle"
def __init__(self):
super().__init__()
self.keys = [self.KEY_GET_VEHICLE]
def new_indicator_dict(self):
self.indicator_dict = {self.KEY_GET_VEHICLE: 0}
def update(self, event, agent):
"""
Add a new use for each GetVehicleEvent
:param agent:
:param event:
:return:
"""
super().update(event, agent)
if isinstance(event, GetVehicleEvent):
self.indicator_dict[self.KEY_GET_VEHICLE] += 1
class SuccessKPI(KPI):
"""
This KPI evaluates the number of failed/successful requests
"""
#: **nbFailedGet**: number of failed get requests
KEY_FAILED_GET = "nbFailedGet"
    #: **nbSuccessGet**: number of successful get requests
KEY_SUCCESS_GET = "nbSuccessGet"
#: **nbFailedPut**: number of failed put requests
KEY_FAILED_PUT = "nbFailedPut"
#: **nbSuccessPut**: number of successful put requests
KEY_SUCCESS_PUT = "nbSuccessPut"
#: **nbFailedRequest**: number of failed requests
KEY_FAILED_REQUEST = "nbFailedRequest"
#: **nbSuccessRequest**: number of successful requests
KEY_SUCCESS_REQUEST = "nbSuccessRequest"
def __init__(self, indicator_selection):
super().__init__()
self.keys = indicator_selection
def new_indicator_dict(self):
base_dict = {self.KEY_FAILED_GET: 0, self.KEY_SUCCESS_GET: 0,
self.KEY_FAILED_PUT: 0, self.KEY_SUCCESS_PUT: 0,
self.KEY_FAILED_REQUEST: 0, self.KEY_SUCCESS_REQUEST: 0}
self.indicator_dict = base_dict
def update(self, event, agent):
"""
Add request events according to their success
:param agent:
:param event:
:return:
"""
super().update(event, agent)
if isinstance(event, RequestEvent):
if event.request.success:
self.indicator_dict[self.KEY_SUCCESS_REQUEST] += 1
if event.request.type == StationRequest.GET_REQUEST:
self.indicator_dict[self.KEY_SUCCESS_GET] += 1
else:
self.indicator_dict[self.KEY_SUCCESS_PUT] += 1
else:
self.indicator_dict[self.KEY_FAILED_REQUEST] += 1
if event.request.type == StationRequest.GET_REQUEST:
self.indicator_dict[self.KEY_FAILED_GET] += 1
else:
self.indicator_dict[self.KEY_FAILED_PUT] += 1
class StaffOperationKPI(KPI):
"""
This KPI evaluates the number of staff operations
"""
#: **nbFailedGetStaff**: number of failed gets by staff
KEY_FAILED_GET_STAFF = "nbFailedGetStaff"
#: **nbSuccessGetStaff**: number of successful gets by staff
KEY_SUCCESS_GET_STAFF = "nbSuccessGetStaff"
#: **nbFailedPutStaff**: number of failed puts by staff
KEY_FAILED_PUT_STAFF = "nbFailedPutStaff"
#: **nbSuccessPutStaff**: number of successful puts by staff
KEY_SUCCESS_PUT_STAFF = "nbSuccessPutStaff"
def __init__(self):
super().__init__()
self.keys = [self.KEY_FAILED_GET_STAFF, self.KEY_SUCCESS_GET_STAFF,
self.KEY_FAILED_PUT_STAFF, self.KEY_SUCCESS_PUT_STAFF]
def new_indicator_dict(self):
self.indicator_dict = {self.KEY_FAILED_GET_STAFF: 0,
self.KEY_SUCCESS_GET_STAFF: 0,
self.KEY_FAILED_PUT_STAFF: 0,
self.KEY_SUCCESS_PUT_STAFF: 0}
def update(self, event, agent):
"""
Add operations to the total
:param agent:
:param event:
:return:
"""
super().update(event, agent)
if isinstance(event, StaffOperationEvent):
goal = event.goal
total = event.total
if goal < 0:
self.indicator_dict[self.KEY_SUCCESS_GET_STAFF] += abs(total)
self.indicator_dict[self.KEY_FAILED_GET_STAFF] += total - goal
elif goal > 0:
self.indicator_dict[self.KEY_SUCCESS_PUT_STAFF] += total
self.indicator_dict[self.KEY_FAILED_PUT_STAFF] += goal - total
class OccupationKPI(KPI):
"""
This KPI evaluates the empty and full time and distance
and the stock relative time/distance
"""
def __init__(self):
#: **emptyTime**: time spent empty [seconds]
self.KEY_EMPTY_TIME = "emptyTime"
#: **emptyDistance**: distance travelled empty [meters]
self.KEY_EMPTY_DISTANCE = "emptyDistance"
#: **fullTime**: time spent full [seconds]
self.KEY_FULL_TIME = "fullTime"
#: **fullDistance**: distance travelled full [meters]
self.KEY_FULL_DISTANCE = "fullDistance"
#: **stockTime**: stock relative time (stock*time) [seconds]
self.KEY_STOCK_TIME = "stockTime"
#: **stockDistance**: stock relative distance (stock*distance) [meters]
self.KEY_STOCK_DISTANCE = "stockDistance"
#: **maxStock**: maximum stock
self.KEY_MAX_STOCK = "maxStock"
super().__init__()
self.keys = [self.KEY_EMPTY_TIME, self.KEY_EMPTY_DISTANCE, self.KEY_FULL_TIME, self.KEY_FULL_DISTANCE,
self.KEY_STOCK_TIME, self.KEY_STOCK_DISTANCE, self.KEY_MAX_STOCK]
self.capacity = None
self.currentStock = None
self.previousTime = 0
self.currentDistance = None
def new_indicator_dict(self):
"""
Initialize the time and distance counts to 0.
"""
self.indicator_dict = {self.KEY_EMPTY_TIME: 0, self.KEY_EMPTY_DISTANCE: 0,
self.KEY_FULL_TIME: 0, self.KEY_FULL_DISTANCE: 0,
self.KEY_STOCK_TIME: 0, self.KEY_STOCK_DISTANCE: 0,
self.KEY_MAX_STOCK: 0}
self.capacity = None
self.currentStock = None
self.previousTime = 0
self.currentDistance = None
def get_capacity(self, element):
"""
Get the capacity of the agent, according to its type.
:param element:
:return: agent's capacity
"""
return self.capacity
def get_initial_stock(self, element):
"""
Get the initial stock of the agent, according to its type.
:param element:
:return: agent's initial stock
"""
return self.currentStock
def add_to_stock(self, value, timestamp):
"""
Update the full and empty time and distance counts, according to the previous
stock value, then updates the stock and time.
:param value: stock change (negative for stock loss)
:param timestamp: timestamp of the stock change event
"""
# compute time spent with last stock
duration = timestamp - self.previousTime
# add time to relevant time count
if self.currentStock == 0:
self.indicator_dict[self.KEY_EMPTY_TIME] += duration
if self.currentDistance is not None:
self.indicator_dict[self.KEY_EMPTY_DISTANCE] += self.currentDistance
elif self.currentStock == self.capacity:
self.indicator_dict[self.KEY_FULL_TIME] += duration
if self.currentDistance is not None:
self.indicator_dict[self.KEY_FULL_DISTANCE] += self.currentDistance
# add stock relative time and distance
self.indicator_dict[self.KEY_STOCK_TIME] += duration * self.currentStock
if self.currentDistance is not None:
self.indicator_dict[self.KEY_STOCK_DISTANCE] += self.currentDistance * self.currentStock
# update stock and current time
self.currentStock += value
self.previousTime = timestamp
if self.currentStock > self.indicator_dict[self.KEY_MAX_STOCK]:
self.indicator_dict[self.KEY_MAX_STOCK] = self.currentStock
# reset distance count
if self.currentDistance is not None:
self.currentDistance = 0
def update(self, event, agent):
"""
Update the stock and time counts from traced events
:param agent:
:param event:
"""
super().update(event, agent)
if isinstance(event, InputEvent):
self.capacity = self.get_capacity(event.element)
self.currentStock = self.get_initial_stock(event.element)
self.indicator_dict[self.KEY_MAX_STOCK] = self.currentStock
if isinstance(event, LeaveSimulationEvent):
self.add_to_stock(0, event.timestamp)
class StationOccupationKPI(OccupationKPI):
"""
This KPI evaluates the time spent in the empty and full states (of a station),
and the stock relative time spent in the station
"""
def __init__(self):
super().__init__()
self.keys = [self.KEY_EMPTY_TIME, self.KEY_FULL_TIME, self.KEY_STOCK_TIME]
def get_capacity(self, element):
return element.capacity
def get_initial_stock(self, element):
return element.initial_stock
def update(self, event, agent):
"""
Update the stock and time counts from request events
:param agent:
:param event:
"""
super().update(event, agent)
if isinstance(event, RequestEvent) and event.request.success:
request = event.request
# update time counts and current time
if request.type == StationRequest.GET_REQUEST:
self.add_to_stock(-1, request.timestamp)
elif request.type == StationRequest.PUT_REQUEST:
self.add_to_stock(1, request.timestamp)
if isinstance(event, StaffOperationEvent):
if event.total != 0:
self.add_to_stock(event.total, event.timestamp)
class VehicleOccupationKPI(OccupationKPI):
"""
This KPI evaluates the time and distance in the empty and full states (of vehicle),
and a passenger relative distance and time.
"""
def __init__(self):
super().__init__()
self.currentDistance = 0
def new_indicator_dict(self):
super().new_indicator_dict()
self.currentDistance = 0
def get_capacity(self, element):
return element.seats
def get_initial_stock(self, element):
# for now, initial stock is always 0 in our simulation
return 0
def update(self, event, agent):
"""
Update the stock and time/distance counts from
get/leave vehicle and move events
:param agent:
:param event:
"""
super().update(event, agent)
if isinstance(event, GetVehicleEvent):
self.add_to_stock(1, event.timestamp)
elif isinstance(event, LeaveVehicleEvent):
self.add_to_stock(-1, event.timestamp)
if isinstance(event, MoveEvent):
self.currentDistance += event.distance
class ChargeKPI(KPI):
"""
This KPI evaluates the trips's boards and un-boards
"""
#: **routeId**: gtfs route id
KEY_ROUTE_ID = "routeId"
#: **tripId**: gtfs trip id
KEY_TRIP_ID = "tripId"
#: **tripDirection**: gtfs trip direction
KEY_TRIP_DIRECTION = "tripDirection"
#: **time**: simulation timestamp of board/un-board
KEY_TIME = "time"
#: **stopId**: stop id of board/un-board
KEY_STOP_ID = "stopId"
#: **boardType**: (+1) for boards, (-1) for un-boards
KEY_BOARD_TYPE = "boardType"
#: **value**: numeric value of the charge change
KEY_VALUE = "value"
def __init__(self, non_empty_only=True, public_transport=True):
super().__init__()
# boolean indicating if only non empty pickups and dropoffs should be traced
self.non_empty_only = non_empty_only
# boolean indicating if the simulated system is public transports (with gtfs tables)
self.public_transport = public_transport
self.trips = None
self.routes = None
self.keys = [self.KEY_TRIP_ID, self.KEY_TIME, self.KEY_STOP_ID, self.KEY_BOARD_TYPE, self.KEY_VALUE]
if self.public_transport:
self.keys = [self.KEY_ROUTE_ID, self.KEY_TRIP_DIRECTION] + self.keys
self.new_indicator_dict()
def setup(self, simulation_model):
if self.public_transport:
self.trips = simulation_model.gtfs.trips
self.routes = simulation_model.gtfs.routes
def new_indicator_dict(self):
self.indicator_dict = dict()
for key in [self.KEY_ID] + self.keys:
self.indicator_dict[key] = []
def update(self, event, agent):
"""
Add stop information to the list
:param agent:
:param event:
:return:
"""
if isinstance(event, StopEvent):
if event.dropoffs:
self.update_stop_information(event, agent)
self.indicator_dict[self.KEY_TIME].append(event.dropoff_time)
self.indicator_dict[self.KEY_BOARD_TYPE].append(-1)
self.indicator_dict[self.KEY_VALUE].append(len(event.dropoffs))
if event.pickups:
self.update_stop_information(event, agent)
self.indicator_dict[self.KEY_TIME].append(event.pickup_time)
self.indicator_dict[self.KEY_BOARD_TYPE].append(1)
self.indicator_dict[self.KEY_VALUE].append(len(event.pickups))
if not event.pickups and not event.dropoffs and not self.non_empty_only:
self.update_stop_information(event, agent)
self.indicator_dict[self.KEY_TIME].append(event.timestamp)
self.indicator_dict[self.KEY_BOARD_TYPE].append(0)
self.indicator_dict[self.KEY_VALUE].append(0)
def update_stop_information(self, event, agent):
"""
Update the indicator with the information common to dropoffs and pickups.
:param event:
:param agent:
"""
self.indicator_dict[self.KEY_ID].append(agent.id)
trip_id = event.trip
self.indicator_dict[self.KEY_TRIP_ID].append(trip_id)
self.indicator_dict[self.KEY_STOP_ID].append(get_stop_id_of_event(event))
if self.public_transport:
self.indicator_dict[self.KEY_ROUTE_ID].append(get_route_id_of_trip(self.trips, trip_id, event))
self.indicator_dict[self.KEY_TRIP_DIRECTION].append(get_direction_of_trip(self.trips, trip_id))
class TransferKPI(KPI):
"""
This KPI lists the transfers realised by a user,
with additional information such as walk distance and duration,
wait duration, from/to trip/stop
"""
#: **walkDistance**: walk distance of transfer [meters]
KEY_WALK_DIST = "walkDistance"
#: **walkTime**: walk time of transfer [seconds]
KEY_WALK_DURATION = "walkDuration"
#: **waitTime**: wait time of transfer [seconds]
KEY_WAIT_TIME = "waitTime"
#: **fromRoute**: origin route of transfer
KEY_FROM_ROUTE = "fromRoute"
#: **fromTrip**: origin trip of transfer
KEY_FROM_TRIP = "fromTrip"
#: **fromStop**: origin stop point of transfer
KEY_FROM_STOP = "fromStop"
#: **toRoute**: destination route of transfer
KEY_TO_ROUTE = "toRoute"
#: **toTrip**: destination trip of transfer
KEY_TO_TRIP = "toTrip"
#: **toStop**: destination stop of transfer
KEY_TO_STOP = "toStop"
def __init__(self):
super().__init__()
self.trips = None
self.routes = None
self.keys = [self.KEY_WALK_DIST, self.KEY_WALK_DURATION, self.KEY_WAIT_TIME,
self.KEY_FROM_ROUTE, self.KEY_FROM_TRIP, self.KEY_FROM_STOP,
self.KEY_TO_ROUTE, self.KEY_TO_TRIP, self.KEY_TO_STOP]
# transfer variables
self.current_walk_distance = 0
self.current_walk_duration = 0
self.current_wait_time = 0
self.from_route = None
self.from_trip = None
self.from_stop = None
self.to_route = None
self.to_trip = None
self.to_stop = None
def setup(self, simulation_model):
self.trips = simulation_model.gtfs.trips
self.routes = simulation_model.gtfs.routes
def new_indicator_dict(self):
self.indicator_dict = {self.KEY_ID: [], self.KEY_WALK_DIST: [],
self.KEY_WALK_DURATION: [], self.KEY_WAIT_TIME: [],
self.KEY_FROM_ROUTE: [], self.KEY_FROM_TRIP: [], self.KEY_FROM_STOP: [],
self.KEY_TO_ROUTE: [], self.KEY_TO_TRIP: [], self.KEY_TO_STOP: []}
def update(self, event, agent):
if isinstance(event, InputEvent):
self.reset_variables()
if isinstance(event, WaitEvent):
self.current_wait_time += event.waiting_time
elif isinstance(event, MoveEvent) and event.mode == "walk":
self.current_walk_distance += event.distance
self.current_walk_duration += event.duration
elif isinstance(event, RequestEvent):
self.current_wait_time += sum(event.request.waitSequence)
elif isinstance(event, StopEvent):
if isinstance(event.stop, StopPoint):
stop_id = event.stop.id
elif isinstance(event.stop, UserStop):
stop_id = event.stop.stopPoint
else:
stop_id = None
dropoff_agents = [request.agent.id for request in event.dropoffs]
pickup_agents = [request.agent.id for request in event.pickups]
if agent.id in dropoff_agents:
self.from_trip = event.trip
self.from_stop = stop_id
elif agent.id in pickup_agents:
self.to_trip = event.trip
self.to_stop = stop_id
self.write_variables(agent)
self.reset_variables()
elif isinstance(event, DestinationReachedEvent):
self.write_variables(agent)
self.reset_variables()
def reset_variables(self):
self.current_walk_distance = 0
self.current_walk_duration = 0
self.current_wait_time = 0
self.from_trip = None
self.from_stop = None
self.to_trip = None
self.to_stop = None
def write_variables(self, agent):
self.indicator_dict[self.KEY_ID].append(agent.id)
self.indicator_dict[self.KEY_WALK_DIST].append(self.current_walk_distance)
self.indicator_dict[self.KEY_WALK_DURATION].append(self.current_walk_duration)
self.indicator_dict[self.KEY_WAIT_TIME].append(self.current_wait_time)
self.indicator_dict[self.KEY_FROM_ROUTE].append(get_route_short_name_of_trip(self.trips, self.routes,
self.from_trip))
self.indicator_dict[self.KEY_FROM_TRIP].append(self.from_trip)
self.indicator_dict[self.KEY_FROM_STOP].append(self.from_stop)
self.indicator_dict[self.KEY_TO_ROUTE].append(get_route_short_name_of_trip(self.trips, self.routes,
self.to_trip))
self.indicator_dict[self.KEY_TO_TRIP].append(self.to_trip)
self.indicator_dict[self.KEY_TO_STOP].append(self.to_stop)
# TODO : remove ? useless with TransferKPI
# class JourneyKPI(KPI):
# """
# This KPI evaluates the sequence of lines that compose a user journey
# """
#
# #: **journeySequence**: sequence of trip ids of the journey
# KEY_JOURNEY_SEQUENCE = "journeySequence"
#
# def __init__(self):
#
# super().__init__()
#
# self.keys = [self.KEY_JOURNEY_SEQUENCE]
#
# self.trips_table = None
# self.routes_table = None
#
# def setup(self, simulation_model):
#
# operator = simulation_model.agentPopulation["operators"]["OPR"]
#
# feed = operator.service_info
#
# self.trips_table = feed.trips
# self.routes_table = feed.routes
#
# def new_indicator_dict(self):
#
# self.indicator_dict = {self.KEY_JOURNEY_SEQUENCE: ""}
#
# def update(self, event, agent):
# """
# Add a new route for each PickupEvent
# :param agent:
# :param event:
# :return:
# """
#
# super().update(event, agent)
#
# if isinstance(event, PickupEvent):
#
# route_id = self.trips_table.loc[self.trips_table["trip_id"] == event.trip, "route_id"].iloc[0]
#
# route_name = self.routes_table.loc[self.routes_table["route_id"] == route_id, "route_short_name"].iloc[0]
#
# self.indicator_dict[self.KEY_JOURNEY_SEQUENCE] += route_name + "-"
#
# if isinstance(event, DestinationReachedEvent):
#
# self.indicator_dict[self.KEY_JOURNEY_SEQUENCE] = self.indicator_dict[self.KEY_JOURNEY_SEQUENCE][:-1]
#
class DestinationReachedKPI(KPI):
"""
This KPI evaluates the destination reach time
"""
#: **destinationReachedTime**: time when destination is reached, "NA" otherwise [seconds or NA]
KEY_DESTINATION_REACHED = "destinationReachedTime"
def __init__(self):
super().__init__()
self.keys = [self.KEY_DESTINATION_REACHED]
def new_indicator_dict(self):
self.indicator_dict = {self.KEY_DESTINATION_REACHED: "NA"}
def update(self, event, agent):
"""
Add total wait duration of the request
:param agent:
:param event:
:return:
"""
super().update(event, agent)
if isinstance(event, DestinationReachedEvent):
self.indicator_dict[self.KEY_DESTINATION_REACHED] = event.timestamp
class LeaveSimulationKPI(KPI):
"""
This KPI evaluates the cause of the simulation leave.
"""
#: **leaveSimulation**: code used when leaving the simulation (see model doc)
KEY_LEAVE_SIMULATION = "leaveSimulation"
def __init__(self):
super().__init__()
self.keys = [self.KEY_LEAVE_SIMULATION]
def new_indicator_dict(self):
self.indicator_dict = {self.KEY_LEAVE_SIMULATION: None}
def update(self, event, agent):
"""
Add the cause of the LeaveSimulationEvent.
:param event:
:param agent:
"""
super().update(event, agent)
if isinstance(event, LeaveSimulationEvent):
self.indicator_dict[self.KEY_LEAVE_SIMULATION] = event.cause
def get_route_id_of_trip(trips, trip_id, event):
if trips is None:
return event.serviceVehicle.operator
trip_table = trips.loc[trips["trip_id"] == trip_id, "route_id"]
# if trips is not in the gtfs (on-demand trips for instance)
# try to get operator id
if trip_table.empty:
return event.serviceVehicle.operator
return trip_table.iloc[0]
def get_direction_of_trip(trips, trip_id):
if trips is None:
return ""
trip_table = trips.loc[trips["trip_id"] == trip_id, "direction_id"]
# ignore trips that are not in the gtfs (on-demand trips for instance)
if trip_table.empty:
return ""
return trip_table.iloc[0]
def get_route_short_name_of_trip(trips, routes, trip_id):
if trip_id is None:
return None
trip_table = trips.loc[trips["trip_id"] == trip_id, "route_id"]
# ignore trips that are not in the gtfs (on-demand trips for instance)
if trip_table.empty:
return ""
route_id = trip_table.iloc[0]
route_short_name = routes.loc[routes["route_id"] == route_id, "route_short_name"].iloc[0]
return route_short_name
def get_stop_id_of_event(event):
stop_id = None
if isinstance(event.stop, StopPoint):
stop_id = event.stop.id
elif isinstance(event.stop, UserStop):
stop_id = event.stop.stopPoint
if stop_id is None:
stop_id = ""
return stop_id
|
StarcoderdataPython
|
108488
|
<gh_stars>0
# -*- coding: utf-8 -*-
'''
* @Author : jiangtao
* @Date : 2021-12-13 14:18:45
* @Email : <EMAIL>
* @LastEditTime : 2022-03-02 14:11:31
* @Description :
'''
import argparse
import cv2
import json
import os
import os.path as osp
import sys
import time
import warnings
from argparse import ArgumentParser
from tqdm import tqdm
from xtcocotools.coco import COCO
from mmpose.apis import (inference_top_down_pose_model, init_pose_model,
vis_pose_result)
from mmpose.datasets import DatasetInfo
dirpath = osp.dirname(osp.abspath(__file__)).replace('\\','/')
dirpath = osp.dirname(dirpath)
def parse_args():
parser = argparse.ArgumentParser(
description='Convert MMDetection models to ONNX')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
args = parser.parse_args()
return args
class handAlignment():
def __init__(self,
pose_config = '{}/configs/face/2d_kpt_sview_rgb_img/topdown_heatmap/dms/res50_dms_256x256.py'.format(dirpath),
pose_checkpoint = '{}/work_dirs/res50_dms_256x256/best_NME_epoch_42.pth'.format(dirpath),
device = 'cuda:0'):
self.pose_config = pose_config
self.pose_checkpoint = pose_checkpoint
self.device = device
print('self.pose_checkpoint:',self.pose_checkpoint)
self.pose_model = init_pose_model(
self.pose_config, self.pose_checkpoint, device=self.device.lower())
self.dataset = self.pose_model.cfg.data['test']['type']
self.dataset_info = self.pose_model.cfg.data['test'].get('dataset_info', None)
# dataset is a str,dataset_info is a dict
if self.dataset_info is None:
warnings.warn(
'Please set `dataset_info` in the config.'
'Check https://github.com/open-mmlab/mmpose/pull/663 for details.',
DeprecationWarning)
else:
self.dataset_info = DatasetInfo(self.dataset_info)
# dataset_info is a instance of DatasetInfo
def alignment(self,img,box):
'''
box:[x,y,w,h]
'''
person = {}
person['bbox'] = box
person_results = []
person_results.append(person)
# test a single image, with a list of bboxes
pose_results, returned_outputs = inference_top_down_pose_model(
self.pose_model,
img,
person_results,
bbox_thr=None,
format='xywh',
dataset=self.dataset,
dataset_info=self.dataset_info,
return_heatmap=True,
outputs=None)
return pose_results,returned_outputs
def save(self, img, outfile, pose_results,returned_outputs):
img = vis_pose_result(
self.pose_model,
img,
pose_results,
dataset=self.dataset,
dataset_info=self.dataset_info,
kpt_score_thr=0.3,
radius=4,
thickness=1,
show=False,
out_file=outfile)
return img
handAlign = handAlignment()
if __name__ == '__main__':
args = parse_args()
handAlign = handAlignment(pose_config=args.config,
pose_checkpoint=args.checkpoint)
filename = './demo/images/face.jpg'
img = cv2.imread(filename,1)
    # input box is in x y w h format
# pose_results is a list of dict for every box, keys are bbox and keypoints
# returned_outputs is a list of dict for every box, keys are heatmap
pose_results, returned_outputs = handAlign.alignment(img,[495,221,400,400])
print(returned_outputs[0]['heatmap'].shape)
print(returned_outputs[0]['heatmap'][0][0][0][0])
print(returned_outputs[0]['heatmap'][-1][-1][-1][-1])
img = handAlign.save(img,
filename.replace('.jpg','_res.jpg'),
pose_results,
returned_outputs)
print(img.shape)
cv2.imwrite(filename.replace('.jpg','_res1.jpg'),img)
|
StarcoderdataPython
|
1692837
|
from .AutomatonGenerators import generate_random_dfa, generate_random_mealy_machine, generate_random_moore_machine, generate_random_markov_chain
from .AutomatonGenerators import generate_random_mdp, generate_random_ONFSM
from .FileHandler import save_automaton_to_file, load_automaton_from_file, visualize_automaton
from .ModelChecking import model_check_experiment, mdp_2_prism_format, model_check_properties, get_properties_file, get_correct_prop_values, compare_automata
from ..automata.StochasticMealyMachine import smm_to_mdp_conversion
from .BenchmarkSULs import *
from .DataHandler import *
|
StarcoderdataPython
|
1752993
|
<reponame>juanelenter/basepairmodels
"""
This module contains training functions that are common to
the CLI & the API
Functions:
train_and_validate: Train and validate on a single train and
validation set
train_and_validate_ksplits: Train and validate on one or
more train/val splits specified via a json file
License:
MIT License
Copyright (c) 2020 Kundaje Lab
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import copy
import datetime
import json
import logging
import multiprocessing as mp
import os
import pandas as pd
import sys
import tensorflow.keras.backend as kb
import time
import warnings
from basepairmodels.common import model_archs
from basepairmodels.cli.bpnetutils import *
from basepairmodels.cli.losses import MultichannelMultinomialNLL, NegativePearsonCorrelation
from basepairmodels.cli import experiments
from basepairmodels.cli import logger
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.optimizers import Adam
from mseqgen import generators
def early_stopping_check(losses, patience=5, min_delta=1e-3):
"""
Function to check if early stopping criteria are met
Args:
losses (list): list of all losses in order of the epochs,
these could be training or validation losses
patience (int): the number of epochs with no improvement
greater than `min_delta`
min_delta (float): The smallest amount that signals
            sufficient decrease in validation loss to justify
continuation of training for a further #patience
epochs
Returns:
bool: True, if early stopping criteria are satisfied,
False otherwise
"""
# if sufficient number of epochs have not elapsed yet
if len(losses) <= patience:
return False
# the loss value upon which the patience check will be performed
anchor_loss = losses[-(patience+1)]
for i in range(patience):
if (anchor_loss - losses[i-patience]) > min_delta:
return False
return True
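# Illustrative sketch (not part of the original module): a tiny self-check of the
# early-stopping rule above on plain lists of epoch losses, wrapped in a function
# so that importing this module stays side-effect free.
def _early_stopping_check_example():
    # the last `patience` losses still improve on the anchor epoch -> keep training
    improving = [1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4]
    assert early_stopping_check(improving, patience=5, min_delta=1e-3) is False
    # no improvement greater than min_delta for `patience` epochs -> stop
    plateaued = [1.0, 0.9, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]
    assert early_stopping_check(plateaued, patience=5, min_delta=1e-3) is True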
def reduce_lr_on_plateau(losses, current_lr, factor=0.5, patience=2,
min_lr=1e-4):
"""
Function to compute the new learning rate if loss is
plateauing
Args:
losses (list): list of all losses in order of the epochs,
these could be training or validation losses
current_lr (float): current learning rate
factor (float): the factor by which the learning rate is
to be reduced in case the plateau criteria is met
patience (int): number of epochs with no improvement after
which learning rate will be reduced.
min_lr (float): lower bound on the learning rate
Returns:
float: new learning rate
"""
# if sufficient number of epochs have not elapsed yet
if len(losses) <= patience:
return current_lr
# the loss value upon which the patience check will be performed
anchor_loss = losses[-(patience+1)]
for i in range(patience):
# improvement found
if losses[i-patience] < anchor_loss:
# no change in learning rate
return current_lr
# new learning rate
new_lr = current_lr * factor
# check if it's below lower bound
if new_lr < min_lr:
return current_lr
return new_lr
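# Illustrative sketch (not part of the original module): how the plateau rule above
# behaves on a short validation-loss history; the learning rate is halved only when
# none of the last `patience` losses improve on the anchor epoch.
def _reduce_lr_on_plateau_example():
    # loss flat for the last two epochs -> learning rate is halved
    new_lr = reduce_lr_on_plateau([1.0, 0.8, 0.8, 0.8], 1e-3, factor=0.5,
                                  patience=2, min_lr=1e-4)
    assert abs(new_lr - 5e-4) < 1e-12
    # loss still improving -> learning rate unchanged
    same_lr = reduce_lr_on_plateau([1.0, 0.8, 0.7, 0.6], 1e-3, factor=0.5,
                                   patience=2, min_lr=1e-4)
    assert same_lr == 1e-3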
def train_and_validate(input_params, output_params, genome_params,
batch_gen_params, hyper_params, parallelization_params,
network_params, use_attribution_prior,
attribution_prior_params, train_chroms, val_chroms,
model_dir, suffix_tag=None):
"""
Train and validate on a single train and validation set
Note: the list & description for each of the required keys
in all of the json parameter files passed to this
        function can be found here:
http://
Args:
input_params (dict): dictionary containing input parameters
output_params (dict): dictionary containing output
parameters
genome_params (dict): dictionary containing genome
parameters
batch_gen_params (dict): dictionary containing batch
generation parameters
        hyper_params (dict): dictionary containing
training & validation hyper parameters
parallelization_params (dict): dictionary containing
parameters for parallelization options
network_params (dict): dictionary containing parameters
specific to the deep learning architecture
use_attribution_prior (bool): indicate whether attribution
prior loss model should be used
attribution_prior_params (dict): dictionary containing
attribution prior parameters
train_chroms (list): list of training chromosomes
val_chroms (list): list of validation chromosomes
model_dir (str): the path to the output directory
suffix_tag (str): optional tag to add as a suffix to files
(model, log, history & config params files) created in
the model directory
Returns:
keras.models.Model
"""
# filename to write debug logs
if suffix_tag is not None:
logfname = '{}/trainer_{}.log'.format(model_dir, suffix_tag)
else:
logfname = '{}/trainer.log'.format(model_dir)
# we need to initialize the logger for each process
logger.init_logger(logfname)
# parameters that are specific to the training batch generation
# process
train_batch_gen_params = batch_gen_params
train_batch_gen_params['mode'] = 'train'
# parameters that are specific to the validation batch generation
    # process. For validation we don't use jitter, reverse complement
# augmentation and negative sampling
val_batch_gen_params = copy.deepcopy(batch_gen_params)
val_batch_gen_params['max_jitter'] = 0
val_batch_gen_params['rev_comp_aug'] = False
val_batch_gen_params['negative_sampling_rate'] = 0.0
val_batch_gen_params['mode'] = 'val'
# get the corresponding batch generator class for this model
sequence_generator_class_name = generators.find_generator_by_name(
batch_gen_params['sequence_generator_name'])
logging.info("SEQGEN Class Name: {}".format(sequence_generator_class_name))
BatchGenerator = getattr(generators, sequence_generator_class_name)
# instantiate the batch generator class for training
train_gen = BatchGenerator(input_params, train_batch_gen_params,
genome_params['reference_genome'],
genome_params['chrom_sizes'],
train_chroms,
num_threads=parallelization_params['threads'],
epochs=hyper_params['epochs'],
batch_size=hyper_params['batch_size'],
**network_params)
# instantiate the batch generator class for validation
val_gen = BatchGenerator(input_params, val_batch_gen_params,
genome_params['reference_genome'],
genome_params['chrom_sizes'],
val_chroms,
num_threads=parallelization_params['threads'],
epochs=hyper_params['epochs'],
batch_size=hyper_params['batch_size'],
**network_params)
# lets make sure the sizes look reasonable
logging.info("TRAINING SIZE - {}".format(train_gen._samples.shape))
logging.info("VALIDATION SIZE - {}".format(val_gen._samples.shape))
# we need to calculate the number of training steps and
# validation steps in each epoch, fit/evaluate requires this
# to determine the end of an epoch
train_steps = train_gen.len()
val_steps = val_gen.len()
# we may have to reduce the --threads sometimes
# if the peak file has very few peaks, so we need to
# check if these numbers will be 0
logging.info("TRAINING STEPS - {}".format(train_steps))
logging.info("VALIDATION STEPS - {}".format(val_steps))
# get an instance of the model
logging.debug("New {} model".format(network_params['name']))
get_model = getattr(model_archs, network_params['name'])
model = get_model(train_batch_gen_params['input_seq_len'],
train_batch_gen_params['output_len'],
len(network_params['control_smoothing']) + 1,
filters=network_params['filters'],
num_tasks=train_gen._num_tasks,
use_attribution_prior=use_attribution_prior,
attribution_prior_params=attribution_prior_params)
# print out the model summary
model.summary()
# # if running in multi gpu mode
# if parallelization_params['gpus'] > 1:
# logging.debug("Multi GPU model")
# model = multi_gpu_model(model, gpus=parallelization_params['gpus'])
# compile the model
logging.debug("Compiling model")
logging.info("counts_loss_weight - {}".format(
network_params['counts_loss_weight']))
if network_params['pearson_count_loss']:
model.compile(Adam(learning_rate=hyper_params['learning_rate']),
loss=[MultichannelMultinomialNLL(
train_gen._num_tasks), NegativePearsonCorrelation()],
loss_weights=[1, network_params['counts_loss_weight']])
else:
model.compile(Adam(learning_rate=hyper_params['learning_rate']),
loss=[MultichannelMultinomialNLL(
train_gen._num_tasks), 'mse'],
loss_weights=[1, network_params['counts_loss_weight']])
# begin time for training
t1 = time.time()
# track training losses, validation losses and start & end
# times
custom_history = {
'learning_rate': {},
'loss': {},
'profile_predictions_loss': {},
'logcount_predictions_loss': {},
'attribution_prior_loss': {},
'val_loss': {},
'val_profile_predictions_loss': {},
'val_logcount_predictions_loss': {},
'val_attribution_prior_loss': {},
'start_time': {},
'end_time': {},
'elapsed': {}
}
# we maintain a separate list to track validation losses to make it
# easier for early stopping & learning rate updates
val_losses = []
# track best loss so we can restore weights
best_loss = 1e6
# keep a copy of the best weights
best_weights = None
# the epoch with the best validation loss
best_epoch = 1
# start training
logging.debug("Training started ...")
for epoch in range(hyper_params['epochs']):
# First, let's train for one epoch
logging.info("Training Epoch {}".format(epoch + 1))
train_start_time = time.time()
custom_history['learning_rate'][str(epoch + 1)] = \
model.optimizer.learning_rate.numpy()
custom_history['start_time'][str(epoch + 1)] = \
time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(train_start_time))
# training generator function that will be passed to fit
train_generator = train_gen.gen(epoch)
history = model.fit(
train_generator, epochs=1, steps_per_epoch=train_steps)
train_end_time = time.time()
# record the losses
custom_history['loss'][str(epoch + 1)] = history.history['loss'][0]
custom_history['profile_predictions_loss'][str(epoch + 1)] = \
history.history['profile_predictions_loss'][0]
custom_history['logcount_predictions_loss'][str(epoch + 1)] = \
history.history['logcount_predictions_loss'][0]
if use_attribution_prior:
custom_history['attribution_prior_loss'][str(epoch + 1)] = \
history.history['attribution_prior_loss'][0]
# Then, we evaluate on the validation set
logging.info("Validation Epoch {}".format(epoch + 1))
val_start_time = time.time()
# validation generator function that will be passed to evaluate
val_generator = val_gen.gen(epoch)
val_loss = model.evaluate(
val_generator, steps=val_steps, return_dict=True)
val_losses.append(val_loss['loss'])
val_end_time = time.time()
custom_history['end_time'][str(epoch + 1)] = \
time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(val_end_time))
custom_history['elapsed'][str(epoch + 1)] = \
val_end_time - train_start_time
# record the losses
custom_history['val_loss'][str(epoch + 1)] = val_loss['loss']
custom_history['val_profile_predictions_loss'][str(epoch + 1)] = \
val_loss['profile_predictions_loss']
custom_history['val_logcount_predictions_loss'][str(epoch + 1)] = \
val_loss['logcount_predictions_loss']
if use_attribution_prior:
custom_history['val_attribution_prior_loss'][str(epoch + 1)] = \
val_loss['attribution_prior_loss']
# update best weights and loss
if val_loss['loss'] < best_loss:
best_weights = model.get_weights()
best_loss = val_loss['loss']
best_epoch = epoch + 1
# check if early stopping criteria are satisfied
if early_stopping_check(
val_losses,
patience=hyper_params['early_stopping_patience'],
min_delta=hyper_params['early_stopping_min_delta']):
# restore best weights
logging.info("Restoring best weights from epoch {}".format(
best_epoch))
model.set_weights(best_weights)
break
# lower learning rate if criteria are satisfied
new_lr = reduce_lr_on_plateau(
val_losses,
model.optimizer.learning_rate.numpy(),
factor=hyper_params['lr_reduction_factor'],
patience=hyper_params['reduce_lr_on_plateau_patience'],
min_lr=hyper_params['min_learning_rate'])
# set the new learning rate
model.optimizer.learning_rate.assign(new_lr)
# display current learning rate and training status
logging.info("Current learning rate - {:5f}, Stop Training - {}".format(
model.optimizer.learning_rate.numpy(),
model.stop_training))
# end time for training
t2 = time.time()
logging.info("Total Elapsed Time: {}".format(t2-t1))
# base model filename
if output_params['automate_filenames']:
# get random alphanumeric tag for model
model_tag = getAlphaNumericTag(output_params['tag_length'])
model_fname = "{}/{}".format(model_dir, model_tag)
elif output_params['model_output_filename'] is not None:
model_fname = "{}/{}".format(model_dir,
output_params['model_output_filename'])
else:
model_fname = "{}/model".format(model_dir)
# add suffix tag to model name
if suffix_tag is not None:
model_fname += "_{}".format(suffix_tag)
# extension
model_fname += ".h5"
# save HDF5 model file
model.save(model_fname)
logging.info("Finished saving model: {}".format(model_fname))
# save history to json:
# Step 1. convert the custom history dict to a pandas DataFrame:
hist_df = pd.DataFrame(custom_history)
# file name for json file
hist_json = model_fname.replace('.h5', '.history.json')
# Step 2. write the dataframe to json
with open(hist_json, mode='w') as f:
hist_df.to_json(f)
logging.info("Finished saving training and validation history: {}".format(
hist_json))
# write all the command line arguments to a json file
# & include the number of epochs the training lasted for, and the
    # validation and test chroms
config_file = '{}/config'.format(model_dir)
# add suffix tag to model name
if suffix_tag is not None:
config_file += "_{}".format(suffix_tag)
# extension
config_file += ".json"
with open(config_file, 'w') as fp:
config = {}
config['input_params'] = input_params
config['output_params'] = output_params
config['genome_params'] = genome_params
config['batch_gen_params'] = batch_gen_params
config['hyper_params'] = hyper_params
config['parallelization_params'] = parallelization_params
config['network_params'] = network_params
# the number of epochs the training lasted
config['training_epochs'] = epoch + 1
# the epoch with best validation loss
config['best_epoch'] = best_epoch
config['train_chroms'] = train_chroms
config['val_chroms'] = val_chroms
config['model_filename'] = model_fname
json.dump(config, fp)
return model
def train_and_validate_ksplits(
input_params, output_params, genome_params, batch_gen_params, hyper_params,
parallelization_params, network_params, use_attribution_prior,
attribution_prior_params, splits):
"""
Train and validate on one or more train/val splits
Args:
input_params (dict): dictionary containing input parameters
output_params (dict): dictionary containing output
parameters
genome_params (dict): dictionary containing genome
parameters
batch_gen_params (dict): dictionary containing batch
generation parameters
        hyper_params (dict): dictionary containing
training & validation hyper parameters
parallelization_params (dict): dictionary containing
parameters for parallelization options
network_params (dict): dictionary containing parameters
specific to the deep learning architecture
use_attribution_prior (bool): indicate whether attribution
prior loss model should be used
attribution_prior_params (dict): dictionary containing
attribution prior parameters
        splits (dict): dictionary of train & validation splits
            (parsed from the splits json file)
"""
# list of chromosomes after removing the excluded chromosomes
chroms = set(genome_params['chroms']).difference(
set(genome_params['exclude_chroms']))
# list of models from all of the splits
models = []
# run training for each validation/test split
num_splits = len(list(splits.keys()))
for i in range(num_splits):
if output_params['automate_filenames']:
# create a new directory using current date/time to store the
# model, the loss history and logs
date_time_str = local_datetime_str(output_params['time_zone'])
model_dir = '{}/{}_split{:03d}'.format(
output_params['output_dir'], date_time_str, i)
os.mkdir(model_dir)
split_tag = None
elif os.path.isdir(output_params['output_dir']):
model_dir = output_params['output_dir']
split_tag = "split{:03d}".format(i)
else:
logging.error("Directory does not exist {}.".format(
output_params['output_dir']))
return
# filename to write debug logs
logfname = '{}/trainer.log'.format(model_dir)
        # set up logger for main process
logger.init_logger(logfname)
# train & validation chromosome split
if 'val' not in splits[str(i)]:
logging.error("KeyError: 'val' required for split {}".format(i))
return
val_chroms = splits[str(i)]['val']
# if 'train' key is present
if 'train' in splits[str(i)]:
train_chroms = splits[str(i)]['train']
# if 'test' key is present but train is not
elif 'test' in splits[str(i)]:
test_chroms = splits[str(i)]['test']
# take the set difference of the whole list of
# chroms with the union of val and test
train_chroms = list(chroms.difference(
set(val_chroms + test_chroms)))
else:
# take the set difference of the whole list of
# chroms with val
train_chroms = list(chroms.difference(val_chroms))
logging.info("Split #{}".format(i))
logging.info("Train: {}".format(train_chroms))
logging.info("Val: {}".format(val_chroms))
# Start training for the split in a separate process
# This ensures that all resources are freed, when the
# process terminates, & available for training the next split
# Mitigates the problem where training subsequent splits
# is considerably slow
logging.debug("Split {}: Creating training process".format(i))
p = mp.Process(
target=train_and_validate,
args=[input_params, output_params, genome_params,
batch_gen_params, hyper_params, parallelization_params,
network_params, use_attribution_prior,
attribution_prior_params, train_chroms, val_chroms,
model_dir, split_tag])
p.start()
# wait for the process to finish
p.join()
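# Illustrative sketch (not part of the original module): judging from the key
# handling above, the `splits` dictionary passed to train_and_validate_ksplits is
# expected to look roughly like this, keyed by the split index as a string:
#
#   splits = {
#       "0": {"val": ["chr10", "chr8"], "test": ["chr1"]},
#       "1": {"val": ["chr18", "chr3"], "train": ["chr2", "chr4"]}
#   }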
|
StarcoderdataPython
|
6543218
|
<filename>infoset/api/__init__.py<gh_stars>0
"""Initialize the API module."""
# Import PIP3 libraries
from flask import Flask
from flask_caching import Cache
#############################################################################
# Import configuration.
# This has to be done before all other infoset imports.
#############################################################################
from infoset.utils import configuration
CONFIG = configuration.Config()
#############################################################################
#############################################################################
# Configure the cache
CACHE = Cache(config={
'CACHE_TYPE': 'memcached',
'CACHE_DEFAULT_TIMEOUT': CONFIG.interval()})
# Define the global URL prefix
from infoset.constants import API_PREFIX
# Import API Blueprints
from infoset.api.post import POST
from infoset.api.status import STATUS
from infoset.api.resources.agents import AGENTS
from infoset.api.resources.datapoints import DATAPOINTS
from infoset.api.resources.lastcontacts import LASTCONTACTS
from infoset.api.resources.devices import DEVICES
from infoset.api.resources.deviceagents import DEVICEAGENTS
# Set up the API and initialize the cache
API = Flask(__name__)
CACHE.init_app(API)
# Register Blueprints
API.register_blueprint(POST, url_prefix=API_PREFIX)
API.register_blueprint(STATUS, url_prefix=API_PREFIX)
API.register_blueprint(DATAPOINTS, url_prefix=API_PREFIX)
API.register_blueprint(AGENTS, url_prefix=API_PREFIX)
API.register_blueprint(LASTCONTACTS, url_prefix=API_PREFIX)
API.register_blueprint(DEVICES, url_prefix=API_PREFIX)
API.register_blueprint(DEVICEAGENTS, url_prefix=API_PREFIX)
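# Illustrative note (not part of the original module): for local testing the Flask
# app assembled above could be served with the built-in development server, e.g.
#
#   from infoset.api import API
#   API.run(debug=True)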
|
StarcoderdataPython
|
6426918
|
<reponame>qychen13/ClusterAlignReID<filename>utils/evaluation.py<gh_stars>10-100
import time
from tqdm import tqdm
from collections import defaultdict
import torch
import torch.nn as nn
import torch.nn.functional as functional
import scipy.io
import numpy as np
from .distance import compute_distance_matrix
from .rank import evaluate_rank
from .rerank import re_ranking
def test(feature_extractor, query_iterator, gallary_iterator, gpu_ids, store_fs=False, method='euclidean', flips=False, reranking=False):
print('==> extracting query features...')
qfets, qtargets = extract_features(
feature_extractor, query_iterator, gpu_ids, is_test=True, flips=flips)
print('==> extracting gallary features...')
gfets, gtargets = extract_features(
feature_extractor, gallary_iterator, gpu_ids, is_test=True, flips=flips)
print('==> compute test metrics...')
if store_fs:
# used for external test
print('==> save features and labels...')
qnorm = torch.norm(qfets, p=2, dim=1, keepdim=True)
qfets_n = qfets.div(qnorm.expand_as(qfets))
gnorm = torch.norm(gfets, p=2, dim=1, keepdim=True)
gfets_n = gfets.div(gnorm.expand_as(gfets))
features = {'gallery_f': gfets_n.cpu().numpy(),
'gallery_label': gtargets['pid'].cpu().numpy(),
'gallery_cam': gtargets['camid'].cpu().numpy(),
'query_f': qfets_n.cpu().numpy(),
'query_label': qtargets['pid'].cpu().numpy(),
'query_cam': qtargets['camid'].cpu().numpy()}
scipy.io.savemat('pytorch_result.mat', features)
if method == 'external':
if reranking:
raise NotImplementedError
result = evaluate_gpu(qfets, gfets, qtargets, gtargets, normalize=True)
else:
result = compute_test_metrics(
qfets, gfets, qtargets, gtargets, metric=method, reranking=reranking)
return dict(Top1=result['all_cmc'][0], Top5=result['all_cmc'][4], mAP=result['mAP'])
def test_external(result_file):
result = scipy.io.loadmat(result_file)
query_feature = torch.FloatTensor(result['query_f'])
query_cam = torch.IntTensor(result['query_cam'][0])
query_label = torch.IntTensor(result['query_label'][0])
gallery_feature = torch.FloatTensor(result['gallery_f'])
gallery_cam = torch.IntTensor(result['gallery_cam'][0])
gallery_label = torch.IntTensor(result['gallery_label'][0])
qtargets = dict(pid=query_label, camid=query_cam)
gtargets = dict(pid=gallery_label, camid=gallery_cam)
good_index = (gtargets['pid'] != -1)
gallery_feature = gallery_feature[good_index]
gtargets = {key: gtargets[key][good_index] for key in gtargets}
# result = compute_test_metrics(query_feature, gallery_feature, qtargets, gtargets, metric='cosine-non-normal')
result = evaluate_gpu(query_feature, gallery_feature, qtargets, gtargets)
return dict(Top1=result['all_cmc'][0], Top5=result['all_cmc'][4], mAP=result['mAP'])
def fliplr(img):
'''flip horizontal'''
    inv_idx = torch.arange(img.size(3) - 1, -1, -1).long()  # N x C x H x W
if img.is_cuda:
inv_idx = inv_idx.cuda()
img_flip = img.index_select(3, inv_idx)
return img_flip
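# Illustrative sketch (not part of the original module): a quick self-check that
# fliplr() mirrors an N x C x H x W batch along its width axis and is its own
# inverse; wrapped in a function so importing this module has no side effects.
def _fliplr_example():
    img = torch.arange(2 * 3 * 4 * 5, dtype=torch.float32).view(2, 3, 4, 5)
    flipped = fliplr(img)
    assert torch.equal(flipped, torch.flip(img, dims=[3]))
    assert torch.equal(fliplr(flipped), img)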
def extract_features(feature_extractor, data_iterator, gpu_ids, is_test, flips=False):
feature_extractor.eval()
fets = []
targets = defaultdict(list)
if gpu_ids is not None:
feature_extractor.cuda(gpu_ids[0])
with torch.no_grad():
for ipt, target in tqdm(data_iterator):
if gpu_ids is not None:
if len(gpu_ids) == 1:
ipt = ipt.cuda(gpu_ids[0], non_blocking=True)
for key in target:
if isinstance(target[key], list):
continue
target[key] = target[key].cuda(
gpu_ids[0], non_blocking=True)
if gpu_ids is not None:
fet = nn.parallel.data_parallel(
feature_extractor, ipt, gpu_ids)
fet1 = nn.parallel.data_parallel(
feature_extractor, fliplr(ipt), gpu_ids)
else:
fet = feature_extractor(ipt)
fet1 = feature_extractor(fliplr(ipt))
if isinstance(fet, dict):
if 'features_test' in fet and is_test:
#print('==> use features_test')
key = 'features_test'
else:
key = 'features'
fet = fet[key]
fet1 = fet1[key]
if flips:
fet += fet1
fet /= 2
fets.append(fet)
for key in target:
targets[key].append(target[key])
fets = torch.cat(fets, 0)
for key in targets:
if isinstance(targets[key][0], list):
temp = []
for i in targets[key]:
temp += i
targets[key] = temp
else:
targets[key] = torch.cat(targets[key], 0)
return fets, targets
def compute_test_metrics(qfets, gfets, qtargets, gtargets, metric='euclidean', reranking=False):
distmat = compute_distance_matrix(qfets, gfets, metric=metric)
if reranking:
print('==> reranking the distance matrix at {} ...'.format(
time.strftime('%c')))
distmat = distmat.cpu()
q_q_distmat = compute_distance_matrix(
qfets, qfets, metric=metric).cpu()
g_g_distmat = compute_distance_matrix(
gfets, gfets, metric=metric).cpu()
distmat = re_ranking(distmat, q_q_distmat, g_g_distmat)
distmat = torch.from_numpy(distmat)
print('==> done at {} ...'.format(time.strftime('%c')))
q_pids = qtargets['pid']
g_pids = gtargets['pid']
q_camids = qtargets['camid']
g_camids = gtargets['camid']
return evaluate_rank(distmat, q_pids, g_pids, q_camids, g_camids, use_cython=True)
################ copy from person-reid-baseline ####################
def evaluate_gpu(qfets, gfets, qtargets, gtargets, normalize=False):
# TODO: support reranking
if normalize:
qnorm = torch.norm(qfets, p=2, dim=1, keepdim=True)
qfets = qfets.div(qnorm.expand_as(qfets))
gnorm = torch.norm(gfets, p=2, dim=1, keepdim=True)
gfets = gfets.div(gnorm.expand_as(gfets))
if len(qfets.shape) != 2:
qfets = qfets.view(qfets.shape[0], -1)
gfets = gfets.view(gfets.shape[0], -1)
query_feature = qfets
query_cam = qtargets['camid'].cpu()
query_label = qtargets['pid'].cpu()
gallery_feature = gfets
gallery_cam = gtargets['camid'].cpu()
gallery_label = gtargets['pid'].cpu()
query_feature = query_feature.cuda()
gallery_feature = gallery_feature.cuda()
CMC = torch.IntTensor(len(gallery_label)).zero_()
ap = 0.0
# print(query_label)
for i in range(len(query_label)):
ap_tmp, CMC_tmp = evaluate(
query_feature[i], query_label[i], query_cam[i], gallery_feature, gallery_label, gallery_cam)
if CMC_tmp[0] == -1:
continue
CMC = CMC + CMC_tmp
ap += ap_tmp
# print(i, CMC_tmp[0])
CMC = CMC.float()
CMC = CMC/len(query_label)
return dict(all_cmc=CMC, mAP=ap/len(query_label))
# Evaluate
def evaluate(qf, ql, qc, gf, gl, gc):
query = qf.view(-1, 1)
# print(query.shape)
score = torch.mm(gf, query)
score = score.squeeze(1).cpu()
score = score.numpy()
# predict index
index = np.argsort(score) # from small to large
index = index[::-1]
# index = index[0:2000]
# good index
query_index = np.argwhere(gl == ql)
camera_index = np.argwhere(gc == qc)
good_index = np.setdiff1d(query_index, camera_index, assume_unique=True)
junk_index1 = np.argwhere(gl == -1)
junk_index2 = np.intersect1d(query_index, camera_index)
junk_index = np.append(junk_index2, junk_index1) # .flatten())
CMC_tmp = compute_mAP(index, good_index, junk_index)
return CMC_tmp
def compute_mAP(index, good_index, junk_index):
ap = 0
cmc = torch.IntTensor(len(index)).zero_()
if good_index.size == 0: # if empty
cmc[0] = -1
return ap, cmc
# remove junk_index
mask = np.in1d(index, junk_index, invert=True)
index = index[mask]
# find good_index index
ngood = len(good_index)
mask = np.in1d(index, good_index)
rows_good = np.argwhere(mask == True)
rows_good = rows_good.flatten()
cmc[rows_good[0]:] = 1
for i in range(ngood):
d_recall = 1.0/ngood
precision = (i+1)*1.0/(rows_good[i]+1)
if rows_good[i] != 0:
old_precision = i*1.0/rows_good[i]
else:
old_precision = 1.0
ap = ap + d_recall*(old_precision + precision)/2
return ap, cmc
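# Illustrative sketch (not part of the original module): a tiny worked example of
# the AP/CMC computation above for a single query. Gallery items are referred to by
# index; the ranking below places the only correct match at rank 2.
def _compute_mAP_example():
    ranked = np.array([3, 1, 2, 0])        # gallery indices sorted by score
    good = np.array([1])                   # index 1 is the only true match
    junk = np.array([], dtype=np.int64)    # nothing to ignore
    ap, cmc = compute_mAP(ranked, good, junk)
    assert int(cmc[0]) == 0 and int(cmc[1]) == 1   # first hit at rank 2
    # the trapezoidal approximation used above gives (0 + 0.5) / 2 = 0.25 here
    assert abs(ap - 0.25) < 1e-6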
|
StarcoderdataPython
|
1996043
|
import json
import sys
import payabbhi
import responses
import unittest2
from .helpers import (assert_invoice, assert_list_of_invoice_items,
assert_list_of_invoices, assert_list_of_payments,
mock_file)
class TestInvoice(unittest2.TestCase):
def setUp(self):
self.client = payabbhi.Client(
access_id='access_id', secret_key='secret_key')
payabbhi.api_base = 'https://payabbhi.com'
self.invoice_id = 'dummy_invoice_id'
self.invoice_url = payabbhi.api_base + '/api/v1/invoices'
@responses.activate
def test_invoice_all(self):
result = mock_file('dummy_invoice_collection')
responses.add(responses.GET, self.invoice_url, status=200,
body=result, match_querystring=True)
response = self.client.invoice.all()
resp = json.loads(result)
assert_list_of_invoices(self, response, resp)
@responses.activate
def test_invoice_with_options(self):
result = mock_file('dummy_invoice_collection_filters')
count = 3
skip = 2
url = '{0}?count={1}&skip={2}'.format(self.invoice_url, count, skip)
responses.add(responses.GET, url, status=200,
body=result, match_querystring=True)
response = self.client.invoice.all(data={'count': count, 'skip': skip})
resp = json.loads(result)
assert_list_of_invoices(self, response, resp)
@responses.activate
def test_invoice_retrieve(self):
result = mock_file('dummy_invoice')
url = '{0}/{1}'.format(self.invoice_url, self.invoice_id)
responses.add(responses.GET, url, status=200,
body=result, match_querystring=True)
response = self.client.invoice.retrieve(self.invoice_id)
resp = json.loads(result)
assert_invoice(self, response, resp)
@responses.activate
def test_invoice_create(self):
result = mock_file('dummy_invoice')
url = self.invoice_url
responses.add(responses.POST, url, status=200,
body=result, match_querystring=True)
response = self.client.invoice.create(data={'customer_id': 'dummy_customer_id', 'invoice_no': '123123123123', 'due_date': 1549176945,
'currency': 'INR', 'description': 'TestInvoice', 'notes': {"mode": "test"}, 'line_items': [{"id": "dummy_item_id"}]})
resp = json.loads(result)
assert_invoice(self, response, resp)
@responses.activate
def test_invoice_void(self):
result = mock_file('dummy_invoice_void')
url = '{0}/{1}/void'.format(self.invoice_url, self.invoice_id)
responses.add(responses.POST, url, status=200,
body=result, match_querystring=True)
response = self.client.invoice.void(self.invoice_id)
resp = json.loads(result)
assert_invoice(self, response, resp)
@responses.activate
def test_invoice_retrieve_lineitems(self):
result = mock_file('dummy_invoice_lineitems')
url = '{0}/{1}/line_items'.format(self.invoice_url, self.invoice_id)
responses.add(responses.GET, url, status=200,
body=result, match_querystring=True)
response = self.client.invoice.line_items(self.invoice_id)
resp = json.loads(result)
assert_list_of_invoice_items(self, response, resp)
@responses.activate
def test_invoice_retrieve_payments(self):
result = mock_file('dummy_invoice_payments')
url = '{0}/{1}/payments'.format(self.invoice_url, self.invoice_id)
responses.add(responses.GET, url, status=200,
body=result, match_querystring=True)
response = self.client.invoice.payments(self.invoice_id)
resp = json.loads(result)
assert_list_of_payments(self, response, resp)
|
StarcoderdataPython
|
6609046
|
<reponame>yhat/digit-recognizer
import numpy as np
import pandas as pd
from PIL import Image
from StringIO import StringIO
import base64
import os
from sklearn.decomposition import RandomizedPCA
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from sklearn.cross_validation import train_test_split
wd = "../numbers/"
files = [f for f in os.listdir(wd)]
files = [wd + f for f in files]
STANDARD_SIZE = (50, 50)
def get_image_data(filename):
img = Image.open(filename)
img = img.getdata()
img = img.resize(STANDARD_SIZE)
img = map(list, img)
img = np.array(img)
s = img.shape[0] * img.shape[1]
img_wide = img.reshape(1, s)
return img_wide[0]
data = []
labels = []
print "extracting features..."
for i, f in enumerate(files):
print i, "of", len(files)
data.append(get_image_data(f))
labels.append(int(f.split(".")[-2][-1]))
print "done."
pca = RandomizedPCA(n_components=10)
std_scaler = StandardScaler()
X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.1)
print "scaling data..."
X_train = pca.fit_transform(X_train)
X_test = pca.transform(X_test)
print "done."
print "transforming data..."
X_train = std_scaler.fit_transform(X_train)
X_test = std_scaler.transform(X_test)
print "done."
print "training model..."
clf = KNeighborsClassifier(n_neighbors=33)
clf.fit(X_train, y_train)
print "done"
print "="*20
print clf
print "Confusion Matrix"
print "="*40
print confusion_matrix(y_test, clf.predict(X_test))
from yhat import BaseModel, Yhat
class DigitModel(BaseModel):
def require(self):
from PIL import Image
from StringIO import StringIO
import base64
def transform(self, data):
image_string = data["image_string"]
STANDARD_SIZE = (50, 50)
f = StringIO(base64.decodestring(image_string))
img = Image.open(f)
img = img.getdata()
img = img.resize(STANDARD_SIZE)
img = map(list, img)
img = np.array(img)
s = img.shape[0] * img.shape[1]
img_wide = img.reshape(1, s)
return img_wide[0]
def predict(self, img):
x = self.pca.transform([img])
x = self.std_scaler.transform(x)
results = {"label": self.clf.predict(x)[0]}
probs = {"prob_" + str(i) : prob for i, prob in enumerate(self.clf.predict_proba(x)[0])}
results['probs'] = probs
return results
digit_model = DigitModel(clf=clf, std_scaler=std_scaler, pca=pca)
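# Illustrative sketch (not part of the original script): how a client payload for
# DigitModel.transform might be assembled, assuming `image_string` is simply the
# base64-encoded bytes of an image file.
def _example_payload(filename):
    with open(filename, "rb") as f:
        return {"image_string": base64.b64encode(f.read())}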
# yh = Yhat("greg", "abcd1234", "http://starphleet-aa02a554-1981699582.us-west-1.elb.amazonaws.com/deployer/")
# yh.deploy("digitRecognizer", digit_model)
|
StarcoderdataPython
|
6562426
|
import scipy,pickle,numpy
import numpy as np
try:
import matplotlib.pyplot as plt
except:
import pylab as plt
import special_functions as sf
from scipy import ndimage
STANDARD = None
class IDSpectrum:
"""
IDSpectrum class for identification of spectral lines starting with an
initial model of the spectrum.
"""
def __init__(self, data,sky,wave,wave2,skymodel):
""" Plot data """
self.data = plt.plot(wave,data,c='b')[0]
self.sky = plt.plot(wave2,sky,c='gray')[0]
self.canvas = self.data.get_figure().canvas
self.ax = self.data.get_axes()
self.xdata = self.data.get_xdata().copy()
self.start = [data.copy(),sky.copy(),wave.copy(),wave2.copy(),skymodel]
self.skymodel = skymodel
""" Get metadata (ie line locations) for arcs """
data = self.data.get_ydata()
self.datapeaks = ndimage.maximum_filter(data,9)
tmp = scipy.sort(data)
        thresh = tmp[int(tmp.size*0.95)]
cond = (data==self.datapeaks)&(data>thresh)
self.datapeaks = scipy.where(cond)[0]
self.datasel = self.datapeaks*0
self.datalines = []
for peak in self.datapeaks:
l = plt.axvline(self.xdata[peak],c='k')
self.datalines.append(l)
self.spec = self.data
self.peaks = self.datapeaks
self.selected = self.datasel
self.lines = self.datalines
""" Set useful flags """
self.domotion = False
self.origx = None
self.soln = None
self.pick = False
self.fitlines = None
self.keyid = self.canvas.mpl_connect('key_press_event',self.key_press)
self.connect()
print """
Mouse Controls:
- left button drags single lines (rescales spectrum!)
- middle button drags all lines (or exits from pan/zoom modes)
- right button selects/deselects lines
Keyboard Commands:
a - add new line (use mouse to select the line)
m - fit a polynomial to the blue `solution'
d - optimize the blue fit to the gray model (like m, but optimizes too)
w - write the current state to disk
r - read a saved state
n - reset to the initial state
q - quit (performs an `m' fit if no fit has been applied yet)
"""
plt.show()
def connect(self):
""" Connect the mouse to the plot """
self.pressid = self.canvas.mpl_connect('button_press_event',
self.on_press)
self.moveid = self.canvas.mpl_connect('motion_notify_event',
self.on_motion)
self.offid = self.canvas.mpl_connect('button_release_event',
self.on_release)
def on_press(self,event):
"""
Deal with mouse button presses, including stretching, shifting,
and line identification.
"""
""" Turn off plot tools """
if self.canvas.toolbar.mode!='':
if event.button==2:
self.canvas.toolbar.zoom()
self.canvas.toolbar.pan()
self.canvas.toolbar.pan()
return
self.xdata = self.spec.get_xdata().copy()
if event.xdata==None or event.ydata==None: # Not in axes
return
ind = abs(self.xdata-event.xdata).argmin()
indx = abs(self.peaks-ind).argmin()
if abs(self.peaks-ind).min()>4.: # Not near any peaks
return
""" Select/unselect lines """
if event.button==3:
if self.selected[indx]==1:
self.selected[indx] = 0
self.lines[indx].set_color('k')
else:
self.selected[indx] = 1
self.lines[indx].set_color('r')
plt.draw()
return
self.origx = self.xdata[self.peaks[indx]]
self.lbound = self.xdata[0]
self.lindx = 0
for i in range(indx):
if self.xdata[self.peaks[i]]>self.origx:
break
if self.selected[i]==1:
self.lbound = self.xdata[self.peaks[i]]
self.lindx = self.peaks[i]
self.rbound = self.xdata[-1]
self.rindx = -1
for i in range(indx+1,self.peaks.size):
if self.xdata[self.peaks[i]]<self.origx:
continue
if self.selected[i]==1:
self.rbound = self.xdata[self.peaks[i]]
self.rindx = self.peaks[i]
break
self.rbound = 1e6
self.lbound = 1
self.selected[indx] = 1
self.indx = self.peaks[indx]
self.domotion = True
self.lines[indx].set_color('r')
self.pick = False
def on_motion(self, event):
""" Controls the sliding/stretching of the spectra """
"""
Ignore this if we aren't in slide/stretch mode (ie pressing the
mouse button
"""
if self.domotion is False:
return
xdata = self.xdata.copy()
""" Left mouse button is for stretching """
if event.button==1 and (event.xdata is not None) \
and (event.ydata is not None) and event.xdata>self.lbound \
and event.xdata<self.rbound:
leftpts = self.xdata[self.lindx+1:self.indx+1].copy()
left = scipy.linspace(leftpts[0],event.xdata,leftpts.size)
rightpts = self.xdata[self.indx:self.rindx].copy()
right = scipy.linspace(event.xdata,rightpts[-1],rightpts.size)[1:]
xd = scipy.concatenate((left,right))
xdata[self.lindx+1:self.rindx] = xd.copy()
self.data.set_xdata(xdata)
""" Middle mouse button is for sliding """
if event.button==2:
offset = event.xdata-self.origx
xdata = xdata + offset
self.data.set_xdata(xdata)
for i in range(self.datapeaks.size):
x = xdata[self.datapeaks[i]]
l = self.datalines[i]
l.set_xdata([x,x])
plt.draw()
def on_release(self, event):
""" If the mouse button is released, reset! """
if self.domotion:
self.domotion = False
self.xdata = self.spec.get_xdata().copy()
plt.draw()
def key_press(self,event):
"""
m is for fitting, p is for selecting new lines (or leaving select
mode)
"""
if type(event.key)!=type('m'):
return
if event.key.lower()=='m':
self.do_fit()
elif event.key.lower()=='d':
self.do_fit(True)
elif event.key.lower()=='a':
if self.pick:
self.pick = False
self.canvas.mpl_disconnect(self.addid)
self.connect()
self.pick = False
else:
self.pick = True
self.disconnect()
print "Choose the line to add (a to exit)"
self.addid = self.canvas.mpl_connect('button_press_event',
self.add_line)
elif event.key.lower()=='w':
self.write()
elif event.key.lower()=='r':
self.read()
elif event.key.lower()=='n':
self.data.remove()
self.sky.remove()
for i in self.lines:
i.remove()
a,b,c,d,e = self.start
self.disconnect()
self.canvas.mpl_disconnect(self.keyid)
self.__init__(a,b,c,d,e)
plt.draw()
elif event.key.lower()=='q':
if self.soln is None:
self.do_fit()
plt.close(self.data.get_figure())
def do_fit(self,fullFit=False):
p = self.peaks[self.selected==1]
fitdata = scipy.empty((self.xdata.size,2))
fitdata[:,0] = scipy.arange(fitdata.shape[0])
fitdata[:,1] = self.spec.get_xdata()
fitdata = fitdata[(fitdata[:,0]>p[0]-20)&(fitdata[:,0]<p[-1]+20)]
ord = int(raw_input('Enter order of fit: '))
fit = sf.lsqfit(fitdata,'polynomial',ord)
self.soln = fit
if fullFit==True:
from scipy import interpolate,optimize
spec = self.spec.get_ydata()
xvals = numpy.arange(spec.size).astype(numpy.float32)
def opt(p):
p = numpy.array(p)
n = p[-1]
coeff = numpy.atleast_2d(p[:-1]).T
m = {'coeff':coeff,'type':'polynomial'}
w = sf.genfunc(xvals,0.,m)
mod = n*interpolate.splev(w,self.skymodel)
return (spec-mod)/abs(spec)**0.5
pars = fit['coeff'].flatten().tolist()
pars.append(1.)
coeff,ier = optimize.leastsq(opt,pars,maxfev=10000,epsfcn=1e-5)
fit['coeff'] = numpy.atleast_2d(coeff[:-1]).T
self.soln = fit
print "pixel fit complete"
else:
print "Fit Complete"
xdata = sf.genfunc(scipy.arange(self.xdata.size),0.,fit)
# self.sky.set_xdata(xdata)
self.data.set_xdata(xdata)
for i in range(self.datapeaks.size):
x = xdata[self.datapeaks[i]]
l = self.datalines[i]
l.set_xdata([x,x])
plt.draw()
def add_line(self,event):
if self.canvas.toolbar.mode!='':
if event.button==2:
self.canvas.toolbar.zoom()
self.canvas.toolbar.pan()
self.canvas.toolbar.pan()
return
if event.xdata==None or event.ydata==None:
print 'Invalid data'
return
xpos = event.xdata
xdata = self.spec.get_xdata()
ydata = self.spec.get_ydata()
p = ndimage.maximum_filter(ydata,9)
ind = abs(xdata-xpos).argmin()
p = scipy.where((p==ydata))[0]
if abs(p-ind).min()>5:
print 'Not a line'
return
indx = p[abs(p-ind).argmin()]
for i in self.peaks:
if abs(indx-i)<9:
print 'Too close to another line.'
return
if indx<5. or indx>xdata.size-6:
print 'Too close to edge.'
return
peaks = scipy.arange(self.peaks.size+1)
n = self.peaks[self.peaks<indx].size
peaks[:n] = self.peaks[:n].copy()
peaks[n] = indx
peaks[n+1:] = self.peaks[n:].copy()
sel = scipy.arange(peaks.size)
sel[:n] = self.selected[:n].copy()
sel[n] = 0
sel[n+1:] = self.selected[n:].copy()
self.peaks = peaks.copy()
self.selected = sel.copy()
xlim = self.ax.get_xlim()
ylim = self.ax.get_ylim()
if self.spec==self.data:
l = plt.axvline(xdata[indx],c='k')
self.lines.insert(n,l)
self.datapeaks = self.peaks
self.datasel = self.selected
self.datalines = self.lines
else:
l = plt.axvline(xdata[indx],c='k',ls=':')
self.lines.insert(n,l)
self.skypeaks = self.peaks
self.skysel = self.selected
self.skylines = self.lines
self.ax.set_xlim(xlim)
self.ax.set_ylim(ylim)
plt.draw()
self.canvas.mpl_disconnect(self.addid)
self.connect()
self.pick = False
def disconnect(self):
self.canvas.mpl_disconnect(self.pressid)
self.canvas.mpl_disconnect(self.moveid)
self.canvas.mpl_disconnect(self.offid)
def write(self):
oname = raw_input('Name of output file: ')
f = open(oname,'w')
xdata = self.spec.get_xdata()
selected = self.datasel
peaks = self.datapeaks
soln = self.soln
pickle.dump([xdata,selected,peaks,soln],f)
f.close()
print "Writing Complete"
def read(self):
oname = raw_input('Name of input file: ')
try:
f = open(oname,'r')
xdata,selected,peaks,soln = pickle.load(f)
f.close()
except:
print "Could not read file: %s"%oname
return
self.data.set_xdata(xdata)
self.spec.set_xdata(xdata)
self.datapeaks = peaks
self.datasel = selected
self.soln = soln
for i in self.lines:
i.remove()
for i in range(len(self.lines)):
del self.lines[0]
for i in range(len(peaks)):
c = 'k'
if selected[i]==1:
c = 'r'
l = plt.axvline(xdata[peaks[i]],c=c)
self.datalines.append(l)
self.lines = self.datalines
self.peaks = self.datapeaks
self.selected = self.datasel
plt.draw()
def id_spec(spec,model):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.fmt_xdata = plt.FormatStrFormatter('%4.2f')
ax.fmt_ydata = plt.FormatStrFormatter('%4.2f')
from scipy import ndimage,interpolate
skymodel = model['model']
data = spec.copy()
blue,red,scale = model['blue'],model['red'],model['scale']
while (red-blue)/scale<data.size:
red += scale
blue -= scale
wave = scipy.arange(blue,red,scale)
blue,red = model['blue'],model['red']
sky = interpolate.splev(wave,skymodel)
sky /= sky.mean()/data.mean()
plt.plot(wave,sky,c='gray')
from scipy import optimize,signal
corr = signal.correlate(sky,data,mode='valid')
w0 = wave[corr.argmax()]
p = [w0,scale,0.,0.,1.]
xvals = numpy.arange(data.size).astype(numpy.float32)
def opt(p):
p = numpy.array(p)
n = p[-1]
coeff = numpy.atleast_2d(p[:-1]).T
m = {'coeff':coeff,'type':'polynomial'}
w = sf.genfunc(xvals,0.,m)
mod = n*interpolate.splev(w,skymodel)
return (data-mod)/abs(data)**0.5
#coeff,ier = optimize.leastsq(opt,p,maxfev=10000,epsfcn=1e-5)
#fit = {'coeff':numpy.atleast_2d(coeff[:-1]).T,'type':'polynomial'}
#dwave = sf.genfunc(xvals,0.,fit)
dwave = numpy.linspace(blue,red,data.size)
dr = IDSpectrum(data,sky,dwave,wave,skymodel)
    if dr.soln is None:
        dr.do_fit()
    x = numpy.arange(sky.size)
    w = sf.genfunc(x,0.,dr.soln)
xdata = dr.data.get_xdata()
ydata = dr.data.get_ydata()
return dr.soln
|
StarcoderdataPython
|
11296788
|
#!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
######################## -*- coding: utf-8 -*-
# simple script to generate p-coordinate specific input from standard experiment
import numpy as np
import matplotlib.pyplot as plt
import sys, os
# requires that the path contains utils/python/MITgcmutils or that the utils
# are installed via pip or similar:
import MITgcmutils as mit
# some helper routines
def sqinf(a):
""" replace zeros by Inf
"""
b = np.copy(np.squeeze(a))
b[b==0] = np.Inf
return b
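# Illustrative sketch (not part of the original script): sqinf() is used below so
# that 1/rho evaluates to exactly zero in masked (land) cells instead of raising a
# divide-by-zero warning; a quick self-contained check of that behaviour.
def _sqinf_example():
    rho = np.array([[1035.0, 0.0], [1040.0, 0.0]])
    recip = 1.0 / sqinf(rho)
    assert recip[0, 1] == 0.0 and recip[1, 1] == 0.0   # masked cells -> 0
    assert abs(recip[0, 0] - 1.0 / 1035.0) < 1e-12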
def readfield(fname,dims,datatype):
"""Call signatures::
readfield(filename, dims, numpy.datatype)
    Read unblocked binary data with dimensions "dims".
"""
try:
fid = open(fname,"rb")
except:
sys.exit( fname+": no such file or directory")
v = np.fromfile(fid, datatype)
fid.close()
if sys.byteorder == 'little': v.byteswap(True)
if len(v) == np.prod(dims): v = v.reshape(dims)
elif len(v) == np.prod(dims[1:]): v = v.reshape(dims[1:])
else:
errstr = ( "dimensions do not match: \n len(data) = " + str(len(v))
+ ", but prod(dims) = " + str(np.prod(dims)) )
raise RuntimeError(errstr)
return v
def writefield(fname,data):
"""Call signatures::
writefield(filename, numpy.ndarray)
Write unblocked binary data.
"""
    if sys.byteorder == 'little': data.byteswap(True)
    fid = open(fname,"wb")
    data.tofile(fid)
    fid.close()
    # switch back to machine format
    if sys.byteorder == 'little': data.byteswap(True)
def calc_hydrostatic_pressure(s,t,p0,dz,gravity=9.81,rhoConst=1035.):
from MITgcmutils import jmd95
mskz = np.copy(t)
mskz[mskz!=0]=1.
dp = np.copy(p0)
dims=np.asarray(t.shape)
dims[0]=dims[0]+1
pf = np.zeros(dims)
grho = gravity*rhoConst
rhoInSitu0 = jmd95.dens(s,t,p0/grho)*mskz
# integration of non-linear hydrostatic equation requires iteration:
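    # The discrete relation being integrated (stated here for clarity; this is a
    # reading of the loop below, not part of the original script):
    #   dp_k     = g * rho_k * dz_k
    #   p_c(k)   = p_f(k) + 0.5 * dp_k     (pressure at the layer centre)
    #   p_f(k+1) = p_c(k) + 0.5 * dp_k     (pressure at the next interface)
    # Because rho_k = rho(S, T, p_c(k)) depends on the pressure being solved for,
    # the downward integration is repeated until p_c stops changing.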
resid = 1
while resid>1e-15:
rhoInSitu = jmd95.dens(s,t,p0/grho)*mskz
# save old pressure
dp = np.copy(p0)
# compute new pressure
pf[0,...] = 0.
for k in range(nr):
dpk = dz[k,...]*gravity*rhoInSitu[k,...]
p0[k,...] = (pf[k,...] + 0.5*dpk)*mskz[k,...]
pf[k+1,...] = (p0[k,...] + 0.5*dpk)*mskz[k,...]
# check convergence
dp = dp-p0
resid = np.sqrt((dp**2).sum())
        print('hydrostatic pressure: residual = %e, '%np.sqrt((dp**2).sum()))
print()
return p0, pf, rhoInSitu
gravity = 9.81
rhoConst= 1035.
grho=gravity*rhoConst
nr=15
nn=32
ny=nn
nx=6*nn
# from now on we assume that ./testreport has been run for this experiment
# and that we have output available in ../tr_run.seaice
# bathymetry
prec = 'float64'
#b=readfield('../input/bathy_Hmin50.bin',[nn,nn*6],prec)
b= - mit.rdmds('../tr_run.seaice/Depth')
writefield('bathy_Hmin50.bin',-b*grho)
# hydrography
t=readfield('../input/lev_T_cs_15k.bin',[nr,nn,nn*6],prec)
s=readfield('../input/lev_S_cs_15k.bin',[nr,nn,nn*6],prec)
writefield('lev_T_cs_15k.bin',t[::-1,:,:])
writefield('lev_S_cs_15k.bin',s[::-1,:,:])
hfz = mit.rdmds('../tr_run.seaice/hFacC')
rac = mit.rdmds('../tr_run.seaice/RAC')
xg = mit.rdmds('../tr_run.seaice/XG')
yg = mit.rdmds('../tr_run.seaice/YG')
mskz = np.copy(hfz)
mskz[mskz!=0] = 1.
# create geopotential anomaly file:
delz = np.asarray([50., 70., 100., 140., 190.,
240., 290., 340., 390., 440.,
490., 540., 590., 640., 690.])
delp = delz*grho
# # this is from "data", but it is unclear where these numbers come from
# delp = np.asarray([7103300.720021, 6570548.440790, 6041670.010249,
# 5516436.666057, 4994602.034410, 4475903.435290,
# 3960063.245801, 3446790.312651, 2935781.405664,
# 2426722.705046, 1919291.315988, 1413156.804970,
# 1008846.750166, 705919.025481, 504089.693499])[::-1]
# integrate initial fields vertically minus reference potential
pf0 = np.hstack([0,np.cumsum(delp)])
pc0 = 0.5*(pf0[:-1]+pf0[1:])
pc3d = np.tile(pc0.reshape((nr,1,1)),(1,ny,nx))*mskz
dz3d = np.tile(delz.reshape((nr,1,1)),(1,ny,nx))
# first guess of hydrostatic pressure at center-points based on delp
# pf is the hydrostatic pressure at interfaces (w-points)
pc = np.copy(pc3d)
pc,pf,rhoInSitu = calc_hydrostatic_pressure(s,t,pc,dz3d)
# the new pressure also implies different delp, here computed as an average
# over the model domain
pm = np.zeros((nr,))
tm = np.zeros((nr,))
sm = np.zeros((nr,))
rhom = np.zeros((nr,))
for k in range(nr):
racz = rac*mskz[k,:,:]
pm[k] = (pc[k,:,:]*racz).sum()/racz.sum()
tm[k] = (t[k,:,:]*racz).sum()/racz.sum()
sm[k] = (s[k,:,:]*racz).sum()/racz.sum()
rhom[k] = (rhoInSitu[k,:,:]*racz).sum()/racz.sum()
# hydrostatic pressure from averaged temperature and salinity profiles
pmm = np.copy(pm)
pmm,pff,rr = calc_hydrostatic_pressure(sm,tm,pmm,delz)
# this is very similar to diff(pfm), see below
dp = np.diff(pff)
print('hydrostatic pressure layer thickness from averaged hydrography:')
print(' delR = %14f, %14f, %14f,'%(dp[-1],dp[-2],dp[-3]))
for k in range(nr-4,0,-3):
print(' %14f, %14f, %14f,'%(dp[k],dp[k-1],dp[k-2]))
# averaged pressure at interfaces to compute delP
pfm = np.zeros((nr+1,))
for k in range(nr):
racz = rac*mskz[k,:,:]
pfm[k] = (pf[k,:,:]*racz).sum()/racz.sum()
pfm[nr] = (pf[nr,:,:]*racz).sum()/racz.sum()
dp = np.diff(pfm)
print('hydrostatic pressure layer thickness from averaged pressure:')
print(' delR = %14f, %14f, %14f,'%(dp[-1],dp[-2],dp[-3]))
for k in range(nr-4,0,-3):
print(' %14f, %14f, %14f,'%(dp[k],dp[k-1],dp[k-2]))
# now we would like to compute delRc (distance between c-points)
dp = np.zeros((nr,))
dp[0] = pm[0] # assuming zero surface pressure
dp[1:] = pm[1:]-pm[:-1]
print(' delRc = %14f, %14f, %14f,'%(dp[-1],dp[-2],dp[-3]))
for k in range(nr-4,0,-3):
print(' %14f, %14f, %14f,'%(dp[k],dp[k-1],dp[k-2]))
dp3d = np.tile(dp.reshape((nr,1,1)),(1,ny,nx))
# this is the correct way of computing the geopotential anomaly
# (if integr_geoPot = 1)
recip_rho = 1./sqinf(rhoInSitu)
geopotanom = -((recip_rho - 1/rhoConst)*hfz*dp3d).sum(axis=0)
# this is equivalent
geopotanom1= b/rhoConst-(recip_rho*hfz*dp3d).sum(axis=0)
# these are approximation that are not quite accurate
geopotanom2= ((rhoInSitu - rhoConst)*hfz*dz3d).sum(axis=0)*gravity/rhoConst
geopotanom3= -((recip_rho - 1/rhoConst)*grho*hfz*dz3d).sum(axis=0)
# the correct version
writefield('geopotanom.bin',geopotanom)
# # pickup (this is tricky)
# p = readfield('../input/pickup.0000072000',[nr*8+3,nn,nn*6],prec)
# nflds=8
# p3d = np.copy(p[:nflds*nr,:,:])
# for k in range(nflds):
# pp = p3d[(k-1)*nr:k*nr,:,:]
# p[(k-1)*nr:k*nr] = pp[::-1,:,:]
# writefield('pickup.0000072000',p)
# plot field
#plt.clf()
#mit.cs.pcol(xg,yg,geopotanom)
#plt.colorbar(orientation='horizontal')
#plt.show()
|
StarcoderdataPython
|
1686666
|
from rest_framework.permissions import BasePermission
class SubjectPermission(BasePermission):
def has_permission(self, request, view):
if view.action == 'list':
return request.user.is_authenticated()
else:
return False
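# Illustrative sketch (not part of the original module): this permission class is
# typically attached to a view via `permission_classes`; the SubjectViewSet name
# below is hypothetical.
#
#   class SubjectViewSet(viewsets.ModelViewSet):
#       permission_classes = (SubjectPermission,)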
|
StarcoderdataPython
|