id | content |
---|---|
62254
|
import tornado.gen
from .base import BaseApiHandler
from ..tasks import cel
class TaskHandler(BaseApiHandler):
@tornado.gen.coroutine
def get(self, task_id):
data = yield self.get_task_meta(task_id)
result_data = {'result': data['result'], 'status': data['status']}
self.finish(result_data)
@staticmethod
@tornado.gen.coroutine
def get_task_meta(task_id):
return cel.backend.get_task_meta(task_id)
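# Minimal wiring sketch (not part of the original handler): it assumes BaseApiHandler
# extends tornado.web.RequestHandler and that ..tasks exposes the Celery app as `cel`;
# the route pattern and port below are hypothetical.
import tornado.ioloop
import tornado.web

def make_app():
    return tornado.web.Application([
        (r"/api/task/([^/]+)", TaskHandler),
    ])

if __name__ == "__main__":
    make_app().listen(8888)
    tornado.ioloop.IOLoop.current().start()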
|
62264
|
import mysql.connector
conn = mysql.connector.connect(
host="192.168.99.102",
user="root",
passwd="<PASSWORD>",
database="user_db",
port="3308"
)
def find_all():
query = "SELECT * FROM users"
try:
cursor = conn.cursor()
rows = cursor.execute(query)
cursor.close()
return rows
except mysql.connector.Error as err:
print("Something went wrong: {}".format(err))
def find_one_by_id(user_id):
query = "SELECT * FROM users where id='%'"
try:
cursor = conn.cursor()
row = cursor.execute(query, user_id)
cursor.close()
return row
except mysql.connector.Error as err:
print("Something went wrong: {}".format(err))
def find_one_by_name(name):
query = "SELECT * FROM users where name='%'"
try:
cursor = conn.cursor()
row = cursor.execute(query, name)
cursor.close()
return row
except mysql.connector.Error as err:
print("Something went wrong: {}".format(err))
users = find_all()
user_1 = find_one_by_id(1)
user_pete = find_one_by_name('pete')
"""
Something went wrong: 1146 (42S02): Table 'user_db.users' doesn't exist
Something went wrong: 1146 (42S02): Table 'user_db.users' doesn't exist
Something went wrong: 1146 (42S02): Table 'user_db.users' doesn't exist
"""
|
62270
|
import fileinput
from itertools import count
banks = [int(n) for n in fileinput.input()[0].split()]
seen = {}
for i in count(start=1):
m = max(banks)
idx = banks.index(m)
banks[idx] = 0
for j in range(1, m + 1):
banks[(idx + j) % len(banks)] += 1
t = tuple(banks)
if t in seen:
break
seen[t] = i
print "Number of redistribution cycles:", i
print "Length of infinite loop cycle:", i - seen[t]
|
62335
|
import sqlite3
import json
import logging
from ryu.app.wsgi import ControllerBase, WSGIApplication, route
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, CONFIG_DISPATCHER, DEAD_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import udp
from webob import Response
from asymlist import Node, AsymLList
conn = sqlite3.connect('nfv.sqlite')
cur = conn.cursor()
flows = {}
DELTA = 3000
##################
class vnf(Node):
def __init__(self, vnf_id, is_bidirect=True, cur=None):
super().__init__(vnf_id, is_bidirect)
### added iftype bitwise support: 1(01)-out, 2(10)-in, 3(11)-inout
### & 1 - first bit; & 2 - second bit
### Ex. bitwise iftype selection:
### 'select * from vnf where iftype & 2 != 0'
### 'select dpid, in_port, locator_addr from vnf where id=X and iftype & 1 != 0'
cur.execute(''' select dpid, in_port, locator_addr, bidirectional from vnf where id=? and iftype & 2 != 0''', (self.id,))
self.dpid_in, self.port_in, self.locator_addr_in, is_bidirect = cur.fetchone()
logging.debug('Locator addr: %s', self.locator_addr_in)
cur.execute(''' select dpid, in_port, locator_addr from vnf where id=? and iftype & 1 != 0''', (self.id,))
self.dpid_out, self.port_out, self.locator_addr_out = cur.fetchone()
if is_bidirect.lower() == "false":
self.is_bidirect = False
class sfc(AsymLList):
def __init__(self, flow_id, nodeClass=vnf, cur=None):
self.cur = cur
self.cur.execute('''select * from flows where id = ? ''', (flow_id,))
self.flow_spec = cur.fetchone()
if self.flow_spec is None:
logging.debug('Flow %s is not defined', flow_id)
raise ValueError("Flow is not known")
self.flow_dict = {}
self.flows = {}
(self.flow_id, self.name, self.flow_dict['in_port'],
self.flow_dict['eth_dst'], self.flow_dict['eth_src'], self.flow_dict['eth_type'],
self.flow_dict['ip_proto'], self.flow_dict['ipv4_src'], self.flow_dict['ipv4_dst'],
self.flow_dict['tcp_src'], self.flow_dict['tcp_dst'], self.flow_dict['udp_src'],
self.flow_dict['udp_dst'], self.flow_dict['ipv6_src'], self.flow_dict['ipv6_dst'],
self.service_id) = self.flow_spec
if not self.flow_dict['eth_type']:
self.flow_dict['eth_type'] = 0x0800
self.flow_id = int(flow_id)
self.reverse_flow_id = self.flow_id+DELTA
self.flows[self.flow_id] = self.flow_dict
self.flows[self.reverse_flow_id] = sfc_app_cls.reverse_flow(self.flows[self.flow_id])
self.cur.execute('''select vnf_id from service where service_id = ? except select next_vnf_id from service where service_id = ? ''', (self.service_id, self.service_id))
vnf_id = self.cur.fetchone()[0]
super().__init__(vnf_id, is_bidirect=True, nodeClass=nodeClass, cur=self.cur)
self.fill()
def __str__(self):
return str(self.forward())
def append(self):
self.cur.execute('''select next_vnf_id from service where service_id = ? and vnf_id = ? ''', (self.service_id, self.last.id))
next_vnf_id = self.cur.fetchone()[0]
if next_vnf_id is None:
return None
logging.debug('Trying to append %s', next_vnf_id)
return super().append(next_vnf_id, cur=self.cur)
def fill(self):
logging.debug('Filling...')
while self.append():
pass
return self.last
def install_catching_rule(self, sfc_app_cls):
logging.debug("Adding catching rule...")
actions = []
flow_id = self.flow_id
for flow_id in (self.flow_id, self.reverse_flow_id):
for dp in sfc_app_cls.datapaths.values():
match = sfc_app_cls.create_match(dp.ofproto_parser, self.flows[flow_id])
sfc_app_cls.add_flow(dp, 1, match, actions, metadata=flow_id, goto_id=2)
if self.back is None:
break
return Response(status=200)
def delete_rule(self, sfc_app_cls, flow_match):
logging.debug('Deleting rule...')
flow_dict = self.flows[flow_match]
for dp in sfc_app_cls.datapaths.values():
match_del = sfc_app_cls.create_match(dp.ofproto_parser, flow_dict)
sfc_app_cls.del_flow(datapath=dp, match=match_del)
def install_steering_rule(self, sfc_app_cls, dp_entry, in_port_entry, flow_match):
logging.debug("Adding steering rule...")
actions = []
dp = dp_entry
parser = dp.ofproto_parser
flow_dict = self.flows[flow_match]
flow_dict['in_port'] = in_port_entry
match = sfc_app_cls.create_match(parser, flow_dict)
if flow_match < DELTA:
for vnf in self.forward():
#dpid_out = vnf.dpid_out
actions.append(parser.OFPActionSetField(eth_dst=vnf.locator_addr_in))
sfc_app_cls.add_flow(dp, 8, match, actions, goto_id=1)
actions = []
flow_dict['in_port'] = vnf.port_out
dp = sfc_app_cls.datapaths[vnf.dpid_out]
match = sfc_app_cls.create_match(parser, flow_dict)
else:
for vnf in self.backward():
#dpid_out = vnf.dpid_out
actions.append(parser.OFPActionSetField(eth_dst=vnf.locator_addr_out))
sfc_app_cls.add_flow(dp, 8, match, actions, goto_id=1)
actions = []
flow_dict['in_port'] = vnf.port_out
dp = sfc_app_cls.datapaths[vnf.dpid_out]
match = sfc_app_cls.create_match(parser, flow_dict)
#################################
class SFCController(ControllerBase):
def __init__(self, req, link, data, **config):
super(SFCController, self).__init__(req, link, data, **config)
self.sfc_api_app = data['sfc_api_app']
@route('hello', '/{greeting}/{name}', methods=['GET'])
def hello(self, req, **kwargs):
greeting = kwargs['greeting']
name = kwargs['name']
message = greeting +' '+ name
privet = {'message': message}
body = json.dumps(privet)
return Response(content_type='application/json', body=body.encode('utf-8'), status=200)
@route('add-flow', '/add_flow/{flow_id}', methods=['GET'])
def api_add_flow(self, req, **kwargs):
sfc_ap = self.sfc_api_app
flow_id = kwargs['flow_id']
logging.debug('FLOW ID: %s', flow_id)
try:
flows[flow_id] = sfc(flow_id, cur=cur)
except ValueError:
message = {'Result': 'Flow {} is not defined'.format(flow_id)}
body = json.dumps(message)
return Response(content_type='application/json', body=body.encode('utf-8'), status=404)
except TypeError:
message = {'Result': 'DB inconsistency'}
body = json.dumps(message)
return Response(content_type='application/json', body=body.encode('utf-8'), status=500)
logging.debug('SFC: %s', str(flows[flow_id]))
        return flows[flow_id].install_catching_rule(sfc_ap)
@route('delete-flow', '/delete_flow/{flow_id}', methods=['GET'])
def api_delete_flow(self, req, **kwargs):
'''Deletes flow from the application and clears the corresponding rule from DPs '''
sfc_ap = self.sfc_api_app
flow_id = kwargs['flow_id']
cur.execute('''select * from flows where id = ?''', (kwargs['flow_id'],))
flow_spec = cur.fetchone()
flow_dict = {}
if not flow_spec: return Response(status=404)
(flow_id, name, flow_dict['in_port'], flow_dict['eth_dst'],
flow_dict['eth_src'], flow_dict['eth_type'], flow_dict['ip_proto'],
flow_dict['ipv4_src'], flow_dict['ipv4_dst'], flow_dict['tcp_src'],
flow_dict['tcp_dst'], flow_dict['udp_src'], flow_dict['udp_dst'],
flow_dict['ipv6_src'], flow_dict['ipv6_dst'], service_id) = flow_spec
if not flow_dict['eth_type']: flow_dict['eth_type'] = 0x0800
reverse_flow_dict = sfc_app_cls.reverse_flow(flow_dict)
for flow_dict in (flow_dict, reverse_flow_dict):
for dp in sfc_ap.datapaths.values():
match_del = sfc_ap.create_match(dp.ofproto_parser, flow_dict)
sfc_ap.del_flow(datapath=dp, match=match_del)
try:
del flows[str(flow_id)]
logging.debug('Flow %s deleted', flow_id)
except KeyError:
logging.debug('Flow %s not found, but an attempt to delete it from DPs has been performed', flow_id)
return Response(status=200)
@route('flows', '/flows/{flow_id}', methods=['GET'])
def api_show_flow(self, req, **kwargs):
flow_id = kwargs['flow_id']
try:
body = json.dumps({flow_id:str(flows[flow_id])})
return Response(content_type='application/json', body=body.encode('utf-8'), status=200)
except KeyError:
body = json.dumps({'ERROR':'Flow {} not found/not installed'.format(flow_id)})
return Response(content_type='application/json', body=body.encode('utf-8'), status=404)
@route('flows_all', '/flows', methods=['GET'])
def api_show_flows(self, req):
logging.debug('FLOWS: {}'.format(str(flows)))
body = json.dumps(str(flows))
return Response(content_type='application/json', body=body.encode('utf-8'), status=200)
class sfc_app_cls(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
_CONTEXTS = {'wsgi': WSGIApplication}
def __init__(self, *args, **kwargs):
super(sfc_app_cls, self).__init__(*args, **kwargs)
wsgi = kwargs['wsgi']
wsgi.register(SFCController, {'sfc_api_app': self})
self.datapaths = {}
######## database definition
# conn = sqlite3.connect('nfv.sqlite')
# cur = conn.cursor()
# cur.executescript('''
# DROP TABLE IF EXISTS vnf;
# CREATE TABLE vnf (
# id INTEGER NOT NULL,
# name TEXT,
# type_id INTEGER,
# group_id INTEGER,
# geo_location TEXT,
# iftype INTEGER,
# bidirectional BOOLEAN,
# dpid INTEGER,
# in_port INTEGER,
# locator_addr NUMERIC
# PRIMARY KEY(id,iftype)
# );
# create unique index equipment_uind on vnf (name,iftype)
# ''')
# conn.commit()
# cur.close()
######## END of database definition
    ######### Register/unregister datapaths in the datapath dictionary
@set_ev_cls(ofp_event.EventOFPStateChange,
[MAIN_DISPATCHER, DEAD_DISPATCHER])
def _state_change_handler(self, ev):
datapath = ev.datapath
if ev.state == MAIN_DISPATCHER:
            if datapath.id not in self.datapaths:
self.logger.debug('register datapath: %016x', datapath.id)
self.datapaths[datapath.id] = datapath
elif ev.state == DEAD_DISPATCHER:
if datapath.id in self.datapaths:
self.logger.debug('unregister datapath: %016x', datapath.id)
del self.datapaths[datapath.id]
    ########## Set default rules when a DP connects
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
datapath = ev.msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
#### Set flow to retrieve registration packet
match = parser.OFPMatch(eth_type=0x0800, ip_proto=17, udp_dst=30012)
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER, ofproto.OFPCML_NO_BUFFER)]
self.add_flow(datapath, 1, match, actions)
#### Set defaults for table 1 and 2
match = parser.OFPMatch()
actions = []
self.add_flow(datapath, 0, match, actions, goto_id=1)
actions = [parser.OFPActionOutput(ofproto.OFPP_NORMAL,
ofproto.OFPCML_NO_BUFFER)]
self.add_flow(datapath, 0, match, actions, table_id=1)
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER)]
self.add_flow(datapath, 0, match, actions, table_id=2)
################ Packet_IN handler ####################
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
if msg.reason == ofproto.OFPR_NO_MATCH:
reason = 'NO MATCH'
elif msg.reason == ofproto.OFPR_ACTION:
reason = 'ACTION'
elif msg.reason == ofproto.OFPR_INVALID_TTL:
reason = 'INVALID TTL'
else:
reason = 'unknown'
self.logger.debug('OFPPacketIn received: '
'buffer_id=%x total_len=%d reason=%s '
'table_id=%d cookie=%d match=%s ',
msg.buffer_id, msg.total_len, reason,
msg.table_id, msg.cookie, msg.match)
try:
flow_match = msg.match['metadata']
if msg.match['metadata'] > DELTA:
flow_id = flow_match - DELTA
else:
flow_id = flow_match
in_port_entry = msg.match['in_port']
dp_entry = datapath
####### Deleting catching rules
logging.debug('Deleting catching rules - flow:%d match:%d ...', flow_id, flow_match)
flows[str(flow_id)].delete_rule(self, flow_match)
####### Installing steering rules
logging.debug('Installing steering rules - flow:%d match:%d ...', flow_id, flow_match)
flows[str(flow_id)].install_steering_rule(self, dp_entry, in_port_entry, flow_match)
except KeyError:
flow_match = None
pass
        ####### VNF self-registration
in_port = msg.match['in_port']
pkt = packet.Packet(msg.data)
#pkt_arp = pkt.get_protocol(arp.arp)
pkt_eth = pkt.get_protocol(ethernet.ethernet)
#pkt_ip = pkt.get_protocol(ipv4.ipv4)
pkt_udp = pkt.get_protocol(udp.udp)
if pkt_udp:
if pkt_udp.dst_port == 30012:
reg_string = pkt.protocols[-1]
reg_info = json.loads(reg_string)
name = reg_info['register']['name']
vnf_id = reg_info['register']['vnf_id']
logging.debug('VNF ID from reg packet %s', vnf_id)
type_id = reg_info['register']['type_id']
group_id = reg_info['register']['group_id']
geo_location = reg_info['register']['geo_location']
iftype = reg_info['register']['iftype']
bidirectional = reg_info['register']['bidirectional']
dpid = datapath.id
locator_addr = pkt_eth.src
logging.debug("Inserting self-registartion info into DB")
cur.execute('''REPLACE INTO vnf (id, name, type_id,
group_id, geo_location, iftype, bidirectional,
dpid, in_port, locator_addr ) VALUES ( ?, ?, ?,
?, ?, ?, ?, ?, ?, ? )''',
(vnf_id, name, type_id, group_id, geo_location,
iftype, bidirectional, dpid, in_port, locator_addr)
)
cur.execute('SELECT id FROM vnf WHERE name = ? AND iftype = ?',
(name, iftype)
)
vnf_id = cur.fetchone()[0]
conn.commit()
#cur.close()
############# Function definitions #############
def add_flow(self, datapath, priority, match, actions,
buffer_id=None, table_id=0, metadata=None, goto_id=None):
logging.debug("Add flow to DP %d", datapath.id)
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
if goto_id:
#inst = [parser.OFPInstructionActions(ofproto.OFPIT_WRITE_ACTIONS, actions)]
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)]
if metadata:
inst.append(parser.OFPInstructionWriteMetadata(metadata, 0xffffffff))
inst.append(parser.OFPInstructionGotoTable(goto_id))
else:
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)]
#inst.append(parser.OFPInstructionWriteMetadata(1,0xffffffff))
if buffer_id:
mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
priority=priority, match=match,
instructions=inst, table_id=table_id)
else:
mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
match=match, instructions=inst,
table_id=table_id)
datapath.send_msg(mod)
def del_flow(self, datapath, match):
''' Deletes a flow defined by match from a DP '''
logging.debug("Delele flow from DP %d", datapath.id)
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
mod = parser.OFPFlowMod(datapath=datapath,
command=ofproto.OFPFC_DELETE,
out_port=ofproto.OFPP_ANY,
out_group=ofproto.OFPG_ANY,
match=match)
datapath.send_msg(mod)
def create_match(self, parser, fields):
'''Creates OFP match struct from the list of fields. New API.'''
flow_dict = {}
for k, v in fields.items():
if v is not None:
flow_dict[k] = v
match = parser.OFPMatch(**flow_dict)
return match
def reverse_flow(flow_dict):
'''Creates reverse flow dict '''
reverse_flow_dict = {**flow_dict}
reverse_flow_dict['eth_src'] = flow_dict['eth_dst']
reverse_flow_dict['eth_dst'] = flow_dict['eth_src']
reverse_flow_dict['ipv4_src'] = flow_dict['ipv4_dst']
reverse_flow_dict['ipv4_dst'] = flow_dict['ipv4_src']
reverse_flow_dict['tcp_src'] = flow_dict['tcp_dst']
reverse_flow_dict['tcp_dst'] = flow_dict['tcp_src']
reverse_flow_dict['udp_src'] = flow_dict['udp_dst']
reverse_flow_dict['udp_dst'] = flow_dict['udp_src']
reverse_flow_dict['ipv6_src'] = flow_dict['ipv6_dst']
reverse_flow_dict['ipv6_dst'] = flow_dict['ipv6_src']
return reverse_flow_dict
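# REST usage sketch (not part of the app): the routes above are served by Ryu's WSGI
# server, which listens on port 8080 by default (adjust if --wsapi-port is changed).
# Assumes the app is started with something like `ryu-manager this_module.py` and that
# flow id 1 exists in the flows table of nfv.sqlite.
#
# import requests
#
# BASE = "http://127.0.0.1:8080"
# requests.get(BASE + "/add_flow/1")            # install catching rules for flow 1
# print(requests.get(BASE + "/flows").json())   # list flows known to the app
# requests.get(BASE + "/delete_flow/1")         # remove the flow and clear its rules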
|
62351
|
import numpy
from chainer import functions
from chainer import testing
@testing.parameterize(*(testing.product({
'batchsize': [1, 5],
'size': [10, 20],
'dtype': [numpy.float32],
'eps': [1e-5, 1e-1],
})))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
]
)
class TestLayerNormalization(testing.FunctionTestCase):
def setUp(self):
self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-3}
self.check_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
self.check_double_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-2}
self.check_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
self.check_double_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
def generate_inputs(self):
shape = self.batchsize, self.size
size = numpy.prod(shape) // shape[0]
x = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
gamma = numpy.random.uniform(-1, 1, size).astype(self.dtype)
beta = numpy.random.uniform(-1, 1, size).astype(self.dtype)
return x, gamma, beta
def forward_expected(self, inputs):
x, gamma, beta = inputs
mean = numpy.mean(x, axis=1, keepdims=True)
var = numpy.mean(numpy.square(x - mean), axis=1, keepdims=True)
std = numpy.sqrt(var + self.eps)
y_expected = (
numpy.expand_dims(gamma, axis=0) * (x - mean) / std
+ numpy.expand_dims(beta, axis=0))
return y_expected,
def forward(self, inputs, device):
x, gamma, beta = inputs
y = functions.layer_normalization(x, gamma, beta, eps=self.eps)
return y,
testing.run_module(__name__, __file__)
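# For reference, forward_expected above implements standard layer normalization over
# the feature axis of each sample:
#   y = gamma * (x - mean(x)) / sqrt(var(x) + eps) + beta
# which is what functions.layer_normalization is being checked against.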
|
62419
|
from __future__ import with_statement
# ==============================================================================
# GGisy (python v2.7)
#
# Author: <NAME> (<EMAIL>)
# Bugs and errors: https://github.com/Sanrrone/GGisy/issues
#
# Please type "python GGisy.py -h" for usage help
#
# ==============================================================================
__author__ = '<NAME> (<EMAIL>)'
__version__ = '1.0'
import sys, os, subprocess, glob, csv, collections
from optparse import OptionParser
from operator import itemgetter
from Bio import SeqIO
def main():
parser = OptionParser(usage = "Usage: python GGisy.py -r genome1.fna -q genome2.fna")
parser.add_option("-r","--reference",dest="genome1",help="First genome to be used as reference", default=None)
parser.add_option("-q","--query",dest="genome2",help="Second genome to be used as query against the first genome (-r)", default=None)
parser.add_option("-l","--alignmentLength",dest="alignL",help="Aligment length cutoff in blast output [default: 1000]",default=1000)
parser.add_option("-e","--evalue",dest="evalue",help="E-value cutoff for blastn search [default: 1e-3]",default=1e-3)
parser.add_option("-i","--identity",dest="Identity",help="Identity cutoff on the blastn alignment to consider the region [default: 50]",default=50)
parser.add_option("-t","--threads",dest="Threads",help="Number of threads to be used for blast [default: 4]",default=4)
parser.add_option("-b","--blastout",dest="Blastout",help="Blast output file to be used instead doing it [default: none]",default=None)
parser.add_option("-c","--clean",dest="clean",help="clean files after execution [default: True]",default=True)
(options,args) = parser.parse_args()
genome1 = str(options.genome1)
genome2 = str(options.genome2)
alignL= int(options.alignL)
evalue= str(options.evalue)
Identity= int(options.Identity)
threads= str(options.Threads) #for subcallproccess must be str()
blastout= options.Blastout #dont cast to str
cleanf=options.clean
#check variables
    if options.genome1 is None:
        print("* No reference genome was provided (-r), use -h for help")
sys.exit()
else:
if os.path.isfile(genome1) == False:
print("*",genome1," doesn't exist")
sys.exit()
    if options.genome2 is None:
        print("* It is mandatory to provide a second genome (-q), use -h for help")
sys.exit()
else:
if os.path.isfile(genome2) == False:
print("* ",genome2," doesn't exist")
sys.exit()
if blastout != None:
if os.path.isfile(blastout) == False:
print("* ", blastout, "not found, check if file exist or let the program do the blast omiting this option (-b)")
sys.exit()
blastBIN=which("blastn")
if blastBIN == None:
print("No blastn was found, install it before continue (make sure is in your $PATH)")
sys.exit()
makeblastBIN=which("makeblastdb")
if makeblastBIN == None:
print("No makeblastdb was found, install it from blast+ (make sure is in your $PATH)")
sys.exit()
rscriptBIN=which("Rscript")
if rscriptBIN == None:
print("No Rscript was found, make sure is in your $PATH")
sys.exit()
Inputs = collections.namedtuple('Inputs', ['v1', 'v2', 'v3', 'v4', 'v5', 'v6', 'v7', 'v8'])
I = Inputs(genome1, genome2, alignL, evalue, Identity, threads, blastout, cleanf)
return I
def which(program): #function to check if some program exists
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def blasting(genome1, genome2, evalue, threads):
#searching for blast binaries
subprocess.call(["makeblastdb", "-in", genome1, "-input_type", "fasta", "-dbtype", "nucl", "-out", "ref"])
subprocess.call(["blastn", "-query", genome2, "-db", "ref",
"-evalue", evalue, "-outfmt", "6", "-strand", "both",
"-num_threads", threads, "-out", "tmp.tsv"])
return str("tmp.tsv")
def filterBlastOutput(blastout,alignL,evalue,identity):
PARSED=open("synteny.tsv",'w') #overwrite if exist
with open(blastout) as tsvfile:
tsvreader = csv.reader(tsvfile, delimiter="\t")
for line in tsvreader:
#formula line [n-1:n]
toint = [int(i) for i in line[3:4]]
if toint[0] >= alignL:
toint = [float(i) for i in line[2:3]]
if toint[0] >= float(identity):
PARSED.write("\t".join(map(str, line[0:3]+line[6:10]))+"\n")
PARSED.close()
def parsingGenomes(genome):
gname = genome.split('/')[-1]
PARSED=open(str(gname+"_info.tsv"),'w') #overwrite if exist
fasta_sequences = SeqIO.parse(open(genome),'fasta')
for fasta in fasta_sequences:
name, sequence = fasta.id, str(fasta.seq)
lengthSeq= len(sequence)
PARSED.write("%s\t1\t%s\n" % (name, lengthSeq))
    PARSED.close()
return str(gname+"_info.tsv")
def handleR(conn, reference, query, alignL):
plotstep=open("handle.R", 'w')
plotstep.write("""rm(list=ls());
library(OmicCircos)
library(RColorBrewer)
library(varhandle)
args<-commandArgs()
handlefile<-as.character(args[6])
refname<-as.character(args[7])
queryname<-as.character(args[8])
filterl<-as.numeric(args[9])
handle<-read.table(handlefile,sep = "\\t",stringsAsFactors = F,check.names = F)
ref<-read.table(refname,sep = "\\t",stringsAsFactors = F,check.names = F)
query<-read.table(queryname,sep = "\\t", stringsAsFactors = F,check.names = F)
rownames(ref)<-ref$V1
rownames(query)<-query$V1
qryUniq<-unique(sort(handle$V1))
refUniq<-unique(sort(handle$V2))
ref<-ref[refUniq,]
ref<-ref[with(ref, order(-V3, V1)), ]
query<-query[qryUniq,]
query<-query[with(query, order(+V3, V1)), ]
data<-rbind(ref,query)
refname<-unlist(strsplit(refname,"_info.tsv"))[1]
queryname<-unlist(strsplit(queryname,"_info.tsv"))[1]
lowId<-min(handle$V3)
fhand<-handle[handle$V6<handle$V7,]
rhand<-handle[handle$V6>handle$V7,]
linkf<-data.frame(seg1=fhand$V1, start1=fhand$V4, end1=fhand$V5, seg2=fhand$V2, start2=fhand$V6, end2=fhand$V7, stringsAsFactors = F)
linkr<-data.frame(seg1=rhand$V1, start1=rhand$V4, end1=rhand$V5, seg2=rhand$V2, start2=rhand$V6, end2=rhand$V7, stringsAsFactors = F)
#fix reverse positions
for(i in 1:nrow(linkr)){
contign<-linkr[i,4]
contigl<-ref[contign,3]
linkr[i,5]<- contigl-linkr[i,5]+1
linkr[i,6]<- contigl-linkr[i,6]+1
}
data["V5"]<-data["V4"]<-1
colnames(data)<- c("chr", "start", "end","V4","V5")
tocir <- segAnglePo(data, seg=data$chr)
gl<-sum(data$end)+nrow(data)
maxangr<-270+(350/gl)*sum(ref$V3)
spacer<-maxangr/(maxangr-270)/nrow(ref)
for(i in 1:nrow(ref)){
#358 is the total angle (available) for all
tocir[i,"angle.end"]<-as.character(as.numeric(tocir[i,"angle.start"]) + (350/gl)*as.numeric(tocir[i,7]))
tocir[i+1,"angle.start"]<-as.character(as.numeric(tocir[i,"angle.end"])+spacer)
}
tocir[i+1,"angle.start"]<-as.character(as.numeric(tocir[i+1,"angle.start"])+2.5)
tocir[i+1,"angle.end"]<-as.character(as.numeric(tocir[i+1,"angle.start"]) + (350/gl)*as.numeric(tocir[i+1,7]))
maxangq<-628-maxangr
spacer<-628/maxangq/nrow(query)
if(nrow(ref)+2>=nrow(tocir)){
i<-nrow(tocir)
tocir[i,"angle.start"]<-as.character(as.numeric(tocir[i-1,"angle.end"])+spacer)
tocir[i,"angle.end"]<-as.character(628)
}else{
for(i in (nrow(ref)+2):nrow(tocir)-1){
#358 is the total angle (available) for all
tocir[i,"angle.end"]<-as.character(as.numeric(tocir[i,"angle.start"]) + (350/gl)*as.numeric(tocir[i,7]))
tocir[i+1,"angle.start"]<-as.character(as.numeric(tocir[i,"angle.end"])+spacer)
}
}
refang<-as.numeric(tocir[1:nrow(ref),2])
qryang<-as.numeric(tocir[(nrow(ref)+1):(nrow(ref)+nrow(query)),2])
maxangr<-max(refang)
maxangq<-max(qryang)
faketocir <- tocir
faketocir[,1]<-""
maxangr<-max(refang)
for(i in 1:nrow(tocir)){
if(270+(maxangr-270)/2<as.numeric(tocir[i,2])){
break
}
}
faketocir[i,1]<-refname
maxangq<-max(qryang)
for(i in 1:nrow(tocir)){
if(maxangr+(maxangq-maxangr)/2<as.numeric(tocir[i,2])){
break
}
}
faketocir[i,1]<-queryname
colors<-rev(colorRampPalette(rev(brewer.pal(n = 7, name = "RdYlBu")))(20))
delta<-(100-lowId)/20
scaleColors<- function(x){
cArray<-c()
for(id in x){
for(i in 1:20){
if(id>=100-(delta*i)){
break
}
}
cArray<-c(cArray,colors[i])
}
return(cArray)
}
addalpha <- function(col, alpha=1){
if(missing(col))
stop("Please provide a vector of colours.")
apply(sapply(col, col2rgb)/255, 2,
function(x)
rgb(x[1], x[2], x[3], alpha=alpha))
}
black<-addalpha("#000000",0.7)
colors<-addalpha(colors,1)
try({
linkf[,"colors"]<-addalpha(scaleColors(fhand$V3),1)
},silent = T)
try({
linkr[,"colors"]<-addalpha(scaleColors(rhand$V3),1)
},silent = T)
pdf(file="synteny.pdf", width = 10, height =10)
if(nrow(data)<=20){
par(mar=c(2,2,2,2))
xorigin=700
yorigin=1000
plot(c(0,2000), c(0,2000), type="n", axes=FALSE, xlab="", ylab="", main="")
circos(R=450, cir=tocir, W=10,type="chr", print.chr.lab=T, scale=F,xc = xorigin,yc = yorigin,
col = c(rep("dark blue",nrow(ref)),rep("#FEE496",nrow(query))),cex = 5)
if(nrow(linkf)>0){
circos(R=440, cir=tocir, mapping=linkf , type="link.pg", lwd=0.5, col=linkf$colors,xc = xorigin,yc = yorigin)
}
if(nrow(linkr)>0){
circos(R=440, cir=tocir, mapping=linkr , type="link.pg", lwd=0.5, col=linkr$colors,xc = xorigin,yc = yorigin)
newlinkr<-linkr
newlinkr$start1<-newlinkr$start1+as.integer((newlinkr$end1-newlinkr$start1)/2)+1
newlinkr$start2<-newlinkr$start2+as.integer((newlinkr$end2-newlinkr$start2)/2)-1
circos(R=440, cir=tocir, W=10, mapping=newlinkr , type="link", lwd=0.6, col=black,xc = xorigin,yc = yorigin)
}
legend(x = 1500, y=1700, legend = c(refname,queryname),
ncol = 1, cex = 0.8, bty="n",
fill=c("dark blue","#FEE496"),
border = c("dark blue","#FEE496"),text.width=c(0.5,0.5),
title="Sequences")
legend(x = 1430, y=1500, legend = c(paste("Reference: ", nrow(ref), " (", sum(ref$V3), " bp)", sep = ""), paste("Query: ",nrow(query), " (", sum(query$V3), " bp)", sep="")),
ncol = 1, cex = 0.8, bty="n",
fill=c("dark blue","#FEE496"),
border = c("dark blue","#FEE496"),text.width=c(0.5,0.5),
title=paste("Contigs align >= ", filterl, " bp", sep=""))
legend(x = 1520, y=1300, legend = c("Forward","Reverse"),lty = c(0,1),merge=T,seg.len = 0.6,
ncol = 1, cex = 0.8, bty="n",
fill="white",
border = "black",text.width=c(0.5,0.5),
title="Strand Match\n(on reference)")
legend(x = 1505, y=1100, legend = c("100","","","","","","","","","",(100-lowId)/2 + lowId,"","","","","","","","",lowId),
ncol = 1, cex = 0.8, bty="n",
fill=colors,
border = colors,
y.intersp = 0.5,
x.intersp = 0.5,text.width=c(0.5,0.5),
title="Identity percent\n")
}else{
par(mar=c(2,2,2,2))
xorigin=750
yorigin=550
plot(c(0,1500), c(0,1500), type="n", axes=FALSE, xlab="", ylab="", main="")
circos(R=450, cir=faketocir, W=10,type="chr", print.chr.lab=T, scale=F,xc = xorigin,yc = yorigin,
col = "white")
circos(R=410, cir=tocir, W=10,type="chr", print.chr.lab=F, scale=F,xc = xorigin,yc = yorigin,
col = c(rep("dark blue",nrow(ref)),rep("#FEE496",nrow(query))),cex = 5)
if(nrow(linkf)>0){
highlightr <- c(420, 450, tocir[1,1], 1, tocir[nrow(ref),1], tocir[nrow(ref),7], "dark blue", NA)
circos(cir=tocir, mapping=highlightr, type="hl",xc = xorigin,yc = yorigin)
circos(R=400, cir=tocir, mapping=linkf , type="link.pg", lwd=0.5, col=linkf$colors,xc = xorigin,yc = yorigin)
}
if(nrow(linkr)>0){
highlightq <- c(420, 450, query[1,1], 1, query[nrow(query),1], query[nrow(query),3], "#FEE496", NA)
circos(cir=tocir, mapping=highlightq, type="hl",xc = xorigin,yc = yorigin)
circos(R=400, cir=tocir, mapping=linkr , type="link.pg", lwd=0.5, col=linkr$colors,xc = xorigin,yc = yorigin)
newlinkr<-linkr
newlinkr$start1<-newlinkr$start1+as.integer((newlinkr$end1-newlinkr$start1)/2)+1
newlinkr$start2<-newlinkr$start2+as.integer((newlinkr$end2-newlinkr$start2)/2)-1
circos(R=400, cir=tocir, W=10, mapping=newlinkr , type="link", lwd=0.3, col=black,xc = xorigin,yc = yorigin)
}
legend(x = 210, y=1500, legend = c(paste("Reference: ", nrow(ref), " (", sum(ref$V3), " bp)", sep = ""), paste("Query: ",nrow(query), " (", sum(query$V3), " bp)", sep="")),
ncol = 1, cex = 0.8, bty="n",
fill=c("dark blue","#FEE496"),
border = c("dark blue","#FEE496"),text.width=c(0.5,0.5),
title=paste("Contigs align >= ", filterl, " bp", sep=""))
legend(x = 270, y=1300, legend = c("Forward","Reverse"),lty = c(0,1),merge=T,seg.len = 0.6,
ncol = 1, cex = 0.8, bty="n",
fill="white",
border = "black",text.width=c(0.5,0.5),
title="Strand Match\\n(on reference)")
legend(x = 990, y=1500, legend = c("100","","","","","","","","","",(100-lowId)/2 + lowId,"","","","","","","","",lowId),
ncol = 1, cex = 0.8, bty="n",
fill=colors,
border = colors,
y.intersp = 0.5,
x.intersp = 0.5,text.width=c(0.5,0.5),
title="Identity percent\\n")
}
dev.off()""")
plotstep.close()
subprocess.call(["Rscript", "handle.R", conn, reference, query, str(alignL), "--vanilla"])
def cleanfiles(ginfo1, ginfo2):
if os.path.isfile("tmp.tsv"):
os.remove("tmp.tsv")
if os.path.isfile("ref.nin"):
os.remove("ref.nin")
if os.path.isfile("ref.nsq"):
os.remove("ref.nsq")
if os.path.isfile("ref.nhr"):
os.remove("ref.nhr")
if os.path.isfile("handle.R"):
os.remove("handle.R")
if os.path.isfile(ginfo1):
os.remove(ginfo1)
if os.path.isfile(ginfo2):
os.remove(ginfo2)
if __name__ == '__main__':
mainV=main()
blastout=mainV.v7
if blastout is None:
blastout=blasting(genome1=mainV.v1, genome2=mainV.v2, evalue=mainV.v4, threads=mainV.v6)
filterBlastOutput(blastout=blastout, alignL=mainV.v3, evalue=mainV.v4, identity=mainV.v5)
ref=parsingGenomes(genome=mainV.v1)
que=parsingGenomes(genome=mainV.v2)
handleR(conn="synteny.tsv",reference=ref, query=que, alignL=mainV.v3)
if mainV.v8 == True:
cleanfiles(ref,que)
sys.exit()
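# Example invocation (mirrors the usage string in main(); file names are placeholders):
#   python GGisy.py -r genome1.fna -q genome2.fna -l 1000 -i 50 -t 4
# which runs makeblastdb/blastn, filters hits by alignment length and identity, and
# renders synteny.pdf via the generated handle.R script.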
|
62461
|
from dateutil import tz
from django.http.response import JsonResponse
from 臺灣言語平臺.項目模型 import 平臺項目表
from 臺灣言語資料庫.資料模型 import 來源表
from 臺灣言語平臺.介面.Json失敗回應 import Json失敗回應
from django.core.exceptions import ObjectDoesNotExist
_臺北時間 = tz.gettz('Asia/Taipei')
_時間輸出樣式 = '%Y-%m-%d %H:%M:%S'
def 轉做臺北時間字串(時間物件):  # convert a datetime to a Taipei-time string
return 時間物件.astimezone(_臺北時間).strftime(_時間輸出樣式)
def 看資料詳細內容(request):  # view the detailed content of a platform item
try:
平臺項目編號 = request.GET['平臺項目編號']
except KeyError:
return Json失敗回應({'錯誤': '沒有平臺項目的編號'})
try:
平臺項目 = 平臺項目表.揣編號(int(平臺項目編號))
資料 = 平臺項目.資料()
except ObjectDoesNotExist:
return Json失敗回應({'錯誤': '這不是合法平臺項目的編號'})
return JsonResponse({
'收錄者': str(資料.收錄者.編號()),
'來源': str(資料.來源.編號()),
'收錄時間': 轉做臺北時間字串(資料.收錄時間),
'種類': 資料.種類.種類,
'語言腔口': 資料.語言腔口.語言腔口,
'版權': 資料.版權.版權,
'著作所在地': 資料.著作所在地.著作所在地,
'著作年': 資料.著作年.著作年,
'屬性內容': 資料.屬性內容(),
'按呢講好': 平臺項目.按呢講好,
'按呢無好': 平臺項目.按呢無好
})
def 看來源內容(request):  # view the content of a source (來源)
try:
來源編號 = request.GET['來源編號']
except KeyError:
return Json失敗回應({'錯誤': '沒有來源編號的參數'})
try:
來源 = 來源表.objects.get(pk=來源編號)
except ObjectDoesNotExist:
return Json失敗回應({'錯誤': '這不是合法的來源編號'})
來源內容 = {
'名': 來源.名,
'屬性內容': 來源.屬性內容(),
}
try:
來源內容['email'] = 來源.使用者.email
來源內容['分數'] = 來源.使用者.分數
except Exception:
pass
return JsonResponse(來源內容)
def 投票(request):  # cast a vote on a platform item
try:
平臺項目編號 = request.POST['平臺項目編號']
decision = request.POST['decision']
except KeyError:
return Json失敗回應({'錯誤': '沒有平臺項目的編號'})
try:
rows_affect = 平臺項目表.這句講了按怎(平臺項目編號, decision)
except ValueError:
return Json失敗回應({'錯誤': 'decision傳錯了'})
return JsonResponse({
'suId': 平臺項目編號,
        'success': rows_affect == 1,
})
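# Usage sketch (URL paths are hypothetical; this module does not include the urls.py
# wiring). With Django's test client the detail view could be called as:
#
# from django.test import Client
# client = Client()
# response = client.get('/看資料詳細內容', {'平臺項目編號': '1'})
#
# 投票 expects POST parameters 平臺項目編號 and decision, where the valid decision
# values are whatever 平臺項目表.這句講了按怎 accepts.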
|
62503
|
from dataclasses import dataclass
from typing import Dict
from typing import Optional
@dataclass(frozen=True)
class CurrentDestinationStatus:
number_of_pending_messages: Optional[int]
number_of_consumers: int
messages_enqueued: int
messages_dequeued: int
@dataclass(frozen=True)
class ConsumerStatus:
address_to_destination_details: Optional[str]
destination_name: str
session_id: Optional[int]
enqueues: Optional[int]
dequeues: Optional[int]
dispatched: Optional[int]
dispatched_queue: Optional[int]
prefetch: int
max_pending: Optional[int]
exclusive: bool
retroactive: Optional[bool]
@dataclass(frozen=True)
class MessageStatus:
message_id: Optional[str]
details: Dict
persistent: Optional[bool]
correlation_id: str
properties: Optional[Dict]
@dataclass(frozen=True)
class SubscriberSetup:
address_to_subscriber_details: str
subscriber_id: str
destination: str
pending_queue_size: int
dispatched_queue_size: int
dispatched_counter: int
enqueue_counter: int
dequeue_counter: int
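# Construction sketch (field values are made up for illustration):
if __name__ == "__main__":
    status = CurrentDestinationStatus(
        number_of_pending_messages=3,
        number_of_consumers=1,
        messages_enqueued=10,
        messages_dequeued=7,
    )
    print(status)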
|
62519
|
import sklearn.datasets
import sklearn.model_selection
import sklearn.linear_model
import sklearn.metrics
import numpy
import compare_auc_delong_xu
import unittest
import scipy.stats
class TestIris(unittest.TestCase):
@classmethod
def setUpClass(cls):
data = sklearn.datasets.load_iris()
x_train, x_test, y_train, cls.y_test = sklearn.model_selection.train_test_split(
data.data, (data.target == 1).astype(numpy.int), test_size=0.8, random_state=42)
cls.predictions = sklearn.linear_model.LogisticRegression(solver="lbfgs").fit(
x_train, y_train).predict_proba(x_test)[:, 1]
cls.sklearn_auc = sklearn.metrics.roc_auc_score(cls.y_test, cls.predictions)
def test_variance_const(self):
auc, variance = compare_auc_delong_xu.delong_roc_variance(self.y_test, self.predictions)
numpy.testing.assert_allclose(self.sklearn_auc, auc)
numpy.testing.assert_allclose(0.0015359814789736538, variance)
class TestGauss(unittest.TestCase):
x_distr = scipy.stats.norm(0.5, 1)
y_distr = scipy.stats.norm(-0.5, 1)
def test_variance(self):
sample_size_x = 7
sample_size_y = 14
n_trials = 50000
aucs = numpy.empty(n_trials)
variances = numpy.empty(n_trials)
numpy.random.seed(1234235)
labels = numpy.concatenate([numpy.ones(sample_size_x), numpy.zeros(sample_size_y)])
for trial in range(n_trials):
scores = numpy.concatenate([
self.x_distr.rvs(sample_size_x),
self.y_distr.rvs(sample_size_y)])
aucs[trial] = sklearn.metrics.roc_auc_score(labels, scores)
auc_delong, variances[trial] = compare_auc_delong_xu.delong_roc_variance(
labels, scores)
numpy.testing.assert_allclose(aucs[trial], auc_delong)
numpy.testing.assert_allclose(variances.mean(), aucs.var(), rtol=0.1)
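# A sketch of how the DeLong variance is typically used downstream (same
# compare_auc_delong_xu module as above): a normal-approximation confidence
# interval for the AUC. Not part of the original tests.
def auc_confidence_interval(y_true, y_scores, alpha=0.95):
    auc, variance = compare_auc_delong_xu.delong_roc_variance(y_true, y_scores)
    z = scipy.stats.norm.ppf(1 - (1 - alpha) / 2)  # two-sided critical value
    half_width = z * numpy.sqrt(variance)
    return auc - half_width, auc + half_width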
|
62551
|
import torch
import cv2 as cv
import numpy as np
from sklearn.neighbors import NearestNeighbors
from .model_utils import spread_feature
def optimize_image_mask(image_mask, sp_image, nK=4, th=1e-2):
mask_pts = image_mask.reshape(-1)
xyz_pts = sp_image.reshape(-1, 3)
xyz_pts = xyz_pts[mask_pts > 0.5, :]
Neighbors = NearestNeighbors(n_neighbors=nK + 1, algorithm='kd_tree').fit(xyz_pts)
nn_dist, nn_idx = Neighbors.kneighbors(xyz_pts) # N,nK
nn_dist = nn_dist[:, 1:]
valid = (np.sum((nn_dist < th).astype(np.float), axis=1) == nK).astype(np.float)
optimized_mask = image_mask.reshape(-1)
optimized_mask[mask_pts > 0.5] = valid
optimized_mask = optimized_mask.reshape(image_mask.shape[0], image_mask.shape[1])
return optimized_mask
def generate_final_mask(image_learned_uv, image_mask,
image_resize_factor, mask_container_low_res, final_gim):
"""
Post Process Algorithm to generate mask of the unwrapped chart
Parameters
----------
image_learned_uv: [H,W,2]
image_mask: [H,W]
image_resize_factor: float
mask_container_low_res: a predefined tensor with intermediate low resolution
final_gim: a predefined tensor with target high resolution
"""
# resize (larger) rgb and uv with Bi-linear up-sampling
resized_uv = cv.resize(image_learned_uv, dsize=(image_resize_factor * image_learned_uv.shape[0],
image_resize_factor * image_learned_uv.shape[1]),
interpolation=cv.INTER_LINEAR)
resized_mask = cv.resize(image_mask, dsize=(image_resize_factor * image_learned_uv.shape[0],
image_resize_factor * image_learned_uv.shape[1]),
interpolation=cv.INTER_LINEAR)
resized_mask = (resized_mask > 0.5).astype(np.float)
# use gradient to remove the edge
discontinuous_mask_u = cv.Laplacian(image_learned_uv[..., 0], ddepth=cv.CV_32F) # small gradient map
discontinuous_mask_v = cv.Laplacian(image_learned_uv[..., 1], ddepth=cv.CV_32F) # small gradient map
# use the max and min in latent u and v to find the threshhold
u_max = (image_learned_uv[..., 0] * image_mask).max()
v_max = (image_learned_uv[..., 1] * image_mask).max()
u_min = (image_learned_uv[..., 0] * image_mask + (1.0 - image_mask)).min()
v_min = (image_learned_uv[..., 1] * image_mask + (1.0 - image_mask)).min()
u_th = (u_max - u_min) / 30
v_th = (v_max - v_min) / 30
discontinuous_mask_u = (discontinuous_mask_u > u_th).astype(np.float) * image_mask
discontinuous_mask_v = (discontinuous_mask_v > v_th).astype(np.float) * image_mask
discontinuous_mask = ((discontinuous_mask_u + discontinuous_mask_v) > 0).astype(np.float)
# use the mask to remove the boundary
boundary_recovery_mask = (cv.Laplacian(image_mask, ddepth=cv.CV_32F) > 0.01).astype(np.float)
discontinuous_mask = discontinuous_mask * (1.0 - boundary_recovery_mask)
resized_discontinuous_mask = cv.resize(discontinuous_mask,
dsize=(image_resize_factor * image_learned_uv.shape[0],
image_resize_factor * image_learned_uv.shape[1]),
interpolation=cv.INTER_NEAREST)
# make the small mask & texture
high_res_mask = torch.from_numpy(resized_mask * (1.0 - resized_discontinuous_mask)) \
.unsqueeze(0).unsqueeze(0).cuda().float() # 1,1,R,R
high_res_uv = torch.from_numpy(resized_uv).permute(2, 0, 1).unsqueeze(0).cuda().float()
low_res_mask = mask_container_low_res.cuda()
low_res_mask = spread_feature(low_res_mask, high_res_uv, high_res_mask, high_res_mask)
# use close to remove the holes in small mask and then resize
low_res_mask_closed = low_res_mask.detach().cpu().squeeze(0).squeeze(0).numpy() # R,R
close_k_size = int(final_gim.shape[2] / 100)
close_kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (close_k_size, close_k_size))
final_mask_np = cv.resize(low_res_mask_closed, dsize=(final_gim.shape[2],
final_gim.shape[2]),
interpolation=cv.INTER_NEAREST) # R,R,3
final_mask_np = (final_mask_np > 0).astype(np.float)
final_mask_np = cv.morphologyEx(final_mask_np, cv.MORPH_OPEN, close_kernel)
return final_mask_np
def generate_texture(sp_image, full_gim, image_rgb, image_mask, final_mask_np, final_res, nK=4, th=1e-2):
# prepare root and query points form the image and from the high-res chart
root_xyz_np = sp_image.reshape(-1, 3) # H*W,3
root_rgb_np = image_rgb.reshape(-1, 3) # H*W,3
_image_mask = image_mask.reshape(-1) # H*W
root_xyz_np = root_xyz_np[_image_mask > 0.5, :] # M,2 [0,1]
root_rgb_np = root_rgb_np[_image_mask > 0.5, :] # M,3 [0,1]
query_xyz_np = full_gim.reshape(-1, 3) # R*R,3
_final_mask_np = final_mask_np.reshape(-1) # R*R
query_xyz_np = query_xyz_np[_final_mask_np > 0.5, :] # N,3 [0,1]
# finding nearest root pixel points
Neighbors = NearestNeighbors(n_neighbors=nK, algorithm='kd_tree').fit(root_xyz_np)
nn_dist, nn_idx = Neighbors.kneighbors(query_xyz_np) # N,nK
# optimize the gim mask
valid = (nn_dist[:, 0] < th).astype(np.float)
optimized_final_mask_np = final_mask_np.reshape(-1).copy()
optimized_final_mask_np[_final_mask_np > 0.5] = valid
optimized_final_mask_np = optimized_final_mask_np.reshape(final_mask_np.shape[0], final_mask_np.shape[1])
# do interpolation based on chart distance
interpolation_weight = nn_dist.copy()
interpolation_weight = 1 - interpolation_weight / np.sum(interpolation_weight, 1, keepdims=True)
interpolation_weight = interpolation_weight / np.sum(interpolation_weight, 1, keepdims=True)
query_rgb_np = np.zeros((query_xyz_np.shape[0], 3))
for kdx in range(nK):
nn_color = root_rgb_np[nn_idx[:, kdx], :]
query_rgb_np += nn_color * interpolation_weight[:, kdx][..., np.newaxis]
final_texture_np = np.ones((final_res ** 2, 3))
final_texture_np[_final_mask_np > 0.5, :] = query_rgb_np
final_texture_np = final_texture_np.reshape(final_res, final_res, 3)
return final_texture_np, optimized_final_mask_np
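# Pipeline sketch (shapes and the resize factor are indicative, not from the original
# code): given per-pixel xyz (sp_image, H x W x 3), rgb (image_rgb), a foreground mask
# (image_mask, H x W), learned uv coordinates (image_learned_uv, H x W x 2) and a
# high-resolution geometry image (full_gim, R x R x 3), a typical call order is:
#
#   mask = optimize_image_mask(image_mask, sp_image)
#   final_mask = generate_final_mask(image_learned_uv, mask, 4,
#                                    mask_container_low_res, final_gim)
#   texture, final_mask = generate_texture(sp_image, full_gim, image_rgb, mask,
#                                          final_mask, final_res=final_gim.shape[2])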
|
62561
|
from . import builder, config, exceptions, generator, logging
import click
import os
import sys
__all__ = []
LOG = logging.getLogger(__name__)
@click.group()
@click.option('-v', '--verbose', is_flag=True)
def promenade(*, verbose):
if _debug():
verbose = True
logging.setup(verbose=verbose)
@promenade.command('build-all', help='Construct all scripts')
@click.option(
'-o',
'--output-dir',
default='.',
type=click.Path(
exists=True, file_okay=False, dir_okay=True, resolve_path=True),
required=True,
help='Location to write complete cluster configuration.')
@click.option('--validators', is_flag=True, help='Generate validation scripts')
@click.option(
'--leave-kubectl',
is_flag=True,
help='Leave behind kubectl on joined nodes')
@click.argument('config_files', nargs=-1, type=click.File('rb'))
def build_all(*, config_files, leave_kubectl, output_dir, validators):
debug = _debug()
try:
c = config.Configuration.from_streams(
debug=debug,
substitute=True,
allow_missing_substitutions=False,
leave_kubectl=leave_kubectl,
streams=config_files)
b = builder.Builder(c, validators=validators)
b.build_all(output_dir=output_dir)
except exceptions.PromenadeException as e:
e.display(debug=debug)
sys.exit(e.EXIT_CODE)
@promenade.command('generate-certs', help='Generate certs for a site')
@click.option(
'-o',
'--output-dir',
type=click.Path(
exists=True, file_okay=False, dir_okay=True, resolve_path=True),
required=True,
help='Location to write *-certificates.yaml')
@click.argument('config_files', nargs=-1, type=click.File('rb'))
def generate_certs(*, config_files, output_dir):
debug = _debug()
try:
c = config.Configuration.from_streams(
debug=debug,
streams=config_files,
substitute=True,
allow_missing_substitutions=True,
validate=False)
g = generator.Generator(c)
g.generate(output_dir)
except exceptions.PromenadeException as e:
e.display(debug=debug)
sys.exit(e.EXIT_CODE)
def _debug():
    return os.environ.get('PROMENADE_DEBUG', '').lower() in {'1', 'true'}
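# Usage sketch (not part of the CLI itself): with click's test runner the commands
# above can be exercised in-process; the config file name is a placeholder.
#
# from click.testing import CliRunner
# runner = CliRunner()
# result = runner.invoke(promenade, ['build-all', '-o', '.', 'site-config.yaml'])
# print(result.exit_code, result.output)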
|
62579
|
OntCversion = '2.0.0'
from ontology.interop.Ontology.Contract import Migrate
# from ontology.interop.Ontology.Contract import Destroy
from ontology.interop.System.Runtime import Notify
from ontology.interop.System.Storage import Put, GetContext, Get
KEY = "KEY"
NAME = "SecondName"
def Main(operation, args):
# if operation == "DestroyContract":
# return DestroyContract()
if operation == "MigrateContract":
if len(args) != 1:
Notify("param error")
return False
return MigrateContract(args[0])
if operation == "put":
return put()
if operation == "get":
return get()
if operation == "name":
return NAME
# def DestroyContract():
# Destroy()
# Notify(["Destory"])
# return True
def MigrateContract(code):
"""
Note that the existing contract will be replaced by the newly migrated contract
:param code: your avm code
:return:
"""
res = Migrate(code, True, "name", "version", "author", "email", "description")
assert(res)
Notify(["Migrate successfully"])
return True
def get():
return Get(GetContext(), KEY)
def put():
Put(GetContext(), KEY, 898)
return True
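# Dispatch sketch (illustrative only; on-chain these entry points are reached through
# an Ontology SDK invocation rather than a direct Python call):
#   Main("put", [])                      -> stores 898 under KEY
#   Main("get", [])                      -> returns the stored value
#   Main("name", [])                     -> returns "SecondName"
#   Main("MigrateContract", [avm_code])  -> replaces this contract with avm_code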
|
62607
|
import torch
import ignite.distributed as idist
from tests.ignite.distributed.utils import (
_sanity_check,
_test_distrib__get_max_length,
_test_distrib_all_gather,
_test_distrib_all_reduce,
_test_distrib_barrier,
_test_distrib_broadcast,
_test_sync,
)
def test_no_distrib(capsys):
assert idist.backend() is None
if torch.cuda.is_available():
assert idist.device().type == "cuda"
else:
assert idist.device().type == "cpu"
assert idist.get_rank() == 0
assert idist.get_world_size() == 1
assert idist.get_local_rank() == 0
assert idist.model_name() == "serial"
from ignite.distributed.utils import _model, _SerialModel
_sanity_check()
assert isinstance(_model, _SerialModel)
idist.show_config()
captured = capsys.readouterr()
out = captured.err.split("\r")
out = list(map(lambda x: x.strip(), out))
out = list(filter(None, out))
assert "ignite.distributed.utils INFO: distributed configuration: serial" in out[-1]
assert "ignite.distributed.utils INFO: backend: None" in out[-1]
if torch.cuda.is_available():
assert "ignite.distributed.utils INFO: device: cuda" in out[-1]
else:
assert "ignite.distributed.utils INFO: device: cpu" in out[-1]
assert "ignite.distributed.utils INFO: rank: 0" in out[-1]
assert "ignite.distributed.utils INFO: local rank: 0" in out[-1]
assert "ignite.distributed.utils INFO: world size: 1" in out[-1]
def test_sync_no_dist():
from ignite.distributed.comp_models import _SerialModel
_test_sync(_SerialModel)
def test_idist_methods_no_dist():
assert idist.get_world_size() < 2
assert idist.backend() is None, f"{idist.backend()}"
def test_idist__model_methods_no_dist():
_test_distrib__get_max_length("cpu")
if torch.cuda.device_count() > 1:
_test_distrib__get_max_length("cuda")
def test_idist_collective_ops_no_dist():
_test_distrib_all_reduce("cpu")
_test_distrib_all_gather("cpu")
_test_distrib_barrier("cpu")
_test_distrib_broadcast("cpu")
if torch.cuda.device_count() > 1:
_test_distrib_all_reduce("cuda")
_test_distrib_all_gather("cuda")
_test_distrib_barrier("cuda")
_test_distrib_broadcast("cuda")
|
62624
|
from __future__ import print_function
import argparse
from collections import OrderedDict
import json
import os
import logging
from keras.callbacks import EarlyStopping
from sklearn.preprocessing import normalize
from sklearn.metrics import roc_curve, auc, roc_auc_score, precision_score, recall_score, f1_score, accuracy_score, average_precision_score
from scipy.sparse import csr_matrix
from keras.utils.io_utils import HDF5Matrix
#from keras.utils.visualize_util import plot
from keras.optimizers import SGD, Adam
from sklearn.metrics import r2_score
import numpy as np
import theano.tensor as tt
import pandas as pd
import random
import common
import models
from predict import obtain_predictions
from eval import do_eval
import h5py
class Config(object):
"""Configuration for the training process."""
def __init__(self, params, normalize=False, whiten=True):
self.model_id = common.get_next_model_id()
self.norm = normalize
self.whiten = whiten
self.x_path = '%s_%sx%s' % (params['dataset']['dataset'],params['dataset']['npatches'],params['dataset']['window'])
self.y_path = '%s_%s_%s' % (params['dataset']['fact'],params['dataset']['dim'],params['dataset']['dataset'])
self.dataset_settings = params['dataset']
self.training_params = params['training']
self.model_arch = params['cnn']
self.predicting_params = params['predicting']
def get_dict(self):
object_dict = self.__dict__
first_key = "model_id"
conf_dict = OrderedDict({first_key: object_dict[first_key]})
conf_dict.update(object_dict)
return conf_dict
def _squared_magnitude(x):
return tt.sqr(x).sum(axis=-1)
def _magnitude(x):
return tt.sqrt(tt.maximum(_squared_magnitude(x), np.finfo(x.dtype).tiny))
def cosine(x, y):
return tt.clip((1 - (x * y).sum(axis=-1) /
(_magnitude(x) * _magnitude(y))) / 2, 0, 1)
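# For reference, cosine() above is the cosine distance rescaled to [0, 1]:
#   d(x, y) = clip((1 - x.y / (|x| |y|)) / 2, 0, 1)
# so identical directions give 0 and opposite directions give 1.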
def load_sparse_csr(filename):
loader = np.load(filename)
return csr_matrix(( loader['data'], loader['indices'], loader['indptr']),
shape = loader['shape'])
def build_model(config):
"""Builds the cnn."""
params = config.model_arch
get_model = getattr(models, 'get_model_'+str(params['architecture']))
model = get_model(params)
#model = model_kenun.build_convnet_model(params)
# Learning setup
t_params = config.training_params
sgd = SGD(lr=t_params["learning_rate"], decay=t_params["decay"],
momentum=t_params["momentum"], nesterov=t_params["nesterov"])
adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
optimizer = eval(t_params['optimizer'])
metrics = ['mean_squared_error']
if config.model_arch["final_activation"] == 'softmax':
metrics.append('categorical_accuracy')
if t_params['loss_func'] == 'cosine':
loss_func = eval(t_params['loss_func'])
else:
loss_func = t_params['loss_func']
model.compile(loss=loss_func, optimizer=optimizer,metrics=metrics)
return model
def load_data_preprocesed(params, X_path, Y_path, dataset, val_percent, test_percent, n_samples, with_metadata=False, only_metadata=False, metadata_source='rovi'):
    factors = np.load(common.DATASETS_DIR+'/y_train_'+Y_path+'.npy')  # NOTE: remove S
index_factors = open(common.DATASETS_DIR+'/items_index_train_'+dataset+'.tsv').read().splitlines()
if not only_metadata:
all_X = np.load(common.TRAINDATA_DIR+'/X_train_'+X_path+'.npy')
index_train = open(common.TRAINDATA_DIR+'/index_train_%s.tsv' % (X_path)).read().splitlines()
all_Y = np.zeros((len(index_train),factors.shape[1]))
index_factors_inv = dict()
for i,item in enumerate(index_factors):
index_factors_inv[item] = i
for i,item in enumerate(index_train):
all_Y[i,:] = factors[index_factors_inv[item]]
else:
all_Y = factors
if with_metadata:
if 'w2v' in metadata_source:
all_X_meta = np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy' % (metadata_source,dataset))[:,:int(params['cnn']['sequence_length'])]
elif 'model' in metadata_source or not params['dataset']['sparse']:
all_X_meta = np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy' % (metadata_source,dataset))
else:
all_X_meta = load_sparse_csr(common.TRAINDATA_DIR+'/X_train_%s_%s.npz' % (metadata_source,dataset)).todense()
all_X_in_meta = all_X = all_X_meta
print(all_X.shape)
print(all_Y.shape)
if n_samples != 'all':
n_samples = int(n_samples)
all_X = all_X[:n_samples]
all_Y = all_Y[:n_samples]
if with_metadata:
all_X_in_meta = all_X_in_meta[:n_samples]
if params['training']['normalize_y'] == True:
normalize(all_Y,copy=False)
if params['training']["val_from_file"]:
Y_val = np.load(common.DATASETS_DIR+'/y_val_'+Y_path+'.npy')
        Y_test = np.load(common.DATASETS_DIR+'/y_test_'+Y_path+'.npy')  # !!! NOTE: remove S from trainS
if params['dataset']['sparse']:
X_val = load_sparse_csr(common.TRAINDATA_DIR+'/X_val_%s_%s.npz' % (metadata_source,dataset)).todense()
X_test = load_sparse_csr(common.TRAINDATA_DIR+'/X_test_%s_%s.npz' % (metadata_source,dataset)).todense()
else:
X_val = np.load(common.TRAINDATA_DIR+'/X_val_%s_%s.npy' % (metadata_source,dataset))
X_test = np.load(common.TRAINDATA_DIR+'/X_test_%s_%s.npy' % (metadata_source,dataset))
X_train = all_X
Y_train = all_Y
else:
N = all_Y.shape[0]
train_percent = 1 - val_percent - test_percent
N_train = int(train_percent * N)
N_val = int(val_percent * N)
logging.debug("Training data points: %d" % N_train)
logging.debug("Validation data points: %d" % N_val)
logging.debug("Test data points: %d" % (N - N_train - N_val))
if not only_metadata:
# Slice data
X_train = all_X[:N_train]
X_val = all_X[N_train:N_train + N_val]
X_test = all_X[N_train + N_val:]
Y_train = all_Y[:N_train]
Y_val = all_Y[N_train:N_train + N_val]
Y_test = all_Y[N_train + N_val:]
if with_metadata:
if only_metadata:
X_train = all_X_in_meta[:N_train]
X_val = all_X_in_meta[N_train:N_train + N_val]
X_test = all_X_in_meta[N_train + N_val:]
else:
X_train = [X_train,all_X_in_meta[:N_train]]
X_val = [X_val,all_X_in_meta[N_train:N_train + N_val]]
X_test = [X_test,all_X_in_meta[N_train + N_val:]]
return X_train, Y_train, X_val, Y_val, X_test, Y_test
def load_data_hf5(params,val_percent, test_percent):
hdf5_file = common.PATCHES_DIR+"/patches_train_%s_%s.hdf5" % (params['dataset']['dataset'],params['dataset']['window'])
f = h5py.File(hdf5_file,"r")
N = f["targets"].shape[0]
f.close()
train_percent = 1 - val_percent - test_percent
N_train = int(train_percent * N)
N_val = int(val_percent * N)
X_train = HDF5Matrix(hdf5_file, 'features', start=0, end=N_train)
Y_train = HDF5Matrix(hdf5_file, 'targets', start=0, end=N_train)
X_val = HDF5Matrix(hdf5_file, 'features', start=N_train, end=N_train+N_val)
Y_val = HDF5Matrix(hdf5_file, 'targets', start=N_train, end=N_train+N_val)
X_test = HDF5Matrix(hdf5_file, 'features', start=N_train+N_val, end=N)
Y_test = HDF5Matrix(hdf5_file, 'targets', start=N_train+N_val, end=N)
return X_train, Y_train, X_val, Y_val, X_test, Y_test, N_train
def load_data_hf5_memory(params,val_percent, test_percent, y_path, id2gt, X_meta = None, val_from_file = False):
if val_from_file:
hdf5_file = common.PATCHES_DIR+"/patches_train_%s_%sx%s.hdf5" % (params['dataset']['dataset'],params['dataset']['npatches'],params['dataset']['window'])
f = h5py.File(hdf5_file,"r")
index_train = f["index"][:]
index_train = np.delete(index_train, np.where(index_train == ""))
N_train = index_train.shape[0]
val_hdf5_file = common.PATCHES_DIR+"/patches_val_%s_%sx%s.hdf5" % (params['dataset']['dataset'],params['dataset']['npatches'],params['dataset']['window'])
f_val = h5py.File(val_hdf5_file,"r")
X_val = f_val['features'][:]
#Y_val = f_val['targets'][:]
factors_val = np.load(common.DATASETS_DIR+'/y_val_'+y_path+'.npy')
index_factors_val = open(common.DATASETS_DIR+'/items_index_val_'+params['dataset']['dataset']+'.tsv').read().splitlines()
id2gt_val = dict((index,factor) for (index,factor) in zip(index_factors_val,factors_val))
index_val = [i for i in f_val['index'][:] if i in id2gt_val]
X_val = np.delete(X_val, np.where(index_val == ""), axis=0)
index_val = np.delete(index_val, np.where(index_val == ""))
Y_val = np.asarray([id2gt_val[id] for id in index_val])
test_hdf5_file = common.PATCHES_DIR+"/patches_test_%s_%sx%s.hdf5" % (params['dataset']['dataset'],params['dataset']['npatches'],params['dataset']['window'])
f_test = h5py.File(test_hdf5_file,"r")
X_test = f_test['features'][:]
#Y_test = f_test['targets'][:]
factors_test = np.load(common.DATASETS_DIR+'/y_test_'+y_path+'.npy')
index_factors_test = open(common.DATASETS_DIR+'/items_index_test_'+params['dataset']['dataset']+'.tsv').read().splitlines()
id2gt_test = dict((index,factor) for (index,factor) in zip(index_factors_test,factors_test))
index_test = [i for i in f_test['index'][:] if i in id2gt_test]
X_test = np.delete(X_test, np.where(index_test == ""), axis=0)
index_test = np.delete(index_test, np.where(index_test == ""))
Y_test = np.asarray([id2gt_test[id] for id in index_test])
else:
hdf5_file = common.PATCHES_DIR+"/patches_train_%s_%sx%s.hdf5" % (params['dataset']['dataset'],params['dataset']['npatches'],params['dataset']['window'])
f = h5py.File(hdf5_file,"r")
index_all = f["index"][:]
N = index_all.shape[0]
train_percent = 1 - val_percent - test_percent
N_train = int(train_percent * N)
N_val = int(val_percent * N)
X_val = f['features'][N_train:N_train+N_val]
index_val = f['index'][N_train:N_train+N_val]
X_val = np.delete(X_val, np.where(index_val == ""), axis=0)
index_val = np.delete(index_val, np.where(index_val == ""))
Y_val = np.asarray([id2gt[id] for id in index_val])
X_test = f['features'][N_train+N_val:N]
index_test = f['index'][N_train+N_val:N]
print(index_test.shape)
print(X_test.shape)
X_test = np.delete(X_test, np.where(index_test == ""), axis=0)
index_test = np.delete(index_test, np.where(index_test == ""))
print(index_test.shape)
print(X_test.shape)
Y_test = np.asarray([id2gt[id] for id in index_test])
print(Y_test.shape)
index_train = f['index'][:N_train]
index_train = np.delete(index_train, np.where(index_train == ""))
N_train = index_train.shape[0]
        if X_meta is not None:
X_val = [X_val,X_meta[N_train:N_train+N_val]]
X_test = [X_test,X_meta[N_train+N_val:N]]
return X_val, Y_val, X_test, Y_test, N_train
def batch_block_generator(params, y_path, N_train, id2gt, X_meta=None,
val_from_file=False):
hdf5_file = common.PATCHES_DIR+"/patches_train_%s_%sx%s.hdf5" % (params['dataset']['dataset'],params['dataset']['npatches'],params['dataset']['window'])
f = h5py.File(hdf5_file,"r")
block_step = 50000
batch_size = params['training']['n_minibatch']
randomize = True
with_meta = False
    if X_meta is not None:
with_meta = True
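    # Infinite generator: walk the training HDF5 in blocks of `block_step`
    # patches, drop entries with an empty index, look up targets via id2gt,
    # and yield mini-batches of `batch_size` examples.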
while 1:
for i in range(0, N_train, block_step):
x_block = f['features'][i:min(N_train, i+block_step)]
index_block = f['index'][i:min(N_train, i+block_step)]
#y_block = f['targets'][i:min(N_train,i+block_step)]
x_block = np.delete(x_block, np.where(index_block == ""), axis=0)
index_block = np.delete(index_block, np.where(index_block == ""))
y_block = np.asarray([id2gt[id] for id in index_block])
if params['training']['normalize_y']:
normalize(y_block, copy=False)
            items_list = list(range(x_block.shape[0]))
            if randomize:
                random.shuffle(items_list)
for j in range(0, len(items_list), batch_size):
if j+batch_size <= x_block.shape[0]:
items_in_batch = items_list[j:j+batch_size]
x_batch = x_block[items_in_batch]
y_batch = y_block[items_in_batch]
if with_meta:
x_batch = [x_batch, X_meta[items_in_batch]]
yield (x_batch, y_batch)
def process(params,with_predict=True,with_eval=True):
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
params['cnn']['n_out'] = int(params['dataset']['dim'])
#params['cnn']['n_frames'] = int(params['dataset']['window'] * SR / float(HR))
with_metadata = params['dataset']['with_metadata']
only_metadata = params['dataset']['only_metadata']
metadata_source = params['dataset']['meta-suffix']
if with_metadata:
if 'w2v' in metadata_source:
X_meta = np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy' % (metadata_source,params['dataset']['dataset']))[:,:int(params['cnn']['sequence_length'])]
params['cnn']['n_metafeatures'] = len(X_meta[0])
if 'meta-suffix2' in params['dataset']:
X_meta2 = np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy' % (params['dataset']['meta-suffix2'],params['dataset']['dataset']))
params['cnn']['n_metafeatures2'] = len(X_meta2[0])
if 'meta-suffix3' in params['dataset']:
X_meta3 = np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy' % (params['dataset']['meta-suffix3'],params['dataset']['dataset']))
params['cnn']['n_metafeatures3'] = len(X_meta3[0])
if 'meta-suffix4' in params['dataset']:
X_meta4 = np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy' % (params['dataset']['meta-suffix4'],params['dataset']['dataset']))
params['cnn']['n_metafeatures4'] = len(X_meta4[0])
elif 'model' in metadata_source or not params['dataset']['sparse']:
X_meta = np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy' % (metadata_source,params['dataset']['dataset']))
params['cnn']['n_metafeatures'] = len(X_meta[0])
if 'meta-suffix2' in params['dataset']:
X_meta2 = np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy' % (params['dataset']['meta-suffix2'],params['dataset']['dataset']))
params['cnn']['n_metafeatures2'] = len(X_meta2[0])
if 'meta-suffix3' in params['dataset']:
X_meta3 = np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy' % (params['dataset']['meta-suffix3'],params['dataset']['dataset']))
params['cnn']['n_metafeatures3'] = len(X_meta3[0])
if 'meta-suffix4' in params['dataset']:
X_meta4 = np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy' % (params['dataset']['meta-suffix4'],params['dataset']['dataset']))
params['cnn']['n_metafeatures4'] = len(X_meta4[0])
else:
X_meta = load_sparse_csr(common.TRAINDATA_DIR+'/X_train_%s_%s.npz' % (metadata_source,params['dataset']['dataset'])).todense()
params['cnn']['n_metafeatures'] = X_meta.shape[1]
if 'meta-suffix2' in params['dataset']:
X_meta2 = load_sparse_csr(common.TRAINDATA_DIR+'/X_train_%s_%s.npz' % (params['dataset']['meta-suffix2'],params['dataset']['dataset']))
params['cnn']['n_metafeatures2'] = X_meta2.shape[1]
            if 'meta-suffix3' in params['dataset']:
                X_meta3 = load_sparse_csr(common.TRAINDATA_DIR+'/X_train_%s_%s.npz' % (params['dataset']['meta-suffix3'],params['dataset']['dataset']))
                params['cnn']['n_metafeatures3'] = X_meta3.shape[1]
            if 'meta-suffix4' in params['dataset']:
                X_meta4 = load_sparse_csr(common.TRAINDATA_DIR+'/X_train_%s_%s.npz' % (params['dataset']['meta-suffix4'],params['dataset']['dataset']))
                params['cnn']['n_metafeatures4'] = X_meta4.shape[1]
print(X_meta.shape)
else:
X_meta = None
config = Config(params)
model_dir = os.path.join(common.MODELS_DIR, config.model_id)
common.ensure_dir(common.MODELS_DIR)
common.ensure_dir(model_dir)
model_file = os.path.join(model_dir, config.model_id + common.MODEL_EXT)
logging.debug("Building Network...")
#model = build_model(config)
model = build_model(config)
print(model.summary())
#plot(model, to_file='model2.png', show_shapes=True)
trained_model = config.get_dict()
# Save model
#plot(model, to_file=os.path.join(model_dir, config.model_id + PLOT_EXT))
common.save_model(model, model_file)
logging.debug(trained_model["model_id"])
logging.debug("Loading Data...")
with_generator = True
if only_metadata:
X_train, Y_train, X_val, Y_val, X_test, Y_test = \
load_data_preprocesed(params, config.x_path, config.y_path, params['dataset']['dataset'], config.training_params["validation"],
config.training_params["test"], config.dataset_settings["nsamples"], with_metadata, only_metadata, metadata_source)
if 'meta-suffix2' in params['dataset']:
X_train2, Y_train2, X_val2, Y_val2, X_test2, Y_test2 = \
load_data_preprocesed(params, config.x_path, config.y_path, params['dataset']['dataset'], config.training_params["validation"],
config.training_params["test"], config.dataset_settings["nsamples"], with_metadata, only_metadata, params['dataset']['meta-suffix2'])
X_train = [X_train,X_train2]
X_val = [X_val,X_val2]
X_test = [X_test,X_test2]
print("X_train bi", len(X_train))
if 'meta-suffix3' in params['dataset']:
X_train3, Y_train3, X_val3, Y_val3, X_test3, Y_test3 = \
load_data_preprocesed(params, config.x_path, config.y_path, params['dataset']['dataset'], config.training_params["validation"],
config.training_params["test"], config.dataset_settings["nsamples"], with_metadata, only_metadata, params['dataset']['meta-suffix3'])
X_train.append(X_train3)
X_val.append(X_val3)
X_test.append(X_test3)
print("X_train tri", len(X_train))
if 'meta-suffix4' in params['dataset']:
X_train4, Y_train4, X_val4, Y_val4, X_test4, Y_test4 = \
load_data_preprocesed(params, config.x_path, config.y_path, params['dataset']['dataset'], config.training_params["validation"],
config.training_params["test"], config.dataset_settings["nsamples"], with_metadata, only_metadata, params['dataset']['meta-suffix4'])
X_train.append(X_train4)
X_val.append(X_val4)
X_test.append(X_test4)
print("X_train four", len(X_train))
else:
if with_generator:
id2gt = dict()
factors = np.load(common.DATASETS_DIR+'/y_train_'+config.y_path+'.npy')
index_factors = open(common.DATASETS_DIR+'/items_index_train_'+params['dataset']['dataset']+'.tsv').read().splitlines()
id2gt = dict((index,factor) for (index,factor) in zip(index_factors,factors))
X_val, Y_val, X_test, Y_test, N_train = load_data_hf5_memory(params,config.training_params["validation"],config.training_params["test"],config.y_path,id2gt,X_meta,config.training_params["val_from_file"])
if params['dataset']['nsamples'] != 'all':
N_train = min(N_train,params['dataset']['nsamples'])
else:
X_train, Y_train, X_val, Y_val, X_test, Y_test, N_train = load_data_hf5(params,config.training_params["validation"],config.training_params["test"])
trained_model["whiten_scaler"] = common.TRAINDATA_DIR+'/scaler_%s.pk' % config.x_path
logging.debug("Training...")
if config.model_arch["final_activation"] == 'softmax':
monitor_metric = 'val_categorical_accuracy'
else:
monitor_metric = 'val_loss'
early_stopping = EarlyStopping(monitor=monitor_metric, patience=4)
if only_metadata:
epochs = model.fit(X_train, Y_train,
batch_size=config.training_params["n_minibatch"],
#shuffle='batch',
nb_epoch=config.training_params["n_epochs"],
verbose=1, validation_data=(X_val, Y_val),
callbacks=[early_stopping])
else:
if with_generator:
print(N_train)
epochs = model.fit_generator(batch_block_generator(params,config.y_path,N_train,id2gt,X_meta,config.training_params["val_from_file"]),
samples_per_epoch = N_train-(N_train % config.training_params["n_minibatch"]),
nb_epoch = config.training_params["n_epochs"],
verbose=1,
validation_data = (X_val, Y_val),
callbacks=[early_stopping])
else:
epochs = model.fit(X_train, Y_train,
batch_size=config.training_params["n_minibatch"],
shuffle='batch',
nb_epoch=config.training_params["n_epochs"],
verbose=1,
validation_data=(X_val, Y_val),
callbacks=[early_stopping])
model.save_weights(os.path.join(model_dir, config.model_id + common.WEIGHTS_EXT))
logging.debug("Saving trained model %s in %s..." %
(trained_model["model_id"], common.DEFAULT_TRAINED_MODELS_FILE))
common.save_trained_model(common.DEFAULT_TRAINED_MODELS_FILE, trained_model)
logging.debug("Evaluating...")
print(X_test[0].shape,X_test[1].shape)
preds=model.predict(X_test)
print(preds.shape)
if params["dataset"]["evaluation"] in ['binary','multiclass']:
y_pred = (preds > 0.5).astype('int32')
acc = accuracy_score(Y_test,y_pred)
prec = precision_score(Y_test,y_pred,average='macro')
recall = recall_score(Y_test,y_pred,average='macro')
f1 = f1_score(Y_test,y_pred,average='macro')
print('Accuracy', acc)
print("%.3f\t%.3f\t%.3f" % (prec,recall,f1))
if params["dataset"]["fact"] == 'class':
good_classes = np.nonzero(Y_test.sum(0))[0]
print(Y_test.shape,preds.shape)
        roc_auc = roc_auc_score(Y_test[:,good_classes], preds[:,good_classes])
        logging.debug('ROC-AUC ' + str(roc_auc))
        pr_auc = average_precision_score(Y_test[:,good_classes], preds[:,good_classes])
        print('PR-AUC', pr_auc)
        #r2 = roc_auc
elif params["dataset"]["evaluation"] not in ['binary','multiclass','multilabel']:
r2s = []
for i,pred in enumerate(preds):
r2 = r2_score(Y_test[i],pred)
r2s.append(r2)
r2 = np.asarray(r2s).mean()
logging.debug('R2 avg '+str(r2))
# Batch prediction
if X_test[1].shape == Y_test[1].shape:
score = model.evaluate(X_test, Y_test, verbose=0)
logging.debug(score)
logging.debug(model.metrics_names)
print(score)
trained_model["loss_score"] = score[0]
trained_model["mse"] = score[1]
if params["dataset"]["evaluation"] not in ['binary','multiclass','multilabel']:
trained_model["r2"] = r2
fw=open(common.DATA_DIR+'/results/train_results.txt','a')
fw.write(trained_model["model_id"]+'\n')
if params["training"]["loss_func"] == 'binary_crossentropy':
fw.write('ROC-AUC: '+str(roc_auc)+'\n')
print('ROC-AUC: '+str(roc_auc))
fw.write('Loss: '+str(score[0])+' ('+config.training_params["loss_func"]+')\n')
fw.write('MSE: '+str(score[1])+'\n')
elif params["dataset"]["evaluation"] not in ['binary','multiclass','multilabel']:
fw.write('R2 avg: '+str(r2)+'\n')
print('R2 avg: '+str(r2))
fw.write('Loss: '+str(score[0])+' ('+config.training_params["loss_func"]+')\n')
fw.write('MSE: '+str(score[1])+'\n')
fw.write(json.dumps(epochs.history)+"\n\n")
fw.close()
if with_predict:
trained_models = pd.read_csv(common.DEFAULT_TRAINED_MODELS_FILE, sep='\t')
model_config = trained_models[trained_models["model_id"] == trained_model["model_id"]]
model_config = model_config.to_dict(orient="list")
testset = open(common.DATASETS_DIR+'/items_index_test_%s.tsv' % (config.dataset_settings["dataset"])).read().splitlines()
if config.training_params["val_from_file"] and not only_metadata:
predictions, predictions_index = obtain_predictions(model_config, testset, trained_model["model_id"], config.predicting_params["trim_coeff"], model=model, with_metadata=with_metadata, only_metadata=only_metadata, metadata_source=metadata_source, with_patches=True)
else:
predictions, predictions_index = obtain_predictions(model_config, testset, trained_model["model_id"], config.predicting_params["trim_coeff"], model=model, with_metadata=with_metadata, only_metadata=only_metadata, metadata_source=metadata_source)
print("Predictions created")
if with_eval:
do_eval(trained_model["model_id"],get_roc=True,get_map=True,get_p=True,predictions=predictions,predictions_index=predictions_index)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Evaluates the model',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-p',
'--params',
dest="params_file",
help='JSON file with params',
default=False)
parser.add_argument('-pred',
'--predict',
dest="with_predict",
help='Predict factors',
action='store_true',
default=False)
parser.add_argument('-eval',
'--eval',
dest="with_eval",
help='Eval factors',
action='store_true',
default=False)
parser.add_argument('-m',
'--metadata',
dest="with_metadata",
help='Use metadata',
action='store_true',
default=False)
parser.add_argument('-om',
'--only_metadata',
dest="only_metadata",
help='Use only metadata',
action='store_true',
default=False)
parser.add_argument('-ms',
'--metadata_source',
dest="metadata_source",
type=str,
help='Suffix of metadata files',
default="rovi")
args = parser.parse_args()
params = models.params_1
if args.params_file:
params = json.load(open(args.params_file))
    process(params, with_predict=args.with_predict, with_eval=args.with_eval)
|
62630
|
import os
from os import path
from eiffel_loop.scons.c_library import LIBRARY_INFO
from eiffel_loop.package import TAR_GZ_SOFTWARE_PACKAGE
from eiffel_loop.package import SOFTWARE_PATCH
info = LIBRARY_INFO ('source/id3.getlib')
print 'is_list', isinstance (info.configure [0], list)
print 'url', info.url
print info.configure
print 'test_data', info.test_data
pkg = TAR_GZ_SOFTWARE_PACKAGE (info.url, info.c_dev, info.extracted)
patch = SOFTWARE_PATCH (info.patch_url, info.c_dev, info.extracted)
patch.apply ()
# create links to `include' and `test_dir'
|
62642
|
import logging
import struct
from macholib.MachO import MachO
from macholib.mach_o import *
from .base_executable import *
from .section import *
INJECTION_SEGMENT_NAME = 'INJECT'
INJECTION_SECTION_NAME = 'inject'
class MachOExecutable(BaseExecutable):
def __init__(self, file_path):
super(MachOExecutable, self).__init__(file_path)
self.helper = MachO(self.fp)
if self.helper.fat:
raise Exception('MachO fat binaries are not supported at this time')
self.architecture = self._identify_arch()
if self.architecture is None:
raise Exception('Architecture is not recognized')
logging.debug('Initialized {} {} with file \'{}\''.format(self.architecture, type(self).__name__, file_path))
self.pack_endianness = self.helper.headers[0].endian
self.sections = []
for lc, cmd, data in self.helper.headers[0].commands:
if lc.cmd in (LC_SEGMENT, LC_SEGMENT_64):
for section in data:
self.sections.append(section_from_macho_section(section, cmd))
self.executable_segment = [cmd for lc, cmd, _ in self.helper.headers[0].commands
if lc.cmd in (LC_SEGMENT, LC_SEGMENT_64) and cmd.initprot & 0x4][0]
self.libraries = [fp.rstrip('\x00') for lc, cmd, fp in self.helper.headers[0].commands if lc.cmd == LC_LOAD_DYLIB]
def _identify_arch(self):
if self.helper.headers[0].header.cputype == 0x7:
return ARCHITECTURE.X86
elif self.helper.headers[0].header.cputype == 0x01000007:
return ARCHITECTURE.X86_64
elif self.helper.headers[0].header.cputype == 0xc:
return ARCHITECTURE.ARM
elif self.helper.headers[0].header.cputype == 0x0100000c:
return ARCHITECTURE.ARM_64
else:
return None
def executable_segment_vaddr(self):
return self.executable_segment.vmaddr
def executable_segment_size(self):
return self.executable_segment.vmsize
def entry_point(self):
for lc, cmd, _ in self.helper.headers[0].commands:
if lc.cmd == LC_MAIN:
return cmd.entryoff
return
def _extract_symbol_table(self):
ordered_symbols = []
symtab_command = self.helper.headers[0].getSymbolTableCommand()
if symtab_command:
self.binary.seek(symtab_command.stroff)
symbol_strings = self.binary.read(symtab_command.strsize)
self.binary.seek(symtab_command.symoff)
for i in range(symtab_command.nsyms):
if self.is_64_bit():
symbol = nlist_64.from_fileobj(self.binary, _endian_=self.pack_endianness)
else:
symbol = nlist.from_fileobj(self.binary, _endian_=self.pack_endianness)
symbol_name = symbol_strings[symbol.n_un:].split('\x00')[0]
if symbol.n_type & N_STAB == 0:
is_ext = symbol.n_type & N_EXT and symbol.n_value == 0
# Ignore Apple's hack for radar bug 5614542
if not is_ext and symbol_name != 'radr://5614542':
size = 0
logging.debug('Adding function {} from the symtab at vaddr {} with size {}'
.format(symbol_name, hex(symbol.n_value), hex(size)))
f = Function(symbol.n_value, size, symbol_name, self)
self.functions[symbol.n_value] = f
ordered_symbols.append(symbol_name)
dysymtab_command = self.helper.headers[0].getDynamicSymbolTableCommand()
if dysymtab_command:
self.binary.seek(dysymtab_command.indirectsymoff)
indirect_symbols = self.binary.read(dysymtab_command.nindirectsyms*4)
sym_offsets = struct.unpack(self.pack_endianness + 'I'*dysymtab_command.nindirectsyms, indirect_symbols)
for lc, cmd, sections in self.helper.headers[0].commands:
if lc.cmd in (LC_SEGMENT, LC_SEGMENT_64) and cmd.initprot & 0x4:
for section in sections:
if section.flags & S_NON_LAZY_SYMBOL_POINTERS == S_NON_LAZY_SYMBOL_POINTERS \
or section.flags & S_LAZY_SYMBOL_POINTERS == S_LAZY_SYMBOL_POINTERS \
or section.flags & S_SYMBOL_STUBS == S_SYMBOL_STUBS:
logging.debug('Parsing dynamic entries in {}.{}'.format(section.segname, section.sectname))
if section.flags & S_SYMBOL_STUBS:
stride = section.reserved2
else:
stride = (64 if self.is_64_bit() else 32)
count = section.size / stride
for i in range(count):
addr = self.executable_segment.vmaddr + section.offset + (i * stride)
idx = sym_offsets[i + section.reserved1]
if idx == 0x40000000:
symbol_name = "INDIRECT_SYMBOL_ABS"
elif idx == 0x80000000:
symbol_name = "INDIRECT_SYMBOL_LOCAL"
else:
symbol_name = ordered_symbols[idx]
logging.debug('Adding function {} from the dynamic symtab at vaddr {} with size {}'
.format(symbol_name, hex(addr), hex(stride)))
f = Function(addr, stride, symbol_name, self, type=Function.DYNAMIC_FUNC)
self.functions[addr] = f
def iter_string_sections(self):
STRING_SECTIONS = ['__const', '__cstring', '__objc_methname', '__objc_classname']
for s in self.sections:
if s.name in STRING_SECTIONS:
yield s
def prepare_for_injection(self):
# Total size of the stuff we're going to be adding in the middle of the binary
offset = 72+80 if self.is_64_bit() else 56+68 # 1 segment header + 1 section header
fileoff = (self.binary.len & ~0xfff) + 0x1000
vmaddr = self.function_named('__mh_execute_header').address + fileoff
logging.debug('Creating new MachOSegment at vaddr {}'.format(hex(vmaddr)))
new_segment = segment_command_64() if self.is_64_bit() else segment_command()
new_segment._endian_ = self.pack_endianness
new_segment.segname = INJECTION_SEGMENT_NAME
new_segment.fileoff = fileoff
new_segment.filesize = 0
new_segment.vmaddr = vmaddr
new_segment.vmsize = 0x1000
new_segment.maxprot = 0x7 #RWX
new_segment.initprot = 0x5 # RX
new_segment.flags = 0
new_segment.nsects = 1
logging.debug('Creating new MachOSection at vaddr {}'.format(hex(vmaddr)))
new_section = section_64() if self.is_64_bit() else section()
new_section._endian_ = self.pack_endianness
new_section.sectname = INJECTION_SECTION_NAME
new_section.segname = new_segment.segname
new_section.addr = new_segment.vmaddr
new_section.size = 0
new_section.offset = new_segment.fileoff
new_section.align = 4
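        # 0x80000400 = S_ATTR_PURE_INSTRUCTIONS | S_ATTR_SOME_INSTRUCTIONS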
new_section.flags = 0x80000400
lc = load_command()
lc._endian_ = self.pack_endianness
lc.cmd = LC_SEGMENT_64 if self.is_64_bit() else LC_SEGMENT
lc.cmdsize = offset
self.helper.headers[0].commands.append((lc, new_segment, [new_section]))
self.helper.headers[0].header.ncmds += 1
self.helper.headers[0].header.sizeofcmds += offset
return new_segment
def inject(self, asm, update_entry=False):
found = [s for lc,s,_ in self.helper.headers[0].commands if lc.cmd in (LC_SEGMENT, LC_SEGMENT_64) and s.segname == INJECTION_SEGMENT_NAME]
if found:
injection_vaddr = found[0].vmaddr
else:
logging.warning(
'prepare_for_injection() was not called before inject(). This may cause unexpected behavior')
inject_seg = self.prepare_for_injection()
injection_vaddr = inject_seg.vmaddr
if update_entry:
for lc, cmd, _ in self.helper.headers[0].commands:
if lc.cmd == LC_MAIN:
cmd.entryoff = injection_vaddr
break
self.binary.seek(0)
for lc, segment, sections in self.helper.headers[0].commands:
if lc.cmd in (LC_SEGMENT, LC_SEGMENT_64) and segment.segname == INJECTION_SEGMENT_NAME:
injection_offset = segment.fileoff + segment.filesize
segment.filesize += len(asm)
if segment.filesize + len(asm) > segment.vmsize:
segment.vmsize += 0x1000
for section in sections:
if section.sectname == INJECTION_SECTION_NAME:
section.size += len(asm)
self.next_injection_vaddr = section.addr + section.size
self.helper.headers[0].write(self.binary)
self.binary.seek(injection_offset)
self.binary.write(asm)
return injection_vaddr
|
62658
|
from django.views.generic import ListView
from ..professors.models import Professor
class SearchView(ListView):
queryset = Professor.objects.all().order_by("-first_name", "last_name")
template_name = "search.html"
def get_context_data(self):
context = super(SearchView, self).get_context_data()
context.update(
{"search_term": self.request.GET.get("q", ""), "navbarSearchShow": True}
)
return context
def get_queryset(self):
queryset = super(SearchView, self).get_queryset()
search_term = self.request.GET.get("q")
if search_term:
return Professor.objects.filter(search_index=search_term)
return queryset[:10]
|
62715
|
import unittest
from kafka_influxdb.encoder import heapster_json_encoder
class TestHeapsterJsonEncoder(unittest.TestCase):
def setUp(self):
self.encoder = heapster_json_encoder.Encoder()
def testEncoder(self):
msg = b'{ "MetricsName":"memory/major_page_faults","MetricsValue":{"value":56}, "MetricsTimestamp":"2017-01-19T17:26:00Z", "MetricsTags":{"container_name":"docker/9be430d3a1a28601292aebd76e15512d5471c630a7fa164d6a2a2fd9cbc19e3d"} } '
encoded_message = self.encoder.encode(msg)
expected_msg = [
'memory/major_page_faults,container_name=docker/9be430d3a1a28601292aebd76e15512d5471c630a7fa164d6a2a2fd9cbc19e3d value=56 1484846760']
self.assertEqual(encoded_message, expected_msg)
|
62743
|
from enum import Enum
class MenuChoice(Enum):
"""
Menu choices are always a lower or upper case letter
"""
NONE = -1
LOWER_A = 0
LOWER_B = 1
LOWER_C = 2
LOWER_D = 3
LOWER_E = 4
LOWER_F = 5
LOWER_G = 6
LOWER_H = 7
LOWER_I = 8
LOWER_J = 9
LOWER_K = 10
LOWER_L = 11
LOWER_M = 12
LOWER_N = 13
LOWER_O = 14
LOWER_P = 15
LOWER_Q = 16
LOWER_R = 17
LOWER_S = 18
LOWER_T = 19
LOWER_U = 20
LOWER_V = 21
LOWER_W = 22
LOWER_X = 23
LOWER_Y = 24
LOWER_Z = 25
UPPER_A = 26
UPPER_B = 27
UPPER_C = 28
UPPER_D = 29
UPPER_E = 30
UPPER_F = 31
UPPER_G = 32
UPPER_H = 33
UPPER_I = 34
UPPER_J = 35
UPPER_K = 36
UPPER_L = 37
UPPER_M = 38
UPPER_N = 39
UPPER_O = 40
UPPER_P = 41
UPPER_Q = 42
UPPER_R = 43
UPPER_S = 44
UPPER_T = 45
UPPER_U = 46
UPPER_V = 47
UPPER_W = 48
UPPER_X = 49
UPPER_Y = 50
UPPER_Z = 51
ZERO = 52
ONE = 53
TWO = 54
THREE = 55
FOUR = 56
FIVE = 57
SIX = 58
SEVEN = 59
EIGHT = 60
NINE = 61
ASTERISK = 62
EXCLAMATION_POINT = 63
FORWARD_SLASH = 64
QUESTION_MARK = 65
class MenuChoiceMapping:
@staticmethod
    def get_menu_letter_to_menu_choice(dcss_menu_chars):
        # Map each menu character to the corresponding MenuChoice member
        # (the character order below mirrors the enum values above).
        order = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789*!/?'
        return {c: MenuChoice(order.index(c)) if c in order else MenuChoice.NONE
                for c in dcss_menu_chars}
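# Illustrative usage (assuming the mapping returns concrete MenuChoice members):
#   mapping = MenuChoiceMapping.get_menu_letter_to_menu_choice('abZ9*')
#   mapping['a'] is MenuChoice.LOWER_A  -> True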
|
62753
|
import datetime
from bitmovin import Bitmovin, Encoding, S3Output, H264CodecConfiguration, AACCodecConfiguration, H264Profile, \
StreamInput, SelectionMode, Stream, EncodingOutput, ACLEntry, ACLPermission, MuxingStream, \
S3Input, FairPlayDRM, TSMuxing, HlsManifest, AudioMedia, VariantStream
from bitmovin.errors import BitmovinError
API_KEY = '<YOUR_API_KEY>'
S3_INPUT_ACCESSKEY = '<YOUR_S3_OUTPUT_ACCESSKEY>'
S3_INPUT_SECRETKEY = '<YOUR_S3_OUTPUT_SECRETKEY>'
S3_INPUT_BUCKETNAME = '<YOUR_S3_OUTPUT_BUCKETNAME>'
S3_INPUT_PATH = '<YOUR_S3_INPUT_PATH>'
S3_OUTPUT_ACCESSKEY = '<YOUR_S3_OUTPUT_ACCESSKEY>'
S3_OUTPUT_SECRETKEY = '<YOUR_S3_OUTPUT_SECRETKEY>'
S3_OUTPUT_BUCKETNAME = '<YOUR_S3_OUTPUT_BUCKETNAME>'
FAIRPLAY_KEY = '<YOUR_FAIRPLAY_KEY>'
FAIRPLAY_IV = '<YOUR_FAIRPLAY_IV>'
FAIRPLAY_URI = '<YOUR_FAIRPLAY_LICENSING_URL>'
date_component = str(datetime.datetime.now()).replace(' ', '_').replace(':', '-').split('.')[0].replace('_', '__')
OUTPUT_BASE_PATH = 'your/output/base/path/{}/'.format(date_component)
def main():
bitmovin = Bitmovin(api_key=API_KEY)
s3_input = S3Input(access_key=S3_INPUT_ACCESSKEY,
secret_key=S3_INPUT_SECRETKEY,
bucket_name=S3_INPUT_BUCKETNAME,
                       name='Sample S3 Input')
s3_input = bitmovin.inputs.S3.create(s3_input).resource
s3_output = S3Output(access_key=S3_OUTPUT_ACCESSKEY,
secret_key=S3_OUTPUT_SECRETKEY,
bucket_name=S3_OUTPUT_BUCKETNAME,
name='Sample S3 Output')
s3_output = bitmovin.outputs.S3.create(s3_output).resource
encoding = Encoding(name='hls fairplay example encoding - {}'.format(date_component))
encoding = bitmovin.encodings.Encoding.create(encoding).resource
video_codec_configuration_480p = H264CodecConfiguration(name='example_video_codec_configuration_480p',
bitrate=1200000,
rate=None,
height=480,
profile=H264Profile.HIGH)
video_codec_configuration_480p = bitmovin.codecConfigurations.H264.create(video_codec_configuration_480p).resource
video_codec_configuration_360p = H264CodecConfiguration(name='example_video_codec_configuration_360p',
bitrate=800000,
rate=None,
height=360,
profile=H264Profile.HIGH)
video_codec_configuration_360p = bitmovin.codecConfigurations.H264.create(video_codec_configuration_360p).resource
video_codec_configuration_240p = H264CodecConfiguration(name='example_video_codec_configuration_240p',
bitrate=400000,
rate=None,
height=240,
profile=H264Profile.HIGH)
video_codec_configuration_240p = bitmovin.codecConfigurations.H264.create(video_codec_configuration_240p).resource
audio_codec_configuration_stereo = AACCodecConfiguration(name='example_audio_codec_configuration_stereo',
bitrate=128000,
rate=48000)
audio_codec_configuration_stereo = bitmovin.codecConfigurations.AAC.create(
audio_codec_configuration_stereo).resource
video_input_stream = StreamInput(input_id=s3_input.id,
input_path=S3_INPUT_PATH,
selection_mode=SelectionMode.AUTO)
audio_input_stream_en_stereo = StreamInput(input_id=s3_input.id,
input_path=S3_INPUT_PATH,
selection_mode=SelectionMode.AUTO)
video_stream_480p = Stream(codec_configuration_id=video_codec_configuration_480p.id,
input_streams=[video_input_stream],
name='Sample Stream 480p')
video_stream_480p = bitmovin.encodings.Stream.create(object_=video_stream_480p,
encoding_id=encoding.id).resource
video_stream_360p = Stream(codec_configuration_id=video_codec_configuration_360p.id,
input_streams=[video_input_stream],
name='Sample Stream 360p')
video_stream_360p = bitmovin.encodings.Stream.create(object_=video_stream_360p,
encoding_id=encoding.id).resource
video_stream_240p = Stream(codec_configuration_id=video_codec_configuration_240p.id,
input_streams=[video_input_stream],
name='Sample Stream 240p')
video_stream_240p = bitmovin.encodings.Stream.create(object_=video_stream_240p,
encoding_id=encoding.id).resource
audio_stream_en_stereo = Stream(codec_configuration_id=audio_codec_configuration_stereo.id,
input_streams=[audio_input_stream_en_stereo],
name='Sample Audio Stream EN Stereo')
audio_stream_en_stereo = bitmovin.encodings.Stream.create(object_=audio_stream_en_stereo,
encoding_id=encoding.id).resource
acl_entry = ACLEntry(permission=ACLPermission.PUBLIC_READ)
video_muxing_stream_480p = MuxingStream(video_stream_480p.id)
video_muxing_stream_360p = MuxingStream(video_stream_360p.id)
video_muxing_stream_240p = MuxingStream(video_stream_240p.id)
audio_muxing_stream_en_stereo = MuxingStream(audio_stream_en_stereo.id)
video_muxing_480p_output = EncodingOutput(output_id=s3_output.id,
output_path=OUTPUT_BASE_PATH + 'video/hls/480p',
acl=[acl_entry])
video_muxing_480p = TSMuxing(segment_length=4,
segment_naming='seg_%number%.ts',
streams=[video_muxing_stream_480p],
name='Sample Muxing 480p')
video_muxing_480p = bitmovin.encodings.Muxing.TS.create(object_=video_muxing_480p,
encoding_id=encoding.id).resource
fair_play_480p = FairPlayDRM(key=FAIRPLAY_KEY,
iv=FAIRPLAY_IV,
uri=FAIRPLAY_URI,
outputs=[video_muxing_480p_output],
name='FairPlay 480p')
fair_play_480p = bitmovin.encodings.Muxing.TS.DRM.FairPlay.create(object_=fair_play_480p,
encoding_id=encoding.id,
muxing_id=video_muxing_480p.id).resource
video_muxing_360p_output = EncodingOutput(output_id=s3_output.id,
output_path=OUTPUT_BASE_PATH + 'video/hls/360p',
acl=[acl_entry])
video_muxing_360p = TSMuxing(segment_length=4,
segment_naming='seg_%number%.ts',
streams=[video_muxing_stream_360p],
name='Sample Muxing 360p')
video_muxing_360p = bitmovin.encodings.Muxing.TS.create(object_=video_muxing_360p,
encoding_id=encoding.id).resource
fair_play_360p = FairPlayDRM(key=FAIRPLAY_KEY,
iv=FAIRPLAY_IV,
uri=FAIRPLAY_URI,
outputs=[video_muxing_360p_output],
name='FairPlay 360p')
fair_play_360p = bitmovin.encodings.Muxing.TS.DRM.FairPlay.create(object_=fair_play_360p,
encoding_id=encoding.id,
muxing_id=video_muxing_360p.id).resource
video_muxing_240p_output = EncodingOutput(output_id=s3_output.id,
output_path=OUTPUT_BASE_PATH + 'video/hls/240p',
acl=[acl_entry])
video_muxing_240p = TSMuxing(segment_length=4,
segment_naming='seg_%number%.ts',
streams=[video_muxing_stream_240p],
name='Sample Muxing 240p')
video_muxing_240p = bitmovin.encodings.Muxing.TS.create(object_=video_muxing_240p,
encoding_id=encoding.id).resource
fair_play_240p = FairPlayDRM(key=FAIRPLAY_KEY,
iv=FAIRPLAY_IV,
uri=FAIRPLAY_URI,
outputs=[video_muxing_240p_output],
name='FairPlay 240p')
fair_play_240p = bitmovin.encodings.Muxing.TS.DRM.FairPlay.create(object_=fair_play_240p,
encoding_id=encoding.id,
muxing_id=video_muxing_240p.id).resource
audio_muxing_output_en_stereo = EncodingOutput(output_id=s3_output.id,
output_path=OUTPUT_BASE_PATH + 'audio/hls/en_2_0',
acl=[acl_entry])
audio_muxing_en_stereo = TSMuxing(segment_length=4,
segment_naming='seg_%number%.ts',
streams=[audio_muxing_stream_en_stereo],
name='Sample Audio Muxing EN Stereo')
audio_muxing_en_stereo = bitmovin.encodings.Muxing.TS.create(object_=audio_muxing_en_stereo,
encoding_id=encoding.id).resource
fair_play_audio = FairPlayDRM(key=FAIRPLAY_KEY,
iv=FAIRPLAY_IV,
uri=FAIRPLAY_URI,
outputs=[audio_muxing_output_en_stereo],
name='FairPlay Audio')
fair_play_audio = bitmovin.encodings.Muxing.TS.DRM.FairPlay.create(object_=fair_play_audio,
encoding_id=encoding.id,
muxing_id=audio_muxing_en_stereo.id).resource
bitmovin.encodings.Encoding.start(encoding_id=encoding.id)
try:
bitmovin.encodings.Encoding.wait_until_finished(encoding_id=encoding.id)
except BitmovinError as bitmovin_error:
print("Exception occurred while waiting for encoding to finish: {}".format(bitmovin_error))
# Manifest ##
manifest_output = EncodingOutput(output_id=s3_output.id,
output_path=OUTPUT_BASE_PATH,
acl=[acl_entry])
hls_manifest = HlsManifest(manifest_name='example_manifest_hls.m3u8',
outputs=[manifest_output],
name='Sample HLS FairPlay Manifest')
hls_manifest = bitmovin.manifests.HLS.create(hls_manifest).resource
audio_media = AudioMedia(name='Sample Audio Media',
group_id='audio_group',
segment_path=audio_muxing_output_en_stereo.outputPath,
encoding_id=encoding.id,
stream_id=audio_stream_en_stereo.id,
muxing_id=audio_muxing_en_stereo.id,
drm_id=fair_play_audio.id,
language='en',
uri='audiomedia.m3u8')
audio_media = bitmovin.manifests.HLS.AudioMedia.create(manifest_id=hls_manifest.id, object_=audio_media).resource
variant_stream_480p = VariantStream(audio=audio_media.groupId,
closed_captions='NONE',
segment_path=video_muxing_480p_output.outputPath,
uri='video_480p.m3u8',
encoding_id=encoding.id,
stream_id=video_stream_480p.id,
muxing_id=video_muxing_480p.id,
drm_id=fair_play_480p.id)
bitmovin.manifests.HLS.VariantStream.create(manifest_id=hls_manifest.id,
object_=variant_stream_480p)
variant_stream_360p = VariantStream(audio=audio_media.groupId,
closed_captions='NONE',
segment_path=video_muxing_360p_output.outputPath,
uri='video_360p.m3u8',
encoding_id=encoding.id,
stream_id=video_stream_360p.id,
muxing_id=video_muxing_360p.id,
drm_id=fair_play_360p.id)
bitmovin.manifests.HLS.VariantStream.create(manifest_id=hls_manifest.id,
object_=variant_stream_360p)
variant_stream_240p = VariantStream(audio=audio_media.groupId,
closed_captions='NONE',
segment_path=video_muxing_240p_output.outputPath,
uri='video_240p.m3u8',
encoding_id=encoding.id,
stream_id=video_stream_240p.id,
muxing_id=video_muxing_240p.id,
drm_id=fair_play_240p.id)
bitmovin.manifests.HLS.VariantStream.create(manifest_id=hls_manifest.id,
object_=variant_stream_240p)
bitmovin.manifests.HLS.start(manifest_id=hls_manifest.id)
try:
bitmovin.manifests.HLS.wait_until_finished(manifest_id=hls_manifest.id)
except BitmovinError as bitmovin_error:
print("Exception occurred while waiting for HLS manifest creation to finish: {}".format(bitmovin_error))
if __name__ == '__main__':
main()
|
62776
|
import click
from marinetrafficapi import constants
from marinetrafficapi.bind import bind_request
from marinetrafficapi.vessels_positions.\
PS01_vessel_historical_track.models import VesselHistoricalPosition
from marinetrafficapi.vessels_positions.\
PS01_vessel_historical_track.query_params import PS01QueryParams
from marinetrafficapi.vessels_positions.\
PS02_PS06_vessel_positions.models import FleetVesselPosition
from marinetrafficapi.vessels_positions.\
PS02_PS06_vessel_positions.query_params import PS02PS06QueryParams
from marinetrafficapi.vessels_positions.\
PS07_single_vessel_positions.models import SingleVesselPosition
from marinetrafficapi.vessels_positions.\
PS07_single_vessel_positions.query_params import PS07QueryParams
class VesselPositions:
"""Retrieve forecasted information for any vessel.
Get ETA and voyage related information using one of these APIs."""
vessel_historical_track = bind_request(
api_path='/exportvesseltrack',
model=VesselHistoricalPosition,
query_parameters=PS01QueryParams,
default_parameters={
'v': '2',
constants.ClientConst.MSG_TYPE: constants.ClientConst.SIMPLE,
constants.RequestConst.PROTOCOL: constants.FormatterConst.JSONO
},
description='{}: \nGet all historical positions \n'
'for one or more vessels over a period of time'
.format(click.style("API CALL PS01", fg="red"))
)
fleet_vessel_positions = bind_request(
api_path='/exportvessels',
model=FleetVesselPosition,
query_parameters=PS02PS06QueryParams,
default_parameters={
'v': '8',
constants.ClientConst.MSG_TYPE: constants.ClientConst.SIMPLE,
constants.RequestConst.PROTOCOL: constants.FormatterConst.JSONO
},
description='{}:\nGet positional information for a set of predefined vessels \n'
'{}:\nMonitor vessel activity for your MarineTraffic fleet(s)\n'
'{}:\nMonitor vessel activity in one or more ports of your interest\n'
'{}:\nMonitor vessel activity in an area of your interest\n'
'{}:\nRetrieve positions for vessels sailing in an area that \n'
'you define each time you call the service'
.format(click.style("API CALL PS02", fg="red"),
click.style("API CALL PS03", fg="red"),
click.style("API CALL PS04", fg="red"),
click.style("API CALL PS05", fg="red"),
click.style("API CALL PS06", fg="red"))
)
single_vessel_positions = bind_request(
api_path='/exportvessel',
model=SingleVesselPosition,
query_parameters=PS07QueryParams,
default_parameters={
'v': '5',
constants.ClientConst.MSG_TYPE: constants.ClientConst.SIMPLE,
constants.RequestConst.PROTOCOL: constants.FormatterConst.JSONO
},
description='{}:\nGet the latest available position or voyage \n'
'information for a particular vessel'
.format(click.style("API CALL PS07", fg="red"))
)
|
62791
|
from django.conf import settings
from django.conf.urls import patterns
from django.views.generic import TemplateView
if settings.DEBUG:
# enable local preview of error pages
urlpatterns += patterns('',
(r'^403/$', TemplateView.as_view(template_name="403.html")),
(r'^404/$', TemplateView.as_view(template_name="404.html")),
(r'^500/$', TemplateView.as_view(template_name="500.html")),
)
|
62823
|
import pickle
import pytest
import numpy as np
from astropy import units as u
from astropy import modeling
from specutils.utils import QuantityModel
from ..utils.wcs_utils import refraction_index, vac_to_air, air_to_vac
wavelengths = [300, 500, 1000] * u.nm
data_index_refraction = {
'Griesen2006': np.array([3.07393068, 2.9434858 , 2.8925797 ]),
'Edlen1953': np.array([2.91557413, 2.78963801, 2.74148172]),
'Edlen1966': np.array([2.91554272, 2.7895973 , 2.74156098]),
'PeckReeder1972': np.array([2.91554211, 2.78960005, 2.74152561]),
'Morton2000': np.array([2.91568573, 2.78973402, 2.74169531]),
'Ciddor1996': np.array([2.91568633, 2.78973811, 2.74166131])
}
def test_quantity_model():
c = modeling.models.Chebyshev1D(3)
uc = QuantityModel(c, u.AA, u.km)
assert uc(10*u.nm).to(u.m) == 0*u.m
def test_pickle_quantity_model(tmp_path):
"""
Check that a QuantityModel can roundtrip through pickling, as it
would if fit in a multiprocessing pool.
"""
c = modeling.models.Chebyshev1D(3)
uc = QuantityModel(c, u.AA, u.km)
pkl_file = tmp_path / "qmodel.pkl"
with open(pkl_file, "wb") as f:
pickle.dump(uc, f)
with open(pkl_file, "rb") as f:
new_model = pickle.load(f)
assert new_model.input_units == uc.input_units
assert new_model.return_units == uc.return_units
assert type(new_model.unitless_model) == type(uc.unitless_model)
assert np.all(new_model.unitless_model.parameters == uc.unitless_model.parameters)
@pytest.mark.parametrize("method", data_index_refraction.keys())
def test_refraction_index(method):
tmp = (refraction_index(wavelengths, method) - 1) * 1e4
assert np.isclose(tmp, data_index_refraction[method], atol=1e-7).all()
@pytest.mark.parametrize("method", data_index_refraction.keys())
def test_air_to_vac(method):
tmp = refraction_index(wavelengths, method)
assert np.isclose(wavelengths.value * tmp,
air_to_vac(wavelengths, method=method, scheme='inversion').value,
rtol=1e-6).all()
assert np.isclose(wavelengths.value,
air_to_vac(vac_to_air(wavelengths, method=method),
method=method, scheme='iteration').value,
atol=1e-12).all()
|
62825
|
import logging
import os
from datetime import timedelta
# layers
import sys
sys.path.append('/opt')
import cv2
from common.config import LOG_LEVEL, FRAME_RESIZE_WIDTH, FRAME_RESIZE_HEIGHT, STORE_FRAMES, \
DDB_FRAME_TABLE, UTC_TIME_FMT
from common.utils import upload_to_s3, put_item_ddb
logger = logging.getLogger('FrameExtractor')
logger.setLevel(LOG_LEVEL)
S3_KEY_DATE_FMT = "%Y/%m/%d/%H/%M:%S:%f"
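# Frames are keyed in S3 by their absolute UTC timestamp, e.g. '2021/06/01/12/30:05:123456.jpg'.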
def extract_frames(stream_id, segment_s3_key, video_chunk, video_start_datetime, s3_bucket, frame_s3_prefix,
sample_fps=1):
if STORE_FRAMES not in ["all", "original", "resized"]:
raise ValueError(f'Invalid STORE_FRAMES option: {STORE_FRAMES} (Valid: all, original, resized)')
store_original_frames = STORE_FRAMES in ["all", "original"]
store_resized_frames = STORE_FRAMES in ["all", "resized"]
logger.info(f'Store original sized frame? {store_original_frames}, Store resized frames? {store_resized_frames}')
cap = cv2.VideoCapture(video_chunk)
extracted_frames_metadata = []
try:
video_metadata = extract_video_metadata(cap)
hop = round(video_metadata['fps'] / sample_fps)
if hop == 0:
hop = 1 # if sample_fps is invalid extract every frame
logger.info(f'Extracting every {hop} frame.')
frame_count = 0
extracted_frames = 0
while cap.isOpened():
success, frame = cap.read()
if success:
if frame_count % hop == 0:
# timestamp relative to start of video
frame_timestamp_millis = cap.get(cv2.CAP_PROP_POS_MSEC)
# absolute timestamp of the frame
frame_datetime = video_start_datetime + timedelta(milliseconds=frame_timestamp_millis)
segment_id = f'{stream_id}:{video_start_datetime.strftime(UTC_TIME_FMT)}'
frame_metadata = {'Stream_ID': stream_id,
'DateTime': frame_datetime.strftime(UTC_TIME_FMT),
'Segment': segment_id,
'Segment_Millis': int(frame_timestamp_millis),
'Segment_Frame_Num': frame_count,
'S3_Bucket': s3_bucket}
if store_original_frames:
jpg = cv2.imencode(".jpg", frame)[1]
# use absolute timestamps for s3 key. might be easier to reason about.
frame_key = os.path.join(frame_s3_prefix, 'original',
f'{frame_datetime.strftime(S3_KEY_DATE_FMT)}.jpg')
# TODO: Should we also store the frame metadata in the s3 object?
s3_object_metadata = {'ContentType': 'image/jpeg'}
upload_to_s3(s3_bucket, frame_key, bytearray(jpg), **s3_object_metadata)
frame_metadata['S3_Key'] = frame_key
frame_metadata['Frame_Width'] = int(video_metadata['original_frame_width'])
frame_metadata['Frame_Height'] = int(video_metadata['original_frame_height'])
if store_resized_frames:
resized_frame = cv2.resize(frame, (FRAME_RESIZE_WIDTH, FRAME_RESIZE_HEIGHT))
resized_jpg = cv2.imencode(".jpg", resized_frame)[1]
# use absolute timestamps for s3 key. might be easier to reason about.
resized_frame_key = os.path.join(frame_s3_prefix, 'resized',
f'{frame_datetime.strftime(S3_KEY_DATE_FMT)}.jpg')
s3_object_metadata = {'ContentType': 'image/jpeg'}
upload_to_s3(s3_bucket, resized_frame_key, bytearray(resized_jpg), **s3_object_metadata)
if 'S3_Key' in frame_metadata:
frame_metadata['Resized_S3_Key'] = resized_frame_key
else:
                            frame_metadata['S3_Key'] = resized_frame_key
frame_metadata['Frame_Width'] = FRAME_RESIZE_WIDTH
frame_metadata['Frame_Height'] = FRAME_RESIZE_HEIGHT
# persist frame metadata in database
put_item_ddb(DDB_FRAME_TABLE, frame_metadata)
extracted_frames_metadata.append(frame_metadata)
extracted_frames += 1
frame_count += 1
else:
break
logger.info(f'Extracted {extracted_frames} out of {frame_count} frames from {video_chunk}')
return extracted_frames_metadata
finally:
cv2.destroyAllWindows()
cap.release()
def extract_video_metadata(cap):
metadata = {
'original_frame_width': cap.get(cv2.CAP_PROP_FRAME_WIDTH),
'original_frame_height': cap.get(cv2.CAP_PROP_FRAME_HEIGHT),
'fourcc': cap.get(cv2.CAP_PROP_FOURCC),
'frame_count': int(cap.get(cv2.CAP_PROP_FRAME_COUNT)),
'format': cap.get(cv2.CAP_PROP_FORMAT),
'mode': cap.get(cv2.CAP_PROP_MODE),
'fps': cap.get(cv2.CAP_PROP_FPS),
}
logger.info(f'video metadata: {metadata}')
return metadata
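# Illustrative local invocation (stream id, paths and bucket are hypothetical;
# video_start_datetime is the UTC datetime at which the segment starts):
#   extract_frames('stream-1', 'segments/seg-0.ts', '/tmp/seg-0.ts',
#                  video_start_datetime, 'my-frames-bucket', 'frames/stream-1',
#                  sample_fps=1)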
|
62855
|
import pytest
from pymlconf import Root
def test_delattribute():
root = Root('''
app:
name: MyApp
''')
assert hasattr(root.app, 'name')
del root.app.name
assert not hasattr(root.app, 'name')
with pytest.raises(AttributeError):
del root.app.invalidattribute
|
62879
|
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.contrib import admin
from backend.views import app_urls
from server import settings
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'server.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
(r'^paypal/', include('paypal.standard.ipn.urls')),
)
urlpatterns += app_urls
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
62916
|
import tensorflow as tf
import experiment
class NoTraining(experiment.Training):
"""This is a replacement component that skips the training process"""
def __init__(self, config, config_global, logger):
super(NoTraining, self).__init__(config, config_global, logger)
def start(self, model, data, evaluation):
self.logger.info("Skipping training")
def restore_best_epoch(self, model, data, evaluation):
pass
def remove_checkpoints(self):
pass
component = NoTraining
|
62969
|
import django.dispatch
file_is_ready = django.dispatch.Signal()
file_upload_failed = django.dispatch.Signal()
file_joining_failed = django.dispatch.Signal()
|
62979
|
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.files.storage import FileSystemStorage
from django.http import HttpResponseRedirect
from django.urls import reverse_lazy
from django.views.generic import TemplateView, UpdateView, CreateView, DeleteView, FormView
from rolepermissions.mixins import HasPermissionsMixin
from rolepermissions.permissions import available_perm_status
from apps.Testings.models import Phase
from apps.Users.models import Task
from extracts import run_extract
from .models import Argument, Source, Command
from .forms import ArgumentForm, SourceProductForm, SourceRobotForm, SourceLibraryForm, SourceEditProductForm, \
CommandForm, SourceEditLibraryForm, PhaseForm
class IndexView(TemplateView):
template_name = "index.html"
def get_context_data(self, **kwargs):
context = super(IndexView, self).get_context_data(**kwargs)
return context
class HomeView(LoginRequiredMixin, TemplateView):
template_name = "home.html"
def get_context_data(self, **kwargs):
context = super(HomeView, self).get_context_data(**kwargs)
context['user_tasks'] = self.request.user.get_all_tasks()[:3]
context['user_tasks2'] = self.request.user.get_all_tasks()[3:]
return context
class StepperView(LoginRequiredMixin, TemplateView):
template_name = "stepper.html"
def get_context_data(self, **kwargs):
context = super(StepperView, self).get_context_data(**kwargs)
return context
class ArgumentsView(LoginRequiredMixin, HasPermissionsMixin, TemplateView):
template_name = "arguments.html"
required_permission = "read_argument"
class NewArgumentView(LoginRequiredMixin, HasPermissionsMixin, CreateView):
model = Argument
form_class = ArgumentForm
template_name = "form-snippet.html"
required_permission = "create_argument"
def get_success_url(self):
messages.success(self.request, "Argument Created")
return reverse_lazy('commands')
def get_context_data(self, **kwargs):
cmd = Command.objects.get(id = self.kwargs.get('cmd'))
form = self.form_class(cmd = cmd)
context = super(NewArgumentView, self).get_context_data(**kwargs)
context['title'] = "New Argument"
context['form'] = form
return context
class EditArgumentView(LoginRequiredMixin, HasPermissionsMixin, UpdateView):
model = Argument
form_class = ArgumentForm
template_name = "form-snippet.html"
required_permission = "update_argument"
def get_success_url(self):
messages.success(self.request, "Argument Edited")
return reverse_lazy('commands')
def get_context_data(self, **kwargs):
context = super(EditArgumentView, self).get_context_data(**kwargs)
context['title'] = "Edit Argument"
context['delete'] = True
return context
def post(self, request, pk, *args, **kwargs):
result = super(EditArgumentView, self).post(request, *args, **kwargs)
instance = Argument.objects.get(pk = pk)
include = request.POST.getlist('include[]')
exclude = request.POST.getlist('exclude[]')
instance.name = request.POST['name']
instance.description = request.POST['description']
instance.requirement = request.POST['requirement'].title()
instance.needs_value = request.POST['needs_value'].title()
instance.save()
for i in instance.include.all():
instance.include.remove(i)
for e in instance.exclude.all():
instance.exclude.remove(e)
for i in include:
instance.include.add(i)
instance.save()
for e in exclude:
instance.exclude.add(e)
instance.save()
return result
class DeleteArgumentView(LoginRequiredMixin, HasPermissionsMixin, DeleteView):
model = Argument
template_name = "delete-argument.html"
required_permission = "delete_argument"
def get_success_url(self):
messages.success(self.request, "Argument Deleted")
return reverse_lazy('commands')
# - - - - - Sources - - - - - - - - -
class SourceList(LoginRequiredMixin, TemplateView):
template_name = "source-list.html"
def get_context_data(self, **kwargs):
context = super(SourceList, self).get_context_data(**kwargs)
name = kwargs.get('slug')
if name:
title = ''
category = 0
if name == 'products':
title = 'Products'
category = 3
if name == 'robot':
title = 'Robot Framework'
category = 4
if name == 'libraries':
title = 'Robot Framework Libraries'
category = 5
context['title'] = title
context['category'] = category
return context
class CreateSourceView(LoginRequiredMixin, CreateView):
model = Source
template_name = "create-edit-source.html"
def dispatch(self, request, *args, **kwargs):
user = self.request.user
can_create = False
if user.is_superuser:
can_create = True
else:
user_permissions = available_perm_status(user)
permissions = [
'create_robot',
'create_libraries',
'create_product'
]
for perm in permissions:
if perm in user_permissions and not can_create:
can_create = True
if can_create:
return super(CreateSourceView, self).dispatch(request, *args, **kwargs)
else:
messages.warning(request, "You don't have permission for this action")
return HttpResponseRedirect(self.get_success_url())
def get_form_class(self):
name = self.kwargs.get('slug')
if name == 'products':
return SourceProductForm
if name == 'robot':
return SourceRobotForm
if name == 'libraries':
return SourceLibraryForm
def form_valid(self, form, **kwargs):
name = self.kwargs.get('slug')
stepper = self.kwargs.get('stepper')
_config = {}
if name == 'products':
form.instance.category = 3
if Source.objects.filter(name=form.data.get('name'), version=form.data.get('version')):
messages.warning(self.request, 'Already exist a Product with this name and version')
return self.render_to_response(self.get_context_data(form=form))
source = form.save()
self.pk = source.pk
if stepper != 'stepper':
messages.success(self.request, 'Product {0} created'.format(source.name))
host = form.data.get('host')
if not host:
return HttpResponseRedirect(self.get_success_url())
if host:
_config = {
'category': 3,
'source': source.pk,
'regex': form.data.get('regex'),
'path': form.data.get('path'),
'host': host,
'port': form.data.get('port'),
'username': form.data.get('username'),
                    'password': form.data.get('password')
}
if name == 'robot':
form.instance.name = 'Robot Framework'
form.instance.category = 4
source = form.save()
self.pk = source.pk
file = form.files.get('zip_file')
if file:
fs = FileSystemStorage(location='{0}/zip/'.format(settings.MEDIA_ROOT))
filename = fs.save(file.name, file)
uploaded_file_url = fs.url('zip/{}'.format(filename))
_config = {
'category': 4,
'source': source.pk,
"zip": uploaded_file_url
}
if stepper != 'stepper':
messages.success(self.request, 'Robot Framework Source created')
if name == 'libraries':
form.instance.category = 5
source = form.save()
self.pk = source.pk
if stepper != 'stepper':
messages.success(self.request, 'Library Source created')
_config = {
'category': 5,
'source': source.pk,
'url': form.data.get('url')
}
try:
extract = run_extract.delay(_config)
task = Task.objects.create(
name="Extract commands from {0}".format(name),
category=1,
task_info="Started",
task_id=extract.task_id,
state=extract.state
)
if stepper != 'stepper':
messages.info(self.request, 'Running extract in background')
self.request.user.tasks.add(task)
self.request.user.save()
return HttpResponseRedirect(self.get_success_url())
except Exception as error:
messages.error(self.request, 'Error {0}'.format(error))
return self.render_to_response(self.get_context_data(form=form))
def get_success_url(self):
stepper = self.kwargs.get('stepper')
source_pk = self.pk
if stepper != 'stepper':
return reverse_lazy('source-list', kwargs={'slug': self.kwargs.get('slug')})
else:
return reverse_lazy('successful', kwargs={'step': self.kwargs.get('slug'), 'pk': source_pk})
def get_context_data(self, **kwargs):
context = super(CreateSourceView, self).get_context_data()
context['slug'] = self.kwargs.get('slug')
context['title'] = 'New'
context['extra'] = 'After press "Create" the system extract the commands for'
context['stepper'] = self.kwargs.get('stepper')
return context
class EditSourceView(LoginRequiredMixin, UpdateView):
model = Source
template_name = "create-edit-source.html"
def dispatch(self, request, *args, **kwargs):
user = self.request.user
can_create = False
if user.is_superuser:
can_create = True
else:
user_permissions = available_perm_status(user)
permissions = [
'update_robot',
'update_libraries',
'update_product'
]
for perm in permissions:
if perm in user_permissions and not can_create:
can_create = True
if can_create:
return super(EditSourceView, self).dispatch(request, *args, **kwargs)
else:
messages.warning(request, "You don't have permission for this action")
return HttpResponseRedirect(self.get_success_url())
def get_form_class(self):
_category = self.object.category
if _category == 3:
return SourceEditProductForm
if _category == 4:
return SourceRobotForm
if _category == 5:
return SourceEditLibraryForm
def form_valid(self, form):
name = self.kwargs.get('slug')
source = form.save()
self.pk = source.pk
if name == 'products':
form.instance.category = 3
if name == 'robot':
form.instance.name = 'Robot Framework'
form.instance.category = 4
if name == 'libraries':
form.instance.category = 5
return super(EditSourceView, self).form_valid(form)
def get_success_url(self):
_category = self.object.category
if _category == 3:
slug = 'products'
if _category == 4:
slug = 'robot'
if _category == 5:
slug = 'libraries'
stepper = self.kwargs.get('stepper')
source_pk = self.pk
if stepper != 'stepper':
return reverse_lazy('source-list', kwargs={'slug': slug})
else:
return reverse_lazy('successful', kwargs={'step': slug, 'pk': source_pk})
def get_context_data(self, **kwargs):
context = super(EditSourceView, self).get_context_data()
_category = self.object.category
if _category == 3:
slug = 'products'
if _category == 4:
slug = 'robot'
if _category == 5:
slug = 'libraries'
context['slug'] = slug
context['title'] = 'Edit'
context['stepper'] = self.kwargs.get('stepper')
return context
class DeleteSourceView(LoginRequiredMixin, DeleteView):
model = Source
template_name = "delete-source.html"
def get_success_url(self, slug):
messages.success(self.request, 'Robot Framework Source and his commands deleted')
return reverse_lazy('source-list', kwargs={'slug': slug})
def delete(self, request, *args, **kwargs):
source = self.get_object()
slug = ''
if source.category == 3:
slug = 'products'
if source.category == 4:
slug = 'robot'
if source.category == 5:
slug = 'libraries'
commands = Command.objects.filter(source=source.pk)
for command in commands:
arguments = command.get_arguments()
if command.source.count() <= 1:
command.delete()
source.delete()
return HttpResponseRedirect(self.get_success_url(slug))
def get_context_data(self, **kwargs):
context = super(DeleteSourceView, self).get_context_data()
_category = self.object.category
if _category == 3:
slug = 'products'
if _category == 4:
slug = 'robot'
if _category == 5:
slug = 'libraries'
context['slug'] = slug
return context
class CommandsView(LoginRequiredMixin, TemplateView):
template_name = "commands.html"
def get_context_data(self, **kwargs):
context = super(CommandsView, self).get_context_data(**kwargs)
context['stepper'] = self.kwargs.get('stepper')
return context
class NewCommandView(LoginRequiredMixin, FormView):
model = Command
template_name = 'create-edit-command.html'
form_class = CommandForm
def get_success_url(self):
return reverse_lazy('commands')
def get_context_data(self, **kwargs):
context = super(NewCommandView, self).get_context_data(**kwargs)
context['title'] = 'Create Command'
context['ArgumentForm'] = ArgumentForm
return context
class EditCommandView(LoginRequiredMixin, UpdateView):
model = Command
form_class = CommandForm
template_name = 'create-edit-command.html'
success_url = reverse_lazy('commands')
def get_context_data(self, **kwargs):
context = super(EditCommandView, self).get_context_data(**kwargs)
context['title'] = 'Edit Command'
context['ArgumentForm'] = ArgumentForm
return context
class DeleteCommandView(LoginRequiredMixin, DeleteView):
template_name = "delete-command.html"
model = Command
success_url = reverse_lazy("commands")
class PhasesView(LoginRequiredMixin, TemplateView):
template_name = "phases.html"
class NewPhaseView(LoginRequiredMixin, CreateView):
model = Phase
form_class = PhaseForm
template_name = 'create-edit-phase.html'
def form_valid(self, form):
form.instance.user = self.request.user
source = form.save()
self.pk = source.pk
return super(NewPhaseView, self).form_valid(form)
#def get_success_url(self):
# messages.success(self.request, "Phase Created")
# return reverse_lazy('phases')
def get_success_url(self):
stepper = self.kwargs.get('stepper')
source_pk = self.pk
        if stepper != 'stepper':
            messages.success(self.request, "Phase Created")
            return reverse_lazy('phases')
        else:
            return reverse_lazy('successful', kwargs={'step': 'phases', 'pk': source_pk})
def get_context_data(self, **kwargs):
context = super(NewPhaseView, self).get_context_data(**kwargs)
context['stepper'] = self.kwargs.get('stepper')
return context
class EditPhaseView(LoginRequiredMixin, UpdateView):
model = Phase
form_class = PhaseForm
template_name = 'create-edit-phase.html'
def form_valid(self, form):
form.instance.user = self.request.user
source = form.save()
self.pk = source.pk
return super(EditPhaseView, self).form_valid(form)
def get_success_url(self):
stepper = self.kwargs.get('stepper')
source_pk = self.pk
        if stepper != 'stepper':
            messages.success(self.request, "Phase Edited")
            return reverse_lazy('phases')
        else:
            return reverse_lazy('successful', kwargs={'step': 'phases', 'pk': source_pk})
def get_context_data(self, **kwargs):
context = super(EditPhaseView, self).get_context_data(**kwargs)
context['stepper'] = self.kwargs.get('stepper')
return context
class DeletePhaseView(LoginRequiredMixin, DeleteView):
model = Phase
template_name = "delete-phase.html"
def get_success_url(self):
messages.success(self.request, "Phase deleted")
return reverse_lazy('phases')
class SuccessfulView(LoginRequiredMixin, TemplateView):
template_name = "successful.html"
def get_context_data(self, **kwargs):
context = super(SuccessfulView, self).get_context_data(**kwargs)
context['step'] = self.kwargs.get('step')
context['pk'] = self.kwargs.get('pk')
return context
|
62985
|
import aiohttp
import asyncio
import os
import sys
import time
import random
import contextlib
seaweedfs_url = 'http://127.0.0.1:9081'
def random_content():
return os.urandom(random.randint(1, 10) * 1024)
def random_fid(volumes):
volume_id = random.choice(volumes)
file_key = random.randint(0, 1 << 24)
file_key_hex = '%x' % file_key
cookie_hex = '00000000'
return f'{volume_id},{file_key_hex}{cookie_hex}'
class Reporter:
def __init__(self):
self.items = []
@contextlib.contextmanager
def report(self):
t0 = time.monotonic()
yield
value = time.monotonic() - t0
self.items.append(value * 1000)
def summary(self, concurrency):
n = len(self.items)
s = sum(self.items)
avg = s / n if n > 0 else 0
s_items = list(sorted(self.items))
result = [f'avg={avg:.1f}']
p_s = [0.5, 0.8, 0.9, 0.95, 0.99]
if n > 0:
for p in p_s:
v = s_items[int(n * p)]
result.append('p{}={:.1f}'.format(int(p * 100), v))
qps = (1000 / avg) * concurrency
result.append(f'qps={qps:.0f}')
print(' '.join(result))
self.items = []
READER_REPORTER = Reporter()
WRITER_REPORTER = Reporter()
async def put(session, fid: str, content: bytes):
url = f'{seaweedfs_url}/{fid}'
data = aiohttp.FormData()
data.add_field(
'file',
content,
content_type='application/gzip'
)
async with session.put(url, data=data) as response:
result = await response.read()
return response.status, result
async def get(session, fid: str):
url = f'{seaweedfs_url}/{fid}'
async with session.get(url) as response:
result = await response.read()
return response.status, result
async def reader_task(session, fid_s, n):
fid_s = list(fid_s)
random.shuffle(fid_s)
for fid in fid_s:
with READER_REPORTER.report():
status, r = await get(session, fid)
assert status == 200, (status, r)
async def writer_task(session, fid_s, n):
fid_s = list(fid_s)
random.shuffle(fid_s)
for fid in fid_s:
content = random_content()
with WRITER_REPORTER.report():
status, r = await put(session, fid, content)
assert status in (200, 201, 204), (status, r)
async def benchmark(session, num_volume, num_fid, num_round, concurrency):
volumes = list(range(20, 20 + num_volume))
fid_s_s = []
for i in range(concurrency):
fid_s = [random_fid(volumes) for _ in range(num_fid // concurrency)]
fid_s_s.append(fid_s)
loop = asyncio.get_event_loop()
for n in range(num_round):
print(f'{n} ' + '-' * 60)
writer_tasks = []
for i in range(concurrency):
t = writer_task(session, fid_s_s[i], num_round)
writer_tasks.append(loop.create_task(t))
await asyncio.gather(*writer_tasks)
WRITER_REPORTER.summary(concurrency)
reader_tasks = []
for i in range(concurrency):
t = reader_task(session, fid_s_s[i], num_round)
reader_tasks.append(loop.create_task(t))
await asyncio.gather(*reader_tasks)
READER_REPORTER.summary(concurrency)
async def async_main(num_volume, concurrency):
print(f'num_volume={num_volume} concurrency={concurrency}')
async with aiohttp.ClientSession() as session:
await benchmark(
session,
num_fid=1000,
num_round=3,
num_volume=num_volume,
concurrency=concurrency,
)
def main():
num_volume = int(sys.argv[1])
concurrency = int(sys.argv[2])
loop = asyncio.get_event_loop()
loop.run_until_complete(async_main(num_volume, concurrency))
if __name__ == "__main__":
main()
|
63039
|
import pytest
from metagraph.tests.util import default_plugin_resolver
from . import RoundTripper
from metagraph.plugins.python.types import PythonNodeSetType
from metagraph.plugins.numpy.types import NumpyNodeSet, NumpyNodeMap
import numpy as np
def test_nodeset_roundtrip(default_plugin_resolver):
rt = RoundTripper(default_plugin_resolver)
ns = {2, 3, 55}
rt.verify_round_trip(ns)
def test_np_nodemap_2_np_nodeset(default_plugin_resolver):
dpr = default_plugin_resolver
x = NumpyNodeMap(np.array([00, 10, 20]))
assert len(x) == 3
intermediate = NumpyNodeSet(np.array([0, 1, 2]))
y = dpr.translate(x, NumpyNodeSet)
dpr.assert_equal(y, intermediate)
def test_np_nodeset_2_py_nodeset(default_plugin_resolver):
dpr = default_plugin_resolver
x = NumpyNodeSet(np.array([9, 5, 1]))
assert len(x) == 3
intermediate = {5, 1, 9}
y = dpr.translate(x, PythonNodeSetType)
dpr.assert_equal(y, intermediate)
def test_py_nodeset_2_np_nodeset(default_plugin_resolver):
dpr = default_plugin_resolver
x = {2, 1, 5}
assert len(x) == 3
intermediate = NumpyNodeSet.from_mask(
np.array([False, True, True, False, False, True])
)
y = dpr.translate(x, NumpyNodeSet)
dpr.assert_equal(y, intermediate)
|
63068
|
from enum import Enum
from typing import Dict, Any
from jwt.algorithms import get_default_algorithms
from cryptography.hazmat._types import (
_PRIVATE_KEY_TYPES,
_PUBLIC_KEY_TYPES,
)
# custom types
PrivateKey = _PRIVATE_KEY_TYPES
PublicKey = _PUBLIC_KEY_TYPES
JWTClaims = Dict[str, Any]
class EncryptionKeyFormat(str, Enum):
"""
represent the supported formats for storing encryption keys.
- PEM (https://en.wikipedia.org/wiki/Privacy-Enhanced_Mail)
- SSH (RFC4716) or short format (RFC4253, section-6.6, explained here: https://coolaj86.com/articles/the-ssh-public-key-format/)
- DER (https://en.wikipedia.org/wiki/X.690#DER_encoding)
"""
pem = 'pem'
ssh = 'ssh'
der = 'der'
# dynamic enum because pyjwt does not define one
# see: https://pyjwt.readthedocs.io/en/stable/algorithms.html for possible values
JWTAlgorithm = Enum('JWTAlgorithm', [(k,k) for k in get_default_algorithms().keys()])
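

# --- Hedged usage sketch (editor addition, not part of the original API) ---
# Illustrates how the dynamically built JWTAlgorithm enum behaves: its member
# names simply mirror whatever algorithms pyjwt registers by default (e.g.
# "HS256", "RS256"), so the exact set depends on the installed backends.
if __name__ == "__main__":
    print([member.name for member in JWTAlgorithm])
    # Members can be looked up like any other Enum member; "HS256" is assumed
    # to be available in a default pyjwt installation.
    assert JWTAlgorithm["HS256"].value == "HS256"
    print(EncryptionKeyFormat.pem, EncryptionKeyFormat("der"))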
|
63073
|
import board
import busio
import digitalio
import time
import adafruit_requests as requests
from adafruit_wiznet5k.adafruit_wiznet5k import *
import adafruit_wiznet5k.adafruit_wiznet5k_socket as socket
from adafruit_wiznet5k.adafruit_wiznet5k_ntp import NTP
import adafruit_wiznet5k.adafruit_wiznet5k_dns as dns
days = ("Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday")
##SPI0
SPI0_SCK = board.GP18
SPI0_TX = board.GP19
SPI0_RX = board.GP16
SPI0_CSn = board.GP17
##reset
W5x00_RSTn = board.GP20
print("Wiznet5k NTP Client ( DHCP)")
# Setup your network configuration below
# random MAC, later should change this value on your vendor ID
MY_MAC = (0x00, 0x01, 0x02, 0xFF, 0xFF, 0xFF)
IP_ADDRESS = (192, 168, 1, 11)
SUBNET_MASK = (255, 255, 255, 0)
GATEWAY_ADDRESS = (192, 168, 1, 1)
DNS_SERVER = (8, 8, 8, 8)
port = 5000
ntp_server_port= 123
led = digitalio.DigitalInOut(board.GP25)
led.direction = digitalio.Direction.OUTPUT
ethernetRst = digitalio.DigitalInOut(W5x00_RSTn)
ethernetRst.direction = digitalio.Direction.OUTPUT
# For Adafruit Ethernet FeatherWing
cs = digitalio.DigitalInOut(SPI0_CSn)
# cs = digitalio.DigitalInOut(board.D5)
spi_bus = busio.SPI(SPI0_SCK, MOSI=SPI0_TX, MISO=SPI0_RX)
# Reset W5500 first
ethernetRst.value = False
time.sleep(1)
ethernetRst.value = True
# Initialize ethernet interface without DHCP
#eth = WIZNET5K(spi_bus, cs, is_dhcp=False, mac=MY_MAC, debug=False)
# Initialize ethernet interface with DHCP
eth = WIZNET5K(spi_bus, cs, is_dhcp=True, mac=MY_MAC, debug=False)
print("Chip Version:", eth.chip)
print("MAC Address:", [hex(i) for i in eth.mac_address])
print("My IP address is:", eth.pretty_ip(eth.ip_address))
# Initialize a socket for our server
#socket.set_interface(eth)
# Set network configuration
#eth.ifconfig = (IP_ADDRESS, SUBNET_MASK, GATEWAY_ADDRESS, DNS_SERVER)
#NTP
ntpserver_ip = eth.pretty_ip(eth.get_host_by_name("time.google.com"))
print("NTP : %s" % ntpserver_ip) #DNS Domain
ntp = NTP(iface=eth, ntp_address=ntpserver_ip, utc=9)
cal = ntp.get_time()
print("The date is %s %d/%d/%d" %(days[cal.tm_wday], cal.tm_mday,cal.tm_mon,cal.tm_year))
print("The time is %d:%02d:%02d" %(cal.tm_hour,cal.tm_min,cal.tm_sec))
|
63087
|
import torch
from torch import nn, distributed as dist
from torch.nn import functional as F
class LabelSmoothingLoss(nn.Module):
def __init__(self, ignore_index, eps=0.1, reduction="mean"):
super().__init__()
self.ignore_index = ignore_index
self.eps = eps
self.reduction = reduction
def forward(self, output, target):
n_class = output.shape[-1]
output = F.log_softmax(output, -1)
if self.ignore_index > -1:
n_class -= 1
true_dist = torch.full_like(output, self.eps / n_class)
true_dist.scatter_(
1, target.data.unsqueeze(1), 1 - self.eps + self.eps / n_class
)
if self.ignore_index > -1:
true_dist[:, self.ignore_index] = 0
padding_mat = target.data == self.ignore_index
mask = torch.nonzero(padding_mat, as_tuple=False)
if mask.dim() > 0:
true_dist.index_fill_(0, mask.squeeze(), 0.0)
loss = F.kl_div(
output,
true_dist.detach(),
reduction="sum" if self.reduction != "none" else "none",
)
if self.reduction == "none":
loss = loss.sum(1)
elif self.reduction == "mean":
if self.ignore_index > -1:
loss = loss / (target.shape[0] - padding_mat.sum().item())
else:
loss = loss / target.shape[0]
return loss
class MixLoss(nn.Module):
def __init__(self, eps=0, reduction="mean"):
super().__init__()
self.eps = eps
self.reduction = reduction
def forward(self, output, target1, target2, interpolation):
n_class = output.shape[-1]
output = F.log_softmax(output, -1)
true_dist = torch.full_like(output, self.eps / n_class)
true1 = true_dist.scatter(
1, target1.data.unsqueeze(1), 1 - self.eps + self.eps / n_class
)
true2 = true_dist.scatter(
1, target2.data.unsqueeze(1), 1 - self.eps + self.eps / n_class
)
inter = torch.as_tensor(interpolation).unsqueeze(-1)
true_dist = inter * true1 + (1 - inter) * true2
loss = F.kl_div(
output,
true_dist.detach(),
reduction="sum" if self.reduction != "none" else "none",
)
if self.reduction == "none":
loss = loss.sum(1)
elif self.reduction == "mean":
loss = loss / target1.shape[0]
return loss
class DINOLoss(nn.Module):
def __init__(
self,
out_dim,
n_crop,
warmup_teacher_temperature,
teacher_temperature,
warmup_teacher_epoch,
n_epoch,
student_temperature=0.1,
center_momentum=0.9,
):
super().__init__()
self.student_temperature = student_temperature
self.center_momentum = center_momentum
self.n_crop = n_crop
self.register_buffer("center", torch.zeros(1, out_dim))
self.teacher_temperature_schedule = torch.cat(
(
torch.linspace(
warmup_teacher_temperature,
teacher_temperature,
warmup_teacher_epoch,
),
torch.ones(n_epoch - warmup_teacher_epoch) * teacher_temperature,
)
).tolist()
def forward(self, student_output, teacher_output, epoch):
student_out = student_output / self.student_temperature
student_out = student_out.chunk(self.n_crop)
temperature = self.teacher_temperature_schedule[epoch]
teacher_out = torch.softmax((teacher_output - self.center) / temperature, -1)
teacher_out = teacher_out.detach().chunk(2)
total_loss = 0
n_loss_term = 0
for i_q, q in enumerate(teacher_out):
for v in range(len(student_out)):
if v == i_q:
continue
loss = torch.sum(-q * torch.log_softmax(student_out[v], -1), -1)
total_loss += loss.mean()
n_loss_term += 1
total_loss /= n_loss_term
self.update_center(teacher_output)
return total_loss
@torch.no_grad()
def update_center(self, teacher_out):
batch_center = torch.sum(teacher_out, dim=0, keepdim=True)
dist.all_reduce(batch_center)
batch_center = batch_center / (len(teacher_out) * dist.get_world_size())
self.center.mul_(self.center_momentum).add_(
batch_center, alpha=1 - self.center_momentum
)
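

# --- Hedged smoke test (editor addition): a minimal sketch of LabelSmoothingLoss,
# assuming class 0 is the padding index; shapes and values are illustrative only.
# DINOLoss is not exercised here because it requires an initialized
# torch.distributed process group.
if __name__ == "__main__":
    torch.manual_seed(0)
    logits = torch.randn(4, 6)            # batch of 4 examples, 6 classes
    targets = torch.tensor([1, 2, 0, 0])  # the last two positions are padding
    criterion = LabelSmoothingLoss(ignore_index=0, eps=0.1)
    print("label smoothing loss:", criterion(logits, targets).item())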
|
63088
|
import random
import torch
import time
import os
import numpy as np
from torch.utils.data import Dataset
from functools import partial
from .utils import dataset_to_dataloader, max_io_workers
from pytorch_transformers.tokenization_bert import BertTokenizer
# the following will be shared on other datasets too if not, they should become part of the ListeningDataset
# maybe make SegmentedScanDataset with only static functions and then inherit.
from .utils import check_segmented_object_order, sample_scan_object, pad_samples, objects_bboxes
from .utils import instance_labels_of_context, mean_rgb_unit_norm_transform
from ...data_generation.nr3d import decode_stimulus_string
class ListeningDataset(Dataset):
def __init__(self, references, scans, vocab, max_seq_len, points_per_object, max_distractors,
class_to_idx=None, object_transformation=None,
visualization=False, feat2dtype=None,
num_class_dim=525, evalmode=False):
self.references = references
self.scans = scans
self.vocab = vocab
self.max_seq_len = max_seq_len
self.points_per_object = points_per_object
self.max_distractors = max_distractors
self.max_context_size = self.max_distractors + 1 # to account for the target.
self.class_to_idx = class_to_idx
self.visualization = visualization
self.object_transformation = object_transformation
self.feat2dtype = feat2dtype
self.max_2d_view = 5
self.num_class_dim = num_class_dim
self.evalmode = evalmode
self.bert_tokenizer = BertTokenizer.from_pretrained(
'bert-base-uncased')
assert self.bert_tokenizer.encode(self.bert_tokenizer.pad_token) == [0]
if not check_segmented_object_order(scans):
raise ValueError
def __len__(self):
return len(self.references)
def get_reference_data(self, index):
ref = self.references.loc[index]
scan = self.scans[ref['scan_id']]
target = scan.three_d_objects[ref['target_id']]
tokens = np.array(self.vocab.encode(ref['tokens'], self.max_seq_len), dtype=np.long)
is_nr3d = ref['dataset'] == 'nr3d'
return scan, target, tokens, ref['tokens'], is_nr3d
def prepare_distractors(self, scan, target):
target_label = target.instance_label
# First add all objects with the same instance-label as the target
distractors = [o for o in scan.three_d_objects if
(o.instance_label == target_label and (o != target))]
# Then all more objects up to max-number of distractors
already_included = {target_label}
clutter = [o for o in scan.three_d_objects if o.instance_label not in already_included]
np.random.shuffle(clutter)
distractors.extend(clutter)
distractors = distractors[:self.max_distractors]
np.random.shuffle(distractors)
return distractors
def __getitem__(self, index):
res = dict()
scan, target, tokens, text_tokens, is_nr3d = self.get_reference_data(index)
## BERT tokenize
token_inds = torch.zeros(self.max_seq_len, dtype=torch.long)
indices = self.bert_tokenizer.encode(
' '.join(text_tokens), add_special_tokens=True)
indices = indices[:self.max_seq_len]
token_inds[:len(indices)] = torch.tensor(indices)
token_num = torch.tensor(len(indices), dtype=torch.long)
# Make a context of distractors
context = self.prepare_distractors(scan, target)
# Add target object in 'context' list
target_pos = np.random.randint(len(context) + 1)
context.insert(target_pos, target)
# sample point/color for them
samples = np.array([sample_scan_object(o, self.points_per_object) for o in context])
# mark their classes
res['class_labels'] = instance_labels_of_context(context, self.max_context_size, self.class_to_idx)
if self.object_transformation is not None:
samples, offset = self.object_transformation(samples)
res['obj_offset'] = np.zeros((self.max_context_size, offset.shape[1])).astype(np.float32)
res['obj_offset'][:len(offset),:] = offset.astype(np.float32)
res['context_size'] = len(samples)
# take care of padding, so that a batch has same number of N-objects across scans.
res['objects'] = pad_samples(samples, self.max_context_size)
# Get a mask indicating which objects have the same instance-class as the target.
target_class_mask = np.zeros(self.max_context_size, dtype=np.bool)
target_class_mask[:len(context)] = [target.instance_label == o.instance_label for o in context]
res['target_class'] = self.class_to_idx[target.instance_label]
res['target_pos'] = target_pos
res['target_class_mask'] = target_class_mask
res['tokens'] = tokens
res['token_inds'] = token_inds.numpy().astype(np.int64)
res['token_num'] = token_num.numpy().astype(np.int64)
res['is_nr3d'] = is_nr3d
if self.visualization:
distrators_pos = np.zeros((6)) # 6 is the maximum context size we used in dataset collection
object_ids = np.zeros((self.max_context_size))
j = 0
for k, o in enumerate(context):
if o.instance_label == target.instance_label and o.object_id != target.object_id:
distrators_pos[j] = k
j += 1
for k, o in enumerate(context):
object_ids[k] = o.object_id
res['utterance'] = self.references.loc[index]['utterance']
res['stimulus_id'] = self.references.loc[index]['stimulus_id']
res['distrators_pos'] = distrators_pos
res['object_ids'] = object_ids
res['target_object_id'] = target.object_id
if self.evalmode:
return res
# load cached 2D context information
if os.path.isfile('../data/scannet_frames_25k_gtobjfeat_aggregate/%s.npy'%scan.scan_id):
context_2d = np.load('../data/scannet_frames_25k_gtobjfeat_aggregate/%s.npy'%scan.scan_id,allow_pickle=True,encoding='latin1')
objfeat_2d = context_2d.item()['obj_feat']
bbox_2d = context_2d.item()['obj_coord']
bboxsize_2d = context_2d.item()['obj_size']
obj_depth = context_2d.item()['obj_depth']
campose_2d = context_2d.item()['camera_pose']
ins_id_2d = context_2d.item()['instance_id']
if (self.feat2dtype.replace('3D',''))=='ROI': featdim = 2048
elif (self.feat2dtype.replace('3D',''))=='clsvec': featdim = self.num_class_dim
elif (self.feat2dtype.replace('3D',''))=='clsvecROI': featdim = 2048+self.num_class_dim
feat_2d = np.zeros((self.max_context_size, featdim)).astype(np.float32)
coords_2d = np.zeros((self.max_context_size, 4+12)).astype(np.float32)
selected_2d_idx = 0
selected_context_id = [o.object_id+1 for o in context] ## backbround included in cache, so +1
## only for creating tensor of the correct size
selected_objfeat_2d = objfeat_2d[selected_context_id,selected_2d_idx,:]
selected_bbox_2d = bbox_2d[selected_context_id,selected_2d_idx,:]
selected_bboxsize_2d = bboxsize_2d[selected_context_id,selected_2d_idx]
selected_obj_depth = obj_depth[selected_context_id,selected_2d_idx]
selected_campose_2d = campose_2d[selected_context_id,selected_2d_idx,:]
selected_ins_id_2d = ins_id_2d[selected_context_id,selected_2d_idx]
## Fill in randomly selected view of 2D features
for ii in range(len(selected_context_id)):
cxt_id = selected_context_id[ii]
view_id = random.randint(0, max(0,int((ins_id_2d[cxt_id,:]!=0).astype(np.float32).sum())-1))
selected_objfeat_2d[ii,:] = objfeat_2d[cxt_id,view_id,:]
selected_bbox_2d[ii,:] = bbox_2d[cxt_id,view_id,:]
selected_bboxsize_2d[ii] = bboxsize_2d[cxt_id,view_id]
selected_obj_depth[ii] = obj_depth[cxt_id,view_id]
selected_campose_2d[ii,:] = campose_2d[cxt_id,view_id,:]
if self.feat2dtype!='clsvec':
feat_2d[:len(selected_context_id),:2048] = selected_objfeat_2d
for ii in range(len(res['class_labels'])):
if self.feat2dtype=='clsvec':
feat_2d[ii,res['class_labels'][ii]] = 1.
if self.feat2dtype=='clsvecROI':
feat_2d[ii,2048+res['class_labels'][ii]] = 1.
coords_2d[:len(selected_context_id),:] = np.concatenate([selected_bbox_2d, selected_campose_2d[:,:12]],axis=-1)
coords_2d[:,0], coords_2d[:,2] = coords_2d[:,0]/1296., coords_2d[:,2]/1296. ## norm by image size
coords_2d[:,1], coords_2d[:,3] = coords_2d[:,1]/968., coords_2d[:,3]/968.
else:
print('please prepare the cached 2d feature')
exit(0)
res['feat_2d'] = feat_2d
res['coords_2d'] = coords_2d
return res
def make_data_loaders(args, referit_data, vocab, class_to_idx, scans, mean_rgb, seed=None):
n_workers = args.n_workers
if n_workers == -1:
n_workers = max_io_workers()
data_loaders = dict()
is_train = referit_data['is_train']
splits = ['train', 'test']
object_transformation = partial(mean_rgb_unit_norm_transform, mean_rgb=mean_rgb,
unit_norm=args.unit_sphere_norm)
for split in splits:
mask = is_train if split == 'train' else ~is_train
d_set = referit_data[mask]
d_set.reset_index(drop=True, inplace=True)
max_distractors = args.max_distractors if split == 'train' else args.max_test_objects - 1
        ## note the minus-1 here; without it this would be a silly small bug.
# if split == test remove the utterances of unique targets
if split == 'test':
def multiple_targets_utterance(x):
_, _, _, _, distractors_ids = decode_stimulus_string(x.stimulus_id)
return len(distractors_ids) > 0
            multiple_targets_mask = d_set.apply(multiple_targets_utterance, axis=1)
            print("length of dataset before removing non multiple test utterances {}".format(len(d_set)))
            d_set = d_set[multiple_targets_mask]
            d_set.reset_index(drop=True, inplace=True)
            print("removed {} utterances from the test set that don't have multiple distractors".format(
                np.sum(~multiple_targets_mask)))
            print("length of dataset after removing non multiple test utterances {}".format(len(d_set)))
assert np.sum(~d_set.apply(multiple_targets_utterance, axis=1)) == 0
dataset = ListeningDataset(references=d_set,
scans=scans,
vocab=vocab,
max_seq_len=args.max_seq_len,
points_per_object=args.points_per_object,
max_distractors=max_distractors,
class_to_idx=class_to_idx,
object_transformation=object_transformation,
visualization=args.mode == 'evaluate',
feat2dtype=args.feat2d,
num_class_dim = 525 if '00' in args.scannet_file else 608,
evalmode=(args.mode=='evaluate'))
seed = seed
if split == 'test':
seed = args.random_seed
data_loaders[split] = dataset_to_dataloader(dataset, split, args.batch_size, n_workers, pin_memory=True, seed=seed)
return data_loaders
|
63135
|
from __future__ import with_statement # this is to work with python2.5
import terapyps
from pyps import workspace
workspace.delete("convol3x3")
with terapyps.workspace("convol3x3.c", name="convol3x3", deleteOnClose=False,recoverInclude=False) as w:
for f in w.fun:
f.terapix_code_generation(debug=True)
# w.compile(terapyps.Maker())
|
63144
|
from Treap import Treap
from math import log
class IKS:
def __init__(self):
self.treap = None
self.n = [0, 0]
@staticmethod
def KSThresholdForPValue(pvalue, N):
'''Threshold for KS Test given a p-value
Args:
pval (float): p-value.
N (int): the size of the samples.
Returns:
Threshold t to compare groups 0 and 1. The null-hypothesis is discarded if KS() > t.
'''
ca = (-0.5 * log(pvalue)) ** 0.5
        return ca * (2.0 * N / N ** 2) ** 0.5  # i.e. ca * sqrt(2/N), consistent with Test()
@staticmethod
def CAForPValue(pvalue):
'''ca for KS Test given a p-value
Args:
pval (float): p-value.
Returns:
Threshold the "ca" that can be used to compute a threshold for KS().
'''
return (-0.5 * log(pvalue)) ** 0.5
def KS(self):
'''Kolmogorov-Smirnov statistic. Both groups must have the same number of observations.
Returns:
The KS statistic D.
'''
assert(self.n[0] == self.n[1])
N = self.n[0]
if N == 0:
return 0
return max(self.treap.max_value, -self.treap.min_value) / N
def Kuiper(self):
'''Kuiper statistic. Both groups must have the same number of observations.
Returns:
The Kuiper statistic.
'''
assert(self.n[0] == self.n[1])
N = self.n[0]
if N == 0:
return 0
return (self.treap.max_value - self.treap.min_value) / N
def Add(self, obs, group):
'''Insert new observation into one of the groups.
Args:
            obs: the value of the observation. Tip: a tuple (actual value, random value) is recommended when there is overlap between groups or if values are not guaranteed to be mostly unique.
group (int): which group the observation belongs to. Must be either 0 or 1.
'''
group = 0 if group == 2 else group
assert(group == 0 or group == 1)
key = (obs, group)
self.n[group] += 1
left, left_g, right, val = None, None, None, None
left, right = Treap.SplitKeepRight(self.treap, key)
left, left_g = Treap.SplitGreatest(left)
val = 0 if left_g is None else left_g.value
left = Treap.Merge(left, left_g)
right = Treap.Merge(Treap(key, val), right)
Treap.SumAll(right, 1 if group == 0 else -1)
self.treap = Treap.Merge(left, right)
def Remove(self, obs, group):
'''Remove observation from one of the groups.
Args:
            obs: the value of the observation. Must be identical to a previously inserted observation (including the random element of a tuple, if this was the case).
group (int): which group the observation belongs to. Must be either 0 or 1.
'''
group = 0 if group == 2 else group
assert(group == 0 or group == 1)
key = (obs, group)
self.n[group] -= 1
left, right, right_l = None, None, None
left, right = Treap.SplitKeepRight(self.treap, key)
right_l, right = Treap.SplitSmallest(right)
if right_l is not None and right_l.key == key:
Treap.SumAll(right, -1 if group == 0 else 1)
else:
right = Treap.Merge(right_l, right)
self.treap = Treap.Merge(left, right)
def Test(self, ca = 1.95):
'''Test whether the reference and sliding window follow the different probability distributions according to KS Test.
Args:
ca: ca is a parameter used to calculate the threshold for the Kolmogorov-Smirnov statistic. The default value corresponds to a p-value of 0.001. Use IKS.CAForPValue to obtain an appropriate ca.
Returns:
True if we **reject** the null-hypothesis that states that both windows have the same distribution. In other words, we can consider that the windows have now different distributions.
'''
ca = ca or 1.95
n = self.n[0]
return self.KS() > ca * (2 * n / n ** 2) ** 0.5
IKS.AddObservation = IKS.Add
IKS.RemoveObservation = IKS.Remove
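

# --- Hedged usage sketch (editor addition): drives the incremental KS test with
# two equally sized samples drawn from different Gaussians. It assumes the Treap
# module imported above is available; the (value, random) observation tuples
# follow the tip in Add()'s docstring to keep keys unique.
if __name__ == "__main__":
    import random
    random.seed(0)
    iks = IKS()
    for _ in range(200):
        iks.Add((random.gauss(0, 1), random.random()), 0)  # reference window
        iks.Add((random.gauss(3, 1), random.random()), 1)  # sliding window, shifted
    print("KS statistic:", iks.KS())
    print("distributions differ:", iks.Test(IKS.CAForPValue(0.001)))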
|
63156
|
import sys
import time
from sdk import *
addr_list = addresses()
_pid = "id_20020"
_proposer = addr_list[0]
_initial_funding = (int("2") * 10 ** 9)
_each_funding = (int("3") * 10 ** 9)
_funding_goal_general = (int("10") * 10 ** 9)
_prop = Proposal(_pid, "general", "proposal for fund", "proposal headline", _proposer, _initial_funding)
if __name__ == "__main__":
# create proposal
_prop.send_create()
time.sleep(1)
encoded_pid = _prop.pid
# check proposal state
check_proposal_state(encoded_pid, ProposalOutcomeInProgress, ProposalStatusFunding)
# 1st fund
fund_proposal(encoded_pid, _each_funding, addr_list[0])
check_proposal_state(encoded_pid, ProposalOutcomeInProgress, ProposalStatusFunding)
# 2nd fund
fund_proposal(encoded_pid, _each_funding, addr_list[1])
check_proposal_state(encoded_pid, ProposalOutcomeInProgress, ProposalStatusFunding)
# 3rd fund
fund_proposal(encoded_pid, _each_funding, addr_list[2])
check_proposal_state(encoded_pid, ProposalOutcomeInProgress, ProposalStatusVoting)
    print(bcolors.OKGREEN + "#### Test fund proposals succeed" + bcolors.ENDC)
    print("")
|
63194
|
from torch import nn
def init_weight(weight, init, init_range, init_std):
if init == "uniform":
nn.init.uniform_(weight, -init_range, init_range)
elif init == "normal":
nn.init.normal_(weight, 0.0, init_std)
def init_bias(bias):
nn.init.constant_(bias, 0.0)
def weights_init(m, init, init_range, init_std, proj_init_std):
classname = m.__class__.__name__
if classname.find("Linear") != -1:
if hasattr(m, "weight") and m.weight is not None:
init_weight(m.weight, init, init_range, init_std)
if hasattr(m, "bias") and m.bias is not None:
init_bias(m.bias)
elif classname.find("Embedding") != -1:
if hasattr(m, "weight"):
init_weight(m.weight, init, init_range, init_std)
elif classname.find("LayerNorm") != -1:
if hasattr(m, "weight"):
nn.init.normal_(m.weight, 1.0, init_std)
if hasattr(m, "bias") and m.bias is not None:
init_bias(m.bias)
else:
if hasattr(m, "r_emb"):
init_weight(m.r_emb, init, init_range, init_std)
if hasattr(m, "r_w_bias"):
init_weight(m.r_w_bias, init, init_range, init_std)
if hasattr(m, "r_r_bias"):
init_weight(m.r_r_bias, init, init_range, init_std)
if hasattr(m, "r_bias"):
init_bias(m.r_bias)
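

# --- Hedged usage sketch (editor addition): the usual pattern is to bind the
# hyper-parameters with functools.partial and hand the result to Module.apply,
# which visits every submodule. The tiny model below is illustrative only.
if __name__ == "__main__":
    from functools import partial

    model = nn.Sequential(nn.Embedding(10, 8), nn.Linear(8, 4), nn.LayerNorm(4))
    init_fn = partial(weights_init, init="normal", init_range=0.1,
                      init_std=0.02, proj_init_std=0.01)
    model.apply(init_fn)
    print(model[1].bias)  # Linear biases are zeroed by init_bias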
|
63229
|
from .decorators import endpoint
from ..definitions.types import InstrumentName
from ..endpoints.annotations import LongClientExtensions
from ..endpoints.annotations import LongUnits
from ..endpoints.annotations import ShortClientExtensions
from ..endpoints.annotations import ShortUnits
from ..endpoints.position import *
from ..definitions.helpers import sentinel
__all__ = ['PositionInterface']
class PositionInterface(object):
@endpoint(GETPositions)
def list_positions(self):
"""
List all Positions for an Account. The Positions returned are for every
instrument that has had a position during the lifetime of an the
Account.
Returns:
status [200]
:class:`~async_v20.interface.response.Response`
(positions=( :class:`~async_v20.Position`, ...),
lastTransactionID= :class:`~async_v20.TransactionID`)
"""
pass
@endpoint(GETOpenPositions)
def list_open_positions(self):
"""
List all open Positions for an Account. An open Position is a Position
in an Account that currently has a Trade opened for it.
Returns:
status [200]
:class:`~async_v20.interface.response.Response`
(positions=( :class:`~async_v20.Position`, ...),
lastTransactionID= :class:`~async_v20.TransactionID`)
"""
pass
@endpoint(GETPositionsInstrument)
def get_position(self, instrument: InstrumentName = sentinel):
"""
Get the details of a single Instrument's Position in an Account. The
Position may by open or not.
Args:
instrument: :class:`~async_v20.InstrumentName`
Name of the Instrument
Returns:
status [200]
:class:`~async_v20.interface.response.Response`
(position= :class:`~async_v20.Position`,
lastTransactionID= :class:`~async_v20.TransactionID`)
"""
pass
@endpoint(PUTPositionsInstrumentClose)
def close_position(self,
instrument: InstrumentName = sentinel,
long_units: LongUnits = sentinel,
long_client_extensions: LongClientExtensions = sentinel,
short_units: ShortUnits = sentinel,
short_client_extensions: ShortClientExtensions = sentinel):
"""
Closeout the open Position for a specific instrument in an Account.
.. note::
- Either long_units or short_units **MUST** be specified.
- Do **NOT** specify `ALL` for `long_units` **or** `short_units`
if there are no units to close.
Args:
instrument: :class:`~async_v20.InstrumentName`
Name of the Instrument
long_units: :class:`~async_v20.endpoints.annotations.LongUnits`
Indication of how much of the long Position to closeout. Either
the string "ALL", the string "NONE", or a DecimalNumber
representing how many units of the long position to close using
a PositionCloseout MarketOrder. The units specified must always
be positive.
long_client_extensions: :class:`~async_v20.endpoints.annotations.LongClientExtensions`
The client extensions to add to the MarketOrder used to close
the long position.
short_units: :class:`~async_v20.endpoints.annotations.ShortUnits`
Indication of how much of the short Position to closeout.
Either the string "ALL", the string "NONE", or a DecimalNumber
representing how many units of the short position to close
using a PositionCloseout MarketOrder. The units specified must
always be positive.
short_client_extensions: :class:`~async_v20.endpoints.annotations.ShortClientExtensions`
The client extensions to add to the MarketOrder used to close
the short position.
Returns:
status [200]
:class:`~async_v20.interface.response.Response`
(longOrderCreateTransaction= :class:`~async_v20.MarketOrderTransaction`,
longOrderFillTransaction= :class:`~async_v20.OrderFillTransaction`,
longOrderCancelTransaction= :class:`~async_v20.OrderCancelTransaction`,
shortOrderCreateTransaction= :class:`~async_v20.MarketOrderTransaction`,
shortOrderFillTransaction= :class:`~async_v20.OrderFillTransaction`,
shortOrderCancelTransaction= :class:`~async_v20.OrderCancelTransaction`,
relatedTransactionIDs=( :class:`~async_v20.TransactionID`, ...),
lastTransactionID= :class:`~async_v20.TransactionID`)
status [400]
:class:`~async_v20.interface.response.Response`
(longOrderRejectTransaction= :class:`~async_v20.MarketOrderRejectTransaction`,
shortOrderRejectTransaction= :class:`~async_v20.MarketOrderRejectTransaction`,
relatedTransactionIDs=( :class:`~async_v20.TransactionID`, ...),
lastTransactionID= :class:`~async_v20.TransactionID`,
errorCode= :class:`~builtins.str`,
errorMessage= :class:`~builtins.str`)
status [401]
:class:`~async_v20.interface.response.Response`
(longOrderRejectTransaction= :class:`~async_v20.MarketOrderRejectTransaction`,
shortOrderRejectTransaction= :class:`~async_v20.MarketOrderRejectTransaction`,
relatedTransactionIDs=( :class:`~async_v20.TransactionID`, ...),
lastTransactionID= :class:`~async_v20.TransactionID`,
errorCode= :class:`~builtins.str`,
errorMessage= :class:`~builtins.str`)
"""
pass
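

# --- Hedged usage sketch (editor addition): assumes async_v20's OandaClient
# (which mixes in PositionInterface) and that account credentials are supplied
# the way the client expects, e.g. via an OANDA_TOKEN environment variable.
# Both assumptions live outside this file, so treat this purely as a sketch.
if __name__ == "__main__":
    import asyncio
    from async_v20 import OandaClient

    async def close_long_eur_usd():
        async with OandaClient() as client:
            rsp = await client.close_position('EUR_USD', long_units='ALL')
            print(rsp)

    asyncio.get_event_loop().run_until_complete(close_long_eur_usd())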
|
63250
|
from ...utilities import db, moocdb_utils
from common import *
def GetForums(vars):
output_items = []
resource_type_id = moocdb_utils.GetResourceTypeMap(vars)['forum']
# course_doc = vars['resource_list'][0]
# discussion_topics = course_doc['metadata']['discussion_topics']
# src_forums = [{'id': discussion_topics[name]['id'], 'name': name} for name in discussion_topics.keys()]
# for forum in src_forums:
# output_items.append({
# 'original_id': forum['id'],
# 'resource_uri': forum['id'],
# 'resource_name': forum['name'],
# 'resource_parent_original_id': None,
# 'resource_child_number': None,
# 'resource_type_id': resource_type_id,
# })
output_items.append({
'original_id': 'openedx_forum',
'resource_uri': None,
'resource_name': 'Forum',
'resource_parent_original_id': None,
'resource_child_number': None,
'resource_type_id': resource_type_id,
})
return output_items
|
63321
|
print('... Importing simuvex/engines/vex/expressions/get.py ...')
from angr.engines.vex.expressions.get import *
|
63335
|
import csv
from site_crawler.cleaner.cleaner import Cleaner
class Dataset_Builder:
def __init__(self):
self.cleaner = Cleaner()
self.create_csv_headers()
def create_csv_headers(self):
csv_files = [
'negative_sentiment',
'positive_sentiment',
'dataset_sentiment'
]
for csv_file in csv_files:
with open('../data/dataset/csv/' + csv_file + '.csv', 'a') as f:
writer = csv.writer(f)
writer.writerow(["text", "label"])
    def write_tweet_txt(self, sentiment, name):
        line = sentiment.strip()
        cleaned_line = self.cleaner.clean_tweets(line)
        with open('../data/dataset/txt/' + name + '.txt', 'a') as f:
            f.write(cleaned_line)
            f.write('\n')
def write_tweet_csv(self, sentiment, name, polarity):
with open('../data/dataset/csv/' + name + '.csv', 'a') as f:
writer = csv.writer(f)
line = sentiment.strip()
cleaned_line = self.cleaner.clean_tweets(line)
            writer.writerow([cleaned_line, polarity])
def extract_sentiment_csv(self,csv_name):
with open('../data/twitter_data/labeled_data/unlabeled_'+csv_name+'.csv', newline='', encoding='utf-8') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
# negative
if row['label'] == '-1':
                    self.write_tweet_txt(row['text'].strip(), 'negative_sentiment')
self.write_tweet_csv(row['text'].strip(), 'negative_sentiment','-1')
self.write_tweet_csv(row['text'].strip(), 'dataset_sentiment','-1')
# positive
elif row['label'] == '1':
self.write_tweet_txt(row['text'].strip(), 'positive_sentiment')
self.write_tweet_csv(row['text'].strip(), 'positive_sentiment', '1')
self.write_tweet_csv(row['text'].strip(), 'dataset_sentiment', '1')
# neutral / irrelevant
elif row['label'] == '0':
self.write_tweet_txt(row['text'].strip(), 'neutral')
if __name__ == "__main__":
D_builder = Dataset_Builder()
tweets_csvs = [
'Business_KE',
'MadeItInAfrica',
'IFCAfrica',
'africareview',
'AfDB_Group',
'_AfricanUnion',
'Taifa_Leo',
'BD_Africa',
'RadioCitizenFM',
'citizentvkenya',
'KTNKenya',
'K24Tv',
'StandardKenya',
'TheStarKenya',
'radiomaisha',
'KBCChannel1',
'CapitalFMKenya',
'African_Markets',
'Africafinancial',
'InvestInAfrica',
'AfricanInvestor',
'forbesafrica',
'cnbcafrica',
'BBCAfrica',
'CNNAfrica',
'allafrica',
'ReutersAfrica',
'VenturesAfrica',
'BBGAfrica',
'GhettoRadio895',
'kenyanwalstreet',
'SokoAnalyst',
'NSEKenya',
'wazua'
]
for tweets_csv in tweets_csvs:
D_builder.extract_sentiment_csv(tweets_csv)
|
63356
|
import os
import pytest
import yaml
from gcasc.utils.yaml_include import YamlIncluderConstructor
from .helpers import read_file, read_yaml
YamlIncluderConstructor.add_to_loader_class(
loader_class=yaml.FullLoader,
base_dir=os.path.dirname(os.path.realpath(__file__)) + "/data",
)
@pytest.fixture()
def file1():
return read_yaml("yaml_include_f1.yml")
@pytest.fixture()
def file2():
return read_yaml("yaml_include_f2.yml")
@pytest.fixture()
def file_txt():
return read_file("yaml_include_txt.md")
def test_files_included_into_yaml(file1, file2, file_txt):
# given
file = "yaml_include.yml"
# when
data = read_yaml(file)
# then
assert data["inc1"] == file1
assert data["inc2"] == [file2, file_txt]
|
63441
|
import tensorflow as tf
from dltk.core.activations import leaky_relu
import numpy as np
def test_leaky_relu():
test_alpha = tf.constant(0.1)
test_inp_1 = tf.constant(1.)
test_inp_2 = tf.constant(-1.)
test_relu_1 = leaky_relu(test_inp_1, test_alpha)
test_relu_2 = leaky_relu(test_inp_2, test_alpha)
with tf.Session() as s:
out_1 = s.run(test_relu_1)
assert np.isclose(out_1, 1.), \
'Got {} but expected {}'.format(out_1, 1.)
out_2 = s.run(test_relu_2)
assert np.isclose(out_2, -0.1), \
'Got {} but expected {}'.format(out_2, -0.1)
|
63453
|
def test_get_symbols(xtb_client):
symbols = list(xtb_client.get_all_symbols())
assert len(symbols) > 0
def test_get_balance(xtb_client):
balance = xtb_client.get_balance()
assert balance.get('balance') is not None
def test_ping(xtb_client):
response = xtb_client.ping()
assert response
|
63460
|
import argparse
import penman
from amrlib.evaluate.smatch_enhanced import compute_smatch
from ensemble.utils import align, get_entries
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Graph Ensemble (Graphene)')
parser.add_argument(
'-g', '--gold', default='./datasets/spring_gold_bio.txt',
type=str,
help='Gold amr file')
parser.add_argument(
'-p', '--prediction', default='./datasets/graphene_bio_all.wiki.txt',
type=str,
help='Prediction files')
args = parser.parse_args()
ref_fname = args.gold
print('Gold file:', ref_fname)
gen_fname = args.prediction
original_gold_entries, gold_entries = get_entries(ref_fname)
print('Prediction file:', gen_fname)
original_test_entries_1, test_entries_1 = get_entries(gen_fname)
print("Align files")
test = align(original_gold_entries, original_test_entries_1, test_entries_1)
precision, recall, f_score = compute_smatch(test, gold_entries)
print(' SMATCH -> P: %.3f, R: %.3f, F: %.3f' % (precision, recall, f_score))
test = [penman.encode(penman.decode(g)) for g in test]
outputs = []
for g, p in zip(original_gold_entries, test):
r = penman.decode(g)
s = '# ::snt ' + r.metadata['snt'] + '\n' + '# ::id ' + r.metadata['id'] + '\n' + p
outputs.append(s)
output_file = args.prediction + '.aligned'
with open(output_file, 'wt') as f:
print('Write prediction to', output_file)
f.write('\n\n'.join(map(str, outputs)))
|
63484
|
from rest_framework import serializers
from openbook_categories.models import Category
class GetCategoriesCategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = (
'id',
'name',
'title',
'description',
'avatar',
'color'
)
|
63490
|
def iob_ranges(words, tags):
"""
IOB -> Ranges
"""
assert len(words) == len(tags)
ranges = []
def check_if_closing_range():
if i == len(tags) - 1 or tags[i + 1].split('_')[0] == 'O':
ranges.append({
'entity': ''.join(words[begin: i + 1]),
'type': temp_type,
'start': begin,
'end': i
})
for i, tag in enumerate(tags):
if tag.split('_')[0] == 'O':
pass
elif tag.split('_')[0] == 'B':
begin = i
temp_type = tag.split('_')[1]
check_if_closing_range()
elif tag.split('_')[0] == 'I':
check_if_closing_range()
return ranges
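

# --- Hedged example (editor addition): assumes the 'B_TYPE'/'I_TYPE'/'O' tag
# convention implied by the split('_') calls above, with character-level tokens
# (hence the plain ''.join when building the entity string).
if __name__ == "__main__":
    words = ['小', '明', '在', '北', '京']
    tags = ['B_PER', 'I_PER', 'O', 'B_LOC', 'I_LOC']
    print(iob_ranges(words, tags))
    # expected: [{'entity': '小明', 'type': 'PER', 'start': 0, 'end': 1},
    #            {'entity': '北京', 'type': 'LOC', 'start': 3, 'end': 4}]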
|
63507
|
import bisect
import copy
import hashlib
import itertools
import json
import operator
import time
from collections import ChainMap
import pmdefaults as PM
from pmdefaults import *
from policy import NEATProperty, PropertyArray, PropertyMultiArray, ImmutablePropertyError, term_separator
CIB_EXPIRED = 2
class CIBEntryError(Exception):
pass
def load_json(filename):
"""
Read CIB node from JSON file
"""
cib_file = open(filename, 'r')
try:
j = json.load(cib_file)
except json.decoder.JSONDecodeError as e:
logging.error("Could not parse CIB file " + filename)
print(e)
return
return j
class CIBNode(object):
cib = None
def __init__(self, node_dict=None):
if node_dict is None:
node_dict = dict()
if not isinstance(node_dict, dict):
raise CIBEntryError("invalid CIB object")
self.root = node_dict.get('root', False)
# otherwise chain matched CIBs
self.link = node_dict.get('link', False)
self.priority = node_dict.get('priority', 0)
# TTL for the CIB node: the node is considered invalid after the time specified
self.expire = node_dict.get('expire', None) or node_dict.get('expires', None) # FIXME expires is deprecated
self.filename = node_dict.get('filename', None)
self.description = node_dict.get('description', '')
# convert to PropertyMultiArray with NEATProperties
properties = node_dict.get('properties', [])
if not isinstance(properties, list):
# properties should be in a list. The list elements are expanded when generating the CIB rows.
properties = [properties]
self.properties = PropertyMultiArray()
for p in properties:
if isinstance(p, list):
self.properties.add([PropertyArray.from_dict(ps) for ps in p])
else:
self.properties.add(PropertyArray.from_dict(p))
self.match = []
# FIXME better error handling if match undefined
for l in node_dict.get('match', []):
# convert to NEATProperties
self.match.append(PropertyArray.from_dict(l))
self.linked = set()
if self.link and not self.match:
logging.warning('link attribute set but no match field!')
self.uid = node_dict.get('uid')
if self.uid is None:
self.uid = self._gen_uid()
def dict(self):
d = {}
for attr in ['uid', 'root', 'link', 'priority', 'filename', 'description', 'expire', ]:
try:
d[attr] = getattr(self, attr)
except AttributeError:
logging.debug("CIB node doesn't contain attribute %s" % attr)
if self.match:
d['match'] = []
for m in self.match:
d['match'].append(m.dict())
d['properties'] = self.properties.list()
return d
@property
def expire(self):
return self._expire
@expire.setter
def expire(self, value):
if value is None:
self._expire = time.time() + CIB_DEFAULT_TIMEOUT
return
value = float(value)
if value == -1:
# does not expire
self._expire = value
elif time.time() > value:
raise CIBEntryError('ignoring expired CIB node', CIB_EXPIRED)
else:
self._expire = value
def _gen_uid(self):
# FIXME generate persistent UIDs
d = self.dict()
for k in ['expire', 'filename', 'uid', ]:
try:
del d[k]
except KeyError:
pass
s = json.dumps(d, indent=0, sort_keys=True)
return hashlib.md5(s.encode('utf-8')).hexdigest()
def json(self, indent=4):
return json.dumps(self.dict(), indent=indent, sort_keys=True)
def resolve_paths(self, path=None):
"""recursively find all paths from this CIBNode to all other matched CIBnodes in the CIB graph"""
if path is None:
path = []
# insert own index based on CIB node priority to resolve overlapping properties later
# FIXME priorities no longer work
pos = bisect.bisect([self.cib[uid].priority for uid in path], self.priority)
path.insert(pos, self.uid)
# no more links to check
if not (self.linked - set(path)):
return [path]
new_paths = []
for uid in self.linked:
if uid in path:
continue
new_paths.extend(self.cib[uid].resolve_links(path.copy()))
return new_paths
def match_entry(self, entry):
for match_properties in self.match:
if match_properties <= entry:
return True
return False
def expand(self):
for p in self.properties.expand():
yield p
def update_links_from_match(self):
"""
Look at the list elements in self.match and try to match all of its properties to another CIB entry. Generates a
list containing the UIDs of the matched rows. The list is stored in self.linked.
"""
for match_properties in self.match:
for node in self.cib.nodes.values():
if node.uid == self.uid: continue # ??
for p in node.expand():
# Check if the properties in the match list are a full subset of some CIB properties.
# Also include the CIB uid as a property while matching
if match_properties <= set(p.values()) | {NEATProperty(('uid', node.uid))}:
self.linked.add(node.uid)
def resolve_graph(self, path=None):
"""new try """
if path is None:
path = []
path.append(self.uid)
remaining = set(self.cib.graph.get(self.uid, [])) - set(path)
if len(remaining) == 0:
return [path]
new_paths = []
for u in remaining:
paths = self.cib.nodes[u].resolve_graph(path.copy())
new_paths.extend(paths)
return new_paths
def resolve_links(self, path=None):
"""find paths from current CIB to all linked CIBS """
if path is None:
path = []
# insert own index based on CIB node priority to resolve overlapping properties later
pos = bisect.bisect([self.cib[uid].priority for uid in path], self.priority)
path.insert(pos, self.uid)
# no more links to check
if not (self.linked - set(path)):
return [path]
new_paths = []
for uid in self.linked:
if uid in path:
continue
new_paths.extend(self.cib[uid].resolve_links(path.copy()))
return new_paths
def expand_rows(self, apply_extended=True):
"""Generate CIB rows by expanding all CIBs pointing to current CIB """
paths = self.resolve_graph()
# for storing expanded rows
rows = []
for path in paths:
expanded_properties = (self.cib[uid].expand() for uid in path)
for pas in itertools.product(*expanded_properties):
chain = ChainMap(*pas)
# For debugging purposes, add the path list to the chain.
# Store as string to preserve path order (NEAT properties are not ordered).
dbg_path = '<<'.join(uid for uid in path)
# insert at position 0 to override any existing entries
# chain.maps.insert(0, PropertyArray(NEATProperty(('cib_uids', dbg_path))))
# convert back to normal PropertyArrays
row = PropertyArray(*(p for p in chain.values()))
row.meta['cib_uids'] = dbg_path
rows.append(row)
if not apply_extended:
return rows
if not self.cib.extenders:
# no extender CIB nodes loaded
return rows
# TODO optimize
extended_rows = rows.copy()
for entry in rows:
# TODO take priorities into account
# iterate extender cib_nodes
for uid, xs in self.cib.extenders.items():
for pa in xs.expand():
if xs.match_entry(entry):
entry_copy = copy.deepcopy(entry)
chain = ChainMap(pa, entry_copy)
new_pa = PropertyArray(*(p for p in chain.values()))
try:
del new_pa['uid']
except KeyError:
pass
extended_rows.append(new_pa)
return extended_rows
def __repr__(self):
s = str(self.properties)
if self.linked:
s += " linked@%s" % self.linked
return s
class CIB(object):
"""
Internal representation of the CIB for testing
"""
cib_dir = PM.CIB_DIR
CIB_EXTENSIONS = ('.cib', '.local', '.connection', '.remote', '.slim')
def __init__(self, cib_dir=None):
# dictionary containing all loaded CIB nodes, keyed by their uid
self.nodes = {}
# track CIB files
self.files = dict()
CIBNode.cib = self
self.graph = {}
if cib_dir:
self.cib_dir = cib_dir
self.reload_files()
def __getitem__(self, uid):
return self.nodes[uid]
def items(self):
return self.nodes.items()
def keys(self):
return self.nodes.keys()
def values(self):
return self.nodes.values()
@property
def roots(self):
return {k: v for k, v in self.nodes.items() if v.root is True}
@property
def extenders(self):
return {k: v for k, v in self.nodes.items() if not v.link}
@property
def rows(self):
"""
Returns a generator containing all expanded root CIB nodes
"""
for uid, r in self.roots.items():
# expand all cib nodes
for entry in r.expand_rows():
entry.cib_node = uid
yield entry
def reload_files(self, cib_dir=None):
"""
Reload CIB files when a change is detected on disk
"""
if not cib_dir:
cib_dir = self.cib_dir
full_names = set()
logging.info("checking for CIB updates...")
if not os.path.exists(cib_dir):
sys.exit('CIB directory %s does not exist' % cib_dir)
for dirpath, dirnames, filenames in os.walk(cib_dir):
for filename in filenames:
if not filename.endswith(CIB.CIB_EXTENSIONS) or filename.startswith(('.', '#')):
continue
full_name = os.path.join(dirpath, filename)
stat = os.stat(full_name)
full_names.add(full_name)
if full_name in self.files:
if self.files[full_name] != stat.st_mtime_ns:
logging.info("CIB node %s has changed", full_name)
self.files[full_name] = stat.st_mtime_ns
self.load_cib_file(full_name)
else:
logging.info("Loading new CIB node %s.", full_name)
self.files[full_name] = stat.st_mtime_ns
self.load_cib_file(full_name)
removed_files = self.files.keys() - full_names
for filename in removed_files:
logging.info("CIB node %s has been removed", filename)
del self.files[filename]
deleted_cs = [cs for cs in self.nodes.values() if cs.filename == filename]
# remove corresponding CIBNode object
for cs in deleted_cs:
                self.nodes.pop(cs.uid, None)
self.update_graph()
def load_cib_file(self, filename):
cs = load_json(filename)
if not cs:
logging.warning("CIB node file %s was invalid" % filename)
return
try:
cib_node = CIBNode(cs)
except CIBEntryError as e:
if CIB_EXPIRED in e.args:
logging.debug("Ignoring CIB node %s: %s" % (filename, e.args[0]))
return
logging.error("Unable to load CIB node %s: %s" % (filename, e.args[0]))
return
cib_node.filename = filename
self.register(cib_node)
def update_graph(self):
# FIXME this tree should be rebuilt dynamically
# update links for all registered CIBs
for cs in self.nodes.values():
cs.update_links_from_match()
# FIXME check for invalid pointers
self.graph = {}
for i in self.nodes.values():
if not i.link:
continue
for r in i.linked:
if r not in self.graph:
self.graph[r] = []
if i.uid not in self.graph[r]:
self.graph[r].append(i.uid)
def import_json(self, slim, uid=None):
"""
Import JSON formatted CIB entries into current cib.
"""
# TODO optimize
try:
json_slim = json.loads(slim)
except json.decoder.JSONDecodeError:
logging.warning('invalid CIB file format')
return
# check if we received multiple objects in a list
if isinstance(json_slim, list):
for c in json_slim:
self.import_json(json.dumps(c))
return
# convert to CIB node object to do sanity check
try:
cs = CIBNode(json_slim)
except CIBEntryError as e:
print(e)
return
        # do not import cache nodes if disabled
if not PM.CIB_CACHE and any(['__cached' in p for p in cs.properties.expand()]):
logging.debug('Ignoring cache CIB node')
return
if uid is not None:
cs.uid = uid
filename = cs.uid
slim = cs.json()
if not filename:
logging.warning("CIB entry has no UID")
# generate CIB filename
filename = hashlib.md5(slim.encode('utf-8')).hexdigest()
filename = '%s.cib' % filename.lower()
with open(os.path.join(self.cib_dir, '%s' % filename), 'w') as f:
f.write(slim)
logging.debug("CIB entry saved as \"%s\"." % filename)
self.reload_files()
def register(self, cib_node):
if cib_node in self.nodes:
logging.debug("overwriting existing CIB with uid %s" % cib_node.uid)
self.nodes[cib_node.uid] = cib_node
def unregister(self, cib_uid):
del self.nodes[cib_uid]
self.update_graph()
def remove(self, cib_uid):
self.unregister(cib_uid)
def lookup(self, input_properties, candidate_num=5):
"""CIB lookup logic implementation
Return CIB rows that include *all* required properties from the request PropertyArray
"""
assert isinstance(input_properties, PropertyArray)
candidates = [input_properties]
for e in self.rows:
try:
# FIXME better check whether all input properties are included in row - improve matching
# ignore optional properties in input request
required_pa = PropertyArray(
*(p for p in input_properties.values() if p.precedence == NEATProperty.IMMUTABLE))
if len(required_pa & e) != len(required_pa):
continue
except ImmutablePropertyError:
continue
try:
candidate = e + input_properties
candidate.cib_node = e.cib_node
candidates.append(candidate)
except ImmutablePropertyError:
pass
return sorted(candidates, key=operator.attrgetter('score'), reverse=True)[:candidate_num]
def dump(self, show_all=False):
print(term_separator("CIB START"))
# ============================================================================
for i, e in enumerate(self.rows):
print("%3i. %s" % (i, str(e)))
# ============================================================================
print(term_separator("CIB END"))
def __repr__(self):
return 'CIB<%d>' % (len(self.nodes))
if __name__ == "__main__":
cib = CIB('./cib/example/')
b = cib['B']
c = cib['C']
cib.dump()
import code
code.interact(local=locals(), banner='CIB')
for uid in cib.roots:
z = cib[uid].resolve_links([])
print(z)
query = PropertyArray()
test_request_str = '{"MTU": {"value": [1500, Infinity]}, "low_latency": {"precedence": 2, "value": true}, "remote_ip": {"precedence": 2, "value": "10:54:1.23"}, "transport": {"value": "TCP"}}'
test = json.loads(test_request_str)
for k, v in test.items():
query.add(NEATProperty((k, v['value']), precedence=v.get('precedence', 1)))
candidates = cib.lookup(query)
for i in candidates:
print(i)
# print(i, i.cib_node, i.score)
|
63536
|
from .init import *
from .opt import *
from .checkpoint import *
from .framework import *
from .logger import *
from .metrics import *
from .geometry import *
try:
from .visualization import *
except ImportError:
__KAOLIN_LOADED__ = False
else:
__KAOLIN_LOADED__ = True
|
63539
|
import os
import cv2
import numpy as np
import torch
import pickle
import argparse
from configs import paths
from utils.cam_utils import perspective_project_torch
from models.smpl_official import SMPL
def rotate_2d(pt_2d, rot_rad):
x = pt_2d[0]
y = pt_2d[1]
sn, cs = np.sin(rot_rad), np.cos(rot_rad)
xx = x * cs - y * sn
yy = x * sn + y * cs
return np.array([xx, yy], dtype=np.float32)
def gen_trans_from_patch_cv(c_x, c_y, src_width, src_height, dst_width, dst_height, scale, rot, inv=False):
# augment size with scale
src_w = src_width * scale
src_h = src_height * scale
src_center = np.zeros(2)
src_center[0] = c_x
src_center[1] = c_y # np.array([c_x, c_y], dtype=np.float32)
# augment rotation
rot_rad = np.pi * rot / 180
src_downdir = rotate_2d(np.array([0, src_h * 0.5], dtype=np.float32), rot_rad)
src_rightdir = rotate_2d(np.array([src_w * 0.5, 0], dtype=np.float32), rot_rad)
dst_w = dst_width
dst_h = dst_height
dst_center = np.array([dst_w * 0.5, dst_h * 0.5], dtype=np.float32)
dst_downdir = np.array([0, dst_h * 0.5], dtype=np.float32)
dst_rightdir = np.array([dst_w * 0.5, 0], dtype=np.float32)
src = np.zeros((3, 2), dtype=np.float32)
src[0, :] = src_center
src[1, :] = src_center + src_downdir
src[2, :] = src_center + src_rightdir
dst = np.zeros((3, 2), dtype=np.float32)
dst[0, :] = dst_center
dst[1, :] = dst_center + dst_downdir
dst[2, :] = dst_center + dst_rightdir
if inv:
trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
else:
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
return trans
def generate_patch_image_cv(cvimg, c_x, c_y, bb_width, bb_height, patch_width, patch_height,
do_flip, scale, rot):
img = cvimg.copy()
img_height, img_width, img_channels = img.shape
if do_flip:
img = img[:, ::-1, :]
c_x = img_width - c_x - 1
trans = gen_trans_from_patch_cv(c_x, c_y, bb_width, bb_height, patch_width, patch_height, scale, rot, inv=False)
img_patch = cv2.warpAffine(img, trans, (int(patch_width), int(patch_height)),
flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
return img_patch, trans
def get_single_image_crop(image, bbox, scale=1.2, crop_size=224):
if isinstance(image, str):
if os.path.isfile(image):
image = cv2.cvtColor(cv2.imread(image), cv2.COLOR_BGR2RGB)
else:
print(image)
raise BaseException(image, 'is not a valid file!')
elif not isinstance(image, np.ndarray):
        raise TypeError('Unknown type for object: {}'.format(type(image)))
crop_image, trans = generate_patch_image_cv(
cvimg=image.copy(),
c_x=bbox[0],
c_y=bbox[1],
bb_width=bbox[2],
bb_height=bbox[3],
patch_width=crop_size,
patch_height=crop_size,
do_flip=False,
scale=scale,
rot=0,
)
return crop_image
def pw3d_eval_extract(dataset_path, out_path, crop_wh=512):
bbox_scale_factor = 1.2
smpl_male = SMPL(paths.SMPL, batch_size=1, gender='male').to(device)
smpl_female = SMPL(paths.SMPL, batch_size=1, gender='female').to(device)
# imgnames_, scales_, centers_, parts_ = [], [], [], []
cropped_frame_fnames_, whs_, centers_, = [], [], []
poses_, shapes_, genders_ = [], [], []
sequence_files = sorted([os.path.join(dataset_path, 'sequenceFiles', 'test', f)
for f in os.listdir(os.path.join(dataset_path, 'sequenceFiles', 'test'))
if f.endswith('.pkl')])
for filename in sequence_files:
print('\n\n\n', filename)
with open(filename, 'rb') as f:
data = pickle.load(f, encoding='latin1')
smpl_poses = data['poses'] # list of (num frames, 72) pose params for each person
smpl_betas = data['betas'] # list of (10,) or (300,) shape params for each person
poses2d = data['poses2d'] # list of (num frames, 3, 18) 2d kps for each person
cam_extrinsics = data['cam_poses'] # array of (num frames, 4, 4) cam extrinsics
cam_K = data['cam_intrinsics'] # array of (3, 3) cam intrinsics.
genders = data['genders'] # list of genders for each person
valid = data['campose_valid'] # list of (num frames,) boolean arrays for each person, indicating whether camera pose has been aligned to that person (for trans).
trans = data['trans'] # list of (num frames, 3) translations in SMPL space for each person, to align them with image data (after projection)
num_people = len(smpl_poses) # Number of people in sequence
num_frames = len(smpl_poses[0]) # Number of frames in sequence
seq_name = str(data['sequence'])
print('smpl poses', len(smpl_poses), smpl_poses[0].shape,
'smpl betas', len(smpl_betas), smpl_betas[0].shape,
'poses2d', len(poses2d), poses2d[0].shape,
'global poses', cam_extrinsics.shape,
'cam_K', cam_K.shape,
'genders', genders, type(genders),
'valid', len(valid), valid[0].shape, np.sum(valid[0]), np.sum(valid[-1]),
'trans', len(trans), trans[0].shape,
'num people', num_people, 'num frames', num_frames, 'seq name', seq_name, '\n')
cam_K = torch.from_numpy(cam_K[None, :]).float().to(device)
for person_num in range(num_people):
# Get valid frames flags, shape and gender
valid_frames = valid[person_num].astype(bool)
shape = smpl_betas[person_num][:10]
torch_shape = torch.from_numpy(shape[None, :]).float().to(device)
gender = genders[person_num]
for frame_num in range(num_frames):
if valid_frames[frame_num]: # Only proceed if frame has valid camera pose for person
# Get bounding box using projected vertices
pose = smpl_poses[person_num][frame_num]
cam_R = cam_extrinsics[frame_num][:3, :3]
cam_t = cam_extrinsics[frame_num][:3, 3]
frame_trans = trans[person_num][frame_num]
pose = torch.from_numpy(pose[None, :]).float().to(device)
cam_t = torch.from_numpy(cam_t[None, :]).float().to(device)
cam_R = torch.from_numpy(cam_R[None, :, :]).float().to(device)
frame_trans = torch.from_numpy(frame_trans[None, :]).float().to(device)
if gender == 'm':
smpl_out = smpl_male(body_pose=pose[:, 3:],
global_orient=pose[:, :3],
betas=torch_shape,
transl=frame_trans)
elif gender == 'f':
smpl_out = smpl_female(body_pose=pose[:, 3:],
global_orient=pose[:, :3],
betas=torch_shape,
transl=frame_trans)
vertices = smpl_out.vertices
projected_aligned_vertices = perspective_project_torch(vertices, cam_R,
cam_t, cam_K=cam_K)
projected_aligned_vertices = projected_aligned_vertices[0].cpu().detach().numpy()
bbox = [min(projected_aligned_vertices[:, 0]),
min(projected_aligned_vertices[:, 1]),
max(projected_aligned_vertices[:, 0]),
max(projected_aligned_vertices[:, 1])] # (x1, y1, x2, y2) where x is cols and y is rows, measured from the top-left corner.
center = [(bbox[2] + bbox[0]) / 2, (bbox[3] + bbox[1]) / 2]
wh = max(bbox[2] - bbox[0], bbox[3] - bbox[1])
# Save cropped frame using bounding box
image_fpath = os.path.join(dataset_path, 'imageFiles', seq_name,
'image_{}.jpg'.format(str(frame_num).zfill(5)))
image = cv2.imread(image_fpath)
centre_wh_bbox = center + [wh, wh]
cropped_image = get_single_image_crop(image, centre_wh_bbox,
scale=bbox_scale_factor,
crop_size=crop_wh)
cropped_image_fname = seq_name + '_image_{}_person_{}.png'.format(str(frame_num).zfill(5),
str(person_num).zfill(3))
cropped_image_fpath = os.path.join(out_path, 'cropped_frames',
cropped_image_fname)
cv2.imwrite(cropped_image_fpath, cropped_image)
# Transform global using cam extrinsics pose before storing
pose = pose[0].cpu().detach().numpy()
cam_R = cam_R[0].cpu().detach().numpy()
pose[:3] = cv2.Rodrigues(np.dot(cam_R, cv2.Rodrigues(pose[:3])[0]))[0].T[0]
# Store everything in lists
cropped_frame_fnames_.append(cropped_image_fname)
centers_.append(center)
whs_.append(wh)
poses_.append(pose)
shapes_.append(shape)
genders_.append(gender)
# print(cropped_image_fname, shape.shape, pose.shape, center, wh, gender)
# Store all data in npz file.
out_file = os.path.join(out_path, '3dpw_test.npz')
np.savez(out_file, imgname=cropped_frame_fnames_,
center=centers_,
wh=whs_,
pose=poses_,
shape=shapes_,
gender=genders_)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_path', type=str)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('\nDevice: {}'.format(device))
out_path = os.path.join(args.dataset_path, 'test')
if not os.path.isdir(out_path):
os.makedirs(os.path.join(out_path, 'cropped_frames'))
pw3d_eval_extract(args.dataset_path, out_path)
|
63570
|
from unittest.mock import patch
from django.test import TestCase
import vcr
from data_refinery_common.models import (
Contribution,
Experiment,
ExperimentSampleAssociation,
OntologyTerm,
Sample,
SampleAttribute,
)
from data_refinery_foreman.foreman.management.commands.import_external_sample_attributes import (
Command,
import_metadata,
import_sample_attributes,
)
TEST_METADATA = "/home/user/data_store/externally_supplied_metadata/test_data/metadata.json"
class ImportExternalSampleAttributesTestCase(TestCase):
def setUp(self):
experiment = Experiment()
experiment.accession_code = "GSE000"
experiment.alternate_accession_code = "E-GEOD-000"
experiment.title = "NONONONO"
experiment.description = "Boooooourns. Wasabi."
experiment.technology = "RNA-SEQ"
experiment.save()
self.experiment = experiment
# Create some samples to attach metadata to
sample = Sample()
sample.accession_code = "SRR123"
sample.technology = "RNA-SEQ"
sample.source_database = "SRA"
sample.title = "Not important"
sample.save()
experiment_sample_association = ExperimentSampleAssociation()
experiment_sample_association.sample = sample
experiment_sample_association.experiment = experiment
experiment_sample_association.save()
sample2 = Sample()
sample2.accession_code = "SRR456"
sample2.technology = "RNA-SEQ"
sample2.source_database = "SRA"
sample2.title = "Not important"
sample2.save()
experiment_sample_association = ExperimentSampleAssociation()
experiment_sample_association.sample = sample2
experiment_sample_association.experiment = experiment
experiment_sample_association.save()
# Create the ontology terms I'm using in the tests
name = OntologyTerm()
name.ontology_term = "PATO:0000122"
name.human_readable_name = "length"
name.save()
unit = OntologyTerm()
unit.ontology_term = "UO:0010012"
unit.human_readable_name = "thou"
unit.save()
contribution = Contribution()
contribution.source_name = "refinebio_tests"
contribution.methods_url = "ccdatalab.org"
contribution.save()
self.contribution = contribution
#
# Test import_sample_attributes()
#
def test_skip_unknown_sample(self):
"""Make sure that if someone has metadata for a sample that we haven't
surveyed then we just do nothing"""
METADATA = [{"PATO:0000122": {"value": 25, "unit": "UO:0010012"}}]
import_sample_attributes("SRR789", METADATA, self.contribution)
self.assertEqual(SampleAttribute.objects.all().count(), 0)
def test_import_invalid_ontology_term(self):
METADATA = [{"PATO:0000122": {"value": 25, "unit": "thou"}}]
self.assertRaises(
ValueError, import_sample_attributes, "SRR123", METADATA, self.contribution
)
METADATA = [{"length": {"value": 25, "unit": "UO:0010012"}}]
self.assertRaises(
ValueError, import_sample_attributes, "SRR123", METADATA, self.contribution
)
def test_import_valid_sample_attributes(self):
METADATA = [{"PATO:0000122": {"value": 25, "unit": "UO:0010012"}}]
import_sample_attributes("SRR123", METADATA, self.contribution)
self.assertEqual(SampleAttribute.objects.all().count(), 1)
contributed_metadata = Sample.objects.get(accession_code="SRR123").contributed_metadata
self.assertEqual(
contributed_metadata[self.contribution.source_name]["length"],
{"unit": "thou", "value": 25},
)
#
# Test import_metadata()
#
def test_import_valid_metadata(self):
METADATA = [
{
"sample_accession": "SRR123",
"attributes": [{"PATO:0000122": {"value": 25, "unit": "UO:0010012"}}],
}
]
import_metadata(METADATA, self.contribution)
self.assertEqual(SampleAttribute.objects.all().count(), 1)
contributed_metadata = Sample.objects.get(accession_code="SRR123").contributed_metadata
self.assertEqual(
contributed_metadata[self.contribution.source_name]["length"],
{"unit": "thou", "value": 25},
)
#
# End-to-end test
#
@vcr.use_cassette("/home/user/data_store/cassettes/foreman.sample_attributes.end-to-end.yaml")
def test_management_command(self):
sample = Sample()
sample.accession_code = "DRR001173"
sample.technology = "RNA-SEQ"
sample.source_database = "SRA"
sample.title = "Not important"
sample.save()
command = Command()
SOURCE_NAME = "refinebio_tests"
command.handle(file=TEST_METADATA, source_name=SOURCE_NAME, methods_url="ccdatalab.org")
self.assertEqual(SampleAttribute.objects.all().count(), 1)
contributed_metadata = sample.contributed_metadata
self.assertEqual(
set(contributed_metadata[SOURCE_NAME]["biological sex"].keys()),
{"value", "confidence"},
)
self.assertEqual(
contributed_metadata[SOURCE_NAME]["biological sex"]["value"].human_readable_name,
"female",
)
self.assertAlmostEqual(
contributed_metadata[SOURCE_NAME]["biological sex"]["confidence"], 0.7856624891880539
)
|
63615
|
import torch
import numpy as np
import argparse
import os
from utils import Logger, LogFiles, ValidationAccuracies, cross_entropy_loss, compute_accuracy, MetaLearningState,\
shuffle
from model import FewShotClassifier
from dataset import get_dataset_reader
from tf_dataset_reader import TfDatasetReader
from image_folder_reader import ImageFolderReader
NUM_VALIDATION_TASKS = 200
NUM_TEST_TASKS = 600
PRINT_FREQUENCY = 1000
def main():
learner = Learner()
learner.run()
class Learner:
def __init__(self):
self.args = self.parse_command_line()
self.log_files = LogFiles(self.args.checkpoint_dir, self.args.resume_from_checkpoint,
(self.args.mode == 'test') or (self.args.mode == 'test_vtab'))
self.logger = Logger(self.args.checkpoint_dir, "log.txt")
self.logger.print_and_log("Options: %s\n" % self.args)
self.logger.print_and_log("Checkpoint Directory: %s\n" % self.log_files.checkpoint_dir)
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.model = self.init_model()
self.train_set, self.validation_set, self.test_set = self.init_data()
if self.args.mode == "train" or self.args.mode == "test" or self.args.mode == 'train_test':
self.dataset = get_dataset_reader(
args=self.args,
train_set=self.train_set,
validation_set=self.validation_set,
test_set=self.test_set)
if self.args.train_method == 'lite':
self.train_fn = self.train_lite
else:
self.train_fn = self.train_task
self.use_batches = self.args.train_method != 'no_lite'
self.loss = cross_entropy_loss
self.accuracy_fn = compute_accuracy
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.learning_rate)
self.validation_accuracies = ValidationAccuracies(self.validation_set)
self.start_iteration = 0
if self.args.resume_from_checkpoint:
self.load_checkpoint()
self.optimizer.zero_grad()
self.feature_cache = None
def init_model(self):
model = FewShotClassifier(args=self.args, logger=self.logger, device=self.device).to(self.device)
model.count_parameters(model)
# set encoder is always in train mode (it only sees context data).
# Feature extractor gets switched in model.
model.train()
return model
def init_data(self):
train_set = ['ilsvrc_2012', 'omniglot', 'aircraft', 'cu_birds', 'dtd', 'quickdraw', 'fungi', 'mnist']
validation_set = ['omniglot', 'aircraft', 'cu_birds', 'dtd', 'quickdraw', 'fungi', 'mscoco']
test_set = self.args.test_datasets
return train_set, validation_set, test_set
"""
Command line parser
"""
def parse_command_line(self):
parser = argparse.ArgumentParser()
# operational parameters
parser.add_argument("--mode", choices=["train", "test", "train_test", "test_vtab"], default="train_test",
help="Whether to run meta-training only, meta-testing only, "
"both meta-training and meta-testing, or testing on vtab.")
parser.add_argument("--checkpoint_dir", "-c", default='../checkpoints', help="Directory to save checkpoint to.")
parser.add_argument("--resume_from_checkpoint", "-r", dest="resume_from_checkpoint", default=False,
action="store_true", help="Restart from latest checkpoint.")
# data parameters
parser.add_argument('--test_datasets', nargs='+', help='Datasets to use for testing',
default=["omniglot", "aircraft", "cu_birds", "dtd", "quickdraw", "fungi", "traffic_sign",
"mscoco"])
parser.add_argument("--data_path", default="../datasets", help="Path to Meta-Dataset records.")
parser.add_argument("--download_path_for_tensorflow_datasets", default=None,
help="Path to download the tensorflow datasets.")
parser.add_argument("--download_path_for_sun397_dataset", default=None,
help="Path to download the sun397 dataset.")
# training parameters
parser.add_argument("--train_method", choices=["lite", "small_task", "no_lite"], default="lite",
help="Whether to use lite, small tasks, or not lite.")
parser.add_argument("--pretrained_model_path", default="../models/efficientnet-b0_84.pt",
help="Path to dataset records.")
parser.add_argument("--learning_rate", "-lr", type=float, default=0.001, help="Learning rate.")
parser.add_argument("--tasks_per_step", type=int, default=16,
help="Number of tasks between parameter optimizations.")
parser.add_argument("--training_iterations", "-i", type=int, default=10000,
help="Number of meta-training iterations.")
parser.add_argument("--max_way_train", type=int, default=50, help="Maximum way of meta-train task.")
parser.add_argument("--max_support_train", type=int, default=500,
help="Maximum support set size of meta-train task.")
parser.add_argument("--image_size", type=int, default=224, help="Image height and width.")
parser.add_argument("--batch_size", type=int, default=40, help="Size of batch.")
parser.add_argument("--h", type=int, default=40,
help="Number of support set samples to back-propagate when training with LITE.")
# testing parameters
parser.add_argument("--test_model_path", "-m", default=None, help="Path to model to load and test.")
parser.add_argument("--val_freq", type=int, default=5000, help="Number of iterations between validations.")
args = parser.parse_args()
return args
def run(self):
if self.args.mode == 'train' or self.args.mode == 'train_test':
train_accuracies = []
losses = []
total_iterations = self.args.training_iterations
for iteration in range(self.start_iteration, total_iterations):
task_dict = self.dataset.get_train_task()
context_images, target_images, context_labels, target_labels = self.prepare_task(task_dict)
if self.use_batches:
self.model.clear_caches()
self.feature_cache = None
target_set_size = len(target_labels)
num_batches = self._get_number_of_batches(target_set_size)
for batch in range(num_batches):
batch_start_index, batch_end_index = self._get_batch_indices(batch, target_set_size)
batch_loss, batch_accuracy = self.train_fn(
context_images,
target_images[batch_start_index : batch_end_index],
context_labels,
target_labels[batch_start_index : batch_end_index]
)
train_accuracies.append(batch_accuracy)
losses.append(batch_loss)
else:
task_loss, task_accuracy = self.train_fn(context_images, target_images, context_labels,
target_labels)
train_accuracies.append(task_accuracy)
losses.append(task_loss)
# optimize
if ((iteration + 1) % self.args.tasks_per_step == 0) or (iteration == (total_iterations - 1)):
self.optimizer.step()
self.optimizer.zero_grad()
if (iteration + 1) % PRINT_FREQUENCY == 0:
# print training stats
self.save_checkpoint(iteration + 1)
torch.save(self.model.state_dict(), os.path.join(self.log_files.checkpoint_dir,
"model_{}.pt".format(iteration + 1)))
self.logger.print_and_log('Task [{}/{}], Train Loss: {:.7f},'
' Train Accuracy: {:.7f}, Learning Rate: {:.7f}'
.format(iteration + 1, total_iterations,
torch.Tensor(losses).mean().item(),
torch.Tensor(train_accuracies).mean().item(),
self.optimizer.param_groups[0]['lr']))
train_accuracies = []
losses = []
if ((iteration + 1) % self.args.val_freq == 0) and (iteration + 1) != total_iterations:
# validate
accuracy_dict = self.validate()
self.validation_accuracies.print(self.logger, accuracy_dict)
# save the model if validation is the best so far
if self.validation_accuracies.is_better(accuracy_dict):
self.validation_accuracies.replace(accuracy_dict)
torch.save(self.model.state_dict(), self.log_files.best_validation_model_path)
self.logger.print_and_log('Best validation model was updated.')
self.logger.print_and_log('')
# save the final model
torch.save(self.model.state_dict(), self.log_files.fully_trained_model_path)
if self.args.mode == 'train_test':
self.test(self.log_files.fully_trained_model_path)
self.test(self.log_files.best_validation_model_path)
if self.args.mode == 'test':
self.test(self.args.test_model_path)
if self.args.mode == 'test_vtab':
self._test_transfer_learning(self.args.test_model_path)
def train_task(self, context_images, target_images, context_labels, target_labels):
target_logits = self.model(context_images, context_labels, target_images, MetaLearningState.META_TRAIN)
task_loss = self.loss(target_logits, target_labels) / self.args.tasks_per_step
regularization_term = (self.model.feature_adaptation_network.regularization_term())
regularizer_scaling = 0.001
task_loss += regularizer_scaling * regularization_term
task_accuracy = self.accuracy_fn(target_logits, target_labels)
task_loss.backward(retain_graph=False)
return task_loss, task_accuracy
def train_lite(self, context_images, target_images, context_labels, target_labels):
# We'll split the context set into two: the first part will be of size batch_size and we'll use gradients
# for that. The second part will be everything else and we'll use no gradients for that, so we only need to
# compute that once per task.
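# (Assumed rationale: only the h examples in the first split retain their computation graph,
# so back-propagation memory stays roughly constant regardless of context-set size.)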
context_size = context_images.size(0)
indices = np.random.permutation(context_size)
h = min(self.args.h, context_size) # number of examples to back-propagate through
grad_indices = indices[0: h]
no_grad_indices = indices[h:]
self.model.build_task_representation_with_split_batch(context_images, grad_indices, no_grad_indices)
context_features = self._compute_features_with_split_batch(context_images, grad_indices, no_grad_indices,
MetaLearningState.META_TRAIN)
self.model.configure_classifier(context_features, context_labels[indices])
# now the target set
torch.set_grad_enabled(True)
batch_logits = self.model.predict(target_images, MetaLearningState.META_TRAIN)
# compute the loss
batch_loss = self.loss(batch_logits, target_labels) / self.args.tasks_per_step
regularization_term = (self.model.feature_adaptation_network.regularization_term())
regularizer_scaling = 0.001
batch_loss += regularizer_scaling * regularization_term
# compute accuracy
batch_accuracy = self.accuracy_fn(batch_logits, target_labels)
batch_loss.backward(retain_graph=False)
return batch_loss, batch_accuracy
def _get_number_of_batches(self, task_size):
num_batches = int(np.ceil(float(task_size) / float(self.args.batch_size)))
if num_batches > 1 and (task_size % self.args.batch_size == 1):
num_batches -= 1
return num_batches
def _get_batch_indices(self, index, last_element):
batch_start_index = index * self.args.batch_size
batch_end_index = batch_start_index + self.args.batch_size
if batch_end_index == (last_element - 1): # avoid batch size of 1
batch_end_index = last_element
if batch_end_index > last_element:
batch_end_index = last_element
return batch_start_index, batch_end_index
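# Worked example (illustrative numbers): with task_size=101 and batch_size=50,
# _get_number_of_batches returns 2 (the trailing batch of size 1 is merged into the previous one)
# and _get_batch_indices yields the slices [0:50] and [50:101].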
def validate(self):
with torch.no_grad():
accuracy_dict = {}
for item in self.validation_set:
accuracies = []
for _ in range(NUM_VALIDATION_TASKS):
task_dict = self.dataset.get_validation_task(item)
context_images, target_images, context_labels, target_labels = self.prepare_task(task_dict)
if self.use_batches:
self.model.build_task_representation_by_batch(context_images)
context_features = self._compute_features_by_batch(context_images, MetaLearningState.META_TEST)
self.model.configure_classifier(context_features, context_labels)
test_set_size = len(target_labels)
num_batches = self._get_number_of_batches(test_set_size)
target_logits = []
for batch in range(num_batches):
batch_start_index, batch_end_index = self._get_batch_indices(batch, test_set_size)
batch_logits = self.model.predict(target_images[batch_start_index: batch_end_index],
MetaLearningState.META_TEST)
target_logits.append(batch_logits)
target_logits = torch.vstack(target_logits)
target_accuracy = self.accuracy_fn(target_logits, target_labels)
del target_logits
accuracies.append(target_accuracy.item())
else:
target_logits = self.model(context_images, context_labels, target_images, MetaLearningState.META_TEST)
accuracy = self.accuracy_fn(target_logits, target_labels)
accuracies.append(accuracy.item())
del target_logits
accuracy = np.array(accuracies).mean() * 100.0
confidence = (196.0 * np.array(accuracies).std()) / np.sqrt(len(accuracies))
accuracy_dict[item] = {"accuracy": accuracy, "confidence": confidence}
return accuracy_dict
def test(self, path):
self.logger.print_and_log("") # add a blank line
self.logger.print_and_log('Testing model {0:}: '.format(path))
self.model = self.init_model()
if path != 'None':
self.model.load_state_dict(torch.load(path))
with torch.no_grad():
for item in self.test_set:
accuracies = []
for _ in range(NUM_TEST_TASKS):
task_dict = self.dataset.get_test_task(item)
context_images, target_images, context_labels, target_labels = self.prepare_task(task_dict)
if self.use_batches:
self.model.build_task_representation_by_batch(context_images)
context_features = self._compute_features_by_batch(context_images, MetaLearningState.META_TEST)
self.model.configure_classifier(context_features, context_labels)
test_set_size = len(target_labels)
num_batches = self._get_number_of_batches(test_set_size)
target_logits = []
for batch in range(num_batches):
batch_start_index, batch_end_index = self._get_batch_indices(batch, test_set_size)
batch_logits = self.model.predict(target_images[batch_start_index: batch_end_index],
MetaLearningState.META_TEST)
target_logits.append(batch_logits)
target_logits = torch.vstack(target_logits)
target_accuracy = self.accuracy_fn(target_logits, target_labels)
del target_logits
accuracies.append(target_accuracy.item())
else:
target_logits = self.model(context_images, context_labels, target_images,
MetaLearningState.META_TEST)
accuracy = self.accuracy_fn(target_logits, target_labels)
accuracies.append(accuracy.item())
del target_logits
accuracy = np.array(accuracies).mean() * 100.0
accuracy_confidence = (196.0 * np.array(accuracies).std()) / np.sqrt(len(accuracies))
self.logger.print_and_log('{0:}: {1:3.1f}+/-{2:2.1f}'.format(item, accuracy, accuracy_confidence))
def _test_transfer_learning(self, path):
self.logger.print_and_log("") # add a blank line
self.logger.print_and_log('Testing model {0:}: '.format(path))
self.model = self.init_model()
if path != 'None':
self.model.load_state_dict(torch.load(path))
context_set_size = 1000
datasets = [
{'name': "caltech101", 'task': None, 'enabled': True},
{'name': "cifar100", 'task': None, 'enabled': True},
{'name': "oxford_flowers102", 'task': None, 'enabled': True},
{'name': "oxford_iiit_pet", 'task': None, 'enabled': True},
{'name': "sun397", 'task': None, 'enabled': True},
{'name': "svhn_cropped", 'task': None, 'enabled': True},
{'name': "eurosat", 'task': None, 'enabled': True},
{'name': "resisc45", 'task': None, 'enabled': True},
{'name': "patch_camelyon", 'task': None, 'enabled': True},
{'name': "diabetic_retinopathy_detection", 'task': None, 'enabled': True},
{'name': "clevr", 'task': "count", 'enabled': True},
{'name': "clevr", 'task': "distance", 'enabled': True},
{'name': "dsprites", 'task': "location", 'enabled': True},
{'name': "dsprites", 'task': "orientation", 'enabled': True},
{'name': "smallnorb", 'task': "azimuth", 'enabled': True},
{'name': "smallnorb", 'task': "elevation", 'enabled': True},
{'name': "dmlab", 'task': None, 'enabled': True},
{'name': "kitti", 'task': None, 'enabled': True},
]
with torch.no_grad():
for dataset in datasets:
if dataset['enabled'] is False:
continue
if dataset['name'] == "sun397": # use the image folder reader as the tf reader is broken for sun397
dataset_reader = ImageFolderReader(
path_to_images=self.args.download_path_for_sun397_dataset,
context_batch_size=context_set_size,
target_batch_size=self.args.batch_size,
image_size=self.args.image_size,
device=self.device)
else: # use the tensorflow dataset reader
dataset_reader = TfDatasetReader(
dataset=dataset['name'],
task=dataset['task'],
context_batch_size=context_set_size,
target_batch_size=self.args.batch_size,
path_to_datasets=self.args.download_path_for_tensorflow_datasets,
image_size=self.args.image_size,
device=self.device
)
context_images, context_labels = dataset_reader.get_context_batch()
self.model.build_task_representation_by_batch(context_images)
context_features = self._compute_features_by_batch(context_images, MetaLearningState.META_TEST)
self.model.configure_classifier(context_features, context_labels)
test_set_size = dataset_reader.get_target_dataset_length()
num_batches = self._get_number_of_batches(test_set_size)
target_logits = []
target_labels = []
for batch in range(num_batches):
batch_target_images, batch_target_labels = dataset_reader.get_target_batch()
batch_logits = self.model.predict(batch_target_images, MetaLearningState.META_TEST)
target_logits.append(batch_logits)
target_labels.append(batch_target_labels)
target_logits = torch.vstack(target_logits)
target_labels = torch.hstack(target_labels)
target_accuracy = self.accuracy_fn(target_logits, target_labels)
del target_logits
accuracy = target_accuracy * 100.0
if dataset['task'] is None:
self.logger.print_and_log('{0:}: {1:3.1f}'.format(dataset['name'], accuracy))
else:
self.logger.print_and_log('{0:} {1:}: {2:3.1f}'.format(dataset['name'], dataset['task'], accuracy))
def _compute_features_by_batch(self, images, meta_learning_state):
features = []
num_images = images.size(0)
num_batches = self._get_number_of_batches(num_images)
for batch in range(num_batches):
batch_start_index, batch_end_index = self._get_batch_indices(batch, num_images)
features.append(self.model.get_context_features(images[batch_start_index: batch_end_index],
meta_learning_state))
return torch.vstack(features)
def _compute_features_with_split_batch(self, images, grad_indices, no_grad_indices, meta_learning_state):
num_images = images.size(0)
if self.feature_cache is None: # cache the part with no gradients
features = []
num_batches = self._get_number_of_batches(num_images)
for batch in range(num_batches):
batch_start_index, batch_end_index = self._get_batch_indices(batch, num_images)
torch.set_grad_enabled(False)
features.append(self.model.get_context_features(images[batch_start_index: batch_end_index],
meta_learning_state))
self.feature_cache = torch.vstack(features).to(self.device)
# now select some random images that will have gradients and process those
embeddings = []
if len(grad_indices) > 0:
torch.set_grad_enabled(True)
embeddings.append(self.model.get_context_features(images[grad_indices], meta_learning_state))
# now add in the no_grad images
embeddings.extend(self.feature_cache[no_grad_indices])
return torch.vstack(embeddings)
def prepare_task(self, task_dict):
context_images_np, context_labels_np = task_dict['context_images'], task_dict['context_labels']
target_images_np, target_labels_np = task_dict['target_images'], task_dict['target_labels']
context_images_np = context_images_np.transpose([0, 3, 1, 2])
context_images_np, context_labels_np = shuffle(context_images_np, context_labels_np)
context_images = torch.from_numpy(context_images_np)
context_labels = torch.from_numpy(context_labels_np)
target_images_np = target_images_np.transpose([0, 3, 1, 2])
target_images_np, target_labels_np = shuffle(target_images_np, target_labels_np)
target_images = torch.from_numpy(target_images_np)
target_labels = torch.from_numpy(target_labels_np)
context_images = context_images.to(self.device)
target_images = target_images.to(self.device)
context_labels = context_labels.to(self.device)
target_labels = target_labels.type(torch.LongTensor).to(self.device)
return context_images, target_images, context_labels, target_labels
def save_checkpoint(self, iteration):
torch.save({
'iteration': iteration,
'model_state_dict': self.model.state_dict(),
'optimizer_state_dict': self.optimizer.state_dict(),
'best_accuracy': self.validation_accuracies.get_current_best_accuracy_dict(),
}, os.path.join(self.log_files.checkpoint_dir, 'checkpoint.pt'))
def load_checkpoint(self):
checkpoint = torch.load(os.path.join(self.log_files.checkpoint_dir, 'checkpoint.pt'))
self.start_iteration = checkpoint['iteration']
self.model.load_state_dict(checkpoint['model_state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
self.validation_accuracies.replace(checkpoint['best_accuracy'])
if __name__ == "__main__":
main()
|
63633
|
import os
import subprocess
import sys
args = sys.argv[:]
print('hello from %s' % args[0])
print('args: ' + ' '.join(args))
print('current directory: ' + os.getcwd())
p = subprocess.Popen('ls -al', shell=True, bufsize=1, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
while True:
line = p.stdout.readline()
if line != '':
print(line.rstrip())
else:
break
retval = p.wait()
print('%s done' % args[0])
|
63660
|
from io import StringIO
from django.core.management import call_command
from django.core.management.base import CommandError
from django.test import TestCase
from flags.state import flag_enabled
class EnableFlagTestCase(TestCase):
def test_enable_flag(self):
out = StringIO()
self.assertFalse(flag_enabled("DB_FLAG"))
call_command("enable_flag", "DB_FLAG", stdout=out)
self.assertTrue(flag_enabled("DB_FLAG"))
self.assertIn("Successfully enabled", out.getvalue())
def test_enable_flag_non_existent_flag(self):
with self.assertRaises(CommandError):
call_command("enable_flag", "FLAG_DOES_NOT_EXIST")
|
63720
|
from program_synthesis.karel.dataset import executor
from program_synthesis.karel.dataset import parser_for_synthesis
branch_types = {'if', 'ifElse', 'while'}
stmt_types = {'move', 'turnLeft', 'turnRight', 'putMarker', 'pickMarker'}
class CoverageMeasurer(object):
def __init__(self, code):
self.parser = parser_for_synthesis.KarelForSynthesisParser(
build_tree=True)
self.executor = executor.KarelExecutor()
self.code = code
tree = self.parser.parse(code)
# Statement coverage: actions
self.stmt_coverage = {span: 0 for span in self.parser.action_spans}
# Branch coverage: if, ifelse, while
self.branch_coverage = {(span, cond_value): 0
for span in self.parser.cond_block_spans
for cond_value in (True, False)}
def add(self, inp):
out, trace = self.executor.execute(
self.code, None, inp, record_trace=True)
if not out:
return False
for event in trace.events:
if event.type in branch_types:
self.branch_coverage[event.span, event.cond_value] += 1
elif event.type in stmt_types:
self.stmt_coverage[event.span] += 1
return True
def uncovered(self):
return (tuple(k for k, v in self.stmt_coverage.items() if v == 0),
tuple(k for k, v in self.branch_coverage.items() if v == 0))
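# Hypothetical usage sketch (assumes `code` is a Karel program string and `inputs` is an
# iterable of executor-compatible input grids):
#   measurer = CoverageMeasurer(code)
#   for inp in inputs:
#       measurer.add(inp)
#   uncovered_stmts, uncovered_branches = measurer.uncovered()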
|
63742
|
import os
import re
import sys
import cffi
from ._compat import PY2
_directive_re = re.compile(r'(?m)^\s*#.*?$')
def make_ffi(module_path, crate_path, cached_header_filename=None):
"""Creates a FFI instance for the given configuration."""
if cached_header_filename is not None and \
os.path.isfile(cached_header_filename):
with open(cached_header_filename, 'rb') as f:
header = f.read()
if not PY2:
header = header.decode('utf-8')
else:
from .bindgen import generate_header
header = generate_header(crate_path)
header = _directive_re.sub('', header)
if os.environ.get('SNAEK_DEBUG_HEADER') == '1':
sys.stderr.write('/* generated header for "%s" */\n' % module_path)
sys.stderr.write(header)
sys.stderr.write('\n')
sys.stderr.flush()
ffi = cffi.FFI()
ffi.cdef(header)
ffi.set_source(module_path, None)
return ffi
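# Hypothetical usage sketch (module and library paths are illustrative only):
#   ffi = make_ffi('mypkg._native', '/path/to/rust/crate')
#   lib = ffi.dlopen('/path/to/compiled/cdylib.so')  # ABI-mode load of the compiled crate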
|
63754
|
import luhn
def test_checksum_len1():
assert luhn.checksum('7') == 7
def test_checksum_len2():
assert luhn.checksum('13') == 5
def test_checksum_len3():
assert luhn.checksum('383') == 3
def test_checksum_len4():
assert luhn.checksum('2827') == 3
def test_checksum_len13():
assert luhn.checksum('4346537657597') == 9
def test_checksum_len14():
assert luhn.checksum('27184931073326') == 1
def test_valid():
assert luhn.verify('356938035643809')
def test_invalid():
assert not luhn.verify('4222222222222222')
def test_generate():
assert luhn.generate('7992739871') == 3
def test_append():
assert luhn.append('53461861341123') == '534618613411234'
|
63759
|
from setuptools import setup
def _md(filename):
'''
Load an md file for the PyPI long description.
Note: unsupported GitHub constructs (the code-block directive and badges)
are currently returned unchanged; no sanitization is applied here.
'''
content = open(filename).read()
return content
long_description = '\n'.join((
_md('README.md'),
_md('CHANGELOG.md'),
''
))
exec(compile(
open('devpi_semantic_ui/__about__.py').read(),
'devpi_semantic_ui/__about__.py',
'exec'
))
setup(
name="devpi-semantic-ui",
description=__description__,
url="https://github.com/apihackers/devpi-semantic-ui",
version=__version__,
maintainer="API Hackers",
maintainer_email="<EMAIL>",
license="MIT",
entry_points={
'devpi_server': [
"devpi-semantic-ui = devpi_semantic_ui"
]
},
install_requires=['devpi-web'],
include_package_data=True,
zip_safe=False,
packages=['devpi_semantic_ui'],
keywords='devpi semantic-ui',
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python',
'Environment :: Web Environment',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Topic :: System :: Software Distribution',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: MIT License',
],
)
|
63784
|
import re
from PIL import Image, ImageOps
from io import BytesIO
from django.contrib.auth.models import User
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.http import HttpResponseForbidden
from django.shortcuts import get_object_or_404
from django.core.files.uploadedfile import SimpleUploadedFile
from forum.models import Topic
MENTION_REGEX = re.compile(r'@(\w+)', re.M)
IMAGE_LARGE = 144
IMAGE_MEDIUM = 96
IMAGE_SMALL = 48
NUM_PER_PAGE = 20
def _thumbnail(upload, size, fmt):
img = ImageOps.fit(upload, size, Image.LANCZOS)
temp = BytesIO()
img.save(temp, fmt, quality=95)
temp.seek(0)
return temp
def create_thumbnail(src, new_name, ext):
upload = Image.open(BytesIO(src.read()))
fmt = src.content_type.split('/')[-1]
large = _thumbnail(upload, (IMAGE_LARGE, IMAGE_LARGE), fmt)
filename_l = "%s_l.%s" % (new_name, ext)
large_file = SimpleUploadedFile(filename_l, large.read(), content_type=src.content_type)
medium = _thumbnail(upload, (IMAGE_MEDIUM, IMAGE_MEDIUM), fmt)
filename_m = "%s_m.%s" % (new_name, ext)
medium_file = SimpleUploadedFile(filename_m, medium.read(), content_type=src.content_type)
small = _thumbnail(upload, (IMAGE_SMALL, IMAGE_SMALL), fmt)
filename_s = "%s_s.%s" % (new_name, ext)
small_file = SimpleUploadedFile(filename_s, small.read(), content_type=src.content_type)
return large_file, medium_file, small_file
def get_pagination(current_page, num_pages, count):
page_list = []
show_pages = 2*count+1
if show_pages >= num_pages:
page_list.extend(range(1, num_pages+1))
elif current_page - count < 1:
page_list.extend(range(1, show_pages+1))
elif current_page + count > num_pages:
page_list.extend(range(num_pages+1-show_pages, num_pages+1))
else:
page_list.extend(range(current_page-count, current_page+count+1))
return page_list
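# Worked example (illustrative values): get_pagination(current_page=5, num_pages=20, count=2)
# returns [3, 4, 5, 6, 7] -- a window of 2*count+1 pages centred on the current page.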
def topic_pagination(page, topics):
paginator = Paginator(topics, NUM_PER_PAGE)
try:
topic_list = paginator.page(page)
except PageNotAnInteger:
topic_list = paginator.page(1)
except EmptyPage:
topic_list = paginator.page(paginator.num_pages)
page_list = get_pagination(topic_list.number, paginator.num_pages, 2)
return topic_list, page_list
def author_required(view_func):
def _wrapped_view_func(request, *args, **kwargs):
topic_id = kwargs.get('topic_id')
topic = get_object_or_404(Topic, id=topic_id)
if topic.author == request.user:
return view_func(request, *args, **kwargs)
else:
return HttpResponseForbidden()
return _wrapped_view_func
def get_metioned_user(sender, markdown):
mentioned = set(re.findall(MENTION_REGEX, markdown)) - set([sender.username])
# mentioned = set(re.findall(MENTION_REGEX, markdown))
if mentioned:
return User.objects.filter(username__in=mentioned)
return None
|
63818
|
import sys
# add your project directory to the sys.path
project_home = u'.'
if project_home not in sys.path:
sys.path = [project_home] + sys.path
from DataComets import app as application
|
63822
|
from ..factory import Type
class updateChatPinnedMessage(Type):
chat_id = None # type: "int53"
pinned_message_id = None # type: "int53"
|
63881
|
from .base import BaseModel
from .head import MLPHeadModel
from .pretraining import (
DenoisingPretrainModel,
SAINTPretrainModel,
TabTransformerPretrainModel,
VIMEPretrainModel,
)
|
63897
|
import fabric.api
from provy.core import Role
from provy.more.debian.package.aptitude import AptitudeRole
'''
Roles in this namespace are meant to provide `SELinux <http://selinuxproject.org/>`_ management utilities for Debian distributions.
'''
class SELinuxRole(Role):
'''
This role provides `SELinux <http://selinuxproject.org/>`_ utilities for Debian distributions.
.. warning::
If you're provisioning a Ubuntu server, it's highly recommended you use :class:`AppArmorRole <provy.more.debian.security.apparmor.AppArmorRole>` instead of this one.
Please note that, for SELinux to be installed from scratch, you have to reboot the server so that it relabels all the files in the system for SELinux.
So it's also highly recommended that you provision a server that has SELinux installed and activated already.
Example:
::
from provy.core import Role
from provy.more.debian import SELinuxRole
class MySampleRole(Role):
def provision(self):
with self.using(SELinuxRole) as selinux:
selinux.ensure_login_mapping("foo")
selinux.map_login("foo", "staff_u")
selinux.map_role("foo", ["staff_r", "sysadm_r"])
'''
def __init__(self, prov, context):
super(SELinuxRole, self).__init__(prov, context)
def __distro_is_ubuntu(self):
distro_info = self.get_distro_info()
return distro_info.distributor_id.lower() == 'ubuntu'
def provision(self):
'''
Installs SELinux, its dependencies, its utilities and the `Audit framework <https://www.wzdftpd.net/docs/selinux/audit.html>`_.
Also, it activates SELinux after installing the packages, puts the system in enforce mode and puts the generic users into confinement for enhanced security.
Example:
::
from provy.core import Role
from provy.more.debian import SELinuxRole
class MySampleRole(Role):
def provision(self):
self.provision_role(SELinuxRole) # no need to call this if using with block.
'''
self.install_packages()
self.activate()
self.log('''SELinux provisioned. Don't forget to reboot the server if it didn't have SELinux already installed and activated.''')
def install_packages(self):
'''
Installs the necessary packages to provision SELinux.
This is executed during provisioning, so you can ignore this method.
Example:
::
from provy.core import Role
from provy.more.debian import SELinuxRole
class MySampleRole(Role):
def provision(self):
with self.using(SELinuxRole) as selinux:
selinux.install_packages() # no need to call this directly.
'''
with self.using(AptitudeRole) as aptitude:
if self.__distro_is_ubuntu():
aptitude.ensure_package_installed('selinux')
else:
aptitude.ensure_package_installed('selinux-basics')
aptitude.ensure_package_installed('selinux-policy-default')
aptitude.ensure_package_installed('selinux-utils')
aptitude.ensure_package_installed('auditd')
aptitude.ensure_package_installed('audispd-plugins')
def activate(self):
'''
Activates SELinux, confines generic users and puts the system into enforce mode.
This is executed during provisioning, so you can ignore this method.
Example:
::
from provy.core import Role
from provy.more.debian import SELinuxRole
class MySampleRole(Role):
def provision(self):
with self.using(SELinuxRole) as selinux:
selinux.activate() # no need to call this directly.
'''
if not self.__distro_is_ubuntu():
self.execute('selinux-activate', stdout=False, sudo=True)
self.__confine_generic_users()
self.enforce()
def __confine_generic_users(self):
self.execute("semanage login -m -s 'user_u' -r s0 __default__", stdout=False, sudo=True)
def enforce(self):
'''
Puts the system into enforce mode.
This is executed during provisioning, so you can ignore this method.
Example:
::
from provy.core import Role
from provy.more.debian import SELinuxRole
class MySampleRole(Role):
def provision(self):
with self.using(SELinuxRole) as selinux:
selinux.enforce() # no need to call this directly.
'''
with fabric.api.settings(warn_only=True):
self.execute('setenforce 1', stdout=False, sudo=True)
self.ensure_line('SELINUX=enforcing', '/etc/selinux/config', sudo=True)
def ensure_login_mapping(self, user_or_group):
'''
Makes sure that a mapping exists for a login user to an SELinux user (if creating one now, sets it to the "user_u" SELinux user).
:param user_or_group: The user or group to be changed. If providing a group, pass it with an "@" before the group name (like "@my-group").
:type user_or_group: :class:`str`
Example:
::
from provy.core import Role
from provy.more.debian import SELinuxRole
class MySampleRole(Role):
def provision(self):
with self.using(SELinuxRole) as selinux:
selinux.ensure_login_mapping("foo")
selinux.ensure_login_mapping("@bar")
'''
with fabric.api.settings(warn_only=True):
self.execute('semanage login -a %s' % user_or_group, stdout=False, sudo=True)
def map_login(self, user_or_group, selinux_user):
'''
Maps a login user to an SELinux user.
If the login user has no mapping yet, the role creates one.
:param user_or_group: The user or group to be changed. If providing a group, pass it with an "@" before the group name (like "@my-group").
:type user_or_group: :class:`str`
:param selinux_user: The SELinux user to be referenced.
:type selinux_user: :class:`str`
Example:
::
from provy.core import Role
from provy.more.debian import SELinuxRole
class MySampleRole(Role):
def provision(self):
with self.using(SELinuxRole) as selinux:
selinux.map_login("foo", "staff_u")
'''
self.ensure_login_mapping(user_or_group)
self.execute('semanage login -m -s %s %s' % (selinux_user, user_or_group), stdout=False, sudo=True)
def map_role(self, user_or_group, selinux_roles):
'''
Maps a login user to one or more SELinux roles.
If the login user has no mapping yet, the role creates one.
:param user_or_group: The user or group to be changed. If providing a group, pass it with an "@" before the group name (like "@my-group").
:type user_or_group: :class:`str`
:param selinux_roles: The roles to be referenced.
:type selinux_roles: :class:`iterable`
Example:
::
from provy.core import Role
from provy.more.debian import SELinuxRole
class MySampleRole(Role):
def provision(self):
with self.using(SELinuxRole) as selinux:
selinux.map_role("foo", ["staff_r", "sysadm_r"])
'''
self.ensure_login_mapping(user_or_group)
roles_as_string = ' '.join(selinux_roles)
self.execute("semanage user -m -R '%s' %s" % (roles_as_string, user_or_group), stdout=False, sudo=True)
|
63902
|
import os
from unittest import TestCase
from keras_gpt_2 import get_bpe_from_files
class TestBPE(TestCase):
def test_encode_and_decode(self):
current_path = os.path.dirname(os.path.abspath(__file__))
toy_checkpoint_path = os.path.join(current_path, 'toy_checkpoint')
encoder_path = os.path.join(toy_checkpoint_path, 'encoder.json')
vocab_path = os.path.join(toy_checkpoint_path, 'vocab.bpe')
bpe = get_bpe_from_files(encoder_path, vocab_path)
text = 'Power, give me more power!'
indices = bpe.encode(text)
self.assertEqual([13434, 11, 1577, 502, 517, 1176, 0], indices)
self.assertEqual(text, bpe.decode(indices))
self.assertEqual(text, bpe.decode(bpe.encode(text)))
|
63915
|
from .ingredient import Ingredient
from .recipe import Recipe
from .source import Source
__all__ = ["Ingredient", "Recipe", "Source"]
|
63918
|
import os
import sys
import torch
import models
import logging
import argparse
import datetime
from amp import AMP
from data_utils import load_data
class Instructor:
def __init__(self, args):
self.args = args
self.logger = logging.getLogger()
self.logger.setLevel(logging.INFO)
self.logger.addHandler(logging.StreamHandler(sys.stdout))
self.logger.addHandler(logging.FileHandler(args.log_name))
self.logger.info(f"> creating model {args.model}")
self.model = models.__dict__[args.model](num_classes=args.num_classes, dropout=args.dropout)
self.model.to(args.device)
if args.device.type == 'cuda':
self.logger.info(f"> cuda memory allocated: {torch.cuda.memory_allocated(args.device.index)}")
self._print_args()
def _print_args(self):
n_trainable_params, n_nontrainable_params = 0, 0
for p in self.model.parameters():
n_params = torch.prod(torch.tensor(p.size()))
if p.requires_grad:
n_trainable_params += n_params
else:
n_nontrainable_params += n_params
self.logger.info(f"> n_trainable_params: {n_trainable_params}, n_nontrainable_params: {n_nontrainable_params}")
self.logger.info('> training arguments:')
for arg in vars(self.args):
self.logger.info(f">>> {arg}: {getattr(self.args, arg)}")
def _train(self, train_dataloader, criterion, optimizer):
train_loss, n_correct, n_train = 0, 0, 0
n_batch = len(train_dataloader)
self.model.train()
for i_batch, (inputs, targets) in enumerate(train_dataloader):
inputs, targets = inputs.to(self.args.device), targets.to(self.args.device)
def closure():
optimizer.zero_grad()
outputs = self.model(inputs)
loss = criterion(outputs, targets)
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip_norm)
return outputs, loss
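# The closure is handed to the optimizer so it can re-evaluate the loss and gradients
# during its inner perturbation steps (assumed from AMP's inner_lr/inner_iter arguments).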
outputs, loss = optimizer.step(closure)
train_loss += loss.item() * targets.size(0)
n_correct += (torch.argmax(outputs, -1) == targets).sum().item()
n_train += targets.size(0)
ratio = int((i_batch+1)*50/n_batch)
sys.stdout.write(f"\r[{'>'*ratio}{' '*(50-ratio)}] {i_batch+1}/{n_batch} {(i_batch+1)*100/n_batch:.2f}%")
sys.stdout.flush()
print()
return train_loss / n_train, n_correct / n_train
def _test(self, test_dataloader, criterion):
test_loss, n_correct, n_test = 0, 0, 0
n_batch = len(test_dataloader)
self.model.eval()
with torch.no_grad():
for i_batch, (inputs, targets) in enumerate(test_dataloader):
inputs, targets = inputs.to(self.args.device), targets.to(self.args.device)
outputs = self.model(inputs)
loss = criterion(outputs, targets)
test_loss += loss.item() * targets.size(0)
n_correct += (torch.argmax(outputs, -1) == targets).sum().item()
n_test += targets.size(0)
ratio = int((i_batch+1)*50/n_batch)
sys.stdout.write(f"\r[{'>'*ratio}{' '*(50-ratio)}] {i_batch+1}/{n_batch} {(i_batch+1)*100/n_batch:.2f}%")
sys.stdout.flush()
print()
return test_loss / n_test, n_correct / n_test
def run(self):
train_dataloader, test_dataloader = load_data(batch_size=self.args.batch_size,
workers=0,
dataset=self.args.dataset,
data_target_dir=os.path.join(self.args.data_dir, self.args.dataset),
data_aug=(not self.args.no_data_aug),
cutout=self.args.cutout,
autoaug=self.args.autoaug)
criterion = torch.nn.CrossEntropyLoss()
optimizer = AMP(params=filter(lambda p: p.requires_grad, self.model.parameters()),
lr=self.args.lr,
epsilon=self.args.epsilon,
inner_lr=self.args.inner_lr,
inner_iter=self.args.inner_iter,
base_optimizer=torch.optim.SGD,
momentum=self.args.momentum,
weight_decay=self.args.decay,
nesterov=True)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, self.args.milestones, self.args.gamma)
best_loss, best_acc = 0, 0
for epoch in range(self.args.num_epoch):
train_loss, train_acc = self._train(train_dataloader, criterion, optimizer)
test_loss, test_acc = self._test(test_dataloader, criterion)
scheduler.step()
if test_acc > best_acc or (test_acc == best_acc and test_loss < best_loss):
best_acc, best_loss = test_acc, test_loss
self.logger.info(f"{epoch+1}/{self.args.num_epoch} - {100*(epoch+1)/self.args.num_epoch:.2f}%")
self.logger.info(f"[train] loss: {train_loss:.4f}, acc: {train_acc*100:.2f}, err: {100-train_acc*100:.2f}")
self.logger.info(f"[test] loss: {test_loss:.4f}, acc: {test_acc*100:.2f}, err: {100-test_acc*100:.2f}")
self.logger.info(f"best loss: {best_loss:.4f}, best acc: {best_acc*100:.2f}, best err: {100-best_acc*100:.2f}")
self.logger.info(f"log saved: {self.args.log_name}")
if __name__ == '__main__':
model_names = sorted(name for name in models.__dict__ if name.islower() and not name.startswith('__') and callable(models.__dict__[name]))
num_classes = {'svhn': 10, 'cifar10': 10, 'cifar100': 100}
parser = argparse.ArgumentParser(description='Trainer', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dataset', type=str, default='cifar10', choices=list(num_classes.keys()), help='Dataset name.')
parser.add_argument('--data_dir', type=str, default='data', help='Dictionary for dataset.')
parser.add_argument('--no_data_aug', default=False, action='store_true', help='Disable data augmentation.')
parser.add_argument('--cutout', default=False, action='store_true', help='Enable Cutout augmentation.')
parser.add_argument('--autoaug', default=False, action='store_true', help='Enable AutoAugment.')
parser.add_argument('--model', default='preactresnet18', choices=model_names, help='Model architecture.')
parser.add_argument('--num_epoch', type=int, default=200, help='Number of epochs to train.')
parser.add_argument('--batch_size', type=int, default=50, help='Number of samples in a batch.')
parser.add_argument('--lr', type=float, default=0.1, help='Outer learning rate.')
parser.add_argument('--epsilon', type=float, default=0.5, help='Perturbation norm ball radius.')
parser.add_argument('--inner_lr', type=float, default=1, help='Inner learning rate.')
parser.add_argument('--inner_iter', type=int, default=1, help='Inner iteration number.')
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum.')
parser.add_argument('--decay', type=float, default=1e-4, help='Weight decay (L2 penalty).')
parser.add_argument('--dropout', type=float, default=0, help='Dropout applied to the model.')
parser.add_argument('--clip_norm', type=int, default=50, help='Maximum norm of parameter gradient.')
parser.add_argument('--milestones', type=int, nargs='+', default=[100, 150], help='Decrease learning rate at these epochs.')
parser.add_argument('--gamma', type=float, default=0.1, help='LR is multiplied by gamma at each milestone.')
parser.add_argument('--device', type=str, default=None, choices=['cpu', 'cuda'], help='Device.')
args = parser.parse_args()
args.num_classes = num_classes[args.dataset]
args.log_name = f"{args.dataset}_{args.model}_{datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')[2:]}.log"
args.device = torch.device(args.device) if args.device else torch.device('cuda' if torch.cuda.is_available() else 'cpu')
ins = Instructor(args)
ins.run()
|
63941
|
import math
import os
import time
import numpy as np
import pybullet as p
import pybullet_utils.bullet_client as bc
from gripper_module import load_gripper
from misc.urdf_editor import UrdfEditor
import utils
from fusion import TSDFVolume
class Gripper(object):
"""
A moving mount and a gripper.
the mount has 4 joints:
0: prismatic x;
1: prismatic y;
2: prismatic z;
3: revolute z;
the gripper is defined by the `gripper_type`.
"""
def __init__(self, gripper_type, bullet_client, home_position, num_side_images, voxel_size=0.004, trunc_margin_scale=5, **kwargs):
self._bullet_client = bullet_client
self._gripper_type = gripper_type
self._gripper_size = kwargs['gripper_size']
self._home_position = home_position
self._default_orientation = [0,0,0]
self._num_side_images = num_side_images
# load gripper
self._gripper = load_gripper(gripper_type)(self._bullet_client, **kwargs)
gripper_body_id = self._gripper.load(self._home_position)
# load mount
mount_urdf = 'assets/gripper/mount.urdf'
mount_body_id = self._bullet_client.loadURDF(
mount_urdf,
basePosition=self._home_position,
useFixedBase=True
)
# combine mount and gripper by a joint
ed_mount = UrdfEditor()
ed_mount.initializeFromBulletBody(mount_body_id, self._bullet_client._client)
ed_gripper = UrdfEditor()
ed_gripper.initializeFromBulletBody(gripper_body_id, self._bullet_client._client)
self._gripper_parent_index = 4
newjoint = ed_mount.joinUrdf(
childEditor=ed_gripper,
parentLinkIndex=self._gripper_parent_index,
jointPivotXYZInParent=self._gripper.get_pos_offset(),
jointPivotRPYInParent=self._bullet_client.getEulerFromQuaternion(self._gripper.get_orn_offset()),
jointPivotXYZInChild=[0, 0, 0],
jointPivotRPYInChild=[0, 0, 0],
parentPhysicsClientId=self._bullet_client._client,
childPhysicsClientId=self._bullet_client._client
)
newjoint.joint_type = self._bullet_client.JOINT_FIXED
newjoint.joint_name = "joint_mount_gripper"
urdfname = f".tmp_combined_{self._gripper_type}_{self._gripper_size:.4f}_{np.random.random():.10f}_{time.time():.10f}.urdf"
ed_mount.saveUrdf(urdfname)
# remove mount and gripper bodies
self._bullet_client.removeBody(mount_body_id)
self._bullet_client.removeBody(gripper_body_id)
self._body_id = self._bullet_client.loadURDF(
urdfname,
useFixedBase=True,
basePosition=self._home_position,
baseOrientation=self._bullet_client.getQuaternionFromEuler([0, 0, 0])
)
# remove the combined URDF
os.remove(urdfname)
# configure the gripper (e.g. friction)
self._gripper.configure(self._body_id, self._gripper_parent_index+1)
# define force and speed (movement of mount)
self._force = 10000
self._speed = 0.005
self._tsdf_size = [64, 64, 32]
self._voxel_size = voxel_size
self._trunc_margin_scale = trunc_margin_scale
bond = np.array(self._tsdf_size) * self._voxel_size
self._vol_bnds = np.array([[-bond[0]/2, bond[0]/2],
[-bond[1]/2, bond[1]/2],
[0, bond[2]]])
self._vol_bnds += np.array(self._home_position).reshape(3, -1)
# Add RGB-D camera (mimic RealSense D415) for gripper
self._gripper_cam_lookat = self._vol_bnds.mean(1)
self._gripper_cam_image_size = (512, 512)
self._gripper_cam_z_near = 0.01
self._gripper_cam_z_far = 10.0
self._gripper_cam_fov_w = 69.40
self._gripper_cam_focal_length = (float(self._gripper_cam_image_size[1])/2)/np.tan((np.pi*self._gripper_cam_fov_w/180)/2)
self._gripper_cam_fov_h = (math.atan((float(self._gripper_cam_image_size[0])/2)/self._gripper_cam_focal_length)*2/np.pi)*180
self._gripper_cam_projection_matrix = self._bullet_client.computeProjectionMatrixFOV(
fov=self._gripper_cam_fov_h,
aspect=float(self._gripper_cam_image_size[1])/float(self._gripper_cam_image_size[0]),
nearVal=self._gripper_cam_z_near,
farVal=self._gripper_cam_z_far
) # notes: 1) FOV is vertical FOV 2) aspect must be float
self._gripper_cam_intrinsics = np.array([[self._gripper_cam_focal_length, 0, float(self._gripper_cam_image_size[1])/2],
[0, self._gripper_cam_focal_length, float(self._gripper_cam_image_size[0])/2],
[0, 0, 1]])
self.fix_joints(range(self._bullet_client.getNumJoints(self._body_id)))
def get_gripper_cam_data(self, cam_position, cam_lookat, cam_up_direction):
cam_view_matrix = self._bullet_client.computeViewMatrix(cam_position, cam_lookat, cam_up_direction)
cam_pose_matrix = np.linalg.inv(np.array(cam_view_matrix).reshape(4, 4).T)
# TODO: fix flipped up and forward vectors (quick hack)
cam_pose_matrix[:, 1:3] = -cam_pose_matrix[:, 1:3]
camera_data = self._bullet_client.getCameraImage(self._gripper_cam_image_size[1],self._gripper_cam_image_size[0],
cam_view_matrix,self._gripper_cam_projection_matrix,
shadow=1,flags=self._bullet_client.ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX,
renderer=self._bullet_client.ER_BULLET_HARDWARE_OPENGL)
rgb_pixels = np.array(camera_data[2]).reshape((self._gripper_cam_image_size[0], self._gripper_cam_image_size[1], 4))
color_image = rgb_pixels[:,:,:3] # remove alpha channel
z_buffer = np.array(camera_data[3]).reshape((self._gripper_cam_image_size[0], self._gripper_cam_image_size[1]))
segmentation_mask = None # camera_data[4] - not implemented yet with renderer=p.ER_BULLET_HARDWARE_OPENGL
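# Convert the non-linear OpenGL z-buffer values to metric depth via the standard
# perspective depth linearisation using the camera's near/far clipping planes.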
depth_image = (2.0*self._gripper_cam_z_near*self._gripper_cam_z_far)/(self._gripper_cam_z_far+self._gripper_cam_z_near-(2.0*z_buffer-1.0)*(self._gripper_cam_z_far-self._gripper_cam_z_near))
return color_image, depth_image, segmentation_mask, cam_pose_matrix
def get_tsdf(self, open_scale):
self.move(self._home_position, 0)
self.close()
self.open(open_scale=open_scale)
self._gripper_tsdf = TSDFVolume(self._vol_bnds, voxel_size=self._voxel_size)
# take side images
cam_up_direction = [0, 0, 1]
side_look_directions = np.linspace(0, 2*np.pi, num=self._num_side_images, endpoint=False)
cam_distance = 1
for direction in side_look_directions:
cam_position = [
self._home_position[0] + cam_distance * np.cos(direction),
self._home_position[1] + cam_distance * np.sin(direction),
self._home_position[2]
]
color_image, depth_image, _, cam_pose_matrix = self.get_gripper_cam_data(cam_position, self._gripper_cam_lookat, cam_up_direction)
self._gripper_tsdf.integrate(color_image, depth_image, self._gripper_cam_intrinsics, cam_pose_matrix, obs_weight=1.)
# take image from top
color_image, depth_image, _, cam_pose_matrix = self.get_gripper_cam_data([0, 0, 2], self._gripper_cam_lookat, [1, 0, 0])
self._gripper_tsdf.integrate(color_image, depth_image, self._gripper_cam_intrinsics, cam_pose_matrix, obs_weight=2.)
# take image from bottom
color_image, depth_image, _, cam_pose_matrix = self.get_gripper_cam_data([0, 0, 0], self._gripper_cam_lookat, [1, 0, 0])
self._gripper_tsdf.integrate(color_image, depth_image, self._gripper_cam_intrinsics, cam_pose_matrix, obs_weight=2.)
tsdf_vol_cpu, _ = self._gripper_tsdf.get_volume()
        tsdf_vol_cpu = np.transpose(tsdf_vol_cpu, [1, 0, 2]) # swap x-axis and y-axis to make it consistent with scene_tsdf
return tsdf_vol_cpu
def open(self, open_scale):
self._gripper.open(self._body_id, self._gripper_parent_index+1, open_scale=open_scale)
def close(self):
self._gripper.close(self._body_id, self._gripper_parent_index+1)
def move(self, target_position, rotation_angle, stop_at_contact=False):
"""
:param target_position: (x, y, z). the position of the bottom center, not the base!
:param rotation_angle: rotation in z axis \in [0, 2 * \pi]. For 2-finger gripper, angle=0 --> parallel to x-axis
"""
target_position = np.array(target_position) - np.array(self._home_position)
joint_ids = [0, 1, 2, 3]
target_states = [target_position[0], target_position[1], target_position[2], rotation_angle%(2*np.pi)]
self._bullet_client.setJointMotorControlArray(
self._body_id,
joint_ids,
self._bullet_client.POSITION_CONTROL,
targetPositions=target_states,
forces=[self._force] * len(joint_ids),
positionGains=[self._speed] * len(joint_ids)
)
for i in range(240 * 6):
current_states = np.array([self._bullet_client.getJointState(self._body_id, joint_id)[0] for joint_id in joint_ids])
states_diff = np.abs(target_states - current_states)
# stop moving gripper if gripper collide with other objects
if stop_at_contact:
is_in_contact = False
points = self._bullet_client.getContactPoints(bodyA=self._body_id)
if len(points) > 0:
for p in points:
if p[9] > 0:
is_in_contact = True
break
if is_in_contact:
break
if np.all(states_diff < 1e-4):
break
self._gripper.step_constraints(self._body_id, self._gripper_parent_index+1)
self._bullet_client.stepSimulation()
self.fix_joints(joint_ids)
def fix_joints(self, joint_ids):
current_states = np.array([self._bullet_client.getJointState(self._body_id, joint_id)[0] for joint_id in joint_ids])
self._bullet_client.setJointMotorControlArray(
self._body_id,
joint_ids,
self._bullet_client.POSITION_CONTROL,
targetPositions=current_states,
forces=[self._force] * len(joint_ids),
positionGains=[self._speed] * len(joint_ids)
)
def primitive_grasping(self, target_position, rotation_angle, open_scale=1.0, stop_at_contact=False):
"""
:param target_position: (x, y, z). the position of the bottom center, not the base!
:param rotation_angle: rotation in z axis \in [0, 2 * \pi]
:return successs or not (True/False)
"""
self.move([target_position[0], target_position[1], self._home_position[2]], rotation_angle)
self.open(open_scale)
self.move(target_position, rotation_angle, stop_at_contact=stop_at_contact)
self.close()
self.move([target_position[0], target_position[1], self._home_position[2]], rotation_angle)
def remove(self):
self._bullet_client.removeBody(self._body_id)
def get_vis_pts(self, open_scale):
pts = self._gripper.get_vis_pts(open_scale)
angle = self._default_orientation[-1] # only add rotation around z axis
rotated_pts = np.transpose(np.dot(np.asarray(
[[np.cos(angle),-np.sin(angle)],
[np.sin(angle), np.cos(angle)]]),np.transpose(pts)))
return rotated_pts
|
63975
|
from time import perf_counter as clock
import sys
sys.path.append('../../../')
print (sys.path)
from tspdb.src.data import generateHarmonics as gH
from tspdb.src.data import generateTrend as gT
import tspdb.src.data.generateARMA as gA
import numpy as np
from tspdb.src.hdf_util import write_data
import matplotlib.pyplot as plt
def armaDataTest(timeSteps):
arLags = []#[0.4, 0.3, 0.2]
maLags = []#[0.5, 0.1]
startingArray = np.zeros(np.max([len(arLags), len(maLags)])) # start with all 0's
noiseMean = 0.0
noiseSD = [1.0]
(observedArray, meanArray, errorArray) = gA.generate(arLags, maLags, startingArray, timeSteps, noiseMean, noiseSD)
return (observedArray, meanArray)
def trendDataTest(timeSteps):
dampening = 2.0*float(1.0/timeSteps)
power = 0.35
displacement = -2.5
f1 = gT.linearTrendFn
data = gT.generate(f1, power=power, displacement=displacement, timeSteps=timeSteps)
f2 = gT.logTrendFn
f3 = gT.negExpTrendFn
return data
def harmonicDataTest(timeSteps):
sineCoeffs = [-2.0, 3.0]
sinePeriods = [560.0, 30.0]
cosineCoeffs = [-2.5]
cosinePeriods = [16.0]
data = gH.generate(sineCoeffs, sinePeriods, cosineCoeffs, cosinePeriods, timeSteps)
#plt.plot(data)
#plt.show()
return data
timeSteps = 10**5 +10000
print('generating data..')
dt = clock()
harmonicsTS = harmonicDataTest(timeSteps)
trendTS = trendDataTest(timeSteps)
(armaTS, armaMeanTS) = armaDataTest(timeSteps)
meanTS = harmonicsTS + trendTS #+ armaMeanTS
# combinedTS = harmonicsTS + trendTS + armaTS
var = harmonicsTS
var = (var - min(var))
errorArray = np.random.normal(0, np.sqrt(var[:timeSteps]), timeSteps)
combinedTS = meanTS + errorArray
# max1 = np.nanmax(combinedTS)
# min1 = np.nanmin(combinedTS)
# max2 = np.nanmax(meanTS)
# min2 = np.nanmin(meanTS)
# max = np.max([max1, max2])
# min = np.min([min1, min2])
# combinedTS = tsUtils.normalize(combinedTS, max, min)
# meanTS = tsUtils.normalize(meanTS, max, min)
# p = 1
plt.plot(combinedTS, label = 'obs')
plt.plot(meanTS, label = 'mean')
plt.plot(var, label = 'var')
plt.show()
print('Data Generated in ', clock() - dt)
write_data('MixtureTS_var2.h5', 'means', meanTS)
write_data('MixtureTS_var2.h5', 'obs', combinedTS,'a')
write_data('MixtureTS_var2.h5', 'var', var,'a')
# DF = pd.DataFrame()
# DF['means'] = meanTS
# DF['Obs'] = combinedTS
# DF['trainData'] = trainData
# DF.to_hdf('MixtureTS.h5','ts1')
|
64018
|
import re
import bs4
from LimeSoup.lime_soup import Soup, RuleIngredient
from LimeSoup.parser.elsevier_xml import (
resolve_elsevier_entities, extract_ce_text, find_non_empty_children,
node_named, extract_ce_para, extract_ce_section, extract_ce_abstract,
extract_ce_title, remove_consecutive_whitespaces)
__author__ = '<NAME>, <NAME>'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__version__ = '0.3.2-xml'
__all__ = ['ElsevierXMLSoup']
class ElsevierParseXML(RuleIngredient):
@staticmethod
def _parse(xml_str):
xml_str = resolve_elsevier_entities(xml_str)
return bs4.BeautifulSoup(xml_str, 'lxml-xml')
class ElsevierReadMetaData(RuleIngredient):
@staticmethod
def get_text_or_none(soup, name, handler=None):
if soup is None:
return None
node = soup.find(name=name)
if node is None:
return None
elif handler is not None:
return handler(node)
else:
return node.get_text().strip()
@staticmethod
def _parse(soup):
# journal
journal_name = ElsevierReadMetaData.get_text_or_none(soup, 'xocs:srctitle') or \
ElsevierReadMetaData.get_text_or_none(soup, 'prism:publicationName')
doi = ElsevierReadMetaData.get_text_or_none(soup, 'xocs:doi')
# https://www.elsevier.com/__data/assets/pdf_file/0003/58872/ja5_tagbytag5_v1.9.5.pdf
# Elsevier XML definition pp. 46
head_node = soup.find('head')
title = ElsevierReadMetaData.get_text_or_none(head_node, 'ce:title', extract_ce_title) or \
ElsevierReadMetaData.get_text_or_none(soup, 'dc:title')
keywords = []
if head_node is not None:
# Elsevier XML definition pp. 366
for node in head_node.find_all('ce:keyword'):
text_node = node.find('ce:text')
if text_node is not None:
keyword = remove_consecutive_whitespaces(
extract_ce_text(text_node),
keep_newline=False
).strip()
keywords.append(keyword)
if len(keywords) == 0:
for subject in soup.find_all('dcterms:subject'):
keywords.append(subject.get_text().strip())
return soup, {
'Journal': journal_name,
'DOI': doi,
'Title': title,
'Keywords': keywords
}
class ElsevierCollect(RuleIngredient):
@staticmethod
def _parse(args):
soup, obj = args
paragraphs = []
# find all sections
for node in soup.find_all('ce:abstract'):
abstract_paragraph = extract_ce_abstract(node)
normalized_name = re.sub(r'[^\w]', '', abstract_paragraph['name'])
if re.match(r'abstracts?', normalized_name, re.IGNORECASE):
paragraphs.append(abstract_paragraph)
sections = soup.find('ce:sections')
if sections is not None:
for node in find_non_empty_children(sections):
if node_named(node, 'ce:para'):
paragraphs.extend(extract_ce_para(node).split('\n'))
elif node_named(node, 'ce:section'):
paragraphs.append(extract_ce_section(node))
obj['Sections'] = paragraphs
return obj
ElsevierXMLSoup = Soup(parser_version=__version__)
ElsevierXMLSoup.add_ingredient(ElsevierParseXML())
ElsevierXMLSoup.add_ingredient(ElsevierReadMetaData())
ElsevierXMLSoup.add_ingredient(ElsevierCollect())
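# Illustrative usage sketch (not part of the original module): assuming LimeSoup's standard
# Soup.parse() entry point and a local Elsevier XML file named 'article.xml' (hypothetical path):
#
#     with open('article.xml', encoding='utf-8') as f:
#         parsed = ElsevierXMLSoup.parse(f.read())
#     print(parsed['DOI'], parsed['Title'], len(parsed['Sections']))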
|
64025
|
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import logging
import configparser
import time
import json
import concurrent.futures
logger = logging.getLogger(__name__)
config = configparser.ConfigParser()
config.read('config.ini')
crxcavator_api = config['crxcavator']['api']
MAX_THREADS = int(config['crxcavator']['threads']) # Get max number of threads for multi-threading
class CrXcavator(object):
def __init__(self, extension_id, version, name):
self.id = extension_id
self.version = version
self.name = name
self.risk_csp = None
self.risk_external_javascript = None
self.risk_external_calls = None
self.risk_score = 0
self.entry_points = None
self.dangerous_functions = None
self.chrome_link = "https://chrome.google.com/webstore/detail/{0}".format(extension_id)
self.crxcavator_link = "https://crxcavator.io/report/{0}/{1}".format(extension_id, version)
def print(self):
print('ID: %s' % self.id)
print('Version: %s' % self.version)
print('Score: %d' % self.risk_score)
print('Link: %s' % self.chrome_link)
print('CrXcavator Link: %s' % self.crxcavator_link)
if self.risk_csp is not None:
print('CSP: \n%s' % json.dumps(self.risk_csp, indent=2))
if self.risk_external_javascript is not None:
print('External JavaScript: \n%s' % json.dumps(self.risk_external_javascript, indent=2))
if self.risk_external_calls is not None:
print('External Calls: \n%s' % json.dumps(self.risk_external_calls, indent=2))
if self.dangerous_functions is not None:
print('Dangerous Functions: \n%s' % json.dumps(self.dangerous_functions, indent=2))
if self.entry_points is not None:
print('Entry Points: \n%s' % json.dumps(self.entry_points, indent=2))
print()
# Generate a session with a maximum of 3 connection retries and exponential backoff (backoff_factor=0.5)
def session_generator():
session = requests.Session()
session.headers.update({'API-Key': config['crxcavator']['key'], 'Content-Type': 'application/json'})
retry = Retry(connect=3, backoff_factor=0.5)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
# Parse risk data returned from report of crxcavator
def parse_risk_data(extension_id, version, data):
riskobj = CrXcavator(extension_id, version, data['webstore']['name'])
if 'csp' in data:
riskobj.risk_csp = data['csp']
if 'extjs' in data:
riskobj.risk_external_javascript = data['extjs']
if 'extcalls' in data:
riskobj.risk_external_calls = data['extcalls']
if 'entrypoints' in data:
riskobj.entry_points = data['entrypoints']
if 'dangerousfunctions' in data:
riskobj.dangerous_functions = data['dangerousfunctions']
if 'risk' in data:
for each_item in data['risk']:
if each_item == 'total' or each_item == 'webstore' or each_item == 'metadata':
continue
else:
riskobj.risk_score = riskobj.risk_score + int(data['risk'][each_item]['total'])
return riskobj
# Get risk data for a particular extension and their version
def get_extension_risk(extension_id, version):
risk_obj = None
session = session_generator()
resp = session.get("%s/report/%s/%s" % (crxcavator_api, extension_id, version))
if resp.ok:
try:
response = resp.json()
except json.decoder.JSONDecodeError:
logger.warning('JSON Decode Error. Retrying for extension %s version %s' % (extension_id, version))
risk_obj = get_extension_risk(extension_id, version)
return risk_obj
if response is None:
logger.info('Failed to fetch report on %s version %s' % (extension_id, version))
else:
if 'version' in response:
if response['version'] is not None:
risk_obj = parse_risk_data(extension_id, response['version'], response['data'])
else:
print(json.dumps(response, indent=4))
elif 600 > resp.status_code >= 500 or resp.status_code == 429:
logger.warning("Exceed rate limit.")
time.sleep(60)
# TO DO:
# Check header to see if spits out retry.
# print(resp.header)
risk_obj = get_extension_risk(extension_id, version)
else:
logger.error('ERROR %s: %s' % (resp.status_code, resp.text))
logger.error('Unable to get risk data on extension %s of version %s' % (extension_id, version))
return risk_obj
# Submit an extension to get it scanned by crxcavator. This would also be useful to classify the extensions to the
# below categories
def submit_extension(extension_id: str):
submit_results = {}
submit_results['id'] = extension_id
submit_results['version'] = None
submit_results['extension'] = False
submit_results['not_free'] = False
submit_results['run_again'] = False
submit_results['removed_from_store'] = False
data = {'extension_id': extension_id}
session = session_generator()
resp = session.post("%s/submit" % crxcavator_api, json=data)
if resp.ok:
try:
response = resp.json()
except json.decoder.JSONDecodeError:
logger.warning('JSON Decode Error. Retrying for extension %s' % extension_id)
submit_results = submit_extension(extension_id)
return submit_results
if 'error' not in response:
if "no longer in Chrome" in response['message']:
submit_results['removed_from_store'] = True
else:
submit_results['version'] = response['version']
submit_results['extension'] = True
else:
if "not free" in response['error']:
submit_results['not_free'] = True
elif "Error retrieving extension from webstore" in response['error']:
submit_results['run_again'] = True
elif "Theme" in response['error']:
submit_results['extension'] = False
elif 'Error extension is too big' in response['error']:
submit_results['version'] = ""
submit_results['extension'] = True
else:
logger.error('Extension %s: %s' % (extension_id, response['error']))
elif resp.status_code == 429:
logger.warning("Exceed rate limit.")
time.sleep(60)
# TO DO:
# Check header to see if spits out retry.
# print(resp.header)
submit_results = submit_extension(extension_id)
elif 600 > resp.status_code >= 500:
time.sleep(90)
logger.error('Server not responsive for extension %s. Trying Again' % extension_id)
submit_results['run_again'] = True
else:
logger.error('ERROR %s: %s' % (resp.status_code, resp.text))
return submit_results
# Get risk data on multiple versions of the same chrome extension
def fetch_risk_details(extension_id, versions):
riskobjs = []
# Check if report exist for current version
with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_THREADS) as executor:
fs = [executor.submit(get_extension_risk, extension_id, version) for version in versions]
for future in concurrent.futures.as_completed(fs):
riskobj = future.result()
if riskobj is not None:
riskobjs.append(riskobj)
return riskobjs
|
64031
|
from .build_arima import BuildArima
from .build_sarimax import BuildSarimax
from .build_autoarimax import BuildAutoSarimax
from .build_var import BuildVAR
|
64066
|
import sqlalchemy.orm
from .authors import Author
from .base import Base
from .books import Book
def setup(engine):
Base.metadata.create_all(engine)
session = sqlalchemy.orm.Session(engine)
author_wodehouse = Author(name="<NAME>")
author_bernières = Author(name="<NAME>")
session.add_all((author_wodehouse, author_bernières))
session.flush()
session.add(Book(title="Leave It to Psmith", genre="comedy", author_id=author_wodehouse.id))
session.add(Book(title="Right Ho, Jeeves", genre="comedy", author_id=author_wodehouse.id))
session.add(Book(title="Captain Corelli's Mandolin", genre="historical_fiction", author_id=author_bernières.id))
session.commit()
|
64076
|
import datetime
import unittest
from flask import Blueprint, request, jsonify
from freezegun import freeze_time
from mock import Mock, patch
import jwt
from requests.exceptions import HTTPError
from shared_helpers import services
from testing import TrottoTestCase, LIVE_APP_HOST
class TestFunctions(unittest.TestCase):
@patch('shared_helpers.services.get_service_config', return_value={'signing_secret': 'so_secret'})
def test__create_internal_token(self, mock_get_service_config):
now = datetime.datetime.now(datetime.timezone.utc)
with freeze_time(now):
token = services._create_internal_token('my_service', {'id': 1})
self.assertEqual({'exp': int(now.timestamp()) + 30,
'id': 1},
jwt.decode(token, 'so_secret', algorithms=['HS256']))
with freeze_time(now + datetime.timedelta(seconds=40)):
with self.assertRaises(jwt.exceptions.ExpiredSignatureError):
jwt.decode(token, 'so_secret', algorithms=['HS256'])
mock_get_service_config.assert_called_once_with('my_service')
@patch('shared_helpers.services.requests.get')
@patch('shared_helpers.services._create_internal_token', return_value='internal_token')
@patch('shared_helpers.services.get_service_config', return_value={'base_url': 'https://trot.to'})
def test_get__basic(self, mock_get_service_config, mock_create_internal_token, mock_requests_get):
mock_response = Mock()
mock_response.json.return_value = {'id': 1}
mock_requests_get.return_value = mock_response
self.assertEqual({'id': 1},
services.get('my_service', 'api/users'))
mock_get_service_config.assert_called_once_with('my_service')
mock_create_internal_token.assert_called_once_with('my_service', {'url': 'https://trot.to/api/users'})
mock_requests_get.assert_called_once_with('https://trot.to/api/users',
headers={'X-Token': 'internal_token'})
@patch('shared_helpers.services.requests.get')
@patch('shared_helpers.services._create_internal_token', return_value='internal_token')
@patch('shared_helpers.services.get_service_config', return_value={'base_url': 'https://trot.to/'})
def test_get__trailing_and_leading_slashes(self,
mock_get_service_config, mock_create_internal_token, mock_requests_get):
mock_response = Mock()
mock_response.json.return_value = {'id': 1}
mock_requests_get.return_value = mock_response
self.assertEqual({'id': 1},
services.get('my_service', '/api/users'))
mock_get_service_config.assert_called_once_with('my_service')
mock_create_internal_token.assert_called_once_with('my_service', {'url': 'https://trot.to/api/users'})
mock_requests_get.assert_called_once_with('https://trot.to/api/users',
headers={'X-Token': 'internal_token'})
@patch('shared_helpers.services.requests.get')
@patch('shared_helpers.services._create_internal_token', return_value='internal_token')
@patch('shared_helpers.services.get_service_config', return_value={'base_url': 'https://trot.to'})
def test_get__http_error(self, mock_get_service_config, mock_create_internal_token, mock_requests_get):
mock_response = Mock()
mock_response.raise_for_status.side_effect = HTTPError
mock_requests_get.return_value = mock_response
with self.assertRaises(HTTPError):
services.get('my_service', 'api/users')
mock_get_service_config.assert_called_once_with('my_service')
mock_create_internal_token.assert_called_once_with('my_service', {'url': 'https://trot.to/api/users'})
mock_requests_get.assert_called_once_with('https://trot.to/api/users',
headers={'X-Token': 'internal_token'})
def test_validate_internal_request__no_token(self):
mock_request = Mock()
mock_request.headers = {}
with self.assertRaises(services.InvalidInternalToken) as cm:
services.validate_internal_request(mock_request)
self.assertEqual('no token',
str(cm.exception))
@patch('shared_helpers.services.get_config_by_key_path', return_value='so_secret')
def test_validate_internal_request__invalid_signature__wrong_secret(self, mock_get_config_by_key_path):
token = jwt.encode({'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=30),
'url': 'https://trot.to/api/users'},
'a_secret',
algorithm='HS256')
mock_request = Mock()
mock_request.headers = {'X-Token': token}
mock_request.url = 'https://trot.to/api/users'
with self.assertRaises(services.InvalidInternalToken) as cm:
services.validate_internal_request(mock_request)
self.assertEqual('invalid signature',
str(cm.exception))
mock_get_config_by_key_path.assert_called_once_with(['signing_secret'])
@patch('shared_helpers.services.get_config_by_key_path', return_value='so_secret')
def test_validate_internal_request__invalid_signature__no_exp(self, mock_get_config_by_key_path):
token = jwt.encode({'url': 'https://trot.to/api/users'},
'so_secret',
algorithm='HS256')
mock_request = Mock()
mock_request.headers = {'X-Token': token}
mock_request.url = 'https://trot.to/api/users'
with self.assertRaises(services.InvalidInternalToken) as cm:
services.validate_internal_request(mock_request)
self.assertEqual('missing exp',
str(cm.exception))
mock_get_config_by_key_path.assert_called_once_with(['signing_secret'])
@patch('shared_helpers.services.get_config_by_key_path', return_value='so_secret')
def test_validate_internal_request__expired_token(self, mock_get_config_by_key_path):
token = jwt.encode({'exp': datetime.datetime.utcnow() - datetime.timedelta(seconds=1),
'url': 'https://trot.to/api/users'},
'so_secret',
algorithm='HS256')
mock_request = Mock()
mock_request.headers = {'X-Token': token}
mock_request.url = 'https://trot.to/api/users'
with self.assertRaises(services.InvalidInternalToken) as cm:
services.validate_internal_request(mock_request)
self.assertEqual('expired',
str(cm.exception))
mock_get_config_by_key_path.assert_called_once_with(['signing_secret'])
@patch('shared_helpers.services.get_config_by_key_path', return_value='so_secret')
def test_validate_internal_request__mismatched_url(self, mock_get_config_by_key_path):
token = jwt.encode({'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=30),
'url': 'https://trot.to/api/users/1'},
'so_secret',
algorithm='HS256')
mock_request = Mock()
mock_request.headers = {'X-Token': token}
mock_request.url = 'https://trot.to/api/users'
with self.assertRaises(services.InvalidInternalToken) as cm:
services.validate_internal_request(mock_request)
self.assertEqual('mismatched URL',
str(cm.exception))
mock_get_config_by_key_path.assert_called_once_with(['signing_secret'])
@patch('shared_helpers.services.get_config_by_key_path', return_value='so_secret')
def test_validate_internal_request__valid_token(self, mock_get_config_by_key_path):
token = jwt.encode({'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=30),
'url': 'https://trot.to/api/users'},
'so_secret',
algorithm='HS256')
mock_request = Mock()
mock_request.headers = {'X-Token': token}
mock_request.url = 'https://trot.to/api/users'
self.assertEqual(True,
services.validate_internal_request(mock_request))
mock_get_config_by_key_path.assert_called_once_with(['signing_secret'])
routes = Blueprint('test', __name__)
@routes.route('/_/api/users', methods=['GET'])
def get_users():
services.validate_internal_request(request)
return jsonify([{'id': 1}])
class TestIntegration(TrottoTestCase):
blueprints_under_test = [routes]
start_live_app = True
live_app_config = {'sessions_secret': 'a_sessions_secret',
'signing_secret': 'so_secret',
'postgres': {'url': 'postgresql://admin:testing@/testing_trotto_core'}}
@patch('shared_helpers.config.get_config', return_value={'services': {'my_service': {'signing_secret': 'so_secret',
'base_url': LIVE_APP_HOST}}})
def test_internal_request__real_handler__valid_token(self, _):
self.assertEqual([{'id': 1}],
services.get('my_service', '/_/api/users'))
@patch('shared_helpers.config.get_config', return_value={'services': {'my_service': {'signing_secret': 'a_secret',
'base_url': LIVE_APP_HOST}}})
def test_internal_request__real_handler__invalid_token(self, _):
with self.assertRaises(HTTPError) as cm:
self.assertEqual([{'id': 1}],
services.get('my_service', '/_/api/users'))
self.assertEqual(500,
cm.exception.response.status_code)
|
64088
|
from tableschema import Table
# Data from WEB, schema from MEMORY
SOURCE = 'https://raw.githubusercontent.com/frictionlessdata/tableschema-py/master/data/data_infer.csv'
SCHEMA = {'fields': [{'name': 'id', 'type': 'integer'}, {'name': 'age', 'type': 'integer'}, {'name': 'name', 'type': 'string'}] }
# If schema is not passed it will be inferred
table = Table(SOURCE, schema=SCHEMA)
rows = table.iter()
while True:
try:
print(next(rows))
except StopIteration:
break
except Exception as exception:
print(exception)
|
64129
|
import numpy as np
import sys,os
##################################### INPUT ############################################
realizations = 2000
########################################################################################
root1 = '/simons/scratch/fvillaescusa/pdf_information/Snapshots/latin_hypercube'
root2 = '/simons/scratch/fvillaescusa/pdf_information/Linear_Pk/latin_hypercube'
# do a loop over all realizations
for i in range(realizations):
folder_in = '%s/%d'%(root1,i)
folder_out = '%s/%d'%(root2,i)
if not(os.path.exists(folder_out)): os.system('mkdir %s'%folder_out)
os.system('cp %s/CAMB.params %s/'%(folder_in, folder_out))
os.system('cp %s/ICs/Pk_mm_z=0.000.txt %s/'%(folder_in, folder_out))
|
64160
|
import re
import lib.core.common
__product__ = "3dcart"
__description__ = (
"The 3dcart Shopping Cart Software is a complete e-commerce solution for anyone."
)
def search(html, **kwargs):
html = str(html)
headers = kwargs.get("headers", None)
plugin_detection_schema = (
re.compile(r"3dcart.stats", re.I),
re.compile(r"/3dvisit/", re.I)
)
for plugin in plugin_detection_schema:
if plugin.search(html) is not None:
return True
        if headers is not None and plugin.search(headers.get(lib.core.common.HTTP_HEADER.SET_COOKIE, "")) is not None:
            return True
|
64179
|
expected_output = {
"tag": {
"test": {
"system_id": {
"R2_xr": {
"type": {
"L1L2": {
"area_address": ["49.0001"],
"circuit_id": "R1_xe.01",
"format": "Phase V",
"interface": "GigabitEthernet2.115",
"ip_address": ["10.12.115.2*"],
"ipv6_address": ["FE80::F816:3EFF:FE67:2452"],
"nsf": "capable",
"priority": 64,
"state": "up",
"topology": ["ipv4", "ipv6"],
"uptime": "3d04h",
}
}
},
"R3_nx": {
"type": {
"L1L2": {
"area_address": ["49.0001"],
"circuit_id": "R1_xe.02",
"format": "Phase V",
"interface": "GigabitEthernet3.115",
"ip_address": ["10.13.115.3*"],
"ipv6_address": ["FE80::5C01:FF:FE02:7"],
"nsf": "capable",
"priority": 64,
"state": "up",
"topology": ["ipv4", "ipv6"],
"uptime": "3d04h",
}
}
},
}
},
"test1": {
"system_id": {
"2222.22ff.4444": {
"type": {
"L1L2": {
"area_address": ["49.0001"],
"circuit_id": "2222.22ff.4444.01",
"format": "Phase V",
"interface": "GigabitEthernet2.415",
"ip_address": ["10.12.115.2*"],
"ipv6_address": ["FE80::F816:3EFF:FE67:2452"],
"nsf": "capable",
"priority": 128,
"state": "init",
"topology": ["ipv4", "ipv6"],
"uptime": "3d04h",
}
}
},
"R3_nx": {
"type": {
"L1L2": {
"area_address": ["49.0001"],
"circuit_id": "R1_xe.02",
"format": "Phase V",
"interface": "GigabitEthernet3.415",
"ip_address": ["10.13.115.3*"],
"ipv6_address": ["FE80::5C01:FF:FE02:7"],
"nsf": "capable",
"priority": 64,
"state": "up",
"topology": ["ipv4", "ipv6"],
"uptime": "3d04h",
}
}
},
}
},
}
}
|
64185
|
from wasmer import engine, Store, Module, Instance
from wasmer_compiler_cranelift import Compiler as Cranelift
from wasmer_compiler_llvm import Compiler as LLVM
from wasmer_compiler_singlepass import Compiler as Singlepass
TEST_BYTES = open('benchmarks/nbody.wasm', 'rb').read()
def test_benchmark_headless_time_nbody_cranelift_jit(benchmark):
store = Store(engine.JIT(Cranelift))
module = Module(store, TEST_BYTES)
serialized = module.serialize()
@benchmark
def bench():
deserialized = Module.deserialize(store, serialized)
_ = Instance(deserialized)
def test_benchmark_headless_time_nbody_cranelift_native(benchmark):
store = Store(engine.Native(Cranelift))
module = Module(store, TEST_BYTES)
serialized = module.serialize()
@benchmark
def bench():
deserialized = Module.deserialize(store, serialized)
_ = Instance(deserialized)
def test_benchmark_headless_time_nbody_llvm_jit(benchmark):
store = Store(engine.JIT(LLVM))
module = Module(store, TEST_BYTES)
serialized = module.serialize()
@benchmark
def bench():
deserialized = Module.deserialize(store, serialized)
_ = Instance(deserialized)
def test_benchmark_headless_time_nbody_llvm_native(benchmark):
store = Store(engine.Native(LLVM))
module = Module(store, TEST_BYTES)
serialized = module.serialize()
@benchmark
def bench():
deserialized = Module.deserialize(store, serialized)
_ = Instance(deserialized)
def test_benchmark_headless_time_nbody_singlepass_jit(benchmark):
store = Store(engine.JIT(Singlepass))
module = Module(store, TEST_BYTES)
serialized = module.serialize()
@benchmark
def bench():
deserialized = Module.deserialize(store, serialized)
_ = Instance(deserialized)
def test_benchmark_headless_time_nbody_singlepass_native(benchmark):
store = Store(engine.Native(Singlepass))
module = Module(store, TEST_BYTES)
serialized = module.serialize()
@benchmark
def bench():
deserialized = Module.deserialize(store, serialized)
_ = Instance(deserialized)
|
64240
|
import torch
import torch.nn as nn
import physics_aware_training.digital_twin_utils
class SplitInputParameterNet(nn.Module):
def __init__(self,
input_dim,
nparams,
output_dim,
parameterNunits = [100,100,100],
internalNunits = [10,10,10]):
        '''
        Defines a network that splits inputs x into physical system inputs and parameters.
        Inputs are propagated through a "main" neural network whose weights and biases are
        predicted by an auxiliary neural network whose inputs are the parameters.
        Args:
            input_dim (int): dimension of physical system inputs
            nparams (int): dimension of all physical system parameters combined
            output_dim (int): dimension of physical system outputs
            parameterNunits (list of int): number of hidden units per layer of the
                auxiliary parameter network
            internalNunits (list of int): number of hidden units per layer of the main
                neural network that propagates physical system inputs
        '''
super(SplitInputParameterNet, self).__init__()
self.input_dim = input_dim
self.nparams = nparams
self.output_dim = output_dim
self.internalNunits = internalNunits
self.inputNlayers = len(internalNunits)
nparameters = 0
for i in range(len(internalNunits)-1):
nparameters += internalNunits[i]*internalNunits[i+1]
nparameters += internalNunits[i+1]
        # parameterNet is a submodel that predicts all weights and biases of the main input
        # network (nparameters values per sample) from the physical system parameters
self.parameterNet = torch.nn.Sequential()
self.parameterNet.add_module("fcIn", torch.nn.Linear(nparams, parameterNunits[0]))
for i in range(len(parameterNunits)):
if i<len(parameterNunits)-1:
self.parameterNet.add_module(f"relu{i}", torch.nn.ReLU())
self.parameterNet.add_module(f"fc{i}", torch.nn.Linear(parameterNunits[i], parameterNunits[i+1]))
else:
self.parameterNet.add_module(f"relu{i}", torch.nn.ReLU())
self.parameterNet.add_module(f"fcOut", torch.nn.Linear(parameterNunits[i], nparameters))
# two fully connected input and output layers adjust the input and output dimenstion to
# the internal dimension
self.fcIn = nn.Linear(input_dim, internalNunits[0])
self.fcOut = nn.Linear(internalNunits[-1], output_dim)
def forward(self, x):
batch_size, _ = x.shape
# initialize matrices for inputNet
inputNetMatrices = []
inputNetBiases = []
for i in range(len(self.internalNunits)-1):
inputNetMatrices.append([torch.zeros(batch_size, self.internalNunits[i], self.internalNunits[i+1])])
inputNetBiases.append([torch.zeros(batch_size, self.internalNunits[i+1], 1)])
# split x into physical system inputs and parameters
inputs = x[:, :self.input_dim]
parameters = x[:, self.input_dim:]
# AUXILIARY PARAMETER NETWORK
parameters = self.parameterNet(parameters)
# fill inputNetMatrices with outputs from parameterNet
index = 0
for i in range(len(self.internalNunits)-1):
index_temp = index
index += self.internalNunits[i] * self.internalNunits[i+1]
inputNetMatrices[i] = parameters[:, index_temp:index].reshape(batch_size, self.internalNunits[i+1], self.internalNunits[i])
# fill inputNetBiases with outputs from parameterNet
for i in range(len(self.internalNunits)-1):
index_temp = index
index += self.internalNunits[i+1]
inputNetBiases[i] = parameters[:, index_temp:index].reshape(batch_size, self.internalNunits[i+1], 1)
# MAIN INPUT NETWORK
inputs = self.fcIn(inputs).unsqueeze(-1)
# MAIN INPUT NETWORK
for i in range(len(self.internalNunits)-1):
# apply matrices and biases just filled with outputs from parameterNet
inputs = torch.bmm(inputNetMatrices[i], inputs)
inputs += inputNetBiases[i]
inputs = torch.relu(inputs)
return self.fcOut(inputs.squeeze(-1))
class SplitInputParameterObjective(object):
# define class to smuggle additional arguments into objective function
def __init__(self, train_loader, test_loader, dt_path, input_dim, nparams, output_dim, **modelargs):
self.modelargs = modelargs
self.dt_path = dt_path
self.train_loader = train_loader
self.test_loader = test_loader
self.input_dim = input_dim
self.nparams = nparams
self.output_dim = output_dim
def __call__(self, trial):
lr = trial.suggest_loguniform("lr", 1e-4, 1e-1)
        parameterNlayers = trial.suggest_categorical("parameterNlayers", [1, 2, 3, 4, 5])
        parameterNunits = [
            int(trial.suggest_loguniform(f"Nunits{i + 1}", 50, 1000))
            for i in range(parameterNlayers)
        ]
        internalNlayers = trial.suggest_categorical("internalNlayers", [1, 2, 3, 4, 5])
        internalNunits = [
            int(trial.suggest_loguniform(f"iNunits{i + 1}", 10, 100))
            for i in range(internalNlayers)
        ]
name = f"{self.dt_path}_v{trial.number}" #create name with trial index
value, model_path = physics_aware_training.digital_twin_utils.train_loop_reg_model(
self.train_loader,
self.test_loader,
name,
self.input_dim,
self.nparams,
self.output_dim,
Model = SplitInputParameterNet,
parameterNunits = parameterNunits,
internalNunits = internalNunits,
lr = lr,
**self.modelargs)
trial.set_user_attr('model_path', model_path) #save the model path string in NAS study
return value
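# Illustrative sketch (not part of the original module): exercising SplitInputParameterNet on random
# data. Dimensions are placeholders; the commented-out search shows how SplitInputParameterObjective
# could be plugged into an Optuna study (train/test loaders and dt_path are assumed to exist).
if __name__ == "__main__":
    input_dim, nparams, output_dim = 4, 3, 2
    net = SplitInputParameterNet(input_dim, nparams, output_dim,
                                 parameterNunits=[64, 64], internalNunits=[16, 16])
    x = torch.randn(8, input_dim + nparams)  # physical inputs and parameters concatenated along dim 1
    y = net(x)
    print(y.shape)  # torch.Size([8, 2])
    # import optuna
    # objective = SplitInputParameterObjective(train_loader, test_loader, "digital_twin",
    #                                          input_dim, nparams, output_dim)
    # study = optuna.create_study(direction="minimize")
    # study.optimize(objective, n_trials=20)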
|
64256
|
from random import randint
from cowait.worker.worker_node import WorkerNode
from .html_logger import HTMLLogger
class NotebookNode(WorkerNode):
"""
The Notebook Node is a variant of the standard worker node meant to run in a notebook.
It simulates a running task by connecting upstream and forwarding events from tasks
created from within the notebook.
NotebookNodes use random ports for their web servers to allow multiple nodes on a single host.
Output is disabled to prevent event spam into the notebook.
"""
def __init__(self, taskdef):
self.taskdef = taskdef
super().__init__(
id=taskdef.id,
upstream=taskdef.upstream,
port=randint(10000, 60000),
logger=HTMLLogger(),
)
async def start(self, token: str) -> None:
"""
Starts the node by connecting upstream, sending initialization
events and starting the local web server.
"""
await self.connect(token)
await self.parent.send_init(self.taskdef)
await self.parent.send_run()
await self.parent.send_log(data='Kernel ready.', file='stdout')
self.serve()
async def stop(self):
await self.parent.send_log(data='Kernel stopped!', file='stdout')
await self.parent.send_stop()
await self.parent.close()
async def connect(self, token: str) -> None:
await self.parent.connect(self.upstream, token)
|
64326
|
import click
from .schemaless import SchemalessPrompter
from agent import source
class KafkaPrompter(SchemalessPrompter):
timestamp_types = ['datetime', 'string', 'unix', 'unix_ms']
target_types = ['counter', 'gauge', 'running_counter']
def prompt_config(self):
self.data_preview()
self.set_values()
self.prompt_measurement_names()
self.prompt_timestamp()
self.set_dimensions()
self.set_consumer_group()
self.prompt_static_dimensions()
self.prompt_tags()
self.filter()
self.set_transform()
self.set_uses_schema()
def set_consumer_group(self):
self.config['override_source'][source.KafkaSource.CONFIG_CONSUMER_GROUP] =\
click.prompt('Consumer group name', self._get_default_consumer_group())
def _get_default_consumer_group(self) -> str:
if source.KafkaSource.CONFIG_CONSUMER_GROUP in self.config['override_source']:
return self.config['override_source'][source.KafkaSource.CONFIG_CONSUMER_GROUP]
return "agent_" + self.pipeline.name
|
64329
|
import heapq
import itertools as itt
import operator as op
from collections import OrderedDict, UserDict, defaultdict
from .array import Array
from .optional import Nothing, Some
from .repr import short_repr
from .row import KeyValue, Row
from .stream import Stream
def identity(_): return _
class Map(OrderedDict):
'''A mutable dictionary enhanced with a bulk of useful methods.
'''
def items(self):
return Stream(super().items()).starmap(KeyValue)
def values(self):
return Stream(super().values())
def keys(self):
return Stream(super().keys())
def update(self, *args, **kwds):
'''Update Map from dict/iterable and ``return self``
>>> m = Map(a=3, b=4)
>>> m2 = m.update(a=5, c=3).update({'d': 2})
>>> m is m2
True
>>> m
Map({'a': 5, 'b': 4, 'c': 3, 'd': 2})
'''
super().update(*args, **kwds)
return self
def updated(self, *args, **kwds):
'''Create a new Map instance that is updated from dict/iterable.
This method is the same as ``m.copy().update(...)``
>>> m = Map(a=3, b=4)
>>> m2 = m.updated(a=5, c=3).update({'d': 2})
>>> m2
Map({'a': 5, 'b': 4, 'c': 3, 'd': 2})
>>> m
Map({'a': 3, 'b': 4})
'''
m = self.copy()
return m.update(*args, **kwds)
def join(self, *others, fillvalue=None, agg=None):
"""Create a new Map instance with keys merged and values joined.
>>> m1 = Map(a=1, b=2)
>>> m2 = m1.join(dict(a=3, b=4, c=5))
>>> m2 is m1
False
>>> m2
Map({'a': Row(f0=1, f1=3), 'b': Row(f0=2, f1=4), 'c': Row(f0=None, f1=5)})
>>> m1 = Map(a=1, b=2)
>>> m2 = m1.join(dict(a=3, b=4, c=5), agg=sum, fillvalue=0)
>>> m2
Map({'a': 4, 'b': 6, 'c': 5})
"""
return Map(self.iter_joined(*others, fillvalue=fillvalue, agg=agg))
def iter_joined(self, *others, fillvalue=None, agg=None):
"""Create a ``Row(key, Row(v0, v1, ...))`` iterator with keys from
all Maps and value joined.
>>> m = Map(a=1, b=2)
>>> l = list(m.iter_joined(
... Map(a=3, b=4, c=5),
... Map(a=6, c=7),
... fillvalue=0))
>>> l[0]
Row(key='a', values=Row(f0=1, f1=3, f2=6))
>>> l[1]
Row(key='b', values=Row(f0=2, f1=4, f2=0))
>>> l[2]
Row(key='c', values=Row(f0=0, f1=5, f2=7))
"""
if agg is None:
agg = identity
keys = list(self.keys())
keys_set = set(keys)
for other in others:
for key in other.keys():
if key not in keys_set:
keys_set.add(key)
keys.append(key)
dicts = (self,) + others
for key in keys:
yield Row(key=key,
values=agg(Row.from_values(
d.get(key, fillvalue)
for d in dicts)))
def __repr__(self):
return f'Map({self.make_string()})'
def map(self, func):
'''Create a new Map instance that each key, value pair is derived by
applying function to original key, value.
>>> Map(a=3, b=4).map(lambda k, v: (v, k))
Map({3: 'a', 4: 'b'})
Parameters
----------
func : ``pred(key, value) -> (key, value)``
function for computing new key/value pair
'''
return Map(func(key, value) for key, value in self.items())
def map_keys(self, func):
'''Create a new Map instance that all values remains the same,
while each corresponding key is updated by applying function to
original key, value.
>>> Map(a=3, b=4).map_keys(lambda k, v: k + '_1')
Map({'a_1': 3, 'b_1': 4})
Parameters
----------
func : ``pred(key, value) -> key``
function for computing new keys
'''
return Map((func(key, value), value) for key, value in self.items())
def map_values(self, func):
'''Create a new Map instance that all keys remains the same,
while each corresponding value is updated by applying function to
original key, value.
>>> Map(a=3, b=4).map_values(lambda k, v: v * 2)
Map({'a': 6, 'b': 8})
Parameters
----------
func : ``pred(key, value) -> value``
function for computing new values
'''
return Map((key, func(key, value)) for key, value in self.items())
def revamp_values(self, func):
'''Update values of current Map and return self.
Each value is derived by computing the function using
both key and value.
>>> m = Map(a=3, b=4)
>>> m.revamp_values(lambda k, v: v * 2)
Map({'a': 6, 'b': 8})
>>> m
Map({'a': 6, 'b': 8})
Parameters
----------
func : ``pred(key, value) -> value``
function for computing new values
Returns
-------
self
'''
for key, value in self.items():
self[key] = func(key, value)
return self
def keep(self, *keys):
'''Delete keys not specified and return self
>>> m = Map(a=3, b=4, c=5)
>>> m.keep('a', 'c')
Map({'a': 3, 'c': 5})
>>> m
Map({'a': 3, 'c': 5})
Returns
-------
self
'''
keys = set(keys)
current_keys = set(self.keys())
keys_to_delete = current_keys - keys
        for key in keys_to_delete:
del self[key]
return self
def project(self, *keys):
'''Create a new Map instance contains only specified keys.
>>> m = Map(a=3, b=4, c=5)
>>> m.project('a', 'c')
Map({'a': 3, 'c': 5})
>>> m
Map({'a': 3, 'b': 4, 'c': 5})
Returns
-------
Map[key, value]
'''
return Map((k, self[k]) for k in keys)
def get_opt(self, key):
'''Get the value of specified key as Optional type.
Return Some(value) if key exists, otherwise return Nothing.
>>> m = Map(a=3, b=4)
>>> m.get_opt('a')
Some(3)
>>> m.get_opt('c')
Nothing
>>> m.get_opt('a').map(lambda v: v * 2)
Some(6)
>>> m.get_opt('c').map(lambda v: v * 2)
Nothing
Returns
-------
Optional[value]
'''
if key in self:
return Some(self[key])
return Nothing
def remove(self, *keys):
'''Delete keys and return self
>>> m = Map(a=3, b=4, c=5)
>>> m.remove('a', 'c')
Map({'b': 4})
>>> m
Map({'b': 4})
Returns
-------
self
'''
for key in keys:
del self[key]
return self
def without(self, *keys):
'''Create a new Map instance with those keys
>>> m = Map(a=3, b=4, c=6)
>>> m.without('a', 'c')
Map({'b': 4})
>>> m
Map({'a': 3, 'b': 4, 'c': 6})
Returns
-------
Map[key, value]
'''
return Map((key, value)
for key, value in self.items()
if key not in keys)
def retain(self, pred):
'''Delete key/value pairs not satisfying the predicate and return self
>>> m = Map(a=3, b=4, c=5)
>>> m.retain(lambda k, v: k == 'b' or v == 5)
Map({'b': 4, 'c': 5})
>>> m
Map({'b': 4, 'c': 5})
Parameters
----------
pred : ``(k, v) -> bool``
Returns
-------
self
'''
keys_to_delete = []
for key, value in self.items():
if not pred(key, value):
keys_to_delete.append(key)
return self.remove(*keys_to_delete)
def retain_false(self, pred):
'''Delete key/value pairs satisfying the predicate and return self
>>> m = Map(a=3, b=4, c=5)
>>> m.retain_false(lambda k, v: k == 'b' or v == 5)
Map({'a': 3})
>>> m
Map({'a': 3})
Parameters
----------
pred : ``(k, v) -> bool``
Returns
-------
self
'''
keys_to_delete = []
for key, value in self.items():
if pred(key, value):
keys_to_delete.append(key)
return self.remove(*keys_to_delete)
def retain_by_key(self, pred):
'''Delete key/value pairs not satisfying the predicate and return self
>>> m = Map(a=3, b=4, c=5)
>>> m.retain_by_key(lambda k: k == 'b')
Map({'b': 4})
>>> m
Map({'b': 4})
Parameters
----------
pred : ``(k) -> bool``
Returns
-------
self
'''
keys_to_delete = []
for key, value in self.items():
if not pred(key):
keys_to_delete.append(key)
return self.remove(*keys_to_delete)
def retain_by_value(self, pred):
'''Delete key/value pairs not satisfying the predicate and return self
>>> m = Map(a=3, b=4, c=5)
>>> m.retain_by_value(lambda v: v == 4)
Map({'b': 4})
>>> m
Map({'b': 4})
Parameters
----------
pred : ``(k) -> bool``
Returns
-------
self
'''
keys_to_delete = []
for key, value in self.items():
if not pred(value):
keys_to_delete.append(key)
return self.remove(*keys_to_delete)
def filter(self, pred):
'''Create a new Map with key/value pairs satisfying the predicate
>>> m = Map({1: 2, 2: 4, 3: 6})
>>> m2 = m.filter(lambda k, v: (v-k) % 3 == 0)
>>> m2
Map({3: 6})
Parameters
----------
pred : ``(k, v) -> bool``
predicate
Returns
-------
Map[key, value]
'''
return Map((k, v) for k, v in self.items() if pred(k, v))
def filter_false(self, pred):
'''Create a new Map with key/value pairs not satisfying the predicate
>>> m = Map({1: 2, 2: 4, 3: 6})
>>> m2 = m.filter_false(lambda k, v: (v-k) % 3 == 0)
>>> m2
Map({1: 2, 2: 4})
Parameters
----------
pred : ``(k, v) -> bool``
predicate
Returns
-------
Map[key, value]
'''
return Map((k, v) for k, v in self.items() if not pred(k, v))
def filter_by_key(self, pred):
'''Create a new Map with keys satisfying the predicate
>>> m = Map({1: 2, 2: 4, 3: 6})
>>> m2 = m.filter_by_key(lambda k: k % 3 == 0)
>>> m2
Map({3: 6})
Parameters
----------
pred : ``(k, v) -> bool``
predicate
Returns
-------
Map[key, value]
'''
return Map((k, v) for k, v in self.items() if pred(k))
def filter_by_value(self, pred):
'''Create a new Map with values satisfying the predicate
>>> m = Map({1: 2, 2: 4, 3: 6})
>>> m2 = m.filter_by_value(lambda v: v % 3 == 0)
>>> m2
Map({3: 6})
Parameters
----------
pred : ``(k, v) -> bool``
predicate
Returns
-------
Map[key, value]
'''
return Map((k, v) for k, v in self.items() if pred(v))
def group_by(self, key_func):
'''Group key/value pairs into nested Maps.
>>> Map(a=3, b=4, c=5).group_by(lambda k, v: v % 2)
Map({1: Map({'a': 3, 'c': 5}), 0: Map({'b': 4})})
Parameters
----------
key_func : ``(key, value) -> group_key``
predicate
Returns
-------
Map[key_func(key), Map[key, value]]
'''
grouped_d = defaultdict(Map)
for key, value in self.items():
grouped_d[key_func(key, value)][key] = value
return Map(grouped_d)
def reduce(self, key):
pass
def make_string(self,
key_value_format='{key!r}: {value!r}',
start='{', item_sep=', ', end='}'):
'''Construct a string from key/values.
>>> m = Map(a=3, b=4, c=5)
>>> m.make_string()
"{'a': 3, 'b': 4, 'c': 5}"
>>> m.make_string(start='(', key_value_format='{key}={value!r}',
... item_sep=', ', end=')')
'(a=3, b=4, c=5)'
Parameters
----------
key_value_format : str
string template using builtin ``str.format()`` for formatting
key/value pairs. Default to ``'{key!r}: {value!r}'``.
Available named placeholders: ``{key}``, ``{value}``
start : str
Default to ``'{'``.
item_sep : str
Default to ``', '``
end : str
Default to ``}``
Returns
-------
str
'''
items_str = item_sep.join(
key_value_format.format(key=key, value=value)
for key, value in self.items())
return start + items_str + end
def take(self, n):
'''create a Stream instance of first ``n`` ``Row(key, value)`` elements.
>>> m = Map(a=4, b=5, c=6, d=7)
>>> m.take(2).to_list()
[Row(key='a', value=4), Row(key='b', value=5)]
Returns
-------
Stream[Row[key, value]]
'''
return self.to_stream().take(n)
def first(self):
'''Get the first item in ``Row(key, value)`` type
>>> m = Map(a=4, b=5, c=6, d=7)
>>> m.first()
Row(key='a', value=4)
>>> m.first().key
'a'
>>> m.first().value
4
>>> m = Map()
>>> m.first()
Traceback (most recent call last):
...
IndexError: index out of range.
Returns
-------
Row[key, value]
'''
return self.nth(0)
def first_opt(self):
'''Optionally get the first item.
Return Some(Row(key, value)) if first item exists,
otherwise return Nothing
>>> m = Map(a=4, b=5, c=6, d=7)
>>> m.first_opt().map(lambda kv: kv.transform(value=lambda v: v * 2))
Some(Row(key='a', value=8))
>>> m.first_opt().map(lambda kv: kv.value)
Some(4)
>>> m = Map()
>>> m.first_opt()
Nothing
Returns
-------
Optional[Row[key, value]]
'''
return self.nth_opt(0)
def nth(self, index):
'''Get the nth item in ``Row(key, value)`` type.
>>> m = Map(a=4, b=5, c=6, d=7)
>>> m.nth(2)
Row(key='c', value=6)
>>> m = Map(a=4, b=5)
>>> m.nth(2)
Traceback (most recent call last):
...
IndexError: index out of range.
Returns
-------
Row[key, value]
'''
try:
key, value = next(itt.islice(self.items(), index, None))
return KeyValue(key, value)
except StopIteration:
raise IndexError('index out of range.')
def nth_opt(self, index):
'''Optionally get the nth item.
Return ``Some(Row(key, value))`` if first item exists,
otherwise return Nothing.
>>> m = Map(a=4, b=5, c=6, d=7)
>>> m.first_opt().map(lambda kv: kv.transform(value=lambda v: v * 2))
Some(Row(key='a', value=8))
>>> m = Map()
>>> m.first_opt()
Nothing
Returns
-------
Optional[Row[key, value]]
'''
try:
return Some(self.nth(index))
except IndexError:
return Nothing
def len(self):
'''Get the length of this Map
>>> m = Map(a=4, b=5, c=6, d=7)
>>> m.len()
4
Returns
-------
int
'''
return len(self)
def to_stream(self, key_field='key', value_field='value'):
'''Convert to a Stream instance of ``Row(key, value)`` iterable.
>>> m = Map(a=4, b=5, c=6, d=7)
>>> m.to_stream().take(2).to_list()
[Row(key='a', value=4), Row(key='b', value=5)]
Returns
-------
Stream[Row[key, value]]
'''
return (Stream(super().items())
.starmap(lambda key, value:
Row(**{key_field: key, value_field: value})))
def to_array(self):
'''Convert to an Array instance of ``Row(key, value)`` iterable.
>>> m = Map(a=4, b=5, c=6, d=7)
>>> m.to_array().take(2)
Array([Row(key='a', value=4), Row(key='b', value=5)])
Returns
-------
Array[Row[key, value]]
'''
return self.to_stream().to_array()
def to_list(self):
'''Convert to an list instance of ``Row(key, value)`` iterable.
>>> m = Map(a=4, b=5)
>>> m.to_list()
[Row(key='a', value=4), Row(key='b', value=5)]
Returns
-------
Array[Row[key, value]]
'''
return self.to_stream().to_list()
def to_dict(self):
'''Convert to dict'''
return dict(self)
def flip(self):
'''Create a new Map which key/value pairs are fliped
>>> m = Map(a=4, b=5, c=6)
>>> m.flip()
Map({4: 'a', 5: 'b', 6: 'c'})
'''
return Map((value, key) for key, value in self.items())
def for_each(self, func):
'''Call func for each key/value pair
>>> m = Map(a=[], b=[], c=[])
>>> m.for_each(lambda k, v: v.append(k))
>>> m
Map({'a': ['a'], 'b': ['b'], 'c': ['c']})
'''
for k, v in self.items():
func(k, v)
def for_each_key(self, func):
'''Call func for each key
>>> m = Map(a=[], b=[], c=[])
>>> keys = []
>>> m.for_each_key(lambda k: keys.append(k))
>>> keys
['a', 'b', 'c']
'''
for k in self.keys():
func(k)
def for_each_value(self, func):
'''Call func for each value
>>> m = Map(a=[], b=[], c=[])
>>> m.for_each_value(lambda v: v.append(3))
>>> m
Map({'a': [3], 'b': [3], 'c': [3]})
'''
for v in self.values():
func(v)
def nlargest_value_items(self, n=None):
'''Get top n largest values
>>> m = Map(a=6, b=2, c=10, d=9)
>>> m.nlargest_value_items(n=2)
Array([Row(key='c', value=10), Row(key='d', value=9)])
Returns
-------
Array[Row[key, value]]
'''
        if n is None:
            vs = sorted(self.items(), key=op.itemgetter(1), reverse=True)
        else:
            vs = heapq.nlargest(n, self.items(), key=op.itemgetter(1))
return Array(vs)
def nsmallest_value_items(self, n=None):
'''Get top n smallest values
>>> m = Map(a=6, b=2, c=10, d=9)
>>> m.nsmallest_value_items(n=2)
Array([Row(key='b', value=2), Row(key='a', value=6)])
Returns
-------
Array[Row[key, value]]
'''
        if n is None:
            vs = sorted(self.items(), key=op.itemgetter(1), reverse=False)
        else:
            vs = heapq.nsmallest(n, self.items(), key=op.itemgetter(1))
return Array(vs)
|
64406
|
from django.test import TestCase, Client
from django.contrib.auth.models import User
from .models import Feed
class FeedViewsTest(TestCase):
def setUp(self):
self.client = Client()
user = User.objects.create_user(
username='test_user',
email='<EMAIL>',
password='<PASSWORD>'
)
self.feed = Feed.objects.create(user=user, post='test feed')
def test_feeds(self):
response = self.client.get('/feeds/')
self.assertEqual(response.status_code, 200)
def test_feed(self):
response = self.client.get('/feeds/123/')
self.assertEqual(response.status_code, 404)
response = self.client.get(f'/feeds/{self.feed.pk}/')
self.assertEqual(response.status_code, 200)
|
64444
|
import unittest
from pyparsing import ParseException
from media_management_scripts.support.search_parser import parse_and_execute, parse
class ParseTestCase():
def parse(self, query, expected, context={}):
self.assertEqual(parse_and_execute(query, context), expected)
class SimpleTest(unittest.TestCase, ParseTestCase):
def test_basic(self):
self.parse('1+1', 2)
self.parse('1-1', 0)
self.parse('-1-2', -3)
self.parse('3*2', 6)
self.parse('10/2', 5)
def test_whitespace(self):
self.parse(' 1 + 1 ', 2)
self.parse(' 1 + 1 ', 2)
def test_order_of_operations(self):
self.parse('1+2*3', 7)
self.parse('2*3+1', 7)
self.parse('(1+2)*3', 9)
def test_boolean(self):
self.parse('true', True)
self.parse('false', False)
self.parse('true and true', True)
self.parse('true and false', False)
self.parse('True and False', False)
self.parse('true or false', True)
self.parse('not true', False)
self.parse('not false', True)
def test_boolean_order_of_operations(self):
self.parse('true and true or false', True)
self.parse('not false and false', False)
self.parse('not false or false', True)
self.parse('1 in [1] or false', True)
self.parse('1 in [1] and false', False)
def test_comparison(self):
self.parse('1 = 1', True)
self.parse('1 != 1', False)
self.parse('1 != 2', True)
self.parse('1 > 1', False)
self.parse('2 > 1', True)
self.parse('1 < 1', False)
self.parse('1 >= 1', True)
self.parse('1 <= 1', True)
def test_in(self):
self.parse('1 in [1]', True)
self.parse('1 in [1,2]', True)
self.parse('2 in [1]', False)
def test_basic_context(self):
self.parse('a', 2, {'a': 2})
self.parse('a+1', 3, {'a': 2})
self.parse('a.b+1', 3, {'a': {'b': 2}})
def test_reuse(self):
op = parse('a+1')
self.assertEqual(2, op.exec({'a': 1}))
self.assertEqual(3, op.exec({'a': 2}))
def test_invalid(self):
with self.assertRaises(ParseException):
parse('True and')
with self.assertRaises(ParseException):
parse('1+')
def test_isNull(self):
self.parse('isNull(1)', False)
self.parse('isNull(a)', True, {'a': None})
self.parse('not isNull(a)', True, {'a': 1})
def test_all(self):
self.parse('a = 1', True, {'a': [1, 2]})
self.parse('all(a) = 1', True, {'a': [1, 1]})
self.parse('all(a) = 1', False, {'a': [1, 2]})
self.parse('all(a) != 1', True, {'a': [1, 2]})
self.parse('all(a) != 1', False, {'a': [1, 1]})
def test_string(self):
self.parse('"test"', 'test')
self.parse('test', 'test')
self.parse('"test test"', 'test test')
self.parse('"test test"', 'test test')
self.parse('"test test" = "test test"', True)
|
64450
|
import random
class Fluctuation:
def __init__(self, percentage, baseline):
self.percentage = percentage
self.baseline = baseline
def __call__(self, value):
return self.apply(value)
def apply(self, value):
return value + self.generate(self.baseline)
def generate(self, baseline):
relative_value = self.get_relative_value(baseline)
corridor = (-relative_value, relative_value)
return random.randint(corridor[0], corridor[1])
def get_relative_value(self, baseline):
relative_value = float(baseline) / 100.0 * float(self.percentage)
return int(relative_value)
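# Illustrative sketch (not part of the original module): a Fluctuation configured to jitter readings
# by up to +/-10% of a baseline of 100, i.e. an integer offset drawn from [-10, 10].
if __name__ == "__main__":
    fluctuation = Fluctuation(percentage=10, baseline=100)
    print(fluctuation(50))        # some value in [40, 60]
    print(fluctuation.apply(50))  # equivalent to calling the instance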
|
64481
|
def fib(n):
"return nth term of Fibonacci sequence"
a, b = 0, 1
i = 0
while i<n:
a, b = b, a+b
i += 1
return b
def linear_recurrence(n, ab=(2, 0), u01=(1, 1)):
    """return nth term of the sequence defined by the
    linear recurrence
        u(n+2) = a*u(n+1) + b*u(n)"""
    a, b = ab
    u, v = u01
    w = v  # returned when n == 0
    i = 0
    while i < n:
        w = a*v + b*u
        u, v = v, w
        i += 1
    return w
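# Illustrative check (not part of the original module): with coefficients (1, 1) and seeds (1, 1) the
# recurrence reproduces the Fibonacci sequence, offset by one index relative to fib() above.
if __name__ == "__main__":
    assert linear_recurrence(5, (1, 1), (1, 1)) == fib(6) == 13
    print(linear_recurrence(5, (1, 1), (1, 1)))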
|
64482
|
import os
import sys
import json
import re
testrange = [300, 500, 800, 1100, 1400]
WIDTH, HEIGHT = 800, 480
FPS = 25
def eventloop(test_file):
os.system('mkdir img')
#os.system('mkdir img/%s' % (test_file))
os.system('mkdir tmp_%s' % (test_file))
os.system('mkdir tmp2_%s' % (test_file))
# ffmpeg -i mov/1.flv -vf fps=25 -s 1280x720 img/1.flv/%5d.png
os.system('ffmpeg -y -i mov/%s -vf fps=%d -s 1280x720 tmp_%s/%%d.png' %
(test_file, FPS, test_file))
os.system('ffmpeg -y -i tmp_%s/%%d.png -vf fps=5 -s 64x36 img/%s_%%d.png' %
(test_file, test_file))
img_files = os.listdir('tmp_%s/' % test_file)
img_files.sort()
_count = len(img_files)
_file = open(test_file + '_vmaf.log', 'w')
_filelen = open(test_file + '_len.log', 'w')
    for _frame in range(1, _count + 1, FPS):
        for _p in range(FPS):
os.system('cp -f tmp_%s/%d.png tmp2_%s/%d.png' %
(test_file, _frame + _p, test_file, _p))
os.system(
'ffmpeg -y -i tmp2_%s/%%d.png -pix_fmt yuv420p tmp_%s.yuv' % (test_file, test_file))
for _range in testrange:
os.system(
'ffmpeg -y -i tmp2_%s/%%d.png -vcodec libx264 -s %dx%d -b:v %dk -f flv tmp_%s.flv' % (test_file, WIDTH, HEIGHT, _range, test_file))
os.system(
'../ffmpeg2vmaf %d %d tmp_%s.yuv tmp_%s.flv --ref-fmt yuv420p --ref-width 1280 --ref-height 720 --out-fmt json 1>tmp_%s.json' % (WIDTH, HEIGHT, test_file, test_file, test_file))
_size = os.path.getsize('tmp_%s.flv' % (test_file))
_filelen.write(str(_size))
_filelen.write(',')
with open('tmp_' + test_file + '.json') as json_file:
data = json.load(json_file)
score = float(data['aggregate']['VMAF_score']) / 100.0
_file.write(str(score))
_file.write(',')
_file.write('\n')
_filelen.write('\n')
_file.close()
_filelen.close()
os.system('rm -rf tmp_%s' % (test_file))
os.system('rm -rf tmp2_%s' % (test_file))
os.system('rm -rf tmp_%s.flv' % (test_file))
os.system('rm -rf tmp_%s.yuv' % (test_file))
    print('done')
if __name__ == '__main__':
    # export in a child shell does not affect later os.system() calls, so set
    # the variable on this process instead; child shells inherit it.
    os.environ['PYTHONPATH'] = os.path.abspath('../python/src') + os.pathsep + os.environ.get('PYTHONPATH', '')
for _file in os.listdir('mov/'):
eventloop(_file)
|
64492
|
import numpy as np
from public_tool.form_index import form_index
from XGB_HMM.form_B_matrix_by_XGB import form_B_matrix_by_XGB
from XGB_HMM.predict import self_pred
def pred_proba_XGB(A, model, pi, O, allow_flag, lengths):
    # Build pred_proba for the dataset. Note that the dataset here is the result
    # of solve_on_raw_data, i.e. data that carries an allow_flag.
    # output:
    # pred_proba: numpy array
n_states = len(pi)
pred_proba = np.zeros((O.shape[0], n_states))
for i in range(len(lengths)):
begin_index, end_index = form_index(lengths, i)
now_O = O[begin_index:end_index, :]
now_allow_flag = allow_flag[begin_index:end_index]
now_pred_proba = np.zeros((now_O.shape[0], n_states))
now_allow_B = form_B_matrix_by_XGB(model, now_O[now_allow_flag == 1], pi)
_, now_allow_pred_proba, _ = self_pred(now_allow_B, [now_allow_B.shape[0]], A, pi)
now_pred_proba[now_allow_flag == 1] = now_allow_pred_proba
pred_proba[begin_index:end_index] = now_pred_proba
return pred_proba
|
64506
|
from __future__ import unicode_literals
from django.db import models # noqa
# Create your models here.
|
64507
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
import pickle
from keras.layers import Input, Dense, Lambda, Flatten, Reshape, Layer
from keras.layers import Conv2D, Conv2DTranspose
from keras.models import Model
from keras import backend as K
from keras import metrics
from keras import optimizers
from full_params_conv_101 import *
if K.image_data_format() == 'channels_first':
original_img_size = (img_chns, img_rows, img_cols)
else:
original_img_size = (img_rows, img_cols, img_chns)
x = Input(shape=original_img_size)
conv_1 = Conv2D(32,
kernel_size=(4, 4),
strides=(2, 2),
padding='same', activation='relu')(x)
conv_2 = Conv2D(64,
kernel_size=(4, 4),
padding='same', activation='relu',
strides=(2, 2))(conv_1)
conv_3 = Conv2D(128,
kernel_size=(4, 4),
padding='same', activation='relu',
strides=(2, 2))(conv_2)
conv_4 = Conv2D(256,
kernel_size=(4, 4),
padding='same', activation='relu',
strides=(2, 2))(conv_3)
flat = Flatten()(conv_4)
hidden = Dense(intermediate_dim, activation='relu')(flat)
z_mean = Dense(latent_dim)(hidden)
z_log_var = Dense(latent_dim)(hidden)
def sampling(args):
z_mean, z_log_var = args
epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim),
mean=0., stddev=epsilon_std)
    # z_log_var is log(sigma^2), so the standard deviation is exp(0.5 * z_log_var)
    return z_mean + K.exp(0.5 * z_log_var) * epsilon
# note that "output_shape" isn't necessary with the TensorFlow backend
# so you could write `Lambda(sampling)([z_mean, z_log_var])`
z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])
# we instantiate these layers separately so as to reuse them later
decoder_hid = Dense(intermediate_dim, activation='relu')
decoder_upsample = Dense(16384, activation='relu')
if K.image_data_format() == 'channels_first':
output_shape = (batch_size, 256, 8, 8)
else:
output_shape = (batch_size, 8, 8, 256)
decoder_reshape = Reshape(output_shape[1:])
decoder_deconv_1 = Conv2DTranspose(128,
kernel_size=(4, 4),
padding='same',
strides=(2, 2),
activation='relu')
decoder_deconv_2 = Conv2DTranspose(64,
kernel_size=(4, 4),
padding='same',
strides=(2, 2),
activation='relu')
decoder_deconv_3_upsamp = Conv2DTranspose(32,
kernel_size=(4, 4),
strides=(2, 2),
padding='same',
activation='relu')
decoder_mean_squash = Conv2DTranspose(3,
kernel_size=(4, 4),
strides=(2, 2),
padding='same',
activation='relu')
hid_decoded = decoder_hid(z)
up_decoded = decoder_upsample(hid_decoded)
reshape_decoded = decoder_reshape(up_decoded)
deconv_1_decoded = decoder_deconv_1(reshape_decoded)
deconv_2_decoded = decoder_deconv_2(deconv_1_decoded)
x_decoded_relu = decoder_deconv_3_upsamp(deconv_2_decoded)
x_decoded_mean_squash = decoder_mean_squash(x_decoded_relu)
# Custom loss layer
class CustomVariationalLayer(Layer):
def __init__(self, **kwargs):
self.is_placeholder = True
super(CustomVariationalLayer, self).__init__(**kwargs)
def vae_loss(self, x, x_decoded_mean_squash):
x = K.flatten(x)
x_decoded_mean_squash = K.flatten(x_decoded_mean_squash)
xent_loss = img_rows * img_cols * metrics.binary_crossentropy(x, x_decoded_mean_squash)
kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
return K.mean(xent_loss + kl_loss)
def call(self, inputs):
x = inputs[0]
x_decoded_mean_squash = inputs[1]
loss = self.vae_loss(x, x_decoded_mean_squash)
self.add_loss(loss, inputs=inputs)
# We don't use this output.
return x
y = CustomVariationalLayer()([x, x_decoded_mean_squash])
vae = Model(x, y)
sgd = optimizers.SGD(lr=0.01)
vae.compile(optimizer=sgd, loss=None)
vae.summary()
"""
with open('../datasets/101_ObjectCategories.pkl', 'rb') as f:
    dic = pickle.load(f)
x_train = dic['all_images']
"""
x_train = np.load('../datasets/full_x.npy')
print "dataset loaded"
history = vae.fit(x_train,
shuffle=True,
epochs=epochs,
batch_size=batch_size,
)
# build a model to project inputs on the latent space
encoder = Model(x, z_mean)
"""
# display a 2D plot of the digit classes in the latent space
x_test_encoded = encoder.predict(x_test, batch_size=batch_size)
plt.figure(figsize=(6, 6))
plt.scatter(x_test_encoded[:, 0], x_test_encoded[:, 1], c=y_test)
plt.colorbar()
plt.show()
"""
# build a digit generator that can sample from the learned distribution
decoder_input = Input(shape=(latent_dim,))
_hid_decoded = decoder_hid(decoder_input)
_up_decoded = decoder_upsample(_hid_decoded)
_reshape_decoded = decoder_reshape(_up_decoded)
_deconv_1_decoded = decoder_deconv_1(_reshape_decoded)
_deconv_2_decoded = decoder_deconv_2(_deconv_1_decoded)
_x_decoded_relu = decoder_deconv_3_upsamp(_deconv_2_decoded)
_x_decoded_mean_squash = decoder_mean_squash(_x_decoded_relu)
generator = Model(decoder_input, _x_decoded_mean_squash)
vae.save('../models/object101_ld_%d_conv_%d_id_%d_e_%d_vae.h5' % (latent_dim, num_conv, intermediate_dim, epochs))
encoder.save('../models/object101_ld_%d_conv_%d_id_%d_e_%d_encoder.h5' % (latent_dim, num_conv, intermediate_dim, epochs))
generator.save('../models/object101_ld_%d_conv_%d_id_%d_e_%d_generator.h5' % (latent_dim, num_conv, intermediate_dim, epochs))
fname = '../models/object101_ld_%d_conv_%d_id_%d_e_%d_history.pkl' % (latent_dim, num_conv, intermediate_dim, epochs)
with open(fname, 'wb') as file_pi:
    pickle.dump(history.history, file_pi)
"""
# display a 2D manifold of the digits
n = 15 # figure with 15x15 digits
digit_size = 28
figure = np.zeros((digit_size * n, digit_size * n))
# linearly spaced coordinates on the unit square were transformed through the inverse CDF (ppf) of the Gaussian
# to produce values of the latent variables z, since the prior of the latent space is Gaussian
grid_x = norm.ppf(np.linspace(0.05, 0.95, n))
grid_y = norm.ppf(np.linspace(0.05, 0.95, n))
for i, yi in enumerate(grid_x):
for j, xi in enumerate(grid_y):
z_sample = np.array([[xi, yi]])
z_sample = np.tile(z_sample, batch_size).reshape(batch_size, 2)
x_decoded = generator.predict(z_sample, batch_size=batch_size)
digit = x_decoded[0].reshape(digit_size, digit_size)
figure[i * digit_size: (i + 1) * digit_size,
j * digit_size: (j + 1) * digit_size] = digit
plt.figure(figsize=(10, 10))
plt.imshow(figure, cmap='Greys_r')
plt.show()
"""
|
64545
|
from leavedemo.leave.models import Account
from datetime import timedelta
def update_hr(workitem):
''' automated and simplistic version of hrform.
'''
instance = workitem.instance
leaverequest = workitem.instance.content_object
if leaverequest.reason_denial:
raise Exception('denial reason is not empty')
    if leaverequest.day_start > leaverequest.day_end:
        raise Exception('date error')
    delta = leaverequest.day_end - leaverequest.day_start
nbjours = delta.days + 1
account = Account.objects.get(user=instance.user)
if account.days < nbjours:
raise Exception('no days enough in user account.')
account.days -= nbjours
account.save()
|
64726
|
from typing import List
class Solution:
def findErrorNums(self, nums: List[int]) -> List[int]:
res = list()
nums.sort()
stdList = range(1, len(nums) + 1) # len(nums) = n
repeatedNum = sum(nums) - sum(set(nums))
res.append(repeatedNum)
missingNum = (set(stdList) - set(nums)).pop()
res.append(missingNum)
return res
# below is testing
sol = Solution()
print(sol.findErrorNums([4, 2, 1, 2]))
|
64750
|
from concurrent import futures
import logging
import os
import grpc
from PIL import Image, ImageOps
import helloworld_pb2
import helloworld_pb2_grpc
from minio import Minio
minioEnvKey = "MINIO_ADDRESS"
image_name = 'img2.jpeg'
image2_name = 'img3.jpeg'
image_path = '/pulled_' + image_name
image_path2 = '/pulled_' + image2_name
responses = ["record_response", "replay_response"]
minioAddress = os.getenv(minioEnvKey)
class Greeter(helloworld_pb2_grpc.GreeterServicer):
def SayHello(self, request, context):
        if minioAddress is None:
            return None
minioClient = Minio(minioAddress,
access_key='minioadmin',
secret_key='minioadmin',
secure=False)
if request.name == "record":
msg = 'Hello, %s!' % responses[0]
minioClient.fget_object('mybucket', image_name, image_path)
image = Image.open(image_path)
img = image.transpose(Image.ROTATE_90)
elif request.name == "replay":
msg = 'Hello, %s!' % responses[1]
minioClient.fget_object('mybucket', image2_name, image_path2)
image2 = Image.open(image_path2)
img = image2.transpose(Image.ROTATE_90)
else:
msg = 'Hello, %s!' % request.name
minioClient.fget_object('mybucket', image_name, image_path)
image = Image.open(image_path)
img = image.transpose(Image.ROTATE_90)
return helloworld_pb2.HelloReply(message=msg)
def serve():
server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
server.add_insecure_port('[::]:50051')
server.start()
server.wait_for_termination()
if __name__ == '__main__':
logging.basicConfig()
serve()
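# Illustrative client sketch (an assumption, not part of this file); it would
# live in a separate module and use the stub generated from the standard
# helloworld.proto:
#   with grpc.insecure_channel('localhost:50051') as channel:
#       stub = helloworld_pb2_grpc.GreeterStub(channel)
#       reply = stub.SayHello(helloworld_pb2.HelloRequest(name='record'))
#       print(reply.message)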
|
64772
|
import csv
from flask_wtf import FlaskForm as Form
from flask_wtf.file import FileField, FileRequired, FileAllowed
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired
from wtforms import ValidationError
# noinspection PyMethodMayBeStatic
class HostForm(Form):
fqdn = StringField('FQDN or IP', validators=[DataRequired()])
port = StringField('TCP Port')
friendly_name = StringField('Friendly Name')
submit = SubmitField('Submit')
def validate_port(self, field):
if len(field.data) > 0:
try:
int(field.data)
except ValueError:
raise ValidationError('Port provided is not valid')
class ImportForm(Form):
file = FileField('Hosts', validators=[FileRequired(), FileAllowed(['csv'], 'Only CSV is supported!')])
submit = SubmitField('Submit')
|
64839
|
import os
test_list = [line.rstrip('\n') for line in open('./food-101/meta/test.txt')]
os.mkdir('./food-101/test')
source_base = './food-101/images/'
target_base = './food-101/test/'
for item in test_list:
c = item.split('/')[0]
    # the class sub-folder must exist under the test/ root before the file is moved there
    if not os.path.exists(os.path.join(target_base, c)):
        os.mkdir(os.path.join(target_base, c))
os.rename(os.path.join(source_base, item) + '.jpg', os.path.join(target_base, item) + '.jpg')
|
64844
|
from typing import Any, Dict
class APIException(Exception):
"""
example:
{
"status": bool,
"system": {
"code": int,
"message": str
},
"data": None,
}
"""
    def __init__(self, status: bool, system: Dict[str, Any], source: Any):
self.status = status
self.system = system
self.source = source
|
64870
|
import unittest
import numpy as np
from revpy import fare_transformation
class FareTransformationTest(unittest.TestCase):
def setUp(self):
# example data from page 13 of research paper
# "Optimization of Mixed Fare Structures: Theory and Applications"
        # by <NAME> et al. (2010)
self.fares = np.array([1200, 1000, 800, 600, 400, 200])
self.demands = np.array([31.2, 10.9, 14.8, 19.9, 26.9, 36.3])
def test_faretrafo_zero_demand(self):
demands = np.zeros(self.fares.shape)
adjusted_fares, adjusted_demand = \
fare_transformation.calc_fare_transformation(self.fares, demands)
np.testing.assert_equal([1200, np.nan, np.nan, np.nan, np.nan, np.nan],
adjusted_fares)
np.testing.assert_equal([0, np.nan, np.nan, np.nan, np.nan, np.nan],
adjusted_demand)
def test_example1(self):
# test example from above mentioned paper
adjusted_fares, adjusted_demand = \
fare_transformation.calc_fare_transformation(self.fares,
self.demands)
np.testing.assert_almost_equal(adjusted_fares, [1200, 427, 231, 28,
np.nan, np.nan], 0)
def test_example2(self):
# example containing some zero demands
demands = np.array([0, 15, 0, 30, 2, 60])
adjusted_fares, adjusted_demand = \
fare_transformation.calc_fare_transformation(self.fares, demands)
np.testing.assert_almost_equal(adjusted_fares, [1200, 1000, np.nan,
400, np.nan, np.nan, ])
def test_efficient_strategies(self):
fares = np.array([69.5, 59.5, 48.5, 37.5, 29.])
demands = np.array([3, 1, 0, 0, 10])
Q = demands.cumsum()
TR = Q*fares
__, __, __, __, eff_indices = \
fare_transformation.efficient_strategies(Q, TR, fares[0])
self.assertEqual(eff_indices.tolist(), [0, 1, 4])
|
64918
|
from typing import List
import torch
from torch.utils.data.dataset import Dataset
def noise(outlier_classes: List[int], generated_noise: torch.Tensor, norm: torch.Tensor,
nom_class: int, train_set: Dataset, gt: bool = False) -> Dataset:
"""
Creates a dataset based on the nominal classes of a given dataset and generated noise anomalies.
:param outlier_classes: a list of all outlier class indices.
:param generated_noise: torch tensor of noise images (might also be Outlier Exposure based noise) (n x c x h x w).
:param norm: torch tensor of nominal images (n x c x h x w).
:param nom_class: the index of the class that is considered nominal.
:param train_set: some training dataset.
:param gt: whether to provide ground-truth maps as well, atm not available!
:return: a modified dataset, with training data consisting of nominal samples and artificial anomalies.
"""
if gt:
raise ValueError('No GT mode for pure noise available!')
anom = generated_noise.clamp(0, 255).byte()
data = torch.cat((norm, anom))
targets = torch.cat(
(torch.ones(norm.size(0)) * nom_class,
torch.ones(anom.size(0)) * outlier_classes[0])
)
train_set.data = data
train_set.targets = targets
return train_set
def malformed_normal(outlier_classes: List[int], generated_noise: torch.Tensor, norm: torch.Tensor, nom_class: int,
train_set: Dataset, gt: bool = False, brightness_threshold: float = 0.11*255) -> Dataset:
"""
Creates a dataset based on the nominal classes of a given dataset and generated noise anomalies.
Unlike above, the noise images are not directly utilized as anomalies, but added to nominal samples to
create malformed normal anomalies.
:param outlier_classes: a list of all outlier class indices.
:param generated_noise: torch tensor of noise images (might also be Outlier Exposure based noise) (n x c x h x w).
:param norm: torch tensor of nominal images (n x c x h x w).
:param nom_class: the index of the class that is considered nominal.
:param train_set: some training dataset.
:param gt: whether to provide ground-truth maps as well.
:param brightness_threshold: if the average brightness (averaged over color channels) of a pixel exceeds this
threshold, the noise image's pixel value is subtracted instead of added.
This avoids adding brightness values to bright pixels, where approximately no effect is achieved at all.
:return: a modified dataset, with training data consisting of nominal samples and artificial anomalies.
"""
assert (norm.dim() == 4 or norm.dim() == 3) and generated_noise.shape == norm.shape
norm_dim = norm.dim()
if norm_dim == 3:
norm, generated_noise = norm.unsqueeze(1), generated_noise.unsqueeze(1) # assuming ch dim is skipped
anom = norm.clone()
# invert noise for bright regions (bright regions are considered being on average > brightness_threshold)
generated_noise = generated_noise.int()
bright_regions = norm.sum(1) > brightness_threshold * norm.shape[1]
for ch in range(norm.shape[1]):
gnch = generated_noise[:, ch]
gnch[bright_regions] = gnch[bright_regions] * -1
generated_noise[:, ch] = gnch
anom = (anom.int() + generated_noise).clamp(0, 255).byte()
data = torch.cat((norm, anom))
targets = torch.cat(
(torch.ones(norm.size(0)) * nom_class,
torch.ones(anom.size(0)) * outlier_classes[0])
)
if norm_dim == 3:
data = data.squeeze(1)
train_set.data = data
train_set.targets = targets
if gt:
gtmaps = torch.cat(
(torch.zeros_like(norm)[:, 0].float(), # 0 for nominal
(norm != anom).max(1)[0].clone().float()) # 1 for anomalous
)
if norm_dim == 4:
gtmaps = gtmaps.unsqueeze(1)
return train_set, gtmaps
else:
return train_set
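# Illustrative sketch (not part of the original module): a toy Dataset with
# .data/.targets attributes (the torchvision-style layout both helpers assume)
# and uniform random noise standing in for generated_noise.
if __name__ == '__main__':
    class _ToySet(Dataset):
        def __init__(self, data, targets):
            self.data, self.targets = data, targets
        def __len__(self):
            return len(self.data)
        def __getitem__(self, idx):
            return self.data[idx], self.targets[idx]
    norm = torch.randint(0, 256, (8, 3, 32, 32), dtype=torch.uint8)
    fake_noise = torch.randint(0, 64, (8, 3, 32, 32), dtype=torch.uint8)
    noisy = noise([1], fake_noise.float(), norm, nom_class=0,
                  train_set=_ToySet(norm.clone(), torch.zeros(8)))
    print(noisy.data.shape, noisy.targets.shape)  # 16 samples: 8 nominal + 8 pure-noise anomalies
    malformed, gtmaps = malformed_normal([1], fake_noise, norm, nom_class=0,
                                         train_set=_ToySet(norm.clone(), torch.zeros(8)), gt=True)
    print(malformed.data.shape, gtmaps.shape)  # anomalies are norm + noise, with 1-valued GT pixels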
|