id (stringlengths 2-8) | text (stringlengths 16-264k) | dataset_id (stringclasses 1 value)
---|---|---|
4929903
|
<filename>1_screen_pipeline/10_generate_ontology.py
#!/usr/bin/env python
# SPDX-License-Identifier: MIT
# Copyright (c) 2016-2020 <NAME>, <NAME>, <NAME>, <NAME>
from __future__ import print_function
import gzip
import os
import sys
import json
import psycopg2
import argparse
from io import StringIO
import math
from importlib import import_module
sys.path.append(os.path.join(os.path.dirname(__file__), '../common/'))
from dbconnect import db_connect
from constants import paths
from config import Config
sys.path.append(os.path.join(os.path.dirname(__file__), '../../metadata/utils/'))
from exp import Exp
from utils import Utils, printt, printWroteNumLines, cat
from db_utils import getcursor, makeIndex, makeIndexRev, makeIndexArr, makeIndexIntRange
from files_and_paths import Dirs
class BuildOntology:
def __init__(self, assembly):
self.assembly = assembly
def run(self):
mod = import_module("10_generate_ontology_actual")
runF = getattr(mod, "run")
downloadDate = '2017-10Oct-25'
        if 1:  # use the remote ontology URLs; switch to 0 to use the locally cached copies below
uberon_url = "http://ontologies.berkeleybop.org/uberon/composite-metazoan.owl"
efo_url = "http://sourceforge.net/p/efo/code/HEAD/tree/trunk/src/efoinowl/InferredEFOOWLview/EFO_inferred.owl?format=raw"
obi_url = "http://purl.obolibrary.org/obo/obi.owl"
else:
uberon_url = paths.path("ontology", downloadDate, "composite-metazoan.owl")
efo_url = paths.path("ontology", downloadDate, "EFO_inferred.owl")
obi_url = paths.path("ontology", downloadDate, "obi.owl")
printt("running ENCODE DCC generate ontology...")
terms = runF(uberon_url, efo_url, obi_url)
fnp = paths.path("ontology", downloadDate, "ontology.json.gz")
Utils.ensureDir(fnp)
printt("done; about to write", fnp)
with gzip.open(fnp, 'wb') as f:
json.dump(terms, f)
printWroteNumLines(fnp)
def run(args, DBCONN):
assemblies = ["hg19"] # Config.assemblies
if args.assembly:
assemblies = [args.assembly]
for assembly in assemblies:
if "hg19" != assembly:
print("skipping...")
continue
printt('***********', assembly)
ig = BuildOntology(assembly)
ig.run()
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--assembly", type=str, default="")
args = parser.parse_args()
return args
def main():
args = parse_args()
run(args, None)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
5145421
|
from .basic import Partial_Order_Models
from .loss import Ranking
from minder_utils.models.utils import Feature_extractor
from minder_utils.dataloader import Partial_Order_Loader
class Partial_Order(Feature_extractor):
def __init__(self):
super(Partial_Order, self).__init__()
self.model = Partial_Order_Models(**self.config["model"])
self.criterion = Ranking(**self.config["loss"])
def _custom_loader(self, data):
X, y = data
return Partial_Order_Loader(X, y, **self.config['loader'])
def step(self, data):
pre_anchor, anchor, post_anchor = data
loss = 0
for idx_day in range(len(post_anchor) - 1):
loss += self._step(post_anchor[idx_day], post_anchor[idx_day + 1], anchor)
loss += self._step(pre_anchor[idx_day], pre_anchor[idx_day + 1], anchor)
return loss
def _step(self, xi, xj, anchor):
ris, zis = self.model(xi)
rjs, zjs = self.model(xj)
ras, zas = self.model(anchor)
return self.criterion(zis, zjs, zas)
@staticmethod
def which_data(data):
return data[0]
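# Note: step() receives a (pre_anchor, anchor, post_anchor) batch from
# Partial_Order_Loader and accumulates the Ranking loss over consecutive days on
# each side of the anchor, presumably so that the learned embeddings preserve the
# temporal ordering of days relative to the anchor; the exact objective lives in
# minder_utils' Ranking class, which is not shown here.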
|
StarcoderdataPython
|
3347636
|
import re
import types
import os
from time import time
from collections import defaultdict
from itertools import izip
from nearpy.storage.storage import Storage
from os.path import join as pjoin
from nearpy.utils.utils import load_dict_from_json, save_dict_to_json
class FileStorage(Storage):
""" Storage using files and folders. """
def __init__(self, name, root="./"):
self.root = root
self.infos_filename = pjoin(root, "infos.json")
#Create repository structure
if not os.path.isdir(root):
os.makedirs(root)
if name != "":
self.buckets_dir = pjoin(root, name)
if not os.path.isdir(self.buckets_dir):
os.makedirs(self.buckets_dir)
if not os.path.isfile(self.infos_filename):
save_dict_to_json(self.infos_filename, {})
def get_info(self, key):
infos = load_dict_from_json(self.infos_filename)
return infos.get(key, [])
def set_info(self, key, value, append=False):
infos = load_dict_from_json(self.infos_filename)
if append:
if key not in infos:
infos[key] = []
infos[key].append(value)
else:
infos[key] = value
save_dict_to_json(self.infos_filename, infos)
def del_info(self, key, value=None):
infos = load_dict_from_json(self.infos_filename)
if value is not None:
infos[key].remove(value)
else:
del infos[key]
save_dict_to_json(self.infos_filename, infos)
def store(self, bucketkeys, bucketvalues):
buf = defaultdict(lambda: [])
start = time()
for attribute, values in bucketvalues.items():
for key, value in izip(bucketkeys, attribute.dumps(values)):
filename = pjoin(self.buckets_dir, key + "_" + attribute.name + ".npy")
buf[filename].append(value)
print "buffering: {:.2f} ({:,} buckets)".format(time()-start, len(buf)/len(bucketvalues))
start = time()
for filename, values in buf.items():
# with open(filename, 'ab') as f:
# f.write("".join(values))
data = ""
if os.path.isfile(filename):
data = open(filename, 'rb').read()
open(filename, 'wb').write(data + "".join(values))
print "writing: {:.2f}".format(time()-start)
return len(bucketkeys)
def retrieve(self, bucketkeys, attribute):
filenames = [pjoin(self.buckets_dir, bucketkey + "_" + attribute.name + ".npy") for bucketkey in bucketkeys]
results = []
for filename in filenames:
if os.path.isfile(filename):
results.append(open(filename).read())
else:
results.append("")
return [attribute.loads("".join(result)) for result in results]
def clear(self, bucketkeys):
"""
Parameters
----------
bucket_keys: iterable of string
keys of the buckets to delete
prefix: string
if set, clear every buckets having this prefix
Return
------
count: int
number of buckets cleared
"""
if not isinstance(bucketkeys, types.ListType) and not isinstance(bucketkeys, types.GeneratorType):
bucketkeys = [bucketkeys]
count = 0
for bucketkey in bucketkeys:
filename = pjoin(self.buckets_dir, bucketkey + ".npy")
if os.path.isfile(filename):
os.remove(filename)
count += 1
return count
def count(self, bucketkeys):
"""
Parameters
----------
bucketkeys: iterable of string
keys of buckets to count
Return
------
counts: list of int
size of each given bucket
"""
counts = []
suffix = "_label"
for bucketkey in bucketkeys:
filename = pjoin(self.buckets_dir, bucketkey + suffix + ".npy")
nb_bytes = os.path.getsize(filename)
counts.append(nb_bytes) # We suppose each label fits in a byte.
return counts
def bucketkeys(self, pattern=".*", as_generator=False):
suffix = "patch"
extension = ".npy"
pattern = "{pattern}_{suffix}{extension}".format(pattern=pattern, suffix=suffix, extension=extension)
regex = re.compile(pattern)
end = -(len(suffix) + len(extension) + 1)
filenames = os.listdir(self.buckets_dir)
keys = (filename[:end] for filename in filenames if regex.match(filename) is not None)
if not as_generator:
keys = list(keys)
return keys
def bucketkeys_all_attributes(self, pattern=".*", as_generator=False):
extension = ".npy"
pattern = "{pattern}{extension}".format(pattern=pattern, extension=extension)
regex = re.compile(pattern)
end = -len(extension)
filenames = os.listdir(self.buckets_dir)
keys = (filename[:end] for filename in filenames if regex.match(filename) is not None)
if not as_generator:
keys = list(keys)
return keys
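# --- Usage sketch (not part of the original module; paths and keys are illustrative) ---
# A minimal example of the JSON "infos" helpers defined above.
if __name__ == "__main__":
    storage = FileStorage(name="buckets", root="/tmp/nearpy_demo")
    storage.set_info("attributes", "patch", append=True)  # append a value under a list key
    storage.set_info("nb_projections", 12)                # set a scalar value
    print(storage.get_info("attributes"))                 # -> ['patch']
    storage.del_info("nb_projections")                    # remove the key again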
|
StarcoderdataPython
|
26794
|
# (C) British Crown Copyright 2011 - 2018, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
import types
import numpy as np
from numpy.testing import assert_array_almost_equal as assert_arr_almost
import pytest
import shapely.geometry as sgeom
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
#: Maps Google tile coordinates to native mercator coordinates as defined
#: by https://goo.gl/pgJi.
KNOWN_EXTENTS = {(0, 0, 0): (-20037508.342789244, 20037508.342789244,
-20037508.342789244, 20037508.342789244),
(2, 0, 2): (0., 10018754.17139462,
10018754.17139462, 20037508.342789244),
(0, 2, 2): (-20037508.342789244, -10018754.171394622,
-10018754.171394622, 0),
(2, 2, 2): (0, 10018754.17139462,
-10018754.171394622, 0),
(8, 9, 4): (0, 2504688.542848654,
-5009377.085697312, -2504688.542848654),
}
if ccrs.PROJ4_VERSION == (5, 0, 0):
KNOWN_EXTENTS = {
(0, 0, 0): (-20037508.342789244, 20037508.342789244,
-19994827.892149, 19994827.892149),
(2, 0, 2): (0, 10018754.171395,
9997413.946075, 19994827.892149),
(0, 2, 2): (-20037508.342789244, -10018754.171394622,
-9997413.946075, 0),
(2, 2, 2): (0, 10018754.171395,
-9997413.946075, 0),
(8, 9, 4): (0, 2504688.542849,
-4998706.973037, -2499353.486519),
}
def GOOGLE_IMAGE_URL_REPLACEMENT(self, tile):
url = ('https://chart.googleapis.com/chart?chst=d_text_outline&'
'chs=256x256&chf=bg,s,00000055&chld=FFFFFF|16|h|000000|b||||'
'Google:%20%20(' + str(tile[0]) + ',' + str(tile[1]) + ')'
'|Zoom%20' + str(tile[2]) + '||||||______________________'
'______')
return url
def test_google_tile_styles():
"""
Tests that setting the Google Maps tile style works as expected.
    This essentially just ensures that the style information is properly
    propagated through the class structure.
"""
reference_url = ("https://mts0.google.com/vt/lyrs={style}@177000000&hl=en"
"&src=api&x=1&y=2&z=3&s=G")
tile = ["1", "2", "3"]
# Default is street.
gt = cimgt.GoogleTiles()
url = gt._image_url(tile)
assert reference_url.format(style="m") == url
# Street
gt = cimgt.GoogleTiles(style="street")
url = gt._image_url(tile)
assert reference_url.format(style="m") == url
# Satellite
gt = cimgt.GoogleTiles(style="satellite")
url = gt._image_url(tile)
assert reference_url.format(style="s") == url
# Terrain
gt = cimgt.GoogleTiles(style="terrain")
url = gt._image_url(tile)
assert reference_url.format(style="t") == url
# Streets only
gt = cimgt.GoogleTiles(style="only_streets")
url = gt._image_url(tile)
assert reference_url.format(style="h") == url
# Exception is raised if unknown style is passed.
with pytest.raises(ValueError):
cimgt.GoogleTiles(style="random_style")
def test_google_wts():
gt = cimgt.GoogleTiles()
ll_target_domain = sgeom.box(-15, 50, 0, 60)
multi_poly = gt.crs.project_geometry(ll_target_domain, ccrs.PlateCarree())
target_domain = multi_poly.geoms[0]
with pytest.raises(AssertionError):
list(gt.find_images(target_domain, -1))
assert (tuple(gt.find_images(target_domain, 0)) ==
((0, 0, 0),))
assert (tuple(gt.find_images(target_domain, 2)) ==
((1, 1, 2), (2, 1, 2)))
assert (list(gt.subtiles((0, 0, 0))) ==
[(0, 0, 1), (0, 1, 1), (1, 0, 1), (1, 1, 1)])
assert (list(gt.subtiles((1, 0, 1))) ==
[(2, 0, 2), (2, 1, 2), (3, 0, 2), (3, 1, 2)])
with pytest.raises(AssertionError):
gt.tileextent((0, 1, 0))
assert_arr_almost(gt.tileextent((0, 0, 0)), KNOWN_EXTENTS[(0, 0, 0)])
assert_arr_almost(gt.tileextent((2, 0, 2)), KNOWN_EXTENTS[(2, 0, 2)])
assert_arr_almost(gt.tileextent((0, 2, 2)), KNOWN_EXTENTS[(0, 2, 2)])
assert_arr_almost(gt.tileextent((2, 2, 2)), KNOWN_EXTENTS[(2, 2, 2)])
assert_arr_almost(gt.tileextent((8, 9, 4)), KNOWN_EXTENTS[(8, 9, 4)])
def test_tile_bbox_y0_at_south_pole():
tms = cimgt.MapQuestOpenAerial()
# Check the y0_at_north_pole keywords returns the appropriate bounds.
assert_arr_almost(tms.tile_bbox(8, 6, 4, y0_at_north_pole=False),
np.array(KNOWN_EXTENTS[(8, 9, 4)]).reshape([2, 2]))
def test_tile_find_images():
gt = cimgt.GoogleTiles()
# Test the find_images method on a GoogleTiles instance.
ll_target_domain = sgeom.box(-10, 50, 10, 60)
multi_poly = gt.crs.project_geometry(ll_target_domain, ccrs.PlateCarree())
target_domain = multi_poly.geoms[0]
assert (list(gt.find_images(target_domain, 4)) ==
[(7, 4, 4), (7, 5, 4), (8, 4, 4), (8, 5, 4)])
@pytest.mark.network
def test_image_for_domain():
gt = cimgt.GoogleTiles()
gt._image_url = types.MethodType(GOOGLE_IMAGE_URL_REPLACEMENT, gt)
ll_target_domain = sgeom.box(-10, 50, 10, 60)
multi_poly = gt.crs.project_geometry(ll_target_domain, ccrs.PlateCarree())
target_domain = multi_poly.geoms[0]
_, extent, _ = gt.image_for_domain(target_domain, 6)
ll_extent = ccrs.Geodetic().transform_points(gt.crs,
np.array(extent[:2]),
np.array(extent[2:]))
if ccrs.PROJ4_VERSION == (5, 0, 0):
assert_arr_almost(ll_extent[:, :2],
[[-11.25, 49.033955],
[11.25, 61.687101]])
else:
assert_arr_almost(ll_extent[:, :2],
[[-11.25, 48.92249926],
[11.25, 61.60639637]])
def test_quadtree_wts():
qt = cimgt.QuadtreeTiles()
ll_target_domain = sgeom.box(-15, 50, 0, 60)
multi_poly = qt.crs.project_geometry(ll_target_domain, ccrs.PlateCarree())
target_domain = multi_poly.geoms[0]
with pytest.raises(ValueError):
list(qt.find_images(target_domain, 0))
assert qt.tms_to_quadkey((1, 1, 1)) == '1'
assert qt.quadkey_to_tms('1') == (1, 1, 1)
assert qt.tms_to_quadkey((8, 9, 4)) == '1220'
assert qt.quadkey_to_tms('1220') == (8, 9, 4)
assert tuple(qt.find_images(target_domain, 1)) == ('0', '1')
assert tuple(qt.find_images(target_domain, 2)) == ('03', '12')
assert list(qt.subtiles('0')) == ['00', '01', '02', '03']
assert list(qt.subtiles('11')) == ['110', '111', '112', '113']
with pytest.raises(ValueError):
qt.tileextent('4')
assert_arr_almost(qt.tileextent(''), KNOWN_EXTENTS[(0, 0, 0)])
assert_arr_almost(qt.tileextent(qt.tms_to_quadkey((2, 0, 2), google=True)),
KNOWN_EXTENTS[(2, 0, 2)])
assert_arr_almost(qt.tileextent(qt.tms_to_quadkey((0, 2, 2), google=True)),
KNOWN_EXTENTS[(0, 2, 2)])
assert_arr_almost(qt.tileextent(qt.tms_to_quadkey((2, 0, 2), google=True)),
KNOWN_EXTENTS[(2, 0, 2)])
assert_arr_almost(qt.tileextent(qt.tms_to_quadkey((2, 2, 2), google=True)),
KNOWN_EXTENTS[(2, 2, 2)])
assert_arr_almost(qt.tileextent(qt.tms_to_quadkey((8, 9, 4), google=True)),
KNOWN_EXTENTS[(8, 9, 4)])
def test_mapbox_tiles_api_url():
token = 'foo'
map_name = 'bar'
tile = [0, 1, 2]
exp_url = ('https://api.mapbox.com/v4/mapbox.bar'
'/2/0/1.png?access_token=foo')
mapbox_sample = cimgt.MapboxTiles(token, map_name)
url_str = mapbox_sample._image_url(tile)
assert url_str == exp_url
def test_mapbox_style_tiles_api_url():
token = 'foo'
username = 'baz'
map_id = 'bar'
tile = [0, 1, 2]
exp_url = ('https://api.mapbox.com/styles/v1/'
'baz/bar/tiles/256/2/0/1'
'?access_token=foo')
mapbox_sample = cimgt.MapboxStyleTiles(token, username, map_id)
url_str = mapbox_sample._image_url(tile)
assert url_str == exp_url
|
StarcoderdataPython
|
4956910
|
<gh_stars>0
from django.shortcuts import render, redirect,get_object_or_404
from .models import Cliente
from .forms import ClienteForm
from django.contrib import messages
from django.db.models import Q
from django.core.paginator import Paginator
# Create your views here.
def lista_de_clientes(request):
clientes = Cliente.objects.all().order_by('-id')
querySet = request.GET.get('q')
if (querySet):
clientes = Cliente.objects.filter(
Q(nome__icontains=querySet) |
Q(email__icontains=querySet) |
Q(cpf__icontains=querySet)
)
    # pagination
    paginator = Paginator(clientes, 5)  # show 5 clients per page
page = request.GET.get('page')
clientes = paginator.get_page(page)
v_template="clientes/lista_de_clientes.html"
v_context_parms = {"clientes":clientes}
return render(request,v_template, v_context_parms)
def adicionar_cliente(request):
    form = ClienteForm(request.POST or None)
    if form.is_valid():
        form.save()
form = ClienteForm()
messages.success(request, 'Cliente adicionado com sucesso')
v_to = 'lista_de_clientes'
return redirect(v_to)
v_template="clientes/adicionar_cliente.html"
v_context_parms = {"form":form}
return render(request,v_template, v_context_parms)
def editar_cliente(request, id=None):
cliente = get_object_or_404(Cliente, id=id)
form = ClienteForm(request.POST or None, instance=cliente)
if form.is_valid():
        form.save()
form = ClienteForm()
v_to = 'lista_de_clientes'
messages.info(request, 'Cliente editado com sucesso')
return redirect(v_to)
v_template = "clientes/editar_cliente.html"
v_context_parms = {"form": form}
return render(request, v_template, v_context_parms)
def remover_cliente(request, id=None):
cliente = get_object_or_404(Cliente, id=id)
if(request.method == 'POST'):
cliente.delete()
messages.warning(request, 'Cliente removido com sucesso')
v_to = 'lista_de_clientes'
return redirect(v_to)
v_template = "clientes/remover_cliente.html"
v_context_parms = {"cliente": cliente}
return render(request, v_template, v_context_parms)
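# --- Routing sketch (assumed, not part of the original app) ---
# The redirect() calls above resolve URL names such as 'lista_de_clientes', so the
# app presumably wires these views up in a urls.py along these lines (module and
# URL paths below are illustrative):
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('', views.lista_de_clientes, name='lista_de_clientes'),
#       path('adicionar/', views.adicionar_cliente, name='adicionar_cliente'),
#       path('editar/<int:id>/', views.editar_cliente, name='editar_cliente'),
#       path('remover/<int:id>/', views.remover_cliente, name='remover_cliente'),
#   ]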
|
StarcoderdataPython
|
11233767
|
from resonator_tools import circuit
from pathlib import Path
from pandas import read_csv
from numpy import float64
from pyqum.instrument.analyzer import curve
import inspect
pyfilename = inspect.getfile(inspect.currentframe()) # current pyscript filename (usually with path)
MAIN_PATH = Path(pyfilename).parents[5] / "HODOR" / "CONFIG" # ...parents[7]... for logger
PORTAL_PATH = MAIN_PATH / "PORTAL"
Datafile = Path(PORTAL_PATH) / '1Dfresp[abc].csv'
title = "<b>frequency(GHz)</b>"
dataf = read_csv(Datafile, dtype={'I': float64})
print('I: %.8f' %dataf['I'][0])
port1 = circuit.notch_port()
port1.add_fromtxt(Datafile,'realimag',1,(0,3,4),fdata_unit=1e9,delimiter=',')
port1.autofit()
print("Fit results:", port1.fitresults)
# port1.plotall()
print('z_data %s' %port1.z_data)
print('z_data_sim %s' %port1.z_data_sim)
print('z_data_raw %s' %port1.z_data_raw)
x, y = [], []
for i in range(2):
x.append(dataf[title])
y.append(abs(port1.z_data_raw))
y.append(abs(port1.z_data_sim))
curve(x,y,'Q_fit','freq','I',style=['or','.b'])
I, Q = [], []
I.append(port1.z_data_raw.real)
I.append(port1.z_data_sim.real)
Q.append(port1.z_data_raw.imag)
Q.append(port1.z_data_sim.imag)
curve(I,Q,'IQ_fit','I','Q',style=['or','.b'])
print("single photon limit:", port1.get_single_photon_limit(diacorr=True), "dBm")
print("photons in resonator for input -140dBm:", port1.get_photons_in_resonator(-140,unit='dBm',diacorr=True), "photons")
|
StarcoderdataPython
|
5197713
|
<gh_stars>1-10
import jupyter
print(jupyter)
print('Successful.')
|
StarcoderdataPython
|
27399
|
<gh_stars>1-10
import MagicPanels
MagicPanels.panelMove("Xp")
|
StarcoderdataPython
|
56490
|
<gh_stars>0
#
# @lc app=leetcode id=332 lang=python3
#
# [332] Reconstruct Itinerary
#
# @lc code=start
import collections
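# Approach: Hierholzer's algorithm for an Eulerian path. Build an adjacency list,
# sort each destination list in reverse so that pop() always removes the
# lexicographically smallest airport, walk the edges depth-first, and append an
# airport only once all of its outgoing tickets are used; reversing that
# post-order gives the itinerary.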
class Solution:
def findItinerary(self, tickets):
graph = collections.defaultdict(list)
city_counter = len(tickets) + 1
for pair in tickets:
graph[pair[0]].append(pair[1])
for k, v in graph.items():
v.sort(reverse = True)
res = []
self.dfs(graph, "JFK", res)
return res[::-1]
def dfs(self, graph, frm, res):
while graph[frm]:
nxt = graph[frm].pop()
self.dfs(graph, nxt, res)
res.append(frm)
# @lc code=end
if __name__ == '__main__':
a = Solution()
b = a.findItinerary([["JFK","SFO"],["JFK","ATL"],["SFO","ATL"],["ATL","JFK"],["ATL","SFO"]])
    print(b)  # expected: ['JFK', 'ATL', 'JFK', 'SFO', 'ATL', 'SFO']
|
StarcoderdataPython
|
6412804
|
import os
def find_keytxts():
file_path = 'key_emotion'
try:
os.mkdir(file_path)
        print(file_path + ' folder created; please check the current file path')
except Exception as e:
        print('Folder already exists; please check the current program path')
    # Dictionary-based keyword lookup
key_words_list = [{'环境','周边','风景','空气','江景','小区','景点','夜景','街','周围','景区','声音','景色'},
{'价格','房价','性价比','价位','单价','价钱'},
{'特色','装潢','布置','建筑','结构','格调','装修','设计','风格','隔音'},
{'设施','设备','条件','硬件','房间','热水','马桶','电梯','阳台','卫生间','洗手间','空调','被子','床','大厅','电话','电','摆设'},
{'餐饮','早餐','咖啡','味道','饭','菜','水果','特产','餐','美食','烧烤','宵夜','食材','饭馆','小吃'},
{'交通','车程','地段','路程','停车','机场','离','车站','地理','位置','地理','中心','海拔','码头'},
{'服务','态度','前台','服务员','老板','掌柜','店家','工作人员'},
{'体验','整体','感觉'},]
i = 0
for key_words in key_words_list:
find_keytxt(i,key_words)
i += 1
def find_keytxt(number,key_words):
    key_list = {0: '环境',   # environment
                1: '价格',   # price
                2: '特色',   # character / decor
                3: '设施',   # facilities
                4: '餐饮',   # dining
                5: '交通',   # transport / location
                6: '服务',   # service
                7: '体验',   # overall experience
                }
f = open('key/pure_cut_final.txt','r',encoding='utf-8')
key_txt = open('key_list/%s.txt'%key_list[number],'w',encoding='utf-8')
    for sentence in f:
        # write each sentence once if it mentions any keyword of this category
        if any(word in sentence for word in key_words):
            key_txt.write(sentence)
f.close()
key_txt.close()
    print(key_list[number] + ' search finished')
if __name__ == '__main__':
find_keytxts()
|
StarcoderdataPython
|
4894698
|
# -*- coding: utf-8 -*-
from hcloud.core.client import ClientEntityBase, BoundModelBase, GetEntityByNameMixin
from hcloud.core.domain import add_meta_to_result
from hcloud.actions.client import BoundAction
from hcloud.networks.domain import Network, NetworkRoute, NetworkSubnet
class BoundNetwork(BoundModelBase):
model = Network
def __init__(self, client, data, complete=True):
subnets = data.get("subnets", [])
if subnets is not None:
subnets = [NetworkSubnet.from_dict(subnet) for subnet in subnets]
data['subnets'] = subnets
routes = data.get("routes", [])
if routes is not None:
routes = [NetworkRoute.from_dict(route) for route in routes]
data['routes'] = routes
from hcloud.servers.client import BoundServer
servers = data.get("servers", [])
if servers is not None:
servers = [BoundServer(client._client.servers, {"id": server}, complete=False) for server in servers]
data['servers'] = servers
super(BoundNetwork, self).__init__(client, data, complete)
def update(self, name=None, labels=None):
# type: (Optional[str], Optional[Dict[str, str]]) -> BoundNetwork
"""Updates a network. You can update a network’s name and a networks’s labels.
:param name: str (optional)
New name to set
:param labels: Dict[str, str] (optional)
User-defined labels (key-value pairs)
:return: :class:`BoundNetwork <hcloud.networks.client.BoundNetwork>`
"""
return self._client.update(self, name, labels)
def delete(self):
        # type: () -> bool
"""Deletes a network.
:return: boolean
"""
return self._client.delete(self)
def get_actions_list(self, status=None, sort=None, page=None, per_page=None):
        # type: (Optional[List[str]], Optional[List[str]], Optional[int], Optional[int]) -> PageResults[List[BoundAction], Meta]
"""Returns all action objects for a network.
:param status: List[str] (optional)
Response will have only actions with specified statuses. Choices: `running` `success` `error`
:param sort: List[str] (optional)
Specify how the results are sorted. Choices: `id` `id:asc` `id:desc` `command` `command:asc` `command:desc` `status` `status:asc` `status:desc` `progress` `progress:asc` `progress:desc` `started` `started:asc` `started:desc` `finished` `finished:asc` `finished:desc`
:param page: int (optional)
Specifies the page to fetch
:param per_page: int (optional)
Specifies how many results are returned by page
:return: (List[:class:`BoundAction <hcloud.actions.client.BoundAction>`], :class:`Meta <hcloud.core.domain.Meta>`)
"""
return self._client.get_actions_list(self, status, sort, page, per_page)
def get_actions(self, status=None, sort=None):
# type: (Optional[List[str]], Optional[List[str]]) -> List[BoundAction]
"""Returns all action objects for a network.
:param status: List[str] (optional)
Response will have only actions with specified statuses. Choices: `running` `success` `error`
:param sort: List[str] (optional)
Specify how the results are sorted. Choices: `id` `id:asc` `id:desc` `command` `command:asc` `command:desc` `status` `status:asc` `status:desc` `progress` `progress:asc` `progress:desc` `started` `started:asc` `started:desc` `finished` `finished:asc` `finished:desc`
:return: List[:class:`BoundAction <hcloud.actions.client.BoundAction>`]
"""
return self._client.get_actions(self, status, sort)
def add_subnet(self, subnet):
        # type: (NetworkSubnet) -> BoundAction
"""Adds a subnet entry to a network.
:param subnet: :class:`NetworkSubnet <hcloud.networks.domain.NetworkSubnet>`
The NetworkSubnet you want to add to the Network
:return: :class:`BoundAction <hcloud.actions.client.BoundAction>`
"""
return self._client.add_subnet(self, subnet=subnet)
def delete_subnet(self, subnet):
        # type: (NetworkSubnet) -> BoundAction
"""Removes a subnet entry from a network
:param subnet: :class:`NetworkSubnet <hcloud.networks.domain.NetworkSubnet>`
The NetworkSubnet you want to remove from the Network
:return: :class:`BoundAction <hcloud.actions.client.BoundAction>`
"""
return self._client.delete_subnet(self, subnet=subnet)
def add_route(self, route):
        # type: (NetworkRoute) -> BoundAction
"""Adds a route entry to a network.
:param route: :class:`NetworkRoute <hcloud.networks.domain.NetworkRoute>`
The NetworkRoute you want to add to the Network
:return: :class:`BoundAction <hcloud.actions.client.BoundAction>`
"""
return self._client.add_route(self, route=route)
def delete_route(self, route):
        # type: (NetworkRoute) -> BoundAction
        """Removes a route entry from a network.
:param route: :class:`NetworkRoute <hcloud.networks.domain.NetworkRoute>`
The NetworkRoute you want to remove from the Network
:return: :class:`BoundAction <hcloud.actions.client.BoundAction>`
"""
return self._client.delete_route(self, route=route)
def change_ip_range(self, ip_range):
        # type: (str) -> BoundAction
"""Changes the IP range of a network.
:param ip_range: str
The new prefix for the whole network.
:return: :class:`BoundAction <hcloud.actions.client.BoundAction>`
"""
return self._client.change_ip_range(self, ip_range=ip_range)
def change_protection(self, delete=None):
# type: (Optional[bool]) -> BoundAction
"""Changes the protection configuration of a network.
:param delete: boolean
If True, prevents the network from being deleted
:return: :class:`BoundAction <hcloud.actions.client.BoundAction>`
"""
return self._client.change_protection(self, delete=delete)
class NetworksClient(ClientEntityBase, GetEntityByNameMixin):
results_list_attribute_name = "networks"
def get_by_id(self, id):
# type: (int) -> BoundNetwork
"""Get a specific network
:param id: int
:return: :class:`BoundNetwork <hcloud.networks.client.BoundNetwork>
"""
response = self._client.request(
url="/networks/{network_id}".format(network_id=id), method="GET"
)
return BoundNetwork(self, response["network"])
def get_list(
self,
name=None, # type: Optional[str]
label_selector=None, # type: Optional[str]
page=None, # type: Optional[int]
per_page=None, # type: Optional[int]
):
# type: (...) -> PageResults[List[BoundNetwork], Meta]
"""Get a list of networks from this account
:param name: str (optional)
Can be used to filter networks by their name.
:param label_selector: str (optional)
Can be used to filter networks by labels. The response will only contain networks matching the label selector.
:param page: int (optional)
Specifies the page to fetch
:param per_page: int (optional)
Specifies how many results are returned by page
:return: (List[:class:`BoundNetwork <hcloud.networks.client.BoundNetwork>`], :class:`Meta <hcloud.core.domain.Meta>`)
"""
params = {}
if name is not None:
params["name"] = name
if label_selector is not None:
params["label_selector"] = label_selector
if page is not None:
params["page"] = page
if per_page is not None:
params["per_page"] = per_page
response = self._client.request(url="/networks", method="GET", params=params)
ass_networks = [
BoundNetwork(self, network_data) for network_data in response["networks"]
]
return self._add_meta_to_result(ass_networks, response)
def get_all(self, name=None, label_selector=None):
# type: (Optional[str], Optional[str]) -> List[BoundNetwork]
"""Get all networks from this account
:param name: str (optional)
Can be used to filter networks by their name.
:param label_selector: str (optional)
Can be used to filter networks by labels. The response will only contain networks matching the label selector.
:return: List[:class:`BoundNetwork <hcloud.networks.client.BoundNetwork>`]
"""
return super(NetworksClient, self).get_all(
name=name, label_selector=label_selector
)
def get_by_name(self, name):
# type: (str) -> BoundNetwork
"""Get network by name
:param name: str
Used to get network by name.
:return: :class:`BoundNetwork <hcloud.networks.client.BoundNetwork>`
"""
return super(NetworksClient, self).get_by_name(name)
def create(
self,
name, # type: str
ip_range, # type: str
subnets=None, # type: Optional[List[NetworkSubnet]]
routes=None, # type: Optional[List[NetworkRoute]]
labels=None, # type: Optional[Dict[str, str]]
):
"""Creates a network with range ip_range.
:param name: str
Name of the network
:param ip_range: str
IP range of the whole network which must span all included subnets and route destinations
:param subnets: List[:class:`NetworkSubnet <hcloud.networks.domain.NetworkSubnet>`]
Array of subnets allocated
:param routes: List[:class:`NetworkRoute <hcloud.networks.domain.NetworkRoute>`]
Array of routes set in this network
:param labels: Dict[str, str] (optional)
User-defined labels (key-value pairs)
:return: :class:`BoundNetwork <hcloud.networks.client.BoundNetwork>`
"""
data = {"name": name, "ip_range": ip_range}
if subnets is not None:
data["subnets"] = [{'type': subnet.type, 'ip_range': subnet.ip_range, 'network_zone': subnet.network_zone}
for subnet in subnets]
if routes is not None:
data["routes"] = [{'destination': route.destination, 'gateway': route.gateway} for route in routes]
if labels is not None:
data["labels"] = labels
response = self._client.request(url="/networks", method="POST", json=data)
return BoundNetwork(self, response["network"])
def update(self, network, name=None, labels=None):
# type:(Network, Optional[str], Optional[Dict[str, str]]) -> BoundNetwork
"""Updates a network. You can update a network’s name and a network’s labels.
:param network: :class:`BoundNetwork <hcloud.networks.client.BoundNetwork>` or :class:`Network <hcloud.networks.domain.Network>`
:param name: str (optional)
New name to set
:param labels: Dict[str, str] (optional)
User-defined labels (key-value pairs)
:return: :class:`BoundNetwork <hcloud.networks.client.BoundNetwork>`
"""
data = {}
if name is not None:
data.update({"name": name})
if labels is not None:
data.update({"labels": labels})
response = self._client.request(
url="/networks/{network_id}".format(network_id=network.id),
method="PUT",
json=data,
)
return BoundNetwork(self, response["network"])
def delete(self, network):
        # type: (Network) -> bool
"""Deletes a network.
:param network: :class:`BoundNetwork <hcloud.networks.client.BoundNetwork>` or :class:`Network <hcloud.networks.domain.Network>`
:return: boolean
"""
self._client.request(
url="/networks/{network_id}".format(network_id=network.id), method="DELETE"
)
return True
def get_actions_list(
self, network, status=None, sort=None, page=None, per_page=None
):
# type: (Network, Optional[List[str]], Optional[List[str]], Optional[int], Optional[int]) -> PageResults[List[BoundAction], Meta]
"""Returns all action objects for a network.
:param network: :class:`BoundNetwork <hcloud.networks.client.BoundNetwork>` or :class:`Network <hcloud.networks.domain.Network>`
:param status: List[str] (optional)
Response will have only actions with specified statuses. Choices: `running` `success` `error`
:param sort: List[str] (optional)
Specify how the results are sorted. Choices: `id` `id:asc` `id:desc` `command` `command:asc` `command:desc` `status` `status:asc` `status:desc` `progress` `progress:asc` `progress:desc` `started` `started:asc` `started:desc` `finished` `finished:asc` `finished:desc`
:param page: int (optional)
Specifies the page to fetch
:param per_page: int (optional)
Specifies how many results are returned by page
:return: (List[:class:`BoundAction <hcloud.actions.client.BoundAction>`], :class:`Meta <hcloud.core.domain.Meta>`)
"""
params = {}
if status is not None:
params["status"] = status
if sort is not None:
params["sort"] = sort
if page is not None:
params["page"] = page
if per_page is not None:
params["per_page"] = per_page
response = self._client.request(
url="/networks/{network_id}/actions".format(network_id=network.id),
method="GET",
params=params,
)
actions = [
BoundAction(self._client.actions, action_data)
for action_data in response["actions"]
]
return add_meta_to_result(actions, response, "actions")
def get_actions(self, network, status=None, sort=None):
# type: (Network, Optional[List[str]], Optional[List[str]]) -> List[BoundAction]
"""Returns all action objects for a network.
:param network: :class:`BoundNetwork <hcloud.networks.client.BoundNetwork>` or :class:`Network <hcloud.networks.domain.Network>`
:param status: List[str] (optional)
Response will have only actions with specified statuses. Choices: `running` `success` `error`
:param sort: List[str] (optional)
Specify how the results are sorted. Choices: `id` `id:asc` `id:desc` `command` `command:asc` `command:desc` `status` `status:asc` `status:desc` `progress` `progress:asc` `progress:desc` `started` `started:asc` `started:desc` `finished` `finished:asc` `finished:desc`
:return: List[:class:`BoundAction <hcloud.actions.client.BoundAction>`]
"""
return super(NetworksClient, self).get_actions(
network, status=status, sort=sort
)
def add_subnet(self, network, subnet):
        # type: (Union[Network, BoundNetwork], NetworkSubnet) -> BoundAction
"""Adds a subnet entry to a network.
:param network: :class:`BoundNetwork <hcloud.networks.client.BoundNetwork>` or :class:`Network <hcloud.networks.domain.Network>`
:param subnet: :class:`NetworkSubnet <hcloud.networks.domain.NetworkSubnet>`
The NetworkSubnet you want to add to the Network
:return: :class:`BoundAction <hcloud.actions.client.BoundAction>`
"""
data = {
"type": subnet.type,
"network_zone": subnet.network_zone,
}
if subnet.ip_range is not None:
data["ip_range"] = subnet.ip_range
response = self._client.request(
url="/networks/{network_id}/actions/add_subnet".format(network_id=network.id),
method="POST", json=data)
return BoundAction(self._client.actions, response['action'])
def delete_subnet(self, network, subnet):
        # type: (Union[Network, BoundNetwork], NetworkSubnet) -> BoundAction
"""Removes a subnet entry from a network
:param network: :class:`BoundNetwork <hcloud.networks.client.BoundNetwork>` or :class:`Network <hcloud.networks.domain.Network>`
:param subnet: :class:`NetworkSubnet <hcloud.networks.domain.NetworkSubnet>`
The NetworkSubnet you want to remove from the Network
:return: :class:`BoundAction <hcloud.actions.client.BoundAction>`
"""
data = {
"ip_range": subnet.ip_range,
}
response = self._client.request(
url="/networks/{network_id}/actions/delete_subnet".format(network_id=network.id),
method="POST", json=data)
return BoundAction(self._client.actions, response['action'])
def add_route(self, network, route):
        # type: (Union[Network, BoundNetwork], NetworkRoute) -> BoundAction
"""Adds a route entry to a network.
:param network: :class:`BoundNetwork <hcloud.networks.client.BoundNetwork>` or :class:`Network <hcloud.networks.domain.Network>`
:param route: :class:`NetworkRoute <hcloud.networks.domain.NetworkRoute>`
The NetworkRoute you want to add to the Network
:return: :class:`BoundAction <hcloud.actions.client.BoundAction>`
"""
data = {
"destination": route.destination,
"gateway": route.gateway,
}
response = self._client.request(
url="/networks/{network_id}/actions/add_route".format(network_id=network.id),
method="POST", json=data)
return BoundAction(self._client.actions, response['action'])
def delete_route(self, network, route):
        # type: (Union[Network, BoundNetwork], NetworkRoute) -> BoundAction
        """Removes a route entry from a network.
:param network: :class:`BoundNetwork <hcloud.networks.client.BoundNetwork>` or :class:`Network <hcloud.networks.domain.Network>`
:param route: :class:`NetworkRoute <hcloud.networks.domain.NetworkRoute>`
The NetworkRoute you want to remove from the Network
:return: :class:`BoundAction <hcloud.actions.client.BoundAction>`
"""
data = {
"destination": route.destination,
"gateway": route.gateway,
}
response = self._client.request(
url="/networks/{network_id}/actions/delete_route".format(network_id=network.id),
method="POST", json=data)
return BoundAction(self._client.actions, response['action'])
def change_ip_range(self, network, ip_range):
        # type: (Union[Network, BoundNetwork], str) -> BoundAction
"""Changes the IP range of a network.
:param network: :class:`BoundNetwork <hcloud.networks.client.BoundNetwork>` or :class:`Network <hcloud.networks.domain.Network>`
:param ip_range: str
The new prefix for the whole network.
:return: :class:`BoundAction <hcloud.actions.client.BoundAction>`
"""
data = {
"ip_range": ip_range,
}
response = self._client.request(
url="/networks/{network_id}/actions/change_ip_range".format(network_id=network.id),
method="POST", json=data)
return BoundAction(self._client.actions, response['action'])
def change_protection(self, network, delete=None):
# type: (Union[Network, BoundNetwork], Optional[bool]) -> BoundAction
"""Changes the protection configuration of a network.
:param network: :class:`BoundNetwork <hcloud.networks.client.BoundNetwork>` or :class:`Network <hcloud.networks.domain.Network>`
:param delete: boolean
If True, prevents the network from being deleted
:return: :class:`BoundAction <hcloud.actions.client.BoundAction>`
"""
data = {}
if delete is not None:
data.update({"delete": delete})
response = self._client.request(
url="/networks/{network_id}/actions/change_protection".format(network_id=network.id),
method="POST", json=data)
return BoundAction(self._client.actions, response['action'])
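# --- Usage sketch (illustrative, not part of this module) ---
# NetworksClient is normally reached through the top-level hcloud Client; the token,
# names, IP ranges and network zone below are placeholders, not values from this file:
#
#   from hcloud import Client
#   from hcloud.networks.domain import NetworkSubnet
#
#   client = Client(token="<api-token>")
#   network = client.networks.create(
#       name="my-network",
#       ip_range="10.0.0.0/16",
#       subnets=[NetworkSubnet(type="cloud", ip_range="10.0.1.0/24", network_zone="eu-central")],
#   )
#   action = network.change_protection(delete=True)  # returns a BoundAction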
|
StarcoderdataPython
|
11284918
|
<reponame>nasa/scrub<filename>scrub/scrub_cli.py
import sys
from scrub import scrubme
from scrub.utils import diff_results
from scrub.utils import scrub_utilities
help_message = ('run\n' +
scrubme.main.__doc__ + '\n\n'
'diff\n' +
diff_results.diff.__doc__ + '\n\n'
'get-conf\n' +
scrub_utilities.create_conf_file.__doc__ + '\n')
def main():
"""Console script for SCRUB."""
if len(sys.argv) <= 1:
print(help_message)
else:
if 'run' in sys.argv or 'run-all' in sys.argv:
# Run analysis
scrubme.parse_arguments()
elif 'run-tool' in sys.argv:
# Get the tool name
tool = sys.argv[sys.argv.index('--module') + 1].split('_')[-1]
insert_index = sys.argv.index('--module')
# Update the arguments
sys.argv[1] = 'run'
sys.argv[insert_index] = '--tool'
sys.argv[insert_index + 1] = tool
# Run analysis
scrubme.parse_arguments()
elif 'diff' in sys.argv:
# Run analysis
diff_results.parse_arguments()
elif 'get-conf' in sys.argv:
# Run analysis
scrub_utilities.create_conf_file()
else:
print(help_message)
return 0
if __name__ == "__main__":
sys.exit(main())
|
StarcoderdataPython
|
1869145
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 15 09:57:21 2017
@author: dalonlobo
"""
from __future__ import absolute_import, division, print_function
import os
import os.path as ospath
import sys
import subprocess
import argparse
import pandas as pd
import scipy.io.wavfile as wav
from timeit import default_timer as timer
from deepspeech.model import Model
from pydub import AudioSegment
from pydub.effects import normalize
from pydub.silence import split_on_silence
class AudioProcessing():
DEBUG = False # set to true for verbose
MSL = 500 # minimum silence length in ms
# These constants control the beam search decoder
# Beam width used in the CTC decoder when building candidate transcriptions
BEAM_WIDTH = 500
# The alpha hyperparameter of the CTC decoder. Language Model weight
# LM_WEIGHT = 1.75
LM_WEIGHT = 1.75
# The beta hyperparameter of the CTC decoder. Word insertion weight (penalty)
WORD_COUNT_WEIGHT = 1.00
# Valid word insertion weight. This is used to lessen the word insertion penalty
# when the inserted word is part of the vocabulary
VALID_WORD_COUNT_WEIGHT = 1.00
# These constants are tied to the shape of the graph used (changing them changes
# the geometry of the first layer), so make sure you use the same constants that
# were used during training
# Number of MFCC features to use
N_FEATURES = 26
# Size of the context window used for producing timesteps in the input vector
N_CONTEXT = 9
def __init__(self, args):
self.fpath = args.fpath # Input video file path
self.args = args
def convert_mp4_to_wav(self, fpath_in, fpath_out):
"""Convert to wav format with 1 channel and 16Khz freq"""
cmd = "ffmpeg -i '" + fpath_in + "' -ar 16000 -ac 1 '" + fpath_out + "'"
return cmd
def execute_cmd_on_system(self, command):
p = subprocess.Popen(command, bufsize=2048, shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=(sys.platform != 'win32'))
output = p.communicate()
print("Executed : " + command)
if self.DEBUG:
print(output)
def process_wav(self):
# Create temporary directory, to hold the audio chunks
tmp_dir = os.path.join(os.path.dirname(self.fpath), "tmp")
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
# Convert mp4 to wav
output_wav_path = ospath.join(ospath.split(ospath.abspath(self.fpath))[0],\
"tmp", "output.wav")
self.execute_cmd_on_system(\
self.convert_mp4_to_wav(self.fpath, output_wav_path))
# Segmenting the audio
input_audio = AudioSegment.from_file(output_wav_path, format="wav")
# Normalizing the audio file
full_audio_wav = normalize(input_audio)
print("Length of the entire audio: ", len(full_audio_wav))
# Calculating the silence threshold
loudness_ms_list = []
for ms_chunk in full_audio_wav:
loudness_ms_list.append(round(ms_chunk.dBFS))
# st = silence threshold
st = pd.DataFrame(loudness_ms_list).mode()[0][0]
print("Set the silence threshold to: ", st)
st = st if st < -16 else -16 # Because -16db is default
chunks = split_on_silence(
full_audio_wav,
            # split on silences longer than MSL (500 ms)
            min_silence_len=self.MSL,
            # anything under -36 dBFS is considered silence (hardcoded; the computed st above is unused)
            silence_thresh=-36,
            # keep 200 ms of leading/trailing silence
            keep_silence=200,
)
# for i, chunk in enumerate(chunks):
# chunk_file_name = tmp_dir + "/chunk{0}.wav".format(i)
# chunk.export(chunk_file_name, format="wav")
# Loading the deepspeech module
print('Loading model from file %s' % (self.args.model), file=sys.stderr)
model_load_start = timer()
ds = Model(self.args.model, self.N_FEATURES, self.N_CONTEXT,
self.args.alphabet, self.BEAM_WIDTH)
model_load_end = timer() - model_load_start
print('Loaded model in %0.3fs.' % (model_load_end), file=sys.stderr)
if self.args.lm and self.args.trie:
print('Loading language model from files %s %s' % (self.args.lm,
self.args.trie),
file=sys.stderr)
lm_load_start = timer()
ds.enableDecoderWithLM(self.args.alphabet, self.args.lm,
self.args.trie, self.LM_WEIGHT,
self.WORD_COUNT_WEIGHT,
self.VALID_WORD_COUNT_WEIGHT)
lm_load_end = timer() - lm_load_start
print('Loaded language model in %0.3fs.' % (lm_load_end),
file=sys.stderr)
output_text_file = tmp_dir + "/output_decoded.txt"
with open(output_text_file, "w+") as output_text:
for i, chunk in enumerate(chunks):
chunk_file_name = tmp_dir + "/chunk{0}.wav".format(i)
chunk.export(chunk_file_name, format="wav")
fs, audio = wav.read(chunk_file_name)
# We can assume 16kHz
audio_length = len(audio) * ( 1 / 16000)
print('Running inference.', file=sys.stderr)
inference_start = timer()
model_stt = ds.stt(audio, fs) + " "
print(model_stt)
output_text.write(model_stt)
inference_end = timer() - inference_start
print('Inference took %0.3fs for %0.3fs audio file.' % (inference_end,
audio_length),
file=sys.stderr)
print("Processing done")
def main():
# Use the following for defaults
# model /home/dalonlobo/deepspeech_models/models/output_graph.pb
# audio /home/dalonlobo/deepspeech_models/models/2830-3980-0043.wav
# alphabet /home/dalonlobo/deepspeech_models/lm_models/alphabet.txt
# lm /home/dalonlobo/deepspeech_models/lm_models/lm_o5.binary
# trie /home/dalonlobo/deepspeech_models/lm_models/o5_trie
# python audio_processing.py --fpath v2.mp4
parser = argparse.ArgumentParser(description="Preprocessing the audio")
parser.add_argument("--fpath", type=str,
help="Enter the file path to the video mp4 file")
parser.add_argument('--model', type=str, nargs='?',
default='/home/dalonlobo/deepspeech_models/models/output_graph.pb',
help='Path to the model (protocol buffer binary file)')
parser.add_argument('--audio', type=str, nargs='?',
default='/home/dalonlobo/deepspeech_models/models/2830-3980-0043.wav',
help='Path to the audio file to run (WAV format)')
parser.add_argument('--alphabet', type=str, nargs='?',
default='/home/dalonlobo/deepspeech_models/models/alphabet.txt',
help='Path to the configuration file specifying the alphabet used by the network')
parser.add_argument('--lm', type=str, nargs='?',
default='/home/dalonlobo/deepspeech_models/models/lm.binary',
help='Path to the language model binary file')
parser.add_argument('--trie', type=str, nargs='?',
default='/home/dalonlobo/deepspeech_models/models/trie',
help='Path to the language model trie file created with native_client/generate_trie')
args = parser.parse_args()
audio = AudioProcessing(args)
audio.process_wav()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3437904
|
<filename>src/frr/tests/topotests/bgp_aggregator_zero/test_bgp_aggregator_zero.py
#!/usr/bin/env python
#
# Copyright (c) 2021 by
# <NAME> <<EMAIL>>
#
# Permission to use, copy, modify, and/or distribute this software
# for any purpose with or without fee is hereby granted, provided
# that the above copyright notice and this permission notice appear
# in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
#
"""
Test if BGP UPDATE with AGGREGATOR AS attribute with value zero (0)
is continued to be processed, but AGGREGATOR attribute is discarded.
"""
import os
import sys
import json
import pytest
import functools
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
pytestmark = [pytest.mark.bgpd]
def build_topo(tgen):
r1 = tgen.add_router("r1")
peer1 = tgen.add_exabgp_peer("peer1", ip="10.0.0.2", defaultRoute="via 10.0.0.1")
switch = tgen.add_switch("s1")
switch.add_link(r1)
switch.add_link(peer1)
def setup_module(mod):
tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router = tgen.gears["r1"]
router.load_config(TopoRouter.RD_ZEBRA, os.path.join(CWD, "r1/zebra.conf"))
router.load_config(TopoRouter.RD_BGP, os.path.join(CWD, "r1/bgpd.conf"))
router.start()
peer = tgen.gears["peer1"]
peer.start(os.path.join(CWD, "peer1"), os.path.join(CWD, "exabgp.env"))
def teardown_module(mod):
tgen = get_topogen()
tgen.stop_topology()
def test_bgp_aggregator_zero():
tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
def _bgp_converge():
output = json.loads(
tgen.gears["r1"].vtysh_cmd("show ip bgp neighbor 10.0.0.2 json")
)
expected = {
"10.0.0.2": {
"bgpState": "Established",
"addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 2}},
}
}
return topotest.json_cmp(output, expected)
test_func = functools.partial(_bgp_converge)
success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
assert result is None, 'Failed bgp convergence in "{}"'.format(tgen.gears["r1"])
def _bgp_has_correct_aggregator_route_with_asn_0():
output = json.loads(
tgen.gears["r1"].vtysh_cmd("show ip bgp 192.168.100.101/32 json")
)
if "aggregatorAs" in output["paths"][0].keys():
return False
else:
return True
assert (
_bgp_has_correct_aggregator_route_with_asn_0() is True
), 'Aggregator AS attribute with ASN 0 found in "{}"'.format(tgen.gears["r1"])
def _bgp_has_correct_aggregator_route_with_good_asn():
output = json.loads(
tgen.gears["r1"].vtysh_cmd("show ip bgp 192.168.100.102/32 json")
)
expected = {"paths": [{"aggregatorAs": 65001, "aggregatorId": "10.0.0.2"}]}
return topotest.json_cmp(output, expected)
test_func = functools.partial(_bgp_has_correct_aggregator_route_with_good_asn)
success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
assert result is None, 'Aggregator AS attribute not found in "{}"'.format(
tgen.gears["r1"]
)
def test_memory_leak():
"Run the memory leak test and report results."
tgen = get_topogen()
if not tgen.is_memleak_enabled():
pytest.skip("Memory leak test/report is disabled")
tgen.report_memory_leaks()
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
|
StarcoderdataPython
|
12820269
|
<gh_stars>0
import os
from configparser import ConfigParser
from .database import Database
from create_databases import GuildSettings
class GuildSettingsModel(Database):
def __init__(self):
super().__init__()
async def add(self, guild_id: int, server_name: str, region: str, owner_id: int):
new = GuildSettings(
guild_id=guild_id,
server_name=server_name,
prefix=os.environ["PREFIX"],
region=region,
owner_id=owner_id,
is_premium=False
)
self.session.add(new)
return self.session.commit()
async def get_by_id(self, guild_id:int):
return self.session.query(GuildSettings).filter_by(guild_id=guild_id).one_or_none()
|
StarcoderdataPython
|
289547
|
<gh_stars>0
# Made by <NAME>
# June 2016
# Python 3_5
import os
import sys
import csv
import argparse as arg
from lxml import etree
from io import StringIO
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from selenium.webdriver.support import expected_conditions as EC
def init_driver():
driver = webdriver.Firefox()
driver.wait = WebDriverWait(driver,1)
return driver
def lookup(driver, query):
driver.get("http://www.opensecrets.org/indivs/")
try:
button = driver.wait.until(EC.element_to_be_clickable(
(By.ID, "name")))
button.click()
_input = driver.wait.until(EC.element_to_be_clickable(
(By.ID, "name")))
_input.send_keys(query)
_id = driver.wait.until(EC.element_to_be_clickable(
(By.NAME, "submit")))
_id.click()
except TimeoutException:
print("Box or Button not found in google.com")
def updateDriver(driver,root, name):
isEnd = True
try:
for child in root:
url = child.xpath("@href")
if len(url) == 1:
isEnd = True
text = child.text.strip()
if text.strip() == "Next":
isEnd = False
print(url[0])
print(child.text)
url = "http://www.opensecrets.org/indivs/"+url[0]
try:
driver.get(url)
except:
print ('%s Not Found' %name)
driver.quit()
except:
print ('%s Not Found' %name)
isEnd = True
return isEnd
def getXML(driver):
parser = etree.HTMLParser()
try:
html = driver.execute_script("return document.documentElement.outerHTML")
tree = etree.parse(StringIO(html), parser)
root = tree.find("//*[@class='pageCtrl']")
except NoSuchElementException:
driver.quit()
print("Name not found")
sys.exit(0)
return root
def scrap(driver):
driver.current_url #Getting current url
data = [] #Container for table data
for tr in driver.find_elements_by_xpath('//table[@id="top"]//tr'): #loop table id top
tds = tr.find_elements_by_tag_name('td')
if tds:
data.append([td.text for td in tds])
return data
def iter_scrap(driver,name):
container = []
endPage = False
while not endPage:
root = getXML(driver)
print( driver.current_url )
container.append(scrap(driver))
endPage = updateDriver(driver,root,name)
print( 'Is end page? %s' %endPage)
return container
def flatten(xs):
result = []
if isinstance(xs, (list, tuple)):
for x in xs:
result.extend(flatten(x))
else:
result.append(xs)
return result
def save_file(name,data):
name = name.replace(' ','_')
save_root = "./save"
if not os.path.exists(save_root):
os.makedirs(save_root)
name = name+".csv"
csvfile = "./save/"+name
with open(csvfile, "w") as output:
for infos in data:
for info in infos:
info[0] = info[0].replace('\n',' ')
writer = csv.writer(output, lineterminator='\n')
writer.writerows(infos)
    print('Saved as ' + name)
def joinName(arg):
len_ = len(arg)
name =''
for i in range(1,len_):
name = name + arg[i]
name = name + ' '
return name
def importCEO(fileName):
with open(fileName) as csvfile:
con = []
spamreader = csv.reader(csvfile, delimiter='\n')
for row in spamreader:
con.append(row[0])
return con
if __name__ == "__main__":
parser = arg.ArgumentParser()
parser.add_argument('--filename', default = '', type = str, help = 'Name of excel file to read')
args = parser.parse_args()
con = importCEO(args.filename)
for co in con:
driver = init_driver()
name = co
lookup(driver, name)
data = iter_scrap(driver,name)
driver.quit()
save_file(name,data)
|
StarcoderdataPython
|
309127
|
<reponame>weiweitoo/easy21-rl
# Easy21 Environment
import numpy as np
class Easy21():
''' Environment Easy21 '''
def __init__ (self):
self.dealer_threshold = 17;
self.min_card_value, self.max_card_value = 1, 10;
self.game_lower_bound, self.game_upper_bound = 1, 21;
def drawCard (self):
value = np.random.randint(self.min_card_value, self.max_card_value+1)
if np.random.random() <= 1/3:
return -value
else:
return value
def initGame (self):
return (np.random.randint(self.min_card_value, self.max_card_value+1),
np.random.randint(self.min_card_value, self.max_card_value+1))
def actionSpace(self):
'''
available action for this environment, 0 stands for hit,1 stands for stick
'''
return (0,1);
def isBust (self,val):
if(val < self.game_lower_bound or val > self.game_upper_bound):
return True;
return False;
def dealerTakeCard(self,dealerVal):
while(dealerVal < self.dealer_threshold and not self.isBust(dealerVal)):
dealerVal += self.drawCard();
return dealerVal;
def step (self,dealerVal,playerVal,action):
'''
Take step
Return next state and reward and whether the espisode is terminate
'''
terminate = False;
reward = 0;
        # Hit (action 0): the player draws another card
if (action == 0):
playerVal += self.drawCard();
if (self.isBust(playerVal)):
reward = -1;
terminate = True;
        # Stick (action 1): the dealer plays out and the episode ends
elif (action == 1):
terminate = True;
dealerVal = self.dealerTakeCard(dealerVal);
# Check what rewards should get
if (self.isBust(dealerVal)):
reward = 1;
elif (playerVal == dealerVal):
reward = 0;
else:
reward = -1 if (playerVal < dealerVal) else 1;
return dealerVal,playerVal,reward,terminate;
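if __name__ == "__main__":
    # Minimal random-policy rollout (illustrative usage only, not part of the original file).
    env = Easy21()
    dealer, player = env.initGame()
    terminate = False
    reward = 0
    while not terminate:
        action = np.random.choice(env.actionSpace())  # 0 = hit, 1 = stick
        dealer, player, reward, terminate = env.step(dealer, player, action)
    print("dealer: %d, player: %d, reward: %d" % (dealer, player, reward))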
|
StarcoderdataPython
|
389005
|
# -*- coding: utf-8 -*-
"""
dicom2nifti
@author: abrys
"""
import os
import shutil
import tempfile
import unittest
import nibabel
import numpy
import tests.test_data as test_data
import dicom2nifti.convert_ge as convert_ge
from dicom2nifti.common import read_dicom_directory
from tests.test_tools import assert_compare_nifti, assert_compare_bval, assert_compare_bvec, ground_thruth_filenames
class TestConversionGE(unittest.TestCase):
def test_diffusion_images(self):
tmp_output_dir = tempfile.mkdtemp()
try:
results = convert_ge.dicom_to_nifti(read_dicom_directory(test_data.GE_DTI),
None)
self.assertTrue(results.get('NII_FILE') is None)
self.assertTrue(isinstance(results['NII'], nibabel.nifti1.Nifti1Image))
self.assertTrue(results.get('BVAL_FILE') is None)
self.assertTrue(isinstance(results['BVAL'], numpy.ndarray))
self.assertTrue(results.get('BVEC_FILE') is None)
self.assertTrue(isinstance(results['BVEC'], numpy.ndarray))
results = convert_ge.dicom_to_nifti(read_dicom_directory(test_data.GE_DTI),
os.path.join(tmp_output_dir, 'test.nii.gz'))
assert_compare_nifti(results['NII_FILE'],
ground_thruth_filenames(test_data.GE_DTI)[0])
self.assertTrue(isinstance(results['NII'], nibabel.nifti1.Nifti1Image))
assert_compare_bval(results['BVAL_FILE'],
ground_thruth_filenames(test_data.GE_DTI)[2])
self.assertTrue(isinstance(results['BVAL'], numpy.ndarray))
assert_compare_bval(results['BVEC_FILE'],
ground_thruth_filenames(test_data.GE_DTI)[3])
self.assertTrue(isinstance(results['BVEC'], numpy.ndarray))
            results = convert_ge.dicom_to_nifti(read_dicom_directory(test_data.GE_DTI_IMPLICIT),
os.path.join(tmp_output_dir, 'test.nii.gz'))
assert_compare_nifti(results['NII_FILE'],
ground_thruth_filenames(test_data.GE_DTI_IMPLICIT)[0])
self.assertTrue(isinstance(results['NII'], nibabel.nifti1.Nifti1Image))
assert_compare_bval(results['BVAL_FILE'],
ground_thruth_filenames(test_data.GE_DTI_IMPLICIT)[2])
self.assertTrue(isinstance(results['BVAL'], numpy.ndarray))
assert_compare_bval(results['BVEC_FILE'],
ground_thruth_filenames(test_data.GE_DTI_IMPLICIT)[3])
self.assertTrue(isinstance(results['BVEC'], numpy.ndarray))
finally:
shutil.rmtree(tmp_output_dir)
def test_diffusion_images_old(self):
tmp_output_dir = tempfile.mkdtemp()
try:
results = convert_ge.dicom_to_nifti(read_dicom_directory(test_data.GE_DTI_OLD),
os.path.join(tmp_output_dir, 'test.nii.gz'))
assert_compare_nifti(results['NII_FILE'],
ground_thruth_filenames(test_data.GE_DTI_OLD)[0])
self.assertTrue(isinstance(results['NII'], nibabel.nifti1.Nifti1Image))
finally:
shutil.rmtree(tmp_output_dir)
def test_4d(self):
tmp_output_dir = tempfile.mkdtemp()
try:
results = convert_ge.dicom_to_nifti(read_dicom_directory(test_data.GE_FMRI),
os.path.join(tmp_output_dir, 'test.nii.gz'))
assert_compare_nifti(results['NII_FILE'],
ground_thruth_filenames(test_data.GE_FMRI)[0])
self.assertTrue(isinstance(results['NII'], nibabel.nifti1.Nifti1Image))
results = convert_ge.dicom_to_nifti(read_dicom_directory(test_data.GE_FMRI_IMPLICIT),
os.path.join(tmp_output_dir, 'test.nii.gz'))
assert_compare_nifti(results['NII_FILE'],
ground_thruth_filenames(test_data.GE_FMRI_IMPLICIT)[0])
self.assertTrue(isinstance(results['NII'], nibabel.nifti1.Nifti1Image))
finally:
shutil.rmtree(tmp_output_dir)
def test_anatomical(self):
tmp_output_dir = tempfile.mkdtemp()
try:
results = convert_ge.dicom_to_nifti(read_dicom_directory(test_data.GE_ANATOMICAL),
None)
self.assertTrue(results.get('NII_FILE') is None)
self.assertTrue(isinstance(results['NII'], nibabel.nifti1.Nifti1Image))
results = convert_ge.dicom_to_nifti(read_dicom_directory(test_data.GE_ANATOMICAL),
os.path.join(tmp_output_dir, 'test.nii.gz'))
assert_compare_nifti(results['NII_FILE'],
ground_thruth_filenames(test_data.GE_ANATOMICAL)[0])
self.assertTrue(isinstance(results['NII'], nibabel.nifti1.Nifti1Image))
results = convert_ge.dicom_to_nifti(read_dicom_directory(test_data.GE_ANATOMICAL_IMPLICIT),
os.path.join(tmp_output_dir, 'test.nii.gz'))
assert_compare_nifti(results['NII_FILE'],
ground_thruth_filenames(test_data.GE_ANATOMICAL_IMPLICIT)[0])
self.assertTrue(isinstance(results['NII'], nibabel.nifti1.Nifti1Image))
finally:
shutil.rmtree(tmp_output_dir)
def test_is_ge(self):
assert not convert_ge.is_ge(read_dicom_directory(test_data.SIEMENS_ANATOMICAL))
assert convert_ge.is_ge(read_dicom_directory(test_data.GE_ANATOMICAL))
assert not convert_ge.is_ge(read_dicom_directory(test_data.PHILIPS_ANATOMICAL))
assert not convert_ge.is_ge(read_dicom_directory(test_data.GENERIC_ANATOMICAL))
assert not convert_ge.is_ge(read_dicom_directory(test_data.HITACHI_ANATOMICAL))
def test_is_4d(self):
diffusion_group = convert_ge._get_grouped_dicoms(read_dicom_directory(test_data.GE_DTI))
_4d_group = convert_ge._get_grouped_dicoms(read_dicom_directory(test_data.GE_FMRI))
anatomical_group = convert_ge._get_grouped_dicoms(read_dicom_directory(test_data.GE_ANATOMICAL))
self.assertTrue(convert_ge._is_4d(diffusion_group))
self.assertTrue(convert_ge._is_4d(_4d_group))
self.assertFalse(convert_ge._is_4d(anatomical_group))
def test_is_diffusion_imaging(self):
diffusion_group = convert_ge._get_grouped_dicoms(read_dicom_directory(test_data.GE_DTI))
_4d_group = convert_ge._get_grouped_dicoms(read_dicom_directory(test_data.GE_FMRI))
anatomical_group = convert_ge._get_grouped_dicoms(read_dicom_directory(test_data.GE_ANATOMICAL))
assert convert_ge._is_diffusion_imaging(diffusion_group)
assert not convert_ge._is_diffusion_imaging(_4d_group)
assert not convert_ge._is_diffusion_imaging(anatomical_group)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
9682406
|
<gh_stars>0
from losoto.h5parm import h5parm
import numpy as np
if __name__ == '__main__':
filename = 'debug_losoto.h5'
solset = 'testSolset'
H = h5parm(filename, readonly=False)
pols = ['xx','yy']
dirs = ['a','b']
ants = ['c','d']
times = np.array([0.,1.])
vals = np.ones((2,2,2,2))
H.makeSolset(solsetName=solset, addTables=True)
solset = H.getSolset(solset)
solset.makeSoltab('testSoltab', axesNames=['pol', 'dir', 'ant', 'time'],
axesVals=[pols, dirs, ants, times],
vals=vals, weights=np.ones_like(vals),
weightDtype='f64')
soltab = solset.getSoltab('testSoltab')
print(soltab)
|
StarcoderdataPython
|
19345
|
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Demo for 2D Optimal transport between empirical distributions
@author: rflamary
"""
import numpy as np
import matplotlib.pylab as pl
import ot
#%% parameters and data generation
n=20 # nb samples
mu_s=np.array([0,0])
cov_s=np.array([[1,0],[0,1]])
mu_t=np.array([4,4])
cov_t=np.array([[1,-.8],[-.8,1]])
xs=ot.datasets.get_2D_samples_gauss(n,mu_s,cov_s)
xt=ot.datasets.get_2D_samples_gauss(n,mu_t,cov_t)
a,b = ot.unif(n),ot.unif(n) # uniform distribution on samples
# loss matrix
M=ot.dist(xs,xt)
M/=M.max()
#%% plot samples
pl.figure(1)
pl.plot(xs[:,0],xs[:,1],'+b',label='Source samples')
pl.plot(xt[:,0],xt[:,1],'xr',label='Target samples')
pl.legend(loc=0)
pl.title('Source and target distributions')
pl.figure(2)
pl.imshow(M,interpolation='nearest')
pl.title('Cost matrix M')
#%% EMD
G0=ot.emd(a,b,M)
pl.figure(3)
pl.imshow(G0,interpolation='nearest')
pl.title('OT matrix G0')
pl.figure(4)
ot.plot.plot2D_samples_mat(xs,xt,G0,c=[.5,.5,1])
pl.plot(xs[:,0],xs[:,1],'+b',label='Source samples')
pl.plot(xt[:,0],xt[:,1],'xr',label='Target samples')
pl.legend(loc=0)
pl.title('OT matrix with samples')
#%% sinkhorn
# reg term
lambd=5e-3
Gs=ot.sinkhorn(a,b,M,lambd)
pl.figure(5)
pl.imshow(Gs,interpolation='nearest')
pl.title('OT matrix sinkhorn')
pl.figure(6)
ot.plot.plot2D_samples_mat(xs,xt,Gs,color=[.5,.5,1])
pl.plot(xs[:,0],xs[:,1],'+b',label='Source samples')
pl.plot(xt[:,0],xt[:,1],'xr',label='Target samples')
pl.legend(loc=0)
pl.title('OT matrix Sinkhorn with samples')
|
StarcoderdataPython
|
46404
|
<reponame>vishalnalwa/DAT210x-master---Old
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 19 17:14:36 2017
@author: m037382
"""
import pandas as pd
df = pd.read_html('http://www.espn.com/nhl/statistics/player/_/stat/points/sort/points/year/2015/seasontype/2')
df1 = pd.concat(df)
df1.columns = ['RK', 'PLAYER', 'TEAM', 'GP','G','A','PTS','+/-',' PIM','PTS/G','SOG','PCT','GWG','PP-G','PP-A','SH-G','SH-A']
df1= df1[df1.RK != "RK"]
df1= df1.drop('RK',axis=1)
df1=df1.dropna()
df1=df1.reset_index(drop=True)
print df1
print len(df1.PCT.unique())
# sum of games played for rows 15 and 16 (the scraped columns are parsed as strings)
print int(df1.loc[15, 'GP']) + int(df1.loc[16, 'GP'])
|
StarcoderdataPython
|
8125511
|
<filename>userbot/plugins/clock.py
# (c) @UniBorg
# Original written by @UniBorg edit by @INF1N17Y
from telethon import events
import asyncio
from collections import deque
from userbot.utils import admin_cmd
@borg.on(admin_cmd(pattern=r"clock"))
async def _(event):
if event.fwd_from:
return
deq = deque(list("🕛🕐🕑🕒🕓🕔🕕🕖🕗🕘🕙🕚"))
for _ in range(60):
await asyncio.sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
|
StarcoderdataPython
|
8054206
|
<gh_stars>0
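# Given two interior angles of a triangle (in degrees), return the third angle;
# for valid inputs a + b < 180, so abs(a + b - 180) equals 180 - a - b.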
def other_angle(a, b):
return abs(a + b - 180)
|
StarcoderdataPython
|
3275875
|
<reponame>utkarshdeorah/sympy<gh_stars>1-10
from sympy.core.numbers import E
from sympy.core.symbol import symbols
from sympy.functions.elementary.exponential import log
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.geometry.curve import Curve
from sympy.integrals.integrals import line_integrate
s, t, x, y, z = symbols('s,t,x,y,z')
def test_lineintegral():
c = Curve([E**t + 1, E**t - 1], (t, 0, log(2)))
assert line_integrate(x + y, c, [x, y]) == 3*sqrt(2)
|
StarcoderdataPython
|
4880143
|
""" Build file for cython extensions """
from distutils.core import Extension
import numpy
from Cython.Build import cythonize
_EXTENSIONS = [
Extension("deepgrp.mss",
sources=["deepgrp/_mss/pymss.pyx", "./deepgrp/_mss/mss.c"],
include_dirs=[numpy.get_include()] + ["./deepgrp"]),
Extension("deepgrp.sequence",
sources=["deepgrp/sequence.pyx","deepgrp/maxcalc.c"],
include_dirs=[numpy.get_include()] + ["./deepgrp"]),
]
def build(setup_kwargs):
"""
This function is mandatory in order to build the extensions.
"""
setup_kwargs.update({'ext_modules': cythonize(_EXTENSIONS)})
|
StarcoderdataPython
|
12859433
|
# generate data from bag images
from PIL import Image
from pathlib import Path
import os, glob # manipulate file or directory
import numpy as np
class DataArrangement(object):
def __init__(self):
self.path = Path(__file__).parent
self.current_directories = ['not_traking', 'traking']
self.X_not_traking = []
self.Y_not_traking = []
self.X_traking = []
self.Y_traking = []
def load_data(self):
for current_directory in self.current_directories:
print(current_directory) # not traking or traking
            # build the image directory path without mutating self.path across iterations
            base_path = self.path / '../../video_to_image/{}'.format(current_directory)
            directories = os.listdir(base_path)
            for i, directory in enumerate(directories):
                print('{}, {}'.format(i, directory))
                files = glob.glob(str(base_path.resolve()) + '/{}/*.jpg'.format(directory))
for j, file in enumerate(files):
image = Image.open(file)
image = image.convert('RGB')
# image = image.resize(50, 50)
data = np.asarray(image)
print('{} - {}'.format(i, j))
if current_directory == 'not_traking': # section off files by directory name
self.X_not_traking.append(data)
self.Y_not_traking.append(i)
else:
self.X_traking.append(data)
self.Y_traking.append(i)
return np.array(self.X_not_traking), np.array(self.Y_not_traking), \
np.array(self.X_traking), np.array(self.Y_traking)
if __name__ == '__main__':
DA = DataArrangement()
X_not_traking, Y_not_traking, X_traking, Y_traking = DA.load_data()
|
StarcoderdataPython
|
12853229
|
'''
Test deleting SG with 2 attached NICs.
@author: Youyk
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.zstack_test.zstack_test_security_group as test_sg_header
import zstackwoodpecker.zstack_test.zstack_test_sg_vm as test_sg_vm_header
import apibinding.inventory as inventory
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
Port = test_state.Port
def test():
'''
Test image requirements:
1. have nc to check the network port
2. have "nc" to open any port
3. it doesn't include a default firewall
    VR image is a good candidate to be the guest image.
'''
test_util.test_dsc("Create 3 VMs with vlan VR L3 network and using VR image.")
vm1 = test_stub.create_sg_vm()
test_obj_dict.add_vm(vm1)
vm2 = test_stub.create_sg_vm()
test_obj_dict.add_vm(vm2)
vm1.check()
vm2.check()
test_util.test_dsc("Create security groups.")
sg1 = test_stub.create_sg()
sg_vm = test_sg_vm_header.ZstackTestSgVm()
test_obj_dict.set_sg_vm(sg_vm)
l3_uuid = vm1.vm.vmNics[0].l3NetworkUuid
vr_vm = test_lib.lib_find_vr_by_vm(vm1.vm)[0]
vm2_ip = test_lib.lib_get_vm_nic_by_l3(vm2.vm, l3_uuid).ip
rule1 = test_lib.lib_gen_sg_rule(Port.rule1_ports, inventory.TCP, inventory.INGRESS, vm2_ip)
rule2 = test_lib.lib_gen_sg_rule(Port.rule2_ports, inventory.TCP, inventory.INGRESS, vm2_ip)
rule3 = test_lib.lib_gen_sg_rule(Port.rule3_ports, inventory.TCP, inventory.INGRESS, vm2_ip)
sg1.add_rule([rule1])
sg1.add_rule([rule2])
sg1.add_rule([rule3])
sg_vm.check()
nic_uuid1 = vm1.vm.vmNics[0].uuid
nic_uuid2 = vm2.vm.vmNics[0].uuid
# nic_uuid3 = vm2.vm.vmNics[0].uuid
vm1_nics = (nic_uuid1, vm1)
vm2_nics = (nic_uuid2, vm2)
# vm3_nics = (nic_uuid3, vm3)
#test_stub.lib_add_sg_rules(sg1.uuid, [rule0, rule1])
test_util.test_dsc("Add nic to security group 1.")
test_util.test_dsc("Allowed ingress ports: %s" % test_stub.rule1_ports)
#sg_vm.attach(sg1, [vm1_nics, vm2_nics, vm3_nics])
sg_vm.attach(sg1, [vm1_nics, vm2_nics])
sg_vm.check()
sg_vm.delete_sg(sg1)
sg_vm.check()
vm1.destroy()
test_obj_dict.rm_vm(vm1)
vm2.destroy()
test_obj_dict.rm_vm(vm2)
test_util.test_pass('Delete Security Group with 2 attached NICs Success')
#Will be called only if exception happens in test().
def error_cleanup():
test_lib.lib_error_cleanup(test_obj_dict)
|
StarcoderdataPython
|
1902591
|
<filename>backend/apps/system/models.py
from datetime import datetime
from sqlalchemy import (
Table, Column, Integer, String, DateTime, ForeignKey, JSON
)
from utils.database import metadata
settings = Table(
"system_settings",
metadata,
Column("id", Integer, primary_key=True),
Column("label", String(length=100), nullable=False),
Column("key", String(length=60), nullable=False, unique=True),
# This is the difference of this version from its previous one, for the same `text_id` and `locale`
Column("value", JSON, nullable=False),
Column("user_id", Integer, ForeignKey("user_user.id"), nullable=True),
Column("created_at", DateTime, nullable=False, default=datetime.utcnow),
)
"""
locale = Table(
"system_locale",
metadata,
Column("id", Integer, primary_key=True),
Column("label", String(length=100), nullable=False),
Column("locale", String(length=40), nullable=False),
Column("user_id", Integer, ForeignKey("user_user.id"), nullable=True),
Column("created_at", DateTime, nullable=False, default=datetime.utcnow),
)
"""
|
StarcoderdataPython
|
6578828
|
<reponame>farhadinima75/FireHR<gh_stars>10-100
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/02_models.ipynb (unless otherwise specified).
__all__ = ['expand_filter', 'ChLin', 'FireHR', 'download_model_weights', 'load_pretrained_model']
# Cell
import os, sys
import warnings
import requests
from fastai.vision.all import *
import FireHR
# Cell
def expand_filter(x, ks=3):
with torch.no_grad():
k5 = nn.Conv2d(1, 1, kernel_size=ks, padding=ks//2, padding_mode='reflect', bias=False)
k5.weight.data = torch.ones(1, 1, ks, ks)/(ks*ks)
xbuffer = k5(x[:,-1].unsqueeze(1))
x = torch.cat([x[:,:-1], xbuffer], dim=1)
return x
class ChLin(Module):
def __init__(self, ni, nf):
self.chlin = nn.Sequential(
nn.Linear(ni, nf, bias=False), nn.BatchNorm1d(nf), nn.ReLU(inplace=True))
def forward(self, x):
sh = x.shape
x = x.permute(0,2,3,1).contiguous().view(sh[0]*sh[2]*sh[3], sh[1])
x = self.chlin(x).view(sh[0],sh[2],sh[3], -1).permute(0,3,1,2).contiguous()
return x
class FireHR(Module):
def __init__(self, ni, nc):
self.conv = ConvLayer(1, 8)
self.chlin = nn.Sequential(ChLin(ni+8, 128), ChLin(128, 64))
self.middleconv = nn.Sequential(ConvLayer(64, 128), ConvLayer(128, 64))
self.finalconv = nn.Conv2d(64, nc, kernel_size=1, bias=True)
def forward(self, x):
x = torch.cat([x[:,:-1], self.conv(x[:,-1].unsqueeze(1))], dim=1)
x = self.chlin(x)
x = self.middleconv(x)
return self.finalconv(x)
# Cell
def download_model_weights(weight_file='model512.pth'):
"""Download model weights if they don't exist yet on ~/.firehr."""
path_save = Path(os.path.expandvars('$HOME'))/'.firehr'
path_save.mkdir(exist_ok=True)
file_save = path_save/weight_file
if not file_save.is_file():
print(f'Downloading model weights {weight_file}.')
url = 'https://github.com/mnpinto/FireHR_weights/raw/main/model512.pth'
file = requests.get(url)
open(str(file_save), 'wb').write(file.content)
else:
print(f'Using local model weights {file_save}')
# Cell
_WEIGHTS = Path(os.path.expandvars('$HOME'))/'.firehr/model512.pth'
def load_pretrained_model(weights=_WEIGHTS, ni=6, nc=1, half_precision=True, gpu=True):
download_model_weights()
model = FireHR(ni,nc)
st = torch.load(weights, map_location=torch.device('cpu'))
model.load_state_dict(st['model'])
if gpu:
if half_precision: model = model.half()
if torch.cuda.is_available():
model = model.cuda()
else:
warnings.warn('GPU is not available. torch.cuda.is_available() returned False.')
return model
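# A hedged usage sketch (illustrative, not part of the original module): build
# the model on CPU; the first call downloads the pretrained weights to ~/.firehr.
if __name__ == '__main__':
    model = load_pretrained_model(half_precision=False, gpu=False)
    print(model)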
|
StarcoderdataPython
|
4854721
|
from __future__ import absolute_import
from __future__ import print_function
import veriloggen
import dataflow_lut
expected_verilog = """
module test
(
);
reg CLK;
reg RST;
reg [32-1:0] xdata;
reg xvalid;
wire xready;
reg [32-1:0] ydata;
reg yvalid;
wire yready;
wire [32-1:0] zdata;
wire zvalid;
reg zready;
main
uut
(
.CLK(CLK),
.RST(RST),
.xdata(xdata),
.xvalid(xvalid),
.xready(xready),
.ydata(ydata),
.yvalid(yvalid),
.yready(yready),
.zdata(zdata),
.zvalid(zvalid),
.zready(zready)
);
reg reset_done;
initial begin
$dumpfile("uut.vcd");
$dumpvars(0, uut);
end
initial begin
CLK = 0;
forever begin
#5 CLK = !CLK;
end
end
initial begin
RST = 0;
reset_done = 0;
xdata = 0;
xvalid = 0;
ydata = 0;
yvalid = 0;
zready = 0;
#100;
RST = 1;
#100;
RST = 0;
#1000;
reset_done = 1;
@(posedge CLK);
#1;
#10000;
$finish;
end
reg [32-1:0] xfsm;
localparam xfsm_init = 0;
reg [32-1:0] _tmp_0;
localparam xfsm_1 = 1;
localparam xfsm_2 = 2;
localparam xfsm_3 = 3;
localparam xfsm_4 = 4;
localparam xfsm_5 = 5;
localparam xfsm_6 = 6;
localparam xfsm_7 = 7;
localparam xfsm_8 = 8;
localparam xfsm_9 = 9;
localparam xfsm_10 = 10;
localparam xfsm_11 = 11;
localparam xfsm_12 = 12;
localparam xfsm_13 = 13;
localparam xfsm_14 = 14;
localparam xfsm_15 = 15;
localparam xfsm_16 = 16;
localparam xfsm_17 = 17;
localparam xfsm_18 = 18;
localparam xfsm_19 = 19;
localparam xfsm_20 = 20;
localparam xfsm_21 = 21;
localparam xfsm_22 = 22;
localparam xfsm_23 = 23;
localparam xfsm_24 = 24;
always @(posedge CLK) begin
if(RST) begin
xfsm <= xfsm_init;
_tmp_0 <= 0;
end else begin
case(xfsm)
xfsm_init: begin
xvalid <= 0;
if(reset_done) begin
xfsm <= xfsm_1;
end
end
xfsm_1: begin
xfsm <= xfsm_2;
end
xfsm_2: begin
xfsm <= xfsm_3;
end
xfsm_3: begin
xfsm <= xfsm_4;
end
xfsm_4: begin
xfsm <= xfsm_5;
end
xfsm_5: begin
xfsm <= xfsm_6;
end
xfsm_6: begin
xfsm <= xfsm_7;
end
xfsm_7: begin
xfsm <= xfsm_8;
end
xfsm_8: begin
xfsm <= xfsm_9;
end
xfsm_9: begin
xfsm <= xfsm_10;
end
xfsm_10: begin
xfsm <= xfsm_11;
end
xfsm_11: begin
xvalid <= 1;
xfsm <= xfsm_12;
end
xfsm_12: begin
if(xready) begin
xdata <= xdata + 1;
end
if(xready) begin
_tmp_0 <= _tmp_0 + 1;
end
if((_tmp_0 == 5) && xready) begin
xvalid <= 0;
end
if((_tmp_0 == 5) && xready) begin
xfsm <= xfsm_13;
end
end
xfsm_13: begin
xfsm <= xfsm_14;
end
xfsm_14: begin
xfsm <= xfsm_15;
end
xfsm_15: begin
xfsm <= xfsm_16;
end
xfsm_16: begin
xfsm <= xfsm_17;
end
xfsm_17: begin
xfsm <= xfsm_18;
end
xfsm_18: begin
xfsm <= xfsm_19;
end
xfsm_19: begin
xfsm <= xfsm_20;
end
xfsm_20: begin
xfsm <= xfsm_21;
end
xfsm_21: begin
xfsm <= xfsm_22;
end
xfsm_22: begin
xfsm <= xfsm_23;
end
xfsm_23: begin
xvalid <= 1;
if(xready) begin
xdata <= xdata + 1;
end
if(xready) begin
_tmp_0 <= _tmp_0 + 1;
end
if((_tmp_0 == 10) && xready) begin
xvalid <= 0;
end
if((_tmp_0 == 10) && xready) begin
xfsm <= xfsm_24;
end
end
endcase
end
end
reg [32-1:0] yfsm;
localparam yfsm_init = 0;
reg [32-1:0] _tmp_1;
localparam yfsm_1 = 1;
localparam yfsm_2 = 2;
localparam yfsm_3 = 3;
localparam yfsm_4 = 4;
localparam yfsm_5 = 5;
localparam yfsm_6 = 6;
localparam yfsm_7 = 7;
localparam yfsm_8 = 8;
localparam yfsm_9 = 9;
localparam yfsm_10 = 10;
localparam yfsm_11 = 11;
localparam yfsm_12 = 12;
localparam yfsm_13 = 13;
localparam yfsm_14 = 14;
localparam yfsm_15 = 15;
localparam yfsm_16 = 16;
localparam yfsm_17 = 17;
localparam yfsm_18 = 18;
localparam yfsm_19 = 19;
localparam yfsm_20 = 20;
localparam yfsm_21 = 21;
localparam yfsm_22 = 22;
localparam yfsm_23 = 23;
localparam yfsm_24 = 24;
localparam yfsm_25 = 25;
localparam yfsm_26 = 26;
localparam yfsm_27 = 27;
localparam yfsm_28 = 28;
localparam yfsm_29 = 29;
localparam yfsm_30 = 30;
localparam yfsm_31 = 31;
localparam yfsm_32 = 32;
localparam yfsm_33 = 33;
localparam yfsm_34 = 34;
localparam yfsm_35 = 35;
localparam yfsm_36 = 36;
localparam yfsm_37 = 37;
localparam yfsm_38 = 38;
localparam yfsm_39 = 39;
localparam yfsm_40 = 40;
localparam yfsm_41 = 41;
localparam yfsm_42 = 42;
localparam yfsm_43 = 43;
localparam yfsm_44 = 44;
always @(posedge CLK) begin
if(RST) begin
yfsm <= yfsm_init;
_tmp_1 <= 0;
end else begin
case(yfsm)
yfsm_init: begin
yvalid <= 0;
if(reset_done) begin
yfsm <= yfsm_1;
end
end
yfsm_1: begin
yfsm <= yfsm_2;
end
yfsm_2: begin
yfsm <= yfsm_3;
end
yfsm_3: begin
yfsm <= yfsm_4;
end
yfsm_4: begin
yfsm <= yfsm_5;
end
yfsm_5: begin
yfsm <= yfsm_6;
end
yfsm_6: begin
yfsm <= yfsm_7;
end
yfsm_7: begin
yfsm <= yfsm_8;
end
yfsm_8: begin
yfsm <= yfsm_9;
end
yfsm_9: begin
yfsm <= yfsm_10;
end
yfsm_10: begin
yfsm <= yfsm_11;
end
yfsm_11: begin
yfsm <= yfsm_12;
end
yfsm_12: begin
yfsm <= yfsm_13;
end
yfsm_13: begin
yfsm <= yfsm_14;
end
yfsm_14: begin
yfsm <= yfsm_15;
end
yfsm_15: begin
yfsm <= yfsm_16;
end
yfsm_16: begin
yfsm <= yfsm_17;
end
yfsm_17: begin
yfsm <= yfsm_18;
end
yfsm_18: begin
yfsm <= yfsm_19;
end
yfsm_19: begin
yfsm <= yfsm_20;
end
yfsm_20: begin
yfsm <= yfsm_21;
end
yfsm_21: begin
yvalid <= 1;
yfsm <= yfsm_22;
end
yfsm_22: begin
if(yready) begin
ydata <= ydata + 2;
end
if(yready) begin
_tmp_1 <= _tmp_1 + 1;
end
if((_tmp_1 == 5) && yready) begin
yvalid <= 0;
end
if((_tmp_1 == 5) && yready) begin
yfsm <= yfsm_23;
end
end
yfsm_23: begin
yfsm <= yfsm_24;
end
yfsm_24: begin
yfsm <= yfsm_25;
end
yfsm_25: begin
yfsm <= yfsm_26;
end
yfsm_26: begin
yfsm <= yfsm_27;
end
yfsm_27: begin
yfsm <= yfsm_28;
end
yfsm_28: begin
yfsm <= yfsm_29;
end
yfsm_29: begin
yfsm <= yfsm_30;
end
yfsm_30: begin
yfsm <= yfsm_31;
end
yfsm_31: begin
yfsm <= yfsm_32;
end
yfsm_32: begin
yfsm <= yfsm_33;
end
yfsm_33: begin
yfsm <= yfsm_34;
end
yfsm_34: begin
yfsm <= yfsm_35;
end
yfsm_35: begin
yfsm <= yfsm_36;
end
yfsm_36: begin
yfsm <= yfsm_37;
end
yfsm_37: begin
yfsm <= yfsm_38;
end
yfsm_38: begin
yfsm <= yfsm_39;
end
yfsm_39: begin
yfsm <= yfsm_40;
end
yfsm_40: begin
yfsm <= yfsm_41;
end
yfsm_41: begin
yfsm <= yfsm_42;
end
yfsm_42: begin
yfsm <= yfsm_43;
end
yfsm_43: begin
yvalid <= 1;
if(yready) begin
ydata <= ydata + 2;
end
if(yready) begin
_tmp_1 <= _tmp_1 + 1;
end
if((_tmp_1 == 10) && yready) begin
yvalid <= 0;
end
if((_tmp_1 == 10) && yready) begin
yfsm <= yfsm_44;
end
end
endcase
end
end
reg [32-1:0] zfsm;
localparam zfsm_init = 0;
localparam zfsm_1 = 1;
localparam zfsm_2 = 2;
localparam zfsm_3 = 3;
localparam zfsm_4 = 4;
localparam zfsm_5 = 5;
localparam zfsm_6 = 6;
localparam zfsm_7 = 7;
localparam zfsm_8 = 8;
always @(posedge CLK) begin
if(RST) begin
zfsm <= zfsm_init;
end else begin
case(zfsm)
zfsm_init: begin
zready <= 0;
if(reset_done) begin
zfsm <= zfsm_1;
end
end
zfsm_1: begin
zfsm <= zfsm_2;
end
zfsm_2: begin
if(zvalid) begin
zready <= 1;
end
if(zvalid) begin
zfsm <= zfsm_3;
end
end
zfsm_3: begin
zready <= 0;
zfsm <= zfsm_4;
end
zfsm_4: begin
zready <= 0;
zfsm <= zfsm_5;
end
zfsm_5: begin
zready <= 0;
zfsm <= zfsm_6;
end
zfsm_6: begin
zready <= 0;
zfsm <= zfsm_7;
end
zfsm_7: begin
zready <= 0;
zfsm <= zfsm_8;
end
zfsm_8: begin
zfsm <= zfsm_2;
end
endcase
end
end
always @(posedge CLK) begin
if(reset_done) begin
if(xvalid && xready) begin
$display("xdata=%d", xdata);
end
if(yvalid && yready) begin
$display("ydata=%d", ydata);
end
if(zvalid && zready) begin
$display("zdata=%d", zdata);
end
end
end
endmodule
module main
(
input CLK,
input RST,
input [32-1:0] xdata,
input xvalid,
output xready,
input [32-1:0] ydata,
input yvalid,
output yready,
output [32-1:0] zdata,
output zvalid,
input zready
);
wire [32-1:0] _dataflow_lut_data_2;
reg _dataflow_lut_valid_2;
wire _dataflow_lut_ready_2;
wire [8-1:0] _dataflow_lut_lut_address_2;
assign _dataflow_lut_lut_address_2 = xdata;
_dataflow_lut_LUT_ROM_2
_dataflow_lut_lut_2
(
.CLK(CLK),
.addr(_dataflow_lut_lut_address_2),
.enable((_dataflow_lut_ready_2 || !_dataflow_lut_valid_2) && xready && xvalid),
.val(_dataflow_lut_data_2)
);
assign xready = (_dataflow_lut_ready_2 || !_dataflow_lut_valid_2) && xvalid;
reg [32-1:0] _dataflow__delay_data_4;
reg _dataflow__delay_valid_4;
wire _dataflow__delay_ready_4;
assign yready = (_dataflow__delay_ready_4 || !_dataflow__delay_valid_4) && yvalid;
reg [32-1:0] _dataflow_plus_data_3;
reg _dataflow_plus_valid_3;
wire _dataflow_plus_ready_3;
assign _dataflow_lut_ready_2 = (_dataflow_plus_ready_3 || !_dataflow_plus_valid_3) && (_dataflow_lut_valid_2 && _dataflow__delay_valid_4);
assign _dataflow__delay_ready_4 = (_dataflow_plus_ready_3 || !_dataflow_plus_valid_3) && (_dataflow_lut_valid_2 && _dataflow__delay_valid_4);
assign zdata = _dataflow_plus_data_3;
assign zvalid = _dataflow_plus_valid_3;
assign _dataflow_plus_ready_3 = zready;
always @(posedge CLK) begin
if(RST) begin
_dataflow_lut_valid_2 <= 0;
_dataflow__delay_data_4 <= 0;
_dataflow__delay_valid_4 <= 0;
_dataflow_plus_data_3 <= 0;
_dataflow_plus_valid_3 <= 0;
end else begin
if(_dataflow_lut_valid_2 && _dataflow_lut_ready_2) begin
_dataflow_lut_valid_2 <= 0;
end
if((_dataflow_lut_ready_2 || !_dataflow_lut_valid_2) && xready) begin
_dataflow_lut_valid_2 <= xvalid;
end
if((_dataflow__delay_ready_4 || !_dataflow__delay_valid_4) && yready && yvalid) begin
_dataflow__delay_data_4 <= ydata;
end
if(_dataflow__delay_valid_4 && _dataflow__delay_ready_4) begin
_dataflow__delay_valid_4 <= 0;
end
if((_dataflow__delay_ready_4 || !_dataflow__delay_valid_4) && yready) begin
_dataflow__delay_valid_4 <= yvalid;
end
if((_dataflow_plus_ready_3 || !_dataflow_plus_valid_3) && (_dataflow_lut_ready_2 && _dataflow__delay_ready_4) && (_dataflow_lut_valid_2 && _dataflow__delay_valid_4)) begin
_dataflow_plus_data_3 <= _dataflow_lut_data_2 + _dataflow__delay_data_4;
end
if(_dataflow_plus_valid_3 && _dataflow_plus_ready_3) begin
_dataflow_plus_valid_3 <= 0;
end
if((_dataflow_plus_ready_3 || !_dataflow_plus_valid_3) && (_dataflow_lut_ready_2 && _dataflow__delay_ready_4)) begin
_dataflow_plus_valid_3 <= _dataflow_lut_valid_2 && _dataflow__delay_valid_4;
end
end
end
endmodule
module _dataflow_lut_LUT_ROM_2
(
input CLK,
input [8-1:0] addr,
input enable,
output reg [32-1:0] val
);
always @(posedge CLK) begin
if(enable) begin
case(addr)
0: begin
val <= 0;
end
1: begin
val <= 1;
end
2: begin
val <= 4;
end
3: begin
val <= 9;
end
4: begin
val <= 16;
end
5: begin
val <= 25;
end
6: begin
val <= 36;
end
7: begin
val <= 49;
end
8: begin
val <= 64;
end
9: begin
val <= 81;
end
10: begin
val <= 100;
end
11: begin
val <= 121;
end
12: begin
val <= 144;
end
13: begin
val <= 169;
end
14: begin
val <= 196;
end
15: begin
val <= 225;
end
16: begin
val <= 256;
end
17: begin
val <= 289;
end
18: begin
val <= 324;
end
19: begin
val <= 361;
end
20: begin
val <= 400;
end
21: begin
val <= 441;
end
22: begin
val <= 484;
end
23: begin
val <= 529;
end
24: begin
val <= 576;
end
25: begin
val <= 625;
end
26: begin
val <= 676;
end
27: begin
val <= 729;
end
28: begin
val <= 784;
end
29: begin
val <= 841;
end
30: begin
val <= 900;
end
31: begin
val <= 961;
end
32: begin
val <= 1024;
end
33: begin
val <= 1089;
end
34: begin
val <= 1156;
end
35: begin
val <= 1225;
end
36: begin
val <= 1296;
end
37: begin
val <= 1369;
end
38: begin
val <= 1444;
end
39: begin
val <= 1521;
end
40: begin
val <= 1600;
end
41: begin
val <= 1681;
end
42: begin
val <= 1764;
end
43: begin
val <= 1849;
end
44: begin
val <= 1936;
end
45: begin
val <= 2025;
end
46: begin
val <= 2116;
end
47: begin
val <= 2209;
end
48: begin
val <= 2304;
end
49: begin
val <= 2401;
end
50: begin
val <= 2500;
end
51: begin
val <= 2601;
end
52: begin
val <= 2704;
end
53: begin
val <= 2809;
end
54: begin
val <= 2916;
end
55: begin
val <= 3025;
end
56: begin
val <= 3136;
end
57: begin
val <= 3249;
end
58: begin
val <= 3364;
end
59: begin
val <= 3481;
end
60: begin
val <= 3600;
end
61: begin
val <= 3721;
end
62: begin
val <= 3844;
end
63: begin
val <= 3969;
end
64: begin
val <= 4096;
end
65: begin
val <= 4225;
end
66: begin
val <= 4356;
end
67: begin
val <= 4489;
end
68: begin
val <= 4624;
end
69: begin
val <= 4761;
end
70: begin
val <= 4900;
end
71: begin
val <= 5041;
end
72: begin
val <= 5184;
end
73: begin
val <= 5329;
end
74: begin
val <= 5476;
end
75: begin
val <= 5625;
end
76: begin
val <= 5776;
end
77: begin
val <= 5929;
end
78: begin
val <= 6084;
end
79: begin
val <= 6241;
end
80: begin
val <= 6400;
end
81: begin
val <= 6561;
end
82: begin
val <= 6724;
end
83: begin
val <= 6889;
end
84: begin
val <= 7056;
end
85: begin
val <= 7225;
end
86: begin
val <= 7396;
end
87: begin
val <= 7569;
end
88: begin
val <= 7744;
end
89: begin
val <= 7921;
end
90: begin
val <= 8100;
end
91: begin
val <= 8281;
end
92: begin
val <= 8464;
end
93: begin
val <= 8649;
end
94: begin
val <= 8836;
end
95: begin
val <= 9025;
end
96: begin
val <= 9216;
end
97: begin
val <= 9409;
end
98: begin
val <= 9604;
end
99: begin
val <= 9801;
end
100: begin
val <= 10000;
end
101: begin
val <= 10201;
end
102: begin
val <= 10404;
end
103: begin
val <= 10609;
end
104: begin
val <= 10816;
end
105: begin
val <= 11025;
end
106: begin
val <= 11236;
end
107: begin
val <= 11449;
end
108: begin
val <= 11664;
end
109: begin
val <= 11881;
end
110: begin
val <= 12100;
end
111: begin
val <= 12321;
end
112: begin
val <= 12544;
end
113: begin
val <= 12769;
end
114: begin
val <= 12996;
end
115: begin
val <= 13225;
end
116: begin
val <= 13456;
end
117: begin
val <= 13689;
end
118: begin
val <= 13924;
end
119: begin
val <= 14161;
end
120: begin
val <= 14400;
end
121: begin
val <= 14641;
end
122: begin
val <= 14884;
end
123: begin
val <= 15129;
end
124: begin
val <= 15376;
end
125: begin
val <= 15625;
end
126: begin
val <= 15876;
end
127: begin
val <= 16129;
end
128: begin
val <= 16384;
end
129: begin
val <= 16641;
end
130: begin
val <= 16900;
end
131: begin
val <= 17161;
end
132: begin
val <= 17424;
end
133: begin
val <= 17689;
end
134: begin
val <= 17956;
end
135: begin
val <= 18225;
end
136: begin
val <= 18496;
end
137: begin
val <= 18769;
end
138: begin
val <= 19044;
end
139: begin
val <= 19321;
end
140: begin
val <= 19600;
end
141: begin
val <= 19881;
end
142: begin
val <= 20164;
end
143: begin
val <= 20449;
end
144: begin
val <= 20736;
end
145: begin
val <= 21025;
end
146: begin
val <= 21316;
end
147: begin
val <= 21609;
end
148: begin
val <= 21904;
end
149: begin
val <= 22201;
end
150: begin
val <= 22500;
end
151: begin
val <= 22801;
end
152: begin
val <= 23104;
end
153: begin
val <= 23409;
end
154: begin
val <= 23716;
end
155: begin
val <= 24025;
end
156: begin
val <= 24336;
end
157: begin
val <= 24649;
end
158: begin
val <= 24964;
end
159: begin
val <= 25281;
end
160: begin
val <= 25600;
end
161: begin
val <= 25921;
end
162: begin
val <= 26244;
end
163: begin
val <= 26569;
end
164: begin
val <= 26896;
end
165: begin
val <= 27225;
end
166: begin
val <= 27556;
end
167: begin
val <= 27889;
end
168: begin
val <= 28224;
end
169: begin
val <= 28561;
end
170: begin
val <= 28900;
end
171: begin
val <= 29241;
end
172: begin
val <= 29584;
end
173: begin
val <= 29929;
end
174: begin
val <= 30276;
end
175: begin
val <= 30625;
end
176: begin
val <= 30976;
end
177: begin
val <= 31329;
end
178: begin
val <= 31684;
end
179: begin
val <= 32041;
end
180: begin
val <= 32400;
end
181: begin
val <= 32761;
end
182: begin
val <= 33124;
end
183: begin
val <= 33489;
end
184: begin
val <= 33856;
end
185: begin
val <= 34225;
end
186: begin
val <= 34596;
end
187: begin
val <= 34969;
end
188: begin
val <= 35344;
end
189: begin
val <= 35721;
end
190: begin
val <= 36100;
end
191: begin
val <= 36481;
end
192: begin
val <= 36864;
end
193: begin
val <= 37249;
end
194: begin
val <= 37636;
end
195: begin
val <= 38025;
end
196: begin
val <= 38416;
end
197: begin
val <= 38809;
end
198: begin
val <= 39204;
end
199: begin
val <= 39601;
end
200: begin
val <= 40000;
end
201: begin
val <= 40401;
end
202: begin
val <= 40804;
end
203: begin
val <= 41209;
end
204: begin
val <= 41616;
end
205: begin
val <= 42025;
end
206: begin
val <= 42436;
end
207: begin
val <= 42849;
end
208: begin
val <= 43264;
end
209: begin
val <= 43681;
end
210: begin
val <= 44100;
end
211: begin
val <= 44521;
end
212: begin
val <= 44944;
end
213: begin
val <= 45369;
end
214: begin
val <= 45796;
end
215: begin
val <= 46225;
end
216: begin
val <= 46656;
end
217: begin
val <= 47089;
end
218: begin
val <= 47524;
end
219: begin
val <= 47961;
end
220: begin
val <= 48400;
end
221: begin
val <= 48841;
end
222: begin
val <= 49284;
end
223: begin
val <= 49729;
end
224: begin
val <= 50176;
end
225: begin
val <= 50625;
end
226: begin
val <= 51076;
end
227: begin
val <= 51529;
end
228: begin
val <= 51984;
end
229: begin
val <= 52441;
end
230: begin
val <= 52900;
end
231: begin
val <= 53361;
end
232: begin
val <= 53824;
end
233: begin
val <= 54289;
end
234: begin
val <= 54756;
end
235: begin
val <= 55225;
end
236: begin
val <= 55696;
end
237: begin
val <= 56169;
end
238: begin
val <= 56644;
end
239: begin
val <= 57121;
end
240: begin
val <= 57600;
end
241: begin
val <= 58081;
end
242: begin
val <= 58564;
end
243: begin
val <= 59049;
end
244: begin
val <= 59536;
end
245: begin
val <= 60025;
end
246: begin
val <= 60516;
end
247: begin
val <= 61009;
end
248: begin
val <= 61504;
end
249: begin
val <= 62001;
end
250: begin
val <= 62500;
end
251: begin
val <= 63001;
end
252: begin
val <= 63504;
end
253: begin
val <= 64009;
end
254: begin
val <= 64516;
end
255: begin
val <= 65025;
end
endcase
end
end
endmodule
"""
def test():
veriloggen.reset()
test_module = dataflow_lut.mkTest()
code = test_module.to_verilog()
from pyverilog.vparser.parser import VerilogParser
from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
parser = VerilogParser()
expected_ast = parser.parse(expected_verilog)
codegen = ASTCodeGenerator()
expected_code = codegen.visit(expected_ast)
assert(expected_code == code)
|
StarcoderdataPython
|
5041883
|
<reponame>rafaeldias98/python-customer-api<filename>api/migrations/0001_initial.py
# Generated by Django 2.2.5 on 2019-10-06 20:45
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Customer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('email', models.EmailField(max_length=200, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'db_table': 'customer',
'ordering': ['id'],
},
),
migrations.CreateModel(
name='CustomerFavoriteProduct',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('product_id', models.CharField(max_length=200)),
('product_title', models.CharField(max_length=200)),
('product_price', models.FloatField()),
('product_image', models.URLField()),
('review_score', models.FloatField(default=None, null=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('customer', models.ForeignKey(db_column='customer_id', on_delete=django.db.models.deletion.CASCADE, to='api.Customer')),
],
options={
'db_table': 'customer_favorite_product',
'ordering': ['id'],
'unique_together': {('customer', 'product_id')},
},
),
]
|
StarcoderdataPython
|
12824480
|
<reponame>cued-ia-computing/flood-jw2190-lk476
"""tests geo.rivers_with_station function and station_by_river function"""
from floodsystem.geo import rivers_with_station
from floodsystem.geo import station_by_river
from floodsystem.stationdata import build_station_list
def run():
"""Requirements for Task 1D"""
# build list of stations
stations = build_station_list()
# print number of rivers with stations
print("Number of rivers: {}".format(len(rivers_with_station(stations))))
# print the first rivers in alphabetical order
print(rivers_with_station(stations)[:10])
# print names of stations located on the different rivers in alphabetical order
station = station_by_river(stations)
print("Stations on the River Aire: {}".format(sorted(station['River Aire'])))
print("Stations on the River Cam: {}".format(sorted(station['River Cam'])))
print("Stations on the River Thames: {}".format(sorted(station['River Thames'])))
if __name__ == "__main__":
print("*** Task 1D: CUED Part 1A Flood Warning System ***")
run()
|
StarcoderdataPython
|
9624826
|
from inflection import camelize
from twiml_generator.specificity.common import Language, rename_attr, to_bytes
class Python(Language):
_classes = {}
@classmethod
def clean(cls, generator) -> None:
"""Python library specificities which requires to change the TwiML IR.
"""
for verb, event in generator.twimlir:
if verb.name == 'break':
verb.name = 'break_'
rename_attr(verb, 'from', 'from_')
rename_attr(verb, 'for', 'for_')
cls.verb_processing(verb, generator.specific_imports)
@classmethod
def verb_processing(cls, verb, imports):
super().verb_processing(verb, imports)
for name, value in verb.attributes.items():
if value in ['true', 'false']:
verb.attributes[name] = camelize(value)
to_bytes(verb, name)
@Python.register
class Play:
@classmethod
def process(cls, verb, imports):
if not verb.text:
verb.text = ' '
|
StarcoderdataPython
|
5045871
|
# Copyright 2021 The sunds Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data specification of features of scene understanding datasets."""
from typing import Tuple
from sunds.core import spec_dict
from sunds.typing import Dim, FeatureSpecs, FeaturesOrBool, LabelOrFeaturesOrBool # pylint: disable=g-multiple-import
import tensorflow as tf
import tensorflow_datasets as tfds
def scene_spec(
frames: FeatureSpecs, # pylint: disable=redefined-outer-name
*,
point_cloud: FeaturesOrBool = False,
) -> FeatureSpecs:
"""Scene spec definitions."""
frames = dict(frames)
frames.pop('scene_name') # Clear the scene field
specs = spec_dict.SpecDict({
# A unique name 🔑 that identifies the scene.
'scene_name': tfds.features.Text(),
# A scene has several frames. This stores a lightweight information of all
# frames (without sensor data) in the scene. This can be used for random
# lookup of a particular frame from the `frame` store.
'frames': tfds.features.Sequence(frames),
# Scene bounding box 📦. This axis aligned bounding box can be used to
# represent the extent of the scene in its local coordinate frame.
'scene_box': aligned_box_3d_spec(),
# Nominal time ⏰ of the scene encoded as `RFC3339_full` format datetime
# string e.g. `1970-01-01 00:00:00.0 +0000`. All timestamps in `frame`
# level data are expected to be relative (elapsed time in seconds) to this
# nominal time. This field can be left unspecified for most datasets
# unless there is a need to explicitly get the absolute time point.
'nominal_time': tfds.features.Text(),
})
# LiDAR point cloud.
specs.maybe_set('point_cloud', point_cloud)
return specs
def frame_spec(cameras: FeatureSpecs) -> FeatureSpecs:
"""Frame specification used for storing frame data in `frame` stores.
A `frame` is typically used to group sensor measurements taken during a small
timespan with a shared sensor rig or rosette. For example `frame` may be used
to group all cameras and lidar sensor information and observations captured at
a particular timespan during a run of an autonomous driving vehicle. So each
frame can have multiple cameras (e.g. stereo setups) and lidars ( e.g.
autonomous cars with multiple lidar sensors). However in this simple synthetic
dataset, each frame has only one camera.
See `frame_info_spec()` for a lightweight version (without sensor data) of
`frame` which is used to store information about all frames in a scene.
Args:
cameras: A dict[camera_name, sunds.specs.camera_spec()]
Returns:
A composite `tfds` feature defining the specification of `frame` data.
"""
return {
# A unique name 🔑 that identifies the sequence the frame is from.
'scene_name': tfds.features.Text(),
# A unique name 🔑 that identifies this particular frame.
'frame_name': tfds.features.Text(),
# Frame pose w.r.t scene: X_{scene} = R * X_{frame} + t.
'pose': pose_spec(),
# Frame timestamp ⏰. This is expected to be the timestamp when frame
# `pose` was recorded. We expect this timestamp to be relative to the
# nominal timestamp of the scene this frame belongs to.
'timestamp': tf.float32,
# Camera sensor data. Each frame can have multiple cameras (e.g. stereo
# setups, autonomous cars with multiple cameras). See `camera_spec` for
# more details about the contents of each camera.
'cameras': cameras,
}
def camera_spec(
*,
color_image: FeaturesOrBool = False,
category_image: LabelOrFeaturesOrBool = False,
instance_image: LabelOrFeaturesOrBool = False,
depth_image: FeaturesOrBool = False,
camera_rays: FeaturesOrBool = False,
img_shape: Tuple[Dim, Dim] = (None, None),
) -> FeatureSpecs:
"""Feature specification of camera sensor 📷.
This functions returns the specification of camera sensor data like intrinsics
and extrinsics of the camera, and optionally the images caputured by the
camera and image level annotations.
Note that the camera extrinsics stored here are w.r.t frame. To get the pose
of a camera w.r.t to scene, we have to also use the pose of the frame w.r.t
scene.
Args:
color_image: Rgb color image is stored.
category_image: Category segmentation label image.
instance_image: Object instance ids.
depth_image: depth image is stored.
camera_rays: The given camera specs.
img_shape: The (h, w) image shape.
Returns:
A composite `tfds` feature defining the specification of camera data.
"""
spec = spec_dict.SpecDict({
# Camera intrinsics.
'intrinsics': camera_intrinsics_spec(),
# Camera extrinsics w.r.t frame (frame to camera transform):
# X_{camera} = R * X_{frame} + t.
# If a camera is not posed, this can be left to `Identity`.
'extrinsics': pose_spec(),
})
# Color image data.
spec.maybe_set(
'color_image',
color_image,
tfds.features.Image(shape=(*img_shape, 3)),
)
# Category segmentation data.
spec.maybe_set(
'category_image',
category_image,
spec_dict.labeled_image(shape=(*img_shape, 1)),
)
# Object instance id data.
spec.maybe_set(
'instance_image',
instance_image,
spec_dict.labeled_image(shape=(*img_shape, 1)),
)
# Depth image.
spec.maybe_set(
'depth_image',
depth_image,
tfds.features.Image(shape=(*img_shape, 1), dtype=tf.float32),
)
# Camera rays
spec.maybe_update(
camera_rays,
camera_rays_spec(img_shape=img_shape),
)
return spec
def camera_intrinsics_spec() -> FeatureSpecs:
"""Specification of camera intrinsics.
The camera instrisics model is identical to the `opencv` and `vision::sfm`
camera calibration model. This is used in `camera_spec` which has other camera
data like extrinsics of the camera and image data.
Returns:
A composite `tfds` feature defining the specification of camera intrinsics.
"""
return {
# Image width of the camera sensor.
'image_width': tf.int32,
# Image height of the camera sensor.
'image_height': tf.int32,
# Camera intrinsics matrix K (3x3 matrix).
# [fx skew cx]
# K = [ O fy cy]
# [ 0 0 1]
'K': tfds.features.Tensor(shape=(3, 3), dtype=tf.float32),
# Camera projection type. Should be either 'PERSPECTIVE' | 'FISHEYE'. For
# the `nerf_synthetic` data this is always `PERSPECTIVE` (pinhole).
'type': tfds.features.Text(),
# Camera distortion coefficients. Since cameras in this dataset does not
# have any distortions, these will have zero values and can be ignored.
'distortion': {
# Radial distortion coefficients [k1, k2, k3].
'radial': tfds.features.Tensor(shape=(3,), dtype=tf.float32),
# Tangential distortion coefficients [p1, p2].
'tangential': tfds.features.Tensor(shape=(2,), dtype=tf.float32),
}
}
def pose_spec() -> FeatureSpecs:
"""Specification of pose represented by 3D Isometric transformation.
Returns:
A composite `tfds` feature defining the specification of SE(3) pose data.
"""
return {
# 3x3 rotation matrix.
'R': tfds.features.Tensor(shape=(3, 3), dtype=tf.float32),
# 3D translation vector.
't': tfds.features.Tensor(shape=(3,), dtype=tf.float32),
}
def camera_rays_spec(
*,
img_shape: Tuple[Dim, Dim] = (None, None),
encoding: tfds.features.Encoding = tfds.features.Encoding.ZLIB,
) -> FeatureSpecs:
"""Specification for explicit camera rays."""
return {
'ray_directions':
tfds.features.Tensor(
shape=(*img_shape, 3),
dtype=tf.float32,
encoding=encoding,
),
'ray_origins':
tfds.features.Tensor(
shape=(*img_shape, 3),
dtype=tf.float32,
encoding=encoding,
),
}
def aligned_box_3d_spec() -> tfds.features.FeaturesDict:
"""Specification of an Axis aligned bounding box 📦."""
return {
# A box is considered null (empty) if any(min > max).
# Minimum extent of an axis aligned box.
'min_corner': tfds.features.Tensor(shape=(3,), dtype=tf.float32),
# Maximum extent of an axis aligned box.
'max_corner': tfds.features.Tensor(shape=(3,), dtype=tf.float32), # pytype: disable=bad-return-type # gen-stub-imports
}
def point_cloud_spec(
*,
category_labels: FeaturesOrBool = False,
) -> FeatureSpecs:
"""Specification of a LiDAR point cloud."""
# TODO(epot): Rather than using "None" for the first dimension of each Tensor,
# use tfds.features.Sequence(per_point_feature). Also consider using
# tfds.features.ClassLabel instead of int32 for semantic category.
result = spec_dict.SpecDict({
'positions':
tfds.features.Tensor(shape=(None, 3), dtype=tf.float32),
'point_identifiers':
tfds.features.Tensor(shape=(None, 1), dtype=tf.int64),
'timestamps':
tfds.features.Tensor(shape=(None, 1), dtype=tf.float32),
})
# TODO(epot): Replace by ClassLabel
result.maybe_set(
'category_labels',
category_labels,
tfds.features.Tensor(shape=(None, 1), dtype=tf.int32),
)
return result
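# A hedged usage sketch (illustrative only): the camera name 'cam0' and the
# 128x128 image shape below are made-up example values.
if __name__ == '__main__':
    cameras = {'cam0': camera_spec(color_image=True, img_shape=(128, 128))}
    frames = frame_spec(cameras=cameras)
    print(sorted(frames))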
|
StarcoderdataPython
|
12813350
|
'''
Description: Automatically generated file comment
version:
Author: Wesley
Date: 2021-07-27 09:53:43
LastEditors: Wesley
LastEditTime: 2021-08-13 15:34:36
'''
from ctypes import cdll, CFUNCTYPE, c_char_p, c_void_p, c_bool, POINTER, c_uint64, c_uint32
from wtpy.WtCoreDefs import BarList, TickList, WTSBarStruct, WTSTickStruct
from wtpy.wrapper.PlatformHelper import PlatformHelper as ph
from wtpy.WtUtilDefs import singleton
import os
CB_GET_BAR = CFUNCTYPE(c_void_p, POINTER(WTSBarStruct), c_uint32, c_bool)
CB_GET_TICK = CFUNCTYPE(c_void_p, POINTER(WTSTickStruct), c_uint32, c_bool)
@singleton
class WtDtServoApi:
'''
    Low-level Python binding for the C interface of the Wt platform data component (WtDtServo)
'''
    # the api handle can be shared as a class-level/public variable
api = None
ver = "Unknown"
    # constructor: load the dynamic library by module name
def __init__(self):
paths = os.path.split(__file__)
dllname = ph.getModule("WtDtServo")
a = (paths[:-1] + (dllname,))
_path = os.path.join(*a)
self.api = cdll.LoadLibrary(_path)
self.api.get_version.restype = c_char_p
self.ver = bytes.decode(self.api.get_version())
self.api.get_bars_by_range.argtypes = [c_char_p, c_char_p, c_uint64, c_uint64, CB_GET_BAR]
self.api.get_ticks_by_range.argtypes = [c_char_p, c_uint64, c_uint64, CB_GET_TICK]
self.api.get_bars_by_count.argtypes = [c_char_p, c_char_p, c_uint32, c_uint64, CB_GET_BAR]
self.api.get_ticks_by_count.argtypes = [c_char_p, c_uint32, c_uint64, CB_GET_TICK]
def initialize(self, cfgfile:str, isFile:bool):
self.api.initialize(bytes(cfgfile, encoding = "utf8"), isFile)
def get_bars(self, stdCode:str, period:str, fromTime:int = None, dataCount:int = None, endTime:int = 0) -> BarList:
        '''
        Resample bar (K-line) data\n
        @stdCode standard contract code\n
        @period base bar period, m1/m5/d\n
        @fromTime start time, yyyymmdd for daily bars, yyyymmddHHMM for minute bars\n
        @endTime end time, yyyymmdd for daily bars, yyyymmddHHMM for minute bars; 0 means read up to the last bar
        '''
bar_cache = BarList()
if fromTime is not None:
ret = self.api.get_bars_by_range(bytes(stdCode, encoding="utf8"), bytes(period,'utf8'), fromTime, endTime, CB_GET_BAR(bar_cache.on_read_bar))
else:
ret = self.api.get_bars_by_count(bytes(stdCode, encoding="utf8"), bytes(period,'utf8'), dataCount, endTime, CB_GET_BAR(bar_cache.on_read_bar))
if ret == 0:
return None
else:
return bar_cache
def get_ticks(self, stdCode:str, fromTime:int = None, dataCount:int = None, endTime:int = 0) -> TickList:
        '''
        Fetch tick data\n
        @stdCode standard contract code\n
        @fromTime start time, format yyyymmddHHMM\n
        @endTime end time, format yyyymmddHHMM; 0 means read up to the last tick
        '''
tick_cache = TickList()
if fromTime is not None:
ret = self.api.get_ticks_by_range(bytes(stdCode, encoding="utf8"), fromTime, endTime, CB_GET_TICK(tick_cache.on_read_tick))
else:
ret = self.api.get_ticks_by_count(bytes(stdCode, encoding="utf8"), dataCount, endTime, CB_GET_TICK(tick_cache.on_read_tick))
if ret == 0:
return None
else:
return tick_cache
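# A hedged usage sketch (illustrative only; the config file name and the contract
# code below are hypothetical and depend on the local WtDtServo installation):
# if __name__ == '__main__':
#     api = WtDtServoApi()
#     api.initialize('dtcfg.yaml', True)
#     bars = api.get_bars('CFFEX.IF.HOT', 'm5', dataCount=100)
#     print(bars)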
|
StarcoderdataPython
|
11353690
|
import io
import time
import threading
import queue
import picamera
class ImageProcessor(threading.Thread):
def __init__(self, owner):
super(ImageProcessor, self).__init__()
self.terminated = False
self.owner = owner
self.start()
def run(self):
# This method runs in a separate thread
while not self.terminated:
# Get a buffer from the owner's outgoing queue
try:
stream = self.owner.outgoing.get(timeout=1)
except queue.Empty:
pass
else:
stream.seek(0)
# Read the image and do some processing on it
#Image.open(stream)
#...
#...
# Set done to True if you want the script to terminate
# at some point
#self.owner.done=True
stream.seek(0)
stream.truncate()
self.owner.incoming.put(stream)
class ProcessOutput(object):
def __init__(self, threads):
self.done = False
# Construct a pool of image processors, a queue of incoming buffers,
# and a (currently empty) queue of outgoing buffers. Prime the incoming
# queue with proc+1 buffers (+1 to permit output to be written while
# all procs are busy with existing buffers)
        self.incoming = queue.Queue(threads + 1)
        self.outgoing = queue.Queue(threads + 1)
self.pool = [ImageProcessor(self) for i in range(threads)]
buffers = (io.BytesIO() for i in range(threads + 1))
for buf in buffers:
self.incoming.put(buf)
self.buffer = None
def write(self, buf):
if buf.startswith(b'\xff\xd8'):
# New frame; push current buffer to the outgoing queue and attempt
# to get a buffer from the incoming queue
if self.buffer is not None:
self.outgoing.put(self.buffer)
try:
self.buffer = self.incoming.get_nowait()
except queue.Empty:
# No buffers available (means all threads are busy); skip
# this frame
self.buffer = None
if self.buffer is not None:
self.buffer.write(buf)
def flush(self):
# When told to flush (this indicates end of recording), shut
        # down in an orderly fashion. Tell all the processors they're
# terminated and wait for them to quit
for proc in self.pool:
proc.terminated = True
for proc in self.pool:
proc.join()
with picamera.PiCamera(resolution='VGA') as camera:
camera.start_preview()
time.sleep(2)
output = ProcessOutput(4)
camera.start_recording(output, format='mjpeg')
while not output.done:
camera.wait_recording(1)
camera.stop_recording()
|
StarcoderdataPython
|
1898881
|
"""
# Phase Noise JSON format
:frequency (float): VCO/PLL frequency in Hz
:offset (list of floats): phase noise offset frequency points
:phase_noise (list of floats): phase noise points
:tune_ppm_per_volt (float): tuning sensitivity in ppm per volt (optional)
:tune_Hz_per_volt (float): tuning sensitivity in Hz per volt (optional)
"""
import json
import numpy as np
def load_pn_json(fname):
"""
"""
with open(fname, 'r') as f:
data = json.load(f)
return data
def load_pn_files_directory(mydir):
"""
"""
pass
def translate_phase_noise(in_dict, freq_Hz):
"""
return the frequency offset and translated phase noise
to the target frequency
"""
base_freq_Hz = in_dict['frequency']
pn_factor = 20*np.log10(freq_Hz/base_freq_Hz)
freq_ar = []
pn_ar = []
for i in range(len(in_dict['offset'])):
freq_ar.append(in_dict['offset'][i])
pn_ar.append(in_dict['phase_noise'][i] + pn_factor)
return freq_ar, pn_ar
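if __name__ == '__main__':
    # A hedged usage sketch (illustrative only): build a small in-memory dict
    # matching the JSON format described in the module docstring and translate
    # it from 100 MHz to 200 MHz (expected shift: +20*log10(2) ~ +6 dB).
    example = {
        'frequency': 100e6,
        'offset': [1e3, 1e4, 1e5, 1e6],
        'phase_noise': [-100.0, -110.0, -120.0, -130.0],
    }
    offsets, pn = translate_phase_noise(example, 200e6)
    print(offsets)
    print(pn)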
|
StarcoderdataPython
|
6507498
|
<filename>LearnGUI.py<gh_stars>1-10
from tkinter import *
from tkinter import messagebox, filedialog
from Modules import PublicModules as libs
from Modules import LSTM_Config as cf
import cv2
import threading
from PIL import Image
from PIL import ImageTk
from Modules.MyThreading import MyThreadingVideo
WINDOWS_WIDTH = int(1280 * 0.6)
WINDOWS_HEIGHT = int(720 * 0.6)
URL_VIDEO = 'FileInput/006.avi'
IS_USING_WEBCAM = False
CURSOR_DF = 'hand2'
CURSOR_NO = 'spider'
class EntryWithPlaceholder(Entry):
def __init__(self, master=None, placeholder="PLACEHOLDER", color='grey'):
super().__init__(master)
self.placeholder = placeholder
self.placeholder_color = color
self.default_fg_color = self['fg']
self.bind("<FocusIn>", self.foc_in)
self.bind("<FocusOut>", self.foc_out)
self.put_placeholder()
def put_placeholder(self):
self.insert(0, self.placeholder)
self['fg'] = self.placeholder_color
def foc_in(self, *args):
if self['fg'] == self.placeholder_color:
self.delete('0', 'end')
self['fg'] = self.default_fg_color
def foc_out(self, *args):
if not self.get():
self.put_placeholder()
class ChoseSourceWindow:
def __init__(self, master):
self.isUsingIpWebcam = IntVar()
self.valSource = StringVar()
self.master = master
self.master.minsize(500, 100)
self.frame = Frame(self.master)
# self.master.grab_set()
libs.fun_makeCenter(self.master)
self.DIALOG_OK = False
self.RETURN_RESULT = 'NULL'
self.iconCheck= PhotoImage(file='FileInput/Icons/ic_check2.png').subsample(3, 3)
self.iconMp4 = PhotoImage(file='FileInput/Icons/ic_check2.png').subsample(3, 3)
        # call this last
self.fun_initComponent()
def fun_initComponent(self):
self.frame.grid(row=0, column=0, sticky='nsew')
self.master.grid_columnconfigure(0, weight=1)
self.master.grid_rowconfigure(0, weight=1)
# frame 1
self.frame1 = Frame(self.frame, bg='#95deff', padx=10, pady=10)
self.frame2 = Frame(self.frame, bg='#c1ffe5', padx=10, pady=10)
self.frame3 = Frame(self.frame, bg='#f7b5c7', padx=10, pady=10)
self.frame1.grid(row=0, column=0, sticky='nsew')
self.frame2.grid(row=1, column=0, sticky='nsew')
self.frame3.grid(row=2, column=0, sticky='nsew')
self.frame.grid_columnconfigure(0, weight=1)
self.frame.grid_rowconfigure(0, weight=1)
self.frame.grid_rowconfigure(1, weight=1)
self.frame.grid_rowconfigure(2, weight=1)
self.checkDir = Checkbutton(self.frame1, text='VIDEO FROM DISK...',
variable=self.isUsingIpWebcam, command=self.fun_CheckIsUsingCamChange,
padx=10, pady=10,
font=('Helvetica', 18, 'bold'),
cursor=CURSOR_DF
)
self.checkDir.grid(row=0, column=0, sticky='nsew')
self.frame1.grid_rowconfigure(0, weight=1)
self.frame1.grid_columnconfigure(0, weight=1)
self.tbSource = EntryWithPlaceholder(self.frame2, 'IP WEBCAM EXAMPLE: 192.168.1.1')
self.tbSource.grid(row=0, column=0, sticky='nsew')
self.btnSource = Button(self.frame2,
command=self.btnGetPathFromSourceClicked, cursor=CURSOR_DF,
image= self.iconCheck,
compound= CENTER,
bg='#c1ffe5'
)
self.btnSource.grid(row=0, column=1, sticky='nsew')
self.frame2.grid_columnconfigure(0, weight=9)
self.frame2.grid_columnconfigure(1, weight=1)
self.frame2.grid_rowconfigure(0, weight=1)
self.btnOk = Button(self.frame3, padx=10, pady=10, text='Load Video Clip'
, command=self.btnLoadVideoClicked,
state='disable',
cursor=CURSOR_NO
)
self.btnOk.grid(row=0, column=0, sticky='nsew')
self.frame3.grid_columnconfigure(0, weight=1)
self.frame3.grid_rowconfigure(0, weight=1)
def fun_CheckIsUsingCamChange(self):
if self.isUsingIpWebcam.get() == 0:
self.btnSource.config(image= self.iconCheck)
holder = 'IP WEBCAM EXAMPLE: 192.168.1.1'
self.checkDir.config(bg= 'white')
else:
self.btnSource.config(image= self.iconMp4)
holder = 'EXAMPLE: C:/VIDEO/DETECTION.MP4'
self.checkDir.config(bg= '#c1ffe5')
self.fun_reloadHolderSource(source=holder)
def fun_reloadHolderSource(self, source: str):
self.tbSource.delete('0', 'end')
self.tbSource.placeholder = source
self.tbSource.put_placeholder()
def fun_checkVideoFromSource(self, source: str):
try:
frames = libs.fun_getFramesOfVideo(path=source, count=20)
            messagebox.showinfo('Notification', 'Check Video Load OK, Video Size: {0}'.format(frames[0].shape))
return True
except:
            messagebox.showerror('Notification', 'The request was not accepted!')
return False
def fun_getURL_IPCam(self, ip: str):
return '{0}{1}{2}'.format('http://', ip, ':8080/video')
def btnLoadVideoClicked(self):
if self.isUsingIpWebcam.get() == 0:
self.RETURN_RESULT = self.fun_getURL_IPCam(ip=self.tbSource.get())
self.DIALOG_OK = True
self.master.destroy()
def btnGetPathFromSourceClicked(self):
if self.isUsingIpWebcam.get() == 0:
url = self.fun_getURL_IPCam(ip=self.tbSource.get())
else:
self.RETURN_RESULT = filedialog.askopenfilename(initialdir="/", title="Select file",
filetypes=(("AVI files", "*.AVI"), ("MP4 files", "*.MP4"), ("ALL files", "*.*")))
self.fun_reloadHolderSource(source=self.RETURN_RESULT)
url = self.RETURN_RESULT
isCheck = self.fun_checkVideoFromSource(source=url)
if isCheck:
self.btnOk.config(state='normal', cursor=CURSOR_DF)
else:
self.btnOk.config(state='disable', cursor=CURSOR_NO)
def close_windows(self):
self.master.destroy()
class MyApp:
    def __init__(self, title: str = "GUI HUMAN'S VIOLENCE DETECTIONS"):
self.URL_VIDEO = URL_VIDEO
self.videoCap = None
self.title = title
self.root = Tk()
self.root.title(string=title)
self.arrACTION = []
self.stopEvent = None
self.IS_PAUSE = False
self.containerTrai = None
self.containerPhai = None
self.root.minsize(width=WINDOWS_WIDTH, height=WINDOWS_HEIGHT)
# libs.fun_makeCenter(self.root)
libs.fun_makeMaximumSize(self.root)
# Load model VGG16
self.vgg16_model = None
# self.vgg16_model.summary()
# Load model LSTM
self.lstm_model = None
# self.lstm_model.summary()
self.initComponent()
def initComponent(self):
#
self.containerTrai = Frame(self.root, bg='white', padx=10, pady=10)
self.containerPhai = Frame(self.root, bg='white', padx=10, pady=10)
self.containerTrai.grid(row=0, column=0, sticky='nsew')
self.containerPhai.grid(row=0, column=1, sticky='nsew')
self.root.grid_columnconfigure(0, weight=1)
self.root.grid_columnconfigure(1, weight=1)
self.root.grid_rowconfigure(0, weight=1)
        # Child containers of the left panel
self.containerChonNguonDuLieu = Frame(self.containerTrai, bg='#95deff', padx=10, pady=10)
self.containerVideoCamera = Frame(self.containerTrai, bg='#c1ffe5', padx=10, pady=10)
self.containerChucNang = Frame(self.containerTrai, bg='#f7b5c7', padx=10, pady=10)
self.containerChonNguonDuLieu.grid(row=0, column=0, sticky='nsew')
self.containerVideoCamera.grid(row=1, column=0, sticky='nsew')
self.containerChucNang.grid(row=2, column=0, sticky='nsew')
self.containerTrai.grid_columnconfigure(0, weight=1)
self.containerTrai.grid_rowconfigure(0, weight=1)
self.containerTrai.grid_rowconfigure(1, weight=8)
self.containerTrai.grid_rowconfigure(2, weight=1)
        # UI for the data-source selection button
iconChonNguonDuLieu = PhotoImage(file='FileInput/Icons/ic_dir.png')
# Resizing image to fit on button
iconChonNguonDuLieu = iconChonNguonDuLieu.subsample(1, 1)
self.btnChonNguonDuLieu = Button(self.containerChonNguonDuLieu, padx=10,
pady=10, text='INSERT VIDEO FROM SOURCE...',
command=self.fun_chonNguonDuLieu,
# bg='green',
cursor=CURSOR_DF,
font=('Helvetica', 18, 'bold'),
image=iconChonNguonDuLieu,
compound=LEFT
)
self.btnChonNguonDuLieu.image=iconChonNguonDuLieu
self.btnChonNguonDuLieu.grid(row=0, column=0, sticky='nsew')
        # UI for the reload-video button
iconTaiLaiVideo = PhotoImage(file='FileInput/Icons/ic_process.png')
# Resizing image to fit on button
iconTaiLaiVideo = iconTaiLaiVideo.subsample(1, 1)
self.btnRefresh = Button(self.containerChonNguonDuLieu, padx=10,
pady=10,
# bg='green',
# text='Tai lai video',
command=self.fun_taiLaiVideo,
state='disable',
cursor=CURSOR_NO,
image=iconTaiLaiVideo,
compound=CENTER
)
self.btnRefresh.image= iconTaiLaiVideo
self.btnRefresh.grid(row=0, column=1, sticky='nsew')
        # UI for the disconnect button
iconNgatKetNoi = PhotoImage(file='FileInput/Icons/ic_powerof.png')
iconNgatKetNoi = iconNgatKetNoi.subsample(1, 1)
self.btnDisconection = Button(self.containerChonNguonDuLieu, padx=10,
pady=10,
# bg='green',
# text='Ngat Ke Noi',
image=iconNgatKetNoi,
command=self.fun_ngatKetNoi,
cursor=CURSOR_DF,
compound=CENTER
)
        self.btnDisconection.image = iconNgatKetNoi  # keep a reference so the icon is not garbage-collected
self.btnDisconection.grid(row=0, column=2, sticky='nsew')
self.containerChonNguonDuLieu.grid_columnconfigure(0, weight=8)
self.containerChonNguonDuLieu.grid_columnconfigure(1, weight=1)
self.containerChonNguonDuLieu.grid_columnconfigure(2, weight=1)
self.containerChonNguonDuLieu.grid_rowconfigure(0, weight=1)
        # Child containers of the right panel
self.containerPhanDoanBaoLuc = Frame(self.containerPhai, bg='#95deff', padx=10, pady=10)
self.containerTongHopMoTaPhanDoanDanh = Frame(self.containerPhai, bg='#c1ffe5', padx=10, pady=10)
self.containerPhanDoanBaoLuc.grid(row=0, column=0, sticky='nsew')
self.containerTongHopMoTaPhanDoanDanh.grid(row=1, column=0, sticky='nsew')
        # Label showing which type of violence was detected
self.lbKetQuaBaoLuc = Label(self.containerTongHopMoTaPhanDoanDanh,
text='<NAME>', padx=10,
pady=10,
bg='white',
font=('Helvetica', 18, 'bold')
)
self.lbKetQuaBaoLuc.grid(row=0, column=0, sticky='nsew')
self.containerTongHopMoTaPhanDoanDanh.grid_rowconfigure(0, weight=1)
self.containerTongHopMoTaPhanDoanDanh.grid_columnconfigure(0, weight=1)
self.containerPhai.grid_rowconfigure(0, weight=9)
self.containerPhai.grid_rowconfigure(1, weight=1)
self.containerPhai.grid_columnconfigure(0, weight=1)
        # Child container of ContainerVideoFrames
self.lbVideoFrames = Label(self.containerVideoCamera, bg='white', padx=10, pady=10)
self.lbVideoFrames.grid(row=0, column=0, sticky='nsew')
self.containerVideoCamera.grid_rowconfigure(0, weight=1)
self.containerVideoCamera.grid_columnconfigure(0, weight=1)
self.makePhanDoanBaoLucGUI6()
# self.videoLoadingThreading()
        self.root.wm_protocol('WM_DELETE_WINDOW', self.onClose)
self.fun_initGUI()
self.fun_taiGiaoDien17CapDo()
def fun_initGUI(self):
img = cv2.imread(filename= 'FileInput/Imgs/ImgNotFound2.jpg')
img1 = cv2.imread(filename= 'FileInput/Imgs/ImgNotFound.jpg')
size = libs.fun_getSizeOfFrame(frame= img)
size1 = libs.fun_getSizeOfFrame(frame= img1)
self.imgNotFound = libs.fun_cv2_imageArrayToImage(containerFather= self.containerVideoCamera, frame= img, reSize= size)
self.imgNotFound1 = libs.fun_cv2_imageArrayToImage(containerFather= self.containerVideoCamera, frame= img1, reSize= (int(size[0] * 0.2), int(size[1] * 0.2)))
self.lbVideoFrames.config(image= self.imgNotFound)
self.lbVideoFrames1.config(image= self.imgNotFound1)
self.lbVideoFrames2.config(image= self.imgNotFound1)
self.lbVideoFrames3.config(image= self.imgNotFound1)
self.lbVideoFrames4.config(image= self.imgNotFound1)
def fun_ngatKetNoi(self):
if self.stopEvent is None:
return
self.stopEvent.set()
self.fun_initGUI()
def fun_taiLaiVideo(self):
self.btnRefresh.config(state='disable', cursor=CURSOR_NO)
self.videoLoadingThreading()
def fun_taiGiaoDien17CapDo(self):
        # UI for the 17-level action container
self.arrACTION.clear()
actionNames = cf.VIDEO_NAMES.copy()
actionNames.insert(0, 'no')
for i in range(0, len(actionNames)):
action = Label(self.containerChucNang, bg='#ffffff', padx=10, pady=10,
text=actionNames[i],
font=('Helvetica', 18, 'bold')
)
action.grid(row=0, column=i, sticky='nsew')
self.arrACTION.append(action)
self.containerChucNang.grid_rowconfigure(0, weight=1)
for i in range(0, len(actionNames)):
self.containerChucNang.grid_columnconfigure(i, weight=1)
    # Event handler for the data-source selection button
def fun_chonNguonDuLieu(self):
self.newWindow = Toplevel(self.root)
self.app = ChoseSourceWindow(self.newWindow)
self.app.master.grab_set()
self.root.wait_window(self.app.master)
        # The user did not confirm the source selection -> stop
if not self.app.DIALOG_OK:
messagebox.showwarning('Thong Bao', 'Chon nguon video that bai')
return
        # The user confirmed the source selection
self.URL_VIDEO = self.app.RETURN_RESULT
self.fun_taiGiaoDien17CapDo()
self.videoLoadingThreading()
def makePhanDoanBaoLucGUI6(self):
self.frameVideo1 = Frame(self.containerPhanDoanBaoLuc, padx=10, pady=10, bg='white')
self.frameVideo2 = Frame(self.containerPhanDoanBaoLuc, padx=10, pady=10, bg='#c1ffe5')
self.frameVideo3 = Frame(self.containerPhanDoanBaoLuc, padx=10, pady=10, bg='#c1ffe5')
self.frameVideo4 = Frame(self.containerPhanDoanBaoLuc, padx=10, pady=10, bg='white')
self.frameVideo1.grid(row=0, column=0, sticky='nsew')
self.frameVideo2.grid(row=0, column=1, sticky='nsew')
self.frameVideo3.grid(row=1, column=0, sticky='nsew')
self.frameVideo4.grid(row=1, column=1, sticky='nsew')
self.containerPhanDoanBaoLuc.grid_rowconfigure(0, weight=1)
self.containerPhanDoanBaoLuc.grid_rowconfigure(1, weight=1)
self.containerPhanDoanBaoLuc.grid_columnconfigure(0, weight=1)
self.containerPhanDoanBaoLuc.grid_columnconfigure(1, weight=1)
        # segment 1
self.lbVideoFrames1 = Label(self.frameVideo1, padx=10, pady=10, bg='white')
self.lbVideoFrames1.grid(row=0, column=0, sticky='nsew')
self.frameVideo1.grid_rowconfigure(0, weight=1)
self.frameVideo1.grid_columnconfigure(0, weight=1)
        # segment 2
self.lbVideoFrames2 = Label(self.frameVideo2, padx=10, pady=10, bg='white')
self.lbVideoFrames2.grid(row=0, column=0, sticky='nsew')
self.frameVideo2.grid_rowconfigure(0, weight=1)
self.frameVideo2.grid_columnconfigure(0, weight=1)
        # segment 3
self.lbVideoFrames3 = Label(self.frameVideo3, padx=10, pady=10, bg='white')
self.lbVideoFrames3.grid(row=0, column=0, sticky='nsew')
self.frameVideo3.grid_rowconfigure(0, weight=1)
self.frameVideo3.grid_columnconfigure(0, weight=1)
        # segment 4
self.lbVideoFrames4 = Label(self.frameVideo4, padx=10, pady=10, bg='white')
self.lbVideoFrames4.grid(row=0, column=0, sticky='nsew')
self.frameVideo4.grid_rowconfigure(0, weight=1)
self.frameVideo4.grid_columnconfigure(0, weight=1)
self.arrThread = []
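        # Four classifier threads, one per quadrant label; updateVideoFrames() feeds them 20-frame batches in round-robin order.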
thread1 = MyThreadingVideo(lbShow=self.lbVideoFrames1, lbFather=self.frameVideo1, lbShowKetQua= self.lbKetQuaBaoLuc, vgg16_model= self.vgg16_model, lstm_model= self.lstm_model, treeAction=None)
thread2 = MyThreadingVideo(lbShow=self.lbVideoFrames2, lbFather=self.frameVideo2, lbShowKetQua= self.lbKetQuaBaoLuc, vgg16_model= self.vgg16_model, lstm_model= self.lstm_model, treeAction=None)
thread3 = MyThreadingVideo(lbShow=self.lbVideoFrames3, lbFather=self.frameVideo3, lbShowKetQua= self.lbKetQuaBaoLuc, vgg16_model= self.vgg16_model, lstm_model= self.lstm_model, treeAction=None)
thread4 = MyThreadingVideo(lbShow=self.lbVideoFrames4, lbFather=self.frameVideo4, lbShowKetQua= self.lbKetQuaBaoLuc, vgg16_model= self.vgg16_model, lstm_model= self.lstm_model, treeAction=None)
self.arrThread.append(thread1)
self.arrThread.append(thread3)
self.arrThread.append(thread4)
self.arrThread.append(thread2)
def runMyApp(self):
self.root.mainloop()
def videoLoadingThreading(self):
self.stopEvent = threading.Event()
self.loadVideoThread = threading.Thread(target=self.updateVideoFrames, args=())
self.loadVideoThread.setDaemon(True)
self.loadVideoThread.start()
def updateVideoFrames(self):
self.videoCap = cv2.VideoCapture(self.URL_VIDEO)
self.isContinue, self.frame = self.videoCap.read()
count = 0
xoayVong = 0
frames = []
while not self.stopEvent.is_set() and self.isContinue:
image = libs.fun_cv2_imageArrayToImage(containerFather= self.containerVideoCamera, frame= self.frame.copy())
self.lbVideoFrames.config(image=image)
self.lbVideoFrames.image = image
isContinue, self.frame = self.videoCap.read()
            # Read failed -> end of video, exit the loop
if not isContinue:
break
frames.append(self.frame.copy())
cv2.waitKey(5)
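            # Every 20 frames, hand the collected batch to the next of the four classifier threads (round-robin).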
if count == 19:
self.arrThread[xoayVong].setFrames(frames)
self.arrThread[xoayVong].startShowVideo()
xoayVong += 1
if xoayVong == 4:
xoayVong = 0
frames = []
count = 0
continue
count += 1
self.btnRefresh.config(state='normal', cursor=CURSOR_DF)
if not self.IS_PAUSE:
self.videoCap.release()
def onClose(self):
libs.fun_print(name='Violence Detect App', value='Closing')
self.videoCap.release()
self.root.destroy()
sys.exit(0)
if __name__ == '__main__':
if IS_USING_WEBCAM:
URL_VIDEO = 0
videoCap = cv2.VideoCapture(URL_VIDEO)
app = MyApp()
app.runMyApp()
|
StarcoderdataPython
|
8142376
|
<filename>zooniverse/settings/test.py
"""
Distributed under the MIT License. See LICENSE.txt for more info.
"""
from .development import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
},
}
TEST_OUTPUT_DIR = os.path.join(BASE_DIR, '..', 'test_output')
|
StarcoderdataPython
|
3271468
|
<filename>leetcode/1228_missing_number_in_arithmetic_progression.py<gh_stars>1-10
# -*- coding: utf-8 -*-
class Solution:
def missingNumber(self, arr):
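        # The complete progression has len(arr) + 1 terms and keeps the same first and last elements,
        # so its sum is (first + last) * (len(arr) + 1) / 2; subtracting the observed sum leaves the missing value.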
return ((arr[-1] + arr[0]) * (len(arr) + 1)) // 2 - sum(arr)
if __name__ == '__main__':
solution = Solution()
assert 9 == solution.missingNumber([5, 7, 11, 13])
assert 14 == solution.missingNumber([15, 13, 12])
assert 0 == solution.missingNumber([0, 0, 0, 0, 0])
|
StarcoderdataPython
|
6418562
|
# Copyright 2020-2021 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from filterpy.kalman import KalmanFilter
from opendr.engine.target import BoundingBox3D, TrackingAnnotation3D
class KalmanTracker3D():
def __init__(
self,
boundingBox3D: BoundingBox3D,
id,
frame,
state_dimensions=10, # x, y, z, rotation_y, l, w, h, speed_x, speed_z, angular_speed
measurement_dimensions=7, # x, y, z, rotation_y, l, w, h
state_transition_matrix=None,
measurement_function_matrix=None,
covariance_matrix=None,
process_uncertainty_matrix=None,
):
super().__init__()
self.start_frame = frame
self.last_update_frame = frame
self.id = id
self.kalman_filter = KalmanFilter(dim_x=state_dimensions, dim_z=measurement_dimensions)
self.predictions = []
self.updates = 0
if state_transition_matrix is None:
state_transition_matrix = np.eye(state_dimensions, dtype=np.float32)
state_transition_matrix[0, -3] = 1
state_transition_matrix[1, -2] = 1
state_transition_matrix[2, -1] = 1
if measurement_function_matrix is None:
measurement_function_matrix = np.eye(
measurement_dimensions, state_dimensions, dtype=np.float32
)
if covariance_matrix is None:
covariance_matrix = np.eye(
state_dimensions, state_dimensions, dtype=np.float32
) * 10
covariance_matrix[7:, 7:] *= 1000
if process_uncertainty_matrix is None:
process_uncertainty_matrix = np.eye(
state_dimensions, state_dimensions, dtype=np.float32
)
process_uncertainty_matrix[7:, 7:] *= 0.01
self.kalman_filter.F = state_transition_matrix
self.kalman_filter.H = measurement_function_matrix
self.kalman_filter.P = covariance_matrix
self.kalman_filter.Q = process_uncertainty_matrix
location = boundingBox3D.data["location"]
dimensions = boundingBox3D.data["dimensions"]
rotation_y = boundingBox3D.data["rotation_y"]
# [x, y, z, rotation_y, l, w, h]
self.kalman_filter.x[:measurement_dimensions] = np.array([
*location, rotation_y, *dimensions
]).reshape(-1, 1)
self.name = boundingBox3D.name
self.bbox2d = boundingBox3D.bbox2d
self.action = boundingBox3D.action
self.alpha = boundingBox3D.alpha
self.truncated = boundingBox3D.truncated
self.occluded = boundingBox3D.occluded
self.confidence = boundingBox3D.confidence
def update(self, boundingBox3D: BoundingBox3D, frame):
self.last_update_frame = frame
self.updates += 1
location = boundingBox3D.data["location"]
dimensions = boundingBox3D.data["dimensions"]
rotation_y = boundingBox3D.data["rotation_y"]
self.name = boundingBox3D.name
self.bbox2d = boundingBox3D.bbox2d
self.action = boundingBox3D.action
self.alpha = boundingBox3D.alpha
self.truncated = boundingBox3D.truncated
self.occluded = boundingBox3D.occluded
self.confidence = boundingBox3D.confidence
rotation_y = normalize_angle(rotation_y)
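        # Align the predicted orientation with the measurement before the Kalman update: flip it by pi when the
        # two angles point in roughly opposite directions, and wrap by 2*pi so the correction takes the short way around.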
predicted_rotation_y = self.kalman_filter.x[3]
if (
abs(rotation_y - predicted_rotation_y) >= np.pi / 2 and
abs(rotation_y - predicted_rotation_y) <= np.pi * 1.5
):
predicted_rotation_y = normalize_angle(predicted_rotation_y + np.pi)
if abs(rotation_y - predicted_rotation_y) >= np.pi * 1.5:
if rotation_y > 0:
predicted_rotation_y += np.pi * 2
else:
predicted_rotation_y -= np.pi * 2
self.kalman_filter.x[3] = predicted_rotation_y
self.kalman_filter.update(np.array([
*location, rotation_y, *dimensions
]))
def predict(self) -> np.ndarray:
self.kalman_filter.predict()
self.kalman_filter.x[3] = normalize_angle(self.kalman_filter.x[3])
self.predictions.append(self.kalman_filter.x)
return self.kalman_filter.x
def tracking_bounding_box_3d(self, frame):
return TrackingAnnotation3D(
self.name, self.truncated, self.occluded,
self.alpha, self.bbox2d,
self.kalman_filter.x[4:].reshape(-1),
self.kalman_filter.x[:3].reshape(-1),
float(self.kalman_filter.x[3]),
self.id,
self.confidence,
frame,
)
def age(self, frame):
return frame - self.start_frame
def staleness(self, frame):
return frame - self.last_update_frame
def normalize_angle(angle):
if angle >= np.pi:
angle -= np.pi * 2
if angle < -np.pi:
angle += np.pi * 2
return angle
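# A minimal usage sketch (illustrative only, not part of the original module):
#     tracker = KalmanTracker3D(detection, id=0, frame=0)   # detection: a BoundingBox3D
#     tracker.predict()                                     # advance the constant-velocity state
#     tracker.update(new_detection, frame=1)                # fuse a new measurement
#     box = tracker.tracking_bounding_box_3d(frame=1)       # read back a TrackingAnnotation3D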
|
StarcoderdataPython
|
12821861
|
from collections import Counter
n = int(input())
a = list(map(int, input().split()))
Q = int(input())
queries = [list(map(int, input().split())) for _ in range(Q)]
ans = sum(a)
A = Counter(a)
for q in queries:
ans += (q[1] - q[0]) * A[q[0]]
A[q[1]] += A[q[0]]
A[q[0]] = 0
print(ans)
|
StarcoderdataPython
|
11315778
|
<gh_stars>1-10
# throw.py
# The CIL throw instruction
# Copyright 2010 <NAME> - see LICENSE for details
from Instruction import Instruction
import unittest
from Instructions.Instruction import register
class throw(Instruction):
    def __init__(self, arguments=None):
self.name = 'throw'
self.target = ''
def execute(self, vm):
pass
register('throw', throw)
class throwTest(unittest.TestCase):
def test_throw_no_arguments_throws_exception(self):
from VM import VM
vm = VM()
x = throw('asdf') # fixme optional parameters
x.execute(vm)
index = vm.get_instruction_pointer()
self.assertEqual(3, index);
def test_throw_object(self):
from VM import VM
vm = VM()
x = throw()
x.execute(vm)
index = vm.get_instruction_pointer()
self.assertEqual(3, index);
|
StarcoderdataPython
|
8090267
|
<gh_stars>1-10
#!/usr/bin/python
from datetime import *
import csv
import sys
import time
log_file = open(sys.argv[1], 'rb')
log_iter = csv.reader(log_file, delimiter=':')
now = datetime.now()
now = datetime.strptime(''.join([str(now.year), ' ',
str(now.month), ' ',
str(now.day), ' ',
str(now.hour), ':',
str(now.minute), ':',
str(now.second), ' GMT']),
'%Y %m %d %H:%M:%S %Z').strftime('%s')
#now = time.localtime()
f = '%Y %b %d %H:%M:%S %Z'
for i, row in enumerate(log_iter):
row[2] = row[2][:2]
s = ''.join(['2012 ', row[0], ':', row[1], ':', row[2], ' GMT'])
t = datetime.strptime(s, f)
#t = time.strptime(s, f)
n = t.strftime('%s')
if (n > now):
s = ''.join(['2011 ', row[0], ':', row[1], ':', row[2], ' GMT'])
t = datetime.strptime(s, f)
#t = time.strptime(s, f)
n = time.strftime('%s')
#print(s)
print(n)
|
StarcoderdataPython
|
4903423
|
<filename>xskillscore/tests/test_accessor_deterministic.py
import pytest
import xarray as xr
from xarray.tests import assert_allclose
from xskillscore.core.deterministic import (
effective_sample_size,
mae,
mape,
me,
median_absolute_error,
mse,
pearson_r,
pearson_r_eff_p_value,
pearson_r_p_value,
r2,
rmse,
smape,
spearman_r,
spearman_r_eff_p_value,
spearman_r_p_value,
)
correlation_metrics = [
pearson_r,
r2,
pearson_r_p_value,
spearman_r,
spearman_r_p_value,
effective_sample_size,
pearson_r_eff_p_value,
spearman_r_eff_p_value,
]
temporal_only_metrics = [
pearson_r_eff_p_value,
spearman_r_eff_p_value,
effective_sample_size,
]
distance_metrics = [
me,
mse,
rmse,
mae,
median_absolute_error,
mape,
smape,
]
AXES = ("time", "lat", "lon", ["lat", "lon"], ["time", "lat", "lon"])
def _ds(a, b, skipna_bool):
ds = xr.Dataset()
ds["a"] = a
ds["b"] = b
if skipna_bool is True:
ds["b"] = b.where(b < 0.5)
return ds
def adjust_weights(dim, weight_bool, weights):
"""
Adjust the weights test data to only span the core dimension
that the function is being applied over.
"""
if weight_bool:
drop_dims = [i for i in weights.dims if i not in dim]
drop_dims = {k: 0 for k in drop_dims}
return weights.isel(drop_dims)
else:
return None
@pytest.mark.parametrize("outer_bool", [False, True])
@pytest.mark.parametrize("metric", correlation_metrics + distance_metrics)
@pytest.mark.parametrize("dim", AXES)
@pytest.mark.parametrize("weight_bool", [False, True])
@pytest.mark.parametrize("skipna_bool", [False, True])
def test_deterministic_metrics_accessor(
a, b, dim, skipna_bool, weight_bool, weights, metric, outer_bool
):
# Update dim to time if testing temporal only metrics
if (dim != "time") and (metric in temporal_only_metrics):
dim = "time"
_weights = adjust_weights(dim, weight_bool, weights)
ds = _ds(a, b, skipna_bool)
b = ds["b"] # Update if populated with nans
if outer_bool:
ds = ds.drop_vars("b")
accessor_func = getattr(ds.xs, metric.__name__)
if metric in temporal_only_metrics or metric == median_absolute_error:
actual = metric(a, b, dim=dim, skipna=skipna_bool)
if outer_bool:
expected = accessor_func("a", b, dim=dim, skipna=skipna_bool)
else:
expected = accessor_func("a", "b", dim=dim, skipna=skipna_bool)
else:
actual = metric(a, b, dim=dim, weights=_weights, skipna=skipna_bool)
if outer_bool:
expected = accessor_func(
"a", b, dim=dim, weights=_weights, skipna=skipna_bool
)
else:
expected = accessor_func(
"a", "b", dim=dim, weights=_weights, skipna=skipna_bool
)
assert_allclose(actual, expected)
|
StarcoderdataPython
|
3314037
|
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param: A: an integer array
@return: A tree node
"""
def sortedArrayToBST(self, A):
return self.buildTree(A,0,len(A)-1)
def buildTree(self,A,start,end):
if(start>end):
return None
mid = (start+end) >> 1
node = TreeNode(A[mid])
node.left = self.buildTree(A,start,mid-1)
node.right = self.buildTree(A,mid+1,end)
return node
|
StarcoderdataPython
|
11376645
|
import os
checkpoints_path = '/homedtic/lperez/parsing-as-pretraining/runs_constituency_parsing/run1/output'
model_partial_name = 'pytorch_model'
for (root, dirs, files) in os.walk(checkpoints_path):
for file in files:
if file.startswith(model_partial_name):
model_path = root + '/' + file
print('Evaluating %s' % model_path)
checkpoint_path = '/'.join(model_path.split('/')[0:-1])
print('checkpoint_path = %s' % checkpoint_path)
|
StarcoderdataPython
|
8099817
|
import sys
import time
from dpa.app.server import AppServer
from dpa.cli.action import CommandLineAction
# ------------------------------------------------------------------------------
class ClApp(CommandLineAction):
name = "clapp"
# --------------------------------------------------------------------------
@classmethod
def setup_cl_args(cls, parser):
parser.add_argument(
"port",
type=int,
help="Port number to serve."
)
# --------------------------------------------------------------------------
def __init__(self, port):
super(ClApp, self).__init__(port)
self._port = port
self._server = None
self._shutdown = False
# --------------------------------------------------------------------------
def execute(self):
self._server = AppServer(
self.port,
shutdown_callback=self._shutdown_server,
)
self._server.start()
while not self._shutdown:
time.sleep(1)
sys.exit(0)
# --------------------------------------------------------------------------
def undo(self):
pass
# --------------------------------------------------------------------------
@property
def port(self):
return self._port
# --------------------------------------------------------------------------
@property
def server(self):
return self._server
# --------------------------------------------------------------------------
def _shutdown_server(self):
self._shutdown = True
|
StarcoderdataPython
|
6503206
|
from __future__ import absolute_import
import io
import os
import sys
import uuid
import warnings
import nbformat
from dagster_graphql.implementation.context import (
DagsterGraphQLContext,
DagsterSnapshotGraphQLContext,
)
from dagster_graphql.implementation.pipeline_execution_manager import (
QueueingSubprocessExecutionManager,
SubprocessExecutionManager,
)
from dagster_graphql.implementation.reloader import Reloader
from dagster_graphql.schema import create_schema
from dagster_graphql.version import __version__ as dagster_graphql_version
from flask import Flask, jsonify, request, send_file
from flask_cors import CORS
from flask_graphql import GraphQLView
from flask_sockets import Sockets
from graphql.execution.executors.gevent import GeventExecutor as Executor
from nbconvert import HTMLExporter
from dagster import ExecutionTargetHandle
from dagster import __version__ as dagster_version
from dagster import check, seven
from dagster.core.execution.compute_logs import warn_if_compute_logs_disabled
from dagster.core.instance import DagsterInstance
from dagster.core.snap.repository_snapshot import RepositorySnapshot
from dagster.core.storage.compute_log_manager import ComputeIOType
from .format_error import format_error_with_stack_trace
from .subscription_server import DagsterSubscriptionServer
from .templates.playground import TEMPLATE as PLAYGROUND_TEMPLATE
from .version import __version__
MISSING_SCHEDULER_WARNING = (
'You have defined ScheduleDefinitions for this repository, but have '
'not defined a scheduler on the instance'
)
class DagsterGraphQLView(GraphQLView):
def __init__(self, context, **kwargs):
super(DagsterGraphQLView, self).__init__(**kwargs)
self.context = check.inst_param(context, 'context', DagsterGraphQLContext)
def get_context(self):
return self.context
format_error = staticmethod(format_error_with_stack_trace)
def dagster_graphql_subscription_view(subscription_server, context):
context = check.inst_param(
context, 'context', (DagsterGraphQLContext, DagsterSnapshotGraphQLContext)
)
def view(ws):
subscription_server.handle(ws, request_context=context)
return []
return view
def info_view():
return (
jsonify(
dagit_version=__version__,
dagster_graphql_version=dagster_graphql_version,
dagster_version=dagster_version,
),
200,
)
def index_view(_path):
try:
return send_file(os.path.join(os.path.dirname(__file__), './webapp/build/index.html'))
except seven.FileNotFoundError:
text = '''<p>Can't find webapp files. Probably webapp isn't built. If you are using
dagit, then probably it's a corrupted installation or a bug. However, if you are
developing dagit locally, your problem can be fixed as follows:</p>
<pre>cd ./python_modules/
make rebuild_dagit</pre>'''
return text, 500
def notebook_view(request_args):
check.dict_param(request_args, 'request_args')
# This currently provides open access to your file system - the very least we can
# do is limit it to notebook files until we create a more permanent solution.
path = request_args['path']
if not path.endswith('.ipynb'):
return 'Invalid Path', 400
with open(os.path.abspath(path)) as f:
read_data = f.read()
notebook = nbformat.reads(read_data, as_version=4)
html_exporter = HTMLExporter()
html_exporter.template_file = 'basic'
(body, resources) = html_exporter.from_notebook_node(notebook)
return '<style>' + resources['inlining']['css'][0] + '</style>' + body, 200
def download_view(context):
context = check.inst_param(
context, 'context', (DagsterGraphQLContext, DagsterSnapshotGraphQLContext)
)
def view(run_id, step_key, file_type):
run_id = str(uuid.UUID(run_id)) # raises if not valid run_id
        step_key = step_key.split('/')[-1]  # make sure we're not diving deep into the file system
out_name = '{}_{}.{}'.format(run_id, step_key, file_type)
manager = context.instance.compute_log_manager
try:
io_type = ComputeIOType(file_type)
result = manager.get_local_path(run_id, step_key, io_type)
if not os.path.exists(result):
result = io.BytesIO()
timeout = None if manager.is_watch_completed(run_id, step_key) else 0
except ValueError:
result = io.BytesIO()
timeout = 0
if not result:
result = io.BytesIO()
return send_file(
result, as_attachment=True, attachment_filename=out_name, cache_timeout=timeout
)
return view
def instantiate_app_with_views(context):
app = Flask(
'dagster-ui',
static_url_path='',
static_folder=os.path.join(os.path.dirname(__file__), './webapp/build'),
)
sockets = Sockets(app)
app.app_protocol = lambda environ_path_info: 'graphql-ws'
schema = create_schema()
subscription_server = DagsterSubscriptionServer(schema=schema)
app.add_url_rule(
'/graphql',
'graphql',
DagsterGraphQLView.as_view(
'graphql',
schema=schema,
graphiql=True,
# XXX(freiksenet): Pass proper ws url
graphiql_template=PLAYGROUND_TEMPLATE,
executor=Executor(),
context=context,
),
)
sockets.add_url_rule(
'/graphql', 'graphql', dagster_graphql_subscription_view(subscription_server, context)
)
app.add_url_rule(
# should match the `build_local_download_url`
'/download/<string:run_id>/<string:step_key>/<string:file_type>',
'download_view',
download_view(context),
)
# these routes are specifically for the Dagit UI and are not part of the graphql
# API that we want other people to consume, so they're separate for now.
# Also grabbing the magic global request args dict so that notebook_view is testable
app.add_url_rule('/dagit/notebook', 'notebook', lambda: notebook_view(request.args))
app.add_url_rule('/dagit_info', 'sanity_view', info_view)
app.register_error_handler(404, index_view)
CORS(app)
return app
def get_execution_manager(instance):
execution_manager_settings = instance.dagit_settings.get('execution_manager')
if execution_manager_settings and execution_manager_settings.get('max_concurrent_runs'):
return QueueingSubprocessExecutionManager(
instance, execution_manager_settings.get('max_concurrent_runs')
)
return SubprocessExecutionManager(instance)
def create_app_with_snapshot(repository_snapshot, instance):
check.inst_param(repository_snapshot, 'snapshot', RepositorySnapshot)
check.inst_param(instance, 'instance', DagsterInstance)
execution_manager = get_execution_manager(instance)
warn_if_compute_logs_disabled()
print('Loading repository...')
context = DagsterSnapshotGraphQLContext(
repository_snapshot=repository_snapshot,
instance=instance,
execution_manager=execution_manager,
version=__version__,
)
return instantiate_app_with_views(context)
def create_app_with_execution_handle(handle, instance, reloader=None):
check.inst_param(handle, 'handle', ExecutionTargetHandle)
check.inst_param(instance, 'instance', DagsterInstance)
check.opt_inst_param(reloader, 'reloader', Reloader)
execution_manager = get_execution_manager(instance)
warn_if_compute_logs_disabled()
print('Loading repository...')
context = DagsterGraphQLContext(
handle=handle,
instance=instance,
execution_manager=execution_manager,
reloader=reloader,
version=__version__,
)
    # Automatically initialize the scheduler every time Dagit loads
scheduler_handle = context.scheduler_handle
scheduler = instance.scheduler
if scheduler_handle:
if scheduler:
handle = context.get_handle()
python_path = sys.executable
repository_path = handle.data.repository_yaml
repository = context.get_repository()
scheduler_handle.up(
python_path, repository_path, repository=repository, instance=instance
)
else:
warnings.warn(MISSING_SCHEDULER_WARNING)
return instantiate_app_with_views(context)
|
StarcoderdataPython
|
1883104
|
<filename>ipfs_lod.py
# -*- coding: utf-8 -*-
"""Publishes LOD datasets over IPFS based on their W3C VoID descriptions.
Assumptions (maybe not completely reasonable)
=============================================
- Datasets are described in VoID documents.
- Versioning can be discovered by looking at the dcterms:modified property.
- Actual data can be accessed via void:dataDump properties.
- Both the VoID description and the dataset are sent to IPFS.
- The VoID description is modified to include the addresses of the dumps over IPFS.
"""
import ipfsapi
import logging
from lodataset import LODatasetDescription
import os
import wget
import shutil
class IPFSLODPublisher(object):
def __init__(self, dataset, client='127.0.0.1', port = 5001):
""" Build the publisher from a LODataset.
"""
self.dataset = dataset
self.dataset_id = dataset.id
self.last_modified = dataset["modified"]
self.api = ipfsapi.connect(client, port)
self.was_updated = True
logging.getLogger().setLevel(logging.INFO)
logging.info("Dataset " + dataset.id)
logging.info("Last modified " +
self.last_modified.toPython().strftime("%Y-%m-%d %H:%M:%S"))
def update(self):
""" Reload the dataset and its description.
If it was modified since last update, flags it for next publish.
"""
lod = LODatasetDescription(self.dataset.desc.uri,
self.dataset.desc.well_known)
self.dataset = lod[self.dataset_id]
newtime = self.dataset["modified"].toPython()
# Check if the new last modification is more recent:
if newtime > self.last_modified.toPython():
self.was_updated = True
logging.info("Dataset updated.")
else:
logging.info("Dataset remains the same.")
self.last_modified = self.dataset["modified"]
def publish(self, style="folder"):
"""Publish the Dataset to IPFS.
Styles
======
"folder" : the VOID file and dump files go in a common folder.
"ipfsld" : a VOID file is augmented with IPFSLD links (not implemented)
"""
if self.was_updated:
self.was_updated = False
if style=="folder":
# Create the folder:
folder = self.dataset.id.replace("/", "_")
folder = folder + self.last_modified.toPython().strftime("%Y_%m_%d_%H:%M:%S")
print(folder)
if not os.path.exists(folder):
os.mkdir(folder)
os.chdir(folder)
# Serialize the VOID:
#TODO: Include only the descriptions of the dataset, not all of them.
self.dataset.desc.g.serialize(destination='void.ttl', format='turtle')
# Get the dumps:
dumps = self.dataset["dataDump"]
# check if it is single dump:
if not isinstance(dumps, list):
dumps = [dumps]
for dump in dumps:
wget.download(dump)
os.chdir("..")
# Add to IPFS:
res = self.api.add(folder, recursive=False)
for r in res:
if r["Name"] == folder:
self.ipfs_addr = r["Hash"]
logging.info(res)
# cleanup
shutil.rmtree(folder)
else:
raise ValueError("Publishing style " + style + "not supported." )
|
StarcoderdataPython
|
4874652
|
<reponame>master-fufu/OpenATC<gh_stars>1-10
import timeit
import requests
import re
import time  # needed for time.sleep() below; assuming it is not already provided by `from getconf import *`
from multiprocessing.dummy import Pool as ThreadPool
from bs4 import BeautifulSoup as bs
from getconf import *
# TO DO: early link capability
# Constants
base_url = 'http://www.supremenewyork.com'
# Inputs
keywords_category = ['accessories'] # Demo stuff, feel free to change
keywords_model = ['Mendini', 'Tray', 'Ceramic']
keywords_style = ['Multi']
use_early_link = True
early_link = ''
# Functions
def product_page(url):
session = requests.Session()
response = session.get(base_url + url)
soup = bs(response.text, 'html.parser')
h1 = soup.find('h1', {'itemprop': 'name'})
p = soup.find('p', {'itemprop': 'model'})
if not h1 is None:
name = h1.getText()
if not p is None:
style = p.getText()
for keyword in keywords_model:
if keyword in name:
for keyword in keywords_style:
if keyword in style:
print('FOUND: ' + name + ' AT ' + base_url + url)
form = soup.find('form', {'action': re.compile('(?<=/shop/)(.*)(?=/add)')})
if form is not None:
payload = {
'utf8': '✓',
'authenticity_token': form.find('input', {'name': 'authenticity_token'})['value'],
'size': form.find('input', {'name': 'size'})['value'],
'commit': 'add to cart'
}
response1 = session.post(base_url + form['action'], data=payload)
print('Added to cart!')
time.sleep(3)
return session
def format_phone(n):
return '({}) {}-{}'.format(n[:3], n[3:6], n[6:])
def format_cc(n):
return '{} {} {} {}'.format(n[:4], n[4:8], n[8:12], n[12:])
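# e.g. format_phone('5551234567') -> '(555) 123-4567'; format_cc('1234567812345678') -> '1234 5678 1234 5678'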
def checkout(session):
print('Filling out checkout info...')
response = session.get('https://www.supremenewyork.com/checkout')
soup = bs(response.text, 'html.parser')
form = soup.find('form', {'action': '/checkout'})
payload = {
'utf8': '✓',
'authenticity_token': form.find('input', {'name': 'authenticity_token'})['value'],
'order[billing_name]': first_name + ' ' + last_name,
'order[email]': email,
'order[tel]': format_phone(phone_number),
'order[billing_address]': shipping_address_1,
'order[billing_address_2]': shipping_apt_suite,
'order[billing_zip]': shipping_zip,
'order[billing_city]': shipping_city,
'order[billing_state]': shipping_state,
'order[billing_country]': shipping_country_abbrv,
'same_as_billing_address': '1',
'store_credit_id': '',
'credit_card[type]': card_type,
'credit_card[cnb]': format_cc(card_number),
'credit_card[month]': card_exp_month,
'credit_card[year]': card_exp_year,
'credit_card[vval]': card_cvv,
'order[terms]': '1',
'hpcvv': '',
'cnt': '2'
}
response = session.get('https://www.supremenewyork.com/checkout.js', data=payload)
payload = {
'utf8': '✓',
'authenticity_token': form.find('input', {'name': 'authenticity_token'})['value'],
'order[billing_name]': first_name + ' ' + last_name,
'order[email]': email,
'order[tel]': format_phone(phone_number),
'order[billing_address]': shipping_address_1,
'order[billing_address_2]': shipping_apt_suite,
'order[billing_zip]': shipping_zip,
'order[billing_city]': shipping_city,
'order[billing_state]': shipping_state_abbrv,
'order[billing_country]': shipping_country_abbrv,
'same_as_billing_address': '1',
'store_credit_id': '',
'credit_card[type]': card_type,
'credit_card[cnb]': format_cc(card_number),
'credit_card[month]': card_exp_month,
'credit_card[year]': card_exp_year,
'credit_card[vval]': card_cvv,
'order[terms]': '1',
'hpcvv': ''
}
response = session.post('https://www.supremenewyork.com/checkout', data=payload)
if 'Your order has been submitted' in response.text:
print('Checkout was successful!')
else:
print('Oops! There was an error.')
# Main
start = timeit.default_timer()
session1 = requests.Session()
response1 = session1.get('http://www.supremenewyork.com/shop/all')
soup1 = bs(response1.text, 'html.parser')
links1 = soup1.find_all('a', href=True)
links_by_keyword1 = []
for link in links1:
for keyword in keywords_category:
if keyword in link['href']:
links_by_keyword1.append(link['href'])
pool1 = ThreadPool(len(links_by_keyword1))
nosession = True
while nosession:
print('Finding matching products...')
result1 = pool1.map(product_page, links_by_keyword1)
for session in result1:
if not session is None:
nosession = False
checkout(session)
break
stop = timeit.default_timer()
print(stop - start) # Get the runtime
|
StarcoderdataPython
|
5067769
|
<reponame>compstorylab/covid19ngrams<gh_stars>0
import matplotlib.font_manager as fm
fm._rebuild()
noto = [f.name for f in fm.fontManager.ttflist if 'Noto Sans' in f.name]
fonts = {
'Default': fm.FontProperties(family=["sans-serif"]),
'Korean': fm.FontProperties(family=["Noto Sans CJK KR", "Noto Sans CJK", "sans-serif"]),
'Tamil': fm.FontProperties(family=["Noto Sans Tamil", "sans-serif"]),
}
at_color = 'k'
ot_color = 'C0'
rt_color = 'C1'
tags = 'A B C D E F G H I J K L M N O P Q R S T U V W X Y Z'.split(' ')
contagiograms = {
'virus_12': [
('virus', 'en'), ('virus', 'es'), ('vírus', 'pt'), ('فيروس', 'ar'),
('바이러스', 'ko'), ('virus', 'fr'), ('virus', 'id'), ('virüs', 'tr'),
('Virus', 'de'), ('virus', 'it'), ('вирус', 'ru'), ('virus', 'tl'),
],
'virus_24': [
('virus', 'hi'), ('ویروس', 'fa'), ('وائرس', 'ur'), ('wirus', 'pl'),
('virus', 'ca'), ('virus', 'nl'), ('virus', 'ta'), ('ιός', 'el'),
('virus', 'sv'), ('вирус', 'sr'), ('virus', 'fi'), ('вірус', 'uk'),
],
'samples_1grams_12': [
('coronavirus', 'en'), ('cuarentena', 'es'), ('corona', 'pt'), ('كورونا', 'ar'),
('바이러스', 'ko'), ('quarantaine', 'fr'), ('virus', 'id'), ('virüs', 'tr'),
('Quarantäne', 'de'), ('quarantena', 'it'), ('карантин', 'ru'), ('virus', 'tl'),
],
'samples_1grams_24': [
('virus', 'hi'), ('قرنطینه', 'fa'), ('مرضی', 'ur'), ('testów', 'pl'),
('confinament', 'ca'), ('virus', 'nl'), ('ரஜ', 'ta'), ('σύνορα', 'el'),
('Italien', 'sv'), ('mere', 'sr'), ('manaa', 'fi'), ('BARK', 'uk'),
],
'samples_2grams': [
('social distancing', 'en'), ('public health', 'en'), ('the lockdown', 'en'), ('health workers', 'en'),
('small businesses', 'en'), ('stimulus check', 'en'), ('during quarantine', 'en'), ('<NAME>', 'en'),
('laid off', 'en'), ('panic buying', 'en'), ('stay home', 'en'), ('cultural reset', 'en'),
],
}
words_by_country = {
'United States': [
('coronavirus', 'en'), ('pandemic', 'en'), ('virus', 'en'), ('lockdown', 'en'), ('quarantine', 'en'),
('deaths', 'en'), ('masks', 'en'), ('cases', 'en'), ('distancing', 'en'), ('China', 'en'),
],
'Brazil': [
('quarentena', 'pt'), ('coronavírus', 'pt'), ('vírus', 'pt'), ('paredão', 'pt'), ('isolamento', 'pt'),
('corona', 'pt'), ('governadores', 'pt'), ('China', 'pt'), ('máscara', 'pt'), ('casos', 'pt'),
],
'India': [
('तरजन', 'hi'), ('Lockdown', 'hi'), ('Corona', 'hi'), ('शट', 'hi'), ('PPE', 'hi'),
('ऊन', 'hi'), ('Sadhna', 'hi'), ('आपद', 'hi'), ('Tvईश', 'hi'), ('WHO', 'hi'),
],
'Russia': [
('коронавируса', 'ru'), ('коронавирусом', 'ru'), ('карантина', 'ru'), ('самоизоляции', 'ru'), ('карантин', 'ru'),
('коронавирус', 'ru'), ('пандемии', 'ru'), ('карантине', 'ru'), ('маски', 'ru'), ('эпидемии', 'ru'),
],
'Mexico': [
('cuarentena', 'es'), ('pandemia', 'es'), ('coronavirus', 'es'), ('virus', 'es'), ('confinamiento', 'es'),
('mascarillas', 'es'), ('casos', 'es'), ('salud', 'es'), ('sanitaria', 'es'), ('fallecidos', 'es'),
],
'Iran': [
('کرونا', 'fa'), ('ویروس', 'fa'), ('قرنطینه', 'fa'), ('ماسک', 'fa'), ('چین', 'fa'),
('شیوع', 'fa'), ('بهداشت', 'fa'), ('مبتلا', 'fa'), ('ساعات', 'fa'), ('بیماری', 'fa'),
],
'Korea, South': [
('바이러스', 'ko'), ('코로나', 'ko'), ('코로나19', 'ko'), ('마스크', 'ko'), ('온라인', 'ko'),
('사회적', 'ko'), ('확진자', 'ko'), ('신상공개', 'ko'), ('커버', 'ko'), ('모집', 'ko'),
],
'Italy': [
('Coronavirus', 'it'), ('quarantena', 'it'), ('virus', 'it'), ('mascherine', 'it'), ('pandemia', 'it'),
('Conte', 'it'), ('contagi', 'it'), ('mascherina', 'it'), ('Covid', 'it'), ('lockdown', 'it'),
],
'France': [
('confinement', 'fr'), ('masques', 'fr'), ('Coronavirus', 'fr'), ('virus', 'fr'), ('masque', 'fr'),
('pandémie', 'fr'), ('sanitaire', 'fr'), ('crise', 'fr'), ('tests', 'fr'), ('soignants', 'fr'),
],
'Germany': [
('Corona', 'de'), ('Masken', 'de'), ('Virus', 'de'), ('Krise', 'de'), ('Coronavirus', 'de'),
('Pandemie', 'de'), ('Maske', 'de'), ('Abstand', 'de'), ('Quarantäne', 'de'), ('Lockdown', 'de'),
],
'Sweden': [
('Corona', 'sv'), ('smittade', 'sv'), ('viruset', 'sv'), ('coronakrisen', 'sv'), ('äldreboenden', 'sv'),
('skyddsutrustning', 'sv'), ('dödsfall', 'sv'), ('krisen', 'sv'), ('munskydd', 'sv'), ('döda', 'sv'),
],
'Turkey': [
('maske', 'tr'), ('virüs', 'tr'), ('çıkma', 'tr'), ('sağlık', 'tr'), ('koronavirüs', 'tr'),
('vaka', 'tr'), ('evde', 'tr'), ('yardım', 'tr'), ('yasağı', 'tr'), ('Korona', 'tr'),
],
}
|
StarcoderdataPython
|
1654666
|
<reponame>saripirala/file_compare<filename>setup.py<gh_stars>10-100
#!/usr/bin/env python3
import setuptools
setuptools.setup()
|
StarcoderdataPython
|
8179652
|
"""
Copyright (C) 2020-2021 <NAME> <...>
"""
import warnings
from contextlib import contextmanager
from typing import Any, Generator, List, Optional, Type, Union
def _warns_repr(warns: List[warnings.WarningMessage]) -> List[Union[Warning, str]]:
return [w.message for w in warns]
@contextmanager
def no_warning_call(warning_type: Optional[Type[Warning]] = None, match: Optional[str] = None) -> Generator:
"""
Args:
warning_type: specify catching warning, if None catching all
match: match message, containing following string, if None catches all
Raises:
AssertionError: if specified warning was called
"""
with warnings.catch_warnings(record=True) as called:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
yield
# no warning raised
if not called:
return
if not warning_type:
raise AssertionError(f'While catching all warnings, these were found: {_warns_repr(called)}')
# filter warnings by type
warns = [w for w in called if issubclass(w.category, warning_type)]
# Verify some things
if not warns:
return
if not match:
raise AssertionError(
f'While catching `{warning_type.__name__}` warnings, these were found: {_warns_repr(warns)}'
)
found = [w for w in warns if match in w.message.__str__()]
if found:
raise AssertionError(
f'While catching `{warning_type.__name__}` warnings with "{match}",'
f' these were found: {_warns_repr(found)}'
)
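# Example usage (an illustrative sketch, not part of the original module):
#     with no_warning_call(DeprecationWarning, match="deprecated"):
#         do_something()  # raises AssertionError if a matching DeprecationWarning is emitted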
def void(*args: Any, **kwrgs: Any) -> Any:
"""Empty function which does nothing, just let your IDE stop complaining about unused arguments."""
_, _ = args, kwrgs
|
StarcoderdataPython
|
5198903
|
<filename>app.py
#----------------------------------------------------------------------------#
# Imports
#----------------------------------------------------------------------------#
from flask import Flask, render_template, request, flash, redirect, url_for
# from flask.ext.sqlalchemy import SQLAlchemy
import logging
from logging import Formatter, FileHandler
from forms import *
import os
import numpy as np
# import io
# import csv
# from werkzeug.utils import secure_filename
# from flask_sqlalchemy import SQLAlchemy
import pandas as pd
# from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.cluster import KMeans
from flask import jsonify
#----------------------------------------------------------------------------#
# App Config.
#----------------------------------------------------------------------------#
app = Flask(__name__)
# app.config.from_object('config')
# app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///example.sqlite"
# db = SQLAlchemy(app)
# db.init_app(app)
# class Csv(db.Model):
# __tablename__ = "csvs"
# id = db.Column(db.Integer, primary_key=True)
# filename = db.Column(db.String, nullable=False)
#
# db.create_all()
#
UPLOAD_FOLDER = 'static/csv'
# ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif', 'csv'}
#
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
#db = SQLAlchemy(app)
# Automatically tear down SQLAlchemy.
'''
@app.teardown_request
def shutdown_session(exception=None):
db_session.remove()
'''
# Login required decorator.
'''
def login_required(test):
@wraps(test)
def wrap(*args, **kwargs):
if 'logged_in' in session:
return test(*args, **kwargs)
else:
flash('You need to login first.')
return redirect(url_for('login'))
return wrap
'''
#----------------------------------------------------------------------------#
# Controllers.
#----------------------------------------------------------------------------#
@app.route('/')
def home():
return render_template('pages/placeholder.home.html')
@app.route('/about')
def about():
return render_template('pages/placeholder.about.html')
@app.route('/login')
def login():
form = LoginForm(request.form)
return render_template('forms/login.html', form=form)
@app.route('/register')
def register():
form = RegisterForm(request.form)
return render_template('forms/register.html', form=form)
@app.route('/forgot')
def forgot():
form = ForgotForm(request.form)
return render_template('forms/forgot.html', form=form)
# def allowed_file(filename):
# return '.' in filename and \
# filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/upload', methods=['POST'])
def upload_file():
global data
global path_to_file
global m_labels
if request.method == 'POST':
f = request.files['file']
path_to_file = os.path.join(app.config['UPLOAD_FOLDER'], '1.csv')
f.save(os.path.join(app.config['UPLOAD_FOLDER'], '1.csv'))
# csvfile = Csv(filename=secure_filename(f.filename))
# db.session.add(csv)
# db.session.commit()
data = pd.read_csv(path_to_file)
head_data = np.array(data.head(10))
m_size = np.shape(head_data)
m_labels = list(data)
#------------------------------------------------------
m_str = '<div class="limiter"><div><div><div><table>'
for m in range(len(m_labels)):
if m == 0: m_str +='<thead><tr class="table100-head">'
m_str += '<th class="columns">' +m_labels[m]+"</th>"
if m == len(m_labels)-1: m_str +='</tr></thead>'
# ------------------------------------------------------
m_str+='<tbody>'
for n in range(m_size[0]):
for m in range(len(m_labels)):
if m == 0: m_str += '<tr>'
m_str += '<td class="columns">' +str( head_data[n][m] )+ "</td>"
if m == len(m_labels) - 1: m_str += '</tr>'
m_str +="</tbody></table></div></div></div></div>"
# res = {}
return m_str
# Error handlers.
@app.route('/result', methods=['POST'])
def get_result():
global labels
global data
if request.method == 'POST':
cluster_n = np.int16(request.values['cohorts'])
le = LabelEncoder()
m_str = ""
if len(data)==0:
return m_str
data1 = pd.DataFrame.copy(data)
        category_ind = [2, 4]  # column indices of the categorical features to label-encode
        for n in category_ind:
m_category_name = m_labels[n]
le.fit(data1[m_category_name].values)
data1[m_category_name] = le.transform(data1[m_category_name].values)
# Initializing KMeans
kmeans = KMeans(n_clusters=cluster_n)
# Fitting with inputs
kmeans = kmeans.fit(data1)
# Predicting the clusters
labels = kmeans.predict(data1)
# Getting the cluster centers
C = kmeans.cluster_centers_
# ------------------------------------------------------
m_str = '<div class="limiter"><div><div><div><table>'
for m in range(len(m_labels)):
if m == 0:
m_str += '<thead><tr class="table100-head"><th class="columns">Cohort</th>'
m_str += '<th class="columns">' + m_labels[m] + "</th>"
if m == len(m_labels) - 1: m_str += '</tr></thead>'
# ------------------------------------------------------
m_str += '<tbody>'
json_string = '{'
for n in range(len(C)):
json_string += '"' + str(n) + '": [';
for m in range(len(m_labels)):
if m == 0:
m_str += '<tr data-toggle="modal" data-target="#myModal" class="cluster-row" id="' + str(n) + '" onclick="getCluster(' + str(n) + ')">'
m_str +='<td class="columns">' + str(n+1) + "</td>"
json_string += str(C[n][m])
else:
json_string += ',' + str(C[n][m])
m_str += '<td class="columns">' + str(np.round(C[n][m], 3)) + "</td>"
if m == len(m_labels) - 1: m_str += '</tr>'
if n == len(C) - 1:
json_string += ']'
else:
json_string += '],'
m_str += "</tbody></table></div></div></div></div>"
json_string += '}'
m_str += ";" + json_string
return m_str
@app.route('/cluster', methods=['POST'])
def get_cluster():
if request.method == 'POST':
clusterID = np.int16(request.values['clusterID'])
data2 =np.array(data[labels==clusterID])
# ------------------------------------------------------
m_str = '<div class="limiter"><div><div><div><table>'
for m in range(len(m_labels)):
if m == 0:
m_str += '<thead><tr class="table100-head"><th class="columns">No</th>'
m_str += '<th class="columns">' + m_labels[m] + "</th>"
if m == len(m_labels) - 1: m_str += '</tr></thead>'
# ------------------------------------------------------
m_str += '<tbody>'
for n in range(len(data2)):
for m in range(len(m_labels)):
if m == 0:
m_str += '<tr data-toggle="modal" data-target="#myModal" >'
m_str +='<td class="columns">' + str(n+1) + "</td>"
if m==2 or m==4:
m_str += '<td class="columns">' + str(data2[n, m]) + "</td>"
else:
m_str += '<td class="columns">' + str(np.round(data2[n, m], 3)) + "</td>"
if m == len(m_labels) - 1: m_str += '</tr>'
m_str += "</tbody></table></div></div></div></div>"
return m_str
@app.errorhandler(500)
def internal_error(error):
#db_session.rollback()
return ""#render_template('errors/500.html'), 500
@app.errorhandler(404)
def not_found_error(error):
return ""#render_template('errors/404.html'), 404
if not app.debug:
file_handler = FileHandler('error.log')
file_handler.setFormatter(
Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')
)
app.logger.setLevel(logging.INFO)
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.info('errors')
#----------------------------------------------------------------------------#
# Launch.
#----------------------------------------------------------------------------#
# Default port:
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
app.run(host='localhost', port=port)
# app.run()
# Or specify port manually:
'''
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port)
'''
|
StarcoderdataPython
|
1684811
|
<reponame>lindenmp/neurodev_long
#!/usr/bin/env python
# coding: utf-8
# # Preamble
# In[1]:
# Essentials
import os, sys, glob
import pandas as pd
import numpy as np
import nibabel as nib
# Stats
import scipy as sp
from scipy import stats
import statsmodels.api as sm
import pingouin as pg
# Plotting
import seaborn as sns
import matplotlib.pyplot as plt
plt.rcParams['svg.fonttype'] = 'none'
# In[2]:
sys.path.append('/Users/lindenmp/Dropbox/Work/ResProjects/neurodev_long/code/func/')
from proj_environment import set_proj_env
from func import get_cmap
# In[3]:
exclude_str = 't1Exclude'
parcel_names, parcel_loc, drop_parcels, num_parcels, yeo_idx, yeo_labels = set_proj_env(exclude_str = exclude_str)
# ### Setup output directory
# In[4]:
print(os.environ['MODELDIR'])
if not os.path.exists(os.environ['MODELDIR']): os.makedirs(os.environ['MODELDIR'])
# # Load in metadata
# In[5]:
# Protocol
prot = pd.read_csv(os.path.join(os.environ['DERIVSDIR'], 'pncDataFreeze20170905/n2416_dataFreeze/neuroimaging/n2416_pnc_protocol_validation_params_status_20170103.csv'))
# T1 QA
t1_qa = pd.read_csv(os.path.join(os.environ['DERIVSDIR'], 'pncDataFreeze20170905/n2416_dataFreeze/neuroimaging/t1struct/n2416_t1QaData_20170516.csv'))
# DTI QA
dti_qa = pd.read_csv(os.path.join(os.environ['DERIVSDIR'], 'pncDataFreeze20170905/n2416_dataFreeze/neuroimaging/dti/n2416_DTI64/n2416_dti_qa_20170301.csv'))
# REST QA
rest_qa = pd.read_csv(os.path.join(os.environ['DERIVSDIR'], 'pncDataFreeze20170905/n2416_dataFreeze/neuroimaging/rest/n2416_RestQAData_20170714.csv'))
# Demographics
demog = pd.read_csv(os.path.join(os.environ['DERIVSDIR'], 'pncDataFreeze20170905/n2416_dataFreeze/clinical/n2416_demographics_20170310.csv'))
# Brain volume
brain_vol = pd.read_csv(os.path.join(os.environ['DERIVSDIR'], 'pncDataFreeze20170905/n2416_dataFreeze/neuroimaging/t1struct/n2416_antsCtVol_20170412.csv'))
# incidental findings
inc_find = pd.read_csv(os.path.join(os.environ['DERIVSDIR'], 'pncDataFreeze20170905/n9498_dataFreeze/health/n9498_health_20170405.csv'))
# GOASSESS Bifactor scores
goassess = pd.read_csv(os.path.join(os.environ['DERIVSDIR'], 'GO_Longitudinal_clinical_factor_scores_psychosis_split_BIFACTOR.csv'))
goassess.set_index(['bblid'], inplace = True)
# merge
df = prot
df = pd.merge(df, t1_qa, on=['scanid', 'bblid']) # t1_qa
df = pd.merge(df, dti_qa, on=['scanid', 'bblid']) # dti_qa
df = pd.merge(df, rest_qa, on=['scanid', 'bblid']) # rest_qa
df = pd.merge(df, demog, on=['scanid', 'bblid']) # demog
df = pd.merge(df, brain_vol, on=['scanid', 'bblid']) # brain_vol
print(df.shape[0])
df.set_index(['bblid', 'scanid'], inplace = True)
df = df.sort_index(axis = 0, level = 0)
# In[6]:
df['scanageYears'] = np.round(df.scanageMonths/12, decimals=1)
# In[7]:
df_tmp = pd.merge(df, inc_find, on=['bblid']) # goassess
# In[8]:
df.loc[:,'incidentalFindingExclude'] = df_tmp.loc[:,'incidentalFindingExclude'].copy().values
# # Filter subjects
# Filter out subjects using the QA procedures generated by BBL.
# In[9]:
# 0) incidental findings
df = df[df['incidentalFindingExclude'] == 0]
print('N after incidentalFindingExclude:', df.shape[0])
# 2) T1 exclusion
df = df[df[exclude_str] == 0]
df = df[df['t1PostProcessExclude'] == 0]
print('N after T1 exclusion:', df.shape[0])
# ## Load in data
# In[10]:
metrics = ('ct', 'vol')
# In[11]:
# output dataframe
ct_labels = ['ct_' + str(i) for i in range(num_parcels)]
vol_labels = ['vol_' + str(i) for i in range(num_parcels)]
df_node = pd.DataFrame(index = df.index, columns = ct_labels + vol_labels)
print(df_node.shape)
# ### Thickness
# In[12]:
# subject filter
subj_filt = np.zeros((df.shape[0],)).astype(bool)
# In[13]:
CT = np.zeros((df.shape[0], num_parcels))
for (i, (index, row)) in enumerate(df.iterrows()):
file_name = os.environ['CT_NAME_TMP'].replace("bblid", str(index[0]))
file_name = file_name.replace("scanid", str(index[1]))
full_path = glob.glob(os.path.join(os.environ['CTDIR'], file_name))
if i == 0: print(full_path)
if len(full_path) > 0:
ct = np.loadtxt(full_path[0])
CT[i,:] = ct
elif len(full_path) == 0:
subj_filt[i] = True
df_node.loc[:,ct_labels] = CT
# In[14]:
np.sum(subj_filt)
# In[15]:
if any(subj_filt):
df = df.loc[~subj_filt]
df_node = df_node.loc[~subj_filt]
# In[16]:
print('N after excluding missing subjects:', df.shape[0])
# ### Volume
# In[17]:
# subject filter
subj_filt = np.zeros((df.shape[0],)).astype(bool)
# In[18]:
VOL = np.zeros((df.shape[0], num_parcels))
for (i, (index, row)) in enumerate(df.iterrows()):
file_name = os.environ['VOL_NAME_TMP'].replace("bblid", str(index[0]))
file_name = file_name.replace("scanid", str(index[1]))
full_path = glob.glob(os.path.join(os.environ['VOLDIR'], file_name))
if i == 0: print(full_path)
if len(full_path) > 0:
img = nib.load(full_path[0])
v = np.array(img.dataobj)
v = v[v != 0]
unique_elements, counts_elements = np.unique(v, return_counts=True)
if len(unique_elements) == num_parcels:
VOL[i,:] = counts_elements
else:
print(str(index) + '. Warning: not all parcels present')
subj_filt[i] = True
elif len(full_path) == 0:
subj_filt[i] = True
df_node.loc[:,vol_labels] = VOL
# In[19]:
np.sum(subj_filt)
# In[20]:
if any(subj_filt):
df = df.loc[~subj_filt]
df_node = df_node.loc[~subj_filt]
# In[21]:
print('N after excluding missing subjects:', df.shape[0])
# ### Multiple scans
# Screen out people who, due to the QA screening above, have non-continuous scans. For example, if an individual's T2 scan doesn't pass QA, but T1 and T3 do.
#
# Also, I retain those participants who have only single timepoints of data even if those timepoints aren't T1.
# In[22]:
keep_me = ([1],[2],[3],[1,2],[1,2,3])
idx_keep = []
idx_drop = []
for idx, data in df.groupby('bblid'):
my_list = list(data['timepoint'].values)
    if my_list in keep_me:
idx_keep.append(idx)
else:
idx_drop.append(idx)
# In[23]:
df = df.loc[idx_keep,:]
df_node = df_node.loc[idx_keep,:]
# In[24]:
print('N after exclusion non-continuous scans:', df.shape[0])
# ### Create new total time points column
# The above filtering steps creates a mismatch between the number of timepoints each participant has according to BBL recruitment and how many I retain for analysis.
#
# I create a new variable that counts the number of timpeoints each participant has after my filtering.
# In[25]:
for idx, data in df.groupby('bblid'):
df.loc[idx,'TotalNtimepoints_new'] = int(data.shape[0])
df.loc[:,'TotalNtimepoints_new'] = df.loc[:,'TotalNtimepoints_new'].astype(int)
# In[26]:
print('N w/ 1 timepoint:', df.loc[df['TotalNtimepoints_new'] == 1,:].shape[0])
print('N w/ >=2 timepoints:', int(df.loc[df['TotalNtimepoints_new'] == 2,:].shape[0]/2 + df.loc[df['TotalNtimepoints_new'] == 3,:].shape[0]/3))
print('N w/ 3 timepoints:', int(df.loc[df['TotalNtimepoints_new'] == 3,:].shape[0]/3))
# ### Concat clinical data
# Note, this will fill missing phenotype data with NaNs. I prioritise retaining the full imaging sample for now.
# In[27]:
df.reset_index(inplace = True)
df.set_index(['bblid', 'timepoint'], inplace = True)
goassess.reset_index(inplace = True)
goassess.set_index(['bblid', 'timepoint'], inplace = True)
# In[28]:
goassess.loc[:,'scanid'] = np.float('nan')
# In[29]:
for idx, data in df.iterrows():
goassess.loc[idx,'scanid'] = data['scanid']
# In[30]:
df_out = pd.merge(df, goassess, on=['bblid', 'scanid', 'timepoint']).reset_index()
df_out.set_index(['bblid', 'scanid', 'timepoint'], inplace = True)
# In[31]:
header = ['TotalNtimepoints', 'TotalNtimepoints_new', 'sex', 'race', 'ethnicity', 'scanageMonths', 'scanageYears', 'mprage_antsCT_vol_TBV', 'averageManualRating', 'dti32MeanRelRMS',
'Overall_Psychopathology', 'Mania', 'Depression', 'Psychosis_Positive', 'Psychosis_NegativeDisorg',]
df_out = df_out.loc[:,header]
# Designate the individuals with only 1 timepoint as 'train' (False) and individuals with longitudinal data as 'test' (True)
# In[32]:
df_out.loc[:,'train_test'] = df_out.loc[:,'TotalNtimepoints_new'] != 1
# In[33]:
df_out.head()
# ### Final numbers
# In[34]:
print('N w/ 1 timepoint:', df_out.loc[df_out['TotalNtimepoints_new'] == 1,:].shape[0])
print('N w/ >=2 timepoints:', int(df_out.loc[df_out['TotalNtimepoints_new'] == 2,:].shape[0]/2 + df_out.loc[df_out['TotalNtimepoints_new'] == 3,:].shape[0]/3))
print('N w/ 3 timepoints:', int(df_out.loc[df_out['TotalNtimepoints_new'] == 3,:].shape[0]/3))
# ### Export
# In[35]:
if np.all(df_out.index.get_level_values(0) == df_node.index.get_level_values(0)) and np.all(df_out.index.get_level_values(1) == df_node.index.get_level_values(1)):
df_node.index = df_out.index
# In[36]:
df_out.to_csv(os.path.join(os.environ['MODELDIR'], 'df_pheno.csv'))
df_node.to_csv(os.path.join(os.environ['MODELDIR'], 'df_node_base.csv'))
# In[37]:
# find unique ages
age_unique = np.unique(df_out['scanageYears'])
print('There are', age_unique.shape[0], 'unique age points')
# Check if train and test represent the full unique age space
train_diff = np.setdiff1d(df_out.loc[~df_out.loc[:,'train_test'],'scanageYears'],age_unique)
test_diff = np.setdiff1d(df_out.loc[df_out.loc[:,'train_test'],'scanageYears'],age_unique)
if train_diff.size == 0:
print('All unique age points are represented in the training set')
elif train_diff.size != 0:
print('All unique age points ARE NOT represented in the training set')
if test_diff.size == 0:
print('All unique age points are represented in the testing set')
elif test_diff.size != 0:
print('All unique age points ARE NOT represented in the testing set')
# # Plots
# In[38]:
labels = ['Train', 'Test']
if not os.path.exists(os.environ['FIGDIR']): os.makedirs(os.environ['FIGDIR'])
os.chdir(os.environ['FIGDIR'])
sns.set(style='white', context = 'paper', font_scale = 1)
cmap = get_cmap('pair')
# ## Age
# In[39]:
df_out.loc[:,'sex'].unique()
# Predictably the test set has more data in the upper tail of the age distribution. This is because I define the test set based on individuals with multiple time points. This will limit the capacity for the normative model to generate deviations in the upper age range.
# In[40]:
f, axes = plt.subplots(1,2)
f.set_figwidth(6.5)
f.set_figheight(2.5)
colormap = sns.color_palette("pastel", 2)
sns.distplot(df_out.loc[~df_out.loc[:,'train_test'],'scanageYears'], bins=20, hist=True, kde=False, rug=False, label = labels[0],
hist_kws={"histtype": "step", "linewidth": 2, "alpha": 1}, color=list(cmap[0]), ax = axes[0]);
sns.distplot(df_out.loc[df_out.loc[:,'train_test'],'scanageYears'], bins=20, hist=True, kde=False, rug=False, label = labels[1],
hist_kws={"histtype": "step", "linewidth": 2, "alpha": 1}, color=list(cmap[1]), ax = axes[0]);
axes[0].legend(prop={'size': 8});
axes[0].set_xlabel('Age (years)');
axes[0].set_ylabel('Number of participants');
axes[0].set_xticks(np.arange(np.min(np.round(age_unique,0)), np.max(np.round(age_unique,0)), 2))
# set width of bar
barWidth = 0.25
# Sex
y_train = [np.sum(df_out.loc[~df_out.loc[:,'train_test'],'sex'] == 1), np.sum(df_out.loc[~df_out.loc[:,'train_test'],'sex'] == 2)]
y_test = [np.sum(df_out.loc[df_out.loc[:,'train_test'],'sex'] == 1), np.sum(df_out.loc[df_out.loc[:,'train_test'],'sex'] == 2)]
r1 = np.arange(len(y_train))+barWidth/2
r2 = [x + barWidth for x in r1]
axes[1].bar(r1, y_train, width = barWidth, color = cmap[0], label = labels[0])
axes[1].bar(r2, y_test, width = barWidth, color = cmap[1], label = labels[1])
axes[1].set_xlabel('Sex')
axes[1].set_xticks([r + barWidth for r in range(len(y_train))])
axes[1].set_xticklabels(['Male', 'Female'])
f.savefig('age_distributions.svg', dpi = 150, bbox_inches = 'tight', pad_inches = 0)
|
StarcoderdataPython
|
5178344
|
<reponame>Shadowalker1995/Tutorial-Resource
import torch
from torch import nn
class AE(nn.Module):
def __init__(self):
super(AE, self).__init__()
# [b, 784] => [b, 20]
self.encoder = nn.Sequential(
nn.Linear(784, 256),
nn.ReLU(),
nn.Linear(256, 64),
nn.ReLU(),
nn.Linear(64, 20),
nn.ReLU()
)
# [b, 20] => [b, 784]
self.decoder = nn.Sequential(
nn.Linear(20, 64),
nn.ReLU(),
nn.Linear(64, 256),
nn.ReLU(),
nn.Linear(256, 784),
nn.Sigmoid()
)
def forward(self, x):
"""
:param x: [b, 1, 28, 28]
:return:
"""
batchsz = x.size(0)
# flatten
x = x.view(batchsz, 784)
# encoder
x = self.encoder(x)
# decoder
x = self.decoder(x)
# reshape
x = x.view(batchsz, 1, 28, 28)
return x, None
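# Minimal usage sketch (illustrative only; assumes MNIST-sized inputs of shape [b, 1, 28, 28]):
if __name__ == '__main__':
    model = AE()
    x = torch.randn(4, 1, 28, 28)   # dummy batch of 4 single-channel images
    x_hat, _ = model(x)             # forward returns (reconstruction, None)
    print(x_hat.shape)              # torch.Size([4, 1, 28, 28])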
|
StarcoderdataPython
|
1780843
|
"""
Metadata
"""
from typing import Union, Dict
Metadata = Union[str, Dict]
|
StarcoderdataPython
|
11269365
|
from flask_restx import Api
from flask import Blueprint
from .user.controller import api as user_ns
# Import controller APIs as namespaces.
api_bp = Blueprint("api", __name__)
api = Api(api_bp, title="API", description="Main routes.")
# API namespaces
api.add_namespace(user_ns)
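# Minimal wiring sketch (assumption: this blueprint is normally registered by an app factory
# elsewhere; the url_prefix below is illustrative):
if __name__ == "__main__":
    from flask import Flask

    app = Flask(__name__)
    app.register_blueprint(api_bp, url_prefix="/api")
    app.run(debug=True)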
|
StarcoderdataPython
|
240597
|
<filename>auth/tests/test_generate_token.py
from sage_utils.amqp.clients import RpcAmqpClient
from sage_utils.constants import NOT_FOUND_ERROR, VALIDATION_ERROR
from sage_utils.wrappers import Response
from app.token.api.workers.generate_token import GenerateTokenWorker
from app.users.documents import User
REQUEST_QUEUE = GenerateTokenWorker.QUEUE_NAME
REQUEST_EXCHANGE = GenerateTokenWorker.REQUEST_EXCHANGE_NAME
RESPONSE_EXCHANGE = GenerateTokenWorker.RESPONSE_EXCHANGE_NAME
async def test_generate_token_returns_a_new_token_pair(sanic_server):
await User.collection.delete_many({})
user = User(**{"username": "user", "password": "<PASSWORD>"})
await user.commit()
payload = {
"username": "user",
"password": "<PASSWORD>"
}
client = RpcAmqpClient(
sanic_server.app,
routing_key=REQUEST_QUEUE,
request_exchange=REQUEST_EXCHANGE,
response_queue='',
response_exchange=RESPONSE_EXCHANGE
)
response = await client.send(payload=payload)
assert Response.EVENT_FIELD_NAME in response.keys()
assert Response.CONTENT_FIELD_NAME in response.keys()
content = response[Response.CONTENT_FIELD_NAME]
assert len(content.keys()) == 2
assert sanic_server.app.config['JWT_ACCESS_TOKEN_FIELD_NAME'] in content.keys()
assert sanic_server.app.config['JWT_REFRESH_TOKEN_FIELD_NAME'] in content.keys()
await User.collection.delete_many({})
async def test_generate_token_returns_error_for_an_invalid_username(sanic_server):
await User.collection.delete_many({})
await User(**{"username": "user", "password": "<PASSWORD>"}).commit()
payload = {
"username": "NON_EXISTING_USER",
"password": "<PASSWORD>"
}
client = RpcAmqpClient(
sanic_server.app,
routing_key=REQUEST_QUEUE,
request_exchange=REQUEST_EXCHANGE,
response_queue='',
response_exchange=RESPONSE_EXCHANGE
)
response = await client.send(payload=payload)
assert Response.ERROR_FIELD_NAME in response.keys()
assert Response.EVENT_FIELD_NAME in response.keys()
error = response[Response.ERROR_FIELD_NAME]
assert len(error.keys()) == 2
assert Response.ERROR_TYPE_FIELD_NAME in error.keys()
assert error[Response.ERROR_TYPE_FIELD_NAME] == NOT_FOUND_ERROR
assert Response.ERROR_DETAILS_FIELD_NAME in error.keys()
assert error[Response.ERROR_DETAILS_FIELD_NAME] == "User wasn't found or " \
"specified an invalid password."
await User.collection.delete_many({})
async def test_generate_token_returns_error_for_an_invalid_password(sanic_server):
await User.collection.delete_many({})
await User(**{"username": "user", "password": "<PASSWORD>"}).commit()
payload = {
"username": "user",
"password": "<PASSWORD>"
}
client = RpcAmqpClient(
sanic_server.app,
routing_key=REQUEST_QUEUE,
request_exchange=REQUEST_EXCHANGE,
response_queue='',
response_exchange=RESPONSE_EXCHANGE
)
response = await client.send(payload=payload)
assert Response.ERROR_FIELD_NAME in response.keys()
assert Response.EVENT_FIELD_NAME in response.keys()
error = response[Response.ERROR_FIELD_NAME]
assert len(error.keys()) == 2
assert Response.ERROR_TYPE_FIELD_NAME in error.keys()
assert error[Response.ERROR_TYPE_FIELD_NAME] == NOT_FOUND_ERROR
assert Response.ERROR_DETAILS_FIELD_NAME in error.keys()
assert error[Response.ERROR_DETAILS_FIELD_NAME] == "User wasn't found or " \
"specified an invalid password."
await User.collection.delete_many({})
async def test_generate_token_returns_validation_error_for_empty_body(sanic_server):
client = RpcAmqpClient(
sanic_server.app,
routing_key=REQUEST_QUEUE,
request_exchange=REQUEST_EXCHANGE,
response_queue='',
response_exchange=RESPONSE_EXCHANGE
)
response = await client.send(payload={})
assert Response.ERROR_FIELD_NAME in response.keys()
assert Response.EVENT_FIELD_NAME in response.keys()
error = response[Response.ERROR_FIELD_NAME]
assert Response.ERROR_TYPE_FIELD_NAME in error.keys()
assert error[Response.ERROR_TYPE_FIELD_NAME] == VALIDATION_ERROR
assert Response.ERROR_DETAILS_FIELD_NAME in error.keys()
assert len(error[Response.ERROR_DETAILS_FIELD_NAME].keys()) == 2
assert 'username' in error[Response.ERROR_DETAILS_FIELD_NAME].keys()
assert len(error[Response.ERROR_DETAILS_FIELD_NAME]['username']) == 1
assert error[Response.ERROR_DETAILS_FIELD_NAME]['username'][0] == 'Missing data for ' \
'required field.'
assert 'password' in error[Response.ERROR_DETAILS_FIELD_NAME].keys()
assert len(error[Response.ERROR_DETAILS_FIELD_NAME]['password']) == 1
assert error[Response.ERROR_DETAILS_FIELD_NAME]['password'][0] == 'Missing data for ' \
'required field.'
|
StarcoderdataPython
|
8136004
|
<reponame>chrisfilo/NiMARE<filename>nimare/meta/ibma/base.py
"""
Image-based meta-analysis estimators
"""
from __future__ import division
import numpy as np
from sklearn.preprocessing import normalize
from scipy import stats
from ..base import MetaEstimator
class IBMAEstimator(MetaEstimator):
"""Base class for image-based meta-analysis methods.
"""
pass
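# Hypothetical subclass sketch (the method name and call signature are illustrative only,
# not NiMARE's actual estimator interface):
#
#   class StoufferEstimator(IBMAEstimator):
#       """Combine per-study z maps with Stouffer's method: z = sum(z_i) / sqrt(k)."""
#       def fit(self, z_maps):
#           return np.sum(z_maps, axis=0) / np.sqrt(z_maps.shape[0])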
|
StarcoderdataPython
|
4956394
|
<gh_stars>0
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
from torch.utils.data import DataLoader
from ssd.utils import dboxes_coco, COCODetection
from ssd.utils import SSDTransformer
from pycocotools.coco import COCO
#DALI import
from ssd.coco_pipeline import COCOPipeline, DALICOCOIterator
def get_train_loader(args, local_seed):
train_annotate = os.path.join(args.data, "annotations/instances_train2017.json")
train_coco_root = os.path.join(args.data, "train2017")
train_pipe = COCOPipeline(batch_size=args.batch_size,
file_root=train_coco_root,
annotations_file=train_annotate,
default_boxes=dboxes_coco(args.figsize),
device_id=args.local_rank,
num_shards=args.N_gpu,
output_fp16=args.amp,
output_nhwc=False,
pad_output=False,
num_threads=args.num_workers,
seed=local_seed,
figsize=args.figsize)
train_pipe.build()
test_run = train_pipe.schedule_run(), train_pipe.share_outputs(), train_pipe.release_outputs()
train_loader = DALICOCOIterator(train_pipe, 118287 / args.N_gpu)
return train_loader
def get_val_dataset(args):
dboxes = dboxes_coco(args.figsize)
val_trans = SSDTransformer(dboxes, (args.figsize, args.figsize), val=True)
val_annotate = os.path.join(args.data, "annotations/instances_val2017.json")
val_coco_root = os.path.join(args.data, "val2017")
val_coco = COCODetection(val_coco_root, val_annotate, val_trans)
return val_coco
def get_val_dataloader(dataset, args):
if args.distributed:
val_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
else:
val_sampler = None
val_dataloader = DataLoader(dataset,
batch_size=args.eval_batch_size,
shuffle=False, # Note: distributed sampler is shuffled :(
sampler=val_sampler,
num_workers=args.num_workers)
return val_dataloader
def get_coco_ground_truth(args):
val_annotate = os.path.join(args.data, "annotations/instances_val2017.json")
cocoGt = COCO(annotation_file=val_annotate, use_ext=True)
return cocoGt
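# Minimal usage sketch (illustrative argparse.Namespace; only the fields read by the helpers
# above are listed, and the dataset path is an assumption). The DALI-based train loader also
# needs a GPU, so only the validation side is shown.
if __name__ == '__main__':
    from argparse import Namespace
    args = Namespace(data='/datasets/coco2017', eval_batch_size=32, figsize=300,
                     num_workers=4, distributed=False)
    val_dataset = get_val_dataset(args)
    val_loader = get_val_dataloader(val_dataset, args)
    cocoGt = get_coco_ground_truth(args)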
|
StarcoderdataPython
|
11200293
|
from django.http import HttpResponse
from django.shortcuts import render_to_response
from monitor.util import preload
from django.core.cache import cache
from monitor.util import getResource as rs
from dwebsocket.decorators import accept_websocket
import os, time
preload.loading()
@accept_websocket
def echo_log(request):
    if not request.is_websocket():  # check whether this is a websocket connection
        try:  # if it is a plain HTTP request
message = request.GET['message']
return HttpResponse(message)
except:
return HttpResponse('error')
else:
if not os.path.exists('logs/system.log'):
time.sleep(1)
with open('logs/system.log', encoding='utf-8') as f:
f.seek(0, 2)
while True:
line = f.readline()
if line:
request.websocket.send(line.strip().encode('utf-8'))
time.sleep(0.1)
def sms_send(request):
from monitor.alarm.sms import SMS
SMS().send_sms(
**{'time': '2018/05/27 18:27:42', 'alarmlevel': '01', 'alarmid': 'A3', 'maindata': 'AlarmID=A3', 'alarmcontent': '总线 BPM 通道使用率大于70%,警告!', 'ipaddress': '',
'alarmcount': '3',
'firsttime': '2018/05/27 18:27:01', 'alarmstatus': '01', 'policy': 'S3-12-1', 'areacode': '0300', 'alarmtype': '01', 'originalid': 'S3-12-1', 'alarmcate': '08',
'endtime': '2018/05/27 18:27:41'})
return HttpResponse(1)
def task_start(request):
task_no = request.POST.get('no')
result = rs.task_start(task_no)
return HttpResponse(result)
def functions(request):
pass
def deletetable(request):
    code = request.GET.get('code')  # get the code value from the sysmenu table
data = request.body.decode()
status = rs.del_tabledata(code, data)
return HttpResponse(status)
def updatetable(request):
    code = request.GET.get('code')  # get the code value from the sysmenu table
    data = request.body.decode()  # get the data to be modified; JSON posted from the page must be read with request.body.decode()
# print(code, data)
status = rs.set_tabledata(code, data)
return HttpResponse(status)
def tabledata(request):
    code = request.GET.get('code')  # required field
code_dict = rs.get_menutitle(code)
# print(code_dict)
page = request.POST.get('page')
limit = request.POST.get('limit')
if code_dict['type'] == '1':
        condition = request.POST.get('condition', '')  # filter condition, can also be used as a search condition
        columns = request.POST.get('columns', '')  # comma-separated fields for keyword search
        keyword = request.POST.get('keyword', '')  # search keyword
table_data = rs.get_tabledata(code_dict['table'], page, limit, condition, columns, keyword, code_dict['code_class'])
return HttpResponse(table_data)
elif code_dict['type'] == '2':
        param_dict = request.POST.get('param')  # required parameter
# print(param_dict)
table_data = rs.get_tabledata_sql(page, limit, code_dict['table'], param_dict, code_dict['code_class'])
return HttpResponse(table_data)
def manager(request):
code = request.GET.get('code')
code_dict = rs.get_menutitle(code)
menu_name = code_dict['code_name']
up_menu_name = rs.get_menutitle_up(code)
return render_to_response(code_dict['file_path'], {'menu_name': menu_name, 'up_menu_name': up_menu_name})
def file(request):
import json
id = request.GET.get('id')
a = [{'name': '常用文件夹',
'id': 1,
'alias': 'changyong',
'children': [{
'name': '所有未读',
'id': 11,
'href': 'http://www.layui.com/',
'alias': 'weidu'
}, {
'name': '置顶邮件',
'id': 12
}, {
'name': '标签邮件',
'id': 13
}]
}, {
'name': '我的邮箱',
'id': 2,
'spread': 'true',
'children': [{
'name': 'QQ邮箱',
'id': 21,
'spread': 'true',
'children': [{
'name': '收件箱',
'id': 211,
'children': [{
'name': '所有未读',
'id': 2111
}, {
'name': '置顶邮件',
'id': 2112
}, {
'name': '标签邮件',
'id': 2113
}]
}, {
'name': '已发出的邮件',
'id': 212
}, {
'name': '垃圾邮件',
'id': 213
}]
}, {
'name': '阿里云邮',
'id': 22,
'children': [{
'name': '收件箱',
'id': 221
}, {
'name': '已发出的邮件',
'id': 222
}, {
'name': '垃圾邮件',
'id': 223
}]
}]
}]
return HttpResponse(json.dumps(a))
def login(request):
return render_to_response('login.html')
def index(request):
return render_to_response('index.html', {'menutitle': '信息集成平台管理工具', 'leftmenu': cache.get('leftmenu')})
def welcome(request):
return render_to_response('welcome.html')
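# Hypothetical URL wiring sketch (module path and route names are assumptions, not part of this file):
#
#   # urls.py
#   from django.urls import path
#   from monitor import views
#
#   urlpatterns = [
#       path('', views.index),
#       path('login/', views.login),
#       path('echo_log/', views.echo_log),    # streamed over a websocket via dwebsocket
#       path('tabledata/', views.tabledata),
#   ]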
|
StarcoderdataPython
|
1926031
|
<gh_stars>10-100
# Copyright (c) 2021, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from pynq.lib.video import *
from time import sleep
import cv2
from _thread import *
import threading
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, Xilinx"
__email__ = "<EMAIL>"
"""Collection of classes to manage different video sources"""
# Threaded
class Webcam:
"""Wrapper for a webcam video pipeline"""
def __init__(self, filename: int=0, mode=VideoMode(1280,720,24,30)):
""" Returns a Webcam object
Parameters
----------
filename : int
webcam filename, by default this is 0
mode : VideoMode
webcam configuration
"""
self._dev = filename
self._videoIn = None
self.mode = mode
self._width = mode.width
self._height = mode.height
self._thread = threading.Lock()
self._running = None
def _configure(self):
self._videoIn = cv2.VideoCapture(self._dev)
self._videoIn.set(cv2.CAP_PROP_FRAME_WIDTH, self.mode.width);
self._videoIn.set(cv2.CAP_PROP_FRAME_HEIGHT, self.mode.height);
self._videoIn.set(cv2.CAP_PROP_FPS, self.mode.fps)
def start(self):
"""Start webcam by configuring it"""
self._configure()
def stop(self):
"""Stop the pipeline"""
if self._videoIn:
self._running = False
while self._thread.locked():
sleep(0.05)
self._videoIn.release()
self._videoIn = None
def pause(self):
"""Pause tie"""
if not self._videoIn:
raise SystemError("The Webcam is not started")
if self._running:
self._running = False
def close(self):
"""Uninitialise the drivers, stopping the pipeline beforehand"""
self.stop()
def readframe(self):
"""Read an image from the webcam"""
ret, frame = self._videoIn.read()
return frame
def tie(self, output):
"""Mirror the webcam input to an output channel
Parameters
----------
output : HDMIOut
The output to mirror on to
"""
if not self._videoIn:
raise SystemError("The Webcam is not started")
self._output = output
self._outframe = self._output.newframe()
self._thread.acquire()
self._running = True
try:
start_new_thread(self._tie, ())
except:
import traceback
print (traceback.format_exc())
def _tie(self):
"""Threaded method to implement tie"""
while self._running:
self._outframe[:] = self.readframe()
self._output.writeframe(self._outframe)
self._thread.release()
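# Minimal usage sketch (assumes a USB webcam at index 0; the HDMI output object is an
# assumption and must come from the overlay in use):
#
#   cam = Webcam(filename=0, mode=VideoMode(1280, 720, 24, 30))
#   cam.start()
#   frame = cam.readframe()     # one BGR frame as a numpy array
#   # cam.tie(hdmi_out)         # mirror the webcam onto a configured HDMI output
#   cam.stop()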
|
StarcoderdataPython
|
6988
|
# -*- coding: utf-8 -*-
def ordered_set(iter):
"""Creates an ordered set
@param iter: list or tuple
@return: list with unique values
"""
final = []
for i in iter:
if i not in final:
final.append(i)
return final
def class_slots(ob):
"""Get object attributes from child class attributes
@param ob: Defaults object
@type ob: Defaults
@return: Tuple of slots
"""
current_class = type(ob).__mro__[0]
if not getattr(current_class, 'allslots', None) \
and current_class != object:
_allslots = [list(getattr(cls, '__slots__', []))
for cls in type(ob).__mro__]
_fslots = []
for slot in _allslots:
_fslots = _fslots + slot
current_class.allslots = tuple(ordered_set(_fslots))
return current_class.allslots
def use_if_none_cls(alternative_attr):
def use_if_none(original_attr, ob, kwargs):
"""
Try and get a value from kwargs for original_attr. If there
is no original_attr in kwargs use the alternative_attr value
in the object ob
@param alternative_attr: the alternative attribute
@param original_attr: the original attribute
@param ob: the object with the attributes
@param kwargs: key values
@return: final value
"""
return kwargs.get(original_attr, getattr(ob, alternative_attr, None))
return use_if_none
def usef(attr):
"""Use another value as default
@param attr: the name of the attribute to
use as alternative value
@return: value of alternative attribute
"""
return use_if_none_cls(attr)
use_name_if_none = usef('Name')
def choose_alt(attr, ob, kwargs):
"""If the declared class attribute of ob is callable
then use that callable to get a default ob
instance value if a value is not available in kwargs.
@param attr: ob class attribute name
@param ob: the object instance whose default value needs to be set
@param kwargs: the kwargs values passed to the ob __init__ method
@return: value to be used to set ob instance
"""
result = ob.__class__.__dict__.get(attr, None)
if type(result).__name__ == "member_descriptor":
result = None
elif callable(result):
result = result(attr, ob, kwargs)
return result
class Defaults(object):
"""A base class which allows using slots to define
attributes and the ability to set object
instance defaults at the child class level"""
def __init__(self, **kwargs):
"""Assign kwargs to attributes and defaults to attributes"""
allslots = class_slots(self)
for attr in allslots:
setattr(self, attr, kwargs.get(
attr, choose_alt(attr, self, kwargs)))
def to_dict(self):
"""Returns attributes with values as dict
@return: dictionary of attributes with values
"""
allslots = class_slots(self)
return {
item: getattr(self, item, None)
for item in allslots
}
def to_dict_clean(self):
"""Return a dict where there values of None
are not included
@return: dict of the object properties with values
"""
attribs = self.to_dict()
return {
k: v
for k, v in attribs.items() if v
}
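# Minimal usage sketch (hypothetical child class; the attribute names are illustrative only):
if __name__ == '__main__':
    class Server(Defaults):
        __slots__ = ('Name', 'port')

    s = Server(Name='web-1', port=8080)
    print(s.to_dict())          # {'Name': 'web-1', 'port': 8080}
    t = Server(Name='web-2')    # missing 'port' falls back to None via choose_alt
    print(t.to_dict_clean())    # {'Name': 'web-2'}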
|
StarcoderdataPython
|
3556674
|
<reponame>Gdls/CapsDecE2S
import tensorflow as tf
from tensorflow.python.ops import rnn
import my_rnn  # local module; its bidirectional_dynamic_rnn is used by the matching/aggregation layers below
import pdb
eps = 1e-6
def batch_norm(x, is_training=False):
return tf.layers.batch_normalization(x, momentum=0.8, training=is_training)
def sense_Global_Local_att(sense = None, context_repres = None, context_mask = None, window_size = 8):
'''
Args:
sense: A 3-D tensor with shape (batch, mp, dim), corresonding to the each sense.
context_repres: A 3-D tensor with shape (batch, L, dim), the lstm encoding context representation.
context_mask: A 3-D tensor with shape (batch, L, 1), the index mask of the target word to extract the local context from context_repres.
window_size: integer, size for the local context.
Returns:
Two 3-D tensor with shape (batch, mp, dim), one is sense based on global context, the other is sense based on local context.
'''
def singel_instance(x):
s = x[0] # mp, dim, 4, 3
c = x[1] # L, dim, 5, 3
m = x[2] # L, 1 -> L
#print c
#local context generation
c_shape = tf.shape(c)
idx = tf.argmax(m, 0, output_type="int32")
left_idx = tf.math.maximum(0, idx-window_size)
right_idx = tf.math.minimum(idx+window_size, c_shape[0])
indice = tf.range(left_idx[0], right_idx[0])
local_c = tf.gather(c, indice, axis = 0)# L'', dim
_s_c = tf.nn.softmax(tf.matmul(s, c, transpose_b=True), axis = 1) # mp, L
_s_local_c = tf.nn.softmax(tf.matmul(s, local_c, transpose_b=True), axis = 1) # mp, L''
#print "Global",_s_c, c # 4,5 5,3
#print "Local",_s_local_c, local_c # 4,? ?,3
g_c = tf.matmul(_s_c, c) #mp, dim= mp, L * L, dim
l_c = tf.matmul(_s_local_c, local_c) #mp, dim = mp, L'' * L'', dim
#print "matmul",g_c, l_c
global_s = s + g_c
local_s = s + l_c
return (global_s, local_s)
elems = (sense, context_repres, context_mask)
output = tf.map_fn(singel_instance, elems, dtype=(tf.float32,tf.float32))
return output
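# Shape sketch (illustrative sizes): with sense of shape (B, 4, 300), context_repres of shape
# (B, 50, 300) and a one-hot context_mask of shape (B, 50, 1) marking the target word, the call
# returns a (global_sense, local_sense) pair, each of shape (B, 4, 300).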
# For version compatibility
def reduce_sum(input_tensor, axis=None, keepdims=False):
try:
return tf.reduce_sum(input_tensor, axis=axis, keepdims=keepdims)
except:
return tf.reduce_sum(input_tensor, axis=axis, keep_dims=keepdims)
# For version compatibility
def softmax(logits, axis=None):
try:
return tf.nn.softmax(logits, axis=axis)
except:
return tf.nn.softmax(logits, dim=axis)
def get_shape(inputs, name=None):
name = "shape" if name is None else name
with tf.name_scope(name):
static_shape = inputs.get_shape().as_list()
dynamic_shape = tf.shape(inputs)
shape = []
for i, dim in enumerate(static_shape):
dim = dim if dim is not None else dynamic_shape[i]
shape.append(dim)
return(shape)
def routing(input, b_IJ, num_outputs=10, num_dims=16, iter_routing = 3):
''' The routing algorithm.
Args:
input: A Tensor with [batch_size, num_caps_l=1152, 1, length(u_i)=8, 1]
shape, num_caps_l meaning the number of capsule in the layer l.
num_outputs: the number of output capsules.
num_dims: the number of dimensions for output capsule.
Returns:
A Tensor of shape [batch_size, num_caps_l_plus_1, length(v_j)=16, 1]
representing the vector output `v_j` in the layer l+1
Notes:
u_i represents the vector output of capsule i in the layer l, and
v_j the vector output of capsule j in the layer l+1.
'''
# W: [1, num_caps_i, num_caps_j * len_v_j, len_u_j, 1] (1, 10, 1000, 100, 1)
input_shape = get_shape(input)#batch_size, Mp, dim, 1
W = tf.get_variable('Weight', shape=[1, input_shape[1], num_dims * num_outputs] + input_shape[-2:],
dtype=tf.float32, initializer=tf.random_normal_initializer(stddev=0.01))
#print (W.shape)
biases = tf.get_variable('bias', shape=(1, 1, num_outputs, num_dims, 1))
# Eq.2, calc u_hat
# Since tf.matmul is a time-consuming op,
# A better solution is using element-wise multiply, reduce_sum and reshape
# ops instead. Matmul [a, b] x [b, c] is equal to a series ops as
# element-wise multiply [a*c, b] * [a*c, b], reduce_sum at axis=1 and
# reshape to [a, c]
input = tf.tile(input, [1, 1, num_dims * num_outputs, 1, 1])
# assert input.get_shape() == [batch_size, 1152, 160, 8, 1]
u_hat = reduce_sum(W * input, axis=3, keepdims=True)
u_hat = tf.reshape(u_hat, shape=[-1, input_shape[1], num_outputs, num_dims, 1])
# assert u_hat.get_shape() == [batch_size, 1152, 10, 16, 1]
# In forward, u_hat_stopped = u_hat; in backward, no gradient passed back from u_hat_stopped to u_hat
u_hat_stopped = tf.stop_gradient(u_hat, name='stop_gradient')
# line 3,for r iterations do
for r_iter in range(iter_routing):
with tf.variable_scope('iter_' + str(r_iter)):
# line 4:
# => [batch_size, 1152, 10, 1, 1]
c_IJ = softmax(b_IJ, axis=2)
# At last iteration, use `u_hat` in order to receive gradients from the following graph
if r_iter == iter_routing - 1:
# line 5:
# weighting u_hat with c_IJ, element-wise in the last two dims
# => [batch_size, 1152, 10, 16, 1]
s_J = tf.multiply(c_IJ, u_hat)
# then sum in the second dim, resulting in [batch_size, 1, 10, 16, 1]
s_J = reduce_sum(s_J, axis=1, keepdims=True) + biases
# assert s_J.get_shape() == [batch_size, 1, num_outputs, num_dims, 1]
# line 6:
# squash using Eq.1,
v_J = squash(s_J)
# assert v_J.get_shape() == [batch_size, 1, 10, 16, 1]
elif r_iter < iter_routing - 1: # Inner iterations, do not apply backpropagation
s_J = tf.multiply(c_IJ, u_hat_stopped)
s_J = reduce_sum(s_J, axis=1, keepdims=True) + biases
v_J = squash(s_J)
# line 7:
# reshape & tile v_j from [batch_size ,1, 10, 16, 1] to [batch_size, 1152, 10, 16, 1]
# then matmul in the last tow dim: [16, 1].T x [16, 1] => [1, 1], reduce mean in the
# batch_size dim, resulting in [1, 1152, 10, 1, 1]
v_J_tiled = tf.tile(v_J, [1, input_shape[1], 1, 1, 1])
u_produce_v = reduce_sum(u_hat_stopped * v_J_tiled, axis=3, keepdims=True)
# assert u_produce_v.get_shape() == [batch_size, 1152, 10, 1, 1]
# b_IJ += tf.reduce_sum(u_produce_v, axis=0, keep_dims=True)
b_IJ += u_produce_v
return(v_J)
def squash(vector):
'''Squashing function corresponding to Eq. 1
Args:
vector: A tensor with shape [batch_size, 1, num_caps, vec_len, 1] or [batch_size, num_caps, vec_len, 1].
Returns:
A tensor with the same shape as vector but squashed in 'vec_len' dimension.
'''
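    # i.e. v_j = (||s_j||^2 / (1 + ||s_j||^2)) * (s_j / ||s_j||), with eps guarding the sqrt at 0.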
vec_squared_norm = reduce_sum(tf.square(vector), -2, keepdims=True)
scalar_factor = vec_squared_norm / (1 + vec_squared_norm) / tf.sqrt(vec_squared_norm + eps)
vec_squashed = scalar_factor * vector # element-wise
return(vec_squashed)
'''
def sense_selection(gloss, context, W_parameter):
batch_size = gloss.get_shape().as_list()[0]
memory_size = gloss.get_shape().as_list()[-1]
Cin = Ain = gloss# * sense_mask # [batch_size, max_n_sense, 2*n_units]
#Bin = tf.reshape(context, [batch_size, memory_size, 1]) # [batch_size, 2*n_units, 1]
Bin = tf.expand_dims(context, axis =2)
Aout = tf.matmul(Ain, Bin) # [batch_size, max_n_sense, 1]
#Aout_exp = tf.exp(Aout) #* sense_mask[:, :, :1]
#p = Aout_exp / tf.reduce_sum(Aout_exp, axis=1, keepdims=True) # [batch_size, max_n_sense, 1]
p = tf.nn.softmax(Aout)
#memory_p.append(tf.squeeze(p))
Mout = tf.squeeze(tf.matmul(Cin, p, transpose_a=True), axis = 2) # [batch_size, 2*n_units]
state = tf.nn.relu(tf.add(Mout, tf.matmul(context, W_parameter))) # [batch_size, 2*n_units]
return Mout, state, tf.squeeze(Aout), tf.squeeze(p)
'''
def sense_selection(senses, context, W_parameter):
senses = batch_norm(senses)
context = batch_norm(context)
batch_size = senses.get_shape().as_list()[0]
sense_size = senses.get_shape().as_list()[-1]
#print "senses",senses
#print "context",context
#pdb.set_trace()
Cin = Ain = senses # [batch_size, mp_dim, shared_dim]
Bin = tf.expand_dims(context, axis =2) # [batch_size, shared_dim, 1]
#print "Bin",Bin
#pdb.set_trace()
Aout = tf.matmul(Ain, Bin) # [batch_size, mp_dim, 1]
#Aout_exp = tf.exp(Aout)
#p = Aout_exp / tf.reduce_sum(Aout_exp, axis=1, keepdims=True) # [batch_size, mp_dim, 1]
p = tf.nn.softmax(Aout, axis = 1)
#print "p",p
#pdb.set_trace()
#memory_p.append(tf.squeeze(p)) #[batch_size, mp_dim]
#print "Cin",Cin
#pdb.set_trace()
Sout = tf.squeeze(tf.matmul(Cin, p, transpose_a=True), axis = 2) # [batch_size, shared_dim]
#print "Sout",Sout
#print "matmul",tf.matmul(context, W_parameter)
#pdb.set_trace()
state = tf.nn.relu(batch_norm(tf.add(Sout, tf.matmul(context, W_parameter)))) # [batch_size, shared_dim]
#print "state",state
#pdb.set_trace()
#if memory_update_type == 'concat':
# state = tf.concat((Mout, context), 1) # [batch_size, 4*n_units]
# state = tf.nn.relu(batch_norm(tf.matmul(state, W_memory)))
#else: # linear
# state = batch_norm(tf.add(Mout, tf.matmul(context, U_memory))) # [batch_size, 2*n_units]
return Sout, state, tf.squeeze(Aout), p
def soft_gate_for_f_b_context(f_context_representation, #diff part
b_context_representation,#sentence part
output_size, scope=None):
#pre_context_representation from single BiLSTM with shape (batch, dim); match_representation from BiLSTM in BiMPM with shape (batch, dim);
#the module aims to make a selection between sentence part by BiMPM matching and diff part by BiLSTM;
#two representations help to generate a "gate" in order to make a selection between the sentence part and diff part;
    #The gate is applied as "sentence*gate + diff*(1-gate)"; if the gate tends to 1 the sentence part dominates, otherwise the diff part dominates
with tf.variable_scope(scope or "gate_selection_layer"):
highway_1 = tf.get_variable("highway_1", [output_size, output_size], dtype=tf.float32)
highway_2 = tf.get_variable("highway_2", [output_size, output_size], dtype=tf.float32)
highway_b = tf.get_variable("highway_b", [output_size], dtype=tf.float32)
full_w = tf.get_variable("full_w", [output_size, output_size], dtype=tf.float32)
full_b = tf.get_variable("full_b", [output_size], dtype=tf.float32)
gate = tf.nn.sigmoid(tf.nn.xw_plus_b(f_context_representation, highway_1, highway_b)+tf.nn.xw_plus_b(b_context_representation, highway_2, highway_b))
representation_f = tf.nn.tanh(tf.nn.xw_plus_b(f_context_representation, full_w, full_b))#common
representation_b = tf.nn.tanh(tf.nn.xw_plus_b(b_context_representation, full_w, full_b))
outputs = tf.add(tf.multiply(representation_b, gate),tf.multiply(representation_f,tf.subtract(1.0, gate)),"representation")
return outputs
def semantic_under_condition(word_embedding_multiperspective, word_Representatioin_multiperspective, w_list):
w_1, b_1, w_2, b_2, w_attention = w_list
#w_1 = tf.get_variable("mapping_w_1", [1,MP_dim], dtype=tf.float32) #1*P
#b_1 = tf.get_variable("mapping_b_1", [shared_dim], dtype=tf.float32)
#w_2 = tf.get_variable("mapping_w_2", [shared_dim, shared_dim], dtype=tf.float32) # D*D
#b_2 = tf.get_variable("mapping_b_2", [shared_dim], dtype=tf.float32)
#w_attention = tf.get_variable("attention_w", [context_lstm_dim*2, 1], dtype=tf.float32) # D*1
#pdb.set_trace()
def singel_instance(x):
return tf.matmul(w_1, x)+b_1
semantic_embedding = tf.map_fn(singel_instance, word_embedding_multiperspective, dtype=tf.float32) #batch * 1 * D
#batch * P * D
def singel_instance_condition(x):
return tf.matmul(x,w_2)+b_2
#semantic_Representation = tf.matmul(word_embedding_multiperspective, w_2)+b_2
semantic_Representation = tf.map_fn(singel_instance_condition, word_Representatioin_multiperspective, dtype=tf.float32)
def singel_instance_attention(x):
return tf.matmul(x,w_attention)
weights_attention = tf.nn.softmax(tf.map_fn(singel_instance_attention, semantic_Representation, dtype=tf.float32), axis = 1) #batch, P, 1
#weights_attention = tf.nn.softmax(tf.matmul(semantic_Representation, w_attention), axis = 1) #batch, P, 1
attention_R = tf.reduce_sum(tf.multiply(semantic_Representation, weights_attention), axis = 1) # batch, P, D-->batch, 1, D
embedding_based_Representation = tf.reduce_sum(semantic_embedding,axis = 1, keepdims = False)+attention_R # batch, 1, D
#print semantic_embedding # batch*1*D
#print semantic_Representation # batch*P*D
#print weights_attention
#print attention_R
#print embedding_based_Representation
#pdb.set_trace()
return embedding_based_Representation
def cosine_distance(y1,y2):
# y1 [....,a, 1, d]
# y2 [....,1, b, d]
# cosine_numerator = T.sum(y1*y2, axis=-1)
cosine_numerator = tf.reduce_sum(tf.multiply(y1, y2), axis=-1)
# y1_norm = T.sqrt(T.maximum(T.sum(T.sqr(y1), axis=-1), eps)) #be careful while using T.sqrt(), like in the cases of Euclidean distance, cosine similarity, for the gradient of T.sqrt() at 0 is undefined, we should add an Eps or use T.maximum(original, eps) in the sqrt.
y1_norm = tf.sqrt(tf.maximum(tf.reduce_sum(tf.square(y1), axis=-1), eps))
y2_norm = tf.sqrt(tf.maximum(tf.reduce_sum(tf.square(y2), axis=-1), eps))
return cosine_numerator / y1_norm / y2_norm
def cal_relevancy_matrix(in_question_repres, in_passage_repres):
in_question_repres_tmp = tf.expand_dims(in_question_repres, 1) # [batch_size, 1, question_len, dim]
in_passage_repres_tmp = tf.expand_dims(in_passage_repres, 2) # [batch_size, passage_len, 1, dim]
relevancy_matrix = cosine_distance(in_question_repres_tmp,in_passage_repres_tmp) # [batch_size, passage_len, question_len]
return relevancy_matrix
def mask_relevancy_matrix(relevancy_matrix, question_mask, passage_mask):
# relevancy_matrix: [batch_size, passage_len, question_len]
# question_mask: [batch_size, question_len]
# passage_mask: [batch_size, passsage_len]
relevancy_matrix = tf.multiply(relevancy_matrix, tf.expand_dims(question_mask, 1))
relevancy_matrix = tf.multiply(relevancy_matrix, tf.expand_dims(passage_mask, 2))
return relevancy_matrix
def cal_cosine_weighted_question_representation(question_representation, cosine_matrix, normalize=False):
# question_representation: [batch_size, question_len, dim]
# cosine_matrix: [batch_size, passage_len, question_len]
if normalize: cosine_matrix = tf.nn.softmax(cosine_matrix)
expanded_cosine_matrix = tf.expand_dims(cosine_matrix, axis=-1) # [batch_size, passage_len, question_len, 'x']
weighted_question_words = tf.expand_dims(question_representation, axis=1) # [batch_size, 'x', question_len, dim]
weighted_question_words = tf.reduce_sum(tf.multiply(weighted_question_words, expanded_cosine_matrix), axis=2)# [batch_size, passage_len, dim]
if not normalize:
weighted_question_words = tf.div(weighted_question_words, tf.expand_dims(tf.add(tf.reduce_sum(cosine_matrix, axis=-1),eps),axis=-1))
return weighted_question_words # [batch_size, passage_len, dim]
def multi_perspective_expand_for_3D(in_tensor, decompose_params):
in_tensor = tf.expand_dims(in_tensor, axis=2) #[batch_size, passage_len, 'x', dim]
decompose_params = tf.expand_dims(tf.expand_dims(decompose_params, axis=0), axis=0) # [1, 1, decompse_dim, dim]
return tf.multiply(in_tensor, decompose_params)#[batch_size, passage_len, decompse_dim, dim]
def multi_perspective_expand_for_2D(in_tensor, decompose_params):
in_tensor = tf.expand_dims(in_tensor, axis=1) #[batch_size, 'x', dim]
decompose_params = tf.expand_dims(decompose_params, axis=0) # [1, decompse_dim, dim]
return tf.multiply(in_tensor, decompose_params) # [batch_size, decompse_dim, dim]
def multi_perspective_expand_for_1D(in_tensor, decompose_params):
in_tensor = tf.expand_dims(in_tensor, axis=0) #['x', dim]
return tf.multiply(in_tensor, decompose_params) # [decompse_dim, dim]
def cal_full_matching_bak(passage_representation, full_question_representation, decompose_params):
# passage_representation: [batch_size, passage_len, dim]
# full_question_representation: [batch_size, dim]
# decompose_params: [decompose_dim, dim]
mp_passage_rep = multi_perspective_expand_for_3D(passage_representation, decompose_params) # [batch_size, passage_len, decompse_dim, dim]
mp_full_question_rep = multi_perspective_expand_for_2D(full_question_representation, decompose_params) # [batch_size, decompse_dim, dim]
return cosine_distance(mp_passage_rep, tf.expand_dims(mp_full_question_rep, axis=1)) #[batch_size, passage_len, decompse_dim]
def cal_full_matching(passage_representation, full_question_representation, decompose_params):
# passage_representation: [batch_size, passage_len, dim]
# full_question_representation: [batch_size, dim]
# decompose_params: [decompose_dim, dim]
def singel_instance(x):
p = x[0]
q = x[1]
# p: [pasasge_len, dim], q: [dim]
p = multi_perspective_expand_for_2D(p, decompose_params) # [pasasge_len, decompose_dim, dim]
q = multi_perspective_expand_for_1D(q, decompose_params) # [decompose_dim, dim]
q = tf.expand_dims(q, 0) # [1, decompose_dim, dim]
return cosine_distance(p, q) # [passage_len, decompose]
elems = (passage_representation, full_question_representation)
return tf.map_fn(singel_instance, elems, dtype=tf.float32) # [batch_size, passage_len, decompse_dim]
def cal_maxpooling_matching_bak(passage_rep, question_rep, decompose_params):
# passage_representation: [batch_size, passage_len, dim]
# qusetion_representation: [batch_size, question_len, dim]
# decompose_params: [decompose_dim, dim]
passage_rep = multi_perspective_expand_for_3D(passage_rep, decompose_params) # [batch_size, passage_len, decompse_dim, dim]
question_rep = multi_perspective_expand_for_3D(question_rep, decompose_params) # [batch_size, question_len, decompse_dim, dim]
passage_rep = tf.expand_dims(passage_rep, 2) # [batch_size, passage_len, 1, decompse_dim, dim]
question_rep = tf.expand_dims(question_rep, 1) # [batch_size, 1, question_len, decompse_dim, dim]
matching_matrix = cosine_distance(passage_rep,question_rep) # [batch_size, passage_len, question_len, decompse_dim]
return tf.concat( axis = 2, values = [tf.reduce_max(matching_matrix, axis=2), tf.reduce_mean(matching_matrix, axis=2)])# [batch_size, passage_len, 2*decompse_dim]
def cal_maxpooling_matching(passage_rep, question_rep, decompose_params):
# passage_representation: [batch_size, passage_len, dim]
# qusetion_representation: [batch_size, question_len, dim]
# decompose_params: [decompose_dim, dim]
def singel_instance(x):
p = x[0]
q = x[1]
# p: [pasasge_len, dim], q: [question_len, dim]
p = multi_perspective_expand_for_2D(p, decompose_params) # [pasasge_len, decompose_dim, dim]
q = multi_perspective_expand_for_2D(q, decompose_params) # [question_len, decompose_dim, dim]
p = tf.expand_dims(p, 1) # [pasasge_len, 1, decompose_dim, dim]
q = tf.expand_dims(q, 0) # [1, question_len, decompose_dim, dim]
return cosine_distance(p, q) # [passage_len, question_len, decompose]
elems = (passage_rep, question_rep)
matching_matrix = tf.map_fn(singel_instance, elems, dtype=tf.float32) # [batch_size, passage_len, question_len, decompse_dim]
return tf.concat( axis = 2, values = [tf.reduce_max(matching_matrix, axis=2), tf.reduce_mean(matching_matrix, axis=2)])# [batch_size, passage_len, 2*decompse_dim]
def cal_maxpooling_matching_for_word(passage_rep, question_rep, decompose_params):
# passage_representation: [batch_size, passage_len, dim]
# qusetion_representation: [batch_size, question_len, dim]
# decompose_params: [decompose_dim, dim]
def singel_instance(x):
p = x[0]
q = x[1]
q = multi_perspective_expand_for_2D(q, decompose_params) # [question_len, decompose_dim, dim]
# p: [pasasge_len, dim], q: [question_len, dim]
def single_instance_2(y):
# y: [dim]
y = multi_perspective_expand_for_1D(y, decompose_params) #[decompose_dim, dim]
y = tf.expand_dims(y, 0) # [1, decompose_dim, dim]
matching_matrix = cosine_distance(y, q)#[question_len, decompose_dim]
return tf.concat( axis = 0, values = [tf.reduce_max(matching_matrix, axis=0), tf.reduce_mean(matching_matrix, axis=0)]) #[2*decompose_dim]
return tf.map_fn(single_instance_2, p, dtype=tf.float32) # [passage_len, 2*decompse_dim]
elems = (passage_rep, question_rep)
return tf.map_fn(singel_instance, elems, dtype=tf.float32) # [batch_size, passage_len, 2*decompse_dim]
def cal_attentive_matching(passage_rep, att_question_rep, decompose_params):
# passage_rep: [batch_size, passage_len, dim]
# att_question_rep: [batch_size, passage_len, dim]
def singel_instance(x):
p = x[0]
q = x[1]
# p: [pasasge_len, dim], q: [pasasge_len, dim]
p = multi_perspective_expand_for_2D(p, decompose_params) # [pasasge_len, decompose_dim, dim]
q = multi_perspective_expand_for_2D(q, decompose_params) # [pasasge_len, decompose_dim, dim]
return cosine_distance(p, q) # [pasasge_len, decompose_dim]
elems = (passage_rep, att_question_rep)
return tf.map_fn(singel_instance, elems, dtype=tf.float32) # [batch_size, passage_len, decompse_dim]
def cross_entropy(logits, truth, mask):
# logits: [batch_size, passage_len]
# truth: [batch_size, passage_len]
# mask: [batch_size, passage_len]
# xdev = x - x.max()
# return xdev - T.log(T.sum(T.exp(xdev)))
logits = tf.multiply(logits, mask)
    xdev = tf.subtract(logits, tf.expand_dims(tf.reduce_max(logits, 1), -1))
    log_predictions = tf.subtract(xdev, tf.expand_dims(tf.log(tf.reduce_sum(tf.exp(xdev),-1)),-1))
# return -T.sum(targets * log_predictions)
result = tf.multiply(tf.multiply(truth, log_predictions), mask) # [batch_size, passage_len]
return tf.multiply(-1.0,tf.reduce_sum(result, -1)) # [batch_size]
def highway_layer(in_val, output_size, scope=None):
# in_val: [batch_size, passage_len, dim]
input_shape = tf.shape(in_val)
batch_size = input_shape[0]
passage_len = input_shape[1]
# feat_dim = input_shape[2]
in_val = tf.reshape(in_val, [batch_size * passage_len, output_size])
with tf.variable_scope(scope or "highway_layer"):
highway_w = tf.get_variable("highway_w", [output_size, output_size], dtype=tf.float32)
highway_b = tf.get_variable("highway_b", [output_size], dtype=tf.float32)
full_w = tf.get_variable("full_w", [output_size, output_size], dtype=tf.float32)
full_b = tf.get_variable("full_b", [output_size], dtype=tf.float32)
trans = tf.nn.tanh(tf.nn.xw_plus_b(in_val, full_w, full_b))
gate = tf.nn.sigmoid(tf.nn.xw_plus_b(in_val, highway_w, highway_b))
        outputs = tf.add(tf.multiply(trans, gate), tf.multiply(in_val, tf.subtract(1.0, gate)), "y")
outputs = tf.reshape(outputs, [batch_size, passage_len, output_size])
return outputs
def multi_highway_layer(in_val, output_size, num_layers, scope=None):
scope_name = 'highway_layer'
if scope is not None: scope_name = scope
    for i in range(num_layers):
cur_scope_name = scope_name + "-{}".format(i)
in_val = highway_layer(in_val, output_size, scope=cur_scope_name)
return in_val
def cal_max_question_representation(question_representation, cosine_matrix):
# question_representation: [batch_size, question_len, dim]
# cosine_matrix: [batch_size, passage_len, question_len]
question_index = tf.argmax(cosine_matrix, 2) # [batch_size, passage_len]
def singel_instance(x):
q = x[0]
c = x[1]
return tf.gather(q, c)
elems = (question_representation, question_index)
return tf.map_fn(singel_instance, elems, dtype=tf.float32) # [batch_size, passage_len, dim]
def cal_linear_decomposition_representation(passage_representation, passage_lengths, cosine_matrix,is_training,
lex_decompsition_dim, dropout_rate):
# passage_representation: [batch_size, passage_len, dim]
# cosine_matrix: [batch_size, passage_len, question_len]
passage_similarity = tf.reduce_max(cosine_matrix, 2)# [batch_size, passage_len]
similar_weights = tf.expand_dims(passage_similarity, -1) # [batch_size, passage_len, 1]
dissimilar_weights = tf.subtract(1.0, similar_weights)
similar_component = tf.multiply(passage_representation, similar_weights)
dissimilar_component = tf.multiply(passage_representation, dissimilar_weights)
all_component = tf.concat( axis =2, values = [similar_component, dissimilar_component])
if lex_decompsition_dim==-1:
return all_component
with tf.variable_scope('lex_decomposition'):
lex_lstm_cell_fw = tf.nn.rnn_cell.BasicLSTMCell(lex_decompsition_dim)
lex_lstm_cell_bw = tf.nn.rnn_cell.BasicLSTMCell(lex_decompsition_dim)
if is_training:
lex_lstm_cell_fw = tf.nn.rnn_cell.DropoutWrapper(lex_lstm_cell_fw, output_keep_prob=(1 - dropout_rate))
lex_lstm_cell_bw = tf.nn.rnn_cell.DropoutWrapper(lex_lstm_cell_bw, output_keep_prob=(1 - dropout_rate))
lex_lstm_cell_fw = tf.nn.rnn_cell.MultiRNNCell([lex_lstm_cell_fw])
lex_lstm_cell_bw = tf.nn.rnn_cell.MultiRNNCell([lex_lstm_cell_bw])
(lex_features_fw, lex_features_bw), _ = rnn.bidirectional_dynamic_rnn(
lex_lstm_cell_fw, lex_lstm_cell_bw, all_component, dtype=tf.float32, sequence_length=passage_lengths)
lex_features = tf.concat( axis =2, values = [lex_features_fw, lex_features_bw])
return lex_features
def match_passage_with_question(passage_context_representation_fw, passage_context_representation_bw, mask,
question_context_representation_fw, question_context_representation_bw,question_mask,
MP_dim, context_lstm_dim, scope=None,
with_full_match=True, with_maxpool_match=True, with_attentive_match=True, with_max_attentive_match=True):
all_question_aware_representatins = []
dim = 0
with tf.variable_scope(scope or "match_passage_with_question"):
fw_question_full_rep = question_context_representation_fw[:,-1,:]
bw_question_full_rep = question_context_representation_bw[:,0,:]
question_context_representation_fw = tf.multiply(question_context_representation_fw, tf.expand_dims(question_mask,-1))
question_context_representation_bw = tf.multiply(question_context_representation_bw, tf.expand_dims(question_mask,-1))
passage_context_representation_fw = tf.multiply(passage_context_representation_fw, tf.expand_dims(mask,-1))
passage_context_representation_bw = tf.multiply(passage_context_representation_bw, tf.expand_dims(mask,-1))
forward_relevancy_matrix = cal_relevancy_matrix(question_context_representation_fw, passage_context_representation_fw)
forward_relevancy_matrix = mask_relevancy_matrix(forward_relevancy_matrix, question_mask, mask)
backward_relevancy_matrix = cal_relevancy_matrix(question_context_representation_bw, passage_context_representation_bw)
backward_relevancy_matrix = mask_relevancy_matrix(backward_relevancy_matrix, question_mask, mask)
if MP_dim > 0:
if with_full_match:
# forward Full-Matching: passage_context_representation_fw vs question_context_representation_fw[-1]
fw_full_decomp_params = tf.get_variable("forward_full_matching_decomp", shape=[MP_dim, context_lstm_dim], dtype=tf.float32)
fw_full_match_rep = cal_full_matching(passage_context_representation_fw, fw_question_full_rep, fw_full_decomp_params)
all_question_aware_representatins.append(fw_full_match_rep)
dim += MP_dim
# backward Full-Matching: passage_context_representation_bw vs question_context_representation_bw[0]
bw_full_decomp_params = tf.get_variable("backward_full_matching_decomp", shape=[MP_dim, context_lstm_dim], dtype=tf.float32)
bw_full_match_rep = cal_full_matching(passage_context_representation_bw, bw_question_full_rep, bw_full_decomp_params)
all_question_aware_representatins.append(bw_full_match_rep)
dim += MP_dim
if with_maxpool_match:
# forward Maxpooling-Matching
fw_maxpooling_decomp_params = tf.get_variable("forward_maxpooling_matching_decomp", shape=[MP_dim, context_lstm_dim], dtype=tf.float32)
fw_maxpooling_rep = cal_maxpooling_matching(passage_context_representation_fw, question_context_representation_fw, fw_maxpooling_decomp_params)
all_question_aware_representatins.append(fw_maxpooling_rep)
dim += 2*MP_dim
# backward Maxpooling-Matching
bw_maxpooling_decomp_params = tf.get_variable("backward_maxpooling_matching_decomp", shape=[MP_dim, context_lstm_dim], dtype=tf.float32)
bw_maxpooling_rep = cal_maxpooling_matching(passage_context_representation_bw, question_context_representation_bw, bw_maxpooling_decomp_params)
all_question_aware_representatins.append(bw_maxpooling_rep)
dim += 2*MP_dim
if with_attentive_match:
# forward attentive-matching
# forward weighted question representation: [batch_size, question_len, passage_len] [batch_size, question_len, context_lstm_dim]
att_question_fw_contexts = cal_cosine_weighted_question_representation(question_context_representation_fw, forward_relevancy_matrix)
fw_attentive_decomp_params = tf.get_variable("forward_attentive_matching_decomp", shape=[MP_dim, context_lstm_dim], dtype=tf.float32)
fw_attentive_rep = cal_attentive_matching(passage_context_representation_fw, att_question_fw_contexts, fw_attentive_decomp_params)
all_question_aware_representatins.append(fw_attentive_rep)
dim += MP_dim
# backward attentive-matching
# backward weighted question representation
att_question_bw_contexts = cal_cosine_weighted_question_representation(question_context_representation_bw, backward_relevancy_matrix)
bw_attentive_decomp_params = tf.get_variable("backward_attentive_matching_decomp", shape=[MP_dim, context_lstm_dim], dtype=tf.float32)
bw_attentive_rep = cal_attentive_matching(passage_context_representation_bw, att_question_bw_contexts, bw_attentive_decomp_params)
all_question_aware_representatins.append(bw_attentive_rep)
dim += MP_dim
if with_max_attentive_match:
# forward max attentive-matching
max_att_fw = cal_max_question_representation(question_context_representation_fw, forward_relevancy_matrix)
fw_max_att_decomp_params = tf.get_variable("fw_max_att_decomp_params", shape=[MP_dim, context_lstm_dim], dtype=tf.float32)
fw_max_attentive_rep = cal_attentive_matching(passage_context_representation_fw, max_att_fw, fw_max_att_decomp_params)
all_question_aware_representatins.append(fw_max_attentive_rep)
dim += MP_dim
# backward max attentive-matching
max_att_bw = cal_max_question_representation(question_context_representation_bw, backward_relevancy_matrix)
bw_max_att_decomp_params = tf.get_variable("bw_max_att_decomp_params", shape=[MP_dim, context_lstm_dim], dtype=tf.float32)
bw_max_attentive_rep = cal_attentive_matching(passage_context_representation_bw, max_att_bw, bw_max_att_decomp_params)
all_question_aware_representatins.append(bw_max_attentive_rep)
dim += MP_dim
all_question_aware_representatins.append(tf.reduce_max(forward_relevancy_matrix, axis=2,keepdims=True))
all_question_aware_representatins.append(tf.reduce_mean(forward_relevancy_matrix, axis=2,keepdims=True))
all_question_aware_representatins.append(tf.reduce_max(backward_relevancy_matrix, axis=2,keepdims=True))
all_question_aware_representatins.append(tf.reduce_mean(backward_relevancy_matrix, axis=2,keepdims=True))
dim += 4
return (all_question_aware_representatins, dim)
def unidirectional_matching(in_question_repres, in_passage_repres,question_lengths, passage_lengths,
question_mask, mask, MP_dim, input_dim, with_filter_layer, context_layer_num,
context_lstm_dim,is_training,dropout_rate,with_match_highway,aggregation_layer_num,
aggregation_lstm_dim,highway_layer_num,with_aggregation_highway,with_lex_decomposition, lex_decompsition_dim,
with_full_match=True, with_maxpool_match=True, with_attentive_match=True, with_max_attentive_match=True):
# ======Filter layer======
cosine_matrix = cal_relevancy_matrix(in_question_repres, in_passage_repres)
cosine_matrix = mask_relevancy_matrix(cosine_matrix, question_mask, mask)
raw_in_passage_repres = in_passage_repres
if with_filter_layer:
relevancy_matrix = cosine_matrix # [batch_size, passage_len, question_len]
relevancy_degrees = tf.reduce_max(relevancy_matrix, axis=2) # [batch_size, passage_len]
relevancy_degrees = tf.expand_dims(relevancy_degrees,axis=-1) # [batch_size, passage_len, 'x']
in_passage_repres = tf.multiply(in_passage_repres, relevancy_degrees)
# =======Context Representation Layer & Multi-Perspective matching layer=====
all_question_aware_representatins = []
# max and mean pooling at word level
all_question_aware_representatins.append(tf.reduce_max(cosine_matrix, axis=2,keepdims=True))
all_question_aware_representatins.append(tf.reduce_mean(cosine_matrix, axis=2,keepdims=True))
question_aware_dim = 2
if MP_dim>0:
if with_max_attentive_match:
# max_att word level
max_att = cal_max_question_representation(in_question_repres, cosine_matrix)
max_att_decomp_params = tf.get_variable("max_att_decomp_params", shape=[MP_dim, input_dim], dtype=tf.float32)
max_attentive_rep = cal_attentive_matching(raw_in_passage_repres, max_att, max_att_decomp_params)
all_question_aware_representatins.append(max_attentive_rep)
question_aware_dim += MP_dim
# lex decomposition
if with_lex_decomposition:
lex_decomposition = cal_linear_decomposition_representation(raw_in_passage_repres, passage_lengths, cosine_matrix,is_training,
lex_decompsition_dim, dropout_rate)
all_question_aware_representatins.append(lex_decomposition)
if lex_decompsition_dim== -1: question_aware_dim += 2 * input_dim
else: question_aware_dim += 2* lex_decompsition_dim
with tf.variable_scope('context_MP_matching'):
        for i in range(context_layer_num):
with tf.variable_scope('layer-{}'.format(i)):
with tf.variable_scope('context_represent'):
# parameters
context_lstm_cell_fw = tf.nn.rnn_cell.BasicLSTMCell(context_lstm_dim)
context_lstm_cell_bw = tf.nn.rnn_cell.BasicLSTMCell(context_lstm_dim)
if is_training:
context_lstm_cell_fw = tf.nn.rnn_cell.DropoutWrapper(context_lstm_cell_fw, output_keep_prob=(1 - dropout_rate))
context_lstm_cell_bw = tf.nn.rnn_cell.DropoutWrapper(context_lstm_cell_bw, output_keep_prob=(1 - dropout_rate))
context_lstm_cell_fw = tf.nn.rnn_cell.MultiRNNCell([context_lstm_cell_fw])
context_lstm_cell_bw = tf.nn.rnn_cell.MultiRNNCell([context_lstm_cell_bw])
# question representation
(question_context_representation_fw, question_context_representation_bw), _ = my_rnn.bidirectional_dynamic_rnn(
context_lstm_cell_fw, context_lstm_cell_bw, in_question_repres, dtype=tf.float32,
sequence_length=question_lengths) # [batch_size, question_len, context_lstm_dim]
in_question_repres = tf.concat( axis =2, values = [question_context_representation_fw, question_context_representation_bw])
# passage representation
tf.get_variable_scope().reuse_variables()
(passage_context_representation_fw, passage_context_representation_bw), _ = my_rnn.bidirectional_dynamic_rnn(
context_lstm_cell_fw, context_lstm_cell_bw, in_passage_repres, dtype=tf.float32,
sequence_length=passage_lengths) # [batch_size, passage_len, context_lstm_dim]
in_passage_repres = tf.concat( axis =2, values = [passage_context_representation_fw, passage_context_representation_bw])
# Multi-perspective matching
with tf.variable_scope('MP_matching'):
(matching_vectors, matching_dim) = match_passage_with_question(passage_context_representation_fw,
passage_context_representation_bw, mask,
question_context_representation_fw, question_context_representation_bw,question_mask,
MP_dim, context_lstm_dim, scope=None,
with_full_match=with_full_match, with_maxpool_match=with_maxpool_match,
with_attentive_match=with_attentive_match, with_max_attentive_match=with_max_attentive_match)
all_question_aware_representatins.extend(matching_vectors)
question_aware_dim += matching_dim
all_question_aware_representatins = tf.concat( axis =2, values = all_question_aware_representatins) # [batch_size, passage_len, dim]
if is_training:
all_question_aware_representatins = tf.nn.dropout(all_question_aware_representatins, (1 - dropout_rate))
else:
all_question_aware_representatins = tf.multiply(all_question_aware_representatins, (1 - dropout_rate))
# ======Highway layer======
if with_match_highway:
with tf.variable_scope("matching_highway"):
all_question_aware_representatins = multi_highway_layer(all_question_aware_representatins, question_aware_dim,highway_layer_num)
#========Aggregation Layer======
aggregation_representation = []
aggregation_dim = 0
aggregation_input = all_question_aware_representatins
with tf.variable_scope('aggregation_layer'):
for i in xrange(aggregation_layer_num):
with tf.variable_scope('layer-{}'.format(i)):
aggregation_lstm_cell_fw = tf.nn.rnn_cell.BasicLSTMCell(aggregation_lstm_dim)
aggregation_lstm_cell_bw = tf.nn.rnn_cell.BasicLSTMCell(aggregation_lstm_dim)
if is_training:
aggregation_lstm_cell_fw = tf.nn.rnn_cell.DropoutWrapper(aggregation_lstm_cell_fw, output_keep_prob=(1 - dropout_rate))
aggregation_lstm_cell_bw = tf.nn.rnn_cell.DropoutWrapper(aggregation_lstm_cell_bw, output_keep_prob=(1 - dropout_rate))
aggregation_lstm_cell_fw = tf.nn.rnn_cell.MultiRNNCell([aggregation_lstm_cell_fw])
aggregation_lstm_cell_bw = tf.nn.rnn_cell.MultiRNNCell([aggregation_lstm_cell_bw])
cur_aggregation_representation, _ = my_rnn.bidirectional_dynamic_rnn(
aggregation_lstm_cell_fw, aggregation_lstm_cell_bw, aggregation_input,
dtype=tf.float32, sequence_length=passage_lengths)
fw_rep = cur_aggregation_representation[0][:,-1,:]
bw_rep = cur_aggregation_representation[1][:,0,:]
aggregation_representation.append(fw_rep)
aggregation_representation.append(bw_rep)
aggregation_dim += 2* aggregation_lstm_dim
aggregation_input = tf.concat( axis =2, values = cur_aggregation_representation)# [batch_size, passage_len, 2*aggregation_lstm_dim]
#
aggregation_representation = tf.concat( axis =1, values = aggregation_representation) # [batch_size, aggregation_dim]
# ======Highway layer======
if with_aggregation_highway:
with tf.variable_scope("aggregation_highway"):
agg_shape = tf.shape(aggregation_representation)
batch_size = agg_shape[0]
aggregation_representation = tf.reshape(aggregation_representation, [1, batch_size, aggregation_dim])
aggregation_representation = multi_highway_layer(aggregation_representation, aggregation_dim, highway_layer_num)
aggregation_representation = tf.reshape(aggregation_representation, [batch_size, aggregation_dim])
return (aggregation_representation, aggregation_dim)
def bilateral_match_func1(in_question_repres, in_passage_repres,
question_lengths, passage_lengths, question_mask, mask, MP_dim, input_dim,
with_filter_layer, context_layer_num, context_lstm_dim,is_training,dropout_rate,
with_match_highway,aggregation_layer_num, aggregation_lstm_dim,highway_layer_num,
with_aggregation_highway,with_lex_decomposition,lex_decompsition_dim,
with_full_match=True, with_maxpool_match=True, with_attentive_match=True, with_max_attentive_match=True,
with_left_match=True, with_right_match=True):
init_scale = 0.01
initializer = tf.random_uniform_initializer(-init_scale, init_scale)
match_representation = []
match_dim = 0
reuse_match_params = None
if with_left_match:
reuse_match_params = True
with tf.name_scope("match_passsage"):
with tf.variable_scope("MP-Match", reuse=None, initializer=initializer):
(passage_match_representation, passage_match_dim) = unidirectional_matching(in_question_repres, in_passage_repres,
question_lengths, passage_lengths, question_mask, mask, MP_dim, input_dim,
with_filter_layer, context_layer_num, context_lstm_dim,is_training,dropout_rate,
with_match_highway,aggregation_layer_num, aggregation_lstm_dim,highway_layer_num,
with_aggregation_highway,with_lex_decomposition,lex_decompsition_dim,
with_full_match=with_full_match, with_maxpool_match=with_maxpool_match,
with_attentive_match=with_attentive_match,
with_max_attentive_match=with_max_attentive_match)
match_representation.append(passage_match_representation)
match_dim += passage_match_dim
if with_right_match:
with tf.name_scope("match_question"):
with tf.variable_scope("MP-Match", reuse=reuse_match_params, initializer=initializer):
(question_match_representation, question_match_dim) = unidirectional_matching(in_passage_repres, in_question_repres,
passage_lengths, question_lengths, mask, question_mask, MP_dim, input_dim,
with_filter_layer, context_layer_num, context_lstm_dim,is_training,dropout_rate,
with_match_highway,aggregation_layer_num, aggregation_lstm_dim,highway_layer_num,
with_aggregation_highway, with_lex_decomposition,lex_decompsition_dim,
with_full_match=with_full_match, with_maxpool_match=with_maxpool_match,
with_attentive_match=with_attentive_match,
with_max_attentive_match=with_max_attentive_match)
match_representation.append(question_match_representation)
match_dim += question_match_dim
match_representation = tf.concat( axis =1, values = match_representation)
return (match_representation, match_dim)
def bilateral_match_func2(in_question_repres, in_passage_repres,
question_lengths, passage_lengths, question_mask, mask, MP_dim, input_dim,
with_filter_layer, context_layer_num, context_lstm_dim,is_training,dropout_rate,
with_match_highway,aggregation_layer_num, aggregation_lstm_dim,highway_layer_num,
with_aggregation_highway,with_lex_decomposition,lex_decompsition_dim,
with_full_match=True, with_maxpool_match=True, with_attentive_match=True, with_max_attentive_match=True,
with_left_match=True, with_right_match=True, with_mean_aggregation=True):
cosine_matrix = cal_relevancy_matrix(in_question_repres, in_passage_repres) # [batch_size, passage_len, question_len]
cosine_matrix = mask_relevancy_matrix(cosine_matrix, question_mask, mask)
cosine_matrix_transpose = tf.transpose(cosine_matrix, perm=[0,2,1])# [batch_size, question_len, passage_len]
# ====word level matching======
question_aware_representatins = []
question_aware_dim = 0
passage_aware_representatins = []
passage_aware_dim = 0
# max and mean pooling at word level
question_aware_representatins.append(tf.reduce_max(cosine_matrix, axis=2,keepdims=True)) # [batch_size, passage_length, 1]
question_aware_representatins.append(tf.reduce_mean(cosine_matrix, axis=2,keepdims=True))# [batch_size, passage_length, 1]
question_aware_dim += 2
passage_aware_representatins.append(tf.reduce_max(cosine_matrix_transpose, axis=2,keepdims=True))# [batch_size, question_len, 1]
passage_aware_representatins.append(tf.reduce_mean(cosine_matrix_transpose, axis=2,keepdims=True))# [batch_size, question_len, 1]
passage_aware_dim += 2
if MP_dim>0:
if with_max_attentive_match:
# max_att word level
qa_max_att = cal_max_question_representation(in_question_repres, cosine_matrix)# [batch_size, passage_len, dim]
qa_max_att_decomp_params = tf.get_variable("qa_word_max_att_decomp_params", shape=[MP_dim, input_dim], dtype=tf.float32)
qa_max_attentive_rep = cal_attentive_matching(in_passage_repres, qa_max_att, qa_max_att_decomp_params)# [batch_size, passage_len, decompse_dim]
question_aware_representatins.append(qa_max_attentive_rep)
question_aware_dim += MP_dim
pa_max_att = cal_max_question_representation(in_passage_repres, cosine_matrix_transpose)# [batch_size, question_len, dim]
pa_max_att_decomp_params = tf.get_variable("pa_word_max_att_decomp_params", shape=[MP_dim, input_dim], dtype=tf.float32)
pa_max_attentive_rep = cal_attentive_matching(in_question_repres, pa_max_att, pa_max_att_decomp_params)# [batch_size, question_len, decompse_dim]
passage_aware_representatins.append(pa_max_attentive_rep)
passage_aware_dim += MP_dim
with tf.variable_scope('context_MP_matching'):
for i in xrange(context_layer_num): # support multiple context layer
with tf.variable_scope('layer-{}'.format(i)):
with tf.variable_scope('context_represent'):
# parameters
context_lstm_cell_fw = tf.nn.rnn_cell.BasicLSTMCell(context_lstm_dim)
context_lstm_cell_bw = tf.nn.rnn_cell.BasicLSTMCell(context_lstm_dim)
if is_training:
context_lstm_cell_fw = tf.nn.rnn_cell.DropoutWrapper(context_lstm_cell_fw, output_keep_prob=(1 - dropout_rate))
context_lstm_cell_bw = tf.nn.rnn_cell.DropoutWrapper(context_lstm_cell_bw, output_keep_prob=(1 - dropout_rate))
context_lstm_cell_fw = tf.nn.rnn_cell.MultiRNNCell([context_lstm_cell_fw])
context_lstm_cell_bw = tf.nn.rnn_cell.MultiRNNCell([context_lstm_cell_bw])
# question representation
(question_context_representation_fw, question_context_representation_bw), _ = my_rnn.bidirectional_dynamic_rnn(
context_lstm_cell_fw, context_lstm_cell_bw, in_question_repres, dtype=tf.float32,
sequence_length=question_lengths) # [batch_size, question_len, context_lstm_dim]
in_question_repres = tf.concat( axis =2, values = [question_context_representation_fw, question_context_representation_bw])
# passage representation
tf.get_variable_scope().reuse_variables()
(passage_context_representation_fw, passage_context_representation_bw), _ = my_rnn.bidirectional_dynamic_rnn(
context_lstm_cell_fw, context_lstm_cell_bw, in_passage_repres, dtype=tf.float32,
sequence_length=passage_lengths) # [batch_size, passage_len, context_lstm_dim]
in_passage_repres = tf.concat( axis =2, values = [passage_context_representation_fw, passage_context_representation_bw])
# Multi-perspective matching
with tf.variable_scope('left_MP_matching'):
(matching_vectors, matching_dim) = match_passage_with_question(passage_context_representation_fw,
passage_context_representation_bw, mask,
question_context_representation_fw, question_context_representation_bw,question_mask,
MP_dim, context_lstm_dim, scope=None,
with_full_match=with_full_match, with_maxpool_match=with_maxpool_match,
with_attentive_match=with_attentive_match, with_max_attentive_match=with_max_attentive_match)
question_aware_representatins.extend(matching_vectors)
question_aware_dim += matching_dim
with tf.variable_scope('right_MP_matching'):
(matching_vectors, matching_dim) = match_passage_with_question(question_context_representation_fw,
question_context_representation_bw, question_mask,
passage_context_representation_fw, passage_context_representation_bw,mask,
MP_dim, context_lstm_dim, scope=None,
with_full_match=with_full_match, with_maxpool_match=with_maxpool_match,
with_attentive_match=with_attentive_match, with_max_attentive_match=with_max_attentive_match)
passage_aware_representatins.extend(matching_vectors)
passage_aware_dim += matching_dim
question_aware_representatins = tf.concat( axis =2, values = question_aware_representatins) # [batch_size, passage_len, question_aware_dim]
passage_aware_representatins = tf.concat( axis =2, values = passage_aware_representatins) # [batch_size, question_len, question_aware_dim]
if is_training:
question_aware_representatins = tf.nn.dropout(question_aware_representatins, (1 - dropout_rate))
passage_aware_representatins = tf.nn.dropout(passage_aware_representatins, (1 - dropout_rate))
else:
question_aware_representatins = tf.multiply(question_aware_representatins, (1 - dropout_rate))
passage_aware_representatins = tf.multiply(passage_aware_representatins, (1 - dropout_rate))
# ======Highway layer======
if with_match_highway:
with tf.variable_scope("left_matching_highway"):
question_aware_representatins = multi_highway_layer(question_aware_representatins, question_aware_dim,highway_layer_num)
with tf.variable_scope("right_matching_highway"):
passage_aware_representatins = multi_highway_layer(passage_aware_representatins, passage_aware_dim,highway_layer_num)
aggregation_representation = tf.concat([tf.reduce_max(question_aware_representatins,1),tf.reduce_max(passage_aware_representatins,1)],1)
aggregation_dim = question_aware_dim+passage_aware_dim
# ======Highway layer======
if with_aggregation_highway:
with tf.variable_scope("aggregation_highway"):
agg_shape = tf.shape(aggregation_representation)
batch_size = agg_shape[0]
aggregation_representation = tf.reshape(aggregation_representation, [1, batch_size, aggregation_dim])
aggregation_representation = multi_highway_layer(aggregation_representation, aggregation_dim, highway_layer_num)
aggregation_representation = tf.reshape(aggregation_representation, [batch_size, aggregation_dim])
return (aggregation_representation, aggregation_dim)
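# --- Illustrative addition (not part of the original model code) -------------
# The matching vector returned above is normally fed into a small prediction
# head. The sketch below shows one plausible head; the hidden size, tanh
# activation, and variable names are assumptions, not taken from this file.
def example_prediction_layer(match_representation, match_dim, num_classes, is_training, dropout_rate):
    hidden_dim = max(1, match_dim // 2)
    w_0 = tf.get_variable("example_pred_w_0", [match_dim, hidden_dim], dtype=tf.float32)
    b_0 = tf.get_variable("example_pred_b_0", [hidden_dim], dtype=tf.float32)
    w_1 = tf.get_variable("example_pred_w_1", [hidden_dim, num_classes], dtype=tf.float32)
    b_1 = tf.get_variable("example_pred_b_1", [num_classes], dtype=tf.float32)
    hidden = tf.tanh(tf.matmul(match_representation, w_0) + b_0)
    if is_training:
        hidden = tf.nn.dropout(hidden, (1 - dropout_rate))
    logits = tf.matmul(hidden, w_1) + b_1  # [batch_size, num_classes]
    return logits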
|
StarcoderdataPython
|
9713364
|
<gh_stars>0
import math
import time
import torch
from codebase.engine.train_supernet import SpeedTester
from codebase.engine.train_supernet_with_teacher import validate, set_running_statistics
from codebase.third_party.spos_ofa.ofa.nas.efficiency_predictor import \
PreResNetFLOPsModel, Mbv3FLOPsModel
from codebase.torchutils import logger
from codebase.torchutils.distributed import world_size
from codebase.torchutils.metrics import (
AccuracyMetric,
AverageMetric,
EstimatedTimeArrival,
)
from codebase.torchutils.common import unwarp_module
def train(
epoch,
network,
controller,
model,
loader,
criterion,
optimizer,
scheduler,
loss_lambda,
report_freq,
num_classes_per_superclass,
loss_type="mse"
):
controller.train()
model.eval()
n_superclass = unwarp_module(controller).n_superclass
superclass_loader_len = len(loader[0])
loader_len = n_superclass * superclass_loader_len
loss_metric = AverageMetric()
cross_entropy_metric = AverageMetric()
mse_metric = AverageMetric()
accuracy_metric = AccuracyMetric(topk=(1, 5))
ETA = EstimatedTimeArrival(loader_len)
speed_tester = SpeedTester()
logger.info(
f"Train start, epoch={epoch:04d}, lr={optimizer.param_groups[0]['lr']:.6f}"
)
permutation = torch.randperm(loader_len)
for iter_ in range(loader_len):
superclass_id = int(permutation[iter_] / superclass_loader_len)
data_idx = int(permutation[iter_] % superclass_loader_len)
inputs, targets = loader[superclass_id][data_idx]
inputs, targets = inputs.cuda(), targets.cuda()
superclass_id = inputs.new_tensor([superclass_id], dtype=torch.long)
constraint = unwarp_module(controller).sample_constraint()
if network == "preresnet20":
_, cum_indicators = controller([constraint], superclass_id)
logits = model(inputs, cum_indicators)
flops = unwarp_module(model).get_flops(
cum_indicators,
num_class_per_superclass=num_classes_per_superclass
) / 1e6
elif "mobilenetv3" in network:
_, _, _, depth_cum_indicators, ratio_cum_indicators, kernel_cum_size_indicators = controller([constraint],
superclass_id)
logits = model(inputs, depth_cum_indicators, ratio_cum_indicators, kernel_cum_size_indicators)
flops = unwarp_module(model).get_flops(
depth_cum_indicators,
ratio_cum_indicators,
kernel_cum_size_indicators,
num_class_per_superclass=num_classes_per_superclass
) / 1e6
if loss_type == "mse":
mse_loss = (flops - constraint) * (flops - constraint)
elif loss_type == "mse_half":
if flops <= constraint:
mse_loss = 0
else:
mse_loss = (flops - constraint) * (flops - constraint)
else:
raise NotImplementedError
cross_entropy = criterion(logits, targets)
loss = cross_entropy + loss_lambda * mse_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_metric.update(loss)
cross_entropy_metric.update(cross_entropy)
mse_metric.update(mse_loss)
accuracy_metric.update(logits, targets)
ETA.step()
speed_tester.update(inputs)
if iter_ % report_freq == 0 or iter_ == loader_len - 1:
logger.info(
", ".join(
[
"Train",
f"epoch={epoch:04d}",
f"iter={iter_:05d}/{loader_len:05d}",
f"speed={speed_tester.compute() * world_size():.2f} images/s",
f"loss={loss_metric.compute():.4f}",
f"ce_loss={cross_entropy_metric.compute():.4f}",
f"mse_loss={mse_metric.compute():.4f}",
f"top1-accuracy={accuracy_metric.at(1).rate * 100:.2f}%",
f"top5-accuracy={accuracy_metric.at(5).rate * 100:.2f}%",
f"ETA={ETA.remaining_time}",
f"cost={ETA.cost_time}" if iter_ == loader_len - 1 else "",
]
)
)
speed_tester.reset()
if scheduler is not None:
scheduler.step()
return (
loss_metric.compute(),
cross_entropy_metric.compute(),
mse_metric.compute(),
(accuracy_metric.at(1).rate, accuracy_metric.at(5).rate),
)
def test_time(
network,
controller,
model,
constraint,
constraint_num,
num_superclass
):
controller.eval()
model.eval()
# latency_constraints = list(range(15, 36, 5))
st = time.time()
repeat_num = 100
superclass_id = list(range(num_superclass)) * constraint_num
superclass_id = torch.tensor(superclass_id, dtype=torch.long)
# print(superclass_id.shape)
for i in range(repeat_num):
if network == "preresnet20":
width_mults, cum_indicators = controller([constraint] * num_superclass * constraint_num, superclass_id)
elif "mobilenetv3" in network:
depths, ratios, ks, depth_cum_indicators, ratio_cum_indicators, kernel_cum_size_indicators = controller(
[constraint] * num_superclass * constraint_num,
superclass_id
)
ed = time.time()
logger.info(f"Search in {(ed - st) / repeat_num:.6f} seconds")
def compute_tau(initial_tau, decay_factor, epoch):
return initial_tau * math.exp(-decay_factor * epoch)
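# Illustrative helper (an assumption, not used by the original training code):
# materialize the per-epoch Gumbel-softmax temperatures that compute_tau yields,
# e.g. tau_schedule(5.0, 0.045, 50) decays from 5.0 at epoch 0 towards ~0.55.
def tau_schedule(initial_tau, decay_factor, num_epochs):
    return [compute_tau(initial_tau, decay_factor, epoch) for epoch in range(num_epochs)]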
def test_flops(
network,
controller,
model,
test_model,
num_superclass,
num_classes_per_superclass,
image_size
):
controller.eval()
model.eval()
test_model.eval()
latency_constraints = list(range(150, 550, 50))
for superclass_id in range(num_superclass):
superclass_id = torch.tensor([superclass_id], dtype=torch.long).cuda()
for constraint in latency_constraints:
if network == "preresnet20":
width_mults, cum_indicators = controller([constraint], superclass_id)
model_flops = unwarp_module(model).get_flops(
cum_indicators,
num_class_per_superclass=num_classes_per_superclass) / 1e6
unwarp_module(model).set_active_subnet(d=[1, 1, 1], w=width_mults)
arch_dict = {
'd': [1, 1, 1],
'w': width_mults,
'image_size': image_size,
'superclass_id': superclass_id
}
efficiency_predictor = PreResNetFLOPsModel(
model,
num_classes_per_superclass=num_classes_per_superclass
)
elif "mobilenetv3" in network:
depths, ratios, ks, depth_cum_indicators, ratio_cum_indicators, kernel_cum_size_indicators = controller(
[constraint],
superclass_id)
model_flops = unwarp_module(model).get_flops(
depth_cum_indicators,
ratio_cum_indicators,
kernel_cum_size_indicators,
num_class_per_superclass=num_classes_per_superclass
) / 1e6
unwarp_module(model).set_active_subnet(ks, ratios, depths)
arch_dict = {
'ks': ks,
'e': ratios,
'd': depths,
'image_size': image_size,
'superclass_id': superclass_id
}
efficiency_predictor = Mbv3FLOPsModel(
model,
num_classes_per_superclass=num_classes_per_superclass
)
flops = efficiency_predictor.get_efficiency(arch_dict)
logger.info(f"FLOPs 1: {model_flops.item()}M, FLOPs 2: {flops}M")
# assert False
def sample_arch(
network,
controller,
model,
num_superclass,
num_classes_per_superclass,
constraint_low,
constraint_high,
interval,
image_size
):
controller.eval()
model.eval()
if network == "preresnet20":
latency_constraints = list(range(15, 36, 5))
elif "mobilenetv3" in network:
latency_constraints = list(range(constraint_low, constraint_high + 1, interval))
# for superclass_id in range(num_superclass):
superclass_id = 0
superclass_id = torch.tensor([superclass_id], dtype=torch.long).cuda()
# for constraint in latency_constraints:
constraint = latency_constraints[0]
flops_list = []
i = 0
for i in range(10000):
if network == "preresnet20":
width_mults, cum_indicators = controller([constraint], superclass_id)
# model_flops = model.get_flops(cum_indicators) / 1e6
unwarp_module(model).set_active_subnet(d=[1, 1, 1], w=width_mults)
arch_dict = {
'd': [1, 1, 1],
'w': width_mults,
'image_size': image_size,
'superclass_id': superclass_id
}
efficiency_predictor = PreResNetFLOPsModel(
model,
num_classes_per_superclass=num_classes_per_superclass
)
elif "mobilenetv3" in network:
depths, ratios, ks, depth_cum_indicators, ratio_cum_indicators, kernel_cum_size_indicators = controller(
[constraint],
superclass_id)
# model_flops = model.get_flops(depth_cum_indicators, ratio_cum_indicators,
# kernel_cum_size_indicators) / 1e6
unwarp_module(model).set_active_subnet(ks, ratios, depths)
arch_dict = {
'ks': ks,
'e': ratios,
'd': depths,
'image_size': image_size,
'superclass_id': superclass_id
}
efficiency_predictor = Mbv3FLOPsModel(
model,
num_classes_per_superclass=num_classes_per_superclass
)
flops = efficiency_predictor.get_efficiency(arch_dict)
flops_list.append(flops)
return flops_list
def test(
network,
controller,
model,
loader,
bn_subset_loader,
num_superclass,
num_classes_per_superclass,
constraint_low,
constraint_high,
interval,
image_size
):
controller.eval()
model.eval()
if network == "preresnet20":
latency_constraints = list(range(15, 36, 5))
elif "mobilenetv3" in network:
latency_constraints = list(range(int(constraint_low), int(constraint_high) + 1, int(interval)))
superclass_acc_list = []
superclass_flops_list = []
superclass_arch_dict_list = []
acc_metric = AverageMetric()
acc5_metric = AverageMetric()
mse_metric = AverageMetric()
for superclass_id in range(num_superclass):
# superclass_id = 0
superclass_id = torch.tensor([superclass_id], dtype=torch.long).cuda()
acc_list = []
flops_list = []
arch_list = []
for constraint in latency_constraints:
acc_sub_list = []
flops_sub_list = []
arch_dict_sub_list = []
i = 0
while len(acc_sub_list) < 10:
# for i in range(10):
if network == "preresnet20":
width_mults, cum_indicators = controller([constraint], superclass_id)
# model_flops = model.get_flops(cum_indicators) / 1e6
unwarp_module(model).set_active_subnet(d=[1, 1, 1], w=width_mults)
arch_dict = {
'd': [1, 1, 1],
'w': width_mults,
'image_size': image_size,
'superclass_id': superclass_id
}
efficiency_predictor = PreResNetFLOPsModel(
model,
num_classes_per_superclass=num_classes_per_superclass
)
elif "mobilenetv3" in network:
depths, ratios, ks, depth_cum_indicators, ratio_cum_indicators, kernel_cum_size_indicators = controller(
[constraint],
superclass_id)
# model_flops = model.get_flops(depth_cum_indicators, ratio_cum_indicators,
# kernel_cum_size_indicators) / 1e6
unwarp_module(model).set_active_subnet(ks, ratios, depths)
arch_dict = {
'ks': ks,
'e': ratios,
'd': depths,
'image_size': image_size,
'superclass_id': superclass_id
}
efficiency_predictor = Mbv3FLOPsModel(
model,
num_classes_per_superclass=num_classes_per_superclass
)
flops = efficiency_predictor.get_efficiency(arch_dict)
if flops > constraint:
continue
mse_loss = (flops - constraint) * (flops - constraint)
set_running_statistics(model, bn_subset_loader)
test_loss_list, test_masked_total_acc1, test_masked_total_acc5, test_masked_acc1, test_masked_acc5 = validate(
model, loader, num_superclass)
superclass_acc1 = test_masked_acc1[superclass_id.item()].rate
superclass_acc5 = test_masked_acc5[superclass_id.item()].rate
mse_metric.update(mse_loss)
acc_metric.update(superclass_acc1)
acc5_metric.update(superclass_acc5)
# logger.info(
# f"Superclass id: {superclass_id}, Constraint: {constraint}, FLOPs 1: {model_flops}, FLOPs 2: {flops}")
logger.info(f"Superclass id: {superclass_id.item()}, Constraint: {constraint}, FLOPs: {flops}, {i}-th")
acc_sub_list.append(superclass_acc1 * 100)
flops_sub_list.append(flops)
arch_dict_sub_list.append(arch_dict)
i += 1
max_acc = max(acc_sub_list)
max_index = acc_sub_list.index(max_acc)
acc_list.append(max_acc)
flops_list.append(flops_sub_list[max_index])
arch_list.append(arch_dict_sub_list[max_index])
logger.info(f"Acc list: {acc_list}")
logger.info(f"FLOPs list: {flops_list}")
superclass_acc_list.append(acc_list)
superclass_flops_list.append(flops_list)
superclass_arch_dict_list.append(arch_list)
return mse_metric.compute(), acc_metric.compute(), acc5_metric.compute(), superclass_acc_list, superclass_flops_list, superclass_arch_dict_list
|
StarcoderdataPython
|
9648278
|
<reponame>zarif007/Exp-Dashboard
from django.urls import path
from django.views.decorators.csrf import csrf_exempt
from .views import RegistrationView, UsernameValidation, EmailValidation, \
PasswordValidation, VerificationView, LoginView, LogoutView
urlpatterns = [
path('register', RegistrationView.as_view(), name='register'),
path('login', LoginView.as_view(), name='login'),
path('logout', LogoutView.as_view(), name='logout'),
path('validate-username', csrf_exempt(UsernameValidation.as_view()),
name='validate_username'),
path('validate-email', csrf_exempt(EmailValidation.as_view()),
name='validate_email'),
path('validate-password', csrf_exempt(PasswordValidation.as_view()),
name='validate_password'),
path('active/<uidb64>/<token>', csrf_exempt(VerificationView.as_view()),
name='activate'),
]
|
StarcoderdataPython
|
11274880
|
<reponame>Kreidl/pymailtojira<gh_stars>0
import jira
from jira import JIRA
#Authentication Method
def authenticate(username, password):
basic_auth=(username, password)
return basic_auth
#Create a JIRA Object which can be used later on
def createJIRAObject(jiraURL, username, password):
jira = JIRA(server=jiraURL, basic_auth=authenticate(username, password))
return jira
# Creates a JIRA issue and returns it
def createjiraIssue(jiraURL, username, password, projectKey, summary, description, issueTypeName):
jira = createJIRAObject(jiraURL, username, password)
issue_dict = {
'project': {'key': projectKey},
'summary': summary,
'description': description,
'issuetype': {'name': issueTypeName},
'assignee': {'name': username}
}
new_issue = jira.create_issue(fields=issue_dict)
return new_issue
def add_attachment(jiraURL, username, password, issue, URL):
jira = createJIRAObject(jiraURL, username, password)
return jira.add_attachment(issue=issue, attachment=URL)
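# Minimal usage sketch: the server URL, credentials, project key and file path
# below are placeholders (assumptions), not values from any real configuration.
if __name__ == '__main__':
    issue = createjiraIssue('https://jira.example.com', 'some.user', 'secret',
                            'PROJ', 'Mail subject', 'Mail body text', 'Task')
    add_attachment('https://jira.example.com', 'some.user', 'secret',
                   issue, '/tmp/original-mail.eml')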
|
StarcoderdataPython
|
8189261
|
<reponame>Erernaen/ecchat<filename>urwidext.py<gh_stars>0
#!/usr/bin/env python3
# coding: UTF-8
import urwid
################################################################################
## urwid extension classes #####################################################
################################################################################
class GridFlowPlus(urwid.GridFlow):
def keypress(self, size, key):
if isinstance(key, str):
if key in ('tab', ):
if self.focus_position == len(self.contents) - 1:
self.focus_position = 0
else:
self.focus_position += 1
return
if key in ('Y', 'y', 'O', 'o'): # Yes / OK
self.focus_position = 0
return super().keypress(size, 'enter')
if key in ('esc', 'N', 'n', 'C', 'c'): # ESCAPE / No / Cancel
self.focus_position = 1
return super().keypress(size, 'enter')
return super().keypress(size, key)
################################################################################
class YesNoDialog(urwid.WidgetWrap):
signals = ['commit']
def __init__(self, text, loop):
self.loop = loop
self.parent = self.loop.widget
self.body = urwid.Filler(urwid.Text(text))
self.frame = urwid.Frame(self.body, focus_part = 'body')
self.view = urwid.Padding(self.frame, ('fixed left', 2), ('fixed right' , 2))
self.view = urwid.Filler (self.view, ('fixed top' , 1), ('fixed bottom', 1))
self.view = urwid.LineBox(self.view)
self.view = urwid.Overlay(self.view, self.parent, 'center', len(text) + 6, 'middle', 7)
self.frame.footer = GridFlowPlus([urwid.AttrMap(urwid.Button('Yes', self.on_yes), 'btn_nm', 'btn_hl'),
urwid.AttrMap(urwid.Button('No' , self.on_no ), 'btn_nm', 'btn_hl')],
7, 3, 1, 'center')
self.frame.focus_position = 'footer'
super().__init__(self.view)
############################################################################
def on_yes(self, *args, **kwargs):
self.loop.widget = self.parent
urwid.emit_signal(self, 'commit')
############################################################################
def on_no(self, *args, **kwargs):
self.loop.widget = self.parent
############################################################################
def show(self):
self.loop.widget = self.view
################################################################################
class PassphraseEdit(urwid.Edit):
def __init__(self, on_enter, on_cancel, on_tab, **kwargs):
self.on_enter = on_enter
self.on_cancel = on_cancel
self.on_tab = on_tab
super().__init__(**kwargs)
############################################################################
def keypress(self, size, key):
if isinstance(key, str):
if key in ('enter', ):
self.on_enter()
return
if key in ('esc', ):
self.on_cancel()
return
if key in ('tab', ):
self.on_tab()
return
return super().keypress(size, key)
################################################################################
class PassphraseDialog(urwid.WidgetWrap):
signals = ['commit']
def __init__(self, text, loop):
self.text = text
self.loop = loop
self.parent = self.loop.widget
self.label = urwid.Text(self.text)
self.input = PassphraseEdit(self.on_ok, self.on_cancel, self.on_tab, multiline=False, wrap = 'clip', allow_tab = False, mask='*')
self.body = urwid.Pile([urwid.Filler(self.label), urwid.Filler(urwid.AttrMap(self.input, 'header'))], 1)
self.frame = urwid.Frame(self.body, focus_part = 'body')
self.view = urwid.Padding(self.frame, ('fixed left', 2), ('fixed right' , 2))
self.view = urwid.Filler (self.view, ('fixed top' , 1), ('fixed bottom', 1))
self.view = urwid.LineBox(self.view)
self.view = urwid.Overlay(self.view, self.parent, 'center', len(text) + 6, 'middle', 9)
self.frame.footer = GridFlowPlus([urwid.AttrMap(urwid.Button(' OK ', self.on_ok), 'btn_nm', 'btn_hl'),
urwid.AttrMap(urwid.Button('Cancel' , self.on_cancel), 'btn_nm', 'btn_hl')],
10, 3, 1, 'center')
self.frame.focus_position = 'body'
super().__init__(self.view)
############################################################################
def on_ok(self, *args, **kwargs):
self.loop.widget = self.parent
urwid.emit_signal(self, 'commit', True, self.input.get_edit_text())
############################################################################
def on_cancel(self, *args, **kwargs):
self.loop.widget = self.parent
urwid.emit_signal(self, 'commit', False, '')
############################################################################
def on_tab(self, *args, **kwargs):
self.frame.focus_position = 'footer'
############################################################################
def show(self):
self.loop.widget = self.view
################################################################################
class MessageListBox(urwid.ListBox):
def __init__(self, body):
super().__init__(body)
############################################################################
def render(self, size, *args, **kwargs):
self.last_render_size = size
return super().render(size, *args, **kwargs)
############################################################################
def key(self, key):
super().keypress(self.last_render_size, key)
############################################################################
def mouse_event(self, size, event, button, col, row, focus):
if button in (4, 5): # mouse wheel
self.key({4 : 'up', 5 : 'down'} [button])
################################################################################
class FrameFocus(urwid.Frame):
def __init__(self, body, header=None, footer=None, focus_part='body'):
self.focus_part = focus_part
super().__init__(body, header, footer, focus_part)
############################################################################
def mouse_event(self, size, event, button, col, row, focus):
if button in (4, 5): # mouse wheel
super().mouse_event(size, event, button, col, row, focus)
self.set_focus(self.focus_part)
################################################################################
class MessageWalker(urwid.SimpleListWalker):
def __init__(self):
self.qual = []
self.text = []
self.uuid = []
self.recallOffset = 0
self.uuidAtOffset = ''
super().__init__([])
############################################################################
def append(self, qual, text, uuid):
self.qual.append(qual)
self.text.append(text)
self.uuid.append(uuid)
self.recallOffset = 0
self.uuidAtOffset = ''
super().append(urwid.Text(text))
############################################################################
def replace(self, qual, text, uuid):
self.recallOffset = 0
self.uuidAtOffset = ''
for index, _uuid in enumerate(self.uuid):
if uuid == _uuid:
assert self.qual[index] == qual
self[index].set_text(text)
self.text[index] = text
break
############################################################################
def set_markup_style(self, uuid, element, style):
for index, _uuid in enumerate(self.uuid):
if uuid == _uuid:
markup = self.text[index]
(old_style, text) = markup[element]
markup[element] = (style, text)
self[index].set_text(markup)
self.text[index] = markup
break
############################################################################
def recall(self, qual, element, direction):
text = ''
self.recallOffset = min(0, self.recallOffset + direction)
if self.recallOffset < 0:
scan_index = len(self) - 1
qual_found = 0
while scan_index >= 0:
if self.qual[scan_index] == qual:
self.uuidAtOffset = self.uuid[scan_index]
markup = self.text[scan_index]
(style, text) = markup[element]
qual_found += 1
if qual_found + self.recallOffset == 0:
self.set_focus(scan_index)
break
scan_index -= 1
if qual_found + self.recallOffset < 0:
self.recallOffset += 1
return text
############################################################################
def recall_uuid(self):
return self.uuidAtOffset
################################################################################
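## usage sketch (not part of ecchat): the palette names 'btn_nm'/'btn_hl' are
## assumed to match whatever the host application defines, since the dialogs
## above reference them; everything else below is a minimal stand-alone demo.
def _demo_yes_no_dialog():
    def on_commit():
        raise urwid.ExitMainLoop()
    def unhandled(key):
        if key in ('q', 'Q'):
            dialog = YesNoDialog('Really quit?', loop)
            urwid.connect_signal(dialog, 'commit', on_commit)
            dialog.show()
    body = urwid.Filler(urwid.Text('press q to open the dialog, y/n to answer'))
    palette = [('btn_nm', 'default', 'default'), ('btn_hl', 'standout', '')]
    loop = urwid.MainLoop(body, palette=palette, unhandled_input=unhandled)
    loop.run()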
|
StarcoderdataPython
|
8019149
|
<filename>fhir/resources/DSTU2/namingsystem.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 (http://hl7.org/fhir/StructureDefinition/NamingSystem) on 2019-05-14.
# 2019, SMART Health IT.
from . import (backboneelement, codeableconcept, contactpoint, domainresource,
fhirdate, fhirreference, period)
class NamingSystem(domainresource.DomainResource):
""" System of unique identification.
A curated namespace that issues unique symbols within that namespace for
the identification of concepts, people, devices, etc. Represents a
"System" used within the Identifier and Coding data types.
"""
resource_name = "NamingSystem"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.contact = None
""" Contact details of the publisher.
List of `NamingSystemContact` items (represented as `dict` in JSON). """
self.date = None
""" Publication Date(/time).
Type `FHIRDate` (represented as `str` in JSON). """
self.description = None
""" What does naming system identify?.
Type `str`. """
self.kind = None
""" codesystem | identifier | root.
Type `str`. """
self.name = None
""" Human-readable label.
Type `str`. """
self.publisher = None
""" Name of the publisher (Organization or individual).
Type `str`. """
self.replacedBy = None
""" Use this instead.
Type `FHIRReference` referencing `NamingSystem` (represented as `dict` in JSON). """
self.responsible = None
""" Who maintains system namespace?.
Type `str`. """
self.status = None
""" draft | active | retired.
Type `str`. """
self.type = None
""" e.g. driver, provider, patient, bank etc..
Type `CodeableConcept` (represented as `dict` in JSON). """
self.uniqueId = None
""" Unique identifiers used for system.
List of `NamingSystemUniqueId` items (represented as `dict` in JSON). """
self.usage = None
""" How/where is it used.
Type `str`. """
self.useContext = None
""" Content intends to support these contexts.
List of `CodeableConcept` items (represented as `dict` in JSON). """
super(NamingSystem, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(NamingSystem, self).elementProperties()
js.extend(
[
("contact", "contact", NamingSystemContact, True, None, False),
("date", "date", fhirdate.FHIRDate, False, None, True),
("description", "description", str, False, None, False),
("kind", "kind", str, False, None, True),
("name", "name", str, False, None, True),
("publisher", "publisher", str, False, None, False),
(
"replacedBy",
"replacedBy",
fhirreference.FHIRReference,
False,
None,
False,
),
("responsible", "responsible", str, False, None, False),
("status", "status", str, False, None, True),
("type", "type", codeableconcept.CodeableConcept, False, None, False),
("uniqueId", "uniqueId", NamingSystemUniqueId, True, None, True),
("usage", "usage", str, False, None, False),
(
"useContext",
"useContext",
codeableconcept.CodeableConcept,
True,
None,
False,
),
]
)
return js
class NamingSystemContact(backboneelement.BackboneElement):
""" Contact details of the publisher.
Contacts to assist a user in finding and communicating with the publisher.
"""
resource_name = "NamingSystemContact"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.name = None
""" Name of a individual to contact.
Type `str`. """
self.telecom = None
""" Contact details for individual or publisher.
List of `ContactPoint` items (represented as `dict` in JSON). """
super(NamingSystemContact, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(NamingSystemContact, self).elementProperties()
js.extend(
[
("name", "name", str, False, None, False),
("telecom", "telecom", contactpoint.ContactPoint, True, None, False),
]
)
return js
class NamingSystemUniqueId(backboneelement.BackboneElement):
""" Unique identifiers used for system.
Indicates how the system may be identified when referenced in electronic
exchange.
"""
resource_name = "NamingSystemUniqueId"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.period = None
""" When is identifier valid?.
Type `Period` (represented as `dict` in JSON). """
self.preferred = None
""" Is this the id that should be used for this type.
Type `bool`. """
self.type = None
""" oid | uuid | uri | other.
Type `str`. """
self.value = None
""" The unique identifier.
Type `str`. """
super(NamingSystemUniqueId, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(NamingSystemUniqueId, self).elementProperties()
js.extend(
[
("period", "period", period.Period, False, None, False),
("preferred", "preferred", bool, False, None, False),
("type", "type", str, False, None, True),
("value", "value", str, False, None, True),
]
)
return js
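# Hedged usage sketch: the resource content below is an illustrative minimal
# example (an assumption), not data from any FHIR server; it simply exercises
# the strict constructor defined above.
if __name__ == "__main__":
    example = {
        "resourceType": "NamingSystem",
        "name": "Example identifier namespace",
        "status": "draft",
        "kind": "identifier",
        "date": "2019-05-14",
        "uniqueId": [{"type": "uri", "value": "http://example.org/identifiers"}],
    }
    ns = NamingSystem(example)
    print(ns.name, ns.kind, ns.uniqueId[0].value)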
|
StarcoderdataPython
|
328675
|
#!/usr/bin/env python
"""
Takes a cifti map ('dscalar.nii') and outputs a csv of results
Usage:
ciftify_statclust_report [options] <func.dscalar.nii>
Arguments:
<func.dscalar.nii> Input map.
Options:
--min-threshold MIN the largest value [default: -2.85] to consider for being a minimum
--max-threshold MAX the smallest value [default: 2.85] to consider for being a maximum
--area-threshold MIN threshold [default: 20] for surface cluster area, in mm^2
--surface-distance MM minimum distance in mm [default: 20] between extrema of the same type.
--volume-distance MM minimum distance in mm [default: 20] between extrema of the same type.
--outputbase prefix Output prefix (with path) to output documents
--no-cluster-dlabel Do not output a dlabel map of the clusters
--output-peaks Also output an additional output of peak locations
--left-surface GII Left surface file (default is HCP S1200 Group Average)
--right-surface GII Right surface file (default is HCP S1200 Group Average)
--left-surf-area GII Left surface vertex areas file (default is HCP S1200 Group Average)
--right-surf-area GII Right surface vertex areas file (default is HCP S1200 Group Average)
--debug Debug logging
-n,--dry-run Dry run
-h, --help Prints this message
DETAILS
Note: at the moment generates separate outputs for surface.
Uses -cifti-separate in combination with FSL's clusterize to get information from
the subcortical space.
Outputs a cluster report csv with the following headings:
+ clusterID: Integer for the cluster this peak is from (corresponds to dlabel.nii)
+ cluster_name: the cluster label
      + by default this will be "LABEL_<clusterID>" but this can be changed
in the .dlabel.nii file using connectome-workbench
+ mean_value: the average value for this cluster within the input dscalar.nii map
+ area: the surface area of the cluster (on the specified surface)
+ DKT_overlap: a list of DKT freesurfer anatomical atlas (aparc) atlas labels
that overlap with this cluster and the percent overlap of each label
+ Yeo7_overlap: a list of the Yeo et al 2011 7 network labels that overlap
with this cluster and the percent overlap of each label
+ MMP_overlap: The labels from the Glasser et al (2016) Multi-Modal Parcellation
that overlap with this cluster and the percent overlap of each label
If the "--output-peaks" flag is indicated, an addtional table will be output
with several headings:
+ clusterID: Integer for the cluster this peak is from (corresponds to dlabel.nii)
+ hemisphere: Hemisphere the peak is in (L or R)
+ vertex: The vertex id
+ x,y,z: The nearest x,y,z coordinates to the vertex
+ value: The intensity (value) at that vertex in the func.dscalar.nii
+ DKT: The label from the freesurfer anatomical atlas (aparc) at the vertex
+ DKT_overlap: The proportion of the cluster (clusterID) that overlaps with the DKT atlas label
+ Yeo7: The label from the Yeo et al 2011 7 network atlas at this peak vertex
+ Yeo7_overlap: The proportion of the cluster (clusterID) that overlaps with this Yeo7 network label
+ MMP: The label from the Glasser et al (2016) Multi-Modal Parcellation
+ MMP_overlap: The proportion of the cluster (clusterID) that overlaps with the MMP atlas label
If no surfaces or surface area files are given, the midthickness surfaces from
the HCP S1200 Group Mean will be used, as well as their vertex-wise
surface area information.
The default name for the output csv is taken from the input file,
i.e. func.dscalar.nii --> func_peaks.csv
Unless the '--no-cluster-dlabel' flag is given, a map of the clusters will be
written to the same folder as the output csv to aid in visualization of the results.
This dlabel map will have a name ending in '_clust.dlabel.nii'.
(i.e. func_peaks.csv & func_clust.dlabel.nii)
Atlas References:
Yeo, BT. et al. 2011. 'The Organization of the Human Cerebral Cortex
Estimated by Intrinsic Functional Connectivity.' Journal of Neurophysiology
106 (3): 1125-65.
Desikan, RS.et al. 2006. 'An Automated Labeling System for Subdividing the
Human Cerebral Cortex on MRI Scans into Gyral Based Regions of Interest.'
NeuroImage 31 (3): 968-80.
Glasser, MF. et al. 2016. 'A Multi-Modal Parcellation of Human Cerebral Cortex.'
Nature 536 (7615): 171-78.
Written by <NAME>, Last updated August 27, 2017
"""
from docopt import docopt
import os, sys
import numpy as np
import pandas as pd
import logging
import logging.config
import ciftify.io
import ciftify.report
import ciftify.utils
import ciftify.config
from ciftify.meants import NibInput
config_path = os.path.join(os.path.dirname(ciftify.config.find_ciftify_global()), 'bin', "logging.conf")
logging.config.fileConfig(config_path, disable_existing_loggers=False)
logger = logging.getLogger(os.path.basename(__file__))
def load_LR_vertex_areas(surf_settings):
    ''' loads the left and right vertex areas and stacks them into a single array'''
surf_va_L = ciftify.io.load_gii_data(surf_settings.L.vertex_areas)
surf_va_R = ciftify.io.load_gii_data(surf_settings.R.vertex_areas)
surf_va_LR = np.vstack((surf_va_L, surf_va_R))
return(surf_va_LR)
def report_atlas_overlap(df, label_data, atlas, surf_va_LR, min_percent_overlap = 5):
# read the atlas
atlas_data, atlas_dict = ciftify.io.load_LR_label(atlas['path'],
int(atlas['map_number']))
# write an overlap report to the outputfile
o_col = '{}_overlap'.format(atlas['name'])
df[o_col] = ""
for pd_idx in df.index.get_values():
df.loc[pd_idx, o_col] = ciftify.report.get_label_overlap_summary(
pd_idx, label_data, atlas_data, atlas_dict, surf_va_LR,
min_percent_overlap = min_percent_overlap)
return(df)
def run_ciftify_dlabel_report(arguments, tmpdir):
dscalar_in = NibInput(arguments['<func.dscalar.nii>'])
surf_distance = arguments['--surface-distance']
outputbase = arguments['--outputbase']
dont_output_clusters = arguments['--no-cluster-dlabel']
output_peaktable = arguments['--output-peaks']
surf_settings = ciftify.report.CombinedSurfaceSettings(arguments, tmpdir)
atlas_settings = ciftify.report.define_atlas_settings()
## if not outputname is given, create it from the input dscalar map
if not outputbase:
outputbase = os.path.join(os.path.dirname(dscalar_in.path), dscalar_in.base)
ciftify.utils.check_output_writable(outputbase, exit_on_error = True)
clusters_dscalar = clusterise_dscalar_input(dscalar_in.path,
arguments,
surf_settings,
tmpdir)
if dont_output_clusters:
cluster_dlabel = os.path.join(tmpdir, 'clust.dlabel.nii')
else:
cluster_dlabel = '{}_clust.dlabel.nii'.format(outputbase)
empty_labels = os.path.join(tmpdir, 'empty_labels.txt')
ciftify.utils.run('touch {}'.format(empty_labels))
ciftify.utils.run(['wb_command', '-cifti-label-import',
clusters_dscalar, empty_labels, cluster_dlabel])
## load the data
label_data, label_dict = ciftify.io.load_LR_label(cluster_dlabel, map_number = 1)
## define the outputcsv
outputcsv = '{}_statclust_report.csv'.format(outputbase)
logger.info('Output table: {}'.format(outputcsv))
## load the vertex areas
surf_va_LR = load_LR_vertex_areas(surf_settings)
## assert that the dimensions match
if not (label_data.shape[0] == surf_va_LR.shape[0]):
logger.error('label file vertices {} not equal to vertex areas {}'
''.format(label_data.shape[0], surf_va_LR.shape[0]))
sys.exit(1)
## use the label dict to start the report dataframe
df = pd.DataFrame.from_dict(label_dict, orient = "index")
df['label_idx'] = df.index
df = df.rename(index=str, columns={0: "label_name"})
# calculate a column of the surface area for row ROIs
df['area'] = -999
for pd_idx in df.index.get_values():
df.loc[pd_idx, 'area'] = ciftify.report.calc_cluster_area(pd_idx,
label_data, surf_va_LR)
for atlas in atlas_settings.values():
df = report_atlas_overlap(df, label_data, atlas,
surf_va_LR, min_percent_overlap = 5)
df.to_csv(outputcsv)
if output_peaktable:
write_statclust_peaktable(dscalar_in.path, clusters_dscalar, outputbase,
arguments, surf_settings, atlas_settings)
class ThresholdArgs(object):
    '''little class that holds the user arguments about thresholds'''
    def __init__(self, arguments):
        self.min = arguments['--min-threshold']
        self.max = arguments['--max-threshold']
        self.area = arguments['--area-threshold']
        self.surface_distance = arguments['--surface-distance']
        self.volume_distance = arguments['--volume-distance']
def clusterise_dscalar_input(data_file, arguments, surf_settings, tmpdir):
'''runs wb_command -cifti-find-clusters twice
returns the path to the output
'''
## also run clusterize with the same settings to get clusters
pcluster_dscalar = os.path.join(tmpdir,'pclusters.dscalar.nii')
wb_cifti_clusters(data_file, pcluster_dscalar, surf_settings,
arguments['--max-threshold'],
arguments['--area-threshold'],
less_than = False, starting_label=1)
## load both cluster files to determine the max value
pos_clust_data = ciftify.io.load_concat_cifti_surfaces(pcluster_dscalar)
max_pos = int(np.max(pos_clust_data))
## now get the negative clusters
ncluster_dscalar = os.path.join(tmpdir,'nclusters.dscalar.nii')
wb_cifti_clusters(data_file, ncluster_dscalar, surf_settings,
arguments['--min-threshold'],
arguments['--area-threshold'],
less_than = True, starting_label=max_pos + 1)
## add the positive and negative together to make one cluster map
clusters_out = os.path.join(tmpdir,'clusters.dscalar.nii')
ciftify.utils.run(['wb_command', '-cifti-math "(x+y)"',
clusters_out,
'-var','x',pcluster_dscalar, '-var','y',ncluster_dscalar])
return clusters_out
def wb_cifti_clusters(input_cifti, output_cifti, surf_settings,
                      value_threshold, minimum_size, less_than, starting_label=1):
    '''runs wb_command -cifti-find-clusters'''
    wb_arglist = ['wb_command', '-cifti-find-clusters',
                  input_cifti,
                  str(value_threshold), str(minimum_size),
                  str(value_threshold), str(minimum_size),
'COLUMN',
output_cifti,
'-left-surface', surf_settings.L.surface,
'-corrected-areas', surf_settings.L.vertex_areas,
'-right-surface', surf_settings.R.surface,
'-corrected-areas', surf_settings.R.vertex_areas,
'-start', str(starting_label)]
if less_than : wb_arglist.append('-less-than')
cinfo = ciftify.io.cifti_info(input_cifti)
if cinfo['maps_to_volume']: wb_arglist.append('-merged-volume')
ciftify.utils.run(wb_arglist)
def write_statclust_peaktable(data_file, clusters_dscalar, outputbase,
arguments, surf_settings, atlas_settings):
'''runs the old peak table functionality
Parameters
----------
data_file : filepath
path to the dscalar map input
clusters_dscalar : filepath
path to the cluster file created with same settings
outputbase :
the prefix for the outputfile
arguments : dict
the user args dictionary to pull the thresholds from
surf_settings : dict
the dictionary of paths to the surface files,
created by ciftify.report.CombinedSurfaceSettings
    atlas_settings : dict
      dictionary of paths and settings related to the atlases to use for overlap
      comparison. Created by ciftify.report.define_atlas_settings()
Outputs
-------
writes a csv to <outputbase>_cortex_peaks.csv
'''
with ciftify.utils.TempDir() as ex_tmpdir:
        ## run FSL's cluster on the subcortical bits
cinfo = ciftify.io.cifti_info(data_file)
if cinfo['maps_to_volume']:
subcortical_vol = os.path.join(ex_tmpdir, 'subcortical.nii.gz')
ciftify.utils.run(['wb_command', '-cifti-separate', data_file, 'COLUMN', '-volume-all', subcortical_vol])
fslcluster_cmd = ['cluster',
'--in={}'.format(subcortical_vol),
'--thresh={}'.format(arguments['--max-threshold']),
'--peakdist={}'.format(arguments['--volume-distance'])]
peak_table = ciftify.utils.get_stdout(fslcluster_cmd)
with open("{}_subcortical_peaks.csv".format(outputbase), "w") as text_file:
text_file.write(peak_table.replace('/t',','))
else:
logger.info('No subcortical volume data in {}'.format(data_file))
## run wb_command -cifti-extrema to find the peak locations
extrema_dscalar = os.path.join(ex_tmpdir,'extrema.dscalar.nii')
ciftify.utils.run(['wb_command','-cifti-extrema',
data_file,
str(arguments['--surface-distance']),
str(arguments['--volume-distance']),
'COLUMN',
extrema_dscalar,
'-left-surface', surf_settings.L.surface,
'-right-surface', surf_settings.R.surface,
'-threshold',
str(arguments['--min-threshold']),
str(arguments['--max-threshold'])])
        ## multiply the cluster labels by the extrema to get the labeled extrema
lab_extrema_dscalar = os.path.join(ex_tmpdir,'lab_extrema.dscalar.nii')
ciftify.utils.run(['wb_command', '-cifti-math "(abs(x*y))"',
lab_extrema_dscalar,
'-var','x',clusters_dscalar, '-var','y',extrema_dscalar])
## run left and right dfs... then concatenate them
dfL = build_hemi_results_df(surf_settings.L, atlas_settings,
data_file, lab_extrema_dscalar, clusters_dscalar)
dfR = build_hemi_results_df(surf_settings.R, atlas_settings,
data_file, lab_extrema_dscalar, clusters_dscalar)
df = dfL.append(dfR, ignore_index = True)
## write the table out to the outputcsv
output_columns = ['clusterID','hemisphere','vertex', 'peak_value', 'area']
decimals_out = {"clusterID":0, 'peak_value':3, 'area':0}
for atlas in atlas_settings.keys():
atlas_name = atlas_settings[atlas]['name']
output_columns.append(atlas_name)
output_columns.append('{}_overlap'.format(atlas_name))
decimals_out['{}_overlap'.format(atlas_name)] = 3
df = df.round(decimals_out)
df.to_csv("{}_cortex_peaks.csv".format(outputbase),
columns = output_columns,index=False)
def build_hemi_results_df(surf_settings, atlas_settings,
input_dscalar, extreama_dscalar, clusters_dscalar):
## read in the extrema file from above
extrema_array = ciftify.io.load_hemisphere_data(extreama_dscalar, surf_settings.wb_structure)
vertices = np.nonzero(extrema_array)[0] # indices - vertex id for peaks in hemisphere
## read in the original data for the value column
input_data_array = ciftify.io.load_hemisphere_data(input_dscalar, surf_settings.wb_structure)
## load both cluster indices
clust_array = ciftify.io.load_hemisphere_data(clusters_dscalar, surf_settings.wb_structure)
## load the coordinates
coords = ciftify.io.load_surf_coords(surf_settings.surface)
surf_va = ciftify.io.load_gii_data(surf_settings.vertex_areas)
## put all this info together into one pandas dataframe
df = pd.DataFrame({"clusterID": np.reshape(extrema_array[vertices],(len(vertices),)),
"hemisphere": surf_settings.hemi,
"vertex": vertices,
'peak_value': [round(x,3) for x in np.reshape(input_data_array[vertices],(len(vertices),))]})
## look at atlas overlap
for atlas in atlas_settings.keys():
df = calc_atlas_overlap(df, surf_settings.wb_structure, clust_array, surf_va, atlas_settings[atlas])
return(df)
def calc_atlas_overlap(df, wb_structure, clust_label_array, surf_va, atlas_settings):
'''
calculates the surface area column of the peaks table
needs hemisphere specific inputs
'''
## load atlas
atlas_label_array, atlas_dict = ciftify.io.load_hemisphere_labels(atlas_settings['path'],
wb_structure,
map_number = atlas_settings['map_number'])
atlas_prefix = atlas_settings['name']
## create new cols to hold the data
df[atlas_prefix] = pd.Series('not_calculated', index = df.index)
overlap_col = '{}_overlap'.format(atlas_prefix)
df[overlap_col] = pd.Series(-99.0, index = df.index)
for pd_idx in df.index.tolist():
        ## atlas integer label is the integer at the vertex
atlas_label = atlas_label_array[df.loc[pd_idx, 'vertex']]
## the atlas column holds the labelname for this label
df.loc[pd_idx, atlas_prefix] = atlas_dict[atlas_label]
overlap_area = ciftify.report.calc_overlapping_area(
df.loc[pd_idx, 'clusterID'], clust_label_array,
atlas_label, atlas_label_array,
surf_va)
## overlap area is the area of the overlaping region over the total cluster area
clust_area = ciftify.report.calc_cluster_area(
df.loc[pd_idx, 'clusterID'],
clust_label_array,
surf_va)
df.loc[pd_idx, overlap_col] = overlap_area/clust_area
return(df)
def main():
arguments = docopt(__doc__)
logger.setLevel(logging.WARNING)
if arguments['--debug']:
logger.setLevel(logging.DEBUG)
logging.getLogger('ciftify').setLevel(logging.DEBUG)
## set up the top of the log
logger.info('{}{}'.format(ciftify.utils.ciftify_logo(),
ciftify.utils.section_header('Starting ciftify_statclust_report')))
ciftify.utils.log_arguments(arguments)
with ciftify.utils.TempDir() as tmpdir:
logger.info('Creating tempdir:{} on host:{}'.format(tmpdir,
os.uname()[1]))
ret = run_ciftify_dlabel_report(arguments, tmpdir)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
5184344
|
"""
Question Source: https://leetcode.com/problems/maximum-depth-of-binary-tree/
Level: Easy
Topic: Tree
Solver: Tayyrov
Date: 14.02.2022
"""
from typing import Optional
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
def maxDepth(root: Optional[TreeNode]) -> int:
# if not root:
# return 0
# q = deque([(root, 1)])
# while q:
# curr_node, curr_level = q.popleft()
# ans = curr_level
# if curr_node.left:
# q.append((curr_node.left, curr_level+1))
# if curr_node.right:
# q.append((curr_node.right, curr_level+1))
# return ans
if not root:
return 0
return max(maxDepth(root.left), maxDepth(root.right)) + 1
"""
Time : O(N)
Space: O(h) where h is the height of the tree; in the worst case h can be equal to N
"""
|
StarcoderdataPython
|
5138955
|
<filename>SimTracker/SiStripDigitizer/python/SiStripDigi_APVModePeak_cff.py
raise RuntimeError("Do not import obsolete file SiStripDigi_APVModePeak_cff.py. Use 'SimGeneral.MixingModule.stripDigitizer_APVModePeak_cff.py_cff'")
|
StarcoderdataPython
|
4890208
|
#!/usr/bin/python3
import re
import csv
def perf_callback_factory(event_name, data_keys, remap=None):
""" Return a specialized callback for perf event in bcc.
TODO: add time offset to d['ts']
"""
def decorator(func):
# handle remapped guid key (inverse lookup)
        guid = 'guid'
        if remap is not None:
            for oldkey, newkey in remap.items():
                if newkey == 'guid':
                    guid = oldkey
def generic_print(self, cpu, data, size):
event = self.b[event_name].event(data)
d = {field:getattr(event, field) for field in data_keys} # a data point in sofa
d['layer'] = event_name
d['ts'] = d['ts'] / 1e9
d[guid] = get_guid_str(d[guid])
for k, v in d.items():
try:
if type(v) == bytes:
d[k] = d[k].decode('utf-8')
except UnicodeDecodeError as ude:
d[k] = ''
# apply any modification to d
func(self, d)
self.log.print(d, remap=remap)
return generic_print
return decorator
def get_guid_str(guid_array):
""" Convert a guid array into string. """
prefix = guid_array[:12]
entity = guid_array[12:16]
prefix_str = '.'.join('{:x}'.format(c) for c in prefix)
entity_str = '.'.join('{:x}'.format(c) for c in entity)
return '|'.join([prefix_str, entity_str])
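# A quick sanity check of get_guid_str, assuming a plain Python list stands in for
# the ctypes array produced by bcc (illustrative example, not from the original file):
# >>> get_guid_str([1, 15, 231, 19, 3, 119, 0, 0, 1, 0, 0, 0, 0, 0, 16, 3])
# '1.f.e7.13.3.77.0.0.1.0.0.0|0.0.10.3'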
class Log:
""" sofa_ros2 logging system """
def __init__(self, fields, fmtstr, cvsfilename=None, print_raw=False):
self.fields = fields
self.fmtstr = fmtstr
if cvsfilename is not None:
self.f = open(cvsfilename, 'w')
self.cvslog = csv.DictWriter(self.f, fields)
self.cvslog.writeheader()
if print_raw:
self.print = self.print_raw
else:
fieldfmts = re.split(r'\ +', self.fmtstr)
self.fieldfmts = dict(zip(fields, fieldfmts))
# extract only width in standard format specifier
hdrfmt = self.clear_specifiers(fmtstr)
hdrfmts = re.split(r'\ +', hdrfmt)
print(' '.join(hdrfmts).format(*fields))
def close(self):
if hasattr(self, 'f'):
self.f.close()
def clear_specifiers(self, str):
return re.sub(r'#|[a-zA-Z]|\.\d+', '', str)
def print(self, data, remap=None):
""" Write log on console and a csv file. data is of type dictionary """
fieldfmts = self.fieldfmts.copy()
# remap keys
if remap is not None:
for oldkey, newkey in remap.items():
data[newkey] = data.pop(oldkey)
# assign default value to each key
for field in self.fields:
if not field in data or data[field] is None:
data[field] = ''
fieldfmts[field] = self.clear_specifiers(fieldfmts[field])
# don't print empty guid
try:
if data['guid'] == '0.0.0.0.0.0.0.0.0.0.0.0|0.0.0.0':
data['guid'] = ''
except KeyError as e:
pass
fmtstr = ' '.join(fieldfmts[field] for field in self.fields)
interested_data = [data[field] for field in self.fields]
print(fmtstr.format(*interested_data))
if hasattr(self, 'f'):
self.cvslog.writerow(dict(zip(self.fields, interested_data)))
def print_raw(self, data, remap=None):
# remap keys
if remap is not None:
for oldkey, newkey in remap.items():
data[newkey] = data.pop(oldkey)
interested_data = {k:data[k] for k in self.fields if k in data.keys()}
# don't print empty guid
try:
if interested_data['guid'] == '0.0.0.0.0.0.0.0.0.0.0.0|0.0.0.0':
interested_data['guid'] = ''
except KeyError as e:
pass
print(interested_data)
if hasattr(self, 'f'):
self.cvslog.writerow(interested_data)
if __name__ == "__main__":
log = Log(['ts', 'comm', 'pid', 'topic_name', 'guid', 'seqnum'],
'{:<14.4f} {:<11} {:<#18x} {:<20} {:<40} {:3d}', 'send_log')
data = {'func':'rcl_publish', 'ts':324874.41122, 'comm':'talker', 'pid':0x55601bc0f550,
'topic_name':'/chatter', 'ep_guid':'1.f.e7.13.3.77.0.0.1.0.0.0|0.0.10.3'}
log.print(data, remap={'ep_guid':'guid'})
log.close()
|
StarcoderdataPython
|
3531914
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- Python -*-
import sys
import argparse
import subprocess
import logging
import distutils.spawn
class RtmdockerCleaner:
''' Utility class to operate OpenRTM on Docker
This is utility class to operate OpenRTM on Docker.
'''
def __init__(self):
# Set parser
self._args = self.parser()
# Set logger
logging.basicConfig(format='%(asctime)s:%(levelname)s: %(message)s',
level=logging.INFO)
        # Check that the docker command exists
if not distutils.spawn.find_executable('docker'):
logging.error(
"Docker is not installed. Please install Docker first.")
sys.exit(1)
def parser(self):
argparser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter)
argparser.add_argument(
'-v', '--version', action='version', version='%(prog)s 1.0.0')
argparser.add_argument('-i', '--images', action='store_true',
help='remove all docker images')
argparser.add_argument('-c', '--containers', action='store_true',
help='stop & remove all docker containers')
argparser.add_argument('-a', '--all', action='store_true',
help='remove all docker containers & images')
argparser.add_argument('--dryrun', action='store_true',
help='dry run for debug')
return argparser.parse_args()
def start(self):
        # stop & remove docker containers and/or images depending on the flags
        logging.info("Starting cleanup...")
if self._args.containers or self._args.all:
self.remove_containers()
if self._args.images or self._args.all:
self.remove_images()
logging.info("Completed")
def remove_containers(self):
# check all docker containers
ps = ""
try:
cmd = "docker ps -a -q"
            ps = subprocess.check_output(cmd, shell=True, universal_newlines=True).replace("\n", " ").strip()
except subprocess.CalledProcessError:
logging.info("No containers...")
# stop & remove all docker containers if exist
if ps:
logging.info("containers: " + ps)
cmd = "docker stop " + str(ps)
logging.info("command: " + cmd)
subprocess.call(cmd.split(" "))
cmd = "docker rm -f " + str(ps)
logging.info("command: " + cmd)
if not self._args.dryrun:
subprocess.call(cmd.split(" "))
return
def remove_images(self):
# check all docker images
images = ""
try:
cmd = "docker images -a -q"
            images = subprocess.check_output(cmd, shell=True, universal_newlines=True).replace("\n", " ").strip()
except subprocess.CalledProcessError:
logging.info("No images...")
# remove all docker images if exist
if images:
cmd = "docker rmi -f " + str(images)
logging.info("command: " + cmd)
if not self._args.dryrun:
subprocess.call(cmd.split(" "))
return
def main():
cleaner = RtmdockerCleaner()
cleaner.start()
return
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
6577741
|
<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 27 07:36:29 2020
@author: szekely
"""
from codes.helpers import load_wav, list_filenames
import librosa
import numpy as np
import soundfile
#%%
in_folder = './mtm/mtm_split/'
out_folder = './mtm/mtm/'
files = list(list_filenames(in_folder, ['.wav'], add_ext=False))
#with open('./trim_list.txt', 'r') as f:
# files = list(f)
files = [f.rstrip() for f in files]
files.sort()
sr = 22050
#files[0] = 'sn0801_sent051'
margin = 0.01 # seconds
marginr = 0.01 # use 0.27 for k s t at end of utterance (before breath)
#%%
dur = np.zeros((len(files),4))
#for i in range(100):
for i in range(len(files)):
y = load_wav(in_folder + files[i] + '.wav', sr=sr)
dur[i,0] = len(y[1])/sr
y_out = librosa.effects.trim(y[1], top_db=18)
if y_out[1][0] > margin*sr:
start = int( y_out[1][0] - margin*sr )
else:
start = 0
if y_out[1][1] < y[2] - marginr*sr:
stop = int( y_out[1][1] + marginr*sr )
else:
stop = int( y[2] ) # use this by definition for no end trim
#stop = int( y[2] ) # use this by definition for no end trim
soundfile.write(out_folder + files[i] + '.wav', y[1][start:stop], sr, subtype='PCM_16')
dur[i,1] = start/sr
dur[i,2] = stop/sr
dur[i,3] = (stop - start)/sr
#%%
with open('./mtm/mtm_extreme_trimmed.txt', 'w') as f:
for item in zip(files, dur):
f.write("%s %s\n" % item)
#%% copy files over
with open('./copy_list.txt', 'r') as f:
files = list(f)
files = [f.rstrip() for f in files]
from shutil import copyfile
in_folder = 'train_input/wavs/M01_trim/'
out_folder = 'train/wavs/M01/'
for file in files:
copyfile(in_folder + file, out_folder + file)
|
StarcoderdataPython
|
8114114
|
from tkinter import Label, Button, Frame, StringVar, Radiobutton
from tkinter.constants import TOP, RIGHT, YES
from typing import Any, TypedDict
import webbrowser as wb
from random import randint as rd
class State(TypedDict):
text: str
error : str
class Home():
'''
    Initial home screen of the app, themed from props
'''
def __init__(self,props):
self.props=props
self.root=props['root']
self.back= props['Info'].Design['Color']['Background']
self.TextColor : str = props['Info'].Design['Color']['Text']
self.ButtonColor : str = props['Info'].Design['Color']['Button']
self.ButtonTextColor: str = props['Info'].Design['Color']['ButtonText']
self.ButtonHoverColor: str = props['Info'].Design['Color']['ButtonHover']
self.BackgroundColor : str = props['Info'].Design['Color']['Background']
self.EntryColor : str = props['Info'].Design['Color']['Entry']
self.EntryTextColor : str = props['Info'].Design['Color']['EntryText']
self.MainFrame : Frame = Frame(self.root,bg=self.back)
self.TitleFrame : Frame = Frame(self.root,bg=self.back)
self.ChoiseFrame : Frame = Frame(self.root,bg=self.back)
self.BodyFrame : Frame = Frame(self.root,bg=self.back)
self.choix : StringVar = StringVar()
self.choix.set("Serie")
self.rech : StringVar = StringVar()
self.rech.set("")
self.source_file : str = "s.url"
self.State : State = {
"text":"Serie",
"error":""
}
self.monted()
def Main(self) -> bool :
self.view()
return 0
def monted(self)->bool:
"""
        mount all frames
"""
self.MainFrame.pack(expand=YES)
self.TitleFrame.pack(side=TOP)
self.ChoiseFrame.pack(side=RIGHT)
self.BodyFrame.pack(expand=YES)
return 0
def Button (self,text : str ,command , padx : int = 0 , pady : int = 0 ) -> bool :
"""
create button
keyword arguments:
text -- the text of the button
command -- the command of the button
padx -- the padx of the button (default 0)
pady -- the pady of the button (default 0)
"""
haz=Button(self.BodyFrame,highlightbackground=self.TextColor,text=text,width=30,border=0,relief="flat",font=("Courrier",9),bg=self.ButtonColor,fg=self.ButtonTextColor,command=command)
haz.pack(padx=padx,pady=pady)
return 0
def openWebBrowser(self,url : str) -> bool :
"""
open web browser
keyword arguments:
url -- the url of the site
"""
wb.open_new_tab(url)
def RadioButton(self, text : str, anchor : str ="w") -> bool :
"""
create radio button
keyword arguments:
text -- the text of the button
anchor -- the anchor of the button (default "w")
"""
b1=Radiobutton(self.ChoiseFrame,text=text,variable=self.choix,value=text,font=("Courrier",9),bg=self.back,fg=self.TextColor,anchor=anchor)
b1.pack(anchor = anchor)
return 0
def site (self) -> bool :
"""
open the site
"""
val = self.choix.get()
dir : str = 'config/url/'
stateFull : str = ["Serie","Film","Manga","Torrent"]
for i in stateFull:
if i == val:
self.State["text"] = i
self.source_file = dir + i + ".url"
break
try:
with open(self.source_file,"r") as f:
urls = f.readlines()
urlsLength=len(urls)
self.openWebBrowser(urls[rd(0,urlsLength-2)])
self.State["error"]=" correctement rendu"
except FileNotFoundError :
self.State["error"]=" un probleme est survenu !"
return 0
def Title(self) -> bool :
"""
create the title of the app
"""
Titre=Label(self.TitleFrame,text="Best Of Web",font=("Courrier",20),bg=self.BackgroundColor,fg=self.TextColor)
Titre.pack()
SousTitre=Label(self.TitleFrame,text="Le Meilleur Du Web En Un Clique",font=("Courrier",9),bg=self.BackgroundColor,fg=self.TextColor)
SousTitre.pack()
return 0
def Label(self,text : str) -> bool :
"""
create a label
Keyword arguments:
text -- the text of the label
"""
label=Label(self.ChoiseFrame,text=text,font=("Italic",13),bg=self.BackgroundColor,fg=self.TextColor)
label.pack()
return 0
def view (self):
"""
design the interface
"""
self.Title()
self.Label("Categorie de site")
self.RadioButton("Serie","w")
self.RadioButton("Film","w")
self.RadioButton("Manga","w")
self.RadioButton("Torrent","w")
self.Button("Lancer un site au hazard" ,self.site)
|
StarcoderdataPython
|
1632310
|
'''This module is used to define collector to collect data that need to be analyzed.'''
class Collector(object):
def __init__(self, conf):
self._conf = conf
def __del__(self):
return
def __enter__(self):
return self
def __exit__(self, *exe_info):
return
def open(self):
raise NotImplementedError
def get_data(self):
raise NotImplementedError
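# A minimal sketch of a concrete collector, assuming only the interface defined
# above; the 'path' config key and line-based reading below are hypothetical,
# not part of the original module.
class FileCollector(Collector):
    def __init__(self, conf):
        super(FileCollector, self).__init__(conf)
        self._fp = None
    def open(self):
        # the configuration is assumed to carry the path of the data file
        self._fp = open(self._conf['path'])
    def get_data(self):
        # return raw lines; a real collector would parse them as needed
        return self._fp.readlines()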
|
StarcoderdataPython
|
200696
|
<filename>src/models/Function.py
from enum import Enum
from type_replacement import normalize_type
from typing import List
import numpy as np
import re
from models.CallingConvention import CallingConvention
import ArgsExtract
name_re = re.compile(r'(?:::|__)(~?\w+)')
def extract_name_from_demangled(demangled_name : str):
name = demangled_name.replace('__', '::')
return name_re.search(name).group(1)
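# Illustrative examples (not from the original file), assuming IDA-style demangled
# names; the regex keeps only the trailing identifier after '::' or '__':
# >>> extract_name_from_demangled('CPed::GetHealth')
# 'GetHealth'
# >>> extract_name_from_demangled('CPed__~CPed')
# '~CPed'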
class FunctionType(Enum):
METHOD = 0
STATIC = 1
VIRTUAL = 2
CTOR = 3
DTOR = 4
DTOR_VIRTUAL = 5
class Function:
cls: str # Class this function belongs to
name: str # Name without class prefix
address: str # Address in hex form (Eg.: 0xBEEF)
ret_type: str # Ctors and dtors have a return type equal to the type of `this`
vt_index: np.int16 # Index in VMT
cc: CallingConvention
arg_types : List[str] # Argument types
arg_names : List[str] # Argument names
type: FunctionType # Type of the function
is_overloaded : bool # Is this function overloaded
def __init__(self, class_name : str, address : str, demangled_name : str, cc : str, ret_type : str, arg_types : str, arg_names : str, vt_index : np.int16, is_overloaded : bool, **kwargs):
self.cls = class_name
self.name = extract_name_from_demangled(demangled_name)
self.address = address
self.ret_type = normalize_type(ret_type)
self.vt_index = vt_index
self.cc = CallingConvention(cc)
self.arg_names, self.arg_types = ArgsExtract.extract(arg_types, arg_names, demangled_name, self.cc)
self.is_overloaded = is_overloaded
# Figure out type
if self.name == self.cls:
self.type = FunctionType.CTOR
self.ret_type = self.cls + '*'
elif self.name == '~' + self.cls:
self.type = FunctionType.DTOR if self.vt_index == -1 else FunctionType.DTOR_VIRTUAL
self.ret_type = self.cls + '*'
elif self.cc.is_static:
self.type = FunctionType.STATIC
else:
self.type = FunctionType.METHOD if self.vt_index == -1 else FunctionType.VIRTUAL
@property
def full_name(self):
# Name with class namespace prefix. Eg.: Class::Function
return f'{self.cls}::{self.name}'
@property
def param_names(self) -> str:
return ', '.join(self.arg_names)
@property
def param_types(self) -> str:
return ', '.join(self.arg_types)
@property
def param_name_types(self) -> str:
return ', '.join([' '.join(a) for a in zip(self.arg_types, self.arg_names)])
@property
def is_virtual(self) -> bool:
return self.type in (FunctionType.VIRTUAL, FunctionType.DTOR_VIRTUAL)
@property
def is_dtor(self) -> bool:
return self.type in (FunctionType.DTOR_VIRTUAL, FunctionType.DTOR)
@property
def is_ctor(self) -> bool:
return self.type == FunctionType.CTOR
@property
def is_static(self) -> bool:
return self.cc.is_static
@property
def is_method(self) -> bool:
return self.cc.is_method
@property
def plugin_call_src(self):
# C++ source code for the plugin call stuff
template = []
args = []
plugin_func = self.cc.plugin_fn
if self.ret_type != 'void':
plugin_func += 'AndReturn'
template.append(self.ret_type)
template.append(self.address)
if self.cc in ('thiscall', 'fastcall'): # Check is method call
template.append(self.cls + '*')
args.append('this')
template += self.arg_types
args += self.arg_names
return f'{"" if self.ret_type == "void" else "return "}plugin::{plugin_func}<{", ".join(template)}>({", ".join(args)})'
|
StarcoderdataPython
|
6535255
|
from sys import stdin
def isHan(n):
bHan = False
numList = list()
while n:
numList.insert(0, n % 10)
n //= 10
if len(numList) == 1 or len(numList) == 2:
bHan = True
elif len(numList) == 3 and numList[2] - numList[1] == numList[1] - numList[0]:
bHan = True
return bHan
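# Illustrative checks (not part of the original submission): every one- and
# two-digit number is a hansu, and a three-digit number qualifies only when its
# digits form an arithmetic sequence.
# >>> isHan(7), isHan(99), isHan(123), isHan(124)
# (True, True, True, False)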
def main():
count = 0
for i in range(1, int(stdin.readline().strip()) + 1):
if isHan(i) == True:
count += 1
print(count)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
9777731
|
from abc import abstractmethod
from collections import namedtuple
ThermalConfig = namedtuple('ThermalConfig', ['cpu', 'gpu', 'mem', 'bat', 'ambient'])
class HardwareBase:
@staticmethod
def get_cmdline():
with open('/proc/cmdline') as f:
cmdline = f.read()
return {kv[0]: kv[1] for kv in [s.split('=') for s in cmdline.split(' ')] if len(kv) == 2}
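    # Illustrative example (not from the original file): a kernel cmdline such as
    # "console=ttyMSM0,115200n8 androidboot.mode=charger quiet" is parsed by
    # get_cmdline() into {'console': 'ttyMSM0,115200n8', 'androidboot.mode': 'charger'};
    # bare tokens like 'quiet' are skipped because they carry no '=' value.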
@staticmethod
def read_param_file(path, parser, default=0):
try:
with open(path) as f:
return parser(f.read())
except Exception:
return default
@abstractmethod
def reboot(self, reason=None):
pass
@abstractmethod
def uninstall(self):
pass
@abstractmethod
def get_os_version(self):
pass
@abstractmethod
def get_device_type(self):
pass
@abstractmethod
def get_sound_card_online(self):
pass
@abstractmethod
def get_imei(self, slot):
pass
@abstractmethod
def get_serial(self):
pass
@abstractmethod
def get_subscriber_info(self):
pass
@abstractmethod
def get_network_info(self):
pass
@abstractmethod
def get_network_type(self):
pass
@abstractmethod
def get_sim_info(self):
pass
@abstractmethod
def get_network_strength(self, network_type):
pass
@abstractmethod
def get_battery_capacity(self):
pass
@abstractmethod
def get_battery_status(self):
pass
@abstractmethod
def get_battery_current(self):
pass
@abstractmethod
def get_battery_voltage(self):
pass
@abstractmethod
def get_battery_charging(self):
pass
@abstractmethod
def set_battery_charging(self, on):
pass
@abstractmethod
def get_usb_present(self):
pass
@abstractmethod
def get_current_power_draw(self):
pass
@abstractmethod
def shutdown(self):
pass
@abstractmethod
def get_thermal_config(self):
pass
@abstractmethod
def set_screen_brightness(self, percentage):
pass
@abstractmethod
def set_power_save(self, powersave_enabled):
pass
@abstractmethod
def get_gpu_usage_percent(self):
pass
@abstractmethod
def get_modem_version(self):
pass
@abstractmethod
def initialize_hardware(self):
pass
@abstractmethod
def get_networks(self):
pass
|
StarcoderdataPython
|
3564424
|
<filename>microbuild/__init__.py
"""
Lightweight Python Build Tool
"""
__author__ = "<NAME>"
__license__ = "MIT License"
__contact__ = "https://github.com/CalumJEadie/microbuild"
|
StarcoderdataPython
|
1667647
|
bl_info = {
"name": "Pivot Menu: Key: '.'",
"description": "Pivot Modes",
"blender": (2, 78, 0),
"category": "3d View"
}
import bpy
from bpy.types import (Menu, Operator)
class VIEW3D_PIE_pivot_of(Menu):
bl_label = "Pivot"
bl_idname = "view3d.pivot_of"
def draw(self, context):
layout = self.layout
pie = layout.menu_pie()
pie.prop(context.space_data, "pivot_point", expand = True)
if context.active_object.mode == 'OBJECT':
pie.prop(context.space_data, "use_pivot_point_align", text = "Center Points")
classes = [VIEW3D_PIE_pivot_of]
addon_keymaps = []
def register():
for cls in classes:
bpy.utils.register_class(cls)
wm = bpy.context.window_manager
if wm.keyconfigs.addon:
km = wm.keyconfigs.addon.keymaps.new(name = 'Object Non-modal')
kmi = km.keymap_items.new('wm.call_menu_pie', 'PERIOD', 'PRESS')
kmi.properties.name = "view3d.pivot_of"
addon_keymaps.append((km, kmi))
def unregister():
for cls in classes:
bpy.utils.unregister_class(cls)
wm = bpy.context.window_manager
kc = wm.keyconfigs.addon
if kc:
for km, kmi in addon_keymaps:
km.keymap_items.remove(kmi)
addon_keymaps.clear()
if __name__ == "__main__":
register()
|
StarcoderdataPython
|
8174035
|
<filename>numpy/distutils/intelccompiler.py<gh_stars>1-10
from __future__ import division, absolute_import, print_function
import sys
from distutils.unixccompiler import UnixCCompiler
from numpy.distutils.exec_command import find_executable
from numpy.distutils.ccompiler import simple_version_match
class IntelCCompiler(UnixCCompiler):
"""A modified Intel compiler compatible with a GCC-built Python."""
compiler_type = 'intel'
cc_exe = 'icc'
cc_args = 'fPIC'
def __init__(self, verbose=0, dry_run=0, force=0):
UnixCCompiler.__init__(self, verbose, dry_run, force)
self.cc_exe = 'icc -fPIC'
compiler = self.cc_exe
self.set_executables(compiler=compiler,
compiler_so=compiler,
compiler_cxx=compiler,
archiver='xiar' + ' cru',
linker_exe=compiler,
linker_so=compiler + ' -shared')
class IntelItaniumCCompiler(IntelCCompiler):
compiler_type = 'intele'
# On Itanium, the Intel Compiler used to be called ecc, let's search for
# it (now it's also icc, so ecc is last in the search).
for cc_exe in map(find_executable, ['icc', 'ecc']):
if cc_exe:
break
class IntelEM64TCCompiler(UnixCCompiler):
"""
A modified Intel x86_64 compiler compatible with a 64bit GCC-built Python.
"""
compiler_type = 'intelem'
cc_exe = 'icc -m64 -fPIC'
cc_args = "-fPIC"
def __init__(self, verbose=0, dry_run=0, force=0):
UnixCCompiler.__init__(self, verbose, dry_run, force)
self.cc_exe = 'icc -m64 -fPIC'
compiler = self.cc_exe
self.set_executables(compiler=compiler,
compiler_so=compiler,
compiler_cxx=compiler,
archiver='xiar' + ' cru',
linker_exe=compiler,
linker_so=compiler + ' -shared')
if sys.platform == 'win32':
from distutils.msvc9compiler import MSVCCompiler
class IntelCCompilerW(MSVCCompiler):
"""
A modified Intel compiler compatible with an MSVC-built Python.
"""
compiler_type = 'intelw'
compiler_cxx = 'icl'
def __init__(self, verbose=0, dry_run=0, force=0):
MSVCCompiler.__init__(self, verbose, dry_run, force)
            version_match = simple_version_match(start=r'Intel\(R\).*?32,')
self.__version = version_match
def initialize(self, plat_name=None):
MSVCCompiler.initialize(self, plat_name)
self.cc = self.find_exe("icl.exe")
self.lib = self.find_exe("xilib")
self.linker = self.find_exe("xilink")
self.compile_options = ['/nologo', '/O3', '/MD', '/W3',
'/Qstd=c99']
self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3',
'/Qstd=c99', '/Z7', '/D_DEBUG']
class IntelEM64TCCompilerW(IntelCCompilerW):
"""
A modified Intel x86_64 compiler compatible with
a 64bit MSVC-built Python.
"""
compiler_type = 'intelemw'
def __init__(self, verbose=0, dry_run=0, force=0):
MSVCCompiler.__init__(self, verbose, dry_run, force)
            version_match = simple_version_match(start=r'Intel\(R\).*?64,')
self.__version = version_match
|
StarcoderdataPython
|
26093
|
<reponame>dan7267/1a-flood-risk-project-93
from floodsystem.stationdata import MonitoringStation
from floodsystem.geo import rivers_by_station_number
def run():
"""Requirements for Task1E"""
rivers_station_number = rivers_by_station_number(MonitoringStation, 9)
print(rivers_station_number)
if __name__ == "__main__":
print("*** Task 1E: CUED Part IA Flood Warning System ***")
run()
|
StarcoderdataPython
|
363540
|
import os
class Object():
def __init__(self, objLines) -> None:
labelLine = objLines[3]
#always 'PASperson', saving it anyways
self.label = labelLine.split(':')[0].split('"')[1]
#always 'UprightPerson', saving it anyways
self.labelPose = labelLine.split(':')[1].split('"')[1]
centerLine = objLines[4]
self.centerX = int(centerLine.split(':')[1].split(',')[0][2:])
self.centerY = int(centerLine.split(':')[1].split(',')[1][1:-2])
bboxLine = objLines[5].split(':')
xMinYMin = bboxLine[1].split('-')[0]
xMaxYMax = bboxLine[1].split('-')[1]
self.xMin = int(xMinYMin.split(',')[0][2:])
self.yMin = int(xMinYMin.split(',')[1][1:-2])
self.xMax = int(xMaxYMax.split(',')[0][2:])
self.yMax = int(xMaxYMax.split(',')[1][1:-2])
self.BboxShape = (self.xMax-self.xMin, self.yMax-self.yMin)
class Image():
def __init__(self, fileName, imgShape,) -> None:
self.fileName = fileName
self.imageShape = imgShape
self.objects = list()
def addObject(self, object):
self.objects.append(object)
def parseDataset(folder='INRIAPerson/Train/'):
INRIA_FOLDER = os.path.join(os.getcwd(), 'INRIAPerson')
TRAIN_FOLDER = os.path.join(os.getcwd(), folder)
ANNOTATION_FOLDER = os.path.join(TRAIN_FOLDER, 'annotations')
POS_FOLDER = os.path.join(TRAIN_FOLDER, 'pos')
annotation_list = open(TRAIN_FOLDER + 'annotations.lst', 'r')
pos_list = open(TRAIN_FOLDER + 'pos.lst', 'r')
neg_list = open(TRAIN_FOLDER + 'neg.lst', 'r')
neg_image_filenames = neg_list.readlines()
neg_image_paths = []
for neg in neg_image_filenames:
if neg == '':
continue
neg_image_paths.append(os.path.join(INRIA_FOLDER, neg[:-1]))
neg_list.close()
imgs = []
for annotation_file in annotation_list.readlines():
annotation_file = annotation_file[:-1]
pos_img_path = pos_list.readline()[:-2]
if annotation_file is None or pos_img_path is None:
print("threw up")
exit(-1)
with open(os.path.join(INRIA_FOLDER,annotation_file),'r', encoding='iso-8859-1') as annotation:
lines = annotation.readlines()
img_file = lines[2].split(':')[1][2:-2]
size_line = lines[3].split(':')[1]
sizes = size_line.split('x')
x = int(sizes[0])
y = int(sizes[1])
c = int(sizes[2])
imageShape = (x,y,c)
img = Image(os.path.join(INRIA_FOLDER, img_file), imageShape)
objectNum = int((len(lines)-12) / 7)
for i in range(objectNum):
start = 12 + i*7
end = 12 + (i+1)*7
obj_lines = lines[start:end]
img.addObject(Object(obj_lines))
imgs.append(img)
annotation_list.close()
pos_list.close()
return imgs, neg_image_paths
if __name__ == '__main__':
imgs, neg_image_filenames = parseDataset()
numObjects = 0
for img in imgs:
numObjects += len(img.objects)
print('Total images: ', len(imgs), ' with a total of ', numObjects, ' objects')
|
StarcoderdataPython
|
220134
|
from .Attention import Attention
from .PositionalEncoding import PositionalEncoding
from .ScaledDotProductAttention import ScaledDotProductAttention
from .LayerNormalization import LayerNormalization
from .MultiHeadAttention import MultiHeadAttention
|
StarcoderdataPython
|
9673554
|
# -*- coding: utf-8 -*-
from django.conf import settings
from cms.tests.base import CMSTestCase
from cms.utils.plugins import get_placeholders
from cms.exceptions import DuplicatePlaceholderWarning
import sys
import warnings
class _Warning(object):
def __init__(self, message, category, filename, lineno):
self.message = message
self.category = category
self.filename = filename
self.lineno = lineno
def _collectWarnings(observeWarning, f, *args, **kwargs):
def showWarning(message, category, filename, lineno, file=None, line=None):
assert isinstance(message, Warning)
observeWarning(_Warning(
message.args[0], category, filename, lineno))
# Disable the per-module cache for every module otherwise if the warning
# which the caller is expecting us to collect was already emitted it won't
# be re-emitted by the call to f which happens below.
for v in sys.modules.itervalues():
if v is not None:
try:
v.__warningregistry__ = None
except:
# Don't specify a particular exception type to handle in case
# some wacky object raises some wacky exception in response to
# the setattr attempt.
pass
origFilters = warnings.filters[:]
origShow = warnings.showwarning
warnings.simplefilter('always')
try:
warnings.showwarning = showWarning
result = f(*args, **kwargs)
finally:
warnings.filters[:] = origFilters
warnings.showwarning = origShow
return result
class PlaceholderTestCase(CMSTestCase):
def test_01_placeholder_scanning_extend(self):
placeholders = get_placeholders('placeholder_tests/test_one.html')
self.assertEqual(sorted(placeholders), sorted([u'new_one', u'two', u'three']))
def test_02_placeholder_scanning_include(self):
placeholders = get_placeholders('placeholder_tests/test_two.html')
self.assertEqual(sorted(placeholders), sorted([u'child', u'three']))
def test_03_placeholder_scanning_double_extend(self):
placeholders = get_placeholders('placeholder_tests/test_three.html')
self.assertEqual(sorted(placeholders), sorted([u'new_one', u'two', u'new_three']))
def test_04_placeholder_scanning_complex(self):
placeholders = get_placeholders('placeholder_tests/test_four.html')
self.assertEqual(sorted(placeholders), sorted([u'new_one', u'child', u'four']))
def test_05_placeholder_scanning_super(self):
placeholders = get_placeholders('placeholder_tests/test_five.html')
self.assertEqual(sorted(placeholders), sorted([u'one', u'extra_one', u'two', u'three']))
def test_06_placeholder_scanning_nested(self):
placeholders = get_placeholders('placeholder_tests/test_six.html')
self.assertEqual(sorted(placeholders), sorted([u'new_one', u'new_two', u'new_three']))
def test_07_placeholder_scanning_duplicate(self):
placeholders = self.assertWarns(DuplicatePlaceholderWarning, "Duplicate placeholder found: `one`", get_placeholders, 'placeholder_tests/test_seven.html')
self.assertEqual(sorted(placeholders), sorted([u'one']))
def failUnlessWarns(self, category, message, f, *args, **kwargs):
warningsShown = []
result = _collectWarnings(warningsShown.append, f, *args, **kwargs)
if not warningsShown:
self.fail("No warnings emitted")
first = warningsShown[0]
for other in warningsShown[1:]:
if ((other.message, other.category)
!= (first.message, first.category)):
self.fail("Can't handle different warnings")
self.assertEqual(first.message, message)
self.assertTrue(first.category is category)
return result
assertWarns = failUnlessWarns
|
StarcoderdataPython
|
1972566
|
# Autogenerated by configen, do not edit.
# If encountering an error, please file an issue @
# https://github.com/romesco/hydra-lightning
# fmt: off
# isort: skip_file
# flake8: noqa
# Hydra + Lightning
from dataclasses import dataclass, field
from omegaconf import MISSING
from typing import Any
from typing import Dict
from typing import Optional
@dataclass
class CometLoggerConf:
_target_: str = "pytorch_lightning.loggers.CometLogger"
api_key: Optional[str] = None
save_dir: Optional[str] = None
project_name: Optional[str] = None
rest_api_key: Optional[str] = None
experiment_name: Optional[str] = None
experiment_key: Optional[str] = None
offline: bool = False
prefix: str = ""
kwargs: Any = MISSING
@dataclass
class MLFlowLoggerConf:
_target_: str = "pytorch_lightning.loggers.MLFlowLogger"
experiment_name: str = "default"
tracking_uri: Optional[str] = None
tags: Optional[Dict[str, Any]] = None
save_dir: Optional[str] = "./mlruns"
prefix: str = ""
@dataclass
class NeptuneLoggerConf:
_target_: str = "pytorch_lightning.loggers.NeptuneLogger"
api_key: Optional[str] = None
project_name: Optional[str] = None
close_after_fit: Optional[bool] = True
offline_mode: bool = False
experiment_name: Optional[str] = None
experiment_id: Optional[str] = None
prefix: str = ""
kwargs: Any = MISSING
@dataclass
class TestTubeLoggerConf:
_target_: str = "pytorch_lightning.loggers.TestTubeLogger"
save_dir: str = MISSING
name: str = "default"
description: Optional[str] = None
debug: bool = False
version: Optional[int] = None
create_git_tag: bool = False
log_graph: bool = False
prefix: str = ""
@dataclass
class WandbLoggerConf:
_target_: str = "pytorch_lightning.loggers.WandbLogger"
name: Optional[str] = None
save_dir: Optional[str] = None
offline: bool = False
id: Optional[str] = None
anonymous: bool = False
version: Optional[str] = None
project: Optional[str] = None
log_model: bool = False
experiment: Any = None
prefix: str = ""
kwargs: Any = MISSING
|
StarcoderdataPython
|
1979736
|
<reponame>tklijnsma/toscript
#!/usr/bin/env python
import sys
from get_toscript import ToGoer
class Completer(object):
"""docstring for Completer"""
def __init__(self):
super(Completer, self).__init__()
self.goer = ToGoer()
def completion_hook(self, cmd, curr_word, prev_word):
if not self.goer.exists_toscriptdir():
print('\nNo directory \'{0}\'\n'.format(self.goer.toscriptdir))
return []
elif not self.goer.has_scripts_toscriptdir():
print('\nNo scripts found in \'{0}\'\n'.format(self.goer.toscriptdir))
return []
elif prev_word == 'to' or prev_word == '--test':
potential_matches = self.goer.toscripts_basenames
else:
potential_matches = []
matches = [k for k in potential_matches if k.startswith(curr_word)]
return matches
def main():
completer = Completer()
results = completer.completion_hook(*sys.argv[1:])
if len(results):
print('\n'.join(results))
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
11200902
|
import argparse
def get_params():
parser = argparse.ArgumentParser(
description="Variable parameters based on the configuration of the machine or user's choice")
parser.add_argument("--mem_size", default=100000, type=int, help="The memory size.")
parser.add_argument("--env_name", default="MountainCar-v0", type=str, help="Name of the environment.")
parser.add_argument("--interval", default=10, type=int,
help="The interval specifies how often different parameters should be saved and printed,"
" counted by episodes.")
parser.add_argument("--do_train", default=True,
help="The flag determines whether to train the agent or play with it.")
parser.add_argument("--train_from_scratch", default=True, type=bool, help="The flag determines whether to train from scratch or continue previous tries.")
parser.add_argument("--do_intro_env", action="store_true",
help="Only introduce the environment then close the program.")
parser_params = parser.parse_args()
# Parameters based on the Discrete SAC paper.
# region default parameters
default_params = {"lr": 3e-4,
"batch_size": 64,
"state_shape": (4, 84, 84),
"max_steps": int(1e+8),
"gamma": 0.99,
"initial_random_steps": 20000,
"train_period": 4,
"fixed_network_update_freq": 8000
}
# endregion
total_params = {**vars(parser_params), **default_params}
print("params:", total_params)
return total_params
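# Minimal usage sketch (not part of the original module): parse the CLI flags and
# defaults defined above, then read a few of the merged parameters.
if __name__ == "__main__":
    params = get_params()
    print(params["env_name"], params["batch_size"], params["do_train"])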
|
StarcoderdataPython
|
8071455
|
from products.test.infrastructure import FakeUnitOfWorkManager
from products.handlers import CreateProductCommand, CreateProductCommandHandler
class When_creating_a_product:
""" Now that we have a repository pattern and command handlers
it becomes trivial to write unit tests that check that
we perform the correct actions against our domain.
These tests should all operate against cmd handlers and verify that
a) We create and commit a transaction
b) We have persisted any state changes that we make
c) We have raised any domain events on a message bus for further
processing """
def given(self):
self._uow = FakeUnitOfWorkManager()
self._handler = CreateProductCommandHandler(self._uow)
def when_we_raise_a_create_product_command(self):
self._handler(CreateProductCommand("foo"))
def it_should_add_the_product_to_the_repository(self):
assert any(p.name == "foo" for p in self._uow.products)
def it_should_raise_product_created(self):
pass
def it_should_have_committed_the_unit_of_work(self):
pass
|
StarcoderdataPython
|
5181802
|
<filename>appengine/findit/common/rotations.py
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utilities to get the sheriff(s) on duty.
Currently only supports the Chrome Build Sheriff rotation."""
import json
from common.findit_http_client import FinditHttpClient
from libs import time_util
from model.wf_config import FinditConfig
_ROTATIONS_URL = ('https://chrome-ops-rotation-proxy.appspot.com/current/'
'oncallator:chrome-build-sheriff')
_HTTP_CLIENT = FinditHttpClient()
def current_sheriffs():
status_code, content, _headers = _HTTP_CLIENT.Get(_ROTATIONS_URL)
if status_code == 200:
content = json.loads(content)
if 'emails' not in content:
raise Exception('Malformed sheriff json at %s' % _ROTATIONS_URL)
return content['emails']
else:
raise Exception('Could not retrieve sheriff list from %s, got code %d' %
(_ROTATIONS_URL, status_code))
|
StarcoderdataPython
|
1843097
|
<gh_stars>0
# Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import sys
import grpc
import six
from runtime.feature.column import JSONDecoderWithFeatureColumn
from runtime.model.modelzooserver_pb2 import ReleaseModelRequest
from runtime.model.modelzooserver_pb2_grpc import ModelZooServerStub
def load_model_from_model_zoo(address, model, tag, meta_only=False):
stub = None
meta = None
channel = grpc.insecure_channel(address)
try:
stub = ModelZooServerStub(channel)
meta_req = ReleaseModelRequest(name=model, tag=tag)
meta_resp = stub.GetModelMeta(meta_req)
meta = json.loads(meta_resp.meta, cls=JSONDecoderWithFeatureColumn)
except: # noqa: E722
# make sure that the channel is closed when exception raises
channel.close()
six.reraise(*sys.exc_info())
if meta_only:
channel.close()
return None, meta
def reader():
try:
tar_req = ReleaseModelRequest(name=model, tag=tag)
tar_resp = stub.DownloadModel(tar_req)
for each_resp in tar_resp:
yield each_resp.content_tar
finally:
reader.close()
def close():
if not reader.is_closed:
channel.close()
reader.is_closed = True
reader.is_closed = False
reader.close = close
return reader, meta
|
StarcoderdataPython
|
3551938
|
<reponame>anwarchk/quay<gh_stars>1-10
import json
import pytest
from jsonschema import validate
from buildtrigger.customhandler import custom_trigger_payload
from buildtrigger.basehandler import METADATA_SCHEMA
from buildtrigger.bitbuckethandler import get_transformed_webhook_payload as bb_webhook
from buildtrigger.bitbuckethandler import get_transformed_commit_info as bb_commit
from buildtrigger.githubhandler import get_transformed_webhook_payload as gh_webhook
from buildtrigger.gitlabhandler import get_transformed_webhook_payload as gl_webhook
from buildtrigger.triggerutil import SkipRequestException
def assertSkipped(filename, processor, *args, **kwargs):
with open('buildtrigger/test/triggerjson/%s.json' % filename) as f:
payload = json.loads(f.read())
nargs = [payload]
nargs.extend(args)
with pytest.raises(SkipRequestException):
processor(*nargs, **kwargs)
def assertSchema(filename, expected, processor, *args, **kwargs):
with open('buildtrigger/test/triggerjson/%s.json' % filename) as f:
payload = json.loads(f.read())
nargs = [payload]
nargs.extend(args)
created = processor(*nargs, **kwargs)
assert created == expected
validate(created, METADATA_SCHEMA)
def test_custom_custom():
expected = {
u'commit':u'1c002dd',
u'commit_info': {
u'url': u'gitsoftware.com/repository/commits/1234567',
u'date': u'timestamp',
u'message': u'initial commit',
u'committer': {
u'username': u'user',
u'url': u'gitsoftware.com/users/user',
u'avatar_url': u'gravatar.com/user.png'
},
u'author': {
u'username': u'user',
u'url': u'gitsoftware.com/users/user',
u'avatar_url': u'gravatar.com/user.png'
}
},
u'ref': u'refs/heads/master',
u'default_branch': u'master',
u'git_url': u'foobar',
}
assertSchema('custom_webhook', expected, custom_trigger_payload, git_url='foobar')
def test_custom_gitlab():
expected = {
'commit': u'<PASSWORD>',
'ref': u'refs/heads/master',
'git_url': u'<EMAIL>:jsmith/somerepo.git',
'commit_info': {
'url': u'https://gitlab.com/jsmith/somerepo/commit/fb88379ee4<PASSWORD>28a<PASSWORD>fddcbd8eff8b36026e',
'date': u'2015-08-13T19:33:18+00:00',
'message': u'Fix link\n',
},
}
assertSchema('gitlab_webhook', expected, custom_trigger_payload, git_url='<EMAIL>:jsmith/somerepo.git')
def test_custom_github():
expected = {
'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'ref': u'refs/heads/master',
'default_branch': u'master',
'git_url': u'<EMAIL>:jsmith/anothertest.git',
'commit_info': {
'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'date': u'2015-09-11T14:26:16-04:00',
'message': u'Update Dockerfile',
'committer': {
'username': u'jsmith',
},
'author': {
'username': u'jsmith',
},
},
}
assertSchema('github_webhook', expected, custom_trigger_payload,
git_url='<EMAIL>:jsmith/anothertest.git')
def test_custom_bitbucket():
expected = {
"commit": u"af64ae7188685f8424040b4735ad1<PASSWORD>1<PASSWORD>",
"ref": u"refs/heads/master",
"git_url": u"<EMAIL>:jsmith/another-repo.git",
"commit_info": {
"url": u"https://bitbucket.org/jsmith/another-repo/commits/af64ae7188685f8424040b4735ad12941b980d75",
"date": u"2015-09-10T20:40:54+00:00",
"message": u"Dockerfile edited online with Bitbucket",
"author": {
"username": u"<NAME>",
"avatar_url": u"https://bitbucket.org/account/jsmith/avatar/32/",
},
"committer": {
"username": u"John Smith",
"avatar_url": u"https://bitbucket.org/account/jsmith/avatar/32/",
},
},
}
assertSchema('bitbucket_webhook', expected, custom_trigger_payload, git_url='<EMAIL>:jsmith/another-repo.git')
def test_bitbucket_customer_payload_noauthor():
expected = {
"commit": "a0ec139843b2bb281ab21a433266ddc498e605dc",
"ref": "refs/heads/master",
"git_url": "<EMAIL>:somecoollabs/svc-identity.git",
"commit_info": {
"url": "https://bitbucket.org/somecoollabs/svc-identity/commits/a0ec139843b2bb281ab21a433<PASSWORD>",
"date": "2015-09-25T00:55:08+00:00",
"message": "Update version.py to 0.1.2 [skip ci]\n\n(by utilitybelt/scripts/autotag_version.py)\n",
"committer": {
"username": "CodeShip Tagging",
"avatar_url": "https://bitbucket.org/account/SomeCoolLabs_CodeShip/avatar/32/",
},
},
}
assertSchema('bitbucket_customer_example_noauthor', expected, bb_webhook)
def test_bitbucket_customer_payload_tag():
expected = {
"commit": "<PASSWORD>",
"ref": "refs/tags/0.1.2",
"git_url": "<EMAIL>:somecoollabs/svc-identity.git",
"commit_info": {
"url": "https://bitbucket.org/somecoollabs/svc-identity/commits/<PASSWORD>",
"date": "2015-09-25T00:55:08+00:00",
"message": "Update version.py to 0.1.2 [skip ci]\n\n(by utilitybelt/scripts/autotag_version.py)\n",
"committer": {
"username": "CodeShip Tagging",
"avatar_url": "https://bitbucket.org/account/SomeCoolLabs_CodeShip/avatar/32/",
},
},
}
assertSchema('bitbucket_customer_example_tag', expected, bb_webhook)
def test_bitbucket_commit():
ref = 'refs/heads/somebranch'
default_branch = 'somebranch'
repository_name = 'foo/bar'
def lookup_author(_):
return {
'user': {
'display_name': 'cooluser',
'avatar': 'http://some/avatar/url'
}
}
expected = {
"commit": u"abdeaf1b2b4a6b9ddf742c1e1754236380435a62",
"ref": u"refs/heads/somebranch",
"git_url": u"<EMAIL>:foo/bar.git",
"default_branch": u"somebranch",
"commit_info": {
"url": u"https://bitbucket.org/foo/bar/commits/abdeaf1b2b4a6b9ddf742c1e1754236380435a62",
"date": u"2012-07-24 00:26:36",
"message": u"making some changes\n",
"author": {
"avatar_url": u"http://some/avatar/url",
"username": u"cooluser",
}
}
}
assertSchema('bitbucket_commit', expected, bb_commit, ref, default_branch,
repository_name, lookup_author)
def test_bitbucket_webhook_payload():
expected = {
"commit": u"af64ae7188685f8424040b4735ad12941b980d75",
"ref": u"refs/heads/master",
"git_url": u"<EMAIL>:jsmith/another-repo.git",
"commit_info": {
"url": u"https://bitbucket.org/jsmith/another-repo/commits/af64ae7188685f8424040b4735ad12941b980d75",
"date": u"2015-09-10T20:40:54+00:00",
"message": u"Dockerfile edited online with Bitbucket",
"author": {
"username": u"<NAME>",
"avatar_url": u"https://bitbucket.org/account/jsmith/avatar/32/",
},
"committer": {
"username": u"John Smith",
"avatar_url": u"https://bitbucket.org/account/jsmith/avatar/32/",
},
},
}
assertSchema('bitbucket_webhook', expected, bb_webhook)
def test_github_webhook_payload_slash_branch():
expected = {
'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'ref': u'refs/heads/slash/branch',
'default_branch': u'master',
'git_url': u'<EMAIL>:jsmith/anothertest.git',
'commit_info': {
'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'date': u'2015-09-11T14:26:16-04:00',
'message': u'Update Dockerfile',
'committer': {
'username': u'jsmith',
},
'author': {
'username': u'jsmith',
},
},
}
assertSchema('github_webhook_slash_branch', expected, gh_webhook)
def test_github_webhook_payload():
expected = {
'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'ref': u'refs/heads/master',
'default_branch': u'master',
'git_url': u'<EMAIL>:jsmith/anothertest.git',
'commit_info': {
'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'date': u'2015-09-11T14:26:16-04:00',
'message': u'Update Dockerfile',
'committer': {
'username': u'jsmith',
},
'author': {
'username': u'jsmith',
},
},
}
assertSchema('github_webhook', expected, gh_webhook)
def test_github_webhook_payload_with_lookup():
expected = {
'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'ref': u'refs/heads/master',
'default_branch': u'master',
'git_url': u'<EMAIL>:jsmith/anothertest.git',
'commit_info': {
'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'date': u'2015-09-11T14:26:16-04:00',
'message': u'Update Dockerfile',
'committer': {
'username': u'jsmith',
'url': u'http://github.com/jsmith',
'avatar_url': u'http://some/avatar/url',
},
'author': {
'username': u'jsmith',
'url': u'http://github.com/jsmith',
'avatar_url': u'http://some/avatar/url',
},
},
}
def lookup_user(_):
return {
'html_url': 'http://github.com/jsmith',
'avatar_url': 'http://some/avatar/url'
}
assertSchema('github_webhook', expected, gh_webhook, lookup_user=lookup_user)
def test_github_webhook_payload_missing_fields_with_lookup():
expected = {
'commit': u'<PASSWORD>',
'ref': u'refs/heads/master',
'default_branch': u'master',
'git_url': u'<EMAIL>:jsmith/anothertest.git',
'commit_info': {
'url': u'https://github.com/jsmith/anothertest/commit/4<PASSWORD>a4c',
'date': u'2015-09-11T14:26:16-04:00',
'message': u'Update Dockerfile'
},
}
def lookup_user(username):
if not username:
raise Exception('Fail!')
return {
'html_url': 'http://github.com/jsmith',
'avatar_url': 'http://some/avatar/url'
}
assertSchema('github_webhook_missing', expected, gh_webhook, lookup_user=lookup_user)
def test_gitlab_webhook_payload():
expected = {
'commit': u'fb88379ee45de28a0a4590fddcbd8eff8b36026e',
'ref': u'refs/heads/master',
'git_url': u'<EMAIL>:jsmith/somerepo.git',
'commit_info': {
'url': u'https://gitlab.com/jsmith/somerepo/commit/fb88379ee45de28a0a4590fddcbd8eff8b36026e',
'date': u'2015-08-13T19:33:18+00:00',
'message': u'Fix link\n',
},
}
assertSchema('gitlab_webhook', expected, gl_webhook)
def test_github_webhook_payload_known_issue():
expected = {
"commit": "118b07121695d9f2e40a5ff264fdcc2917680870",
"ref": "refs/heads/master",
"default_branch": "master",
"git_url": "<EMAIL>:jsmith/docker-test.git",
"commit_info": {
"url": "https://github.com/jsmith/docker-test/commit/118b07121695d9f2e40a5ff264fdcc2917680870",
"date": "2015-09-25T14:55:11-04:00",
"message": "Fail",
},
}
assertSchema('github_webhook_noname', expected, gh_webhook)
def test_github_webhook_payload_missing_fields():
expected = {
'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'ref': u'refs/heads/master',
'default_branch': u'master',
'git_url': u'<EMAIL>:jsmith/anothertest.git',
'commit_info': {
'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'date': u'2015-09-11T14:26:16-04:00',
'message': u'Update Dockerfile'
},
}
assertSchema('github_webhook_missing', expected, gh_webhook)
def test_gitlab_webhook_nocommit_payload():
assertSkipped('gitlab_webhook_nocommit', gl_webhook)
def test_gitlab_webhook_multiple_commits():
expected = {
'commit': u'9a052a0b2fbe01d4a1a88638dd9fe31c1c56ef53',
'ref': u'refs/heads/master',
'git_url': u'<EMAIL>:jsmith/some-test-project.git',
'commit_info': {
'url': u'https://gitlab.com/jsmith/some-test-project/commit/9a052a0b2fbe01d4a1a88638dd9fe31c1c56ef53',
'date': u'2016-09-29T15:02:41+00:00',
'message': u"Merge branch 'foobar' into 'master'\r\n\r\nAdd changelog\r\n\r\nSome merge thing\r\n\r\nSee merge request !1",
'author': {
'username': 'jsmith',
'url': 'http://gitlab.com/jsmith',
'avatar_url': 'http://some/avatar/url'
},
},
}
def lookup_user(_):
return {
'username': 'jsmith',
'html_url': 'http://gitlab.com/jsmith',
'avatar_url': 'http://some/avatar/url',
}
assertSchema('gitlab_webhook_multicommit', expected, gl_webhook, lookup_user=lookup_user)
def test_gitlab_webhook_for_tag():
expected = {
'commit': u'82b3d5ae55f7080f1e6022629cdb57bfae7cccc7',
'commit_info': {
'author': {
'avatar_url': 'http://some/avatar/url',
'url': 'http://gitlab.com/jsmith',
'username': 'jsmith'
},
'date': '2015-08-13T19:33:18+00:00',
'message': 'Fix link\n',
'url': 'https://some/url',
},
'git_url': u'<EMAIL>:jsmith/example.git',
'ref': u'refs/tags/v1.0.0',
}
def lookup_user(_):
return {
'username': 'jsmith',
'html_url': 'http://gitlab.com/jsmith',
'avatar_url': 'http://some/avatar/url',
}
def lookup_commit(repo_id, commit_sha):
if commit_sha == '82b3d5ae55f7080f1e6022629cdb57bfae7cccc7':
return {
"id": "82b3d5ae55f7080f1e6022629cdb57bfae7cccc7",
"message": "Fix link\n",
"timestamp": "2015-08-13T19:33:18+00:00",
"url": "https://some/url",
"author_name": "<NAME>",
"author_email": "<EMAIL>",
}
return None
assertSchema('gitlab_webhook_tag', expected, gl_webhook, lookup_user=lookup_user,
lookup_commit=lookup_commit)
def test_gitlab_webhook_for_tag_nocommit():
assertSkipped('gitlab_webhook_tag', gl_webhook)
def test_gitlab_webhook_for_tag_commit_sha_null():
assertSkipped('gitlab_webhook_tag_commit_sha_null', gl_webhook)
def test_gitlab_webhook_for_tag_known_issue():
expected = {
'commit': u'770830e7ca132856991e6db4f7fc0f4dbe20bd5f',
'ref': u'refs/tags/thirdtag',
'git_url': u'<EMAIL>:someuser/some-test-project.git',
'commit_info': {
'url': u'https://gitlab.com/someuser/some-test-project/commit/770830e7ca132856991e6db4f7fc0f4dbe20bd5f',
'date': u'2019-10-17T18:07:48Z',
'message': u'Update Dockerfile',
'author': {
'username': 'someuser',
'url': 'http://gitlab.com/someuser',
'avatar_url': 'http://some/avatar/url',
},
},
}
def lookup_user(_):
return {
'username': 'someuser',
'html_url': 'http://gitlab.com/someuser',
'avatar_url': 'http://some/avatar/url',
}
assertSchema('gitlab_webhook_tag_commit_issue', expected, gl_webhook, lookup_user=lookup_user)
def test_gitlab_webhook_payload_known_issue():
expected = {
'commit': u'770830e7ca132856991e6db4f7fc0f4dbe20bd5f',
'ref': u'refs/tags/fourthtag',
'git_url': u'[email protected]:someuser/some-test-project.git',
'commit_info': {
'url': u'https://gitlab.com/someuser/some-test-project/commit/770830e7ca132856991e6db4f7fc0f4dbe20bd5f',
'date': u'2019-10-17T18:07:48Z',
'message': u'Update Dockerfile',
},
}
def lookup_commit(repo_id, commit_sha):
if commit_sha == '770830e7ca132856991e6db4f7fc0f4dbe20bd5f':
return {
"added": [],
"author": {
"name": "Some User",
"email": "<EMAIL>"
},
"url": "https://gitlab.com/someuser/some-test-project/commit/770830e7ca132856991e6db4f7fc0f4dbe20bd5f",
"message": "Update Dockerfile",
"removed": [],
"modified": [
"Dockerfile"
],
"id": "770830e7ca132856991e6db4f7fc0f4dbe20bd5f"
}
return None
assertSchema('gitlab_webhook_known_issue', expected, gl_webhook, lookup_commit=lookup_commit)
def test_gitlab_webhook_for_other():
assertSkipped('gitlab_webhook_other', gl_webhook)
def test_gitlab_webhook_payload_with_lookup():
expected = {
'commit': u'fb88379ee45de28a0a4590fddcbd8eff8b36026e',
'ref': u'refs/heads/master',
'git_url': u'<EMAIL>:jsmith/somerepo.git',
'commit_info': {
'url': u'https://gitlab.com/jsmith/somerepo/commit/fb88379ee45de28a0a4590fddcbd8eff8b36026e',
'date': u'2015-08-13T19:33:18+00:00',
'message': u'Fix link\n',
'author': {
'username': 'jsmith',
'url': 'http://gitlab.com/jsmith',
'avatar_url': 'http://some/avatar/url',
},
},
}
def lookup_user(_):
return {
'username': 'jsmith',
'html_url': 'http://gitlab.com/jsmith',
'avatar_url': 'http://some/avatar/url',
}
assertSchema('gitlab_webhook', expected, gl_webhook, lookup_user=lookup_user)
def test_github_webhook_payload_deleted_commit():
expected = {
'commit': u'456806b662cb903a<PASSWORD>',
'commit_info': {
'author': {
'username': u'jsmith'
},
'committer': {
'username': u'jsmith'
},
'date': u'2015-12-08T18:07:03-05:00',
'message': (u'Merge pull request #1044 from jsmith/errerror\n\n' +
'Assign the exception to a variable to log it'),
'url': u'https://github.com/jsmith/somerepo/commit/456806b662cb903<PASSWORD>bab'
},
'git_url': u'<EMAIL>:jsmith/somerepo.git',
'ref': u'refs/heads/master',
'default_branch': u'master',
}
def lookup_user(_):
return None
assertSchema('github_webhook_deletedcommit', expected, gh_webhook, lookup_user=lookup_user)
def test_github_webhook_known_issue():
def lookup_user(_):
return None
assertSkipped('github_webhook_knownissue', gh_webhook, lookup_user=lookup_user)
def test_bitbucket_webhook_known_issue():
assertSkipped('bitbucket_knownissue', bb_webhook)
|
StarcoderdataPython
|
8075403
|
"""The london_underground component."""
|
StarcoderdataPython
|
45867
|
<gh_stars>0
#!/usr/bin/env python3
# imports go here
import pika
import multiprocessing
import time
import random
import json
import logging
import datetime
#
# Free Coding session for 2015-03-12
# Written by <NAME>
#
logger = logging.getLogger(__name__)
def get_temperatures():
return {'celcius': [random.randint(10, 20) for i in range(100)],
'key': '<KEY>',
'taken': str(datetime.datetime.now())}
def start_measuring():
connection = pika.BlockingConnection()
channel = connection.channel()
while True:
measurement = get_temperatures()
channel.basic_publish(exchange='', routing_key='test', body=json.dumps(measurement))
time.sleep(1)
connection.close()
def producer():
# start thread to read temperatures
p = multiprocessing.Process(target=start_measuring)
p.start()
time.sleep(1)
def consumer():
# on main thread read from message queue and process them
connection = pika.BlockingConnection()
channel = connection.channel()
channel.queue_declare(queue='test') # in case it's not yet created
for method_frame, properties, body in channel.consume('test'):
print(str(body))
try:
data = json.loads(body.decode('utf-8'))
print(data)
channel.basic_ack(method_frame.delivery_tag)
except ValueError as e:
            logger.exception('parsing error: %s', e)
            logger.warning('parsing error')
if method_frame.delivery_tag == 100:
break
channel.cancel()
connection.close()
if __name__ == "__main__":
producer()
consumer()
|
StarcoderdataPython
|
6422769
|
from setuptools import setup, find_packages
NAME = 'jewelry'
URL = 'https://github.com/mgsosna/jewelry'
REQUIRES_PYTHON = '>=3.7.0'
REQUIREMENTS_FN = 'requirements.txt'
def list_requirements(file_name=REQUIREMENTS_FN):
with open(file_name) as f:
return f.read().splitlines()
setup(
name=NAME,
version="0.1.0",
include_package_data=True,
python_requires=REQUIRES_PYTHON,
url=URL,
package_dir={'': 'jewelry'},
packages=find_packages(where="jewelry"),
install_requires=list_requirements()
)
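# Usage sketch (an assumption, not from the original repo): from the project root,
#     pip install -e .
# installs the package in editable mode, pulling install_requires from
# requirements.txt via list_requirements().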
|
StarcoderdataPython
|
11352707
|
import unittest
import numpy as np
from src.viewpointdiversitydetection.model_evaluation_utilities import generate_markdown_table
class ModelEvaluationUtilitiesTest(unittest.TestCase):
def test_generate_markdown_table(self):
parameters = {'C': 5, 'gamma': .0001, 'class w': 'balanced',
'IAA': 0.2}
answers = np.array(['b', 'b', 'b', 'b', 'a', 'b', 'a'])
predictions = np.array(['a', 'a', 'b', 'b', 'a', 'a', 'a'])
probabilities_list = [[.9, .1], [.4, .6], [.2, .8], [.3, .7], [.8, .2], [.55, .45], [.95, .05]]
probabilities = np.array([np.array(i) for i in probabilities_list])
top_number = 3
label_a = 'a'
label_b = 'b'
corpus_name = 'Testing'
search_terms = ['term 1', 'term 2']
t = generate_markdown_table(corpus_name, search_terms, parameters, answers, predictions, probabilities,
top_number, label_a, label_b)
print(t)
self.assertTrue(t)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|