id (stringlengths 2-8) | text (stringlengths 16-264k) | dataset_id (stringclasses: 1 value) |
---|---|---|
12858248
|
# -*- coding: utf-8 -*-
class IgnoreRequest(Exception):
pass
|
StarcoderdataPython
|
238563
|
from typing import *
from time import perf_counter_ns
'''
def Timer(n: int = 1):
return [_Timer() for i in range(n)]
class _Timer:
def __init__(self):
self.times: list[float] = []
def __enter__(self):
self.start = perf_counter_ns()
def __exit__(self, exc_type, exc_value, exc_traceback):
self.end = perf_counter_ns()
if exc_value: raise exc_value
self.times.append(self.end - self.start)
def __lt__(self, other) -> float:
N = len(self.times)
self_sorted = sorted(self.times)
other_sorted: list[float] = sorted(other.times)
return sum(self_sorted[i] < other_sorted[i] for i in range(N)) / N
def __eq__(self, other) -> float:
N = len(self.times)
self_sorted = sorted(self.times)
other_sorted: list[float] = sorted(other.times)
return sum(self_sorted[i] == other_sorted[i] for i in range(N)) / N
def __gt__(self, other) -> float:
N = len(self.times)
self_sorted = sorted(self.times)
other_sorted: list[float] = sorted(other.times)
return sum(self_sorted[i] > other_sorted[i] for i in range(N)) / N
'''
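# Illustrative usage of the disabled helpers above (they would have to be uncommented first);
# Timer(2) returns two _Timer context managers, and `a < b` reports the fraction of
# rank-matched runs in which `a` was faster:
# a, b = Timer(2)
# for _ in range(100):
#     with a:
#         sum(range(10_000))
#     with b:
#         sum(list(range(10_000)))
# print(a < b)  # e.g. 0.97 -> `a` was faster in 97% of the sorted, pairwise-compared runs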
# TODO: rdtsc
# https://docs.microsoft.com/en-gb/windows/win32/cimwin32prov/win32-processor
# https://github.com/workhorsy/py-cpuinfo
cpu_s = '''$colItems = Get-WmiObject "Win32_Processor"
foreach ($objItem in $colItems) {
Write-Host "CPU Model: " -NoNewLine
Write-Host $objItem.Name
Write-Host "CPU Cores: " -NoNewLine
Write-Host $objItem.NumberOfCores
Write-Host "CPU Max Speed: " -NoNewLine
Write-Host $objItem.MaxClockSpeed
Write-Host "CPU Current Speed: " -NoNewLine
Write-Host $objItem.CurrentClockSpeed
}'''
if __name__ == '__main__':
import ctypes
print(ctypes.windll.wmi)
|
StarcoderdataPython
|
3331807
|
<reponame>EchizenG/MAD-GAN_synthetic<filename>datasets/data_synthetic_bak_1D.py
from common import scatter
import matplotlib.pyplot as plt
import random
import numpy as np
from attrdict import AttrDict
import tensorflow.contrib.learn as tf_learn
import pandas as pd
from scipy import stats, integrate
import seaborn as sns
sns.set(color_codes=True)
class MoG1D:
def __init__(self, lpf = 1, hpf = 1):
self.modes = []
self.dataExtractor = []
self.lowerProbFactor = lpf
self.higherProbFactor = hpf
self.fig, self.axs = plt.subplots(ncols=2)
# Mimic tf datasets generator
self.train = AttrDict({'next_batch':
lambda b: (self.next_batch(b), None)})
self.train.images = [np.zeros([1])]
def add_mode(self, x, std=1.0):
x = float(x)
std = float(std)
self.modes.append({'x': x, 'std': std})
return self
def generate_sample(self, with_label=False):
# Pick a mode
# mode = random.choice(self.modes)
index = random.choice(self.dataExtractor)
mode = self.modes[index]
x = np.random.normal(mode['x'], mode['std'])
return x
def estimate_mode_idx(self, x, thres=3.0):
x = float(x)
thres = float(thres)
_min_dist = np.inf
_min_i = -1
for i, mode in enumerate(self.modes):
m_x = mode['x']
m_std = mode['std']
dist = np.sqrt((m_x - x) * (m_x - x))
if (dist <= thres * m_std):
# Keep the index with minimum dist.
if (dist < _min_dist):
_min_dist = dist
_min_i = i
return _min_i
def estimate_mode_idxs(self, arr, thres=3.0):
ret = np.apply_along_axis(lambda x:
self.estimate_mode_idx(x[0], thres),
1,
arr
)
return ret
def next_batch(self, batchsize=128):
numbers = []
for i in range(batchsize):
numbers.append(self.generate_sample())
return np.array(numbers).reshape(batchsize,1)
def get_hq_ratio(self, arr, thres=3.0):
ret = self.estimate_mode_idxs(arr, thres)
return np.sum(ret >= 0) / float(len(ret))
def get_n_modes(self, arr, thres=3.0):
visited = [False for x in self.modes]
ret = self.estimate_mode_idxs(arr, thres)
for r in ret:
if r >= 0:
visited[r] = True
return sum(visited)
def plot(self, img_generator, fig_id=None, batch_size = 128):
#fig, axs = plt.subplots(ncols=2)
fig, axs = plt.subplots()
samples = img_generator(batch_size * 8) #TODO originally 1024
data_samples = self.next_batch(batch_size * 8)
sns.distplot(samples,ax=axs,bins=range(-10, 70, 1),kde=False)
sns.distplot(data_samples,ax=axs,bins=range(-10, 70, 1),kde=False)
return fig
# TODO: refactoring
@property
def n_modes(self):
return len(self.modes)
class MoG:
def __init__(self, lpf = 1, hpf = 1):
self.modes = []
self.dataExtractor = []
self.lowerProbFactor = lpf
self.higherProbFactor = hpf
# Mimic tf datasets generator
self.train = AttrDict({'next_batch':
lambda b: (self.next_batch(b), None)})
self.train.images = [np.zeros([2])]
def add_mode(self, x, y, std=1.0):
x = float(x)
y = float(y)
std = float(std)
self.modes.append({'x': x,'y': y, 'std': std})
return self
def generate_sample(self, with_label=False):
# Pick a mode
# mode = random.choice(self.modes)
index = random.choice(self.dataExtractor)
mode = self.modes[index]
x = np.random.normal(mode['x'], mode['std'])
y = np.random.normal(mode['y'], mode['std'])
return (x,y)
def estimate_mode_idx(self, x, y, thres=3.0):
x = float(x)
y = float(y)
thres = float(thres)
_min_dist = np.inf
_min_i = -1
for i, mode in enumerate(self.modes):
m_x = mode['x']
m_y = mode['y']
m_std = mode['std']
dist = np.sqrt((m_x - x) * (m_x - x) + (m_y - y) * (m_y - y))
if (dist <= thres * m_std):
# Keep the index with minimum dist.
if (dist < _min_dist):
_min_dist = dist
_min_i = i
return _min_i
def estimate_mode_idxs(self, arr, thres=3.0):
ret = np.apply_along_axis(lambda x:
self.estimate_mode_idx(x[0], x[1], thres),
1,
arr
)
return ret
def next_batch(self, batchsize=128):
numbers = []
for i in range(batchsize):
numbers.append(self.generate_sample())
return np.array(numbers)
def get_hq_ratio(self, arr, thres=3.0):
ret = self.estimate_mode_idxs(arr, thres)
return np.sum(ret >= 0) / float(len(ret))
def get_n_modes(self, arr, thres=3.0):
visited = [False for x in self.modes]
ret = self.estimate_mode_idxs(arr, thres)
for r in ret:
if r >= 0:
visited[r] = True
return sum(visited)
# TODO: refactoring
def plot(self, img_generator, fig_id=None, batch_size = 128):
samples = img_generator(batch_size * 8) #TODO originally 1024
fig = scatter(samples, fig_id, xlim=(-21, 21), ylim=(-21, 21)) #TODO Originally from -7 to 7
# Plot true samples
modes = [(m['x'], m['y']) for m in self.modes]
modes = np.array(modes)
plt.figure(fig_id)
plt.scatter(modes[:, 0], modes[:, 1])
return fig
@property
def n_modes(self):
return len(self.modes)
class Spiral():
# Please refer to https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/learn/python/learn/datasets/synthetic.py
def __init__(self, size=4, std=0.05):
self.train = AttrDict({'next_batch': self.next_batch})
self.train.images = [np.zeros([2])]
self.std = std
self.size = size
self.n_modes = 100
def next_batch(self, n_samples):
X, Y = tf_learn.datasets.synthetic.spirals(n_samples, self.std, n_loops=1)
X = self.size * X
return X, Y
def get_hq_ratio(self, samples, thres=3.0):
# TODO: Count # of samples within threshold dist.
true_X, _ = tf_learn.datasets.synthetic.spirals(self.n_modes, n_loops=1)
true_X *= self.size
# Naive impl.
dist = np.zeros([self.n_modes, len(samples)], dtype=np.float64)
for i in range(self.n_modes):
for j in range(len(samples)):
dist[i, j] = np.linalg.norm(true_X[i] - samples[j])
hq_cnt = np.sum(np.min(dist, axis=0) < self.std * thres * self.size)
return hq_cnt / float(len(samples))
def get_n_modes(self, arr, thres=3.0):
true_X, _ = tf_learn.datasets.synthetic.spirals(self.n_modes, n_loops=1)
true_X *= self.size
# Naive impl.
dist = np.zeros([self.n_modes, len(arr)], dtype=np.float64)
for i in range(self.n_modes):
for j in range(len(arr)):
dist[i, j] = np.linalg.norm(true_X[i] - arr[j])
visited = np.any((dist < self.std * thres * self.size), axis=1)
return sum(visited)
# TODO: refactoring
def plot(self, img_generator, fig_id=None):
samples = img_generator(1024)
fig = scatter(samples, fig_id, xlim=(-7, 7), ylim=(-7,7))
# Plot true samples
true_X, _ = tf_learn.datasets.synthetic.spirals(self.n_modes, n_loops=1)
true_X *= self.size
plt.figure(fig_id)
plt.scatter(true_X[:,0], true_X[:, 1])
return fig
def rect_MoG(size, lpf = 1, hpf = 1, std=0.1):
assert(size % 2 == 1)
mog = MoG(lpf, hpf)
_start = - size + 1
_end = size
_std = std
index = 0
lowProbMembr = [0, 4, 12, 20, 24]
for i in range(_start, _end, 2):
for j in range(_start, _end, 2):
mog.add_mode(4*i, 4*j, _std) #TODO: Originally it was (i, j, _std)
if index in lowProbMembr:
for iterInd in range(mog.lowerProbFactor):
mog.dataExtractor.append(index)
else:
for iterInd in range(mog.higherProbFactor):
mog.dataExtractor.append(index)
index += 1
return mog
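# Note on the sampling weights above (illustrative comment, not part of the original script):
# generate_sample() draws a mode index uniformly from mog.dataExtractor, so a mode whose index
# was appended hpf times is drawn hpf/lpf times more often than one appended lpf times.
# For example, rect_MoG(5, lpf=1, hpf=3) appends each index in lowProbMembr once and every
# other mode index three times.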
def specs_MoG(size, lpf = 1, hpf = 1, std=0.1):
#uses specs.txt
assert(size % 2 == 1)
mog = MoG(lpf, hpf)
_start = - size + 1
_end = size
_std = std
index = 0
lowProbMembr = [0, 4, 12, 20, 24]
lines = []
with open('datasets/specs.txt') as f:
for line in f:
lines.append(line)
for line in lines:
i = float(line.rstrip().split()[0])
j = float(line.rstrip().split()[1])
mog.add_mode(i, j, _std)
if index in lowProbMembr:
for iterInd in range(mog.lowerProbFactor):
mog.dataExtractor.append(index)
else:
for iterInd in range(mog.higherProbFactor):
mog.dataExtractor.append(index)
index += 1
return mog
def specs_MoG1D(size, lpf = 1, hpf = 1, std=0.1):
#uses specs.txt
#assert(size % 2 == 1)
mog = MoG1D(lpf, hpf)
_start = - size + 1
_end = size
_std = std
index = 0
lowProbMembr = [0, 4, 12, 20, 24]
lines = []
with open('datasets/specs1D.txt') as f:
for line in f:
lines.append(line)
for line in lines:
print(line.rstrip())
i = float(line.rstrip())
#j = float(line.rstrip().split()[1])
mog.add_mode(i, _std)
if index in lowProbMembr:
for iterInd in range(mog.lowerProbFactor):
mog.dataExtractor.append(index)
else:
for iterInd in range(mog.higherProbFactor):
mog.dataExtractor.append(index)
index += 1
return mog
if __name__ == '__main__':
# Create
mog = rect_MoG(5)
# datasets = mog.generate_batch(4096)
data = mog.train.next_batch(4096)
plt.scatter(data[0][:,0], data[0][:,1], alpha=0.1)
data = Spiral().train.next_batch(4096)
plt.scatter(data[0][:, 0], data[0][:, 1], alpha=0.1)
plt.show()
|
StarcoderdataPython
|
27962
|
<gh_stars>0
import argparse
from sklearn import decomposition
from sklearn.manifold import TSNE
from scripts.utils.utils import init_logger, save_npz
from scripts.utils.documents import load_document_topics
logger = init_logger()
def main():
parser = argparse.ArgumentParser(description='maps given high-dimensional documents to 2d document representations with t-SNE')
parser.add_argument('--document-topics', type=argparse.FileType('r'), help='path to input document-topic-file (.npz)', required=True)
parser.add_argument('--documents-2d', type=argparse.FileType('w'), help='path to output document-2d-data (.npz)', required=True)
args = parser.parse_args()
input_document_topics_path = args.document_topics.name
output_documents_2d_path = args.documents_2d.name
document_topics = load_document_topics(input_document_topics_path)
#model = decomposition.PCA(n_components=2)
model = TSNE(n_components=2, verbose=1, perplexity=100, n_iter=1000)
logger.info('running 2d-transformation with model {}'.format(model))
documents_2d = model.fit_transform(document_topics)
logger.debug('2d-transformation res\n{}'.format(documents_2d))
logger.info('saving 2d-documents')
save_npz(output_documents_2d_path, documents_2d)
if __name__ == '__main__':
main()
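# Hypothetical invocation (file names are placeholders for whatever your pipeline produces):
# python <this_script>.py --document-topics document-topics.npz --documents-2d documents-2d.npz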
|
StarcoderdataPython
|
112832
|
<filename>bin/04_transition_list.py<gh_stars>1-10
#!/bin/python
#
# TRANSITION LIST
# ===============
#
# This executable produces the building transition list, mapping the
# arrival and departure time of each user from each building.
#
# Arrival and departure times stand for the first session start and the last
# session end time of a contiguous run of sessions that took place in the same
# building. This does not mean that the user had an uninterrupted session in the
# same building: the user may have had several sessions between the arrival and
# departure times, using multiple different APs in the same building. Nor does it
# mean that the individual was in the building for the whole period; the individual
# could have left the university and come back, always connecting from the same
# building.
#
# The program took about 2 minutes to complete on a table with 90k user ids and
# 13 million sessions. The warehouse was hosted on a server with an Intel
# Xeon E5 v4 at 2.20GHz, 40 CPU cores and 500GB of total memory. The warehouse
# was deployed via Docker containers; only 20 CPU cores and 100GB of memory
# were made available to it.
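#
# Worked illustration of the block-numbering trick in the query below (made-up sessions
# for one user, ordered by session_start):
#   building   next_same_building   block = SUM(next_same_building) from current row to end
#   A          0                    3
#   A          1                    3
#   B          0                    2
#   B          1                    2
#   A          1                    1
# Rows of the same contiguous visit share a block value, so grouping by
# (userid, building, block) and taking MIN(session_start) / MAX(session_end)
# yields the arrival and departure time of each visit.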
import logging
import argparse
from sqlalchemy import create_engine
from __init__ import resolve_args
def create_view(engine):
with engine.begin() as conn:
conn.execute(
f"""
CREATE MATERIALIZED VIEW IF NOT EXISTS views.bdg_transition AS
SELECT
userid_key,
building_key,
min(session_start) AS arrival_time,
max(session_end) AS departure_time
FROM (
SELECT
userid_key,
building_key,
session_start,
session_end,
SUM(next_same_building) OVER (
PARTITION BY f.userid_key
ORDER BY f.session_start
RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING
) AS block
FROM (
SELECT
f.userid_key,
ap.building_key,
f.session_start,
f.session_end,
CASE
WHEN ((LEAD(ap.building_key) OVER w) = ap.building_key) THEN 0
ELSE 1
END AS next_same_building
FROM views.stationary_session f, dimension.ap
WHERE ap.key = f.ap_key
WINDOW w AS (PARTITION BY f.userid_key ORDER BY f.session_start)
) f
) f
GROUP BY userid_key, building_key, block
ORDER BY userid_key, arrival_time
WITH NO DATA
"""
)
conn.execute(
f"""
CREATE UNIQUE INDEX IF NOT EXISTS ix_bdg_transition_userid_key_arrival_time
ON views.bdg_transition (userid_key, arrival_time)
"""
)
conn.execute("REFRESH MATERIALIZED VIEW views.bdg_transition")
def main(args):
engine = create_engine(args.wifi_conn)
logging.info("Creating the transition list.")
create_view(engine)
logging.info("Done.")
if __name__ == "__main__":
cli = argparse.ArgumentParser(description="Creates the transition list.")
args = resolve_args(cli)
main(args)
|
StarcoderdataPython
|
9744982
|
<reponame>jcolekaplan/computer_vision<gh_stars>0
"""
<NAME>
keypoint_detection.py
Check how consistent the results of Harris keypoint detection and SIFT keypoint
detection with each other are.
_/`.-'`.
_ _/` . _.'
..:::::.(_) /` _.'_./
.oooooooooo\ \o/.-'__.'o.
.ooooooooo`._\_|_.'`oooooob.
.ooooooooooooooooooooo&&oooooob.
.oooooooooooooooooooo&@@@@@@oooob.
.ooooooooooooooooooooooo&&@@@@@ooob.
doooooooooooooooooooooooooo&@@@@ooob
doooooooooooooooooooooooooo&@@@oooob
dooooooooooooooooooooooooo&@@@ooooob
dooooooooooooooooooooooooo&@@oooooob
`dooooooooooooooooooooooooo&@ooooob'
`doooooooooooooooooooooooooooooob'
`doooooooooooooooooooooooooooob'
`doooooooooooooooooooooooooob'
`doooooooooooooooooooooooob'
`doooooooooooooooooooooob'
`dooooooooobodoooooooob'
`doooooooob dooooooob'
`"""""""' `""""""'
They say comparing Harris and SIFT keypoints is like comparing
apples and oranges...ok they don't say that...sue me.
"""
import sys
import cv2 as cv
import numpy as np
def harris(image, sigma):
"""
Harris measure computation from lecture
Take in image and sigma
Perform Gaussian smoothing, get the components of the outer product,
convolution of outer product and Gaussian kernel, and compute
Harris measure
Return Harris measure
"""
imgDx, imgDy = getDerivs(image, sigma)
imgDxSq = imgDx * imgDx
imgDySq = imgDy * imgDy
imgDxDy = imgDx * imgDy
hSigma = int(2*sigma)
hKsize = (4*hSigma+1,4*hSigma+1)
imgDxSq = cv.GaussianBlur(imgDxSq, hKsize, hSigma)
imgDySq = cv.GaussianBlur(imgDySq, hKsize, hSigma)
imgDxDy = cv.GaussianBlur(imgDxDy, hKsize, hSigma)
kappa = 0.004
imgDet = imgDxSq * imgDySq - imgDxDy * imgDxDy
imgTrace = imgDxSq + imgDySq
imgHarris = imgDet - kappa * imgTrace * imgTrace
return imgHarris
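# Note: the function above computes the standard Harris response R = det(M) - kappa * trace(M)^2,
# where M is the Gaussian-smoothed structure tensor [[Ix*Ix, Ix*Iy], [Ix*Iy, Iy*Iy]];
# strongly positive R marks corner-like points, which the rest of the script turns into keypoints.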
def getDerivs(image, sigma):
"""
Helper function for harris function
Take in image and sigma
Get gradient derivatives in x and y directions
Return derivatives
"""
ksize = (int(4*sigma+1),int(4*sigma+1))
imgGauss = cv.GaussianBlur(image.astype(np.float64), ksize, sigma)
kx,ky = cv.getDerivKernels(1,1,3)
kx = np.transpose(kx/2)
ky = ky/2
imgDx = cv.filter2D(imgGauss,-1,kx)
imgDy = cv.filter2D(imgGauss,-1,ky)
return imgDx, imgDy
def nonMaxSuppression(image, sigma):
"""
Non-max suppression from class without thresholding
Normalize Harris measure, dilate based on maxDist(from Sigma),
compare the two to get peaks
Normalize peaks and return peaks
"""
imgHarris = normalize(image)
maxDist = int(2*sigma)
kernel = np.ones((2*maxDist+1, 2*maxDist+1), np.uint8)
imgHarris = imgHarris.astype(np.uint8)
imgHarrisDilate = cv.dilate(imgHarris, kernel)
imgPeaks = cv.compare(imgHarris, imgHarrisDilate, cv.CMP_GE)
imgPeaks = imgPeaks * image
imgPeaks = normalize(imgPeaks)
return imgPeaks
def normalize(image):
"""
Helper function
Takes in image
Normalizes(scales) image to 255 (whites get whiter)
Return normalized image
"""
min = np.min(image)
max = np.max(image)
normalImg = 255*(image - min) / (max - min)
return normalImg
def getHarrisKeyPoints(orgImage, imgPeaks, sigma):
"""
Go through all the keypoints in the peaks and:
Get coordinates
Create new OpenCV keypoint with coordinates, sigma, and the magnitude
of the peak (response)
Add new keypoint to list of keypoints
Add coordinates to lists of coordinates (helpful later)
"""
sortHarris = np.sort(imgPeaks,axis=None)[::-1]
keyPoints = list()
kpXCoords = []
kpYCoords = []
# Since we only need 200 top keypoints, start with 500 since top 200
# might contain duplicates
for i in range(500):
x, y = np.where(imgPeaks == sortHarris[i])
x, y = int(x[0]), int(y[0])  # np.where returns arrays; take the first occurrence of this value
newKp = cv.KeyPoint(y, x, 2*sigma, -1, float(imgPeaks[x, y]))
keyPoints.append(newKp)
kpXCoords.append(int(x))
kpYCoords.append(int(y))
"""
Go through the KeyPoints and filter out ones that are too close to
each other
"""
newKP = []
newKPX = []
newKPY = []
for i in range(1, len(keyPoints)):
xPrev, yPrev = keyPoints[i-1].pt
x,y = keyPoints[i].pt
if x-1 <= xPrev <= x+1 and y-1 <= yPrev <= y+1:
pass
else:
newKP.append(keyPoints[i-1])
newKPX.append(xPrev)
newKPY.append(yPrev)
"""
Return the top 200 keypoints along with their coordinates
"""
return newKP[:200], (newKPY[:200], newKPX[:200])
def getSiftKeyPoints(image, sigma):
"""
SIFT keypoint finder from lecture
Take in image and sigma
Get SIFT keypoints, filter out duplicates and ones with too high of a size
Keep track of the coordinates of each keypoints
Return top 200 keypoints along with their coordinates
"""
sift = cv.xfeatures2d.SIFT_create()
keyPoints = sift.detect(image, None)
keyPoints.sort(key = lambda k: k.response)
keyPoints = keyPoints[::-1]
startInd = 0
for i in range(len(keyPoints)):
if (keyPoints[i].size > 3*sigma):
startInd = i+1
else:
break
kpUnique = [keyPoints[startInd]]
x,y = keyPoints[startInd].pt
siftXCoords = [float(y)]
siftYCoords = [float(x)]
for k in keyPoints[startInd+1:]:
if (k.pt != kpUnique[-1].pt) and (k.size < 3*sigma):
kpUnique.append(k)
x,y = k.pt
siftXCoords.append(float(y))
siftYCoords.append(float(x))
return kpUnique[:200], (siftXCoords, siftYCoords)
def outputStats(harrisKP, siftKP):
"""
Take in Harris and SIFT keypoints and output stats about the top
ten keypoints with the largest response
"""
print("\nTop 10 Harris keypoints:")
for i in range(10):
x,y = harrisKP[i].pt
response = harrisKP[i].response
size = harrisKP[i].size
print("{}: ({:.2f}, {:.2f}) {:.4f} {:.2f}".format(i, x, y, response, size))
print("\nTop 10 SIFT keypoints:")
for i in range(10):
x,y = siftKP[i].pt
response = siftKP[i].response
size = siftKP[i].size
print("{}: ({:.2f}, {:.2f}) {:.4f} {:.2f}".format(i, x, y, response, size))
def compareKeyPoints(harrisCoords, siftCoords, harrisOrSift):
"""
Takes in Harris and SIFT keypoints
Compares the top 100 Harris keypoints to the top 200 SIFT keypoints
or vice versa
Uses numpy to get the distances from each keypoint to the keypoints in
the other list
Output average distances and how the ranks of each keypoint compares to its
corresponding keypoint in the other list
"""
harrisX, harrisY = harrisCoords
siftX, siftY = siftCoords
if harrisOrSift == "Harris":
siftOrHarris = "SIFT"
oneHundredX, oneHundredY = np.asarray(harrisX)[:100], np.asarray(harrisY)[:100]
twoHundredX, twoHundredY = np.asarray(siftX)[:200], np.asarray(siftY)[:200]
else:
siftOrHarris = "Harris"
oneHundredX, oneHundredY = np.asarray(siftX)[:100], np.asarray(siftY)[:100]
twoHundredX, twoHundredY = np.asarray(harrisX)[:200], np.asarray(harrisY)[:200]
minDists = []
indexDiffs = []
for i in range(100):
distances = np.sqrt((twoHundredX - oneHundredX[i])**2 + (twoHundredY - oneHundredY[i])**2)
minDists.append(np.min(distances))
indexDiffs.append(np.abs(np.argmin(distances) - i))
print("\n{} keypoint to {} distances:\nnum_from 100 num_to 200".format(siftOrHarris, harrisOrSift))
print("Median distance: {:.1f}".format(np.median(minDists)))
print("Average distance: {:.1f}".format(np.average(minDists)))
print("Median index difference: {:.1f}".format(np.median(indexDiffs)))
print("Average index difference: {:.1f}".format(np.average(indexDiffs)))
if __name__ == "__main__":
"""
Handle command line arguments
"""
if len(sys.argv) != 3:
print("Correct usage: p2_compare.py sigma img")
sys.exit()
else:
sig = sys.argv[1]
inImgName = sys.argv[2]
try:
sig = float(sig)
except ValueError:
print("Sigma must be real number!")
sys.exit()
inImg = cv.imread(inImgName, 0)
if inImg is None:  # cv.imread returns None instead of raising when the file is missing or invalid
print("{} does not exist or is not a valid image!".format(inImgName))
sys.exit()
"""
Harris keypoints
"""
harrisImg = harris(inImg, sig)
harrisImgSup = nonMaxSuppression(harrisImg, sig)
harrisKP, hCoords = getHarrisKeyPoints(inImg, harrisImgSup, sig)
harrisOut = cv.drawKeypoints(inImg.astype(np.uint8), harrisKP, None)
"""
SIFT keypoints
"""
siftKP, sCoords = getSiftKeyPoints(inImg, sig)
siftOut = cv.drawKeypoints(inImg.astype(np.uint8), siftKP, None)
"""
Output images with keypoints drawn on them
"""
name, ext = inImgName.split(".")
cv.imwrite("{}_harris.{}".format(name, ext), harrisOut)
cv.imwrite("{}_sift.{}".format(name, ext), siftOut)
"""
Output stats
"""
outputStats(harrisKP, siftKP)
compareKeyPoints(hCoords, sCoords, "Harris")
compareKeyPoints(hCoords, sCoords, "SIFT")
|
StarcoderdataPython
|
1696776
|
<gh_stars>100-1000
from __future__ import annotations
from psycopg2._psycopg import Column
from local_data_api.models import Field
from local_data_api.resources import PostgresSQL
from tests.test_resource.test_resource import helper_default_test_field
def test_create_connection_maker(mocker):
mock_connect = mocker.patch('local_data_api.resources.postgres.psycopg2.connect')
connection_maker = PostgresSQL.create_connection_maker(
host='127.0.0.1',
port=3306,
user_name='root',
password='<PASSWORD>',
engine_kwargs={'auto_commit': True},
)
connection_maker(database='test')
mock_connect.assert_called_once_with(
auto_commit=True,
host='127.0.0.1',
password='<PASSWORD>',
port=3306,
user='root',
dbname='test',
)
mock_connect = mocker.patch('local_data_api.resources.postgres.psycopg2.connect')
connection_maker = PostgresSQL.create_connection_maker()
connection_maker()
mock_connect.assert_called_once_with()
def test_create_column_metadata(mocker):
connection_mock = mocker.Mock()
cursor_mock = mocker.Mock()
connection_mock.cursor.side_effect = [cursor_mock]
cursor_mock.description = [Column(name="mock", scale=1)]
dummy = PostgresSQL(connection_mock)
dummy.create_column_metadata_set(cursor_mock)
assert True
def test_from_value(mocker) -> None:
connection_mock = mocker.Mock()
dummy = PostgresSQL(connection_mock)
helper_default_test_field(dummy)
|
StarcoderdataPython
|
3486188
|
# -*- coding: utf-8 -*-
# File generated according to Generator/ClassesRef/Simulation/DriveWave.csv
# WARNING! All changes made in this file will be lost!
"""Method code available at https://github.com/Eomys/pyleecan/tree/master/pyleecan/Methods/Simulation/DriveWave
"""
from os import linesep
from logging import getLogger
from ._check import check_var, raise_
from ..Functions.get_logger import get_logger
from ..Functions.save import save
from ..Functions.copy import copy
from ..Functions.load import load_init_dict
from ..Functions.Load.import_class import import_class
from .Drive import Drive
# Import all class method
# Try/catch to remove unnecessary dependencies in unused method
try:
from ..Methods.Simulation.DriveWave.get_wave import get_wave
except ImportError as error:
get_wave = error
from ._check import InitUnKnowClassError
from .Import import Import
class DriveWave(Drive):
"""Drive to generate a wave according to an Import object"""
VERSION = 1
# cf Methods.Simulation.DriveWave.get_wave
if isinstance(get_wave, ImportError):
get_wave = property(
fget=lambda x: raise_(
ImportError("Can't use DriveWave method get_wave: " + str(get_wave))
)
)
else:
get_wave = get_wave
# save and copy methods are available in all object
save = save
copy = copy
# get_logger method is available in all object
get_logger = get_logger
def __init__(
self,
wave=-1,
Umax=800,
Imax=800,
is_current=False,
init_dict=None,
init_str=None,
):
"""Constructor of the class. Can be use in three ways :
- __init__ (arg1 = 1, arg3 = 5) every parameters have name and default values
for pyleecan type, -1 will call the default constructor
- __init__ (init_dict = d) d must be a dictionnary with property names as keys
- __init__ (init_str = s) s must be a string
s is the file path to load
ndarray or list can be given for Vector and Matrix
object or dict can be given for pyleecan Object"""
if init_str is not None: # Load from a file
init_dict = load_init_dict(init_str)[1]
if init_dict is not None: # Initialisation by dict
assert type(init_dict) is dict
# Overwrite default value with init_dict content
if "wave" in list(init_dict.keys()):
wave = init_dict["wave"]
if "Umax" in list(init_dict.keys()):
Umax = init_dict["Umax"]
if "Imax" in list(init_dict.keys()):
Imax = init_dict["Imax"]
if "is_current" in list(init_dict.keys()):
is_current = init_dict["is_current"]
# Set the properties (value check and conversion are done in setter)
self.wave = wave
# Call Drive init
super(DriveWave, self).__init__(Umax=Umax, Imax=Imax, is_current=is_current)
# The class is frozen (in Drive init), for now it's impossible to
# add new properties
def __str__(self):
"""Convert this object in a readeable string (for print)"""
DriveWave_str = ""
# Get the properties inherited from Drive
DriveWave_str += super(DriveWave, self).__str__()
if self.wave is not None:
tmp = self.wave.__str__().replace(linesep, linesep + "\t").rstrip("\t")
DriveWave_str += "wave = " + tmp
else:
DriveWave_str += "wave = None" + linesep + linesep
return DriveWave_str
def __eq__(self, other):
"""Compare two objects (skip parent)"""
if type(other) != type(self):
return False
# Check the properties inherited from Drive
if not super(DriveWave, self).__eq__(other):
return False
if other.wave != self.wave:
return False
return True
def as_dict(self):
"""Convert this object in a json seriable dict (can be use in __init__)"""
# Get the properties inherited from Drive
DriveWave_dict = super(DriveWave, self).as_dict()
if self.wave is None:
DriveWave_dict["wave"] = None
else:
DriveWave_dict["wave"] = self.wave.as_dict()
# The class name is added to the dict for deserialisation purpose
# Overwrite the mother class name
DriveWave_dict["__class__"] = "DriveWave"
return DriveWave_dict
def _set_None(self):
"""Set all the properties to None (except pyleecan object)"""
if self.wave is not None:
self.wave._set_None()
# Set to None the properties inherited from Drive
super(DriveWave, self)._set_None()
def _get_wave(self):
"""getter of wave"""
return self._wave
def _set_wave(self, value):
"""setter of wave"""
if isinstance(value, str): # Load from file
value = load_init_dict(value)[1]
if isinstance(value, dict) and "__class__" in value:
class_obj = import_class("pyleecan.Classes", value.get("__class__"), "wave")
value = class_obj(init_dict=value)
elif type(value) is int and value == -1: # Default constructor
value = Import()
check_var("wave", value, "Import")
self._wave = value
if self._wave is not None:
self._wave.parent = self
wave = property(
fget=_get_wave,
fset=_set_wave,
doc=u"""Wave generator
:Type: Import
""",
)
|
StarcoderdataPython
|
6529593
|
<gh_stars>0
from db import db
from models.enums import RoleType
class CreatorModel(db.Model):
"""Creator Model"""
__tablename__ = "creators"
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(255), nullable=False, unique=True)
password = db.Column(db.String(255), nullable=False)
role = db.Column(db.Enum(RoleType), default=RoleType.SIGNED_CREATOR, nullable=False)
secrets = db.relationship("SecretModel", back_populates="creator")
|
StarcoderdataPython
|
1663032
|
<reponame>markovmodel/pyemma_tutorials<gh_stars>10-100
import os
import versioneer
from setuptools import setup
def copy_notebooks():
import shutil
dest = os.path.join('pyemma_tutorials', 'notebooks')
try:
shutil.rmtree(dest, ignore_errors=True)
shutil.copytree('notebooks', dest)
print('moved notebooks into pkg')
except OSError:
pass
metadata=dict(
name='pyemma_tutorials',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
packages=['pyemma_tutorials'],
package_data={'pyemma_tutorials': ['notebooks/*',
'notebooks/static/*',
'jupyter_notebook_config.py',
'jupyter_notebook_config.json',
]},
include_package_data=True,
entry_points={'console_scripts': ['pyemma_tutorials = pyemma_tutorials.cli:main'],},
install_requires=['pyemma',
'mdshare',
'nbexamples',
'nglview',
'notebook',
'jupyter_contrib_nbextensions',
],
zip_safe=False,
)
if __name__ == '__main__':
copy_notebooks()
setup(**metadata)
|
StarcoderdataPython
|
9653593
|
<reponame>luna-ml/leaderboard<filename>cartpole-v1/models/dqn-ritakurban/agent.py
import pathlib
import keras
import numpy as np
class Agent():
def __init__(self, **kwargs):
self.model = self.load_model(f"""{kwargs.get("path")}/pretrained""")
def load_model(self, path):
"""Load pretrained model"""
model = keras.models.load_model(path)
return model
def predict(self, state):
# return 0 or 1 to control cart to left and right
q_values = self.model.predict(np.array([state]))
return np.argmax(q_values[0])
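# Hypothetical usage sketch (not part of the original file; assumes the classic `gym` API
# and that `path` points at the directory holding the `pretrained` Keras model):
# import gym
# env = gym.make("CartPole-v1")
# agent = Agent(path="cartpole-v1/models/dqn-ritakurban")
# state, done, total_reward = env.reset(), False, 0.0
# while not done:
#     action = agent.predict(state)  # 0 = push cart left, 1 = push cart right
#     state, reward, done, _ = env.step(action)
#     total_reward += reward
# print(total_reward)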
|
StarcoderdataPython
|
4904139
|
from src.pgassets import pgTextPanel
class pgButton(pgTextPanel):
def __init__(self, pos: tuple, size: tuple, text="", color=(255, 255, 255), borderwidth=2, transparent=False, fontsize=20):
pgTextPanel.__init__(self, pos, size, text, color, borderwidth, transparent, fontsize=fontsize)
|
StarcoderdataPython
|
1661086
|
<filename>src/Sixth Chapter/Exercise12.py
# Write a function is_factor(f, n) that passes the tests below.
import sys
def is_factor(f, n):
return n % f == 0
def test(did_pass):
""" Print the result of a test. """
linenum = sys._getframe(1).f_lineno # Get the caller's line number.
if did_pass:
msg = "Test at line {0} ok.".format(linenum)
else:
msg = ("Test at line {0} FAILED.".format(linenum))
print(msg)
def test_suite():
""" Run the suite of tests for code in this module (this file).
"""
test(is_factor(3, 12))
test(not is_factor(5, 12))
test(is_factor(7, 14))
test(not is_factor(7, 15))
test(is_factor(1, 15))
test(is_factor(15, 15))
test(not is_factor(25, 15))
test_suite() # Here is the call to run the tests
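# All seven assertions above hold, so running this file prints seven "Test at line ... ok." messages.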
|
StarcoderdataPython
|
4897108
|
<filename>src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/spinn_front_end_common/utilities/reload/reload_buffered_vertex.py<gh_stars>1-10
# front end common imports
from spinn_front_end_common.interface.buffer_management.buffer_models.\
sends_buffers_from_host_pre_buffered_impl import \
SendsBuffersFromHostPreBufferedImpl
from spinn_front_end_common.interface.buffer_management.storage_objects.\
buffered_sending_region import BufferedSendingRegion
from spinn_front_end_common.utilities import constants
_MAX_MEMORY_USAGE = constants.MAX_SIZE_OF_BUFFERED_REGION_ON_CHIP
class ReloadBufferedVertex(SendsBuffersFromHostPreBufferedImpl):
""" A Buffered sending vertex when using reload
"""
def __init__(self, label, region_files_tuples):
"""
:param label: The label of the vertex
:param region_files_tuples: A list of (region id, file name, max buffer size) tuples
"""
self._label = label
self._send_buffers = dict()
for (region_id, filename, max_size_of_buffer) in region_files_tuples:
send_buffer = BufferedSendingRegion(max_size_of_buffer)
reader = open(filename, "r")
line = reader.readline()
while line != "":
bits = line.split(":")
send_buffer.add_key(int(bits[0]), int(bits[1]))
line = reader.readline()
self._send_buffers[region_id] = send_buffer
SendsBuffersFromHostPreBufferedImpl.__init__(
self, self._send_buffers)
|
StarcoderdataPython
|
6454923
|
<reponame>ssinad/gcp
"""
Create a version of processed data
used in our notebooks.
Instead of selecting an individual county and then
making a chart, we'll clean all counties at once,
then subset.
Save it to `data` to use for our RMarkdown repo:
https://github.com/CityOfLosAngeles/covid19-rmarkdown
"""
import numpy as np
import pandas as pd
import default_parameters
import utils
from IPython.display import display, Markdown, HTML
start_date = default_parameters.start_date
today_date = default_parameters.today_date
yesterday_date = default_parameters.yesterday_date
one_week_ago = default_parameters.one_week_ago
fulldate_format = default_parameters.fulldate_format
# Clean the JHU county data at once
def clean_jhu(start_date):
df = utils.prep_us_county_time_series()
keep_cols = [
"county",
"state",
"state_abbrev",
"fips",
"date",
"Lat",
"Lon",
"cases",
"deaths",
"new_cases",
"new_deaths",
]
df = (df[keep_cols]
.sort_values(["county", "state", "fips", "date"])
.reset_index(drop=True)
)
# Merge in population
pop = (pd.read_csv(utils.CROSSWALK_URL,
dtype={"county_fips": "str", "cbsacode": "str"})
[["county_fips", "county_pop"]]
.rename(columns = {"county_fips": "fips"})
)
df = pd.merge(df, pop,
on = "fips", how = "inner", validate = "m:1"
)
df = utils.calculate_rolling_average(df, start_date, today_date)
df = utils.find_tier_cutoffs(df, "county_pop")
df = utils.doubling_time(df, window=7)
return df
# Clean all CA counties hospitalizations data at once
def clean_hospitalizations(start_date):
df = pd.read_parquet(utils.HOSPITAL_SURGE_URL)
df = (df.assign(
date = pd.to_datetime(df.date).dt.date,
date2 = pd.to_datetime(df.date),
).rename(columns = {"county_fips": "fips"})
)
keep_cols = [
"county",
"fips",
"date",
"date2",
"hospitalized_covid",
"all_hospital_beds",
"icu_covid",
"all_icu_beds",
]
df = (
df[keep_cols]
.sort_values(["county", "fips", "date"])
.reset_index(drop=True)
)
# Calculate 7-day average
df = df.assign(  # 7-day rolling averages per county, so windows do not cross county boundaries
hospitalized_avg7 = df.groupby("fips").hospitalized_covid.transform(lambda s: s.fillna(0).rolling(window=7).mean()),
icu_avg7 = df.groupby("fips").icu_covid.transform(lambda s: s.fillna(0).rolling(window=7).mean()),
)
df = df[(df.date >= start_date) & (df.date < today_date)]
df = utils.make_long(df)
return df
# Caption to include under each county
def county_caption(df, county_name):
df = df[df.county == county_name]
'''
This changes the columns to string...which shows up incorrectly in Markdown.
cols_to_format = ["cases", "deaths", "new_cases", "new_deaths"]
for c in cols_to_format:
df[c] = df[c].map("{:,g}".format)
'''
np.seterr(divide='ignore', invalid='ignore')
extract_col = "cases"
cumulative_cases = df[df.date == yesterday_date].iloc[0][extract_col]
extract_col = "cases_avg7"
new_cases_1week = df[df.date == one_week_ago].iloc[0][extract_col]
new_cases_yesterday = df[df.date == yesterday_date].iloc[0][extract_col]
tier3_cutoff = df[df.date == yesterday_date].iloc[0]["tier3_case_cutoff"]
pct_change_new_cases = (((new_cases_yesterday - new_cases_1week) / new_cases_1week) * 100).round(1)
new_cases_tier4_proportion = (new_cases_yesterday / tier3_cutoff).round(1)
extract_col = "deaths"
cumulative_deaths = df[df.date == yesterday_date][extract_col].iloc[0]
extract_col = "deaths_avg7"
new_deaths_1week = df[df.date == one_week_ago].iloc[0][extract_col]
new_deaths_yesterday = df[df.date == yesterday_date].iloc[0][extract_col]
pct_change_new_deaths = (((new_deaths_yesterday - new_deaths_1week) / new_deaths_1week) * 100).round(1)
extract_col = "doubling_time"
doubling_time_1week = df[df.date == one_week_ago].iloc[0][extract_col].round(0).astype(int)
doubling_time_yesterday = df[df.date == yesterday_date].iloc[0][extract_col].round(0).astype(int)
# Add condition for small numbers; report 7-day rolling average instead of percent change
threshold = 10
cases_under = ((new_cases_1week <= threshold) or (new_cases_yesterday <= threshold))
cases_over = ((new_cases_1week > threshold) and (new_cases_yesterday > threshold))
deaths_under = ((new_deaths_1week <= threshold) or (new_deaths_yesterday <= threshold))
deaths_over = ((new_deaths_1week > threshold) and (new_deaths_yesterday > threshold))
if cases_under and deaths_over:
display(
Markdown(
f"As of {yesterday_date.strftime(fulldate_format)}, there were **{cumulative_cases:,}** total cases "
f"and **{cumulative_deaths:,}** total deaths. "
f"<br>In the past week, new cases went from **{new_cases_1week:.1f}** to **{new_cases_yesterday:.1f}**; "
f"new deaths grew by **{pct_change_new_deaths}%**. "
f"<br>New cases are **{new_cases_tier4_proportion:.1f}x** higher than the Tier 4 cut-off. <i><span style='color:#797C7C'>(1 = Tier 4 widespread cut-off; 2 = new cases are 2x higher than the Tier 4 cut-off)</span></i>."
f"<br>In the past week, the doubling time went from **{doubling_time_1week} days** to "
f"**{doubling_time_yesterday} days** <i><span style='color:#797C7C'>(longer doubling time is better)</span></i>. "
)
)
elif cases_over and deaths_under:
display(
Markdown(
f"As of {yesterday_date.strftime(fulldate_format)}, there were **{cumulative_cases:,}** total cases "
f"and **{cumulative_deaths:,}** total deaths. "
f"<br>In the past week, new cases grew by **{pct_change_new_cases}%**; "
f"new deaths went from **{new_deaths_1week:.1f}** to **{new_deaths_yesterday:.1f}**. "
f"<br>New cases are **{new_cases_tier4_proportion:.1f}x** higher than the Tier 4 cut-off. <i><span style='color:#797C7C'>(1 = Tier 4 widespread cut-off; 2 = new cases are 2x higher than the Tier 4 cut-off)</span></i>."
f"<br>In the past week, the doubling time went from **{doubling_time_1week} days** to "
f"**{doubling_time_yesterday} days** <i><span style='color:#797C7C'>(longer doubling time is better)</span></i>. "
)
)
elif cases_under and deaths_under:
display(
Markdown(
f"As of {yesterday_date.strftime(fulldate_format)}, there were **{cumulative_cases:,}** total cases "
f"and **{cumulative_deaths:,}** total deaths. "
f"<br>In the past week, new cases went from **{new_cases_1week:,.1f}** to **{new_cases_yesterday:,.0f}**; "
f"new deaths went from **{new_deaths_1week:.1f}** to **{new_deaths_yesterday:.1f}**. "
f"<br>New cases are **{new_cases_tier4_proportion:.1f}x** higher than the Tier 4 cut-off. <i><span style='color:#797C7C'>(1 = Tier 4 widespread cut-off; 2 = new cases are 2x higher than the Tier 4 cut-off)</span></i>."
f"<br>In the past week, the doubling time went from **{doubling_time_1week} days** to "
f"**{doubling_time_yesterday} days** <i><span style='color:#797C7C'>(longer doubling time is better)</span></i>. "
)
)
else:
display(
Markdown(
f"As of {yesterday_date.strftime(fulldate_format)}, there were **{cumulative_cases:,}** total cases "
f"and **{cumulative_deaths:,}** total deaths. "
f"<br>In the past week, new cases grew by **{pct_change_new_cases}%**; "
f"new deaths grew by **{pct_change_new_deaths}%**. "
f"<br>New cases are **{new_cases_tier4_proportion:.1f}x** higher than the Tier 4 cut-off. <i><span style='color:#797C7C'>(1 = Tier 4 widespread cut-off; 2 = new cases are 2x higher than the Tier 4 cut-off)</span></i>."
f"<br>In the past week, the doubling time went from **{doubling_time_1week} days** to "
f"**{doubling_time_yesterday} days** <i><span style='color:#797C7C'>(longer doubling time is better)</span></i>. "
)
)
def ca_hospitalizations_caption(df, county_name):
df = df[df.county == county_name]
if df.date.max() == default_parameters.two_days_ago:
yesterday_date = default_parameters.two_days_ago
one_week_ago = default_parameters.nine_days_ago
else:
yesterday_date = default_parameters.yesterday_date
one_week_ago = default_parameters.one_week_ago
np.seterr(divide='ignore', invalid='ignore')
extract_col = "COVID-ICU"
icu_1week = df[(df.date == one_week_ago) & (df["type"]==extract_col)].iloc[0]["num"]
icu_yesterday = df[(df.date == yesterday_date) & (df["type"]==extract_col)].iloc[0]["num"]
pct_change_icu = (((icu_yesterday - icu_1week) / icu_1week) * 100).round(1)
extract_col = "All COVID-Hospitalized"
hosp_1week = df[(df.date == one_week_ago) & (df["type"]==extract_col)].iloc[0]["num"]
hosp_yesterday = df[(df.date == yesterday_date) & (df["type"]==extract_col)].iloc[0]["num"]
pct_change_hosp = (((hosp_yesterday - hosp_1week) / hosp_1week) * 100).round(1)
# Add condition for small numbers; report 7-day rolling average instead of percent change
threshold = 10
icu_under = ((icu_1week <= threshold) or (icu_yesterday <= threshold))
icu_over = ((icu_1week > threshold) and (icu_yesterday > threshold))
hosp_under = ((hosp_1week <= threshold) or (hosp_yesterday <= threshold))
hosp_over = ((hosp_1week > threshold) and (hosp_yesterday > threshold))
if icu_under and hosp_over:
display(
Markdown(
f"In the past week, all COVID hospitalizations grew by **{pct_change_hosp}%**.; "
f"COVID ICU hospitalizations went from **{icu_1week:.1f}** to **{icu_yesterday:.1f}**. "
)
)
elif icu_over and hosp_under:
display(
Markdown(
f"In the past week, all COVID hospitalizations went from **{hosp_1week:.1f}** to **{hosp_yesterday:.1f}**.; "
f"COVID ICU hospitalizations grew by **{pct_change_icu}%**. "
)
)
elif icu_under and hosp_under:
display(
Markdown(
f"In the past week, all COVID hospitalizations went from **{hosp_1week:.1f}** to **{hosp_yesterday:.1f}**.; "
f"COVID ICU hospitalizations went from **{icu_1week:.1f}** to **{icu_yesterday:.1f}**. "
)
)
else:
display(
Markdown(
f"In the past week, all COVID hospitalizations grew by **{pct_change_hosp}%**.; "
f"COVID ICU hospitalizations grew by **{pct_change_icu}%**. "
)
)
|
StarcoderdataPython
|
5116456
|
<filename>python-code/tiny-app/headpose-detection/videoCapture.py
import numpy as np
import cv2
import argparse
import os.path as osp
from hpd import HPD
def main(args):
filename = args["input_file"]
if filename is None:
isVideo = False
cap = cv2.VideoCapture(0)
cap.set(3, 640)
cap.set(4, 480)
else:
isVideo = True
cap = cv2.VideoCapture(filename)
fps = cap.get(cv2.CAP_PROP_FPS)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fourcc = cv2.VideoWriter_fourcc(*'XVID')
name, ext = osp.splitext(filename)
out = cv2.VideoWriter(args["output_file"], fourcc, fps, (width, height))
# Initialize head pose detection
hpd = HPD(args["landmark_type"], args["landmark_predictor"])
count = 0
while(cap.isOpened()):
# Capture frame-by-frame
print('\rframe: %d' % count, end='')
ret, frame = cap.read()
if isVideo:
frame, angles = hpd.processImage(frame)
if frame is None:
break
else:
out.write(frame)
else:
frame = cv2.flip(frame, 1)
frame, angles = hpd.processImage(frame)
# Display the resulting frame
cv2.imshow('frame',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
count += 1
# When everything done, release the capture
cap.release()
if isVideo: out.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-i', metavar='FILE', dest='input_file', default=None, help='Input video. If not given, web camera will be used.')
parser.add_argument('-o', metavar='FILE', dest='output_file', default=None, help='Output video.')
parser.add_argument('-lt', metavar='N', dest='landmark_type', type=int, default=1, help='Landmark type.')
parser.add_argument('-lp', metavar='FILE', dest='landmark_predictor',
default='model/shape_predictor_68_face_landmarks.dat', help="Landmark predictor data file.")
args = vars(parser.parse_args())
main(args)
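# Hypothetical invocation (paths are placeholders): annotate a video file with head-pose
# angles using landmark type 1 and the predictor file shipped with the repo:
# python videoCapture.py -i input.mp4 -o output.avi -lt 1 -lp model/shape_predictor_68_face_landmarks.dat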
|
StarcoderdataPython
|
3451402
|
def get_numbers(src: list):
for num in range(len(src) - 1):
if src[num] < src[num + 1]:
yield src[num + 1]
pass
src = [300, 2, 12, 44, 1, 1, 4, 10, 7, 1, 78, 123, 55]
print(*get_numbers(src))
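# Expected output: 12 44 4 10 78 123 (every element that is greater than its predecessor)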
|
StarcoderdataPython
|
5159159
|
import graphene
class Query(graphene.ObjectType):
hello = graphene.String(name=graphene.String(default_value="stranger"))
def resolve_hello(self, info, name):
return 'Hello ' + name
schema = graphene.Schema(query=Query)
result = schema.execute('{ hello ( name: "Test" )}')
print(result.data['hello'])  # "Hello Test" (the default "stranger" is only used when no name is given)
class Episode(graphene.Enum):
NEWHOPE = 4
EMPIRE = 5
JEDI = 6
@property
def description(self):
if self == Episode.NEWHOPE:
return 'New Hope Episode'
return 'Other episode'
Episode_ = graphene.Enum('Episode', [('NEWHOPE', 4), ('EMPIRE', 5), ('JEDI', 6)])
# graphene.Enum.from_enum(AlreadyExistingPyEnum, description=lambda value: return 'foo' if value == AlreadyExistingPyEnum.Foo else 'bar')
from enum import Enum
class Color(Enum):
RED = 1
GREEN = 2
BLUE = 3
Episode__ = graphene.Enum.from_enum(Color)
print(Episode__.get(1))
print(Color.RED)
import datetime
from graphene.types import Scalar
from graphql.language import ast
class DateTime(Scalar):
'''DateTime Scalar Description'''
@staticmethod
def serialize(dt):
return dt.isoformat()
@staticmethod
def parse_literal(node):
if isinstance(node, ast.StringValue):
return datetime.datetime.strptime(
node.value, "%Y-%m-%dT%H:%M:%S.%f")
@staticmethod
def parse_value(value):
return datetime.datetime.strptime(value, "%Y-%m-%dT%H:%M:%S.%f")
dt = DateTime()
print(dt.parse_value("1990-3-2T11:22:22.3"))
class Character(graphene.ObjectType):
name = graphene.NonNull(graphene.String)
class Character(graphene.ObjectType):
name = graphene.String(required=True)
ch = Character()
print(ch.name)
# class Query(graphene.ObjectType):
# hello = Character(name=graphene.String)
# # User = graphene.Field(User, id=graphene.ID(required=True))
#
# def resolve_hello(self, info, name):
# return 'Hello ' + name
#
# schema = graphene.Schema(query=Query)
# result = schema.execute('{ hello ( name: "Test" )}')
# print(result.data['hello']) # "Hello stranger"
#
# # ch = Character('str')
# # print(ch)
#
# # schema = graphene.Schema(query=Character)
# #
# # result = schema.execute('{ name }')
# # print(result.data['nane']) # "Hello stranger"
#
#
# class Character(graphene.ObjectType):
# name = graphene.String(required=True)
class Query(graphene.ObjectType):
name = graphene.String()
def resolve_name(self, info):
return info.context.get('name')
schema = graphene.Schema(Query)
result = schema.execute('{ name }', context_value={'name': 'Syrus'})
print(result.data['name'])
# class User(graphene.ObjectType):
# firstName = graphene.String()
# lastName = graphene.String()
# id = graphene.ID()
####################################################################################################################
class User(graphene.ObjectType):
first_name = graphene.String()
last_name = graphene.String()
id = graphene.ID()
users = [User(first_name='Peter', last_name='Griffin', id=1), User(first_name='Ivan', last_name='Banan', id=12)]
class Query(graphene.ObjectType):
user = graphene.List(User, id=graphene.ID())
def resolve_user(self, info, id):
return [user for user in users if user.id == int(id)]
schema = graphene.Schema(Query)
result = schema.execute(
'''query getUser($id: ID) {
user(id: $id) {
id
firstName
lastName
}
}''',
variable_values={'id': 12}
)
print(result.data['user'])
class Query(graphene.ObjectType):
user = graphene.List(User, id=graphene.ID())
def resolve_user(self, info, id):
return [user for user in info.context if user.id == int(id)]
schema = graphene.Schema(Query)
result = schema.execute(
'''query getUser($id: ID) {
user(id: $id) {
id
firstName
lastName
}
}''',
variable_values={'id': 2},
context_value=[User(first_name='Peter', last_name='Griffin', id=1),
User(first_name='Ivan', last_name='Banan', id=12),
User(first_name='Stepan', last_name='Orange', id=2)]
)
print(result.data['user'])
####################################################################################################################
# result = schema.execute(
# '{ user (id: 12)}'
# '''query getUser($id: ID) {
# user(id: $id)
# }''',
# variable_values={'id': '12'}
# )
# result = schema.execute(
# '''query getUser($id: ID) {
# user(id: $id) {
# id
# firstName
# lastName
# }
# }''',
# variable_values={'id': '12'}
# )
# print(result.data)
import graphene
class Query(graphene.ObjectType):
reverse = graphene.String(word=graphene.String())
def resolve_reverse(self, info, word):
return word[::-1]
schema = graphene.Schema(Query)
result = schema.execute('{ reverse (word: "полиндром")}')
print(result.data['reverse'])
class AuthorizationMiddleware(object):
def resolve(self, next, root, info, **args):
if info.field_name == 'user':
return None
return next(root, info, **args)
result = schema.execute('THE QUERY', middleware=[AuthorizationMiddleware()])
print(result.data)
from time import time as timer
# def timing_middleware(next, root, info, **args):
# start = timer()
# return_value = next(root, info, **args)
# duration = timer() - start
# logger.debug("{parent_type}.{field_name}: {duration} ms".format(
# parent_type=root._meta.name if root and hasattr(root, '_meta') else '',
# field_name=info.field_name,
# duration=round(duration * 1000, 2)
# ))
# return return_value
|
StarcoderdataPython
|
1642268
|
<gh_stars>0
# coding: UTF-8
from __future__ import absolute_import
import unittest
from usig_normalizador_amba.NormalizadorDireccionesAMBA import NormalizadorDireccionesAMBA
from usig_normalizador_amba.Direccion import Direccion
from usig_normalizador_amba.Errors import ErrorCruceInexistente, ErrorCalleInexistente
from usig_normalizador_amba.settings import CALLE_Y_CALLE
from test_commons import cargarCallejeroEstatico
class NormalizadorDireccionesAMBACalleYCalleTestCase(unittest.TestCase):
partidos = ['hurlingham', 'ituzaingo', 'jose_c_paz', 'la_matanza', 'san_isidro', 'san_miguel']
nd = NormalizadorDireccionesAMBA(include_list=partidos)
for n in nd.normalizadores:
cargarCallejeroEstatico(n.c)
def _checkDireccion(self, direccion, codigo_calle, nombre_calle, codigo_cruce, nombre_cruce, codigo_partido, localidad):
self.assertTrue(isinstance(direccion, Direccion))
self.assertEqual(direccion.tipo, CALLE_Y_CALLE)
self.assertEqual(direccion.calle.codigo, codigo_calle)
self.assertEqual(direccion.calle.nombre, nombre_calle)
self.assertEqual(direccion.cruce.codigo, codigo_cruce)
self.assertEqual(direccion.cruce.nombre, nombre_cruce)
self.assertEqual(direccion.partido.codigo, codigo_partido)
self.assertEqual(direccion.localidad, localidad)
def testCalleInexistente01(self):
self.assertRaises(ErrorCalleInexistente, self.nd.normalizar, u'Elm street y Roque Sáenz Peña')
def testCalleInexistente02(self):
self.assertRaises(ErrorCruceInexistente, self.nd.normalizar, u'Roque Sáenz Peña y kokusai dori')
def testDireccionExistentePeroEnOtroPartido(self):
res = self.nd.normalizar(u'Europa y Darwin, Ituzaingo')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 1, u'Debería haber 1 matching/s. Hay {0}'.format(len(res)))
self._checkDireccion(res[0], 6895, u'Europa', 6953, u'Darwin', 'ituzaingo', u'Ituzaingó')
self.assertRaises(ErrorCalleInexistente, self.nd.normalizar, u'Europa y Darwin, La Matanza')
def testDireccionExistentePeroEnOtraLocalidad(self):
res = self.nd.normalizar(u'Europa y Darwin, Ituzaingo')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 1, u'Debería haber 1 matching/s. Hay {0}'.format(len(res)))
self._checkDireccion(res[0], 6895, u'Europa', 6953, u'Darwin', 'ituzaingo', u'Ituzaingó')
self.assertRaises(ErrorCalleInexistente, self.nd.normalizar, u'Europa y Darwin, Villa Udaondo')
def testNormalizador_DireccionEnVariosPartidos(self):
res = self.nd.normalizar(u'san martin y peron')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 2, u'Debería haber 2 matching/s. Hay {0}'.format(len(res)))
self._checkDireccion(res[0], 46673, u'Avenida General San Martín', 82054, u'Av. Eva Perón', 'la_matanza', u'Ramos Mejia')
self._checkDireccion(res[1], 12124, u'General José de San Martín', 293550, u'<NAME>', 'san_miguel', u'Campo de Mayo')
def testNormalizador_PartidoConNumero(self):
res = self.nd.normalizar(u'<NAME> y <NAME>, 20 de Junio')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 1, u'Debería haber 1 matching/s. Hay {0}'.format(len(res)))
self._checkDireccion(res[0], 232752, u'<NAME>', 232731, u'<NAME>', 'la_matanza', u'20 de Junio')
|
StarcoderdataPython
|
8122793
|
<filename>examples/nist_sre/helpers.py<gh_stars>1-10
from __future__ import absolute_import, division, print_function
import os
import pickle
import shutil
import warnings
from collections import OrderedDict, defaultdict
from enum import Enum
from numbers import Number
import numba as nb
import numpy as np
from scipy.io import wavfile
from odin import fuel as F
from odin import visual as V
from odin.stats import freqcount, sampling_iter
from odin.utils import (Progbar, args_parse, cache_disk, catch_warnings_error,
catch_warnings_ignore, crypto, ctext, get_exppath,
get_logpath, get_module_from_path, get_script_name,
get_script_path, mpi, select_path)
# ===========================================================================
# Configuration
# ===========================================================================
class Config(object):
# ====== Acoustic features ====== #
FRAME_LENGTH = 0.025
STEP_LENGTH = 0.01
SAMPLE_RATE = 8000
WINDOW = 'hamm'
NFFT = 512
# Random seed for reproducibility
SUPER_SEED = 87654321
class SystemStates(Enum):
""" SystemStates """
UNKNOWN = 0
EXTRACT_FEATURES = 1
TRAINING = 2
SCORING = 3
# ===========================================================================
# General arguments for all experiments
# ===========================================================================
_args = args_parse(descriptions=[
('recipe', 'recipe is the name of acoustic Dataset defined in feature_recipes.py', None),
('-feat', 'specific name for the acoustic features, extracted from the given recipe', None, ''),
('-aug', 'augmentation dataset: musan, rirs; could be multiple dataset '
'for training: "musan,rirs"', None, 'None'),
('-ncpu', 'number of CPU to be used, if <= 0, auto-select', None, 0),
# for scoring
('-sys', 'name of the system for scoring: xvec, ivec, e2e ...', None, 'xvec'),
('-sysid', 'when a system is saved multiple checkpoint (e.g. sys.0.ai)', None, '-1'),
('-score', 'name of dataset for scoring, multiple dataset split by ","', None, 'sre18dev,sre18eval'),
('-backend', 'list of dataset for training the backend: '
'PLDA, SVM or Cosine', None, 'sre04,sre05,sre06,sre08,sre10,mx6'),
('-lda', 'if > 0, running LDA before training the backend '
'with given number of components', None, 0),
('-plda', 'number of PLDA components, must be > 0 ', None, 150),
('--mll', 'pre-fitting maximum likelihood before training PLDA', None, False),
('--showllk', 'show LLK during training of PLDA, this will slow thing down', None, False),
# for training
('-downsample', 'absolute number of files used for training', None, 0),
('-exclude', 'list of excluded dataset not for training,'
'multiple dataset split by ","', None, ''),
# for ivector
('-nmix', 'for i-vector training, number of Gaussian components', None, 2048),
('-tdim', 'for i-vector training, number of latent dimension for i-vector', None, 600),
# for DNN
('-utt', 'maximum length of sequence for training', None, 3),
('-seq', 'sequencing mode for training data, cut or pad', None, 'cut'),
('-batch', 'batch size, for training DNN, kaldi use 64, we use 128', None, 128),
('-epoch', 'number of epoch, for training DNN, kaldi only 3 epochs', None, 12),
('-clip', 'The maximum change in parameters allowed per minibatch, '
'measured in Euclidean norm over the entire model (change '
'will be clipped to this value), kaldi use 2.0', None, 2.0),
('-lr', 'learning rate for Adam, kaldi use 0.001 by default,'
' we use 0.01', None, 0.01),
# others
('-mindur', 'for filtering utterances, minimum duration of utterance '
'for training (in second)', None, 1),
('-minutt', 'for filtering utterances, minimum number of utterance of '
'each speaker for training', None, 3),
('--override', 'override previous experiments', None, False),
('--debug', 'enable debugging', None, False),
])
IS_DEBUGGING = bool(_args.debug)
IS_OVERRIDE = bool(_args.override)
MINIMUM_UTT_DURATION = int(_args.mindur) # in seconds
assert MINIMUM_UTT_DURATION > 0, "Minimum utterances duration must be greater than 0"
MINIMUM_UTT_PER_SPEAKERS = int(_args.minutt) # number of utterances
# this variable determine which state is running
CURRENT_STATE = SystemStates.UNKNOWN
# ====== Features extraction ====== #
FEATURE_RECIPE = str(_args.recipe)
FEATURE_NAME = FEATURE_RECIPE.split('_')[0] if len(str(_args.feat)) == 0 else str(_args.feat)
AUGMENTATION_NAME = _args.aug
TRAINING_DATASET = ['mx6', 'voxceleb1', 'voxceleb2', 'swb', 'fisher',
'sre04', 'sre05', 'sre06', 'sre08', 'sre10']
# ====== DNN ====== #
BATCH_SIZE = int(_args.batch)
EPOCH = int(_args.epoch)
LEARNING_RATE = float(_args.lr)
GRADIENT_CLIPPING = float(_args.clip)
# ====== searching for the appropriate system ====== #
SCORE_SYSTEM_NAME = _args.sys
SCORE_SYSTEM_ID = int(_args.sysid)
N_LDA = int(_args.lda)
N_PLDA = int(_args.plda)
assert N_PLDA > 0, "Number of PLDA components must > 0, but given: %d" % N_PLDA
PLDA_MAXIMUM_LIKELIHOOD = bool(_args.mll)
PLDA_SHOW_LLK = bool(_args.showllk)
# ====== system ====== #
NCPU = min(18, mpi.cpu_count() - 2) if _args.ncpu <= 0 else int(_args.ncpu)
# ====== helper for checking the requirement ====== #
def _check_feature_extraction_requirement():
# check requirement for feature extraction
from shutil import which
if which('sox') is None:
raise RuntimeError("`sox` was not installed")
if which('sph2pipe') is None:
raise RuntimeError("`sph2pipe` was not installed")
if which('ffmpeg') is None:
raise RuntimeError("`ffmpeg` was not installed")
def _check_recipe_name_for_extraction():
# check the requirement of recipe name for feature extraction
if '_' in FEATURE_RECIPE:
raise ValueError("'_' can appear in recipe name which is: '%s'" % FEATURE_RECIPE)
# ====== check the running script to determine the current running states ====== #
_script_name = get_script_name()
if _script_name in ('speech_augmentation', 'speech_features_extraction'):
CURRENT_STATE = SystemStates.EXTRACT_FEATURES
_check_feature_extraction_requirement()
_check_recipe_name_for_extraction()
elif _script_name in ('train_xvec', 'train_ivec', 'train_tvec',
'train_evec', 'analyze', 'analyze_data'):
CURRENT_STATE = SystemStates.TRAINING
elif _script_name in ('make_score',):
CURRENT_STATE = SystemStates.SCORING
_check_feature_extraction_requirement()
else:
raise RuntimeError("Unknown states for current running script: %s/%s" %
(get_script_path(), get_script_name()))
# some fancy log of current state
print(ctext('====================================', 'red'))
print(ctext("System state:", 'cyan'), ctext(CURRENT_STATE, 'yellow'))
print(ctext('====================================', 'red'))
# ===========================================================================
# FILE LIST PATH
# ===========================================================================
# ====== basic directories ====== #
EXP_DIR = get_exppath('sre', override=False)
# this folder store extracted vectors for training backend and extracting scores
VECTORS_DIR = os.path.join(EXP_DIR, 'vectors')
if not os.path.exists(VECTORS_DIR):
os.mkdir(VECTORS_DIR)
# this folder store the results
RESULT_DIR = os.path.join(EXP_DIR, 'results')
if not os.path.exists(RESULT_DIR):
os.mkdir(RESULT_DIR)
# this folder store the analysis
ANALYSIS_DIR = os.path.join(EXP_DIR, 'analysis')
if not os.path.exists(ANALYSIS_DIR):
os.mkdir(ANALYSIS_DIR)
# ====== raw data ====== #
PATH_BASE = select_path(
'/media/data2/SRE_DATA',
'/mnt/sda1/SRE_DATA',
'/mnt/sdb1/SRE_DATA',
default='')
# path to directory contain following folders:
##############
# * fisher
# * mx6
# * sre04
# * sre05
# * sre06
# * sre08
# * sre10
# * swb
# * voxceleb1
# * voxceleb2
###############
# * musan
# * rirs
###############
# * sre18dev
# * sre18eval
PATH_RAW_DATA = {
'mx6': PATH_BASE,
'voxceleb1': PATH_BASE,
'voxceleb2': PATH_BASE,
'swb': PATH_BASE,
'fisher': PATH_BASE,
'sre04': os.path.join(PATH_BASE, 'NIST1996_2008/SRE02_SRE06'),
'sre05': os.path.join(PATH_BASE, 'NIST1996_2008/SRE96_SRE05'),
'sre06': os.path.join(PATH_BASE, 'NIST1996_2008/SRE02_SRE06'),
'sre08': PATH_BASE,
'sre10': PATH_BASE,
'sre18dev': PATH_BASE,
'sre18eval': PATH_BASE,
# noise datasets
'musan': PATH_BASE,
'rirs': PATH_BASE,
}
# all features will be stored here
OUTPUT_DIR = select_path(
'/home/trung/data',
'/media/data1',
'/mnt/sda1'
)
PATH_ACOUSTIC_FEATURES = os.path.join(OUTPUT_DIR, "SRE_FEAT")
if not os.path.exists(PATH_ACOUSTIC_FEATURES):
os.mkdir(PATH_ACOUSTIC_FEATURES)
# ===========================================================================
# Load the file list
# ===========================================================================
sre_file_list = F.load_sre_list()
print('README at:', ctext(sre_file_list['README.txt'], 'cyan'))
sre_file_list = {k: v
for k, v in sre_file_list.items()
if isinstance(v, np.ndarray)}
print("Original dataset:")
for k, v in sorted(sre_file_list.items(), key=lambda x: x[0]):
print(' ', ctext('%-18s' % k, 'yellow'), ':',
ctext(v.shape, 'cyan'))
# ===========================================================================
# Validate scoring dataset
# ===========================================================================
def validate_scoring_dataset(in_path_raw, score_dataset, file_must_exist=True):
all_files = {}
for dsname in score_dataset:
if dsname not in sre_file_list:
raise ValueError("Cannot find dataset with name: '%s' in the file list" % dsname)
if dsname not in in_path_raw:
raise ValueError("Cannot find dataset with name: '%s' in provided path" % dsname)
base_path = in_path_raw[dsname]
ds = []
for row in sre_file_list[dsname]:
path = os.path.join(base_path, row[0])
# every file must exist
if bool(file_must_exist) and not os.path.exists(path):
raise RuntimeError("File not exist at path: %s" % path)
ds.append([path] + row[1:4].tolist() + [dsname])
all_files[dsname] = np.array(ds)
# Header:
# 0 1 2 3 4
# path, channel, name, something, dataset_name
return all_files
# ====== check dataset for scoring ====== #
if CURRENT_STATE == SystemStates.SCORING:
assert len(_args.score) > 0, \
"No dataset are provided for scoring, specify '-score' option"
# for scoring
SCORING_DATASETS = validate_scoring_dataset(
in_path_raw=PATH_RAW_DATA,
score_dataset=str(_args.score).strip().split(','))
print("Processed scoring dataset:")
for dsname, dsarray in sorted(SCORING_DATASETS.items(),
key=lambda x: x[0]):
print(' ', ctext('%-10s' % dsname, 'yellow'), ':',
'%s' % ctext(dsarray.shape, 'cyan'))
# for training the backend
BACKEND_DATASETS = validate_scoring_dataset(
in_path_raw=PATH_RAW_DATA,
score_dataset=str(_args.backend).strip().split(','),
file_must_exist=False)
assert len(BACKEND_DATASETS) > 0, \
"Datasets for training the backend must be provided"
print("Processed backend dataset:")
for dsname, dsarray in sorted(BACKEND_DATASETS.items(),
key=lambda x: x[0]):
print(' ', ctext('%-10s' % dsname, 'yellow'), ':',
'%s' % ctext(dsarray.shape, 'cyan'))
# ===========================================================================
# Validating the Noise dataset for augmentation
# ===========================================================================
@cache_disk
def validating_noise_data(in_path_raw):
# preparing
noise_dataset = ['musan', 'rirs']
all_files = defaultdict(list)
n_files = sum(len(sre_file_list[i])
for i in noise_dataset
if i in sre_file_list)
n_non_exist = 0
n_exist = 0
prog = Progbar(target=n_files, print_summary=True,
name="Validating noise dataset")
prog.set_summarizer(key='#Non-exist', fn=lambda x: x[-1])
prog.set_summarizer(key='#Exist', fn=lambda x: x[-1])
# check all dataset
for ds_name in noise_dataset:
if ds_name not in sre_file_list:
continue
if ds_name not in in_path_raw:
continue
base_path = in_path_raw[ds_name]
base_ds = all_files[ds_name]
# start validating
for row in sre_file_list[ds_name]:
# check file
path, channel, name, noise_type, duration = row[:5]
path = os.path.join(base_path, path)
if os.path.exists(path):
base_ds.append([path, channel, name, noise_type, duration])
n_exist += 1
else:
n_non_exist += 1
# update progress
prog['ds'] = ds_name
prog['#Exist'] = n_exist
prog['#Non-exist'] = n_non_exist
prog.add(1)
# ====== return ====== #
# Header:
# 0 1 2 3 4
# path, channel, name, noise_type, duration
return {key: np.array(sorted(val, key=lambda x: x[0]))
for key, val in all_files.items()}
# ==================== run the validation ==================== #
if CURRENT_STATE == SystemStates.EXTRACT_FEATURES:
ALL_NOISE = validating_noise_data(
in_path_raw=PATH_RAW_DATA)
print("Processed noise data:")
for ds_name, noise_list in ALL_NOISE.items():
print(" ", ctext(ds_name, 'yellow'), ':', noise_list.shape)
if len(noise_list) == 0:
continue
for name, count in sorted(freqcount(noise_list[:, 3]).items(),
key=lambda x: x[0]):
print(' ', ctext('%-10s' % name, 'yellow'), ':',
'%s(files)' % ctext('%-6d' % count, 'cyan'))
# ===========================================================================
# Validating the file list of training data
# ===========================================================================
@cache_disk
def validating_training_data(in_path_raw, training_dataset):
file_list = {ds: sre_file_list[ds]
for ds in training_dataset
if ds in sre_file_list}
# ====== meta info ====== #
all_files = []
non_exist_files = []
extension_count = defaultdict(int)
total_data = sum(v.shape[0]
for k, v in file_list.items()
                   if k not in ('musan', 'rirs'))
# ====== progress ====== #
prog = Progbar(target=total_data,
print_summary=True, print_report=True,
name="Preprocessing File List")
prog.set_summarizer('#Files', fn=lambda x: x[-1])
prog.set_summarizer('#Non-exist', fn=lambda x: x[-1])
# ====== iterating ====== #
for ds_name, data in sorted(file_list.items(),
key=lambda x: x[0]):
if ds_name in ('musan', 'rirs'):
continue
for row in data:
path, channel, name, spkid = row[:4]
assert channel in ('0', '1')
# check path provided
if ds_name in in_path_raw:
path = os.path.join(in_path_raw[ds_name], path)
# create new row
start_time = '-'
end_time = '-'
if ds_name == 'mx6':
start_time, end_time = row[-2:]
new_row = [path, channel, name,
ds_name + '_' + spkid, ds_name,
start_time, end_time]
# check file exist
if os.path.exists(path):
all_files.append(new_row)
else:
non_exist_files.append(new_row)
# extension
ext = os.path.splitext(path)[-1]
extension_count[ext + '-' + ds_name] += 1
# update progress
prog['Dataset'] = ds_name
prog['#Files'] = len(all_files)
prog['#Non-exist'] = len(non_exist_files)
prog.add(1)
# final results
all_files = np.array(all_files)
if len(all_files) == 0:
return all_files, np.array(non_exist_files), extension_count
# ====== check no duplicated name ====== #
n_files = len(all_files)
n_unique_files = len(np.unique(all_files[:, 2]))
assert n_files == n_unique_files, \
'Found duplicated name: %d != %d' % (n_files, n_unique_files)
# ====== check no duplicated speaker ====== #
n_spk = sum(len(np.unique(dat[:, 3]))
for name, dat in file_list.items()
if name not in ('musan', 'rirs'))
n_unique_spk = len(np.unique(all_files[:, 3]))
assert n_spk == n_unique_spk, \
'Found duplicated speakers: %d != %d' % (n_spk, n_unique_spk)
# ====== return ====== #
# Header:
# 0 1 2 3 4 5 6
# path, channel, name, spkid, dataset, start_time, end_time
return all_files, np.array(non_exist_files), extension_count
# ==================== run the validation process ==================== #
if CURRENT_STATE == SystemStates.EXTRACT_FEATURES:
(ALL_FILES, NON_EXIST_FILES, ext_count) = validating_training_data(
in_path_raw=PATH_RAW_DATA,
training_dataset=TRAINING_DATASET
)
if len(ALL_FILES) == 0:
raise RuntimeError("No files found for feature extraction")
# list of all dataset
ALL_DATASET = sorted(np.unique(ALL_FILES[:, 4]))
print("All extensions:")
for name, val in sorted(ext_count.items(), key=lambda x: x[0]):
print(' ', '%-16s' % name, ':', ctext('%-6d' % val, 'cyan'), '(files)')
print("#Speakers:", ctext(len(np.unique(ALL_FILES[:, 3])), 'cyan'))
# map Dataset_name -> speaker_ID
DS_SPK = defaultdict(list)
for row in ALL_FILES:
DS_SPK[row[4]].append(row[3])
DS_SPK = {k: sorted(set(v))
for k, v in DS_SPK.items()}
print("Processed datasets:")
for name, count in sorted(freqcount(ALL_FILES[:, 4]).items(),
key=lambda x: x[0]):
print(' ', ctext('%-10s' % name, 'yellow'), ':',
'%s(files)' % ctext('%-6d' % count, 'cyan'),
'%s(spk)' % ctext('%-4d' % len(DS_SPK[name]), 'cyan'))
# ===========================================================================
# PATH HELPER
# ===========================================================================
def get_model_path(system_name, logging=True):
"""
Parameters
----------
args_name : list of string
list of name for parsed argument, taken into account for creating
model name
Return
------
exp_dir, model_path, log_path
"""
args_name = []
if system_name == 'xvec':
args_name += ['utt', 'seq']
elif system_name == 'ivec':
args_name += ['nmix', 'tdim']
else:
raise ValueError("No support for system with name: %s" % system_name)
args_name += ['mindur', 'minutt']
# ====== base system and feature identity ====== #
name = str(system_name).lower()
name += '_' + FEATURE_RECIPE.replace('_', '')
name += '.' + FEATURE_NAME
# ====== concat the attributes ====== #
attributes = []
for i in [str(i) for i in args_name]:
attributes.append(str(getattr(_args, i)))
attributes = '_'.join(attributes)
name += '.' + attributes
# ====== check the exclude dataset ====== #
excluded_dataset = str(_args.exclude).strip()
if len(excluded_dataset) > 0:
dataset_str = []
for excluded in sorted(set(excluded_dataset.split(','))):
assert excluded in sre_file_list or excluded == 'noise', \
"Unknown excluded dataset with name: '%s'" % excluded
dataset_str.append(excluded)
dataset_str = '_'.join(dataset_str)
name += '.' + dataset_str
# ====== check save_path ====== #
save_path = os.path.join(EXP_DIR, name)
if os.path.exists(save_path) and IS_OVERRIDE:
print("Override path:", ctext(save_path, 'yellow'))
shutil.rmtree(save_path)
if not os.path.exists(save_path):
os.mkdir(save_path)
# ====== return path ====== #
log_path = get_logpath(name='log.txt', increasing=True,
odin_base=False, root=save_path)
model_path = os.path.join(save_path, 'model.ai')
if bool(logging):
print("Model path:", ctext(model_path, 'cyan'))
print("Log path:", ctext(log_path, 'cyan'))
return save_path, model_path, log_path
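# Hedged example (added for illustration; the recipe/feature names are assumed):
# with `-sys xvec`, FEATURE_RECIPE 'mfcc', FEATURE_NAME 'mfcc', the defaults
# `-utt 3 -seq cut -mindur 1 -minutt 3` and no excluded dataset, the experiment
# folder under EXP_DIR would be named 'xvec_mfcc.mfcc.3_cut_1_3', holding
# 'model.ai' and an auto-incremented 'log.txt':
#
#   save_path, model_path, log_path = get_model_path('xvec', logging=False)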
# ===========================================================================
# Data helper
# ===========================================================================
def prepare_dnn_feeder_recipe(name2label=None, n_speakers=None,
utt_length=None, seq_mode=None):
if utt_length is None:
utt_length = float(_args.utt)
if seq_mode is None:
seq_mode = str(_args.seq).strip().lower()
frame_length = int(utt_length / Config.STEP_LENGTH)
if seq_mode == 'cut':
seq_train = 'cut'
seq_score = 'mix'
elif seq_mode == 'pad':
seq_train = 'pad'
seq_score = 'pad'
else:
raise ValueError("Only support 'cut' or 'pad' sequencing mode")
recipes = [
F.recipes.Sequencing(frame_length=frame_length,
step_length=frame_length,
end=seq_score if CURRENT_STATE == SystemStates.SCORING
else seq_train,
pad_value=0, pad_mode='post',
data_idx=0),
]
if name2label is not None and n_speakers is not None:
recipes += [
F.recipes.Name2Label(lambda name:name2label[name],
ref_idx=0),
F.recipes.LabelOneHot(nb_classes=n_speakers, data_idx=1)
]
elif (name2label is not None and n_speakers is None) or\
(name2label is None and n_speakers is not None):
raise RuntimeError("name2label and n_speakers must both be None, or not-None")
return recipes
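# Worked example (added for clarity): with the default `-utt 3` and
# Config.STEP_LENGTH = 0.01 (s), each sequence spans 3 / 0.01 = 300 frames;
# in 'cut' mode training chunks are cut to that length while scoring uses
# 'mix' end-handling, and in 'pad' mode both are zero-padded ('post').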
def filter_utterances(X, indices, spkid,
min_dur=None, min_utt=None,
remove_min_length=True, remove_min_uttspk=True,
n_speakers=None, ncpu=None, save_path=None,
title=''):
"""
X : 2-D matrix
input features
indices : Mapping
utterance_name -> (start, end) in `X`
spkid : Mapping
utterance_name -> speaker_id
remove_min_length : bool (default: True)
if True, remove all files shorter than MINIMUM_UTT_DURATION
remove_min_uttspk : bool (default: True)
if True, remove all speakers with lower amount of utterances than
MINIMUM_UTT_PER_SPEAKERS
n_speakers : {None, int} (default: None)
if given, downsample the dataset by given number of speakers
save_path : {None, str} (default: None)
if given, pickle all filtered files to disk
"""
if min_dur is None:
min_dur = MINIMUM_UTT_DURATION
if min_utt is None:
min_utt = MINIMUM_UTT_PER_SPEAKERS
minimum_amount_of_frames = min_dur / Config.STEP_LENGTH
save_data = {}
prog = Progbar(target=len(indices),
print_report=True, print_summary=True,
name='Filtering broken utterances: %s' % title)
prog.set_summarizer('zero-length', fn=lambda x: x[-1])
prog.set_summarizer('min-frames', fn=lambda x: x[-1])
prog.set_summarizer('zero-var', fn=lambda x: x[-1])
prog.set_summarizer('small-var', fn=lambda x: x[-1])
prog.set_summarizer('overflow', fn=lambda x: x[-1])
# ====== mpi function for checking ====== #
@nb.jit(nopython=True, nogil=True)
def _fast_mean_var_ax0(z):
# using this function for calculating mean and variance
# can double the speed but cannot check overflow,
# only accept float32 or float64 input
s1 = np.zeros(shape=(z.shape[1],), dtype=z.dtype)
s2 = np.zeros(shape=(z.shape[1],), dtype=z.dtype)
for i in range(z.shape[0]):
s1 += z[i]
s2 += np.power(z[i], 2)
mean = s1 / z.shape[0]
var = s2 / z.shape[0] - np.power(mean, 2)
return mean, var
def _mpi_func(jobs):
for name, (start, end) in jobs:
y = X[start:end]
# flags
is_zero_len = False
is_zero_var = False
is_small_var = False
is_min_frames = False
is_overflow = False
# checking length
if y.shape[0] == 0:
is_zero_len = True
elif y.shape[0] < minimum_amount_of_frames:
is_min_frames = True
# checking statistics
else:
with catch_warnings_error(RuntimeWarning):
try:
# mean = np.mean(y, axis=-1)
var = np.var(y, axis=-1)
# min_val = np.min(y, axis=-1)
# max_val = np.max(y, axis=-1)
# numerical unstable
except RuntimeWarning as w:
if 'overflow encountered' in str(w):
is_overflow = True
else:
print(name, ':', w)
# process with more numerical filtering
else:
if np.any(np.isclose(var, 0)):
is_zero_var = True
# very heuristic and aggressive here
# filter-out anything with ~16.67% of low-var
# this could remove 1/3 of the original data
if np.sum(var < 0.01) > (len(y) / 6):
is_small_var = True
# return the flags
yield (name, is_zero_len, is_min_frames,
is_zero_var, is_small_var,
is_overflow)
# ====== running the multiprocessing filter ====== #
zero_len_files = {}
min_frame_files = {}
zero_var_files = {}
small_var_files = {}
overflow_files = {}
for res in mpi.MPI(jobs=sorted(indices.items(),
key=lambda x: x[1][0]),
func=_mpi_func,
ncpu=NCPU if ncpu is None else int(ncpu),
batch=250):
name = res[0]
if res[1]: zero_len_files[name] = 1
if res[2]: min_frame_files[name] = 1
if res[3]: zero_var_files[name] = 1
if res[4]: small_var_files[name] = 1
if res[5]: overflow_files[name] = 1
# update progress
prog['name'] = name[:48]
prog['zero-length'] = len(zero_len_files)
prog['min-frames'] = len(min_frame_files)
prog['zero-var'] = len(zero_var_files)
prog['small-var'] = len(small_var_files)
prog['overflow'] = len(overflow_files)
prog.add(1)
# ====== remove broken files ====== #
if not bool(remove_min_length):
min_frame_files = {}
new_indices = {name: (start, end)
for name, (start, end) in indices.items()
if name not in zero_len_files and
name not in min_frame_files and
name not in zero_var_files and
name not in small_var_files and
name not in overflow_files}
print("Filtered #utterances: %s/%s (files)" %
(ctext(len(indices) - len(new_indices), 'lightcyan'),
ctext(len(indices), 'cyan')))
indices = new_indices
# ====== store save data ====== #
save_data['zero_len'] = zero_len_files
save_data['min_dur'] = min_frame_files
save_data['zero_var'] = zero_var_files
save_data['small_var'] = small_var_files
save_data['overflow'] = overflow_files
# ====== filter-out by number of utt-per-speaker ====== #
if bool(remove_min_uttspk):
spk2utt = defaultdict(list)
for name in indices.keys():
spk2utt[spkid[name]].append(name)
n_utt_removed = 0
n_spk_removed = 0
removed_utt = []
keep_utt = []
for spk, utt in spk2utt.items():
if len(utt) < min_utt:
n_utt_removed += len(utt)
n_spk_removed += 1
removed_utt += utt
else:
keep_utt += utt
removed_utt = set(removed_utt)
keep_utt = set(keep_utt)
save_data['min_utt'] = removed_utt
print("Removed min-utt/spk: %s/%s(utt) %s/%s(spk)" % (
ctext(n_utt_removed, 'lightcyan'), ctext(len(indices), 'cyan'),
ctext(n_spk_removed, 'lightcyan'), ctext(len(spk2utt), 'cyan')
))
assert len(indices) == n_utt_removed + len(keep_utt), "Not possible!"
indices = {name: (start, end)
for name, (start, end) in indices.items()
if name in keep_utt}
# ====== sample by number of speakers ====== #
if isinstance(n_speakers, Number) and n_speakers > 0:
spk2utt = defaultdict(list)
for name, (start, end) in indices.items():
spk2utt[spkid[name]].append((name, (start, end)))
n_org_spk = len(spk2utt)
n_org_ids = len(indices)
# only need down-sampling with smaller number of speaker
if n_speakers < n_org_spk:
rand = np.random.RandomState(seed=Config.SUPER_SEED)
tmp = list(spk2utt.keys())
rand.shuffle(tmp)
sampled_spk = tmp[:n_speakers]
indices = []
for spk in sampled_spk:
indices += spk2utt[spk]
indices = dict(indices)
else:
sampled_spk = spk2utt
# print some log
print("Selected: %s/%s(spk) which have %s/%s(utt)" % (
ctext(len(sampled_spk), 'lightcyan'), ctext(n_org_spk, 'cyan'),
ctext(len(indices), 'lightcyan'), ctext(n_org_ids, 'cyan')
))
# ====== return the new indices ====== #
if save_path is not None:
try:
with open(save_path, 'wb') as save_file:
pickle.dump(save_data, save_file)
except Exception as e:
print("Cannot save filtering data to path: '%s', error: '%s'" %
(save_path, str(e)))
return indices
def prepare_dnn_data(save_dir, feat_name=None,
utt_length=None, seq_mode=None,
min_dur=None, min_utt=None,
exclude=None, train_proportion=None,
return_dataset=False):
assert os.path.isdir(save_dir), \
"Path to '%s' is not a directory" % save_dir
if feat_name is None:
feat_name = FEATURE_NAME
if utt_length is None:
utt_length = int(_args.utt)
if seq_mode is None:
seq_mode = str(_args.seq).strip().lower()
if min_dur is None:
min_dur = MINIMUM_UTT_DURATION
if min_utt is None:
min_utt = MINIMUM_UTT_PER_SPEAKERS
if exclude is None:
exclude = str(_args.exclude).strip()
print("Minimum duration: %s(s)" % ctext(min_dur, 'cyan'))
print("Minimum utt/spk : %s(utt)" % ctext(min_utt, 'cyan'))
# ******************** prepare dataset ******************** #
path = os.path.join(PATH_ACOUSTIC_FEATURES, FEATURE_RECIPE)
assert os.path.exists(path), "Cannot find acoustic dataset at path: %s" % path
ds = F.Dataset(path=path, read_only=True)
rand = np.random.RandomState(seed=Config.SUPER_SEED)
# ====== find the right feature ====== #
assert feat_name in ds, "Cannot find feature with name: %s" % feat_name
X = ds[feat_name]
ids_name = 'indices_%s' % feat_name
assert ids_name in ds, "Cannot find indices with name: %s" % ids_name
# ====== basic path ====== #
path_filtered_data = os.path.join(save_dir, 'filtered_files.pkl')
path_train_files = os.path.join(save_dir, 'train_files.pkl')
path_speaker_info = os.path.join(save_dir, 'speaker_info.pkl')
# ******************** cannot find cached data ******************** #
if any(not os.path.exists(p) for p in [path_filtered_data,
path_train_files,
path_speaker_info]):
# ====== exclude some dataset ====== #
if len(exclude) > 0:
exclude_dataset = {i: 1 for i in exclude.split(',')}
print("* Excluded dataset:", ctext(exclude_dataset, 'cyan'))
indices = {name: (start, end)
for name, (start, end) in ds[ids_name].items()
if ds['dsname'][name] not in exclude_dataset}
# special case exclude all the noise data
if 'noise' in exclude_dataset:
indices = {name: (start, end)
for name, (start, end) in indices.items()
if '/' not in name}
else:
indices = {i: j for i, j in ds[ids_name].items()}
# ====== down-sampling if necessary ====== #
if _args.downsample > 1000:
dataset2name = defaultdict(list)
# ordering the indices so we sample the same set every time
for name in sorted(indices.keys()):
dataset2name[ds['dsname'][name]].append(name)
n_total_files = len(indices)
n_sample_files = int(_args.downsample)
# get the percentage of each dataset
dataset2per = {i: len(j) / n_total_files
for i, j in dataset2name.items()}
# sampling based on percentage
_ = {}
for dsname, flist in dataset2name.items():
rand.shuffle(flist)
n_dataset_files = int(dataset2per[dsname] * n_sample_files)
_.update({i: indices[i]
for i in flist[:n_dataset_files]})
indices = _
# ====== * filter out "bad" sample ====== #
indices = filter_utterances(X=X, indices=indices, spkid=ds['spkid'],
min_utt=min_utt, min_dur=min_dur,
remove_min_length=True,
remove_min_uttspk=True,
n_speakers=None, ncpu=None,
save_path=path_filtered_data)
# ====== all training file name ====== #
# modify here to train full dataset
all_name = sorted(indices.keys())
rand.shuffle(all_name); rand.shuffle(all_name)
n_files = len(all_name)
print("#Files:", ctext(n_files, 'cyan'))
# ====== speaker mapping ====== #
name2spk = {name: ds['spkid'][name]
for name in all_name}
all_speakers = sorted(set(name2spk.values()))
spk2label = {spk: i
for i, spk in enumerate(all_speakers)}
name2label = {name: spk2label[spk]
for name, spk in name2spk.items()}
assert len(name2label) == len(all_name)
print("#Speakers:", ctext(len(all_speakers), 'cyan'))
# ====== stratify sampling based on speaker ====== #
valid_name = []
# create speakers' cluster
label2name = defaultdict(list)
for name, label in sorted(name2label.items(),
key=lambda x: x[0]):
label2name[label].append(name)
# for each speaker with >= 3 utterance
for label, name_list in sorted(label2name.items(),
key=lambda x: x[0]):
if len(name_list) < 3:
continue
n = max(1, int(0.05 * len(name_list))) # 5% for validation
valid_name += rand.choice(a=name_list, size=n, replace=False).tolist()
# train list is the rest
_ = set(valid_name)
train_name = [i for i in all_name if i not in _]
# ====== split training and validation ====== #
train_indices = {name: indices[name] for name in train_name}
valid_indices = {name: indices[name] for name in valid_name}
# ====== save cached data ====== #
with open(path_train_files, 'wb') as fout:
pickle.dump({'train': train_indices, 'valid': valid_indices},
fout)
with open(path_speaker_info, 'wb') as fout:
pickle.dump({'all_speakers': all_speakers,
'name2label': name2label,
'spk2label': spk2label},
fout)
# ******************** load cached data ******************** #
else:
with open(path_train_files, 'rb') as fin:
obj = pickle.load(fin)
train_indices = obj['train']
valid_indices = obj['valid']
with open(path_speaker_info, 'rb') as fin:
obj = pickle.load(fin)
all_speakers = obj['all_speakers']
name2label = obj['name2label']
spk2label = obj['spk2label']
# ******************** print log ******************** #
def summary_indices(ids):
datasets = defaultdict(int)
speakers = defaultdict(list)
text = ''
for name in sorted(ids.keys()):
text += name + str(ids[name])
dsname = ds['dsname'][name]
datasets[dsname] += 1
speakers[dsname].append(ds['spkid'][name])
for dsname in sorted(datasets.keys()):
print(' %-18s: %s(utt) %s(spk)' % (
dsname,
ctext('%6d' % datasets[dsname], 'cyan'),
ctext(len(set(speakers[dsname])), 'cyan')))
print(' MD5 checksum:', ctext(crypto.md5_checksum(text), 'lightcyan'))
# ====== training files ====== #
print("#Train files:", ctext('%-8d' % len(train_indices), 'cyan'),
"#spk:", ctext(len(set(name2label[name]
for name in train_indices.keys())), 'cyan'),
"#noise:", ctext(len([name for name in train_indices.keys()
if '/' in name]), 'cyan'))
summary_indices(ids=train_indices)
# ====== valid files ====== #
print("#Valid files:", ctext('%-8d' % len(valid_indices), 'cyan'),
"#spk:", ctext(len(set(name2label[name]
for name in valid_indices.keys())), 'cyan'),
"#noise:", ctext(len([name for name in valid_indices.keys()
if '/' in name]), 'cyan'))
summary_indices(ids=valid_indices)
# ******************** create the recipe ******************** #
assert all(name in name2label
for name in train_indices.keys())
assert all(name in name2label
for name in valid_indices.keys())
recipes = prepare_dnn_feeder_recipe(name2label=name2label,
n_speakers=len(all_speakers),
utt_length=utt_length, seq_mode=seq_mode)
# ====== downsample training set for analyzing if required ====== #
if train_proportion is not None:
assert 0 < train_proportion < 1
n_training = len(train_indices)
train_indices = list(train_indices.items())
rand.shuffle(train_indices); rand.shuffle(train_indices)
train_indices = dict(train_indices[:int(n_training * train_proportion)])
# ====== create feeder ====== #
train_feeder = F.Feeder(
data_desc=F.IndexedData(data=X,
indices=train_indices),
batch_mode='batch', ncpu=NCPU, buffer_size=256)
valid_feeder = F.Feeder(
data_desc=F.IndexedData(data=X,
indices=valid_indices),
batch_mode='batch', ncpu=max(2, NCPU // 4), buffer_size=64)
train_feeder.set_recipes(recipes)
valid_feeder.set_recipes(recipes)
print(train_feeder)
print(valid_feeder)
# ====== debugging ====== #
if IS_DEBUGGING:
import matplotlib
matplotlib.use('Agg')
prog = Progbar(target=len(valid_feeder), print_summary=True,
name="Iterating validation set")
samples = []
n_visual = 250
for name, idx, X, y in valid_feeder.set_batch(batch_size=100000,
batch_mode='file',
seed=None, shuffle_level=0):
assert idx == 0, "Utterances longer than %.2f(sec)" % (100000 * Config.STEP_LENGTH)
prog['X'] = X.shape
prog['y'] = y.shape
prog.add(X.shape[0])
# random sampling
if rand.rand(1) < 0.5 and len(samples) < n_visual:
for i in rand.randint(0, X.shape[0], size=4, dtype='int32'):
samples.append((name, X[i], np.argmax(y[i], axis=-1)))
# plot the spectrogram
n_visual = len(samples)
V.plot_figure(nrow=n_visual, ncol=8)
for i, (name, X, y) in enumerate(samples):
is_noise = '/' in name
assert name2label[name] == y, "Speaker label mismatch for file: %s" % name
name = name.split('/')[0]
dsname = ds['dsname'][name]
spkid = ds['spkid'][name]
ax = V.plot_spectrogram(X.T,
ax=(n_visual, 1, i + 1),
title='#%d' % (i + 1))
ax.set_title('[%s][%s]%s %s' %
('noise' if is_noise else 'clean', dsname, name, spkid),
fontsize=6)
# don't need to be high resolutions
V.plot_save('/tmp/tmp.pdf', dpi=12)
exit()
# ====== return ====== #
if bool(return_dataset):
return train_feeder, valid_feeder, all_speakers, ds
return train_feeder, valid_feeder, all_speakers
# ===========================================================================
# Evaluation and validation helper
# ===========================================================================
def validate_features_dataset(output_dataset_path, ds_validation_path):
ds = F.Dataset(output_dataset_path, read_only=True)
print(ds)
features = {}
for key, val in ds.items():
if 'indices_' in key:
name = key.split('_')[-1]
features[name] = (val, ds[name])
all_indices = [val[0] for val in features.values()]
# ====== sampling 250 files ====== #
all_files = sampling_iter(it=all_indices[0].keys(), k=250,
seed=Config.SUPER_SEED)
all_files = [f for f in all_files
if all(f in ids for ids in all_indices)]
print("#Samples:", ctext(len(all_files), 'cyan'))
# ====== ignore the 20-figures warning ====== #
with catch_warnings_ignore(RuntimeWarning):
for file_name in all_files:
X = {}
for feat_name, (ids, data) in features.items():
start, end = ids[file_name]
X[feat_name] = data[start:end][:].astype('float32')
V.plot_multiple_features(features=X, fig_width=20,
title='[%s]%s' % (ds['dsname'][file_name], file_name))
V.plot_save(ds_validation_path, dpi=12)
|
StarcoderdataPython
|
3452949
|
<filename>scripts/hooks/protect_branches.py
import re
import sys
from subprocess import run
from typing import NoReturn
def ProtectBranches() -> NoReturn:
hookid = "protect-branches"
protected_branches = [r"main", r"branch-\d+\.\d+"]
current_branch = run(["git", "branch", "--show-current"], capture_output=True).stdout.decode(sys.stdout.encoding).replace("\n", "")
for branch in protected_branches:
regex = re.compile(branch)
if regex.match(current_branch):
# Not portable to get user input, see:
# https://stackoverflow.com/questions/65844278/any-way-to-get-user-input-from-a-hook
print(
f"""
You were about to push to `{current_branch}`, which is disallowed by default.
If that's really what you intend, run the following command:
SKIP={hookid} git push
"""
)
sys.exit(1) # push will not execute
sys.exit(0) # push will execute
if __name__ == "__main__":
ProtectBranches()
|
StarcoderdataPython
|
1832305
|
__author__ = "<NAME>"
# QProgressBar
progressbar_style = '''
QProgressBar {
border: 0px;
border-top-left-radius: 4px;
border-bottom-left-radius: 4px;
border-top-right-radius: 4px;
border-bottom-right-radius: 4px;
height: 8px;
min-height: 8px;
max-height: 8px;
margin-top: 7px;
margin-bottom: 7px;
color: transparent;
}
QProgressBar::chunk {
border-top-left-radius: 4px;
border-bottom-left-radius: 4px;
border-top-right-radius: 4px;
border-bottom-right-radius: 4px;
background: palette(highlight);
}
QProgressBar::chunk:disabled {
border-top-left-radius: 4px;
border-bottom-left-radius: 4px;
border-top-right-radius: 4px;
border-bottom-right-radius: 4px;
background: palette(midlight);
}
'''
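# Hedged usage sketch (added for illustration; assumes a PyQt5 environment --
# the same `setStyleSheet` call works with PySide2/PySide6):
if __name__ == "__main__":
    from PyQt5.QtWidgets import QApplication, QProgressBar
    app = QApplication([])
    bar = QProgressBar()
    bar.setRange(0, 100)
    bar.setValue(42)
    # Apply the groove/chunk styling defined above.
    bar.setStyleSheet(progressbar_style)
    bar.show()
    app.exec_()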
|
StarcoderdataPython
|
9723271
|
from __future__ import division, absolute_import
from __future__ import print_function, unicode_literals
import xgboost as xgb
import numpy as np
import pandas as pd
from os import listdir
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.metrics import f1_score
def aggregate_features(X):
return np.mean(X, axis=0)
def get_features():
biz_id_list = []
result_array = np.zeros((2000, 1024))
path = 'features/inception-21k-global/'
feature_files = listdir(path)
for i, f in enumerate(feature_files):
biz_id = int(f[:-4])
feature_bag = np.load('features/inception-21k-global/' + f)
out_feature = aggregate_features(feature_bag)
biz_id_list.append(biz_id)
result_array[i, :] = out_feature
col_names = [
'incpt21k-glp-avg-' +
str(i) for i in range(
result_array.shape[1])]
feature_frame = pd.DataFrame(
data=result_array,
index=biz_id_list,
columns=col_names)
return feature_frame
def get_response():
biz2label = pd.read_csv("rawdata/train.csv")
result_array = np.zeros((2000, 9))
for class_no in range(9):
response = [
1 if str(class_no) in str(l).split(" ") else 0 for l in biz2label["labels"]]
result_array[:, class_no] = response
response_frame = pd.DataFrame(
data=result_array,
index=biz2label["business_id"],
columns=['class' + str(i) for i in range(9)],
dtype=int)
return response_frame
def get_data():
X = get_features()
Y = get_response()
dataframe = pd.merge(X, Y, left_index=True, right_index=True)
return dataframe
# def get_testdata():
# photo2ftr = pd.read_csv("features/inception-21k-global-test.csv",
# index_col=0, header=None)
# photo2biz = pd.read_csv("rawdata/test_photo_to_biz.csv")
# biz_ids = np.unique(photo2biz["business_id"])
# test_data = np.zeros((len(biz_ids), 1024))
# for i, biz_id in enumerate(biz_ids):
# dd = photo2biz[photo2biz["business_id"] == biz_id]
# photo_ids = np.unique(dd["photo_id"])
# feature = np.mean(photo2ftr.loc[photo_ids].as_matrix(), axis=0)
# test_data[i, :] = feature
# np.save('features/inception-21k-global-test.npy', test_data)
# dataframe = get_data().as_matrix()
# np.save('features/train_data.npy', dataframe)
def evalerror(preds, dtrain):
pred_labels = [1 if p > 0 else 0 for p in preds]
labels = dtrain.get_label()
return 'f1-score', f1_score(labels, pred_labels)
def fpreproc(dtrain, dtest, param):
label = dtrain.get_label()
ratio = float(np.sum(label == 0)) / np.sum(label == 1)
param['scale_pos_weight'] = ratio
return (dtrain, dtest, param)
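# Worked example (added for clarity, with made-up counts): if a class column
# has 1800 zeros and 200 ones, fpreproc sets scale_pos_weight = 1800/200 = 9.0,
# so each positive example is up-weighted to balance the skewed class.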
dataframe = np.load('features/train_data.npy')
X = dataframe[:, 0:1024]
y_array = dataframe[:, -9:].astype(int)
X_test = np.load("features/inception-21k-global-test.npy")
print(X_test.shape)
dtest = xgb.DMatrix(X_test)
preds_array = np.zeros((X_test.shape[0], y_array.shape[1]))
for y_index in range(9):
y = y_array[:, y_index]
dtrain = xgb.DMatrix(X, label=y)
param = {
'max_depth': 3,
'eta': 0.5,
'silent': 1,
'objective': 'binary:logitraw',
'nthread': 4}
cv_result = xgb.cv(
params=param,
dtrain=dtrain,
num_boost_round=2000,
nfold=5,
feval=evalerror,
early_stopping_rounds=30,
maximize=True,
verbose_eval=True,
fpreproc=fpreproc,
show_stdv=False)
# train model and predict on test set
opt_round_num = cv_result.shape[0] - 1
label = dtrain.get_label()
ratio = float(np.sum(label == 0)) / np.sum(label == 1)
param['scale_pos_weight'] = ratio
# add dtest here
clf = xgb.train(
params=param,
dtrain=dtrain,
num_boost_round=opt_round_num,
evals=[(dtrain, 'train')],
feval=evalerror,
maximize=True)
preds = (clf.predict(dtest) > 0).astype(int)
print(preds.shape)
print(preds_array.shape)
preds_array[:, y_index] = preds
np.savetxt("output/first_try.csv", preds_array, delimiter=",")
# sss = StratifiedShuffleSplit(dataframe[:, -3], 1, test_size=0.2)
# for train_index, test_index in sss:
# X_train, X_test = X[train_index], X[test_index]
# y_train, y_test = y[train_index], y[test_index]
# dtrain = xgb.DMatrix(X_train, label=y_train)
# dtest = xgb.DMatrix(X_test, label=y_test)
# param = {'max_depth':3, 'eta':0.1, 'silent':1, 'objective':'binary:logitraw', 'lambda':1}
# param['nthread'] = 4
# watchlist = [(dtrain,'train'), (dtest,'eval')]
# num_round = 2000
# def evalerror(preds, dtrain):
# pred_labels = [1 if p > 0 else 0 for p in preds]
# labels = dtrain.get_label()
# return 'f1-score', f1_score(labels, pred_labels)
# bst = xgb.train(param, dtrain, num_round, watchlist, feval=evalerror, early_stopping_rounds=30, maximize=True)
|
StarcoderdataPython
|
6635600
|
from pyplayground.server.pyenki import EPuck
from pyplayground.server.RobotBase import RobotBase
class RobotEPuck( RobotBase, EPuck ):
"""
Clase para interactuar con los robots del tipo EPuck de pyenki
"""
tipo = "epuck"
def __init__( self, name ):
"""
Constructor para robots del tipo "EPuck"
Parameters
name: Nombre para el robot
"""
RobotBase.__init__( self, name )
EPuck.__init__( self )
self.myLedRing = 0
self.myCameraImage = None
def setLedRing( self, on_off:int ) -> dict:
"""
Apaga o enciende el anillo que rodea al robot
Parameters
on_off: 1 para encender, 0 para apagar
"""
self.enkilock.acquire()
self.myLedRing = on_off
self.enkilock.release()
return {}
def getCameraImage( self ) -> bytes:
"""
Obtiene la imagen de la camara lineal del robot.
La imagen es de 60x1 pixeles
Returns
La magen lineal
"""
self.enkilock.acquire()
image = self.myCameraImage
self.enkilock.release()
image = bytes( [ int(v*255) for c in image for v in c.components] )
data = len(image).to_bytes( length=4, byteorder="big" ) + image
return data
def controlStep( self, dt:float ):
"""Invocada desde la libreria "pyenki" para cada robot"""
self.myControlStep( dt )
self.enkilock.acquire()
self.myCameraImage = self.cameraImage
EPuck.setLedRing( self, self.myLedRing )
self.enkilock.release()
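# Hedged client-side sketch (added for illustration, not part of the original
# class): decodes the byte payload produced by getCameraImage above. The
# 4-byte big-endian length prefix and the flat component layout follow from
# the packing code; `components_per_pixel` is an assumption, since pyenki
# colours may expose 3 (RGB) or 4 (RGBA) components.
def decode_camera_payload(data: bytes, components_per_pixel: int = 3):
    """Split a length-prefixed camera payload into per-pixel component tuples."""
    length = int.from_bytes(data[:4], byteorder="big")
    image = data[4:4 + length]
    n = components_per_pixel
    # Group consecutive component bytes (0-255) back into per-pixel tuples.
    return [tuple(image[i:i + n]) for i in range(0, len(image), n)]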
|
StarcoderdataPython
|
1770982
|
<gh_stars>1-10
from hiclib.hicShared import byChrEig
from mirnylib.genome import Genome
import matplotlib.pyplot as plt
from mirnylib.systemutils import setExceptionHook
from mirnylib.plotting import nicePlot, maximumContrastList
setExceptionHook()
gen = Genome('../../../../hg19', readChrms=["#","X"])
mychroms = [0,2,5,13,20]
eigs = byChrEig("../fragmentHiC/test-1M-byChr.hm", gen, chromosomes = mychroms)
for j,chrom in enumerate(mychroms):
plt.scatter(eigs[j],gen.GCBin[chrom], color = maximumContrastList[j], label = "Chr {0}".format(chrom+1))
plt.xlabel("eigenvector")
plt.ylabel("GC content")
nicePlot()
|
StarcoderdataPython
|
11391127
|
import numpy
def add(a, b):
"""Calculates the sum of a and b.
:param a: The first operand of the sum.
:param b: The second operand of the sum.
"""
return a + b
def subtract(a, b):
"""Calculates the subtraction of b from a.
:param a: The first operand of the subtraction.
:param b: The second operand of the subtraction.
"""
return a - b
def log2(a):
"""Calculates the base 2 log of a.
:param a: The operand of the base 2 logarithm.
"""
return numpy.log2(a)
|
StarcoderdataPython
|
162820
|
from lml.registry import PluginInfoChain
__test_plugins__ = PluginInfoChain(__name__).add_a_plugin("test_io2", "reader")
|
StarcoderdataPython
|
9600709
|
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def __str__(self):
l = ""
head = self
while head is not None:
l += str(head.val)
head = head.next
return l
class Solution:
def addTwoNumbers(self, l1, l2):
p = l1
q = l2
dummy = ListNode(0)
head = dummy
carry = 0
while p is not None or q is not None:
            total = (0 if p is None else p.val) + (0 if q is None else q.val) + carry
            carry = total // 10
            head.next = ListNode(total % 10)
head = head.next
print(head.val, carry)
if p is not None:
p = p.next
if q is not None:
q = q.next
if carry > 0:
head.next = ListNode(carry)
return dummy.next
def traverse(l1):
res = ""
head = l1
condition = True
while condition:
res += str(head.val)
head = head.next
condition = head is not None
return res
if __name__ == "__main__":
l1 = ListNode(1)
l2 = ListNode(2)
l3 = ListNode(3)
l1.next = l2
l2.next = l3
l3 = ListNode(7)
l4 = ListNode(7)
# l5 = ListNode(8)
l3.next = l4
# l4.next = l5
print(l1)
print(l3)
s = Solution()
print('sum', s.addTwoNumbers(l1, l3))
|
StarcoderdataPython
|
8050913
|
<reponame>RealityBending/Pyllusion<filename>pyllusion/Delboeuf/delboeuf_parameters.py
import numpy as np
def _delboeuf_parameters(
illusion_strength=0, difference=0, size_min=0.25, distance=1, distance_auto=False
):
# Size inner circles
parameters = _delboeuf_parameters_sizeinner(
difference=difference, size_min=size_min
)
inner_size_left = parameters["Size_Inner_Left"]
inner_size_right = parameters["Size_Inner_Right"]
# Base size outer circles
outer_size_left = inner_size_left + (0.2 * size_min)
outer_size_right = inner_size_right + (0.2 * size_min)
# Actual outer size based on illusion
outer_size_left, outer_size_right = _delboeuf_parameters_sizeouter(
outer_size_left,
outer_size_right,
difference=difference,
illusion_strength=illusion_strength,
)
# Get location and distances
if distance_auto is False:
distance_reference = 'Between Centers'
distance_centers = distance
position_left, position_right = -(distance_centers / 2), (distance_centers / 2)
distance_edges_inner = distance_centers - (
inner_size_left / 2 + inner_size_right / 2
)
distance_edges_outer = distance_centers - (
outer_size_left / 2 + outer_size_right / 2
)
else:
distance_reference = 'Between Edges'
distance_edges_outer = distance
distance_centers = distance_edges_outer + (
inner_size_left / 2 + inner_size_right / 2
)
distance_edges_inner = distance_centers - (
outer_size_left / 2 + outer_size_right / 2
)
position_left, position_right = -(distance_centers / 2), (distance_centers / 2)
parameters.update(
{
"Illusion": "Delboeuf",
"Illusion_Strength": illusion_strength,
"Illusion_Type": "Incongruent" if illusion_strength > 0 else "Congruent",
"Size_Outer_Left": outer_size_left,
"Size_Outer_Right": outer_size_right,
"Distance": distance_centers,
"Distance_Reference": distance_reference,
"Distance_Edges_Inner": distance_edges_inner,
"Distance_Edges_Outer": distance_edges_outer,
"Size_Inner_Smaller": np.min([inner_size_left, inner_size_right]),
"Size_Inner_Larger": np.max([inner_size_left, inner_size_right]),
"Size_Outer_Smaller": np.min([outer_size_left, outer_size_right]),
"Size_Outer_Larger": np.max([outer_size_left, outer_size_right]),
"Position_Left": position_left,
"Position_Right": position_right,
}
)
return parameters
# ------------------------------------------
# Utilities
# ------------------------------------------
def _delboeuf_parameters_sizeouter(
outer_size_left,
outer_size_right,
illusion_strength=0,
difference=0,
both_sizes=False,
):
if both_sizes is True:
illusion_strength = illusion_strength / 2
# Actual outer size based on illusion
if difference > 0: # if right is smaller
if illusion_strength > 0:
outer_size_left = np.sqrt(1 + np.abs(illusion_strength)) * outer_size_left
if both_sizes is True:
outer_size_right = outer_size_right / np.sqrt(
1 + np.abs(illusion_strength)
)
else:
outer_size_right = np.sqrt(1 + np.abs(illusion_strength)) * outer_size_right
if both_sizes is True:
outer_size_left = outer_size_left / np.sqrt(
1 + np.abs(illusion_strength)
)
else:
if illusion_strength > 0:
outer_size_right = np.sqrt(1 + np.abs(illusion_strength)) * outer_size_right
if both_sizes is True:
outer_size_left = outer_size_left / np.sqrt(
1 + np.abs(illusion_strength)
)
else:
outer_size_left = np.sqrt(1 + np.abs(illusion_strength)) * outer_size_left
if both_sizes is True:
outer_size_right = outer_size_right / np.sqrt(
1 + np.abs(illusion_strength)
)
return outer_size_left, outer_size_right
def _delboeuf_parameters_sizeinner(difference=0, size_min=0.25):
size_bigger = np.sqrt(1 + np.abs(difference)) * size_min
if difference > 0: # if right is smaller
inner_size_right = size_min
inner_size_left = size_bigger
else:
inner_size_right = size_bigger
inner_size_left = size_min
parameters = {
"Difference": difference,
"Size_Inner_Left": inner_size_left,
"Size_Inner_Right": inner_size_right,
"Size_Inner_Difference": np.pi
* (size_bigger / 2) ** 2
/ np.pi
* (size_min / 2) ** 2,
}
return parameters
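# Hedged usage sketch (added for illustration; the argument values are
# arbitrary): builds the parameter dictionary for an incongruent stimulus
# where the right inner circle is smaller and the illusion enlarges the
# left outer ring.
if __name__ == "__main__":
    params = _delboeuf_parameters(illusion_strength=1, difference=2,
                                  size_min=0.25, distance=1)
    print(params["Size_Inner_Left"], params["Size_Inner_Right"])
    print(params["Size_Outer_Left"], params["Size_Outer_Right"])
    print(params["Illusion_Type"])  # "Incongruent" because strength > 0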
|
StarcoderdataPython
|
4896440
|
import pytest
from click.testing import CliRunner
from pagefunc import *
from cli import main
@pytest.fixture
def runner():
return CliRunner()
def test_cli(runner):
result = runner.invoke(main)
assert result.exit_code == 0
assert not result.exception
assert result.output.strip() == 'Hello, world.'
def test_cli_with_option(runner):
result = runner.invoke(main, ['--as-cowboy'])
assert not result.exception
assert result.exit_code == 0
assert result.output.strip() == 'Howdy, world.'
def test_cli_with_arg(runner):
result = runner.invoke(main, ['Brian'])
assert result.exit_code == 0
assert not result.exception
assert result.output.strip() == 'Hello, Brian.'
|
StarcoderdataPython
|
9701554
|
<reponame>kjdavidson/NoisePy
import pyasdf
import numpy as np
import time
'''
this script compares the speed of reading ASDF files with different size
the ultimate goal is to find the best way to read a chunck of data stored in the xxx
'''
def read_data(sfile,nseg,data_type,path):
with pyasdf.ASDFDataSet(sfile,mode='r') as ds:
data_types = ds.auxiliary_data.list()
if data_type not in data_types:
raise ValueError('%s not in the data_type list'%data_type)
paths = ds.auxiliary_data[data_type].list()
if path not in paths:
raise ValueError('%s not in the path list'%path)
Nfft = ds.auxiliary_data[data_type][path].parameters['nfft']
data = np.zeros((nseg,Nfft),dtype=np.complex64)
data = ds.auxiliary_data[data_type][path].data[:]
def read_data1(sfile,indx1,indx2,data_type,path):
with pyasdf.ASDFDataSet(sfile,mode='r') as ds:
data_types = ds.auxiliary_data.list()
if data_type not in data_types:
raise ValueError('%s not in the data_type list'%data_type)
paths = ds.auxiliary_data[data_type].list()
if path not in paths:
raise ValueError('%s not in the path list'%path)
Nfft = ds.auxiliary_data[data_type][path].parameters['nfft']
nseg = indx2-indx1+1
data = np.zeros((nseg,Nfft),dtype=np.complex64)
data = ds.auxiliary_data[data_type][path].data[indx1:indx2,:]
sfile1 = '/Users/chengxin/Documents/Harvard/Kanto_basin/Mesonet_BW/FFT/test/E.AYHM.h5'
sfile2 = '/Users/chengxin/Documents/Harvard/Kanto_basin/Mesonet_BW/FFT/E.AYHM.h5'
seg1 = 10
seg2 = 47
type1 = 'HNU'
type2 = 'HNU'
tag1 = '2010_12_20_1'
tag2 = '2010_12_20'
for ii in range(4):
    %time read_data(sfile1,seg1,type1,tag1)
    %time read_data1(sfile2,10,20,type2,tag2)
    %time read_data(sfile2,seg2,type2,tag2)
|
StarcoderdataPython
|
9790318
|
# Copyright (C) 2015-2020 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from hamcrest import ( assert_that,
contains_exactly,
contains_inanyorder,
empty,
equal_to,
has_entries,
has_entry,
matches_regexp )
from unittest.mock import patch
from pprint import pformat
import os
import pytest
import requests
from ycmd import handlers
from ycmd.tests.rust import PathToTestFile, SharedYcmd
from ycmd.tests.test_utils import ( BuildRequest,
ChunkMatcher,
ErrorMatcher,
ExpectedFailure,
LocationMatcher,
WithRetry )
from ycmd.utils import ReadFile
RESPONSE_TIMEOUT = 5
def RunTest( app, test, contents = None ):
if not contents:
contents = ReadFile( test[ 'request' ][ 'filepath' ] )
def CombineRequest( request, data ):
kw = request
request.update( data )
return BuildRequest( **kw )
# Because we aren't testing this command, we *always* ignore errors. This
# is mainly because we (may) want to test scenarios where the completer
# throws an exception and the easiest way to do that is to throw from
# within the FlagsForFile function.
app.post_json( '/event_notification',
CombineRequest( test[ 'request' ], {
'event_name': 'FileReadyToParse',
'contents': contents,
'filetype': 'rust',
} ),
expect_errors = True )
# We also ignore errors here, but then we check the response code
# ourself. This is to allow testing of requests returning errors.
response = app.post_json(
'/run_completer_command',
CombineRequest( test[ 'request' ], {
'completer_target': 'filetype_default',
'contents': contents,
'filetype': 'rust',
'command_arguments': ( [ test[ 'request' ][ 'command' ] ]
+ test[ 'request' ].get( 'arguments', [] ) )
} ),
expect_errors = True
)
print( f'completer response: { pformat( response.json ) }' )
assert_that( response.status_code,
equal_to( test[ 'expect' ][ 'response' ] ) )
assert_that( response.json, test[ 'expect' ][ 'data' ] )
@SharedYcmd
def Subcommands_DefinedSubcommands_test( app ):
subcommands_data = BuildRequest( completer_target = 'rust' )
assert_that( app.post_json( '/defined_subcommands', subcommands_data ).json,
contains_inanyorder( 'FixIt',
'Format',
'GetDoc',
'GetType',
'GoTo',
'GoToDeclaration',
'GoToDefinition',
'GoToImplementation',
'GoToReferences',
'GoToSymbol',
'GoToType',
'RefactorRename',
'RestartServer' ) )
@SharedYcmd
def Subcommands_ServerNotInitialized_test( app ):
filepath = PathToTestFile( 'common', 'src', 'main.rs' )
completer = handlers._server_state.GetFiletypeCompleter( [ 'rust' ] )
@patch.object( completer, '_ServerIsInitialized', return_value = False )
def Test( app, cmd, arguments, *args ):
RunTest( app, {
'description': 'Subcommand ' + cmd + ' handles server not ready',
'request': {
'command': cmd,
'line_num': 1,
'column_num': 1,
'filepath': filepath,
'arguments': arguments,
},
'expect': {
'response': requests.codes.internal_server_error,
'data': ErrorMatcher( RuntimeError,
'Server is initializing. Please wait.' ),
}
} )
Test( app, 'Format', [] )
Test( app, 'FixIt', [] )
Test( app, 'GetType', [] )
Test( app, 'GetDoc', [] )
Test( app, 'GoTo', [] )
Test( app, 'GoToDeclaration', [] )
Test( app, 'GoToDefinition', [] )
Test( app, 'GoToImplementation', [] )
Test( app, 'GoToReferences', [] )
Test( app, 'RefactorRename', [ 'test' ] )
@SharedYcmd
def Subcommands_Format_WholeFile_test( app ):
filepath = PathToTestFile( 'common', 'src', 'main.rs' )
RunTest( app, {
'description': 'Formatting is applied on the whole file',
'request': {
'command': 'Format',
'filepath': filepath,
'options': {
'tab_size': 2,
'insert_spaces': True
}
},
'expect': {
'response': requests.codes.ok,
'data': has_entries( {
'fixits': contains_exactly( has_entries( {
'chunks': contains_exactly(
# Let's just rewrite the whole file...
ChunkMatcher( "mod test;\n\nuse test::*;\n\nstruct Earth {}"
"\nstruct Mars {}\ntrait Atmosphere {}\nimpl "
"Atmosphere for Earth {}\nimpl Atmosphere for "
"Mars {}\n\nfn main() {\n create_universe();"
"\n let builder = Builder {};\n builder."
"build_\n}\n\nfn format_test() {\n let a: "
"i32 = 5;\n}\n",
LocationMatcher( filepath, 1, 1 ),
LocationMatcher( filepath, 23, 1 ) ),
)
} ) )
} )
}
} )
@ExpectedFailure( 'rangeFormat is not yet implemented',
matches_regexp( '\nExpected: <200>\n but: was <500>\n' ) )
@SharedYcmd
def Subcommands_Format_Range_test( app ):
filepath = PathToTestFile( 'common', 'src', 'main.rs' )
RunTest( app, {
'description': 'Formatting is applied on some part of the file',
'request': {
'command': 'Format',
'filepath': filepath,
'range': {
'start': {
'line_num': 17,
'column_num': 1,
},
'end': {
'line_num': 22,
'column_num': 2
}
},
'options': {
'tab_size': 4,
'insert_spaces': False
}
},
'expect': {
'response': requests.codes.ok,
'data': has_entries( {
'fixits': contains_exactly( has_entries( {
'chunks': contains_exactly(
ChunkMatcher( 'fn format_test() {\n'
'\tlet a: i32 = 5;\n',
LocationMatcher( filepath, 17, 1 ),
LocationMatcher( filepath, 22, 1 ) ),
)
} ) )
} )
}
} )
@SharedYcmd
def Subcommands_GetDoc_NoDocumentation_test( app ):
RunTest( app, {
'description': 'GetDoc on a function with no documentation '
'raises an error',
'request': {
'command': 'GetDoc',
'line_num': 4,
'column_num': 11,
'filepath': PathToTestFile( 'common', 'src', 'test.rs' ),
},
'expect': {
'response': requests.codes.internal_server_error,
'data': ErrorMatcher( RuntimeError,
'No documentation available.' )
}
} )
@WithRetry
@SharedYcmd
def Subcommands_GetDoc_Function_test( app ):
RunTest( app, {
'description': 'GetDoc on a function returns its documentation',
'request': {
'command': 'GetDoc',
'line_num': 2,
'column_num': 8,
'filepath': PathToTestFile( 'common', 'src', 'test.rs' ),
},
'expect': {
'response': requests.codes.ok,
'data': has_entry( 'detailed_info',
'common::test\n'
'pub fn create_universe()\n'
'---\n'
'Be careful when using that function' ),
}
} )
@SharedYcmd
def Subcommands_GetType_UnknownType_test( app ):
RunTest( app, {
'description': 'GetType on a unknown type raises an error',
'request': {
'command': 'GetType',
'line_num': 2,
'column_num': 4,
'filepath': PathToTestFile( 'common', 'src', 'test.rs' ),
},
'expect': {
'response': requests.codes.internal_server_error,
'data': ErrorMatcher( RuntimeError, 'Unknown type.' )
}
} )
@WithRetry
@SharedYcmd
def Subcommands_GetType_Function_test( app ):
RunTest( app, {
'description': 'GetType on a function returns its type',
'request': {
'command': 'GetType',
'line_num': 2,
'column_num': 22,
'filepath': PathToTestFile( 'common', 'src', 'test.rs' ),
},
'expect': {
'response': requests.codes.ok,
'data': has_entry( 'message', 'pub fn create_universe()' ),
}
} )
def RunGoToTest( app, command, test ):
folder = PathToTestFile( 'common', 'src' )
filepath = os.path.join( folder, test[ 'req' ][ 0 ] )
request = {
'command': command,
'line_num': test[ 'req' ][ 1 ],
'column_num': test[ 'req' ][ 2 ],
'filepath': filepath,
}
response = test[ 'res' ]
if isinstance( response, list ):
expect = {
'response': requests.codes.ok,
'data': contains_inanyorder( *[
LocationMatcher(
os.path.join( folder, location[ 0 ] ),
location[ 1 ],
location[ 2 ]
) for location in response
] )
}
elif isinstance( response, tuple ):
expect = {
'response': requests.codes.ok,
'data': LocationMatcher(
os.path.join( folder, response[ 0 ] ),
response[ 1 ],
response[ 2 ]
)
}
else:
error_type = test.get( 'exc', RuntimeError )
expect = {
'response': requests.codes.internal_server_error,
'data': ErrorMatcher( error_type, test[ 'res' ] )
}
RunTest( app, {
'request': request,
'expect' : expect
} )
@pytest.mark.parametrize( 'test', [
# Variable
{ 'req': ( 'main.rs', 14, 5 ), 'res': ( 'test.rs', 4, 12 ) },
# Type
{ 'req': ( 'main.rs', 13, 19 ), 'res': ( 'test.rs', 4, 12 ) },
# Function
{ 'req': ( 'main.rs', 12, 14 ), 'res': 'Cannot jump to location' },
# Keyword
{ 'req': ( 'main.rs', 3, 2 ), 'res': 'Cannot jump to location' },
] )
@SharedYcmd
def Subcommands_GoToType_Basic_test( app, test ):
RunGoToTest( app, 'GoToType', test )
@pytest.mark.parametrize( 'test', [
# Structure
{ 'req': ( 'main.rs', 8, 24 ), 'res': ( 'main.rs', 5, 8 ) },
# Function
{ 'req': ( 'main.rs', 12, 14 ), 'res': ( 'test.rs', 2, 8 ) },
# Implementation
{ 'req': ( 'main.rs', 9, 12 ), 'res': ( 'main.rs', 7, 7 ) },
# Keyword
{ 'req': ( 'main.rs', 3, 2 ), 'res': 'Cannot jump to location' },
] )
@pytest.mark.parametrize( 'command', [ 'GoToDeclaration',
'GoToDefinition',
'GoTo' ] )
@WithRetry
@SharedYcmd
def Subcommands_GoTo_test( app, command, test ):
RunGoToTest( app, command, test )
@pytest.mark.parametrize( 'test', [
# Structure
{ 'req': ( 'main.rs', 5, 9 ), 'res': ( 'main.rs', 8, 21 ) },
# Trait
{ 'req': ( 'main.rs', 7, 7 ), 'res': [ ( 'main.rs', 8, 21 ),
( 'main.rs', 9, 21 ) ] },
] )
@WithRetry
@SharedYcmd
def Subcommands_GoToImplementation_test( app, test ):
RunGoToTest( app, 'GoToImplementation', test )
@WithRetry
@SharedYcmd
def Subcommands_GoToImplementation_Failure_test( app ):
RunGoToTest( app,
'GoToImplementation',
{ 'req': ( 'main.rs', 11, 2 ),
'res': 'Cannot jump to location',
'exc': RuntimeError } )
@pytest.mark.parametrize( 'test', [
# Struct
{ 'req': ( 'main.rs', 9, 22 ), 'res': [ ( 'main.rs', 6, 8 ),
( 'main.rs', 9, 21 ) ] },
# Function
{ 'req': ( 'main.rs', 12, 8 ), 'res': [ ( 'test.rs', 2, 8 ),
( 'main.rs', 12, 5 ) ] },
# Implementation
{ 'req': ( 'main.rs', 8, 10 ), 'res': [ ( 'main.rs', 7, 7 ),
( 'main.rs', 8, 6 ),
( 'main.rs', 9, 6 ) ] },
# Keyword
{ 'req': ( 'main.rs', 1, 1 ), 'res': 'Cannot jump to location' }
] )
@SharedYcmd
def Subcommands_GoToReferences_test( app, test ):
RunGoToTest( app, 'GoToReferences', test )
@WithRetry
@SharedYcmd
def Subcommands_RefactorRename_Works_test( app ):
main_filepath = PathToTestFile( 'common', 'src', 'main.rs' )
test_filepath = PathToTestFile( 'common', 'src', 'test.rs' )
RunTest( app, {
    'description': 'RefactorRename on a function renames all its occurrences',
'request': {
'command': 'RefactorRename',
'arguments': [ 'update_universe' ],
'line_num': 12,
'column_num': 16,
'filepath': main_filepath
},
'expect': {
'response': requests.codes.ok,
'data': has_entries( {
'fixits': contains_exactly( has_entries( {
'text': '',
'chunks': contains_exactly(
ChunkMatcher( 'update_universe',
LocationMatcher( test_filepath, 2, 8 ),
LocationMatcher( test_filepath, 2, 23 ) ),
ChunkMatcher( 'update_universe',
LocationMatcher( main_filepath, 12, 5 ),
LocationMatcher( main_filepath, 12, 20 ) ),
)
} ) )
} )
}
} )
@SharedYcmd
def Subcommands_RefactorRename_Invalid_test( app ):
RunTest( app, {
'description': 'RefactorRename raises an error when cursor is invalid',
'request': {
'command': 'RefactorRename',
'arguments': [ 'update_universe' ],
'line_num': 15,
'column_num': 7,
'filepath': PathToTestFile( 'common', 'src', 'main.rs' )
},
'expect': {
'response': requests.codes.internal_server_error,
'data': ErrorMatcher( RuntimeError,
'Cannot rename the symbol under cursor.' )
}
} )
@SharedYcmd
def Subcommands_FixIt_EmptyResponse_test( app ):
filepath = PathToTestFile( 'common', 'src', 'main.rs' )
RunTest( app, {
'description': 'FixIt on a line with no codeAction returns empty response',
'request': {
'command': 'FixIt',
'line_num': 22,
'column_num': 1,
'filepath': filepath
},
'expect': {
'response': requests.codes.ok,
'data': has_entry( 'fixits', empty() )
}
} )
@SharedYcmd
def Subcommands_FixIt_Basic_test( app ):
filepath = PathToTestFile( 'common', 'src', 'main.rs' )
RunTest( app, {
'description': 'Simple FixIt test',
'request': {
'command': 'FixIt',
'line_num': 17,
'column_num': 2,
'filepath': filepath
},
'expect': {
'response': requests.codes.ok,
'data': has_entries( {
'fixits': contains_exactly( has_entries( {
'chunks': contains_exactly(
ChunkMatcher( 'pub(crate) ',
LocationMatcher( filepath, 17, 1 ),
LocationMatcher( filepath, 17, 1 ) )
)
} ) )
} )
},
} )
def Dummy_test():
# Workaround for https://github.com/pytest-dev/pytest-rerunfailures/issues/51
assert True
|
StarcoderdataPython
|
4936907
|
from datetime import datetime
import json
import pg8000
import re
from config import CONFIG_DICT
from service import db_access
INSERT_TITLE_QUERY_FORMAT = (
'insert into title_register_data('
'title_number, register_data, geometry_data, is_deleted, last_modified, official_copy_data, lr_uprns'
')'
'values('
"%s, %s, %s, %s, %s, %s, '{{ {} }}'"
')'
)
DELETE_ALL_TITLES_QUERY = 'delete from title_register_data;'
def _get_db_connection_params():
connection_string_regex = (
r'^.*?//(?P<user>.+?):(?P<password>.+?)@(?P<host>.+?):(?P<port>\d+)/(?P<database>.+)$'
)
db_connection_string = CONFIG_DICT['SQLALCHEMY_DATABASE_URI']
matches = re.match(connection_string_regex, db_connection_string)
return {
'user': matches.group('user'),
'password': matches.group('password'),
'host': matches.group('host'),
'port': int(matches.group('port')),
'database': matches.group('database'),
}
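# Illustrative parse (the URI below is hypothetical, not the project's real config):
# 'postgres://webapp:secret@localhost:5432/titles_db' yields
# {'user': 'webapp', 'password': 'secret', 'host': 'localhost',
#  'port': 5432, 'database': 'titles_db'}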
DB_CONNECTION_PARAMS = _get_db_connection_params()
class TestDbAccess:
def setup_method(self, method):
self.connection = self._connect_to_db()
self._delete_all_titles()
def teardown_method(self, method):
try:
self.connection.close()
except (pg8000.InterfaceError):
pass
def test_get_title_register_returns_none_when_title_not_in_the_db(self):
assert db_access.get_title_register('non-existing') is None
def test_get_title_register_returns_none_when_title_marked_as_deleted(self):
title_number = 'title123'
self._create_title(title_number, is_deleted=True)
title = db_access.get_title_register(title_number)
assert title is None
def test_get_title_register_returns_title_data_when_title_not_marked_as_deleted(self):
title_number = 'title123'
register_data = {'register': 'data1'}
geometry_data = {'geometry': 'data2'}
is_deleted = False
last_modified = datetime(2015, 9, 10, 12, 34, 56, 123)
official_copy_data = {'official': 'copy'}
lr_uprns = ['123', '456']
self._create_title(title_number, register_data, geometry_data, is_deleted, last_modified, official_copy_data, lr_uprns)
title = db_access.get_title_register(title_number)
assert title is not None
assert title.title_number == title_number
assert title.register_data == register_data
assert title.geometry_data == geometry_data
assert title.is_deleted == is_deleted
assert title.last_modified.timestamp() == last_modified.timestamp()
assert title.official_copy_data == official_copy_data
assert title.lr_uprns == lr_uprns
def test_get_title_registers_returns_titles_with_right_content(self):
title_number = 'title123'
register_data = {'register': 'data1'}
geometry_data = {'geometry': 'data2'}
is_deleted = False
last_modified = datetime(2015, 9, 10, 12, 34, 56, 123)
official_copy_data = {'official': 'copy'}
lr_uprns = ['123', '456']
self._create_title(title_number, register_data, geometry_data, is_deleted, last_modified, official_copy_data, lr_uprns)
titles = db_access.get_title_registers([title_number])
assert len(titles) == 1
title = titles[0]
assert title is not None
assert title.title_number == title_number
assert title.register_data == register_data
assert title.geometry_data == geometry_data
assert title.is_deleted == is_deleted
assert title.last_modified.timestamp() == last_modified.timestamp()
assert title.official_copy_data == official_copy_data
assert title.lr_uprns == lr_uprns
def test_get_title_registers_returns_list_with_all_existing_titles(self):
existing_title_numbers = {'title1', 'title2', 'title3'}
for title_number in existing_title_numbers:
self._create_title(title_number)
titles = db_access.get_title_registers(existing_title_numbers | {'non-existing-1'})
assert len(titles) == 3
returned_title_numbers = self._get_title_numbers(titles)
assert existing_title_numbers == returned_title_numbers
def test_get_title_registers_returns_empty_list_when_no_title_found(self):
titles = db_access.get_title_registers(['non-existing-1', 'non-existing-2'])
assert titles == []
def test_get_title_registers_does_not_return_deleted_titles(self):
existing_title_number_1 = 'existing-1'
existing_title_number_2 = 'existing-2'
deleted_title_number_1 = 'deleted-1'
deleted_title_number_2 = 'deleted-2'
self._create_title(existing_title_number_1, is_deleted=False)
self._create_title(existing_title_number_2, is_deleted=False)
self._create_title(deleted_title_number_1, is_deleted=True)
self._create_title(deleted_title_number_2, is_deleted=True)
titles = db_access.get_title_registers([
existing_title_number_1, deleted_title_number_1, existing_title_number_2, deleted_title_number_2
])
assert len(titles) == 2
assert self._get_title_numbers(titles) == {existing_title_number_1, existing_title_number_2}
def test_get_official_copy_data_returns_none_when_title_not_in_the_db(self):
assert db_access.get_official_copy_data('non-existing') is None
def test_get_official_copy_data_returns_none_when_title_marked_as_deleted(self):
title_number = 'title123'
self._create_title(title_number, is_deleted=True, official_copy_data={'official': 'copy'})
title = db_access.get_official_copy_data(title_number)
assert title is None
def test_get_official_copy_data_returns_the_copy_when_title_in_the_db(self):
title_number = 'title123'
register_data = {'register': 'data1'}
geometry_data = {'geometry': 'data2'}
is_deleted = False
last_modified = datetime(2015, 9, 10, 12, 34, 56, 123)
official_copy_data = {'official': 'copy'}
lr_uprns = ['123', '456']
self._create_title(title_number, register_data, geometry_data, is_deleted, last_modified, official_copy_data, lr_uprns)
title = db_access.get_official_copy_data(title_number)
assert title is not None
assert title.title_number == title_number
assert title.register_data == register_data
assert title.geometry_data == geometry_data
assert title.is_deleted == is_deleted
assert title.last_modified.timestamp() == last_modified.timestamp()
assert title.official_copy_data == official_copy_data
assert title.lr_uprns == lr_uprns
def _get_title_numbers(self, titles):
return set(map(lambda title: title.title_number, titles))
def _create_title(
self,
title_number,
register_data={},
geometry_data={},
is_deleted=False,
last_modified=datetime.now(),
official_copy_data={},
lr_uprns=[]):
print(INSERT_TITLE_QUERY_FORMAT.format(self._get_string_list_for_pg(lr_uprns)))
self.connection.cursor().execute(
INSERT_TITLE_QUERY_FORMAT.format(self._get_string_list_for_pg(lr_uprns)),
(
title_number,
json.dumps(register_data),
json.dumps(geometry_data),
is_deleted,
last_modified,
json.dumps(official_copy_data),
)
)
return self.connection.commit()
def _get_string_list_for_pg(self, strings):
return ','.join(['"{}"'.format(s) for s in strings])
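    # For example, _get_string_list_for_pg(['123', '456']) returns '"123","456"',
    # which INSERT_TITLE_QUERY_FORMAT wraps into the Postgres array literal
    # '{ "123","456" }' for the lr_uprns column.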
def _delete_all_titles(self):
self.connection.cursor().execute(DELETE_ALL_TITLES_QUERY)
self.connection.commit()
def _connect_to_db(self):
return pg8000.connect(**DB_CONNECTION_PARAMS)
|
StarcoderdataPython
|
6653272
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/gaogaotiantian/viztracer/blob/master/NOTICE.txt
import sys
import argparse
import os
import subprocess
import builtins
import webbrowser
from . import VizTracer
from . import FlameGraph
from .report_builder import ReportBuilder
from .util import get_url_from_file
from .code_monkey import CodeMonkey
def main():
import runpy
parser = argparse.ArgumentParser(prog="python -m viztracer")
parser.add_argument("--tracer_entries", nargs="?", type=int, default=1000000,
help="size of circular buffer. How many entries can it store")
parser.add_argument("--output_file", "-o", nargs="?", default=None,
help="output file path. End with .json or .html or .gz")
parser.add_argument("--output_dir", nargs="?", default=None,
help="output directory. Should only be used when --pid_suffix is used")
parser.add_argument("--quiet", action="store_true", default=False,
help="stop VizTracer from printing anything")
parser.add_argument("--max_stack_depth", nargs="?", type=int, default=-1,
help="maximum stack depth you want to trace.")
parser.add_argument("--exclude_files", nargs="*", default=None,
help="specify the files(directories) you want to exclude from tracing. Can't be used with --include_files")
parser.add_argument("--include_files", nargs="*", default=None,
help="specify the only files(directories) you want to include from tracing. Can't be used with --exclude_files")
parser.add_argument("--ignore_c_function", action="store_true", default=False,
help="ignore all c functions including most builtin functions and libraries")
parser.add_argument("--ignore_non_file", action="store_true", default=False,
help="ignore all functions that are not in a vaild file(like import)")
parser.add_argument("--log_return_value", action="store_true", default=False,
help="log return value of the function in the report")
parser.add_argument("--log_print", action="store_true", default=False,
help="replace all print() function to adding an event to the result")
parser.add_argument("--log_function_args", action="store_true", default=False,
help="log all function arguments, this will introduce large overhead")
parser.add_argument("--log_gc", action="store_true", default=False,
help="log ref cycle garbage collection operations")
parser.add_argument("--log_var", nargs="*", default=None,
help="log variable with specified names")
parser.add_argument("--log_number", nargs="*", default=None,
help="log variable with specified names as a number(using VizCounter)")
parser.add_argument("--novdb", action="store_true", default=False,
help="Do not instrument for vdb, will reduce the overhead")
parser.add_argument("--pid_suffix", action="store_true", default=False,
help="append pid to file name. This should be used when you try to trace multi process programs. Will by default generate json files")
parser.add_argument("--save_flamegraph", action="store_true", default=False,
help="save flamegraph after generating the VizTracer report")
parser.add_argument("--generate_flamegraph", nargs="?", default=None,
help="generate a flamegraph from json VizTracer report. Specify the json file to use")
parser.add_argument("--run", nargs="*", default=[],
help="explicitly specify the python commands you want to trace. Should be used if there's ambiguity")
parser.add_argument("--module", "-m", nargs="?", default=None,
help="run module with VizTracer")
parser.add_argument("--combine", nargs="*", default=[],
help="combine all json reports to a single report. Specify all the json reports you want to combine")
parser.add_argument("--open", action="store_true", default=False,
help="open the report in browser after saving")
parser.add_argument("command", nargs=argparse.REMAINDER,
help="python commands to trace")
options = parser.parse_args(sys.argv[1:])
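    # Typical invocations handled below (script/module names are placeholders):
    #   python -m viztracer my_script.py arg1 arg2
    #   python -m viztracer -o trace.html --max_stack_depth 10 my_script.py
    #   python -m viztracer -m my_package.my_module
    #   python -m viztracer --combine result1.json result2.json -o merged.html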
if options.command:
command = options.command
elif options.run:
command = options.run
elif options.module:
command = options.command
elif options.generate_flamegraph:
flamegraph = FlameGraph()
flamegraph.load(options.generate_flamegraph)
if options.output_file:
ofile = options.output_file
else:
ofile = "result_flamegraph.html"
flamegraph.save(ofile)
exit(0)
elif options.combine:
builder = ReportBuilder(options.combine)
if options.output_file:
ofile = options.output_file
else:
ofile = "result.html"
builder.save(output_file=ofile)
exit(0)
else:
parser.print_help()
exit(0)
if options.module:
code = "run_module(modname, run_name='__main__')"
global_dict = {
"run_module": runpy.run_module,
"modname": options.module
}
sys.argv = [options.module] + command[:]
else:
file_name = command[0]
if not os.path.exists(file_name):
if sys.platform in ["linux", "linux2", "darwin"]:
p = subprocess.Popen(["which", file_name], stdout=subprocess.PIPE)
guess_file_name = p.communicate()[0].decode("utf-8").strip()
if not guess_file_name or not os.path.exists(guess_file_name):
print("No such file as {}".format(file_name))
exit(1)
else:
file_name = guess_file_name
else:
print("No such file as {}".format(file_name))
exit(1)
code_string = open(file_name, "rb").read()
global_dict = {
"__name__": "__main__",
"__file__": file_name,
"__package__": None,
"__cached__": None
}
if options.log_var or options.log_number:
monkey = CodeMonkey(code_string, file_name)
if options.log_var:
monkey.add_instrument("log_var", {"varnames": options.log_var})
if options.log_number:
monkey.add_instrument("log_number", {"varnames": options.log_number})
builtins.compile = monkey.compile
code = compile(code_string, os.path.abspath(file_name), "exec")
sys.path.insert(0, os.path.dirname(file_name))
sys.argv = command[:]
if options.quiet:
verbose = 0
else:
verbose = 1
if options.output_file:
ofile = options.output_file
elif options.pid_suffix:
ofile = "result.json"
else:
ofile = "result.html"
if options.output_dir:
if not os.path.exists(options.output_dir):
os.mkdir(options.output_dir)
ofile = os.path.join(options.output_dir, ofile)
print(ofile)
tracer = VizTracer(
tracer_entries=options.tracer_entries,
verbose=verbose,
output_file=ofile,
max_stack_depth=options.max_stack_depth,
exclude_files=options.exclude_files,
include_files=options.include_files,
ignore_c_function=options.ignore_c_function,
ignore_non_file=options.ignore_non_file,
log_return_value=options.log_return_value,
log_function_args=options.log_function_args,
log_print=options.log_print,
log_gc=options.log_gc,
novdb=options.novdb,
save_on_exit=True,
pid_suffix=options.pid_suffix
)
builtins.__dict__["__viz_tracer__"] = tracer
global_dict["__builtins__"] = globals()["__builtins__"]
tracer.start()
exec(code, global_dict)
tracer.stop()
tracer.save(output_file=ofile, save_flamegraph=options.save_flamegraph)
if options.open:
try:
webbrowser.open(get_url_from_file(os.path.abspath(ofile)))
except webbrowser.Error as e:
print(e, "Can not open the report")
|
StarcoderdataPython
|
8044747
|
from enum import Enum
class CommandType(Enum):
ROTATE = 1
MOVE = 2
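# Minimal usage sketch (not part of the original module):
#   CommandType.ROTATE.value == 1
#   CommandType(2) is CommandType.MOVE
#   CommandType['MOVE'].name == 'MOVE'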
|
StarcoderdataPython
|
1792813
|
from urllib.parse import urljoin
from django.conf import settings
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.contrib.auth.models import User
from ...text import create_slug
from ..magic import MisencodedCharField, MisencodedTextField
class UserProfile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, blank=True, null=True)
jmeno_uzivatele = MisencodedCharField(max_length=20)
nick_uzivatele = MisencodedCharField(unique=True, max_length=25)
prijmeni_uzivatele = MisencodedCharField(max_length=20)
psw_uzivatele = MisencodedCharField(max_length=40)
email_uzivatele = MisencodedCharField(max_length=50)
pohlavi_uzivatele = MisencodedCharField(max_length=4, blank=True, null=True)
vek_uzivatele = models.IntegerField(default=0)
kraj_uzivatele = MisencodedCharField(max_length=20)
chat_barva = MisencodedCharField(max_length=6)
chat_pismo = models.IntegerField(default=12)
chat_reload = models.IntegerField(default=15)
chat_zprav = models.IntegerField(default=20)
chat_filtr = MisencodedCharField(max_length=255, blank=True, null=True)
chat_filtr_zobrazit = models.IntegerField(default=0)
pospristup = models.DateTimeField(auto_now_add=True)
level = MisencodedCharField(max_length=1)
icq_uzivatele = models.IntegerField(default=0)
vypsat_udaje = MisencodedCharField(max_length=15)
ikonka_uzivatele = MisencodedCharField(max_length=25, blank=True, null=True)
popis_uzivatele = MisencodedCharField(max_length=255, blank=True, null=True)
nova_posta = models.IntegerField(default=0)
skin = MisencodedCharField(max_length=10)
reputace = models.IntegerField(default=0)
reputace_rozdel = models.PositiveIntegerField(default=0)
status = MisencodedCharField(max_length=1)
reg_schval_datum = models.DateTimeField(blank=True, null=True, auto_now_add=True)
indexhodnotitele = models.DecimalField(max_digits=4, decimal_places=2, default=-99.99)
reload = MisencodedCharField(max_length=1)
max_level = models.IntegerField(blank=True, null=True)
api_key = MisencodedCharField(unique=True, max_length=40, blank=True, null=True)
class Meta:
db_table = 'uzivatele'
def get_slug(self):
slug = create_slug(self.nick_uzivatele)
if not slug:
slug = 'neznamy'
#TODO: log an error
return slug
def get_icon_url(self):
if not self.ikonka_uzivatele:
return None
else:
return urljoin(
settings.USER_ICON_MEDIA_ROOT_URL,
self.ikonka_uzivatele
)
icon_url = property(get_icon_url)
slug = property(get_slug)
User.profile = property(lambda u: UserProfile.objects.get_or_create(user=u)[0])
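# Usage sketch (attribute access is illustrative): for any User instance `u`,
#   u.profile.nick_uzivatele  -> the user's nick
#   u.profile.icon_url        -> absolute icon URL, or None if no icon is set
# The get_or_create above means the profile row is created lazily on first access.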
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
# Note that unlike the normal model, we are creating the User lazily
# (instead of UserProfile as usual). Hence, on creation, UserProfile is assumed
# to exist (and needs to be updated with proper relation manually), whereas
# afterwards profiles can be updated as usual
if created:
# YOU are responsible for properly linking User and UserProfile
# outside of signal handling!
# ALWAYS use .users.create_user
pass
else:
instance.profile.save()
class LevelSystemParams(models.Model):
parametr = MisencodedCharField(primary_key=True, max_length=40)
hodnota = MisencodedCharField(max_length=30)
class Meta:
db_table = 'level_parametry_2'
|
StarcoderdataPython
|
4951863
|
import os
import psico.seqalign
DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
FILENAME_FATCAT = os.path.join(DATA_DIR, '1ubqA-1hezE.fatcat')
def test_needle_alignment():
A = psico.seqalign.needle_alignment("ACDEFGHIKLMN", "DEYGHVVVVIKLMN")
assert str(A[0].seq) == "ACDEFGH----IKLMN"
assert str(A[1].seq) == "--DEYGHVVVVIKLMN"
def test_alignment_mapping():
seq1 = "ACDEFGH----IKLMN"
seq2 = "--DEYGHVVVVIKLMN"
mapping = psico.seqalign.alignment_mapping(seq1, seq2)
assert dict(mapping) == {
2: 0,
3: 1,
4: 2,
5: 3,
6: 4,
7: 9,
8: 10,
9: 11,
10: 12,
11: 13,
}
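# The mapping is between 0-based residue indices with gaps removed: e.g. key 2
# is the 'D' of seq1 ("ACDEFGH...") and value 0 is the 'D' of seq2 ("DEYGH...");
# columns where either sequence has a gap are skipped.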
def test_aln_magic_format():
assert "fatcat" == psico.seqalign.aln_magic_format(FILENAME_FATCAT)
def test_aln_magic_read():
# indirectly also tests FatCatIterator
A = psico.seqalign.aln_magic_read(FILENAME_FATCAT)
S = [
"IFVKTLTGKTITLEVEPSDT--IENVKAKIQDKEGIPPDQQRLIFAGKQLEDGRTLSDYNIQKESTLHLVL",
"VNLIFADGKIQTAEFKGTFEEATAEAYRYADLLAKVNGEYTADLED-----------GGNH-----MNIKF",
]
assert str(A[0].seq) == S[0]
assert str(A[1].seq) == S[1]
# def needle_alignment_emboss(s1, s2):
# def FatCatIterator(handle):
# def ProSMARTIterator(handle):
# def POAIterator(handle):
|
StarcoderdataPython
|
3474691
|
#!/usr/bin/env python
'''
Simple script to call conda build with the current revision and version.
'''
import argparse
import subprocess
import json
from re import match
import os
NAME = 'python-clingox'
def get_build_number(channels, version):
'''
Get the next build number.
'''
try:
pkgs = json.loads(subprocess.check_output(['conda', 'search', '--json', '-c', channels[0], NAME]))
except subprocess.CalledProcessError:
pkgs = {NAME: []}
build_number = -1
for pkg in pkgs.get(NAME, []):
if pkg['channel'].find(channels[0]) >= 0 and pkg["version"] == version:
build_number = max(build_number, pkg['build_number'])
return build_number + 1
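# Example (hypothetical search results): if the channel already hosts builds 0
# and 1 of python-clingox for this version, get_build_number returns 2; if the
# version is not on the channel at all, it returns 0.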
def run():
'''
Compile and upload conda packages.
'''
parser = argparse.ArgumentParser(description='Build conda packages.')
parser.add_argument('--release', action='store_true', help='Build release packages.')
args = parser.parse_args()
if args.release:
label = None
channels = ['potassco']
else:
label = "dev"
channels = ['potassco/label/dev', 'potassco']
version = None
with open('setup.py') as fh:
for line in fh:
m = match(r'''[ ]*version[ ]*=[ ]*['"]([0-9]+\.[0-9]+\.[0-9]+)(\.post[0-9]+)?['"]''', line)
if m is not None:
version = m.group(1)
assert version is not None
build_number = get_build_number(channels, version)
build_env = os.environ.copy()
build_env.pop("BUILD_RELEASE", "1" if args.release else None)
build_env["VERSION_NUMBER"] = version
build_env["BUILD_NUMBER"] = str(build_number)
if 'GITHUB_SHA' in os.environ:
build_env["BUILD_REVISION"] = os.environ['GITHUB_SHA']
recipe_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'conda')
options = ['conda', 'build']
if label is not None:
options.extend(['--label', label])
for c in channels:
options.extend(['-c', c])
options.append(recipe_path)
subprocess.check_call(options, env=build_env)
if __name__ == '__main__':
run()
|
StarcoderdataPython
|
8051545
|
import logging
import sys
from datetime import datetime
from constants import TMP_DICT
class LogAdapter(object):
def getlogger(self, module_name):
logger = logging.getLogger(module_name)
# TODO: This need to be from config
logger.setLevel(TMP_DICT['log_level'])
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
@classmethod
    def datetime_str(cls, format=None, extention=None):
now = datetime.now()
if not format:
format = "%Y%m%d-%H%M%S"
dt_string = now.strftime(format)
if extention:
dt_string += '.' + extention
return dt_string
log_adapter = LogAdapter()
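# Usage sketch (module name is illustrative):
#   logger = log_adapter.getlogger('my_module')
#   logger.debug('started')
#   LogAdapter.datetime_str(extention='log')  # e.g. '20240101-120000.log'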
|
StarcoderdataPython
|
11248377
|
<reponame>ckamtsikis/cmssw<gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
from RecoMuon.MuonIsolation.muonPFIsolationValues_cff import *
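# Each *PFBRECO value map below is a clone of the corresponding standard muon
# PF isolation producer, re-pointed at the PFBRECO-specific IsoDeposit
# collections via the `deposits` override; the 03/04 suffixes denote the
# isolation cone sizes (deltaR = 0.3 / 0.4).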
muPFIsoValueCharged03PFBRECO = muPFIsoValueCharged03.clone(
deposits = {0: dict(src = 'muPFIsoDepositChargedPFBRECO')}
)
muPFMeanDRIsoValueCharged03PFBRECO = muPFMeanDRIsoValueCharged03.clone(
deposits = {0: dict(src = 'muPFIsoDepositChargedPFBRECO')}
)
muPFSumDRIsoValueCharged03PFBRECO = muPFSumDRIsoValueCharged03.clone(
deposits = {0: dict(src = 'muPFIsoDepositChargedPFBRECO')}
)
muPFIsoValueChargedAll03PFBRECO = muPFIsoValueChargedAll03.clone(
deposits = {0: dict(src = 'muPFIsoDepositChargedAllPFBRECO')}
)
muPFMeanDRIsoValueChargedAll03PFBRECO = muPFMeanDRIsoValueChargedAll03.clone(
deposits = {0: dict(src = 'muPFIsoDepositChargedAllPFBRECO')}
)
muPFSumDRIsoValueChargedAll03PFBRECO = muPFSumDRIsoValueChargedAll03.clone(
deposits = {0: dict(src = 'muPFIsoDepositChargedAllPFBRECO')}
)
muPFIsoValueGamma03PFBRECO = muPFIsoValueGamma03.clone(
deposits = {0: dict(src = 'muPFIsoDepositGammaPFBRECO')}
)
muPFMeanDRIsoValueGamma03PFBRECO = muPFMeanDRIsoValueGamma03.clone(
deposits = {0: dict(src = 'muPFIsoDepositGammaPFBRECO')}
)
muPFSumDRIsoValueGamma03PFBRECO = muPFSumDRIsoValueGamma03.clone(
deposits = {0: dict(src = 'muPFIsoDepositGammaPFBRECO')}
)
muPFIsoValueNeutral03PFBRECO = muPFIsoValueNeutral03.clone(
deposits = {0: dict(src = 'muPFIsoDepositNeutralPFBRECO')}
)
muPFMeanDRIsoValueNeutral03PFBRECO = muPFMeanDRIsoValueNeutral03.clone(
deposits = {0: dict(src = 'muPFIsoDepositNeutralPFBRECO')}
)
muPFSumDRIsoValueNeutral03PFBRECO = muPFSumDRIsoValueNeutral03.clone(
deposits = {0: dict(src = 'muPFIsoDepositNeutralPFBRECO')}
)
muPFIsoValueGammaHighThreshold03PFBRECO = muPFIsoValueGammaHighThreshold03.clone(
deposits = {0: dict(src = 'muPFIsoDepositGammaPFBRECO')}
)
muPFMeanDRIsoValueGammaHighThreshold03PFBRECO = muPFMeanDRIsoValueGammaHighThreshold03.clone(
deposits = {0: dict(src = 'muPFIsoDepositGammaPFBRECO')}
)
muPFSumDRIsoValueGammaHighThreshold03PFBRECO = muPFSumDRIsoValueGammaHighThreshold03.clone(
deposits = {0: dict(src = 'muPFIsoDepositGammaPFBRECO')}
)
muPFIsoValueNeutralHighThreshold03PFBRECO = muPFIsoValueNeutralHighThreshold03.clone(
deposits = {0: dict(src = 'muPFIsoDepositNeutralPFBRECO')}
)
muPFMeanDRIsoValueNeutralHighThreshold03PFBRECO = muPFMeanDRIsoValueNeutralHighThreshold03.clone(
deposits = {0: dict(src = 'muPFIsoDepositNeutralPFBRECO')}
)
muPFSumDRIsoValueNeutralHighThreshold03PFBRECO = muPFSumDRIsoValueNeutralHighThreshold03.clone(
deposits = {0: dict(src = 'muPFIsoDepositNeutralPFBRECO')}
)
muPFIsoValuePU03PFBRECO = muPFIsoValuePU03.clone(
deposits = {0: dict(src = 'muPFIsoDepositPUPFBRECO')}
)
muPFMeanDRIsoValuePU03PFBRECO = muPFMeanDRIsoValuePU03.clone(
deposits = {0: dict(src = 'muPFIsoDepositPUPFBRECO')}
)
muPFSumDRIsoValuePU03PFBRECO = muPFSumDRIsoValuePU03.clone(
deposits = {0: dict(src = 'muPFIsoDepositPUPFBRECO')}
)
##############################
muPFIsoValueCharged04PFBRECO = muPFIsoValueCharged04.clone(
deposits = {0: dict(src = 'muPFIsoDepositChargedPFBRECO')}
)
muPFMeanDRIsoValueCharged04PFBRECO = muPFMeanDRIsoValueCharged04.clone(
deposits = {0: dict(src = 'muPFIsoDepositChargedPFBRECO')}
)
muPFSumDRIsoValueCharged04PFBRECO = muPFSumDRIsoValueCharged04.clone(
deposits = {0: dict(src = 'muPFIsoDepositChargedPFBRECO')}
)
muPFIsoValueChargedAll04PFBRECO = muPFIsoValueChargedAll04.clone(
deposits = {0: dict(src = 'muPFIsoDepositChargedAllPFBRECO')}
)
muPFMeanDRIsoValueChargedAll04PFBRECO = muPFMeanDRIsoValueChargedAll04.clone(
deposits = {0: dict(src = 'muPFIsoDepositChargedAllPFBRECO')}
)
muPFSumDRIsoValueChargedAll04PFBRECO = muPFSumDRIsoValueChargedAll04.clone(
deposits = {0: dict(src = 'muPFIsoDepositChargedAllPFBRECO')}
)
muPFIsoValueGamma04PFBRECO = muPFIsoValueGamma04.clone(
deposits = {0: dict(src = 'muPFIsoDepositGammaPFBRECO')}
)
muPFMeanDRIsoValueGamma04PFBRECO = muPFMeanDRIsoValueGamma04.clone(
deposits = {0: dict(src = 'muPFIsoDepositGammaPFBRECO')}
)
muPFSumDRIsoValueGamma04PFBRECO = muPFSumDRIsoValueGamma04.clone(
deposits = {0: dict(src = 'muPFIsoDepositGammaPFBRECO')}
)
muPFIsoValueNeutral04PFBRECO = muPFIsoValueNeutral04.clone(
deposits = {0: dict(src = 'muPFIsoDepositNeutralPFBRECO')}
)
muPFMeanDRIsoValueNeutral04PFBRECO = muPFMeanDRIsoValueNeutral04.clone(
deposits = {0: dict(src = 'muPFIsoDepositNeutralPFBRECO')}
)
muPFSumDRIsoValueNeutral04PFBRECO = muPFSumDRIsoValueNeutral04.clone(
deposits = {0: dict(src = 'muPFIsoDepositNeutralPFBRECO')}
)
muPFIsoValueGammaHighThreshold04PFBRECO = muPFIsoValueGammaHighThreshold04.clone(
deposits = {0: dict(src = 'muPFIsoDepositGammaPFBRECO')}
)
muPFMeanDRIsoValueGammaHighThreshold04PFBRECO = muPFMeanDRIsoValueGammaHighThreshold04.clone(
deposits = {0: dict(src = 'muPFIsoDepositGammaPFBRECO')}
)
muPFSumDRIsoValueGammaHighThreshold04PFBRECO = muPFSumDRIsoValueGammaHighThreshold04.clone(
deposits = {0: dict(src = 'muPFIsoDepositGammaPFBRECO')}
)
muPFIsoValueNeutralHighThreshold04PFBRECO = muPFIsoValueNeutralHighThreshold04.clone(
deposits = {0: dict(src = 'muPFIsoDepositNeutralPFBRECO')}
)
muPFMeanDRIsoValueNeutralHighThreshold04PFBRECO = muPFMeanDRIsoValueNeutralHighThreshold04.clone(
deposits = {0: dict(src = 'muPFIsoDepositNeutralPFBRECO')}
)
muPFSumDRIsoValueNeutralHighThreshold04PFBRECO = muPFSumDRIsoValueNeutralHighThreshold04.clone(
deposits = {0: dict(src = 'muPFIsoDepositNeutralPFBRECO')}
)
muPFIsoValuePU04PFBRECO = muPFIsoValuePU04.clone(
deposits = {0: dict(src = 'muPFIsoDepositPUPFBRECO')}
)
muPFMeanDRIsoValuePU04PFBRECO = muPFMeanDRIsoValuePU04.clone(
deposits = {0: dict(src = 'muPFIsoDepositPUPFBRECO')}
)
muPFSumDRIsoValuePU04PFBRECO = muPFSumDRIsoValuePU04.clone(
deposits = {0: dict(src = 'muPFIsoDepositPUPFBRECO')}
)
muonPFIsolationValuesPFBRECOTask = cms.Task(
muPFIsoValueCharged03PFBRECO,
muPFMeanDRIsoValueCharged03PFBRECO,
muPFSumDRIsoValueCharged03PFBRECO,
muPFIsoValueChargedAll03PFBRECO,
muPFMeanDRIsoValueChargedAll03PFBRECO,
muPFSumDRIsoValueChargedAll03PFBRECO,
muPFIsoValueGamma03PFBRECO,
muPFMeanDRIsoValueGamma03PFBRECO,
muPFSumDRIsoValueGamma03PFBRECO,
muPFIsoValueNeutral03PFBRECO,
muPFMeanDRIsoValueNeutral03PFBRECO,
muPFSumDRIsoValueNeutral03PFBRECO,
muPFIsoValueGammaHighThreshold03PFBRECO,
muPFMeanDRIsoValueGammaHighThreshold03PFBRECO,
muPFSumDRIsoValueGammaHighThreshold03PFBRECO,
muPFIsoValueNeutralHighThreshold03PFBRECO,
muPFMeanDRIsoValueNeutralHighThreshold03PFBRECO,
muPFSumDRIsoValueNeutralHighThreshold03PFBRECO,
muPFIsoValuePU03PFBRECO,
muPFMeanDRIsoValuePU03PFBRECO,
muPFSumDRIsoValuePU03PFBRECO,
##############################
muPFIsoValueCharged04PFBRECO,
muPFMeanDRIsoValueCharged04PFBRECO,
muPFSumDRIsoValueCharged04PFBRECO,
muPFIsoValueChargedAll04PFBRECO,
muPFMeanDRIsoValueChargedAll04PFBRECO,
muPFSumDRIsoValueChargedAll04PFBRECO,
muPFIsoValueGamma04PFBRECO,
muPFMeanDRIsoValueGamma04PFBRECO,
muPFSumDRIsoValueGamma04PFBRECO,
muPFIsoValueNeutral04PFBRECO,
muPFMeanDRIsoValueNeutral04PFBRECO,
muPFSumDRIsoValueNeutral04PFBRECO,
muPFIsoValueGammaHighThreshold04PFBRECO,
muPFMeanDRIsoValueGammaHighThreshold04PFBRECO,
muPFSumDRIsoValueGammaHighThreshold04PFBRECO,
muPFIsoValueNeutralHighThreshold04PFBRECO,
muPFMeanDRIsoValueNeutralHighThreshold04PFBRECO,
muPFSumDRIsoValueNeutralHighThreshold04PFBRECO,
muPFIsoValuePU04PFBRECO,
muPFMeanDRIsoValuePU04PFBRECO,
muPFSumDRIsoValuePU04PFBRECO
)
muonPFIsolationValuesPFBRECOSequence = cms.Sequence(muonPFIsolationValuesPFBRECOTask)
|
StarcoderdataPython
|
3422089
|
<reponame>XinghuiTao/ros2_turtlebot<filename>src/omniverse/omniverse/action_server.py
import rclpy
from rclpy.action import ActionClient
from rclpy.node import Node
from interfaces.action import Move
class MyActionClient(Node):
def __init__(self):
super().__init__('my_action_client')
self._action_client = ActionClient(self, Move, 'turtlebot3_as')
def send_goal(self, secs):
goal_msg = Move.Goal()
goal_msg.secs = secs
self._action_client.wait_for_server()
self._send_goal_future = self._action_client.send_goal_async(goal_msg, feedback_callback=self.feedback_callback)
self._send_goal_future.add_done_callback(self.goal_response_callback)
def goal_response_callback(self, future):
goal_handle = future.result()
if not goal_handle.accepted:
self.get_logger().info('Goal rejected :(')
return
self.get_logger().info('Goal accepted :)')
self._get_result_future = goal_handle.get_result_async()
self._get_result_future.add_done_callback(self.get_result_callback)
def get_result_callback(self, future):
result = future.result().result
self.get_logger().info('Result: {0}'.format(result.status))
rclpy.shutdown()
def feedback_callback(self, feedback_msg):
feedback = feedback_msg.feedback
self.get_logger().info('Received feedback: {0}'.format(feedback.feedback))
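# Typical flow (mirrors main() below): send_goal(secs) waits for the
# 'turtlebot3_as' action server, sends a Move goal asynchronously, and the
# chained callbacks log feedback, the acceptance decision and the final result
# status before shutting rclpy down.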
def main(args=None):
rclpy.init(args=args)
action_client = MyActionClient()
future = action_client.send_goal(5)
rclpy.spin(action_client)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1603725
|
"""
Implement the QMIX algorithm with TensorFlow; thanks also to the pymarl repo.
"""
from functools import partial
from time import time
import numpy as np
import tensorflow as tf
from absl import logging
from smac.env import MultiAgentEnv, StarCraft2Env
from xt.algorithm.qmix.episode_buffer_np import EpisodeBatchNP
from xt.algorithm.qmix.qmix_alg import DecayThenFlatSchedule, EpsilonGreedyActionSelector
class QMixAlgorithm(object):
"""Target network is for
calculating the maximum estimated Q-value in given action a.
"""
def __init__(self, scheme, args, avail_action_num, seq_limit, dtype):
# avail_actions vary with env.map
self.n_agents = args.n_agents
self.args = args
self.dtype = dtype
self.obs_shape = self._get_input_shape(scheme)
logging.debug("obs_shape: {}".format(self.obs_shape))
self.previous_state = None
self.ph_hidden_states_in = None
self.hidden_states_out = None
self.params = None
self.inputs = None
self.out_actions = None
self.avail_action_num = avail_action_num
# 2s_vs_1sc , use the episode limit as fix shape.
self.fix_seq_length = seq_limit
self.schedule = DecayThenFlatSchedule(
args.epsilon_start,
args.epsilon_finish,
args.epsilon_anneal_time,
decay="linear",
)
self.epsilon = self.schedule.eval(0)
# select action
self.selector = EpsilonGreedyActionSelector(self.args)
# mix
self.state_dim = int(np.prod(args.state_shape))
self.embed_dim = args.mixing_embed_dim
# self.global_state_dims = (1, 120) # fixme: 2s3z
self.graph = tf.Graph()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config, graph=self.graph)
self.sess = sess
self.gru_cell = None
self.hi_out_val = None
self.hi_out_val_default = None
# self.hi_target_out_val = None
self.grad_update = None # train op
self._explore_paras = None # need update after each train process
self.last_target_update_episode = 0
self.ph_obs, self.agent_outs, self.hidden_outs = None, None, None
self.ph_avail_action, self.ph_actions, self.ph_train_obs = None, None, None
self.ph_train_obs_len, self.agent_explore_replace_op = None, None
self.agent_train_replace_op, self.ph_train_states = None, None
self.ph_train_target_states, self.q_tot, self.target_q_tot = None, None, None
self.mix_train_replace_op = None
self.ph_rewards, self.ph_terminated = None, None
self.loss, self.ph_mask = None, None
def _get_input_shape(self, scheme):
"""assemble input shape"""
input_shape = scheme["obs"]["vshape"]
if self.args.obs_last_action:
input_shape += scheme["actions_onehot"]["vshape"][0]
if self.args.obs_agent_id:
input_shape += self.n_agents
return input_shape
def _get_motivate_actions(self, agents_dim, avail_actions, t_env, test_mode=False):
self.epsilon = self.schedule.eval(t_env)
if test_mode:
# Greedy action selection only
self.epsilon = 0.0
# random_numbers = th.rand_like(agent_inputs[:, :, 0])
random_numbers = np.random.rand(agents_dim)
# pick_random = (random_numbers < self.epsilon).long()
pick_random = np.array(random_numbers < self.epsilon).astype(np.long)
# random_actions = Categorical(avail_actions.float()).sample().long()
avail_action_len = avail_actions.shape[-1]
avail_norm_to_np = np.array(avail_actions / avail_actions.sum(-1)).astype(np.float)
random_actions = np.random.multinomial(avail_action_len, avail_norm_to_np).astype(np.long)
return pick_random, random_actions
def build_agent_net(
self,
inputs_obs,
seq_max,
obs_lengths,
hidden_state_in=None,
):
"""default init_state for rnn.
"""
fc1 = tf.layers.dense(
inputs=inputs_obs,
units=self.args.rnn_hidden_dim,
activation=tf.nn.relu,
)
fc1 = tf.transpose(fc1, perm=[0, 2, 1, 3])
print("\n fc1 before reshape: ", fc1)
fc1 = tf.reshape(fc1, [-1, seq_max, self.args.rnn_hidden_dim])
print("fc1 after reshape: ", fc1)
gru_cell = tf.nn.rnn_cell.GRUCell(
num_units=self.args.rnn_hidden_dim, # dtype=self.dtype
)
# only record the gru cell once time, to init the hidden value.
if not self.gru_cell:
self.gru_cell = gru_cell
# self.hidden_in_zero = self.gru_cell.zero_state(1, dtype=tf.float32)
# https://blog.csdn.net/u010223750/article/details/71079036
# tf.nn.dynamic_rnn
rnn_output, hidden_state_out = tf.nn.dynamic_rnn(
gru_cell,
fc1,
dtype=self.dtype,
initial_state=hidden_state_in,
sequence_length=obs_lengths,
# sequence_length=[1, ]
)
print("rnn raw out: {} ".format(rnn_output))
rnn_output = tf.reshape(rnn_output, [-1, self.n_agents, seq_max, self.args.rnn_hidden_dim])
rnn_output = tf.transpose(rnn_output, perm=[0, 2, 1, 3])
rnn_output = tf.reshape(rnn_output, [-1, self.args.rnn_hidden_dim])
fc2_outputs = tf.layers.dense(
inputs=rnn_output,
units=self.args.n_actions,
activation=None,
# activation=tf.nn.relu,
)
out_actions = tf.reshape(fc2_outputs, (-1, self.n_agents, self.avail_action_num))
print("out action: {} \n".format(out_actions))
return out_actions, hidden_state_out
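    # Shape walk-through for build_agent_net, as implied by the reshapes above
    # (B = batch, T = seq_max, N = n_agents, H = rnn_hidden_dim, A = n_actions):
    #   inputs_obs (B, T, N, obs) -> fc1 (B, T, N, H) -> transpose (B, N, T, H)
    #   -> reshape (B*N, T, H) -> GRU -> (B*N, T, H)
    #   -> reshape (B, N, T, H) -> transpose (B, T, N, H) -> reshape (B*T*N, H)
    #   -> fc2 (B*T*N, A) -> out_actions (B*T, N, A)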
def _build_mix_net2(self, agent_qs, states):
"""build mixer architecture with two hyper embed"""
hypernet_embed = self.args.hypernet_embed
def hyper_w1(hyper_w1_input):
"""input shape (none, state_dim)"""
with tf.variable_scope("hyper_w1"):
hw0 = tf.layers.dense(inputs=hyper_w1_input, units=hypernet_embed, activation=tf.nn.relu)
hw1 = tf.layers.dense(inputs=hw0, units=self.embed_dim * self.n_agents, activation=None)
return hw1
def hyper_w_final(hyper_w_final_input):
"""input shape (none, state_dim)"""
with tf.variable_scope("hyper_w_final"):
hw_f0 = tf.layers.dense(
inputs=hyper_w_final_input,
units=hypernet_embed,
activation=tf.nn.relu,
)
hw_f1 = tf.layers.dense(inputs=hw_f0, units=self.embed_dim, activation=None)
return hw_f1
def hyper_b1(state_input):
"""State dependent bias for hidden layer"""
with tf.variable_scope("hyper_b1"):
return tf.layers.dense(inputs=state_input, units=self.embed_dim, activation=None)
def val(state_input):
"""V(s) instead of a bias for the last layers"""
with tf.variable_scope("val_for_bias"):
val0 = tf.layers.dense(inputs=state_input, units=self.embed_dim, activation=tf.nn.relu)
val2 = tf.layers.dense(inputs=val0, units=1, activation=None)
return val2
bs = agent_qs.get_shape().as_list()[0]
states_reshaped = tf.reshape(states, (-1, self.state_dim))
agent_qs_reshaped = tf.reshape(agent_qs, (-1, 1, self.n_agents))
# firstly layer
w1 = tf.math.abs(hyper_w1(states_reshaped))
b1 = hyper_b1(states_reshaped)
w1_reshaped = tf.reshape(w1, (-1, self.n_agents, self.embed_dim))
b1_reshaped = tf.reshape(b1, (-1, 1, self.embed_dim))
to_hidden_val = tf.math.add(tf.matmul(agent_qs_reshaped, w1_reshaped), b1_reshaped)
hidden = tf.nn.elu(to_hidden_val)
# second layer
w_final = tf.math.abs(hyper_w_final(states_reshaped))
w_final_reshaped = tf.reshape(w_final, (-1, self.embed_dim, 1))
# state-dependent bias
v = tf.reshape(val(states_reshaped), (-1, 1, 1))
# compute final output
y = tf.math.add(tf.matmul(hidden, w_final_reshaped), v)
# reshape and return
q_tot = tf.reshape(y, (bs, -1, 1))
return q_tot
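    # In equation form the mixer above computes
    #   Q_tot(s, q) = w2(s)^T * elu(w1(s)^T * q + b1(s)) + V(s)
    # where w1 and w2 come from hypernetworks conditioned on the state and are
    # passed through tf.abs, keeping dQ_tot/dq_i >= 0 (the QMIX monotonicity
    # constraint).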
def _build_action_selector(self, agent_inputs, avail_actions, ph_pick_random, ph_random_actions):
"""firstly, calculate the explore action with numpy out of the graph!
"""
masked_q_values = tf.identity(agent_inputs)
negation_inf_val = tf.ones_like(masked_q_values) * -1e10
masked_q_values = tf.where(avail_actions < 1e-5, negation_inf_val, masked_q_values)
picked_actions = ph_pick_random * ph_random_actions + (1 - ph_pick_random) * tf.reduce_max(
masked_q_values, reduction_indices=[2])
return picked_actions
def build_inputs(self, batch, t):
"""
        # Assumes homogeneous agents with flat observations.
        # Other MACs might want to e.g. delegate building inputs to each agent.
        1. Inference stage: batch size is 1.
        2. Train stage: batch size is the episode limit.
        NumPy is used to combine the input data.
"""
bs = batch.batch_size
inputs = list()
inputs.append(batch["obs"][:, t]) # b1av
# print("forward input.obs shape, ", np.shape(inputs[0])) # torch.Size([1, 5, 80])
if self.args.obs_last_action:
if t == 0:
# tmp = batch["actions_onehot"][:, t]
# print(tmp, np.shape(tmp), np.shape(batch["actions_onehot"]))
inputs.append(np.zeros_like(batch["actions_onehot"][:, t]))
# print(inputs)
else:
inputs.append(batch["actions_onehot"][:, t - 1])
# print("forward input.onehot shape, ",
# np.shape(inputs[-1]), np.shape(batch["actions_onehot"]))
if self.args.obs_agent_id:
_ag_id = np.expand_dims(np.eye(self.n_agents), axis=0) # add axis 0
inputs.append(np.tile(_ag_id, (bs, 1, 1))) # broadcast_to
# print("inputs shape: ", [np.shape(i) for i in inputs])
# inputs = np.concatenate(
# [x.reshape(bs * self.n_agents, -1) for x in inputs], axis=1
# )
# [batch_size, 1, agents, obs_size]
inputs = np.expand_dims(np.concatenate(inputs, axis=-1), axis=1)
# fixme: make to [batch_size, agent_num, seq_len, obs_size]
# print("forward input shape, ", np.shape(inputs)) # torch.Size([5, 96])
# print("inputs shape: ", inputs.shape)
return inputs
def build_actor_graph(self):
"""actor graph used by the explorer"""
with self.graph.as_default():
self.ph_obs = tf.placeholder(tf.float32, shape=(1, 1, self.n_agents, self.obs_shape), name="obs")
# self.ph_obs_len = tf.placeholder(tf.float32, shape=(None,), name="obs_len")
self.ph_hidden_states_in = tf.placeholder(tf.float32,
shape=(None, self.args.rnn_hidden_dim),
name="hidden_in")
with tf.variable_scope("explore_agent"):
self.agent_outs, self.hidden_outs = self.build_agent_net(
inputs_obs=self.ph_obs,
seq_max=1, # --------------------- 1, importance
obs_lengths=[1 for _ in range(self.n_agents)],
hidden_state_in=self.ph_hidden_states_in,
)
self._explore_paras = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="explore_agent")
def reset_hidden_state(self):
"""reset hidden before start each episode"""
self.hi_out_val = self.hi_out_val_default
def get_explore_actions(self, ep_batch, t_ep, t_env, test_mode):
"""get explore action with numpy"""
avail_actions = ep_batch["avail_actions"][:, t_ep]
agent_inputs = self.build_inputs(ep_batch, t_ep)
# agent_inputs =
out_val = self.infer_actions(agent_inputs)
select_actions = self.selector.select_action(out_val, avail_actions, t_env, test_mode=test_mode)
# print("out_val: {}, select action: {}, avail_actions, {}, t_env:{}".format(
# out_val, select_actions, avail_actions, t_env))
return select_actions
def infer_actions(self, agent_inputs):
"""inference with tf.sess.run"""
out_val, self.hi_out_val = self.sess.run(
[self.agent_outs, self.hidden_outs],
feed_dict={
self.ph_obs: agent_inputs,
# self.ph_obs_len: list(obs_len),
self.ph_hidden_states_in: self.hi_out_val,
},
)
return out_val
@staticmethod
def _gather4d_on_dim3(inputs, indices):
"""
        Gather a 4-dim tensor into 3 dims; equivalent to PyTorch's
        gather(dim=3) followed by squeeze(3).
        :param inputs: 4-dim tensor to gather from.
        :param indices: 4-dim index tensor whose last dimension has size 1.
        :return: 3-dim gathered tensor.
"""
print("inputs: ", inputs)
len_0d, len_1d, len_2d, len_3d = inputs.get_shape().as_list()
print("len_0d, len_1d, len_2d, len_3d", len_0d, len_1d, len_2d, len_3d)
inputs = tf.reshape(inputs, (-1, len_3d))
calc_0d = inputs.get_shape()[0]
flag_0d, flag_1d, flag_2d, flag_3d = indices.get_shape()
indices = tf.reshape(indices, [-1, flag_3d])
idx_matrix = tf.tile(tf.expand_dims(tf.range(0, len_3d, dtype=indices.dtype), 0), [calc_0d, 1])
indices_t = tf.transpose(indices)
idx_mask = tf.equal(idx_matrix, tf.transpose(indices_t))
inputs = tf.reshape(tf.boolean_mask(inputs, idx_mask), [flag_0d, flag_1d, flag_2d])
return inputs
@staticmethod
def _print_trainable_var_name(**kwargs):
"""print trainable variable name """
for k, v in kwargs.items():
print("{}: \n {}".format(k, list([t.name for t in v])))
def build_train_graph(self):
"""train graph cannot connect-up to actor.graph,
because of the different seq_max(1 vs limit)
"""
with self.graph.as_default():
self.ph_avail_action = tf.placeholder(
tf.float32,
shape=[
self.args.batch_size,
self.fix_seq_length + 1,
self.n_agents,
self.avail_action_num,
],
name="avail_action",
)
self.ph_actions = tf.placeholder(
tf.float32,
shape=[self.args.batch_size, self.fix_seq_length, self.n_agents, 1],
name="actions",
)
# agent_num = self.n_agents
# seq_max = 300
# -------eval rnn agent ------------------
self.ph_train_obs = tf.placeholder(
tf.float32,
shape=(
self.args.batch_size,
self.fix_seq_length + 1,
self.n_agents,
self.obs_shape,
),
name="train_obs",
)
self.ph_train_obs_len = tf.placeholder(tf.float32, shape=(None, ), name="train_obs_len")
with tf.variable_scope("eval_agent"):
trajectory_agent_outs, _ = self.build_agent_net(
inputs_obs=self.ph_train_obs,
seq_max=self.fix_seq_length + 1, # --------------------- importance
obs_lengths=self.ph_train_obs_len,
hidden_state_in=None, # with total trajectory, needn't hold hidden
)
with tf.variable_scope("target_agent"):
tar_agent_outs_tmp, _ = self.build_agent_net(
inputs_obs=self.ph_train_obs,
# fix value, different between explore and train
seq_max=self.fix_seq_length + 1,
obs_lengths=self.ph_train_obs_len,
hidden_state_in=None,
)
target_trajectory_agent_outs = tf.stop_gradient(tar_agent_outs_tmp)
_eval_agent_paras = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="eval_agent")
_target_agent_paras = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="target_agent")
with tf.variable_scope("soft_replacement"):
self.agent_train_replace_op = [tf.assign(t, e) for t, e in zip(_target_agent_paras, _eval_agent_paras)]
self.agent_explore_replace_op = [
tf.assign(t, e) for t, e in zip(self._explore_paras, _eval_agent_paras)
]
self._print_trainable_var_name(
_eval_agent_paras=_eval_agent_paras,
_target_agent_paras=_target_agent_paras,
_explore_paras=self._explore_paras,
)
# agent out to max q values
# Calculate estimated Q-Values ----------------
mac_out = tf.reshape(
trajectory_agent_outs,
[self.args.batch_size, self.fix_seq_length + 1, self.n_agents, -1],
)
print("mac_out: ", mac_out)
chosen_action_qvals = self._gather4d_on_dim3(mac_out[:, :-1], self.ph_actions) # -----
# Calculate the Q-Values necessary for the target -----------
target_mac_out = tf.reshape(
target_trajectory_agent_outs,
[self.args.batch_size, self.fix_seq_length + 1, self.n_agents, -1],
)
target_mac_out = target_mac_out[:, 1:]
# Mask out unavailable actions
# target_mac_out[avail_actions[:, 1:] == 0] = -9999999
indices = tf.equal(self.ph_avail_action[:, 1:], 0)
# TypeError: Input 'e' of 'Select' Op has type float32 that
# does not match type int32 of argument 't'.
mask_val = tf.tile(
[[[[-999999.0]]]],
[
self.args.batch_size,
self.fix_seq_length,
self.n_agents,
self.avail_action_num,
],
)
print("indices: ", indices)
print("mask_val: ", mask_val)
print("target_mac_out: ", target_mac_out)
target_mac_out = tf.where(indices, mask_val, target_mac_out)
if self.args.double_q:
# Get actions that maximise live Q (for double q-learning)
mac_out_detach = tf.stop_gradient(tf.identity(mac_out[:, 1:]))
mac_out_detach = tf.where(indices, mask_val, mac_out_detach)
cur_max_actions = tf.expand_dims(tf.argmax(mac_out_detach, axis=-1), -1)
target_max_qvals = self._gather4d_on_dim3(target_mac_out, cur_max_actions)
else:
target_max_qvals = tf.reduce_max(target_mac_out, axis=[-1])
# eval mixer ---------------
self.ph_train_states = tf.placeholder(
tf.float32,
shape=(self.args.batch_size, self.fix_seq_length, self.state_dim),
name="train_stats",
)
# target mixer -------------------
self.ph_train_target_states = tf.placeholder(
tf.float32,
shape=(self.args.batch_size, self.fix_seq_length, self.state_dim),
name="train_target_stats",
)
with tf.variable_scope("eval_mixer"):
self.q_tot = self._build_mix_net2(chosen_action_qvals, self.ph_train_states)
with tf.variable_scope("target_mixer"):
q_tot_tmp = self._build_mix_net2(target_max_qvals, self.ph_train_target_states)
self.target_q_tot = tf.stop_gradient(q_tot_tmp)
_eval_mix_paras = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="eval_mixer")
_target_mix_paras = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="target_mixer")
with tf.variable_scope("soft_replacement"):
self.mix_train_replace_op = [tf.assign(t, e) for t, e in zip(_target_mix_paras, _eval_mix_paras)]
self._print_trainable_var_name(_eval_mix_paras=_eval_mix_paras, _target_mix_paras=_target_mix_paras)
# --------
self.ph_rewards = tf.placeholder(
tf.float32,
shape=(self.args.batch_size, self.fix_seq_length, 1),
name="rewards",
)
self.ph_terminated = tf.placeholder(
tf.float32,
shape=(self.args.batch_size, self.fix_seq_length, 1),
name="terminated",
)
self.ph_mask = tf.placeholder(
tf.float32,
shape=(self.args.batch_size, self.fix_seq_length, 1),
name="mask",
)
print("self.ph_rewards: ", self.ph_rewards)
print("self.args.gamma: ", self.args.gamma)
print("self.ph_terminated: ", self.ph_terminated)
print("self.target_q_tot: ", self.target_q_tot)
# Calculate 1-step Q-Learning targets
targets = (self.ph_rewards + self.args.gamma * (1.0 - self.ph_terminated) * self.target_q_tot)
# Td-error
td_error = self.q_tot - tf.stop_gradient(targets)
# mask = mask.expand_as(td_error) #fixme: default as same shape!
# 0-out the targets that came from padded data
masked_td_error = tf.multiply(td_error, self.ph_mask)
self.loss = tf.reduce_sum(masked_td_error**2) / tf.reduce_sum(self.ph_mask)
# # Optimise
optimizer = tf.train.RMSPropOptimizer(self.args.lr, decay=0.95, epsilon=1.5e-7, centered=True)
grads_and_vars = optimizer.compute_gradients(self.loss)
capped_gvs = [(
grad if grad is None else tf.clip_by_norm(grad, clip_norm=self.args.grad_norm_clip),
var,
) for grad, var in grads_and_vars]
self.grad_update = optimizer.apply_gradients(capped_gvs)
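        # Loss summary for the graph above: the 1-step TD target is
        #   y = r + gamma * (1 - terminated) * Q_tot_target
        # and the loss is the masked mean-squared TD error,
        #   L = sum((mask * (Q_tot - y))**2) / sum(mask),
        # optimised with RMSProp plus per-gradient norm clipping.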
def _update_targets(self, episode_num):
"""
        Update target weights periodically:
        1. copy eval agent weights to the target agent,
        2. copy eval mixer weights to the target mixer.
:return:
"""
if (episode_num - self.last_target_update_episode) / self.args.target_update_interval >= 1.0:
_a, _m = self.sess.run([self.agent_train_replace_op, self.mix_train_replace_op])
print('episode ' + str(episode_num) + ', target Q network params replaced!')
self.last_target_update_episode = episode_num
def _update_explore_agent(self):
"""
update explore agent after each train process
:return:
"""
_ = self.sess.run(self.agent_explore_replace_op)
def save_explore_agent_weights(self, save_path):
"""save explore agent weight for explorer"""
explore_saver = tf.train.Saver({t.name: t for t in self._explore_paras})
explore_saver.save(self.sess, save_path=save_path, write_meta_graph=False)
# tf.train.list_variables(tf.train.latest_checkpoint(wp))
def train_whole_graph(self, batch: EpisodeBatchNP, t_env: int, episode_num: int):
# Truncate batch to only filled timesteps
max_ep_t = batch.max_t_filled()
logging.debug("episode sample with max_ep_t: {}".format(max_ep_t))
# batch = batch[:, :max_ep_t]
# Get the relevant quantities
rewards = batch["reward"][:, :-1]
actions = batch["actions"][:, :-1]
terminated = batch["terminated"][:, :-1].astype(np.float32)
mask = batch["filled"][:, :-1].astype(np.float32)
mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
avail_actions = batch["avail_actions"]
# # # Calculate estimated Q-Values
# [bs, seq_len, n_agents, obs_size] [32, 1, 2, 26] --> [32, 301, 2, 26]
_inputs = [self.build_inputs(batch, t) for t in range(batch.max_seq_length)]
batch_trajectories = np.concatenate(_inputs, axis=1)
logging.debug("batch_trajectories.shape: {}".format(batch_trajectories.shape))
logging.debug("rewards.shape: {}".format(rewards.shape))
logging.debug("actions.shape: {}".format(actions.shape))
logging.debug("terminated.shape: {}".format(terminated.shape))
logging.debug("mask.shape: {}".format(mask.shape))
logging.debug("avail_actions.shape: {}".format(avail_actions.shape))
logging.debug("batch.max_seq_length: {}".format(batch.max_seq_length))
logging.debug("batch.batch_size: {}".format(batch.batch_size))
# to get action --> [32, 300, 2, 7]
# [32*301*2, 26] --> [32*301*2, 7] --> [32, 301, 2, 7] --> [32, 300, 2, 7]
# batch4train = batch_trajectories.reshape([-1, batch_trajectories.shape[-1]])
# writer = tf.summary.FileWriter(logdir="logdir", graph=self.graph)
# writer.flush()
_, loss_val = self.sess.run(
[self.grad_update, self.loss],
feed_dict={
self.ph_train_obs: batch_trajectories,
# Note: split trajectory with each agent.
self.ph_train_obs_len: list(
[max_ep_t for _ in range(batch.batch_size * self.n_agents)]),
self.ph_avail_action: avail_actions,
self.ph_actions: actions,
self.ph_train_states: batch["state"][:, :-1],
self.ph_train_target_states: batch["state"][:, 1:],
self.ph_rewards: rewards,
self.ph_terminated: terminated,
self.ph_mask: mask,
},
)
logging.info("episode-{}, t_env-{}, train_loss: {}".format(episode_num, t_env, loss_val))
# from tests.qmix.test_assign import print_mix_tensor_val, print_agent_tensor_val
# print_agent_tensor_val(self.graph, self.sess, "before update explore agent")
self._update_explore_agent()
self.save_explore_agent_weights(save_path="./save_models/actor{}".format(episode_num))
# print_agent_tensor_val(self.graph, self.sess, "after update explore agent")
# print_mix_tensor_val(self.graph, self.sess, "before update target")
self._update_targets(episode_num=episode_num)
# print_mix_tensor_val(self.graph, self.sess, "after update target")
return {"train_loss": loss_val}
class QMixAgent(object):
"""agent for 2s_vs_1sc"""
def __init__(self, scheme, args):
self.args = args
self.scheme = scheme
def env_fn(env, **kwargs) -> MultiAgentEnv:
return env(**kwargs)
sc2_env_func = partial(env_fn, env=StarCraft2Env)
self.env = sc2_env_func(**self.args.env_args)
self.episode_limit = self.env.episode_limit
print("limit seq: ", self.episode_limit)
env_info = self.env.get_env_info()
print("env_info: ", env_info)
self.avail_action_num = env_info["n_actions"]
self.t = 0
self.t_env = 0
self.n_episode = 0
self.alg = QMixAlgorithm(self.scheme, self.args, self.avail_action_num, self.episode_limit, tf.float32)
self.replay_buffer = None
self.batch = None
# self.bm_writer = BenchmarkBoard("logdir", "qmix_{}".format(
# strftime("%Y-%m-%d %H-%M-%S", localtime())))
def setup(self, scheme, groups, preprocess):
self.new_batch = partial(
EpisodeBatchNP,
scheme,
groups,
1, # Note: batch size must be 1 in a episode
self.episode_limit + 1,
preprocess=preprocess,
)
self.alg.build_actor_graph() # 1 only use for explore !
self.alg.build_train_graph()
        # Note: initializing only once is important!
with self.alg.graph.as_default():
self.alg.sess.run(tf.global_variables_initializer())
self.alg.hi_out_val_default = self.alg.sess.run(
self.alg.gru_cell.zero_state(self.args.n_agents, dtype=tf.float32))
writer = tf.summary.FileWriter(logdir="logdir", graph=self.alg.graph)
writer.flush()
def reset(self):
self.alg.reset_hidden_state()
self.batch = self.new_batch()
self.env.reset()
self.t = 0
def run_one_episode(self, test_mode=False):
# time_info = [0, 0, 0] # reset, interaction
_t = time()
self.reset()
_reset_t = time() - _t
terminated = False
episode_return = 0
env_step_list = []
infer_time_list = []
interaction_cycle, cycle_start = [], None
def show_time(text, time_list):
print("{} mean: {}, Hz-~{}, steps-{}, last-7 as: \n {}".format(
text,
np.mean(time_list[5:]),
int(1.0 / np.mean(time_list)),
len(time_list),
time_list[-7:],
))
return np.mean(time_list[5:])
_start_explore = time()
while not terminated:
pre_transition_data = {
"state": [self.env.get_state()],
"avail_actions": [self.env.get_avail_actions()],
"obs": [self.env.get_obs()],
}
self.batch.update(pre_transition_data, ts=self.t)
# Pass the entire batch of experiences up till now to the agents
# Receive the actions for each agent at this time step in a batch of size 1
before_infer = time()
actions = self.alg.get_explore_actions(self.batch, t_ep=self.t, t_env=self.t_env, test_mode=test_mode)
infer_time_list.append(time() - before_infer)
before_env_step = time()
reward, terminated, env_info = self.env.step(actions[0])
env_step_list.append(time() - before_env_step)
episode_return += reward
post_transition_data = {
"actions": actions,
"reward": [(reward, )],
"terminated": [(terminated != env_info.get("episode_limit", False), )],
}
self.batch.update(post_transition_data, ts=self.t)
self.t += 1
if not cycle_start:
cycle_start = time()
else:
interaction_cycle.append(time() - cycle_start)
cycle_start = time()
last_data = {
"state": [self.env.get_state()],
"avail_actions": [self.env.get_avail_actions()],
"obs": [self.env.get_obs()],
}
self.batch.update(last_data, ts=self.t)
# Select actions in the last stored state
actions = self.alg.get_explore_actions(self.batch, t_ep=self.t, t_env=self.t_env, test_mode=test_mode)
self.batch.update({"actions": actions}, ts=self.t)
if not test_mode:
self.t_env += self.t
self.n_episode += 1
# # for time analysis
# env_avg = show_time("env_step", env_step_list)
# infer_avg = show_time("infer time", infer_time_list)
# cycle_avg = show_time("--> cycle", interaction_cycle)
# print(
# "env step proportion: {}, infer proportion:{}.".format(
# env_avg / cycle_avg, infer_avg / cycle_avg
# )
# )
logging.debug("t_env: {}, explore reward: {}".format(self.t_env, episode_return))
# print("env_info: ", env_info)
if env_info.get("battle_won"):
print("\n", "*" * 50, "won once in {} mode! \n".format("TEST" if test_mode else "EXPLORE"))
# self.bm_writer.insert_records(
record_info_list = [("reset_time", _reset_t, self.n_episode),
("interaction_time", time() - _start_explore, self.n_episode),
("env_step_mean", np.mean(env_step_list), self.n_episode),
("infer_mean", np.mean(infer_time_list), self.n_episode),
("cycle_mean", np.mean(interaction_cycle), self.n_episode),
("explore_reward", episode_return, self.t_env),
("step_per_episode", self.t, self.n_episode)]
return self.batch, record_info_list, env_info
def train(self, batch_data, t_env, episode_num):
info = self.alg.train_whole_graph(batch_data, t_env, episode_num)
record_info = [("train_loss", info["train_loss"], self.t_env)]
return record_info
# self.bm_writer.insert_records([("train_loss", info["train_loss"], self.t_env)])
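# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# A typical outer training loop for this runner might look roughly like the snippet
# below. The replay-buffer method names (insert_episode_batch, can_sample, sample),
# `t_max` and `train_batch_size` are assumptions, not part of this codebase.
#
#   runner.setup(scheme, groups, preprocess)
#   while runner.t_env < t_max:
#       episode_batch, record_info, env_info = runner.run_one_episode()
#       replay_buffer.insert_episode_batch(episode_batch)
#       if replay_buffer.can_sample(train_batch_size):
#           runner.train(replay_buffer.sample(train_batch_size),
#                        runner.t_env, runner.n_episode)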
|
StarcoderdataPython
|
3454218
|
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.python.ops.linalg.sparse import sparse as tfsp
from . import modes as modes
from . import ops as ops
def dot(a, b, transpose_a=False, transpose_b=False):
"""
Dot product between `a` and `b`, with automatic handling of batch dimensions.
Supports both dense and sparse multiplication (including sparse-sparse).
The innermost dimension of `a` must match the outermost dimension of `b`,
unless there is a shared batch dimension.
Note that doing sparse-sparse multiplication of any rank and sparse-dense
multiplication with rank higher than 2 may result in slower computations.
:param a: Tensor or SparseTensor with rank 2 or 3.
:param b: Tensor or SparseTensor with rank 2 or 3.
:param transpose_a: bool, transpose innermost two dimensions of a.
:param transpose_b: bool, transpose innermost two dimensions of b.
:return: Tensor or SparseTensor with rank 2 or 3.
"""
a_is_sparse_tensor = isinstance(a, tf.SparseTensor)
b_is_sparse_tensor = isinstance(b, tf.SparseTensor)
# Handle case where we can use faster sparse-dense matmul
if K.ndim(a) == 2 and K.ndim(b) == 2:
if transpose_a:
a = ops.transpose(a)
if transpose_b:
b = ops.transpose(b)
if a_is_sparse_tensor and not b_is_sparse_tensor:
return tf.sparse.sparse_dense_matmul(a, b)
elif not a_is_sparse_tensor and b_is_sparse_tensor:
return ops.transpose(
tf.sparse.sparse_dense_matmul(ops.transpose(b), ops.transpose(a))
)
# Fallthrough to tfsp implementation
# Defaults to tf.matmul if neither is sparse
if a_is_sparse_tensor:
a = tfsp.CSRSparseMatrix(a)
if b_is_sparse_tensor:
b = tfsp.CSRSparseMatrix(b)
out = tfsp.matmul(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
if hasattr(out, 'to_sparse_tensor'):
return out.to_sparse_tensor()
return out
def mixed_mode_dot(a, b):
"""
Computes the equivalent of `tf.einsum('ij,bjk->bik', a, b)`, but
works for both dense and sparse inputs.
:param a: Tensor or SparseTensor with rank 2.
:param b: Tensor or SparseTensor with rank 3.
:return: Tensor or SparseTensor with rank 3.
"""
s_0_, s_1_, s_2_ = K.int_shape(b)
B_T = ops.transpose(b, (1, 2, 0))
B_T = ops.reshape(B_T, (s_1_, -1))
output = dot(a, B_T)
output = ops.reshape(output, (s_1_, s_2_, -1))
output = ops.transpose(output, (2, 0, 1))
return output
def filter_dot(fltr, features):
"""
Computes the matrix multiplication between a graph filter and node features,
automatically handling data modes.
:param fltr: Tensor or SparseTensor of rank 2 or 3.
:param features: Tensor or SparseTensor of rank 2 or 3.
:return: Tensor or SparseTensor with rank = max(rank(a), rank(b)).
"""
mode = modes.autodetect_mode(fltr, features)
if mode == modes.SINGLE or mode == modes.BATCH:
return dot(fltr, features)
else:
return mixed_mode_dot(fltr, features)
def matmul_A_B(a, b):
"""
Computes A * B, dealing automatically with sparsity and data modes.
:param a: Tensor or SparseTensor with rank 2 or 3.
:param b: Tensor or SparseTensor with rank 2 or 3.
:return: Tensor or SparseTensor with rank = max(rank(a), rank(b)).
"""
mode = modes.autodetect_mode(a, b)
if mode == modes.MIXED:
# Mixed mode (rank(a)=2, rank(b)=3)
output = mixed_mode_dot(a, b)
elif mode == modes.iMIXED:
# Inverted mixed (rank(a)=3, rank(b)=2)
# This implementation is faster than using rank 3 sparse matmul with tfsp
s_1_a, s_2_a = tf.shape(a)[1], tf.shape(a)[2]
s_1_b = tf.shape(b)[1]
a_flat = ops.reshape(a, (-1, s_2_a))
output = dot(a_flat, b)
output = ops.reshape(output, (-1, s_1_a, s_1_b))
else:
# Single (rank(a)=2, rank(b)=2) and batch (rank(a)=3, rank(b)=3) mode
output = dot(a, b)
return output
def matmul_AT_B(a, b):
"""
Computes A.T * B, dealing automatically with sparsity and data modes.
:param a: Tensor or SparseTensor with rank 2 or 3.
:param b: Tensor or SparseTensor with rank 2 or 3.
:return: Tensor or SparseTensor with rank = max(rank(a), rank(b)).
"""
mode = modes.autodetect_mode(a, b)
if mode == modes.SINGLE or mode == modes.MIXED:
# Single (rank(a)=2, rank(b)=2)
# Mixed (rank(a)=2, rank(b)=3)
a_t = ops.transpose(a)
elif mode == modes.iMIXED or mode == modes.BATCH:
# Inverted mixed (rank(a)=3, rank(b)=2)
# Batch (rank(a)=3, rank(b)=3)
a_t = ops.transpose(a, (0, 2, 1))
else:
raise ValueError('Expected ranks to be 2 or 3, got {} and {}'.format(
K.ndim(a), K.ndim(b)
))
return matmul_A_B(a_t, b)
def matmul_A_BT(a, b):
"""
Computes A * B.T, dealing automatically with sparsity and data modes.
:param a: Tensor or SparseTensor with rank 2 or 3.
:param b: Tensor or SparseTensor with rank 2 or 3.
:return: Tensor or SparseTensor with rank = max(rank(a), rank(b)).
"""
mode = modes.autodetect_mode(a, b)
if mode == modes.SINGLE or mode == modes.iMIXED:
# Single (rank(a)=2, rank(b)=2)
# Inverted mixed (rank(a)=3, rank(b)=2)
b_t = ops.transpose(b)
elif mode == modes.MIXED or mode == modes.BATCH:
# Mixed (rank(a)=2, rank(b)=3)
# Batch (rank(a)=3, rank(b)=3)
b_t = ops.transpose(b, (0, 2, 1))
else:
raise ValueError('Expected ranks to be 2 or 3, got {} and {}'.format(
K.ndim(a), K.ndim(b)
))
return matmul_A_B(a, b_t)
def matmul_AT_B_A(a, b):
"""
Computes A.T * B * A, dealing automatically with sparsity and data modes.
:param a: Tensor or SparseTensor with rank 2 or 3.
:param b: Tensor or SparseTensor with rank 2 or 3.
:return: Tensor or SparseTensor with rank = max(rank(a), rank(b)).
"""
at_b = matmul_AT_B(a, b)
at_b_a = matmul_A_B(at_b, a)
return at_b_a
def matmul_A_B_AT(a, b):
"""
Computes A * B * A.T, dealing automatically with sparsity and data modes.
:param a: Tensor or SparseTensor with rank 2 or 3.
:param b: Tensor or SparseTensor with rank 2 or 3.
:return: Tensor or SparseTensor with rank = max(rank(a), rank(b)).
"""
b_at = matmul_A_BT(a, b)
a_b_at = matmul_A_B(a, b_at)
return a_b_at
def matrix_power(a, k):
"""
If a is a square matrix, computes a^k. If a is a rank 3 Tensor of square
matrices, computes the exponent of each inner matrix.
:param a: Tensor or SparseTensor with rank 2 or 3. The innermost two
dimensions must be the same.
:param k: int, the exponent to which to raise the matrices.
:return: Tensor or SparseTensor with same rank as the input.
"""
x_k = a
for _ in range(k - 1):
x_k = matmul_A_B(a, x_k)
return x_k
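# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Assuming the functions above are importable, typical shapes look like:
#
#   a = tf.random.normal((4, 3))
#   b = tf.random.normal((3, 5))
#   dot(a, b).shape                                # (4, 5), plain dense matmul
#
#   a_sp = tf.sparse.from_dense(tf.eye(4))
#   dot(a_sp, tf.random.normal((4, 3))).shape      # (4, 3), sparse-dense fast path
#
#   adj = tf.eye(4)                                # rank-2 "filter"
#   feats = tf.random.normal((8, 4, 5))            # rank-3 batch of node features
#   matmul_A_B(adj, feats).shape                   # (8, 4, 5), mixed mode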
|
StarcoderdataPython
|
6691822
|
from functools import reduce
from operator import mul
def persistence(n):
return 0 if (n < 10) else (1 + persistence(reduce(mul, [int(d) for d in str(n)])))
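# Hedged usage example (added for illustration): multiplicative persistence is the
# number of times the digits must be multiplied together before a single digit remains.
if __name__ == "__main__":
    print(persistence(39))   # 3: 39 -> 27 -> 14 -> 4
    print(persistence(999))  # 4: 999 -> 729 -> 126 -> 12 -> 2
    print(persistence(4))    # 0: already a single digit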
|
StarcoderdataPython
|
3407424
|
# WiSe 17/18
from typing import List, Tuple
import lecture_classes as lc
# Unfortunately, pyflakes needs this syntactic boilerplate to recognize the names;
# otherwise the code would also work with a simple
# from lecture_classes import *
Lecture = lc.Lecture
T = lc.T
morgen = lc.morgen
vormittag = lc.vormittag
mittag = lc.mittag
nachmittag = lc.nachmittag
abend = lc.abend
"""
morgen(weekday) == T("08:00","09:30",weekday)
vormittag(weekday) == T("09:45","11:15",weekday)
mittag(weekday) == T("11:45","13:15",weekday)
nachmittag(weekday) == T("14:00","15:30",weekday)
abend(weekday) == T("15:45","17:15",weekday)
"""
faecher: List[Lecture] = []
rem: List[Tuple[int, str]] = []
add: List[Tuple[int, Lecture]] = []
holiday: List[Tuple[int, int]] = []
remall: List[Tuple[int, str]] = []
faecher = [
Lecture("PRAX", "Science Park SB",
T("08:00", "16:30", 1),
T("08:00", "16:30", 2),
T("08:00", "16:30", 3),
T("08:00", "16:30", 4),
T("08:00", "16:30", 5)
),
Lecture("unixAG", "A204.2", T("16:30", "17:30", 3)),
]
rem = [
(20180509, "unixAG"),
]
add = [
(20180509,
Lecture("Mitgliederversammlung unixAG",
"A209.1",
T("19:00", "20:30", 3)
)
),
]
remall = [
(20180601, "PRAX"),
]
holiday = [
(20180510, 20180510),
]
|
StarcoderdataPython
|
1728777
|
<reponame>globusgenomics/galaxy
class allele_walker:
'''
Given a set of site concordant records, call consensus on variants with matching alleles.
'''
def __init__(self, recordSet):
self.recordSet = recordSet
|
StarcoderdataPython
|
1860173
|
<filename>core/controllers/filter_controller_var2.py
from numpy import dot, maximum
from numpy.linalg import solve
from numpy import sign
from scipy.linalg import sqrtm
import cvxpy as cp
import numpy as np
import scipy
from cvxpy.error import SolverError
from .controller import Controller
class FilterControllerVar2(Controller):
"""Class for solving the ProBF-QCQP with two controller inputs."""
    def __init__(self, affine_dynamics, phi_0, phi_1, desired_controller, sigma = 2.0):
        """Create a FilterControllerVar2 object.
        Filters the desired controller through a probabilistic CBF chance
        constraint (ProBF-QCQP) built from the mean estimates phi_0, phi_1 and
        their variances, scaled by the confidence parameter sigma.
        Inputs:
        Control-affine dynamics, affine_dynamics
        Barrier drift estimate, phi_0: callable returning (phi0, var_b, var_ab)
        Barrier actuation estimate, phi_1: callable returning (phi1, var_a)
        Nominal controller to be filtered, desired_controller
        Confidence multiplier, sigma: float
        """
Controller.__init__(self, affine_dynamics)
self.affine_dynamics = affine_dynamics
self.phi_0 = phi_0
self.phi_1 = phi_1
self.desired_controller = desired_controller
self.sigma = sigma
def eval_novar(self, x, t, phi0, phi1, uc):
num = - phi0 - dot( phi1, uc )
        den = dot(phi1, phi1.T)
if den!=0:
lambda_star = maximum( 0 , num / den )
else:
lambda_star = 0
return uc + lambda_star * phi1.T
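    # Added note (illustration): eval_novar implements the closed-form solution of the
    # variance-free CBF-QP,
    #     u* = u_c + max(0, -(phi0 + phi1 u_c) / (phi1 phi1^T)) * phi1^T,
    # i.e. the desired input u_c is only modified when it would violate phi0 + phi1 u >= 0.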
def eval(self, x, t):
#print("Evaluating")
# Evaluate mean and variance
phi0, varb, varab = self.phi_0( x, t )
phi1, vara = self.phi_1( x, t )
# Obtain desired controller
uc = self.desired_controller.process( self.desired_controller.eval(x, t ) )
u = cp.Variable((4))
sigma = self.sigma
# If sigma is very small, there is no need to explicitly use the variance
if(sigma<0.05):
return self.eval_novar(x, t, phi0, phi1, uc)
# Constructing the matrices of the convex program
deltaf = np.array([[vara[0],0,varab[0],0],[0,vara[1],varab[1],0],[varab[0],varab[1],varb[0],0],[0,0,0,0]])
delta = scipy.linalg.sqrtm(deltaf)
cu = np.array([[0],[0],[0],[1]])
# Try to solve the convex program. If infeasible, reduce sigma.
        prob = cp.Problem(
            cp.Minimize(cp.square(u[0]) + cp.square(u[1]) - 2 * u[0] * uc[0] - 2 * u[1] * uc[1]),
            [phi1[0] * u[0] + phi1[1] * u[1] + phi0[0] - sigma * u[3] >= 0,
             cp.norm(delta @ u) <= cu.T @ u,
             u[3] >= 0,
             u[2] - 1 == 0])
try:
prob.solve()
except SolverError:
pass
if prob.status not in ["optimal","optimal_inaccurate"]:
print(prob.status)
print("Not solved",phi0,phi1,vara,varab,varb,sigma)
count = 0
while count<3 and prob.status not in ["optimal","optimal_inaccurate"]:
sigmahigh = sigma
count = count+1
u = cp.Variable((4))
sigma = sigma/2.0
            prob = cp.Problem(
                cp.Minimize(cp.square(u[0]) + cp.square(u[1]) - 2 * u[0] * uc[0] - 2 * u[1] * uc[1]),
                [phi1[0] * u[0] + phi1[1] * u[1] + phi0[0] - sigma * u[3] >= 0,
                 cp.norm(delta @ u) <= cu.T @ u,
                 u[3] >= 0,
                 u[2] - 1 == 0])
try:
prob.solve()
except SolverError:
print("Failed")
pass
if prob.status in ["optimal", "optimal_inaccurate"]:
ucurr = [u[0].value, u[1].value]
else:
ucurr = uc
print("Sigma reduced to:", sigma)
else:
ucurr = [u[0].value, u[1].value]
self.sigma = sigma
return self.desired_controller.process(np.array([ucurr[0],ucurr[1]])).T
|
StarcoderdataPython
|
3345307
|
import requests
import json
class ForloopClient:
def __init__(self, key=None, secret=None, url=None):
self.key = key
self.secret = secret
#self.session = requests.Session()
if url:
self.url = url
else:
self.url = "https://www.forloop.ai"
def get_nodes(self,pipeline):
response=requests.get(self.url+"/api/v1/nodes")
print(response,response.content)
result=response.json()["results"]
return(result)
def analyze_data(self,filename):
payload={"filename":filename}
response=requests.post(self.url+"/api/v1/analyze_data",data=json.dumps(payload))
#print("RESPONSE",response,response.content)
result=response.json()#["results"]
return(result)
def clean_data(self,filename):
payload={"filename":filename}
response=requests.post(self.url+"/api/v1/clean_data",data=json.dumps(payload))
result=pd.read_pickle("C:\\Users\\EUROCOM\\Documents\\Git\\ForloopAI\\forloop_platform_dominik\\cleaned_data.pkl")
return(result)
def run_python_script(self,filename,dir_path):
#dir_path="C:\\Users\\EUROCOM\\Documents\\Git\\ForloopAI\\forloop_api"
payload={"filename":filename,"dir_path":dir_path}
response=requests.post(self.url+"/api/v1/run_python_script",data=json.dumps(payload))
print("RESPONSE",response,response.content)
result=response.json()#["results"]
return(result)
################# OLDER ##################
import pandas as pd
from idfops.pandas_operations import read_spreadsheet
#Package functions
def load_df(filename):
try:
df=read_spreadsheet(filename)[0]
except FileNotFoundError as e:
print("File was not found")
df=None
return(df)
def send_data_to_forloop(df):
"""TODO: Implementation"""
pass
def import_batch_data():
"""
Loading data from batches coming from the platform
TODO: Implementation
"""
pass
|
StarcoderdataPython
|
5040893
|
<reponame>siddhantdixit/OOP-ClassWork
p = None
# print(id(p))
class Student:
def __init__(self):
self.name = "Siddhant"
# print(id(self.name))
self.roll = 123
global p
p = self.name
def __del__(self):
print("Deleted")
del self.name
del self.roll
obj = None
print(id(obj))
x = Student()
obj = x
x.__del__()
print(id(x))
print(id(obj))
# print("Program Completed")
# print(obj.name)
# print(obj.roll)
# y = Student()
# print(y.name,y.roll)
|
StarcoderdataPython
|
6542341
|
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
import uiza
class UizaBase(object):
data_validated = None
connection = None
def create(self, **data):
"""
Create data
:param data: data body will be created
"""
data_body = dict(appId=uiza.app_id)
data_body.update(data)
result = self.connection.post(data=data_body)
try:
query = self.url_encode(params={'id': result[0].id, 'appId': uiza.app_id})
result = self.connection.get(query=query)
except Exception:
pass
return result
def update(self, **kwargs):
"""
Update data
:param kwargs: data body will be updated
"""
data_body = dict(appId=uiza.app_id)
if kwargs:
data_body.update(kwargs)
result = self.connection.put(data=data_body)
try:
query = self.url_encode(params={'id': result[0].id, 'appId': uiza.app_id})
result = self.connection.get(query=query)
except Exception:
pass
return result
def list(self, **kwargs):
"""
List data
:param kwargs: params
"""
params = dict(appId=uiza.app_id)
if kwargs:
params.update(kwargs)
query = self.url_encode(params=params)
result = self.connection.get(query=query)
return result
def retrieve(self, id):
"""
Get detail
:param id: id of object
"""
query = self.url_encode(params={'id': id, 'appId': uiza.app_id})
result = self.connection.get(query=query)
return result
    def delete(self, id):
        """
        Delete data
        :param id: id of object
        """
result = self.connection.delete(dict(id=id, appId=uiza.app_id))
return result
def url_encode(self, params):
return '?{}'.format(urlencode(params))
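    # Added illustration: url_encode simply turns a params dict into a query string,
    # e.g. self.url_encode({'id': 'abc', 'appId': 'xyz'}) -> '?id=abc&appId=xyz'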
|
StarcoderdataPython
|
116909
|
<reponame>wood-ghost/PaddleOMZAnalyzer
import os, sys, os.path
import argparse
import numpy as np
import cv2
from openvino.inference_engine import IENetwork, IECore, ExecutableNetwork
from IPython import display
from PIL import Image, ImageDraw
import urllib, shutil, json
import yaml
from yaml.loader import SafeLoader
#Helper functions
def image_preprocess(input_image, size):
img = cv2.resize(input_image, (size,size))
img = np.transpose(img, [2,0,1]) / 255
img = np.expand_dims(img, 0)
##NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}
img_mean = np.array([0.485, 0.456,0.406]).reshape((3,1,1))
img_std = np.array([0.229, 0.224, 0.225]).reshape((3,1,1))
img -= img_mean
img /= img_std
return img.astype(np.float32)
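# Added illustration (hedged): for a BGR image loaded with cv2.imread (H x W x 3, uint8),
# image_preprocess(img, 608) returns a normalized NCHW float32 blob of shape
# (1, 3, 608, 608), matching the reshaped 'image' input of the network used below.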
def draw_box(img, results, label_list, scale_x, scale_y):
for i in range(len(results)):
#print(results[i])
bbox = results[i, 2:]
label_id = int(results[i, 0])
score = results[i, 1]
if(score>0.20):
xmin, ymin, xmax, ymax = [int(bbox[0]*scale_x), int(bbox[1]*scale_y),
int(bbox[2]*scale_x), int(bbox[3]*scale_y)]
cv2.rectangle(img,(xmin, ymin),(xmax, ymax),(0,255,0),3)
font = cv2.FONT_HERSHEY_SIMPLEX
            label_text = label_list[label_id]
cv2.rectangle(img, (xmin, ymin), (xmax, ymin-70), (0,255,0), -1)
cv2.putText(img, "#"+label_text,(xmin,ymin-10), font, 1.2,(255,255,255), 2,cv2.LINE_AA)
cv2.putText(img, str(score),(xmin,ymin-40), font, 0.8,(255,255,255), 2,cv2.LINE_AA)
return img
def parse_args():
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("--model_file", type=str, default='', help="model filename, default(None)")
return parser.parse_args()
def main():
args = parse_args()
pdmodel_file = args.model_file
dir_name = os.path.dirname(pdmodel_file)
pdmodel_config = dir_name + "/infer_cfg.yml"
if not os.path.exists(pdmodel_file) or not os.path.exists(pdmodel_config) or not os.path.exists("./horse.jpg"):
print('model params file "{}" or "{}" or jpg file "./horse.jpg" not exists. Please check them.'.format(pdmodel_file, pdmodel_config))
return
device = 'CPU'
#load the data from config, and setup the parameters
label_list=[]
with open(pdmodel_config) as f:
data = yaml.load(f, Loader=SafeLoader)
        label_list = data['label_list']
ie = IECore()
net = ie.read_network(pdmodel_file)
net.reshape({'image': [1, 3, 608, 608], 'im_shape': [
1, 2], 'scale_factor': [1, 2]})
exec_net = ie.load_network(net, device)
assert isinstance(exec_net, ExecutableNetwork)
input_image = cv2.imread("horse.jpg")
test_image = image_preprocess(input_image, 608)
test_im_shape = np.array([[608, 608]]).astype('float32')
test_scale_factor = np.array([[1, 2]]).astype('float32')
#print(test_image.shape)
inputs_dict = {'image': test_image, "im_shape": test_im_shape,
"scale_factor": test_scale_factor}
output = exec_net.infer(inputs_dict)
result_ie = list(output.values())
result_image = cv2.imread("horse.jpg")
scale_x = result_image.shape[1]/608*2
scale_y = result_image.shape[0]/608
result_image = draw_box(result_image, result_ie[0], label_list, scale_x, scale_y)
_,ret_array = cv2.imencode('.jpg', result_image)
i = display.Image(data=ret_array)
display.display(i)
cv2.imwrite("result.png",result_image)
print('Done. result save in ./result.png.')
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
8050099
|
'''
Preprocess text
'''
import json
def ht_txt2json():
'''
    Convert the English txt version of HP (Harry Potter) into a JSON file
'''
res = {"title": "", "chapterCount": 0, "chapters": []}
with open("tmp.txt", "r", encoding="utf-8") as rfp:
lines = rfp.readlines()
res["title"] = lines[0]
chapindexes = []
for i in range(1, len(lines)):
if lines[i][0:7] == "CHAPTER":
chapindexes.append(i)
chapindexes.append(len(lines) + 1)
res["chapterCount"] = len(chapindexes) - 1
for i in range(0, len(chapindexes) - 1):
tmpchapter = {
"title": lines[chapindexes[i] + 1], "lineCount": chapindexes[i + 1] - chapindexes[i] - 2, "lines": []}
for j in range(chapindexes[i] + 2, chapindexes[i + 1]):
if j < len(lines):
tmpchapter["lines"].append(lines[j])
res["chapters"].append(tmpchapter)
json.dump(res, open("tmpp.json", "w", encoding="utf-8"))
if __name__ == "__main__":
ht_txt2json()
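# Added note (illustration): tmp.txt is expected to start with the book title on its
# first line; each chapter begins with a line starting with "CHAPTER", followed by the
# chapter title on the next line. The resulting tmpp.json mirrors this structure with
# "title", "chapterCount" and a "chapters" list of {"title", "lineCount", "lines"} dicts.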
|
StarcoderdataPython
|
9656279
|
# *****************************************************************
# Copyright (c) 2013 Massachusetts Institute of Technology
#
# Developed exclusively at US Government expense under US Air Force contract
# FA8721-05-C-002. The rights of the United States Government to use, modify,
# reproduce, release, perform, display or disclose this computer software and
# computer software documentation in whole or in part, in any manner and for
# any purpose whatsoever, and to have or authorize others to do so, are
# Unrestricted and Unlimited.
#
# Licensed for use under the BSD License as described in the BSD-LICENSE.txt
# file in the root directory of this release.
#
# Project: SPAR
# Authors: OMD
# Description: Unit tests for the Enum class.
#
# Modifications:
# Date Name Modification
# ---- ---- ------------
# 07 Dec 2011 omd Original Version
# *****************************************************************
import unittest
from enum import Enum
class EnumTest(unittest.TestCase):
def test_basic(self):
"""Test the most basic usage."""
ANIMALS = Enum('CAT', 'DOG', 'TIGER', 'WOLF')
self.assertEqual(ANIMALS.CAT, 0)
self.assertEqual(ANIMALS.DOG, 1)
self.assertEqual(ANIMALS.TIGER, 2)
self.assertEqual(ANIMALS.WOLF, 3)
    def test_selected_integers(self):
        """Test that user-defined integer assignments work."""
ANIMALS = Enum(CAT = 10, DOG = 22, TIGER = 3, WOLF = 7)
self.assertEqual(ANIMALS.CAT, 10)
self.assertEqual(ANIMALS.DOG, 22)
self.assertEqual(ANIMALS.TIGER, 3)
self.assertEqual(ANIMALS.WOLF, 7)
def test_mixed_selected_assigned_integers(self):
"""Test that you can mix specified mappings from integer to value and
just a list of values and it works right."""
ANIMALS = Enum('DOG', 'CAT', 'WOLF', HAMSTER = 0, RABBIT = 2,
WHALE = 100)
self.assertEqual(ANIMALS.HAMSTER, 0)
self.assertEqual(ANIMALS.DOG, 1)
self.assertEqual(ANIMALS.RABBIT, 2)
self.assertEqual(ANIMALS.CAT, 3)
self.assertEqual(ANIMALS.WOLF, 4)
self.assertEqual(ANIMALS.WHALE, 100)
def test_to_string(self):
"""Uses the same enum as test_mixed_selected_assigned_integers and tests
that we can map from integer back to string."""
ANIMALS = Enum('DOG', 'CAT', 'WOLF', HAMSTER = 0, RABBIT = 2,
WHALE = 100)
self.assertEqual(ANIMALS.to_string(0), 'HAMSTER')
self.assertEqual(ANIMALS.to_string(1), 'DOG')
self.assertEqual(ANIMALS.to_string(2), 'RABBIT')
self.assertEqual(ANIMALS.to_string(3), 'CAT')
self.assertEqual(ANIMALS.to_string(4), 'WOLF')
self.assertEqual(ANIMALS.to_string(100), 'WHALE')
def test_constant(self):
"""An enum should be constant. Try to modify it in various ways and make
sure it doesn't change."""
ANIMALS = Enum('DOG', 'CAT', 'WOLF', HAMSTER = 0, RABBIT = 2,
WHALE = 100)
with self.assertRaises(ValueError):
del(ANIMALS.DOG)
with self.assertRaises(ValueError):
ANIMALS.CAT = 222
def test_generate_string(self):
"""Test the values_generator method."""
ANIMALS = Enum('DOG', 'CAT', 'WOLF', HAMSTER = 0, RABBIT = 2,
WHALE = 100)
expected = ['HAMSTER', 'DOG', 'RABBIT', 'CAT', 'WOLF', 'WHALE']
expected_index = 0
for val in ANIMALS.values_generator():
self.assertEqual(val, expected[expected_index])
expected_index += 1
def test_generate_numbers(self):
"""Test the numbers_generator method."""
ANIMALS = Enum('DOG', 'CAT', 'WOLF', HAMSTER = 0, RABBIT = 2,
WHALE = 100)
expected = [0, 1, 2, 3, 4, 100]
expected_index = 0
for i in ANIMALS.numbers_generator():
self.assertEqual(i, expected[expected_index])
expected_index += 1
def test_from_dict_list(self):
"""Test that the from_dict_list factory method works."""
E_NORMAL = Enum('DOG', 'CAT', HAMSTER = 1)
E_FACTORY = Enum.from_dict_list({'HAMSTER': 1}, ['DOG', 'CAT'])
self.assertEqual(E_FACTORY.DOG, E_NORMAL.DOG)
self.assertEqual(E_FACTORY.CAT, E_NORMAL.CAT)
self.assertEqual(E_FACTORY.HAMSTER, E_NORMAL.HAMSTER)
self.assertEqual(E_FACTORY.numbers_list(), E_NORMAL.numbers_list())
for f, n in zip(E_FACTORY.values_generator(),
E_NORMAL.values_generator()):
self.assertEqual(f, n)
# zip tries to handle generators that produce different numbers of items
# so we double check that they're both the same size here.
self.assertEqual(len([x for x in E_FACTORY.values_generator()]),
len([x for x in E_NORMAL.values_generator()]))
def test_from_enum(self):
BASE = Enum('DOG', CAT = 3, HORSE = 5)
NEW = Enum.from_enum(BASE, 'HAMSTER', 'RAT', 'MOUSE', SNAKE = 6,
DONKEY = 11)
self.assertEqual(NEW.DOG, 0)
self.assertEqual(NEW.HAMSTER, 1)
self.assertEqual(NEW.RAT, 2)
self.assertEqual(NEW.CAT, 3)
self.assertEqual(NEW.MOUSE, 4)
self.assertEqual(NEW.HORSE, 5)
self.assertEqual(NEW.SNAKE, 6)
self.assertEqual(NEW.DONKEY, 11)
def test_from_enum_collisions(self):
"""Make sure that an exception gets thrown if we try to create an enum
via from_enum but there are collisions in the requested values."""
BASE = Enum('CAR', 'BIKE', SCOOTER = 5)
# Try to create new enums but with numerical values that already exist
# in the base enum. This should cause an exception.
with self.assertRaises(ValueError):
NEW = Enum.from_enum(BASE, AIRPLANE = 1)
with self.assertRaises(ValueError):
NEW3 = Enum.from_enum(BASE, AIRPLANE = 0)
with self.assertRaises(ValueError):
NEW4 = Enum.from_enum(BASE, AIRPLANE = 5)
with self.assertRaises(ValueError):
NEW5 = Enum.from_enum(BASE, 'TRAIN', AIRPLANE = 5)
# Try to create new enums but with names that already exist in the base
# enum (though with different numerical values). This should cause an
# exception.
with self.assertRaises(ValueError):
NEW6 = Enum.from_enum(BASE, 'CAR')
with self.assertRaises(ValueError):
NEW7 = Enum.from_enum(BASE, BIKE = 11)
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
1708152
|
<reponame>mih/multimatch
#!/usr/bin/python3
import numpy as np
import math
import sys
import collections
def cart2pol(x, y):
"""Transform cartesian into polar coordinates.
:param x: float
:param y : float
:return: rho: float, length from (0,0)
:return: theta: float, angle in radians
"""
rho = np.sqrt(x ** 2 + y ** 2)
theta = np.arctan2(y, x)
return rho, theta
def calcangle(x1, x2):
    """Calculate the angle between two vectors (saccades).
    :param: x1, x2: list of float
    :return: angle: float, angle in degrees
    """
angle = math.degrees(
math.acos(
np.dot(x1, x2) / (np.linalg.norm(x1) * np.linalg.norm(x2))))
return angle
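# Added illustration (hedged): expected values for the two helpers above, e.g.
#   cart2pol(1.0, 1.0)        -> (1.4142..., 0.7853...)   # rho = sqrt(2), theta = pi/4
#   calcangle([1, 0], [0, 1]) -> 90.0                     # orthogonal saccades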
def gen_scanpath_structure(data):
"""Transform a fixation vector into a vector based scanpath representation.
    Takes an n x 3 fixation vector (start_x, start_y, duration) in the form of
    a record array and transforms it into the appropriate vector-based scanpath
    representation. Indices are as follows:
0: fixation_x
1: fixation_y
2: fixation_dur
3: saccade_x
4: saccade_y
5: saccade_lenx
6: saccade_leny
7: saccade_theta
8: saccade_rho
:param: data: record array
:return: eyedata: array-like, list of lists, vector-based scanpath representation
"""
fixation_x = []
fixation_y = []
fixation_dur = []
saccade_x = []
saccade_y = []
saccade_lenx = []
saccade_leny = []
saccade_theta = []
saccade_rho = []
# get the number of rows
length = np.shape(data)[0]
# keep coordinates and durations of fixations
for i in range(0, length):
fixation_x.append(data[i]['start_x'])
fixation_y.append(data[i]['start_y'])
fixation_dur.append(data[i]['duration'])
# fixations are the start coordinates for saccades
for i in range(0, length - 1):
saccade_x.append(data[i]['start_x'])
saccade_y.append(data[i]['start_y'])
# calculate saccade length and angle from vector lengths between fixations
for i in range(1, length):
saccade_lenx.append(fixation_x[i] - saccade_x[i - 1])
saccade_leny.append(fixation_y[i] - saccade_y[i - 1])
rho, theta = cart2pol(saccade_lenx[i - 1], saccade_leny[i - 1])
saccade_rho.append(rho)
saccade_theta.append(theta)
# append everything into an ordered dict.
eyedata = collections.OrderedDict()
eyedata['fixation_x'] = fixation_x
eyedata['fixation_y'] = fixation_y
eyedata['fixation_dur'] = fixation_dur
eyedata['saccade_x'] = saccade_x
eyedata['saccade_y'] = saccade_y
eyedata['saccade_lenx'] = saccade_lenx
eyedata['saccade_leny'] = saccade_leny
eyedata['saccade_theta'] = saccade_theta
eyedata['saccade_rho'] = saccade_rho
return eyedata
def keepsaccade(i,
j,
sim_lenx,
sim_leny,
sim_x,
sim_y,
sim_theta,
sim_len,
sim_dur,
data
):
"""
Helper function for scanpath simplification. If no simplification can be
    performed on a particular saccade, this function stores the original data.
:param i: current index
:param j: current index
:param sim_lenx: list
:param sim_leny: list
:param sim_x: list
:param sim_y: list
:param sim_theta: list
:param sim_len: list
:param sim_dur: list
:param data: eyedata, list of list
"""
sim_lenx.insert(j, data['saccade_lenx'][i])
sim_leny.insert(j, data['saccade_leny'][i])
sim_x.insert(j, data['saccade_x'][i])
sim_y.insert(j, data['saccade_y'][i])
sim_theta.insert(j, data['saccade_theta'][i])
sim_len.insert(j, data['saccade_rho'][i])
sim_dur.insert(j, data['fixation_dur'][i])
i += 1
j += 1
return sim_lenx, sim_leny, sim_x, sim_y, sim_theta, sim_len, sim_dur, i, j
def simlen(data, TAmp, TDur):
"""Simplify scanpaths based on saccadic length.
Simplify consecutive saccades if their length is smaller than the
threshold TAmp and the duration of the closest fixations is lower
than threshold TDur.
:param: data: array-like, list of lists, output of gen_scanpath_structure
:param: TAmp: float, length in px
:param: TDur: float, time in seconds
:return: eyedata: list of lists, one iteration of length based simplification
"""
if len(data['saccade_x']) < 1:
return data
# if the scanpath is long enough
else:
i = 0
j = 0
# initialize new empty lists for simplified results
sim_dur = []
sim_x = []
sim_y = []
sim_lenx = []
sim_leny = []
sim_theta = []
sim_len = []
# while we don't run into index errors
while i <= len(data['saccade_x']) - 1:
# if saccade is the last one
if i == len(data['saccade_x']) - 1:
# and if saccade has short length:
if data['saccade_rho'][i] < TAmp:
# and if the fixation duration is short:
if (data['fixation_dur'][-1] < TDur) or (data['fixation_dur'][-2] < TDur):
# calculate sum of local vectors for simplification
v_x = data['saccade_lenx'][-2] + data['saccade_lenx'][-1]
v_y = data['saccade_leny'][-2] + data['saccade_leny'][-1]
rho, theta = cart2pol(v_x, v_y)
# save them in the new vectors
sim_lenx[j - 1] = v_x
sim_leny[j - 1] = v_y
sim_theta[j - 1] = theta
sim_len[j - 1] = rho
sim_dur.insert(j, data['fixation_dur'][i - 1])
j -= 1
i += 1
# if fixation duration is long:
else:
# insert original data in new list -- no simplification
sim_lenx, sim_leny, sim_x, sim_y, sim_theta, sim_len, sim_dur, i, j = keepsaccade(i,
j,
sim_lenx,
sim_leny,
sim_x,
sim_y,
sim_theta,
sim_len,
sim_dur,
data)
# if saccade doesn't have short length:
else:
# insert original data in new list -- no simplification
sim_lenx, sim_leny, sim_x, sim_y, sim_theta, sim_len, sim_dur, i, j = keepsaccade(i,
j,
sim_lenx,
sim_leny,
sim_x,
sim_y,
sim_theta,
sim_len,
sim_dur,
data)
# if saccade is not the last one
else:
# and if saccade has short length
if (data['saccade_rho'][i] < TAmp) and (i < len(data['saccade_x']) - 1):
# and if fixation durations are short
if (data['fixation_dur'][i + 1] < TDur) or (data['fixation_dur'][i] < TDur):
# calculate sum of local vectors in x and y length for simplification
v_x = data['saccade_lenx'][i] + data['saccade_lenx'][i + 1]
v_y = data['saccade_leny'][i] + data['saccade_leny'][i + 1]
rho, theta = cart2pol(v_x, v_y)
# save them in the new vectors
sim_lenx.insert(j, v_x)
sim_leny.insert(j, v_y)
sim_x.insert(j, data['saccade_x'][i])
sim_y.insert(j, data['saccade_y'][i])
sim_theta.insert(j, theta)
sim_len.insert(j, rho)
# add the old fixation duration
sim_dur.insert(j, data['fixation_dur'][i])
i += 2
j += 1
# if fixation durations are long
else:
# insert original data in new lists -- no simplification
sim_lenx, sim_leny, sim_x, sim_y, sim_theta, sim_len, sim_dur, i, j = keepsaccade(i,
j,
sim_lenx,
sim_leny,
sim_x,
sim_y,
sim_theta,
sim_len,
sim_dur,
data)
# if saccade doesn't have short length
else:
# insert original data in new list -- no simplification
sim_lenx, sim_leny, sim_x, sim_y, sim_theta, sim_len, sim_dur, i, j = keepsaccade(i,
j,
sim_lenx,
sim_leny,
sim_x,
sim_y,
sim_theta,
sim_len,
sim_dur,
data)
# append the last fixation duration
sim_dur.append(data['fixation_dur'][-1])
# append everything into an ordered dict.
eyedata = collections.OrderedDict()
eyedata['fixation_dur'] = sim_dur
eyedata['saccade_x'] = sim_x
eyedata['saccade_y'] = sim_y
eyedata['saccade_lenx'] = sim_lenx
eyedata['saccade_leny'] = sim_leny
eyedata['saccade_theta'] = sim_theta
eyedata['saccade_rho'] = sim_len
return eyedata
def simdir(data,
TDir,
TDur
):
"""Simplify scanpaths based on angular relations between saccades (direction).
Simplify consecutive saccades if the angle between them is smaller than the
threshold TDir and the duration of the intermediate fixations is lower
than threshold TDur.
:param: data: array-like, list of lists, output of gen_scanpath_structure
:param: TDir: float, angle in degrees
:param: TDur: float, time in seconds
:return: eyedata: list of lists, one iteration of direction based simplification
"""
if len(data['saccade_x']) < 1:
return data
# if the scanpath is long enough
else:
i = 0
j = 0
# initialize empty lists
sim_dur = []
sim_x = []
sim_y = []
sim_lenx = []
sim_leny = []
sim_theta = []
sim_len = []
# while we don't run into index errors
while i <= len(data['saccade_x']) - 1:
if i < len(data['saccade_x']) - 1:
# lets check angles
v1 = [data['saccade_lenx'][i], data['saccade_leny'][i]]
v2 = [data['saccade_lenx'][i + 1], data['saccade_leny'][i + 1]]
angle = calcangle(v1, v2)
else:
# an angle of infinite size won't go into any further loop
angle = float('inf')
# if the angle is small and its not the last saccade
if (angle < TDir) & (i < len(data['saccade_x']) - 1):
# if the fixation duration is short:
if data['fixation_dur'][i + 1] < TDur:
# if the fixation durations are short:
# calculate the sum of local vectors
v_x = data['saccade_lenx'][i] + data['saccade_lenx'][i + 1]
v_y = data['saccade_leny'][i] + data['saccade_leny'][i + 1]
rho, theta = cart2pol(v_x, v_y)
# save them in the new vectors
sim_lenx.insert(j, v_x)
sim_leny.insert(j, v_y)
sim_x.insert(j, data['saccade_x'][i])
sim_y.insert(j, data['saccade_y'][i])
sim_theta.insert(j, theta)
sim_len.insert(j, rho)
# add the fixation duration
sim_dur.insert(j, data['fixation_dur'][i])
i += 2
j += 1
else:
# insert original data in new list -- no simplification
sim_lenx, sim_leny, sim_x, sim_y, sim_theta, sim_len, sim_dur, i, j = keepsaccade(i,
j,
sim_lenx,
sim_leny,
sim_x,
sim_y,
sim_theta,
sim_len,
sim_dur,
data)
            # elif the angle is small, but it's the last saccade:
elif (angle < TDir) & (i == len(data['saccade_x']) - 1):
# if the fixation duration is short:
if data['fixation_dur'][i + 1] < TDur:
# calculate sum of local vectors
v_x = data['saccade_lenx'][i - 2] + data['saccade_lenx'][i - 1]
v_y = data['saccade_leny'][i - 2] + data['saccade_leny'][i - 1]
rho, theta = cart2pol(v_x, v_y)
# save them in new vectors
sim_lenx[j - 1] = v_x
sim_leny[j - 1] = v_y
sim_theta[j - 1] = theta
sim_len[j - 1] = rho
sim_dur.insert(j, data['fixation_dur'][-1] + (data['fixation_dur'][i] / 2))
j -= 1
i += 1
# if fixation duration is long:
else:
# insert original data in new list -- no simplification
sim_lenx, sim_leny, sim_x, sim_y, sim_theta, sim_len, sim_dur, i, j = keepsaccade(i,
j,
sim_lenx,
sim_leny,
sim_x,
sim_y,
sim_theta,
sim_len,
sim_dur,
data)
            # else: the angle is too large
else:
# insert original data in new list -- no simplification
sim_lenx, sim_leny, sim_x, sim_y, sim_theta, sim_len, sim_dur, i, j = keepsaccade(i,
j,
sim_lenx,
sim_leny,
sim_x,
sim_y,
sim_theta,
sim_len,
sim_dur,
data)
# now append the last fixation duration
sim_dur.append(data['fixation_dur'][-1])
# append everything into an ordered dict.
eyedata = collections.OrderedDict()
eyedata['fixation_dur'] = sim_dur
eyedata['saccade_x'] = sim_x
eyedata['saccade_y'] = sim_y
eyedata['saccade_lenx'] = sim_lenx
eyedata['saccade_leny'] = sim_leny
eyedata['saccade_theta'] = sim_theta
eyedata['saccade_rho'] = sim_len
return eyedata
def simplify_scanpath(data,
TAmp,
TDir,
TDur
):
"""Simplify scanpaths until no further simplification is possible.
Loops over simplification functions simdir and simlen until no
further simplification of the scanpath is possible.
:param: data: list of lists, output of gen_scanpath_structure
:param: TAmp: float, length in px
:param: TDir: float, angle in degrees
:param: TDur: float, duration in seconds
:return: eyedata: list of lists, simplified vector-based scanpath representation
"""
looptime = 0
while True:
data = simdir(data, TDir, TDur)
data = simlen(data, TAmp, TDur)
looptime += 1
if looptime == len(data['fixation_dur']):
return data
def cal_vectordifferences(data1,
data2
):
"""Create matrix of vector-length differences of all vector pairs
Create M, a Matrix with all possible saccade-length differences between
saccade pairs.
:param: data1, data2: list of lists, vector-based scanpath representations
:return: M: array-like
Matrix of vector length differences
"""
# take length in x and y direction of both scanpaths
x1 = np.asarray(data1['saccade_lenx'])
x2 = np.asarray(data2['saccade_lenx'])
y1 = np.asarray(data1['saccade_leny'])
y2 = np.asarray(data2['saccade_leny'])
# initialize empty lists M and row, will become matrix to store sacc-length
# pairings
M = []
row = []
# calculate saccade length differences, vectorized
for i in range(0, len(x1)):
x_diff = abs(x1[i] * np.ones(len(x2)) - x2)
y_diff = abs(y1[i] * np.ones(len(y2)) - y2)
# calc final length from x and y lengths, append, stack into matrix M
row.append(np.asarray(np.sqrt(x_diff ** 2 + y_diff ** 2)))
M = np.stack(row)
return M
def createdirectedgraph(szM,
M,
M_assignment
):
"""Create a directed graph:
    The data structure of the result is a dictionary within a dictionary
such as
weightedGraph = {0 : {1:259.55, 15:48.19, 16:351.95},
1 : {2:249.354, 16:351.951, 17:108.97},
2 : {3:553.30, 17:108.97, 18:341.78}, ...}
It defines the possible nodes to reach from a particular node, and the weight that
is associated with the path to each of the possible nodes.
:param: szM: list, shape of matrix M
:param: M: array-like, matrix of vector length differences
:param: M_assignment: array-like, Matrix, arranged with values from 0 to number of entries in M
:return: weighted graph: dict, Dictionary within a dictionary pairing weights (distances) with
node-pairings
"""
# initialize dictionary for neighbouring vertices and edge weights
adjacent = {}
weight = {}
# loop through every node rowwise
for i in range(0, szM[0]):
# loop through every node columnwise
for j in range(0, szM[1]):
currentNode = i * szM[1] + j
# if in the last (bottom) row, only go right
if (i == szM[0] - 1) & (j < szM[1] - 1):
adjacent[M_assignment[i, j]] = [currentNode + 1]
weight[M_assignment[i, j]] = [M[i, j + 1]]
# if in the last (rightmost) column, only go down
elif (i < szM[0] - 1) & (j == szM[1] - 1):
adjacent[M_assignment[i, j]] = [currentNode + szM[1]]
weight[M_assignment[i, j]] = [M[i + 1, j]]
# if in the last (bottom-right) vertex, do not move any further
elif (i == szM[0] - 1) & (j == szM[1] - 1):
adjacent[M_assignment[i, j]] = [currentNode]
weight[M_assignment[i, j]] = [0]
# anywhere else, move right, down and down-right.
else:
adjacent[M_assignment[i, j]] = [currentNode + 1,
currentNode + szM[1],
currentNode + szM[1] + 1]
weight[M_assignment[i, j]] = [M[i, j + 1],
M[i + 1, j],
M[i + 1, j + 1]]
# create ascending list ranging from first to last node - this
# will be the first key in the nested dict
Startnodes = range(0, szM[0] * szM[1])
# initialize list with adjacent nodes (adjacent to each startnode)
# and the weights associated with the paths between them
weightedEdges = []
# zip Nodes and weights
for i in range(0, len(adjacent)):
weightedEdges.append(list(zip(list(adjacent.values())[i],
list(weight.values())[i])))
# initialize final dictionary
weightedGraph = {}
# zip Startnodes together with Nodes-Weights, result is a nested dict
for i in range(0, len(weightedEdges)):
weightedGraph[Startnodes[i]] = dict(weightedEdges[i])
return weightedGraph
def dijkstra(weightedGraph,
start,
end
):
"""Implementation of Dijkstra algorithm:
Use the dijkstra algorithm to find the shortest path through a directed
graph (weightedGraph) from start to end.
:param: weightedGraph: dict, dictionary within a dictionary pairing weights (distances) with
node-pairings
:param: start: int, starting point of path, should be 0
:param: end: int, end point of path, should be (n, m) of Matrix M
:return: path: array-like, array of indices of the shortest path, i.e. best-fitting saccade pairs
:return: dist: float, sum of weights
"""
# initialize empty dictionary to hold distances
dist = {}
    # initialize list of vertices in the path to current vertex (predecessors)
pred = {}
# where do I need to go still?
to_assess = weightedGraph.keys()
for node in weightedGraph:
        # set initial distances to infinity
dist[node] = float('inf')
# no node has any predecessors yet
pred[node] = None
# initialize list to be filled with final distances(weights) of nodes
sp_set = []
# the starting node gets a weight of 0 to make sure to start there
dist[start] = 0
# continue the algorithm as long as there are still unexplored nodes
while len(sp_set) < len(to_assess):
still_in = {node: dist[node] for node in [node for node in to_assess if
node not in sp_set]}
# find adjacent node with minimal weight and append to sp_set
closest = min(still_in, key=dist.get)
sp_set.append(closest)
for node in weightedGraph[closest]:
if dist[node] > dist[closest] + weightedGraph[closest][node]:
dist[node] = dist[closest] + weightedGraph[closest][node]
pred[node] = closest
# append endnode to list path
path = [end]
# append contents of pred in reversed order to path
while start not in path:
path.append(pred[path[-1]])
# return path in reverse order (begin to end) and final distance
return path[::-1], dist[end]
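# Added illustration (hedged): a minimal weighted graph in the same nested-dict format
# produced by createdirectedgraph(); the terminal node points to itself with weight 0.
#   toy_graph = {0: {1: 2.0, 2: 5.0}, 1: {2: 1.0}, 2: {2: 0}}
#   dijkstra(toy_graph, 0, 2)  # -> ([0, 1, 2], 3.0), going through node 1 is cheaper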
def cal_angulardifference(data1,
data2,
path,
M_assignment
):
"""Calculate angular similarity of two scanpaths:
:param: data1: array-like, list of lists, contains vector-based scanpath representation of the
first scanpath
:param: data2: array-like, list of lists, contains vector-based scanpath representation of the
second scanpath
:param: path: array-like, array of indices for the best-fitting saccade pairings between scan-
paths
:param: M_assignment: array-like, Matrix, arranged with values from 0 to number of entries in
M, the matrix of vector length similarities
:return: anglediff: array of floats, array of angular differences between pairs of saccades
of two scanpaths
"""
# get the angle between saccades from the scanpaths
theta1 = data1['saccade_theta']
theta2 = data2['saccade_theta']
# initialize list to hold individual angle differences
anglediff = []
# calculate angular differences between the saccades along specified path
for k in range(0, len(path)):
# which saccade indices correspond to path?
i, j = np.where(M_assignment == path[k])
# extract the angle
spT = [theta1[np.asscalar(i)], theta2[np.asscalar(j)]]
for t in range(0, len(spT)):
# get results in range -pi, pi
if spT[t] < 0:
spT[t] = math.pi + (math.pi + spT[t])
spT = abs(spT[0] - spT[1])
if spT > math.pi:
spT = 2 * math.pi - spT
anglediff.append(spT)
return anglediff
def cal_durationdifference(data1,
data2,
path,
M_assignment
):
"""Calculate similarity of two scanpaths fixation durations.
:param: data1: array-like
list of lists, contains vector-based scanpath representation of the
first scanpath
:param: data2: array-like
list of lists, contains vector-based scanpath representation of the
second scanpath
:param: path: array-like
array of indices for the best-fitting saccade pairings between scan-
paths
:param: M_assignment: array-like
Matrix, arranged with values from 0 to number of entries in M, the
matrix of vector length similarities
:return: durdiff: array of floats,
array of fixation duration differences between pairs of saccades from
two scanpaths
"""
# get the duration of fixations in the scanpath
dur1 = data1['fixation_dur']
dur2 = data2['fixation_dur']
# initialize list to hold individual duration differences
durdiff = []
# calculation fixation duration differences between saccades along path
for k in range(0, len(path)):
# which saccade indices correspond to path?
i, j = np.where(M_assignment == path[k])
maxlist = [dur1[np.asscalar(i)], dur2[np.asscalar(j)]]
# compute abs. duration diff, normalize by largest duration in pair
durdiff.append(abs(dur1[np.asscalar(i)] -
dur2[np.asscalar(j)]) / abs(max(maxlist)))
return durdiff
def cal_lengthdifference(data1,
data2,
path,
M_assignment
):
"""Calculate length similarity of two scanpaths.
:param: data1: array-like
list of lists, contains vector-based scanpath representation of the
first scanpath
:param: data2: array-like
list of lists, contains vector-based scanpath representation of the
second scanpath
:param: path: array-like
array of indices for the best-fitting saccade pairings between scan-
paths
:param: M_assignment: array-like
Matrix, arranged with values from 0 to number of entries in M, the
matrix of vector length similarities
:return: lendiff: array of floats
array of length difference between pairs of saccades of two scanpaths
"""
# get the saccade lengths rho
len1 = np.asarray(data1['saccade_rho'])
len2 = np.asarray(data2['saccade_rho'])
# initialize list to hold individual length differences
lendiff = []
# calculate length differences between saccades along path
for k in range(0, len(path)):
i, j = np.where(M_assignment == path[k])
lendiff.append(abs(len1[i] - len2[j]))
return lendiff
def cal_positiondifference(data1,
data2,
path,
M_assignment
):
"""Calculate position similarity of two scanpaths.
:param: data1: array-like
list of lists, contains vector-based scanpath representation of the
first scanpath
:param: data2: array-like
list of lists, contains vector-based scanpath representation of the
second scanpath
:param: path: array-like
array of indices for the best-fitting saccade pairings between scan-
paths
:param: M_assignment: array-like
Matrix, arranged with values from 0 to number of entries in M, the
matrix of vector length similarities
:return: posdiff: array of floats
array of position differences between pairs of saccades
of two scanpaths
"""
# get the x and y coordinates of points between saccades
x1 = np.asarray(data1['saccade_x'])
x2 = np.asarray(data2['saccade_x'])
y1 = np.asarray(data1['saccade_y'])
y2 = np.asarray(data2['saccade_y'])
# initialize list to hold individual position differences
posdiff = []
# calculate position differences along path
for k in range(0, len(path)):
i, j = np.where(M_assignment == path[k])
posdiff.append(math.sqrt((x1[np.asscalar(i)] - x2[np.asscalar(j)]) ** 2 +
(y1[np.asscalar(i)] - y2[np.asscalar(j)]) ** 2))
return posdiff
def cal_vectordifferencealongpath(data1,
data2,
path,
M_assignment
):
"""Calculate vector similarity of two scanpaths.
:param: data1: array-like
list of lists, contains vector-based scanpath representation of the
first scanpath
:param: data2: array-like
list of lists, contains vector-based scanpath representation of the
second scanpath
:param: path: array-like
array of indices for the best-fitting saccade pairings between scan-
paths
:param: M_assignment: array-like
Matrix, arranged with values from 0 to number of entries in M, the
matrix of vector length similarities
:return: vectordiff: array of floats
array of vector differences between pairs of saccades of two scanpaths
"""
# get the saccade lengths in x and y direction of both scanpaths
x1 = np.asarray(data1['saccade_lenx'])
x2 = np.asarray(data2['saccade_lenx'])
y1 = np.asarray(data1['saccade_leny'])
y2 = np.asarray(data2['saccade_leny'])
# initialize list to hold individual vector differences
vectordiff = []
# calculate vector differences along path
for k in range(0, len(path)):
i, j = np.where(M_assignment == path[k])
vectordiff.append(np.sqrt((x1[np.asscalar(i)] - x2[np.asscalar(j)]) ** 2 +
(y1[np.asscalar(i)] - y2[np.asscalar(j)]) ** 2))
return vectordiff
def getunnormalised(data1,
data2,
path,
M_assignment
):
"""Calculate unnormalised similarity measures.
Calls the five functions to create unnormalised similarity measures for
each of the five similarity dimensions. Takes the median of the resulting
similarity values per array.
:param: data1: array-like
list of lists, contains vector-based scanpath representation of the
first scanpath
:param: data2: array-like
list of lists, contains vector-based scanpath representation of the
second scanpath
:param: path: array-like
array of indices for the best-fitting saccade pairings between scan-
paths
:param: M_assignment: array-like
Matrix, arranged with values from 0 to number of entries in M, the
matrix of vector length similarities
:return: unnormalised: array
array of unnormalised similarity measures on five dimensions
>>> unorm_res = getunnormalised(scanpath_rep1, scanpath_rep2, path, M_assignment)
"""
args = data1, data2, path, M_assignment
VecSim = np.median(cal_vectordifferencealongpath(*args))
DirSim = np.median(cal_angulardifference(*args))
LenSim = np.median(cal_lengthdifference(*args))
PosSim = np.median(cal_positiondifference(*args))
DurSim = np.median(cal_durationdifference(*args))
unnormalised = [VecSim, DirSim, LenSim, PosSim, DurSim]
return unnormalised
def normaliseresults(unnormalised,
sz=[1280, 720]
):
"""Normalize similarity measures.
    Vector similarity is normalised against two times the screen diagonal,
    the maximum theoretical distance.
Direction similarity is normalised against pi.
Length Similarity is normalised against screen diagonal.
Position Similarity and Duration Similarity are already normalised.
:param: unnormalised: array
array of unnormalised similarity measures,
output of getunnormalised()
:return: normalresults: array
array of normalised similarity measures
>>> normal_res = normaliseresults(unnormalised, sz = [1280, 720])
"""
# normalize vector similarity against two times screen diagonal, the maximum
# theoretical distance
VectorSimilarity = 1 - unnormalised[0] / (2 * math.sqrt(sz[0] ** 2 + sz[1] ** 2))
# normalize against pi
DirectionSimilarity = 1 - unnormalised[1] / math.pi
# normalize against screen diagonal
LengthSimilarity = 1 - unnormalised[2] / math.sqrt(sz[0] ** 2 + sz[1] ** 2)
PositionSimilarity = 1 - unnormalised[3] / math.sqrt(sz[0] ** 2 + sz[1] ** 2)
    # no normalisation necessary, already done
DurationSimilarity = 1 - unnormalised[4]
normalresults = [VectorSimilarity, DirectionSimilarity, LengthSimilarity,
PositionSimilarity, DurationSimilarity]
return normalresults
def docomparison(fixation_vectors1,
fixation_vectors2,
sz=[1280, 720],
grouping=False,
TDir=0.0,
TDur=0.0,
TAmp=0.0
):
"""Compare two scanpaths on five similarity dimensions.
:param: fixation_vectors1: array-like n x 3 fixation vector of one scanpath
:param: fixation_vectors2: array-like n x 3 fixation vector of one scanpath
:param: sz: list, screen dimensions in px. Default: [1280, 720]
:param: grouping: boolean, if True, simplification is performed based on thresholds TAmp,
TDir, and TDur. Default: False
:param: TDir: float, Direction threshold, angle in degrees. Default: 0.0
:param: TDur: float, Duration threshold, duration in seconds. Default: 0.0
:param: TAmp: float, Amplitude threshold, length in px. Default: 0.0
:return: scanpathcomparisons: array
array of 5 scanpath similarity measures. Vector (Shape), Direction
(Angle), Length, Position, and Duration. 1 means absolute similarity, 0 means
lowest similarity possible.
>>> results = docomparison(fix_1, fix_2, sz = [1280, 720], grouping = True, TDir = 45.0, TDur = 0.05, TAmp = 150)
>>> print(results)
>>> [[0.95075847681364678, 0.95637548674423822, 0.94082367355291008, 0.94491164030498609, 0.78260869565217384]]
"""
# initialize result vector
scanpathcomparisons = []
# check if fixation vectors/scanpaths are long enough
if (len(fixation_vectors1) >= 3) & (len(fixation_vectors2) >= 3):
# get the data into a geometric representation
subj1 = gen_scanpath_structure(fixation_vectors1)
subj2 = gen_scanpath_structure(fixation_vectors2)
if grouping:
# simplify the data
subj1 = simplify_scanpath(subj1, TAmp, TDir, TDur)
subj2 = simplify_scanpath(subj2, TAmp, TDir, TDur)
# create M, a matrix of all vector pairings length differences (weights)
M = cal_vectordifferences(subj1, subj2)
# initialize a matrix of size M for a matrix of nodes
szM = np.shape(M)
M_assignment = np.arange(szM[0] * szM[1]).reshape(szM[0], szM[1])
# create a weighted graph of all possible connections per Node, and their weight
weightedGraph = createdirectedgraph(szM, M, M_assignment)
# find the shortest path (= lowest sum of weights) through the graph
path, dist = dijkstra(weightedGraph, 0, szM[0] * szM[1] - 1)
        # compute similarities on aligned scanpaths and normalize them
unnormalised = getunnormalised(subj1, subj2, path, M_assignment)
normal = normaliseresults(unnormalised, sz)
scanpathcomparisons.append(normal)
    # return nan as result if at least one scanpath is too short
else:
scanpathcomparisons.append(np.repeat(np.nan, 5))
return scanpathcomparisons
def main(args=sys.argv):
import argparse
parser = argparse.ArgumentParser(
prog='multimatch', )
parser.add_argument(
'input1', metavar='<datafile>',
help="""Eyemovement data of scanpath 1. Should be a tab separated
file with columns corresponding to x-coordinates, y-coordinates, and
fixation duration in seconds.""")
parser.add_argument(
'input2', metavar='<datafile>',
help="""Eyemovement data of scanpath 2. Should be a tab separated
file with columns corresponding to x-coordinates, y-coordinates, and
fixation duration in seconds.""")
parser.add_argument(
'--screensize', nargs='+', metavar='<screensize>', default=[1280, 720],
help="""screensize: Resolution of screen in px, should be supplied as
--screensize 1000 800 for a screen of resolution [1000, 800]. The
default is 1280 x 720px.""")
parser.add_argument(
'--direction-threshold', type=float, metavar='<TDir>', default=0.0,
help="""Threshold for direction based grouping in degree (example: 45.0).
Two consecutive saccades with an angle below TDir and short fixations will
be grouped together to reduce scanpath complexity. If 0: no
simplification will be performed.""")
parser.add_argument(
'--amplitude-threshold', type=float, metavar='<TAmp>', default=0.0,
help="""Threshold for amplitude based grouping in pixel (example: 140.0).
Two consecutive saccades shorter than TAmp and short fixations will be
grouped together to reduce scanpath complexity. If 0: no simplification
will be performed.""")
parser.add_argument(
'--duration-threshold', type=float, metavar='<TDur>', default=0.0,
help="""Threshold for fixation duration during amplitude and direction
based grouping, in seconds.""")
args = parser.parse_args()
data1 = np.recfromcsv(args.input1,
delimiter='\t',
dtype={'names': ('start_x', 'start_y', 'duration'),
'formats': ('f8', 'f8', 'f8')})
data2 = np.recfromcsv(args.input2,
delimiter='\t',
dtype={'names': ('start_x', 'start_y', 'duration'),
'formats': ('f8', 'f8', 'f8')})
TDir = args.direction_threshold
TAmp = args.amplitude_threshold
TDur = args.duration_threshold
if args.screensize:
sz = [float(i) for i in args.screensize]
if len(sz) != 2:
print('I expected two floats after --screensize, such as --screensize 1280 720.'
'However, I got {}. I will default to a screensize of 1280 x 720.'.format(args.screensize))
sz = [1280, 720]
if (TDir != 0) and (TAmp != 0):
grouping = True
print(
'Scanpath comparison is done with simplification. Two consecutive saccades shorter than {}px and '
'with an angle smaller than {} degrees are grouped together if intermediate fixations are shorter '
'than {} seconds.'.format(TAmp, TDir, TDur))
else:
grouping = False
print('Scanpath comparison is done without any simplification.')
result = docomparison(data1,
data2,
sz=sz,
grouping=grouping,
TDir=TDir,
TDur=TDur,
TAmp=TAmp)
print('Vector similarity = ', result[0][0])
print('Direction similarity = ', result[0][1])
print('Length similarity = ', result[0][2])
print('Position similarity = ', result[0][3])
print('Duration similarity = ', result[0][4])
if __name__ == '__main__':
import argparse
# execution
main()
|
StarcoderdataPython
|
219447
|
<reponame>maxminoS/neurage<filename>packages/server/app/build_model.py
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow.keras.layers as L
# import plotly.express as px
from sklearn.model_selection import train_test_split
def preprocess_data(filename):
data = pd.read_csv(filename)
# Convert pixels into numpy array
data["pixels"] = data["pixels"].apply(lambda x: np.array(x.split(), dtype="float32"))
# data.head()
print("Total rows: {}".format(len(data)))
print("Total columns: {}".format(len(data.columns)))
X = np.array(data["pixels"].tolist())
# Convert pixels from 1D to 3D
X = X.reshape(X.shape[0], 48, 48, 1)
y = data["age"]
return X, y
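# Added note (illustration): the CSV is assumed to have an "age" column and a "pixels"
# column of space-separated 48x48 grayscale values, so preprocess_data returns
# X of shape (n_samples, 48, 48, 1) and y as a pandas Series of ages.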
def train_model(X, y):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.22, random_state=37)
model = tf.keras.Sequential([
L.InputLayer(input_shape=(48, 48, 1)),
L.Conv2D(32, (3, 3), activation="relu", input_shape=(32, 32, 3)),
L.BatchNormalization(),
L.MaxPooling2D((2, 2)),
L.Conv2D(64, (3, 3), activation="relu"),
L.MaxPooling2D((2, 2)),
L.Conv2D(128, (3, 3), activation="relu"),
L.MaxPooling2D((2, 2)),
L.Flatten(),
L.Dense(64, activation="relu"),
L.Dropout(rate=0.5),
L.Dense(1, activation="relu")
])
# sgd = tf.keras.optimizers.SGD(momentum=0.9)
model.compile(optimizer="adam",
loss="mean_squared_error",
metrics=["mae"])
    # Stop training once the validation loss drops below 110
class myCallback(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs={}):
if(logs.get("val_loss") < 110):
print("\nReached 110 val_loss so cancelling training!")
self.model.stop_training = True
callback = myCallback()
model.summary()
history = model.fit(X_train, y_train, epochs=20, validation_split=0.1, batch_size=64, callbacks=[callback])
mse, mae = model.evaluate(X_test, y_test, verbose=0)
print("Test Mean squared error: {}".format(mse))
print("Test Mean absolute error: {}".format(mae))
return model, history
# def graph_model(history):
# fig = px.line(
# history.history, y=["loss", "val_loss"],
# labels={"index": "epoch", "value": "loss"},
# title="Training History")
# fig.show()
if __name__ == "__main__":
X, y = preprocess_data("age_gender.csv")
model, history = train_model(X, y)
model.save("age_model.h5")
# graph_model(history)
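    # A hedged inference sketch (not part of the original script): reload the
    # model saved above and predict the age for one preprocessed 48x48 sample.
    # The sample index 0 below is an arbitrary placeholder.
    reloaded = tf.keras.models.load_model("age_model.h5")
    sample = X[0].reshape(1, 48, 48, 1)
    print("Predicted age for sample 0: {:.1f}".format(float(reloaded.predict(sample)[0][0])))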
|
StarcoderdataPython
|
11350955
|
<reponame>smart-cow/scow<gh_stars>0
from xml.dom.minidom import *
from scowclient import ScowClient
def listUsers():
url = 'users'
sclient = ScowClient()
document = parseString(sclient.get(url))
users = document.getElementsByTagName('user')
for u in users:
idTag = u.getElementsByTagName('id')[0]
username = idTag.childNodes[0].data
        print(username)
def main():
listUsers()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
94697
|
'''
*
* Copyright (C) 2020 Universitat Politècnica de Catalunya.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
'''
# -*- coding: utf-8 -*-
# Basic modules
import time
import logging
import logging.config
import re
# 3rd party modules
from selenium import webdriver
from selenium.common.exceptions import TimeoutException, WebDriverException
from selenium.common.exceptions import UnexpectedAlertPresentException, InvalidSessionIdException
# Own modules
import config
from data_manager import get_network, manage_request
logging.config.fileConfig('../logging.conf')
logger = logging.getLogger("DRIVER_MANAGER")
from selenium.webdriver.common.keys import Keys
def build_driver(plugin, cache, process):
""" Creates the selenium driver to be used by the script and loads the corresponding plugin if needed. """
try:
chrome_options = webdriver.ChromeOptions()
# Clean cache/cookies if not specified to maintain
if not cache:
chrome_options.add_argument('--media-cache-size=0')
chrome_options.add_argument('--v8-cache-options=off')
chrome_options.add_argument('--disable-gpu-program-cache')
chrome_options.add_argument('--gpu-program-cache-size-kb=0')
chrome_options.add_argument('--disable-gpu-shader-disk-cache')
chrome_options.add_argument('--disk-cache-dir=/tmp')
chrome_options.add_argument('--disable-dev-shm-usage')
chrome_options.add_argument('--v8-cache-strategies-for-cache-storage=off')
chrome_options.add_argument('--mem-pressure-system-reserved-kb=0')
chrome_options.set_capability("applicationCacheEnabled", False)
chrome_options.add_extension(config.CLEANER_PLUGIN_PATH)
# Set Devtools Protocol to start taking network logs
chrome_options.set_capability("loggingPrefs", {'performance': 'ALL'})
chrome_options.add_experimental_option('w3c', False)
# Load received plugin (except for vanilla)
if plugin.values["name"] != "Vanilla":
chrome_options.add_extension(plugin.values['path'])
driver = webdriver.Chrome(options=chrome_options)
if plugin.values["name"] != "Vanilla" and plugin.values['custom']:
driver.get(plugin.values['url'])
time.sleep(3)
driver.switch_to.frame(0)
driver.find_element_by_xpath(plugin.values['xpath_to_click']).click()
time.sleep(20)
driver.switch_to.window(driver.window_handles[0])
# driver.close()
# driver.switch_to.window(driver.window_handles[0])
return driver
except Exception as e:
# logger.error(e)
logger.error("(proc. %d) Error creating driver: %s" % (process, str(e)))
return 0
def reset_browser(driver, process, plugin, cache):
""" Reset the browser to the default state. """
try:
driver.switch_to.default_content()
if not cache:
driver.delete_all_cookies()
except UnexpectedAlertPresentException:
try:
alert = driver.switch_to.alert
alert.dismiss()
except Exception as e:
# logger.error(e)
logger.error("(proc. %d) Error #4: %s" % (process, str(e)))
driver.close()
driver = build_driver(plugin, cache, process)
while not driver:
driver = build_driver(plugin, cache, process)
driver.set_page_load_timeout(30)
except InvalidSessionIdException as e:
logger.error("(proc. %d) Error #6: %s" % (process, str(e)))
driver = build_driver(plugin, cache, process)
while not driver:
driver = build_driver(plugin, cache, process)
driver.set_page_load_timeout(30)
except Exception as e:
logger.error("(proc. %d) Error #5: %s" % (process, str(e)))
driver.close()
driver = build_driver(plugin, cache, process)
while not driver:
driver = build_driver(plugin, cache, process)
driver.set_page_load_timeout(30)
return driver
def clickonall(list, matched, driver, domain):
for element in list['Found']:
try:
hashref = element.get_attribute('href')
tagtype = element.tag_name
            if (not ('all Rights' in element.text or 'all rights' in element.text
                     or 'All rights' in element.text or 'All Rights' in element.text)
                    and hashref is None
                    and tagtype in ('span', 'button', 'input', 'a')):
if(matched=='ok' and re.search('(^|\s)+ok+(\s|$)',element.text.lower())!=None):
element.click()
list['Clicked'] = True
handles2 = len(driver.window_handles)
if handles2 != 1:
logger.info(
"{} opened multiple windows in total: {}".format(domain.values["name"], handles2))
while handles2 > 1:
driver.switch_to.window(driver.window_handles[1])
driver.close()
handles2 = len(driver.window_handles)
driver.switch_to.window(driver.window_handles[0])
elif(matched=='all' and re.search('(^|\s)+all+(\s|$)',element.text.lower())!=None):
element.click()
list['Clicked'] = True
handles2 = len(driver.window_handles)
if handles2 != 1:
logger.info(
"{} opened multiple windows in total: {}".format(domain.values["name"], handles2))
while handles2 > 1:
driver.switch_to.window(driver.window_handles[1])
driver.close()
handles2 = len(driver.window_handles)
driver.switch_to.window(driver.window_handles[0])
elif(matched!='all' and matched!='ok'):
element.click()
list['Clicked'] = True
handles2 = len(driver.window_handles)
if handles2 != 1:
logger.info(
"{} opened multiple windows in total: {}".format(domain.values["name"], handles2))
while handles2 > 1:
driver.switch_to.window(driver.window_handles[1])
driver.close()
handles2 = len(driver.window_handles)
driver.switch_to.window(driver.window_handles[0])
except:
pass
def find_patterns(driver, found, domain):
    # Keywords (in several languages) that commonly appear on cookie/consent
    # banners; they are tried in the same order as the original per-keyword blocks.
    keywords = ['accept', 'enable', 'all', 'got', 'yes', 'agree', 'acept',
                'cookie', 'consent', 'akzep', 'continue', 'prosseguir',
                'ok', 'okay', 'confirm', 'aceit']
    xpath_template = ('//Body//*[contains(translate(text(),'
                      '"ABCDEFGHIJKLMNOPQRSTUVWXYZ","abcdefghijklmnopqrstuvwxyz"),"%s")]')
    for keyword in keywords:
        try:
            found['Found'] = driver.find_elements_by_xpath(xpath_template % keyword)
            clickonall(found, keyword, driver, domain)
        except:
            pass
def find_all_iframes(driver, found, domain, deep, process):
#deep= deep + 1
#if deep == 4:
# logger.info('!!!!In: {} proc: {} May be FUCKED reached 2 deep'.format(domain.values['name'],process))
# driver.switch_to.default_content()
# return
find_patterns(driver,found,domain)
iframes = driver.find_elements_by_xpath('//iframe')
for index, iframe in enumerate(iframes):
# Your sweet business logic applied to iframe goes here.
#logger.info('In: {} proc: {} It has iframes with iframe: {} and deep: {}'.format(domain.values['name'],process,iframe,deep))
try:
driver.switch_to.frame(index)
except:
pass
#find_all_iframes(driver,found,domain,deep,process)
find_patterns(driver, found, domain)
try:
driver.switch_to.parent_frame()
except:
pass
def visit_site(db, process, driver, domain, plugin, temp_folder, cache):
""" Loads the website and extract its information. """
# Load the website and wait some time inside it
try:
driver.get('http://' + domain.values["name"])
except TimeoutException:
logger.warning("Site %s timed out (proc. %d)" % (domain.values["name"], process))
driver.close()
driver = build_driver(plugin, cache, process)
while not driver:
driver = build_driver(plugin, cache, process)
driver.set_page_load_timeout(30)
return driver, True
except WebDriverException as e:
logger.warning("WebDriverException on site %s / Error: %s (proc. %d)" % (domain.values["name"], str(e),
process))
driver = reset_browser(driver, process, plugin, cache)
return driver, True
except Exception as e:
logger.error("%s (proc. %d)" % (str(e), process))
driver = reset_browser(driver, process, plugin, cache)
return driver, True
else:
time.sleep(10)
found = {'Found': [], 'Clicked': False}
handles = len(driver.window_handles)
find_all_iframes(driver, found, domain,0,process)
driver.switch_to.default_content()
domain.values["clicked"] = found['Clicked']
logger.info("In {} proc: {} clicked = {}".format(domain.values["name"],process, found['Clicked']))
# if found['Clicked']:
#driver.execute_script("location.reload(true);")
driver.refresh()
time.sleep(10)
# Get network traffic dictionary
# logger.debug(driver.log_types)
log_entries = driver.get_log('performance')
# logger.debug("(proc. %d) Network data: %s" % (process, str(log_entries)))
network_traffic = get_network(log_entries)
# logger.debug("(proc. %d) Extracted data: %s" % (process, str(network_traffic)))
# Process traffic dictionary
for key in network_traffic.keys():
manage_request(db, process, domain, network_traffic[key], plugin, temp_folder)
for sub_key in network_traffic[key]["requests"].keys():
manage_request(db, process, domain, network_traffic[key]["requests"][sub_key], plugin, temp_folder)
driver = reset_browser(driver, process, plugin, cache)
return driver, False
|
StarcoderdataPython
|
11331840
|
import os
import h5py
import mat73
import numpy as np
"""
View HDF5 data structure
------------------------
"""
def traverse_datasets(hdf_file):
"""
    Peek into the matlab file and print each dataset's key, shape, and data type.
:param hdf_file:
:return:
"""
def h5py_dataset_iterator(g, prefix=''):
"""
iterate through the HDF5 file and search through the nested datasets
:param g: .mat filepath
:param prefix:
:return: prints out the directory/subdirectory, shape, and dtype in the HDF5 file
"""
for key in g.keys():
item = g[key]
path = f'{prefix}/{key}'
if isinstance(item, h5py.Dataset): # test for dataset
yield path, item
elif isinstance(item, h5py.Group): # test for group (go down)
yield from h5py_dataset_iterator(item, path)
for path, _ in h5py_dataset_iterator(hdf_file):
yield path
def view_hdf_structure(filepath, print_labels=False):
"""
Looks through the structure and prints information about the structure.
:param filepath: filepath of .mat
:return:
"""
vol_labels = []
with h5py.File(filepath, 'r') as f:
for dataset in traverse_datasets(f):
if print_labels:
print(f'Path: {dataset}\tShape: {f[dataset].shape}\tData type: {f[dataset].dtype}')
vol_labels.append(dataset)
return vol_labels[:-1]
def list_experiment_directories(experiment_parent_directory):
"""
:param experiment_parent_directory: the directory which contains the folders of fish experiments.
:return: a list of directories, each directory containing 3 .mat file i.e. log files that need preprocessing.
"""
list_experiment_directories = next(os.walk(experiment_parent_directory))[1]
print('Experiments:')
[print(i) for i in list_experiment_directories]
return list_experiment_directories
def extract_dataset(filepath, dataset_name=''):
"""
extracts the dataset of the dataset you are interested in
:param filepath: the .mat filepath
:param dataset_name: the name of the dataset you are interested in
    :return: an n-dimensional array for the dataset.
"""
# print(dataset_name)
with h5py.File(filepath, 'r') as f:
data = np.array(f[dataset_name][:])
return data
def pull_frames(mat_filepath, plane_number, frame_range, start_frame=0, sample_rate=10):
labels = view_hdf_structure(mat_filepath)
# extract shape of plane
plane_shape = extract_dataset(mat_filepath, labels[0]).shape[:2]
frames_shape = (int(frame_range / sample_rate),) + plane_shape
frames = np.zeros(frames_shape)
for i, frame in enumerate(range(start_frame, start_frame + frame_range, sample_rate)):
frames[i, :, :] = extract_dataset(mat_filepath, labels[frame])[:, :, plane_number, 0, 0]
return frames
def compute_total_frames(volumes_dir, number_chars=3, is_mat_file=False):
vol_list = os.listdir(volumes_dir)
try:
last_frame = max([int(name[:-number_chars]) for name in vol_list])
except:
raise print("Ensure only integer named .h5 file volumes are in directory:", volumes_dir)
if is_mat_file:
return last_frame
return last_frame
def list_frames_numbers(volumes_dir, number_chars=3):
vol_list = os.listdir(volumes_dir)
try:
frames = np.array([int(name[:-number_chars]) for name in vol_list])
except:
raise print("Ensure only integer named .h5 file volumes are in directory:", volumes_dir)
return np.sort(frames)
def export_numpy_2_h5(array, filepath, to_compress=True):
# store original volume shape
vol_shape = array.shape
# reshape volume into 2D array
array = array.reshape(vol_shape[0], -1)
# export as hdf5 file
file = h5py.File(filepath, 'w')
if to_compress:
file.create_dataset("vol", shape=vol_shape, data=array, compression="gzip", compression_opts=9)
else:
file.create_dataset("vol", shape=vol_shape, data=array)
file.close()
return
if __name__ == '__main__':
path_dir = "/Volumes/LSM4/tomData/01062021/Fish4/tiff_stacks/20210601_7dpf_HUC_H2B_fish4_run1/"
file_name = "dataSkewCorrected.mat"
labels = view_hdf_structure(path_dir + file_name)
# # extract plane
# plane_160 = np.zeros((100, 262, 710))
# for i, frame in enumerate(range(0, 1000, 10)):
# plane_160[i, :, :] = extract_dataset(path_dir + file_name, labels[frame])[:, :, 160, 0, 0]
# extract volume
full_vol = extract_dataset(path_dir + file_name, labels[100])[:, :, :, 0, 0]
|
StarcoderdataPython
|
11247368
|
<filename>Exercise_4_BTE_1.py<gh_stars>1-10
#Hexadecimal output
'''
In this exercise, you’ll see how a bit of creativity, along with the built-in 'reversed'
and 'enumerate' functions, can help you to get around issues.
For this exercise, you need to write a function (hex_output) that takes a hex number
and returns the decimal equivalent. That is, if the user enters 50, you’ll assume
that it’s a hex number (equal to 0x50) and will print the value 80 to the screen. And
no, you shouldn’t convert the number all at once using the int function, although it’s
permissible to use int one digit at a time.
'''
#Beyond the exercise 1 (BTE_1)
'''
Reimplement the solution for this exercise such that it doesn't use the 'int' function
at all, but rather uses the built-in 'ord' and 'chr' functions to identify the
character. This implementation should be more robust, ignoring characters
that aren't legal for the entered number base.
'''
def hex_output():
    legal_chars = '0123456789ABCDEF'
    result = 0
    hex_num = input("Enter a hex number: 0x").upper()
    if hex_num and all((c in legal_chars) for c in hex_num):
        for ind, val in enumerate(reversed(hex_num)):
            # chr(48) = '0', ord('0') = 48; 'A'-'F' start at ord('A') = 65, so subtract 55
            digit = ord(val) - 48 if val.isdigit() else ord(val) - 55
            result += digit * 16 ** ind
        print("Decimal equivalent is:", result)
        print(hex(result))  # checking out
    else:
        print("Illegal character!")
hex_output()
|
StarcoderdataPython
|
4974330
|
# -*- coding: utf-8 -*-
#
# # MIT License
#
# Copyright (c) 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Formats a GPX file."""
import datetime
import XmlWriter
GPX_TAG_NAME = "gpx"
GPX_TAG_NAME_METADATA = "metadata"
GPX_TAG_NAME_NAME = "name"
GPX_TAG_NAME_TRACK = "trk"
GPX_TAG_NAME_TRACKSEGMENT = "trkseg"
GPX_TAG_NAME_TRACKPOINT = "trkpt"
GPX_TAG_NAME_ELEVATION = "ele"
GPX_TAG_NAME_TIME = "time"
GPX_TAG_NAME_TYPE = "type"
GPX_ATTR_NAME_VERSION = "version"
GPX_ATTR_NAME_CREATOR = "creator"
GPX_ATTR_NAME_LATITUDE = "lat"
GPX_ATTR_NAME_LONGITUDE = "lon"
GPX_TAG_NAME_EXTENSIONS = "extensions"
GPX_TPX = "gpxtpx:TrackPointExtension"
GPX_TPX_HR = "gpxtpx:hr"
GPX_TPX_CADENCE = "gpxtpx:cad"
GPX_TPX_POWER = "power"
class GpxWriter(XmlWriter.XmlWriter):
"""Formats a GPX file."""
def __init__(self):
XmlWriter.XmlWriter.__init__(self)
def create_gpx(self, file_name, creator):
self.create(file_name)
attributes = {}
attributes[GPX_ATTR_NAME_VERSION] = "1.1"
attributes[GPX_ATTR_NAME_CREATOR] = creator
attributes["xsi:schemaLocation"] = "http://www.topografix.com/GPX/1/1 http://www.topografix.com/GPX/1/1/gpx.xsd http://www.garmin.com/xmlschemas/GpxExtensions/v3 http://www.garmin.com/xmlschemas/GpxExtensionsv3.xsd http://www.garmin.com/xmlschemas/TrackPointExtension/v1 http://www.garmin.com/xmlschemas/TrackPointExtensionv1.xsd"
attributes["xmlns"] = "http://www.topografix.com/GPX/1/1"
attributes["xmlns:gpxtpx"] = "http://www.garmin.com/xmlschemas/TrackPointExtension/v1"
attributes["xmlns:gpxx"] = "http://www.garmin.com/xmlschemas/GpxExtensions/v3"
attributes["xmlns:xsi"] = "http://www.w3.org/2001/XMLSchema-instance"
self.open_tag_with_attributes(GPX_TAG_NAME, attributes, True)
def close(self):
self.close_all_tags()
def write_metadata(self, start_time_ms):
self.open_tag(GPX_TAG_NAME_METADATA)
buf = self.format_time_ms(start_time_ms)
self.write_tag_and_value(GPX_TAG_NAME_TIME, buf)
self.close_tag()
def write_name(self, name):
self.write_tag_and_value(GPX_TAG_NAME_NAME, name)
def write_type(self, activity_type):
self.write_tag_and_value(GPX_TAG_NAME_TYPE, activity_type)
def start_track(self):
self.open_tag(GPX_TAG_NAME_TRACK)
def end_track(self):
        if self.current_tag() == GPX_TAG_NAME_TRACK:
self.close_tag()
def start_track_segment(self):
self.open_tag(GPX_TAG_NAME_TRACKSEGMENT)
def end_track_segment(self):
        if self.current_tag() == GPX_TAG_NAME_TRACKSEGMENT:
self.close_tag()
def start_trackpoint(self, lat, lon, alt, time_ms):
        if self.current_tag() != GPX_TAG_NAME_TRACKSEGMENT:
raise Exception("GPX write error.")
attributes = {}
attributes[GPX_ATTR_NAME_LONGITUDE] = str(lon)
attributes[GPX_ATTR_NAME_LATITUDE] = str(lat)
time_str = self.format_time_ms(time_ms)
self.open_tag_with_attributes(GPX_TAG_NAME_TRACKPOINT, attributes, False)
self.write_tag_and_value(GPX_TAG_NAME_ELEVATION, str(alt))
self.write_tag_and_value(GPX_TAG_NAME_TIME, time_str)
def end_trackpoint(self):
        if self.current_tag() == GPX_TAG_NAME_TRACKPOINT:
self.close_tag()
def start_extensions(self):
self.open_tag(GPX_TAG_NAME_EXTENSIONS)
def end_extensions(self):
        if self.current_tag() == GPX_TAG_NAME_EXTENSIONS:
self.close_tag()
def start_trackpoint_extensions(self):
self.open_tag(GPX_TPX)
def end_trackpoint_extensions(self):
        if self.current_tag() == GPX_TPX:
self.close_tag()
def store_heart_rate_bpm(self, heart_rate_bpm):
        if self.current_tag() != GPX_TPX:
raise Exception("GPX write error.")
self.write_tag_and_value(GPX_TPX_HR, heart_rate_bpm)
def store_cadence_rpm(self, cadence_rpm):
        if self.current_tag() != GPX_TPX:
raise Exception("GPX write error.")
self.write_tag_and_value(GPX_TPX_CADENCE, cadence_rpm)
def store_power_in_watts(self, power_in_watts):
        if self.current_tag() != GPX_TPX:
raise Exception("GPX write error.")
self.write_tag_and_value(GPX_TPX_POWER, power_in_watts)
def format_time_ms(self, t):
sec = t / 1000
ms = t % 1000
buf1 = datetime.datetime.utcfromtimestamp(sec).strftime('%Y-%m-%dT%H:%M:%S')
buf2 = buf1 + ".%03uZ" % ms
return buf2
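
# A minimal usage sketch (not part of the original module); it assumes the
# XmlWriter base class is importable, and the file name, creator string and
# single trackpoint below are placeholder values.
if __name__ == "__main__":
    writer = GpxWriter()
    writer.create_gpx("example.gpx", "ExampleCreator")
    writer.write_metadata(start_time_ms=0)
    writer.write_name("Example activity")
    writer.write_type("running")
    writer.start_track()
    writer.start_track_segment()
    writer.start_trackpoint(lat=51.0, lon=-0.1, alt=12.0, time_ms=0)
    writer.end_trackpoint()
    writer.end_track_segment()
    writer.end_track()
    writer.close()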
|
StarcoderdataPython
|
1632126
|
import numpy as np
# Using a Q-learning value table, returns how good each response is.
def player0(data, Q, player, valid, learning_rate, feedback):
actual = Q[data[player][0]][data[player][1]][data[player][2] - 1][data[player][3]][
int(np.log2(data[player][4]))] # How much it weights the actual state.
p0 = 0 # The amount of points if choose 0.
p1 = 0 # The amount of points if choose 1.
p2 = 0 # The amount of points if choose 2.
p3 = 0 # The amount of points if choose 3.
if feedback == 0:
# The probability is 0 because it is an illegal move.
if valid == 0:
p1 = actual
p3 = 0
else:
p1 = 0
        # If it is a legal move then the value of playing 0 is the same as raising the points_hand variable,
# Else it's 0.
if data[0][4] == 8:
p0 = 0
else:
p0 = Q[data[player][0]][data[player][1]][data[player][2] - 1][data[player][3]][
int(np.log2(data[player][4])) + 1]
        # If the opponent's points are greater than 15 then it shouldn't be an option.
try:
p2 = Q[data[player][0]][data[player][1] + data[player][4]][data[player][2] - 1][data[player][3]][
int(np.log2(data[player][4]))]
except:
p2 = 0
else: # If it is call because of feedback then update the current state
Q[data[player][0]][data[player][1]][data[player][2] - 1][data[player][3]][
int(np.log2(data[player][4]))] += learning_rate * (feedback - actual)
return [p0, p1, p2, p3]
# Taking the probability of the 4 possible inputs, returns the final response.
def players(probability):
p0 = probability[0]
p1 = probability[1]
p2 = probability[2]
p3 = probability[3]
normal_const = p0 + p1 + p2 + p3
chance = np.array([p0, p1, p2, p3]) / normal_const
choose = np.random.random()
if choose < chance[0]:
return 0
elif choose < chance[0] + chance[1]:
return 1
elif choose < chance[0] + chance[1] + chance[2]:
return 2
else:
return 3
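
# A tiny usage sketch (not from the original file): players() samples one of
# the four actions with probability proportional to the scores it receives.
if __name__ == '__main__':
    scores = [0.1, 0.4, 0.3, 0.2]  # hypothetical output of player0()
    picks = [players(scores) for _ in range(1000)]
    print([round(picks.count(a) / len(picks), 2) for a in range(4)])  # roughly [0.1, 0.4, 0.3, 0.2]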
|
StarcoderdataPython
|
3424451
|
#!/usr/bin/env python
# This node subscribes to a topic published by rviz which gives a pose estimate for a robot
# and corrects its frame_id so that it will properly work in a multi-robot system.
# In a regular single robot system, rviz publishes pose estimates to /initialpose. In
# the multibot system, rviz is configured to publish into specific namespaces, eg.
# /robot1/initialpose/patch and /robot2/initialpose/patch. This node lives inside one of the
# robot namespaces, and subscribes to initialpose/patch. It then corrects the message
# published by rviz by changing the frame_id from "map" to "/map" and then publishes this modified
# message to initialpose/ where the bot's navigation stack is waiting for it.
import rospy
from geometry_msgs.msg import PoseWithCovarianceStamped
def callback(msg):
msg.header.frame_id = "/map"
pub.publish(msg)
    print(msg)
sub_topic = "initialpose/patch"
pub_topic = "initialpose/"
rospy.init_node("pose_estimate_patcher")
sub = rospy.Subscriber(sub_topic, PoseWithCovarianceStamped, callback)
pub = rospy.Publisher(pub_topic, PoseWithCovarianceStamped, queue_size=1)
rospy.spin()
|
StarcoderdataPython
|
1884941
|
<reponame>caser789/libcollection<filename>lib_collection/priority_queue/tt.py
class Queue(object):
def __init__(self, capacity=2):
self.values = [None] * capacity
self.n = 1
self.capacity = capacity
def __len__(self):
return self.n-1
def _resize(self, capacity):
values = [None] * capacity
for i in range(self.n):
values[i] = self.values[i]
self.values = values
self.capacity = capacity
def _swim(self, n):
while n > 1:
h = n//2
if self.values[h] > self.values[n]:
break
self.values[h], self.values[n] = self.values[n], self.values[h]
n = h
def _sink(self, n):
h = 1
while h*2 <= n:
j = h*2
if j+1 <= n and self.values[j+1] > self.values[j]:
j += 1
if self.values[h] > self.values[j]:
break
self.values[h], self.values[j] = self.values[j], self.values[h]
h = j
def enqueue(self, v):
if self.n == self.capacity:
self._resize(self.capacity*2)
self.values[self.n] = v
self._swim(self.n)
self.n += 1
def dequeue(self):
if len(self) == 0:
return
v = self.values[1]
self.n -= 1
self.values[1], self.values[self.n] = self.values[self.n], self.values[1]
self._sink(self.n-1)
if self.n-1 == self.capacity//4:
self._resize(self.capacity//2)
return v
def max(self):
if len(self) == 0:
return
return self.values[1]
if __name__ == '__main__':
q = Queue()
q.enqueue(2)
q.enqueue(9)
q.enqueue(3)
q.enqueue(8)
q.enqueue(5)
q.enqueue(7)
q.enqueue(0)
q.enqueue(1)
q.enqueue(4)
q.enqueue(6)
for i in range(len(q)):
assert q.max() == 9-i
assert q.dequeue() == 9-i
|
StarcoderdataPython
|
1772497
|
<reponame>wk8/elle
# Copyright (C) 2009-2016, Quentin "mefyl" Hocquet
#
# This software is provided "as is" without warranty of any kind,
# either expressed or implied, including but not limited to the
# implied warranties of fitness for a particular purpose.
#
# See the LICENSE file for more information.
from .. import Builder, Node, Path, node
class Bison:
def __init__(self, bison = 'bison'):
self.__bison = bison
def plug(self, toolkit):
toolkit.hook_bin_src_add(self.hook_bin_src)
def hook_bin_src(self, src):
if isinstance(src, BisonSource):
builder = BisonCompiler(src, self)
return builder.cc()
def compile(self, path, dst):
return '%s --defines --report=all -Dparse.error=verbose -Dlr.default-reductions=consistent --xml %s -o %s' % (self.__bison, path, dst)
class BisonSource(Node):
def __init__(self, path):
Node.__init__(self, path)
Node.extensions['y'] = BisonSource
Node.extensions['yy'] = BisonSource
class BisonCompiler(Builder):
name = 'Bison compilation'
def __init__(self, source, bison):
self.__source = source
self.__bison = bison
base_path = source.name()
grammar_cc_path = Path(base_path)
grammar_cc_path.extension = 'cc'
self.__grammar_cc = node(grammar_cc_path)
grammar_hh_path = Path(base_path)
grammar_hh_path.extension = 'hh'
self.__grammar_hh = node(grammar_hh_path)
for base in ['location', 'position', 'stack']:
path = base_path.dirname() / ('%s.hh' % base)
self.__dict__['_BisonCompiler__%s' % base] = node(path)
Builder.__init__(self, [source],
[self.__grammar_cc,
self.__grammar_hh,
self.__location,
self.__position,
self.__stack,
])
def execute(self):
return self.cmd('Bison %s' % self.__source,
self.__bison.compile(self.__source.path(),
self.__grammar_cc.path()))
def cc(self):
return self.__grammar_cc
|
StarcoderdataPython
|
5146724
|
<filename>hydrolm/__init__.py
from hydrolm.lm import LM
from hydrolm import util
|
StarcoderdataPython
|
9608020
|
# Hack so that tests are importable in different levels
try:
from . import DatasetHandlerTester
except:
from util import DatasetHandlerTester
class SpotifyHandler(DatasetHandlerTester):
@classmethod
def setUpClass(cls):
# Make DataHandlerTester class methods available
        super().setUpClass()
# Create the spotify handlers
cls.available_handlers(cls, ['spotify'])
cls.create_handlers(cls)
def test_data_fetching(self):
# Random subsample size for testing
sample = 300
# Load only sample of the full data
        self.load_function(sample)
# Try to fetch the data
self.fetch_function(None)
@classmethod
def tearDownClass(cls):
print("Tearing down")
cls.destroy_test_datasets(cls)
|
StarcoderdataPython
|
11209539
|
# pylint: disable=C0103
import tensorflow as tf
def shape_list(input_tensor):
"""Return list of dims, statically where possible."""
tensor = tf.convert_to_tensor(input_tensor)
# If unknown rank, return dynamic shape
if tensor.get_shape().dims is None:
return tf.shape(tensor)
static = tensor.get_shape().as_list()
shape = tf.shape(tensor)
ret = []
for i, dim in enumerate(static):
if dim is None:
dim = shape[i]
ret.append(dim)
return ret
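
# A small usage sketch (not part of the original module): statically known
# dimensions come back as plain Python ints; inside a graph/tf.function,
# unknown dimensions would come back as scalar Tensors instead.
if __name__ == "__main__":
    x = tf.zeros([4, 8])
    print(shape_list(x))  # -> [4, 8]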
|
StarcoderdataPython
|
4931763
|
<reponame>relax-space/thread-first
'''
Note: running tasks concurrently on multiple threads saves time
compared to running them sequentially on a single thread.
'''
import time
from queue import Queue
from threading import Thread
def req1(param):
time.sleep(1)
return param
def main1():
return [req1(1), req1(2)]
def req2(param, res_value: Queue):
time.sleep(1)
res_value.put(param)
def main2():
res_value = Queue()
tasks = [Thread(target=req2, args=(1, res_value)),
Thread(target=req2, args=(2, res_value))]
for i in tasks:
i.start()
for i in tasks:
i.join()
return [res_value.get() for i in tasks]
if __name__ == '__main__':
t1 = time.time()
res1 = main1()
t2 = time.time()
res2 = main2()
t3 = time.time()
    print(f'main1 result {res1}\ttime {round(t2-t1,1)}')
    print(f'main2 result {res2}\ttime {round(t3-t2,1)}')
'''
Output:
main1 result [1, 2]   time 2.0
main2 result [2, 1]   time 1.0
'''
|
StarcoderdataPython
|
12852236
|
v = int(input('Enter a value: '))
validador = 0
contador = 1
while contador < v:
    if v % contador == 0:
        validador += 1
    contador += 1
if validador > 1:
    print(f'This number is NOT prime, since it is divisible by {validador + 1} different numbers')
else:
    print('This number is prime')
|
StarcoderdataPython
|
1771095
|
import sys
from pypy.translator.llvm.log import log
from pypy.translator.llvm.typedefnode import create_typedef_node
from pypy.translator.llvm.typedefnode import getindexhelper
from pypy.translator.llvm.funcnode import FuncImplNode
from pypy.translator.llvm.extfuncnode import ExternalFuncNode
from pypy.translator.llvm.opaquenode import OpaqueNode, ExtOpaqueNode
from pypy.translator.llvm.structnode import StructNode, StructVarsizeNode, \
FixedSizeArrayNode
from pypy.translator.llvm.arraynode import ArrayNode, StrArrayNode, \
VoidArrayNode, ArrayNoLengthNode, StrArrayNoLengthNode, DebugStrNode
from pypy.rpython.lltypesystem import lltype, llmemory, llarena, rffi
from pypy.objspace.flow.model import Constant, Variable
from pypy.rlib.objectmodel import Symbolic, ComputedIntSymbolic
from pypy.rlib.objectmodel import CDefinedIntSymbolic
from pypy.rlib import objectmodel
from pypy.rlib import jit
log = log.database
def var_size_type(T):
" returns None if T not varsize "
if not T._is_varsize():
return None
elif isinstance(T, lltype.Array):
return T.OF
elif isinstance(T, lltype.Struct):
return T._arrayfld
else:
assert False, "unknown type"
class Database(object):
def __init__(self, genllvm, translator):
self.genllvm = genllvm
self.translator = translator
self.gctransformer = None
self.obj2node = {}
self._pendingsetup = []
self._tmpcount = 1
self.primitives = Primitives(self)
# keep ordered list for when we write
self.funcnodes = []
self.typedefnodes = []
self.containernodes = []
self.debugstringnodes = []
    #_______debugging_______________________________________
def dump_pbcs(self):
r = ""
for k, v in self.obj2node.iteritems():
if isinstance(v, FuncImplNode):
continue
if isinstance(k, lltype.LowLevelType):
continue
assert isinstance(lltype.typeOf(k), lltype.ContainerType)
# Only dump top levels
p, _ = lltype.parentlink(k)
type_ = self.repr_type(lltype.Ptr(lltype.typeOf(k)))
r += "\ndump_pbcs %s (%s)\n" \
"parent %s\n" \
"type %s\n" \
"ref -> %s \n" % (v, k, p, type_, v.ref)
return r
#_______setting up and preparation______________________________
def create_constant_node(self, type_, value):
node = None
if isinstance(type_, lltype.FuncType):
if getattr(value, 'external', None) == 'C':
node = ExternalFuncNode(self, value)
else:
node = FuncImplNode(self, value)
elif isinstance(type_, lltype.FixedSizeArray):
node = FixedSizeArrayNode(self, value)
elif isinstance(type_, lltype.Struct):
if type_._arrayfld:
node = StructVarsizeNode(self, value)
else:
node = StructNode(self, value)
elif isinstance(type_, lltype.Array):
if type_.OF is lltype.Char:
if type_._hints.get("nolength", False):
node = StrArrayNoLengthNode(self, value)
else:
node = StrArrayNode(self, value)
elif type_.OF is lltype.Void:
node = VoidArrayNode(self, value)
else:
if type_._hints.get("nolength", False):
node = ArrayNoLengthNode(self, value)
else:
node = ArrayNode(self, value)
elif isinstance(type_, lltype.OpaqueType):
if type_.hints.get('render_structure', False):
node = ExtOpaqueNode(self, value)
else:
node = OpaqueNode(self, value)
elif type_ is llmemory.WeakRef:
# XXX this uses a hack in translator.c.node.weakrefnode_factory()
# because we need to obtain not just *a* conversion of the weakref
# by the gcpolicy, but *the same* one as was already registered
# in the genc database and seen by the gctransformer
value = value._converted_weakref
return self.create_constant_node(lltype.typeOf(value), value)
assert node is not None, "%s not supported" % (type_)
return node
def addpending(self, key, node):
        # sanity check: we at least have a key of the right type
assert (isinstance(key, lltype.LowLevelType) or
isinstance(lltype.typeOf(key), lltype.ContainerType))
assert key not in self.obj2node, (
"node with key %r already known!" %(key,))
#log("added to pending nodes:", type(key), node)
self.obj2node[key] = node
self._pendingsetup.append(node)
def prepare_type(self, type_):
if type_ in self.obj2node:
return
if isinstance(type_, lltype.Primitive):
return
if isinstance(type_, lltype.Ptr):
self.prepare_type(type_.TO)
else:
node = create_typedef_node(self, type_)
self.addpending(type_, node)
self.typedefnodes.append(node)
def prepare_type_multi(self, types):
for type_ in types:
self.prepare_type(type_)
def prepare_constant(self, ct, value):
# always add type (it is safe)
self.prepare_type(ct)
if isinstance(ct, lltype.Primitive):
# special cases for address
if ct is llmemory.Address:
# prepare the constant data which this address references
fakedaddress = value
if fakedaddress:
ptrvalue = fakedaddress.ptr
ct = lltype.typeOf(ptrvalue)
self.prepare_constant(ct, ptrvalue)
else:
if isinstance(value, llmemory.AddressOffset):
self.prepare_offset(value)
return
if isinstance(ct, lltype.Ptr):
ptrvalue = value
ct = ct.TO
value = ptrvalue._obj
# we dont need a node for nulls
if value is None:
return
# we dont need a node for tagged pointers
if isinstance(value, int):
return
# we can share data via pointers
assert isinstance(ct, lltype.ContainerType)
if value not in self.obj2node:
self.addpending(value, self.create_constant_node(ct, value))
def prepare_arg(self, const_or_var):
"""if const_or_var is not already in a dictionary self.obj2node,
the appropriate node gets constructed and gets added to
self._pendingsetup and to self.obj2node"""
if isinstance(const_or_var, Constant):
self.prepare_constant(const_or_var.concretetype,
const_or_var.value)
else:
assert isinstance(const_or_var, Variable)
self.prepare_type(const_or_var.concretetype)
def prepare_offset(self, offset):
if isinstance(offset, llmemory.CompositeOffset):
for value in offset.offsets:
self.prepare_offset(value)
elif isinstance(offset, llarena.RoundedUpForAllocation):
self.prepare_offset(offset.basesize)
elif hasattr(offset, 'TYPE'):
self.prepare_type(offset.TYPE)
def setup_all(self):
self.gcpolicy.setup()
while self._pendingsetup:
node = self._pendingsetup.pop()
#log.settingup(node)
node.setup()
def set_entrynode(self, key):
self.entrynode = self.obj2node[key]
return self.entrynode
def getnodes(self):
return self.obj2node.itervalues()
def gettypedefnodes(self):
return self.typedefnodes
# __________________________________________________________
# Representing variables and constants in LLVM source code
def to_getelementptr(self, value):
# so we build the thing up instead
p = value
children = []
while True:
p, c = lltype.parentlink(p)
if p is None:
break
children.append((p, c))
children.reverse()
TYPE = lltype.typeOf(children[0][0])
parentnode = self.obj2node[children[0][0]]
indices = [("i32", 0)]
for _, ii in children:
typedefnode = self.obj2node[TYPE]
if isinstance(ii, str):
TYPE = typedefnode.fieldname_to_getelementptr(indices, ii)
else:
TYPE = typedefnode.indexref_to_getelementptr(indices, ii)
indices_str = ', '.join ([('%s %s' % (x,y)) for x, y in indices])
ref = "getelementptr(%s* %s, %s)" % (
parentnode.get_typerepr(),
parentnode.ref,
indices_str)
return ref
def get_ref(self, value):
node = self.obj2node[value]
T = lltype.typeOf(value)
p, c = lltype.parentlink(value)
if p is None:
ref = node.ref
VT = var_size_type(T)
if VT and VT is not lltype.Void:
ref = "bitcast(%s* %s to %s*)" % (node.get_typerepr(),
ref,
self.repr_type(T))
else:
ref = self.to_getelementptr(value)
if isinstance(node, FixedSizeArrayNode):
assert isinstance(value, lltype._subarray)
# XXX UGLY (but needs fixing outside of genllvm)
# ptr -> array of len 1 (for now, since operations expect this)
ref = "bitcast(%s* %s to %s*)" % (self.repr_type(T.OF),
ref,
self.repr_type(T))
return ref
def repr_arg(self, arg):
if isinstance(arg, Constant):
if isinstance(arg.concretetype, lltype.Primitive):
return self.primitives.repr(arg.concretetype, arg.value)
else:
assert isinstance(arg.value, lltype._ptr)
if not arg.value:
return 'null'
else:
return self.get_ref(arg.value._obj)
else:
assert isinstance(arg, Variable)
return "%" + str(arg)
def repr_arg_type(self, arg):
assert isinstance(arg, (Constant, Variable))
ct = arg.concretetype
return self.repr_type(ct)
def repr_type(self, type_):
try:
return self.obj2node[type_].ref
except KeyError:
if isinstance(type_, lltype.Primitive):
return self.primitives[type_]
elif isinstance(type_, lltype.Ptr):
return self.repr_type(type_.TO) + '*'
else:
raise TypeError("cannot represent %r" %(type_,))
def repr_argwithtype(self, arg):
return self.repr_arg(arg), self.repr_arg_type(arg)
def repr_arg_multi(self, args):
return [self.repr_arg(arg) for arg in args]
def repr_arg_type_multi(self, args):
return [self.repr_arg_type(arg) for arg in args]
def repr_constant(self, value):
" returns node and repr as tuple "
type_ = lltype.typeOf(value)
if isinstance(type_, lltype.Primitive):
repr = self.primitives.repr(type_, value)
return None, "%s %s" % (self.repr_type(type_), repr)
elif isinstance(type_, lltype.Ptr):
toptr = self.repr_type(type_)
value = value._obj
# special case, null pointer
if value is None:
return None, "%s null" % toptr
node = self.obj2node[value]
ref = self.get_ref(value)
return node, "%s %s" % (toptr, ref)
elif isinstance(type_, (lltype.Array, lltype.Struct)):
node = self.obj2node[value]
return node, node.constantvalue()
elif isinstance(type_, lltype.OpaqueType):
node = self.obj2node[value]
if isinstance(node, ExtOpaqueNode):
return node, node.constantvalue()
assert False, "%s not supported" % (type(value))
def repr_tmpvar(self):
count = self._tmpcount
self._tmpcount += 1
return "%tmp_" + str(count)
# __________________________________________________________
# Other helpers
def get_machine_word(self):
return self.primitives[lltype.Signed]
def is_function_ptr(self, arg):
if isinstance(arg, (Constant, Variable)):
arg = arg.concretetype
if isinstance(arg, lltype.Ptr):
if isinstance(arg.TO, lltype.FuncType):
return True
return False
def create_debug_string(self, s):
r = DebugStrNode(s)
self.debugstringnodes.append(r)
return r
class Primitives(object):
def __init__(self, database):
self.database = database
self.types = {
lltype.Char: "i8",
lltype.Bool: "i1",
lltype.SingleFloat: "float",
lltype.Float: "double",
lltype.UniChar: "i32",
lltype.Void: "void",
lltype.UnsignedLongLong: "i64",
lltype.SignedLongLong: "i64",
llmemory.Address: "i8*",
}
# 32 bit platform
if sys.maxint == 2**31-1:
self.types.update({
lltype.Signed: "i32",
lltype.Unsigned: "i32" })
# 64 bit platform
elif sys.maxint == 2**63-1:
self.types.update({
lltype.Signed: "i64",
lltype.Unsigned: "i64" })
else:
raise Exception("Unsupported platform - unknown word size")
self.reprs = {
lltype.SignedLongLong : self.repr_signed,
lltype.Signed : self.repr_signed,
lltype.UnsignedLongLong : self.repr_default,
lltype.Unsigned : self.repr_default,
lltype.SingleFloat: self.repr_singlefloat,
lltype.Float : self.repr_float,
lltype.Char : self.repr_char,
lltype.UniChar : self.repr_unichar,
lltype.Bool : self.repr_bool,
lltype.Void : self.repr_void,
llmemory.Address : self.repr_address,
}
try:
import ctypes
except ImportError:
pass
else:
def update(from_, type):
if from_ not in self.types:
self.types[from_] = type
if from_ not in self.reprs:
self.reprs[from_] = self.repr_default
for tp in [rffi.SIGNEDCHAR, rffi.UCHAR, rffi.SHORT,
rffi.USHORT, rffi.INT, rffi.UINT, rffi.LONG, rffi.ULONG,
rffi.LONGLONG, rffi.ULONGLONG]:
bits = rffi.size_and_sign(tp)[0] * 8
update(tp, 'i%s' % bits)
def get_attrs_for_type(self, type):
# because we want to bind to external functions that depend
# on sign/zero extensions, we need to use these attributes in function sigs
# note that this is not needed for internal functions because they use
# casts if necessary
type_attrs = ""
if not isinstance(type, lltype.Number):
return type_attrs
size, sign = rffi.size_and_sign(type)
if size < 4:
if not sign:
type_attrs += "signext"
else:
type_attrs += "zeroext"
return type_attrs
def __getitem__(self, key):
return self.types[key]
def repr(self, type_, value):
try:
reprfn = self.reprs[type_]
except KeyError:
raise Exception, "unsupported primitive type %r, value %r" % (type_, value)
else:
return reprfn(type_, value)
def repr_default(self, type_, value):
return str(value)
def repr_bool(self, type_, value):
return str(value).lower() #False --> false
def repr_void(self, type_, value):
return 'void'
def repr_char(self, type_, value):
x = ord(value)
if x >= 128:
# XXX check this really works
r = "trunc (i16 %s to i8)" % x
else:
r = str(x)
return r
def repr_unichar(self, type_, value):
return str(ord(value))
def repr_float(self, type_, value):
from pypy.rlib.rarithmetic import isinf, isnan
if isinf(value) or isnan(value):
# Need hex repr
import struct
packed = struct.pack("d", value)
if sys.byteorder == 'little':
packed = packed[::-1]
repr = "0x" + "".join([("%02x" % ord(ii)) for ii in packed])
else:
repr = "%f" % value
# llvm requires a . when using e notation
if "e" in repr and "." not in repr:
repr = repr.replace("e", ".0e")
return repr
def repr_singlefloat(self, type_, value):
from pypy.rlib.rarithmetic import isinf, isnan
f = float(value)
if isinf(f) or isnan(f):
import struct
packed = value._bytes
if sys.byteorder == 'little':
packed = packed[::-1]
assert len(packed) == 4
repr = "0x" + "".join([("%02x" % ord(ii)) for ii in packed])
else:
#repr = "%f" % f
# XXX work around llvm2.1 bug, seems it doesnt like constants for floats
repr = "fptrunc(double %f to float)" % f
# llvm requires a . when using e notation
if "e" in repr and "." not in repr:
repr = repr.replace("e", ".0e")
return repr
def repr_address(self, type_, value):
# XXX why-o-why isnt this an int ???
if not value:
return 'null'
ptr = value.ptr
node, ref = self.database.repr_constant(ptr)
res = "bitcast(%s to i8*)" % (ref,)
return res
def repr_signed(self, type_, value):
if isinstance(value, Symbolic):
return self.repr_symbolic(type_, value)
return str(value)
def repr_symbolic(self, type_, value):
""" returns an int value for pointer arithmetic - not sure this is the
llvm way, but well XXX need to fix adr_xxx operations """
if (type(value) == llmemory.GCHeaderOffset or
type(value) == llmemory.AddressOffset):
repr = 0
elif isinstance(value, llmemory.AddressOffset):
repr = self.repr_offset(value)
elif isinstance(value, ComputedIntSymbolic):
# force the ComputedIntSymbolic to become a real integer value now
repr = '%d' % value.compute_fn()
elif isinstance(value, CDefinedIntSymbolic):
if value is objectmodel.malloc_zero_filled:
repr = '1'
elif value is jit._we_are_jitted:
repr = '0'
elif value is objectmodel.running_on_llinterp:
repr = '0'
else:
raise NotImplementedError("CDefinedIntSymbolic: %r" % (value,))
else:
raise NotImplementedError("symbolic: %r" % (value,))
return repr
def repr_offset(self, value):
if isinstance(value, llarena.RoundedUpForAllocation):
# XXX not supported when used in a CompositeOffset
from pypy.rpython.tool import rffi_platform
align = rffi_platform.memory_alignment()
r_basesize = self.repr_offset(value.basesize)
# Note that the following expression is known to crash 'llc';
# you may need to upgrade llvm.
return "and(i32 add(i32 %s, i32 %d), i32 %d)" % (
r_basesize, align-1, ~(align-1))
from_, indices, to = self.get_offset(value, [])
if from_ is lltype.Void or not indices:
return "0"
assert to is not lltype.Void
r = self.database.repr_type
indices_as_str = ", ".join("%s %s" % (w, i) for w, i in indices)
return "ptrtoint(%s* getelementptr(%s* null, %s) to i32)" % (r(to),
r(from_),
indices_as_str)
def get_offset(self, value, indices):
" return (from_type, (indices, ...), to_type) "
word = self.database.get_machine_word()
if isinstance(value, llmemory.ItemOffset):
if not indices:
indices.append((word, 0))
# skips over a fixed size item (eg array access)
from_ = value.TYPE
if from_ is not lltype.Void:
lasttype, lastvalue = indices[-1]
assert lasttype == word
indices[-1] = (word, lastvalue + value.repeat)
to = value.TYPE
elif isinstance(value, llmemory.FieldOffset):
if not indices:
indices.append((word, 0))
# jumps to a field position in a struct
from_ = value.TYPE
pos = getindexhelper(self.database, value.fldname, value.TYPE)
indices.append((word, pos))
to = getattr(value.TYPE, value.fldname)
elif isinstance(value, llmemory.ArrayLengthOffset):
assert not value.TYPE._hints.get("nolength", False)
if not indices:
indices.append((word, 0))
# jumps to the place where the array length is stored
from_ = value.TYPE # <Array of T> or <GcArray of T>
assert isinstance(value.TYPE, lltype.Array)
typedefnode = self.database.obj2node[value.TYPE]
indexref = typedefnode.indexref_for_length()
indices.append((word, indexref))
to = lltype.Signed
elif isinstance(value, llmemory.ArrayItemsOffset):
if not indices:
if isinstance(value.TYPE, lltype.Array) and value.TYPE._hints.get("nolength", False):
pass
else:
indices.append((word, 0))
if value.TYPE.OF is lltype.Void:
# skip over the whole structure in order to get to the
# (not-really-existent) array part
return self.get_offset(llmemory.ItemOffset(value.TYPE),
indices)
# jumps to the beginning of array area
from_ = value.TYPE
if not isinstance(value.TYPE, lltype.FixedSizeArray) and not value.TYPE._hints.get("nolength", False):
typedefnode = self.database.obj2node[value.TYPE]
indexref = typedefnode.indexref_for_items()
indices.append((word, indexref))
indices.append((word, 0)) # go to the 1st item
if isinstance(value.TYPE, lltype.FixedSizeArray):
indices.append((word, 0)) # go to the 1st item
to = value.TYPE.OF
elif isinstance(value, llmemory.CompositeOffset):
from_, indices, to = self.get_offset(value.offsets[0], indices)
for item in value.offsets[1:]:
_, indices, to1 = self.get_offset(item, indices)
if to1 is not lltype.Void:
to = to1
else:
raise Exception("unsupported offset")
return from_, indices, to
|
StarcoderdataPython
|
11282276
|
<gh_stars>100-1000
from kivy.uix.popup import Popup
from kivy.properties import ObjectProperty, StringProperty
from kivy.lang import Builder
import kivy.uix.filechooser
Builder.load_file('persimmon/view/util/filedialog.kv')
# TODO: fix for write csv
class FileDialog(Popup):
"""File Dialogs is a popup that gets a file"""
file_chooser = ObjectProperty()
#tinput = ObjectProperty()
load_button = ObjectProperty()
file_chosen = StringProperty()
def __init__(self, dir='~', filters=None, **kwargs):
super().__init__(**kwargs)
self.file_chooser.path = dir
if filters:
self.file_chooser.filters=filters
else:
self.file_chooser.filters=[]
def toggle_load_button(self, selection):
if selection:
#self.file_chosen = self.file_chooser.selection[0]
self.tinput.text = self.file_chooser.selection[0]
self.load_button.disabled = False
elif self.tinput.text:
self.load_button.disabled = False
else:
self.load_button.disabled = True
def changed_dir(self, file_chooser, entry, _):
#self.file_chosen = file_chooser.path
self.tinput.text = file_chooser.path
|
StarcoderdataPython
|
4809896
|
from django.apps import AppConfig
class ProductStockConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'product_management_models.product_stocks'
verbose_name = 'Product Stocks'
|
StarcoderdataPython
|
290796
|
from random import randrange
class CodeMaker:
"""Implementation of AI logic."""
def __init__(self):
self.code = [None]*4
self.key_pegs = [0]*4
self.key_peg_amount = 0
def draw_code(self):
code = []
for i in range(4):
code.append(randrange(5))
self.code = code
def reset_code(self):
self.code = [None]*4
    def codebreaker_wins_condition(self):
        # The codebreaker wins when every key peg equals 2 (exact match).
        if all(peg == 2 for peg in self.key_pegs):
            return True
        return False
def check_for_ideal_placement(self, guess):
for peg_id in range(len(self.code)):
if self.code[peg_id] == guess[peg_id]:
self.key_pegs[self.key_peg_amount] = 2
self.key_peg_amount += 1
guess[peg_id] = None
return guess
def check_for_color_pairs(self, guess):
temp_guess = guess
for peg_id in range(len(self.code)):
if self.code[peg_id] in guess:
guess.remove(self.code[peg_id])
self.key_pegs[self.key_peg_amount] = 1
self.key_peg_amount += 1
def provide_feedback(self, guess):
guess = self.check_for_ideal_placement(guess)
self.check_for_color_pairs(guess)
return self.key_pegs
def reset_feedback_pegs(self):
self.key_pegs = [0]*4
self.key_peg_amount = 0
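
# A short usage sketch (not part of the original class); the guess below is a
# hypothetical list of four colour indices in the 0-4 range drawn above.
if __name__ == '__main__':
    maker = CodeMaker()
    maker.draw_code()
    guess = [0, 1, 2, 3]
    print("feedback pegs:", maker.provide_feedback(list(guess)))
    print("codebreaker wins:", maker.codebreaker_wins_condition())
    maker.reset_feedback_pegs()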
|
StarcoderdataPython
|
301171
|
from DbxSync.CodeTransformer.LineTransformer.StringLine import StringLine
class LineTransformerResolver:
def __init__(self, lineTransformers: list):
self.__lineTransformers = lineTransformers
def resolve(self, parsedLine):
for lineTransformer in self.__lineTransformers:
if lineTransformer.handles(parsedLine):
return lineTransformer
raise Exception('No line Transformer for parsed line ' + str(parsedLine.__class__))
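
# A minimal, self-contained illustration (DummyTransformer and the plain
# string below stand in for real DbxSync parsed-line and transformer classes):
if __name__ == '__main__':
    class DummyTransformer:
        def handles(self, parsed_line):
            return isinstance(parsed_line, str)

    resolver = LineTransformerResolver([DummyTransformer()])
    print(resolver.resolve("some parsed line"))  # -> the DummyTransformer instance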
|
StarcoderdataPython
|
397025
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def sortList(self, head: Optional[ListNode]) -> Optional[ListNode]:
if not head or not head.next:
return head
arr = []
while head:
arr.append(head.val)
head = head.next
arr.sort()
new = cur = ListNode()
for ele in arr:
cur.next = ListNode(ele)
cur = cur.next
return new.next
|
StarcoderdataPython
|
4943370
|
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
from DQMOffline.EGamma.photonAnalyzer_cfi import *
dqmElectronOfflineClient = DQMEDHarvester("ElectronOfflineClient",
Verbosity = cms.untracked.int32(0),
FinalStep = cms.string("AtJobEnd"),
InputFile = cms.string(""),
OutputFile = cms.string(""),
InputFolderName = cms.string("Egamma/Electrons"),
OutputFolderName = cms.string("Egamma/Electrons"),
EffHistoTitle = cms.string("fraction of reco ele matching a reco sc")
)
|
StarcoderdataPython
|
8033123
|
import numpy as np
# BD-Rate and BD-PNSR computation
# (c) <NAME> (<EMAIL>)
def bj_delta(R1, PSNR1, R2, PSNR2, mode=0):
lR1 = np.log(R1)
lR2 = np.log(R2)
# find integral
if mode == 0:
# least squares polynomial fit
p1 = np.polyfit(lR1, PSNR1, 3)
p2 = np.polyfit(lR2, PSNR2, 3)
# integration interval
min_int = max(min(lR1), min(lR2))
max_int = min(max(lR1), max(lR2))
# indefinite integral of both polynomial curves
p_int1 = np.polyint(p1)
p_int2 = np.polyint(p2)
# evaluates both poly curves at the limits of the integration interval
# to find the area
int1 = np.polyval(p_int1, max_int) - np.polyval(p_int1, min_int)
int2 = np.polyval(p_int2, max_int) - np.polyval(p_int2, min_int)
# find avg diff between the areas to obtain the final measure
avg_diff = (int2-int1)/(max_int-min_int)
else:
        # rate method: same as the previous one but with the order inverted
p1 = np.polyfit(PSNR1, lR1, 3)
p2 = np.polyfit(PSNR2, lR2, 3)
# integration interval
min_int = max(min(PSNR1), min(PSNR2))
max_int = min(max(PSNR1), max(PSNR2))
# indefinite interval of both polynomial curves
p_int1 = np.polyint(p1)
p_int2 = np.polyint(p2)
# evaluates both poly curves at the limits of the integration interval
# to find the area
int1 = np.polyval(p_int1, max_int) - np.polyval(p_int1, min_int)
int2 = np.polyval(p_int2, max_int) - np.polyval(p_int2, min_int)
# find avg diff between the areas to obtain the final measure
avg_exp_diff = (int2-int1)/(max_int-min_int)
avg_diff = (np.exp(avg_exp_diff)-1)*100
return avg_diff
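
# A small usage sketch; the rate/PSNR points below are made-up values (four
# RD points per codec, as is typical for BD metrics).
if __name__ == "__main__":
    R1 = [1000, 2000, 4000, 8000]        # kbps, reference codec
    PSNR1 = [34.0, 36.5, 38.8, 40.5]
    R2 = [900, 1800, 3600, 7200]         # kbps, codec under test
    PSNR2 = [34.4, 37.0, 39.3, 41.0]
    print("BD-PSNR (dB):", bj_delta(R1, PSNR1, R2, PSNR2, mode=0))
    print("BD-Rate (%):", bj_delta(R1, PSNR1, R2, PSNR2, mode=1))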
|
StarcoderdataPython
|
4997936
|
def recaman(n):
arr = [0] * n
arr[0] = 0
print(arr[0], end=", ")
for i in range(1, n):
curr = arr[i-1] - i
for j in range(0, i):
if ((arr[j] == curr) or curr < 0):
curr = arr[i-1] + i
break
arr[i] = curr
print(arr[i], end=", ")
# Driver code
n = 10
recaman(n)
|
StarcoderdataPython
|
8098726
|
#!/usr/bin/python
#encoding=utf8
info = {"name":"xiaoming", "age":23, "sex":"male"}
print(info)
# Get all keys of info
print(info.keys())
# Get all values
print(info.values())
# Output the entries as a list of (key, value) items
print(info.items())
# Get the value of a specific key
name = info['name']
print('name: %s'%name)
# Get a key that does not exist, supplying a default value
print(info.get('home', 'www.baidu.com'))
# Delete the given key
del info['sex']
print('del info sex: %s'%info)
# Iterate over the dictionary
for key, value in info.items():
    print('key: %s value: %s'%(key, value))
# Clear the whole dictionary
info.clear()
print("info is clear: %s "%info)
# Check whether the dictionary has a given key
# ('has_key' is Python 2 only; the 'in' operator works in Python 2 and 3)
print("info has key name: %s"%('name' in info))
|
StarcoderdataPython
|
264775
|
# encoding:UTF-8
import tkMessageBox
from Tkinter import *
__author__ = 'Hope6537'
class Application(Frame):
def __init__(self, master=None):
Frame.__init__(self, master)
self.pack()
self.createWidgets()
def createWidgets(self):
self.nameInput = Entry(self)
self.nameInput.pack()
self.alertButton = Button(self, text='Hello', command=self.hello)
self.alertButton.pack()
def hello(self):
name = self.nameInput.get() or 'world'
tkMessageBox.showinfo('Message', 'Hello, %s' % name)
app = Application()
# Set the window title:
app.master.title('Hello World')
# Main message loop:
app.mainloop()
|
StarcoderdataPython
|
11240264
|
import math
import torch
from .constants import STAGES
from .constants import Any, Tensor, DataLoader
from .constants import Tuple, Dict, List
from .constants import Optional, Union
class LoopState:
"""
Maintains train/valid/test loop state for a single run of
    a certain number of epochs; it is not used to preserve state
    between runs.
"""
_stages = STAGES
_batch_step, _epoch_start, _epoch_end = _stages
def __init__(self, phase:str, floop:object, no_cast:bool,
no_float:bool, is_train:bool, is_test:bool,
dl:DataLoader
):
"""
phase : phase name 'train', 'valid' or 'test'
floop : the calling FitLoop object
"""
self.__batch = ()
self.__floop = floop
self._no_cast = no_cast
self._no_float = no_float
self.phase = phase
self.batch_num = 0
self.epoch_num = 0
self.metrics = {s:{} for s in self._stages}
self.is_train = is_train
self.is_test = is_test
# For easy access
bs = dl.batch_size
dr = dl.drop_last
sz = len(dl.dataset)
bt = sz / bs
# Gives dataset size and batch count
self.size = sz
self.batches = math.floor(bt) if dr else math.ceil(bt)
self.batch_size = 0
def __getattr__(self, name:str) -> Any:
# To get attributes from the FitLoop object
# for use in the stage functions.
return getattr(self.__floop, name)
def __getitem__(self, metric_name:str):
# To get the metrics stored in the batch step stage
metric_value = self.metrics[self._batch_step][metric_name]
try:
return torch.tensor(metric_value).float()
except:
return metric_value
"""
Getter and setter for the current batch
"""
@property
def batch(self) -> Tuple[Tensor,...]:
if self._no_cast:
return self.__batch
return (
d.to(device=self.device,dtype=self.dtype)
if d.is_floating_point()
else d.to(device=self.device,dtype=torch.long)
for d in self.__batch
)
@batch.setter
def batch(self, current_batch:Tuple[Tensor,...]) -> None:
self.__batch = current_batch
"""
Functions to append rdict values to self.metrics
"""
def _append(self, rdict:Dict[str, float], stage:str) -> None:
# Append metrics to the specific stage.
for key in rdict:
if key not in self.metrics[stage]:
self.metrics[stage][key] = []
self.metrics[stage][key].append(rdict[key])
def _append_batch_step(self, rdict:Dict[str, float]) -> None:
# Called after batch step rdict is returned
self._append(rdict, self._batch_step)
def _append_epoch_start(self, rdict:Dict[str, float]) -> None:
# Called before epoch start
self._append(rdict, self._epoch_start)
def _append_epoch_end(self, rdict:Dict[str, float]) -> None:
# Called after epoch end step rdict is returned
self._append(rdict, self._epoch_end)
"""
Functions to clear rdict values from self.metrics
"""
def _clear(self, stage:str) -> None:
# Clear the batch metrics at the end of the batch.
for mlist in self.metrics[stage]:
self.metrics[stage][mlist].clear()
def _clear_batch_step(self) -> None:
# Called before epoch start
self._clear(self._batch_step)
def _clear_epoch_start(self) -> None:
# Called ??
self._clear(self._epoch_start)
def _clear_epoch_end(self) -> None:
# Called after loop end
self._clear(self._epoch_end)
"""
State updates before epoch start and batch step stages
"""
def _pre_epoch_start_update(self, epoch_num:int) -> None:
self._clear_batch_step()
self.batch_num = 0
self.epoch_num = epoch_num
def _pre_batch_step_update(self, current_batch):
self.batch_size = current_batch[0].size(0)
self.batch_num += 1
self.batch = current_batch
"""
Functions to get various metrics at different stages
"""
def _get_epoch_metric(self, criteria:str) -> float:
# Last added metric that is to be used as a model
# selection criteria
metric = self.metrics[self._epoch_end][criteria][-1]
if self._no_float:
return metric
else:
try:
return float(metric)
except:
return metric
def _get_epoch_metrics(self,
display_metrics:Optional[Union[str,List[str]]]=None
) -> Dict[str,float]:
# Return the last saved epoch metrics
if isinstance(display_metrics, str):
return {display_metrics:self._get_epoch_metric(display_metrics)}
elif isinstance(display_metrics, list):
return {
metric:self._get_epoch_metric(metric)
for metric in display_metrics
}
else:
return {
metric: self._get_epoch_metric(metric)
for metric in self.metrics[self._epoch_end]
}
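# A sketch of how a user-supplied batch-step function might read this state inside FitLoop
# (illustrative only: `criterion` and `model` are hypothetical FitLoop attributes resolved
# through __getattr__, they are not defined by this class):
#
#   def batch_step(state: LoopState):
#       x, y = state.batch                        # already cast to the loop's device/dtype
#       loss = state.criterion(state.model(x), y)
#       return {"loss": loss.item()}              # collected via _append_batch_step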
|
StarcoderdataPython
|
3283978
|
import asyncio
from asyncio import CancelledError
from typing import Any, Awaitable, Sequence, TypeVar, cast, Union
from protoactor.actor.exceptions import OperationCancelled, EventLoopMismatch
_R = TypeVar('_R')
class CancelToken:
def __init__(self, name: str, loop: asyncio.AbstractEventLoop = None) -> None:
self.name = name
self._chain = []
self._triggered = asyncio.Event(loop=loop)
self._loop = loop
@property
def loop(self) -> asyncio.AbstractEventLoop:
return self._loop
def chain(self, token: 'CancelToken') -> 'CancelToken':
if self.loop != token._loop:
raise EventLoopMismatch("Chained CancelToken objects must be on the same event loop")
chain_name = ":".join([self.name, token.name])
chain = CancelToken(chain_name, loop=self.loop)
chain._chain.extend([self, token])
return chain
def trigger(self) -> None:
self._triggered.set()
@property
def triggered_token(self) -> Union['CancelToken', Any]:
if self._triggered.is_set():
return self
for token in self._chain:
if token.triggered:
return token.triggered_token
return None
@property
def triggered(self) -> bool:
if self._triggered.is_set():
return True
return any(token.triggered for token in self._chain)
def raise_if_triggered(self) -> None:
if self.triggered:
raise OperationCancelled(f'Cancellation requested by {self.triggered_token} token')
async def wait(self, timeout: float = None) -> None:
if self.triggered_token is not None:
return
futures = [asyncio.ensure_future(self._triggered.wait(), loop=self.loop)]
for token in self._chain:
futures.append(asyncio.ensure_future(token.wait(), loop=self.loop))
if timeout is not None:
futures.append(asyncio.ensure_future(asyncio.sleep(timeout), loop=self.loop))
def cancel_not_done(fut: 'asyncio.Future[None]') -> None:
for future in futures:
if not future.done():
future.cancel()
async def _wait_for_first(futures: Sequence[Awaitable[Any]]) -> None:
for future in asyncio.as_completed(futures):
await cast(Awaitable[Any], future)
return
fut = asyncio.ensure_future(_wait_for_first(futures), loop=self.loop)
fut.add_done_callback(cancel_not_done)
await fut
async def cancellable_wait(self, *awaitables: Awaitable[_R], timeout: float = None) -> _R:
futures = [asyncio.ensure_future(a, loop=self.loop) for a in awaitables + (self.wait(),)]
try:
done, pending = await asyncio.wait(
futures,
timeout=timeout,
return_when=asyncio.FIRST_COMPLETED,
loop=self.loop,
)
except CancelledError:
for future in futures:
future.cancel()
raise
for task in pending:
task.cancel()
await asyncio.wait(pending, return_when=asyncio.ALL_COMPLETED, loop=self.loop,)
if not done:
raise TimeoutError()
if self.triggered_token is not None:
for task in done:
task.exception()
raise OperationCancelled(f'Cancellation requested by {self.triggered_token} token')
return done.pop().result()
def __str__(self) -> str:
return self.name
def __repr__(self) -> str:
return f'CancelToken: {self.name}'
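# A minimal usage sketch (chains two tokens and triggers the parent; the tokens are built
# here without an explicit loop, assuming the default of None is acceptable):
if __name__ == '__main__':
    parent = CancelToken('parent')
    chained = parent.chain(CancelToken('child'))
    parent.trigger()
    print('triggered:', chained.triggered)           # True, inherited through the chain
    print('triggered by:', chained.triggered_token)  # the parent token
    asyncio.run(chained.wait())                      # returns immediately once triggered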
|
StarcoderdataPython
|
4868646
|
<filename>assignments/counter_clinton.py
# Copyright (C) 2021 <NAME>
# MIT Open Source Initiative Approved License
# counter_clinton.py
# CIS-135 Python
# Assignment #10 Counters in Loops
# Rubric: 1 Point
# Use a python while loop that continuously runs as long as a user inputs the
# response 'y' for yes. Inside the loop increment the value of a counter
# variable, printing out the value of the counter with each loop cycle.
# After the loop finally exits, print the value of the counter again.
control_variable_1 = 'y'
counter_position_1 = 0
while control_variable_1 == 'y':
counter_position_1 = counter_position_1 + 1
control_variable_1 = input("Counter 1 at %d, enter 'y' to continue: " %(counter_position_1))
# optional implementation
# print("The counter is", counter_position_1)
# control_variable_1 = input("Enter 'y' to continue, or any other input to exit: ")
# Rubric: 1 Point
# Use a traditional while loop that continuously runs as long as a user inputs the
# response 'y' for yes. Inside the loop increment the value of a counter variable,
# printing out the value of the counter with each loop cycle. After the
# loop finally exits, print the value of the counter again, this time making sure
# that the value displayed on the last run of the loop is the same as the final
# value printed once the loop is finished.
control_variable_2 = 'y'
counter_position_2 = 0
while control_variable_2 == 'y':
counter_position_2 = counter_position_2 + 1
control_variable_2 = input("Counter 2 at %d, enter 'y' to continue: " %(counter_position_2))
print("The counter 2 ended at position: ", counter_position_2)
# Rubric: 1 Point
# Use a while loop to print the values counting up from 1 to 10.
count_up = 1
while count_up < 11:
print("Counter 3 =", count_up)
count_up = count_up + 1
# Rubric: 1 Point
# Use a while loop to print the values counting down from 10 to 1.
count_down = 10
while count_down > 0:
print("Counter 4 =", count_down)
count_down = count_down - 1
# # Extra Credit:
# # Wrap each loop into a function (see the next assignment - Lab 11).
#
# def countdown():
# count_down = 10
# while count_down > 0:
# print("Counter 4 =", count_down)
# #print(count_down)
# count_down = count_down - 1
# return
#
# def countup():
# count_up = 1
# while count_up < 11:
# print("Counter 3 =", count_up)
# #print(count_up)
# count_up = count_up + 1
# return
#
# def print_inside():
# control_variable_1 = 'y'
# counter_position_1 = 0
# while control_variable_1 == 'y':
# counter_position_1 = counter_position_1 + 1
# control_variable_1 = input("Counter 1 at %d, enter 'y' to continue: " % (counter_position_1))
# return
#
# def print_outside():
# control_variable_2 = 'y'
# counter_position_2 = 0
# while control_variable_2 == 'y':
# counter_position_2 = counter_position_2 + 1
# control_variable_2 = input("Counter 2 at %d, enter 'y' to continue: " % (counter_position_2))
# print("Counter 2 ended at position ", counter_position_2)
# return
#
# print_inside()
# print_outside()
# countup()
# countdown()
|
StarcoderdataPython
|
11952
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Portable library for registering and publishing executions."""
import copy
import os
from typing import List, Mapping, MutableMapping, Optional, Sequence, cast
from absl import logging
from tfx import types
from tfx.orchestration import metadata
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import execution_result_pb2
from ml_metadata.proto import metadata_store_pb2
def _check_validity(new_artifact: metadata_store_pb2.Artifact,
original_artifact: types.Artifact,
has_multiple_artifacts: bool) -> None:
"""Check the validity of new artifact against the original artifact."""
if new_artifact.type_id != original_artifact.type_id:
raise RuntimeError('Executor output should not change artifact type.')
if has_multiple_artifacts:
# If there are multiple artifacts in the executor output, their URIs should
# be a direct sub-dir of the system generated URI.
if os.path.dirname(new_artifact.uri) != original_artifact.uri:
raise RuntimeError(
'When there are multiple artifacts to publish, their URIs '
'should be direct sub-directories of the URI of the system generated '
'artifact.')
else:
# If there is only one output artifact, its URI should not be changed
if new_artifact.uri != original_artifact.uri:
# TODO(b/175426744): Data Binder will modify the uri.
logging.warning(
'When there is one artifact to publish, the URI of it should be '
'identical to the URI of system generated artifact.')
def publish_cached_execution(
metadata_handler: metadata.Metadata,
contexts: Sequence[metadata_store_pb2.Context],
execution_id: int,
output_artifacts: Optional[MutableMapping[str,
Sequence[types.Artifact]]] = None,
) -> None:
"""Marks an existing execution as using cached outputs from a previous execution.
Args:
metadata_handler: A handler to access MLMD.
    contexts: MLMD contexts to associate with the execution.
execution_id: The id of the execution.
output_artifacts: Output artifacts of the execution. Each artifact will be
linked with the execution through an event with type OUTPUT.
"""
[execution] = metadata_handler.store.get_executions_by_id([execution_id])
execution.last_known_state = metadata_store_pb2.Execution.CACHED
execution_lib.put_execution(
metadata_handler,
execution,
contexts,
input_artifacts=None,
output_artifacts=output_artifacts)
def _set_execution_result_if_not_empty(
executor_output: Optional[execution_result_pb2.ExecutorOutput],
execution: metadata_store_pb2.Execution) -> bool:
"""Sets execution result as a custom property of the execution."""
if executor_output and (executor_output.execution_result.result_message or
executor_output.execution_result.metadata_details or
executor_output.execution_result.code):
# TODO(b/190001754): Consider either switching to base64 encoding or using
# a proto descriptor pool to circumvent TypeError which may be raised when
# converting embedded `Any` protos.
try:
execution_lib.set_execution_result(executor_output.execution_result,
execution)
except TypeError:
logging.exception(
'Skipped setting execution_result as custom property of the '
'execution due to error')
def publish_succeeded_execution(
metadata_handler: metadata.Metadata,
execution_id: int,
contexts: Sequence[metadata_store_pb2.Context],
output_artifacts: Optional[MutableMapping[str,
Sequence[types.Artifact]]] = None,
executor_output: Optional[execution_result_pb2.ExecutorOutput] = None
) -> Optional[MutableMapping[str, List[types.Artifact]]]:
"""Marks an existing execution as success.
Also publishes the output artifacts produced by the execution. This method
will also merge the executor produced info into system generated output
  artifacts. The `last_known_state` of the execution will be changed to
`COMPLETE` and the output artifacts will be marked as `LIVE`.
Args:
metadata_handler: A handler to access MLMD.
execution_id: The id of the execution to mark successful.
    contexts: MLMD contexts to associate with the execution.
output_artifacts: Output artifacts skeleton of the execution, generated by
the system. Each artifact will be linked with the execution through an
event with type OUTPUT.
executor_output: Executor outputs. `executor_output.output_artifacts` will
be used to update system-generated output artifacts passed in through
      `output_artifacts` arg. There are three constraints to the update: 1. The
keys in `executor_output.output_artifacts` are expected to be a subset
of the system-generated output artifacts dict. 2. An update to a certain
      key should contain all the artifacts under that key. 3. An update to an
artifact should not change the type of the artifact.
Returns:
    The possibly updated output_artifacts; note that only outputs whose keys are
    in executor_output will be updated and the others are left untouched, i.e.,
    the result can be partially updated.
Raises:
RuntimeError: if the executor output to a output channel is partial.
"""
output_artifacts = copy.deepcopy(output_artifacts) or {}
output_artifacts = cast(MutableMapping[str, List[types.Artifact]],
output_artifacts)
if executor_output:
if not set(executor_output.output_artifacts.keys()).issubset(
output_artifacts.keys()):
raise RuntimeError(
'Executor output %s contains more keys than output skeleton %s.' %
(executor_output, output_artifacts))
for key, artifact_list in output_artifacts.items():
if key not in executor_output.output_artifacts:
continue
updated_artifact_list = executor_output.output_artifacts[key].artifacts
# We assume the original output dict must include at least one output
# artifact and all artifacts in the list share the same type.
original_artifact = artifact_list[0]
# Update the artifact list with what's in the executor output
artifact_list.clear()
# TODO(b/175426744): revisit this:
      # 1) Whether multiple outputs are needed or not after TFX components
      #    are upgraded.
      # 2) If multiple outputs are needed and this is a common practice, should
      #    we use the driver to create the list of output artifacts instead of
      #    letting the executor create them.
for proto_artifact in updated_artifact_list:
_check_validity(proto_artifact, original_artifact,
len(updated_artifact_list) > 1)
python_artifact = types.Artifact(original_artifact.artifact_type)
python_artifact.set_mlmd_artifact(proto_artifact)
artifact_list.append(python_artifact)
# Marks output artifacts as LIVE.
for artifact_list in output_artifacts.values():
for artifact in artifact_list:
artifact.mlmd_artifact.state = metadata_store_pb2.Artifact.LIVE
[execution] = metadata_handler.store.get_executions_by_id([execution_id])
execution.last_known_state = metadata_store_pb2.Execution.COMPLETE
_set_execution_result_if_not_empty(executor_output, execution)
execution_lib.put_execution(
metadata_handler, execution, contexts, output_artifacts=output_artifacts)
return output_artifacts
def publish_failed_execution(
metadata_handler: metadata.Metadata,
contexts: Sequence[metadata_store_pb2.Context],
execution_id: int,
executor_output: Optional[execution_result_pb2.ExecutorOutput] = None
) -> None:
"""Marks an existing execution as failed.
Args:
metadata_handler: A handler to access MLMD.
    contexts: MLMD contexts to associate with the execution.
execution_id: The id of the execution.
executor_output: The output of executor.
"""
[execution] = metadata_handler.store.get_executions_by_id([execution_id])
execution.last_known_state = metadata_store_pb2.Execution.FAILED
_set_execution_result_if_not_empty(executor_output, execution)
execution_lib.put_execution(metadata_handler, execution, contexts)
def publish_internal_execution(
metadata_handler: metadata.Metadata,
contexts: Sequence[metadata_store_pb2.Context],
execution_id: int,
output_artifacts: Optional[MutableMapping[str,
Sequence[types.Artifact]]] = None
) -> None:
"""Marks an exeisting execution as as success and links its output to an INTERNAL_OUTPUT event.
Args:
metadata_handler: A handler to access MLMD.
    contexts: MLMD contexts to associate with the execution.
execution_id: The id of the execution.
output_artifacts: Output artifacts of the execution. Each artifact will be
linked with the execution through an event with type INTERNAL_OUTPUT.
"""
[execution] = metadata_handler.store.get_executions_by_id([execution_id])
execution.last_known_state = metadata_store_pb2.Execution.COMPLETE
execution_lib.put_execution(
metadata_handler,
execution,
contexts,
output_artifacts=output_artifacts,
output_event_type=metadata_store_pb2.Event.INTERNAL_OUTPUT)
def register_execution(
metadata_handler: metadata.Metadata,
execution_type: metadata_store_pb2.ExecutionType,
contexts: Sequence[metadata_store_pb2.Context],
input_artifacts: Optional[MutableMapping[str,
Sequence[types.Artifact]]] = None,
exec_properties: Optional[Mapping[str, types.Property]] = None,
) -> metadata_store_pb2.Execution:
"""Registers a new execution in MLMD.
Along with the execution:
- the input artifacts will be linked to the execution.
- the contexts will be linked to both the execution and its input artifacts.
Args:
metadata_handler: A handler to access MLMD.
execution_type: The type of the execution.
    contexts: MLMD contexts to associate with the execution.
input_artifacts: Input artifacts of the execution. Each artifact will be
linked with the execution through an event.
exec_properties: Execution properties. Will be attached to the execution.
Returns:
An MLMD execution that is registered in MLMD, with id populated.
"""
execution = execution_lib.prepare_execution(
metadata_handler, execution_type, metadata_store_pb2.Execution.RUNNING,
exec_properties)
return execution_lib.put_execution(
metadata_handler, execution, contexts, input_artifacts=input_artifacts)
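# A sketch of the intended call order (commented out: it needs a live MLMD connection,
# pre-registered contexts and resolved artifacts; every name below is a placeholder):
#
#   execution = register_execution(
#       metadata_handler=mlmd_handle, execution_type=node_execution_type,
#       contexts=node_contexts, input_artifacts=input_dict, exec_properties=exec_props)
#   # ... run the executor ...
#   publish_succeeded_execution(
#       metadata_handler=mlmd_handle, execution_id=execution.id,
#       contexts=node_contexts, output_artifacts=output_dict,
#       executor_output=executor_output)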
|
StarcoderdataPython
|
17610
|
<gh_stars>0
divisor = int(input())
bound = int(input())
for num in range(bound, 0, -1):
if num % divisor == 0:
print(num)
break
|
StarcoderdataPython
|
3200697
|
from collections import defaultdict
import sys
import copy
# Read input data
graph = defaultdict(set)
dependencies = defaultdict(set)
visited = defaultdict(bool)
for line in sys.stdin:
words = line.strip().split()
edge1, edge2 = words[1], words[-3]
graph[edge1].add(edge2)
dependencies[edge2].add(edge1)
# Part 1
# Set initial available vertices
available = set()
for vertex in graph.keys():
if len(dependencies[vertex]) != 0:
continue
available.add(vertex)
res = ''
# Do while you can (while vertices are available)
while len(available) != 0:
    # Sort available vertices, because we have to start
    # with the smallest in alphabetical order
available_s = sorted(available)
# Find next vertex, that is, vertex
    # with all dependencies satisfied
next_v = None
for vertex in available_s:
possible = True
deps = dependencies[vertex]
for dep in deps:
if not visited[dep]:
possible = False
break
if possible:
next_v = vertex
break
# If there is no next vertex something
# went terribly wrong, so crash
if next_v is None:
raise Exception('vertex not found')
res += next_v
visited[next_v] = True
available |= graph[next_v]
available.remove(next_v)
print(f'Part 1: {res}')
# Part 2
# Empty visited, and create initial available vertices
visited = defaultdict(bool)
available = set()
for vertex in graph.keys():
if len(dependencies[vertex]) != 0:
continue
available.add(vertex)
# Final time to output
res_time = 0
# Some constants
NUMBER_OF_WORKERS = 5
ADD_TIME = 60
# Array of current workers.
# i-th worker is working on workers_letters[i] letter,
# and needs workers_time[i] time to complete it
workers_letters = []
workers_time = []
# Do while workers are working and there are still letters available
while len(available) != 0 or len(workers_letters) != 0:
# Clean workers that are finished (their time remaining is 0)
i = 0
while i < len(workers_time):
if workers_time[i] == 0:
workers_time.pop(i)
# Add vertex as finished
letter = workers_letters[i]
visited[letter] = True
workers_letters.pop(i)
# Append new available nodes
available |= graph[letter]
else:
i += 1
# Find all available vertices that
# have dependencies completed
available_s = sorted(available)
next_v = []
for vertex in available_s:
possible = True
deps = dependencies[vertex]
for dep in deps:
if not visited[dep]:
possible = False
break
if possible:
next_v.append(vertex)
# Give workers something to work on
i = 0
while len(workers_letters) < NUMBER_OF_WORKERS:
if i >= len(next_v):
break
letter = next_v[i]
workers_letters.append(letter)
time = ADD_TIME + ord(letter) - ord('A') + 1
workers_time.append(time)
available.remove(letter)
i += 1
    # If no workers are working on stuff, we are in the initial
# loop and no time has passed
if len(workers_time) == 0:
time_diff = 0
else: # When the first worker can finish
time_diff = min(workers_time)
# Add to time, and remove the passed time
# from workers remaining time
res_time += time_diff
for i in range(len(workers_time)):
workers_time[i] -= time_diff
print(f'Part 2: {res_time}')
|
StarcoderdataPython
|
313641
|
import roxar
import roxar.events
def elist_qc_owners(elist):
"""Return a list of all events with non-standard event owners
Args:
elist: List of roxar events
Returns:
List of flawed events
"""
errlist = []
for eve in elist:
evdet = roxar.events.Event.details(eve.type)
if evdet['owner_type'] == 'Simulator model':
if eve.owner[0] != 'Simulator':
errlist.append(eve)
elif evdet['owner_type'] == 'Trajectory':
if len(eve.owner) != 3:
errlist.append(eve)
return errlist
|
StarcoderdataPython
|
3329075
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 4 08:12:49 2018
@author: juan
"""
import cv2
import numpy as np
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from torch.autograd import Variable
from CDNet2014Dataset3d import CDNet2014Dataset3d, Rescale, ToTensor
from model3d import BackSubModel3d
def main():
dataset = CDNet2014Dataset3d(root_dir='/datasets/backsub/cdnet2014/dataset',
category='cameraJitter',
train=False,
num_consecutive_frames=10,
transform=transforms.Compose([
Rescale((240, 320)),
ToTensor()
]))
dataloader = DataLoader(dataset, batch_size=1,
shuffle=False, num_workers=4)
# Instantiate model
model = BackSubModel3d()
if torch.cuda.is_available():
print('Using GPU:', torch.cuda.get_device_name(0))
model.cuda()
else:
print('NO GPU DETECTED!')
chk = '/home2/backsub_repo/checkpoints/model3d/model3d_camerajitter.pkl'
print('Loading checkpoint ...')
model.load_state_dict(torch.load(chk))
for i_batch, sample_batch in enumerate(dataloader):
print(i_batch, sample_batch['images'].size(),
sample_batch['label'].size())
images = sample_batch['images']
if torch.cuda.is_available():
images = Variable(images.cuda())
else:
images = Variable(images)
# Forward pass only to get logits/output
outputs = model(images)
# Get predictions from the maximum value
_, prediction = torch.max(outputs.data, 1)
prediction = prediction.cpu().numpy()
prediction = np.squeeze(prediction)
prediction = prediction.astype(np.float32)
print('output size:', prediction.shape)
print(np.unique(prediction))
frame = sample_batch['images'][0, :, 9, :, :].numpy().transpose((1, 2, 0))
cv2.imshow('Video', frame)
cv2.imshow('Pred', prediction)
k = cv2.waitKey(10) & 0xFF
if k == 27:
break
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3223477
|
import glob
import cv2
import os
import numpy as np
from keras.models import load_model
labels = ["100won", "10won", "500won", "50won"]
model = load_model('model/my_model.h5')
img_path = glob.glob("data/origin_images/*.jpg")
for path in img_path:
# Read image
org = cv2.imread(path)
img = cv2.resize(org, (0, 0), fx=0.2, fy=0.2, interpolation=cv2.INTER_AREA)
# Convert image to gray
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Blur
blur = cv2.GaussianBlur(gray, (0, 0), 3)
# Adaptive threshold
th = cv2.adaptiveThreshold(
blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 11, 2)
# Contour
contours, hier = cv2.findContours(
th, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
# Draw contour
dst = img.copy()
idx = 0
while idx >= 0:
# Filter area
cnt = contours[idx]
area = cv2.contourArea(cnt)
if 500 > area or area > 6000:
idx = hier[0, idx, 0]
continue
# Filter aspect ratio
_, _, w, h = cv2.boundingRect(cnt)
aspect_ratio = w / h
if abs(1 - aspect_ratio) > 0.4:
idx = hier[0, idx, 0]
continue
# Convex hull
hull = cv2.convexHull(contours[idx])
# Fit rectangle
x, y, w, h = cv2.boundingRect(hull)
# Draw rectangle
cv2.rectangle(dst, (x, y), (x+w, y+h), (0, 0, 255), 1)
idx = hier[0, idx, 0]
# Crop coin image
coin = org[y*5:(y+h)*5, x*5:(x+w)*5, :]
coin = cv2.resize(coin, (300, 300), interpolation=cv2.INTER_AREA)
# Predict
coin = coin.reshape(-1, 300, 300, 3)
prediction = model.predict([coin])
label = labels[np.argmax(prediction)]
# Show label
cv2.putText(dst, label, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
# Show
title = os.path.basename(path)
# cv2.imshow(title + " - img", img)
# cv2.imshow(title + " - gray", gray)
# cv2.imshow(title + " - th", th)
cv2.imshow(title + " - dst", dst)
while cv2.waitKey(0) != ord('q'):
pass
cv2.destroyAllWindows()
|
StarcoderdataPython
|
6526579
|
# -*- coding: utf-8 -*-
from django.urls import path
from rest_framework_jwt.views import obtain_jwt_token, verify_jwt_token
app_name = "auth"
urlpatterns = [
path('login/', obtain_jwt_token, name="login"),
path('verify/', verify_jwt_token, name="verify"),
]
|
StarcoderdataPython
|
3310451
|
<gh_stars>1-10
from bitcoin.core.script import *
from bitcoin.core import Hash160
import bitcoin.base58
import struct
import unittest
from hashlib import sha256
def payment_script(time_lock, secret_hash, pub_0, pub_1):
"""
    builds the payment script for an mm2 atomic swap,
    ported from the mm2 Rust code
"""
return CScript([OP_IF, struct.pack('<I', time_lock), OP_NOP2 , OP_DROP, pub_0, OP_CHECKSIG,
OP_ELSE, OP_SIZE, b'\x20', OP_EQUALVERIFY, OP_HASH160, secret_hash, OP_EQUALVERIFY, pub_1, OP_CHECKSIG, OP_ENDIF])
def get_payment_address(time_lock, secret_hash, pub_0, pub_1):
pubkey_script = payment_script(time_lock, secret_hash, pub_0, pub_1)
print(pubkey_script.hex())
pubkey_hash = Hash160(pubkey_script)
data = b'\x55' + pubkey_hash
checksum = sha256(sha256(data).digest()).digest()[:4]
byte_address = data + checksum
address = bitcoin.base58.encode(byte_address)
return address
class AddressConvertTest(unittest.TestCase):
def test(self):
address = get_payment_address(1588875030,
bytes.fromhex("bc88c6534d5b82866807cde2da0ce5735c335a2a"),
bytes.fromhex("03683c77e807a47dcd559fa60a6510087e5c5aa0016c094cf5eb4d7e002db18e9f"),
bytes.fromhex("032eadab416e372d21d8cbf798019325088dc796fd6762b6304c0298f279d58038"))
self.assertEqual(address, "bKYeacaKzVzGEDmrEX6zB5vr2ZoRNh7A3p")
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
4815112
|
<reponame>christopinka/django-civil
# -*- coding: utf-8 -*-
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from civil.library.admin import BaseAdmin
from .models import *
#==============================================================================
class SavedSearchItemInline(admin.StackedInline):
model = SavedSearchItem
extra = 0
classes = ('collapse closed',)
#==============================================================================
class SavedSearchAdmin(BaseAdmin):
list_display = ('id', 'name', 'test_link',)
list_display_links = ('id', 'name', )
inlines = [ SavedSearchItemInline ]
#--------------------------------------------------------------------------
def queryset(self, request):
"""
The queryset returned for this model admin
"""
qs = super(SavedSearchAdmin, self).queryset(request)
# superuser should see everything
if not request.user.is_superuser:
qs = qs.filter(user=request.user)
return qs
#--------------------------------------------------------------------------
def has_change_permission(self, request, obj=None):
"""
Check also if we have the permissions to edit this object
"""
has_class_permission = super(SavedSearchAdmin, self).has_change_permission(request, obj)
if not has_class_permission:
return False
if obj is not None and not request.user.is_superuser and request.user.id != obj.user.id:
return False
return True
admin.site.register(SavedSearch, SavedSearchAdmin)
|
StarcoderdataPython
|
4841173
|
<filename>python/cugraph/dask/pagerank/__init__.py
from .pagerank import pagerank, get_chunksize
|
StarcoderdataPython
|
3460017
|
<gh_stars>0
"""This module contains objects for auth endpoints"""
import os
import datetime
import jwt
from werkzeug.security import generate_password_hash, check_password_hash
from functools import wraps
from flask import Flask, jsonify, request, make_response, abort
from flask_restful import Resource
from flask_jwt_extended import (
jwt_required, create_access_token,
get_jwt_identity
)
from instance import config
from ..utils.validator import Validator
from ..models import users
from ..utils import verify
from . import common_functions
class SignUp(Resource):
"""Signup class"""
def post(self):
"""POST /auth/login"""
logged_user = verify.verify_tokens()[0]
common_functions.abort_if_user_is_not_admin(logged_user)
data = request.get_json()
if not data:
return make_response(jsonify({
"message": "Missing required credentials"
}), 400)
try:
email = data["email"]
except KeyError:
return make_response(jsonify({
"message": "Please supply an email to be able to register an attendant"
}), 400)
try:
request_password = data["password"]
except KeyError:
return make_response(jsonify({
"message": "Please supply a password to be able to register an attendant"
}), 400)
if not isinstance(data["email"], str):
return make_response(jsonify({
"message": "Email should be a string"
}), 400)
if not isinstance(data["password"], str):
return make_response(jsonify({
"message": "Password should be a string"
}), 400)
Validator.validate_credentials(self, data)
Validator.check_duplication("email", "users", email)
hashed_password = generate_password_hash(request_password, method='sha256')
user = users.User_Model(email, hashed_password, "attendant")
user.save()
return make_response(jsonify({
"message": "Account created successfully",
"user": {
"email": email,
"role": "attendant"
}
}), 202)
class Login(Resource):
"""Login class"""
def post(self):
"""POST /auth/signup"""
data = request.get_json()
if not data:
return make_response(jsonify({
"message": "Kindly provide an email and a password to login"
}
), 400)
try:
request_mail = data["email"]
        except KeyError:
return make_response(jsonify({
"message": "Kindly provide an email address to log in"
}), 400)
try:
request_password = data["password"]
        except KeyError:
return make_response(jsonify({
"message": "Kindly provide a password to log in"
}), 400)
if not isinstance(data['email'], str):
return make_response(jsonify({
"message": "E-mail should be a string"
}
), 406)
if not isinstance(data['password'], str):
return make_response(jsonify({
"message": "Password should be a string"
}
), 406)
request_email = request_mail.strip()
user = users.User_Model.fetch_user(request_email)
if user and request_email == user[0]['email'] and check_password_hash(user[0]['password'], request_password):
token = jwt.encode({
"email": request_email,
"user_id": user[0]['user_id'],
"exp": datetime.datetime.utcnow() + datetime.timedelta(minutes=3000)
}, os.getenv('JWT_SECRET_KEY', default='<KEY>'))
return make_response(jsonify({
"message": "Login successful",
"token": token.decode("UTF-8"),
"role": user[0]['role']}), 200)
return make_response(jsonify({
"message": "Try again. E-mail or password is incorrect!"
}
), 403)
class SignUpAdmin(Resource):
"""Signup class"""
def post(self):
"""POST /auth/login/admin"""
data = request.get_json()
if not data:
return make_response(jsonify({
"message": "Missing required credentials"
}), 400)
try:
email = data["email"]
except KeyError:
return make_response(jsonify({
"message": "Please supply an email to be able to register an admin"
}), 400)
try:
request_password = data["password"]
except KeyError:
return make_response(jsonify({
"message": "Please supply a password to be able to register an admin"
}), 400)
if not isinstance(data["email"], str):
return make_response(jsonify({
"message": "Email should be a string"
}), 400)
if not isinstance(data["password"], str):
return make_response(jsonify({
"message": "Password should be a string"
}), 400)
Validator.validate_credentials(self, data)
Validator.check_duplication("email", "users", email)
hashed_password = generate_password_hash(request_password, method='sha256')
user = users.User_Model(email, hashed_password, "<PASSWORD>")
user.save()
return make_response(jsonify({
"message": "Account created successfully",
"user": {
"email": email,
"role": "admin"
}
}), 202)
class Logout(Resource):
"""Logout class"""
def post(self):
"""POST /auth/logout"""
token = request.headers['Authorization']
user = users.User_Model(token=token)
user.logout()
return make_response(jsonify({
'message': 'User Logged out successfully'
}))
|
StarcoderdataPython
|
294206
|
#!/usr/bin/python
#
#
import gi
gi.require_version('Notify', '0.7')
from gi.repository import Notify
# https://lazka.github.io/pgi-docs/Notify-0.7/functions.html
Notify.init("Your App Name")
# https://lazka.github.io/pgi-docs/Notify-0.7/classes/Notification.html
Hello = Notify.Notification.new("Hello world", "This is an example notification.", "dialog-information")
# https://lazka.github.io/pgi-docs/Notify-0.7/classes/Notification.html#Notify.Notification.show
Hello.show()
|
StarcoderdataPython
|
235455
|
import requests
from flask import render_template, url_for, request, redirect, jsonify, make_response
from flask_restful import Resource
from app import app, db, api
from models import Slide
class RestSlides(Resource):
#Handles the GET requests
def get(self):
response = {}
response['count'] = Slide.query.count() #Creates an item in the dict. with key 'count' and value <the number of rows in the database>
response['results'] = [] #creates another item in the dict w key 'results' and an empty list as the value
slides = Slide.query.all() #the set of slides for iterating later
for i in slides:
results = {}
results['name'] = i.name
results['object_id'] = i.object_id
response['results'].append(results)
return response
#Handles the POST requests
def post(self):
#Use table 'slides'
__tablename__ = 'slides'
name = request.form['name']
content = request.form['content']
new_slide = Slide(name, content)
db.session.add(new_slide)
db.session.commit()
return 'New Slide Created | name : {} | content : {}'.format(name, content)
class DeleteSlide(Resource):
def delete(self, object_id):
a = Slide.query.get(object_id) #Search for object in the database with id:object_id
del_name = a.name
del_id = a.object_id
db.session.delete(a) #Deleted the object
db.session.commit() #Commit the deletion
        return 'Slide {} with ID {} has been successfully deleted'.format(del_name, del_id)
#RENDERING SLIDES
@app.route('/slides/<object_id>/', methods=['GET'])
def presentation(object_id):
obj = Slide.query.get(object_id)
content = obj.content
return render_template('main.html', content=content)
#Run the API in .../slides/
api.add_resource(RestSlides, '/slides/')
api.add_resource(DeleteSlide, '/slides/<object_id>/')
|
StarcoderdataPython
|
6652922
|
<gh_stars>0
import os
from datetime import datetime
from contextlib import redirect_stdout
from modeling.losses import build_losses
from modeling.miners import build_mining
from data.samplers import build_sampler
from modeling.models import build_model
from modeling.solver.optimizer import build_optimizer
from engine.engine import Engine
from utils.data_logger import setup_logger
from utils import cfg
if __name__ == "__main__":
dateTimeObj = datetime.now()
cfg.SAVE_DIR = os.path.join(cfg.SAVE_DIR, dateTimeObj.strftime("%d-%b-%Y_%H:%M"))
os.makedirs(cfg.SAVE_DIR, exist_ok=True)
# save current config to output directory
with open(f"{cfg.SAVE_DIR}/config.yml", 'w') as f:
with redirect_stdout(f):
print(cfg.dump())
setup_logger(cfg)
models = build_model(cfg)
optimizers = build_optimizer(cfg, models)
loss_funcs = build_losses(cfg)
mining_funcs = build_mining(cfg)
sampler = build_sampler(cfg)
engine = Engine(cfg, models=models, optimizers=optimizers, lr_schedulers=None, loss_funcs=loss_funcs,
mining_funcs=mining_funcs, sampler=sampler)
engine.run()
|
StarcoderdataPython
|
1661273
|
<reponame>adobe-research/beacon-aug
# Copyright 2021 Adobe
# All Rights Reserved.
# NOTICE: Adobe permits you to use, modify, and distribute this file in
# accordance with the terms of the Adobe license agreement accompanying
# it.
from .operators import * # the class are not known until run time
from .advanced.autoaugment import AutoAugment
from .advanced.randaugment import RandAugment
from .advanced.collections import Collections
from .advanced.benign_transforms import Benign
from . import screenshot
from . import properties
# Inherit the core module from albumentations
from albumentations.core.composition import *
from albumentations.core.serialization import *
__version__ = "Opensource-01112022"
__release__ = __version__
|
StarcoderdataPython
|
6638155
|
<reponame>robust-systems-group/illusion_system
#!/usr/bin/python
#
# Copyright (C) 2020 by The Board of Trustees of Stanford University
# This program is free software: you can redistribute it and/or modify it under
# the terms of the Modified BSD-3 License as published by the Open Source
# Initiative.
# If you use this program in your research, we request that you reference the
# Illusion paper, and that you send us a citation of your work.
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the BSD-3 License for more details.
# You should have received a copy of the Modified BSD-3 License along with this
# program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
#
import os, sys
import argparse
import shutil
import string
class XTemplate(string.Template):
delimiter = '$'
escaped = '$$'
longMaskFmt=False
maskSize = 256
def getMask(start, end):
cur = 0
l = []
for i in range(maskSize):
j = i % 32
if i >= start and i < end: cur |= 1 << j
if (i + 1) % 32 == 0:
l.append(cur)
cur = 0
l.reverse()
return ','.join('%08x' % n for n in l)
def getList(start, end):
if end - start == 1:
return str(start)
else:
return str(start) + '-' + str(end-1)
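# For example, with the default maskSize of 256, getMask(0, 4) returns eight comma-separated
# 32-bit hex words ending in '0000000f' (the Linux cpumask format, lowest CPUs in the last
# word), while getList(0, 4) returns the cpulist form '0-3'. (Illustrative values only.)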
parser = argparse.ArgumentParser(
description='Generate patch root for heterogeneous system')
parser.add_argument('--bc', type=int, default=1,
help='Number of big cores')
parser.add_argument('--lc', type=int, default=0,
help='Number of little cores')
parser.add_argument('--bn', type=int, default=1,
help='Number of NUMA nodes for big cores')
parser.add_argument('--ln', type=int, default=0,
help='Number of NUMA nodes for little cores')
parser.add_argument('--no-little-core-memory', action='store_true',
help='Whether the little cores share the same memory as big cores, '
'i.e., no normal memory for little cores')
parser.add_argument('--dir', '-d', type=str, default='patchRoot',
help='Destination directory')
args = parser.parse_args()
bcores = args.bc
lcores = args.lc
bnodes = args.bn
lnodes = args.ln
no_little_core_memory = args.no_little_core_memory
root = os.path.abspath(args.dir)
ncores = bcores + lcores
nnodes = bnodes + lnodes
progDir = os.path.dirname(os.path.abspath(__file__))
if ncores < 1:
print 'ERROR: Need >= 1 cores!'
sys.exit(1)
if ncores > maskSize:
    print 'WARN: This many CPUs have not been tested, x2APIC systems may be different...'
if ncores > 2048:
print 'ERROR: Too many cores, currently support up to 2048'
sys.exit(1)
print 'WARN: Switch to long mask format, up to 2048 cores'
longMaskFmt = True
maskSize = 2048
if bcores != 0 and bcores % bnodes != 0:
print 'ERROR: {} big cores must be evenly distributed among {} NUMA nodes!'.format(bcores, bnodes)
sys.exit(1)
bcpern = bcores / bnodes if bcores != 0 else 0
if lcores != 0 and lcores % lnodes != 0:
print 'ERROR: {} little cores must be evenly distributed among {} NUMA nodes!'.format(lcores, lnodes)
sys.exit(1)
lcpern = lcores / lnodes if lcores != 0 else 0
if os.path.exists(root):
print 'ERROR: Directory {} already exists, aborting'.format(root)
sys.exit(1)
os.makedirs(root)
if not os.path.exists(root):
print 'ERROR: Could not create {}, aborting'.format(root)
sys.exit(1)
print 'Will produce a tree for {}/{} big/little cores with {}/{} NUMA nodes in {}'.format(
bcores, lcores, bnodes, lnodes, root)
## /proc
rootproc = os.path.join(root, 'proc')
os.makedirs(rootproc)
# cpuinfo
cpuinfoBigTemplate = XTemplate(open(os.path.join(progDir, 'cpuinfo.template'), 'r').read())
try:
cpuinfoLittleTemplate = XTemplate(open(os.path.join(progDir, 'cpuinfo.little.template'), 'r').read())
except:
# Use the same cpuinfo template for big and little cores.
cpuinfoLittleTemplate = XTemplate(open(os.path.join(progDir, 'cpuinfo.template'), 'r').read())
with open(os.path.join(rootproc, 'cpuinfo'), 'w') as fh:
for cpu in range(bcores):
print >>fh, cpuinfoBigTemplate.substitute({'CPU' : str(cpu), 'NCPUS' : ncores}),
for cpu in range(bcores, ncores):
print >>fh, cpuinfoLittleTemplate.substitute({'CPU' : str(cpu), 'NCPUS' : ncores}),
# stat
statTemplate = XTemplate(open(os.path.join(progDir, 'stat.template'), 'r').read())
cpuAct = [int(x) for x in '665084 119979939 9019834 399242499 472611 20 159543 0 0 0'.split(' ')]
totalAct = [x * ncores for x in cpuAct]
with open(os.path.join(rootproc, 'stat'), 'w') as fh:
cpuStat = 'cpu ' + ' '.join([str(x) for x in totalAct])
for cpu in range(ncores):
cpuStat += '\ncpu{} '.format(cpu) + ' '.join([str(x) for x in cpuAct])
print >>fh, statTemplate.substitute({'CPUSTAT' : cpuStat}),
# self/status
os.makedirs(os.path.join(rootproc, 'self'))
with open(os.path.join(rootproc, 'self', 'status'), 'w') as fh:
# FIXME: only for CPU/memory list
print >>fh, '...'
print >>fh, 'Cpus_allowed:\t' + getMask(0, ncores)
print >>fh, 'Cpus_allowed_list:\t' + getList(0, ncores)
print >>fh, 'Mems_allowed:\t' + getMask(0, nnodes)
print >>fh, 'Mems_allowed_list:\t' + getList(0, nnodes)
print >>fh, '...'
## /sys
rootsys = os.path.join(root, 'sys')
os.makedirs(rootsys)
# cpus
cpuDir = os.path.join(rootsys, 'devices', 'system', 'cpu')
os.makedirs(cpuDir)
for f in ['online', 'possible', 'present']:
with open(os.path.join(cpuDir, f), 'w') as fh:
print >>fh, getList(0, ncores)
with open(os.path.join(cpuDir, 'offline'), 'w') as fh:
print >>fh, ''
with open(os.path.join(cpuDir, 'sched_mc_power_savings'), 'w') as fh:
print >>fh, 0
with open(os.path.join(cpuDir, 'kernel_max'), 'w') as fh:
print >>fh, maskSize-1
for (cores, nodes, cpern) in [(range(bcores), range(bnodes), bcpern),
(range(bcores, ncores), range(bnodes, nnodes), lcpern)]:
for cpu in cores:
c = cpu - cores[0] # cid within group
n = c / cpern # nid within group
node = n + nodes[0]
coreSiblings = (cores[0] + n*cpern, cores[0] + (n+1)*cpern)
d = os.path.join(cpuDir, 'cpu{}'.format(cpu))
td = os.path.join(d, 'topology')
os.makedirs(d)
os.makedirs(td)
with open(os.path.join(td, 'core_id'), 'w') as fh:
print >>fh, cpu
with open(os.path.join(td, 'physical_package_id'), 'w') as fh:
print >>fh, node
with open(os.path.join(td, 'core_siblings'), 'w') as fh:
print >>fh, getMask(*coreSiblings)
with open(os.path.join(td, 'core_siblings_list'), 'w') as fh:
print >>fh, getList(*coreSiblings)
with open(os.path.join(td, 'thread_siblings'), 'w') as fh:
# FIXME: assume single-thread core
print >>fh, getMask(cpu, cpu+1)
with open(os.path.join(td, 'thread_siblings_list'), 'w') as fh:
# FIXME: assume single-thread core
print >>fh, getList(cpu, cpu+1)
with open(os.path.join(d, 'online'), 'w') as fh:
print >>fh, 1
# nodes
nodeDir = os.path.join(rootsys, 'devices', 'system', 'node')
os.makedirs(nodeDir)
for f in ['online', 'possible']:
with open(os.path.join(nodeDir, f), 'w') as fh:
print >>fh, getList(0, nnodes)
with open(os.path.join(nodeDir, 'has_normal_memory'), 'w') as fh:
if no_little_core_memory:
print >>fh, getList(0, bnodes)
else:
print >>fh, getList(0, nnodes)
with open(os.path.join(nodeDir, 'has_cpu'), 'w') as fh:
print >>fh, getList(0, nnodes) if nnodes > 1 else ''
meminfoTemplate = XTemplate(open(os.path.join(progDir, 'nodeFiles', 'meminfo.template'), 'r').read())
for (cores, nodes, cpern) in [(range(bcores), range(bnodes), bcpern),
(range(bcores, ncores), range(bnodes, nnodes), lcpern)]:
for node in nodes:
n = node - nodes[0] # nid within group
coreSiblings = (cores[0] + n*cpern, cores[0] + (n+1)*cpern)
d = os.path.join(nodeDir, 'node{}'.format(node))
os.makedirs(d)
for cpu in range(*coreSiblings):
os.symlink(os.path.relpath(os.path.join(cpuDir, 'cpu{}'.format(cpu)), d), os.path.join(d, 'cpu{}'.format(cpu)))
for f in ['numastat', 'scan_unevictable_pages', 'vmstat']:
shutil.copy(os.path.join(progDir, 'nodeFiles', f), d)
with open(os.path.join(d, 'cpumap'), 'w') as fh:
print >>fh, getMask(*coreSiblings)
with open(os.path.join(d, 'cpulist'), 'w') as fh:
print >>fh, getList(*coreSiblings)
with open(os.path.join(d, 'meminfo'), 'w') as fh:
print >>fh, meminfoTemplate.substitute({'NODE' : str(node)}),
with open(os.path.join(d, 'distance'), 'w') as fh:
for node2 in range(nnodes):
print >>fh, ('10' if node2 == node else '20') + ' ',
print >>fh, ''
# misc
os.makedirs(os.path.join(rootsys, 'bus', 'pci', 'devices'))
## make read-only
for (p, ds, fs) in os.walk(root):
for d in ds:
os.chmod(os.path.join(p, d), 0555)
for f in fs:
os.chmod(os.path.join(p, f), 0444)
|
StarcoderdataPython
|
79408
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jieba
import numpy as np
def convert_small_example(example,
task_name,
vocab,
is_tokenized=False,
max_seq_length=128,
is_test=False):
input_ids = []
if task_name == 'senta':
for i, token in enumerate(jieba.cut(example[0])):
if i == max_seq_length:
break
token_id = vocab[token]
input_ids.append(token_id)
else:
if is_tokenized:
tokens = example[0][:max_seq_length]
else:
tokens = vocab(example[0])[:max_seq_length]
input_ids = vocab.convert_tokens_to_ids(tokens)
valid_length = np.array(len(input_ids), dtype='int64')
if not is_test:
label = np.array(example[-1], dtype="int64")
return input_ids, valid_length, label
else:
return input_ids, valid_length
def convert_pair_example(example,
task_name,
vocab,
is_tokenized=True,
max_seq_length=128,
is_test=False):
is_tokenized &= (task_name != 'senta')
seq1 = convert_small_example([example[0], example[2]], task_name, vocab,
is_tokenized, max_seq_length, is_test)[:2]
seq2 = convert_small_example([example[1], example[2]], task_name, vocab,
is_tokenized, max_seq_length, is_test)
pair_features = seq1 + seq2
return pair_features
def convert_two_example(example,
task_name,
tokenizer,
label_list,
max_seq_length,
vocab,
is_tokenized=True,
is_test=False):
is_tokenized &= (task_name != 'senta')
bert_features = convert_example(
example,
tokenizer=tokenizer,
label_list=label_list,
is_tokenized=is_tokenized,
max_seq_length=max_seq_length,
is_test=is_test)
if task_name == 'qqp':
small_features = convert_pair_example(
example, task_name, vocab, is_tokenized, max_seq_length, is_test)
else:
small_features = convert_small_example(
example, task_name, vocab, is_tokenized, max_seq_length, is_test)
return bert_features[:2] + small_features
def convert_example(example,
tokenizer,
label_list,
is_tokenized=False,
max_seq_length=512,
is_test=False):
"""convert a glue example into necessary features"""
def _truncate_seqs(seqs, max_seq_length):
if len(seqs) == 1: # single sentence
# Account for [CLS] and [SEP] with "- 2"
seqs[0] = seqs[0][0:(max_seq_length - 2)]
else: # Sentence pair
# Account for [CLS], [SEP], [SEP] with "- 3"
tokens_a, tokens_b = seqs
max_seq_length -= 3
while True: # Truncate with longest_first strategy
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_seq_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
return seqs
def _concat_seqs(seqs, separators, seq_mask=0, separator_mask=1):
concat = sum((seq + sep for sep, seq in zip(separators, seqs)), [])
segment_ids = sum(
([i] * (len(seq) + len(sep))
for i, (sep, seq) in enumerate(zip(separators, seqs))), [])
if isinstance(seq_mask, int):
seq_mask = [[seq_mask] * len(seq) for seq in seqs]
if isinstance(separator_mask, int):
separator_mask = [[separator_mask] * len(sep) for sep in separators]
p_mask = sum((s_mask + mask
for sep, seq, s_mask, mask in zip(
separators, seqs, seq_mask, separator_mask)), [])
return concat, segment_ids, p_mask
if not is_test:
# `label_list == None` is for regression task
label_dtype = "int64" if label_list else "float32"
# Get the label
label = example[-1]
example = example[:-1]
# Create label maps if classification task
if label_list:
label_map = {}
for (i, l) in enumerate(label_list):
label_map[l] = i
label = label_map[label]
label = np.array([label], dtype=label_dtype)
if is_tokenized:
tokens_raw = example
else:
# Tokenize raw text
tokens_raw = [tokenizer(l) for l in example]
# Truncate to the truncate_length,
tokens_trun = _truncate_seqs(tokens_raw, max_seq_length)
    # Concatenate the sequences with special tokens
tokens_trun[0] = [tokenizer.cls_token] + tokens_trun[0]
tokens, segment_ids, _ = _concat_seqs(tokens_trun, [[tokenizer.sep_token]] *
len(tokens_trun))
# Convert the token to ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
valid_length = len(input_ids)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
# input_mask = [1] * len(input_ids)
if not is_test:
return input_ids, segment_ids, valid_length, label
else:
return input_ids, segment_ids, valid_length
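# A minimal usage sketch for the 'senta' branch with a toy vocabulary (hypothetical tokens
# and ids, not the real senta vocab; unseen jieba tokens fall back to id 0 via defaultdict):
if __name__ == "__main__":
    from collections import defaultdict
    toy_vocab = defaultdict(int, {"电影": 1, "好看": 2})
    toy_example = ("这部电影很好看", 1)  # (raw text, label)
    input_ids, valid_length, label = convert_small_example(
        toy_example, task_name="senta", vocab=toy_vocab, max_seq_length=16)
    print(input_ids, valid_length, label)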
|
StarcoderdataPython
|