max_stars_repo_path (string, len 3-269) | max_stars_repo_name (string, len 4-119) | max_stars_count (int64, 0-191k) | id (string, len 1-7) | content (string, len 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---|
setup.py | huonw/strawberry | 0 | 12790451 | <reponame>huonw/strawberry<gh_stars>0
#!/usr/bin/env python
# we use poetry for our build, but this file seems to be required
# in order to get GitHub's dependency graph to work
import setuptools
if __name__ == "__main__":
setuptools.setup(name="strawberry-graphql")
| 0.820313 | 1 |
vivarium/library/wrappers.py | vivarium-collective/vivarium-core | 13 | 12790452 | <reponame>vivarium-collective/vivarium-core<filename>vivarium/library/wrappers.py
from typing import Union
from vivarium.core.process import Process
from vivarium.core.types import Schema, State, Update
from vivarium.composites.toys import ToyProcess
def make_logging_process(
process_class,
logging_port_name="log_update"
) -> type:
"""
Given a subclass of Process, returns a new subclass that behaves exactly
the same except that it also logs each of its updates to a port whose name
is given by logging_port_name.
The returned class has the same name as process_class, but prefixed with
'Logging_'.
Args:
process_class: The Process class to be logged
logging_port_name: Name of the port in which updates will be stored
('log_update' by default.)
Returns:
logging_process: the logging version of process_class.
"""
if not issubclass(process_class, Process):
raise ValueError('process_class must be a subclass of Process.')
logging_process = type(f"Logging_{process_class.__name__}",
(process_class,),
{})
__class__ = logging_process # set __class__ manually so super() knows what to do
def ports_schema(
self
) -> Schema:
ports = super().ports_schema() # type: ignore
ports[logging_port_name] = {'_default': {}, '_updater': 'set', '_emit': True} # add a new port
return ports
def next_update(
self,
timestep: Union[float, int],
states: State
) -> Update:
update = super().next_update(timestep, states) # type: ignore
log_update = {logging_port_name: update} # log the update
return {**update, **log_update}
logging_process.ports_schema = ports_schema # type: ignore
logging_process.next_update = next_update # type: ignore
return logging_process
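# A minimal usage sketch (illustrative; any Process subclass works, and the
# wrapped class behaves identically apart from the extra logging port):
#
#   LoggingToy = make_logging_process(ToyProcess, logging_port_name="log_update")
#   instance = LoggingToy()
#   schema = instance.ports_schema()        # now contains a 'log_update' port
#   update = instance.next_update(1.0, states)  # update is mirrored under 'log_update'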
def test_logging_process():
logging_toy = make_logging_process(ToyProcess)
logging_toy_instance = logging_toy()
ports = logging_toy_instance.ports_schema()
assert 'log_update' in ports
if __name__ == '__main__':
test_logging_process()
| 2.25 | 2 |
tests/unittest/batchify/test_batchify_embedding.py | bkktimber/gluon-nlp | 0 | 12790453 | <filename>tests/unittest/batchify/test_batchify_embedding.py
# coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import itertools
import pytest
import numpy as np
import gluonnlp as nlp
@pytest.mark.parametrize('reduce_window_size_randomly', [True, False])
@pytest.mark.parametrize('shuffle', [True, False])
@pytest.mark.parametrize('cbow', [True, False])
@pytest.mark.parametrize('stream', [True, False])
def test_center_context_batchify_stream(reduce_window_size_randomly, shuffle,
cbow, stream):
dataset = [np.arange(100).tolist()] * 3
batchify = nlp.data.batchify.EmbeddingCenterContextBatchify(
batch_size=8,
window_size=5,
reduce_window_size_randomly=reduce_window_size_randomly,
shuffle=shuffle,
cbow=cbow)
if stream:
stream = nlp.data.SimpleDataStream([dataset, dataset])
batches = list(
itertools.chain.from_iterable(stream.transform(batchify)))
else:
samples = batchify(dataset)
batches = list(samples)
if cbow:
assert len(batches) == (37 if not stream else 74)
elif not reduce_window_size_randomly:
assert len(batches) == (363 if not stream else 726)
else:
pass
@pytest.mark.parametrize('cbow', [True, False])
def test_center_context_batchify(cbow):
dataset = [np.arange(100).tolist()]
batchify = nlp.data.batchify.EmbeddingCenterContextBatchify(
batch_size=3, window_size=1, cbow=cbow)
samples = batchify(dataset)
center, context = next(iter(samples))
(contexts_data, contexts_row, contexts_col) = context
assert center.dtype == np.int64
assert contexts_data.dtype == np.float32
assert contexts_row.dtype == np.int64
assert contexts_col.dtype == np.int64
if cbow:
assert center.asnumpy().tolist() == [0, 1, 2]
assert contexts_data.asnumpy().tolist() == [1, 0.5, 0.5, 0.5, 0.5]
assert contexts_row.asnumpy().tolist() == [0, 1, 1, 2, 2]
assert contexts_col.asnumpy().tolist() == [1, 0, 2, 1, 3]
else:
assert center.asnumpy().tolist() == [0, 1, 1]
assert contexts_data.asnumpy().tolist() == [1, 1, 1]
assert contexts_row.asnumpy().tolist() == [0, 1, 2]
assert contexts_col.asnumpy().tolist() == [1, 0, 2]
| 2.28125 | 2 |
setup.py | dbbs-lab/ndsb | 0 | 12790454 | #!/usr/bin/env python3
import os, sys
import setuptools
# Get text from README.md
with open("README.md", "r") as fp:
readme_text = fp.read()
# Get __version__ without importing
with open(os.path.join(os.path.dirname(__file__),"ndsb", "__init__.py"), "r") as f:
for line in f:
if line.startswith("__version__ = "):
exec(line.strip())
break
setuptools.setup(
name="ndsb",
version=__version__,
description="Collect data, turn it into static artifacts and beam it to a vault.",
license="MIT",
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/dbbs-lab/ndsb",
long_description=readme_text,
long_description_content_type="text/markdown",
packages=["ndsb"],
classifiers=[
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
],
install_requires=["portalocker", "requests", "requests-toolbelt"],
extras_require={"dev": ["sphinx", "sphinx_rtd_theme>=0.4.3", "pre-commit", "black"],},
)
| 1.585938 | 2 |
pylegoclassifier.py | fieryWalrus1002/pylegoclassifier | 1 | 12790455 |
# import the needed packages
import pickle
from sklearn import preprocessing
import time
from os import listdir
from os.path import isfile, join
from random import randint, uniform
import numpy as np
from matplotlib import pyplot as plt
import cv2 as cv
from scipy import ndimage
from skimage import morphology
from skimage import exposure
import os
from math import pi
from math import isnan
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, precision_score, recall_score
from skimage.filters import sobel
# set random seed
np.random.seed(26)
# the NaiveBayes classifier I wrote for assignment 6 in BSYSE_530, modified a little for this purpose
class NaiveBayes:
# P(c|x) = P(x|c) * P(c) / P(x)
# P(c|x) is the posterior probability
# P(x|c) is the likelihood
# P(c) is the class prior probability, or the prob of c occurring independently.
# P(x) is the predictor prior probability, or the prob of x occurring independently
def fit(self, features, target):
# define class variables
self.classes = np.unique(target)
self.count = len(self.classes)
self.feature_nums = features.shape[1]
self.rows = features.shape[0]
# calculate statistics for all those features
self.calc_statistics(features, target)
# prior is the random chance of drawing a particular class based on its proportion in the dataset
self.prior = self.calc_prior(features, target)
def get_predictions(self, input_vector):
predictions = []
for i in range(len(input_vector)):
result = self.calc_posterior((input_vector.iloc[i,:]))
predictions.append(result)
return predictions
def predict(self, observation):
#call the calc_posterior function on the observation
pred_class = self.calc_posterior(observation)
return pred_class
def calc_statistics(self, features, target):
# calculate mean, variance for each column and convert to numpy array
self.mean = features.groupby(target).apply(np.mean).to_numpy()
self.var = features.groupby(target).apply(np.var).to_numpy()
return self.mean, self.var
def calc_prior(self, features, target):
# this is the probability of picking one of a class at random from the dataset
self.prior = (features.groupby(target).apply(lambda x: len(x)/self.rows).to_numpy())
return self.prior
def calc_posterior(self, x):
# this is the probability, post evidence
# x is a numpy array
# x is feature vector for one observation
# make a list that we will add each classes posterior prob to
posteriors = []
# iterate through the classes
for i in range(0, self.count):
# for each class look at the prior probability for the class
prior = self.prior[i]
# calculate the log-likelihood for this class
conditional = np.sum(np.log(self.gaussian_density(i, x)))
# combine in log space: log posterior = log prior + log-likelihood
posterior = np.log(prior) + conditional
# print(f"i = {i}, prior = {prior}, conditional = {conditional}, posterior = {posterior}")
posteriors.append(posterior)
return self.classes[np.argmax(posteriors)]
def gaussian_density(self, class_idx, x):
# calc probability from the gaussian density function (normal dist)
mean = self.mean[class_idx]
var = self.var[class_idx]
# this part sucked and I had a typo that cost me hours
numerator = np.exp(-((x-mean)**2 / (2 * var)))
denominator = np.sqrt(2 * np.pi * var)
return numerator / denominator
def pdf(self, x, mean, stdev):
# calculate probability density function
exponent = np.exp(-((x-mean)**2 / (2*stdev**2)))
return exponent * (1/(np.sqrt(2*np.pi)*stdev))
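# Both gaussian_density and pdf evaluate the normal density
#   f(x) = exp(-(x - mean)^2 / (2 * var)) / sqrt(2 * pi * var)
# gaussian_density is vectorized over the per-class feature means and
# variances, while pdf takes a scalar standard deviation.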
def get_accuracy(self, test, predictions):
correct = 0
for i in range(len(test)):
if test.iloc[i] == predictions[i]:
correct += 1
return (correct / float(len(test)))
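# Minimal usage sketch for NaiveBayes (the data below is illustrative, not part
# of the class API); fit() expects a pandas DataFrame and a target Series,
# since it relies on features.groupby(target):
#
#   X = pd.DataFrame({'r': [0.1, 0.9, 0.2], 'g': [0.8, 0.1, 0.7]})
#   y = pd.Series([0, 1, 0])
#   nb = NaiveBayes()
#   nb.fit(X, y)
#   preds = nb.get_predictions(X)   # list of predicted class labels
#   acc = nb.get_accuracy(y, preds)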
# TODO: read these and see how it works
# https://www.mathworks.com/help/matlab/matlab_external/matlab-arrays-as-python-variables.html
# https://www.mathworks.com/help/matlab/matlab_external/passing-data-to-python.html
# this exists only for my testing purposes
class MatlabSurrogate():
def __init__(self):
self.state_of_mind = "Badass."
def acquire_kinect_image(self, filename):
# give this function a filename, and it will load that image with opencv
# this will be a BGR format, because that is how opencv rolls
kinect_image = cv.imread(filename)
print(f"kinect has acquired the image with shape = {kinect_image.shape}")
return kinect_image
# function to display images resized, using opencv
def imshow(self, image, imdiv = 4):
imdiv = int(imdiv)
w, h = int(image.shape[1]/imdiv), int(image.shape[0]/imdiv)
cv.namedWindow("output", cv.WINDOW_NORMAL)
cv.resizeWindow("output", (w, h))
cv.imshow("output", image)
cv.waitKey(0)
cv.destroyAllWindows()
# I should probably have one image processing class that takes in a single image and then spits out a dataframe that could be used for prediction
# replaces ImageSegmenter
class ImageProcess():
def __init__(self):
print("image processor activated! use 'process_image_to_df()' to get back a pandas df")
self.black_lower = (0, 0, 0)
self.black_upper = (179, 255, 30)
self.hsv_lower = (0, 0, 0)
self.hsv_upper = (179, 255, 90)
# self.black_lower = (0, 0, 203)
# self.black_upper = (43, 255, 255)
# self.hsv_lower = (0, 0, 70)
# self.hsv_upper = (179, 34, 255)
# NOT mask for lego_imgs[14]
# hsv_lower = (0,0,0)
# hsv_upper = (179,234,77)
def dummy_method(self, a):
if type(a) is np.ndarray:
result = "object is a numpy.ndarray, this is perfect. Is the image RGB order or BGR?"
return result
else:
result = "object is a " + str(type(a)) + "and I'm gonna have a hard time with that"
return result
def bg_segmentation(self, image, mode="hsv", show_img=False):
# create an HSV mask covering the background color range
hsv_mask = cv.inRange(cv.cvtColor(image, cv.COLOR_BGR2HSV),
self.hsv_lower,
self.hsv_upper).astype(np.uint8)
# use this as a NOT mask
hsv_mask = np.where(hsv_mask > 1, 0, 1).astype(np.uint8)
hsv_mask = ndimage.gaussian_filter(hsv_mask, sigma=1)
# erode the mask
hsv_mask = morphology.erosion(hsv_mask, morphology.disk(3))
# # median filter to despeckle
# hsv_mask = ndimage.median_filter(hsv_mask, size=(3, 3)).astype(np.uint8)
# binary dilation
hsv_mask = morphology.binary_dilation(hsv_mask, np.ones((20, 20))).astype(np.uint8)
# fill the holes
hsv_mask = ndimage.binary_fill_holes(hsv_mask).astype(np.uint8)
# erode the mask
hsv_mask = morphology.erosion(hsv_mask, morphology.disk(5))
# TODO: remove this it is for testing purposes to show the segmentation
if (show_img == True):
m = MatlabSurrogate()
m.imshow(cv.bitwise_and(image, image, mask=hsv_mask).astype(np.uint8))
# apply the mask and return the result
return cv.bitwise_and(image, image, mask=hsv_mask).astype(np.uint8)
def process_image_to_df(self, input_image, area_th):
seg_img = self.bg_segmentation(input_image, show_img=False)
# # make the mask a binary thresholded image
mask = cv.cvtColor(seg_img, cv.COLOR_BGR2GRAY)
mask = cv.GaussianBlur(mask,(5,5),0)
ret3, mask = cv.threshold(mask,0,255,cv.THRESH_BINARY)
# output image with contours drawn on the original image
output_image = input_image.copy()
# find the contours of the detected objects in the image
contours, hier = cv.findContours(mask, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
# create the df that we'll return for this image
df = pd.DataFrame(columns=['color'])
# # reset the object num
object_num = 0
for cnt in contours:
# draw contours on the output image for our personal enjoyment
cv.drawContours(output_image, [cnt], 0, color=(255, 255, 255), thickness=5)
# CALCULATE ALL THE CONTOUR SHAPE FEATURES
# get the x, y, w, h of the bounding rect for the contour
x, y, w, h = cv.boundingRect(cnt)
# contour features
area = cv.contourArea(cnt)
rect_area = w * h
fullosity = area / rect_area
aspect_ratio = float(w)/h
extent = float(area/ rect_area)
hull = cv.convexHull(cnt)
hull_area = cv.contourArea(hull)
solidity = float(area)/hull_area
eq_diameter = np.sqrt(4*area/np.pi)
M = cv.moments(cnt)
if M['m00'] == 0:
    continue  # degenerate contour; avoid division by zero
cx = int(M['m10'] / M['m00'])
cy = int(M['m01'] / M['m00'])
# take this rectangle as a subset of the input_image, and calculate things within it
img_subset = input_image[y:y+h, x:x+w, :]
# convert to hsv for extracting those values
img_subset_hsv = cv.cvtColor(img_subset, cv.COLOR_BGR2HSV)
# FILTER OUT THE WEIRD ONES
# get rid of tiny objects that are probably noise
if area > area_th:
# draw a blank canvas to put the contour onto, JUST THIS ONE not the others
# this is a mask
cimg_justthiscontour = np.zeros_like(input_image)
# draw the contours on the blank canvas which is original sized
cv.drawContours(cimg_justthiscontour, [cnt], 0, color=(255, 255, 255), thickness=-1)
# now take the subset of just the area around the contour of interest
cimg_subset = cimg_justthiscontour[y:y+h, x:x+w, :]
# make a binary mask
cimg_mask = cv.cvtColor(cimg_subset, cv.COLOR_BGR2GRAY)
ret2, mask = cv.threshold(cimg_mask,0,255,cv.THRESH_BINARY)
# draw contours on the output image for our personal enjoyment
cv.drawContours(output_image, [cnt], 0, color=(255, 255, 255), thickness=5)
img_subset = cv.bitwise_and(img_subset, img_subset, mask=mask).astype(np.uint8)
# calculate where the object is
pts = np.where(cimg_subset == 255)
hue = img_subset_hsv[pts[0], pts[1], 0]
sat = img_subset_hsv[pts[0], pts[1], 1]
val = img_subset_hsv[pts[0], pts[1], 2]
r = img_subset[pts[0], pts[1], 0]
g = img_subset[pts[0], pts[1], 1]
b = img_subset[pts[0], pts[1], 2]
# and export the image for later analysis with something else like a neural network
cv.imwrite(f"images/train/XX_{object_num}_{randint(10000,99999)}.png", img_subset)
# add the object labels to the cimg for identification
cv.putText(output_image, text= str(object_num),
org=(cx - 5,cy - 5),
fontFace= cv.FONT_HERSHEY_SIMPLEX,
fontScale=3,
color=(255,255,255),
thickness=5,
lineType=cv.LINE_AA)
# print(r.mean(), g.mean(), b.mean(), gli.mean())
df = df.append({'color' : 0,
'x': x,
'y': y,
'object_num': object_num,
'r': r.mean(),
'g': g.mean(),
'b': b.mean(),
'hue': hue.mean(),
'sat': sat.mean(),
'val': val.mean()
}, ignore_index=True)
# last thing we do on this loop is increment the object_num
object_num += 1
#
# end result should be a pandas dataframe and the contour image with numbers
return df.sort_values(by='object_num', axis=0, ascending=True), output_image
def hsv_slide_tool(self, image):
def empty(a):
pass
h, w = int(image.shape[1]/2), int(image.shape[0]/2)
cv.namedWindow('masked_image', cv.WINDOW_NORMAL)
cv.resizeWindow('masked_image', h, w)
cv.namedWindow("trackbars")
cv.resizeWindow("trackbars", 800, 300)
# color mask trackbars
cv.createTrackbar("hue_min", "trackbars", 0, 179, empty)
cv.createTrackbar('hue_max', 'trackbars', 179, 179, empty)
cv.createTrackbar('sat_min', 'trackbars', 0, 255, empty)
cv.createTrackbar('sat_max', 'trackbars', 255, 255, empty)
cv.createTrackbar('val_min', 'trackbars', 0, 255, empty)
cv.createTrackbar('val_max', 'trackbars', 255, 255, empty)
while True:
# get image
img_hsv = cv.cvtColor(image, cv.COLOR_BGR2HSV)
# get trackbar positions
h_min = cv.getTrackbarPos("hue_min", "trackbars")
h_max = cv.getTrackbarPos('hue_max', 'trackbars')
s_min = cv.getTrackbarPos('sat_min', 'trackbars')
s_max = cv.getTrackbarPos('sat_max', 'trackbars')
v_min = cv.getTrackbarPos('val_min', 'trackbars')
v_max = cv.getTrackbarPos('val_max', 'trackbars')
# self.black_lower = (0, 0, 0)
# self.black_upper = (179, 255, 30)
# self.hsv_lower = (0, 0, 100)
# self.hsv_upper = (179, 255, 255)
# create mask
hsv_lower = np.array([h_min, s_min, v_min])
hsv_upper = np.array([h_max, s_max, v_max])
black_lower = np.array([0, 0, 0])
black_upper = np.array([179, 255, 30])
color_mask = cv.inRange(img_hsv, hsv_lower, hsv_upper)
black_mask = cv.inRange(img_hsv, black_lower, black_upper)
mask = color_mask + black_mask
masked_image = cv.bitwise_and(img_hsv, img_hsv, mask=mask)
cv.imshow('masked_image', masked_image)
k = cv.waitKey(1000) & 0xFF # large wait time
if k == 113 or k == 27:
break
cv.destroyAllWindows()
print(f'hsv_lower is {hsv_lower}, hsv_upper = {hsv_upper}')
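# Typical workflow for hsv_slide_tool: run it on a sample image, adjust the six
# trackbars until only the objects of interest remain visible, press 'q' or Esc
# (key codes 113 / 27) to quit, then copy the printed hsv_lower / hsv_upper
# values into __init__ above.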
def label_dataframe(self, image_df, class_list):
for i, row in image_df.iterrows():
image_df.loc[i, 'color'] = class_list[i]
print(type(image_df))
return image_df
# def fake_df(self, input_df, reps = 3):
# # creates a bunch of fake adjustments to the dataframe so my train set is bigger
# output_df = input_df.copy()
# for rep in range(0, reps):
# fake_df = input_df.copy()
# for i, row in fake_df.iterrows():
# fake_df.loc[i, 'r'] = fake_df.loc[i, 'r'] + uniform(-.1, .1)
# fake_df.loc[i, 'g'] = fake_df.loc[i, 'g'] + uniform(-.1, .1)
# fake_df.loc[i, 'b'] = fake_df.loc[i, 'b'] + uniform(-.1, .1)
# output_df = pd.concat(output_df, fake_df)
# return output_df
def otsu_threshold(self, image):
blur = cv.GaussianBlur(image,(5,5),0)
ret3,th3 = cv.threshold(blur,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU)
return ret3, th3
def process_image_make_predictions(self, input_image, model):
predictive_model = model
area_th = 400
seg_img = self.bg_segmentation(input_image, show_img=False)
# # make the mask a binary thresholded image
mask = cv.cvtColor(seg_img, cv.COLOR_BGR2GRAY)
mask = cv.GaussianBlur(mask,(5,5),0)
ret3, mask = cv.threshold(mask,0,255,cv.THRESH_BINARY)
# output image with contours drawn on the original image
output_image = input_image.copy()
# find the contours of the detected objects in the image
contours, hier = cv.findContours(mask, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
# create the df that we'll return for this image
df = pd.DataFrame(columns=['color'])
# # reset the object num
object_num = 0
for cnt in contours:
# draw contours on the output image for our personal enjoyment
cv.drawContours(output_image, [cnt], 0, color=(255, 255, 255), thickness=5)
# CALCULATE ALL THE CONTOUR SHAPE FEATURES
# get the x, y, w, h of the bounding rect for the contour
x, y, w, h = cv.boundingRect(cnt)
# contour features
area = cv.contourArea(cnt)
rect_area = w * h
fullosity = area / rect_area
aspect_ratio = float(w)/h
extent = float(area/ rect_area)
hull = cv.convexHull(cnt)
hull_area = cv.contourArea(hull)
solidity = float(area)/hull_area
eq_diameter = np.sqrt(4*area/np.pi)
M = cv.moments(cnt)
if M['m00'] == 0:
    continue  # degenerate contour; avoid division by zero
cx = int(M['m10'] / M['m00'])
cy = int(M['m01'] / M['m00'])
# take this rectangle as a subset of the input_image, and calculate things within it
img_subset = input_image[y:y+h, x:x+w, :]
# convert to hsv for extracting those values
img_subset_hsv = cv.cvtColor(img_subset, cv.COLOR_BGR2HSV)
# FILTER OUT THE WEIRD ONES
# get rid of tiny objects that are probably noise
if area > area_th:
# draw a blank canvas to put the contour onto, JUST THIS ONE not the others
# this is a mask
cimg_justthiscontour = np.zeros_like(input_image)
# draw the contours on the blank canvas which is original sized
cv.drawContours(cimg_justthiscontour, [cnt], 0, color=(255, 255, 255), thickness=-1)
# now take the subset of just the area around the contour of interest
cimg_subset = cimg_justthiscontour[y:y+h, x:x+w, :]
# make a binary mask
cimg_mask = cv.cvtColor(cimg_subset, cv.COLOR_BGR2GRAY)
ret2, mask = cv.threshold(cimg_mask,0,255,cv.THRESH_BINARY)
# draw contours on the output image for our personal enjoyment
cv.drawContours(output_image, [cnt], 0, color=(255, 255, 255), thickness=5)
img_subset = cv.bitwise_and(img_subset, img_subset, mask=mask).astype(np.uint8)
# calculate where the object is
pts = np.where(cimg_subset == 255)
hue = img_subset_hsv[pts[0], pts[1], 0]
sat = img_subset_hsv[pts[0], pts[1], 1]
val = img_subset_hsv[pts[0], pts[1], 2]
r = img_subset[pts[0], pts[1], 0]
g = img_subset[pts[0], pts[1], 1]
b = img_subset[pts[0], pts[1], 2]
df = [{'r': (r.mean() / 255),
'g': (g.mean() / 255),
'b': (b.mean() / 255),
'hue': (hue.mean() / 255),
'sat': (sat.mean() / 255),
'val': (val.mean() / 255)}]
df = pd.DataFrame.from_dict(df)
pred = predictive_model.get_predictions(df)
class_dict = {0:"medium_blue",
1:"black",
2:"darK_stone_gray",
3:"bright_green",
4:"light_green",
5:"bright_orange",
6:"bright_red",
7:"bright_blue",
8:"white",
9:"bright_yellow"}
color_text = class_dict[pred[0]]
object_label = "obj" + str(object_num) + "_pred" + str(pred[0])
print(object_label)
# add the object labels to the cimg for identification
cv.putText(output_image, text= str(object_label),
org=(cx - 5,cy - 5),
fontFace= cv.FONT_HERSHEY_SIMPLEX,
fontScale=1,
color=(0,255,0),
thickness=3,
lineType=cv.LINE_AA)
# last thing we do on this loop is increment the object_num
object_num += 1
# AFTER ALL CONTOURS HAVE BEEN DONE submit the df to the model for predictions
# results = predictive_model.blind_predictions()
# result = loaded_model.get_predictions(X_test, Y_test)
# print(result)
# # use the test set to see how we do
# y_test_predictions = nb.get_predictions(X_test)
# # scores
# acc = nb.get_accuracy(y_test, y_test_predictions)
# prec = precision_score(y_test, y_test_predictions, average="micro")
# rec = recall_score(y_test, y_test_predictions, average="micro")
# print(f"precision is {prec}, recall is {rec}, accuracy = {acc}")
# # confusion matrix
# labels = [(i, c) for i, c in class_dict.items()]
# cm = confusion_matrix(y_test, y_test_predictions)
# fig = plt.figure()
# ax = fig.add_subplot(111)
# cax = ax.matshow(cm)
# plt.title('confusion matrix of the classifier')
# fig.colorbar(cax)
# plt.xlabel('Predicted')
# plt.ylabel('True')
# plt.show()
# print(labels)
# take the row
# end result should be a pandas dataframe and the contour image with numbers
return output_image
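# Minimal end-to-end sketch (the file path and threshold are placeholders):
#
#   m = MatlabSurrogate()
#   img = m.acquire_kinect_image("images/lego_scene.png")
#   proc = ImageProcess()
#   df, annotated = proc.process_image_to_df(img, area_th=400)
#   m.imshow(annotated)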
| 2.8125 | 3 |
mountwizzard3/modeling/model_points.py | fcbarclo/MountWizzard3 | 1 | 12790456 | ############################################################
# -*- coding: utf-8 -*-
#
# # # # # # ####
# ## ## # ## # #
# # # # # # # # # ###
# # ## # ## ## #
# # # # # # ####
#
# Python-based Tool for interaction with the 10micron mounts
# GUI with PyQT5 for python
# Python v3.6.4
#
# <NAME>
# (c) 2016, 2017, 2018
#
# Licence APL2.0
#
###########################################################
import logging
import os
import PyQt5
import time
import copy
import operator
import numpy
from astrometry import transform
class ModelPoints:
logger = logging.getLogger(__name__)
def __init__(self, app):
self.app = app
self.transform = transform.Transform(self.app)
self.horizonPoints = list()
self.modelPoints = list()
self.celestialEquator = list()
# signal slot
self.app.ui.btn_loadInitialModelPoints.clicked.connect(self.selectInitialModelPointsFileName)
self.app.ui.btn_saveInitialModelPoints.clicked.connect(self.saveInitialModelPoints)
self.app.ui.btn_saveInitialModelPointsAs.clicked.connect(self.saveInitialModelPointsAs)
self.app.ui.btn_loadFullModelPoints.clicked.connect(self.selectFullModelPointsFileName)
self.app.ui.btn_saveFullModelPoints.clicked.connect(self.saveFullModelPoints)
self.app.ui.btn_saveFullModelPointsAs.clicked.connect(self.saveFullModelPointsAs)
self.app.ui.btn_loadHorizonMask.clicked.connect(self.selectHorizonPointsFileName)
self.app.ui.btn_saveHorizonMask.clicked.connect(self.saveHorizonMask)
self.app.ui.btn_saveHorizonMaskAs.clicked.connect(self.saveHorizonMaskAs)
self.app.signalMountSiteData.connect(self.generateCelestialEquator)
def initConfig(self):
try:
if 'HorizonPointsFileName' in self.app.config:
self.app.ui.le_horizonPointsFileName.setText(self.app.config['HorizonPointsFileName'])
if 'CheckUseMinimumHorizonLine' in self.app.config:
self.app.ui.checkUseMinimumHorizonLine.setChecked(self.app.config['CheckUseMinimumHorizonLine'])
if 'CheckUseFileHorizonLine' in self.app.config:
self.app.ui.checkUseFileHorizonLine.setChecked(self.app.config['CheckUseFileHorizonLine'])
if 'AltitudeMinimumHorizon' in self.app.config:
self.app.ui.altitudeMinimumHorizon.setValue(self.app.config['AltitudeMinimumHorizon'])
if 'ModelInitialPointsFileName' in self.app.config:
self.app.ui.le_modelInitialPointsFileName.setText(self.app.config['ModelInitialPointsFileName'])
if 'ModelFullPointsFileName' in self.app.config:
self.app.ui.le_modelFullPointsFileName.setText(self.app.config['ModelFullPointsFileName'])
if 'HorizonPointsFileName' in self.app.config and 'CheckUseMinimumHorizonLine' in self.app.config and 'CheckUseFileHorizonLine' in self.app.config and 'AltitudeMinimumHorizon' in self.app.config:
self.loadHorizonPoints(self.app.config['HorizonPointsFileName'],
self.app.config['CheckUseFileHorizonLine'],
self.app.config['CheckUseMinimumHorizonLine'],
self.app.config['AltitudeMinimumHorizon'])
except Exception as e:
self.logger.error('item in config.cfg could not be initialized, error:{0}'.format(e))
finally:
pass
def storeConfig(self):
self.app.config['HorizonPointsFileName'] = self.app.ui.le_horizonPointsFileName.text()
self.app.config['CheckUseMinimumHorizonLine'] = self.app.ui.checkUseMinimumHorizonLine.isChecked()
self.app.config['CheckUseFileHorizonLine'] = self.app.ui.checkUseFileHorizonLine.isChecked()
self.app.config['AltitudeMinimumHorizon'] = self.app.ui.altitudeMinimumHorizon.value()
self.app.config['ModelInitialPointsFileName'] = self.app.ui.le_modelInitialPointsFileName.text()
self.app.config['ModelFullPointsFileName'] = self.app.ui.le_modelFullPointsFileName.text()
def saveHorizonMask(self):
filepath = os.getcwd() + '/config/' + self.app.ui.le_horizonPointsFileName.text()
self.saveHorizonPoints(filepath)
def saveHorizonMaskAs(self):
value, ext = self.app.selectFile(self.app, 'Save horizon mask points file', '/config', 'Model point files (*.txt)', False)
if value != '':
self.app.ui.le_horizonPointsFileName.setText(os.path.basename(value))
self.saveHorizonPoints(value)
else:
self.logger.warning('No model points file selected')
def selectHorizonPointsFileName(self):
value, ext = self.app.selectFile(self.app, 'Open horizon mask file', '/config', 'Horizon mask files (*.txt)', True)
if value != '':
self.app.ui.le_horizonPointsFileName.setText(os.path.basename(value))
self.app.hemisphereWindow.selectHorizonPointsMode()
self.app.hemisphereWindow.drawHemisphere()
def saveModelPoints(self, modelPointsFileName):
msg = None
fileHandle = None
if modelPointsFileName.strip() == '':
msg = 'No Model Points Filename given!'
self.logger.warning('No Model Points Filename given!')
return msg
try:
fileHandle = open(modelPointsFileName + '.txt', 'w')
for i in range(0, len(self.modelPoints)):
fileHandle.write('MW-3:{0:03.2f}:{1:03.2f}\n'.format(self.modelPoints[i][0], self.modelPoints[i][1]))
fileHandle.close()
except Exception as e:
msg = 'Error saving modeling points to file [{0}] error: {1}!'.format(modelPointsFileName, e)
self.logger.warning('Error saving modeling points to file [{0}] error: {1}!'.format(modelPointsFileName, e))
finally:
if fileHandle:
fileHandle.close()
return msg
def saveInitialModelPoints(self):
filepath = os.getcwd() + '/config/' + self.app.ui.le_modelInitialPointsFileName.text()
self.saveModelPoints(filepath)
def saveInitialModelPointsAs(self):
value, ext = self.app.selectFile(self.app, 'Save initial model points file', '/config', 'Model point files (*.txt)', False)
if value != '':
self.app.ui.le_modelInitialPointsFileName.setText(os.path.basename(value))
self.saveModelPoints(value)
else:
self.logger.warning('No model points file selected')
def selectInitialModelPointsFileName(self):
value, ext = self.app.selectFile(self.app, 'Open initial model points file', '/config', 'Model points files (*.txt)', True)
if value != '':
value = os.path.basename(value)
self.app.ui.le_modelInitialPointsFileName.setText(value)
self.showInitialPoints(value)
else:
self.logger.warning('No file selected')
def saveFullModelPoints(self):
filepath = os.getcwd() + '/config/' + self.app.ui.le_modelFullPointsFileName.text()
self.saveModelPoints(filepath)
def saveFullModelPointsAs(self):
value, ext = self.app.selectFile(self.app, 'Save full model points file', '/config', 'Model point files (*.txt)', False)
if value != '':
self.app.ui.le_modelFullPointsFileName.setText(os.path.basename(value))
self.saveModelPoints(value)
else:
self.logger.warning('No model points file selected')
def selectFullModelPointsFileName(self):
value, ext = self.app.selectFile(self.app, 'Open full model points file', '/config', 'Model points files (*.txt)', True)
if value != '':
value = os.path.basename(value)
self.app.ui.le_modelFullPointsFileName.setText(value)
self.showFullPoints(value, self.app.ui.checkDeletePointsHorizonMask.isChecked(), self.app.ui.checkSortPoints.isChecked())
else:
self.logger.warning('No file selected')
def loadModelPoints(self, modelPointsFileName, modeltype):
p = []
number = 0
msg = None
if modelPointsFileName.strip() == '':
msg = 'No model points filename given!'
self.logger.warning('No model points filename given!')
return p, msg
try:
with open('config/' + modelPointsFileName + '.txt', 'r') as fileHandle:
for line in fileHandle:
if line.startswith('GRID'):
# if it starts with GRID, it's a TheSkyX (TSX) file
convertedLine = line.rstrip('\n').split()
point = (float(convertedLine[2]), float(convertedLine[3]))
number += 1
if modeltype == 'Refinement' and number > 3:
p.append(point)
elif modeltype == 'Base' and number <= 3:
p.append(point)
elif line.startswith('MW-3'):
# if it starts with MW-3, it's the native MountWizzard3 format
convertedLine = line.rstrip('\n').split(':')
p.append((float(convertedLine[1]), float(convertedLine[2])))
else:
# format is same as Per's Model Maker
convertedLine = line.rstrip('\n').split(':')
point = (int(convertedLine[0]), int(convertedLine[1]))
if len(convertedLine) == 2 and modeltype == 'Full':
p.append(point)
elif len(convertedLine) != 2 and modeltype == 'Initial':
p.append(point)
except Exception as e:
msg = 'Error loading modeling points from file [{0}] error: {1}!'.format(modelPointsFileName, e)
self.logger.warning('Error loading modeling points from file [{0}] error: {1}!'.format(modelPointsFileName, e))
finally:
return p, msg
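# Example lines for the three formats loadModelPoints accepts (values are
# illustrative):
#   TheSkyX:      GRID 1 120.00 45.00 ...   (azimuth/altitude in fields 3 and 4)
#   MountWizzard: MW-3:120.00:45.00
#   Model Maker:  120:45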
def sortPoints(self):
if len(self.modelPoints) == 0:
self.logger.warning('There are no points to sort')
return
westSide = []
eastSide = []
a = sorted(self.modelPoints, key=operator.itemgetter(0))
for i in range(0, len(a)):
if a[i][0] >= 180:
westSide.append((a[i][0], a[i][1]))
else:
eastSide.append((a[i][0], a[i][1]))
westSide = sorted(westSide, key=operator.itemgetter(1))
eastSide = sorted(eastSide, key=operator.itemgetter(1))
self.modelPoints = westSide + eastSide
def loadHorizonPoints(self, horizonPointsFileName, horizonByFile, horizonByAltitude, altitudeMinimumHorizon):
self.horizonPoints = []
if not (horizonByFile or horizonByAltitude):
return
hp = []
msg = None
if horizonByFile:
if horizonPointsFileName == '':
msg = 'No horizon points filename given !'
return msg
if not os.path.isfile(os.getcwd() + '/config/' + horizonPointsFileName + '.txt'):
msg = 'Horizon points file does not exist !'
self.logger.warning('Horizon points file does not exist')
else:
try:
with open(os.getcwd() + '/config/' + horizonPointsFileName + '.txt') as f:
for line in f:
if ':' in line:
# model maker format
m = line.rstrip('\n').split(':')
else:
# carte du ciel / skychart format
m = line.rstrip('\n').split(' ')
point = (int(m[0]), int(m[1]))
hp.append(point)
f.close()
except Exception as e:
msg = 'Error loading horizon points: {0}'.format(e)
self.logger.error('Error loading horizon points: {0}'.format(e))
return msg
hp = sorted(hp, key=operator.itemgetter(0))
if len(hp) == 0:
hp = ((0, 0), (360, 0))
x = [i[0] for i in hp]
y = [i[1] for i in hp]
if horizonByAltitude:
y = numpy.clip(y, altitudeMinimumHorizon, None)
self.horizonPoints = [list(a) for a in zip(x, y)]
return msg
def saveHorizonPoints(self, horizonPointsFileName):
msg = None
fileHandle = None
if horizonPointsFileName.strip() == '':
msg = 'No horizon points filename given!'
self.logger.warning('No horizon points filename given!')
return msg
try:
fileHandle = open(horizonPointsFileName + '.txt', 'w')
for i in range(0, len(self.horizonPoints)):
# saving in model maker format
fileHandle.write('{0:03d}:{1:03d}\n'.format(int(self.horizonPoints[i][0]), int(self.horizonPoints[i][1])))
fileHandle.close()
except Exception as e:
msg = 'Error saving horizon points to file [{0}] error: {1}!'.format(horizonPointsFileName, e)
self.logger.warning('Error saving horizon points to file [{0}] error: {1}!'.format(horizonPointsFileName, e))
finally:
if fileHandle:
fileHandle.close()
return msg
def isAboveHorizonLine(self, point):
x = range(0, 361)
y = numpy.interp(x, [i[0] for i in self.horizonPoints], [i[1] for i in self.horizonPoints], left=None, right=None, period=None)
if point[1] > y[int(point[0])]:
return True
else:
return False
def deleteBelowHorizonLine(self):
i = 0
while i < len(self.modelPoints):
if self.isAboveHorizonLine(self.modelPoints[i]):
i += 1
else:
del self.modelPoints[i]
def deletePoints(self):
self.modelPoints = list()
self.app.workerModelingDispatcher.signalModelPointsRedraw.emit()
def showInitialPoints(self, filename):
self.modelPoints, msg = self.loadModelPoints(filename, 'Initial')
self.app.messageQueue.put('ToModel>{0:02d}'.format(len(self.modelPoints)))
self.app.workerModelingDispatcher.signalModelPointsRedraw.emit()
def showFullPoints(self, filename, limitByHorizonMask, doSortingPoints):
self.modelPoints, msg = self.loadModelPoints(filename, 'Full')
if limitByHorizonMask:
self.deleteBelowHorizonLine()
if doSortingPoints:
self.sortPoints()
self.app.messageQueue.put('ToModel>{0:02d}'.format(len(self.modelPoints)))
self.app.workerModelingDispatcher.signalModelPointsRedraw.emit()
def generateDSOPoints(self, limitByHorizonMask, hoursPathLength, numberOfPathPoints, hoursPathLengthPreview):
# we have no position of the mount -> therefore we can't calculate the path
if 'RaJNow' not in self.app.workerMountDispatcher.data:
return
self.modelPoints = list()
ra = copy.copy(self.app.workerMountDispatcher.data['RaJNow'])
dec = copy.copy(self.app.workerMountDispatcher.data['DecJNow'])
for i in range(0, numberOfPathPoints):
ra = ra - float(i) * hoursPathLength / numberOfPathPoints - hoursPathLengthPreview
az, alt = self.transform.transformERFA(ra, dec, 1)
if alt > 0:
self.modelPoints.append((az, alt))
if limitByHorizonMask:
self.deleteBelowHorizonLine()
self.app.messageQueue.put('ToModel>{0:02d}'.format(len(self.modelPoints)))
self.app.workerModelingDispatcher.signalModelPointsRedraw.emit()
def generateMaxPoints(self, limitByHorizonMask, doSortingPoints):
west = []
east = []
off = -5
i = 0
for dec in range(-15, 90, 10):
if dec < 70:
    step = 10
else:
    step = 30
if i % 2:
for ha in range(120 + off, -120 + off, -step):
az, alt = self.transform.topocentricToAzAlt(ha / 10, dec)
if alt > 0:
if az > 180:
east.insert(0, (az, alt))
else:
west.append((az, alt))
else:
for ha in range(-120 + off, 120 + off, step):
az, alt = self.transform.topocentricToAzAlt(ha / 10, dec)
if alt > 0:
if az > 180:
east.insert(0, (az, alt))
else:
west.append((az, alt))
i += 1
self.modelPoints = west + east
if limitByHorizonMask:
self.deleteBelowHorizonLine()
if doSortingPoints:
self.sortPoints()
self.app.messageQueue.put('ToModel>{0:02d}'.format(len(self.modelPoints)))
self.app.workerModelingDispatcher.signalModelPointsRedraw.emit()
def generateNormalPoints(self, limitByHorizonMask, doSortingPoints):
west = []
east = []
off = -5
i = 0
for dec in range(-15, 90, 15):
if dec < 60:
step = 10
else:
step = 20
if i % 2:
for ha in range(120 + off, -120 + off, -step):
az, alt = self.transform.topocentricToAzAlt(ha / 10, dec)
if alt > 0:
if az > 180:
east.insert(0, (az, alt))
else:
west.append((az, alt))
else:
for ha in range(-120 + off, 120 + off, step):
az, alt = self.transform.topocentricToAzAlt(ha / 10, dec)
if alt > 0:
if az > 180:
east.insert(0, (az, alt))
else:
west.append((az, alt))
i += 1
self.modelPoints = west + east
if limitByHorizonMask:
self.deleteBelowHorizonLine()
if doSortingPoints:
self.sortPoints()
self.app.messageQueue.put('ToModel>{0:02d}'.format(len(self.modelPoints)))
self.app.workerModelingDispatcher.signalModelPointsRedraw.emit()
def generateMinPoints(self, limitByHorizonMask, doSortingPoints):
west = list()
east = list()
off = -5
i = 0
for dec in range(-15, 90, 15):
if dec < 60:
step = 15
else:
step = 30
if i % 2:
for ha in range(120 + off, -120 + off, -step):
az, alt = self.transform.topocentricToAzAlt(ha / 10, dec)
if alt > 0:
if az > 180:
east.insert(0, (az, alt))
else:
west.append((az, alt))
else:
for ha in range(-120 + off, 120 + off, step):
az, alt = self.transform.topocentricToAzAlt(ha / 10, dec)
if alt > 0:
if az > 180:
east.insert(0, (az, alt))
else:
west.append((az, alt))
i += 1
self.modelPoints = west + east
if limitByHorizonMask:
self.deleteBelowHorizonLine()
if doSortingPoints:
self.sortPoints()
self.app.messageQueue.put('ToModel>{0:02d}'.format(len(self.modelPoints)))
self.app.workerModelingDispatcher.signalModelPointsRedraw.emit()
def generateGridPoints(self, limitByHorizonMask, doSortingPoints, numberOfRows, numberOfColumns, altitudeMin, altitudeMax):
west = list()
east = list()
i = 0
for alt in range(altitudeMin, altitudeMax + 1, int((altitudeMax - altitudeMin) / (numberOfRows - 1))):
if i % 2:
for az in range(365 - int(360 / numberOfColumns), 0, -int(360 / numberOfColumns)):
if alt > 0:
if az > 180:
east.insert(0, (az, alt))
else:
west.append((az, alt))
else:
for az in range(5, 360, int(360 / numberOfColumns)):
if alt > 0:
if az > 180:
east.insert(0, (az, alt))
else:
west.append((az, alt))
i += 1
self.modelPoints = west + east
if limitByHorizonMask:
self.deleteBelowHorizonLine()
if doSortingPoints:
self.sortPoints()
self.app.messageQueue.put('ToModel>{0:02d}'.format(len(self.modelPoints)))
self.app.workerModelingDispatcher.signalModelPointsRedraw.emit()
def generateInitialPoints(self, azimuth, altitude, numberOfPoints):
self.modelPoints = list()
for i in range(0, numberOfPoints):
azp = i * 360 / numberOfPoints + azimuth
if azp > 360:
azp -= 360
azp = int(azp)
point = (azp, altitude)
self.modelPoints.append(point)
self.app.messageQueue.put('ToModel>{0:02d}'.format(len(self.modelPoints)))
self.app.workerModelingDispatcher.signalModelPointsRedraw.emit()
def generateCelestialEquator(self):
self.celestialEquator = list()
off = -5
for dec in range(-15, 90, 15):
for ha in range(120 + off, -120 + off, -2):
az, alt = self.transform.topocentricToAzAlt(ha / 10, dec)
if alt > 0:
self.celestialEquator.append((az, alt))
| 2.03125 | 2 |
Working/Raycasting/vf.py | mm-wang/metashape | 4 | 12790457 | import rhinoscriptsyntax as rs
import Rhino as rc
import scriptcontext as sc
#import ghpythonlib as gh
import Grasshopper as gh
""" Calculate View Factor
"""
class ViewFactor(object):
def __init__(self):
self.sphere_nested = []
self.cpt = []
self.bound_nested = []
self.bld_num = None
self.ray_num = None
# Outputs
self.raycast_distance = None
self.raycast_pt_x = None
self.raycast_pt_y = None
self.raycast_pt_z = None
def process_raw_inputs(self,sphere_tree_in,bound_srf_lst_in,cpt_lst_in):
self.cpt = map(lambda c: rs.coerce3dpoint(c), cpt_lst_in)
# convert tree to nested list of sphere pts
for i in range(sphere_tree_in.BranchCount):
branchList = sphere_tree_in.Branch(i)
self.sphere_nested.append(branchList)
# convert tree to nested list of bound srfs
for i in range(bound_srf_lst_in.BranchCount):
branchList = bound_srf_lst_in.Branch(i)
branchList = map(lambda s: rs.coercebrep(s), branchList)
self.bound_nested.append(branchList)
# convert guids to rc points
for i in xrange(len(self.sphere_nested)):
sphere_per_bld = list(self.sphere_nested[i])
for j in xrange(len(sphere_per_bld)):
sphere_per_bld[j] = rs.coerce3dpoint(sphere_per_bld[j])
self.sphere_nested[i] = sphere_per_bld
self.bld_num = len(self.sphere_nested)
self.ray_num = len(self.sphere_nested[0])
def ray_cast(self):
"""
base_vector
direction_vector
srf2int
"""
self.ray_int_nested = []
self.ray_dist_nested = []
for i in xrange(self.bld_num):
raypts = []
raydist = []
for j in xrange(self.ray_num):
srf2int_lst = self.bound_nested[i]
r0 = self.cpt[i] #base_vector
r1 = self.sphere_nested[i][j] #direction_vector
#convert pts to vectors
r1 = rc.Geometry.Vector3d(r1) - rc.Geometry.Vector3d(r0)
ray = rc.Geometry.Ray3d(r0,r1)
point_intersect_lst = rc.Geometry.Intersect.Intersection.RayShoot(ray,srf2int_lst,1)
if point_intersect_lst:
point_intersect_lst = list(point_intersect_lst)
rpt = point_intersect_lst[0]
raypts.append(rpt)
raydist.append(rs.Distance(rpt,r0))
#rc.Geometry.Vector3d.Multiply(
self.ray_dist_nested.append(raydist)
self.ray_int_nested.append(raypts)
#print len(self.ray_int_nested)
def generate_viewfactor_matrix(self):
# flip the matrix
self.raycast_distance = map(lambda r: [None] * self.ray_num, [None] * self.bld_num)
self.raycast_x = map(lambda r: [None] * self.ray_num, [None] * self.bld_num)
self.raycast_y = map(lambda r: [None] * self.ray_num, [None] * self.bld_num)
self.raycast_z = map(lambda r: [None] * self.ray_num, [None] * self.bld_num)
self.ray_mtx = []
self.header_lst = []
for ri in xrange(self.ray_num):
hstr = "RAY_{b}_".format(b=ri)
self.header_lst.extend([hstr+"dist",hstr+"x",hstr+"y",hstr+"z"])
print len(self.header_lst)
print self.header_lst
for i in xrange(self.bld_num):
self.bld_lst = []
for j in xrange(self.ray_num):
d = self.ray_dist_nested[i][j]
x = self.ray_int_nested[i][j][0]
y = self.ray_int_nested[i][j][1]
z = self.ray_int_nested[i][j][2]
self.bld_lst.extend([d,x,y,z])
#self.ray_dist_nested[i][j]
#self.raycast_distance[i][j]
self.ray_mtx.append(self.bld_lst)
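# Resulting matrix layout: one row per building, four columns per ray
# (distance, then x, y, z of the first intersection point), in the same
# order as header_lst.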
def pythonListTGhDataTree(self,pythonList):
""" Converts a nested Python list to a GH datatree """
# Create GH datatree
dataTree = gh.DataTree[object]()
# Add pythonlist sub lists to dataTree
for i,l in enumerate(pythonList):
for v in l:
dataTree.Add(v,gh.Kernel.Data.GH_Path(i))
return dataTree
vf = ViewFactor()
vf.process_raw_inputs(sphere_tree_in, bound_srf_lst_in, cpt_lst_in)
vf.ray_cast()
vf.generate_viewfactor_matrix()
header_lst = vf.header_lst
ray_tree = vf.pythonListTGhDataTree(vf.ray_mtx)
#header_tree
print len(vf.ray_mtx)
print len(vf.bld_lst)
#sphere_out = vf.sphere_nested
#cpt_out = vf.cpt
ray_out = reduce(lambda x,y: x+y, vf.ray_int_nested)
| 2.296875 | 2 |
example_app/tutorial/urls.py | mr-aliraza/django-rest-swagger | 0 | 12790458 | <filename>example_app/tutorial/urls.py
from django.urls import re_path, include
from rest_framework.routers import DefaultRouter
from rest_framework_swagger.views import get_swagger_view
from snippets import views
router = DefaultRouter()
router.register(r'snippets', views.SnippetViewSet)
router.register(r'users', views.UserViewSet)
schema_view = get_swagger_view(title='Snippets API')
urlpatterns = [
re_path(r'^$', schema_view),
re_path(r'^', include(router.urls)),
re_path(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
| 2.0625 | 2 |
mozart/music/netease.py | kushao1267/MusicAPI | 0 | 12790459 | <filename>mozart/music/netease.py
import re
import requests
import json
import binascii
from Crypto.Cipher import AES
from .base import Music
from mozart import config
from .exception import MusicDoesnotExists
__all__ = ["Netease"]
def encode_netease_data(data) -> str:
data = json.dumps(data)
key = binascii.unhexlify("7246674226682325323F5E6544673A51")
encryptor = AES.new(key, AES.MODE_ECB)
# pad the data to a multiple of 16 bytes (the AES block size)
pad = 16 - len(data) % 16
fix = chr(pad) * pad
byte_data = (data + fix).encode("utf-8")
return binascii.hexlify(encryptor.encrypt(byte_data)).upper().decode()
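# Padding sketch: for a 20-byte JSON payload, pad = 16 - 20 % 16 = 12, so
# twelve chr(12) bytes are appended before AES-ECB encryption (the usual
# PKCS#7-style scheme).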
class Netease(Music):
def __init__(self, *args, **kwargs):
super(Netease, self).__init__(*args, **kwargs)
# Netease music initialization
if not self.use_id:
self.music_id = self.get_music_id_from_url(self.real_url)
self.get_music_from_id()
print(self.__repr__())
def get_music_from_id(self):
if self.music_id: # music_id合法才请求
self._get_music_info()
self._get_download_url()
def _get_music_info(self):
s = requests.Session()
s.headers.update(config.fake_headers)
s.headers.update({"referer": "http://music.163.com/"})
eparams = {
"method": "POST",
"params": {"c": "[{id:%s}]" % self.music_id},
"url": "http://music.163.com/api/v3/song/detail"
}
data = {"eparams": encode_netease_data(eparams)}
r = s.post("http://music.163.com/api/linux/forward", data=data)
if r.status_code != requests.codes.ok:
raise Exception(r.text)
j = r.json()
if len(j["songs"]) > 0:
self._cover = j["songs"][0]["al"]["picUrl"]
self._song = j["songs"][0]["al"]["name"]
self._singer = j["songs"][0]["ar"][0]["name"]
else:
raise MusicDoesnotExists("音乐不存在,请检查")
def _get_download_url(self):
""" 从网易云音乐下载 """
eparams = {
"method": "POST",
"url": "http://music.163.com/api/song/enhance/player/url",
"params": {"ids": [self.music_id], "br": 320000},
}
data = {"eparams": encode_netease_data(eparams)}
s = requests.Session()
s.headers.update(config.fake_headers)
s.headers.update({"referer": "http://music.163.com/"})
r = s.post("http://music.163.com/api/linux/forward", data=data)
if r.status_code != requests.codes.ok:
raise Exception(r.text)
j = r.json()
self._download_url = j["data"][0]["url"]
self._rate = int(j["data"][0]["br"] / 1000)
@classmethod
def get_music_id_from_url(cls, url) -> str:
music_ids = re.findall(r'music\.163\.com/song/(\d+)', url)
if music_ids:
mid = music_ids[0]
return mid
return ""
| 2.6875 | 3 |
monte_carlo.py | WillSkywalker/2048_monte_carlo | 1 | 12790460 | """Algorithm for simulating a 2048 game using Monte-Carlo method."""
import random, _2048
SIMULATE_TIMES = 100000
DIRECTIONS = ('UP', 'DOWN', 'LEFT', 'RIGHT')
def simulate_to_end(game):
while game.get_state():
dircts = list(DIRECTIONS)
for _ in range(4):  # try directions until one produces a move
c = random.choice(dircts)
if game.move(c):
break
dircts.remove(c)
return game.get_score()
def score_sum(game,direction):
score = 0
temp = game.clone()
temp.move(direction)
for i in range(SIMULATE_TIMES):
score += simulate_to_end(temp)
return score
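# Decision rule: for every currently legal move, play SIMULATE_TIMES random
# games to completion and keep the move whose playouts accumulate the highest
# total score; this is the core of a flat Monte-Carlo search.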
def monte_carlo(game):
scores = {}
biggest = 0
best = None
directions = list(DIRECTIONS)
for d in DIRECTIONS:
test = game.clone()
if not test.move(d):
directions.remove(d)
for direction in directions:
temp = game.clone()
score = score_sum(temp, direction)
if score > biggest:
biggest = score
best = direction
scores[direction] = score
print(scores)
if len(set(scores.values())) == 1:
return False
else:
return best
if __name__ == '__main__':
a_game = _2048.Gameplay()
print(monte_carlo(a_game))
| 4.0625 | 4 |
model/LsBlk.py | keithCollins77093/hardInfo | 0 | 12790461 | # Project: hardInfo
# Author: <NAME>
# Date Started: March 18, 2022
# Copyright: (c) Copyright 2022 <NAME>
# Module: model/LsBlk.py
# Date Started: March 23, 2022
# Purpose: Store and provide API for Linux lsblk command.
# Development:
# Arguments to include in the command line:
# lsblk --json --all --zoned --output-all --paths
#
from enum import Enum
from subprocess import Popen, PIPE
from sys import stderr
from json import loads
from tkinter import Tk, messagebox, LabelFrame, BOTH, RAISED
from model.Installation import INSTALLATION_FOLDER
from view.Components import JsonTreeView
PROGRAM_TITLE = "lsblk API"
LSBLK_JSON_FILE = 'lsblk.json'
class Action(Enum):
Generate = 'Generate'
Help = "Help"
Load = 'Load'
Store = 'Store'
Search = 'Search'
Update = 'Update'
Log = 'Log'
Exit = 'Exit'
def __str__(self):
return self.value
class Dispatcher:
def __init__(self):
print("Lshw.Dispatcher does not instantiate")
@staticmethod
def do( action: Action):
if action == Action.Generate:
return Dispatcher.__generateLsBlkJsonFile()
@staticmethod
def __generateLsBlkJsonFile():
# lsblk --json --all --zoned --output-all --paths
proc = Popen(['lsblk', '--json', '--all', '--zoned', '--output-all', '--paths'],
stdin=PIPE, stdout=PIPE, stderr=PIPE).communicate()
jsonText = proc[0].decode('utf-8')
errors = proc[1].decode('utf-8')
if len(errors) > 0:
print(errors, file=stderr)
print("Saving output to:\t" + LSBLK_JSON_FILE)
file = open(LSBLK_JSON_FILE, "w")
file.write(jsonText)
file.close()
return jsonText
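    # The JSON root is {"blockdevices": [...]}. A quick sketch of reading the
    # generated output back (field names follow lsblk --output-all):
    #
    #   devices = loads(jsonText)["blockdevices"]
    #   for dev in devices:
    #       print(dev.get("name"), dev.get("size"), dev.get("type"))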
def ExitProgram():
answer = messagebox.askyesno('Exit program ', "Exit the " + PROGRAM_TITLE + " program?")
if answer:
mainView.destroy()
if __name__ == '__main__':
mainView = Tk()
mainView.protocol('WM_DELETE_WINDOW', ExitProgram)
mainView.geometry("700x450+250+50")
mainView.title(PROGRAM_TITLE)
jsonText = Dispatcher.do(Action.Generate)
lsblkJson = loads(jsonText)
borderFrame = LabelFrame(mainView, text="Block Devices", border=5, relief=RAISED)
jsonTreeView = JsonTreeView(borderFrame, lsblkJson, {"openBranches": True, "mode": "strict"})
jsonTreeView.pack(expand=True, fill=BOTH)
borderFrame.pack(expand=True, fill=BOTH)
mainView.mainloop()
| 2.53125 | 3 |
1-lab/simple_replacement.py | osovv/miet-security | 0 | 12790462 | import argparse
def encrypt(message: bytes, key: dict[int, int]) -> bytes:
    # substitute every byte through the key table
    return bytes(key[byte] for byte in message)
def decrypt(message: bytes, key: dict[int, int]) -> bytes:
    # apply the (inverse) substitution table
    return bytes(key[byte] for byte in message)
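# Key-file format sketch: a single line of 256 space-separated integers forming
# a permutation of 0-255, e.g. "3 1 4 0 2 ...". encrypting_key maps byte value
# (position) -> substituted byte; decrypting_key is its inverse.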
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--file')
parser.add_argument('--keyFile')
parser.add_argument('--output')
parser.add_argument('--encrypt', action='store_true')
parser.add_argument('--decrypt', action='store_true')
args = parser.parse_args()
key = None
with open(args.keyFile) as file:
key = file.readline()
encrypting_key = {idx: int(value) for idx, value in enumerate(key.split(' '))}
decrypting_key = {int(value): idx for idx, value in enumerate(key.split(' '))}
file_contents = None
encrypted = None
with open(args.file, 'rb') as file:
file_contents = file.read()
output = args.output
if args.encrypt is True:
encrypted = encrypt(file_contents, encrypting_key)
with open(output, "wb") as out:
out.write(bytearray(encrypted))
elif args.decrypt is True:
decrypted = decrypt(file_contents, decrypting_key)
with open(output, "wb") as out:
out.write(bytearray(decrypted))
else:
print('No action type was given')
if __name__ == '__main__':
main()
| 3.78125 | 4 |
ObitSystem/Obit/python/OPlot.py | sarrvesh/Obit | 5 | 12790463 | """
Obit Plotting class
Create a plot object using newOPlot which allows specifying the output
and background color. If no output is specified this information
will be prompted.
Next, the plotting region must be specified using either PSetPlot,
one of the XY plotting routines (PXYPlot, PXYOver, or PXYErr)
PGrayScale, or PContour. Then additional lines, curves, text or symbols may be added.
When all has been added to the plot, use PShow to finalize it.
Notes on text strings in PLPlot installations:
If the Obit installation uses PLPlot for plotting, the following
can be used in text strings:
- Greek letters, A #g immediately prior to a Latin character will cause
the Greek equivalent to be used, e.g. #ga will be a lower case alpha.
- Subscripts: Characters between a #d and #u will be written as subscripts
- Superscripts: Characters between a #u and #d will be written as
superscripts
"""
# $Id$
#-----------------------------------------------------------------------
# Copyright (C) 2006,2016,2019
# Associated Universities, Inc. Washington DC, USA.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free
# Software Foundation, Inc., 675 Massachusetts Ave, Cambridge,
# MA 02139, USA.
#
# Correspondence concerning this software should be addressed as follows:
# Internet email: <EMAIL>.
# Postal address: <NAME>
# National Radio Astronomy Observatory
# 520 Edgemont Road
# Charlottesville, VA 22903-2475 USA
#-----------------------------------------------------------------------
# Python shadow class to ObitPlot class
from __future__ import absolute_import
from __future__ import print_function
import Obit, _Obit, InfoList, Image
import math
class OPlot(Obit.OPlot):
"""
Python Obit interface to display server
This class is for creating and using the interface to a plot
Image Members with python interfaces:
======== =======================================
InfoList used to pass instructions to processing
Member List
======== =======================================
"""
def __init__(self, name):
super(OPlot, self).__init__()
Obit.CreateOPlot(self.this, name)
def __del__(self, DeleteOPlot=_Obit.DeleteOPlot):
if _Obit is not None:
DeleteOPlot(self.this)
def __setattr__(self,name,value):
if name == "me" :
# Out with the old
if self.this!=None:
Obit.OPlotUnref(Obit.OPlot_Get_me(self.this))
# In with the new
Obit.OPlot_Set_me(self.this,value)
return
self.__dict__[name] = value
def __getattr__(self,name):
if not isinstance(self, OPlot):
return "Bogus Dude"+str(self.__class__)
if name == "me" :
return Obit.OPlot_Get_me(self.this)
# Functions to return members
if name=="List":
return PGetList(self)
raise AttributeError(name)
def __repr__(self):
if not isinstance(self, OPlot):
return "Bogus Dude"+str(self.__class__)
return "<C OPlot instance> " + Obit.OPlotGetName(self.me)
# Foreground Colors
unBLACK = 0
RED = 1
YELLOW = 2
GREEN = 3
AQUAMARINE = 4
BLACK = 5
WHEAT = 6
GRAY = 7
BROWN = 8
BLUE = 9
BLUEVIOLET = 10
CYAN = 11
TURQUOISE = 12
MAGENTA = 13
SALMON = 14
WHITE = 15
def newOPlot(name, err, output="None", bgcolor=BLACK, nx=1, ny=1 ):
"""
Create and initialize an ObitPlot
* name = name desired for object (labeling purposes)
* err = Python Obit Error/message stack
* output = name and type of output device:
====== ==========================
"None" interactive prompt
"xwin" X-Window (Xlib)
"gcw" Gnome Canvas Widget (interacts with ObitTalk)
"ps" PostScript File (monochrome)
"psc" PostScript File (color)
"xfig" Fig file
"png" PNG file
"jpeg" JPEG file
"gif" GIF file
"null" Null device
====== ==========================
* bgcolor = background color index (1-15), symbolic names:
BLACK (default), RED, YELLOW, GREEN, AQUAMARINE, WHEAT, GRAY, BROWN,
BLUE, BLUEVIOLET, CYAN, TURQUOISE, MAGENTA, SALMON, WHITE
* nx = Number of horizontal subpages
* ny = Number of vertical subpages
"""
################################################################
out = OPlot(name)
Obit.PlotInitPlot(out.me, output, bgcolor, nx, ny, err.me)
return out
# end newOPlot
def PXYPlot (plot, symbol, x, y, err):
"""
Simple XY Plot
Plot X vs Y using symbol.
Plot should be finalized and displayed with PShow
This routine draws the frame and adds labels, to only overplot data
on the same frame, use ObitPlotXYOver
* plot = plot
* symbol = Symbol index to use for plotting
values in the range [1,12] are usable if negative, use abs value and
connect points
== =================
0 line only
1 dot
2 plus
3 \*
4 open circle
5 x
6 open square
7 open triangle
8 open star
9 filled triangle
10 filled square
11 filled circle
12 filled star
== =================
* x = Independent variable, if None use index
* y = Dependent variable
* err = ObitErr error stack
Optional parameters on plot InfoList
====== ======== ===============================================
XMAX (float) maximum X value (defaults to actual value)
XMIN (float) minimum X value (defaults to actual value)
YMAX (float) maximum Y value (defaults to actual value)
YMIN (float) minimum Y value (defaults to actual value)
TITLE (string) Label for the plot (defaults to none), max 120
XLABEL (string) Label for horizontal axis (defaults to none)
XOPT (string) Options for horizontal axis (default "BCNTS")
See PDrawAxes for details.
YLABEL (string) Label for vertical axis (defaults to none)
YOPT (string) Options for vertical axis (default "BCNTS")
See PDrawAxes for details.
XTICK (float) world coordinate interval between major tick marks
on X axis. If xtick=0.0 [def], the interval is chosen.
NXSUB (long) the number of subintervals to divide the major
coordinate interval into. If xtick=0.0 or nxsub=0,
the number is chosen. [def 0]
YTICK (float) like xtick for the Y axis.
NYSUB (int) like nxsub for the Y axis
CSIZE (int) Scaling factor for characters(default = 1)
SSIZE (int) Scaling factor for symbols(default = 1)
LWIDTH (int) Line width (default = 1)
JUST (int) If !=0 then force X and Y axis scaling to be the same
====== ======== ===============================================
"""
################################################################
# Checks
if not PIsA(plot):
print("Actually ",plot.__class__)
raise TypeError("plot MUST be a Python Obit Plot")
n = len(y) # How many points?
Obit.PlotXYPlot (plot.me, symbol, n, x, y, err.me)
# end PXYPlot
def PXYOver (plot, symbol, x, y, err):
"""
Overplot X vs Y
Overplot X vs Y using symbol.
Plot should be finalized and displayed with PShow
* plot = plot
* symbol = Symbol index to use for plotting. Values in the range [1,12]
are usable. If negative, use abs value and connect points.
== ===============
0 line only
1 dot
2 plus
3 \*
4 open circle
5 x
6 open square
7 open triangle
8 open star
9 filled triangle
10 filled square
11 filled circle
12 filled star
== ===============
* x = Independent variable, if None use index
* y = Dependent variable
* err = ObitErr error stack
"""
################################################################
# Checks
if not PIsA(plot):
print("Actually ",plot.__class__)
raise TypeError("plot MUST be a Python Obit Plot")
n = len(y) # How many points?
Obit.PlotXYOver (plot.me, symbol, n, x, y, err.me)
# end PXYOver
def PXYErr (plot, symbol, x, y, e, err):
"""
Simple XY Plot with error bars
Plot X vs Y using symbol and error bars.
Plot should be finalized and displayed with PShow
    This routine draws the frame and adds labels; to only overplot data
    on the same frame, use PXYOver (ObitPlotXYOver)
* plot = plot
* symbol = Symbol index to use for plotting. Values in the range [1,12]
are usable. If negative, use abs value and connect points.
== ===============
0 line only
1 dot
2 plus
3 \*
4 open circle
5 x
6 open square
7 open triangle
8 open star
9 filled triangle
10 filled square
11 filled circle
12 filled star
== ===============
* x = Independent variable, if None use index
* y = Dependent variable
    * e = if non-None, error in y
* err = ObitErr error stack
Optional parameters on plot InfoList:
====== ======== ==================================================
XMAX (float) maximum X value (defaults to actual value)
XMIN (float) minimum X value (defaults to actual value)
YMAX (float) maximum Y value (defaults to actual value)
YMIN (float) minimum Y value (defaults to actual value)
TITLE (string) Label for the plot (defaults to none), max 120
XLABEL (string) Label for horizontal axis (defaults to none)
XOPT (string) Options for horizontal axis (default "BCNTS")
See PDrawAxes for details.
YLABEL (string) Label for vertical axis (defaults to none)
YOPT (string) Options for vertical axis (default "BCNTS")
See PDrawAxes for details.
XTICK (float) world coordinate interval between major tick marks
on X axis. If xtick=0.0 [def], the interval is chosen.
NXSUB (int) the number of subintervals to divide the major
coordinate interval into. If xtick=0.0 or nxsub=0,
the number is chosen. [def 0]
YTICK (float) like xtick for the Y axis.
NYSUB (int) like nxsub for the Y axis
CSIZE (int) Scaling factor for characters(default = 1)
SSIZE (int) Scaling factor for symbols(default = 1)
LWIDTH (int) Line width (default = 1)
JUST (int) If !=0 then force X and Y axis scaling to be the same
====== ======== ==================================================
"""
################################################################
# Checks
if not PIsA(plot):
print("Actually ",plot.__class__)
raise TypeError("plot MUST be a Python Obit Plot")
n = len(y) # How many points?
Obit.PlotXYErr (plot.me, symbol, n, x, y, e, err.me)
# end PXYErr
def PContour (plot, label, image, lev, cntfac, err):
"""
Contour plot of image
Contours at lev times powers of cntfac
Plot should be finalized and displayed with PShow
* plot = plot
* label = Label for plot
* image = ObitImage to plot, BLC, TRC on info member honored
    * lev = basic contour level (def 0.1 of peak)
    * cntfac = factor for spacing between contours (def sqrt(2))
* err = ObitErr error stack
Optional parameters on plot InfoList:
====== ======= ==================================================
XTICK (float) world coordinate interval between major tick marks
on X axis. If xtick=0.0 [def], the interval is chosen.
NXSUB (int) the number of subintervals to divide the major
coordinate interval into. If xtick=0.0 or nxsub=0,
the number is chosen. [def 0]
YTICK (float) like xtick for the Y axis.
NYSUB (int) like nxsub for the Y axis
CSIZE (int) Scaling factor for characters(default = 1)
LWIDTH (int) Line width (default = 1)
====== ======= ==================================================
"""
################################################################
# Checks
if not PIsA(plot):
print("Actually ",plot.__class__)
raise TypeError("plot MUST be a Python Obit Plot")
if not Image.PIsA(image):
print("Actually ",image.__class__)
raise TypeError("image MUST be a Python Obit Image")
Obit.PlotContour (plot.me, label, image.me, lev, cntfac, err.me)
# end PContour
def PGrayScale (plot, label, image, err):
"""
    Gray scale plot of image
Plot should be finalized and displayed with PShow
* plot = plot
* label = Label for plot
* image = ObitImage to plot, BLC, TRC on info member honored
* err = ObitErr error stack
Optional parameters on plot InfoList:
======= ======== =================================================
XTICK (float) world coordinate interval between major tick marks
on X axis. If xtick=0.0 [def], the interval is chosen.
NXSUB (int) the number of subintervals to divide the major
coordinate interval into. If xtick=0.0 or nxsub=0,
the number is chosen. [def 0]
YTICK (float) like xtick for the Y axis.
NYSUB (int) like nxsub for the Y axis
CSIZE (int) Scaling factor for characters(default = 1)
SQRT (bool) If present and true plot sqrt (pixel_value)
    INVERT (bool) If present and true invert colors
COLOR (string) Color scheme 'GRAY', 'CONTOUR', 'PHLAME'
default 'GRAY'
    PIX_MAX (float) maximum pixel value [def max in image]
    PIX_MIN (float) minimum pixel value [def min in image]
======= ======== =================================================
"""
################################################################
# Checks
if not PIsA(plot):
print("Actually ",plot.__class__)
raise TypeError("plot MUST be a Python Obit Plot")
if not Image.PIsA(image):
print("Actually ",image.__class__)
raise TypeError("image MUST be a Python Obit Image")
Obit.PlotGrayScale (plot.me, label, image.me, err.me)
# end PGrayScale
def PMarkCross (plot, image, ra, dec, err, size=5.0):
"""
Mark positions on Contour plot of image
Place cross at positions.
Plot should be finalized and displayed with PShow
* plot = plot
* image = ObitImage to plot
* ra = list of RAs (deg)
* dec = list of Declinations (deg)
* err = ObitErr error stack
* size = size of cross in pixels
Optional parameters on plot InfoList
====== ===== ============================================
CSIZE (int) Scaling factor for characters(default = 1)
LWIDTH (int) Line width (default = 1)
====== ===== ============================================
"""
################################################################
# Checks
if not PIsA(plot):
print("Actually ",plot.__class__)
raise TypeError("plot MUST be a Python Obit Plot")
if not Image.PIsA(image):
print("Actually ",image.__class__)
raise TypeError("image MUST be a Python Obit Image")
n = len(ra)
Obit.PlotMarkCross (plot.me, image.me, n, ra, dec, size, err.me)
# end PMarkCross
def PShow (plot, err):
"""
Display plot
* plot = Python Plot object
* err = ObitErr error stack
"""
################################################################
# Checks
if not PIsA(plot):
raise TypeError("plot MUST be a Python Obit plot")
#
Obit.PlotFinishPlot(plot.me, err.me)
# end PShow
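# Hedged usage sketch (assumption: an ObitErr error stack `err`, created
# elsewhere via the OErr module which this file does not import, is passed
# in by the caller; output "ps" writes a monochrome PostScript file).
def exampleXYPlot(err):
    """Minimal end-to-end example: create a plot, draw X vs Y, display it."""
    plot = newOPlot("demo", err, output="ps")
    x = [float(i) for i in range(10)]  # independent variable
    y = [v * v for v in x]             # dependent variable
    PXYPlot(plot, 4, x, y, err)        # symbol 4 = open circle
    PShow(plot, err)                   # finalize and write the output file
# end exampleXYPlot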
def PSetPlot (plot, xmin, xmax, ymin, ymax, just, axis, err):
"""
Define plotting area
* plot = Python Plot object
* xmin = the world x-coordinate at the bottom left corner of the viewport.
* xmax = the world x-coordinate at the top right corner of the viewport
(note XMAX may be less than XMIN).
* ymin = the world y-coordinate at the bottom left corner
of the viewport.
* ymax = the world y-coordinate at the top right corner
of the viewport (note YMAX may be less than YMIN)
* just = if JUST=1, the scales of the x and y axes (in
world coordinates per inch) will be equal,
otherwise they will be scaled independently.
* axis = controls the plotting of axes, tick marks, etc:
== ===========================================
-2 draw no box, axes or labels;
-1 draw box only;
0 draw box and label it with coordinates;
1 same as axis=0, but also draw the
coordinate axes (X=0, Y=0);
2 same as axis=1, but also draw grid lines
at major increments of the coordinates;
10 draw box and label X-axis logarithmically;
20 draw box and label Y-axis logarithmically;
30 draw box and label both axes logarithmically.
== ===========================================
* err = ObitErr error stack
"""
################################################################
# Checks
if not PIsA(plot):
raise TypeError("plot MUST be a Python Obit plot")
#
Obit.PlotSetPlot(plot.me, xmin, xmax, ymin, ymax, just, axis, err.me)
# end PSetPlot
def PLabel (plot, xlabel, ylabel, title, err):
"""
    Add axis labels and a title to the plot
* plot = Python Plot object
* xlabel = a label for the x-axis (centered below the viewport).
* ylabel = a label for the y-axis (centered to the left
of the viewport, drawn vertically)
* title = a label for the entire plot (centered above the viewport)
* err = ObitErr error stack
"""
################################################################
# Checks
if not PIsA(plot):
raise TypeError("plot MUST be a Python Obit plot")
#
Obit.PlotLabel(plot.me, xlabel, ylabel, title, err.me)
# end PLabel
def PDrawAxes(plot, xopt, xtick, nxsub, yopt, ytick, nysub, err):
"""
Draw axes for a plot, label
* plot = Python Plot object
* xopt = string of options for X (horizontal) axis of plot.
Options are single letters, and may be in any order:
= ======================================================================
A draw Axis (X axis is horizontal line Y=0, Y axis is vertical line X=0).
B draw bottom (X) or left (Y) edge of frame.
C draw top (X) or right (Y) edge of frame.
G draw Grid of vertical (X) or horizontal (Y) lines
I Invert the tick marks; ie draw them outside the viewport instead of inside.
L label axis Logarithmically
N write Numeric labels in the conventional location below the
viewport (X) or to the left of the viewport (Y).
M write numeric labels in the unconventional location above the
viewport (X) or to the right of the viewport (Y).
P extend ("Project") major tick marks outside the box (ignored if
option I is specified)
T draw major Tick marks at the major coordinate interval.
S draw minor tick marks (Subticks).
= ======================================================================
* xtick = World coordinate interval between major tick marks
on X axis. If xtick=0.0, the interval is chosen.
* nxsub = The number of subintervals to divide the major coordinate interval
into. If xtick=0.0 or nxsub=0, the number is chosen.
* yopt = string of options for Y (vertical) axis of plot.
Coding is the same as for xopt.
* ytick = like xtick for the Y axis.
* nysub = like nxsub for the Y axis
* err = ObitErr error stack
"""
################################################################
# Checks
if not PIsA(plot):
raise TypeError("plot MUST be a Python Obit plot")
#
Obit.PlotDrawAxes(plot.me, xopt, xtick, nxsub, yopt, ytick, nysub, err.me)
    # end PDrawAxes
def PSetCharSize (plot,cscale, err):
"""
Set scaling for characters
* plot = Python Plot object
* cscale = new character size (integer multiple of the default size).
* err = ObitErr error stack
"""
################################################################
# Checks
if not PIsA(plot):
raise TypeError("plot MUST be a Python Obit plot")
#
Obit.PlotSetCharSize (plot.me, cscale, err.me)
# end PSetCharSize
def PSetLineWidth (plot, lwidth, err):
"""
Set line width
* plot = Python Plot object
* lwidth = Width of line (integer multiple of the default size).
* err = ObitErr error stack
"""
################################################################
# Checks
if not PIsA(plot):
raise TypeError("plot MUST be a Python Obit plot")
#
Obit.PlotSetLineWidth(plot.me, lwidth, err.me)
    # end PSetLineWidth
def PSetLineStyle (plot, lstyle, err):
"""
Set line style
* plot = Python Plot object
* lstyle = Style of line (integer multiple of the default size).
               1 = continuous, 2 = dashed, 3 = dot dash, 4 = dotted,
5 = dash dot dot dot
* err = ObitErr error stack
"""
################################################################
# Checks
if not PIsA(plot):
raise TypeError("plot MUST be a Python Obit plot")
#
Obit.PlotSetLineStyle(plot.me, lstyle, err.me)
    # end PSetLineStyle
def PSetColor (plot, color, err):
"""
Set foreground color
* plot = Python Plot object
    * color = color index (0-15), symbolic names:
      unBLACK (not really black), RED, YELLOW, GREEN, AQUAMARINE, BLACK, WHEAT,
      GRAY, BROWN, BLUE, BLUEVIOLET, CYAN, TURQUOISE, MAGENTA, SALMON, WHITE
* err = ObitErr error stack
"""
################################################################
# Checks
if not PIsA(plot):
raise TypeError("plot MUST be a Python Obit plot")
#
Obit.PlotSetColor(plot.me, color, err.me)
# end PSetColor
def PSetPage (plot, sub, err):
"""
Set or advance sub page
Note: some functions such as PContour advance the page
* plot = Python Plot object
    * sub = if <=0 advance page, if >0 set current subpage to sub.
            Numbering starts at the top left at 1 and increases along
            rows and columns
* err = ObitErr error stack
"""
################################################################
# Checks
if not PIsA(plot):
raise TypeError("plot MUST be a Python Obit plot")
#
Obit.PlotSetPage(plot.me, sub, err.me)
# end PSetPage
def PText (plot, x, y, angle, just, text, err):
"""
Write text on plot
* plot = Python Plot object
* x = Plot x in world coordinates
* y = Plot y in world coordinates
* angle = Orientation of the text in deg, 0=horizontal
    * just = Controls justification of the string parallel to
             the specified edge of the viewport. If
             just = 0.0, the left-hand end of the string will
             be placed at (x,y); if just = 0.5, the center of
             the string will be placed at (x,y); if just = 1.0,
             the right-hand end of the string will be placed
             at (x,y). Other values between 0 and 1 give intermediate
             placing, but they are not very useful.
* text = The text string to be plotted. Trailing spaces are
ignored when justifying the string, but leading spaces are
significant.
* err = ObitErr error stack
"""
################################################################
# Checks
if not PIsA(plot):
raise TypeError("plot MUST be a Python Obit plot")
#
dx = math.cos(angle/57.296)
dy = math.sin(angle/57.296)
Obit.PlotText(plot.me, x, y, dx, dy, just, text, err.me)
# end PText
def PRelText (plot, side, disp, coord, fjust, text, err):
"""
Write text on plot relative to port
* plot = Python Plot object
* side = Must include one of the characters 'B', 'L', 'T',
or 'R' signifying the Bottom, Left, Top, or Right
margin of the viewport. If it includes 'LV' or
'RV', the string is written perpendicular to the
frame rather than parallel to it.
* disp = The displacement of the character string from the
specified edge of the viewport, measured outwards
from the viewport in units of the character
height. Use a negative value to write inside the
viewport, a positive value to write outside.
* coord = The location of the character string along the
specified edge of the viewport, as a fraction of
the length of the edge.
    * fjust = Controls justification of the string parallel to
              the specified edge of the viewport. If
              fjust = 0.0, the left-hand end of the string will
              be placed at COORD; if fjust = 0.5, the center of
              the string will be placed at COORD; if fjust = 1.0,
              the right-hand end of the string will be placed
              at COORD. Other values between 0 and 1 give
              intermediate placing, but they are not very useful.
* text = The text string to be plotted. Trailing spaces are
ignored when justifying the string, but leading
spaces are significant.
* err = ObitErr error stack
"""
################################################################
# Checks
if not PIsA(plot):
raise TypeError("plot MUST be a Python Obit plot")
#
Obit.PlotRelText(plot.me, side, disp, coord, fjust, text, err.me)
# end PRelText
def PDrawLine (plot, x1, y1, x2, y2, err):
"""
Draw a line.
* plot = Python Plot object
* x1 = world x-coordinate of the new pen position.
* y1 = world y-coordinate of the new pen position.
* x2 = world x-coordinate of the new pen position.
* y2 = world y-coordinate of the new pen position.
* err = ObitErr error stack
"""
################################################################
# Checks
if not PIsA(plot):
raise TypeError("plot MUST be a Python Obit plot")
#
Obit.PlotDrawLine(plot.me, x1, y1, x2, y2, err.me)
# end PDrawLine
def PDrawCurve (plot, x, y, err):
"""
Draw a curve.
* plot = Python Plot object
* x = Array of world x-coordinates of points
* y = Array of world y-coordinates of points
* err = ObitErr error stack
"""
################################################################
# Checks
if not PIsA(plot):
raise TypeError("plot MUST be a Python Obit plot")
#
n = len(x)
Obit.PlotDrawCurve (plot.me, n, x, y, err.me)
# end PDrawCurve
def PDrawCircle (plot, x, y,radius, err):
"""
Draw a circle.
* plot = Python Plot object
* x = World x-coordinate of center
* y = World y-coordinate of center
* radius = World coordinate radius
* err = ObitErr error stack
"""
################################################################
# Checks
if not PIsA(plot):
raise TypeError("plot MUST be a Python Obit plot")
#
Obit.PlotDrawCircle (plot.me, x, y, radius, err.me)
# end PDrawCircle
def PDrawSymbol (plot, x, y, symbol, err):
"""
Draw a Symbol
* plot = Python Plot object
* x = world x-coordinate of the center of the symbol
* y = world y-coordinate of the center of the symbol
* symbol = Symbol index to use for plotting. Values in the range [1,12]
are usable. If negative, use abs value and connect points.
== ===============
0 line only
1 dot
2 plus
3 \*
4 open circle
5 x
6 open square
7 open triangle
8 open star
9 filled triangle
10 filled square
11 filled circle
12 filled star
== ===============
* err = ObitErr error stack
"""
################################################################
# Checks
if not PIsA(plot):
raise TypeError("plot MUST be a Python Obit plot")
#
Obit.PlotDrawSymbol(plot.me, x, y, symbol, err.me)
# end PDrawSymbol
def PDrawPoly (plot, x, y, fill, err):
"""
Draw a Polygon, possibly filled
* plot = Python Plot object
    * x = array of world x-coordinates of the vertices
    * y = array of world y-coordinates of the vertices
    * fill = Fill pattern, plot package dependent;
             values in the range [0,8] are usable
== ===============
0 no fill
1 hatched
2 crosshatched
3 plplot:lines 45 deg downwards
4 plplot:lines 30 deg upwards
5 plplot:lines 30 deg downwards
6 plplot:horizontal/vertical lines crossed
7 plplot:horizontal lines
8 plplot:vertical lines
== ===============
* err = ObitErr error stack
"""
################################################################
# Checks
if not PIsA(plot):
raise TypeError("plot MUST be a Python Obit plot")
#
scale = 1.0
Obit.PlotDrawPoly(plot.me, len(x), x, y, fill, scale, err.me)
# end PDrawPoly
def PGetList (plot):
"""
Return the member InfoList
returns InfoList
* plot = Python Obit Plot object
"""
################################################################
# Checks
if not PIsA(plot):
raise TypeError("plot MUST be a Python Obit plot")
#
out = InfoList.InfoList()
out.me = Obit.PlotGetList(plot.me)
return out
# end PGetList
def PIsA (plot):
"""
    Tells if the input is a Python ObitPlot
    Returns True or False
    * plot = Python Obit Plot to test
"""
################################################################
# Checks
if not isinstance(plot, OPlot):
return False
return Obit.OPlotIsA(plot.me)!=0
# end PIsA
| 2.59375 | 3 |
code.py | VinayDhurwe/python-mini-challenges | 0 | 12790464 | # --------------
#Code starts here
import sys
def palindrome(num):
    # return the smallest palindrome strictly greater than num
    for i in range(num + 1, sys.maxsize):
        if str(i) == str(i)[::-1]:
            return i
palindrome(123)
# --------------
#Code starts here
from collections import Counter
def a_scramble(str_1, str_2):
    # True if str_2 can be spelled from the letters of str_1 (with multiplicity)
    list_str1 = Counter(str_1.lower())
    list_str2 = Counter(str_2.lower())
    if not list_str2 - list_str1:
        return True
    else:
        return False
a_scramble("<NAME>", "Voldemort")
# --------------
#Code starts here
import math
def isPerfectSquare(x):
    s = int(math.sqrt(x))
    return s*s == x
def check_fib(num):
    # num is a Fibonacci number iff 5*num^2 + 4 or 5*num^2 - 4 is a perfect square
    return isPerfectSquare(5*num*num + 4) or isPerfectSquare(5*num*num - 4)
check_fib(377)
# --------------
#Code starts here
def compress(word):
string = word.lower()
res = ""
count = 1
res += string[0]
for i in range(len(string)-1):
if(string[i]==string[i+1]):
count+=1
else:
if(count >= 1):
res += str(count)
res += string[i+1]
count = 1
if(count >= 1):
res += str(count)
return res
compress("abbs")
# --------------
#Code starts here
#Code starts here
from collections import Counter
def k_distinct(string,k):
c = Counter(string.lower())
if k==len(c.keys()):
return True
return False
k_distinct('Messoptamia',8)
k_distinct('SUBBOOKKEEPER',7)
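# --------------
# Hedged sanity checks (illustrative expectations, not part of the original
# challenge file):
assert palindrome(123) == 131            # next palindrome after 123
assert check_fib(377) is True            # 377 is a Fibonacci number
assert compress("abbs") == "a1b2s1"      # run-length encoding; counts always written
assert k_distinct('Messoptamia', 8) is True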
| 3.546875 | 4 |
Longest_Absolute_File_Path.py | thydeyx/LeetCode-Python | 1 | 12790465 | # -*- coding:utf-8 -*-
#
# Author : TangHanYi
# E-mail : <EMAIL>
# Create Date : 2017-01-06 07:09:38 PM
# Last modified : 2017-01-06 07:34:29 PM
# File Name : Longest_Absolute_File_Path.py
# Desc :
class Solution(object):
    def lengthLongestPath(self, inp):
        # path_len maps depth -> length of the directory prefix at that
        # depth, including the trailing '/'; depth 0 has an empty prefix.
        # (The previous stack-based version dropped parent directories and
        # returned 3 instead of 20 on the sample input.)
        path_len = {0: 0}
        ret = 0
        for line in inp.split('\n'):
            name = line.lstrip('\t')
            depth = len(line) - len(name)
            if '.' in name:
                # a file: candidate for the longest absolute path
                ret = max(ret, path_len[depth] + len(name))
            else:
                # a directory: extend the prefix for the next depth
                path_len[depth + 1] = path_len[depth] + len(name) + 1
        return ret
if __name__ == "__main__":
    s = Solution()
    inp = "dir\n\tsubdir1\n\tsubdir2\n\t\tfile.ext"
    print(s.lengthLongestPath(inp))  # expected: 20 ("dir/subdir2/file.ext")
| 3.28125 | 3 |
project/workspace.py | felixsteinke/Motion-Planner | 0 | 12790466 | from utils import open_image, open_greyscale_bmp
from workspace_calc import WorkspaceCalculator
from workspace_view import WorkspaceView
class Workspace:
def __init__(self, app_page, room_name, robot_name):
room_bmp = open_greyscale_bmp(room_name)
robot_bmp = open_greyscale_bmp(robot_name)
robot_png = open_image(robot_name, 'png')
self.__calculator = WorkspaceCalculator(room_bmp, robot_bmp)
self.__view = WorkspaceView(app_page, room_bmp, robot_png)
self.__init_config_xy = [] # point -> [0] = x , [1] = y
self.__goal_config_xy = [] # point -> [0] = x , [1] = y
self.current_position_xy = [] # point -> [0] = x , [1] = y
def bind_click_callback(self, action_ref) -> None:
self.__view.set_click_callback(action_ref)
def is_in_collision(self, x, y) -> bool:
return self.__calculator.is_robot_in_collision(x, y)
def reset(self) -> None:
self.__init_config_xy = []
self.__goal_config_xy = []
self.current_position_xy = []
self.__view.reset()
def set_init_config(self, x, y) -> None:
self.__init_config_xy = [x, y]
self.draw_robot_state(x, y)
def set_goal_config(self, x, y) -> None:
self.__goal_config_xy = [x, y]
self.draw_robot_state(x, y)
def draw_robot_state(self, x, y) -> None:
self.__view.reset()
if self.__init_config_xy:
self.__view.draw_robot(self.__init_config_xy[0], self.__init_config_xy[1])
if self.__goal_config_xy:
self.__view.draw_robot(self.__goal_config_xy[0], self.__goal_config_xy[1])
self.__view.draw_robot(x, y)
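# Hedged usage sketch (assumes 'room' and 'robot' image resources resolvable
# by utils.open_image/open_greyscale_bmp, and a Tk-style container app_page):
#   ws = Workspace(app_page, 'room', 'robot')
#   ws.set_init_config(10, 20)       # draw the robot at the start pose
#   ws.set_goal_config(200, 150)     # draw it again at the goal pose
#   if ws.is_in_collision(50, 60):
#       print('configuration (50, 60) collides with the room')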
| 2.421875 | 2 |
DjangoFiles/Customers/migrations/0002_create_tenant_public.py | Nasjoe/Django-Tenant-Example | 0 | 12790467 | # Generated by Django 2.2.13 on 2021-06-08 10:08
import os
from django.db import migrations
def create_premier_tenant(apps, schema_editor):
# We can't import the Person model directly as it may be a newer
# version than this migration expects. We use the historical version.
Client = apps.get_model('Customers', 'Client')
Domain = apps.get_model('Customers', 'Domain')
DNS = os.getenv('DOMAIN')
tenant_public = Client.objects.get_or_create(schema_name='public',
name='Tibillet Public',
paid_until='2200-12-05',
on_trial=False)[0]
# Add one or more domains for the tenant
domaine_seul = Domain.objects.get_or_create(domain=DNS,
tenant=tenant_public,
is_primary=True,
)
domaine_www = Domain.objects.get_or_create(domain=f'www.{DNS}',
tenant=tenant_public,
is_primary=False,
)
return tenant_public, domaine_seul[0], domaine_www[0]
def reverse(apps, schema_editor):
tenant_public, domaine_seul, domaine_www = create_premier_tenant(apps, schema_editor)
tenant_public.delete()
domaine_seul.delete()
domaine_www.delete()
class Migration(migrations.Migration):
dependencies = [
('Customers', '0001_initial'),
]
operations = [
migrations.RunPython(create_premier_tenant, reverse),
]
| 2.125 | 2 |
common/test_input_validation.py | lbozarth/exercise-toes | 4 | 12790468 | from common.input_validation import (
extract_phone_number,
)
def test_extract_phone_number():
assert extract_phone_number('510501622') == None
assert extract_phone_number('5105016227') == '15105016227'
assert extract_phone_number('15105016227') == '15105016227'
assert extract_phone_number('+15105016227') == '15105016227'
assert extract_phone_number('My number is 510 501 6227') == '15105016227'
assert extract_phone_number('My number is (510) 501-6227.') == '15105016227'
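# Hedged reference sketch: this is NOT the real common.input_validation code,
# just one implementation consistent with the assertions above (assumes US
# numbers: strip non-digits, accept 10 or 11 digits, normalize to an
# 11-digit string with a leading '1' country code).
import re
def _extract_phone_number_sketch(text):
    digits = re.sub(r'\D', '', text)
    if len(digits) == 10:
        return '1' + digits
    if len(digits) == 11 and digits.startswith('1'):
        return digits
    return None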
| 2.6875 | 3 |
evaluation/datasets/test_datasets.py | hsiehkl/pdffigures2 | 296 | 12790469 | import unittest
import math
import datasets
from pdffigures_utils import get_num_pages_in_pdf
class TestDataset(unittest.TestCase):
def test_pages_annotated_consistency(self):
for dataset in datasets.DATASETS.values():
dataset = dataset()
pages_annotated = dataset.get_annotated_pages_map()
if pages_annotated is None:
continue
pdf_file_map = dataset.get_pdf_file_map()
annotations = dataset.get_annotations("all")
docs = dataset.get_doc_ids("all")
self.assertEqual(set(docs), pages_annotated.keys())
for doc, pages in pages_annotated.items():
filename = pdf_file_map[doc]
self.assertTrue(len(pages) <= dataset.MAX_PAGES_TO_ANNOTATE)
num_pages = get_num_pages_in_pdf(filename)
self.assertTrue(num_pages >= max(pages) - 1)
expected_pages = math.ceil(num_pages*dataset.PAGE_SAMPLE_PERCENT)
expected_pages = min(expected_pages, dataset.MAX_PAGES_TO_ANNOTATE)
self.assertTrue(len(pages) == expected_pages)
if doc in annotations:
ann = annotations[doc]
self.assertEqual(set(ann["annotated_pages"]), set(pages))
for fig in ann["figures"]:
self.assertTrue(fig.page in pages)
def test_consistency(self):
for dataset in datasets.DATASETS.values():
dataset = dataset()
all_docs = set(dataset.get_doc_ids(datasets.DatasetPartition("all")))
doc_map = dataset.get_pdf_file_map()
self.assertEqual(len(all_docs - doc_map.keys()), 0)
doc_map = dataset.get_color_image_file_map()
if doc_map is not None:
self.assertEqual(len(all_docs - doc_map.keys()), 0)
doc_map = dataset.get_gray_image_file_map()
if doc_map is not None:
self.assertEqual(len(all_docs - doc_map.keys()), 0)
documents = dataset.load_doc_ids(all_docs)
self.assertEqual(all_docs, set([x.doc_id for x in documents]))
for doc in documents:
if doc.color_images is not None and doc.gray_images is not None:
self.assertEqual(doc.gray_images.keys(), doc.color_images.keys())
pages_annotated = doc.pages_annotated
for fig in doc.figures:
self.assertTrue(fig.page in pages_annotated)
self.assertEqual(doc.pdffile.split("/")[-1][:-4], doc.doc_id)
if __name__ == '__main__':
unittest.main()
| 2.75 | 3 |
Lib/fontParts/fontshell/groups.py | sanjaymsh/fontParts | 66 | 12790470 | import defcon
from fontParts.base import BaseGroups
from fontParts.fontshell.base import RBaseObject
class RGroups(RBaseObject, BaseGroups):
wrapClass = defcon.Groups
def _get_side1KerningGroups(self):
return self.naked().getRepresentation("defcon.groups.kerningSide1Groups")
def _get_side2KerningGroups(self):
return self.naked().getRepresentation("defcon.groups.kerningSide2Groups")
def _items(self):
return self.naked().items()
def _contains(self, key):
return key in self.naked()
def _setItem(self, key, value):
self.naked()[key] = list(value)
def _getItem(self, key):
return self.naked()[key]
def _delItem(self, key):
del self.naked()[key]
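# Hedged usage sketch (assumes a defcon-backed fontParts font object `font`;
# the group name and members below are illustrative):
#   font.groups["public.kern1.O"] = ["O", "D", "Q"]
#   print(font.groups.side1KerningGroups)  # derived side-1 kerning groups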
| 2.359375 | 2 |
sample.py | hondasports/awsSample | 0 | 12790471 | # -*- coding: utf-8 -*-
import botocore
import boto3
import io
from datetime import datetime
import s3Uploader
# Refs : https://boto3.readthedocs.io/en/latest/reference/services/s3.html
s3 = boto3.client('s3')
def main():
    # [When adding]
    # Create the bucket if it does not exist,
    # otherwise use the existing one.
    # Check for a duplicate file:
    # if it already exists, delete it and upload the new version;
    # if not, just add it.
    # [When reading]
    # Read the file
# response = s3.delete_bucket( Bucket='bun-chan-bot-images')
# print(response)
# response = s3.create_bucket(
# Bucket='bun-chan-bot-images',
# CreateBucketConfiguration={'LocationConstraint': 'ap-northeast-1'}
# )
# print(response)
# response = None
# response = s3.list_buckets()
    # # Raises an exception if the specified bucket does not exist. Useful as an existence check.
# try:
# response = s3.head_bucket(Bucket='bun-chan-bot-images')
# # response = s3.head_bucket(Bucket='test-lambda-on-java')
# print(response)
# except botocore.exceptions.ClientError as e:
# print('The bucket does not found')
# print(e)
# response = s3.head_bucket(Bucket='bun-chan-bot-images')
# print(response)
# for bucket in response['Buckets']:
# print(bucket.get('Name'))
# if bucket.get('Name') != 'bun-chan-bot-images':
# print('Not Found')
# if isExistBucketFor(bucketName):
# else:
# print('Delet bucket...')
# response = s3.delete_bucket( Bucket='bun-chan-bot-images')
# print(response)
# print('Create bucket...')
# response = s3.create_bucket(
# Bucket='bun-chan-bot-images',
# CreateBucketConfiguration={'LocationConstraint': 'ap-northeast-1'}
# )
bucketName = 'bun-chan-bot-images'
objectName = "image_{name}.jpg".format(name=datetime.now().strftime("%Y%m%d_%H%M%S"))
uploader = s3Uploader.s3Uploader(bucketName, objectName, './image.jpg')
uploader.upload()
if __name__ == '__main__':
main() | 2.359375 | 2 |
majority_judgment/tests.py | roipoussiere/moje | 7 | 12790472 | from django.test import TestCase
from majority_judgment.tools import get_ranking, get_ratings, majority_grade
class MajorityJudgmentTestCase(TestCase):
fixtures = ['election.json']
# def setUp(self):
def test_ranking(self):
election_id = 2
ranking = get_ranking(election_id)
ranking = [candidate.pk for candidate in ranking]
ground_truth = [ 2, 3, 4, 13, 6, 7, 15, 14, 8, 12, 16, 5, 11, 17, 10, 1, 9]
self.assertEqual(ranking, ground_truth)
def test_majority_grade(self):
election_id = 2
ranking = get_ranking(election_id)
# ratings = get_ratings(election_id)
majority_grades = [majority_grade(candidate.ratings) for candidate in ranking]
ground_truth = [0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
self.assertEqual(majority_grades, ground_truth)
| 2.65625 | 3 |
mmdet/core/anchor/__init__.py | MinliangLin/TSD | 454 | 12790473 | from .anchor_generator import AnchorGenerator
from .anchor_target import anchor_inside_flags, anchor_target, images_to_levels, unmap
from .guided_anchor_target import ga_loc_target, ga_shape_target
from .point_generator import PointGenerator
from .point_target import point_target
__all__ = [
"AnchorGenerator",
"anchor_target",
"anchor_inside_flags",
"ga_loc_target",
"ga_shape_target",
"PointGenerator",
"point_target",
"images_to_levels",
"unmap",
]
| 1.304688 | 1 |
MWDF Project/MasterworkDwarfFortress/Utilities/Quickfort/src/qfconvert/xlsx.py | ML-SolInvictus/modified-MWDF | 25 | 12790474 | <reponame>ML-SolInvictus/modified-MWDF
"""Reading and parsing .xlsx format blueprints."""
import re
import zipfile
from xml2obj import xml2obj
from errors import FileError
def read_xlsx_file(filename, sheetid):
"""
Read contents of specified sheet in Excel 2007 (.xlsx) workbook file.
.xlsx files are actually zip files containing xml files.
Returns a 2d list of cell values.
"""
if sheetid is None:
sheetid = 1
else:
sheetid += 1 # sheets are numbered starting from 1 in xlsx files
# Get cell data from specified worksheet.
try:
zf = zipfile.ZipFile(filename)
sheetdata = zf.read('xl/worksheets/sheet%s.xml' % sheetid)
xml = xml2obj(sheetdata)
rows = xml.sheetData.row
except:
raise FileError("Could not read xlsx file %s, worksheet id %s" % (
filename, sheetid - 1))
# Get shared strings xml. Cell values are given as ordinal index
# references into sharedStrings.xml:ssi.si elements, whose string-value
# is found in the node's .t element.
try:
stringdata = zf.read('xl/sharedStrings.xml')
xml = xml2obj(stringdata)
strings = xml.si
except:
raise FileError("Could not parse sharedStrings.xml of xlsx file")
# Map strings to row values and return result
return extract_xlsx_lines(rows, strings)
def extract_xlsx_lines(sheetrows, strings):
"""
Extract cell values into lines; cell values are given as ordinal index
references into sharedStrings.xml:ssi.si elements, whose string-value
is found in the node's .t element.
Returns 2d list of strings (cell values).
"""
lines = []
lastrownum = 0
for row in sheetrows:
rownum = int(row.r)
if rownum > lastrownum + 1: # interpolate missing rows
lines.extend([[]] * (rownum - lastrownum - 1))
lastrownum = rownum
cells = row.c
line = []
lastcolnum = 0
for c in cells:
# get column number
colcode = re.match('^([A-Z]+)', str(c.r)).group(1)
colnum = colcode_to_colnum(colcode)
if colnum > lastcolnum + 1: # interpolate missing columns
line.extend([''] * (colnum - lastcolnum - 1))
lastcolnum = colnum
# add cell value looked-up from shared strings
line.append(str(
'' if c.v is None or c.v == 'd' else strings[int(c.v)].t
))
lines.append(line)
return lines
def read_xlsx_sheet_names(filename):
"""Get a list of sheets and their ids from xlsx file."""
try:
zf = zipfile.ZipFile(filename)
sheetsdata = zf.read('xl/workbook.xml')
xml = xml2obj(sheetsdata)
sheets = xml.sheets.sheet
except:
raise FileError("Could not open '%s' for sheet listing." % filename)
output = []
for sheet in sheets:
        m = re.match(r'rId(\d+)', sheet.r_id)
if not m:
raise FileError("Could not read list of xlsx's worksheets.")
output.append((sheet.name, int(m.group(1)) - 1))
return output
def colcode_to_colnum(colcode):
"""Convert Excel style column ids (A, BB, XFD, ...) to a column number."""
if len(colcode) == 0:
return 0
else:
return (ord(colcode[-1]) - ord('A') + 1) + \
(26 * colcode_to_colnum(colcode[:-1]))
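# Hedged sanity check (not part of the original module): spot-check the
# Excel column-code conversion.
if __name__ == '__main__':
    assert colcode_to_colnum('A') == 1
    assert colcode_to_colnum('Z') == 26
    assert colcode_to_colnum('AA') == 27
    assert colcode_to_colnum('XFD') == 16384  # last possible xlsx column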
| 2.671875 | 3 |
simphas/play.py | DaDaCheng/mutiagent | 0 | 12790475 | <filename>simphas/play.py
#!/usr/bin/env python3
import logging
import click
import numpy as np
from os.path import abspath, dirname, join
from gym.spaces import Tuple
from mujoco_py import const, MjViewer
from mae_envs.viewer.env_viewer import EnvViewer
from mae_envs.wrappers.multi_agent import JoinMultiAgentActions
from mujoco_worldgen.util.envs import examine_env, load_env
from mujoco_worldgen.util.types import extract_matching_arguments
from mujoco_worldgen.util.parse_arguments import parse_arguments
from runpy import run_path
from mae_envs.modules.util import (uniform_placement, center_placement,
uniform_placement_middle)
from gym.spaces import Box, MultiDiscrete, Discrete
#from simphas.MRL import mpolicy
#import gym
#from RL_brain_2 import PolicyGradient
from RL_brain_3 import PolicyGradientAgent
import matplotlib.pyplot as plt
def edge_punish(x,y,l=0.2,p=3.53,w=0):
    # Penalty (weighted by w) for being within distance l of the edge of
    # the square arena of side p.
    xx=0.0
    if (np.abs(x-0)<l) | (np.abs(x-p)<l):
        xx = xx + 1.0
    elif (np.abs(y-0)<l) | (np.abs(y-p)<l):
        xx = xx + 1.0
    return w*xx*1.0
def matdis(n, obs_x):
    # Pairwise Euclidean distance matrix between the (x, y) positions of n agents
    dism = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            if i != j:
                dism[i, j] = np.sqrt(np.sum((obs_x[i, :2] - obs_x[j, :2])**2))
    return dism
def matmas(n,mas):
    # Expand the per-agent mask rows (own column omitted, shape (n, n-1))
    # into a full n x n boolean matrix with False on the diagonal
    matm = np.empty([n,n],dtype= bool)
    for i in range(n):
        for j in range(n):
            if i > j:
                matm[i, j] = mas[i,j]
            elif i < j:
                matm[i, j] = mas[i, j-1]
            else:
                matm[i, j] = False
    return matm
def game_rew(n,n_seekers, dism, matm, thr=1.0):
    # Count agents that are both mask-visible and within distance thr of
    # at least one of the last n_seekers agents (the seekers)
    return np.sum( (np.sum ( ((dism < np.ones((n,n))*thr) & (matm))[-n_seekers:], axis=0)>0))
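def _matdis_sanity_check():
    # Hedged sanity check (not part of the original script): agents at
    # (0, 0) and (3, 4) should be exactly 5.0 apart in the distance matrix.
    pts = np.array([[0.0, 0.0], [3.0, 4.0]])
    assert np.isclose(matdis(2, pts)[0, 1], 5.0)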
kwargs = {}
env_name = 'mae_envs/envs/mybase.py'
display = True
n_agents= 2
n_seekers=1
n_hiders=1
episode=350
n_episode=10000
kwargs.update({
'n_agents': n_agents,
'n_seekers': n_seekers,
'n_hiders': n_hiders,
'n_boxes':0,
'cone_angle': 2 * np.pi,
#'n_substeps' : 1
})
module = run_path(env_name)
make_env = module["make_env"]
args_to_pass, args_remaining = extract_matching_arguments(make_env, kwargs)
env = make_env(**args_to_pass)
env.reset()
env_viewer = EnvViewer(env)
rhlist=[]
rslist=[]
def main(sk=None,hd=None, output='output',speed=1,vlag=0):
'''
RL = mpolicy(
n_actions=9,
n_features=8,
#n_features=4,
learning_rate=0.01,
reward_decay=0.9,
units=30
# output_graph=True,
)
'''
'''
Hider = PolicyGradient(
n_actions=9,
n_features=4,
learning_rate=0.01,
reward_decay=0.99,
policy_name=Hpolicy_name
# output_graph=True,
)
Seeker = PolicyGradient(
n_actions=9,
n_features=4,
learning_rate=0.01,
reward_decay=0.99,
policy_name=Spolicy_name
# output_graph=True,
)
'''
if vlag == 0:
Seeker=PolicyGradientAgent(0.001,[8],n_actions=9,layer1_size=20,layer2_size=10)
Hider = PolicyGradientAgent(0.001, [8], n_actions=9, layer1_size=20, layer2_size=10)
else:
Seeker=sk
Hider=hd
a=[]
rs=[]
rh=[]
for ii in range(n_episode):
env_viewer.env_reset()
sampleaction = np.array([[5, 5, 5], [5, 5, 5]])
action = {'action_movement': sampleaction}
obs, rew, down, _ = env_viewer.step(action)
observation = np.array([obs['observation_self'][0][0], obs['observation_self'][0][1],obs['observation_self'][0][4],obs['observation_self'][0][5],obs['observation_self'][1][0], obs['observation_self'][1][1], obs['observation_self'][1][4],obs['observation_self'][1][5]])
#observation = np.array([obs['observation_self'][1][0], obs['observation_self'][1][1],obs['observation_self'][1][4],obs['observation_self'][1][5]])
for i in range(episode):
action_Seeker = Seeker.choose_action(observation)
action_Hider = Hider.choose_action(observation)
#print(action_Seeker)
if np.random.rand()>0.95:
action_Hider=np.random.randint(9)
h1=(action_Hider//3-1)*1+5
h2 = (action_Hider%3 - 1) * 1 + 5
#h1,h2=5,5
#print(action)
if np.random.rand()>0.95:
action_Seeker=np.random.randint(9)
s1=(action_Seeker//3-1)*speed+5
s2=(action_Seeker%3-1)*speed+5
ac = {'action_movement': np.array([[h1, h2, 5], [s1, s2, 5]])}
#print(ac)
obs_, reward, done, info = env_viewer.step(ac, show=False)
observation_ = np.array([obs_['observation_self'][0][0], obs_['observation_self'][0][1],obs_['observation_self'][0][4], obs_['observation_self'][0][5],obs_['observation_self'][1][0], obs_['observation_self'][1][1], obs_['observation_self'][1][4], obs_['observation_self'][1][5]])
# if not obs_['mask_aa_obs'][1][0]:
# rew= 5.0-( np.sqrt((observation_[4] - observation_[0]) ** 2 + (observation_[5] - observation_[1]) ** 2)*5)
#else:
# rew= 5.0-( np.sqrt((observation_[4] - observation_[0]) ** 2 + (observation_[5] - observation_[1]) ** 2)*5)
rew=1.0/np.sqrt((observation_[4] - observation_[0]) ** 2 + (observation_[5] - observation_[1]) ** 2)*5-3
#print(observation_)
#rrew=3-edge_punish(observation_[0],observation_[1])
#print(observation_[0],observation_[1])
#Seeker.store_transition(observation[-4:], action_Seeker, +rew)
Seeker.store_rewards(rew-edge_punish(observation_[4],observation_[5]))
Hider.store_rewards(-rew-edge_punish(observation_[0],observation_[1]))
#print(-rrew)
#Hider.store_transition(observation[4:], action_Hider, rrew)
#print(50-edge_punish(observation_[0],observation_[1]))
observation = observation_
print(ii)
#print(np.mean(Seeker.reward_memory[0]))
rs.append(np.mean(Seeker.reward_memory))
rh.append(np.mean(Hider.reward_memory))
if ii>(n_episode-201):
#a.append(Hider.ep_rs)
a.append(Seeker.reward_memory)
if vlag == 0:
Hider.learn()
Seeker.learn()
else:
Seeker.reward_memory=[]
Seeker.action_memory = []
Hider.reward_memory = []
Hider.action_memory = []
##########
np.save(output+'.npy', a)
rhlist.append(rh)
rslist.append(rs)
#np.save('SGLDS'+output + '.npy', rs)
#np.save('RMS' + output + '.npy', rh)
#print(ii,R)
return Seeker,Hider
if __name__ == '__main__':
#S2, H2 = main(output='2', speed=2)
#S3, H3 = main(output='3', speed=3)
#S4, H4 = main(output='4', speed=4)
#S1, H1 = main(output='1', speed=1)
S1, H1 = main(output='1', speed=4)
#import pickle
#pickle_file = open('objS2.pkl', 'wb')
#pickle.dump(S2, pickle_file)
#pickle_file.close()
#pickle_file = open('objH2.pkl', 'wb')
#pickle.dump(H2, pickle_file)
#pickle_file.close()
#main(sk=S1, hd=H4, output='41', speed=1, vlag=1)
#main(sk=S1, hd=H3, output='31', speed=1, vlag=1)
#main(sk=S1, hd=H2, output='21', speed=1, vlag=1)
#test()
np.save('SGLDS.npy', rslist)
np.save('SGLDH.npy', rhlist)
| 2.203125 | 2 |
tests/test_davidlebovitz.py | gloriousDan/recipe-scrapers | 0 | 12790476 | <gh_stars>0
from recipe_scrapers.davidlebovitz import DavidLebovitz
from tests import ScraperTest
class TestDavidLebovitzScraper(ScraperTest):
scraper_class = DavidLebovitz
def test_host(self):
self.assertEqual("davidlebovitz.com", self.harvester_class.host())
def test_author(self):
self.assertEqual("David", self.harvester_class.author())
def test_title(self):
self.assertEqual("Faux Gras", self.harvester_class.title())
def test_total_time(self):
self.assertEqual(None, self.harvester_class.total_time())
def test_yields(self):
self.assertEqual("0 servings", self.harvester_class.yields())
def test_image(self):
self.assertEqual(
"https://www.davidlebovitz.com/wp-content/uploads/2015/06/Faux-Gras-Lentil-Pate-8.jpg",
self.harvester_class.image(),
)
def test_ingredients(self):
self.assertEqual(
[
"12 medium-sized (100g, about 1 cup) button mushrooms",
"2 tablespoons olive oil",
"2 tablespoons butter (salted or unsalted)",
"1 small onion (peeled and diced)",
"2 cloves garlic (peeled and minced)",
"2 cups (400g) cooked green lentils",
"1 cup (140g) toasted walnuts or pecans",
"2 tablespoons freshly squeezed lemon juice",
"1 tablespoon soy sauce or tamari",
"2 teaspoons minced fresh rosemary",
"2 teaspoons fresh thyme (minced)",
"2 tablespoons fresh sage or flat leaf parsley",
"optional: 2 teaspoons Cognac or brandy",
"1 teaspoon brown sugar",
"1/8 teaspoon cayenne pepper",
"salt and freshly ground black pepper",
],
self.harvester_class.ingredients(),
)
def test_instructions(self):
self.assertEqual(
"""Wipe the mushrooms clean. Slice off a bit of the stem end (the funky parts) and slice them. Heat the olive oil and butter in a skillet or wide saucepan. Add the onions and garlic, and cook, stirring frequently, until the onions become translucent, 5 to 6 minutes. Add the mushrooms and cook, stirring occasionally, until they’re soft and cooked through, another 5 to 8 minutes. Remove from heat.\nIn a food processor, combine the cooked lentils, nuts, lemon juice, soy sauce, rosemary, thyme, sage or parsley, Cognac (if using), brown sugar, and cayenne. Scrape in the cooked mushroom mixture and process until completely smooth. Taste, and add salt, pepper, and additional cognac, soy sauce, or lemon juice, if it needs balancing.\nScrape the pâté into a small serving bowl and refrigerate for a few hours, until firm.""",
self.harvester_class.instructions(),
)
def test_description(self):
self.assertEqual(
"""Adapted from Très Green, Très Clean, Très Chic by <NAME> Lentils double in volume when cooked, so 1 cup (160g) of dried lentils will yield close to the correct amount. They usually take about 20 to 30 minutes to cook until soft, but check the directions on the package for specific guidelines. If avoiding gluten, use tamari instead of soy sauce. For a vegan version, replace the butter with the same quantity of olive oil, for a total of 1/4 cup (60ml) of olive oil. The cognac or brandy is optional, but it does give the faux gras a little je ne sais quoi.""",
self.harvester_class.description(),
)
| 2.78125 | 3 |
zvt/factors/technical_factor.py | manstiilin/zvt | 1 | 12790477 | from typing import List, Union
import pandas as pd
from zvdata import IntervalLevel
from zvt.api.common import get_kdata_schema
from zvt.factors.algorithm import MacdTransformer, MaTransformer
from zvt.factors.factor import Factor, Transformer, Accumulator
class TechnicalFactor(Factor):
def __init__(self,
entity_ids: List[str] = None,
entity_type: str = 'stock',
exchanges: List[str] = ['sh', 'sz'],
codes: List[str] = None,
the_timestamp: Union[str, pd.Timestamp] = None,
start_timestamp: Union[str, pd.Timestamp] = None,
end_timestamp: Union[str, pd.Timestamp] = None,
columns: List = None,
filters: List = None,
order: object = None,
limit: int = None,
provider: str = 'joinquant',
level: Union[str, IntervalLevel] = IntervalLevel.LEVEL_1DAY,
category_field: str = 'entity_id',
time_field: str = 'timestamp',
computing_window: int = None,
keep_all_timestamp: bool = False,
fill_method: str = 'ffill',
effective_number: int = 10,
transformer: Transformer = MacdTransformer(),
accumulator: Accumulator = None,
persist_factor: bool = False,
dry_run: bool = True) -> None:
self.data_schema = get_kdata_schema(entity_type, level=level)
if transformer:
self.indicator_cols = transformer.indicator_cols
if not columns:
columns = ['id', 'entity_id', 'timestamp', 'level', 'open', 'close', 'high', 'low']
super().__init__(self.data_schema, entity_ids, entity_type, exchanges, codes, the_timestamp, start_timestamp,
end_timestamp, columns, filters, order, limit, provider, level, category_field, time_field,
computing_window, keep_all_timestamp, fill_method, effective_number,
transformer, accumulator, persist_factor, dry_run)
def __json__(self):
result = super().__json__()
result['indicator_cols'] = self.indicator_cols
return result
for_json = __json__ # supported by simplejson
class BullFactor(TechnicalFactor):
def __init__(self,
entity_ids: List[str] = None,
entity_type: str = 'stock',
exchanges: List[str] = ['sh', 'sz'],
codes: List[str] = None,
the_timestamp: Union[str, pd.Timestamp] = None,
start_timestamp: Union[str, pd.Timestamp] = None,
end_timestamp: Union[str, pd.Timestamp] = None,
columns: List = None,
filters: List = None,
order: object = None,
limit: int = None,
provider: str = 'joinquant',
level: Union[str, IntervalLevel] = IntervalLevel.LEVEL_1DAY,
category_field: str = 'entity_id',
time_field: str = 'timestamp',
persist_factor: bool = False, dry_run: bool = False) -> None:
transformer = MacdTransformer()
super().__init__(entity_ids, entity_type, exchanges, codes, the_timestamp, start_timestamp, end_timestamp,
columns, filters, order, limit, provider, level, category_field, time_field, 26,
False, None, None, transformer, None, persist_factor, dry_run)
def do_compute(self):
super().do_compute()
s = (self.factor_df['diff'] > 0) & (self.factor_df['dea'] > 0)
self.result_df = s.to_frame(name='score')
if __name__ == '__main__':
factor = TechnicalFactor(entity_type='stock',
codes=['000338'],
start_timestamp='2019-01-01',
end_timestamp='2019-06-10',
level=IntervalLevel.LEVEL_1DAY,
provider='joinquant',
computing_window=26,
transformer=MacdTransformer())
print(factor.get_factor_df().tail())
factor.move_on(to_timestamp='2019-06-17')
diff = factor.get_factor_df()['diff']
dea = factor.get_factor_df()['dea']
macd = factor.get_factor_df()['macd']
assert round(diff.loc[('stock_sz_000338', '2019-06-17')], 2) == 0.06
assert round(dea.loc[('stock_sz_000338', '2019-06-17')], 2) == -0.03
assert round(macd.loc[('stock_sz_000338', '2019-06-17')], 2) == 0.19
| 2.0625 | 2 |
backend/api/services/throttling.py | ferdn4ndo/infotrem | 0 | 12790478 | from django.contrib.auth.models import AnonymousUser
from rest_framework.throttling import SimpleRateThrottle
class BaseRateThrottle(SimpleRateThrottle):
scope = 'baseThrottle'
class Meta:
abstract = True
def get_cache_key(self, request, view):
return self.cache_format % {
'scope': self.scope,
'ident': self.get_ident(request)
}
def allow_request(self, request, view):
"""
Implement the check to see if the request should be throttled.
On success calls `throttle_success`.
On failure calls `throttle_failure`.
"""
# Bypass CORS OPTIONS requests
if request.method == "OPTIONS":
return True
if self.rate is None:
return True
self.key = self.get_cache_key(request, view)
if self.key is None:
return True
self.history = self.cache.get(self.key, [])
self.now = self.timer()
while len(self.history) and self.history[-1] <= self.now - self.duration:
self.history.pop()
if len(self.history) >= self.num_requests:
return self.throttle_failure()
return self.throttle_success(request)
def throttle_success(self, request):
"""
Inserts the current request's timestamp along with the key into the cache.
"""
# if type(request.user) is not AnonymousUser:
# self.history.insert(0, request.user.id)
self.history.insert(0, self.now)
self.cache.set(self.key, self.history, self.duration)
return True
class UserLoginRateThrottle(BaseRateThrottle):
scope = 'loginAttempts'
class ContactRateThrottle(BaseRateThrottle):
scope = 'contact'
class EmailValidationRateThrottle(BaseRateThrottle):
scope = 'emailValidation'
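# Hedged configuration sketch (the rates below are illustrative, not the
# project's real values): SimpleRateThrottle resolves each throttle scope
# through Django settings, e.g.
# REST_FRAMEWORK = {
#     'DEFAULT_THROTTLE_RATES': {
#         'baseThrottle': '60/min',
#         'loginAttempts': '5/min',
#         'contact': '10/hour',
#         'emailValidation': '3/hour',
#     }
# }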
| 2.015625 | 2 |
a3/clean_data.py | WendyH1108/IntSys-Education | 0 | 12790479 | <gh_stars>0
import pickle
import numpy as np
from PIL import Image, ExifTags,ImageOps
def load_pickle_file(path_to_file):
"""
Loads the data from a pickle file and returns that object
"""
## Look up: https://docs.python.org/3/library/pickle.html
## The code should look something like this:
# with open(path_to_file, 'rb') as f:
# obj = pickle....
## We will let you figure out which pickle operation to use
with open(path_to_file,'rb')as f:
new_data=pickle.load(f)
return new_data
## You should define functions to resize, rotate and crop images
## below. You can perform these operations either on numpy arrays
## or on PIL images (read docs: https://pillow.readthedocs.io/en/stable/reference/Image.html)
def resize(image, height, width):
newSize = (width, height)
image = image.resize(newSize)
return image
def crop(image, left, top, right, bottom):
image = image.crop((left, top, right, bottom))
return image
## We want you to clean the data, and then create a train and val folder inside
## the data folder (so your data folder in a3/ should look like: )
# data/
# train/
# val/
## Inside the train and val folders, you will have to dump the CLEANED images and
## labels. You can dump images/annotations in a pickle file (because our data loader
## expects the path to a pickle file.)
## Most code written in this file will be DIY. It's important that you get to practice
## cleaning datasets and visualising them, so we purposely won't give you too much starter
## code. It'll be up to you to look up documentation and understand different Python modules.
## That being said, the task shouldn't be too hard, so we won't send you down any rabbit hole.
# function to clean the data automatically
def auto_op(my_file, size):
dataObject = []
for im in my_file:
width, height = im.size
if width != height:
padding = abs(width-height)/2
if width>height:
im = crop(im, padding, 0, width-padding, height) #make new file
else:
im = crop(im, 0, padding, width, height-padding)
if width != size:
im = resize(im, size, size)
dataObject.append(im)
return dataObject
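def _auto_op_sanity_check():
    # Hedged sanity check (illustrative, not part of the assignment code):
    # a 40x28 image should come back from auto_op as a 28x28 centered square.
    im = Image.new('L', (40, 28))
    assert auto_op([im], 28)[0].size == (28, 28)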
# function to clean the data by hand
def byhand_op(data_list, label_list):
pre_data = []
pre_label = []
pre_data.append(data_list[0].rotate(270))
pre_label.append(label_list[0])
pre_data.append(data_list[1].rotate(90))
pre_label.append(0)
pre_data.append(data_list[2].rotate(180))
pre_label.append(label_list[2])
pre_data.append(data_list[3].rotate(180))
pre_label.append(3)
pre_data.append(data_list[4])
pre_label.append(3)
pre_data.append(data_list[5].rotate(180))
pre_label.append(2)
pre_data.append(data_list[6].rotate(315))
pre_label.append(7)
pre_data.append(data_list[7].rotate(180))
pre_label.append(label_list[7])
pre_data.append(data_list[8].rotate(315))
pre_label.append(5)
pre_data.append(data_list[9].rotate(315))
pre_label.append(5)
pre_data.append(data_list[10].rotate(315))
pre_label.append(label_list[10])
pre_data.append(data_list[11].rotate(90))
pre_label.append(9)
pre_data.append(data_list[12].rotate(180))
pre_label.append(label_list[12])
pre_data.append(data_list[13].rotate(90))
pre_label.append(7)
pre_data.append(data_list[14].rotate(180))
pre_label.append(7)
pre_data.append(data_list[15].rotate(315))
pre_label.append(9)
pre_data.append(data_list[16].rotate(315))
pre_label.append(label_list[16])
pre_data.append(data_list[17].rotate(315))
pre_label.append(0)
pre_data.append(data_list[18].rotate(90))
pre_label.append(2)
pre_data.append(data_list[19].rotate(90))
pre_label.append(label_list[19])
pre_data.append(data_list[20].rotate(315))
pre_label.append(3)
pre_data.append(data_list[21].rotate(315))
pre_label.append(1)
pre_data.append(data_list[22].rotate(180))
pre_label.append(4)
pre_data.append(data_list[23].rotate(135))
pre_label.append(8)
pre_data.append(data_list[24].rotate(90))
pre_label.append(2)
pre_data.append(data_list[25].rotate(90))
pre_label.append(3)
pre_data.append(data_list[26].rotate(90))
pre_label.append(0)
pre_data.append(data_list[27].rotate(180))
pre_label.append(2)
pre_data.append(data_list[28].rotate(315))
pre_label.append(4)
pre_data.append(data_list[29].rotate(90))
pre_label.append(2)
pre_data.append(data_list[30].rotate(90))
pre_label.append(label_list[30])
pre_data.append(data_list[31].rotate(90))
pre_label.append(label_list[31])
pre_data.append(data_list[32].rotate(315))
pre_label.append(label_list[32])
pre_data.append(data_list[33].rotate(315))
pre_label.append(label_list[33])
pre_data.append(data_list[34].rotate(90))
pre_label.append(label_list[34])
pre_data.append(data_list[35].rotate(270))
pre_label.append(8)
pre_data.append(data_list[36].rotate(315))
pre_label.append(9)
pre_data.append(data_list[37].rotate(180))
pre_label.append(2)
pre_data.append(data_list[38].rotate(270))
pre_label.append(1)
pre_data.append(data_list[39].rotate(270))
pre_label.append(6)
pre_data.append(data_list[40].rotate(315))
pre_label.append(label_list[40])
pre_data.append(data_list[41].rotate(180))
pre_label.append(7)
pre_data.append(data_list[42].rotate(90))
pre_label.append(9)
pre_data.append(data_list[43].rotate(270))
pre_label.append(5)
pre_data.append(data_list[44].rotate(270))
pre_label.append(9)
pre_data.append(data_list[45].rotate(180))
pre_label.append(label_list[45])
pre_data.append(data_list[46].rotate(180))
pre_label.append(7)
pre_data.append(data_list[47].rotate(270))
pre_label.append(label_list[47])
pre_data.append(data_list[48].rotate(180))
pre_label.append(0)
pre_data.append(data_list[49].rotate(315))
pre_label.append(label_list[49])
pre_data.append(data_list[50].rotate(90))
pre_label.append(label_list[50])
pre_data.append(data_list[51].rotate(90))
pre_label.append(3)
pre_data.append(data_list[52].rotate(180))
pre_label.append(label_list[52])
pre_data.append(data_list[53].rotate(180))
pre_label.append(2)
pre_data.append(data_list[54].rotate(180))
pre_label.append(label_list[54])
pre_data.append(data_list[55].rotate(90))
pre_label.append(0)
pre_data.append(data_list[56].rotate(315))
pre_label.append(label_list[56])
pre_data.append(data_list[57].rotate(180))
pre_label.append(8)
pre_data.append(data_list[58].rotate(90))
pre_label.append(label_list[58])
pre_data.append(data_list[59].rotate(90))
pre_label.append(label_list[59])
return pre_data, pre_label
if __name__ == "__main__":
## Running this script should read the input images.pkl and labels.pkl and clean the data
## and store cleaned data into the data/train and data/val folders
## To correct rotated images and add missing labels, you might want to prompt the terminal
## for input, so that you can input the angle and the missing label
## Remember, the first 60 images are rotated, and might contain missing labels.
#clean the data for first 60 images by hand
data_list = load_pickle_file('/Users/wendyyyy/Cornell/CDS/IntSys-Education-master/a3/data/data/images.pkl')
label_list = load_pickle_file('/Users/wendyyyy/Cornell/CDS/IntSys-Education-master/a3/data/data/labels.pkl')
pre_data, pre_label = byhand_op(data_list, label_list)
#auto process the images after 60
new_data_list = data_list[60:]
new_label_list = label_list[60:]
new_data_list = auto_op(new_data_list, 28)
# combine and dump two parts into corresponding file
new_data = open('/Users/wendyyyy/Cornell/CDS/IntSys-Education-master/a3/data/data/cleaned_data.pkl', 'wb')
new_label = open('/Users/wendyyyy/Cornell/CDS/IntSys-Education-master/a3/data/data/cleaned_label.pkl', 'wb')
pickle.dump(pre_data+new_data_list,new_data)
pickle.dump(pre_label+new_label_list,new_label)
new_data.close()
new_label.close()
| 3.65625 | 4 |
kevin/tests/leet/test_is_anagram.py | kalyons11/kevin | 1 | 12790480 | <reponame>kalyons11/kevin<filename>kevin/tests/leet/test_is_anagram.py
"""
https://leetcode.com/explore/challenge/card/february-leetcoding-challenge-2021/585/week-2-february-8th-february-14th/3636/
"""
from unittest import TestCase
from kevin.leet.is_anagram import Solution
class TestIsAnagram(TestCase):
def _base_test_is_anagram(self, s: str, t: str, expected: bool):
sol = Solution()
actual = sol.is_anagram(s, t)
assert expected == actual, (expected, actual)
def test_is_anagram_easy(self):
self._base_test_is_anagram('anagram', 'nagaram', True)
def test_is_anagram_easy_false(self):
self._base_test_is_anagram('rat', 'car', False)
def test_is_anagram_easy_false_repeats(self):
self._base_test_is_anagram('aa', 'a', False)
| 3.328125 | 3 |
PartSongSet/__init__.py | jcksnvllxr80/MidiController | 1 | 12790481 | <reponame>jcksnvllxr80/MidiController
from PartSongSet import *
| 1.023438 | 1 |
MMMaker/app/memes/migrations/0002_auto_20200616_1314.py | C4Ution/MMMaker | 9 | 12790482 | # Generated by Django 2.2.12 on 2020-06-16 13:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('memes', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='task',
name='name',
),
migrations.AddField(
model_name='task',
name='result_url',
field=models.URLField(null=True),
),
migrations.AlterField(
model_name='task',
name='status',
field=models.IntegerField(choices=[(10, '서버 작업 대기중'), (20, '서버 작업 시작'), (30, '서버 리소스 다운로드'), (0, '작업 실패')], default=10),
),
migrations.AlterField(
model_name='taskresource',
name='task',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='task_resources', to='memes.Task'),
),
]
| 1.601563 | 2 |
ubb/fop/BusCompani_exam/app_coordinator.py | AlexanderChristian/private_courses | 0 | 12790483 | <gh_stars>0
from tester.tester import Tester
from ui.application import Application
from controller.controller import Controller
from repository.repository import Repository
with open("database.txt", "r") as f:
t = Tester()
repo = Repository()
controller = Controller(repo, f)
app = Application(controller, repo)
app.run() | 1.8125 | 2 |
tinder_scraper.py | lhandal/tinder-bot | 1 | 12790484 | <filename>tinder_scraper.py
# MIT License
#
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from time import sleep
from secrets import username, password
import os
from webptools import webplib as webp
import urllib.request
import datetime
import subprocess
pictures_folder = ""
class TinderBot():
def __init__(self):
self.driver = webdriver.Chrome(ChromeDriverManager().install())
# def passport(self, lat=40.7128, lon=-74.0060):
# params = {
# "latitude": lat,
# "longitude": lon,
# "accuracy": 100}
# self.driver.execute_cdp_cmd("Page.setGeolocationOverride", params)
def login(self):
self.driver.maximize_window()
self.driver.get('https://tinder.com')
sleep(3)
# Click on login button
login_button = self.driver.find_element_by_xpath('//*[@id="content"]/div/div[1]/div/main/div[1]/div/div/header/div[1]/div[2]/div/button')
login_button.click()
sleep(1)
# Log in with Facebook
login_with_facebook_button = self.driver.find_element_by_xpath('//*[@id="modal-manager"]/div/div/div/div/div[3]/span/div[2]/button/span[2]')
login_with_facebook_button.click()
# Marks base window to go back after facebook login window
base_window = self.driver.window_handles[0]
# Selects Facebook login window for interaction
self.driver.switch_to.window(self.driver.window_handles[1])
# Inputs Facebook credentials and submits
email_input_field = self.driver.find_element_by_xpath('//*[@id="email"]')
email_input_field.send_keys(username)
password_input_field = self.driver.find_element_by_xpath('//*[@id="pass"]')
password_input_field.send_keys(password)
facebook_login_button = self.driver.find_element_by_xpath('//*[@id="loginbutton"]')
facebook_login_button.click()
sleep(8)
# Switch back to Tinder main window
self.driver.switch_to.window(base_window)
# Dismiss pop-ups
# Cookies
accept_cookies_button = self.driver.find_element_by_xpath('//*[@id="content"]/div/div[2]/div/div/div[1]/button/span')
accept_cookies_button.click()
# Location
popup_1 = self.driver.find_element_by_xpath('//*[@id="modal-manager"]/div/div/div/div/div[3]/button[1]/span')
popup_1.click()
# Notifications
popup_2 = self.driver.find_element_by_xpath('//*[@id="modal-manager"]/div/div/div/div/div[3]/button[2]')
popup_2.click()
sleep(5)
# Agree to passport mode and select city
# passport_popup_button = self.driver.find_element_by_xpath('//*[@id="modal-manager"]/div/div/div[1]/a')
# passport_popup_button.click()
# sleep(5)
# select_location_accept = self.driver.find_element_by_xpath('//*[@id="content"]/div/div[1]/div/main/div[1]/div/div/div[2]/button')
# select_location_accept.click()
sleep(2)
# Like action
def like(self):
like_btn = self.driver.find_element_by_xpath('//*[@id="content"]/div/div[1]/div/main/div[1]/div/div/div[1]/div/div[2]/div[4]/button')
like_btn.click()
# Dislike action
def dislike(self):
dislike_btn = self.driver.find_element_by_xpath('//*[@id="content"]/div/div[1]/div/main/div[1]/div/div/div[1]/div/div[2]/div[2]/button')
dislike_btn.click()
# Save picture
def get_pic_path(self):
        try:
            pic_path = self.driver.find_element_by_xpath(
                '//*[@id="content"]/div/div[1]/div/main/div[1]/div/div/div[1]/div/div[1]/div[3]/div[1]/div[1]/span[1]/div')
        except Exception:
            pic_path = self.driver.find_element_by_xpath(
                '//*[@id="content"]/div/div[1]/div/main/div[1]/div/div/div[1]/div/div[1]/div[3]/div[1]/div[1]/div/div[1]/div/div')
        # The style attribute holds `background-image: url("...")`; take the quoted URL.
        pic_url = pic_path.get_attribute('style').split('"')[1]
return pic_url
def download_tinder_jpeg(self, file_name):
path = pictures_folder
        try:
            pic_path = self.driver.find_element_by_xpath(
                '//*[@id="content"]/div/div[1]/div/main/div[1]/div/div/div[1]/div/div[1]/div[3]/div[1]/div[1]/span[1]/div')
        except Exception:
            pic_path = self.driver.find_element_by_xpath(
                '//*[@id="content"]/div/div[1]/div/main/div[1]/div/div/div[1]/div/div[1]/div[3]/div[1]/div[1]/div/div[1]/div/div')
        # The style attribute holds `background-image: url("...")`; take the quoted URL.
        pic_url = pic_path.get_attribute('style').split('"')[1]
full_path = path + '/' + file_name + '.webp'
urllib.request.urlretrieve(pic_url, full_path)
print('Photo downloaded...')
decoder = webp.dwebp(full_path, full_path[:-5] + ".jpg", "-o")
print("Converting to jpeg...")
print(decoder['stderr'])
os.remove(full_path)
print("WebP file removed!")
# Auto-swipe right
def auto_swipe(self):
while True:
sleep(1)
try:
pic_id = datetime.datetime.now().strftime('%d%H%M%S%f')
print(pic_id)
try:
self.download_tinder_jpeg(pic_id)
self.dislike()
except IndexError:
self.dislike()
except Exception:
print('Auto_swipe exception...')
try:
self.close_popup()
except Exception:
self.close_match()
# Close offer pop-up
def close_popup(self):
popup_3 = self.driver.find_element_by_xpath('//*[@id="modal-manager"]/div/div/div[2]/button[2]')
popup_3.click()
# Close match pop-up
def close_match(self):
match_popup = self.driver.find_element_by_xpath('//*[@id="modal-manager-canvas"]/div/div/div[1]/div/div[3]/a')
match_popup.click()
bot = TinderBot()
logged_in = False
while not logged_in:
try:
bot.login()
logged_in = True
break
except:
bot.driver.quit()
bot = TinderBot()
logged_in = False
sleep(5)
# bot.passport()
bot.auto_swipe()
# subprocess.call(['osascript', '-e', 'tell application "Chrome" to quit'])
# os._exit(0)
| 2.234375 | 2 |
backend/custom_models/migrations/0004_auto_20190529_0846.py | code-for-canada/django-nginx-reactjs-docker | 3 | 12790485 | <reponame>code-for-canada/django-nginx-reactjs-docker
# Generated by Django 2.1.7 on 2019-05-29 12:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('custom_models', '0003_auto_20190528_1156'),
]
operations = [
migrations.AlterField(
model_name='itemtext',
name='text_detail',
field=models.CharField(max_length=3000),
),
]
| 1.164063 | 1 |
app/api/v1/models/__init__.py | lprichar/electionguard-api-python | 19 | 12790486 | from .auth import *
from .ballot import *
from .base import *
from .decrypt import *
from .encrypt import *
from .election import *
from .guardian import *
from .key_ceremony import *
from .key_guardian import *
from .manifest import *
from .tally import *
from .tally_decrypt import *
from .user import *
| 1.070313 | 1 |
demos/instance_occlsegm/examples/instance_occlsegm/panoptic_occlusion_segmentation/view_dataset.py | pazeshun/jsk_apc | 0 | 12790487 | <reponame>pazeshun/jsk_apc
#!/usr/bin/env python
import argparse
from instance_occlsegm_lib.contrib import instance_occlsegm
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
'--split',
choices=['train', 'test'],
default='train',
help='dataset split',
)
parser.add_argument(
'--augmentation',
action='store_true',
help='do augmentation',
)
args = parser.parse_args()
data = instance_occlsegm.datasets.PanopticOcclusionSegmentationDataset(
args.split, augmentation=args.augmentation
)
instance_occlsegm.datasets.view_panoptic_occlusion_segmentation_dataset(
data
)
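# Example invocation (assumed, based on the argparse options above):
#   python view_dataset.py --split test --augmentation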
| 2 | 2 |
simulation/topology.py | uzum/cran-simulator | 0 | 12790488 | from entities.remote_radio_head import RemoteRadioHead
from entities.hypervisor import Hypervisor
from entities.baseband_unit import BasebandUnit
from entities.switch import Switch
from forwarding.forwarding import Forwarding
class StatHistory(object):
history = {}
    @staticmethod
    def get(key, current):
        # Returns the change since the previous call for `key`; the first
        # call for a given key returns `current` unchanged.
        if (key in StatHistory.history):
            value = current - StatHistory.history[key]
            StatHistory.history[key] = current
            return value
        StatHistory.history[key] = current
        return current
class Topology(object):
def __init__(self, env, configuration):
self.env = env
self.forwarding = Forwarding(self.env, self)
self.rrhs = []
self.hypervisors = []
self.external_switch = None
self.stat_history = {}
self.total_migrations = 0
self.setup(configuration)
def update_load(self, rrh_id, arrival_rate = None, packet_mean = None, packet_dev = None):
for rrh in self.rrhs:
if (rrh.id == rrh_id):
if arrival_rate is not None:
rrh.set_arrival_rate(arrival_rate)
if packet_mean is not None:
rrh.set_packet_mean(packet_mean)
if packet_dev is not None:
rrh.set_packet_dev(packet_dev)
def migrate(self, bbu_id, target_hypervisor_id):
        target_hypervisor = next((hv for hv in self.hypervisors if hv.id == target_hypervisor_id), None)
        if (target_hypervisor is None):
            raise Exception("Target hypervisor not found with the given id")
for hypervisor in self.hypervisors:
subject_bbu = hypervisor.find_baseband_unit(bbu_id)
if (subject_bbu is not None and hypervisor.id != target_hypervisor.id):
hypervisor.remove_baseband_unit(subject_bbu)
target_hypervisor.add_baseband_unit(subject_bbu)
self.total_migrations += 1
def get_cluster_load(self, cluster):
load = 0
for rrh in self.rrhs:
mapping = self.forwarding.get_mapping(rrh.id)
for bbu in cluster.baseband_units:
if (bbu.id in mapping):
load += (rrh.arrival_rate * rrh.packet_mean)
# break the loop once we found a single transmission
break
return load
def get_common_load(self, bbu_x, bbu_y):
if (bbu_x.id == bbu_y.id): return 0
load = 0
for rrh in self.rrhs:
mapping = self.forwarding.get_mapping(rrh.id)
if (bbu_x.id in mapping and bbu_y.id in mapping):
load += (rrh.arrival_rate * rrh.packet_mean)
return load
def get_transmission_cost(self):
return StatHistory.get('transmission_cost', self.forwarding.get_transmission_cost())
def get_migration_count(self):
return StatHistory.get('migration_count', self.total_migrations)
def get_current_load(self):
total = 0
for rrh in self.rrhs:
total += (rrh.arrival_rate * rrh.packet_mean * len(self.forwarding.get_mapping(rrh.id)))
return total
def get_lifetime_replication_factor(self):
total_received = 0
for hypervisor in self.hypervisors:
total_received += hypervisor.switch.packets_rec
return total_received / self.external_switch.packets_rec
def get_current_replication_factor(self):
total_received = 0
for hypervisor in self.hypervisors:
total_received += StatHistory.get('hypervisor.%d.switch.packets_rec' % hypervisor.id, hypervisor.switch.packets_rec)
if (total_received == 0): return 0.0
return total_received / StatHistory.get('extswitch.packets_rec', self.external_switch.packets_rec)
def get_current_wait(self):
total = 0
bbu_count = 0
for hypervisor in self.hypervisors:
for bbu in hypervisor.bbus:
total += bbu.get_current_wait()
bbu_count += 1
return total / bbu_count
def get_lifetime_wait(self):
total = 0
bbu_count = 0
for hypervisor in self.hypervisors:
for bbu in hypervisor.bbus:
total += bbu.get_lifetime_wait()
bbu_count += 1
return total / bbu_count
def get_current_delay(self):
total = 0
bbu_count = 0
for hypervisor in self.hypervisors:
for bbu in hypervisor.bbus:
total += bbu.get_current_delay()
bbu_count += 1
return total / bbu_count
def get_lifetime_delay(self):
total = 0
bbu_count = 0
for hypervisor in self.hypervisors:
for bbu in hypervisor.bbus:
total += bbu.get_lifetime_delay()
bbu_count += 1
return total / bbu_count
def get_lifetime_drop_rate(self):
total = 0
total_drop = 0
for hypervisor in self.hypervisors:
stats = hypervisor.switch.get_lifetime_stats()
total += (stats['rec'] + stats['drop'])
total_drop += stats['drop']
if (total == 0): return 0.0
return total_drop / total
def get_current_drop_rate(self):
total = 0
total_drop = 0
for hypervisor in self.hypervisors:
stats = hypervisor.switch.get_current_stats()
total += (stats['rec'] + stats['drop'])
total_drop += stats['drop']
if (total == 0): return 0.0
return total_drop / total
def get_current_utilization(self, hypervisor):
load = 0
for rrh in self.rrhs:
mapping = self.forwarding.get_mapping(rrh.id)
for bbu in hypervisor.bbus:
if (bbu.id in mapping):
load += (rrh.arrival_rate * rrh.packet_mean)
# break the loop once we found a single transmission
break
return load / hypervisor.switch.rate
def get_utilization_gain(self):
stopped_hypervisors = 0
for hypervisor in self.hypervisors:
if (len(hypervisor.bbus) == 0):
stopped_hypervisors += 1
return stopped_hypervisors / len(self.hypervisors)
def setup(self, configuration):
self.external_switch = Switch(self.env, 'physical', 'external')
self.external_switch.set_forwarding_function(self.forwarding.forwarding_function)
for remote_radio_head in configuration['remote_radio_heads']:
rrh_object = RemoteRadioHead(self.env, remote_radio_head['id'])
rrh_object.set_arrival_rate(remote_radio_head['arrival_rate'])
rrh_object.set_packet_mean(remote_radio_head['packet_mean'])
rrh_object.set_packet_dev(remote_radio_head['packet_dev'])
rrh_object.out = self.external_switch
self.rrhs.append(rrh_object)
self.forwarding.add_mapping(remote_radio_head['id'], remote_radio_head['baseband_units'])
for hypervisor in configuration['hypervisors']:
hypervisor_object = Hypervisor(self.env, hypervisor['id'])
for baseband_unit in hypervisor['baseband_units']:
bbu_object = BasebandUnit(self.env, baseband_unit['id'])
hypervisor_object.add_baseband_unit(bbu_object)
hypervisor_object.switch.set_forwarding_function(self.forwarding.forwarding_function)
self.hypervisors.append(hypervisor_object)
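# A minimal sketch (assumed shape, inferred from setup() above) of the
# configuration dict that Topology expects; the ids and rates are illustrative.
EXAMPLE_CONFIGURATION = {
    'remote_radio_heads': [
        {'id': 0, 'arrival_rate': 10.0, 'packet_mean': 100.0, 'packet_dev': 10.0,
         'baseband_units': [0, 1]},
    ],
    'hypervisors': [
        {'id': 0, 'baseband_units': [{'id': 0}, {'id': 1}]},
    ],
}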
| 2.34375 | 2 |
tools/nightly-e2e-tests/src/lint.py | buranmert/dd-sdk-ios | 1 | 12790489 | <gh_stars>1-10
# -----------------------------------------------------------
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-2020 Datadog, Inc.
# -----------------------------------------------------------
import re
from src.linter import Linter, linter_context
from src.test_file_parser import TestMethod, MonitorConfiguration, MonitorVariable
def lint_test_methods(test_methods: [TestMethod]):
for test_method in test_methods:
with linter_context(code_reference=test_method.code_reference):
if test_method.monitors:
for monitor in test_method.monitors:
with linter_context(code_reference=monitor.code_reference):
# `tested_method_name` is computed from test method name, e.g.:
# for `test_logs_logger_DEBUG_log_with_error` it is `logs_logger_debug_log_with_error`
tested_method_name = __remove_prefix(test_method.method_name.lower(), 'test_')
__monitor_id_has_method_name_prefix(
monitor=monitor, tested_method_name=tested_method_name
)
__method_name_occurs_in_monitor_name_and_query(
monitor=monitor, tested_method_name=tested_method_name
)
elif not __is_excluded_from_lint(method=test_method):
Linter.shared.emit_warning(f'Test method `{test_method.method_name}` defines no E2E monitors.')
def __is_excluded_from_lint(method: TestMethod):
"""
    A method is excluded when its signature line is suffixed with `// E2E:wip`, e.g.:
` func test_logs_logger_DEBUG_log_with_error() { // E2E:wip`
"""
return method.code_reference.line_text.endswith('// E2E:wip\n')
def __monitor_id_has_method_name_prefix(monitor: MonitorConfiguration, tested_method_name: str):
"""
$monitor_id must start with the test method name, e.g. method:
`func test_logs_logger_DEBUG_log_with_error() {`
must define monitor ID starting with `logs_logger_debug_log_with_error`.
"""
if monitor_id_variable := __find_monitor_variable(monitor=monitor, variable_name='$monitor_id'):
if not monitor_id_variable.value.startswith(tested_method_name):
with linter_context(code_reference=monitor_id_variable.code_reference):
Linter.shared.emit_error(f'$monitor_id must start with method name ({tested_method_name})')
def __method_name_occurs_in_monitor_name_and_query(monitor: MonitorConfiguration, tested_method_name: str):
"""
The test method name must occur in $monitor_name and $monitor_query.
"""
regex = re.compile(rf"^.*(\W+){tested_method_name}(\W+).*$")
if monitor_name_variable := __find_monitor_variable(monitor=monitor, variable_name='$monitor_name'):
if not re.match(regex, monitor_name_variable.value):
with linter_context(code_reference=monitor_name_variable.code_reference):
Linter.shared.emit_warning(f'$monitor_name must include method name ({tested_method_name})')
if monitor_query_variable := __find_monitor_variable(monitor=monitor, variable_name='$monitor_query'):
if not re.match(regex, monitor_query_variable.value):
with linter_context(code_reference=monitor_query_variable.code_reference):
Linter.shared.emit_warning(f'$monitor_query must include method name ({tested_method_name})')
def lint_monitors(monitors: [MonitorConfiguration]):
__have_unique_variable_values(monitors=monitors, variable_name='$monitor_id')
__have_unique_variable_values(monitors=monitors, variable_name='$monitor_name')
__have_unique_variable_values(monitors=monitors, variable_name='$monitor_query')
def __have_unique_variable_values(monitors: [MonitorConfiguration], variable_name: str):
"""
Checks if $variable_name is unique among all `monitors`.
"""
variables: [MonitorVariable] = []
for monitor in monitors:
if variable := __find_monitor_variable(monitor=monitor, variable_name=variable_name):
variables.append(variable)
values: [str] = list(map(lambda var: var.value, variables))
for unique_value in set(values):
occurrences = list(filter(lambda var: var.value == unique_value, variables))
if len(occurrences) > 1:
for occurrence in occurrences:
with linter_context(code_reference=occurrence.code_reference):
Linter.shared.emit_error(f'{variable_name} must be unique - {occurrence.value} is already used.')
def __find_monitor_variable(monitor: MonitorConfiguration, variable_name: str):
return next((v for v in monitor.variables if v.name == variable_name), None)
def __remove_prefix(s, prefix):
return s[len(prefix):] if s.startswith(prefix) else s | 2 | 2 |
accessify/gui/utils.py | jscholes/accessify-prototype | 0 | 12790490 | <gh_stars>0
import functools
import threading
import wx
def find_last_child(widget):
children = widget.GetChildren()
if not children:
return widget
else:
last = children[len(children) - 1]
return find_last_child(last)
def show_error(parent, message):
if not wx.IsMainThread():
raise RuntimeError('utils.show_error called from thread {0}, must only be called from main thread'.format(threading.current_thread().name))
else:
wx.MessageBox(message, 'Error', parent=parent, style=wx.ICON_ERROR)
def main_thread(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
return wx.CallAfter(func, *args, **kwargs)
return wrapper
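# Illustrative use (hypothetical function name): any callable decorated with
# main_thread is marshalled onto the wx main loop via wx.CallAfter.
# @main_thread
# def set_status_text(label, text):
#     label.SetLabel(text)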
| 2.359375 | 2 |
include/utils.py | cns-iu/myaura | 0 | 12790491 | # coding=utf-8
# Author: <NAME> & <NAME>
# Date: Jan 06, 2021
#
# Description: Utility functions
#
import os
import re
import functools
import pickle
import numpy as np
#
# Functions to handle Twitter text
#
re_all_after_retweet = re.compile(r"rt @[a-zA-Z0-9_]+.+", re.IGNORECASE | re.UNICODE)
def removeAllAfterRetweet(text):
""" Remove everything after a retweet is seen."""
    return re_all_after_retweet.sub('', text)
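# Example: removeAllAfterRetweet('great point rt @user original tweet') -> 'great point '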
#
# Functions to handle Instagram Caption/Hashtag
#
re_repostapp = re.compile(r"(#Repost @\w+ with @repostapp)|(#EzRepost @\w+ with @ezrepostapp)|(Regrann from @\w+ -)")
def addSpacesBetweenHashTags(text):
""" Add spaces between hastags: #i#love#newyork -> #i #love #newyork """
if len(text) == 0:
return ''
    # Add spaces if hashtags are together
new_text = ''
for i, c in enumerate(text, start=0):
if (c in ['#', '@']) and (i > 0):
if text[i - 1] != ' ':
new_text += ' '
new_text += c
return new_text
def combineTagsAndText(text, tags):
""" Combine Both Tags and Text Fields."""
text = addSpacesBetweenHashTags(text)
tags = [tag for tag in tags if tag not in text]
if len(tags):
new_tags = '. '.join(['#' + w for w in tags])
tagsandtext = text + '. ' + new_tags + '.'
else:
tagsandtext = text
return tagsandtext
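# Example: combineTagsAndText('I love #newyork', ['love', 'nyc'])
# -> 'I love #newyork. #nyc.'   ('love' is dropped because it already occurs in the text)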
def removeNewLines(sentence):
""" Remove new lines """
sentence = sentence.replace('\r\n', ' ').replace('\n', ' ').replace('\r', ' ')
return sentence
def removeRepostApp(caption):
""" Remove content that was posted by another person using the @repostapp """
m = re_repostapp.search(caption)
if m:
start, finish = m.span()
return caption[:start]
else:
return caption
#
# Functions to handle general social media text
#
re_atmention = re.compile(r"@[a-zA-Z0-9_]+")
re_hashtagsymbol = re.compile(r"#([a-zA-Z0-9_]+)")
re_links = re.compile(r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+")
def removeAtMention(text):
""" Remove @mentions"""
return re_atmention.sub('', text)
def removeHashtagSymbol(text):
""" # - remove # symbol """
return re_hashtagsymbol.sub(r'\1', text)
def removeLinks(text):
""" remove links from text """
return re_links.sub('', text)
#
# File handling functions
#
def ensurePathExists(path):
""" Ensure path exists."""
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
print('-- Creating Folders: %s --' % (dirname))
os.makedirs(dirname)
def load_save_return(dbname):
""" What does this do? """
def LS_decorator(func):
@functools.wraps(func)
def LS_wrapper(*args, **kwargs):
# dbpath = os.path.join(godbpath, dbname)
dbpath = dbname
if os.path.isfile(dbpath):
with open(dbpath, 'rb') as db_fp:
return pickle.load(db_fp)
else:
result = func(*args, **kwargs)
with open(dbpath, 'wb') as db_fp:
pickle.dump(result, db_fp)
return result
return LS_wrapper
return LS_decorator
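# Illustrative usage (hypothetical names): the first call computes the result and
# pickles it to 'distances.pkl'; later calls load the pickle instead of recomputing.
# @load_save_return('distances.pkl')
# def compute_distances(graph):
#     ...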
#
# Network functions
#
def prox2dist(p):
"""Transforms a non-negative ``[0,1]`` proximity to distance in the ``[0,inf]`` interval:
Args:
p (float): proximity value
Returns:
d (float): distance value
"""
if (p == 0):
return np.inf
else:
return (1 / float(p)) - 1
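# Example: prox2dist(0.5) == (1 / 0.5) - 1 == 1.0; prox2dist(1) == 0.0; prox2dist(0) == inf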
| 3.25 | 3 |
gravtr/__init__.py | gfabricio/gravtr | 6 | 12790492 | import hashlib
import sys
if sys.version_info[0] < 3:
import urllib
else:
import urllib.parse as urllib
class Gravtr(object):
GRAVATAR_URL = 'https://www.gravatar.com/avatar/'
GRAVATAR_URL_UNSECURE = 'http://www.gravatar.com/avatar/'
class ratingType(object):
G = 'g'
PG = 'pg'
R = 'r'
X = 'x'
def __init__(self, email):
self.email = email.encode('utf-8')
def generate(self, unsecure=False, size=None, typed=False, default=None, force_default=False, rating_type=None):
gravatar_url = self.GRAVATAR_URL if not unsecure else self.GRAVATAR_URL_UNSECURE
self.url = gravatar_url + hashlib.md5(self.email).hexdigest()
params = dict()
if size:
params['s'] = str(size)
if typed:
self.url = self.url + '.jpg'
if default:
params['d'] = str(default)
if force_default:
params['f'] = 'y'
if rating_type:
params['r'] = str(rating_type)
        if params:
            return self.url + '?' + urllib.urlencode(params)
        return self.url
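# Illustrative usage (the address is a placeholder; query-string order may vary):
#   Gravtr('user@example.com').generate(size=200, rating_type=Gravtr.ratingType.PG)
#   -> 'https://www.gravatar.com/avatar/<md5-of-email>?s=200&r=pg'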
| 2.734375 | 3 |
MuseParse/tests/testUsingXML/testArpegGliss.py | Godley/MusIc-Parser | 5 | 12790493 | import os
import unittest
from MuseParse.tests.testUsingXML.xmlSet import xmlSet, parsePiece
from MuseParse.classes.ObjectHierarchy.TreeClasses.BaseTree import Search, FindByIndex
from MuseParse.classes.ObjectHierarchy.TreeClasses.NoteNode import NoteNode
from MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode import MeasureNode
from MuseParse.classes.ObjectHierarchy.ItemClasses import Note
from MuseParse.SampleMusicXML import testcases
partname = "arpeggiosAndGlissandos.xml"
directory = testcases.__path__._path[0]
piece = parsePiece(os.path.join(directory, partname))
class testArpeg(xmlSet):
def setUp(self):
xmlSet.setUp(self)
self.m_num = 32
self.p_id = "P1"
self.p_name = "Piccolo"
self.note_num = {1: 4, 2: 4, 3: 1, 4: 1, 5: 1, 6: 1, 7: 1, 8: 1, 9: 1, 10: 1,
11: 1, 12: 1, 13: 1, 14: 1, 15: 1, 16: 1, 17: 1, 18: 1, 19: 1,
20: 1, 21: 1, 22: 1, 23: 1, 24: 1, 25: 1, 26: 1, 27: 1, 28: 1,
29: 1, 30: 1, 31: 1, 32: 1}
def testParts(self):
global piece
self.assertTrue(piece.getPart(self.p_id) is not None)
self.assertEqual(self.p_name, piece.getPart(self.p_id).GetItem().name)
def testMeasures(self):
self.assertIsInstance(
FindByIndex(
piece.getPart(
self.p_id),
self.m_num),
MeasureNode)
def testNotes(self):
part = piece.getPart(self.p_id)
staff = part.getStaff(1)
keys = staff.GetChildrenIndexes()
for measure in keys:
if measure in self.note_num:
measure_obj = part.getMeasure(measure=measure, staff=1)
self.assertIsInstance(
Search(
NoteNode,
measure_obj.getVoice(1),
self.note_num[measure]),
NoteNode)
class testBar(unittest.TestCase):
def testInstance(self):
if hasattr(self, "instance_type"):
self.assertIsInstance(
self.item.wrap_notation[0],
self.instance_type)
def testEquality(self):
if hasattr(self, "value"):
self.assertEqual(self.item, self.value)
class Note1Measure1(testBar):
def setUp(self):
self.p_id = "P1"
part = piece.getPart(self.p_id)
measure = part.getMeasure(measure=1, staff=1)
self.item = Search(NoteNode, measure, 1).GetItem()
self.instance_type = Note.Arpeggiate
class Note2Measure1(testBar):
def setUp(self):
part = piece.getPart("P1")
measure = part.getMeasure(measure=1, staff=1)
self.item = Search(NoteNode, measure, 2).GetItem()
self.instance_type = Note.Arpeggiate
class Note2Measure1DirectionValue(testBar):
def setUp(self):
self.p_id = "P1"
part = piece.getPart(self.p_id)
measure = part.getMeasure(measure=1, staff=1)
note = Search(NoteNode, measure, 2).GetItem()
self.item = note.wrap_notation[0].direction
self.value = "up"
class Note3Measure1(testBar):
def setUp(self):
part = piece.getPart("P1")
measure = part.getMeasure(measure=1, staff=1)
self.item = Search(NoteNode, measure, 3).GetItem()
self.instance_type = Note.Arpeggiate
class Note3Measure1DirectionValue(testBar):
def setUp(self):
self.p_id = "P1"
part = piece.getPart(self.p_id)
measure = part.getMeasure(measure=1, staff=1)
note = Search(NoteNode, measure, 3).GetItem()
self.item = note.wrap_notation[0].direction
self.value = "down"
class Note4Measure1FirstNotation(testBar):
def setUp(self):
self.p_id = "P1"
part = piece.getPart(self.p_id)
measure = part.getMeasure(measure=1, staff=1)
self.item = Search(NoteNode, measure, 4).GetItem()
self.instance_type = Note.NonArpeggiate
class Note4Measure1SecondNotation(testBar):
def setUp(self):
self.p_id = "P1"
part = piece.getPart(self.p_id)
measure = part.getMeasure(measure=1, staff=1)
self.item = Search(NoteNode, measure, 4).GetItem()
self.instance_type = Note.NonArpeggiate
# TODO: fix this
# class Note4Measure1Notation1Type(testBar):
# def setUp(self):
# self.p_id = "P1"
# part = piece.getPart(self.p_id)
# measure = part.getMeasure(measure=1,staff=1)
# self.item = Search(NoteNode, measure, 4).GetItem().wrap_notation[0].type
# self.value = "bottom"
class Note4Measure1Notation2Type(testBar):
def setUp(self):
self.p_id = "P1"
part = piece.getPart(self.p_id)
measure = part.getMeasure(measure=1, staff=1)
self.item = Search(
NoteNode,
measure,
4).GetItem().wrap_notation[1].type
self.value = "top"
class Note1Measure2(testBar):
def setUp(self):
self.p_id = "P1"
part = piece.getPart(self.p_id)
measure = part.getMeasure(measure=2, staff=1)
self.item = Search(NoteNode, measure, 1).GetItem()
self.instance_type = Note.Slide
class Note1Measure2Type(testBar):
def setUp(self):
self.p_id = "P1"
part = piece.getPart(self.p_id)
measure = part.getMeasure(measure=2, staff=1)
self.item = Search(
NoteNode,
measure,
1).GetItem().wrap_notation[0].type
self.value = "start"
class Note1Measure2Number(testBar):
def setUp(self):
self.p_id = "P1"
part = piece.getPart(self.p_id)
measure = part.getMeasure(measure=2, staff=1)
self.item = Search(
NoteNode,
measure,
1).GetItem().wrap_notation[0].number
self.value = 1
class Note1Measure2LineType(testBar):
def setUp(self):
self.p_id = "P1"
part = piece.getPart(self.p_id)
measure = part.getMeasure(measure=2, staff=1)
self.item = Search(
NoteNode,
measure,
1).GetItem().wrap_notation[0].lineType
self.value = "solid"
class Note2Measure2(testBar):
def setUp(self):
self.p_id = "P1"
part = piece.getPart(self.p_id)
measure = part.getMeasure(measure=2, staff=1)
self.item = Search(NoteNode, measure, 2).GetItem()
self.instance_type = Note.Slide
class Note2Measure2Type(testBar):
def setUp(self):
self.p_id = "P1"
part = piece.getPart(self.p_id)
measure = part.getMeasure(measure=2, staff=1)
self.item = Search(
NoteNode,
measure,
2).GetItem().wrap_notation[0].type
self.value = "stop"
class Note2Measure2Number(testBar):
def setUp(self):
self.p_id = "P1"
part = piece.getPart(self.p_id)
measure = part.getMeasure(measure=2, staff=1)
self.item = Search(
NoteNode,
measure,
2).GetItem().wrap_notation[0].number
self.value = 1
class Note2Measure2LineType(testBar):
def setUp(self):
self.p_id = "P1"
part = piece.getPart(self.p_id)
measure = part.getMeasure(measure=2, staff=1)
self.item = Search(
NoteNode,
measure,
2).GetItem().wrap_notation[0].lineType
self.value = "solid"
class Note3Measure2(testBar):
def setUp(self):
self.p_id = "P1"
part = piece.getPart(self.p_id)
measure = part.getMeasure(measure=2, staff=1)
self.item = Search(NoteNode, measure, 3).GetItem()
self.instance_type = Note.Glissando
class Note3Measure2Type(testBar):
def setUp(self):
self.p_id = "P1"
part = piece.getPart(self.p_id)
measure = part.getMeasure(measure=2, staff=1)
self.item = Search(
NoteNode,
measure,
3).GetItem().wrap_notation[0].type
self.value = "start"
class Note3Measure2Number(testBar):
def setUp(self):
self.p_id = "P1"
part = piece.getPart(self.p_id)
measure = part.getMeasure(measure=2, staff=1)
self.item = Search(
NoteNode,
measure,
3).GetItem().wrap_notation[0].number
self.value = 1
class Note3Measure2LineType(testBar):
def setUp(self):
self.p_id = "P1"
part = piece.getPart(self.p_id)
measure = part.getMeasure(measure=2, staff=1)
self.item = Search(
NoteNode,
measure,
3).GetItem().wrap_notation[0].lineType
self.value = "wavy"
class Note4Measure2(testBar):
def setUp(self):
self.p_id = "P1"
part = piece.getPart(self.p_id)
measure = part.getMeasure(measure=2, staff=1)
self.item = Search(NoteNode, measure, 4).GetItem()
self.instance_type = Note.Glissando
class Note4Measure2Type(testBar):
def setUp(self):
self.p_id = "P1"
part = piece.getPart(self.p_id)
measure = part.getMeasure(measure=2, staff=1)
self.item = Search(
NoteNode,
measure,
4).GetItem().wrap_notation[0].type
self.value = "stop"
class Note4Measure2Number(testBar):
def setUp(self):
self.p_id = "P1"
part = piece.getPart(self.p_id)
measure = part.getMeasure(measure=2, staff=1)
self.item = Search(
NoteNode,
measure,
4).GetItem().wrap_notation[0].number
self.value = 1
class Note4Measure2LineType(testBar):
def setUp(self):
self.p_id = "P1"
part = piece.getPart(self.p_id)
measure = part.getMeasure(measure=2, staff=1)
self.item = Search(
NoteNode,
measure,
4).GetItem().wrap_notation[0].lineType
self.value = "wavy"
| 2.203125 | 2 |
packages/pegasus-python/src/Pegasus/json.py | ryantanaka/pegasus | 0 | 12790494 | <filename>packages/pegasus-python/src/Pegasus/json.py<gh_stars>0
"""
Abstract :mod:`json` with Pegasus specific defaults.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import io
import json as _json
import logging
import uuid
from enum import Enum
from functools import partial
from pathlib import Path
from typing import Iterator, List, Optional
__all__ = (
"load",
"loads",
"load_all",
"dump",
"dumps",
"dump_all",
)
class _CustomJSONEncoder(_json.JSONEncoder):
def default(self, o):
if isinstance(o, uuid.UUID):
return str(o)
elif isinstance(o, Enum):
return o.name
elif isinstance(o, Path):
# Serializing Python `Path` objects to `str`
# NOTE: Path("./aaa") serializes to "aaa"
return str(o)
elif hasattr(o, "__html__"):
return o.__html__()
elif hasattr(o, "__json__"):
return o.__json__()
elif hasattr(o, "__table__"):
return {k: getattr(o, k) for k in o.__table__.columns.keys()}
else:
logging.getLogger(__name__).warning(
"Don't know how to handle type %s" % type(o)
)
return _json.JSONEncoder.default(self, o)
load = _json.load
loads = _json.loads
def load_all(s, *args, **kwargs) -> Iterator:
    """
    Deserialize ``s`` (a ``str`` or an open text file containing newline-delimited JSON)
    into Python objects, yielding one deserialized document per line.
    :param s: newline-delimited JSON content, as a string or an open text file
    :return: an iterator over the deserialized documents
    :rtype: Iterator
    """
    if isinstance(s, str):
        fp = io.StringIO(s)
    elif hasattr(s, "read"):
        fp = s
    else:
        raise TypeError("s must either be a string or an open text file")
    for d in fp:
        line = d.strip()
        if line:
            yield loads(line, *args, **kwargs)
dump = partial(_json.dump, cls=_CustomJSONEncoder)
dumps = partial(_json.dumps, cls=_CustomJSONEncoder)
def dump_all(objs: List, fp=None, *args, **kwargs) -> Optional[str]:
"""
Serialize ``obj`` to a JSON formatted ``str``.
[extended_summary]
:param objs: [description]
:type objs: List
:return: [description]
:rtype: str
"""
if fp is None:
fp = io.StringIO()
elif hasattr(fp, "write"):
fp = fp
else:
raise TypeError("s must either be None or an open text file")
    # Disable pretty printing so each document stays on a single line (ndjson).
kwargs.update({"indent": None, "separators": None})
for d in objs:
fp.write(dumps(d, *args, **kwargs) + "\n")
return fp.getvalue() if isinstance(fp, io.StringIO) else None
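# Illustrative round trip (assumed usage): dump_all emits one JSON document per
# line (ndjson) and load_all yields them back:
#   text = dump_all([{"a": 1}, {"b": 2}])
#   docs = list(load_all(text))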
| 2.578125 | 3 |
chemicalExamination.py | goowell/DrAdvice | 0 | 12790495 | <reponame>goowell/DrAdvice
from datetime import datetime
from logger import logger
from os import listdir, path
from db import chemical_source, DuplicateKeyError,chemical_splited,paients_info
def chemical_examination_parse(file_root):
chemical_source.drop()
dir_list = listdir(file_root)
file_list=[path.join(file_root,f) for f in dir_list]
for f in file_list:
logger.info(f)
id=''
with open(f,encoding='utf8') as fp:
for l in fp.readlines():
l=l.strip()
if len(l)==6:
id=l.upper()
else:
if len(l)>0 and id !='':
try:
chemical_source.insert_one({'_id':id,'data':l})
                    except DuplicateKeyError:
logger.error('duplicateKeyError: '+id)
id=''
else:
logger.error('wrong format: '+id)
def chemical_examination_split():
'''
{
date:20160202,
name:白细胞计数,
value:16.93,
unit:10^9/L,
mark:'↑'
}
'''
chemical_splited.drop()
for r in chemical_source.find():
results = []
logger.info(r['_id'])
for rr in r['data'].split('|'):
rr = rr.strip()
if rr:
# print(rr)
if '):' in rr:
rrs=rr.split(':')
date = rrs[0][-9:-1]
rr=rrs[1]
rr=rr.split(' ')
one_r= {
'date':date,
'name':rr[0],
'value':'',
'unit':'',
'mark':''
}
if len(rr)>=2:
one_r['value']=rr[1]
if len(rr)>=3:
one_r['mark']=rr[2]
if len(rr)>=4:
one_r['unit']=' '.join(rr[3:])
results.append(one_r)
chemical_splited.insert_one({'_id':r['_id'], 'data':results})
def find_xx():
with open('chemical_examination.csv',encoding='utf8',mode='a') as f:
f.write('住院号,血红蛋白(入科),血红蛋白(出科),白蛋白(入科),白蛋白(出科)'+'\n')
for p in chemical_splited.find():
info= paients_info.find_one({'住院号':p['_id']})
if not info:
info= paients_info.find_one({'住院号':p['_id'].lower()})
if not info:
logger.error('cannot find: '+p['_id'])
continue
# logger.info(info)
date_in=info['入科日期'][0:10].replace('-','')
date_out=info['出科日期'][0:10].replace('-','')
xhdb_in=''
xhdb_out=''
bdb_in=''
bdb_out=''
date_xhdb_in=date_out
date_xhdb_out=date_in
date_bdb_in=date_out
date_bdb_out=date_in
# f.write(p['_id']+'\n')
# [].sort()
# p['data'].sort(key=lambda a: a['date'])
for c in p['data']:
if '白蛋白' == c['name'] or '白蛋白(干片法)' == c['name']:
if date_bdb_in>=c['date'] and c['date']>=date_in:
bdb_in=c['value']
date_bdb_in=c['date']
if date_bdb_out<=c['date'] and c['date']<=date_out:
bdb_out=c['value']
date_bdb_out=c['date']
# if '血红蛋白'in c['name'] and '平均' not in c['name']:
if '血红蛋白'== c['name']:
# print(c)
if date_xhdb_in>=c['date'] and c['date']>=date_in:
xhdb_in=c['value']
date_xhdb_in=c['date']
if date_xhdb_out<=c['date'] and c['date']<=date_out:
xhdb_out=c['value']
date_xhdb_out=c['date']
f.write(','.join([p['_id'],xhdb_in,xhdb_out,bdb_in,bdb_out])+'\n')
def main():
start = datetime.now()
logger.info('hello..')
dir_root = r"C:\pdata\xxxxxxxxx\huayan"
# chemical_examination_parse(dir_root)
# chemical_examination_split()
find_xx()
logger.info('done: '+str(datetime.now() - start))
if __name__ == '__main__':
main() | 2.78125 | 3 |
ps3api/tmapi.py | iMoD1998/PS3API | 8 | 12790496 | import os
import pathlib
from ctypes import *
from ctypes import _SimpleCData
from ctypes import _Pointer
from .common import CEnum
class SNResult(CEnum):
SN_S_OK = (0)
SN_S_PENDING = (1)
SN_S_NO_MSG = (3)
SN_S_TM_VERSION = (4)
SN_S_REPLACED = (5)
SN_S_NO_ACTION = (6)
SN_S_CONNECTED = SN_S_NO_ACTION
SN_S_TARGET_STILL_REGISTERED = (7)
SN_E_NOT_IMPL = (-1)
SN_E_TM_NOT_RUNNING = (-2)
SN_E_BAD_TARGET = (-3)
SN_E_NOT_CONNECTED = (-4)
SN_E_COMMS_ERR = (-5)
SN_E_TM_COMMS_ERR = (-6)
SN_E_TIMEOUT = (-7)
SN_E_HOST_NOT_FOUND = (-8)
SN_E_TARGET_IN_USE = (-9)
SN_E_LOAD_ELF_FAILED = (-10)
SN_E_BAD_UNIT = (-11)
SN_E_OUT_OF_MEM = (-12)
SN_E_NOT_LISTED = (-13)
SN_E_TM_VERSION = (-14)
SN_E_DLL_NOT_INITIALISED = (-15)
SN_E_TARGET_RUNNING = (-17)
SN_E_BAD_MEMSPACE = (-18)
SN_E_NO_TARGETS = (-19)
SN_E_NO_SEL = (-20)
SN_E_BAD_PARAM = (-21)
SN_E_BUSY = (-22)
SN_E_DECI_ERROR = (-23)
SN_E_INSUFFICIENT_DATA = (-25)
SN_E_DATA_TOO_LONG = (-26)
SN_E_DEPRECATED = (-27)
SN_E_BAD_ALIGN = (-28)
SN_E_FILE_ERROR = (-29)
SN_E_NOT_SUPPORTED_IN_SDK_VERSION = (-30)
SN_E_LOAD_MODULE_FAILED = (-31)
SN_E_CHECK_TARGET_CONFIGURATION = (-33)
SN_E_MODULE_NOT_FOUND = (-34)
SN_E_CONNECT_TO_GAMEPORT_FAILED = (-35)
SN_E_COMMAND_CANCELLED = (-36)
SN_E_PROTOCOL_ALREADY_REGISTERED = (-37)
SN_E_CONNECTED = (-38)
SN_E_COMMS_EVENT_MISMATCHED_ERR = (-39)
SN_E_TARGET_IS_POWERED_OFF = (-40)
class SNTargetInfoFlags(CEnum):
SN_TI_TARGETID = (0x00000001)
SN_TI_NAME = (0x00000002)
SN_TI_INFO = (0x00000004)
SN_TI_HOMEDIR = (0x00000008)
SN_TI_FILESERVEDIR = (0x00000010)
SN_TI_BOOT = (0x00000020)
class SNPS3TargetInfo(Structure):
_fields_ = [
("nFlags", c_uint32 ),
("hTarget", c_uint32 ),
("pszName", c_char_p ),
("pszType", c_char_p ),
("pszInfo", c_char_p ),
("pszHomeDir", c_char_p ),
("pszFSDir", c_char_p ),
("boot", c_uint64 ),
]
class TMAPIExports:
def __init__(self):
os.add_dll_directory(os.getcwd())
os.add_dll_directory(os.path.join(os.getenv('SN_PS3_PATH'), "bin"))
self.TMAPI_DLL = CDLL("ps3tmapi.dll")
'''
SNAPI SNRESULT SNPS3InitTargetComms(void);
Initialises target communications and launches Target Manager.
'''
self.SNPS3InitTargetComms = self.TMAPI_DLL.SNPS3InitTargetComms
self.SNPS3InitTargetComms.argtypes = []
self.SNPS3InitTargetComms.restype = SNResult
'''
SNAPI SNRESULT SNPS3CloseTargetComms(void);
Shuts down internal communications (but does not close the Target Manager) and frees resources.
'''
self.SNPS3CloseTargetComms = self.TMAPI_DLL.SNPS3CloseTargetComms
self.SNPS3CloseTargetComms.argtypes = []
self.SNPS3CloseTargetComms.restype = SNResult
'''
SNAPI SNRESULT SNPS3IsScanning();
Returns SN_E_BUSY if a search is already in progress.
'''
self.SNPS3IsScanning = self.TMAPI_DLL.SNPS3IsScanning
self.SNPS3IsScanning.argtypes = []
self.SNPS3IsScanning.restype = SNResult
'''
SNAPI SNRESULT SNPS3Connect(
HTARGET hTarget,
const char *pszApplication
);
Connect to specified target.
'''
self.SNPS3Connect = self.TMAPI_DLL.SNPS3Connect
self.SNPS3Connect.argtypes = [ c_uint32, c_char_p ]
self.SNPS3Connect.restype = SNResult
'''
SNAPI SNRESULT SNPS3ConnectEx(
HTARGET hTarget,
const char *pszApplication,
BOOL bForceFlag
);
Connect to specified target.
'''
self.SNPS3ConnectEx = self.TMAPI_DLL.SNPS3ConnectEx
self.SNPS3ConnectEx.argtypes = [ c_uint32, c_char_p, c_bool ]
self.SNPS3ConnectEx.restype = SNResult
'''
SNAPI SNRESULT SNPS3GetTargetInfo(
SNPS3TargetInfo *pTargetInfo
);
Retrieves information for a target specified by hTarget member of SNPS3TargetInfo() structure.
'''
self.SNPS3GetTargetInfo = self.TMAPI_DLL.SNPS3GetTargetInfo
self.SNPS3GetTargetInfo.argtypes = [ POINTER(SNPS3TargetInfo) ]
self.SNPS3GetTargetInfo.restype = SNResult
'''
SNAPI SNRESULT SNPS3GetDefaultTarget(
HTARGET *pTarget
);
Gets the default target.
'''
self.SNPS3GetDefaultTarget = self.TMAPI_DLL.SNPS3GetDefaultTarget
self.SNPS3GetDefaultTarget.argtypes = [ POINTER(c_uint32) ]
self.SNPS3GetDefaultTarget.restype = SNResult
'''
SNAPI SNRESULT SNPS3SetDefaultTarget(
HTARGET hTarget
);
Gets the default target.
'''
self.SNPS3SetDefaultTarget = self.TMAPI_DLL.SNPS3SetDefaultTarget
self.SNPS3SetDefaultTarget.argtypes = [ c_uint32 ]
self.SNPS3SetDefaultTarget.restype = SNResult
'''
SNAPI SNRESULT SNPS3ProcessList(
HTARGET hTarget,
UINT32 *puCount,
UINT32 *puBuffer
);
Fetches a list of processes running on the specified target.
'''
self.SNPS3ProcessList = self.TMAPI_DLL.SNPS3ProcessList
self.SNPS3ProcessList.argtypes = [ c_uint32, POINTER(c_uint32), POINTER(c_uint32) ]
self.SNPS3ProcessList.restype = SNResult
'''
SNAPI SNRESULT SNPS3ProcessAttach(
HTARGET hTarget,
UINT32 uUnitID,
UINT32 uProcessID
);
Attach to a process.
'''
self.SNPS3ProcessAttach = self.TMAPI_DLL.SNPS3ProcessAttach
self.SNPS3ProcessAttach.argtypes = [ c_uint32, c_uint32, c_uint32 ]
self.SNPS3ProcessAttach.restype = SNResult
'''
SNAPI SNRESULT SNPS3ProcessContinue(
HTARGET hTarget,
UINT32 uProcessID
);
Continues all threads from a specified process.
'''
self.SNPS3ProcessContinue = self.TMAPI_DLL.SNPS3ProcessContinue
self.SNPS3ProcessContinue.argtypes = [ c_uint32, c_uint32 ]
self.SNPS3ProcessContinue.restype = SNResult
'''
SNAPI SNRESULT SNPS3ProcessStop(
HTARGET hTarget,
UINT32 uProcessID
);
Stops all threads from a specified process.
'''
self.SNPS3ProcessStop = self.TMAPI_DLL.SNPS3ProcessStop
self.SNPS3ProcessStop.argtypes = [ c_uint32, c_uint32 ]
self.SNPS3ProcessStop.restype = SNResult
'''
SNAPI SNRESULT SNPS3ProcessGetMemory(
HTARGET hTarget,
UINT32 uUnit,
UINT32 uProcessID,
UINT64 uThreadID,
UINT64 uAddress,
int nCount,
BYTE *pBuffer
);
'''
self.SNPS3ProcessGetMemory = self.TMAPI_DLL.SNPS3ProcessGetMemory
self.SNPS3ProcessGetMemory.argtypes = [ c_uint32, c_uint32, c_uint32, c_uint64, c_uint64, c_int32, POINTER(c_char) ]
self.SNPS3ProcessGetMemory.restype = SNResult
'''
SNAPI SNRESULT SNPS3ProcessSetMemory(
HTARGET hTarget,
UINT32 uUnit,
UINT32 uProcessID,
UINT64 uThreadID,
UINT64 uAddress,
int nCount,
const BYTE *pBuffer
);
'''
self.SNPS3ProcessSetMemory = self.TMAPI_DLL.SNPS3ProcessSetMemory
self.SNPS3ProcessSetMemory.argtypes = [ c_uint32, c_uint32, c_uint32, c_uint64, c_uint64, c_int32, POINTER(c_char) ]
self.SNPS3ProcessSetMemory.restype = SNResult
class TMAPI:
def __init__(self):
self.NativeAPI = TMAPIExports()
self.PS3TargetIndex = -1
self.IsConnected = False
if self.NativeAPI.SNPS3InitTargetComms() != SNResult.SN_S_OK:
raise Exception("SNPS3InitTargetComms() Failed")
def ThrowIfNotConnected(self):
if self.IsConnected == False:
raise Exception("Error: Not Connected to PS3")
def GetDefaultTarget(self):
DefaultTargetIndex = pointer(c_uint32(0))
if self.NativeAPI.SNPS3GetDefaultTarget(DefaultTargetIndex) != SNResult.SN_S_OK:
raise Exception("SNPS3InitTargetComms() Failed")
return DefaultTargetIndex[0]
def ConnectTarget(self, TargetIndex=-1):
self.IsConnected = False
if TargetIndex == -1:
TargetIndex = self.GetDefaultTarget()
if self.NativeAPI.SNPS3ConnectEx(TargetIndex, None, True) not in [ SNResult.SN_S_OK, SNResult.SN_S_CONNECTED ]:
return False
self.PS3TargetIndex = TargetIndex
self.IsConnected = True
return True
def GetProcessList(self):
self.ThrowIfNotConnected()
NumProcessesPtr = pointer(c_uint32(0))
if self.NativeAPI.SNPS3ProcessList(self.PS3TargetIndex, NumProcessesPtr, None) != SNResult.SN_S_OK:
raise Exception("SNPS3ProcessList(): GetNumProcesses Failed")
NumProcesses = NumProcessesPtr.contents.value
if NumProcesses == 0:
raise Exception("No process running")
ProcessList = (c_uint32*NumProcesses)()
if self.NativeAPI.SNPS3ProcessList(self.PS3TargetIndex, NumProcessesPtr, ProcessList) != SNResult.SN_S_OK:
raise Exception("SNPS3ProcessList(): GetProcessInfos Failed")
return list(ProcessList)
def AttachProcess(self, ProcessID=-1):
self.ThrowIfNotConnected()
if ProcessID == -1:
ProcessList = self.GetProcessList()
if len(ProcessList) == 0:
return False
ProcessID = ProcessList[0]
if self.NativeAPI.SNPS3ProcessAttach(self.PS3TargetIndex, 0, ProcessID) != SNResult.SN_S_OK:
return False
if self.NativeAPI.SNPS3ProcessContinue(self.PS3TargetIndex, ProcessID) != SNResult.SN_S_OK:
raise Exception("SNPS3ProcessContinue() Failed")
self.ProcessID = ProcessID
return True
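    # Illustrative session (assumed flow, using the methods above):
    #   api = TMAPI()
    #   api.ConnectTarget()        # connect to the default target
    #   api.AttachProcess()        # attach to the first running process
    #   data = api.ReadMemory(0x10000, 16)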
def ReadMemory(self, Address, Size):
self.ThrowIfNotConnected()
MemoryBuffer = (c_char * Size)()
self.NativeAPI.SNPS3ProcessGetMemory(self.PS3TargetIndex, 0, self.ProcessID, 0, Address, Size, MemoryBuffer)
return bytes(MemoryBuffer)
def WriteMemory(self, Address, Bytes):
self.ThrowIfNotConnected()
WriteBuffer = (c_char * len(Bytes)).from_buffer(bytearray(Bytes))
return self.NativeAPI.SNPS3ProcessSetMemory(self.PS3TargetIndex, 0, self.ProcessID, 0, Address, len(Bytes), WriteBuffer) | 2.265625 | 2 |
tests/unit/test_process.py | tholom/pake | 3 | 12790497 | import sys
import unittest
import os
script_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(1, os.path.abspath(
os.path.join(script_dir, os.path.join('..', '..'))))
from pake import process
import pake.program
import pake
class ProcessTest(unittest.TestCase):
def test_call(self):
cmd = [sys.executable, os.path.join(script_dir, 'timeout.py')]
with self.assertRaises(process.TimeoutExpired) as exc:
process.call(*cmd, timeout=0.1, stderr=process.DEVNULL, stdout=process.DEVNULL)
self.assertSequenceEqual((cmd, 0.1), exc.exception.cmd)
self.assertNotEqual(process.call(sys.executable, os.path.join(script_dir, 'throw.py'),
stderr=process.DEVNULL, stdout=process.DEVNULL), 0)
self.assertNotEqual(process.call(sys.executable, os.path.join(script_dir, 'killself.py'),
stderr=process.DEVNULL, stdout=process.DEVNULL), 0)
def test_check_call(self):
cmd = [sys.executable, os.path.join(script_dir, 'timeout.py')]
with self.assertRaises(process.TimeoutExpired) as exc:
process.check_call(cmd, timeout=0.1,
stderr=process.DEVNULL, stdout=process.DEVNULL)
self.assertSequenceEqual((cmd, 0.1), exc.exception.cmd)
_ = str(exc.exception) # just test for serialization exceptions
cmd = [sys.executable, os.path.join(script_dir, 'throw.py')]
with self.assertRaises(process.CalledProcessException) as exc:
process.check_call(cmd, stderr=process.DEVNULL, stdout=process.DEVNULL)
self.assertListEqual(cmd, exc.exception.cmd)
_ = str(exc.exception) # just test for serialization exceptions
# Check pake propagates the exception correctly
pake.de_init(clear_conf=False)
pk = pake.init()
@pk.task
def dummy(ctx):
process.check_call(cmd, stderr=process.DEVNULL, stdout=process.DEVNULL)
with self.assertRaises(pake.TaskException) as exc:
pk.run(tasks=dummy)
self.assertEqual(type(exc.exception.exception), process.CalledProcessException)
def test_check_output(self):
cmd = [sys.executable, os.path.join(script_dir, 'timeout.py')]
with self.assertRaises(process.TimeoutExpired) as exc:
process.check_output(*cmd, timeout=0.1, stderr=process.DEVNULL)
_ = str(exc.exception) # just test for serialization exceptions
cmd = [sys.executable, os.path.join(script_dir, 'throw.py')]
with self.assertRaises(process.CalledProcessException) as exc:
process.check_output(cmd, stderr=process.DEVNULL)
_ = str(exc.exception) # just test for serialization exceptions
# Check pake propagates the exception correctly
pake.de_init(clear_conf=False)
pk = pake.init()
@pk.task
def dummy(ctx):
process.check_output(cmd, stderr=process.DEVNULL)
with self.assertRaises(pake.TaskException) as exc:
pk.run(tasks=dummy)
self.assertEqual(type(exc.exception.exception), process.CalledProcessException)
| 2.578125 | 3 |
research/cv/metric_learn/train.py | leelige/mindspore | 77 | 12790498 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""train resnet."""
import os
import time
import argparse
import ast
import numpy as np
from mindspore import context
from mindspore import Tensor
from mindspore.nn.optim.momentum import Momentum
from mindspore.train.model import Model
from mindspore.context import ParallelMode
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig
from mindspore.train.loss_scale_manager import FixedLossScaleManager
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.common import set_seed
from mindspore.communication.management import init
from mindspore.train.callback import Callback
from src.loss import Softmaxloss
from src.loss import Tripletloss
from src.loss import Quadrupletloss
from src.lr_generator import get_lr
from src.resnet import resnet50
from src.utility import GetDatasetGenerator_softmax, GetDatasetGenerator_triplet, GetDatasetGenerator_quadruplet
set_seed(1)
parser = argparse.ArgumentParser(description='Image classification')
# modelarts parameter
parser.add_argument('--train_url', type=str, default=None, help='Train output path')
parser.add_argument('--data_url', type=str, default=None, help='Dataset path')
parser.add_argument('--ckpt_url', type=str, default=None, help='Pretrained ckpt path')
parser.add_argument('--checkpoint_name', type=str, default='resnet-120_625.ckpt', help='Checkpoint file')
parser.add_argument('--loss_name', type=str, default='softmax',
help='loss name: softmax(pretrained) triplet quadruplet')
# Ascend parameter
parser.add_argument('--dataset_path', type=str, default=None, help='Dataset path')
parser.add_argument('--ckpt_path', type=str, default=None, help='ckpt path name')
parser.add_argument('--run_distribute', type=ast.literal_eval, default=False, help='Run distribute')
parser.add_argument('--device_id', type=int, default=0, help='Device id')
parser.add_argument('--run_modelarts', type=ast.literal_eval, default=False, help='Run distribute')
args_opt = parser.parse_args()
class Monitor(Callback):
"""Monitor"""
def __init__(self, lr_init=None):
super(Monitor, self).__init__()
self.lr_init = lr_init
self.lr_init_len = len(lr_init)
def epoch_begin(self, run_context):
self.losses = []
self.epoch_time = time.time()
dataset_generator.__init__(data_dir=DATA_DIR, train_list=TRAIN_LIST)
def epoch_end(self, run_context):
cb_params = run_context.original_args()
epoch_mseconds = (time.time() - self.epoch_time) * 1000
per_step_mseconds = epoch_mseconds / cb_params.batch_num
print("epoch time: {:5.3f}, per step time: {:5.3f}, avg loss: {:8.5f}"
.format(epoch_mseconds, per_step_mseconds, np.mean(self.losses)))
print('batch_size:', config.batch_size, 'epochs_size:', config.epoch_size,
'lr_model:', config.lr_decay_mode, 'lr:', config.lr_max, 'step_size:', step_size)
def step_begin(self, run_context):
self.step_time = time.time()
def step_end(self, run_context):
"""step_end"""
cb_params = run_context.original_args()
step_mseconds = (time.time() - self.step_time) * 1000
step_loss = cb_params.net_outputs
if isinstance(step_loss, (tuple, list)) and isinstance(step_loss[0], Tensor):
step_loss = step_loss[0]
if isinstance(step_loss, Tensor):
step_loss = np.mean(step_loss.asnumpy())
self.losses.append(step_loss)
cur_step_in_epoch = (cb_params.cur_step_num - 1) % cb_params.batch_num
print("epochs: [{:3d}/{:3d}], step:[{:5d}/{:5d}], loss:[{:8.5f}/{:8.5f}], time:[{:5.3f}], lr:[{:8.5f}]".format(
cb_params.cur_epoch_num, config.epoch_size, cur_step_in_epoch, cb_params.batch_num, step_loss,
np.mean(self.losses), step_mseconds, self.lr_init[cb_params.cur_step_num - 1]))
if __name__ == '__main__':
if args_opt.loss_name == 'softmax':
from src.config import config0 as config
from src.dataset import create_dataset0 as create_dataset
elif args_opt.loss_name == 'triplet':
from src.config import config1 as config
from src.dataset import create_dataset1 as create_dataset
elif args_opt.loss_name == 'quadruplet':
from src.config import config2 as config
from src.dataset import create_dataset1 as create_dataset
else:
        raise ValueError('Unsupported loss name: ' + args_opt.loss_name)
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False)
# init distributed
if args_opt.run_modelarts:
import moxing as mox
device_id = int(os.getenv('DEVICE_ID'))
device_num = int(os.getenv('RANK_SIZE'))
context.set_context(device_id=device_id)
local_data_url = '/cache/data'
local_ckpt_url = '/cache/ckpt'
local_train_url = '/cache/train'
if device_num > 1:
init()
context.set_auto_parallel_context(device_num=device_num,
parallel_mode=ParallelMode.DATA_PARALLEL,
gradients_mean=True)
local_data_url = os.path.join(local_data_url, str(device_id))
local_ckpt_url = os.path.join(local_ckpt_url, str(device_id))
mox.file.copy_parallel(args_opt.data_url, local_data_url)
mox.file.copy_parallel(args_opt.ckpt_url, local_ckpt_url)
DATA_DIR = local_data_url + '/'
else:
if args_opt.run_distribute:
device_id = int(os.getenv('DEVICE_ID'))
device_num = int(os.getenv('RANK_SIZE'))
context.set_context(device_id=device_id)
init()
context.reset_auto_parallel_context()
context.set_auto_parallel_context(device_num=device_num,
parallel_mode=ParallelMode.DATA_PARALLEL,
gradients_mean=True)
else:
context.set_context(device_id=args_opt.device_id)
device_num = 1
device_id = args_opt.device_id
DATA_DIR = args_opt.dataset_path + '/'
# create dataset
TRAIN_LIST = DATA_DIR + 'train_half.txt'
if args_opt.loss_name == 'softmax':
dataset_generator = GetDatasetGenerator_softmax(data_dir=DATA_DIR,
train_list=TRAIN_LIST)
elif args_opt.loss_name == 'triplet':
dataset_generator = GetDatasetGenerator_triplet(data_dir=DATA_DIR,
train_list=TRAIN_LIST)
elif args_opt.loss_name == 'quadruplet':
dataset_generator = GetDatasetGenerator_quadruplet(data_dir=DATA_DIR,
train_list=TRAIN_LIST)
else:
        raise ValueError('Unsupported loss name: %s' % args_opt.loss_name)
dataset = create_dataset(dataset_generator, do_train=True, batch_size=config.batch_size,
device_num=device_num, rank_id=device_id)
step_size = dataset.get_dataset_size()
# define net
net = resnet50(class_num=config.class_num)
# init weight
if args_opt.run_modelarts:
checkpoint_path = os.path.join(local_ckpt_url, args_opt.checkpoint_name)
else:
checkpoint_path = args_opt.ckpt_path
param_dict = load_checkpoint(checkpoint_path)
load_param_into_net(net.backbone, param_dict)
# init lr
lr = Tensor(get_lr(lr_init=config.lr_init,
lr_end=config.lr_end,
lr_max=config.lr_max,
warmup_epochs=config.warmup_epochs,
total_epochs=config.epoch_size,
steps_per_epoch=step_size,
lr_decay_mode=config.lr_decay_mode))
# define opt
opt = Momentum(params=net.trainable_params(),
learning_rate=lr,
momentum=config.momentum,
weight_decay=config.weight_decay,
loss_scale=config.loss_scale)
# define loss, model
if args_opt.loss_name == 'softmax':
loss = Softmaxloss(sparse=True, smooth_factor=0.1, num_classes=config.class_num)
elif args_opt.loss_name == 'triplet':
loss = Tripletloss(margin=0.1)
elif args_opt.loss_name == 'quadruplet':
loss = Quadrupletloss(train_batch_size=config.batch_size, samples_each_class=2, margin=0.1)
else:
        raise ValueError('Unsupported loss name: %s' % args_opt.loss_name)
loss_scale = FixedLossScaleManager(config.loss_scale, drop_overflow_update=False)
if args_opt.loss_name == 'softmax':
model = Model(net, loss_fn=loss, optimizer=opt, loss_scale_manager=loss_scale, metrics=None,
amp_level='O3', keep_batchnorm_fp32=False)
else:
model = Model(net.backbone, loss_fn=loss, optimizer=opt, loss_scale_manager=loss_scale, metrics=None,
amp_level='O3', keep_batchnorm_fp32=False)
    # define callbacks
cb = []
if config.save_checkpoint and (device_num == 1 or device_id == 0):
config_ck = CheckpointConfig(save_checkpoint_steps=config.save_checkpoint_epochs * step_size,
keep_checkpoint_max=config.keep_checkpoint_max)
check_name = 'ResNet50_' + args_opt.loss_name
if args_opt.run_modelarts:
ckpt_cb = ModelCheckpoint(prefix=check_name, directory=local_train_url, config=config_ck)
else:
save_ckpt_path = os.path.join(config.save_checkpoint_path, 'model_'+ str(device_id) +'/')
ckpt_cb = ModelCheckpoint(prefix=check_name, directory=save_ckpt_path, config=config_ck)
cb += [ckpt_cb]
cb += [Monitor(lr_init=lr.asnumpy())]
# train model
model.train(config.epoch_size - config.pretrain_epoch_size, dataset, callbacks=cb, dataset_sink_mode=True)
if args_opt.run_modelarts and config.save_checkpoint and (device_num == 1 or device_id == 0):
mox.file.copy_parallel(src_url=local_train_url, dst_url=args_opt.train_url)
| 1.585938 | 2 |
src/intranet3/intranet3/utils/mail_fetcher.py | tmodrzynski/intranet-open | 0 | 12790499 | <reponame>tmodrzynski/intranet-open
# -*- coding: utf-8 -*-
"""
Fetching tracker notification emails and turning them into time entries
"""
import re
import email
import quopri
import datetime
import time
import poplib
from base64 import b64decode
from pprint import pformat
from email.header import decode_header
from email.utils import parsedate
import transaction
from intranet3.models import ApplicationConfig, Project, Tracker, TrackerCredentials, DBSession
from intranet3.models.project import SelectorMapping
from intranet3.log import DEBUG_LOG, WARN_LOG, EXCEPTION_LOG, INFO_LOG
from intranet3.utils.timeentry import add_time
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
LOG = INFO_LOG(__name__)
EXCEPTION = EXCEPTION_LOG(__name__)
WARN = WARN_LOG(__name__)
DEBUG = DEBUG_LOG(__name__)
MIN_HOURS = 6.995 #record hours
decode = lambda header: u''.join(
val.decode('utf-8' if not encoding else encoding)
for val, encoding in decode_header(header)
).strip()
Q_ENCODING_REGEXP = re.compile(r'(\=\?[^\?]+\?[QB]\?[^\?]+\?\=)')
def decode_subject(val):
for value in Q_ENCODING_REGEXP.findall(val):
val = val.replace(value, decode(value))
return val.strip()
def get_msg_payload(msg):
encoding = msg.get('Content-Transfer-Encoding')
payload = msg.get_payload()
if type(payload) == list:
a_msg = payload[0] # first is plaintext, second - html
encoding = a_msg.get('Content-Transfer-Encoding')
payload = a_msg.get_payload()
DEBUG(u'Extracted email msg %r with encoding %r' % (payload, encoding))
if encoding == 'quoted-printable':
payload = quopri.decodestring(payload)
elif encoding == 'base64':
payload = b64decode(payload)
return payload
class TimeEntryMailExtractor(object):
"""
Extracts timeentry from mail
"""
SUBJECT_REGEXP = re.compile(r'^\[Bug (\d+)\](.*)')
HOURS_REGEXP = re.compile(r'^\s*Hours Worked\|\s*\|(\d+(\.\d+)?)$')
HOURS_NEW_BUG_REGEXP = re.compile(r'^\s*Hours Worked: (\d+(\.\d+)?)$')
TRAC_SUBJECT_REGEXP = re.compile(r'^(Re\:\ +)?\[.+\] \#\d+\: (.*)')
TRAC_HOURS_REGEXP = re.compile(r'.*Add Hours to Ticket:\ *(\d+(\.\d+)?)')
TRAC_AUTHOR_REGEXP = re.compile(r'^Changes \(by (.*)\)\:')
TRAC_COMPONENT_REGEXP = re.compile(r'.*Component:\ *([^|]*)')
def __init__(self, trackers, logins_mappings, projects, selector_mappings):
self.trackers = trackers
self.logins_mappings = logins_mappings
self.projects = projects
self.selector_mappings = selector_mappings
def handle_trac_email(self, msg, tracker):
date = decode(msg['Date'])
subject = msg['Subject']
DEBUG(u'Message with subject %r retrieved from date %r' % (subject, date))
date = datetime.datetime.fromtimestamp(time.mktime(parsedate(date)))
bug_id = decode(msg['X-Trac-Ticket-ID'])
subject = decode(subject.replace('\n', u''))
match = self.TRAC_SUBJECT_REGEXP.match(subject)
if not match:
WARN(u"Trac subject not matched %r" % (subject, ))
return
subject = match.group(2)
hours = 0.0
who = ''
component = ''
payload = get_msg_payload(msg)
for line in payload.split('\n'):
match = self.TRAC_HOURS_REGEXP.match(line)
if match:
hours = float(match.group(1))
continue
match = self.TRAC_AUTHOR_REGEXP.match(line)
if match:
who = match.group(1)
continue
match = self.TRAC_COMPONENT_REGEXP.match(line)
if match:
component = match.group(1).strip()
continue
DEBUG(u'Found bug title %(subject)s component %(component)s, by %(who)s from %(date)s, hours %(hours)s' % locals())
if hours <= 0.0:
DEBUG(u"Ignoring bug with no hours")
return
who = who.lower()
if not who in self.logins_mappings[tracker.id]:
DEBUG(u'User %s not in logins mapping' % (who, ))
return
user = self.logins_mappings[tracker.id][who]
DEBUG(u'Found user %s' % (user.name, ))
mapping = self.selector_mappings[tracker.id]
project_id = mapping.match(bug_id, 'none', component)
if project_id is None:
DEBUG(u'Project not found for component %s' % (component, ))
return
project = self.projects[project_id]
LOG(u"Will add entry for user %s project %s bug #%s hours %s title %s" % (
user.name, project.name, bug_id, hours, subject
))
return user.id, date, bug_id, project_id, hours, subject
def handle_bugzilla_email(self, msg, tracker):
date = decode(msg['Date'])
component = decode(msg['X-Bugzilla-Component'])
product = decode(msg['X-Bugzilla-Product'])
who = decode(msg['X-Bugzilla-Who'])
subject = msg['Subject']
DEBUG(u'Message with subject %r retrieved from date %r' % (subject, date))
date = datetime.datetime.fromtimestamp(time.mktime(parsedate(date)))
subject = decode_subject(subject.replace('\n', u'').replace(u':', u' '))
match = self.SUBJECT_REGEXP.match(subject)
if not match:
DEBUG(u"Subject doesn't match regexp: %r" % subject)
return
bug_id, subject = match.groups()
subject = subject.strip()
is_new_bug = subject.startswith('New ')
payload = get_msg_payload(msg)
username = who.lower()
if username not in self.logins_mappings[tracker.id]:
DEBUG(u'User %s not in logins mapping' % (who, ))
return
DEBUG(u'Found bug title %(subject)s product %(product)s, component %(component)s, by %(who)s from %(date)s' % locals())
bug_id = int(bug_id)
newline = '\n'
        # some emails have \r\n instead of \n
if '\r\n' in payload:
            DEBUG(u'Using CRLF instead of LF')
newline = '\r\n'
for line in payload.split(newline):
if is_new_bug:
match = self.HOURS_NEW_BUG_REGEXP.match(line)
else:
match = self.HOURS_REGEXP.match(line)
if match:
hours = float(match.groups()[0])
break
else:
hours = 0.0
DEBUG(u'Found bug #%(bug_id)s with title %(subject)s product %(product)s, component %(component)s, by %(who)s, hours %(hours)f %(date)s' % locals())
if is_new_bug:
# new bug - create with 0 h, first strip title
subject = subject[4:].strip()
DEBUG(u'Bug creation found %s' % (subject, ))
elif hours == 0.0:
DEBUG(u'Ignoring non-new bug without hours')
return
user = self.logins_mappings[tracker.id][username]
DEBUG(u'Found user %s' % (user.name, ))
# selector_mapping given explicitly to avoid cache lookups
mapping = self.selector_mappings[tracker.id]
project_id = mapping.match(bug_id, product, component)
if project_id is None:
DEBUG(u'Project not found for product %s, component %s' % (
product,
component,
))
return
project = self.projects[project_id]
LOG(u"Will add entry for user %s project %s bug #%s hours %s title %s" % (
user.name, project.name, bug_id, hours, subject
))
return user.id, date, bug_id, project_id, hours, subject
handle_cookie_trac_email = handle_trac_email
handle_igozilla_email = handle_bugzilla_email
handle_rockzilla_email = handle_bugzilla_email
def match_tracker(self, msg):
sender = decode(msg['From'])
        for mailer_address in self.trackers:
            if mailer_address in sender:
                return self.trackers[mailer_address]
else:
return None
def get(self, msg):
""" When single message was retrieved """
sender = decode(msg['From'])
tracker = self.match_tracker(msg)
if tracker is None:
DEBUG(u'Email from %s ignored, no tracker matched' % (sender, ))
return
        # find the appropriate handler
handler = getattr(self, 'handle_%s_email' % tracker.type)
# handler should parse the response and return essential info or None
data = handler(msg, tracker)
if data is None: # email should be ignored
return
user_id, date, bug_id, project_id, hours, subject = data
return add_time(user_id, date, bug_id, project_id, hours, subject)
class MailFetcher(object):
HOST = 'pop.gmail.com'
MAX_EMAILS = 100
def __init__(self, login, password):
self.login = login
self.password = password
def __iter__(self):
pop_conn = poplib.POP3_SSL(self.HOST)
pop_conn.user(self.login)
pop_conn.pass_(self.password)
stats = pop_conn.stat()
LOG(u'Emails: %s' % (pformat(stats)))
num, _ = stats
num = num if num < self.MAX_EMAILS else self.MAX_EMAILS
messages = (pop_conn.retr(i) for i in range(1, num + 1))
messages = ("\n".join(mssg[1]) for mssg in messages)
messages = (email.parser.Parser().parsestr(mssg) for mssg in messages)
for msg in messages:
yield msg
pop_conn.quit()
class MailCheckerTask(object):
def __call__(self, *args, **kwargs):
config = ApplicationConfig.get_current_config(allow_empty=True)
if config is None:
WARN(u'Application config not found, emails cannot be checked')
return
trackers = dict(
(tracker.mailer, tracker)
for tracker in Tracker.query.filter(Tracker.mailer != None).filter(Tracker.mailer != '')
)
if not len(trackers):
WARN(u'No trackers have mailers configured, email will not be checked')
return
username = config.google_user_email.encode('utf-8')
        password = config.google_user_password.encode('utf-8')
# TODO
logins_mappings = dict(
(tracker.id, TrackerCredentials.get_logins_mapping(tracker))
for tracker in trackers.itervalues()
)
selector_mappings = dict(
(tracker.id, SelectorMapping(tracker))
for tracker in trackers.itervalues()
)
# find all projects connected to the tracker
projects = dict(
(project.id, project)
for project in Project.query.all()
)
# all pre-conditions should be checked by now
# start fetching
fetcher = MailFetcher(
username,
password,
)
# ok, we have all mails, lets create timeentries from them
extractor = TimeEntryMailExtractor(
trackers,
logins_mappings,
projects,
selector_mappings,
)
for msg in fetcher:
timeentry = extractor.get(msg)
if timeentry:
DBSession.add(timeentry)
transaction.commit()
| 1.914063 | 2 |
fitnick/activity/models/calories.py | kcinnick/fitnick | 1 | 12790500 | <filename>fitnick/activity/models/calories.py
from sqlalchemy import MetaData, Table, Column, UniqueConstraint, Numeric, Date, Integer
from sqlalchemy.ext.declarative import declarative_base
meta = MetaData()
Base = declarative_base()
class Calories(Base):
__tablename__ = 'calories'
date = Column('date', Date, nullable=False, primary_key=True)
total = Column('total', Integer())
calories_bmr = Column('calories_bmr', Integer())
activity_calories = Column('activity_calories', Integer())
UniqueConstraint('date', name='date')
schema = 'activity'
    def __eq__(self, other):
        return (self.date, self.total, self.calories_bmr) == \
               (other.date, other.total, other.calories_bmr)
def __str__(self):
return f"{self.date}, {self.total}, {self.calories_bmr}, {self.activity_calories}"
calories_table = Table(
'calories',
meta,
Column('date', Date),
Column('total', Integer()),
Column('calories_bmr', Numeric(10, 5)),
Column('activity_calories', Numeric(10, 5)),
UniqueConstraint('date', name='date'),
schema='activity'
)
class CaloriesIntraday(Base):
__tablename__ = 'intraday'
date = Column('date', Date, nullable=False, primary_key=True)
level = Column('level', Integer())
mets = Column('mets', Integer())
value = Column('value', Numeric())
schema = 'calories'
| 2.703125 | 3 |
ia870/iaero.py | rdenadai/ia870p3 | 5 | 12790501 | <gh_stars>1-10
# -*- encoding: utf-8 -*-
# Module iaero
def iaero(f, b=None):
    """Erode image f by structuring element b, computed through the duality
    erosion(f, b) == neg(dilation(neg(f), reflect(b)))."""
    from ia870 import ianeg, iadil, iasereflect, iasecross
    if b is None: b = iasecross()
    y = ianeg(iadil(ianeg(f), iasereflect(b)))
    return y
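# Usage sketch (assumes the rest of the ia870 toolbox is importable; `iasebox`
# and the image `f` are illustrative, not defined in this module):
#   from ia870 import iasebox
#   g = iaero(f, iasebox())   # erosion as the dual of dilation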
| 2.40625 | 2 |
_includes/code/02_parallel_jobs/print_hostname_and_time.py | bkmgit/hpc-parallel-novice | 32 | 12790502 | <gh_stars>10-100
#!/usr/bin/env python3
from mpi4py import MPI
from datetime import datetime
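# Typical invocation (a sketch; launcher name and flags depend on the MPI
# distribution installed on the cluster):
#   mpirun -n 4 python3 print_hostname_and_time.py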
def print_hostname():
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
hname = MPI.Get_processor_name()
tod = datetime.now().isoformat(' ')
print("this is rank = %2i (total: %2i) running on %s at %s" % (rank,size,hname,tod))
comm.Barrier()
if __name__ == '__main__':
print_hostname()
| 2.609375 | 3 |
setup.py | parenthetical-e/clouds_are_fun | 0 | 12790503 | <gh_stars>0
from setuptools import setup
setup(
name='clouds_are_fun',
version='0.0.1',
description="Clouds are (as stated) fun!",
author='<NAME>',
author_email='<EMAIL>',
license='',
packages=['clouds_are_fun'],
zip_safe=False)
| 1.023438 | 1 |
dockit/views/edit.py | zbyte64/django-dockit | 5 | 12790504 | <filename>dockit/views/edit.py
from django.core.exceptions import ImproperlyConfigured
from django.views.generic import edit as editview
from detail import SingleObjectMixin, SingleObjectTemplateResponseMixin, BaseDetailView
from dockit.forms import DocumentForm
class DocumentFormMixin(editview.FormMixin, SingleObjectMixin):
def get_form_class(self):
"""
Returns the form class to use in this view
"""
if self.form_class:
return self.form_class
else:
if self.document is not None:
# If a document has been explicitly provided, use it
document = self.document
elif hasattr(self, 'object') and self.object is not None:
# If this view is operating on a single object, use
# the class of that object
document = self.object.__class__
else:
# Try to get a queryset and extract the document class
# from that
document = self.get_queryset().document
#fields = fields_for_document(document)
class CustomDocumentForm(DocumentForm):
class Meta:
document = document
#CustomDocumentForm.base_fields.update(fields)
return CustomDocumentForm
def get_form_kwargs(self):
"""
        Returns the keyword arguments for instantiating the form.
"""
kwargs = super(DocumentFormMixin, self).get_form_kwargs()
kwargs.update({'instance': self.object})
return kwargs
def get_success_url(self):
if self.success_url:
url = self.success_url % self.object.__dict__
else:
try:
url = self.object.get_absolute_url()
except AttributeError:
raise ImproperlyConfigured(
"No URL to redirect to. Either provide a url or define"
" a get_absolute_url method on the document.")
return url
def form_valid(self, form):
self.object = form.save()
return super(DocumentFormMixin, self).form_valid(form)
def get_context_data(self, **kwargs):
context = kwargs
if getattr(self, 'object', None):
context['object'] = self.object
context_object_name = self.get_context_object_name(self.object)
if context_object_name:
context[context_object_name] = self.object
return context
class BaseCreateView(DocumentFormMixin, editview.ProcessFormView):
"""
Base view for creating an new object instance.
Using this base class requires subclassing to provide a response mixin.
"""
def get(self, request, *args, **kwargs):
self.object = None
return super(BaseCreateView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = None
return super(BaseCreateView, self).post(request, *args, **kwargs)
class CreateView(SingleObjectTemplateResponseMixin, BaseCreateView):
"""
View for creating an new object instance,
with a response rendered by template.
"""
template_name_suffix = '_form'
class BaseUpdateView(DocumentFormMixin, editview.ProcessFormView):
"""
Base view for updating an existing object.
Using this base class requires subclassing to provide a response mixin.
"""
def get(self, request, *args, **kwargs):
self.object = self.get_object()
return super(BaseUpdateView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
return super(BaseUpdateView, self).post(request, *args, **kwargs)
class UpdateView(SingleObjectTemplateResponseMixin, BaseUpdateView):
"""
View for updating an object,
with a response rendered by template..
"""
template_name_suffix = '_form'
DeletionMixin = editview.DeletionMixin
class BaseDeleteView(DeletionMixin, BaseDetailView):
"""
Base view for deleting an object.
Using this base class requires subclassing to provide a response mixin.
"""
class DeleteView(SingleObjectTemplateResponseMixin, BaseDeleteView):
"""
View for deleting an object retrieved with `self.get_object()`,
with a response rendered by template.
"""
template_name_suffix = '_confirm_delete'
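# Usage sketch (``Book`` and the URL are hypothetical, not part of this
# package):
#
#   class BookCreateView(CreateView):
#       document = Book          # a dockit schema/document class
#       success_url = '/books/'
#
# With no ``form_class`` set, DocumentFormMixin builds a default DocumentForm
# bound to ``document``.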
| 2.109375 | 2 |
main/views/admin/resource/resource_form.py | tiberiucorbu/av-website | 0 | 12790505 | <filename>main/views/admin/resource/resource_form.py
import urllib
from flask.ext import wtf
from google.appengine.ext import blobstore
import flask
import wtforms
import auth
import config
import model
import util
from main import app
from views import ListField
class ResourceForm(wtf.Form):
name = wtforms.TextField('Name', [wtforms.validators.optional()])
description = wtforms.StringField(
'Description', [wtforms.validators.optional()])
tags = ListField('Tags', [wtforms.validators.optional()])
image_average_color = wtforms.StringField(
'Average Color', [wtforms.validators.optional()])
| 2.25 | 2 |
evidence/data_sources/__init__.py | cancervariants/evidence-normalization | 0 | 12790506 | <filename>evidence/data_sources/__init__.py
"""Import data sources"""
from .gnomad import GnomAD
from .cbioportal import CBioPortal
from .cancer_hotspots import CancerHotspots
| 1.273438 | 1 |
src/fts3/cli/jobshower.py | Jar-win/fts-rest | 1 | 12790507 | <filename>src/fts3/cli/jobshower.py<gh_stars>1-10
# Copyright notice:
# Copyright Members of the EMI Collaboration, 2013.
#
# See www.eu-emi.eu for details on the copyright holders
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from fts3.rest.client import Inquirer
from base import Base
from utils import *
class JobShower(Base):
def __init__(self):
super(JobShower, self).__init__(
extra_args='JOB_ID',
description="This command can be used to check the current status of a given job",
example="""
$ %(prog)s -s https://fts3-devel.cern.ch:8446 c079a636-c363-11e3-b7e5-02163e009f5a
Request ID: c079a636-c363-11e3-b7e5-02163e009f5a
Status: FINISHED
Client DN: /DC=ch/DC=cern/OU=Organic Units/OU=Users/CN=saketag/CN=678984/CN=<NAME>
Reason:
Submission time: 2014-04-13T23:31:34
Priority: 3
VO Name: dteam
"""
)
def validate(self):
if len(self.args) == 0:
self.logger.critical('Need a job id')
sys.exit(1)
def run(self):
job_id = self.args[0]
context = self._create_context()
inquirer = Inquirer(context)
job = inquirer.get_job_status(job_id, list_files=self.options.json)
if not self.options.json:
self.logger.info(job_human_readable(job))
else:
self.logger.info(job_as_json(job))
| 1.960938 | 2 |
pcep/mod5_moddigit.py | gliverm/devnet-study-group | 1 | 12790508 | bday = input("Enter your birthday [YYYYMMDD or YYYYDDMM or MMDDYYYY]:")
if len(bday) != 8 or not bday.isdigit():
    print("Birthday date must be 8 digits in length")
else:
while len(bday) != 1:
lst = list(bday)
sum = 0
for num in lst:
sum += int(num)
bday = str(sum)
print(bday)
# Better solution follows
# date = input("Enter your birthday date (in the following format: YYYYMMDD or YYYYDDMM, 8 digits): ")
# if len(date) != 8 or not date.isdigit():
# print("Invalid date - sorry, we can do nothing with it.")
# else:
# # while there is more than one digit in the date...
# while len(date) > 1:
# sum = 0
# # ... sum all the digits...
# for dig in date:
# sum += int(dig)
# print(date)
# # ... and store sum inside the string
# date = str(sum)
# print("Your Digit of Life is: " + date)
| 4.1875 | 4 |
Array/4.Modifying-items.py | manish1822510059/Python-1000-program | 1 | 12790509 | <filename>Array/4.Modifying-items.py
import array as arr
numarr = arr.array('i',[10,20,30,40,50,60,70,80])
print("Array items:")
print(numarr)
#changing index 3 value
numarr[3] = 44
print('\n Array items (after modifying):')
print(numarr)
#changing values at indices 1 through 4
numarr[1:5] = arr.array('i',[-8,-5,-6,-7])
print("\n Array items (after modifing in range ):")
print(numarr) | 3.96875 | 4 |
src/nnets/utils.py | Zaharid/nnets | 0 | 12790510 | <reponame>Zaharid/nnets
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 24 12:05:09 2014
@author: zah
"""
import numpy as np
from sympy.printing.lambdarepr import LambdaPrinter
import numba
#more nonsensical code...
@numba.jit('void(f8[:],f8[:],u2)', nopython=True)
def memcopy(dest, src, size):
"""Copy the first `size` elements of `src` into the first `size`
elements of `dest`.
    Note that there is no bounds checking, so an out-of-range size can crash the program."""
for i in range(size):
dest[i] = src[i]
def reps_to_converge(x, value = 0.1):
return np.argwhere(x<value)[0][0] + 1
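# e.g. reps_to_converge(np.array([0.5, 0.3, 0.05, 0.01])) returns 3:
# index 2 holds the first element below the default threshold of 0.1.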
class SubstitutionPrinter(LambdaPrinter):
d = {}
def _print_Symbol(self, expr):
return self.d.get(expr, super()._print_Symbol(expr) )
class ArraySubstitutionPrinter(SubstitutionPrinter):
def __init__(self, arr_name ,arr_symbols,*args, **kwargs):
self.d = {symbol : '%s[%i]'%(arr_name, i)
for i, symbol in enumerate(arr_symbols)}
super().__init__(*args, **kwargs)
class ReplaceComaPrinter(LambdaPrinter):
def _print_Symbol(self, expr):
result = super()._print_Symbol(expr).replace(',' , '_')
return result
class NeuralPrinter(ArraySubstitutionPrinter, ReplaceComaPrinter):
pass
def cv_split(*arrs, prob_testing = 0.3, even_splits = None):
if even_splits is not None:
raise NotImplementedError
is_validation = np.random.rand(*arrs[0].shape) < prob_testing
return ((arr[~is_validation],arr[is_validation]) for arr in arrs) | 2.46875 | 2 |
CursoemVideo/ex019.py | arthxvr/coding--python | 0 | 12790511 | <gh_stars>0
from random import choice
n1 = str(input('First student: '))
n2 = str(input('Second student: '))
n3 = str(input('Third student: '))
n4 = str(input('Fourth student: '))
escolhido = choice([n1, n2, n3, n4])
print(f'Chosen student: {escolhido}')
| 3.40625 | 3 |
Trajectory_Mining/Bag_of_Words/test/test_sklearn.py | AdamCoscia/eve-trajectory-mining | 0 | 12790512 | # -*- coding: utf-8 -*-
"""testing script"""
import os
import sys
from functools import reduce
import numpy as np
import pandas as pd
import nltk # Natural Language Tool Kit
from fuzzywuzzy import fuzz, process # Fuzzy String Matching
import jellyfish # Distance metrics
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
def get_cosine_distance(doc1, doc2):
    """Return the TF-IDF cosine similarity of two documents
    (1.0 means the bags of words are identical).
    """
    tfidf = TfidfVectorizer().fit_transform([doc1, doc2])  # vectorize the two bags of words
    cos_dist = linear_kernel(tfidf[0:1], tfidf[1:2]).flatten()[0]  # cosine similarity of the two rows
return cos_dist
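# e.g. two documents with disjoint vocabularies score 0.0:
#   get_cosine_distance('alpha beta', 'gamma delta')  -> 0.0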
# killmail_id, killmail_time, solar_system_id, character_id, ship_type_id
# 46643819, 2015-05-15 19:02:00, 30000157, 90000814, 630
l1 = [('Large Shield Extender II', 'Shield Extender'),
('Rapid Light Missile Launcher II', 'Missile Launcher Rapid Light'),
('Caldari Navy Mjolnir Light Missile', 'Light Missile'),
('Damage Control II', 'Damage Control'),
('50MN Cold-Gas Enduring Microwarpdrive', 'Propulsion Module'),
('Large Shield Extender II', 'Shield Extender'),
('Caldari Navy Scourge Light Missile', 'Light Missile'),
('Caldari Navy Inferno Light Missile', 'Light Missile'),
('Rapid Light Missile Launcher II', 'Missile Launcher Rapid Light'),
('Phased Scoped Target Painter', 'Target Painter'),
('Caldari Navy Inferno Light Missile', 'Light Missile'),
('Medium Polycarbon Engine Housing I', 'Rig Navigation'),
('Nanofiber Internal Structure II', 'Nanofiber Internal Structure'),
('Ballistic Control System II', 'Ballistic Control system'),
('Ballistic Control System II', 'Ballistic Control system'),
('Rapid Light Missile Launcher II', 'Missile Launcher Rapid Light'),
('Caldari Navy Inferno Light Missile', 'Light Missile'),
('Caldari Navy Inferno Light Missile', 'Light Missile'),
('Caldari Navy Nova Light Missile', 'Light Missile'),
('Medium Core Defense Field Extender I', 'Rig Shield'),
('Caldari Navy Inferno Light Missile', 'Light Missile'),
('Warp Disruptor II', 'Warp Scrambler'),
('Rapid Light Missile Launcher II', 'Missile Launcher Rapid Light'),
('Medium Core Defense Field Extender I', 'Rig Shield')]
# killmail_id, killmail_time, solar_system_id, character_id, ship_type_id
# 46643869, 2015-05-15 19:05:00, 30000157, 90000814, 32872
l2 = [('Caldari Navy Antimatter Charge S', 'Hybrid Charge'),
('Caldari Navy Antimatter Charge S', 'Hybrid Charge'),
('Drone Damage Amplifier II', 'Drone Damage Modules'),
('F85 Peripheral Damage System I', 'Damage Control'),
('Null S', 'Advanced Blaster Charge'),
('Caldari Navy Antimatter Charge S', 'Hybrid Charge'),
('Light Ion Blaster II', 'Hybrid Weapon'),
('J5b Enduring Warp Scrambler', 'Warp Scrambler'),
('Light Ion Blaster II', 'Hybrid Weapon'),
('Caldari Navy Antimatter Charge S', 'Hybrid Charge'),
('Drone Damage Amplifier II', 'Drone Damage Modules'),
('Small Transverse Bulkhead I', 'Rig Armor'),
('5MN Y-T8 Compact Microwarpdrive', 'Propulsion Module'),
('Light Ion Blaster II', 'Hybrid Weapon'),
('X5 Enduring Stasis Webifier', 'Stasis Web'),
('Small Transverse Bulkhead I', 'Rig Armor'),
('Warrior II', 'Combat Drone'),
('Small Transverse Bulkhead I', 'Rig Armor'),
('Light Ion Blaster II', 'Hybrid Weapon'),
('Light Ion Blaster II', 'Hybrid Weapon'),
('Caldari Navy Antimatter Charge S', 'Hybrid Charge'),
('Caldari Navy Antimatter Charge S', 'Hybrid Charge')]
# [TEST] Long Text Vectorizers
# The same document should have cosine distance of 1
doc1_lt = reduce(lambda x, y: f'{x} {y}', [x[0] for x in l1]) # Create bag of words
doc2_lt = reduce(lambda x, y: f'{x} {y}', [x[0] for x in l1]) # Create bag of words
cos_dist_lt = get_cosine_distance(doc1_lt, doc2_lt)
print(f"Document 1: {doc1_lt}")
print(f"Document 2: {doc2_lt}")
print(f"Cosine Distance:\n {cos_dist_lt}")
print("==========")
# Long Text Vectorizers
# Let's see how close the long texts are
doc1_lt = reduce(lambda x, y: f'{x} {y}', [x[0] for x in l1])
doc2_lt = reduce(lambda x, y: f'{x} {y}', [x[0] for x in l2])
cos_dist_lt = get_cosine_distance(doc1_lt, doc2_lt)
print(f"Document 1: {doc1_lt}")
print(f"Document 2: {doc2_lt}")
print(f"Cosine Distance:\n {cos_dist_lt}")
print("==========")
# [TEST] Short Text Vectorizers
# Again same texts should have cosine distance of 1
doc1_st = reduce(lambda x, y: f'{x} {y}', [x[1] for x in l2])
doc2_st = reduce(lambda x, y: f'{x} {y}', [x[1] for x in l2])
cos_dist_st = get_cosine_distance(doc1_st, doc2_st)
print(f"Document 1: {doc1_st}")
print(f"Document 2: {doc2_st}")
print(f"Cosine Distance:\n {cos_dist_st}")
print("==========")
# Short Text Vectorizers
# Let's see how close the short texts are
doc1_st = reduce(lambda x, y: f'{x} {y}', [x[1] for x in l1])
doc2_st = reduce(lambda x, y: f'{x} {y}', [x[1] for x in l2])
cos_dist_st = get_cosine_distance(doc1_st, doc2_st)
print(f"Document 1: {doc1_st}")
print(f"Document 2: {doc2_st}")
print(f"Cosine Distance:\n {cos_dist_st}")
print("==========")
# Short Text Vectorizers
# Cosine distance should be commutable
doc1_st = reduce(lambda x, y: f'{x} {y}', [x[1] for x in l2])
doc2_st = reduce(lambda x, y: f'{x} {y}', [x[1] for x in l1])
cos_dist_st = get_cosine_distance(doc1_st, doc2_st)
print(f"Document 1: {doc1_st}")
print(f"Document 2: {doc2_st}")
print(f"Cosine Distance:\n {cos_dist_st}")
| 2.421875 | 2 |
Packages/Dead/help/Lib/RebuildSearch.py | xylar/cdat | 62 | 12790513 | <gh_stars>10-100
#############################################################################
#############################################################################
# File: RebuildSearch.py #
# Date: 04-Dec-2007 #
#############################################################################
#############################################################################
from Tkinter import *
import Pmw
import geoparse
class RebuildSearch(Toplevel):
def __init__(self, parent, onClose, depth, onChangeSearchDepth):
Toplevel.__init__ (self, parent)
self.transient(parent)
self.title ('Rebuild Search')
self.onClose = onClose
self.onChangeSearchDepth = onChangeSearchDepth
# Position the dialog box relative to parent.
xpos = int(geoparse.get_x (parent.geometry())) + 100
ypos = int(geoparse.get_y (parent.geometry())) + 100
self.geometry ('+' + str(xpos) + '+' + str(ypos))
Label(self).grid(row=1)
Label(self,text='This will rebuild the search table.').grid(row=5)
Label(self).grid(row=7)
Label(self,text='A large search depth may').grid(row=9)
Label(self,text='take a long time.').grid(row=10)
Label(self).grid(row=13)
self.depthCounter = Pmw.Counter(self,
labelpos='w',
label_text='Search depth:',
entryfield_value=depth,
entryfield_validate = {'validator' : 'integer',
'min' : 1,
'max' : 4},
entry_width = 2,
entry_bg = 'white'
)
self.depthCounter.grid(row=15)
Label(self).grid(row=17)
Label(self,text='Click OK to proceed.').grid(row=20)
box = Pmw.ButtonBox(self)
box.add('OK', command = self.onOK)
box.add('Cancel', command = self.close)
box.setdefault('OK')
box.alignbuttons()
box.grid(row=25)
# Override DELETE_WINDOW handler to a local callback.
self.protocol ('WM_DELETE_WINDOW', self.close)
def onOK(self):
self.withdraw()
self.onChangeSearchDepth(int(self.depthCounter.get()))
self.close()
# Event handler when user closes the window.
def close(self):
self.onClose()
self.destroy()
| 2.640625 | 3 |
dashboard/migrations/0002_auto_20190523_1231.py | favefan/sams | 0 | 12790514 | # Generated by Django 2.2 on 2019-05-23 12:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='entrylist',
name='awards',
field=models.CharField(blank=True, default='未设置', max_length=200, null=True),
),
migrations.AlterField(
model_name='entrylist',
name='score_kind',
field=models.CharField(blank=True, default='未设置', max_length=100, null=True),
),
]
| 1.554688 | 2 |
cell.py | cdated/conway-curses | 0 | 12790515 | #!/usr/bin/env python
class Cell():
# postion(X, Y)
def __init__(self, alive=False, position=(0,0), bounds=(0, 5)):
# position(x, y)
self.position = position
self.alive = alive
self.next_state = False
# bounds(min, max)
self.bounds = bounds
self.neighbors = self.get_neighbors()
def get_neighbors(self):
neighbors = []
(x_coord, y_coord) = (self.position[0], self.position[1])
for i in range(3):
for j in range(3):
new_position = (x_coord + j - 1, y_coord + i - 1)
if self.get_valid_position(new_position):
neighbors.append(new_position)
return neighbors
def get_valid_position(self, position):
        (x_coord, y_coord) = (position[0], position[1])
        (lower, upper) = (self.bounds[0], self.bounds[1])
        if position == self.position:
            return False
        # Test X bounds
        if (x_coord < lower) or (x_coord > upper - 1):
            return False
        # Test Y bounds
        if (y_coord < lower) or (y_coord > upper - 1):
            return False
return True
def is_alive(self, num_living_neighbors):
''' if alive, stay alive with 2 or 3 neighbors
if dead, revive with exactly 3 neighbors
otherwise die'''
if self.alive:
if num_living_neighbors in [2, 3]:
return True
else:
return False
else:
if num_living_neighbors == 3:
return True
return False
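    # A quick sanity check of the rules above (a sketch):
    #   Cell(alive=True).is_alive(2)   -> True   (survives with 2 neighbors)
    #   Cell(alive=False).is_alive(3)  -> True   (exactly 3 revives)
    #   Cell(alive=True).is_alive(4)   -> False  (overcrowding)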
def apply_next_state(self):
self.alive = self.next_state
| 3.609375 | 4 |
src/api-examples/slowpairs.py | cern-fts/fts-monitoring | 1 | 12790516 | <reponame>cern-fts/fts-monitoring
#!/usr/bin/env python2
import json
from common import get_url
from optparse import OptionParser
def get_slow_pairs(threshold = 1, vo = None):
content = get_url('https://fts3-pilot.cern.ch:8449/fts3/ftsmon/overview', vo = vo, page = 'all')
pairs = json.loads(content)
slow = []
for pair in pairs['items']:
if 'current' in pair and pair['current'] < threshold:
slow.append(pair)
return slow
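# Example (a sketch): list pairs moving slower than 0.5 MB/s for the 'atlas' VO
#   slow = get_slow_pairs(threshold=0.5, vo='atlas')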
if __name__ == '__main__':
parser = OptionParser()
parser.add_option('-v', '--vo', dest = 'vo', help = 'Query only for a given VO', default = None)
parser.add_option('-t', '--threshold', dest = 'threshold', help = 'Threshold in MB', default = 1, type = 'float')
(options, args) = parser.parse_args()
slow = get_slow_pairs(options.threshold, options.vo)
for pair in slow:
print "%(source_se)s => %(dest_se)s with throughput %(current).2f" % pair
| 2.84375 | 3 |
language_acts/cms/management/commands/wt_update_index.py | kingsdigitallab/language-acts-docker | 0 | 12790517 | from wagtail.search.management.commands import update_index
class Command(update_index.Command):
pass
| 1.078125 | 1 |
viewformer/cli.py | jkulhanek/viewformer | 87 | 12790518 | from aparse import click
from viewformer.utils.click import LazyGroup
@click.group(cls=LazyGroup)
def main():
pass
@main.group(cls=LazyGroup)
def dataset():
pass
@main.group(cls=LazyGroup)
def visualize():
pass
@main.group(cls=LazyGroup)
def model():
pass
@main.group(cls=LazyGroup)
def evaluate():
pass
dataset.add_command('viewformer.data.commands.visualize', 'visualize')
dataset.add_command('viewformer.data.commands.generate', 'generate')
dataset.add_command('viewformer.data.commands.shuffle', 'shuffle')
visualize.add_command('viewformer.commands.visualize_codebook', 'codebook')
model.add_command('viewformer.commands.model_info', 'info')
evaluate.add_command("viewformer.evaluate.evaluate_transformer", "transformer")
evaluate.add_command("viewformer.evaluate.evaluate_transformer_multictx", "transformer-multictx")
evaluate.add_command("viewformer.evaluate.evaluate_transformer_multictx_allimg", "transformer-multictx-allimg")
evaluate.add_command("viewformer.evaluate.evaluate_codebook", "codebook")
evaluate.add_command("viewformer.evaluate.evaluate_sevenscenes", "7scenes")
evaluate.add_command("viewformer.evaluate.evaluate_sevenscenes_baseline", "7scenes-baseline")
evaluate.add_command("viewformer.evaluate.evaluate_sevenscenes_multictx", "7scenes-multictx")
evaluate.add_command("viewformer.evaluate.evaluate_co3d", "co3d")
evaluate.add_command("viewformer.evaluate.generate_gqn_images", "generate-gqn-images")
main.add_command("viewformer.train", "train")
main.add_command("viewformer.commands.generate_codes", 'generate-codes')
main.add_command("viewformer.commands.download_model", 'download-model')
if __name__ == '__main__':
main()
| 2.140625 | 2 |
webui_alternative/server/app.py | mlz000/Tok | 0 | 12790519 | <gh_stars>0
# Copyright 2017 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import tensorflow as tf
import firebase_admin
import time
import json
from firebase_admin import credentials
from firebase_admin import db
from flask import Flask, request, jsonify
from settings import PROJECT_ROOT
from chatbot.botpredictor import BotPredictor
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
app = Flask(__name__)
@app.route('/reply', methods=['POST', 'GET'])
def reply():
# user_id = request.args.get('userID')
# question = request.args.get('question')
session_id = 1
data = json.loads(request.get_data(as_text=True))
print(data)
user_id = data['userID']
question = data['message']
if user_id not in predictor.session_data.id_dict: # Including the case of 0
session_id = predictor.session_data.add_session(user_id)
else:
session_id = predictor.session_data.id_dict[user_id]
# print(session_id, question)
answer = predictor.predict(session_id, question)
ref = db.reference('messages')
ref2 = ref.child(user_id)
ref3 = ref2.child('messages')
ref3.push().set(
{
'content' : answer,
'data' : time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
'user' : 'Tok'
})
return answer
# return jsonify({'sessionId': session_id, 'sentence': answer})
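# Example request (a sketch; host and port assume the default app.run below,
# and the handler reads the raw body as JSON):
#   curl -X POST http://localhost:5000/reply \
#        -d '{"userID": "someUser", "message": "hello"}'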
if __name__ == "__main__":
cred = credentials.Certificate('key2.json')
firebase_admin.initialize_app(cred, {
'databaseURL': 'https://uci-tok.firebaseio.com'
})
corp_dir = os.path.join(PROJECT_ROOT, 'Data', 'Corpus')
knbs_dir = os.path.join(PROJECT_ROOT, 'Data', 'KnowledgeBase')
res_dir = os.path.join(PROJECT_ROOT, 'Data', 'Result')
with tf.Session() as sess:
predictor = BotPredictor(sess, corpus_dir=corp_dir, knbase_dir=knbs_dir,
result_dir=res_dir, result_file='basic')
        print("Web service started.")
        app.run(port=5000)
| 2.234375 | 2 |
openGaussBase/testcase/SQL/DML/set/Opengauss_Function_DML_Set_Case0032.py | opengauss-mirror/Yat | 0 | 12790520 | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type   : Function test
Case Name   : Setting the client encoding with the gs_guc tool does not take effect
Description :
    1. Use gs_guc set to change the client encoding to SQL_ASCII
    2. Use gs_guc reload to change the client encoding to GBK
Expect      :
    1. The setting does not take effect
    2. The setting does not take effect
History :
"""
import unittest
from yat.test import macro
from yat.test import Node
from testcase.utils.Logger import Logger
from testcase.utils.CommonSH import CommonSH
class Function(unittest.TestCase):
def setUp(self):
self.commonsh = CommonSH('dbuser')
self.user_node = Node('dbuser')
self.log = Logger()
self.cluster_path = macro.DB_INSTANCE_PATH
        self.log.info('''---Opengauss_Function_DML_Set_Case0032 start---''')
def test_encode(self):
cmd0 = "show client_encoding;"
msg0 = self.commonsh.execut_db_sql(cmd0)
self.log.info(msg0)
init = msg0.splitlines()[2].strip()
client = ['SQL_ASCII', 'UTF8']
self.var = client if init == 'UTF8' else list(reversed(client))
def restart_check():
self.commonsh.restart_db_cluster()
status = self.commonsh.get_db_cluster_status()
self.assertTrue("Normal" in status or 'Degraded' in status)
            # check that the change did not take effect (encoding is unchanged)
cmd = 'show client_encoding;'
msg = self.commonsh.execut_db_sql(cmd)
self.log.info(msg)
return msg
        # gs_guc set: set the client encoding to SQL_ASCII
cmd1 = f'''source {macro.DB_ENV_PATH}
gs_guc set -N all -I all -c "client_encoding='{self.var[0]}'"'''
self.log.info(cmd1)
msg1 = self.user_node.sh(cmd1).result()
self.log.info(msg1)
res = restart_check()
self.assertTrue(self.var[1] in res)
        # gs_guc reload: set the client encoding to GBK
cmd2 = f'''source {macro.DB_ENV_PATH}
gs_guc reload -D {self.cluster_path} -c "client_encoding = 'GBK'"'''
self.log.info(cmd2)
msg2 = self.user_node.sh(cmd2).result()
self.log.info(msg2)
res = restart_check()
self.assertTrue(self.var[1] in res)
def tearDown(self):
        self.log.info('''---Opengauss_Function_DML_Set_Case0032 end---''') | 1.890625 | 2
scphylo/tl/__init__.py | faridrashidi/scphylo-tools | 0 | 12790521 | <reponame>faridrashidi/scphylo-tools
"""Tools Module."""
from scphylo.tl.cna import infercna
from scphylo.tl.consensus import consensus, consensus_day
from scphylo.tl.fitch import fitch
from scphylo.tl.partition_function import partition_function
from scphylo.tl.score import ad, caset, cc, disc, dl, gs, mltd, mp3, rf, tpted
from scphylo.tl.solver import (
bnb,
booster,
cardelino,
dendro,
gpps,
grmt,
huntress,
infscite,
iscistree,
onconem,
phiscs_readcount,
phiscsb,
phiscsb_bulk,
phiscsi,
phiscsi_bulk,
rscistree,
sbm,
sciphi,
scistree,
scite,
siclonefit,
sphyr,
)
__all__ = (
infercna,
consensus,
consensus_day,
partition_function,
sbm,
ad,
cc,
dl,
mltd,
tpted,
bnb,
booster,
cardelino,
dendro,
huntress,
infscite,
iscistree,
onconem,
phiscsi_bulk,
phiscs_readcount,
phiscsb,
phiscsb_bulk,
phiscsi,
rscistree,
scistree,
scite,
siclonefit,
fitch,
caset,
disc,
mp3,
rf,
gs,
sphyr,
grmt,
sciphi,
gpps,
)
| 1.210938 | 1 |
Day-110/list_comprehension.py | arvimal/100DaysofCode-Python | 1 | 12790522 | #!/usr/bin/env python3
# Return a list
# The element at the index should be multiplied by 2
# Rest of the elements should be the same.
def double_index(lst, index):
if index > len(lst):
return lst
else:
return([n for n in lst[:index]] + [2 * lst[index]] + [n for n in lst[index + 1:]])
print(double_index([3, 8, -10, 12], 2))
| 4.0625 | 4 |
core/textRender.py | chiluf/visvis.dev | 0 | 12790523 | <reponame>chiluf/visvis.dev
# -*- coding: utf-8 -*-
# Copyright (C) 2012, <NAME>
#
# Visvis is distributed under the terms of the (new) BSD License.
# The full license can be found in 'license.txt'.
""" Module textRender
For rendering text in visvis.
Defines a wibject and a wobject: Label and Text,
which are both able to produce a single line of text
oriented at a certain angle.
Formatting
----------
Text can be formatted using the following constructs (which can be mixed):
    * hello^2 or hello^{there}, makes one or more characters superscript.
    * hello_2 or hello_{there}, makes one or more characters subscript.
    * hell\io or hell\i{ohoo}, makes one or more characters italic.
    * hell\bo or hell\b{ohoo}, makes one or more characters bold.
* hello\_there, a backslash escapes, thus keeping the _^ or \ after it.
Special characters
------------------
Characters are available for the following unicode sets:
* u0020 - u003f numbers
* u0040 - u00bf alphabet
* u00c0 - u037f latin
* u0380 - u03ff greek
* u2000 - u23ff symbols
There are several escape sequences for (mathematical) characters
that can be inserted using the backslash (for example '\infty').
People familiar with Latex know what they do:
* Re Im null infty
* int iint iiint forall
* leq geq approx approxeq ne in
* leftarrow uparrow rightarrow downarrow
* Leftarrow Uparrow Rightarrow Downarrow
* leftceil rightceil leftfloor rightfloor
* times cdot pm
* oplus ominus otimes oslash
    Letters from the Greek alphabet can be inserted in the same
    way (by starting the name with an uppercase letter, the
corresponding upper case greek letter is inserted):
* alpha beta gamma delta
* epsilon zeta eta theta
* iota kappa lambda mu
* nu xi omicron pi
* rho varsigma sigma tau
* upsilon phi chi psi
* omega
Note: In case one needs a character that is not in this list,
one can always look up its unicode value and use that instead.
"""
import OpenGL.GL as gl
import OpenGL.GLU as glu
import os
import numpy as np
import visvis
from visvis import ssdf
from visvis.pypoints import Pointset
#
from visvis.core.baseTexture import TextureObject
from visvis.core.base import Wobject
from visvis.core.misc import Property, PropWithDraw
from visvis.core.misc import getResourceDir, getColor
#
from visvis.core.cameras import depthToZ
from visvis.core.baseWibjects import Box
escapes = {
# upper case greek
'Alpha':0x0391, 'Beta':0x0392, 'Gamma':0x0393, 'Delta':0x0394,
'Epsilon':0x0395, 'Zeta':0x0396, 'Eta':0x0397, 'Theta':0x0398,
'Iota':0x0399, 'Kappa':0x039A, 'Lambda':0x039B, 'Mu':0x039C,
'Nu':0x039D, 'Xi':0x039E, 'Omicron':0x039F,
'Pi':0x03A0, 'Rho':0x03A1, 'Sigma':0x03A3, 'Tau':0x03A4,
'Upsilon':0x03A5, 'Phi':0x03A6, 'Chi':0x03A7, 'Psi':0x03A8, 'Omega':0x03A9,
# lower case greek
'alpha':0x03B1, 'beta':0x03B2, 'gamma':0x03B3, 'delta':0x03B4,
'epsilon':0x03B5, 'zeta':0x03B6, 'eta':0x03B7, 'theta':0x03B8,
'iota':0x03B9, 'kappa':0x03BA, 'lambda':0x03BB, 'mu':0x03BC,
'nu':0x03BD, 'xi':0x03BE, 'omicron':0x03BF,
'pi':0x03C0, 'rho':0x03C1, 'varsigma':0x03C2, 'sigma':0x03C3,
'tau':0x03C4, 'upsilon':0x03C5,
'phi':0x03C6, 'chi':0x03C7, 'psi':0x03C8, 'omega':0x03C9,
# some math
'Re':0x211c, 'Im':0x2111, 'null':0x2300, 'infty':0x221e,
'int':0x222b, 'iint':0x222c, 'iiint':0x222d,
'forall':0x2200,
'leq':0x22dc, 'geq':0x22dd, 'approx':0x2248, 'approxeq':0x2243, 'ne':0x2260,
'in':0x22f9,
'leftarrow':0x2190,'uparrow':0x2191,'rightarrow':0x2192,'downarrow':0x2193,
'Leftarrow':0x21D0,'Uparrow':0x21D1,'Rightarrow':0x21D2,'Downarrow':0x21D3,
'leftceil':0x2308,'rightceil':0x2309,'leftfloor':0x230A,'rightfloor':0x230B,
'times':0x2217, 'cdot':0x2219, 'pm':0x00b1,
'oplus':0x2295, 'ominus':0x2296, 'otimes':0x2297, 'oslash':0x2298,
}
# sort the keys, such that longer names are replaced first
escapesKeys = escapes.keys()
escapesKeys.sort( lambda x,y:len(y)-len(x))
class Font(TextureObject):
""" Font(info)
A Font object holds the texture that contains all the characters.
"""
def __init__(self, info):
TextureObject.__init__(self, 2)
# store font information
self.info = info
# set data
self.SetData(self.info.data)
def _UploadTexture(self, data, *args):
""" Overload to make it an alpha map.
"""
# Add lumincance channel
data2 = np.zeros((data.shape[0],data.shape[1],2), dtype=np.uint8)
data2[:,:,0] = 255
data2[:,:,1] = data
shape = data.shape
gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, 2, shape[1],shape[0], 0,
# gl.GL_ALPHA, gl.GL_UNSIGNED_BYTE, data)
gl.GL_LUMINANCE_ALPHA, gl.GL_UNSIGNED_BYTE, data2)
tmp1 = gl.GL_LINEAR
tmp2 = gl.GL_LINEAR
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, tmp1)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, tmp2)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP)
class FontManager:
""" FontManager()
Manager of fonts.
There should be only one instance of this class for each figure/context.
"""
def __init__(self):
# load font data
path = getResourceDir()
self.s = ssdf.load(os.path.join(path, 'fonts.ssdf'))
# list of fonts
self.fonts = {}
def GetFont(self, fontname):
""" GetFont(fontname)
Get a font instance. If that font was created earlier,
that font is returned, otherwise it is created and stored
for reuse.
"""
if fontname in self.fonts:
return self.fonts[fontname]
elif hasattr(self.s, fontname):
tmp = Font(self.s[fontname])
self.fonts[fontname] = tmp
return tmp
else:
raise ValueError("Invalid font name.")
class Glyph(object):
""" Glyph(font, char, size=12, styles=None)
A glyph is a character. It is visualized by rendering
the proper part from the texture stored in the Font object.
* sizex and sizey represent the size of the glyph.
* dy represents the offset in y direction (for sub/super scripts)
* width specifies how much space there should be before the next char
* s1 s2 t1 t2 represent texture coordinates
"""
# the font.info contains
# - a string of charcodes
    # - an array of origins
    # - an array of sizes
# - fontsize of the font in the data array
def __init__(self, font, char, size=12, styles=None):
# unwind the style for this glyph
self.style = MiniStyle()
if styles:
for style in styles:
self.style += style
style = self.style
# store font
self.font = font
info = self.font.info
# get asci code and check it
if isinstance(char, basestring):
ac = ord(char)
elif isinstance(char, int):
ac = char
else:
raise ValueError('To create a glyph, supply an int or character.')
# do we have that char?
        if ac not in info.charcodes:
            print "Warning: Cannot draw character %i! " % ac
            ac = 32 # fall back to a space
# default
infoSize, infoOrigin, infoWidth = info.size, info.origin, info.width
# should and can we display in italic or bold?
# Note: italic is now realized by printing it skewed rather using the
# italic glyphs. The reason is that when using the texture one would
# see artifacts from neighbouring characters. Additionally, it's now
# possible to mix bold and italic text, and one can make any supported
# unicode character italic.
# if style.italic and ac in info.charcodes_i:
# # italic text
# infoSize, infoOrigin, infoWidth = (
# info.size_i, info.origin_i, info.width_i)
if style.bold and ac in info.charcodes_b:
# bold text
infoSize, infoOrigin, infoWidth = (
info.size_b, info.origin_b, info.width_b)
# Find position in texture, normalized to texture coordinates
x1 = infoOrigin[ac,0]
x2 = x1 + infoSize[ac,0]
tmp = float(info.data.shape[1])
self.s1, self.s2 = (x1) / tmp, (x2-1) / tmp
y1 = infoOrigin[ac,1]
y2 = y1 + infoSize[ac,1]
tmp = float(info.data.shape[0])
self.t1, self.t2 = (y1) / tmp, (y2-1) / tmp
# Define skew factor to handle italics correctly
self.skewFactor = 0.0
if style.italic:
self.skewFactor = 0.5
# calculate width on screen, given the size
factor = size / float(info.fontsize)
self.sizex = infoSize[ac,0] * factor
self.sizey = infoSize[ac,1] * factor
self.width = float(infoWidth[ac]) * factor # is spacing?
smaller = 0.6
self.dy = 0.0 # normal script
if style.script == 1:
# sub script
self.dy = (1-smaller) * self.sizey
if style.script:
# super or subscript
self.skewFactor *= smaller
self.sizex = self.sizex * smaller
self.sizey = self.sizey * smaller
self.width = self.width * smaller#- self.sizex * (1.0-smaller)
class MiniStyle:
""" MiniStyle(script=0, bold=False, italic=False)
Class that represents the style of characters (sub/super script,
bold, and italic. Used when compiling the text.
script = {0:'normal', 1:'sub', 2:'super'}
"""
def __init__(self, script=0, bold=False, italic=False):
self.script = script
self.bold = bold
self.italic = italic
def __add__(self, other):
# allow None
if other is None:
return self
# set script
script = other.script
if script == 0:
script = self.script
# done
return MiniStyle( script,
self.bold or other.bold,
self.italic or other.italic )
def __repr__(self):
tmp = self.script, self.bold, self.italic
return '<MiniStyle script:%i, bold:%i, italic:%i>' % tmp
class BaseText(object):
""" BaseText(text='', fontname=None)
Base object for the Text wobject and Label wibject.
fontname may be 'mono', 'sans', 'serif' or None, in which case
the vv.settings.defaultFontName is used.
"""
def __init__(self, text='', fontname=None):
# init drawing data
self._texCords = None # coords in the font texture
self._vertices1 = None # the coords in screen coordinates (raw)
self._vertices2 = None # dito, but corrected for angle and alignment
# relative position of edges in pixels. (taking angle into account)
self._deltax = 0,0
self._deltay = 0,0
# store text
self._text = text
# Set and check fontname
if fontname is None:
fontname = visvis.settings.defaultFontName
fontname = fontname.lower()
if fontname not in ['mono', 'sans', 'serif']:
raise ValueError('Invalid font name.')
# more properties
self._size = 9
self._fontname = fontname
self._color = (0,0,0)
self._angle = 0
self._halign = -1
self._valign = 0
self._charSpacing = 1
def _Invalidate(self):
""" Invalidate this object, such that the text is recompiled
the next time it is drawn. """
self._texCords = None
self._vertices1 = None
self._vertices2 = None
@Property # Smart draw
def text():
"""Get/Set the text to display.
"""
def fget(self):
return self._text
def fset(self, value):
if value != self._text:
self._text = value
self._Invalidate() # force recalculation
self.Draw()
return locals()
@Property # Smart draw
def textAngle():
"""Get/Set the angle of the text in degrees.
"""
def fget(self):
return self._angle
def fset(self, value):
if value != self._angle:
self._angle = value
self._vertices2 = None # force recalculation
self.Draw()
return locals()
@Property
def textSpacing():
"""Get/Set the spacing between characters.
"""
def fget(self):
return self._charSpacing
def fset(self, value):
if value != self._charSpacing:
self._charSpacing = value
self._Invalidate() # force recalculation
self.Draw()
return locals()
@Property
def fontSize():
"""Get/Set the size of the text.
"""
def fget(self):
return self._size
def fset(self, value):
if value != self._size:
self._size = value
self._Invalidate() # force recalculation
self.Draw()
return locals()
@Property
def fontName():
"""Get/Set the font type by its name.
"""
def fget(self):
return self._fontname
def fset(self, value):
if value != self._fontname:
self._fontname = value
self._Invalidate() # force recalculation
self.Draw()
return locals()
@Property
def textColor():
"""Get/Set the color of the text.
"""
def fget(self):
return self._color
def fset(self, value):
value = getColor(value,'setting textColor')
if value != self._color:
self._color = value
self.Draw()
return locals()
@Property
def halign():
"""Get/Set the horizontal alignment. Specify as:
* 'left', 'center', 'right'
* -1, 0, 1
"""
def fget(self):
return self._halign
def fset(self, value):
if isinstance(value, int):
pass
elif isinstance(value, basestring):
value = value.lower()
tmp = {'left':-1,'center':0,'centre':0,'right':1 }
if not value in tmp:
raise ValueError('Invalid value for halign.')
value = tmp[value.lower()]
else:
raise ValueError('halign must be an int or string.')
value = int(value>0) - int(value<0)
if value != self._halign:
self._halign = value
self._vertices2 = None # force recalculation
self.Draw()
return locals()
@Property
def valign():
"""Get/Set the vertical alignment. Specify as:
* 'up', 'center', 'down'
* 'top', 'center', 'bottom'
* -1, 0, 1
"""
def fget(self):
return self._valign
def fset(self, value):
if isinstance(value, int):
pass
elif isinstance(value, basestring):
value = value.lower()
tmp={'up':-1,'top':-1,'center':0,'centre':0,'down':1,'bottom':1}
if not value in tmp:
raise ValueError('Invalid value for valign.')
value = tmp[value.lower()]
else:
raise ValueError('valign must be an int or string.')
value = int(value>0) - int(value<0)
if value != self._valign:
self._valign = value
self._vertices2 = None # force recalculation
self.Draw()
return locals()
def _Compile(self):
""" Create a series of glyphs from the given text. From these Glyphs
the textureCords in the font texture can be calculated.
Also the relative vertices are calculated, which are then corrected
for angle and alignment in _PositionText().
-> Produces _vertices1 (and is called when that is None)
"""
# make invalid first
self._Invalidate()
# get font instance from figure
f = self.GetFigure()
if not f:
return
font = f._fontManager.GetFont(self._fontname)
# clear glyphs
glyphs = []
self._xglyph = Glyph(font, 'X', self._size)
tt = self._text
# transform greek characters that were given without double backslash
tt = tt.replace('\alpha', unichr(escapes['alpha']))
tt = tt.replace('\beta', unichr(escapes['beta']))
tt = tt.replace('\rho', unichr(escapes['rho']))
tt = tt.replace('\theta', unichr(escapes['theta']))
# transform other chars
tt = tt.replace(r'\\', '\t') # double backslashes do not escape
for c in escapesKeys:
tt = tt.replace('\\'+c, unichr(escapes[c]))
tt = tt.replace('\t', r'\\')
# get italic and bold modifiers
        tt = tt.replace('\i', '\x06') # use sentinel chars that cannot
        tt = tt.replace('\b', '\x07') # occur in normal text
# build list of glyphs, take sub/super scripting into account.
escape = False
styles = []
style = None # Style to set
for i in range(len(tt)):
c = tt[i]
if escape:
g = Glyph(font, c, self._size, styles)
glyphs.append( g )
escape = False
elif c=='{':
# Append style to the list
if style:
styles.append(style)
style = None
elif c=='}':
# Remove style
if styles:
styles.pop()
elif c=='^':
style = MiniStyle(2)
elif c=='_':
style = MiniStyle(1)
elif c=='\x06':
style = MiniStyle(0,False,True)
elif c=='\x07':
style = MiniStyle(0,True,False)
            elif c=='\\' and i+1<len(tt) and tt[i+1] in '_^\x06\x07':
escape = True
else:
# create glyph (with new style (or not))
g = Glyph(font, c, self._size, styles+[style])
glyphs.append( g )
style = None
# build arrays with vertices and coordinates
x1, y1, z = 0, 0, 0
vertices = Pointset(3)
texCords = Pointset(2)
for g in glyphs:
x2 = x1 + g.sizex
y2 = g.sizey
#y2 = y1 - g.sizey
dy = g.dy
# append texture coordinates
texCords.append(g.s1, g.t1)
texCords.append(g.s2, g.t1)
texCords.append(g.s2, g.t2)
texCords.append(g.s1, g.t2)
# set skewing for position
skew = self._size * g.skewFactor
# append vertices
vertices.append(x1+skew, y1+dy, z)
vertices.append(x2+skew, y1+dy, z)
vertices.append(x2, y2+dy, z)
vertices.append(x1, y2+dy, z)
# prepare for next glyph
x1 = x1 + g.width + self._charSpacing
# store
self._texCords = texCords
self._vertices1 = vertices
def _PositionText(self, event=None):
""" The name is ment as a verb. The vertices1 are corrected
for angle and alignment.
-> produces _vertices2 from _vertices1
(and is called when the first is None)
"""
# get figure
fig = self.GetFigure()
# get vertices
if self._vertices1 is None:
return
vertices = self._vertices1.copy()
# scale text according to global text size property
vertices *= fig._relativeFontSize
# obtain dimensions
if len(vertices):
x1, x2 = vertices[:,0].min(), vertices[:,0].max()
else:
x1, x2 = 0,0
y1, y2 = 0, self._xglyph.sizey
# set anchor
if self._halign < 0: anchorx = x1
elif self._halign > 0: anchorx = x2
else: anchorx = x1 + (x2-x1)/2.0
#
if self._valign < 0: anchory = y1
elif self._valign > 0: anchory = y2
else: anchory = y1 + (y2-y1)/2.0
# apply anchor
angle = self._angle
if isinstance(self, Text):
# Text is a wobject, so must be flipped on y axis
vertices[:,0] = vertices[:,0] - anchorx
vertices[:,1] = -(vertices[:,1] - anchory)
elif isinstance(self, Label):
angle = -self._angle
vertices[:,0] = vertices[:,0] - anchorx
vertices[:,1] = vertices[:,1] - anchory
# apply angle
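        # (Standard 2D rotation: x' = x*cos(a) - y*sin(a),
        #  y' = x*sin(a) + y*cos(a), applied to all vertices at once.)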
if angle != 0.0:
cos_angle = np.cos(angle*np.pi/180.0)
sin_angle = np.sin(angle*np.pi/180.0)
vertices[:,0], vertices[:,1] = (
vertices[:,0] * cos_angle - vertices[:,1] * sin_angle,
vertices[:,0] * sin_angle + vertices[:,1] * cos_angle)
# Move anchor in label
if isinstance(self, Label):
w,h = self.position.size
# determine whether the text is vertical or horizontal
halign, valign = self._halign, self._valign
if self._angle > 135 or self._angle < -135:
                halign, valign = -halign, -valign
elif self._angle > 45:
halign, valign = valign, -halign
elif self._angle < -45:
halign, valign = valign, halign
# set anchor y
if valign < 0: anchory = 0
elif valign > 0: anchory = h
else: anchory = h/2.0
# set anchor x
if halign < 0: anchorx = 0
elif halign > 0: anchorx = w
else: anchorx = w/2.0
# apply
vertices[:,0] = vertices[:,0] + anchorx
vertices[:,1] = vertices[:,1] + anchory
# store
self._vertices2 = vertices
# calculate edges (used by for example the AxisLabel class)
if vertices is not None and len(vertices):
self._deltax = vertices[:,0].min(), vertices[:,0].max()
self._deltay = vertices[:,1].min(), vertices[:,1].max()
def _DrawText(self, x=0, y=0, z=0):
# Translate
if x or y or z:
gl.glPushMatrix()
gl.glTranslatef(x, y, z)
# make sure the glyphs are created
if self._vertices1 is None or self._texCords is None:
self._Compile()
if self._vertices2 is None:
self._PositionText()
# get font instance from figure
fig = self.GetFigure()
if not fig:
return
font = fig._fontManager.GetFont(self._fontname)
# enable texture
font.Enable()
# prepare
texCords = self._texCords#.copy()
vertices = self._vertices2#.copy()
# init vertex and texture array
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
gl.glEnableClientState(gl.GL_TEXTURE_COORD_ARRAY)
gl.glVertexPointerf(vertices.data)
gl.glTexCoordPointerf(texCords.data)
# draw
if self.textColor and len(vertices):
clr = self.textColor
gl.glColor(clr[0], clr[1], clr[2])
gl.glDrawArrays(gl.GL_QUADS, 0, len(vertices))
gl.glFlush()
# disable texture and clean up
if x or y or z:
gl.glPopMatrix()
font.Disable()
gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
gl.glDisableClientState(gl.GL_TEXTURE_COORD_ARRAY)
class Text(Wobject, BaseText):
""" Text(parent, text='', x=0, y=0, z=0, fontname=None)
A wobject representing a string of characters. The text has
a certain position in the scene. The fontname can be
'mono', 'sans' or 'serif'. If not given, the vv.settings.defaultFontName
is used.
"""
def __init__(self, parent, text='', x=0, y=0, z=0, fontname=None):
Wobject.__init__(self, parent)
BaseText.__init__(self, text, fontname)
# store coordinates
self._x, self._y, self._z = x, y, z
# for internal use
self._screenx, self._screeny, self._screenz = 0, 0, 0
@PropWithDraw
def x():
"""Get/Set the x position of the text.
"""
def fget(self):
return self._x
def fset(self, value):
self._x = value
return locals()
@PropWithDraw
def y():
"""Get/Set the y position of the text.
"""
def fget(self):
return self._y
def fset(self, value):
self._y = value
return locals()
@PropWithDraw
def z():
"""Get/Set the z position of the text.
"""
def fget(self):
return self._z
def fset(self, value):
self._z = value
return locals()
def OnDraw(self):
# get screen position and store
tmp = glu.gluProject(self._x, self._y, self._z)
self._screenx, self._screeny, self._screenz = tuple(tmp)
# make integer (to prevent glitchy behaviour), but not z!
self._screenx = int(self._screenx+0.5)
self._screeny = int(self._screeny+0.5)
def OnDrawScreen(self):
self._DrawText( self._screenx, self._screeny, depthToZ(self._screenz) )
class Label(Box, BaseText):
""" Label(parent, text='', fontname=None)
A wibject (inherits from box) with text inside.
The fontname can be 'mono', 'sans' or 'serif'. If not given, the
vv.settings.defaultFontName is used.
"""
def __init__(self, parent, text='', fontname=None):
Box.__init__(self, parent)
BaseText.__init__(self, text, fontname)
# no edge
self.edgeWidth = 0
# init position (this is to set the size)
self.position = 10,10,100,16
# we need to know about position changes to update alignment
self.eventPosition.Bind(self._PositionText)
def OnDraw(self):
# Draw the box
Box.OnDraw(self)
# Draw the text
self._DrawText()
| 2.109375 | 2 |
migrations/versions/59564f63b0ae_.py | RaihanStark/raven-raffles-web | 0 | 12790524 | <filename>migrations/versions/59564f63b0ae_.py
"""empty message
Revision ID: 59564f63b0ae
Revises: c2889ee965a5
Create Date: 2020-06-10 23:03:10.493772
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '59564f63b0ae'
down_revision = 'c2889ee965a5'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('task', sa.Column('profile_id', sa.Integer(), nullable=True))
op.create_foreign_key(None, 'task', 'profile', ['profile_id'], ['id'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'task', type_='foreignkey')
op.drop_column('task', 'profile_id')
# ### end Alembic commands ###
| 1.203125 | 1 |
SZR/apps/groups/tests/test_view.py | Alek96/SZR | 1 | 12790525 | from GitLabApi import objects
from core.tests.test_view import LoginMethods
from core.tests.test_view import SimpleUrlsTestsCases
from django.db.models import QuerySet
from django.urls import reverse
from groups import models
from groups.sidebar import GroupSidebar, FutureGroupSidebar
from groups.tests import test_forms
from groups.tests import models as test_models
class GitlabWrapperAppNameCase:
class GitlabWrapperAppNameTest(SimpleUrlsTestsCases.SimpleUrlsTests):
app_name = 'groups'
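# Wrapping the base test class in an outer namespace class is a common
# unittest idiom: it keeps the abstract GitlabWrapperAppNameTest from being
# picked up and run directly by test discovery.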
class InitSidebarPageTest(GitlabWrapperAppNameCase.GitlabWrapperAppNameTest):
name = 'init_sidebar'
args = {'group_id': '1'}
@LoginMethods.login_wrapper
def test_page_found(self):
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'sidebar.html')
self.assertIn('group', response.context)
self.assertIn('sidebar', response.context)
self.assertIsInstance(response.context['group'], objects.Group)
self.assertIsInstance(response.context['sidebar'], GroupSidebar)
class IndexPageTest(GitlabWrapperAppNameCase.GitlabWrapperAppNameTest):
name = 'index'
@LoginMethods.login_wrapper
def test_page_found(self):
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/index.html')
self.assertIn('group_list', response.context)
        for group in response.context['group_list']:
            self.assertIsInstance(group, objects.Group)
class DetailPageTest(GitlabWrapperAppNameCase.GitlabWrapperAppNameTest):
name = 'detail'
args = {'group_id': '1'}
@LoginMethods.login_wrapper
def test_page_found(self):
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/detail.html')
self.assertIn('group', response.context)
self.assertIn('sidebar', response.context)
self.assertIn('unfinished_add_subgroup_list', response.context)
self.assertIn('unfinished_add_project_list', response.context)
self.assertIsInstance(response.context['group'], objects.Group)
self.assertIsInstance(response.context['sidebar'], GroupSidebar)
self.assertIsInstance(response.context['unfinished_add_subgroup_list'], QuerySet)
self.assertIsInstance(response.context['unfinished_add_project_list'], QuerySet)
class MembersPageTest(GitlabWrapperAppNameCase.GitlabWrapperAppNameTest):
name = 'members'
args = {'group_id': '1'}
@LoginMethods.login_wrapper
def test_page_found(self):
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/members.html')
self.assertIn('group', response.context)
self.assertIn('sidebar', response.context)
self.assertIn('unfinished_task_list', response.context)
self.assertIsInstance(response.context['group'], objects.Group)
self.assertIsInstance(response.context['sidebar'], GroupSidebar)
self.assertIsInstance(response.context['unfinished_task_list'], QuerySet)
class TasksPageTest(GitlabWrapperAppNameCase.GitlabWrapperAppNameTest):
name = 'tasks'
args = {'group_id': '1'}
@LoginMethods.login_wrapper
def test_page_found(self):
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/tasks.html')
self.assertIn('group', response.context)
self.assertIn('sidebar', response.context)
self.assertIn('unfinished_task_list', response.context)
self.assertIn('finished_task_list', response.context)
self.assertIn('new_group_links', response.context)
self.assertIsInstance(response.context['group'], objects.Group)
self.assertIsInstance(response.context['sidebar'], GroupSidebar)
self.assertIsInstance(response.context['unfinished_task_list'], list)
self.assertIsInstance(response.context['finished_task_list'], list)
self.assertIsInstance(response.context['new_group_links'], list)
new_group_links = [
('New Task Group', reverse('groups:new_task_group', kwargs=self.args)),
('New Subgroup', reverse('groups:new_subgroup_task', kwargs=self.args)),
('New Project', reverse('groups:new_project_task', kwargs=self.args)),
('New Member', reverse('groups:new_member_task', kwargs=self.args))
]
for group_link in response.context['new_group_links']:
self.assertIn(group_link, new_group_links)
class NewGroupPageTest(GitlabWrapperAppNameCase.GitlabWrapperAppNameTest):
name = 'new_group'
@LoginMethods.login_wrapper
def test_page_get(self):
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/form_base_site.html')
@LoginMethods.login_wrapper
def test_page_post_not_valid_data(self):
response = self.client.post(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/form_base_site.html')
@LoginMethods.login_wrapper
def test_page_post_valid_data(self):
response = self.client.post(self.get_url(), test_forms.AddSubgroupFormTests.valid_form_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, reverse('groups:index'))
class NewSubgroupPageTest(GitlabWrapperAppNameCase.GitlabWrapperAppNameTest):
name = 'new_subgroup'
args = {'group_id': '1'}
@LoginMethods.login_wrapper
def test_page_get(self):
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/form_base_site.html')
@LoginMethods.login_wrapper
def test_page_post_not_valid_data(self):
response = self.client.post(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/form_base_site.html')
@LoginMethods.login_wrapper
def test_page_post_valid_data(self):
response = self.client.post(self.get_url(), test_forms.AddSubgroupFormTests.valid_form_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, reverse('groups:detail', kwargs=self.args))
class NewTaskGroupPageTest(GitlabWrapperAppNameCase.GitlabWrapperAppNameTest):
name = 'new_task_group'
args = {'group_id': '1'}
@LoginMethods.login_wrapper
def test_page_get(self):
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/form_base_site.html')
@LoginMethods.login_wrapper
def test_page_post_not_valid_data(self):
response = self.client.post(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/form_base_site.html')
@LoginMethods.login_wrapper
def test_page_post_valid_data(self):
response = self.client.post(self.get_url(), test_forms.TaskGroupFormTests.valid_form_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, reverse('groups:tasks', kwargs=self.args))
model = models.TaskGroup.objects.get(
gitlab_group=models.GitlabGroup.objects.get(
gitlab_id=self.args['group_id']))
for key, value in test_forms.TaskGroupFormTests.valid_form_data.items():
self.assertEqual(getattr(model, key), value)
class FutureTaskGroupPageTest(GitlabWrapperAppNameCase.GitlabWrapperAppNameTest):
name = 'new_task_group'
args = {'task_id': None}
def setUp(self):
super().setUp()
self.parent_task = test_models.AddSubgroupCreateMethods().create_parent_task()
self.args['task_id'] = self.parent_task.id
@LoginMethods.login_wrapper
def test_page_not_found(self):
self.args['task_id'] += 1
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 404)
@LoginMethods.login_wrapper
def test_page_get(self):
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/form_base_site.html')
@LoginMethods.login_wrapper
def test_page_post_not_valid_data(self):
response = self.client.post(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/form_base_site.html')
@LoginMethods.login_wrapper
def test_page_post_valid_data(self):
response = self.client.post(self.get_url(), test_forms.TaskGroupFormTests.valid_form_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url,
reverse('groups:future_group_tasks', kwargs=self.args))
model = models.TaskGroup.objects.get(parent_task=self.parent_task)
for key, value in test_forms.TaskGroupFormTests.valid_form_data.items():
self.assertEqual(getattr(model, key), value)
class EditTaskGroupPageTest(GitlabWrapperAppNameCase.GitlabWrapperAppNameTest):
name = 'edit_task_group'
args = {'task_group_id': 1}
def setUp(self):
super().setUp()
self.parent_task = test_models.AddSubgroupCreateMethods().create_parent_task()
self.task_group = test_models.AddSubgroupCreateMethods().create_task_group(
parent_task=self.parent_task
)
self.args['task_group_id'] = self.task_group.id
@LoginMethods.login_wrapper
def test_page_not_found(self):
self.args['task_group_id'] += 1
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 404)
@LoginMethods.login_wrapper
def test_page_get(self):
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/form_base_site.html')
def _test_page_post_valid_data(self):
data = self.get_initial_form_data()
self.assertEqual(data['name'], self.task_group.name)
data['name'] = 'Another Name'
response = self.client.post(self.get_url(), data)
self.assertEqual(response.status_code, 302)
self.task_group.refresh_from_db()
self.assertEqual(self.task_group.name, data['name'])
return response
@LoginMethods.login_wrapper
def test_page_post_valid_data_redirect_to_tasks(self):
self.task_group.gitlab_group.gitlab_id = 42
self.task_group.gitlab_group.save()
response = self._test_page_post_valid_data()
self.assertEqual(response.url,
reverse('groups:tasks', kwargs={'group_id': self.task_group.gitlab_group.gitlab_id}))
@LoginMethods.login_wrapper
def test_page_post_valid_data_redirect_to_future_tasks(self):
response = self._test_page_post_valid_data()
self.assertEqual(response.url,
reverse('groups:future_group_tasks', kwargs={'task_id': self.parent_task.id}))
class NewSubgroupTaskPageTest(GitlabWrapperAppNameCase.GitlabWrapperAppNameTest):
name = 'new_subgroup_task'
args = {'task_group_id': 1}
def setUp(self):
super().setUp()
self.parent_task = test_models.AddSubgroupCreateMethods().create_parent_task()
self.task_group = test_models.AddSubgroupCreateMethods().create_task_group(
parent_task=self.parent_task
)
self.args['task_group_id'] = self.task_group.id
@LoginMethods.login_wrapper
def test_page_not_found(self):
self.args['task_group_id'] += 1
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 404)
@LoginMethods.login_wrapper
def test_page_get(self):
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/form_base_site.html')
@LoginMethods.login_wrapper
def test_page_post_not_valid_data(self):
response = self.client.post(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/form_base_site.html')
def _test_page_post_valid_data(self):
response = self.client.post(self.get_url(), test_forms.AddSubgroupFormTests.valid_form_data)
self.assertEqual(response.status_code, 302)
model = models.AddSubgroup.objects.get(task_group=self.task_group)
for key, value in test_forms.AddSubgroupFormTests.valid_form_data.items():
self.assertEqual(getattr(model, key), value)
return response
@LoginMethods.login_wrapper
def test_page_post_valid_data_redirect_to_tasks(self):
self.task_group.gitlab_group.gitlab_id = 42
self.task_group.gitlab_group.save()
response = self._test_page_post_valid_data()
self.assertEqual(response.url,
reverse('groups:tasks', kwargs={'group_id': self.task_group.gitlab_group.gitlab_id}))
@LoginMethods.login_wrapper
    def test_page_post_valid_data_redirect_to_future_tasks(self):
response = self._test_page_post_valid_data()
self.assertEqual(response.url,
reverse('groups:future_group_tasks', kwargs={'task_id': self.parent_task.id}))
class EditSubgroupTaskPageTest(GitlabWrapperAppNameCase.GitlabWrapperAppNameTest):
name = 'edit_subgroup_task'
args = {'task_id': 1}
def setUp(self):
super().setUp()
self.parent_task = test_models.AddSubgroupCreateMethods().create_parent_task()
self.task = test_models.AddSubgroupCreateMethods().create_task(
parent_task=self.parent_task)
self.args['task_id'] = self.task.id
@LoginMethods.login_wrapper
def test_page_not_found(self):
self.args['task_id'] += 1
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 404)
@LoginMethods.login_wrapper
def test_page_get(self):
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/form_base_site.html')
def _test_page_post_valid_data(self):
data = self.get_initial_form_data()
self.assertEqual(data['name'], self.task.name)
data['name'] = 'Another Name'
data['description'] = 'Description'
response = self.client.post(self.get_url(), data)
self.assertEqual(response.status_code, 302)
self.task.refresh_from_db()
self.assertEqual(self.task.name, data['name'])
self.assertEqual(self.task.description, data['description'])
return response
@LoginMethods.login_wrapper
def test_page_post_valid_data_redirect_to_tasks(self):
self.task.gitlab_group.gitlab_id = 42
self.task.gitlab_group.save()
response = self._test_page_post_valid_data()
self.assertEqual(response.url,
reverse('groups:tasks', kwargs={'group_id': self.task.gitlab_group.gitlab_id}))
@LoginMethods.login_wrapper
def test_page_post_valid_data_redirect_to_future_tasks(self):
response = self._test_page_post_valid_data()
self.assertEqual(response.url,
reverse('groups:future_group_tasks', kwargs={'task_id': self.parent_task.id}))
class NewProjectPageTest(GitlabWrapperAppNameCase.GitlabWrapperAppNameTest):
name = 'new_project'
args = {'group_id': '1'}
@LoginMethods.login_wrapper
def test_page_get(self):
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/form_base_site.html')
@LoginMethods.login_wrapper
def test_page_post_not_valid_data(self):
response = self.client.post(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/form_base_site.html')
@LoginMethods.login_wrapper
def test_page_post_valid_data(self):
response = self.client.post(self.get_url(), test_forms.AddProjectFormTests.valid_form_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, reverse('groups:detail', kwargs=self.args))
class NewProjectTaskPageTest(GitlabWrapperAppNameCase.GitlabWrapperAppNameTest):
name = 'new_project_task'
args = {'task_group_id': 1}
def setUp(self):
super().setUp()
self.parent_task = test_models.AddProjectCreateMethods().create_parent_task()
self.task_group = test_models.AddProjectCreateMethods().create_task_group(
parent_task=self.parent_task
)
self.args['task_group_id'] = self.task_group.id
@LoginMethods.login_wrapper
def test_page_not_found(self):
self.args['task_group_id'] += 1
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 404)
@LoginMethods.login_wrapper
def test_page_get(self):
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/form_base_site.html')
@LoginMethods.login_wrapper
def test_page_post_not_valid_data(self):
response = self.client.post(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/form_base_site.html')
def _test_page_post_valid_data(self):
response = self.client.post(self.get_url(), test_forms.AddProjectFormTests.valid_form_data)
self.assertEqual(response.status_code, 302)
model = models.AddProject.objects.get(task_group=self.task_group)
for key, value in test_forms.AddProjectFormTests.valid_form_data.items():
self.assertEqual(getattr(model, key), value)
return response
@LoginMethods.login_wrapper
def test_page_post_valid_data_redirect_to_tasks(self):
self.task_group.gitlab_group.gitlab_id = 42
self.task_group.gitlab_group.save()
response = self._test_page_post_valid_data()
self.assertEqual(response.url,
reverse('groups:tasks', kwargs={'group_id': self.task_group.gitlab_group.gitlab_id}))
@LoginMethods.login_wrapper
    def test_page_post_valid_data_redirect_to_future_tasks(self):
response = self._test_page_post_valid_data()
self.assertEqual(response.url,
reverse('groups:future_group_tasks', kwargs={'task_id': self.parent_task.id}))
class EditProjectTaskPageTest(GitlabWrapperAppNameCase.GitlabWrapperAppNameTest):
name = 'edit_project_task'
args = {'task_id': 1}
def setUp(self):
super().setUp()
self.parent_task = test_models.AddProjectCreateMethods().create_parent_task()
self.task = test_models.AddProjectCreateMethods().create_task(
parent_task=self.parent_task)
self.args['task_id'] = self.task.id
@LoginMethods.login_wrapper
def test_page_not_found(self):
self.args['task_id'] += 1
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 404)
@LoginMethods.login_wrapper
def test_page_get(self):
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/form_base_site.html')
def _test_page_post_valid_data(self):
data = self.get_initial_form_data()
self.assertEqual(data['name'], self.task.name)
data['name'] = 'Another Name'
data['description'] = 'Description'
response = self.client.post(self.get_url(), data)
self.assertEqual(response.status_code, 302)
self.task.refresh_from_db()
self.assertEqual(self.task.name, data['name'])
self.assertEqual(self.task.description, data['description'])
return response
@LoginMethods.login_wrapper
def test_page_post_valid_data_redirect_to_tasks(self):
self.task.gitlab_group.gitlab_id = 42
self.task.gitlab_group.save()
response = self._test_page_post_valid_data()
self.assertEqual(response.url,
reverse('groups:tasks', kwargs={'group_id': self.task.gitlab_group.gitlab_id}))
@LoginMethods.login_wrapper
def test_page_post_valid_data_redirect_to_future_tasks(self):
response = self._test_page_post_valid_data()
self.assertEqual(response.url,
reverse('groups:future_group_tasks', kwargs={'task_id': self.parent_task.id}))
class NewMemberPageTest(GitlabWrapperAppNameCase.GitlabWrapperAppNameTest):
name = 'new_member'
args = {'group_id': '1'}
@LoginMethods.login_wrapper
def test_page_get(self):
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/form_base_site.html')
@LoginMethods.login_wrapper
def test_page_post_not_valid_data(self):
response = self.client.post(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/form_base_site.html')
@LoginMethods.login_wrapper
def test_page_post_valid_data(self):
response = self.client.post(self.get_url(), test_forms.AddMemberFormTests.valid_form_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, reverse('groups:members', kwargs=self.args))
class NewMemberTaskPageTest(GitlabWrapperAppNameCase.GitlabWrapperAppNameTest):
name = 'new_member_task'
args = {'task_group_id': 1}
def setUp(self):
super().setUp()
self.parent_task = test_models.AddMemberCreateMethods().create_parent_task()
self.task_group = test_models.AddMemberCreateMethods().create_task_group(
parent_task=self.parent_task
)
self.args['task_group_id'] = self.task_group.id
@LoginMethods.login_wrapper
def test_page_not_found(self):
self.args['task_group_id'] += 1
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 404)
@LoginMethods.login_wrapper
def test_page_get(self):
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/form_base_site.html')
@LoginMethods.login_wrapper
def test_page_post_not_valid_data(self):
response = self.client.post(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/form_base_site.html')
def _test_page_post_valid_data(self):
response = self.client.post(self.get_url(), test_forms.AddMemberFormTests.valid_form_data)
self.assertEqual(response.status_code, 302)
model = models.AddMember.objects.get(task_group=self.task_group)
for key, value in test_forms.AddMemberFormTests.valid_form_data.items():
self.assertEqual(getattr(model, key), value)
return response
@LoginMethods.login_wrapper
def test_page_post_valid_data_redirect_to_tasks(self):
self.task_group.gitlab_group.gitlab_id = 42
self.task_group.gitlab_group.save()
response = self._test_page_post_valid_data()
self.assertEqual(response.url,
reverse('groups:tasks', kwargs={'group_id': self.task_group.gitlab_group.gitlab_id}))
@LoginMethods.login_wrapper
    def test_page_post_valid_data_redirect_to_future_tasks(self):
response = self._test_page_post_valid_data()
self.assertEqual(response.url,
reverse('groups:future_group_tasks', kwargs={'task_id': self.parent_task.id}))
class EditMemberTaskPageTest(GitlabWrapperAppNameCase.GitlabWrapperAppNameTest):
name = 'edit_member_task'
args = {'task_id': 1}
def setUp(self):
super().setUp()
self.parent_task = test_models.AddMemberCreateMethods().create_parent_task()
self.task = test_models.AddMemberCreateMethods().create_task(
parent_task=self.parent_task)
self.args['task_id'] = self.task.id
@LoginMethods.login_wrapper
def test_page_not_found(self):
self.args['task_id'] += 1
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 404)
@LoginMethods.login_wrapper
def test_page_get(self):
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/form_base_site.html')
def _test_page_post_valid_data(self):
data = self.get_initial_form_data()
self.assertEqual(data['username'], self.task.username)
data['username'] = 'Another username'
response = self.client.post(self.get_url(), data)
self.assertEqual(response.status_code, 302)
self.task.refresh_from_db()
self.assertEqual(self.task.username, data['username'])
return response
@LoginMethods.login_wrapper
def test_page_post_valid_data_redirect_to_tasks(self):
self.task.gitlab_group.gitlab_id = 42
self.task.gitlab_group.save()
response = self._test_page_post_valid_data()
self.assertEqual(response.url,
reverse('groups:tasks', kwargs={'group_id': self.task.gitlab_group.gitlab_id}))
@LoginMethods.login_wrapper
def test_page_post_valid_data_redirect_to_future_tasks(self):
response = self._test_page_post_valid_data()
self.assertEqual(response.url,
reverse('groups:future_group_tasks', kwargs={'task_id': self.parent_task.id}))
class NewMembersFromFilePageTest(GitlabWrapperAppNameCase.GitlabWrapperAppNameTest):
name = 'new_members_from_file'
args = {'group_id': '1'}
def setUp(self):
super().setUp()
for key, value in test_forms.MembersFromFileFormTests.valid_file_data.items():
value.file.seek(0)
@LoginMethods.login_wrapper
def test_page_get(self):
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/form_base_site.html')
@LoginMethods.login_wrapper
def test_page_post_not_valid_data(self):
response = self.client.post(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/form_base_site.html')
@LoginMethods.login_wrapper
def test_page_post_valid_data(self):
response = self.client.post(self.get_url(), {**test_forms.MembersFromFileFormTests.valid_form_data,
**test_forms.MembersFromFileFormTests.valid_file_data})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, reverse('groups:tasks', kwargs=self.args))
class FutureNewMembersFromFilePageTest(GitlabWrapperAppNameCase.GitlabWrapperAppNameTest):
name = 'new_members_from_file'
args = {'task_id': None}
def setUp(self):
super().setUp()
self.parent_task = test_models.AddSubgroupCreateMethods().create_parent_task()
self.args['task_id'] = self.parent_task.id
for key, value in test_forms.MembersFromFileFormTests.valid_file_data.items():
value.file.seek(0)
@LoginMethods.login_wrapper
def test_page_not_found(self):
self.args['task_id'] += 1
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 404)
@LoginMethods.login_wrapper
def test_page_get(self):
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/form_base_site.html')
@LoginMethods.login_wrapper
def test_page_post_not_valid_data(self):
response = self.client.post(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/form_base_site.html')
@LoginMethods.login_wrapper
def test_page_post_valid_data(self):
response = self.client.post(self.get_url(), {**test_forms.MembersFromFileFormTests.valid_form_data,
**test_forms.MembersFromFileFormTests.valid_file_data})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url,
reverse('groups:future_group_tasks', kwargs=self.args))
class NewSubgroupsAndMembersFromFilePageTest(GitlabWrapperAppNameCase.GitlabWrapperAppNameTest):
name = 'new_subgroup_and_members_from_file'
args = {'group_id': '1'}
def setUp(self):
super().setUp()
for key, value in test_forms.SubgroupAndMembersFromFileFormTests.valid_file_data.items():
value.file.seek(0)
@LoginMethods.login_wrapper
def test_page_get(self):
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/form_base_site.html')
@LoginMethods.login_wrapper
def test_page_post_not_valid_data(self):
response = self.client.post(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/form_base_site.html')
@LoginMethods.login_wrapper
def test_page_post_valid_data(self):
response = self.client.post(self.get_url(), {**test_forms.SubgroupAndMembersFromFileFormTests.valid_form_data,
**test_forms.SubgroupAndMembersFromFileFormTests.valid_file_data})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, reverse('groups:tasks', kwargs=self.args))
class FutureNewSubgroupsAndMembersFromFilePageTest(GitlabWrapperAppNameCase.GitlabWrapperAppNameTest):
name = 'new_subgroup_and_members_from_file'
args = {'task_id': None}
def setUp(self):
super().setUp()
self.parent_task = test_models.AddSubgroupCreateMethods().create_parent_task()
self.args['task_id'] = self.parent_task.id
for key, value in test_forms.SubgroupAndMembersFromFileFormTests.valid_file_data.items():
value.file.seek(0)
@LoginMethods.login_wrapper
def test_page_not_found(self):
self.args['task_id'] += 1
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 404)
@LoginMethods.login_wrapper
def test_page_get(self):
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/form_base_site.html')
@LoginMethods.login_wrapper
def test_page_post_not_valid_data(self):
response = self.client.post(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/form_base_site.html')
@LoginMethods.login_wrapper
def test_page_post_valid_data(self):
response = self.client.post(self.get_url(), {**test_forms.SubgroupAndMembersFromFileFormTests.valid_form_data,
**test_forms.SubgroupAndMembersFromFileFormTests.valid_file_data})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url,
reverse('groups:future_group_tasks', kwargs=self.args))
class FutureGroupDetailPageTest(GitlabWrapperAppNameCase.GitlabWrapperAppNameTest):
name = 'future_group_detail'
args = {'task_id': None}
def setUp(self):
super().setUp()
self.task = test_models.AddSubgroupCreateMethods().create_task()
self.args['task_id'] = self.task.id
@LoginMethods.login_wrapper
def test_page_not_found(self):
self.args['task_id'] += 1
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 404)
@LoginMethods.login_wrapper
def test_page_found(self):
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/tasks/detail.html')
self.assertIn('task', response.context)
self.assertIn('sidebar', response.context)
self.assertIn('unfinished_add_subgroup_list', response.context)
self.assertIn('unfinished_add_project_list', response.context)
self.assertIsInstance(response.context['task'], models.AddSubgroup)
self.assertIsInstance(response.context['sidebar'], FutureGroupSidebar)
self.assertIsInstance(response.context['unfinished_add_subgroup_list'], QuerySet)
self.assertIsInstance(response.context['unfinished_add_project_list'], QuerySet)
class FutureGroupMembersPageTest(GitlabWrapperAppNameCase.GitlabWrapperAppNameTest):
name = 'future_group_members'
args = {'task_id': None}
def setUp(self):
super().setUp()
self.task = test_models.AddSubgroupCreateMethods().create_task()
self.args['task_id'] = self.task.id
@LoginMethods.login_wrapper
def test_page_not_found(self):
self.args['task_id'] += 1
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 404)
@LoginMethods.login_wrapper
def test_page_found(self):
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/tasks/members.html')
self.assertIn('task', response.context)
self.assertIn('sidebar', response.context)
self.assertIn('unfinished_task_list', response.context)
self.assertIsInstance(response.context['task'], models.AddSubgroup)
self.assertIsInstance(response.context['sidebar'], FutureGroupSidebar)
self.assertIsInstance(response.context['unfinished_task_list'], QuerySet)
class FutureGroupTasksPageTest(GitlabWrapperAppNameCase.GitlabWrapperAppNameTest):
name = 'future_group_tasks'
args = {'task_id': None}
def setUp(self):
super().setUp()
self.task = test_models.AddSubgroupCreateMethods().create_task()
self.args['task_id'] = self.task.id
@LoginMethods.login_wrapper
def test_page_not_found(self):
self.args['task_id'] += 1
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 404)
@LoginMethods.login_wrapper
def test_page_found(self):
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/tasks/tasks.html')
self.assertIn('task', response.context)
self.assertIn('sidebar', response.context)
self.assertIn('unfinished_task_list', response.context)
self.assertIn('finished_task_list', response.context)
self.assertIn('new_group_links', response.context)
self.assertIsInstance(response.context['task'], models.AddSubgroup)
self.assertIsInstance(response.context['sidebar'], FutureGroupSidebar)
self.assertIsInstance(response.context['unfinished_task_list'], list)
self.assertIsInstance(response.context['finished_task_list'], list)
self.assertIsInstance(response.context['new_group_links'], list)
new_group_links = [
('New Task Group', reverse('groups:new_task_group', kwargs=self.args)),
('New Subgroup', reverse('groups:new_subgroup_task', kwargs=self.args)),
('New Project', reverse('groups:new_project_task', kwargs=self.args)),
('New Member', reverse('groups:new_member_task', kwargs=self.args))
]
for group_link in response.context['new_group_links']:
self.assertIn(group_link, new_group_links)
class AjaxLoadSubgroupPageTest(GitlabWrapperAppNameCase.GitlabWrapperAppNameTest):
name = 'ajax_load_subgroups'
args = {'group_id': '1'}
@LoginMethods.login_wrapper
def test_page_found(self):
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/ajax/load_subgroups_and_projects.html')
self.assertIn('group_list', response.context)
self.assertIsInstance(response.context['group_list'], list)
        for group in response.context['group_list']:
            self.assertIsInstance(group, objects.GroupSubgroup)
self.assertIn('project_list', response.context)
self.assertEqual(response.context['project_list'], [])
class AjaxLoadSubgroupAndProjectsPageTest(GitlabWrapperAppNameCase.GitlabWrapperAppNameTest):
name = 'ajax_load_subgroups_and_projects'
args = {'group_id': '1'}
@LoginMethods.login_wrapper
def test_page_found(self):
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/ajax/load_subgroups_and_projects.html')
self.assertIn('group_list', response.context)
self.assertIsInstance(response.context['group_list'], list)
        for group in response.context['group_list']:
            self.assertIsInstance(group, objects.GroupSubgroup)
self.assertIn('project_list', response.context)
        for project in response.context['project_list']:
            self.assertIsInstance(project, objects.GroupProject)
| 2.09375 | 2 |
gist/repo.py | thisisibrahimd/gist | 0 | 12790526 | <reponame>thisisibrahimd/gist<filename>gist/repo.py
import logging
from sqlalchemy import create_engine, select
from sqlalchemy.orm import sessionmaker, subqueryload
from gist.entities import EligibilityCriterion, Person, ConditionOccurrence, DrugExposure, Measurement, Observation, ProcedureOccurrence
class Repo:
def __init__(self, conn_str):
self.conn_str = conn_str
self.engine = create_engine(self.conn_str)
self.session = sessionmaker(self.engine)
class CritRepo(Repo):
def get_all_trial_ids(self):
stmt = (
select(EligibilityCriterion.nct_id)
)
logging.debug(f"query for get_all_trial_ids: {stmt}")
with self.session() as session:
trial_ids = session.execute(stmt).scalars().unique()
return trial_ids
def get_criteria_by_trial_id(self, trial_id):
stmt = (
select(EligibilityCriterion)
.filter(EligibilityCriterion.nct_id == trial_id)
)
logging.debug(f"query for get_criteria_by_trial_id: {stmt}")
with self.session() as session:
trials = session.execute(stmt).scalars().all()
return trials
class EhrRepo(Repo):
def get_ehr(self):
stmt = (
select(Person)
.options(subqueryload(Person.condition_occurrence))
.options(subqueryload(Person.drug_exposure))
.options(subqueryload(Person.procedure_occurrence))
.options(subqueryload(Person.observation))
.options(subqueryload(Person.measurement))
)
logging.debug(f"query for get_ehr: {stmt}")
with self.session() as session:
ehr = session.execute(stmt).scalars().unique().all()
return ehr
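# Usage sketch (illustrative only; the connection strings are placeholders,
# not real deployment values):
#
#     crit_repo = CritRepo("postgresql://user:password@localhost/ctgov")
#     for nct_id in crit_repo.get_all_trial_ids():
#         criteria = crit_repo.get_criteria_by_trial_id(nct_id)
#
#     ehr_repo = EhrRepo("postgresql://user:password@localhost/omop")
#     persons = ehr_repo.get_ehr()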
| 2.234375 | 2 |
api/service/listing_service.py | build-week-optimal-pricing/Data-science | 0 | 12790527 | #!/usr/bin/env python3
from api import DB
from api.models.listing import Listing
def get_all_queries():
"""
Returns all stored listing queries.
"""
return list(Listing.query.all())
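# Usage sketch (illustrative; assumes a Flask app object and an active
# application context, since Listing.query needs one):
#
#     with app.app_context():
#         listings = get_all_queries()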
| 2.15625 | 2 |
oops/#009.py | krishankansal/PythonPrograms | 0 | 12790528 | <filename>oops/#009.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 15 18:15:37 2020
@author: krishan
"""
class ContactList(list):
def search(self, name):
'''Return all contacts that contain the search value
in their name.'''
matching_contacts = []
for contact in self:
if name in contact.name:
matching_contacts.append(contact)
return matching_contacts
class Contact:
'''this class is responsible for maintaining a list of all contacts
in a class variable all_contacts'''
all_contacts = ContactList()
def __init__(self, name, email):
self.name = name
self.email = email
Contact.all_contacts.append(self)
class Supplier(Contact):
def order(self, order):
print("If this were a real system we would send "
"'{}' order to '{}'".format(order, self.name))
| 3.96875 | 4 |
src/benzak_etl/load.py | tgrx/benzak-etl | 0 | 12790529 | import asyncio
import json
from datetime import date
from decimal import Decimal
from typing import Dict
from aiohttp import ClientResponse
from dynaconf import settings
_PRICE_HISTORY_API = f"{settings.BENZAK_API_URL}/price-history/"
async def load_price(logger, session, price: Dict):
logger.debug(
f"calling Benzak price history API:"
f' POST "{_PRICE_HISTORY_API}"'
f" json={json.dumps(price, indent=2, sort_keys=True)}"
)
response: ClientResponse = await session.post(
_PRICE_HISTORY_API,
json=price,
headers={"AUTHORIZATION": settings.BENZAK_API_TOKEN},
)
logger.debug(f"got response: [{response.status} {response.reason}]")
if settings.DEBUG and response.status != 201:
payload = json.dumps(await response.json(), indent=2, sort_keys=True)
logger.debug(f"API response: {payload}")
async def load_prices(
logger, session, prices: Dict[date, Decimal], currency: int, fuel: int
):
logger.debug(
f"loading prices"
f" for currency={currency}, fuel={fuel}:"
f" {len(prices)} prices"
)
logger.debug("creating tasks: load price")
tasks = []
for actual_at, price in prices.items():
payload = {
"at": actual_at.strftime("%Y-%m-%d"),
"price": str(price),
"currency": currency,
"fuel": fuel,
}
task = asyncio.create_task(load_price(logger, session, payload))
tasks.append(task)
logger.debug(f"created {len(tasks)} tasks")
logger.debug("awaiting tasks: load price")
for task in tasks:
await task
logger.debug(f"loaded {len(prices)} prices")
| 2.6875 | 3 |
src/embeds.py | ayman2598/GabbyGums | 2 | 12790530 | <reponame>ayman2598/GabbyGums<filename>src/embeds.py
"""
"""
import discord
from discord.ext import commands
from datetime import datetime
from typing import Optional, Dict, Tuple, Union
from db import StoredInvite, CachedMessage
import logging
from utils.moreColors import gabby_gums_dark_green, gabby_gums_light_green, gabby_gums_purple
log = logging.getLogger(__name__)
def split_message(message: str) -> Tuple[str, str]:
    # Discord caps embed field values at 1024 characters, so long message
    # content is split into two chunks before being added as fields.
    # TODO: split on a word boundary instead of a fixed index.
    msg1 = message[:1000]
    msg2 = message[1000:]
    return msg1, msg2
def edited_message_embed(author_id, author_name: str, author_discrim, channel_id, before_msg: str, after_msg: str,
message_id: str, guild_id) -> discord.Embed:
before_msg = before_msg if before_msg else "Message not in the cache."
embed = discord.Embed(title="Edited Message",
description="<@{}> - {}#{}".format(author_id, author_name, author_discrim),
color=0x61cd72, timestamp=datetime.utcnow())
embed.set_thumbnail(
url="https://i.imgur.com/Q8SzUdG.png")
embed.add_field(name="Info:",
value="A message by <@{author_id}>, was edited in <#{channel_id}>\n"
"[Go To Message](https://discordapp.com/channels/{guild_id}/{channel_id}/{message_id})".format(author_id=author_id, channel_id=channel_id, guild_id=guild_id, message_id=message_id),
inline=False)
if len(before_msg) > 1024 or len(after_msg) > 1024: # To simplify things, if one is greater split both
before_msg1, before_msg2 = split_message(before_msg)
after_msg1, after_msg2 = split_message(after_msg)
embed.add_field(name="Message Before Edit:", value=before_msg1, inline=False)
if len(before_msg2.strip()) > 0:
embed.add_field(name="Message Before Edit Continued:", value=before_msg2, inline=False)
embed.add_field(name="Message After Edit:", value=after_msg1, inline=False)
if len(after_msg2.strip()) > 0:
embed.add_field(name="Message After Edit Continued:", value=after_msg2, inline=False)
else:
embed.add_field(name="Message Before Edit:", value=before_msg, inline=True)
embed.add_field(name="Message After Edit:", value=after_msg, inline=True)
embed.set_footer(text="User ID: {}".format(author_id))
return embed
def deleted_message_embed(message_content: Optional[str], author: Optional[discord.Member], channel_id: int, message_id: int = -1,
webhook_info: Optional[CachedMessage] = None, pk_system_owner: Optional[discord.Member] = None,
cached: bool = True) -> discord.Embed:
# If the webhook_info is none, create dummy object to make if's neater
if webhook_info is None:
webhook_info = CachedMessage(None, None, None, None, None, None, None, None, None, None)
if cached:
pk_id_msg = ""
if webhook_info.member_pkid is not None or webhook_info.system_pkid is not None:
s = '\u205f' # Medium Mathematical Space
pk_id_msg = f"{s}\n{s}\nSystem ID: {s}{s}{s}**{webhook_info.system_pkid}** \nMember ID: {s}**{webhook_info.member_pkid}**"
log.info("pk_id_msg set")
if author is None:
log.info("Author is None")
# We have NO info on the author of the message.
if webhook_info.webhook_author_name is not None:
log.info("Webhook Author is NOT None")
description_text = f"{webhook_info.webhook_author_name}{pk_id_msg}"
info_author = f"**{webhook_info.webhook_author_name}**"
else:
log.info("Webhook Author is None")
description_text = info_author = "Uncached User"
elif author.discriminator == "0000":
description_text = f"{author.name}{pk_id_msg}"
info_author = f"**{author.name}**"
else:
description_text = f"<@{author.id}> - {author.name}#{author.discriminator}"
info_author = f"<@{author.id}>"
embed = discord.Embed(title="Deleted Message",
description=description_text,
color=0x9b59b6,
timestamp=datetime.utcnow())
embed.set_thumbnail(url="http://i.imgur.com/fJpAFgN.png")
embed.add_field(name="Info:",
value="A message by {}, was deleted in <#{}>".format(info_author, channel_id),
inline=False)
if pk_system_owner is not None:
embed.add_field(name="Linked Discord Account:",
value=f"<@{pk_system_owner.id}> - {pk_system_owner.name}#{pk_system_owner.discriminator}",
inline=False)
if message_content == "": # Make sure we don't end up throwing an error due to an empty field value.
message_content = "None"
if len(message_content) > 1024:
msg_cont_1, msg_cont_2 = split_message(message_content)
embed.add_field(name="Message:", value=msg_cont_1, inline=False)
embed.add_field(name="Message continued:", value=msg_cont_2, inline=False)
else:
embed.add_field(name="Message:", value=message_content, inline=False)
if author is not None:
embed.set_footer(text=f"User ID: {author.id}")
return embed
else:
return unknown_deleted_message(channel_id, message_id)
def unknown_deleted_message(channel_id, message_id) -> discord.Embed:
embed = discord.Embed(title="Deleted Message",
description="Unknown User", color=0x9b59b6, timestamp=datetime.utcnow())
embed.set_thumbnail(url="http://i.imgur.com/fJpAFgN.png")
embed.add_field(name="Info:",
value="A message not in the cache was deleted in <#{}>".format(channel_id),
inline=False)
embed.add_field(name="Message ID:", value=message_id, inline=False)
return embed
def member_join(member: discord.Member, invite: Optional[StoredInvite], pk_info: Optional[Dict], manage_guild=True) -> discord.Embed:
embed = discord.Embed(description="<@!{}> - {}#{}".format(member.id, member.name, member.discriminator),
color=0x00ff00, timestamp=datetime.utcnow())
embed.set_author(name="New Member Joined!!!",
icon_url="https://www.emoji.co.uk/files/twitter-emojis/objects-twitter/11031-inbox-tray.png")
# Need to use format other than WebP for image to display on iOS. (I think this is a recent discord bug.)
ios_compatible_avatar_url = member.avatar_url_as(static_format="png")
embed.set_thumbnail(url=ios_compatible_avatar_url)
embed.add_field(name="Info:",
value="{} has joined the server!!!".format(member.display_name),
inline=False)
account_age = datetime.utcnow() - member.created_at
if account_age.days > 0:
account_age_name = "Account Age"
account_age_value = f"**{account_age.days}** days old"
else:
account_age_name = "**New Account!**"
hours = account_age.seconds // 3600
minutes = (account_age.seconds % 3600) // 60
if hours > 0:
account_age_value = f"\N{WARNING SIGN} **Warning!** Account is only **{hours}** hours and **{minutes}** minutes old! \N{WARNING SIGN}"
else:
seconds = account_age.seconds % 60
account_age_value = f"\N{WARNING SIGN} **Warning!** Account is only **{minutes}** minutes and **{seconds}** seconds old! \N{WARNING SIGN}"
embed.add_field(name=account_age_name,
value=account_age_value,
inline=True)
embed.add_field(name="Current Member Count", value="**{}** Members".format(member.guild.member_count), inline=True)
if pk_info is not None:
embed.add_field(name="\N{Zero Width Space}", value="\n__**Plural Kit Information**__", inline=False)
# embed.add_field(name="\N{Zero Width Space}", value="\N{Zero Width Space}", inline=True) # Add a blank embed to force the PK info onto it's own line.
if "name" in pk_info:
embed.add_field(name="System Name", value=pk_info['name'], inline=True)
embed.add_field(name="System ID", value=pk_info['id'], inline=True)
# Compute the account age
pk_created_date = datetime.strptime(pk_info['created'], '%Y-%m-%dT%H:%M:%S.%fZ')
pk_account_age = datetime.utcnow() - pk_created_date
embed.add_field(name="PK Account Age", value=f"**{pk_account_age.days}** days old", inline=True)
if invite is not None:
embed.add_field(name=" ", value="\n__**Invite Information**__", inline=False)
if invite.invite_name is not None:
embed.add_field(name="Name:", value="{}".format(invite.invite_name))
if invite.invite_id is not None:
embed.add_field(name="Code", value="{}".format(invite.invite_id))
if invite.actual_invite is not None:
embed.add_field(name="Uses", value="{}".format(invite.actual_invite.uses))
embed.add_field(name="Created By", value="<@!{}> - {}#{}".format(invite.actual_invite.inviter.id,
invite.actual_invite.inviter.name,
invite.actual_invite.inviter.discriminator))
embed.add_field(name="Created on",
value=invite.actual_invite.created_at.strftime("%b %d, %Y, %I:%M:%S %p UTC"))
else:
embed.add_field(name="Uses", value="{}".format(invite.uses))
if invite.inviter_id is not None:
embed.add_field(name="Created By", value="<@{}>".format(invite.inviter_id))
if invite.created_ts is not None:
embed.add_field(name="Created on", value=invite.created_at().strftime("%b %d, %Y, %I:%M:%S %p UTC"))
else:
if not manage_guild:
embed.add_field(name="Permissions Warning!",
value="**Manage Server Permissions** needed for invite tracking.")
elif member.bot:
embed.add_field(name=" ", value="\n__**Invite Information**__", inline=False)
embed.add_field(name="Code", value="Bot OAuth Link")
else:
embed.add_field(name="__**Invite Information**__",
value="Unable to determine invite information. It's possible the invite was a one time use invite."
" You may be able to determine the inviter by using the Audit Log.\n"
"Additionally, you can greatly improve the reliability of invite tracking by giving Gabby Gums the **Manage Channels** permission.", inline=False)
embed.set_footer(text="User ID: {}".format(member.id))
return embed
def member_leave(member: discord.Member) -> discord.Embed:
embed = discord.Embed(description="<@{}> - {}#{}".format(member.id, member.name, member.discriminator),
color=0xf82125, timestamp=datetime.utcnow())
embed.set_author(name="Member Left 😭",
icon_url="https://www.emoji.co.uk/files/mozilla-emojis/objects-mozilla/11928-outbox-tray.png")
# Need to use format other than WebP for image to display on iOS. (I think this is a recent discord bug.)
ios_compatible_avatar_url = member.avatar_url_as(static_format="png")
embed.set_thumbnail(url=ios_compatible_avatar_url)
embed.add_field(name="Info:",
value="{} has left the server 😭.".format(member.display_name),
inline=False)
embed.set_footer(text="User ID: {}".format(member.id))
return embed
def member_kick(member: discord.Member, audit_log: Optional[discord.AuditLogEntry]) -> discord.Embed:
embed = discord.Embed(description="<@{}> - {}#{}".format(member.id, member.name, member.discriminator),
color=discord.Color.dark_orange(), timestamp=datetime.utcnow())
embed.set_author(name="Member Kicked",
icon_url="https://i.imgur.com/o96t3cV.png")
# Need to use format other than WebP for image to display on iOS. (I think this is a recent discord bug.)
ios_compatible_avatar_url = member.avatar_url_as(static_format="png")
embed.set_thumbnail(url=ios_compatible_avatar_url)
embed.add_field(name="Info:",
value="{} was kicked from the server.".format(member.display_name),
inline=False)
if audit_log is not None:
embed.add_field(name="Kicked By:",
value="<@{}> - {}#{}".format(audit_log.user.id, audit_log.user.name, audit_log.user.discriminator), inline=False)
reason = f"{audit_log.reason}" if audit_log.reason else "No Reason was given."
embed.add_field(name="Reason:",
value=reason,
inline=False)
embed.set_footer(text="User ID: {}".format(member.id))
return embed
def member_ban(member: discord.Member, audit_log: Optional[discord.AuditLogEntry]) -> discord.Embed:
embed = discord.Embed(description="<@{}> - {}#{}".format(member.id, member.name, member.discriminator),
color=discord.Color.dark_red(), timestamp=datetime.utcnow())
embed.set_author(name="Member Banned", icon_url="http://i.imgur.com/Imx0Znm.png")
# Need to use format other than WebP for image to display on iOS. (I think this is a recent discord bug.)
ios_compatible_avatar_url = str(member.avatar_url_as(static_format="png"))
embed.set_thumbnail(url=ios_compatible_avatar_url)
embed.add_field(name="Info:",
value="**{}** was banned from the server.".format(member.display_name),
inline=False)
if audit_log is not None:
embed.add_field(name="Banned By:",
value="<@{}> - {}#{}".format(audit_log.user.id, audit_log.user.name, audit_log.user.discriminator), inline=False)
reason = f"{audit_log.reason}" if audit_log.reason else "No Reason was given."
embed.add_field(name="Reason:",
value=reason,
inline=False)
# else:
# embed.add_field(name="Need `View Audit Log` Permissions to show more information",
# value="\N{zero width space}")
embed.set_footer(text="User ID: {}".format(member.id))
return embed
def member_unban(member: discord.User, audit_log: Optional[discord.AuditLogEntry]) -> discord.Embed:
embed = discord.Embed(description="<@{}> - {}#{}".format(member.id, member.name, member.discriminator),
color=discord.Color.dark_green(), timestamp=datetime.utcnow())
embed.set_author(name="Member Unbanned", icon_url="https://i.imgur.com/OCcebCO.png")
# Need to use format other than WebP for image to display on iOS. (I think this is a recent discord bug.)
ios_compatible_avatar_url = str(member.avatar_url_as(static_format="png"))
embed.set_thumbnail(url=ios_compatible_avatar_url)
embed.add_field(name="Info:",
value="**{}** was unbanned from the server.".format(member.display_name),
inline=False)
if audit_log is not None:
embed.add_field(name="Unbanned By:",
value="<@{}> - {}#{}".format(audit_log.user.id, audit_log.user.name, audit_log.user.discriminator), inline=False)
reason = f"{audit_log.reason}" if audit_log.reason else "No Reason was given."
embed.add_field(name="Reason:",
value=reason,
inline=False)
# else:
# embed.add_field(name="Need `View Audit Log` Permissions to show more information",
# value="\N{zero width space}")
embed.set_footer(text="User ID: {}".format(member.id))
return embed
def member_nick_update(before: discord.Member, after: discord.Member) -> discord.Embed:
embed = discord.Embed(
description="<@{}> - {}#{} changed their nickname.".format(after.id, after.name, after.discriminator),
color=0x00ffff, timestamp=datetime.utcnow())
embed.set_author(name="Nickname Changed")
embed.set_thumbnail(url="https://i.imgur.com/HtQ53lx.png")
embed.add_field(name="Old Nickname", value=before.nick, inline=True)
embed.add_field(name="New Nickname", value=after.nick, inline=True)
embed.set_footer(text="User ID: {}".format(after.id))
return embed
def user_name_update(before: discord.User, after: discord.User) -> discord.Embed:
if before.name != after.name and before.discriminator == after.discriminator:
# Name changed, discriminator did not
changed_txt = "Username"
elif before.name == after.name and before.discriminator != after.discriminator:
# Discrim changed, Name did not
changed_txt = "Discriminator"
else:
# Both changed
changed_txt = "Username & Discriminator"
embed = discord.Embed(description=f"<@{after.id}> - {after.name}#{after.discriminator} changed their {changed_txt}.",
color=discord.Color.teal(), timestamp=datetime.utcnow())
embed.set_author(name=f"{changed_txt} Changed")
if before.name != after.name:
embed.add_field(name="Old Username:", value=before.name, inline=True)
embed.add_field(name="New Username:", value=after.name, inline=True)
if before.discriminator != after.discriminator:
embed.add_field(name="Old Discriminator:", value=before.discriminator, inline=True)
embed.add_field(name="New Discriminator:", value=after.discriminator, inline=True)
embed.set_footer(text="User ID: {}".format(after.id))
return embed
def user_avatar_update(before: discord.User, after: discord.User, embed_image_filename: str) -> discord.Embed:
embed = discord.Embed(description="<@{}> - {}#{} changed their avatar.".format(after.id, after.name, after.discriminator),
color=0x00aaaa, timestamp=datetime.utcnow())
embed.set_author(name="Avatar Changed")
embed.set_image(url=f"attachment://{embed_image_filename}")
embed.set_footer(text="User ID: {}".format(after.id))
return embed
def command_timed_out_embed(message: str = "The command has timed out.", color: discord.Color = discord.Color.dark_orange()) -> discord.Embed:
"""Returns an embed formatted for command time outs"""
embed = discord.Embed(title="Command Timed Out!",
description=f"❌ {message}",
color=color)
return embed
def command_canceled_embed(message: str = "The command was canceled.", color: discord.Color = discord.Color.dark_orange()) -> discord.Embed:
"""Returns an embed formatted for canceled commands"""
embed = discord.Embed(title="**Command Canceled**",
description=f"❌ {message}",
color=color)
return embed
def exception_w_message(message: discord.Message) -> discord.Embed:
embed = discord.Embed()
embed.colour = 0xa50000
embed.title = message.content
guild_id = message.guild.id if message.guild else "DM Message"
embed.set_footer(text="Server: {}, Channel: {}, Sender: <@{}> - {}#{}".format(
message.author.name, message.author.discriminator, message.author.id,
guild_id, message.channel.id))
return embed
| 2.71875 | 3 |
get_inventory_from_awx.py | jonnymccullagh/get_inventory_from_awx | 0 | 12790531 | <filename>get_inventory_from_awx.py
#!/usr/bin/env python3
"""
Creates a local inventory file from an inventory in AWX
Usage:
python ../get_inventory_from_awx.py \
--url https://awx.domain.com \
-u admin \
-p "topsecret" \
"my-ec2-dev-inventory"
"""
import argparse
import sys
import requests
parser = argparse.ArgumentParser(
description="Convert Ansible AWX Inventory to standard inventory"
)
parser.add_argument("--url", required=True, help="base url of AWX/Tower")
parser.add_argument("-u", "--username", help="username")
parser.add_argument("-p", "--password", help="password")
parser.add_argument("inventory", nargs=1, help="inventory name")
args = parser.parse_args()
all_inventories = requests.get(
f"{args.url}/api/v2/inventories/", auth=(args.username, args.password)
)
inventory_id = -1
for inventory in all_inventories.json()["results"]:
if inventory["name"] == args.inventory[0]:
inventory_id = inventory["id"]
break
if inventory_id == -1:
print(f"Inventory {args.inventory[0]} not found ")
sys.exit(1)
inventory_url = (
f"{args.url}/api/v2/inventories/{inventory_id}"
"/script/?hostvars=1&towervars=1&all=1"
)
inventory = requests.get(inventory_url, auth=(args.username, args.password))
hosts = inventory.json()
for key in sorted(hosts):
if key == "all":
continue
if key == "_meta":
continue
if "hosts" in hosts[key]:
print(f"[{key}]")
for host in hosts[key]["hosts"]:
print(host)
print("")
if "children" in hosts[key]:
print("[{key}:children]")
for child in hosts[key]["children"]:
print(child)
print("")
if "vars" in hosts[key]:
print("[{key}:vars]")
for var in hosts[key]["vars"]:
print("{var}={hosts[key]['vars'][var]}")
print("")
print("")
| 3.25 | 3 |
playnetmano_rm/objects/base.py | rickyhai11/playnetmano_rm | 0 | 12790532 | """playnetmano_rm common internal object model"""
from oslo_utils import versionutils
from oslo_versionedobjects import base
from playnetmano_rm import objects
VersionedObjectDictCompat = base.VersionedObjectDictCompat
class Playnetmano_rmObject(base.VersionedObject):
"""Base class for playnetmano_rm objects.
This is the base class for all objects that can be remoted or instantiated
via RPC. Simply defining a sub-class of this class would make it remotely
instantiatable. Objects should implement the "get" class method and the
"save" object method.
"""
OBJ_PROJECT_NAMESPACE = 'playnetmano_rm'
VERSION = '1.0'
@staticmethod
def _from_db_object(context, obj, db_obj):
if db_obj is None:
return None
for field in obj.fields:
if field == 'metadata':
obj['metadata'] = db_obj['meta_data']
else:
obj[field] = db_obj[field]
obj._context = context
obj.obj_reset_changes()
return obj
class Playnetmano_rmObjectRegistry(base.VersionedObjectRegistry):
def registration_hook(self, cls, index):
"""Callback for object registration.
When an object is registered, this function will be called for
maintaining playnetmano_rm.objects.$OBJECT as the highest-versioned
implementation of a given object.
"""
version = versionutils.convert_version_to_tuple(cls.VERSION)
if not hasattr(objects, cls.obj_name()):
setattr(objects, cls.obj_name(), cls)
else:
curr_version = versionutils.convert_version_to_tuple(
getattr(objects, cls.obj_name()).VERSION)
if version >= curr_version:
setattr(objects, cls.obj_name(), cls)
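# A minimal sketch of a concrete object, assuming the "get"/"save" contract described in
# Playnetmano_rmObject's docstring (the field names and the db_api accessor are hypothetical):
#
# @Playnetmano_rmObjectRegistry.register
# class Subcloud(Playnetmano_rmObject, VersionedObjectDictCompat):
#     VERSION = '1.0'
#
#     @classmethod
#     def get(cls, context, subcloud_id):
#         db_obj = db_api.subcloud_get(context, subcloud_id)  # hypothetical db accessor
#         return cls._from_db_object(context, cls(context), db_obj)
#
#     def save(self):
#         updates = self.obj_get_changes()
#         db_api.subcloud_update(self._context, self.id, updates)  # hypothetical
#         self.obj_reset_changes()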
| 2.4375 | 2 |
scrubadub/filth/date_of_birth.py | datascopeanalytics/scrubadub | 190 | 12790533 | import random
import datetime
import dateparser
from faker import Faker
from .base import Filth
class DateOfBirthFilth(Filth):
type = 'date_of_birth'
min_age_years = 18
max_age_years = 100
@staticmethod
def generate(faker: Faker) -> str:
"""Generates an example of this ``Filth`` type, usually using the faker python library.
:param faker: The ``Faker`` class from the ``faker`` library
:type faker: Faker
:return: An example of this ``Filth``
:rtype: str
"""
formats = [
'%c', # Tue Aug 16 21:30:00 1988 (en_US); locale dependant
'%x', # 08/16/1988 (en_US); locale dependant
'%a %d %b %Y', # Sun 19 Jan 1999
'%A %d %B %Y', # Sunday 19 January 1999
'%d-%m-%Y', # 15-01-1999
'%A %dth, %B, %Y', # Monday 08th, January, 1973
]
return faker.date_of_birth().strftime(random.choice(formats))
def is_valid(self) -> bool:
"""Check to see if the found filth is valid."""
found_date = dateparser.parse(self.text)
if found_date is None:
return False
years_since_identified_date = datetime.date.today().year - found_date.year
return DateOfBirthFilth.min_age_years <= years_since_identified_date <= DateOfBirthFilth.max_age_years
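# A quick usage sketch, assuming scrubadub's base Filth constructor takes beg/end/text
# keyword arguments (not shown in this file):
#
# from faker import Faker
# fake = Faker()
# example = DateOfBirthFilth.generate(fake)          # e.g. "Sun 19 Jan 1999"
# filth = DateOfBirthFilth(beg=0, end=len(example), text=example)
# print(filth.is_valid())  # True only if the parsed age falls within 18..100 years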
| 3.703125 | 4 |
problem/01000~09999/01598/1598.py3.py | njw1204/BOJ-AC | 1 | 12790534 | A,B=map(int,input().split())
print(abs((A-1)%4-(B-1)%4)+abs((A-1)//4-(B-1)//4))
| 2.46875 | 2 |
aiomongodb/__init__.py | jdavidls/aiomongodb | 0 | 12790535 | <reponame>jdavidls/aiomongodb<gh_stars>0
'''
HighLevel -> LowLevel (protocol) classes
'''
#cybson
import collections, asyncio, bson, random
from . import connection
from .connection import Connection
MappingProxy = type(type.__dict__)
class AttributeKeyError(AttributeError, KeyError):
pass
class odict(collections.OrderedDict):
__getattr__ = collections.OrderedDict.__getitem__
__setattr__ = collections.OrderedDict.__setitem__
__delattr__ = collections.OrderedDict.__delitem__
def __missing__(self, key):
raise AttributeKeyError(key)
_empty_doc = MappingProxy(odict())
bson_encode = bson.BSON.encode
_bson_decode = bson.BSON.decode
_bson_decode_all = bson.decode_all
#def bson_encode(doc):
# return _bson_encode(doc)
def bson_decode(raw):
return _bson_decode(raw, odict)
def bson_encode_multi(docs):
return b''.join( _bson_encode(doc) for doc in docs )
def bson_decode_multi(raw):
doc = _bson_decode_all(raw, odict)
return isinstance(doc, list) and doc or [doc]
class Client:
def __init__(self, loop=None, host=None, port=None, connections=8):
'''
'''
self._loop = loop = loop or asyncio.get_event_loop()
self._host = host = host or connection.default_host
self._port = port = port or connection.default_port
self._next_connection = 0
self._connection_pool = [ Connection(loop, host, port) for n in range(connections) ]
self._databases = databases = {}
self._databases_proxy = MappingProxy(databases)
self._is_connected = False
self._cursors = set()
self._server_version = None
@property
def databases(self):
return self._databases_proxy
async def connect(self):
disconnection_futures = await asyncio.gather(
*(c.connect() for c in self._connection_pool),
loop = self._loop
)
for disconnection_future in disconnection_futures:
disconnection_future.add_done_callback(self._connection_lost)
self._is_connected = True
def _connection_lost(self, connection):
print('connection lost', connection)
self._connection_pool.remove(connection)
if not self._is_connected:
return
reconnection = self._loop.create_task(connection.connect())
@reconnection.add_done_callback
def reconnection_made(disconnection_future):
print('reconnection made', connection)
            self._connection_pool.append(connection)
disconnection_future.add_done_callback(self._connection_lost)
    async def disconnect(self):
self._is_connected = False
raise NotImplementedError
def connection(self):
#next_connection_idx = self._next_connection_idx
#connection = self._connection_pool[next_connection_idx]
# la conexion debe estar activa,
#self._next_connection_idx = (next_connection_idx + 1) % len(self._connection_pool)
#return connection
return random.sample(self._connection_pool, 1)[0]
def database(self, name):
database = self._databases.get(name, None)
if database is None:
database = Database(self, name)
self._databases[name] = database
return database
class Database:
def __init__(self, client, name):
self._client = client
self._name = name
self._collections = collections = {}
self._collections_proxy = MappingProxy(collections)
self._cmd = Collection(self, '$cmd')
client = property(lambda self: self._client)
name = property(lambda self: self._name)
collections = property(lambda self: self._collections_proxy)
def collection(self, name):
collection = self._collections.get(name, None)
if collection is None:
collection = Collection(self, name)
self._collections[name] = collection
return collection
#
# def __getattr__(self, command):
# async def cmd(**parameters):
# odict((command, 1) + parameters.items())
# return cmd
class Collection:
'''
contempla:
sesiones (colleciones)
sistemas de comunicacion (cursores esperando datos)
'''
# batch_size = 100
def __init__(self, database, name):
self._client = database._client
self._database = database # proxy(database)
self._name = name
self._cstr_name = b'.'.join((database._name.encode(), name.encode()))
def find(self, query, projection=_empty_doc):
return Query(self, query, projection)
def find_one(self, query, projection=_empty_doc):
return Query(self, query, projection, 0, 1)
def __getitem__(self, id):
return Query(self, {'_id': id}, _empty_doc, 0, 1)
class Query:
'''
Representa una consulta, cachea la codificacion
para futuros usos.
'''
def __init__(self, collection, query, projection=_empty_doc, skip=0, limit=None):
self._client = collection._client
self._collection = collection
self._query = query
self._encoded_query = bson_encode(query)
self._projection = projection
self._encoded_projection = projection and bson_encode(projection) or b''
self._skip = skip
self._limit = limit
self._tailable = False
self._exhaust = False
async def __aiter__(self):
'''
returns a cursor.
'''
return Cursor(self)
def __len__(self):
'''
returna un future que se resolverá con el
numero de elementos que alcanza esta query
'''
return asyncio.Future()
# getitem -> future
# setitem -> future(error)
# slice -> cursor
class Cursor:
'''
'''
def __init__(self, query):
self._client = query._client
self._query = query
self._deque = collections.deque()
self._cursor_id = None
self._cstr_collection = query._collection._cstr_name
self._encoded_query = query._encoded_query
self._encoded_projection = query._encoded_projection
self._batch_length = 25
self._skip = query._skip
self._limit = query._limit
self._connection = connection = self._client.connection()
self._future = future = connection.OP_QUERY(
self._cstr_collection,
self._encoded_query,
self._encoded_projection,
min(self._limit or 0xFFFFFFFF, self._batch_length),
self._skip,
)
async def __anext__(self):
deque = self._deque
if not deque:
future = self._future
if future is None:
raise StopAsyncIteration
try:
reply = await future
except:
                ## make reconnection and request new query
raise
## try
items = bson_decode_multi(reply.bson_payload)
## raise BSON DECODE ERROR
deque.extend(items)
if self._limit:
self._limit -= reply.number_returned
self._skip += reply.number_returned
self._cursor_id = cursor_id = reply.cursor_id
if cursor_id:
self._future = self._connection.OP_GET_MORE(
self._cstr_collection,
min(self._limit or 0xFFFFFFFF, self._batch_length),
cursor_id
)
else:
self._future = None ## stop the cursor here
item = deque.popleft()
## process item
item = self._process_item(item)
return item
def _process_item(self, item):
return item
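# A minimal driving sketch (hypothetical database/collection names; assumes a reachable
# MongoDB server and the legacy awaitable __aiter__ protocol this module targets):
#
# async def demo(loop):
#     client = Client(loop=loop, connections=2)
#     await client.connect()
#     users = client.database('test').collection('users')
#     async for doc in users.find({'active': True}):
#         print(doc)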
| 2.1875 | 2 |
scripts/generateSE.py | tijsmaas/TrafficPrediction | 17 | 12790536 | <filename>scripts/generateSE.py
import argparse
import os
import scripts
import numpy as np
import networkx as nx
from gensim.models import Word2Vec
def write_edgelist(adj_file, edgelist_file):
adj = np.load(adj_file, allow_pickle=True)[2]
with open(edgelist_file, 'w') as f:
n_nodes = adj.shape[0]
for i in range(n_nodes):
for j in range(n_nodes):
w = adj[i][j]
f.write(str(i) + ' ' + str(j) + ' ' + str(w) + '\n')
def read_graph(edgelist_file):
G = nx.read_edgelist(
edgelist_file, nodetype=int, data=(('weight',float),),
create_using=nx.DiGraph())
return G
def learn_embeddings(walks, dimensions, iter, output_file):
walks = [list(map(str, walk)) for walk in walks]
model = Word2Vec(
walks, size = dimensions, window = 10, min_count = 0, sg = 1,
workers = 4, iter = iter)
    print('Writing embedding to', output_file)
model.wv.save_word2vec_format(output_file)
def main(args):
# Author settings
is_directed = True
dimensions = 64
window_size = 10
p = 2
q = 1
write_edgelist(args.adj_file, args.edgelist_file)
nx_G = read_graph(args.edgelist_file)
G = scripts.Graph(nx_G, is_directed, p, q)
G.preprocess_transition_probs()
walks = G.simulate_walks(args.num_walks, args.walk_length)
learn_embeddings(walks, dimensions, args.iter, args.SE_file)
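# Example invocation (hypothetical paths, matching the argparse defaults below):
#   python scripts/generateSE.py --adj_file data/sensor_graph/adj_mx.pkl \
#       --SE_file data/sensor_graph/SE.txt --num_walks 100 --walk_length 80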
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--adj_file", type=str, default="data/sensor_graph/adj_mx.pkl", help="Input file adjacency matrix to build graph."
)
parser.add_argument(
"--SE_file", type=str, help="Output file with sensor embeddings. (e.g. data/sensor_graph/SE.txt)",
)
parser.add_argument('--walk_length', type=int, default=80,
help='Length of random walks')
parser.add_argument('--num_walks', type=int, default=100,
help='Number of random walks per iteration')
parser.add_argument('--iter', type=int, default=1000,
help='Number of iterations')
args = parser.parse_args()
basepath = os.path.dirname(args.adj_file)
args.edgelist_file = os.path.join(basepath, 'edgelist.txt')
if not args.SE_file:
args.SE_file = os.path.join(basepath, 'SE.txt')
    main(args)
| 2.671875 | 3 |
src/utils/db.py | Dimwest/iot-garden-backend | 0 | 12790537 | import psycopg2
from psycopg2.extensions import connection, cursor
from psycopg2.extras import DictCursor
from typing import Dict
from src.log.logger import logger
from contextlib import contextmanager
@contextmanager
def get_connection(params: Dict[str, str]) -> connection:
"""
Get a connection using a context manager.
:param params: database connection parameters dictionary
:return: psycopg2 connection object
"""
try:
conn = psycopg2.connect(**params)
yield conn
except Exception as e:
logger.error(f"{str(type(e))} during database operation: {e}")
raise e
finally:
# Close database connection if defined.
logger.debug("Closing database connection")
try:
conn.close()
except UnboundLocalError:
pass
@contextmanager
def get_cursor(params: Dict[str, str], commit: bool = True) -> cursor:
"""
Get a connection cursor using a context manager.
:param params: database connection parameters dictionary
:param commit: boolean determining whether changes should be committed
:return: psycopg2 cursor object
"""
with get_connection(params) as conn:
# Acquire cursor from connection
logger.debug("Obtaining database cursor.")
cur = conn.cursor(cursor_factory=DictCursor)
try:
yield cur
if commit:
conn.commit()
finally:
# Close cursor
logger.debug("Closing database cursor.")
cur.close()
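# A quick usage sketch (hypothetical connection parameters):
#
# params = {"host": "localhost", "dbname": "garden", "user": "postgres", "password": "secret"}
# with get_cursor(params) as cur:
#     cur.execute("SELECT 1")
#     print(cur.fetchone())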
def get_sensors_data(cur: psycopg2.extensions.cursor):
"""
Fetches data from sensors' tables
TODO -> parallelize queries
:param cur: database cursor
:return: JSON formatted results
"""
    # psycopg2's execute() returns None, so results must be fetched from the cursor itself
    data = {}
    cur.execute("SELECT * FROM sensors.temperature")
    data["temperature"] = cur.fetchall()
    cur.execute("SELECT * FROM sensors.humidity")
    data["humidity"] = cur.fetchall()
    cur.execute("SELECT * FROM sensors.light")
    data["light"] = cur.fetchall()
    return data
| 3.109375 | 3 |
scheduler/tests/scheduler_factories.py | annalee/alienplan | 5 | 12790538 | <gh_stars>1-10
import factory
import random
import datetime
from django.template.defaultfilters import slugify
from scheduler.models import Conference, Room, Track, Panelist, Panel, Day
class ConferenceFactory(factory.django.DjangoModelFactory):
class Meta:
model = Conference
name = factory.Sequence(lambda n: 'TestCon {}'.format(n))
@factory.lazy_attribute
def slug(self):
slug = slugify(self.name)
return slug
class DayFactory(factory.django.DjangoModelFactory):
class Meta:
model = Day
conference = factory.SubFactory(ConferenceFactory)
day = datetime.date(year=2020, month=1, day=17)
start_time = datetime.time(hour=10)
end_time = datetime.time(hour=19)
class RoomFactory(factory.django.DjangoModelFactory):
class Meta:
model = Room
conference = factory.SubFactory(ConferenceFactory)
capacity = 50
category = Room.PANEL
av = False
@factory.lazy_attribute
def name(self):
name = factory.Faker('city').generate()
return name
class TrackFactory(factory.django.DjangoModelFactory):
class Meta:
model = Track
conference = factory.SubFactory(ConferenceFactory)
start = datetime.datetime(year=2020, month=1, day=17, hour=17)
end = datetime.datetime(year=2020, month=1, day=19, hour=16)
@factory.lazy_attribute
def name(self):
name = factory.Faker('word').generate()
return name
@factory.lazy_attribute
def slug(self):
slug = slugify(self.name)
return slug
class PanelistFactory(factory.django.DjangoModelFactory):
class Meta:
model = Panelist
conference = factory.SubFactory(ConferenceFactory)
inarow = 2
reading_requested = True
signing_requested = True
@factory.lazy_attribute
def badge_name(self):
return factory.Faker('name').generate()
@factory.lazy_attribute
def email(self):
return factory.Faker('email').generate()
@factory.lazy_attribute
def program_name(self):
return self.badge_name
@factory.lazy_attribute
def pronouns(self):
pronoun_choices = ['She/Her', 'He/Him', 'They/Them', 'She/They', 'E/Em']
return random.choice(pronoun_choices)
class PanelFactory(factory.django.DjangoModelFactory):
class Meta:
model = Panel
description = factory.Faker('paragraph', nb_sentences=6).generate()
conference = factory.SubFactory(ConferenceFactory)
publish = True
@factory.lazy_attribute
def title(self):
title = factory.Faker('text', max_nb_chars=50).generate()
return title
@factory.post_generation
def assign_panelists(self, create, extracted, **kwargs):
if not create:
# Simple build, do nothing.
return
if extracted:
# select interested panelists at random
intpan = random.randrange(3, int(len(extracted)/2))
intmod = random.randrange(1, int(len(extracted)/4))
for panelist in random.sample(extracted, intpan):
self.interested_panelists.add(panelist)
for panelist in random.sample(extracted, intmod):
self.interested_moderators.add(panelist)
# roll a d10 to see if this one has required panelists
required = random.randrange(1, 10)
# if yes, add them.
if required == 10:
reqmod = random.randrange(1, 4)
for panelist in random.sample(extracted, reqmod):
self.required_panelist.add(panelist)
@factory.post_generation
def assign_track(self, create, extracted, **kwargs):
if not create:
# Simple build, do nothing.
return
if extracted:
# assign track via a weighted random
track = random.choices(extracted, weights=[45, 5, 5, 10, 10, 5])
track[0].panels.add(self)
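# A usage sketch, assuming a configured Django test database (counts are arbitrary;
# assign_track expects exactly six tracks to match the hard-coded weights above):
#
# conference = ConferenceFactory()
# panelists = PanelistFactory.create_batch(20, conference=conference)
# tracks = [TrackFactory(conference=conference) for _ in range(6)]
# panel = PanelFactory(conference=conference,
#                      assign_panelists=panelists,  # consumed by the post_generation hook
#                      assign_track=tracks)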
| 2.140625 | 2 |
0012-tables_format/test_formats.py | villoro/villoro_posts | 0 | 12790539 | <filename>0012-tables_format/test_formats.py
"""
Test different file formats for storing tables.
There are 3 files with different sizes:
small: bike_sharing_daily (64 KB)
medium: cbg_patterns (233 MB)
big: checkouts-by-title (6,62 GB)
"""
import os
from time import time
import yaml
import pandas as pd
from tqdm import tqdm
PATH_DATA = "data/"
PATH_RESULTS = "results/"
FILES = ["bike_sharing_daily", "cbg_patterns", "checkouts-by-title"]
FUNCS = {
"read": {
"csv": pd.read_csv,
"xlsx": pd.read_excel,
"pickle": pd.read_pickle,
"feather": pd.read_feather,
"parquet": pd.read_parquet,
"msgpack": pd.read_msgpack,
},
"write": {
"csv": pd.DataFrame.to_csv,
"xlsx": pd.DataFrame.to_excel,
"pickle": pd.DataFrame.to_pickle,
"feather": pd.DataFrame.to_feather,
"parquet": pd.DataFrame.to_parquet,
"msgpack": pd.DataFrame.to_msgpack,
},
}
COMPRESSIONS = {
"csv": {
"param_name": "compression",
"read_with_param": True,
"list": ["infer", "gzip", "bz2", "zip", "xz", None],
},
"pickle": {
"param_name": "compression",
"read_with_param": True,
"list": ["infer", "gzip", "bz2", "zip", "xz", None],
},
"parquet": {
"param_name": "compression",
"read_with_param": False, # Read function don't use compression param
"list": ["snappy", "gzip", "brotli", None],
},
"msgpack": {
"param_name": "compress",
"read_with_param": False, # Read function don't use compression param
"list": ["zlib", "blosc", None],
},
}
def clean():
""" Clean previously created files """
for name in os.listdir(PATH_DATA):
if "." in name and name.split(".")[0] == "data":
os.remove(f"{PATH_DATA}{name}")
def iterate_one_test(iterations, extension, func, args, kwargs):
"""
Do some iterations for some function
Args:
        iterations: number of times to run the test
        extension: file extension label, used in progress output and error messages
func: function to test
args: arguments for that function
kwargs: extra keyworded arguments
"""
out = []
for _ in tqdm(range(iterations), desc=f"- {extension:8}", leave=True):
try:
t0 = time()
func(*args, **kwargs)
# Store time
out.append(time() - t0)
except Exception as e:
print(f"- Error with {extension}: {e}")
return out
def test_write(size, iterations, exclude_formats, test_compress):
"""
Test writting for one file
Args:
        size: size of the file to test (0: small, 1: medium, 2: big)
iterations: number of times to run the test
exclude_formats: formats to exclude in this test
test_compress: if True it will try all compressions
Returns:
dictionary with out
"""
out = {}
df = pd.read_csv(f"{PATH_DATA}{FILES[size]}.csv")
for extension, func in tqdm(FUNCS["write"].items(), desc=f"{'write':10}", leave=True):
# Skip this extension
if extension in exclude_formats:
continue
if not test_compress or extension not in COMPRESSIONS:
args = [df, f"{PATH_DATA}data.{extension}"]
out[extension] = iterate_one_test(iterations, extension, func, args, {})
# Try all compressions
else:
if extension not in COMPRESSIONS:
continue
# Get name of compression parameter and list of extensions
comp_list = COMPRESSIONS[extension]["list"]
comp_param_name = COMPRESSIONS[extension]["param_name"]
for comp in tqdm(comp_list, desc=f"{extension:10}", leave=True):
name = f"{extension}_{str(comp)}"
out[name] = iterate_one_test(
iterations,
extension=name,
func=func,
args=[df, f"{PATH_DATA}data.{extension}_{comp}"],
kwargs={comp_param_name: comp},
)
return out
def test_read(size, iterations, exclude_formats, test_compress):
"""
Test read for one file
Args:
        size: size of the file to test (0: small, 1: medium, 2: big)
iterations: number of times to run the test
exclude_formats: formats to exclude in this test
test_compress: if True it will try all compressions
Returns:
dictionary with out
"""
out = {}
for extension, func in tqdm(FUNCS["read"].items(), desc=f"{'read':10}", leave=True):
# Skip this extension
if extension in exclude_formats:
continue
if not test_compress or extension not in COMPRESSIONS:
args = [f"{PATH_DATA}data.{extension}"]
out[extension] = iterate_one_test(iterations, extension, func, args, {})
# Try all compressions
else:
if extension not in COMPRESSIONS:
continue
# Get name of compression parameter and list of extensions
comp_list = COMPRESSIONS[extension]["list"]
comp_param_name = COMPRESSIONS[extension]["param_name"]
use_param = COMPRESSIONS[extension]["read_with_param"]
for comp in tqdm(comp_list, desc=f"{extension:10}", leave=True):
name = f"{extension}_{str(comp)}"
out[name] = iterate_one_test(
iterations,
extension=name,
func=func,
args=[f"{PATH_DATA}data.{extension}_{comp}"],
kwargs={comp_param_name: comp} if use_param else {},
)
return out
def store_results(data, size, iterations):
""" Store results as a yaml """
with open(f"{PATH_RESULTS}results_s{size}_i{iterations}.yaml", "w") as outfile:
yaml.dump(data, outfile, default_flow_style=False)
print(f"\n- Data {PATH_RESULTS}results_s{size}_i{iterations}.yaml stored")
def full_test(size, iterations=10, exclude_formats=[], test_compress=False):
""" Do both tests and store the results"""
clean()
print(f"\nFULL TEST. size: {size}, iterations: {iterations}")
out = {
"write": test_write(size, iterations, exclude_formats, test_compress),
"read": test_read(size, iterations, exclude_formats, test_compress),
}
# Also get file sizes
out["file_size"] = {}
for file in os.listdir(PATH_DATA):
name, extension = file.split(".")
if name == "data":
out["file_size"][extension] = os.path.getsize(f"{PATH_DATA}{file}")
store_results(out, size, iterations)
def test_1():
""" Runs some tests with all extensions and exclude big dataframe """
full_test(0, iterations=100)
full_test(1, iterations=10)
def test_2():
""" Run tests trying all compressions without xlsx extension """
full_test(1, iterations=5, exclude_formats=["xlsx"], test_compress=True)
def test_3():
""" Run test with the big dataframe and trying the compressions """
full_test(2, iterations=1, exclude_formats=["xlsx", "csv"], test_compress=True)
if __name__ == "__main__":
# Dummy test
# full_test(0, iterations=20, exclude_formats=["xlsx"], test_compress=True)
# test_1()
# test_2()
test_3()
| 2.578125 | 3 |
images/core/context/free5gc/lib/nextepc/nas/support/cache/nas_msg_92.py | my5G/OPlaceRAN | 1 | 12790540 | ies = []
ies.append({ "iei" : "", "value" : "EMM cause", "type" : "EMM cause", "reference" : "9.9.3.9", "presence" : "M", "format" : "V", "length" : "1"})
ies.append({ "iei" : "30", "value" : "Authentication failure parameter", "type" : "Authentication failure parameter", "reference" : "9.9.3.1", "presence" : "O", "format" : "TLV", "length" : "16"})
msg_list[key]["ies"] = ies
| 1.9375 | 2 |
uploads/core/forms.py | joshua-taylor/dataIntegrator | 0 | 12790541 | <filename>uploads/core/forms.py
from django import forms
from django.contrib.auth.forms import UserCreationForm, UserChangeForm, AuthenticationForm
from uploads.core.models import CustomUser
from uploads.core.models import Document
class DocumentForm(forms.ModelForm):
class Meta:
model = Document
fields = ('description', 'document')
widgets={
'description': forms.TextInput(attrs={
'class': 'form-control',
'placeholder': 'Enter a friendly description of the file'
})
}
class CustomUserCreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
model = CustomUser
fields = ('email',)
widgets={
# 'username': forms.TextInput(attrs={
# 'class': 'form-control',
# 'placeholder': 'Enter your username'
# }),
'email': forms.EmailInput(attrs={
'class':'form-control'
})
}
class CustomUserChangeForm(UserChangeForm):
class Meta:
model = CustomUser
fields = ('email',)
widgets={
# 'username': forms.TextInput(attrs={
# 'class': 'form-control',
# 'placeholder': 'Enter your username'
# }),
'email': forms.EmailInput(attrs={
'class':'form-control'
})
}
class LoginForm(AuthenticationForm):
class Meta:
model = CustomUser
fields = ('email','password')
widgets={
'email': forms.EmailInput(attrs={
'placeholder': 'Enter your email',
'class':'form-control'
}),
'password': forms.PasswordInput(attrs={
'class': 'form-control',
})
}
# email = forms.EmailInput(widget=forms.TextInput(attrs={'class': 'form-control'}))
# password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'form-control'}))
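# A minimal wiring sketch (hypothetical view and template names):
#
# from django.urls import reverse_lazy
# from django.views.generic import CreateView
#
# class SignUpView(CreateView):
#     form_class = CustomUserCreationForm
#     success_url = reverse_lazy("login")
#     template_name = "signup.html"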
| 2.34375 | 2 |
python/test/test02.py | andreluizdsantos/Curso_ADS | 1 | 12790542 | import unittest
from test.test01 import soma
class TesteSoma(unittest.TestCase):
def test_retorno_soma_10_10(self):
        self.assertEqual(soma(10, 10), 20)
| 2.515625 | 3 |
ewsonprem_consts.py | splunk-soar-connectors/ewsonprem | 0 | 12790543 | <filename>ewsonprem_consts.py
# File: ewsonprem_consts.py
#
# Copyright (c) 2016-2022 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
EWSONPREM_JSON_DEVICE_URL = "url"
EWSONPREM_JSON_TEST_USER = "test_user"
EWSONPREM_JSON_SUBJECT = "subject"
EWSONPREM_JSON_FROM = "sender"
EWSONPREM_JSON_INT_MSG_ID = "internet_message_id"
EWSONPREM_JSON_EMAIL = "email"
EWSONPREM_JSON_FOLDER = "folder"
EWSONPREM_JSON_BODY = "body"
EWSONPREM_JSON_QUERY = "query"
EWSONPREM_JSON_RANGE = "range"
EWSONPREM_JSON_ID = "id"
EWSONPREM_JSON_GROUP = "group"
EWSONPREM_JSON_INGEST_EMAIL = "ingest_email"
EWS_JSON_CONTAINER_ID = "container_id"
EWS_JSON_VAULT_ID = "vault_id"
EWSONPREM_SEARCH_FINISHED_STATUS = "Finished Searching {0:.0%}"
EWS_JSON_POLL_USER = "poll_user"
EWS_JSON_USE_IMPERSONATE = "use_impersonation"
EWS_JSON_AUTH_TYPE = "auth_type"
EWS_JSON_CLIENT_ID = "client_id"
EWS_JSON_POLL_FOLDER = "poll_folder"
EWS_JSON_INGEST_MANNER = "ingest_manner"
EWS_JSON_INGEST_TIME = "ingest_time"
EWS_JSON_FIRST_RUN_MAX_EMAILS = "first_run_max_emails"
EWS_JSON_POLL_MAX_CONTAINERS = "max_containers"
EWS_JSON_DONT_IMPERSONATE = "dont_impersonate"
EWS_JSON_IMPERSONATE_EMAIL = "impersonate_email"
EWS_JSON_AUTH_URL = "authority_url"
EWS_JSON_FED_PING_URL = "fed_ping_url"
EWS_JSON_FED_VERIFY_CERT = "fed_verify_server_cert"
EWS_JSON_IS_PUBLIC_FOLDER = "is_public_folder"
EWSONPREM_ERR_CONNECTIVITY_TEST = "Test Connectivity Failed"
EWSONPREM_SUCC_CONNECTIVITY_TEST = "Test Connectivity Passed"
EWSONPREM_ERR_SERVER_CONNECTION = "Connection failed"
EWSONPREM_ERR_FED_PING_URL = "Parameter validation failed for the Federated Auth Ping URL"
EWSONPREM_ERR_FROM_SERVER = "API failed. Status code: {code}. Message: {message}"
EWSONPREM_ERR_API_UNSUPPORTED_METHOD = "Unsupported method"
EWSONPREM_USING_BASE_URL = "Using url: {base_url}"
EWSONPREM_ERR_VAULT_INFO = "Could not retrieve vault file"
EWSONPREM_ERR_JSON_PARSE = "Unable to parse reply, raw string reply: '{raw_text}'"
EWSONPREM_EXCEPTION_ERR_MESSAGE = "Error Code: {0}. Error Message: {1}"
EWSONPREM_ERR_CODE_MESSAGE = "Error code unavailable"
EWSONPREM_ERR_MESSAGE = "Error message unavailable. Please check the asset configuration and|or action parameters."
TYPE_ERR_MESSAGE = "Error occurred while connecting to the EWS server. Please check the asset configuration and|or the action parameters."
EWSONPREM_VALIDATE_INTEGER_MESSAGE = "Please provide a valid integer value in the {key} parameter"
EWSONPREM_MAIL_TYPES = [
"t:Message",
"t:MeetingRequest",
"t:MeetingResponse",
"t:MeetingMessage",
"t:MeetingCancellation"
]
EWSONPREM_MAX_END_OFFSET_VAL = 2147483646
EWS_O365_RESOURCE = "https://outlook.office365.com"
EWS_LOGIN_URL = "https://login.windows.net"
EWS_MODIFY_CONFIG = "Toggling the impersonation configuration on the asset might help, or login user does not have privileges to the mailbox."
EWS_INGEST_LATEST_EMAILS = "latest first"
EWS_INGEST_OLDEST_EMAILS = "oldest first"
DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
AUTH_TYPE_AZURE = "Azure"
AUTH_TYPE_AZURE_INTERACTIVE = "Azure (interactive)"
AUTH_TYPE_FEDERATED = "Federated"
AUTH_TYPE_BASIC = "Basic"
DEFAULT_REQUEST_TIMEOUT = 30 # in seconds
STATE_FILE_CORRUPT_ERR = (
"Error occurred while loading the state file due to its unexpected format. "
"Resetting the state file with the default format. Please try again."
)
| 1.351563 | 1 |
tests/test_heap.py | ZachElkins/PythonDataStructures | 1 | 12790544 | import pytest
from data_structures.heap import Heap
@pytest.fixture
def base_heap():
heap = Heap()
heap.push(1)
heap.push(2)
heap.push(3)
heap.push(4)
heap.push(5)
return heap
def test_heap_init():
basic_heap = Heap()
init_list_heap = Heap([9, 8, 7, 5, 1, 2])
assert isinstance(basic_heap, Heap)
assert isinstance(init_list_heap, Heap)
def test_heap_push():
heap = Heap()
heap.push(2)
heap.push(3)
heap.push(1)
def test_heap_pop(base_heap):
assert base_heap.pop() == 1
assert base_heap.pop() == 2
def test_heap_peek(base_heap):
assert base_heap.peek() == 1
def test_heap_empty():
heap = Heap()
assert heap.empty()
heap.push(1)
assert not heap.empty()
def test_heapify_up_and_down(base_heap):
base_heap.pop()
base_heap.pop()
base_heap.push(8)
base_heap.push(1)
base_heap.push(0)
base_heap.push(9)
assert base_heap.get_heap() == [0, 3, 1, 8, 4, 5, 9]
def test_heapify():
heap = Heap([8, 9, 5, 1, 3, 2, 0, 6])
assert heap.get_heap() == [0, 1, 2, 6, 3, 8, 5, 9]
| 3.015625 | 3 |
scripts/kopf/example.py | victoriouscoder/oreilly-kubernetes | 323 | 12790545 | <gh_stars>100-1000
import kopf
@kopf.on.create('oreilly.com', 'v1alpha1', 'book')
def create_fn(spec, **kwargs):
print(f"And here we are! Creating: {spec}")
return {'message': 'hello world'} # will be the new status
#@kopf.on.update('oreilly.com', 'v1alpha1', 'book')
#def update_fn(old, new, diff, **kwargs):
# print('UPDATED')
# print(f"The following object got updated: {spec}")
# return {'message': 'updated'}
#@kopf.on.delete('oreilly.com', 'v1alpha1', 'book')
#def delete_fn(metadata, **kwargs):
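# To try this handler against a cluster (assumes the matching Book CRD is applied first):
#   kopf run scripts/kopf/example.py --verbose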
| 2 | 2 |
src/lambda/vault/getVaultItem/getVaultItem.py | VasudhaJha/PasswordManager | 0 | 12790546 | import os
import json
import boto3
from botocore.exceptions import ClientError
from cryptography.fernet import Fernet
dynamodb = boto3.resource('dynamodb')
s3 = boto3.resource('s3')
vault_table = dynamodb.Table(os.environ.get('VAULT_TABLE_NAME'))
vault_table_partition_key = os.environ.get('VAULT_TABLE_KEY')
vault_table_sort_key = os.environ.get('VAULT_SORT_KEY')
bucket_name = os.environ.get('S3_BUCKET_NAME')
key_file_name = os.environ.get('ENCRYPTION_KEY')
key_file_destination = "/tmp/" + key_file_name
s3.meta.client.download_file(Bucket=bucket_name, Key=key_file_name, Filename=key_file_destination)
key = open(key_file_destination, "rb").read()
def _decrypt_item_value(value):
f = Fernet(key)
decrypted_value = f.decrypt(value)
return decrypted_value.decode("utf-8")
def _get_vault_item(email, name):
try:
response = vault_table.get_item(
Key={
vault_table_partition_key: email,
vault_table_sort_key: name
}
)
except Exception as e:
print(e)
raise
else:
return response['Item']
def lambda_handler(event, context):
email = event['pathParameters']['email']
name = event['pathParameters']['name']
try:
response = _get_vault_item(email, name)
del response['email']
print(f"RESPONSE: {response}")
response['value'] = json.loads(_decrypt_item_value(response['value'].value))
return {
"statusCode": 200,
"headers": {
"Content-Type": "application/json"
},
"body": json.dumps(response)
}
except Exception as e:
return {
"statusCode": 500,
"headers": {
"Content-Type": "application/json"
},
"body": str(e)
        }
| 2.03125 | 2 |
pypadre/pod/repository/local/file/code_repository.py | padre-lab-eu/pypadre | 3 | 12790547 | import errno
import glob
import os
import re
import shutil
from pypadre.core.model.code.code_mixin import CodeMixin, PythonPackage, PythonFile, GenericCall, \
GitIdentifier, RepositoryIdentifier, PipIdentifier, Function
from pypadre.pod.backend.i_padre_backend import IPadreBackend
from pypadre.pod.repository.i_repository import ICodeRepository
from pypadre.pod.repository.local.file.generic.i_file_repository import File
from pypadre.pod.repository.local.file.generic.i_git_repository import IGitRepository
from pypadre.pod.repository.local.file.project_code_repository import CODE_FILE
from pypadre.pod.repository.serializer.serialiser import JSonSerializer
def copy(src, dest):
try:
shutil.copytree(src, dest)
except OSError as e:
# If the error was caused because the source wasn't a directory
if e.errno == errno.ENOTDIR:
shutil.copy(src, dest)
else:
print('Directory not copied. Error: %s' % e)
NAME = "code"
META_FILE = File("metadata.json", JSonSerializer)
# CODE_FILE = File("code.bin", DillSerializer)
class CodeFileRepository(IGitRepository, ICodeRepository):
@staticmethod
def placeholder():
return '{CODE_ID}'
def __init__(self, backend: IPadreBackend):
super().__init__(root_dir=os.path.join(backend.root_dir, NAME), backend=backend)
def _get_by_dir(self, directory):
path = glob.glob(os.path.join(self._replace_placeholders_with_wildcard(self.root_dir), directory))[0]
metadata = self.get_file(path, META_FILE)
return self._create_object(metadata, directory)
def _create_object(self, metadata, directory, root_dir=None):
identifier_type = metadata.get(CodeMixin.REPOSITORY_TYPE)
identifier_data = metadata.get(CodeMixin.IDENTIFIER)
identifier = None
if identifier_type == RepositoryIdentifier._RepositoryType.pip:
version = identifier_data.get(PipIdentifier.VERSION)
pip_package = identifier_data.get(PipIdentifier.PIP_PACKAGE)
identifier = PipIdentifier(version=version, pip_package=pip_package)
if identifier_type == RepositoryIdentifier._RepositoryType.git:
path = identifier_data.get(GitIdentifier.PATH)
git_hash = identifier_data.get(GitIdentifier.GIT_HASH)
identifier = GitIdentifier(path=path, git_hash=git_hash)
if identifier is None:
raise ValueError(
"Identifier is not present in the meta information of code object in directory " + directory)
if metadata.get(CodeMixin.CODE_TYPE) == str(CodeMixin._CodeType.function):
if root_dir is not None:
fn_dir = glob.glob(os.path.join(self._replace_placeholders_with_wildcard(root_dir),
os.path.abspath(os.path.join(directory, '..', 'function'))))[0]
else:
fn_dir = glob.glob(os.path.join(self._replace_placeholders_with_wildcard(self.root_dir),
os.path.abspath(os.path.join(directory, '..', 'function'))))[0]
fn = self.get_file(fn_dir, CODE_FILE)
code = Function(fn=fn, metadata=metadata, repository_identifier=identifier)
elif metadata.get(CodeMixin.CODE_TYPE) == str(CodeMixin._CodeType.package):
code = PythonPackage(metadata=metadata, path=metadata.get(PythonFile.PATH), package=metadata.get(PythonPackage.PACKAGE), variable=metadata.get(PythonPackage.VARIABLE), repository_identifier=identifier)
elif metadata.get(CodeMixin.CODE_TYPE) == str(CodeMixin._CodeType.python_file):
code = PythonFile(metadata=metadata, path=metadata.get(PythonFile.PATH), package=metadata.get(PythonFile.PACKAGE), variable=metadata.get(PythonFile.VARIABLE), repository_identifier=identifier)
elif metadata.get(CodeMixin.CODE_TYPE) == str(CodeMixin._CodeType.file):
code = GenericCall(metadata=metadata, cmd=metadata.get(GenericCall.CMD), repository_identifier=identifier)
else:
raise NotImplementedError(metadata.get(CodeMixin.CODE_TYPE) + " couldn't load from type.")
return code
def to_folder_name(self, code):
# TODO only name for folder okay? (maybe a uuid, a digest of a config or similar?)
return str(code.id)
def list(self, search, offset=0, size=100):
if search is not None and "name" in search:
# Shortcut because we know name is the folder name. We don't have to search in metadata.json
name = search.pop("name")
search[self.FOLDER_SEARCH] = re.escape(name)
return super().list(search, offset, size)
def _put(self, obj, *args, directory: str, **kwargs):
code = obj
if isinstance(code, Function):
# TODO fn repository
if not os.path.exists(os.path.abspath(os.path.join(directory, '..', 'function'))):
os.mkdir(os.path.abspath(os.path.join(directory, '..', 'function')))
self.write_file(os.path.abspath(os.path.join(directory, '..', 'function')), CODE_FILE, code.fn, mode="wb")
self.write_file(directory, META_FILE, code.metadata)
# if store_code:
# if isinstance(code, CodeFile):
# code_dir = os.path.join(directory, "code")
# if code.file is not None:
# if not os.path.exists(code_dir):
# os.mkdir(code_dir)
# copy(os.path.join(code.path, code.file), os.path.join(directory, "code", code.file))
# else:
# copy(code.path, code_dir)
# def get_code_hash(self, obj=None, path=None, init_repo=False, **kwargs):
#
# code_hash = git_hash(path=path)
# if code_hash is None and init_repo is True:
# # if there is no repository present in the path, but the user wants to create a repo then
# # Create a repo
# # Add any untracked files and commit those files
# # Get the code_hash of the repo
# # TODO give git an id and hold some reference in workspace???
# dir_path = os.path.dirname(path)
# create_repo(dir_path)
# add_and_commit(dir_path)
# code_hash = git_hash(path=dir_path)
#
# if obj is not None:
# obj.set_hash(code_hash)
| 2 | 2 |
perfkitbenchmarker/providers/azure/azure_disk.py | xiaolihope/PerfKitBenchmarker-1.7.0 | 0 | 12790548 | <gh_stars>0
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing classes related to Azure disks.
Disks can be created, deleted, attached to VMs, and detached from VMs.
At this time, Azure only supports one disk type, so the disk spec's disk type
is ignored.
See http://msdn.microsoft.com/en-us/library/azure/dn790303.aspx for more
information about azure disks.
"""
import json
import logging
import threading
from perfkitbenchmarker import disk
from perfkitbenchmarker import flags
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.azure import flags as azure_flags
AZURE_PATH = 'azure'
FLAGS = flags.FLAGS
DRIVE_START_LETTER = 'c'
PREMIUM_STORAGE = 'premium-storage'
STANDARD_DISK = 'standard-disk'
DISK_TYPE = {disk.STANDARD: STANDARD_DISK,
disk.REMOTE_SSD: PREMIUM_STORAGE}
AZURE = 'Azure'
disk.RegisterDiskTypeMap(AZURE, DISK_TYPE)
PREMIUM_STORAGE_METADATA = {
disk.MEDIA: disk.SSD,
disk.REPLICATION: disk.ZONE,
disk.LEGACY_DISK_TYPE: disk.REMOTE_SSD
}
AZURE_REPLICATION_MAP = {
azure_flags.LRS: disk.ZONE,
azure_flags.ZRS: disk.REGION,
# Deliberately omitting PLRS, because that is handled by
# PREMIUM_STORAGE_METADATA, and (RA)GRS, because those are
# asynchronously replicated.
}
LOCAL_SSD_PREFIXES = {
'Standard_D',
'Standard_G'
}
def LocalDiskIsSSD(machine_type):
"""Check whether the local disk is an SSD drive."""
return any((machine_type.startswith(prefix)
for prefix in LOCAL_SSD_PREFIXES))
class AzureDisk(disk.BaseDisk):
"""Object representing an Azure Disk."""
_lock = threading.Lock()
num_disks = {}
def __init__(self, disk_spec, vm_name, machine_type):
super(AzureDisk, self).__init__(disk_spec)
self.host_caching = FLAGS.azure_host_caching
self.name = None
self.vm_name = vm_name
self.lun = None
if self.disk_type == PREMIUM_STORAGE:
self.metadata = PREMIUM_STORAGE_METADATA
elif self.disk_type == STANDARD_DISK:
self.metadata = {
disk.MEDIA: disk.HDD,
disk.REPLICATION: AZURE_REPLICATION_MAP[FLAGS.azure_storage_type],
disk.LEGACY_DISK_TYPE: disk.STANDARD
}
elif self.disk_type == disk.LOCAL:
media = disk.SSD if LocalDiskIsSSD(machine_type) else disk.HDD
self.metadata = {
disk.MEDIA: media,
disk.REPLICATION: disk.NONE,
disk.LEGACY_DISK_TYPE: disk.LOCAL
}
def _Create(self):
"""Creates the disk."""
if self.disk_type == PREMIUM_STORAGE:
assert FLAGS.azure_storage_type == azure_flags.PLRS
else:
assert FLAGS.azure_storage_type != azure_flags.PLRS
with self._lock:
create_cmd = [AZURE_PATH,
'vm',
'disk',
'attach-new',
'--host-caching=%s' % self.host_caching,
self.vm_name,
str(self.disk_size)]
vm_util.IssueRetryableCommand(create_cmd)
if self.vm_name not in AzureDisk.num_disks:
AzureDisk.num_disks[self.vm_name] = 0
self.lun = AzureDisk.num_disks[self.vm_name]
AzureDisk.num_disks[self.vm_name] += 1
self.created = True
def _Delete(self):
"""Deletes the disk."""
delete_cmd = [AZURE_PATH,
'vm',
'disk',
'delete',
'--blob-delete',
self.name]
logging.info('Deleting disk %s. This may fail while the associated VM '
'is deleted, but will be retried.', self.name)
vm_util.IssueCommand(delete_cmd)
def _Exists(self):
"""Returns true if the disk exists."""
if self.name is None and self.created:
return True
elif self.name is None:
return False
show_cmd = [AZURE_PATH,
'vm',
'disk',
'show',
'--json',
self.name]
stdout, _, _ = vm_util.IssueCommand(show_cmd, suppress_warning=True)
try:
json.loads(stdout)
except ValueError:
return False
return True
@vm_util.Retry()
def _PostCreate(self):
"""Get the disk's name."""
show_cmd = [AZURE_PATH,
'vm',
'show',
'--json',
self.vm_name]
stdout, _, _ = vm_util.IssueCommand(show_cmd)
response = json.loads(stdout)
data_disk = response['DataDisks'][self.lun]
assert ((self.lun == 0 and 'logicalUnitNumber' not in data_disk)
or (self.lun == int(data_disk['logicalUnitNumber'])))
self.name = data_disk['name']
def Attach(self, vm):
"""Attaches the disk to a VM.
Args:
vm: The AzureVirtualMachine instance to which the disk will be attached.
"""
pass # TODO(user): Implement Attach()
# (not critical because disks are attached to VMs when created)
def Detach(self):
"""Detaches the disk from a VM."""
pass # TODO(user): Implement Detach()
def GetDevicePath(self):
"""Returns the path to the device inside the VM."""
if self.disk_type == disk.LOCAL:
return '/dev/sdb'
else:
return '/dev/sd%s' % chr(ord(DRIVE_START_LETTER) + self.lun)
| 1.734375 | 2 |
Chapter10/clean_sample.py | fbitti/Bioinformatics-with-Python-Cookbook-Second-Edition | 244 | 12790549 | <reponame>fbitti/Bioinformatics-with-Python-Cookbook-Second-Edition<gh_stars>100-1000
import sys
sys.stdout.write('ID_1 ID_2 missing\n0 0 0 \n')
for line in sys.stdin:
ind = line.rstrip()
sys.stdout.write('%s %s 0\n' % (ind, ind))
| 2.296875 | 2 |
karaoke.py | andrealpezr/ptavi-p3 | 0 | 12790550 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 8 20:54:26 2018
@author: andrea
"""
import sys
import json
from xml.sax import make_parser
from urllib.request import urlretrieve
from smallsmilhandler import SmallSMILHandler
class KaraokeLocal(SmallSMILHandler):
def __init__(self, fichero):
        # Initialize and build the tag list
        parser = make_parser()  # Create the parser
        cHandler = SmallSMILHandler()  # Create the handler
        parser.setContentHandler(cHandler)  # Attach the handler to the parser
parser.parse(open(fichero))
self.lista = cHandler.get_tags()
def __str__(self):
""" Método para crear la lista de etiquetas """
linea = " "
for elem in self.lista:
linea = linea + elem[0]
atributos = elem[1].items()
for nombre, valor in atributos:
if valor != '':
linea = linea + '\t' + nombre + '=' + '"' + valor + '"'
print(linea)
def to_json(self, fich, fich_json=None):
        # Write the tag list to a JSON-format file
        if fich_json is None:
            fich_json = fich.split('.')[0] + '.json'
        json.dump(self.lista, open(fich_json, 'w'))
def do_local(self):
        # Walk the list and download remote resources
for diccs in self.lista:
atrib = diccs[1]
for atributos, posi in atrib.items():
if atributos == "src" and posi[0:7] == "http://":
atrib_Nuevo = posi.split('/')[-1]
urlretrieve(posi, atrib_Nuevo)
print("Descargando %s ..." % posi)
if __name__ == "__main__":
if len(sys.argv) != 2:
sys.exit("Usage: python3 karaoke.py file.smil")
try:
obj = open(sys.argv[1])
except (ValueError, IndexError, FileNotFoundError):
sys.exit("Usage: python3 karaoke.py file.smil")
fichero = sys.argv[1]
fich_json = sys.argv[1].replace(".smil", ".json")
obj = KaraokeLocal(fichero)
obj.__init__(fichero)
obj.__str__()
obj.to_json(fich_json)
obj.do_local()
obj.to_json(fich_json, 'local.json')
obj.__str__()
| 2.734375 | 3 |