repo_name (string, 5-92 chars) | path (string, 4-221 chars) | copies (19 classes) | size (string, 4-6 chars) | content (string, 766-896k chars) | license (15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5-13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
neuropoly/spinalcordtoolbox | spinalcordtoolbox/scripts/sct_maths.py | 1 | 20433 |
#!/usr/bin/env python
#########################################################################################
#
# Perform mathematical operations on images
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2015 Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Julien Cohen-Adad, Sara Dupont
#
# About the license: see the file LICENSE.TXT
#########################################################################################
import os
import sys
import pickle
import gzip
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import spinalcordtoolbox.math as sct_math
from spinalcordtoolbox.image import Image
from spinalcordtoolbox.utils.shell import SCTArgumentParser, Metavar, list_type, display_viewer_syntax
from spinalcordtoolbox.utils.sys import init_sct, printv, set_global_loglevel
from spinalcordtoolbox.utils.fs import extract_fname
def get_parser():
parser = SCTArgumentParser(
description='Perform mathematical operations on images. Some inputs can be either a number or a 4d image or '
'several 3d images separated with ","'
)
mandatory = parser.add_argument_group("MANDATORY ARGUMENTS")
mandatory.add_argument(
"-i",
metavar=Metavar.file,
help="Input file. Example: data.nii.gz",
required=True)
mandatory.add_argument(
"-o",
metavar=Metavar.file,
help='Output file. Example: data_mean.nii.gz',
required=True)
optional = parser.add_argument_group("OPTIONAL ARGUMENTS")
optional.add_argument(
"-h",
"--help",
action="help",
help="Show this help message and exit")
basic = parser.add_argument_group('BASIC OPERATIONS')
basic.add_argument(
"-add",
metavar='',
nargs="+",
help='Add following input. Can be a number or multiple images (separated with space).',
required=False)
basic.add_argument(
"-sub",
metavar='',
nargs="+",
help='Subtract following input. Can be a number or an image.',
required=False)
basic.add_argument(
"-mul",
metavar='',
nargs="+",
help='Multiply by following input. Can be a number or multiple images (separated with space).',
required=False)
basic.add_argument(
"-div",
metavar='',
nargs="+",
help='Divide by following input. Can be a number or an image.',
required=False)
basic.add_argument(
'-mean',
help='Average data across dimension.',
required=False,
choices=('x', 'y', 'z', 't'))
basic.add_argument(
'-rms',
help='Compute root-mean-squared across dimension.',
required=False,
choices=('x', 'y', 'z', 't'))
basic.add_argument(
'-std',
help='Compute STD across dimension.',
required=False,
choices=('x', 'y', 'z', 't'))
basic.add_argument(
"-bin",
type=float,
metavar=Metavar.float,
help='Binarize image using specified threshold. Example: 0.5',
required=False)
thresholding = parser.add_argument_group("THRESHOLDING METHODS")
thresholding.add_argument(
'-otsu',
type=int,
metavar=Metavar.int,
help='Threshold image using Otsu algorithm (from skimage). Specify the number of bins (e.g. 16, 64, 128)',
required=False)
thresholding.add_argument(
"-adap",
metavar=Metavar.list,
type=list_type(',', int),
help="R|Threshold image using Adaptive algorithm (from skimage). Provide 2 values separated by ',' that "
"correspond to the parameters below. For example, '-adap 7,0' corresponds to a block size of 7 and an "
"offset of 0.\n"
" - Block size: Odd size of pixel neighborhood which is used to calculate the threshold value. \n"
" - Offset: Constant subtracted from weighted mean of neighborhood to calculate the local threshold "
"value. Suggested offset is 0.",
required=False)
thresholding.add_argument(
"-otsu-median",
metavar=Metavar.list,
type=list_type(',', int),
help="R|Threshold image using Median Otsu algorithm (from dipy). Provide 2 values separated by ',' that "
"correspond to the parameters below. For example, '-otsu-median 3,5' corresponds to a filter size of 3 "
"repeated over 5 iterations.\n"
" - Size: Radius (in voxels) of the applied median filter.\n"
" - Iterations: Number of passes of the median filter.",
required=False)
thresholding.add_argument(
'-percent',
type=int,
help="Threshold image using percentile of its histogram.",
metavar=Metavar.int,
required=False)
thresholding.add_argument(
"-thr",
type=float,
help='Use following number to threshold image (zero below number).',
metavar=Metavar.float,
required=False)
mathematical = parser.add_argument_group("MATHEMATICAL MORPHOLOGY")
mathematical.add_argument(
'-dilate',
type=int,
metavar=Metavar.int,
help="Dilate binary or greyscale image with specified size. If shape={'square', 'cube'}: size corresponds to the length of "
"an edge (size=1 has no effect). If shape={'disk', 'ball'}: size corresponds to the radius, not including "
"the center element (size=0 has no effect).",
required=False)
mathematical.add_argument(
'-erode',
type=int,
metavar=Metavar.int,
help="Erode binary or greyscale image with specified size. If shape={'square', 'cube'}: size corresponds to the length of "
"an edge (size=1 has no effect). If shape={'disk', 'ball'}: size corresponds to the radius, not including "
"the center element (size=0 has no effect).",
required=False)
mathematical.add_argument(
'-shape',
help="R|Shape of the structuring element for the mathematical morphology operation. Default: ball.\n"
"If a 2D shape {'disk', 'square'} is selected, -dim must be specified.",
required=False,
choices=('square', 'cube', 'disk', 'ball'),
default='ball')
mathematical.add_argument(
'-dim',
type=int,
help="Dimension of the array which 2D structural element will be orthogonal to. For example, if you wish to "
"apply a 2D disk kernel in the X-Y plane, leaving Z unaffected, parameters will be: shape=disk, dim=2.",
required=False,
choices=(0, 1, 2))
filtering = parser.add_argument_group("FILTERING METHODS")
filtering.add_argument(
"-smooth",
metavar=Metavar.list,
type=list_type(',', float),
help='Gaussian smoothing filtering. Supply values for standard deviations in mm. If a single value is provided, '
'it will be applied to each axis of the image. If multiple values are provided, there must be one value '
'per image axis. (Examples: "-smooth 2.0,3.0,2.0" (3D image), "-smooth 2.0" (any-D image)).',
required=False)
filtering.add_argument(
'-laplacian',
metavar=Metavar.list,
type=list_type(',', float),
help='Laplacian filtering. Supply values for standard deviations in mm. If a single value is provided, it will '
'be applied to each axis of the image. If multiple values are provided, there must be one value per '
'image axis. (Examples: "-laplacian 2.0,3.0,2.0" (3D image), "-laplacian 2.0" (any-D image)).',
required=False)
filtering.add_argument(
'-denoise',
help='R|Non-local means adaptive denoising from P. Coupe et al. as implemented in dipy. Separate parameters with ",". Example: p=1,b=3\n'
' p: (patch radius) similar patches in the non-local means are searched for locally, inside a cube of side 2*p+1 centered at each voxel of interest. Default: p=1\n'
' b: (block radius) the size of the block to be used (2*b+1) in the blockwise non-local means implementation. Default: b=5\n'
' (Note: the block radius must be smaller than the smallest image dimension; the default value is lowered for small images.)\n'
'To use default parameters, write -denoise 1',
required=False)
similarity = parser.add_argument_group("SIMILARITY METRIC")
similarity.add_argument(
'-mi',
metavar=Metavar.file,
help='Compute the mutual information (MI) between both input files (-i and -mi) as in: '
'http://scikit-learn.org/stable/modules/generated/sklearn.metrics.mutual_info_score.html',
required=False)
similarity.add_argument(
'-minorm',
metavar=Metavar.file,
help='Compute the normalized mutual information (MI) between both input files (-i and -mi) as in: '
'http://scikit-learn.org/stable/modules/generated/sklearn.metrics.normalized_mutual_info_score.html',
required=False)
similarity.add_argument(
'-corr',
metavar=Metavar.file,
help='Compute the cross correlation (CC) between both input files (-i and -corr).',
required=False)
misc = parser.add_argument_group("MISC")
misc.add_argument(
'-symmetrize',
type=int,
help='Symmetrize data along the specified dimension.',
required=False,
choices=(0, 1, 2))
misc.add_argument(
'-type',
required=False,
help='Output type.',
choices=('uint8', 'int16', 'int32', 'float32', 'complex64', 'float64', 'int8', 'uint16', 'uint32', 'int64',
'uint64'))
optional.add_argument(
'-v',
metavar=Metavar.int,
type=int,
choices=[0, 1, 2],
default=1,
# Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as "if verbose == #" in API
help="Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode")
return parser
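# Illustrative command lines (added sketch, not part of the original script), composed
# only from the flags defined in get_parser() above; file names are placeholders:
#   sct_maths -i data.nii.gz -o data_mean.nii.gz -mean t
#   sct_maths -i data.nii.gz -o data_smooth.nii.gz -smooth 2.0
#   sct_maths -i data.nii.gz -o data_bin.nii.gz -bin 0.5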
# MAIN
# ==========================================================================================
def main(argv=None):
"""
Main function
:param argv:
:return:
"""
parser = get_parser()
arguments = parser.parse_args(argv)
verbose = arguments.v
set_global_loglevel(verbose=verbose)
dim_list = ['x', 'y', 'z', 't']
fname_in = arguments.i
fname_out = arguments.o
output_type = arguments.type
# Open file(s)
im = Image(fname_in)
data = im.data # 3d or 4d numpy array
dim = im.dim
# run command
if arguments.otsu is not None:
param = arguments.otsu
data_out = sct_math.otsu(data, param)
elif arguments.adap is not None:
param = arguments.adap
data_out = sct_math.adap(data, param[0], param[1])
elif arguments.otsu_median is not None:
param = arguments.otsu_median
data_out = sct_math.otsu_median(data, param[0], param[1])
elif arguments.thr is not None:
param = arguments.thr
data_out = sct_math.threshold(data, param)
elif arguments.percent is not None:
param = arguments.percent
data_out = sct_math.perc(data, param)
elif arguments.bin is not None:
bin_thr = arguments.bin
data_out = sct_math.binarize(data, bin_thr=bin_thr)
elif arguments.add is not None:
data2 = get_data_or_scalar(arguments.add, data)
data_concat = sct_math.concatenate_along_4th_dimension(data, data2)
data_out = np.sum(data_concat, axis=3)
elif arguments.sub is not None:
data2 = get_data_or_scalar(arguments.sub, data)
data_out = data - data2
elif arguments.laplacian is not None:
sigmas = arguments.laplacian
if len(sigmas) == 1:
sigmas = [sigmas[0] for i in range(len(data.shape))]
elif len(sigmas) != len(data.shape):
printv(parser.error('ERROR: -laplacian needs the same number of inputs as the number of image dimensions OR only one input'))
# adjust sigma based on voxel size
sigmas = [sigmas[i] / dim[i + 4] for i in range(3)]
# smooth data
data_out = sct_math.laplacian(data, sigmas)
elif arguments.mul is not None:
data2 = get_data_or_scalar(arguments.mul, data)
data_concat = sct_math.concatenate_along_4th_dimension(data, data2)
data_out = np.prod(data_concat, axis=3)
elif arguments.div is not None:
data2 = get_data_or_scalar(arguments.div, data)
data_out = np.divide(data, data2)
elif arguments.mean is not None:
dim = dim_list.index(arguments.mean)
if dim + 1 > len(np.shape(data)): # in case input volume is 3d and dim=t
data = data[..., np.newaxis]
data_out = np.mean(data, dim)
elif arguments.rms is not None:
dim = dim_list.index(arguments.rms)
if dim + 1 > len(np.shape(data)): # in case input volume is 3d and dim=t
data = data[..., np.newaxis]
data_out = np.sqrt(np.mean(np.square(data.astype(float)), dim))
elif arguments.std is not None:
dim = dim_list.index(arguments.std)
if dim + 1 > len(np.shape(data)): # in case input volume is 3d and dim=t
data = data[..., np.newaxis]
data_out = np.std(data, dim, ddof=1)
elif arguments.smooth is not None:
sigmas = arguments.smooth
if len(sigmas) == 1:
sigmas = [sigmas[0] for i in range(len(data.shape))]
elif len(sigmas) != len(data.shape):
printv(parser.error('ERROR: -smooth needs the same number of inputs as the number of image dimensions OR only one input'))
# adjust sigma based on voxel size
sigmas = [sigmas[i] / dim[i + 4] for i in range(3)]
# smooth data
data_out = sct_math.smooth(data, sigmas)
elif arguments.dilate is not None:
if arguments.shape in ['disk', 'square'] and arguments.dim is None:
printv(parser.error('ERROR: -dim is required for -dilate with 2D morphological kernel'))
data_out = sct_math.dilate(data, size=arguments.dilate, shape=arguments.shape, dim=arguments.dim)
elif arguments.erode is not None:
if arguments.shape in ['disk', 'square'] and arguments.dim is None:
printv(parser.error('ERROR: -dim is required for -erode with 2D morphological kernel'))
data_out = sct_math.erode(data, size=arguments.erode, shape=arguments.shape, dim=arguments.dim)
elif arguments.denoise is not None:
# parse denoising arguments
p, b = 1, 5 # default arguments
list_denoise = (arguments.denoise).split(",")
for i in list_denoise:
if 'p' in i:
p = int(i.split('=')[1])
if 'b' in i:
b = int(i.split('=')[1])
data_out = sct_math.denoise_nlmeans(data, patch_radius=p, block_radius=b)
elif arguments.symmetrize is not None:
data_out = (data + data[list(range(data.shape[0] - 1, -1, -1)), :, :]) / float(2)
elif arguments.mi is not None:
# input 1 = from flag -i --> im
# input 2 = from flag -mi
im_2 = Image(arguments.mi)
compute_similarity(im, im_2, fname_out, metric='mi', metric_full='Mutual information', verbose=verbose)
data_out = None
elif arguments.minorm is not None:
im_2 = Image(arguments.minorm)
compute_similarity(im, im_2, fname_out, metric='minorm', metric_full='Normalized Mutual information', verbose=verbose)
data_out = None
elif arguments.corr is not None:
# input 1 = from flag -i --> im
# input 2 = from flag -corr
im_2 = Image(arguments.corr)
compute_similarity(im, im_2, fname_out, metric='corr', metric_full='Pearson correlation coefficient', verbose=verbose)
data_out = None
# if no flag is set
else:
data_out = None
printv(parser.error('ERROR: you need to specify an operation to do on the input image'))
if data_out is not None:
# Write output
nii_out = Image(fname_in) # use header of input file
nii_out.data = data_out
nii_out.save(fname_out, dtype=output_type)
# TODO: case of multiple outputs
# assert len(data_out) == n_out
# if n_in == n_out:
# for im_in, d_out, fn_out in zip(nii, data_out, fname_out):
# im_in.data = d_out
# im_in.absolutepath = fn_out
# if arguments.w is not None:
# im_in.hdr.set_intent('vector', (), '')
# im_in.save()
# elif n_out == 1:
# nii[0].data = data_out[0]
# nii[0].absolutepath = fname_out[0]
# if arguments.w is not None:
# nii[0].hdr.set_intent('vector', (), '')
# nii[0].save()
# elif n_out > n_in:
# for dat_out, name_out in zip(data_out, fname_out):
# im_out = nii[0].copy()
# im_out.data = dat_out
# im_out.absolutepath = name_out
# if arguments.w is not None:
# im_out.hdr.set_intent('vector', (), '')
# im_out.save()
# else:
# printv(parser.usage.generate(error='ERROR: not the correct numbers of inputs and outputs'))
# display message
if data_out is not None:
display_viewer_syntax([fname_out], verbose=verbose)
else:
printv('\nDone! File created: ' + fname_out, verbose, 'info')
def get_data(list_fname):
"""
Get data from list of file names
:param list_fname:
:return: 3D or 4D numpy array.
"""
try:
nii = [Image(f_in) for f_in in list_fname]
except Exception as e:
printv(str(e), 1, 'error') # file does not exist, exit program
data0 = nii[0].data
data = nii[0].data
# check that all images have the same shape
for i in range(1, len(nii)):
if not np.shape(nii[i].data) == np.shape(data0):
printv('\nWARNING: shape(' + list_fname[i] + ')=' + str(np.shape(nii[i].data)) + ' incompatible with shape(' + list_fname[0] + ')=' + str(np.shape(data0)), 1, 'warning')
printv('\nERROR: All input images must have same dimensions.', 1, 'error')
else:
data = sct_math.concatenate_along_4th_dimension(data, nii[i].data)
return data
def get_data_or_scalar(argument, data_in):
"""
Get data from list of file names (scenario 1) or scalar (scenario 2)
:param argument: list of file names or scalar
:param data_in: if argument is scalar, use data to get np.shape
:return: 3d or 4d numpy array
"""
# try to convert argument in float
try:
# build data2 with same shape as data
data_out = data_in[:, :, :] * 0 + float(argument[0])
# if conversion fails, it should be a string (i.e. file name)
except ValueError:
data_out = get_data(argument)
return data_out
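# Behaviour of get_data_or_scalar(), summarized with hypothetical arguments:
#   get_data_or_scalar(['0.5'], data_in)          -> array shaped like data_in, filled with 0.5
#   get_data_or_scalar(['mask.nii.gz'], data_in)  -> image data loaded (and concatenated) by get_data()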
def compute_similarity(img1: Image, img2: Image, fname_out: str, metric: str, metric_full: str, verbose):
"""
Sanitize input and compute similarity metric between two images data.
"""
if img1.data.size != img2.data.size:
raise ValueError(f"Input images don't have the same size! \nPlease use \"sct_register_multimodal -i im1.nii.gz -d im2.nii.gz -identity 1\" to put the input images in the same space")
res, data1_1d, data2_1d = sct_math.compute_similarity(img1.data, img2.data, metric=metric)
if verbose > 1:
matplotlib.use('Agg')
plt.plot(data1_1d, 'b')
plt.plot(data2_1d, 'r')
plt.title('Similarity: ' + metric_full + ' = ' + str(res))
plt.savefig('fig_similarity.png')
path_out, filename_out, ext_out = extract_fname(fname_out)
if ext_out not in ['.txt', '.pkl', '.pklz', '.pickle']:
raise ValueError(f"The output file should a text file or a pickle file. Received extension: {ext_out}")
if ext_out == '.txt':
with open(fname_out, 'w') as f:
f.write(metric_full + ': \n' + str(res))
elif ext_out == '.pklz':
pickle.dump(res, gzip.open(fname_out, 'wb'), protocol=2)
else:
pickle.dump(res, open(fname_out, 'wb'), protocol=2)  # pickle files must be opened in binary mode
if __name__ == "__main__":
init_sct()
main(sys.argv[1:])
| mit | 2,048,659,252,759,735,300 | 39.222441 | 192 | 0.597856 | false | 3.655931 | false | false | false |
Ultimaker/Cura | plugins/DigitalLibrary/src/DFFileExportAndUploadManager.py | 1 | 19837 |
# Copyright (c) 2021 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
import json
import threading
from json import JSONDecodeError
from typing import List, Dict, Any, Callable, Union, Optional
from PyQt5.QtCore import QUrl
from PyQt5.QtGui import QDesktopServices
from PyQt5.QtNetwork import QNetworkReply
from UM.FileHandler.FileHandler import FileHandler
from UM.Logger import Logger
from UM.Message import Message
from UM.Scene.SceneNode import SceneNode
from cura.CuraApplication import CuraApplication
from .DFLibraryFileUploadRequest import DFLibraryFileUploadRequest
from .DFLibraryFileUploadResponse import DFLibraryFileUploadResponse
from .DFPrintJobUploadRequest import DFPrintJobUploadRequest
from .DFPrintJobUploadResponse import DFPrintJobUploadResponse
from .DigitalFactoryApiClient import DigitalFactoryApiClient
from .ExportFileJob import ExportFileJob
class DFFileExportAndUploadManager:
"""
Class responsible for exporting the scene and uploading the exported data to the Digital Factory Library. Since 3mf
and UFP files may need to be uploaded at the same time, this class keeps a single progress and success message for
both files and updates those messages according to the progress of both the file job uploads.
"""
def __init__(self, file_handlers: Dict[str, FileHandler],
nodes: List[SceneNode],
library_project_id: str,
library_project_name: str,
file_name: str,
formats: List[str],
on_upload_error: Callable[[], Any],
on_upload_success: Callable[[], Any],
on_upload_finished: Callable[[], Any] ,
on_upload_progress: Callable[[int], Any]) -> None:
self._file_handlers = file_handlers # type: Dict[str, FileHandler]
self._nodes = nodes # type: List[SceneNode]
self._library_project_id = library_project_id # type: str
self._library_project_name = library_project_name # type: str
self._file_name = file_name # type: str
self._upload_jobs = [] # type: List[ExportFileJob]
self._formats = formats # type: List[str]
self._api = DigitalFactoryApiClient(application = CuraApplication.getInstance(), on_error = lambda error: Logger.log("e", str(error)))
# Functions of the parent class that should be called based on the upload process output
self._on_upload_error = on_upload_error
self._on_upload_success = on_upload_success
self._on_upload_finished = on_upload_finished
self._on_upload_progress = on_upload_progress
# Lock used for updating the progress message (since the progress is changed by two parallel upload jobs) or
# show the success message (once both upload jobs are done)
self._message_lock = threading.Lock()
self._file_upload_job_metadata = self.initializeFileUploadJobMetadata() # type: Dict[str, Dict[str, Any]]
self.progress_message = Message(
title = "Uploading...",
text = "Uploading files to '{}'".format(self._library_project_name),
progress = -1,
lifetime = 0,
dismissable = False,
use_inactivity_timer = False
)
self._generic_success_message = Message(
text = "Your {} uploaded to '{}'.".format("file was" if len(self._file_upload_job_metadata) <= 1 else "files were", self._library_project_name),
title = "Upload successful",
lifetime = 0,
)
self._generic_success_message.addAction(
"open_df_project",
"Open project",
"open-folder", "Open the project containing the file in Digital Library"
)
self._generic_success_message.actionTriggered.connect(self._onMessageActionTriggered)
def _onCuraProjectFileExported(self, job: ExportFileJob) -> None:
"""Handler for when the DF Library workspace file (3MF) has been created locally.
It can now be sent over the Digital Factory API.
"""
if not job.getOutput():
self._onJobExportError(job.getFileName())
return
self._file_upload_job_metadata[job.getFileName()]["export_job_output"] = job.getOutput()
request = DFLibraryFileUploadRequest(
content_type = job.getMimeType(),
file_name = job.getFileName(),
file_size = len(job.getOutput()),
library_project_id = self._library_project_id
)
self._api.requestUpload3MF(request, on_finished = self._uploadFileData, on_error = self._onRequestUploadCuraProjectFileFailed)
def _onPrintFileExported(self, job: ExportFileJob) -> None:
"""Handler for when the DF Library print job file (UFP) has been created locally.
It can now be sent over the Digital Factory API.
"""
if not job.getOutput():
self._onJobExportError(job.getFileName())
return
self._file_upload_job_metadata[job.getFileName()]["export_job_output"] = job.getOutput()
request = DFPrintJobUploadRequest(
content_type = job.getMimeType(),
job_name = job.getFileName(),
file_size = len(job.getOutput()),
library_project_id = self._library_project_id
)
self._api.requestUploadUFP(request, on_finished = self._uploadFileData, on_error = self._onRequestUploadPrintFileFailed)
def _uploadFileData(self, file_upload_response: Union[DFLibraryFileUploadResponse, DFPrintJobUploadResponse]) -> None:
"""Uploads the exported file data after the file or print job upload has been registered at the Digital Factory
Library API.
:param file_upload_response: The response received from the Digital Factory Library API.
"""
if isinstance(file_upload_response, DFLibraryFileUploadResponse):
file_name = file_upload_response.file_name
elif isinstance(file_upload_response, DFPrintJobUploadResponse):
file_name = file_upload_response.job_name if file_upload_response.job_name is not None else ""
else:
Logger.log("e", "Wrong response type received. Aborting uploading file to the Digital Library")
return
with self._message_lock:
self.progress_message.show()
self._file_upload_job_metadata[file_name]["file_upload_response"] = file_upload_response
job_output = self._file_upload_job_metadata[file_name]["export_job_output"]
with self._message_lock:
self._file_upload_job_metadata[file_name]["upload_status"] = "uploading"
self._api.uploadExportedFileData(file_upload_response,
job_output,
on_finished = self._onFileUploadFinished,
on_success = self._onUploadSuccess,
on_progress = self._onUploadProgress,
on_error = self._onUploadError)
def _onUploadProgress(self, filename: str, progress: int) -> None:
"""
Updates the progress message according to the total progress of the two files and displays it to the user. It is
made thread-safe with a lock, since the progress can be updated by two separate upload jobs
:param filename: The name of the file for which we have progress (including the extension).
:param progress: The progress percentage
"""
with self._message_lock:
self._file_upload_job_metadata[filename]["upload_progress"] = progress
self._file_upload_job_metadata[filename]["upload_status"] = "uploading"
total_progress = self.getTotalProgress()
self.progress_message.setProgress(total_progress)
self.progress_message.show()
self._on_upload_progress(progress)
def _onUploadSuccess(self, filename: str) -> None:
"""
Sets the upload status to success and the progress of the file with the given filename to 100%. This function
should be called only if the file has uploaded all of its data successfully (i.e. no error occurred during the
upload process).
:param filename: The name of the file that was uploaded successfully (including the extension).
"""
with self._message_lock:
self._file_upload_job_metadata[filename]["upload_status"] = "success"
self._file_upload_job_metadata[filename]["upload_progress"] = 100
self._on_upload_success()
def _onFileUploadFinished(self, filename: str) -> None:
"""
Callback that makes sure the correct messages are displayed according to the statuses of the individual jobs.
This function is called whenever an upload job has finished, regardless if it had errors or was successful.
Both jobs have to have finished for the messages to show.
:param filename: The name of the file that has finished uploading (including the extension).
"""
with self._message_lock:
# All files have finished their uploading process
if all([(file_upload_job["upload_progress"] == 100 and file_upload_job["upload_status"] != "uploading") for file_upload_job in self._file_upload_job_metadata.values()]):
# Reset and hide the progress message
self.progress_message.setProgress(-1)
self.progress_message.hide()
# All files were successfully uploaded.
if all([(file_upload_job["upload_status"] == "success") for file_upload_job in self._file_upload_job_metadata.values()]):
# Show a single generic success message for all files
self._generic_success_message.show()
else: # One or more files failed to upload.
# Show individual messages for each file, according to their statuses
for filename, upload_job_metadata in self._file_upload_job_metadata.items():
if upload_job_metadata["upload_status"] == "success":
upload_job_metadata["file_upload_success_message"].show()
else:
upload_job_metadata["file_upload_failed_message"].show()
# Call the parent's finished function
self._on_upload_finished()
def _onJobExportError(self, filename: str) -> None:
"""
Displays an appropriate message when the process to export a file fails.
:param filename: The name of the file that failed to be exported (including the extension).
"""
Logger.log("d", "Error while exporting file '{}'".format(filename))
with self._message_lock:
# Set the progress to 100% when the upload job fails, to avoid having the progress message stuck
self._file_upload_job_metadata[filename]["upload_status"] = "failed"
self._file_upload_job_metadata[filename]["upload_progress"] = 100
self._file_upload_job_metadata[filename]["file_upload_failed_message"] = Message(
text = "Failed to export the file '{}'. The upload process is aborted.".format(filename),
title = "Export error",
lifetime = 0
)
self._on_upload_error()
self._onFileUploadFinished(filename)
def _onRequestUploadCuraProjectFileFailed(self, reply: "QNetworkReply", network_error: "QNetworkReply.NetworkError") -> None:
"""
Displays an appropriate message when the request to upload the Cura project file (.3mf) to the Digital Library fails.
This means that something went wrong with the initial request to create a "file" entry in the digital library.
"""
reply_string = bytes(reply.readAll()).decode()
filename_3mf = self._file_name + ".3mf"
Logger.log("d", "An error occurred while uploading the Cura project file '{}' to the Digital Library project '{}': {}".format(filename_3mf, self._library_project_id, reply_string))
with self._message_lock:
# Set the progress to 100% when the upload job fails, to avoid having the progress message stuck
self._file_upload_job_metadata[filename_3mf]["upload_status"] = "failed"
self._file_upload_job_metadata[filename_3mf]["upload_progress"] = 100
human_readable_error = self.extractErrorTitle(reply_string)
self._file_upload_job_metadata[filename_3mf]["file_upload_failed_message"] = Message(
text = "Failed to upload the file '{}' to '{}'. {}".format(filename_3mf, self._library_project_name, human_readable_error),
title = "File upload error",
lifetime = 0
)
self._on_upload_error()
self._onFileUploadFinished(filename_3mf)
def _onRequestUploadPrintFileFailed(self, reply: "QNetworkReply", network_error: "QNetworkReply.NetworkError") -> None:
"""
Displays an appropriate message when the request to upload the print file (.ufp) to the Digital Library fails.
This means that something went wrong with the initial request to create a "file" entry in the digital library.
"""
reply_string = bytes(reply.readAll()).decode()
filename_ufp = self._file_name + ".ufp"
Logger.log("d", "An error occurred while uploading the print job file '{}' to the Digital Library project '{}': {}".format(filename_ufp, self._library_project_id, reply_string))
with self._message_lock:
# Set the progress to 100% when the upload job fails, to avoid having the progress message stuck
self._file_upload_job_metadata[filename_ufp]["upload_status"] = "failed"
self._file_upload_job_metadata[filename_ufp]["upload_progress"] = 100
human_readable_error = self.extractErrorTitle(reply_string)
self._file_upload_job_metadata[filename_ufp]["file_upload_failed_message"] = Message(
title = "File upload error",
text = "Failed to upload the file '{}' to '{}'. {}".format(filename_ufp, self._library_project_name, human_readable_error),
lifetime = 0
)
self._on_upload_error()
self._onFileUploadFinished(filename_ufp)
@staticmethod
def extractErrorTitle(reply_body: Optional[str]) -> str:
error_title = ""
if reply_body:
try:
reply_dict = json.loads(reply_body)
except JSONDecodeError:
Logger.logException("w", "Unable to extract title from reply body")
return error_title
if "errors" in reply_dict and len(reply_dict["errors"]) >= 1 and "title" in reply_dict["errors"][0]:
error_title = reply_dict["errors"][0]["title"]
return error_title
def _onUploadError(self, filename: str, reply: "QNetworkReply", error: "QNetworkReply.NetworkError") -> None:
"""
Displays the given message if uploading the mesh has failed due to a generic error (i.e. lost connection).
If one of the two files fail, this error function will set its progress as finished, to make sure that the
progress message doesn't get stuck.
:param filename: The name of the file that failed to upload (including the extension).
"""
reply_string = bytes(reply.readAll()).decode()
Logger.log("d", "Error while uploading '{}' to the Digital Library project '{}'. Reply: {}".format(filename, self._library_project_id, reply_string))
with self._message_lock:
# Set the progress to 100% when the upload job fails, to avoid having the progress message stuck
self._file_upload_job_metadata[filename]["upload_status"] = "failed"
self._file_upload_job_metadata[filename]["upload_progress"] = 100
human_readable_error = self.extractErrorTitle(reply_string)
self._file_upload_job_metadata[filename]["file_upload_failed_message"] = Message(
title = "File upload error",
text = "Failed to upload the file '{}' to '{}'. {}".format(self._file_name, self._library_project_name, human_readable_error),
lifetime = 0
)
self._on_upload_error()
def getTotalProgress(self) -> int:
"""
Returns the total upload progress of all the upload jobs
:return: The average progress percentage
"""
return int(sum([file_upload_job["upload_progress"] for file_upload_job in self._file_upload_job_metadata.values()]) / len(self._file_upload_job_metadata.values()))
def _onMessageActionTriggered(self, message, action):
if action == "open_df_project":
project_url = "{}/app/library/project/{}?wait_for_new_files=true".format(CuraApplication.getInstance().ultimakerDigitalFactoryUrl, self._library_project_id)
QDesktopServices.openUrl(QUrl(project_url))
message.hide()
def start(self) -> None:
for job in self._upload_jobs:
job.start()
def initializeFileUploadJobMetadata(self) -> Dict[str, Any]:
metadata = {}
self._upload_jobs = []
if "3mf" in self._formats and "3mf" in self._file_handlers and self._file_handlers["3mf"]:
filename_3mf = self._file_name + ".3mf"
metadata[filename_3mf] = {
"export_job_output" : None,
"upload_progress" : -1,
"upload_status" : "",
"file_upload_response": None,
"file_upload_success_message": Message(
text = "'{}' was uploaded to '{}'.".format(filename_3mf, self._library_project_name),
title = "Upload successful",
lifetime = 0,
),
"file_upload_failed_message": Message(
text = "Failed to upload the file '{}' to '{}'.".format(filename_3mf, self._library_project_name),
title = "File upload error",
lifetime = 0
)
}
job_3mf = ExportFileJob(self._file_handlers["3mf"], self._nodes, self._file_name, "3mf")
job_3mf.finished.connect(self._onCuraProjectFileExported)
self._upload_jobs.append(job_3mf)
if "ufp" in self._formats and "ufp" in self._file_handlers and self._file_handlers["ufp"]:
filename_ufp = self._file_name + ".ufp"
metadata[filename_ufp] = {
"export_job_output" : None,
"upload_progress" : -1,
"upload_status" : "",
"file_upload_response": None,
"file_upload_success_message": Message(
text = "'{}' was uploaded to '{}'.".format(filename_ufp, self._library_project_name),
title = "Upload successful",
lifetime = 0,
),
"file_upload_failed_message": Message(
text = "Failed to upload the file '{}' to '{}'.".format(filename_ufp, self._library_project_name),
title = "File upload error",
lifetime = 0
)
}
job_ufp = ExportFileJob(self._file_handlers["ufp"], self._nodes, self._file_name, "ufp")
job_ufp.finished.connect(self._onPrintFileExported)
self._upload_jobs.append(job_ufp)
return metadata
| lgpl-3.0 | -487,210,490,165,346,050 | 52.182306 | 188 | 0.616978 | false | 4.331223 | false | false | false |
scotthuang1989/Python-3-Module-of-the-Week | concurrency/asyncio/asyncio_echo_server_protocol.py | 1 | 1651 |
import asyncio
import logging
import sys
SERVER_ADDRESS = ('localhost', 10000)
logging.basicConfig(
level=logging.DEBUG,
format='%(name)s: %(message)s',
stream=sys.stderr,
)
log = logging.getLogger('main')
event_loop = asyncio.get_event_loop()
class EchoServer(asyncio.Protocol):
def connection_made(self, transport):
self.transport = transport
self.address = transport.get_extra_info('peername')
self.log = logging.getLogger(
'EchoServer_{}_{}'.format(*self.address)
)
self.log.debug('connection accepted')
def data_received(self, data):
self.log.debug('received {!r}'.format(data))
self.transport.write(data)
self.log.debug('sent {!r}'.format(data))
def eof_received(self):
self.log.debug('received EOF')
if self.transport.can_write_eof():
self.transport.write_eof()
def connection_lost(self, error):
if error:
self.log.error('ERROR: {}'.format(error))
else:
self.log.debug('closing')
super().connection_lost(error)
# Create the server and let the loop finish the coroutine before
# starting the real event loop.
factory = event_loop.create_server(EchoServer, *SERVER_ADDRESS)
server = event_loop.run_until_complete(factory)
log.debug('starting up on {} port {}'.format(*SERVER_ADDRESS))
# Enter the event loop permanently to handle all connections.
try:
event_loop.run_forever()
finally:
log.debug('closing server')
server.close()
event_loop.run_until_complete(server.wait_closed())
log.debug('closing event loop')
event_loop.close()
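# A quick way to exercise this server (assuming it is running locally on the
# SERVER_ADDRESS defined above) is a raw TCP client such as `nc localhost 10000`;
# anything typed is echoed back and logged by the EchoServer protocol.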
| apache-2.0 | -3,189,786,440,921,151,000 | 27.964912 | 64 | 0.654755 | false | 3.76082 | false | false | false |
Tenrec-Builders/pi-scan | src/stick.py | 1 | 3424 |
import dbus
def search():
bus = dbus.SystemBus()
udisks = dbus.Interface(
bus.get_object('org.freedesktop.UDisks2',
'/org/freedesktop/UDisks2'),
'org.freedesktop.DBus.ObjectManager')
listDevices = udisks.get_dbus_method('GetManagedObjects')
result = []
for key, value in listDevices().items():
try:
if ('org.freedesktop.UDisks2.Block' in value and
'org.freedesktop.UDisks2.Filesystem' in value):
block = value['org.freedesktop.UDisks2.Block']
drive = dbus.Interface(
bus.get_object('org.freedesktop.UDisks2',
block['Drive']),
'org.freedesktop.UDisks2.Drive')
driveprop = dbus.Interface(
drive,
'org.freedesktop.DBus.Properties')
busType = driveprop.Get('org.freedesktop.UDisks2.Drive',
'ConnectionBus')
if busType == 'usb':
result.append(Stick(key))
except Exception as e:
pass
return result
def searchAndUnmount(shouldForce):
result = 0
sticks = search()
for stick in sticks:
if stick.get_mount_point() is not None:
result += 1
stick.unmount(shouldForce)
return result
class Stick:
def __init__(self, path):
self.path = path
def mount(self):
mount_point = self.get_mount_point()
try:
if mount_point is None:
bus = dbus.SystemBus()
fs = dbus.Interface(
bus.get_object('org.freedesktop.UDisks2',
self.path),
'org.freedesktop.UDisks2.Filesystem')
mount = fs.get_dbus_method(
"Mount",
dbus_interface="org.freedesktop.UDisks2.Filesystem")
mount_point = mount([])
except Exception, e:
print 'Failed to mount: ', e
return mount_point
def get_mount_point(self):
mount_point = None
try:
bus = dbus.SystemBus()
fs = dbus.Interface(
bus.get_object('org.freedesktop.UDisks2',
self.path),
'org.freedesktop.UDisks2.Filesystem')
fsprop = dbus.Interface(fs, 'org.freedesktop.DBus.Properties')
old_mounts = fsprop.Get('org.freedesktop.UDisks2.Filesystem',
'MountPoints')
if len(old_mounts) > 0:
mount_point = bytearray(old_mounts[0]).decode('utf-8')
except Exception, e:
print 'Failed to get/parse mount point', e
return mount_point
def unmount(self, should_force):
mount_point = self.get_mount_point()
try:
if mount_point is not None:
bus = dbus.SystemBus()
fs = dbus.Interface(
bus.get_object('org.freedesktop.UDisks2',
self.path),
'org.freedesktop.UDisks2.Filesystem')
unmount = fs.get_dbus_method(
"Unmount",
dbus_interface="org.freedesktop.UDisks2.Filesystem")
unmount({'force': should_force})
except Exception, e:
print 'Failed to unmount: ', e
def main():
mount_point = None
sticks = search()
if len(sticks) == 0:
print 'No Stick Found'
elif len(sticks) > 1:
print len(sticks), ' sticks found. Try unplugging one.'
else:
mount_point = sticks[0].get_mount_point()
if mount_point is None:
mount_point = sticks[0].mount()
print 'Mounted at: ' + mount_point
else:
print 'Unmounting. Was mounted at: ' + mount_point
sticks[0].unmount(True)
#main()
| bsd-2-clause | 8,485,670,437,875,832,000 | 29.846847 | 68 | 0.592874 | false | 3.53719 | false | false | false |
dhalleine/tensorflow | tensorflow/contrib/layers/python/layers/optimizers.py | 1 | 9885 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Optimizer ops for use in layers and tf.learn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as vars_
from tensorflow.python.training import optimizer as optimizer_
from tensorflow.python.training import training as train
OPTIMIZER_CLS_NAMES = {
"Adagrad": train.AdagradOptimizer,
"Adam": train.AdamOptimizer,
"Ftrl": train.FtrlOptimizer,
"Momentum": train.MomentumOptimizer,
"RMSProp": train.RMSPropOptimizer,
"SGD": train.GradientDescentOptimizer,
}
def optimize_loss(loss,
global_step,
learning_rate,
optimizer,
gradient_noise_scale=None,
gradient_multipliers=None,
clip_gradients=None,
moving_average_decay=0.9,
learning_rate_decay_fn=None,
update_ops=None,
variables=None,
name=None):
"""Given loss and parameters for optimizer, returns a training op.
Args:
loss: Tensor, 0 dimensional.
global_step: Tensor, step counter for each update.
learning_rate: float or Tensor, magnitude of update per each training step.
optimizer: string, class or optimizer instance, used as trainer.
string should be name of optimizer, like 'SGD',
'Adam', 'Adagrad'. Full list in OPTIMIZER_CLS_NAMES constant.
class should be sub-class of tf.Optimizer that implements
`compute_gradients` and `apply_gradients` functions.
optimizer instance should be an instantiation of tf.Optimizer sub-class
and have `compute_gradients` and `apply_gradients` functions.
gradient_noise_scale: float or None, adds 0-mean normal noise scaled by this
value.
gradient_multipliers: dict of variables or variable names to floats.
If present, gradients for specified
variables will be multiplied by given constant.
clip_gradients: float or `None`, clips gradients by this value.
moving_average_decay: float or None, takes into account previous loss
to make learning smoother due to outliers.
learning_rate_decay_fn: function, takes `learning_rate` and `global_step`
`Tensor`s, returns `Tensor`.
Can be used to implement any learning rate decay
functions.
For example: tf.train.exponential_decay.
update_ops: list of update `Operation`s to execute at each step. If `None`,
uses elements of UPDATE_OPS collection.
variables: list of variables to optimize or
`None` to use all trainable variables.
name: The name for this operation is used to scope operations and summaries.
Returns:
Training op.
Raises:
ValueError: if optimizer is wrong type.
"""
with vs.variable_op_scope([loss, global_step], name, "OptimizeLoss"):
# Update ops take UPDATE_OPS collection if not provided.
update_ops = (set(update_ops or []) or
set(ops.get_collection(ops.GraphKeys.UPDATE_OPS)))
# Make sure update ops are run before computing loss.
if update_ops:
with ops.control_dependencies(update_ops):
barrier = control_flow_ops.no_op(name="update_barrier")
loss = control_flow_ops.with_dependencies([barrier], loss)
# Moving average of the loss with decay.
if moving_average_decay is not None:
# Generate moving averages of the loss.
loss_averages = train.ExponentialMovingAverage(moving_average_decay,
name="avg")
loss_averages_op = loss_averages.apply([loss])
logging_ops.scalar_summary("loss/mean", loss_averages.average(loss))
loss = control_flow_ops.with_dependencies([loss_averages_op], loss)
# Learning rate variable, with possible decay.
if (isinstance(learning_rate, ops.Tensor)
and learning_rate.get_shape().ndims == 0):
lr = learning_rate
elif isinstance(learning_rate, float):
lr = vs.get_variable(
"learning_rate", [], trainable=False,
initializer=init_ops.constant_initializer(learning_rate))
else:
raise ValueError("Learning rate should be 0d Tensor or float. "
"Got %s of type %s" % (
str(learning_rate), str(type(learning_rate))))
if learning_rate_decay_fn is not None:
lr = learning_rate_decay_fn(lr, global_step)
# Create optimizer, given specified parameters.
if isinstance(optimizer, six.string_types):
if optimizer not in OPTIMIZER_CLS_NAMES:
raise ValueError(
"Optimizer name should be one of [%s], you provided %s."
% (", ".join(OPTIMIZER_CLS_NAMES), optimizer))
opt = OPTIMIZER_CLS_NAMES[optimizer](learning_rate=lr)
elif isinstance(optimizer, type) and issubclass(optimizer,
optimizer_.Optimizer):
opt = optimizer(learning_rate=lr)
elif isinstance(optimizer, optimizer_.Optimizer):
opt = optimizer
else:
raise ValueError("Unrecognized optimizer: should be string, "
"subclass of Optimizer or instance of "
"subclass of Optimizer. Got %s." % str(optimizer))
# All trainable variables, if specific variables are not specified.
if variables is None:
variables = vars_.trainable_variables()
# Compute gradients.
gradients = opt.compute_gradients(loss, variables)
# Optionally add gradient noise.
if gradient_noise_scale is not None:
gradients = _add_scaled_noise_to_gradients(
gradients, gradient_noise_scale)
# Multiply some gradients.
if gradient_multipliers is not None:
gradients = _multiply_gradients(gradients, gradient_multipliers)
# Optionally clip gradients by global norm.
if clip_gradients is not None:
gradients = _clip_gradients_by_norm(gradients, clip_gradients)
# Add scalar summary for loss.
logging_ops.scalar_summary("loss", loss)
# Add histograms for variables, gradients and gradient norms.
for gradient, variable in gradients:
if isinstance(gradient, ops.IndexedSlices):
grad_values = gradient.values
else:
grad_values = gradient
if grad_values is not None:
logging_ops.histogram_summary(variable.name, variable)
logging_ops.histogram_summary(variable.name + "/gradients", grad_values)
logging_ops.histogram_summary(variable.name + "/gradient_norm",
clip_ops.global_norm([grad_values]))
# Create gradient updates.
grad_updates = opt.apply_gradients(gradients,
global_step=global_step,
name="train")
# Make sure total_loss is valid.
final_loss = array_ops.check_numerics(loss, "Loss is inf or nan")
# Ensure the train_tensor computes grad_updates.
train_tensor = control_flow_ops.with_dependencies(
[grad_updates], final_loss)
return train_tensor
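# Illustrative call of optimize_loss (added sketch, not part of the original module),
# using only arguments documented in its docstring; `loss` and `global_step` are
# assumed to be tensors created elsewhere in the caller's graph:
#   train_op = optimize_loss(loss, global_step, learning_rate=0.1,
#                            optimizer="SGD", clip_gradients=5.0)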
def _clip_gradients_by_norm(grads_and_vars, clip_gradients):
"""Clips gradients by global norm."""
gradients, variables = zip(*grads_and_vars)
clipped_gradients, _ = clip_ops.clip_by_global_norm(gradients,
clip_gradients)
return list(zip(clipped_gradients, variables))
def _add_scaled_noise_to_gradients(grads_and_vars, gradient_noise_scale):
"""Adds scaled noise from a 0-mean normal distribution to gradients."""
gradients, variables = zip(*grads_and_vars)
noisy_gradients = []
for gradient in gradients:
if isinstance(gradient, ops.IndexedSlices):
gradient_shape = gradient.dense_shape
else:
gradient_shape = gradient.get_shape()
noise = random_ops.truncated_normal(gradient_shape) * gradient_noise_scale
noisy_gradients.append(gradient + noise)
return list(zip(noisy_gradients, variables))
def _multiply_gradients(grads_and_vars, gradient_multipliers):
"""Multiply specified gradients."""
multiplied_grads_and_vars = []
for grad, var in grads_and_vars:
if var in gradient_multipliers or var.name in gradient_multipliers:
key = var if var in gradient_multipliers else var.name
grad *= constant_op.constant(
gradient_multipliers[key], dtype=dtypes.float32)
multiplied_grads_and_vars.append((grad, var))
return multiplied_grads_and_vars
| apache-2.0 | 2,485,348,243,574,969,000 | 42.165939 | 80 | 0.653111 | false | 4.350792 | false | false | false |
lab-robotics-unipv/pyFUZZYgenerator | core/ModelType.py | 1 | 2708 |
class ModelType:
def __init__(self):
self.type = None
self.properties = {}
self.models = []
self.logic_function_name = None
self.init_function_name = None
self.output_function_name = None
def __eq__(self, other):
if other.type == self.type:
return True
return False
def update(self, model):
self.models.append(model)
class TooBigDimensionsException(Exception):
pass
class ModelNotFoundException(Exception):
pass
class FINDModelType(ModelType):
def __init__(self):
super().__init__()
self.type = "F-IND"
self.properties["max_input_n"] = 0
self.logic_function_name = "findLogic"
self.init_function_name = "initFindLogic"
self.output_function_name = "calculateFindIndex"
def update(self, model):
super().update(model)
self.properties["max_input_n"] = max(
len(model.input_var), self.properties["max_input_n"])
def get_squaredint_t(self):
ninput = self.properties["max_input_n"]
if ninput < 8:
return "uint8_t"
elif ninput < 16:
return "uint16_t"
elif ninput < 32:
return "uint32_t"
else:
raise TooBigDimensionsException
class FISModelType(ModelType):
def __init__(self):
super().__init__()
self.type = "FIS"
self.logic_function_name = "fisLogic"
self.init_function_name = "initFisLogic"
self.output_function_name = "calculateFisOutput"
def update(self, model):
super().update(model)
class FEQModelType(ModelType):
def __init__(self):
super().__init__()
self.type = "FIS"
self.logic_function_name = "feqLogic"
self.init_function_name = "initFeqLogic"
self.output_function_name = "calculateFeqOutput"
def update(self, model):
super().update(model)
class ModelTypeSet:
def __init__(self):
self.model_type_list = []
def update(self, model):
model_type = None
if model.type.upper() == 'F-IND':
model_type = FINDModelType()
elif model.type.upper() == 'FEQ':
model_type = FEQModelType()
elif model.type.upper() == 'FIS':
model_type = FISModelType()
else:
raise ModelNotFoundException
if model_type not in self.model_type_list:
self.model_type_list.append(model_type)
actual_model_type = self.model_type_list[self.model_type_list.index(
model_type)]
actual_model_type.update(model)
def __iter__(self):
return self.model_type_list.__iter__()
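# Hedged usage sketch: ModelTypeSet.update() expects model objects exposing a
# `type` string ("F-IND", "FEQ" or "FIS") and, for F-IND models, an `input_var`
# sequence; `model` below stands for a hypothetical object built elsewhere:
#   type_set = ModelTypeSet()
#   type_set.update(model)
#   for model_type in type_set:
#       print(model_type.logic_function_name)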
| lgpl-3.0 | 2,298,482,648,726,240,300 | 26.08 | 76 | 0.57644 | false | 3.735172 | false | false | false |
sippeproject/vagoth | vagoth/virt/utils/mc_json_rpc.py | 1 | 1790 |
#!/usr/bin/python
#
# Vagoth Cluster Management Framework
# Copyright (C) 2013 Robert Thomson
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
from subprocess import Popen, PIPE
from os.path import abspath, dirname, join
import json
class MCollectiveException(Exception): pass
def mcollective_call(agent, action, identity=None, timeout=None, **kwargs):
mcdict = {
"agent": agent,
"action": action,
"arguments": kwargs,
}
if identity is not None:
mcdict["identity"] = identity
if timeout is not None:
mcdict["timeout"] = timeout
mcjson = json.dumps(mcdict)
ruby_script=join(abspath(dirname(__file__)), "mc_json_rpc.rb")
process = Popen([ruby_script, "-"], stdin=PIPE, stdout=PIPE)
process.stdin.write(mcjson)
process.stdin.close()
result = process.stdout.read()
process.stdout.close()
process.wait()
if process.returncode == 0:
return json.loads(result)
else:
raise MCollectiveException(
"mc-json-rpc.rb exited with {0}: {1}".format(
process.returncode, result))
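# Hypothetical call of the helper above (the agent, action and keyword arguments
# are made-up examples, not names defined by this module):
#   result = mcollective_call("service", "status", identity="web01", service="httpd")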
| lgpl-2.1 | 1,314,612,439,898,897,400 | 34.098039 | 80 | 0.694413 | false | 3.925439 | false | false | false |
slackeater/anal-beh | firefoxSelenium.py | 1 | 1686 |
#! /usr/bin/env python
import sys
from classes.sitemanager import SiteManager
from classes.gui.main import MainWindow
from classes.printer import Printer
from PyQt4 import QtGui
import argparse
__author__ = 'snake'
def main():
if len(sys.argv) == 1:
app = QtGui.QApplication(sys.argv)
main = MainWindow()
main.show()
sys.exit(app.exec_())
else:
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--site", help="specify the sites you want to use."
" You can use country name to run only websites of that country.")
parser.add_argument("-c", "--cred", help="specify a file with the credentials for the log in of the sites.")
parser.add_argument("-u", "--urls", help="specify a file with a list of url to browse")
args = parser.parse_args()
p = Printer('console')
if args.urls:
sm = SiteManager("", p)
sm.urlsfromfile(args.urls)
else:
# Get the command line parameters
sitename = args.site
fileName = args.cred
sitecount = 0
sitearray = []
countries = ['usa', 'ch', 'it', 'uk', 'fr']
for site in sitename.split(','):
if len(site) != 0:
sitearray.append(site)
sitecount += 1
#Browse the site in order to collect some data
sm = SiteManager(fileName, p)
if sitename in countries:
sm.countrysession(sitename)
elif sitecount >= 1:
sm.surf(sitearray)
if __name__ == "__main__":
main()
| gpl-2.0 | 5,399,769,859,326,694,000 | 30.222222 | 116 | 0.544484 | false | 4.236181 | false | false | false |
levilucio/SyVOLT | ECore_Copier_MM/transformation/HEPackage.py | 1 | 2231 |
from core.himesis import Himesis
import uuid
class HEPackage(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the DSLTrans rule EPackage.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HEPackage, self).__init__(name='HEPackage', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['HimesisMM']
self["name"] = """EPackage"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'EPackage')
# match model. We only support one match model
self.add_node()
self.vs[0]["mm__"] = """MatchModel"""
# apply model node
self.add_node()
self.vs[1]["mm__"] = """ApplyModel"""
# paired with relation between match and apply models
self.add_node()
self.vs[2]["mm__"] = """paired_with"""
# match class EPackage() node
self.add_node()
self.vs[3]["mm__"] = """EPackage"""
self.vs[3]["attr1"] = """+"""
# match_contains node for class EPackage()
self.add_node()
self.vs[4]["mm__"] = """match_contains"""
# apply class EPackage() node
self.add_node()
self.vs[5]["mm__"] = """EPackage"""
self.vs[5]["attr1"] = """1"""
# apply_contains node for class EPackage()
self.add_node()
self.vs[6]["mm__"] = """apply_contains"""
# Add the edges
self.add_edges([
(0,4), # matchmodel -> match_contains
(4,3), # match_contains -> match_class EPackage()
(1,6), # applymodel -> apply_contains
(6,5), # apply_contains -> apply_class EPackage()
(0,2), # matchmodel -> pairedwith
(2,1) # pairedwith -> applyModel
])
# Add the attribute equations
self["equations"] = [((5,'name'),(3,'name')), ((5,'nsURI'),(3,'nsURI')), ((5,'nsPrefix'),(3,'nsPrefix')), ((5,'ApplyAttribute'),('constant','solveRef')), ]
| mit | 5,270,245,473,863,462,000 | 27.602564 | 163 | 0.469745 | false | 3.983929 | false | false | false |
javiercantero/streamlink | src/streamlink/plugins/tv5monde.py | 1 | 2401 |
import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import http, validate
from streamlink.stream import HLSStream, HTTPStream, RTMPStream
from streamlink.utils import parse_json
from streamlink.plugins.common_jwplayer import _js_to_json
class TV5Monde(Plugin):
_url_re = re.compile(r'http://(.+\.)?(tv|tivi)5monde(plus(afrique)?)?\.com')
_videos_re = re.compile(r'"?(?:files|sources)"?:\s*(?P<videos>\[.+?\])')
_videos_embed_re = re.compile(r'(?:file:\s*|src=)"(?P<embed>.+?\.mp4|.+?/embed/.+?)"')
_videos_schema = validate.Schema(
validate.transform(_js_to_json),
validate.transform(parse_json),
validate.all([
validate.any(
validate.Schema(
{'url': validate.url()},
validate.get('url')
),
validate.Schema(
{'file': validate.url()},
validate.get('file')
),
)
])
)
@classmethod
def can_handle_url(cls, url):
return TV5Monde._url_re.match(url)
def _get_non_embed_streams(self, page):
match = self._videos_re.search(page)
if match is not None:
videos = self._videos_schema.validate(match.group('videos'))
return videos
return []
def _get_embed_streams(self, page):
match = self._videos_embed_re.search(page)
if match is None:
return []
url = match.group('embed')
if '.mp4' in url:
return [url]
res = http.get(url)
videos = self._get_non_embed_streams(res.text)
if videos:
return videos
return []
def _get_streams(self):
res = http.get(self.url)
match = self._videos_re.search(res.text)
if match is not None:
videos = self._videos_schema.validate(match.group('videos'))
else:
videos = self._get_embed_streams(res.text)
for url in videos:
if '.m3u8' in url:
for stream in HLSStream.parse_variant_playlist(self.session, url).items():
yield stream
elif 'rtmp' in url:
yield 'vod', RTMPStream(self.session, {'rtmp': url})
elif '.mp4' in url:
yield 'vod', HTTPStream(self.session, url)
__plugin__ = TV5Monde
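# Hypothetical invocation that would be routed through this plugin by Streamlink
# (the URL is a placeholder matching _url_re above):
#   streamlink "http://www.tv5monde.com/some-show" best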
| bsd-2-clause | -9,086,185,595,071,028,000 | 29.782051 | 90 | 0.540608 | false | 3.751563 | false | false | false |
techjacker/sitemapgenerator | sitemapgenerator/crawler.py | 1 | 3955 |
import re
import requests
from bs4 import BeautifulSoup
from time import sleep
from random import randint
from numbers import Number
import functools
def handle_requests_failures(func):
'''
    This decorator handles requests.exceptions.RequestException.
'''
@functools.wraps(func)
def wrapper(self, *args, **kw):
'''
Handle RequestException
'''
try:
return func(self, *args, **kw)
except requests.exceptions.RequestException as error:
print(error)
self.links_broken.append(kw['url'])
return wrapper
class Crawler:
def __init__(self, domain, quiet=False, throttle_max=3, limit=10000):
self.set_domain(domain)
self.quiet = quiet
self.limit = limit if \
isinstance(limit, Number) else 10000
self.throttle_max = throttle_max if \
isinstance(throttle_max, Number) else 3
self.links = {}
self.links_broken = []
self.headers = {'User-Agent': (
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:47.0) '
'Gecko/20100101 Firefox/47.0'
)}
def set_domain(self, domain):
if not domain:
raise Exception('domain must be defined')
if not domain.startswith('http://') and \
not domain.startswith('https://'):
domain = 'http://' + domain
self.domain = domain
@staticmethod
def extract_links(contents):
soup = BeautifulSoup(contents, 'html.parser')
return {
a.get('href'): {"soup": a}
for a in soup.find_all('a')
if a.get('href') is not None and not a.get('href').startswith('#')
}
@handle_requests_failures
def request_url(self, url):
res = requests.get(url, headers=self.headers).text
# set visited flag
if self.strip_domain(url) in self.links:
self.links[self.strip_domain(url)]['visited'] = True
return res
def strip_domain(self, url):
return re.sub('^' + re.escape(self.domain), '', url)
def merge_links(self, links, url):
for k, v in links.items():
# strip domain on internal links
if k.strip().startswith(self.domain):
k = self.strip_domain(k)
# add extra links if not homepage and not already in dict
if k and k != '/' and k not in self.links:
self.links[k] = v
def get_domain_links(self, all=False):
return {
k: v for k, v in self.links.items()
if not k.startswith('http') and (all or len(k.split('.')) == 1)
}
@property
def unvisited_links(self):
return (
k for k, v in self.get_domain_links().items() if 'visited' not in v
)
@property
def domain_links(self):
return (self.domain + l for l in self.get_domain_links(all=True))
def crawl(self, url=''):
text = self.request_url(url=self.domain + url)
links = self.extract_links(text)
self.merge_links(links, url)
def run(self, url='', recurse=False, throttle=None):
if self.quiet is not True:
print('crawling {}'.format(url if url else 'homepage'))
self.crawl(url)
no_visited_links = 1
if recurse is True and no_visited_links < self.limit:
next_unvisited_link = next(self.unvisited_links, None)
            while next_unvisited_link and no_visited_links < self.limit:  # honour the crawl limit
self.crawl(next_unvisited_link)
next_unvisited_link = next(self.unvisited_links, None)
no_visited_links += 1
sleep(throttle if isinstance(throttle, Number)
else randint(0, self.throttle_max))
if self.quiet is not True:
print('crawled {} URLs'.format(no_visited_links + 1))
if self.links_broken:
print('found broken {} links'.format(len(self.links_broken)))
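if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): needs network
    # access, and "example.com" is only a placeholder domain.
    crawler = Crawler("example.com", quiet=True, throttle_max=1, limit=50)
    crawler.run(recurse=True, throttle=0)
    for link in crawler.domain_links:
        print(link)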
|
mit
| 4,129,940,450,796,477,400 | 30.64 | 79 | 0.566877 | false | 3.94317 | false | false | false |
troych/PlexKodiConnect
|
contextmenu.py
|
1
|
1380
|
# -*- coding: utf-8 -*-
###############################################################################
import logging
import os
import sys
import xbmc
import xbmcaddon
###############################################################################
_addon = xbmcaddon.Addon(id='plugin.video.plexkodiconnect')
try:
_addon_path = _addon.getAddonInfo('path').decode('utf-8')
except TypeError:
_addon_path = _addon.getAddonInfo('path').decode()
try:
_base_resource = xbmc.translatePath(os.path.join(
_addon_path,
'resources',
'lib')).decode('utf-8')
except TypeError:
_base_resource = xbmc.translatePath(os.path.join(
_addon_path,
'resources',
'lib')).decode()
sys.path.append(_base_resource)
###############################################################################
import loghandler
from context_entry import ContextMenu
###############################################################################
loghandler.config()
log = logging.getLogger("PLEX.contextmenu")
###############################################################################
if __name__ == "__main__":
try:
# Start the context menu
ContextMenu()
except Exception as error:
log.exception(error)
import traceback
log.exception("Traceback:\n%s" % traceback.format_exc())
raise
|
gpl-2.0
| 6,899,344,392,238,884,000 | 25.538462 | 79 | 0.454348 | false | 4.791667 | false | true | false |
danielhkl/matplotlib2tikz
|
matplotlib2tikz/color.py
|
1
|
2761
|
# -*- coding: utf-8 -*-
#
import matplotlib as mpl
import numpy
def mpl_color2xcolor(data, matplotlib_color):
'''Translates a matplotlib color specification into a proper LaTeX xcolor.
'''
# Convert it to RGBA.
my_col = numpy.array(mpl.colors.ColorConverter().to_rgba(matplotlib_color))
# If the alpha channel is exactly 0, then the color is really 'none'
# regardless of the RGB channels.
if my_col[-1] == 0.0:
return data, 'none', my_col
xcol = None
# RGB values (as taken from xcolor.dtx):
available_colors = {
'red': numpy.array([1, 0, 0]),
'green': numpy.array([0, 1, 0]),
'blue': numpy.array([0, 0, 1]),
'brown': numpy.array([0.75, 0.5, 0.25]),
'lime': numpy.array([0.75, 1, 0]),
'orange': numpy.array([1, 0.5, 0]),
'pink': numpy.array([1, 0.75, 0.75]),
'purple': numpy.array([0.75, 0, 0.25]),
'teal': numpy.array([0, 0.5, 0.5]),
'violet': numpy.array([0.5, 0, 0.5]),
'black': numpy.array([0, 0, 0]),
'darkgray': numpy.array([0.25, 0.25, 0.25]),
'gray': numpy.array([0.5, 0.5, 0.5]),
'lightgray': numpy.array([0.75, 0.75, 0.75]),
'white': numpy.array([1, 1, 1])
# The colors cyan, magenta, yellow, and olive are also
# predefined by xcolor, but their RGB approximation of the
# native CMYK values is not very good. Don't use them here.
}
available_colors.update(data['custom colors'])
# Check if it exactly matches any of the colors already available.
# This case is actually treated below (alpha==1), but that loop
# may pick up combinations with black before finding the exact
# match. Hence, first check all colors.
for name, rgb in available_colors.items():
if all(my_col[:3] == rgb):
xcol = name
return data, xcol, my_col
# Check if my_col is a multiple of a predefined color and 'black'.
for name, rgb in available_colors.items():
if name == 'black':
continue
if rgb[0] != 0.0:
alpha = my_col[0] / rgb[0]
elif rgb[1] != 0.0:
alpha = my_col[1] / rgb[1]
else:
assert rgb[2] != 0.0
alpha = my_col[2] / rgb[2]
# The cases 0.0 (my_col == black) and 1.0 (my_col == rgb) are
# already accounted for by checking in available_colors above.
if all(my_col[:3] == alpha * rgb) and 0.0 < alpha < 1.0:
xcol = name + ('!%r!black' % (alpha * 100))
return data, xcol, my_col
# Lookup failed, add it to the custom list.
xcol = 'color' + str(len(data['custom colors']))
data['custom colors'][xcol] = my_col[:3]
return data, xcol, my_col
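if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): translate a few
    # matplotlib colors, reusing one `data` dict so custom colors accumulate.
    data = {"custom colors": {}}
    for color in ["red", "#336699", (0.2, 0.4, 0.6, 0.5)]:
        data, xcol, rgba = mpl_color2xcolor(data, color)
        print(xcol, rgba)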
|
mit
| 2,779,563,172,198,260,700 | 36.310811 | 79 | 0.560666 | false | 3.279097 | false | false | false |
open2c/cooltools
|
cooltools/cli/dump_cworld.py
|
1
|
2380
|
import click
from . import cli
from .. import io
@cli.command()
@click.argument(
"cool_paths",
metavar="COOL_PATHS",
type=str,
nargs=-1
)
@click.argument(
"out_path",
metavar="OUT_PATH",
type=click.Path(exists=False, writable=True),
nargs=1,
)
@click.option(
"--cworld-type",
help="The format of the CWorld output. "
"'matrix' converts a single .cool file into the .matrix.txt.gz tab-separated format. "
"'tar' dumps all specified cooler files into a "
"single .tar archive containing multiple .matrix.txt.gz files (use to make "
"multi-resolution archives).",
type=click.Choice(["matrix", "tar"]),
default="matrix",
show_default=True,
)
@click.option(
"--region",
help="The coordinates of a genomic region to dump, in the UCSC format. "
"If empty (by default), dump a genome-wide matrix. This option can be used "
"only when dumping a single cooler file.",
type=str,
default="",
show_default=True,
)
@click.option(
"--balancing-type",
help="The type of the matrix balancing. 'IC_unity' - iteratively corrected "
"for the total number of contacts per locus=1.0; 'IC' - same, but preserving "
"the average total number of contacts; 'raw' - no balancing",
type=click.Choice(["IC_unity", "IC", "raw"]),
default="IC_unity",
show_default=True,
)
def dump_cworld(cool_paths, out_path, cworld_type, region, balancing_type):
"""
    Convert a cooler or a group of coolers into the Dekker lab CWorld text format.
COOL_PATHS : Paths to one or multiple .cool files
OUT_PATH : Output CWorld file path
"""
if (cworld_type == "matrix") and (len(cool_paths) > 1):
raise click.ClickException(
"Only one .cool file can be converted into the matrix " "format at a time."
)
if cworld_type == "matrix":
io.dump_cworld(
cool_paths[0],
out_path,
region=region,
iced=(balancing_type != "raw"),
iced_unity=(balancing_type == "IC_unity"),
buffer_size=int(1e8),
)
elif cworld_type == "tar":
if region:
raise Exception(
"Only genome-wide matrices and not specific regions can be dumpled"
" into a .tar CWorld archive."
)
io.dump_cworld_tar(cool_paths, out_path)
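# Hedged usage sketch (not part of the original module): the command can be
# exercised with click's test runner; the file names below are hypothetical.
#   from click.testing import CliRunner
#   runner = CliRunner()
#   runner.invoke(dump_cworld, ["sample.1000kb.cool", "out.matrix.txt.gz",
#                               "--cworld-type", "matrix",
#                               "--balancing-type", "IC_unity"])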
|
mit
| 28,392,404,168,664,796 | 30.733333 | 90 | 0.611765 | false | 3.562874 | false | false | false |
eResearchSA/reporting-unified
|
unified/models/hnas.py
|
1
|
9940
|
from sqlalchemy.sql import func
from . import db, id_column, to_dict, SnapshotMothods
class Owner(db.Model):
"""Storage Account/Owner"""
id = id_column()
name = db.Column(db.String(64), unique=True, nullable=False)
virtual_volume_usage = db.relationship("VirtualVolumeUsage",
backref="owner")
def json(self):
"""JSON"""
return to_dict(self, ["name"])
class Snapshot(db.Model, SnapshotMothods):
"""Storage Snapshot"""
id = id_column()
ts = db.Column(db.Integer, nullable=False, unique=True)
filesystem_usage = db.relationship("FilesystemUsage", backref="snapshot")
virtual_volume_usage = db.relationship("VirtualVolumeUsage",
backref="snapshot")
def json(self):
"""JSON"""
return to_dict(self, ["ts"])
class Filesystem(db.Model):
"""Filesystem"""
id = id_column()
name = db.Column(db.String(256), unique=True, nullable=False)
virtual_volumes = db.relationship("VirtualVolume", backref="filesystem")
usage = db.relationship("FilesystemUsage", backref="filesystem")
def json(self):
"""JSON"""
return to_dict(self, ["name"])
def summarise(self, start_ts=0, end_ts=0):
""""Gets usage of a file system between start_ts and end_ts.
Maximal usage of the period is returned.
"""
snapshot_ids = Snapshot.id_between(start_ts, end_ts)
id_query = FilesystemUsage.query.\
filter(FilesystemUsage.filesystem_id == self.id).\
filter(FilesystemUsage.snapshot_id.in_(snapshot_ids)).\
with_entities(FilesystemUsage.id)
query = FilesystemUsage.query.filter(FilesystemUsage.id.in_(id_query)).\
with_entities(func.max(FilesystemUsage.capacity),
func.min(FilesystemUsage.free),
func.max(FilesystemUsage.live_usage),
func.max(FilesystemUsage.snapshot_usage))
values = query.first()
if values.count(None) == len(values):
return {}
else:
fields = ['capacity', 'free', 'live_usage', 'snapshot_usage']
return dict(zip(fields, values))
def list(self, start_ts=0, end_ts=0):
""""Gets a list of usages of a filesystem between start_ts and end_ts.
"""
snapshots = Snapshot.between(start_ts, end_ts)
query = FilesystemUsage.query.join(snapshots).\
filter(FilesystemUsage.filesystem_id == self.id).\
order_by(snapshots.c.ts).\
with_entities(snapshots.c.ts,
FilesystemUsage.capacity,
FilesystemUsage.free,
FilesystemUsage.live_usage,
FilesystemUsage.snapshot_usage)
fields = ['ts', 'capacity', 'free', 'live_usage', 'snapshot_usage']
rslt = []
for q in query.all():
rslt.append(dict(zip(fields, q)))
return rslt
class FilesystemUsage(db.Model):
"""Filesystem Usage"""
id = id_column()
capacity = db.Column(db.BigInteger, nullable=False)
free = db.Column(db.BigInteger, nullable=False)
live_usage = db.Column(db.BigInteger, nullable=False)
snapshot_usage = db.Column(db.BigInteger, nullable=False)
snapshot_id = db.Column(None,
db.ForeignKey("snapshot.id"),
index=True,
nullable=False)
filesystem_id = db.Column(None,
db.ForeignKey("filesystem.id"),
index=True,
nullable=False)
def json(self):
"""JSON"""
return to_dict(self, ["capacity", "free", "live_usage",
"snapshot_usage", "snapshot_id", "filesystem_id"])
@classmethod
def summarise(cls, start_ts=0, end_ts=0):
""""Gets usage from their snapshots between start_ts and end_ts.
Maximal usage of the period is returned.
"""
id_query = Snapshot.id_between(start_ts, end_ts)
query = cls.query.filter(cls.snapshot_id.in_(id_query)).\
group_by(cls.filesystem_id).\
with_entities(cls.filesystem_id,
func.max(cls.capacity).label('capacity'),
func.min(cls.free).label('free'),
func.max(cls.live_usage).label('live_usage'),
func.max(cls.snapshot_usage).label('snapshot_usage'))
file_systems = dict(Filesystem.query.with_entities(Filesystem.id, Filesystem.name).all())
fields = ['filesystem', 'capacity', 'free', 'live_usage', 'snapshot_usage']
rslt = []
for q in query.all():
mappings = (file_systems[q[0]], q[1], q[2], q[3], q[4])
rslt.append(dict(zip(fields, mappings)))
return rslt
class VirtualVolume(db.Model):
"""Virtual Volume"""
id = id_column()
name = db.Column(db.String(256), unique=True, nullable=False)
usage = db.relationship("VirtualVolumeUsage", backref="virtual_volume")
filesystem_id = db.Column(None,
db.ForeignKey("filesystem.id"),
index=True,
nullable=False)
def json(self):
"""JSON"""
return to_dict(self, ["name", "filesystem_id"])
def summarise(self, start_ts=0, end_ts=0):
""""Gets usage of a virtual volume between start_ts and end_ts.
Maximal usage of the period is returned.
"""
snapshot_ids = Snapshot.id_between(start_ts, end_ts)
id_query = VirtualVolumeUsage.query.\
filter(VirtualVolumeUsage.virtual_volume_id == self.id).\
filter(VirtualVolumeUsage.snapshot_id.in_(snapshot_ids)).\
with_entities(VirtualVolumeUsage.id)
query = VirtualVolumeUsage.query.\
filter(VirtualVolumeUsage.id.in_(id_query)).\
group_by(VirtualVolumeUsage.owner_id).\
with_entities(VirtualVolumeUsage.owner_id,
func.max(VirtualVolumeUsage.quota),
func.max(VirtualVolumeUsage.files),
func.max(VirtualVolumeUsage.usage))
fields = ['owner', 'quota', 'files', 'usage']
rslt = []
for q in query.all():
values = list(q)
            # almost all usages have no owner; query the owner directly if needed
if values[0]:
values[0] = Owner.query.get(q[0]).name
rslt.append(dict(zip(fields, values)))
return rslt
def list(self, start_ts=0, end_ts=0):
""""Gets a list of usages of a virtual volume between start_ts and end_ts.
"""
snapshots = Snapshot.between(start_ts, end_ts)
query = VirtualVolumeUsage.query.join(snapshots).\
filter(VirtualVolumeUsage.virtual_volume_id == self.id).\
order_by(VirtualVolumeUsage.owner_id, snapshots.c.ts).\
with_entities(VirtualVolumeUsage.owner_id,
snapshots.c.ts,
VirtualVolumeUsage.quota,
VirtualVolumeUsage.files,
VirtualVolumeUsage.usage)
fields = ['ts', 'quota', 'files', 'usage']
rslt = {}
for q in query.all():
if q[0]:
owner = Owner.query.get(q[0]).name
else:
owner = 'UNKNOWN' # no owner
if owner not in rslt:
rslt[owner] = []
rslt[owner].append(dict(zip(fields, q[1:])))
return rslt
class VirtualVolumeUsage(db.Model):
"""Virtual Volume Usage"""
id = id_column()
files = db.Column(db.BigInteger, nullable=False)
quota = db.Column(db.BigInteger, nullable=False)
usage = db.Column(db.BigInteger, nullable=False)
owner_id = db.Column(None, db.ForeignKey("owner.id"))
snapshot_id = db.Column(None,
db.ForeignKey("snapshot.id"),
index=True,
nullable=False)
virtual_volume_id = db.Column(None,
db.ForeignKey("virtual_volume.id"),
index=True,
nullable=False)
def json(self):
"""JSON"""
return to_dict(self, ["files", "quota", "usage", "owner_id", "snapshot_id",
"virtual_volume_id"])
@classmethod
def summarise(cls, start_ts=0, end_ts=0):
""""Gets usage from their snapshots between start_ts and end_ts.
Maximal usage of the period is returned.
"""
id_query = Snapshot.id_between(start_ts, end_ts)
query = cls.query.filter(cls.snapshot_id.in_(id_query)).\
group_by(cls.virtual_volume_id, cls.owner_id).\
with_entities(cls.virtual_volume_id, cls.owner_id,
func.max(cls.quota).label('quota'),
func.max(cls.files).label('files'),
func.max(cls.usage).label('usage'))
fq = VirtualVolume.query.join(Filesystem).\
with_entities(VirtualVolume.id, Filesystem.name, VirtualVolume.name).all()
file_systems = {}
for fs in fq:
file_systems[fs[0]] = fs[1:]
        # Not all virtual volumes have an owner
owners = dict(Owner.query.with_entities(Owner.id, Owner.name).all())
fields = ['filesystem', 'virtual_volume', 'owner', 'quota', 'files', 'usage']
rslt = []
for q in query.all():
fn, vn = file_systems[q[0]]
owner = owners[q[1]] if q[1] else ''
mappings = (fn, vn, owner, q[2], q[3], q[4])
rslt.append(dict(zip(fields, mappings)))
return rslt
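# Hedged usage sketch (not part of the original module): assuming an application
# context and a populated database, per-filesystem usage over a period can be
# summarised with UNIX timestamps, e.g.
#   fs = Filesystem.query.filter_by(name="hpcfs").first()   # hypothetical name
#   fs.summarise(start_ts=1420070400, end_ts=1422748800)
#   # -> {'capacity': ..., 'free': ..., 'live_usage': ..., 'snapshot_usage': ...}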
|
apache-2.0
| -236,425,678,749,025,100 | 37.527132 | 97 | 0.547485 | false | 3.993572 | false | false | false |
pjamesjoyce/lcopt
|
lcopt/interact.py
|
1
|
48802
|
from flask import Flask, request, render_template, redirect, send_file
import webbrowser
import json
from ast import literal_eval
from lcopt.io import exchange_factory
from collections import OrderedDict
from itertools import groupby
import xlsxwriter
from io import BytesIO
import os
from lcopt.bw2_export import Bw2Exporter
from lcopt.export_view import LcoptView
from lcopt.utils import find_port
from lcopt.settings import settings
class FlaskSandbox():
def __init__(self, modelInstance):
self.modelInstance = modelInstance
self.get_sandbox_variables()
# Set up the dictionary of actions that can be processed by POST requests
self.postActions = {
'savePosition': self.savePosition,
'saveModel': self.saveModel,
'newProcess': self.newProcess,
'echo': self.echo,
'searchEcoinvent': self.searchEcoinvent,
'searchBiosphere': self.searchBiosphere,
'newConnection': self.newConnection,
'addInput': self.addInput,
'inputLookup': self.inputLookup,
'parse_parameters': self.parameter_parsing,
'create_function': self.create_function,
'add_parameter': self.add_parameter,
'simaPro_export': self.simaPro_export,
'removeInput': self.removeInput,
'unlinkIntermediate': self.unlinkIntermediate,
'update_settings': self.update_settings,
'export_view_file': self.export_view_file
}
#print (self.modelInstance.newVariable)
def shutdown_server(self): # pragma: no cover
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
def output_code(self, process_id):
exchanges = self.modelInstance.database['items'][process_id]['exchanges']
production_filter = lambda x: x['type'] == 'production'
code = list(filter(production_filter, exchanges))[0]['input'][1]
return code
def get_sandbox_variables(self):
m = self.modelInstance
db = m.database['items']
matrix = m.matrix
ext_dbs = [x['name'] for x in m.external_databases]
sandbox_positions = m.sandbox_positions
products = OrderedDict((k, v) for k, v in db.items() if v['type'] == 'product')
product_codes = [k[1] for k in products.keys()]
processes = OrderedDict((k, v) for k, v in db.items() if v['type'] == 'process')
process_codes = [k[1] for k in processes.keys()]
process_name_map = {k[1]: v['name'] for k, v in processes.items()}
self.reverse_process_name_map = {value: key for key, value in process_name_map.items()}
# note this maps from output code to process
process_output_map = {self.output_code(x): x[1] for x in processes.keys()}
self.reverse_process_output_map = {value: key for key, value in process_output_map.items()}
# map products to producing process name
        self.production_map = {k: process_name_map[v] for k, v in process_output_map.items()}
intermediates = {k: v for k, v in products.items() if v['lcopt_type'] == 'intermediate'}
intermediate_codes = [k[1] for k in intermediates.keys()]
intermediate_map = {k[1]: v['name'] for k, v in intermediates.items()}
self.reverse_intermediate_map = {value: key for key, value in intermediate_map.items()}
#process_output_name_map = {process_code: output_name for x in processes.keys()}
process_output_name_map = {x[1]: intermediate_map[self.reverse_process_output_map[x[1]]] for x in processes.keys()}
inputs = OrderedDict((k, v) for k, v in products.items() if v['lcopt_type'] == 'input')
input_codes = [k[1] for k in inputs.keys()]
input_map = {k[1]: v['name'] for k, v in inputs.items()}
self.reverse_input_map = {value: key for key, value in input_map.items()}
biosphere = OrderedDict((k, v) for k, v in products.items() if v['lcopt_type'] == 'biosphere')
biosphere_codes = [k[1] for k in biosphere.keys()]
biosphere_map = {k[1]: v['name'] for k, v in biosphere.items()}
self.reverse_biosphere_map = {value: key for key, value in biosphere_map.items()}
label_map = {**input_map, **process_output_name_map, **biosphere_map}
#print('label_map = {}\n'.format(label_map))
self.outputlabels = [{'process_id': x, 'output_name': process_output_name_map[x]} for x in process_codes]
link_indices = [process_output_map[x] if x in intermediate_codes else x for x in product_codes]
if matrix is not None:
row_totals = matrix.sum(axis=1)
input_row_totals = {k: row_totals[m.names.index(v)] for k, v in input_map.items()}
biosphere_row_totals = {k: row_totals[m.names.index(v)] for k, v in biosphere_map.items()}
# compute the nodes
i = 1
self.nodes = []
for t in process_codes:
self.nodes.append({'name': process_name_map[t], 'type': 'transformation', 'id': t, 'initX': i * 100, 'initY': i * 100})
i += 1
i = 1
for p in input_codes:
if input_row_totals[p] != 0:
item = db[(m.database['name'], p)]
el = item.get('ext_link')
if el:
if el[0] == m.database['name']:
ext_db_items = m.database['items']
ext_item = ext_db_items[el]
#ext_ref_prod = ext_item.get('reference product','')
ext_name = ext_item.get('name', '')
ext_location = ext_item.get('location', '')
ext_item_data = "<div><i><b>This is an internal link</b></i></br><b>Database: </b>{}</br><b>Process: </b>{}</br><b>Location: </b>{}</br></div>".format(el[0], ext_name, ext_location)
else:
ext_db_ix = ext_dbs.index(el[0])
ext_db_items = m.external_databases[ext_db_ix]['items']
ext_item = ext_db_items[el]
ext_ref_prod = ext_item.get('reference product','')
ext_name = ext_item.get('name', '')
ext_location = ext_item.get('location', '')
ext_item_data = "<div><b>Database: </b>{}</br><b>Reference product: </b>{}</br><b>Process: </b>{}</br><b>Location: </b>{}</br></div>".format(el[0], ext_ref_prod, ext_name, ext_location)
else:
ext_item_data = "<div><i><b>This is a burden free input</b></i></div>"
self.nodes.append({'name': input_map[p], 'type': 'input', 'id': p + "__0", 'initX': i * 50 + 150, 'initY': i * 50, 'ext_item_data': ext_item_data})
i += 1
i = 1
for p in biosphere_codes:
if biosphere_row_totals[p] != 0:
item = db[(m.database['name'], p)]
el = item.get('ext_link')
if el:
ext_db_ix = ext_dbs.index(el[0])
ext_db_items = m.external_databases[ext_db_ix]['items']
ext_item = ext_db_items[el]
if type(ext_item['categories']) == tuple:
ext_categories = "; ".join(ext_item['categories'])
else:
ext_categories = ext_item['categories']
ext_item_data = "<div><b>Database: </b>{}</br><b>Name: </b>{}</br><b>Type: </b>{}</br><b>Categories: </b>{}</br></div>".format(el[0], ext_item['name'], ext_item['type'], ext_categories)
else:
ext_item_data = None
self.nodes.append({'name': biosphere_map[p], 'type': 'biosphere', 'id': p + "__0", 'initX': i * 50 + 150, 'initY': i * 50, 'ext_item_data': ext_item_data})
i += 1
# compute links
self.links = []
input_duplicates = []
biosphere_duplicates = []
#check there is a matrix (new models won't have one until parameter_scan() is run)
if matrix is not None:
for c, column in enumerate(matrix.T):
for r, i in enumerate(column):
if i > 0:
p_from = link_indices[r]
p_to = link_indices[c]
if p_from in input_codes:
suffix = "__" + str(input_duplicates.count(p_from))
input_duplicates.append(p_from)
p_type = 'input'
elif p_from in biosphere_codes:
suffix = "__" + str(biosphere_duplicates.count(p_from))
biosphere_duplicates.append(p_from)
p_type = 'biosphere'
else:
suffix = ""
p_type = 'intermediate'
self.links.append({'sourceID': p_from + suffix, 'targetID': p_to, 'type': p_type, 'amount': 1, 'label': label_map[p_from]})
#add extra nodes
while len(input_duplicates) > 0:
p = input_duplicates.pop()
count = input_duplicates.count(p)
if count > 0:
suffix = "__" + str(count)
ext_item_data = [x['ext_item_data'] for x in self.nodes if x['id'] == p + "__0"][0]
self.nodes.append({'name': input_map[p], 'type': 'input', 'id': p + suffix, 'initX': i * 50 + 150, 'initY': i * 50, 'ext_item_data': ext_item_data})
i += 1
while len(biosphere_duplicates) > 0:
p = biosphere_duplicates.pop()
count = biosphere_duplicates.count(p)
if count > 0:
suffix = "__" + str(count)
ext_item_data = [x['ext_item_data'] for x in self.nodes if x['id'] == p + "__0"][0]
self.nodes.append({'name': biosphere_map[p], 'type': 'biosphere', 'id': p + suffix, 'initX': i * 50 + 150, 'initY': i * 50, 'ext_item_data': ext_item_data})
i += 1
#try and reset the locations
for n in self.nodes:
node_id = n['id']
if node_id in sandbox_positions:
n['initX'] = sandbox_positions[node_id]['x']
n['initY'] = sandbox_positions[node_id]['y']
#print(self.nodes)
#print(inputs)
#print(process_name_map)
def savePosition(self, f):
if f['uuid'] not in self.modelInstance.sandbox_positions:
self.modelInstance.sandbox_positions[f['uuid']] = {}
self.modelInstance.sandbox_positions[f['uuid']]['x'] = f['x']
self.modelInstance.sandbox_positions[f['uuid']]['y'] = f['y']
#print('Setting {} to ({},{})'.format(f['uuid'], f['x'], f['y']))
return "OK"
def saveModel(self, postData): # pragma: no cover
#print ("this is where we save the model")
self.modelInstance.save()
return "OK"
def newProcess(self, postData):
#print ("this is where we're going to create the process, using...")
#print (postData)
m = self.modelInstance
name = postData['process_name']
unit = postData['unit']
output_name = postData['output_name']
exchanges = [{'name': output_name, 'type': 'production', 'unit': unit, 'lcopt_type': 'intermediate'}]
location = 'GLO'
m.create_process(name, exchanges, location, unit)
self.modelInstance.parameter_scan()
#print (m.database['items'][(m.database['name'], postData['uuid'])])
return "OK"
def newConnection(self, postData):
#print(postData)
db = self.modelInstance.database
self.get_sandbox_variables()
source = postData['sourceId']
#print(self.reverse_process_output_map[source])
target = postData['targetId']
label = postData['label']
new_exchange = {'amount': 1,
'comment': 'technosphere exchange of {}'.format(label),
'input': (db['name'], self.reverse_process_output_map[source]),
'type': 'technosphere',
'uncertainty type': 1}
db['items'][(db['name'], target)]['exchanges'].append(new_exchange)
self.modelInstance.parameter_scan()
#print (db['items'][(db['name'], target)]['exchanges'])
return "OK"
def addInput(self, postData):
#print(postData)
my_targetId = postData['targetId']
my_name = postData['name']
#my_type = postData['type']
my_unit = postData['unit']
my_location = postData['location']
m = self.modelInstance
exchange_to_link = m.get_exchange(my_name)
if exchange_to_link is False:
#Create the new product
kwargs = {}
if 'ext_link' in postData.keys():
my_ext_link = literal_eval(postData['ext_link'])
kwargs['ext_link'] = my_ext_link
#exchange_to_link = m.create_product (name = my_name, location =my_location , unit=my_unit, ext_link = my_ext_link)
#print('created linked product')
#else:
if 'lcopt_type' in postData.keys():
lcopt_type = postData['lcopt_type']
kwargs['lcopt_type'] = lcopt_type
exchange_to_link = m.create_product (name=my_name, location=my_location, unit=my_unit, **kwargs)
#print('created unlinked product')
#link the product
#this_exchange = m.get_exchange(my_name)
#print(this_exchange)
this_exchange_object = exchange_factory(exchange_to_link, 'technosphere', 1, 1, '{} exchange of {}'.format('technosphere', my_name))
#print (this_exchange_object)
target_item = m.database['items'][(m.database['name'], my_targetId)]
#[print(target_item)]
target_item['exchanges'].append(this_exchange_object)
#run the parameter scan
m.parameter_scan()
return "OK"
def update_sandbox_on_delete(self, modelInstance, full_id):
id_components = full_id.split("__")
alt_id_sandbox_positions = {tuple(k.split("__")): v for k, v in modelInstance.sandbox_positions.items()}
new_sandbox_positions = {}
for k, v in alt_id_sandbox_positions.items():
#print (k)
#print(id_components)
if len(k) == 1:
new_sandbox_positions['{}'.format(*k)] = v
elif id_components[0] in k and k[1] == id_components[1]:
pass
elif id_components[0] in k and int(k[1]) > int(id_components[1]):
new_sandbox_positions['{0}__{1}'.format(k[0], int(k[1]) - 1)] = v
else:
new_sandbox_positions['{}__{}'.format(*k)] = v
modelInstance.sandbox_positions = new_sandbox_positions
return True
def removeInput(self, postData):
m = self.modelInstance
db_name = m.database.get('name')
process_code = (db_name, postData['targetId'])
input_code = (db_name, postData['sourceId'].split("_")[0])
m.remove_input_link(process_code, input_code)
self.update_sandbox_on_delete(m, postData['sourceId'])
# TODO: Sort out sandbox variables
return "OK"
def unlinkIntermediate(self, postData):
m = self.modelInstance
m.unlink_intermediate(postData['sourceId'], postData['targetId'])
return "OK"
def inputLookup(self, postData):
m = self.modelInstance
myInput = m.database['items'][(m.database['name'], postData['code'])]
return_data = {}
if 'ext_link' in myInput.keys():
ext_link = myInput['ext_link']
if ext_link[0] == m.database['name']:
print('this is an internal external link')
ext_db = m.database['items'] #[x['items'] for x in m.external_databases if x['name'] == ext_link[0]][0]
else:
ext_db = [x['items'] for x in m.external_databases if x['name'] == ext_link[0]][0]
full_link = ext_db[ext_link]
if postData['format'] == 'ecoinvent':
full_link_string = "{} {{{}}} [{}]".format(full_link['name'], full_link['location'], full_link['unit'])
elif postData['format'] == 'biosphere':
if full_link['type'] == 'emission':
full_link_string = '{} (emission to {}) [{}]'.format(full_link['name'], ", ".join(full_link['categories']), full_link['unit'])
else:
full_link_string = '{} ({}) [{}]'.format(full_link['name'], ", ".join(full_link['categories']), full_link['unit'])
return_data['isLinked'] = True
return_data['ext_link'] = str(ext_link)
return_data['ext_link_string'] = full_link_string
return_data['ext_link_unit'] = full_link['unit']
else:
#print('This is an unlinked product')
return_data['isLinked'] = False
return_data['unlinked_unit'] = myInput['unit']
return json.dumps(return_data)
def echo(self, postData):
data = {'message': 'Hello from echo'}
return json.dumps(data)
def searchEcoinvent(self, postData):
search_term = postData['search_term']
location = postData['location']
markets_only = postData['markets_only'] in ['True', 'true', 'on']
m = self.modelInstance
#print(type(markets_only))
#print(location)
if location == "":
#print ('no location')
location = None
result = m.search_databases(search_term, location, markets_only, databases_to_search=m.technosphere_databases, allow_internal=True)
json_dict = {str(k): v for k, v in dict(result).items()}
data = {'message': 'hello from ecoinvent', 'search_term': search_term, 'result': json_dict, 'format': 'ecoinvent'}
return json.dumps(data)
def searchBiosphere(self, postData):
search_term = postData['search_term']
m = self.modelInstance
result = m.search_databases(search_term, databases_to_search=m.biosphere_databases, allow_internal=False)
json_dict = {str(k): v for k, v in dict(result).items()}
data = {'message': 'hello from biosphere3', 'search_term': search_term, 'result': json_dict, 'format': 'biosphere'}
#print (json_dict)
return json.dumps(data)
def create_function(self, postData):
#print(postData)
new_function = postData['my_function']
function_for = postData['for']
if function_for.split("_")[-1] == "production":
parameter = self.modelInstance.production_params[function_for]
elif function_for.split("_")[-1] == "allocation":
parameter = self.modelInstance.allocation_params[function_for]
else:
parameter = self.modelInstance.params[function_for]
parameter['function'] = new_function
parameter['description'] = postData['description']
return "OK"
def parameter_sorting(self):
parameters = self.modelInstance.params
production_params = self.modelInstance.production_params
ext_params = self.modelInstance.ext_params
allocation_params = self.modelInstance.allocation_params
# create a default parameter set if there isn't one yet
if len(self.modelInstance.parameter_sets) == 0:
print ('No parameter sets - creating a default set')
self.modelInstance.parameter_sets['ParameterSet_1'] = OrderedDict()
for param in parameters:
self.modelInstance.parameter_sets['ParameterSet_1'][param] = 0
for param in production_params:
self.modelInstance.parameter_sets['ParameterSet_1'][param] = 1
for param in allocation_params:
self.modelInstance.parameter_sets['ParameterSet_1'][param] = 1
for param in ext_params:
self.modelInstance.parameter_sets['ParameterSet_1'][param['name']] = param['default']
exporter = Bw2Exporter(self.modelInstance)
exporter.evaluate_parameter_sets()
evaluated_parameters = self.modelInstance.evaluated_parameter_sets
subsectionTitles = {
'input': "Inputs from the 'technosphere'",
'intermediate': "Inputs from other processes",
'biosphere': "Direct emissions to the environment"
}
to_name = lambda x: parameters[x]['to_name']
input_order = lambda x: parameters[x]['coords'][1]
type_of = lambda x: parameters[x]['type']
rev_p_params = {v['from_name']: k for k, v in production_params.items()}
rev_a_params = {v['from_name']: k for k, v in allocation_params.items()}
sorted_keys = sorted(parameters, key=input_order)
sorted_parameters = []
for target, items in groupby(sorted_keys, to_name):
section = {'name': target, 'my_items': []}
this_p_param = rev_p_params[target]
if production_params[this_p_param].get('function'):
#print ('{} determined by a function'.format(this_p_param))
values = ['{} = {:.3f}'.format(production_params[this_p_param]['function'], e_ps[this_p_param]) for e_ps_name, e_ps in evaluated_parameters.items()]
isFunction = True
else:
values = [ps[this_p_param] if this_p_param in ps.keys() else '' for ps_name, ps in self.modelInstance.parameter_sets.items()]
isFunction = False
subsection = {'name': 'Production exchange (Output)', 'my_items': []}
#subsection['my_items'].append({'id': this_p_param, 'name': 'Output of {}'.format(production_params[this_p_param]['from_name']), 'existing_values': values, 'unit': production_params[this_p_param]['unit'], 'isFunction': isFunction})
subsection['my_items'].append({'id': this_p_param, 'name': '{}'.format(production_params[this_p_param]['from_name']), 'existing_values': values, 'unit': production_params[this_p_param]['unit'], 'isFunction': isFunction})
section['my_items'].append(subsection)
if self.modelInstance.allow_allocation:
this_a_param = rev_a_params[target]
if allocation_params[this_a_param].get('function'):
#print ('{} determined by a function'.format(this_p_param))
values = ['{} = {:.3f}'.format(allocation_params[this_a_param]['function'], e_ps[this_a_param]) for e_ps_name, e_ps in evaluated_parameters.items()]
isFunction = True
else:
values = [ps[this_a_param] if this_a_param in ps.keys() else '' for ps_name, ps in self.modelInstance.parameter_sets.items()]
isFunction = False
subsection = {'name': 'Allocation parameter', 'my_items': []}
subsection['my_items'].append({'id': this_a_param, 'name': '{}'.format(allocation_params[this_a_param]['from_name']), 'existing_values': values, 'unit': allocation_params[this_a_param]['unit'], 'isFunction': isFunction})
section['my_items'].append(subsection)
sorted_exchanges = sorted(items, key=type_of)
#print (sorted_exchanges)
for type, exchanges in groupby(sorted_exchanges, type_of):
#print('\t{}'.format(type))
subsection = {'name': subsectionTitles[type], 'my_items': []}
for exchange in exchanges:
if parameters[exchange].get('function'):
#print ('{} determined by a function'.format(exchange))
values = ['{} = {:.3f}'.format(parameters[exchange]['function'], e_ps[exchange]) for e_ps_name, e_ps in evaluated_parameters.items()]
isFunction = True
else:
values = [ps[exchange] if exchange in ps.keys() else '' for ps_name, ps in self.modelInstance.parameter_sets.items()]
isFunction = False
#print('\t\t{} ({}) {}'.format(parameters[exchange]['from_name'], exchange, values))
subsection['my_items'].append({'id': exchange, 'name': parameters[exchange]['from_name'], 'existing_values': values, 'unit': parameters[exchange]['unit'], 'isFunction': isFunction})
section['my_items'].append(subsection)
db_code = (self.modelInstance.database['name'], parameters[exchange]['to'])
#print(db_code)
unit = self.modelInstance.database['items'][db_code]['unit']
item_name = self.production_map[db_code[1]]
print(item_name)
#section['name'] = "{}\t({})".format(target, unit)
section['name'] = item_name
sorted_parameters.append(section)
ext_section = {'name': 'Global Parameters', 'my_items': [{'name': 'User created', 'my_items': []}]}
for e_p in self.modelInstance.ext_params:
values = [ps[e_p['name']] if e_p['name'] in ps.keys() else e_p['default'] for ps_name, ps in self.modelInstance.parameter_sets.items()]
ext_section['my_items'][0]['my_items'].append({'id': e_p['name'], 'name': e_p['description'], 'existing_values': values, 'unit': e_p.get('unit', ''), 'isFunction': False})
sorted_parameters.append(ext_section)
return sorted_parameters
def parameter_parsing(self, postData):
#print(postData)
myjson = json.loads(postData['data'], object_pairs_hook=OrderedDict)
#print(myjson)
current_parameter_sets = []
for line in myjson:
line_id = line['id']
if line['Name'] != '':
reserved = ['Name', 'id', 'Unit']
for k in line.keys():
if k not in reserved:
if k not in current_parameter_sets:
current_parameter_sets.append(k)
#print (k, line['id'], line[k])
if line[k] == '':
line[k] = 0
if k in self.modelInstance.parameter_sets.keys():
if line[k] != '[FUNCTION]':
self.modelInstance.parameter_sets[k][line_id] = float(line[k])
else:
self.modelInstance.parameter_sets[k] = OrderedDict()
#print ('created {}'.format(k))
if line[k] != '[FUNCTION]':
self.modelInstance.parameter_sets[k][line_id] = float(line[k])
new_parameter_sets = OrderedDict()
for ps in current_parameter_sets:
new_parameter_sets[ps] = self.modelInstance.parameter_sets[ps]
self.modelInstance.parameter_sets = new_parameter_sets
#print([k for k in self.modelInstance.parameter_sets.keys()])
self.modelInstance.save()
#print('parameters saved')
return 'OK'
#return redirect("/")
def add_parameter(self, postData):
self.modelInstance.add_parameter(postData['param_id'], postData['param_description'], float(postData['param_default']), postData['param_unit'])
#print ('Added {} (default {}) added to global parameters'.format(postData['param_id'], postData['param_default']))
return "OK"
def simaPro_export(self, postData):
self.modelInstance.database_to_SimaPro_csv()
self.modelInstance.generate_parameter_set_excel_file()
return "OK"
def update_settings(self, postData):
print(postData)
try:
new_amount = float(postData['settings_amount'])
except:
new_amount = self.modelInstance.analysis_settings['amount']
if new_amount != 0:
self.modelInstance.analysis_settings['amount'] = new_amount
myjson = json.loads(postData['settings_methods'])
self.modelInstance.analysis_settings['methods'] = [tuple(x) for x in myjson]
if postData['allow_allocation'] == 'true':
self.modelInstance.allow_allocation = True
else:
self.modelInstance.allow_allocation = False
print (self.modelInstance.allow_allocation)
return "OK"
def export_view_file(self, postData):
model = self.modelInstance
exporter = LcoptView(model)
exporter.export()
return "OK"
def create_excel_summary(self):
settings = self.modelInstance.result_set['settings']
results = self.modelInstance.result_set['results']
method_names = ['{}{}'.format(x[0].upper(), x[1:]) for x in settings['method_names']]
ps_names = settings['ps_names']
#create an output stream
output = BytesIO()
workbook = xlsxwriter.Workbook(output)
worksheet = workbook.add_worksheet()
base_format = {'border': 1, 'align': 'center'}
base_header_format = {'border': 1, 'align': 'center', 'bold': True, 'text_wrap': True}
cell_format = workbook.add_format(base_format)
cell_format.set_align('vcenter')
row_header_format = workbook.add_format(base_header_format)
row_header_format.set_align('vcenter')
col_header_format = workbook.add_format(base_header_format)
col_header_format.set_align('vcenter')
title_format = workbook.add_format({'bold': True, 'font_size': 20})
row_offset = 2
col_offset = 1
worksheet.write(row_offset, col_offset, 'Impact', col_header_format)
worksheet.write(row_offset, col_offset + 1, 'Unit', col_header_format)
worksheet.write(0, 1, '{} summary'.format(self.modelInstance.name), title_format)
for i, m in enumerate(method_names):
for j, p in enumerate(ps_names):
worksheet.write(i + row_offset + 1, j + col_offset + 2, results[j]['scores'][i], cell_format)
for i, m in enumerate(method_names):
worksheet.write(i + row_offset + 1, col_offset, m, row_header_format)
worksheet.write(i + row_offset + 1, col_offset + 1, settings['method_units'][i], row_header_format)
for j, p in enumerate(ps_names):
worksheet.write(row_offset, j + col_offset + 2, p, col_header_format)
start_col, end_col = xlsxwriter.utility.xl_col_to_name(0), xlsxwriter.utility.xl_col_to_name(0)
worksheet.set_column('{}:{}'.format(start_col, end_col), 5)
start_col, end_col = xlsxwriter.utility.xl_col_to_name(col_offset), xlsxwriter.utility.xl_col_to_name(col_offset)
worksheet.set_column('{}:{}'.format(start_col, end_col), 25)
start_col, end_col = xlsxwriter.utility.xl_col_to_name(col_offset + 1), xlsxwriter.utility.xl_col_to_name(col_offset + 1 + len(ps_names))
worksheet.set_column('{}:{}'.format(start_col, end_col), 12)
workbook.close()
#go back to the beginning of the stream
output.seek(0)
return output
def create_excel_method(self, m):
settings = self.modelInstance.result_set['settings']
results = self.modelInstance.result_set['results']
method_names = ['{}{}'.format(x[0].upper(), x[1:]) for x in settings['method_names']]
method = method_names[m]
ps_names = settings['ps_names']
table_data = []
for i, p in enumerate(ps_names):
foreground_results = results[i]['foreground_results']
this_item = []
for k, v in foreground_results.items():
running_total = 0
for j, _ in enumerate(ps_names):
running_total += abs(results[j]['foreground_results'][k][m])
if(running_total != 0):
this_item.append({'name': k, 'value': v[m], 'rt': running_total})
this_item = sorted(this_item, key=lambda x: x['rt'], reverse=True)
table_data.append(this_item)
#print(table_data)
output = BytesIO()
workbook = xlsxwriter.Workbook(output)
worksheet = workbook.add_worksheet()
base_format = {'border': 1, 'align': 'center'}
base_header_format = {'border': 1, 'align': 'center', 'bold': True, 'text_wrap': True}
cell_format = workbook.add_format(base_format)
cell_format.set_align('vcenter')
total_format = workbook.add_format(base_header_format)
total_format.set_align('vcenter')
total_format.set_bg_color('#eeeeee')
row_header_format = workbook.add_format(base_header_format)
row_header_format.set_align('vcenter')
col_header_format = workbook.add_format(base_header_format)
col_header_format.set_align('vcenter')
title_format = workbook.add_format({'bold': True, 'font_size': 12})
row_offset = 4
col_offset = 1
worksheet.write(0, 1, 'Model', title_format)
worksheet.write(0, 2, self.modelInstance.name, title_format)
worksheet.write(1, 1, 'Method', title_format)
worksheet.write(1, 2, method, title_format)
worksheet.write(2, 1, 'Unit', title_format)
worksheet.write(2, 2, settings['method_units'][m], title_format)
worksheet.write(row_offset, col_offset, 'Process', col_header_format)
worksheet.write(row_offset + 1, col_offset, 'Total', total_format)
for i, p in enumerate(ps_names):
worksheet.write(row_offset, col_offset + i + 1, p, col_header_format)
worksheet.write(row_offset + 1, col_offset + i + 1, results[i]['scores'][m], total_format)
for i, item in enumerate(table_data[0]):
worksheet.write(row_offset + i + 2, col_offset, item['name'], row_header_format)
no_items = len(table_data[0])
for i, item in enumerate(table_data):
for j in range(no_items):
worksheet.write(row_offset + j + 2, col_offset + i + 1, item[j]['value'], cell_format)
start_col, end_col = xlsxwriter.utility.xl_col_to_name(0), xlsxwriter.utility.xl_col_to_name(0)
worksheet.set_column('{}:{}'.format(start_col, end_col), 5)
start_col, end_col = xlsxwriter.utility.xl_col_to_name(col_offset), xlsxwriter.utility.xl_col_to_name(col_offset)
worksheet.set_column('{}:{}'.format(start_col, end_col), 25)
start_col, end_col = xlsxwriter.utility.xl_col_to_name(col_offset + 1), xlsxwriter.utility.xl_col_to_name(col_offset + len(ps_names))
worksheet.set_column('{}:{}'.format(start_col, end_col), 12)
workbook.close()
output.seek(0)
return output
def create_app(self):
app = Flask(__name__)
def uc_first(string):
return string[0].upper() + string[1:]
app.jinja_env.filters['uc_first'] = uc_first
@app.route('/')
def index():
name = self.modelInstance.name
self.get_sandbox_variables()
args = {'model': {'name': name}, 'nodes': self.nodes, 'links': self.links, 'outputlabels': self.outputlabels}
return render_template('sandbox.html', args=args)
@app.route('/process_post', methods=['POST'])
def process_post():
try:
f = request.form
except:
f = request.get_json()
#print(f)
action = self.postActions[f['action']]
return action(f)
#return "OK"
@app.route('/shutdown')
def shutdown(): # pragma: no cover
self.shutdown_server()
return render_template('shutdown.html')
@app.route('/inputs.json')
def inputs_as_json():
"""creates a json file of the reverse input map to send from the server"""
self.get_sandbox_variables()
# to_json = [x for x in self.reverse_input_map.keys()]
#to_json = reverse_input_map
to_json = [{'name': k, 'code': v} for k, v in self.reverse_input_map.items()]
input_json = json.dumps(to_json)
return input_json
@app.route('/biosphere.json')
def biosphere_as_json():
"""creates a json file of the reverse biosphere map to send from the server"""
self.get_sandbox_variables()
# to_json = [x for x in self.reverse_input_map.keys()]
#to_json = reverse_input_map
to_json = [{'name': k, 'code': v} for k, v in self.reverse_biosphere_map.items()]
biosphere_json = json.dumps(to_json)
return biosphere_json
@app.route('/intermediates.json')
def intermediates_as_json():
"""creates a json file of the reverse intermediate map to send from the server"""
self.get_sandbox_variables()
# to_json = [x for x in self.reverse_input_map.keys()]
#to_json = reverse_input_map
to_json = [{'name': k, 'code': v} for k, v in self.reverse_intermediate_map.items()]
intermediate_json = json.dumps(to_json)
return intermediate_json
@app.route('/usednames.json')
def usednames_as_json():
"""creates a json file of the names already used"""
self.get_sandbox_variables()
names = []
names.extend([k.lower() for k in self.reverse_input_map.keys()])
names.extend([k.lower() for k in self.reverse_intermediate_map.keys()])
names.extend([k.lower() for k in self.reverse_biosphere_map.keys()])
names.extend([k.lower() for k in self.reverse_process_name_map.keys()])
names_json = json.dumps(names)
return names_json
@app.route('/testing')
def testbed():
args = {'model': {'name': self.modelInstance.name}}
args['result_sets'] = self.modelInstance.result_set
return render_template('testbed.html', args=args)
@app.route('/functions')
def function_editor():
args = {'model': {'name': self.modelInstance.name}}
return render_template('create_functions.html', args=args)
@app.route('/results.json')
def results_as_json():
return json.dumps(self.modelInstance.result_set)
@app.route('/parameters.json')
def parameter_json():
sorted_parameters = self.parameter_sorting()
return json.dumps(sorted_parameters)
@app.route('/parameter_<param_id>.json')
def param_query(param_id):
if self.modelInstance.params.get(param_id):
param = self.modelInstance.params[param_id]
elif self.modelInstance.production_params.get(param_id):
param = self.modelInstance.production_params[param_id]
elif self.modelInstance.allocation_params.get(param_id):
param = self.modelInstance.allocation_params[param_id]
else:
param = []
#print(param)
return json.dumps(param)
@app.route('/status.json')
def status():
db = self.modelInstance.database['items']
products = OrderedDict((k, v) for k, v in db.items() if v['type'] == 'product')
inputs = OrderedDict((k, v) for k, v in products.items() if v['lcopt_type'] == 'input')
ext_linked_inputs = OrderedDict((k, v) for k, v in inputs.items() if v.get('ext_link'))
#print(ext_linked_inputs)
biosphere = OrderedDict((k, v) for k, v in products.items() if v['lcopt_type'] == 'biosphere')
totals = []
if len(self.modelInstance.parameter_sets):
exporter = Bw2Exporter(self.modelInstance)
exporter.evaluate_parameter_sets()
evaluated_parameters = self.modelInstance.evaluated_parameter_sets
for _, ps in evaluated_parameters.items():
running_total = 0
for k, v in ps.items():
if k[-10:] != 'production':
running_total += abs(v)
totals.append(running_total)
non_zero = sum(totals) > 0
else:
non_zero = False
#print(evaluated_parameters)
#print(totals)
has_model = len(db) != 0
model_has_impacts = len(ext_linked_inputs) + len(biosphere) != 0
model_has_parameters = len (self.modelInstance.parameter_sets) != 0 and non_zero
model_is_runnable = all([has_model, model_has_impacts, model_has_parameters])
model_has_functions = len([x for k, x in self.modelInstance.params.items() if x['function'] is not None]) != 0
model_is_fully_formed = all([has_model, model_has_impacts, model_has_parameters, model_has_functions])
status_object = {
'has_model': has_model,
'model_has_impacts': model_has_impacts,
'model_has_parameters': model_has_parameters,
'model_has_functions': model_has_functions,
'model_is_runnable': model_is_runnable,
'model_is_fully_formed': model_is_fully_formed,
}
return json.dumps(status_object)
@app.route('/analyse')
def analyse_preload():
args = {'model': {'name': self.modelInstance.name}}
item = request.args.get('item')
item_code = request.args.get('item_code')
#print(request.args)
args['item'] = item
args['item_code'] = item_code
#print('PRELOAD {}'.format(args['item_code']))
#self.modelInstance.analyse(item)
return render_template('analysis_preload.html', args=args)
@app.route('/analysis')
def analysis():
item_code = request.args.get('item_code')
item = request.args.get('item')
self.modelInstance.analyse(item, item_code)
args = {'model': {'name': self.modelInstance.name}}
args['item'] = item
args['result_sets'] = self.modelInstance.result_set
#return render_template('analysis.html', args = args)
#return render_template('testbed.html', args = args)
            # redirect to the cached results so that reloading doesn't rerun the analysis
return redirect("/results?latest=True")
@app.route('/results')
def analysis_shortcut():
#if hasattr(self.modelInstance, 'result_set'):
if self.modelInstance.result_set is not None:
is_latest = request.args.get('latest')
item = self.modelInstance.result_set['settings']['item']
args = {'model': {'name': self.modelInstance.name}}
args['item'] = item
args['latest'] = is_latest
args['result_sets'] = self.modelInstance.result_set
return render_template('analysis.html', args=args)
else:
return render_template('analysis_fail.html')
#@app.route('/network.json')
#def network_as_json():
# parameter_set = request.args.get('ps')
# return self.modelInstance.result_set[int(parameter_set)]['json']
@app.route('/parameters')
def sorted_parameter_setup():
sorted_parameters = self.parameter_sorting()
args = {'title': 'Parameter set'}
args['sorted_parameters'] = sorted_parameters
args['ps_names'] = [x for x in self.modelInstance.parameter_sets.keys()]
return render_template('parameter_set_table_sorted.html',
args=args)
@app.route('/methods.json')
def methods_as_json():
import brightway2 as bw2
from lcopt.constants import DEFAULT_BIOSPHERE_PROJECT
if settings.model_storage.project == "single":
bw2.projects.set_current(settings.model_storage.single_project_name)
else:
if self.modelInstance.name in bw2.projects:
#print('getting custom methods')
bw2.projects.set_current(self.modelInstance.name)
else:
#print('getting default methods')
bw2.projects.set_current(DEFAULT_BIOSPHERE_PROJECT)
method_list = list(bw2.methods)
return json.dumps(method_list)
@app.route('/settings')
def model_settings():
args = {}
args['current_methods'] = json.dumps(self.modelInstance.analysis_settings['methods'])
args['current_amount'] = self.modelInstance.analysis_settings['amount']
args['allow_allocation'] = self.modelInstance.allow_allocation
return render_template('settings.html', args=args)
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@app.errorhandler(500)
def server_error(e):
return render_template('500.html'), 500
@app.route('/excel_export')
def excel_export():
export_type = request.args.get('type')
ps = int(request.args.get('ps'))
m = int(request.args.get('m'))
#print (export_type, ps, m)
if export_type == 'summary':
output = self.create_excel_summary()
filename = "{}_summary_results.xlsx".format(self.modelInstance.name)
elif export_type == 'method':
output = self.create_excel_method(m)
filename = "{}_{}_results.xlsx".format(self.modelInstance.name, self.modelInstance.result_set['settings']['method_names'][m])
#finally return the file
return send_file(output, attachment_filename=filename, as_attachment=True)
@app.route('/locations.json')
def locations_as_json():
asset_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'assets')
filename = 'locations.json'
with open(os.path.join(asset_path, filename), 'r', encoding='utf-8') as f:
locations = json.load(f)
all_items = [x['items'] for x in self.modelInstance.external_databases if x['name'] in self.modelInstance.technosphere_databases]
used_locations = set([x['location'] for item in all_items for _, x in item.items()])
filtered_locations = [x for x in locations if x['code'] in used_locations]
#print(filtered_locations)
return json.dumps(filtered_locations)
@app.route('/mass_flow')
def mass_flow():
return render_template('mass_flow.html')
return app
def run(self, port=None, open_browser=True): # pragma: no cover
app = self.create_app()
if port is None:
port = find_port()
if open_browser:
url = 'http://127.0.0.1:{}/'.format(port)
webbrowser.open_new(url)
app.run(port=port)
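# Hedged usage note (not part of the original module): the sandbox is normally
# started from an existing LcoptModel instance (an assumption based on the
# constructor above), e.g.
#   FlaskSandbox(model).run(port=5000, open_browser=False)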
|
bsd-3-clause
| 9,004,957,291,227,467,000 | 40.498299 | 243 | 0.546863 | false | 3.946147 | false | false | false |
vanessajurtz/lasagne4bio
|
secondary_proteins_prediction/predict.py
|
1
|
2125
|
import sys
import numpy as np
import importlib
import lasagne as nn
import theano
from theano import tensor as T
import os
import glob
import data
import utils
if not (2 <= len(sys.argv) <= 3):
sys.exit("Usage: python predict.py <metadata_path> [subset=test]")
sym_y = T.imatrix('target_output')
sym_x = T.tensor3()
metadata_path_all = glob.glob(sys.argv[1] + "*")
print "shape of metadata_path_all"
print(len(metadata_path_all))
if len(sys.argv) >= 3:
subset = sys.argv[2]
assert subset in ['train', 'valid', 'test', 'train_valid']
else:
subset = 'test'
if subset == "test":
X, mask, _, num_seq = data.get_test()
elif subset == "train":
sys.exit("train not implemented")
elif subset == "train_valid":
sys.exit("train_valid not implemented")
else:
sys.exit("valid not implemented")
for metadata_path in metadata_path_all:
print "Loading metadata file %s" % metadata_path
metadata = np.load(metadata_path)
config_name = metadata['config_name']
config = importlib.import_module("configurations.%s" % config_name)
print "Using configurations: '%s'" % config_name
print "Build model"
l_in, l_out = config.build_model()
print "Build eval function"
inference = nn.layers.get_output(
l_out, sym_x, deterministic=True)
print "Load parameters"
nn.layers.set_all_param_values(l_out, metadata['param_values'])
print "Compile functions"
predict = theano.function([sym_x], inference)
print "Predict"
predictions = []
batch_size = config.batch_size
num_batches = np.size(X,axis=0) // batch_size
for i in range(num_batches):
idx = range(i*batch_size, (i+1)*batch_size)
x_batch = X[idx]
mask_batch = mask[idx]
p = predict(x_batch)
predictions.append(p)
predictions = np.concatenate(predictions, axis = 0)
predictions_path = os.path.join("predictions", os.path.basename(metadata_path).replace("dump_", "predictions_").replace(".pkl", ".npy"))
print "Storing predictions in %s" % predictions_path
np.save(predictions_path, predictions)
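# Hedged usage note (not part of the original script): metadata dumps are
# globbed from the prefix given on the command line, e.g.
#   python predict.py metadata/dump_ test
# where "metadata/dump_" is a hypothetical prefix and "test" picks the subset;
# predictions are written to the predictions/ directory relative to the
# working directory.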
|
gpl-3.0
| 356,573,002,627,405,200 | 23.709302 | 140 | 0.653647 | false | 3.410915 | true | false | false |
wwmm/wwplot
|
WWplot/fit.py
|
1
|
1679
|
# -*- coding: utf-8 -*-
import scipy.odr
from numpy import *
from PySide2.QtCore import QObject, Signal
class Fit(QObject):
finished = Signal()
def __init__(self, maxit=1000):
QObject.__init__(self)
self.maxit = maxit
self.ready = False
self.x, self.xerr = [], []
self.y, self.yerr = [], []
self.parameters = []
self.output, self.parameters_err = [], []
self.fit_function = None
self.myglobals = dict(globals())
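        # Namespace for eval(): the module's globals (numpy names, etc.) with builtins stripped below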
self.myglobals["__builtins__"] = {}
def init_function(self, equation_str):
self.ready = False
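        # Count how many fit parameters P[0], P[1], ... appear in the equation string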
N = equation_str.count("P[")
n_free = 0
for n in range(0, N):
test_str = "P[" + str(n) + "]"
if equation_str.count(test_str) > 0:
n_free = n_free + 1
self.parameters = []
for n in range(0, n_free):
self.parameters.append(1.0)
self.fit_function = lambda P, x: eval(equation_str, self.myglobals, locals())
self.model = scipy.odr.Model(self.fit_function)
def set_data(self, x, y, xerr=None, yerr=None):
if xerr is not None and yerr is not None:
self.fit_data = scipy.odr.RealData(x, y, sx=fabs(xerr), sy=fabs(yerr))
else:
self.fit_data = scipy.odr.RealData(x, y)
def run(self):
odr = scipy.odr.ODR(self.fit_data, self.model, maxit=self.maxit, beta0=self.parameters)
out = odr.run()
out.pprint()
self.parameters_err = sqrt(diag(out.cov_beta))
self.parameters = out.beta
self.ready = True
self.finished.emit()
return out.stopreason
|
gpl-3.0
| 5,255,284,062,352,769,000 | 22.985714 | 95 | 0.54735 | false | 3.337972 | false | false | false |
toast38coza/DJProcess
|
process/tasks/google_natural_language/test_extract_sentiment.py
|
1
|
1090
|
from google.cloud import language
client = language.Client()
import csv
def get_sentiment(message):
    # Helper (unused below): return the document-level sentiment annotation for a piece of text
    doc = client.document_from_text(message)
    return doc.annotate_text(include_sentiment=True, include_syntax=False, include_entities=False).sentiment
f_in = open('/Users/toast38coza/Downloads/verbatims.csv', 'rb')
f_out = open('/Users/toast38coza/Downloads/verbatims-new.csv', 'wb')
reader = csv.reader(f_in)
writer = csv.writer(f_out)
entities = {}
default_blank_entity = {'instances': []}
for row in reader:
text = row[5]
doc = client.document_from_text(text)
result = doc.annotate_text(include_sentiment=True, include_syntax=False, include_entities=True)
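    # Append the document-level sentiment score and magnitude as extra CSV columns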
row.append(result.sentiment.score)
row.append(result.sentiment.magnitude)
writer.writerow(row)
for e in result.entities:
key = '{}:{}'.format(e.name, e.entity_type)
instance = {
'name': e.name,
'type': e.entity_type,
'salience': e.salience,
'sentiment': e.sentiment,
'doc': text
}
        # setdefault stores a fresh dict back into `entities` for each new key
        entity = entities.setdefault(key, {'instances': []})
        entity['instances'].append(instance)
f_in.close()
f_out.close()
|
mit
| -7,063,101,933,839,828,000 | 27.684211 | 99 | 0.637615 | false | 3.177843 | false | false | false |
mystic123/DeepLearning
|
Basics/optimizers.py
|
1
|
7742
|
import numpy as np
class Optimizer:
"""
Optimizer class
"""
def __init__(self, net, cost, learning_rate, *args, **kwargs):
self.net = net
self.cost = cost
self.learning_rate = learning_rate
def compute_gradients(self, batch, y, *args, **kwargs):
zs, as_ = self.net.forward_pass(batch)
gradients = []
m = y.shape[0]
dA = self.cost.prime(as_[-1], y.T)
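        # Backpropagate from the output layer towards the input, collecting per-layer weight gradients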
for i in range(len(self.net.weights) - 1, 0, -1):
dZ = dA * self.net.activation_prime(zs[i])
dW = np.matmul(dZ, as_[i - 1].T) / m
gradients = [dW] + gradients
dA = np.matmul(self.net.weights[i].T, dZ)
dZ = dA * self.net.activation_prime(zs[0])
dW = np.matmul(dZ, batch) / m
gradients = [dW] + gradients
return gradients
def update_weights(self, *args, **kwargs):
raise NotImplementedError
def name(self):
raise NotImplementedError
class SGD(Optimizer):
"""
Stochastic Gradient Descent Optimizer
"""
def __init__(self, *args, **kwargs):
super(SGD, self).__init__(*args, **kwargs)
def update_weights(self, batch):
batch_xs, batch_ys = batch
gradients = self.compute_gradients(batch_xs, batch_ys)
for w, dW in zip(self.net.weights, gradients):
w -= self.learning_rate * dW
def name(self):
return 'SGD'
class MomentumOptimizer(Optimizer):
"""
SGD With Momentum Optimizer
"""
def __init__(self, *args, gamma=0.9, **kwargs):
super(MomentumOptimizer, self).__init__(*args, **kwargs)
self.gamma = gamma
self.past_gradients = []
for w in self.net.weights:
self.past_gradients.append(np.zeros_like(w))
def update_weights(self, batch):
batch_xs, batch_ys = batch
gradients = self.compute_gradients(batch_xs, batch_ys)
for i, dW in enumerate(gradients):
            # add momentum term to weights update
self.net.weights[i] -= self.gamma * self.past_gradients[i] + self.learning_rate * dW
self.past_gradients[i] = dW
def name(self):
return 'Momentum'
class NAG(Optimizer):
"""
Nesterov Accelerated Gradient Optimizer
"""
def __init__(self, *args, gamma=0.9, **kwargs):
super(NAG, self).__init__(*args, **kwargs)
self.gamma = gamma
self.past_gradients = []
for w in self.net.weights:
self.past_gradients.append(np.zeros_like(w))
def compute_gradients(self, batch, y, *args, **kwargs):
net_weights = []
for w in self.net.weights:
net_weights.append(np.copy(w))
# compute gradients with respect to approximated future parameters
for i, w in enumerate(self.net.weights):
self.net.weights[i] = w - self.gamma * self.past_gradients[i]
gradients = super(NAG, self).compute_gradients(batch, y)
# restore weights
self.net.weights = net_weights
return gradients
def update_weights(self, batch):
batch_xs, batch_ys = batch
gradients = self.compute_gradients(batch_xs, batch_ys)
for i, dW in enumerate(gradients):
# add momentum term
self.net.weights[i] -= self.gamma * self.past_gradients[i] + self.learning_rate * dW
self.past_gradients[i] = dW
def name(self):
return 'NAG'
class Adagrad(Optimizer):
"""
Adagrad Optimizer
"""
def __init__(self, *args, epsilon=1e-8, **kwargs):
super(Adagrad, self).__init__(*args, **kwargs)
self.epsilon = epsilon
self.gradient_squares = []
for w in self.net.weights:
self.gradient_squares.append(np.zeros_like(w))
def update_weights(self, batch):
batch_xs, batch_ys = batch
gradients = self.compute_gradients(batch_xs, batch_ys)
for i, dW in enumerate(gradients):
# accumulate gradients squares since the beginning
self.gradient_squares[i] += np.square(dW)
self.net.weights[i] -= self.learning_rate / (np.sqrt(self.gradient_squares[i] + self.epsilon)) * dW
def name(self):
return 'Adagrad'
class Adadelta(Optimizer):
"""
Adadelta Optimizer
"""
def __init__(self, *args, gamma=0.9, epsilon=1e-8, **kwargs):
super(Adadelta, self).__init__(*args, **kwargs)
self.gamma = gamma
self.epsilon = epsilon
self.gradients_squares = []
self.past_updates_squares = []
for w in self.net.weights:
self.gradients_squares.append(np.zeros_like(w))
self.past_updates_squares.append(np.zeros_like(w))
def update_weights(self, batch):
batch_xs, batch_ys = batch
gradients = self.compute_gradients(batch_xs, batch_ys)
for i, dW in enumerate(gradients):
# decay accumulated gradients squares
self.gradients_squares[i] = self.gamma * self.gradients_squares[i] + (1 - self.gamma) * dW ** 2
update = -np.sqrt(
(self.past_updates_squares[i] + self.epsilon) / (self.gradients_squares[i] + self.epsilon)) * dW
            # decay the accumulated squared updates: E[dx^2] <- gamma*E[dx^2] + (1 - gamma)*dx^2
            self.past_updates_squares[i] = self.gamma * self.past_updates_squares[i] + (1 - self.gamma) * np.square(update)
self.net.weights[i] += update
def name(self):
return 'Adadelta'
class RMSProp(Optimizer):
"""
RMSProp Optimizer
"""
def __init__(self, *args, gamma=0.9, epsilon=1e-8, **kwargs):
super(RMSProp, self).__init__(*args, **kwargs)
self.gamma = gamma
self.epsilon = epsilon
self.gradients_squares = []
for w in self.net.weights:
self.gradients_squares.append(np.zeros_like(w))
def update_weights(self, batch):
batch_xs, batch_ys = batch
gradients = self.compute_gradients(batch_xs, batch_ys)
for i, dW in enumerate(gradients):
# decay accumulated gradients squares
self.gradients_squares[i] = self.gamma * self.gradients_squares[i] + (1 - self.gamma) * dW ** 2
update = -self.learning_rate / np.sqrt(self.gradients_squares[i] + self.epsilon) * dW
self.net.weights[i] += update
def name(self):
return 'RMSProp'
class Adam(Optimizer):
"""
Adam Optimizer
"""
def __init__(self, *args, beta1=0.9, beta2=0.999, epsilon=1e-8, **kwargs):
super(Adam, self).__init__(*args, **kwargs)
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.step = 1
self.past_gradients = []
self.gradient_squares = []
for w in self.net.weights:
self.past_gradients.append(np.zeros_like(w))
self.gradient_squares.append(np.zeros_like(w))
def update_weights(self, batch):
batch_xs, batch_ys = batch
gradients = self.compute_gradients(batch_xs, batch_ys)
for i, dW in enumerate(gradients):
# decay accumulated gradients
self.past_gradients[i] = self.beta1 * self.past_gradients[i] + (1 - self.beta1) * dW
# decay accumulated gradients squares
self.gradient_squares[i] = self.beta2 * self.gradient_squares[i] + (1 - self.beta2) * dW ** 2
# compute corrected estimates
mean_estimate = self.past_gradients[i] / (1 - self.beta1 ** self.step)
var_estimate = self.gradient_squares[i] / (1 - self.beta2 ** self.step)
update = -self.learning_rate / (np.sqrt(var_estimate) + self.epsilon) * mean_estimate
self.net.weights[i] += update
self.step += 1
def name(self):
return 'Adam'
|
mit
| 7,283,492,937,311,258,000 | 31.393305 | 112 | 0.580728 | false | 3.572681 | false | false | false |
Miserlou/OpenWatch
|
openwatch/settings.py
|
1
|
4279
|
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
BASE_URL = 'http://www.openwatch.net'
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'test_db', # Or path to database file if using sqlite3.
'USER': '', #XXX CHANGEME # Not used with sqlite3.
'PASSWORD': '', #XXX CHANGEME # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
AUTH_PROFILE_MODULE = 'recordings.UserProfile'
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = '/home/tuttle/Projects/openwatch/openwatch/static/'
UPLOAD_ROOT = '/var/www/openwatch/uploads/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = 'http://openwatch.net/static/'
#UPLOAD_ROOT = '/var/www/openwatch/uploads/'
#UPLOAD_ROOT = 'Users/davidbrodsky/Documents/django/OpenWatch_static/uploads'
STATIC_URL = '/static/'
#STATIC_ROOT = '/Users/davidbrodsky/Documents/django/OpenWatch_static'
STATICFILES_DIRS = (
os.path.join(os.path.dirname(__file__), '../static/'),
)
# Deprecated setting
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
#ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '2f=jo^b+x)xu92a93wt3+d9drnzvp%=e&3um6ltw%o03cwn3v$'
###XXX: Change me, obviously
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
# Not required with Django 1.4
#'django.middleware.csrf.CsrfResponseMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'openwatch.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(os.path.dirname(__file__), 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
'django.contrib.admindocs',
'tagging',
'captcha',
'openwatch.recordings',
'openwatch.blog',
'openwatch.misc',
'openwatch.map'
)
CAPTCHA_FONT_SIZE = 42
CAPTCHA_LETTER_ROTATION = None
try:
from local_settings import *
except:
pass
|
apache-2.0
| 7,432,822,318,124,921,000 | 31.416667 | 122 | 0.707175 | false | 3.439711 | false | false | false |
krattai/noo-ebs
|
docs/zeroMQ-guide2/examples/Python/ticlient.py
|
1
|
1692
|
"""
Titanic client example
Implements client side of http://rfc.zeromq.org/spec:9
Author : Min RK <[email protected]>
"""
import sys
import time
from mdcliapi import MajorDomoClient
def service_call (session, service, request):
"""Calls a TSP service
    Returns response if successful (status code 200 OK), else None
"""
reply = session.send(service, request)
if reply:
status = reply.pop(0)
if status == "200":
return reply
elif status == "400":
print "E: client fatal error, aborting"
sys.exit (1)
elif status == "500":
print "E: server fatal error, aborting"
sys.exit (1)
else:
sys.exit (0); # Interrupted or failed
def main():
verbose = '-v' in sys.argv
session = MajorDomoClient("tcp://localhost:5555", verbose)
# 1. Send 'echo' request to Titanic
request = ["echo", "Hello world"]
reply = service_call(session, "titanic.request", request)
uuid = None
if reply:
uuid = reply.pop(0)
print "I: request UUID ", uuid
# 2. Wait until we get a reply
while True:
time.sleep (.1)
request = [uuid]
reply = service_call (session, "titanic.reply", request)
if reply:
reply_string = reply[-1]
print "Reply:", reply_string
# 3. Close request
request = [uuid]
reply = service_call (session, "titanic.close", request)
break
else:
print "I: no reply yet, trying again..."
time.sleep(5) # Try again in 5 seconds
return 0
if __name__ == '__main__':
main()
|
bsd-2-clause
| 7,430,159,579,871,229,000 | 24.651515 | 68 | 0.560284 | false | 3.76 | false | false | false |
kamailio/kamcli
|
kamcli/commands/cmd_uacreg.py
|
1
|
6162
|
import click
from sqlalchemy import create_engine
from kamcli.ioutils import ioutils_dbres_print
from kamcli.cli import pass_context
from kamcli.iorpc import command_ctl
@click.group(
"uacreg",
help="Manage uac remote registrations",
short_help="Manage uac registrations",
)
@pass_context
def cli(ctx):
pass
@cli.command("add", short_help="Add a new remote registration account")
@click.option("realm", "--realm", default="", help='Realm (default: "")')
@click.option(
"authha1", "--auth-ha1", is_flag=True, help="Auth password in HA1 format"
)
@click.option(
"flags", "--flags", type=int, default=0, help="Flags (default: 0)"
)
@click.option(
"regdelay",
"--reg-delay",
type=int,
default=0,
help="Registration delay (default: 0)",
)
@click.option(
"socket", "--socket", default="", help='Local socket (default: "")'
)
@click.argument("l_uuid", metavar="<l_uuid>")
@click.argument("l_username", metavar="<l_username>")
@click.argument("l_domain", metavar="<l_domain>")
@click.argument("r_username", metavar="<r_username>")
@click.argument("r_domain", metavar="<r_domain>")
@click.argument("auth_username", metavar="<auth_username>")
@click.argument("auth_password", metavar="<auth_password>")
@click.argument("auth_proxy", metavar="<auth_proxy>")
@click.argument("expires", metavar="<expires>", type=int)
@pass_context
def uacreg_add(
ctx,
realm,
authha1,
flags,
regdelay,
socket,
l_uuid,
l_username,
l_domain,
r_username,
r_domain,
auth_username,
auth_password,
auth_proxy,
expires,
):
"""Add a new uac remote registration account
\b
Parameters:
<l_uuid> - local user unique id
<l_username> - local username
<l_domain> - local domain
<r_username> - remote username
<r_domain> - remote domain
<auth_username> - auth username
<auth_password> - auth password
<auth_proxy> - auth proxy (sip address)
<expires> - expires interval (int)
"""
ctx.vlog(
"Adding a new uac remote registration account - local uuid: [%s]",
l_uuid,
)
pwval = ""
ha1val = ""
if authha1:
ha1val = auth_password
else:
pwval = auth_password
e = create_engine(ctx.gconfig.get("db", "rwurl"))
e.execute(
"insert into uacreg (l_uuid, l_username, l_domain, r_username, "
"r_domain, realm, auth_username, auth_password, auth_ha1, auth_proxy, "
"expires, flags, reg_delay, socket) values "
"({0!r}, {1!r}, {2!r}, {3!r}, "
"{4!r}, {5!r}, {6!r}, {7!r}, {8!r}, {9!r}, "
"{10}, {11}, {12}, {13!r})".format(
l_uuid.encode("ascii", "ignore").decode(),
l_username.encode("ascii", "ignore").decode(),
l_domain.encode("ascii", "ignore").decode(),
r_username.encode("ascii", "ignore").decode(),
r_domain.encode("ascii", "ignore").decode(),
realm.encode("ascii", "ignore").decode(),
auth_username.encode("ascii", "ignore").decode(),
pwval.encode("ascii", "ignore").decode(),
ha1val.encode("ascii", "ignore").decode(),
auth_proxy.encode("ascii", "ignore").decode(),
expires,
flags,
regdelay,
socket.encode("ascii", "ignore").decode(),
)
)
@cli.command(
"passwd", short_help="Set the password for a remote registration account"
)
@click.option(
"authha1", "--auth-ha1", is_flag=True, help="Auth password in HA1 format"
)
@click.argument("l_uuid", metavar="<l_uuid>")
@click.argument("auth_password", metavar="<auth_password>")
@pass_context
def uacreg_passwd(ctx, authha1, l_uuid, auth_password):
"""Set password for a remote registration account
\b
Parameters:
<l_uuid> - local user unique id
<auth_password> - auth password
"""
    ctx.vlog(
        "Updating auth password for uac remote registration account - local uuid: [%s]",
        l_uuid,
    )
pwval = ""
ha1val = ""
if authha1:
ha1val = auth_password
else:
pwval = auth_password
e = create_engine(ctx.gconfig.get("db", "rwurl"))
e.execute(
"update uacreg set auth_password={0!r}, auth_ha1={1!r} "
"where l_uuid={2!r}".format(
pwval.encode("ascii", "ignore").decode(),
ha1val.encode("ascii", "ignore").decode(),
l_uuid.encode("ascii", "ignore").decode(),
)
)
@cli.command("showdb", short_help="Show uacreg records in database")
@click.option(
"oformat",
"--output-format",
"-F",
type=click.Choice(["raw", "json", "table", "dict"]),
default=None,
help="Format the output",
)
@click.option(
"ostyle",
"--output-style",
"-S",
default=None,
help="Style of the output (tabulate table format)",
)
@click.argument("l_uuid", nargs=-1, metavar="[<l_uuid>]")
@pass_context
def uacreg_showdb(ctx, oformat, ostyle, l_uuid):
"""Show details for records in uacreg database table
\b
Parameters:
[<l_uuid>] - local user unique id
"""
e = create_engine(ctx.gconfig.get("db", "rwurl"))
if not l_uuid:
ctx.vlog("Showing all uacreg records")
res = e.execute("select * from uacreg")
ioutils_dbres_print(ctx, oformat, ostyle, res)
else:
for record in l_uuid:
ctx.vlog("Showing uacreg records for l_uuid: " + record)
res = e.execute(
"select * from uacreg where l_uuid={0!r}".format(record)
)
ioutils_dbres_print(ctx, oformat, ostyle, res)
@cli.command(
"list", short_help="Show details for remote registration records in memory"
)
@pass_context
def uacreg_list(ctx):
"""Show details for remote registration records in memory
\b
"""
command_ctl(ctx, "uac.reg_dump", [])
@cli.command(
"reload",
short_help="Reload remote registration records from database into memory",
)
@pass_context
def uacreg_reload(ctx):
"""Reload remote registration records from database into memory
"""
command_ctl(ctx, "uac.reg_reload", [])
|
gpl-2.0
| -7,870,042,269,228,062,000 | 28.342857 | 79 | 0.595099 | false | 3.389439 | false | false | false |
Jonadabe/letsencrypt
|
letsencrypt/achallenges.py
|
1
|
2725
|
"""Client annotated ACME challenges.
Please use names such as ``achall`` to distinguish from variables "of type"
:class:`acme.challenges.Challenge` (denoted by ``chall``)
and :class:`.ChallengeBody` (denoted by ``challb``)::
from acme import challenges
from acme import messages
from letsencrypt import achallenges
chall = challenges.DNS(token='foo')
challb = messages.ChallengeBody(chall=chall)
achall = achallenges.DNS(chall=challb, domain='example.com')
Note, that all annotated challenges act as a proxy objects::
achall.token == challb.token
"""
from acme import challenges
from acme.jose import util as jose_util
from letsencrypt import crypto_util
# pylint: disable=too-few-public-methods
class AnnotatedChallenge(jose_util.ImmutableMap):
"""Client annotated challenge.
Wraps around server provided challenge and annotates with data
useful for the client.
:ivar challb: Wrapped `~.ChallengeBody`.
"""
__slots__ = ('challb',)
acme_type = NotImplemented
def __getattr__(self, name):
return getattr(self.challb, name)
class DVSNI(AnnotatedChallenge):
"""Client annotated "dvsni" ACME challenge."""
__slots__ = ('challb', 'domain', 'key')
acme_type = challenges.DVSNI
def gen_cert_and_response(self, s=None): # pylint: disable=invalid-name
"""Generate a DVSNI cert and save it to filepath.
:returns: ``(cert_pem, response)`` tuple, where ``cert_pem`` is the PEM
encoded certificate and ``response`` is an instance
:class:`acme.challenges.DVSNIResponse`.
:rtype: tuple
"""
response = challenges.DVSNIResponse(s=s)
cert_pem = crypto_util.make_ss_cert(self.key, [
self.domain, self.nonce_domain, response.z_domain(self.challb)])
return cert_pem, response
class SimpleHTTP(AnnotatedChallenge):
"""Client annotated "simpleHttp" ACME challenge."""
__slots__ = ('challb', 'domain', 'key')
acme_type = challenges.SimpleHTTP
class DNS(AnnotatedChallenge):
"""Client annotated "dns" ACME challenge."""
__slots__ = ('challb', 'domain')
acme_type = challenges.DNS
class RecoveryContact(AnnotatedChallenge):
"""Client annotated "recoveryContact" ACME challenge."""
__slots__ = ('challb', 'domain')
acme_type = challenges.RecoveryContact
class RecoveryToken(AnnotatedChallenge):
"""Client annotated "recoveryToken" ACME challenge."""
__slots__ = ('challb', 'domain')
acme_type = challenges.RecoveryToken
class ProofOfPossession(AnnotatedChallenge):
"""Client annotated "proofOfPossession" ACME challenge."""
__slots__ = ('challb', 'domain')
acme_type = challenges.ProofOfPossession
|
apache-2.0
| 8,463,762,847,026,909,000 | 28.619565 | 80 | 0.681835 | false | 3.609272 | false | false | false |
gcasey/cosmotrack
|
scripts/configParsers.py
|
1
|
4387
|
################################################################################
#
# Copyright 2013 Kitware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
import re
import pprint
import os
TEMPLATE_RESULTS = {
"version": None,
"visualization" : None,
"viz_server" : None,
"viz_port" : None,
"viz_frequency" : None,
"analysistool" : {}
}
# These should contain the required parameters
ANALYSIS_TEMPLATES = {
'halotracker' : {
'bb' : None,
'merger_tree_file' : None
}
}
class IncompleteConfigurationException(Exception):
pass
class ParseError(Exception):
pass
CHARACTER_CONVERTER = re.compile(r'\W')
def convertKeyName(name):
name = name.lower()
return re.sub(CHARACTER_CONVERTER, '_', name)
def verifyMetaData(obj):
for key, value in obj.iteritems():
if value in (None, {}):
raise IncompleteConfigurationException('Pair: (%s, %s)' % (key, value))
else:
try:
verifyMetaData(value)
except AttributeError:
pass
def yesNoBool(token):
if token.lower() in ['yes', 'true', 'on', 'enabled']:
return True
elif token.lower() in ['no', 'false', 'off', 'disabled']:
        return False
raise ValueError("No conversion to bool")
def guessType(token):
ConvertPrecedence = [yesNoBool, int, float, str]
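    # Try conversions from most specific to least specific; str() always succeeds as the fallback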
for op in ConvertPrecedence:
try:
return op(token)
except ValueError:
pass
def simplifyChunk(text):
if len(text) == 0:
raise ParseError('No value for key')
if len(text) == 1:
return guessType(text[0])
else:
return [guessType(snip) for snip in text]
SECTION_MATCHER = re.compile('#\s*(\S*)\s*SECTION')
def parseCosmoConfig(fileobj):
result = TEMPLATE_RESULTS.copy()
namespace = result
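    # `namespace` points at the dict currently being filled; it switches to a tool-specific sub-dict at each SECTION header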
for line in fileobj:
# We should check for section names first as it kind of looks like a comment
mobj = SECTION_MATCHER.match(line.strip())
if mobj:
name = mobj.group(1)
name = convertKeyName(name)
namespace = result['analysistool'][name]
        # Other than section names, lines starting with '#' are comments
elif len(line) > 0 and line[0] == '#':
continue
else:
tokens = line.split()
if len(tokens) < 2:
continue
elif tokens[0].lower() == 'analysistool' and len(tokens) > 2 and yesNoBool(tokens[2]):
key = convertKeyName(tokens[1].strip())
result['analysistool'][key] = {}
elif tokens[0] == 'INSTANCE_NAME':
try:
key = convertKeyName(tokens[1])
namespace.update(ANALYSIS_TEMPLATES[key])
except KeyError:
pass
else:
key = convertKeyName(tokens[0])
namespace[key] = simplifyChunk(tokens[1:])
verifyMetaData(result)
return result
def parseIndatParams(fileobj):
result = {}
for line in fileobj:
if len(line) < 1 or line[0] == '#':
continue
else:
tokens = line.split()
if len(tokens) < 2:
continue
key = convertKeyName(tokens[0])
result[key] = simplifyChunk([tokens[1]])
return result
def main(simname, cosmofile, indatfile):
simname = simname
cosmoParams = parseCosmoConfig(open(cosmofile, 'r'))
indatParams = parseIndatParams(open(indatfile, 'r'))
result = {'simulation_name' : simname,
'cosmo' : cosmoParams,
'indat' : indatParams}
return result
if __name__ == '__main__':
import sys
_r = main(sys.argv[1], sys.argv[2], sys.argv[3])
pprint.pprint(_r)
|
apache-2.0
| 5,939,486,349,299,386,000 | 27.303226 | 98 | 0.565079 | false | 4.017399 | false | false | false |
letouriste001/SmartForest_2.0
|
python3.4Smartforest/lib/python3.4/site-packages/django/utils/translation/trans_null.py
|
1
|
1408
|
# These are versions of the functions in django.utils.translation.trans_real
# that don't actually do anything. This is purely for performance, so that
# settings.USE_I18N = False can use this module rather than trans_real.py.
from django.conf import settings
from django.utils.encoding import force_text
def ngettext(singular, plural, number):
if number == 1:
return singular
return plural
ngettext_lazy = ngettext
def ungettext(singular, plural, number):
return force_text(ngettext(singular, plural, number))
def pgettext(context, message):
return ugettext(message)
def npgettext(context, singular, plural, number):
return ungettext(singular, plural, number)
activate = lambda x: None
deactivate = deactivate_all = lambda: None
get_language = lambda: settings.LANGUAGE_CODE
get_language_bidi = lambda: settings.LANGUAGE_CODE in settings.LANGUAGES_BIDI
check_for_language = lambda x: True
def gettext(message):
return message
def ugettext(message):
return force_text(gettext(message))
gettext_noop = gettext_lazy = _ = gettext
def to_locale(language):
p = language.find('-')
if p >= 0:
return language[:p].lower() + '_' + language[p + 1:].upper()
else:
return language.lower()
def get_language_from_request(request, check_path=False):
return settings.LANGUAGE_CODE
def get_language_from_path(request):
return None
|
mit
| -7,491,722,357,305,650,000 | 23.701754 | 77 | 0.721591 | false | 3.619537 | false | false | false |
ProjectQ-Framework/ProjectQ
|
projectq/setups/decompositions/h2rx.py
|
1
|
2023
|
# -*- coding: utf-8 -*-
# Copyright 2017 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Module uses ideas from "Basic circuit compilation techniques for an
# ion-trap quantum machine" by Dmitri Maslov (2017) at
# https://iopscience.iop.org/article/10.1088/1367-2630/aa5e47
"""
Registers a decomposition for the H gate into an Ry and Rx gate.
"""
import math
from projectq.cengines import DecompositionRule
from projectq.meta import get_control_count
from projectq.ops import Ph, Rx, Ry, H
def _decompose_h2rx_M(cmd): # pylint: disable=invalid-name
    """Decompose the H gate."""
# Labelled 'M' for 'minus' because decomposition ends with a Ry(-pi/2)
qubit = cmd.qubits[0]
Rx(math.pi) | qubit
Ph(math.pi / 2) | qubit
Ry(-1 * math.pi / 2) | qubit
def _decompose_h2rx_N(cmd): # pylint: disable=invalid-name
    """Decompose the H gate."""
# Labelled 'N' for 'neutral' because decomposition doesn't end with
# Ry(pi/2) or Ry(-pi/2)
qubit = cmd.qubits[0]
Ry(math.pi / 2) | qubit
Ph(3 * math.pi / 2) | qubit
Rx(-1 * math.pi) | qubit
def _recognize_HNoCtrl(cmd): # pylint: disable=invalid-name
"""For efficiency reasons only if no control qubits."""
return get_control_count(cmd) == 0
#: Decomposition rules
all_defined_decomposition_rules = [
DecompositionRule(H.__class__, _decompose_h2rx_N, _recognize_HNoCtrl),
DecompositionRule(H.__class__, _decompose_h2rx_M, _recognize_HNoCtrl),
]
|
apache-2.0
| -7,079,851,581,845,540,000 | 33.87931 | 76 | 0.69303 | false | 3.14619 | false | false | false |
montyly/manticore
|
manticore/native/state.py
|
1
|
2684
|
from ..core.state import StateBase, Concretize, TerminateState
from ..native.memory import ConcretizeMemory, MemoryException
class State(StateBase):
@property
def cpu(self):
"""
Current cpu state
"""
return self._platform.current
@property
def mem(self):
"""
Current virtual memory mappings
"""
return self._platform.current.memory
def execute(self):
"""
Perform a single step on the current state
"""
from .cpu.abstractcpu import (
ConcretizeRegister,
) # must be here, otherwise we get circular imports
try:
result = self._platform.execute()
# Instead of State importing SymbolicRegisterException and SymbolicMemoryException
# from cpu/memory shouldn't we import Concretize from linux, cpu, memory ??
# We are forcing State to have abstractcpu
except ConcretizeRegister as e:
expression = self.cpu.read_register(e.reg_name)
def setstate(state, value):
state.cpu.write_register(setstate.e.reg_name, value)
setstate.e = e
raise Concretize(str(e), expression=expression, setstate=setstate, policy=e.policy)
except ConcretizeMemory as e:
expression = self.cpu.read_int(e.address, e.size)
def setstate(state, value):
state.cpu.write_int(setstate.e.address, value, setstate.e.size)
setstate.e = e
raise Concretize(str(e), expression=expression, setstate=setstate, policy=e.policy)
except MemoryException as e:
raise TerminateState(str(e), testcase=True)
# Remove when code gets stable?
assert self.platform.constraints is self.constraints
return result
def invoke_model(self, model):
"""
Invokes a `model`. Modelling can be used to override a function in the target program with a custom
implementation.
For more information on modelling see docs/models.rst
A `model` is a callable whose first argument is a `manticore.native.State` instance.
        The following arguments correspond to the arguments of the C function
being modeled. If the `model` models a variadic function, the following argument
is a generator object, which can be used to access function arguments dynamically.
The `model` callable should simply return the value that should be returned by the
        native function being modeled.
:param model: callable, model to invoke
"""
self._platform.invoke_model(model, prefix_args=(self,))
|
apache-2.0
| -261,491,013,077,682,460 | 35.27027 | 107 | 0.643443 | false | 4.518519 | false | false | false |
houshengbo/nova_vmware_compute_driver
|
nova/tests/matchers.py
|
1
|
14525
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Matcher classes to be used inside of the testtools assertThat framework."""
import pprint
from lxml import etree
class DictKeysMismatch(object):
def __init__(self, d1only, d2only):
self.d1only = d1only
self.d2only = d2only
def describe(self):
return ('Keys in d1 and not d2: %(d1only)s.'
' Keys in d2 and not d1: %(d2only)s' % self.__dict__)
def get_details(self):
return {}
class DictMismatch(object):
def __init__(self, key, d1_value, d2_value):
self.key = key
self.d1_value = d1_value
self.d2_value = d2_value
def describe(self):
return ("Dictionaries do not match at %(key)s."
" d1: %(d1_value)s d2: %(d2_value)s" % self.__dict__)
def get_details(self):
return {}
class DictMatches(object):
def __init__(self, d1, approx_equal=False, tolerance=0.001):
self.d1 = d1
self.approx_equal = approx_equal
self.tolerance = tolerance
def __str__(self):
return 'DictMatches(%s)' % (pprint.pformat(self.d1))
# Useful assertions
def match(self, d2):
"""Assert two dicts are equivalent.
This is a 'deep' match in the sense that it handles nested
dictionaries appropriately.
NOTE:
If you don't care (or don't know) a given value, you can specify
the string DONTCARE as the value. This will cause that dict-item
to be skipped.
"""
d1keys = set(self.d1.keys())
d2keys = set(d2.keys())
if d1keys != d2keys:
d1only = d1keys - d2keys
d2only = d2keys - d1keys
return DictKeysMismatch(d1only, d2only)
for key in d1keys:
d1value = self.d1[key]
d2value = d2[key]
try:
error = abs(float(d1value) - float(d2value))
within_tolerance = error <= self.tolerance
except (ValueError, TypeError):
# If both values aren't convertible to float, just ignore
# ValueError if arg is a str, TypeError if it's something else
# (like None)
within_tolerance = False
if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
matcher = DictMatches(d1value)
did_match = matcher.match(d2value)
if did_match is not None:
return did_match
elif 'DONTCARE' in (d1value, d2value):
continue
elif self.approx_equal and within_tolerance:
continue
elif d1value != d2value:
return DictMismatch(key, d1value, d2value)
class ListLengthMismatch(object):
def __init__(self, len1, len2):
self.len1 = len1
self.len2 = len2
def describe(self):
return ('Length mismatch: len(L1)=%(len1)d != '
'len(L2)=%(len2)d' % self.__dict__)
def get_details(self):
return {}
class DictListMatches(object):
def __init__(self, l1, approx_equal=False, tolerance=0.001):
self.l1 = l1
self.approx_equal = approx_equal
self.tolerance = tolerance
def __str__(self):
return 'DictListMatches(%s)' % (pprint.pformat(self.l1))
# Useful assertions
def match(self, l2):
"""Assert a list of dicts are equivalent."""
l1count = len(self.l1)
l2count = len(l2)
if l1count != l2count:
return ListLengthMismatch(l1count, l2count)
for d1, d2 in zip(self.l1, l2):
matcher = DictMatches(d2,
approx_equal=self.approx_equal,
tolerance=self.tolerance)
did_match = matcher.match(d1)
if did_match:
return did_match
class SubDictMismatch(object):
def __init__(self,
key=None,
sub_value=None,
super_value=None,
keys=False):
self.key = key
self.sub_value = sub_value
self.super_value = super_value
self.keys = keys
def describe(self):
if self.keys:
return "Keys between dictionaries did not match"
else:
return("Dictionaries do not match at %s. d1: %s d2: %s"
% (self.key,
self.super_value,
self.sub_value))
def get_details(self):
return {}
class IsSubDictOf(object):
def __init__(self, super_dict):
self.super_dict = super_dict
def __str__(self):
return 'IsSubDictOf(%s)' % (self.super_dict)
def match(self, sub_dict):
"""Assert a sub_dict is subset of super_dict."""
if not set(sub_dict.keys()).issubset(set(self.super_dict.keys())):
return SubDictMismatch(keys=True)
for k, sub_value in sub_dict.items():
super_value = self.super_dict[k]
if isinstance(sub_value, dict):
matcher = IsSubDictOf(super_value)
did_match = matcher.match(sub_value)
if did_match is not None:
return did_match
elif 'DONTCARE' in (sub_value, super_value):
continue
else:
if sub_value != super_value:
return SubDictMismatch(k, sub_value, super_value)
class XMLMismatch(object):
"""Superclass for XML mismatch."""
def __init__(self, state):
self.path = str(state)
self.expected = state.expected
self.actual = state.actual
def describe(self):
return "%(path)s: XML does not match" % self.__dict__
def get_details(self):
return {
'expected': self.expected,
'actual': self.actual,
}
class XMLTagMismatch(XMLMismatch):
"""XML tags don't match."""
def __init__(self, state, idx, expected_tag, actual_tag):
super(XMLTagMismatch, self).__init__(state)
self.idx = idx
self.expected_tag = expected_tag
self.actual_tag = actual_tag
def describe(self):
return ("%(path)s: XML tag mismatch at index %(idx)d: "
"expected tag <%(expected_tag)s>; "
"actual tag <%(actual_tag)s>" % self.__dict__)
class XMLAttrKeysMismatch(XMLMismatch):
"""XML attribute keys don't match."""
def __init__(self, state, expected_only, actual_only):
super(XMLAttrKeysMismatch, self).__init__(state)
self.expected_only = ', '.join(sorted(expected_only))
self.actual_only = ', '.join(sorted(actual_only))
def describe(self):
return ("%(path)s: XML attributes mismatch: "
"keys only in expected: %(expected_only)s; "
"keys only in actual: %(actual_only)s" % self.__dict__)
class XMLAttrValueMismatch(XMLMismatch):
"""XML attribute values don't match."""
def __init__(self, state, key, expected_value, actual_value):
super(XMLAttrValueMismatch, self).__init__(state)
self.key = key
self.expected_value = expected_value
self.actual_value = actual_value
def describe(self):
return ("%(path)s: XML attribute value mismatch: "
"expected value of attribute %(key)s: %(expected_value)r; "
"actual value: %(actual_value)r" % self.__dict__)
class XMLTextValueMismatch(XMLMismatch):
"""XML text values don't match."""
def __init__(self, state, expected_text, actual_text):
super(XMLTextValueMismatch, self).__init__(state)
self.expected_text = expected_text
self.actual_text = actual_text
def describe(self):
return ("%(path)s: XML text value mismatch: "
"expected text value: %(expected_text)r; "
"actual value: %(actual_text)r" % self.__dict__)
class XMLUnexpectedChild(XMLMismatch):
"""Unexpected child present in XML."""
def __init__(self, state, tag, idx):
super(XMLUnexpectedChild, self).__init__(state)
self.tag = tag
self.idx = idx
def describe(self):
return ("%(path)s: XML unexpected child element <%(tag)s> "
"present at index %(idx)d" % self.__dict__)
class XMLExpectedChild(XMLMismatch):
"""Expected child not present in XML."""
def __init__(self, state, tag, idx):
super(XMLExpectedChild, self).__init__(state)
self.tag = tag
self.idx = idx
def describe(self):
return ("%(path)s: XML expected child element <%(tag)s> "
"not present at index %(idx)d" % self.__dict__)
class XMLMatchState(object):
"""
Maintain some state for matching.
Tracks the XML node path and saves the expected and actual full
XML text, for use by the XMLMismatch subclasses.
"""
def __init__(self, expected, actual):
self.path = []
self.expected = expected
self.actual = actual
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, exc_tb):
self.path.pop()
return False
def __str__(self):
return '/' + '/'.join(self.path)
def node(self, tag, idx):
"""
Adds tag and index to the path; they will be popped off when
the corresponding 'with' statement exits.
:param tag: The element tag
:param idx: If not None, the integer index of the element
within its parent. Not included in the path
element if None.
"""
if idx is not None:
self.path.append("%s[%d]" % (tag, idx))
else:
self.path.append(tag)
return self
class XMLMatches(object):
"""Compare XML strings. More complete than string comparison."""
def __init__(self, expected):
self.expected_xml = expected
self.expected = etree.fromstring(expected)
def __str__(self):
return 'XMLMatches(%r)' % self.expected_xml
def match(self, actual_xml):
actual = etree.fromstring(actual_xml)
state = XMLMatchState(self.expected_xml, actual_xml)
result = self._compare_node(self.expected, actual, state, None)
if result is False:
return XMLMismatch(state)
elif result is not True:
return result
def _compare_node(self, expected, actual, state, idx):
"""Recursively compares nodes within the XML tree."""
# Start by comparing the tags
if expected.tag != actual.tag:
return XMLTagMismatch(state, idx, expected.tag, actual.tag)
with state.node(expected.tag, idx):
# Compare the attribute keys
expected_attrs = set(expected.attrib.keys())
actual_attrs = set(actual.attrib.keys())
if expected_attrs != actual_attrs:
expected_only = expected_attrs - actual_attrs
actual_only = actual_attrs - expected_attrs
return XMLAttrKeysMismatch(state, expected_only, actual_only)
# Compare the attribute values
for key in expected_attrs:
expected_value = expected.attrib[key]
actual_value = actual.attrib[key]
if 'DONTCARE' in (expected_value, actual_value):
continue
elif expected_value != actual_value:
return XMLAttrValueMismatch(state, key, expected_value,
actual_value)
# Compare the contents of the node
if len(expected) == 0 and len(actual) == 0:
# No children, compare text values
if ('DONTCARE' not in (expected.text, actual.text) and
expected.text != actual.text):
return XMLTextValueMismatch(state, expected.text,
actual.text)
else:
expected_idx = 0
actual_idx = 0
while (expected_idx < len(expected) and
actual_idx < len(actual)):
# Ignore comments and processing instructions
# TODO(Vek): may interpret PIs in the future, to
# allow for, say, arbitrary ordering of some
# elements
if (expected[expected_idx].tag in
(etree.Comment, etree.ProcessingInstruction)):
expected_idx += 1
continue
# Compare the nodes
result = self._compare_node(expected[expected_idx],
actual[actual_idx], state,
actual_idx)
if result is not True:
return result
# Step on to comparing the next nodes...
expected_idx += 1
actual_idx += 1
# Make sure we consumed all nodes in actual
if actual_idx < len(actual):
return XMLUnexpectedChild(state, actual[actual_idx].tag,
actual_idx)
# Make sure we consumed all nodes in expected
if expected_idx < len(expected):
for node in expected[expected_idx:]:
if (node.tag in
(etree.Comment, etree.ProcessingInstruction)):
continue
return XMLExpectedChild(state, node.tag, actual_idx)
# The nodes match
return True
|
apache-2.0
| -1,041,197,624,686,391,300 | 32.08656 | 78 | 0.553253 | false | 4.227299 | false | false | false |
VerifiableRobotics/controller-arena
|
src/controllerarena/controllers/refVec.py
|
1
|
4049
|
# code for python reference dipole vector field controller
# these functions require stuff
#from mathFuns import *
from numpy import *
from math import *
class refVec:
# define the constructor
def __init__(self, q_0, controller_flag):
# Initialize controller state
self.phi_prev = None
self.q_prev = q_0
self.e_int_w = 0
self.e_int_u = 0
# set gains
self.k_p_u = 1 # u indicates it is an position gain. p indicates it is a proportional gain.
self.k_p_w = 3 # w indicates it is an angular gain. p indicates it is a proportional gain.
if controller_flag == 1: # PID
self.k_i_w = 1
self.k_i_u = 1
self.k_d = -1 # the derivative gain is only on the angle
elif controller_flag == 2: # PI
self.k_i_w = 1
self.k_i_u = 1
self.k_d = 0
elif controller_flag == 3: # PD
self.k_i_w = 0
self.k_i_u = 0
self.k_d = -1
else: # P
self.k_i_w = 0
self.k_i_u = 0
self.k_d = 0
def get_output(self, q_d, q, dt): # obtain reference vector field value
F = self.get_vector_field(q, q_d) # F is an column vector
## obtain control signal as a fcn of reference vector field value
u = self.get_control(q, q_d, F, dt)
return u
def get_vector_field(self, q, q_d):
# return type: numpy array
# note: unsure if this vector field was just an example from the paper!!
# compute vector field F
# unpack
# x = q[0][0]
# y = q[1][0]
# x_d = q_d[0][0]
# y_d = q_d[1][0]
# #
# # compute [taken from paper draft], where r = [1;0] and lambda = 3
# Fx = 2*(x - x_d)**2 - (y - y_d)**2
# Fy = 3*(x - x_d)*(y - y_d)
# F = array([[Fx],[Fy]])
lamb = 3
theta_d = q_d[2][0]
delta_p = q[0:2] - q_d[0:2] # location - location_desired
r = array([[cos(theta_d)],[sin(theta_d)]])
F = lamb*(dot(transpose(r), delta_p)[0][0])*delta_p - r*(dot(transpose(delta_p), delta_p)[0][0]) # should be col vector
print F
return F # col vector
def get_control(self, q, q_d, F, dt):
# I think that this control law is not a function of the vector field, and that it should
# work if F(q) changes
#
# compute control signal u
delta_p = q[0:2] - q_d[0:2] # location - location_desired
self.e_int_w += self.sub_angles(q[2][0],q_d[2][0])*dt # accumulate angular error
self.e_int_u += linalg.norm(delta_p)*dt # accumulate position error
theta = q[2][0]
# unpack gains
k_p_u = self.k_p_u
k_p_w = self.k_p_w
k_i_w = self.k_i_w
k_i_u = self.k_i_u
k_d = self.k_d
Fx = F[0][0]
Fy = F[1][0]
phi = atan2(Fy,Fx)
# backward finite difference for phidot
if self.phi_prev == None: # if this is the first pass through the controller, phi_dot = 0
self.phi_prev = phi
# end if
phi_dot = (phi-self.phi_prev)/dt
self.phi_prev = phi
q_dot = (q-self.q_prev)/dt
self.q_prev = q
# controller
v = -k_p_u*sign( dot(transpose(delta_p), array([[cos(theta)],[sin(theta)]]) )[0][0] )*tanh(linalg.norm(delta_p)**2) - k_i_u*self.e_int_u
w = -k_p_w*self.sub_angles(theta, phi) - k_i_w*self.e_int_w - k_d*phi_dot # k_d determines whether derivative term is used, k_i for i term
u = array([[v], [w]])
print u
return u
def update_state(self, q_d, q, dt):
# x_k+1 = 0
pass
def sub_angles(self, ang1, ang2):
return (ang1 - ang2 + pi)%(2*pi) - pi
# For future:
# pass r vector as parameter
# low pass filtering for derivatives (PD control?) [phidot]
# visual stuff
# global feedback plan is the ref vecf field
# controller is a function of vector field, but you can use a better controller to get better performance
|
bsd-3-clause
| -3,581,265,295,090,304,000 | 33.606838 | 147 | 0.535935 | false | 3.010409 | false | false | false |
tensorflow/datasets
|
tensorflow_datasets/summarization/newsroom.py
|
1
|
4417
|
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NEWSROOM Dataset."""
import json
import os
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
_CITATION = """
@article{Grusky_2018,
title={Newsroom: A Dataset of 1.3 Million Summaries with Diverse Extractive Strategies},
url={http://dx.doi.org/10.18653/v1/n18-1065},
DOI={10.18653/v1/n18-1065},
journal={Proceedings of the 2018 Conference of the North American Chapter of
the Association for Computational Linguistics: Human Language
Technologies, Volume 1 (Long Papers)},
publisher={Association for Computational Linguistics},
author={Grusky, Max and Naaman, Mor and Artzi, Yoav},
year={2018}
}
"""
_DESCRIPTION = """
NEWSROOM is a large dataset for training and evaluating summarization systems.
It contains 1.3 million articles and summaries written by authors and
editors in the newsrooms of 38 major publications.
Dataset features includes:
- text: Input news text.
- summary: Summary for the news.
And additional features:
- title: news title.
- url: url of the news.
- date: date of the article.
- density: extractive density.
- coverage: extractive coverage.
- compression: compression ratio.
- density_bin: low, medium, high.
- coverage_bin: extractive, abstractive.
- compression_bin: low, medium, high.
This dataset can be downloaded upon request. Unzip all the contents
"train.jsonl, dev.jsonl, test.jsonl" to the tfds folder.
"""
_DOCUMENT = "text"
_SUMMARY = "summary"
_ADDITIONAL_TEXT_FEATURES = [
"title", "url", "date", "density_bin", "coverage_bin", "compression_bin"
]
_ADDITIONAL_FLOAT_FEATURES = [
"density",
"coverage",
"compression",
]
class Newsroom(tfds.core.GeneratorBasedBuilder):
"""NEWSROOM Dataset."""
VERSION = tfds.core.Version("1.0.0")
MANUAL_DOWNLOAD_INSTRUCTIONS = """\
You should download the dataset from https://summari.es/download/
The webpage requires registration.
After downloading, please put dev.jsonl, test.jsonl and train.jsonl
files in the manual_dir.
"""
def _info(self):
features = {
k: tfds.features.Text()
for k in [_DOCUMENT, _SUMMARY] + _ADDITIONAL_TEXT_FEATURES
}
features.update({
k: tfds.features.Tensor(shape=[], dtype=tf.float32)
for k in _ADDITIONAL_FLOAT_FEATURES
})
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict(features),
supervised_keys=(_DOCUMENT, _SUMMARY),
homepage="https://summari.es",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={
"input_file": os.path.join(dl_manager.manual_dir, "train.jsonl")
},
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs={
"input_file": os.path.join(dl_manager.manual_dir, "dev.jsonl")
},
),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
gen_kwargs={
"input_file": os.path.join(dl_manager.manual_dir, "test.jsonl")
},
),
]
def _generate_examples(self, input_file=None):
"""Yields examples."""
with tf.io.gfile.GFile(input_file) as f:
for i, line in enumerate(f):
d = json.loads(line)
# fields are "url", "archive", "title", "date", "text",
# "compression_bin", "density_bin", "summary", "density",
# "compression', "coverage", "coverage_bin",
yield i, {
k: d[k] for k in [_DOCUMENT, _SUMMARY] + _ADDITIONAL_TEXT_FEATURES +
_ADDITIONAL_FLOAT_FEATURES
}
|
apache-2.0
| 6,486,932,188,809,753,000 | 31.007246 | 91 | 0.655649 | false | 3.5535 | false | false | false |
Azure/azure-sdk-for-python
|
sdk/servicebus/azure-servicebus/tests/livetest/test_errors.py
|
1
|
1336
|
import logging
from uamqp import errors as AMQPErrors, constants as AMQPConstants
from azure.servicebus.exceptions import (
_create_servicebus_exception,
ServiceBusConnectionError,
ServiceBusError
)
def test_link_idle_timeout():
logger = logging.getLogger("testlogger")
amqp_error = AMQPErrors.LinkDetach(AMQPConstants.ErrorCodes.LinkDetachForced, description="Details: AmqpMessageConsumer.IdleTimerExpired: Idle timeout: 00:10:00.")
sb_error = _create_servicebus_exception(logger, amqp_error)
assert isinstance(sb_error, ServiceBusConnectionError)
assert sb_error._retryable
assert sb_error._shutdown_handler
def test_unknown_connection_error():
logger = logging.getLogger("testlogger")
amqp_error = AMQPErrors.AMQPConnectionError(AMQPConstants.ErrorCodes.UnknownError)
sb_error = _create_servicebus_exception(logger, amqp_error)
    assert isinstance(sb_error, ServiceBusConnectionError)
assert sb_error._retryable
assert sb_error._shutdown_handler
amqp_error = AMQPErrors.AMQPError(AMQPConstants.ErrorCodes.UnknownError)
sb_error = _create_servicebus_exception(logger, amqp_error)
    assert not isinstance(sb_error, ServiceBusConnectionError)
    assert isinstance(sb_error, ServiceBusError)
assert not sb_error._retryable
assert sb_error._shutdown_handler
|
mit
| 5,185,030,741,937,892,000 | 39.484848 | 167 | 0.776946 | false | 3.742297 | false | false | false |
codywilbourn/streamparse
|
streamparse/run.py
|
1
|
1924
|
"""
Helper script for starting up bolts and spouts.
"""
import argparse
import importlib
import os
import sys
from pystorm.component import _SERIALIZERS
RESOURCES_PATH = 'resources'
def main():
"""main entry point for Python bolts and spouts"""
parser = argparse.ArgumentParser(description='Run a bolt/spout class',
epilog='This is internal to streamparse '
'and is used to run spout and bolt '
'classes via ``python -m '
'streamparse.run <class name>``.')
parser.add_argument('target_class', help='The bolt/spout class to start.')
parser.add_argument('-s', '--serializer',
help='The serialization protocol to use to talk to '
'Storm.',
choices=_SERIALIZERS.keys(),
default='json')
# Storm sends everything as one string, which is not great
if len(sys.argv) == 2:
sys.argv = [sys.argv[0]] + sys.argv[1].split()
args = parser.parse_args()
mod_name, cls_name = args.target_class.rsplit('.', 1)
# Add current directory to sys.path so imports will work
import_path = os.getcwd() # Storm <= 1.0.2
if RESOURCES_PATH in next(os.walk(import_path))[1] and \
os.path.isfile(os.path.join(import_path,
RESOURCES_PATH,
mod_name.replace('.', os.path.sep) + '.py')):
import_path = os.path.join(import_path,
RESOURCES_PATH) # Storm >= 1.0.3
sys.path.append(import_path)
# Import module
mod = importlib.import_module(mod_name)
# Get class from module and run it
cls = getattr(mod, cls_name)
cls(serializer=args.serializer).run()
if __name__ == '__main__':
main()
|
apache-2.0
| 6,012,932,336,202,372,000 | 38.265306 | 80 | 0.539501 | false | 4.042017 | false | false | false |
uni2u/neutron
|
neutron/db/metering/metering_db.py
|
1
|
10691
|
# Copyright (C) 2013 eNovance SAS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy import sql
from neutron.api.rpc.agentnotifiers import metering_rpc_agent_api
from neutron.common import constants
from neutron.db import common_db_mixin as base_db
from neutron.db import l3_db
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import metering
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
LOG = logging.getLogger(__name__)
class MeteringLabelRule(model_base.BASEV2, models_v2.HasId):
direction = sa.Column(sa.Enum('ingress', 'egress',
name='meteringlabels_direction'))
remote_ip_prefix = sa.Column(sa.String(64))
metering_label_id = sa.Column(sa.String(36),
sa.ForeignKey("meteringlabels.id",
ondelete="CASCADE"),
nullable=False)
excluded = sa.Column(sa.Boolean, default=False, server_default=sql.false())
class MeteringLabel(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(1024))
rules = orm.relationship(MeteringLabelRule, backref="label",
cascade="delete", lazy="joined")
routers = orm.relationship(
l3_db.Router,
primaryjoin="MeteringLabel.tenant_id==Router.tenant_id",
foreign_keys='MeteringLabel.tenant_id',
uselist=True)
shared = sa.Column(sa.Boolean, default=False, server_default=sql.false())
class MeteringDbMixin(metering.MeteringPluginBase,
base_db.CommonDbMixin):
def __init__(self):
self.meter_rpc = metering_rpc_agent_api.MeteringAgentNotifyAPI()
def _make_metering_label_dict(self, metering_label, fields=None):
res = {'id': metering_label['id'],
'name': metering_label['name'],
'description': metering_label['description'],
'shared': metering_label['shared'],
'tenant_id': metering_label['tenant_id']}
return self._fields(res, fields)
def create_metering_label(self, context, metering_label):
m = metering_label['metering_label']
tenant_id = self._get_tenant_id_for_create(context, m)
with context.session.begin(subtransactions=True):
metering_db = MeteringLabel(id=uuidutils.generate_uuid(),
description=m['description'],
tenant_id=tenant_id,
name=m['name'],
shared=m['shared'])
context.session.add(metering_db)
return self._make_metering_label_dict(metering_db)
def delete_metering_label(self, context, label_id):
with context.session.begin(subtransactions=True):
try:
label = self._get_by_id(context, MeteringLabel, label_id)
except orm.exc.NoResultFound:
raise metering.MeteringLabelNotFound(label_id=label_id)
context.session.delete(label)
def get_metering_label(self, context, label_id, fields=None):
try:
metering_label = self._get_by_id(context, MeteringLabel, label_id)
except orm.exc.NoResultFound:
raise metering.MeteringLabelNotFound(label_id=label_id)
return self._make_metering_label_dict(metering_label, fields)
def get_metering_labels(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'metering_labels', limit,
marker)
return self._get_collection(context, MeteringLabel,
self._make_metering_label_dict,
filters=filters, fields=fields,
sorts=sorts,
limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
def _make_metering_label_rule_dict(self, metering_label_rule, fields=None):
res = {'id': metering_label_rule['id'],
'metering_label_id': metering_label_rule['metering_label_id'],
'direction': metering_label_rule['direction'],
'remote_ip_prefix': metering_label_rule['remote_ip_prefix'],
'excluded': metering_label_rule['excluded']}
return self._fields(res, fields)
def get_metering_label_rules(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'metering_label_rules',
limit, marker)
return self._get_collection(context, MeteringLabelRule,
self._make_metering_label_rule_dict,
filters=filters, fields=fields,
sorts=sorts,
limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
def get_metering_label_rule(self, context, rule_id, fields=None):
try:
metering_label_rule = self._get_by_id(context,
MeteringLabelRule, rule_id)
except orm.exc.NoResultFound:
raise metering.MeteringLabelRuleNotFound(rule_id=rule_id)
return self._make_metering_label_rule_dict(metering_label_rule, fields)
def _validate_cidr(self, context, label_id, remote_ip_prefix,
direction, excluded):
r_ips = self.get_metering_label_rules(context,
filters={'metering_label_id':
label_id,
'direction':
[direction],
'excluded':
[excluded]},
fields=['remote_ip_prefix'])
cidrs = [r['remote_ip_prefix'] for r in r_ips]
new_cidr_ipset = netaddr.IPSet([remote_ip_prefix])
if (netaddr.IPSet(cidrs) & new_cidr_ipset):
raise metering.MeteringLabelRuleOverlaps(
remote_ip_prefix=remote_ip_prefix)
def create_metering_label_rule(self, context, metering_label_rule):
m = metering_label_rule['metering_label_rule']
with context.session.begin(subtransactions=True):
label_id = m['metering_label_id']
ip_prefix = m['remote_ip_prefix']
direction = m['direction']
excluded = m['excluded']
self._validate_cidr(context, label_id, ip_prefix, direction,
excluded)
metering_db = MeteringLabelRule(id=uuidutils.generate_uuid(),
metering_label_id=label_id,
direction=direction,
excluded=m['excluded'],
remote_ip_prefix=ip_prefix)
context.session.add(metering_db)
return self._make_metering_label_rule_dict(metering_db)
def delete_metering_label_rule(self, context, rule_id):
with context.session.begin(subtransactions=True):
try:
rule = self._get_by_id(context, MeteringLabelRule, rule_id)
except orm.exc.NoResultFound:
raise metering.MeteringLabelRuleNotFound(rule_id=rule_id)
context.session.delete(rule)
def _get_metering_rules_dict(self, metering_label):
rules = []
for rule in metering_label.rules:
rule_dict = self._make_metering_label_rule_dict(rule)
rules.append(rule_dict)
return rules
def _make_router_dict(self, router):
res = {'id': router['id'],
'name': router['name'],
'tenant_id': router['tenant_id'],
'admin_state_up': router['admin_state_up'],
'status': router['status'],
'gw_port_id': router['gw_port_id'],
constants.METERING_LABEL_KEY: []}
return res
def _process_sync_metering_data(self, context, labels):
all_routers = None
routers_dict = {}
for label in labels:
if label.shared:
if not all_routers:
all_routers = self._get_collection_query(context,
l3_db.Router)
routers = all_routers
else:
routers = label.routers
for router in routers:
router_dict = routers_dict.get(
router['id'],
self._make_router_dict(router))
rules = self._get_metering_rules_dict(label)
data = {'id': label['id'], 'rules': rules}
router_dict[constants.METERING_LABEL_KEY].append(data)
routers_dict[router['id']] = router_dict
return routers_dict.values()
def get_sync_data_metering(self, context, label_id=None, router_ids=None):
labels = context.session.query(MeteringLabel)
if label_id:
labels = labels.filter(MeteringLabel.id == label_id)
elif router_ids:
labels = (labels.join(MeteringLabel.routers).
filter(l3_db.Router.id.in_(router_ids)))
return self._process_sync_metering_data(context, labels)
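# Editor's sketch (assumption, not part of the original module): the overlap
# test used by _validate_cidr() relies on netaddr IPSet intersection.  The two
# prefixes below are hypothetical and only illustrate when
# MeteringLabelRuleOverlaps would be raised.
def _cidr_overlap_example():
    existing = netaddr.IPSet(['10.0.0.0/24'])
    candidate = netaddr.IPSet(['10.0.0.128/25'])
    # A non-empty intersection means the new rule overlaps an existing one.
    return bool(existing & candidate)  # True -> overlap -> rule rejected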
|
apache-2.0
| -8,351,349,277,479,719,000 | 42.283401 | 79 | 0.549341 | false | 4.356561 | false | false | false |
Martin456/eve
|
eve/render.py
|
1
|
15213
|
# -*- coding: utf-8 -*-
"""
eve.render
~~~~~~~~~~
Implements proper, automated rendering for Eve responses.
:copyright: (c) 2017 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
import re
import time
import datetime
import simplejson as json
from werkzeug import utils
from functools import wraps
from eve.methods.common import get_rate_limit
from eve.utils import date_to_str, date_to_rfc1123, config, \
debug_error_message
from flask import make_response, request, Response, current_app as app, abort
try:
from collections import OrderedDict # noqa
except ImportError:
# Python 2.6 needs this back-port
from ordereddict import OrderedDict
# mapping between supported mime types and render functions.
_MIME_TYPES = [
{'mime': ('application/json',), 'renderer': 'render_json', 'tag': 'JSON'},
{'mime': ('application/xml', 'text/xml', 'application/x-xml',),
'renderer': 'render_xml', 'tag': 'XML'}]
def raise_event(f):
""" Raises both general and resource-level events after the decorated
    function has been executed. Passes both the flask.request object and the
response payload to the callback.
.. versionchanged:: 0.2
Renamed 'on_<method>' hooks to 'on_post_<method>' for coherence
with new 'on_pre_<method>' hooks.
.. versionchanged:: 0.1.0
Support for PUT.
.. versionchanged:: 0.0.9
To emphasize the fact that they are tied to a method, in `on_<method>`
events, <method> is now uppercase.
.. versionadded:: 0.0.6
"""
@wraps(f)
def decorated(*args, **kwargs):
r = f(*args, **kwargs)
method = request.method
if method in ('GET', 'POST', 'PATCH', 'DELETE', 'PUT'):
event_name = 'on_post_' + method
resource = args[0] if args else None
# general hook
getattr(app, event_name)(resource, request, r)
if resource:
# resource hook
getattr(app, event_name + '_' + resource)(request, r)
return r
return decorated
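# Editor's sketch (not part of the original module): how an application would
# typically attach a callback that this decorator ends up firing.  The Eve app
# object and the "contacts" resource name are illustrative assumptions.
def _example_post_get_hook(resource, request, payload):
    # Called by raise_event() after every GET, with the same three arguments
    # that the decorator passes to getattr(app, 'on_post_GET').
    payload.headers['X-Hook-Seen'] = resource or 'home'
# app = eve.Eve()
# app.on_post_GET += _example_post_get_hook                    # general hook
# app.on_post_GET_contacts += lambda request, payload: None    # resource hook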
@raise_event
def send_response(resource, response):
""" Prepares the response for the client.
:param resource: the resource involved.
:param response: either a flask.Response object or a tuple. The former will
simply be forwarded to the client. If the latter a proper
response will be prepared, according to directives within
the tuple.
.. versionchanged:: 0.0.6
Support for HEAD requests.
.. versionchanged:: 0.0.5
Handling the case where response is None. Happens when the request
method is 'OPTIONS', most likely while processing a CORS 'preflight'
request.
.. versionchanged:: 0.0.4
Now a simple dispatcher. Moved the response preparation logic to
``_prepare_response``.
"""
if isinstance(response, Response):
return response
else:
return _prepare_response(resource, *response if response else [None])
def _prepare_response(resource, dct, last_modified=None, etag=None,
status=200, headers=None):
""" Prepares the response object according to the client request and
available renderers, making sure that all accessory directives (caching,
etag, last-modified) are present.
:param resource: the resource involved.
:param dct: the dict that should be sent back as a response.
:param last_modified: Last-Modified header value.
:param etag: ETag header value.
:param status: response status.
.. versionchanged:: 0.7
Add support for regexes in X_DOMAINS_RE. Closes #660, #974.
ETag value now surrounded by double quotes. Closes #794.
.. versionchanged:: 0.6
JSONP Support.
.. versionchanged:: 0.4
Support for optional extra headers.
Fix #381. 500 instead of 404 if CORS is enabled.
.. versionchanged:: 0.3
Support for X_MAX_AGE.
.. versionchanged:: 0.1.0
Support for optional HATEOAS.
.. versionchanged:: 0.0.9
Support for Python 3.3.
.. versionchanged:: 0.0.7
Support for Rate-Limiting.
.. versionchanged:: 0.0.6
Support for HEAD requests.
.. versionchanged:: 0.0.5
Support for Cross-Origin Resource Sharing (CORS).
.. versionadded:: 0.0.4
"""
if request.method == 'OPTIONS':
resp = app.make_default_options_response()
else:
# obtain the best match between client's request and available mime
# types, along with the corresponding render function.
mime, renderer = _best_mime()
# invoke the render function and obtain the corresponding rendered item
rendered = globals()[renderer](dct)
# JSONP
if config.JSONP_ARGUMENT:
jsonp_arg = config.JSONP_ARGUMENT
if jsonp_arg in request.args and 'json' in mime:
callback = request.args.get(jsonp_arg)
rendered = "%s(%s)" % (callback, rendered)
# build the main wsgi response object
resp = make_response(rendered, status)
resp.mimetype = mime
# extra headers
if headers:
for header, value in headers:
if header != 'Content-Type':
resp.headers.add(header, value)
# cache directives
if request.method in ('GET', 'HEAD'):
if resource:
cache_control = config.DOMAIN[resource]['cache_control']
expires = config.DOMAIN[resource]['cache_expires']
else:
cache_control = config.CACHE_CONTROL
expires = config.CACHE_EXPIRES
if cache_control:
resp.headers.add('Cache-Control', cache_control)
if expires:
resp.expires = time.time() + expires
# etag and last-modified
if etag:
resp.headers.add('ETag', '"' + etag + '"')
if last_modified:
resp.headers.add('Last-Modified', date_to_rfc1123(last_modified))
# CORS
origin = request.headers.get('Origin')
if origin and (config.X_DOMAINS or config.X_DOMAINS_RE):
if config.X_DOMAINS is None:
domains = []
elif isinstance(config.X_DOMAINS, str):
domains = [config.X_DOMAINS]
else:
domains = config.X_DOMAINS
if config.X_DOMAINS_RE is None:
domains_re = []
elif isinstance(config.X_DOMAINS_RE, str):
domains_re = [config.X_DOMAINS_RE]
else:
domains_re = config.X_DOMAINS_RE
# precompile regexes and ignore invalids
domains_re_compiled = []
for domain_re in domains_re:
try:
domains_re_compiled.append(re.compile(domain_re))
except re.error:
continue
if config.X_HEADERS is None:
headers = []
elif isinstance(config.X_HEADERS, str):
headers = [config.X_HEADERS]
else:
headers = config.X_HEADERS
if config.X_EXPOSE_HEADERS is None:
expose_headers = []
elif isinstance(config.X_EXPOSE_HEADERS, str):
expose_headers = [config.X_EXPOSE_HEADERS]
else:
expose_headers = config.X_EXPOSE_HEADERS
# The only accepted value for Access-Control-Allow-Credentials header
# is "true"
allow_credentials = config.X_ALLOW_CREDENTIALS is True
methods = app.make_default_options_response().headers.get('allow', '')
if '*' in domains:
resp.headers.add('Access-Control-Allow-Origin', origin)
resp.headers.add('Vary', 'Origin')
elif any(origin == domain for domain in domains):
resp.headers.add('Access-Control-Allow-Origin', origin)
elif any(domain.match(origin) for domain in domains_re_compiled):
resp.headers.add('Access-Control-Allow-Origin', origin)
else:
resp.headers.add('Access-Control-Allow-Origin', '')
resp.headers.add('Access-Control-Allow-Headers', ', '.join(headers))
resp.headers.add('Access-Control-Expose-Headers',
', '.join(expose_headers))
resp.headers.add('Access-Control-Allow-Methods', methods)
resp.headers.add('Access-Control-Max-Age', config.X_MAX_AGE)
if allow_credentials:
resp.headers.add('Access-Control-Allow-Credentials', "true")
# Rate-Limiting
limit = get_rate_limit()
if limit and limit.send_x_headers:
resp.headers.add('X-RateLimit-Remaining', str(limit.remaining))
resp.headers.add('X-RateLimit-Limit', str(limit.limit))
resp.headers.add('X-RateLimit-Reset', str(limit.reset))
return resp
def _best_mime():
""" Returns the best match between the requested mime type and the
ones supported by Eve. Along with the mime, also the corresponding
    render function is returned.
.. versionchanged:: 0.3
Support for optional renderers via XML and JSON configuration keywords.
"""
supported = []
renders = {}
for mime in _MIME_TYPES:
# only mime types that have not been disabled via configuration
if app.config.get(mime['tag'], True):
for mime_type in mime['mime']:
supported.append(mime_type)
renders[mime_type] = mime['renderer']
if len(supported) == 0:
abort(500, description=debug_error_message(
'Configuration error: no supported mime types')
)
best_match = request.accept_mimetypes.best_match(supported) or \
supported[0]
return best_match, renders[best_match]
def render_json(data):
""" JSON render function
.. versionchanged:: 0.2
Json encoder class is now inferred by the active data layer, allowing
for customized, data-aware JSON encoding.
.. versionchanged:: 0.1.0
Support for optional HATEOAS.
"""
set_indent = None
# make pretty prints available
if 'GET' in request.method and 'pretty' in request.args:
set_indent = 4
return json.dumps(data, indent=set_indent, cls=app.data.json_encoder_class,
sort_keys=config.JSON_SORT_KEYS)
def render_xml(data):
""" XML render function.
:param data: the data stream to be rendered as xml.
.. versionchanged:: 0.4
Support for pagination info (_meta).
.. versionchanged:: 0.2
Use the new ITEMS configuration setting.
.. versionchanged:: 0.1.0
Support for optional HATEOAS.
.. versionchanged:: 0.0.3
Support for HAL-like hyperlinks and resource descriptors.
"""
if isinstance(data, list):
data = {config.ITEMS: data}
xml = ''
if data:
xml += xml_root_open(data)
xml += xml_add_links(data)
xml += xml_add_meta(data)
xml += xml_add_items(data)
xml += xml_root_close()
return xml
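# Editor's sketch (assumption, not part of the original module): what
# render_xml() produces for a plain dict with no _links/_meta, given the
# default config (config.ITEMS == '_items').  Keys come out in sorted order
# because xml_dict() sorts them.
def _render_xml_example():
    # render_xml({'title': 'hello', 'id': 7}) would yield:
    return '<resource><id>7</id><title>hello</title></resource>'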
def xml_root_open(data):
""" Returns the opening tag for the XML root node. If the datastream
    includes information about resource endpoints (href, title), they will
be added as node attributes. The resource endpoint is then removed to allow
for further processing of the datastream.
:param data: the data stream to be rendered as xml.
.. versionchanged:: 0.1.0
Support for optional HATEOAS.
.. versionchanged:: 0.0.6
Links are now properly escaped.
.. versionadded:: 0.0.3
"""
links = data.get(config.LINKS)
href = title = ''
if links and 'self' in links:
self_ = links.pop('self')
href = ' href="%s" ' % utils.escape(self_['href'])
if 'title' in self_:
title = ' title="%s" ' % self_['title']
return '<resource%s%s>' % (href, title)
def xml_add_meta(data):
""" Returns a meta node with page, total, max_results fields.
:param data: the data stream to be rendered as xml.
.. versionchanged:: 0.5
Always return ordered items (#441).
.. versionadded:: 0.4
"""
xml = ''
meta = []
if data.get(config.META):
ordered_meta = OrderedDict(sorted(data[config.META].items()))
for name, value in ordered_meta.items():
meta.append('<%s>%d</%s>' % (name, value, name))
if meta:
xml = '<%s>%s</%s>' % (config.META, ''.join(meta), config.META)
return xml
def xml_add_links(data):
""" Returns as many <link> nodes as there are in the datastream. The links
are then removed from the datastream to allow for further processing.
:param data: the data stream to be rendered as xml.
.. versionchanged:: 0.5
Always return ordered items (#441).
.. versionchanged:: 0.0.6
Links are now properly escaped.
.. versionadded:: 0.0.3
"""
xml = ''
chunk = '<link rel="%s" href="%s" title="%s" />'
links = data.pop(config.LINKS, {})
ordered_links = OrderedDict(sorted(links.items()))
for rel, link in ordered_links.items():
if isinstance(link, list):
xml += ''.join([chunk % (rel, utils.escape(d['href']),
utils.escape(d['title'])) for d in link])
else:
xml += ''.join(chunk % (rel, utils.escape(link['href']),
link['title']))
return xml
def xml_add_items(data):
""" When this function is called the datastream can only contain a `_items`
    list, or a dictionary. If a list, each item is a resource which is rendered as
XML. If a dictionary, it will be rendered as XML.
:param data: the data stream to be rendered as xml.
.. versionadded:: 0.0.3
"""
try:
xml = ''.join([xml_item(item) for item in data[config.ITEMS]])
except:
xml = xml_dict(data)
return xml
def xml_item(item):
""" Represents a single resource (member of a collection) as XML.
:param data: the data stream to be rendered as xml.
.. versionadded:: 0.0.3
"""
xml = xml_root_open(item)
xml += xml_add_links(item)
xml += xml_dict(item)
xml += xml_root_close()
return xml
def xml_root_close():
""" Returns the closing tag of the XML root node.
.. versionadded:: 0.0.3
"""
return '</resource>'
def xml_dict(data):
""" Renders a dict as XML.
:param data: the data stream to be rendered as xml.
.. versionchanged:: 0.5
Always return ordered items (#441).
.. versionchanged:: 0.2
Leaf values are now properly escaped.
.. versionadded:: 0.0.3
"""
xml = ''
ordered_items = OrderedDict(sorted(data.items()))
for k, v in ordered_items.items():
if isinstance(v, datetime.datetime):
v = date_to_str(v)
elif isinstance(v, (datetime.time, datetime.date)):
v = v.isoformat()
if not isinstance(v, list):
v = [v]
for value in v:
if isinstance(value, dict):
links = xml_add_links(value)
xml += "<%s>" % k
xml += xml_dict(value)
xml += links
xml += "</%s>" % k
else:
xml += "<%s>%s</%s>" % (k, utils.escape(value), k)
return xml
|
bsd-3-clause
| -5,451,845,274,095,905,000 | 30.69375 | 79 | 0.602248 | false | 4.021412 | true | false | false |
nwspeete-ibm/openwhisk
|
core/pythonAction/cli/wskrule.py
|
1
|
5355
|
#
# Copyright 2015-2016 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import httplib
from wskitem import Item
from wskutil import addAuthenticatedCommand, apiBase, parseQName, request, responseError
import urllib
class Rule(Item):
def __init__(self):
super(Rule, self).__init__('rule', 'rules')
def getItemSpecificCommands(self, parser, props):
subcmd = parser.add_parser('create', help='create new rule')
subcmd.add_argument('name', help='the name of the rule')
subcmd.add_argument('trigger', help='the trigger')
subcmd.add_argument('action', help='the action')
addAuthenticatedCommand(subcmd, props)
subcmd.add_argument('--shared', nargs='?', const='yes', choices=['yes', 'no'], help='shared action (default: private)')
subcmd.add_argument('--enable', help='enable rule after creating it', action='store_true', default=False)
subcmd = parser.add_parser('delete', help='delete %s' % self.name)
subcmd.add_argument('name', help='the name of the %s' % self.name)
addAuthenticatedCommand(subcmd, props)
subcmd.add_argument('--disable', help='automatically disable rule before deleting it', action='store_true', default=False)
subcmd = parser.add_parser('update', help='update an existing rule')
subcmd.add_argument('name', help='the name of the rule')
subcmd.add_argument('trigger', help='the trigger')
subcmd.add_argument('action', help='the action')
addAuthenticatedCommand(subcmd, props)
subcmd.add_argument('--shared', nargs='?', const='yes', choices=['yes', 'no'], help='shared action (default: private)')
subcmd = parser.add_parser('enable', help='enable rule')
subcmd.add_argument('name', help='the name of the rule')
addAuthenticatedCommand(subcmd, props)
subcmd = parser.add_parser('disable', help='disable rule')
subcmd.add_argument('name', help='the name of the rule')
addAuthenticatedCommand(subcmd, props)
subcmd = parser.add_parser('status', help='get rule status')
subcmd.add_argument('name', help='the name of the rule')
addAuthenticatedCommand(subcmd, props)
self.addDefaultCommands(parser, props, ['get', 'list'])
def cmd(self, args, props):
if args.subcmd == 'enable':
return self.setState(args, props, True)
elif args.subcmd == 'disable':
return self.setState(args, props, False)
elif args.subcmd == 'status':
return self.getState(args, props)
else:
return super(Rule, self).cmd(args, props)
def create(self, args, props, update):
payload = { 'trigger': args.trigger, 'action': args.action }
if args.shared:
self.addPublish(payload, args)
code = self.put(args, props, update, json.dumps(payload))
if (code == 0 and 'enable' in args and args.enable):
return self.setState(args, props, True)
else:
return code
def preProcessDelete(self, args, props):
if (args.disable):
return self.setState(args, props, False)
else:
return 0
def setState(self, args, props, enable):
namespace, pname = parseQName(args.name, props)
desc = 'active' if enable else 'inactive'
status = json.dumps({ 'status': desc })
url = '%(apibase)s/namespaces/%(namespace)s/rules/%(name)s' % {
'apibase': apiBase(props),
'namespace': urllib.quote(namespace),
'name': self.getSafeName(pname)
}
headers = {
'Content-Type': 'application/json'
}
res = request('POST', url, status, headers, auth=args.auth, verbose=args.verbose)
if res.status == httplib.OK:
print 'ok: rule %(name)s is %(desc)s' % {'desc': desc, 'name': args.name}
return 0
elif res.status == httplib.ACCEPTED:
desc = 'activating' if enable else 'deactivating'
print 'ok: rule %(name)s is %(desc)s' % {'desc': desc, 'name': args.name}
return 0
else:
return responseError(res)
def getState(self, args, props):
namespace, pname = parseQName(args.name, props)
url = '%(apibase)s/namespaces/%(namespace)s/rules/%(name)s' % {
'apibase': apiBase(props),
'namespace': urllib.quote(namespace),
'name': self.getSafeName(pname)
}
res = request('GET', url, auth=args.auth, verbose=args.verbose)
if res.status == httplib.OK:
result = json.loads(res.read())
print 'ok: rule %(name)s is %(status)s' % { 'name': args.name, 'status': result['status'] }
return 0
else:
return responseError(res)
|
apache-2.0
| 3,352,856,487,309,075,500 | 40.835938 | 130 | 0.620355 | false | 3.908759 | false | false | false |
erudit/zenon
|
tests/unit/apps/public/search/test_utils.py
|
1
|
3018
|
import pytest
from django.http.request import QueryDict
from apps.public.search.forms import SearchForm
from apps.public.search.utils import get_search_elements
class FakeSolrData:
def get_search_form_facets(self):
return {
'disciplines': [],
'languages': [
('fr', 'Français'),
('en', 'Anglais'),
],
'journals': [
('foo', 'Foo'),
('bar', 'Bar'),
],
}
@pytest.mark.parametrize('queryparams, expected_elements', [
('', []),
# Languages
('languages=es', []),
('languages=fr', [{
'field': 'Langues',
'operator': 'AND',
'str': " ET (Langues : ['Français'])",
'term': "['Français']",
}]),
('languages=fr&languages=en', [{
'field': 'Langues',
'operator': 'AND',
'str': " ET (Langues : ['Anglais', 'Français'])",
'term': "['Anglais', 'Français']",
}]),
('languages=fr&languages=en&languages=es', [{
'field': 'Langues',
'operator': 'AND',
'str': " ET (Langues : ['Anglais', 'Français'])",
'term': "['Anglais', 'Français']",
}]),
# Journals
    ('journals=baz', []),
('journals=foo', [{
'field': 'Revues',
'operator': 'AND',
'str': " ET (Revues : ['Foo'])",
'term': "['Foo']",
}]),
('journals=foo&journals=bar', [{
'field': 'Revues',
'operator': 'AND',
'str': " ET (Revues : ['Bar', 'Foo'])",
'term': "['Bar', 'Foo']",
}]),
('journals=foo&journals=bar&journals=baz', [{
'field': 'Revues',
'operator': 'AND',
'str': " ET (Revues : ['Bar', 'Foo'])",
'term': "['Bar', 'Foo']",
}]),
# Languages & Journals
    ('languages=es&journals=baz', []),
('languages=fr&journals=foo', [{
'field': 'Langues',
'operator': 'AND',
'str': " ET (Langues : ['Français'])",
'term': "['Français']",
}, {
'field': 'Revues',
'operator': 'AND',
'str': " ET (Revues : ['Foo'])",
'term': "['Foo']",
}]),
('languages=fr&languages=en&journals=foo&journals=bar', [{
'field': 'Langues',
'operator': 'AND',
'str': " ET (Langues : ['Anglais', 'Français'])",
'term': "['Anglais', 'Français']",
}, {
'field': 'Revues',
'operator': 'AND',
'str': " ET (Revues : ['Bar', 'Foo'])",
'term': "['Bar', 'Foo']",
}]),
])
def test_get_search_elements(queryparams, expected_elements, monkeypatch):
monkeypatch.setattr(SearchForm, 'solr_data', FakeSolrData())
elements = get_search_elements(
QueryDict(queryparams),
SearchForm(),
)
base_elements = [
{
'term': '*',
'field': 'Tous les champs',
'operator': None,
'str': '(Tous les champs : *)',
},
]
assert base_elements + expected_elements == elements
|
gpl-3.0
| -2,187,604,127,645,334,000 | 27.638095 | 74 | 0.460925 | false | 3.359777 | false | false | false |
psf/black
|
tests/data/cantfit.py
|
1
|
4107
|
# long variable name
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = 0
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = 1 # with a comment
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = [
1, 2, 3
]
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = function()
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = function(
arg1, arg2, arg3
)
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = function(
[1, 2, 3], arg1, [1, 2, 3], arg2, [1, 2, 3], arg3
)
# long function name
normal_name = but_the_function_name_is_now_ridiculously_long_and_it_is_still_super_annoying()
normal_name = but_the_function_name_is_now_ridiculously_long_and_it_is_still_super_annoying(
arg1, arg2, arg3
)
normal_name = but_the_function_name_is_now_ridiculously_long_and_it_is_still_super_annoying(
[1, 2, 3], arg1, [1, 2, 3], arg2, [1, 2, 3], arg3
)
# long arguments
normal_name = normal_function_name(
"but with super long string arguments that on their own exceed the line limit so there's no way it can ever fit",
"eggs with spam and eggs and spam with eggs with spam and eggs and spam with eggs with spam and eggs and spam with eggs",
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it=0,
)
string_variable_name = (
"a string that is waaaaaaaayyyyyyyy too long, even in parens, there's nothing you can do" # noqa
)
for key in """
hostname
port
username
""".split():
if key in self.connect_kwargs:
raise ValueError(err.format(key))
concatenated_strings = "some strings that are " "concatenated implicitly, so if you put them on separate " "lines it will fit"
del concatenated_strings, string_variable_name, normal_function_name, normal_name, need_more_to_make_the_line_long_enough
# output
# long variable name
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = (
0
)
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = (
1 # with a comment
)
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = [
1,
2,
3,
]
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = (
function()
)
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = function(
arg1, arg2, arg3
)
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = function(
[1, 2, 3], arg1, [1, 2, 3], arg2, [1, 2, 3], arg3
)
# long function name
normal_name = (
but_the_function_name_is_now_ridiculously_long_and_it_is_still_super_annoying()
)
normal_name = (
but_the_function_name_is_now_ridiculously_long_and_it_is_still_super_annoying(
arg1, arg2, arg3
)
)
normal_name = (
but_the_function_name_is_now_ridiculously_long_and_it_is_still_super_annoying(
[1, 2, 3], arg1, [1, 2, 3], arg2, [1, 2, 3], arg3
)
)
# long arguments
normal_name = normal_function_name(
"but with super long string arguments that on their own exceed the line limit so"
" there's no way it can ever fit",
"eggs with spam and eggs and spam with eggs with spam and eggs and spam with eggs"
" with spam and eggs and spam with eggs",
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it=0,
)
string_variable_name = "a string that is waaaaaaaayyyyyyyy too long, even in parens, there's nothing you can do" # noqa
for key in """
hostname
port
username
""".split():
if key in self.connect_kwargs:
raise ValueError(err.format(key))
concatenated_strings = (
"some strings that are "
"concatenated implicitly, so if you put them on separate "
"lines it will fit"
)
del (
concatenated_strings,
string_variable_name,
normal_function_name,
normal_name,
need_more_to_make_the_line_long_enough,
)
|
mit
| 2,691,835,164,074,515,500 | 37.383178 | 126 | 0.702703 | false | 2.651388 | false | false | false |
siusoon/Python_SPEEDSHOW
|
checkinactivity.py
|
1
|
1953
|
# Logic: detect idle time (mouse and keyboard) on Mac OS and force the browser back to a specific website.
# To run the program, the administrator needs to set sleeptime, temp_idle_value and url below.
# Firefox should be the default browser; hide all other applications and the dock on the screen.
# Disable the screensaver.
# Install a fullscreen Firefox add-on (to keep the browser in full-screen mode): https://addons.mozilla.org/en-US/firefox/addon/resizeit/contribute/roadblock/?src=search&version=3.6.2
# Put the file on the desktop, then open a terminal, go to the Desktop directory and type: python [filename]
import sys,os,time
import webbrowser
# define variable
sleeptime = 10 #how frequent to get the idle time
temp_idle_value = 60 #Duration to reset browser (in sec format)
url = "http://www.facebook.com"
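# Editor's sketch (hypothetical helper, not used by the original script): the
# ioreg/perl pipeline in main_loop() prints the HID idle time as seconds with
# a fractional part, e.g. "73.456".  This shows just the parsing step.
def parse_idle_seconds(ioreg_output):
    # "73.456" -> 73 whole seconds of inactivity
    return int(ioreg_output.strip().split(".")[0])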
def main_loop():
    global status  # 'status' is assigned below, so it must be declared global to avoid UnboundLocalError
    while 1:
        time.sleep(sleeptime)
        cmd = "ioreg -c IOHIDSystem | perl -ane 'if (/Idle/) {$idle=(pop @F)/1000000000; print $idle}'"
        result = os.popen(cmd)  # use popen instead of os.system so the perl script's output can be read
        output = result.read()
        temp_idle = int(output.split(".")[0])
        #print(output)
        if temp_idle > temp_idle_value and status == 0:
            resetBrowser()
            status = 1
        elif temp_idle > temp_idle_value and status == 1:
            print("do nothing")
        else:
            print("continue")
            status = 0
def resetBrowser():
    # os.system returns the raw exit status; 256 (grep exit code 1) means no firefox process was found,
    # otherwise grep prints the matching process line and returns 0
    result1 = os.system("ps axo pid,command | grep '[f]irefox'")
    if result1 == 256:
        print("firefox is inactive -> start a browser")
        webbrowser.open_new(url)
    else:
        print("firefox is running -> kill it, then reopen it on the target url")
        os.system("killall -9 firefox")
        time.sleep(5)
        # use the firefox controller explicitly rather than whatever the default browser is
        webbrowser.get("firefox").open(url, new=0, autoraise=False)
if __name__ == '__main__':
try:
status = 0
main_loop()
except KeyboardInterrupt: #control+c in mac
print ('stop')
sys.exit(0)
|
unlicense
| -1,857,018,180,525,671,700 | 31.016393 | 199 | 0.680492 | false | 3.222772 | false | false | false |
almeidapaulopt/frappe
|
frappe/utils/boilerplate.py
|
1
|
9260
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, print_function
from six.moves import input
import frappe, os, re
from frappe.utils import touch_file, encode, cstr
def make_boilerplate(dest, app_name):
if not os.path.exists(dest):
print("Destination directory does not exist")
return
# app_name should be in snake_case
app_name = frappe.scrub(app_name)
hooks = frappe._dict()
hooks.app_name = app_name
app_title = hooks.app_name.replace("_", " ").title()
for key in ("App Title (default: {0})".format(app_title),
"App Description", "App Publisher", "App Email",
"App Icon (default 'octicon octicon-file-directory')",
"App Color (default 'grey')",
"App License (default 'MIT')"):
hook_key = key.split(" (")[0].lower().replace(" ", "_")
hook_val = None
while not hook_val:
hook_val = cstr(input(key + ": "))
if not hook_val:
defaults = {
"app_title": app_title,
"app_icon": "octicon octicon-file-directory",
"app_color": "grey",
"app_license": "MIT"
}
if hook_key in defaults:
hook_val = defaults[hook_key]
if hook_key=="app_name" and hook_val.lower().replace(" ", "_") != hook_val:
print("App Name must be all lowercase and without spaces")
hook_val = ""
elif hook_key=="app_title" and not re.match("^(?![\W])[^\d_\s][\w -]+$", hook_val, re.UNICODE):
print("App Title should start with a letter and it can only consist of letters, numbers, spaces and underscores")
hook_val = ""
hooks[hook_key] = hook_val
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, frappe.scrub(hooks.app_title)),
with_init=True)
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "templates"), with_init=True)
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "www"))
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "templates",
"pages"), with_init=True)
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "templates",
"includes"))
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "config"), with_init=True)
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "public",
"css"))
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "public",
"js"))
with open(os.path.join(dest, hooks.app_name, hooks.app_name, "__init__.py"), "w") as f:
f.write(frappe.as_unicode(init_template))
with open(os.path.join(dest, hooks.app_name, "MANIFEST.in"), "w") as f:
f.write(frappe.as_unicode(manifest_template.format(**hooks)))
with open(os.path.join(dest, hooks.app_name, ".gitignore"), "w") as f:
f.write(frappe.as_unicode(gitignore_template.format(app_name = hooks.app_name)))
with open(os.path.join(dest, hooks.app_name, "setup.py"), "w") as f:
f.write(frappe.as_unicode(setup_template.format(**hooks)))
with open(os.path.join(dest, hooks.app_name, "requirements.txt"), "w") as f:
f.write("frappe")
with open(os.path.join(dest, hooks.app_name, "README.md"), "w") as f:
f.write(frappe.as_unicode("## {0}\n\n{1}\n\n#### License\n\n{2}".format(hooks.app_title,
hooks.app_description, hooks.app_license)))
with open(os.path.join(dest, hooks.app_name, "license.txt"), "w") as f:
f.write(frappe.as_unicode("License: " + hooks.app_license))
with open(os.path.join(dest, hooks.app_name, hooks.app_name, "modules.txt"), "w") as f:
f.write(frappe.as_unicode(hooks.app_title))
with open(os.path.join(dest, hooks.app_name, hooks.app_name, "hooks.py"), "w") as f:
f.write(frappe.as_unicode(hooks_template.format(**hooks)))
touch_file(os.path.join(dest, hooks.app_name, hooks.app_name, "patches.txt"))
with open(os.path.join(dest, hooks.app_name, hooks.app_name, "config", "desktop.py"), "w") as f:
f.write(frappe.as_unicode(desktop_template.format(**hooks)))
with open(os.path.join(dest, hooks.app_name, hooks.app_name, "config", "docs.py"), "w") as f:
f.write(frappe.as_unicode(docs_template.format(**hooks)))
print("'{app}' created at {path}".format(app=app_name, path=os.path.join(dest, app_name)))
manifest_template = """include MANIFEST.in
include requirements.txt
include *.json
include *.md
include *.py
include *.txt
recursive-include {app_name} *.css
recursive-include {app_name} *.csv
recursive-include {app_name} *.html
recursive-include {app_name} *.ico
recursive-include {app_name} *.js
recursive-include {app_name} *.json
recursive-include {app_name} *.md
recursive-include {app_name} *.png
recursive-include {app_name} *.py
recursive-include {app_name} *.svg
recursive-include {app_name} *.txt
recursive-exclude {app_name} *.pyc"""
init_template = """# -*- coding: utf-8 -*-
from __future__ import unicode_literals
__version__ = '0.0.1'
"""
hooks_template = """# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import __version__ as app_version
app_name = "{app_name}"
app_title = "{app_title}"
app_publisher = "{app_publisher}"
app_description = "{app_description}"
app_icon = "{app_icon}"
app_color = "{app_color}"
app_email = "{app_email}"
app_license = "{app_license}"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
# app_include_css = "/assets/{app_name}/css/{app_name}.css"
# app_include_js = "/assets/{app_name}/js/{app_name}.js"
# include js, css files in header of web template
# web_include_css = "/assets/{app_name}/css/{app_name}.css"
# web_include_js = "/assets/{app_name}/js/{app_name}.js"
# include js in page
# page_js = {{"page" : "public/js/file.js"}}
# include js in doctype views
# doctype_js = {{"doctype" : "public/js/doctype.js"}}
# doctype_list_js = {{"doctype" : "public/js/doctype_list.js"}}
# doctype_tree_js = {{"doctype" : "public/js/doctype_tree.js"}}
# doctype_calendar_js = {{"doctype" : "public/js/doctype_calendar.js"}}
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {{
# "Role": "home_page"
# }}
# Website user home page (by function)
# get_website_user_home_page = "{app_name}.utils.get_home_page"
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "{app_name}.install.before_install"
# after_install = "{app_name}.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "{app_name}.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {{
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }}
#
# has_permission = {{
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }}
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {{
# "*": {{
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }}
# }}
# Scheduled Tasks
# ---------------
# scheduler_events = {{
# "all": [
# "{app_name}.tasks.all"
# ],
# "daily": [
# "{app_name}.tasks.daily"
# ],
# "hourly": [
# "{app_name}.tasks.hourly"
# ],
# "weekly": [
# "{app_name}.tasks.weekly"
# ]
# "monthly": [
# "{app_name}.tasks.monthly"
# ]
# }}
# Testing
# -------
# before_tests = "{app_name}.install.before_tests"
# Overriding Whitelisted Methods
# ------------------------------
#
# override_whitelisted_methods = {{
# "frappe.desk.doctype.event.event.get_events": "{app_name}.event.get_events"
# }}
"""
desktop_template = """# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{{
"module_name": "{app_title}",
"color": "{app_color}",
"icon": "{app_icon}",
"type": "module",
"label": _("{app_title}")
}}
]
"""
setup_template = """# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from pip.req import parse_requirements
import re, ast
# get version from __version__ variable in {app_name}/__init__.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('{app_name}/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
requirements = parse_requirements("requirements.txt", session="")
setup(
name='{app_name}',
version=version,
description='{app_description}',
author='{app_publisher}',
author_email='{app_email}',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=[str(ir.req) for ir in requirements],
dependency_links=[str(ir._link) for ir in requirements if ir._link]
)
"""
gitignore_template = """.DS_Store
*.pyc
*.egg-info
*.swp
tags
{app_name}/docs/current"""
docs_template = '''"""
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/{app_name}"
# docs_base_url = "https://[org_name].github.io/{app_name}"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
context.brand_html = "{app_title}"
'''
|
mit
| 8,096,362,969,132,424,000 | 28.303797 | 117 | 0.653888 | false | 2.873991 | true | false | false |
mrakitin/coding
|
route_tables_update/add_route.py
|
1
|
1944
|
__author__ = 'mrakitin'
import os
import socket
import subprocess
address_to_route = None
qsh_ip = '192.12.90.0'
qsh_ip_mask = '255.255.255.0'
# Find IP address provided by SBU VPN:
ips_dict = {}
for i in socket.getaddrinfo(socket.gethostname(), None):
ip = i[4][0]
try:
socket.inet_aton(ip)
ipv4 = True
except socket.error:
ipv4 = False
if ipv4:
key, none, value = socket.gethostbyaddr(ip)
ips_dict[key] = value[0]
for key in ips_dict.keys():
if key.find('stonybrook.edu') >= 0:
address_to_route = ips_dict[key]
break
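# Editor's sketch (hypothetical helper, not used by the script): the loop above
# simply picks the address whose reverse-resolved name contains
# 'stonybrook.edu' out of a mapping like
# {'host.stonybrook.edu': '10.1.2.3', 'other.local': '192.168.0.5'}.
def find_vpn_address(ips_by_hostname):
    for hostname, ip in ips_by_hostname.items():
        if hostname.find('stonybrook.edu') >= 0:
            return ip
    return None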
# Delete the route first in case it existed:
try:
cmd_del = ['route', 'delete', qsh_ip]
out_del = subprocess.check_output(cmd_del, stderr=subprocess.STDOUT)
status_del = out_del.strip()
if status_del.find('OK') >= 0:
print 'Route %s has been deleted.' % (qsh_ip)
elif status_del.find('The route deletion failed: Element not found.') >= 0:
# print 'WARNING! ' + status_add
pass
else:
print 'Unknown error occurred during deletion.'
except:
print 'WARNING! Route %s has not been deleted.' % (qsh_ip)
# Add a new route if the VPN-provided address is found:
if address_to_route:
cmd = ['route', 'add', qsh_ip, 'mask', qsh_ip_mask, address_to_route]
try:
print 'The following command will be executed:\n\n\t%s\n' % (' '.join(cmd))
out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
status_add = out.strip()
if status_add.find('OK') >= 0:
print 'Addition was successful.\n'
os.system('route print')
elif status_add.find('The route addition failed') >= 0:
print 'ERROR! ' + status_add
else:
print 'Unknown error occurred during addition.'
except:
pass
else:
print 'ERROR! The VPN interface is not connected. The route to %s has not been added.' % (qsh_ip)
|
gpl-2.0
| 4,063,799,232,763,459,000 | 28.907692 | 101 | 0.609053 | false | 3.334477 | false | false | false |
moio/spacewalk
|
backend/server/rhnServer/server_wrapper.py
|
1
|
3967
|
#
# Copyright (c) 2008--2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
# this file implements the ServerWrapper class, which takes care
# of all the load and save functions for misc tables associated
# with a server (such as packages, hardware, history)
#
# the server.Server class inherits this ServerWrapper class
#
from server_hardware import Hardware
from server_packages import Packages
from server_history import History
from server_solarispatches import SolarisPatches
from rhn.UserDictCase import UserDictCase
from spacewalk.server import rhnSQL
class ServerWrapper(Packages, Hardware, History, SolarisPatches):
""" This is a middle class that ties all the subclasses together, plus it
provides a cleaner way to keep all the wrapper functions in one place.
The main Server class is based on this one and it looks a little bit
cleaner that way.
"""
def __init__(self):
self.server = UserDictCase()
Packages.__init__(self)
History.__init__(self)
Hardware.__init__(self)
SolarisPatches.__init__(self)
def __repr__(self):
return "<%s instance>" % (self.__class__,)
def set_value(self, name, value):
""" update a value in self.server """
if name is None or value is None:
return -1
self.server[name] = value
return 0
###
### PACKAGES
###
def add_package(self, entry):
""" Wrappers for the similar functions from Packages class that supplementaly
require a valid sysid.
"""
if entry['name'].startswith("patch-solaris"):
SolarisPatches.add_patch(self, self.server.get("id"), entry)
return Packages.add_package(self, self.server.get("id"), entry)
def delete_package(self, entry):
return Packages.delete_package(self, self.server.get("id"), entry)
def dispose_packages(self):
SolarisPatches.dispose_patched_packages(self, self.server["id"])
return Packages.dispose_packages(self, self.server["id"])
def save_packages(self, schedule=1):
""" wrapper for the Packages.save_packages_byid() which requires the sysid """
SolarisPatches.save_patched_packages(self, self.server["id"])
ret = self.save_packages_byid(self.server["id"], schedule=schedule)
# this function is primarily called from outside
# so we have to commit here
rhnSQL.commit()
return ret
###
### HARDWARE
###
def delete_hardware(self):
""" Wrappers for the similar functions from Hardware class """
return Hardware.delete_hardware(self, self.server.get("id"))
def save_hardware(self):
""" wrapper for the Hardware.save_hardware_byid() which requires the sysid """
ret = self.save_hardware_byid(self.server["id"])
# this function is primarily called from outside
# so we have to commit here
rhnSQL.commit()
return ret
def reload_hardware(self):
""" wrapper for the Hardware.reload_hardware_byid() which requires the sysid """
ret = self.reload_hardware_byid(self.server["id"])
return ret
###
### HISTORY
###
def save_history(self):
ret = self.save_history_byid(self.server["id"])
# this function is primarily called from outside
# so we have to commit here
rhnSQL.commit()
return ret
|
gpl-2.0
| -8,041,331,313,070,005,000 | 35.394495 | 88 | 0.663222 | false | 4.060389 | false | false | false |
SalesforceEng/Providence
|
Empire/cloudservices/github/GithubAPI.py
|
1
|
5227
|
'''
Copyright (c) 2015, Salesforce.com, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of Salesforce.com nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
"""
GitHubCommit - Convert JSON message from GH into an object representative of the commit
GitHubRepo - Represent the basic information needed to interact with a GH repo
GitHubAPI - Send and receive data from the REST API
"""
# TODO:
# - Pagination the github way
# - Groups / Users / Org
# - Security
# - Stale branches
import sys
import os.path
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), "../../")))
from creds.credentials import Credentials
import requests
import urllib
import datetime
import pytz
import json
import logging
logger = logging.getLogger('GithubAPI')
__copyright__ = "2015 Salesforce.com, Inc"
__status__ = "Prototype"
class GithubAPI(object):
def __init__(self, server, credentials):
self.server = server
self.credentials = credentials
self._no_more_requests_until = None
def fetch(self, url, params=None, post_data=None):
        if self._no_more_requests_until:
            if datetime.datetime.utcnow() < self._no_more_requests_until:
                # still inside the rate-limit back-off window: skip this request
                return None
            self._no_more_requests_until = None
r = None
if post_data:
raise NotImplementedError("GithubAPI Post unimplemented")
return
else:
if self.credentials:
r = requests.get(url, params=params, headers={ "Authorization":self.credentials.authorizationHeaderValue() })
else:
r = requests.get(url, params=params)
if r.headers.get('x-ratelimit-remaining'):
remaining_requests = int(r.headers['x-ratelimit-remaining'])
if (remaining_requests == 0):
logger.warning("Github API hit the rate limiter")
self._no_more_requests_until = datetime.datetime.fromtimestamp(float(r.headers.get('x-ratelimit-reset')));
return None
if(r.ok):
results = r.json()
return results
logger.warning("Github fetch of %s failed\n%s\n",r.url,r.text)
return None
def fetch_raw(self, url):
        if self._no_more_requests_until:
            if datetime.datetime.utcnow() < self._no_more_requests_until:
                # still inside the rate-limit back-off window: skip this request
                return None
            self._no_more_requests_until = None
r = None
if self.credentials:
r = requests.get(url, headers={ "Authorization":self.credentials.authorizationHeaderValue(),"Accept":"application/vnd.github.v3.raw" })
else:
r = requests.get(url)
if r.headers.get('x-ratelimit-remaining'):
remaining_requests = int(r.headers['x-ratelimit-remaining'])
if (remaining_requests == 0):
logger.warning("Github API hit the rate limiter")
self._no_more_requests_until = datetime.datetime.fromtimestamp(float(r.headers.get('x-ratelimit-reset')));
return None
if(r.ok):
results = r.text
return results
logger.warning("Github fetch of %s failed\n%s\n",r.url,r.text)
return None
def baseURL(self, org_name=None, repo_name=None):
baseurl = 'https://%s' % (self.server)
if repo_name is not None:
baseurl += "/repos/%s/%s" % (org_name, repo_name)
elif org_name is not None:
baseurl += "/orgs/%s" % (org_name)
return baseurl
if __name__ == "__main__":
creds = Credentials("github")
git = GithubAPI(GithubRepo('api.github.com', 'salesforce','providence'), creds)
bugs = git.issues(params={"labels":"bug,security","state":"all","since":"2015-02-01T00:00:00Z"})
import json
print json.dumps(bugs, indent=2)
if bugs:
for bug in bugs:
print bug["title"], bug["state"]
|
bsd-3-clause
| -3,992,668,494,168,806,400 | 44.850877 | 755 | 0.664817 | false | 4.184948 | false | false | false |
NMisko/monkalot
|
bot/commands/speech_cleverbot.py
|
1
|
1114
|
"""Commands: "@[botname] XXXXX"."""
from cleverwrap import CleverWrap
from bot.commands.abstract.speech import Speech, Chatbot
class CleverbotSpeech(Speech):
"""Natural language by using cleverbot."""
def __init__(self, bot):
"""Initialize variables."""
if "cleverbot_key" in bot.config and bot.config["cleverbot_key"] != "":
self.chatbot = Cleverbot(bot.config["cleverbot_key"])
else:
raise RuntimeError(
"Cleverbot instantiated, but no key set in configuration."
)
class Cleverbot(Chatbot):
"""A replier that uses cleverbot."""
name = "cleverbot"
def __init__(self, key):
self.cleverbot_key = key
self.conversations = {}
def get_reply(self, message, name):
"""Get a reply from cleverbot api."""
if name not in self.conversations:
self.conversations[name] = CleverWrap(self.cleverbot_key, name)
return self.conversations[name].say(message)
def get_name(self):
"""Returns name or short description for this bot."""
return self.name
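def _example_conversation(api_key):
    """Editor's sketch (not part of the original module): the api_key and the
    user name below are placeholders.  Each distinct name keeps its own
    CleverWrap conversation, so context is tracked per user."""
    replier = Cleverbot(api_key)
    return replier.get_reply("hello there", "example_user")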
|
mit
| 3,116,736,034,507,032,600 | 29.108108 | 79 | 0.618492 | false | 3.664474 | false | false | false |
mahmoud/wapiti
|
wapiti/operations/feedback.py
|
1
|
1807
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from base import QueryOperation
from params import SingleParam, StaticParam
from utils import OperationExample
#class GetFeedbackV4(QueryOperation):
# """
# This API is no longer available (on en or de wikipedia). As of
# 3/9/2013, this API does not even appear in the documentation at:
# http://en.wikipedia.org/w/api.php
# """
# field_prefix = 'af'
# input_field = SingleParam('pageid')
# fields = [StaticParam('list', 'articlefeedback')]
# output_type = list
#
# def extract_results(self, query_resp):
# ret = query_resp['articlefeedback'][0].get('ratings', [])
# return ret
_FV5_KNOWN_FILTERS = ['*', 'featured', 'unreviewed', 'helpful', 'unhelpful',
'flagged', 'useful', 'resolved', 'noaction',
'inappropriate', 'archived', 'allcomment', 'hidden',
'requested', 'declined', 'oversighted', 'all']
class GetFeedbackV5(QueryOperation):
"""
article feedback v5 breaks standards in a couple ways.
* the various v5 APIs use different prefixes (af/afvf)
* it doesn't put its results under 'query', requiring a custom
post_process_response()
"""
field_prefix = 'afvf'
input_field = SingleParam('pageid')
fields = [StaticParam('list', 'articlefeedbackv5-view-feedback'),
SingleParam('filter', default='featured')]
output_type = list
examples = [OperationExample('604727')]
def post_process_response(self, response):
if not response.results:
return {}
return dict(response.results)
def extract_results(self, query_resp):
count = query_resp['articlefeedbackv5-view-feedback']['count']
return ['TODO'] * int(count)
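# Editor's sketch (hypothetical response shape, inferred from the code above):
# the v5 API reports only a count for the selected filter, so extract_results
# currently just sizes a placeholder list.
def _extract_results_example():
    fake_resp = {'articlefeedbackv5-view-feedback': {'count': '3'}}
    count = fake_resp['articlefeedbackv5-view-feedback']['count']
    return ['TODO'] * int(count)  # -> ['TODO', 'TODO', 'TODO']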
|
bsd-3-clause
| -2,002,363,507,429,485,600 | 33.75 | 76 | 0.630327 | false | 3.725773 | false | false | false |
irinabov/debian-qpid-python
|
qpid/selector.py
|
2
|
6778
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import time, errno, os, atexit, traceback
from compat import select, SelectError, set, selectable_waiter, format_exc
from threading import Thread, Lock
from logging import getLogger
from qpid.messaging import InternalError
def _stack(skip=0):
return ("".join(traceback.format_stack()[:-(1+skip)])).strip()
class SelectorStopped(InternalError):
def __init__(self, msg, where=None):
InternalError.__init__(self, text=msg)
self.where = _stack(1)
def _check(ex, skip=0):
if ex:
log.error("illegal use of qpid.messaging at:\n%s\n%s" % (_stack(1), ex))
where = getattr(ex, 'where')
if where:
log.error("qpid.messaging was previously stopped at:\n%s\n%s" % (where, ex))
raise ex
log = getLogger("qpid.messaging")
class Acceptor:
def __init__(self, sock, handler):
self.sock = sock
self.handler = handler
def fileno(self):
return self.sock.fileno()
def reading(self):
return True
def writing(self):
return False
def readable(self):
sock, addr = self.sock.accept()
self.handler(sock)
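# Editor's sketch (assumption, not part of the original module): the minimal
# interface the Selector run loop expects from a registered object.  Acceptor
# above implements only the read-only half; a full selectable also answers
# writing()/writeable() and may report a wakeup deadline via timing().  The
# real driver objects additionally expose a .connection attribute used by
# Selector.dead().
class ExampleSelectable:
  def __init__(self, sock):
    self.sock = sock
  def fileno(self):
    return self.sock.fileno()      # used by select()
  def reading(self):
    return True                    # interested in readability
  def writing(self):
    return False                   # not currently waiting to write
  def timing(self):
    return None                    # no timeout wakeup requested
  def readable(self):
    self.sock.recv(4096)           # called when select() reports data
  def writeable(self):
    pass                           # called when the socket is writable
  def timeout(self):
    pass                           # called when the timing() deadline passes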
class Selector:
lock = Lock()
DEFAULT = None
_current_pid = None
@staticmethod
def default():
Selector.lock.acquire()
try:
if Selector.DEFAULT is None or Selector._current_pid != os.getpid():
# If we forked, mark the existing Selector dead.
if Selector.DEFAULT is not None:
log.warning("process forked, child must not use parent qpid.messaging")
Selector.DEFAULT.dead(SelectorStopped("forked child using parent qpid.messaging"))
sel = Selector()
sel.start()
atexit.register(sel.stop)
Selector.DEFAULT = sel
Selector._current_pid = os.getpid()
return Selector.DEFAULT
finally:
Selector.lock.release()
def __init__(self):
self.selectables = set()
self.reading = set()
self.writing = set()
self.waiter = selectable_waiter()
self.reading.add(self.waiter)
self.stopped = False
self.exception = None
def wakeup(self):
_check(self.exception)
self.waiter.wakeup()
def register(self, selectable):
self.selectables.add(selectable)
self.modify(selectable)
def _update(self, selectable):
if selectable.reading():
self.reading.add(selectable)
else:
self.reading.discard(selectable)
if selectable.writing():
self.writing.add(selectable)
else:
self.writing.discard(selectable)
return selectable.timing()
def modify(self, selectable):
self._update(selectable)
self.wakeup()
def unregister(self, selectable):
self.reading.discard(selectable)
self.writing.discard(selectable)
self.selectables.discard(selectable)
self.wakeup()
def start(self):
_check(self.exception)
self.thread = Thread(target=self.run)
self.thread.setDaemon(True)
self.thread.start();
def run(self):
try:
while not self.stopped and not self.exception:
wakeup = None
for sel in self.selectables.copy():
t = self._update(sel)
if t is not None:
if wakeup is None:
wakeup = t
else:
wakeup = min(wakeup, t)
rd = []
wr = []
ex = []
while True:
try:
if wakeup is None:
timeout = None
else:
timeout = max(0, wakeup - time.time())
rd, wr, ex = select(self.reading, self.writing, (), timeout)
break
except SelectError, e:
# Repeat the select call if we were interrupted.
if e[0] == errno.EINTR:
continue
else:
# unrecoverable: promote to outer try block
raise
for sel in wr:
if sel.writing():
sel.writeable()
for sel in rd:
if sel.reading():
sel.readable()
now = time.time()
for sel in self.selectables.copy():
w = sel.timing()
if w is not None and now > w:
sel.timeout()
except Exception, e:
log.error("qpid.messaging thread died: %s" % e)
self.exception = SelectorStopped(str(e))
self.exception = self.exception or self.stopped
self.dead(self.exception or SelectorStopped("qpid.messaging thread died: reason unknown"))
def stop(self, timeout=None):
"""Stop the selector and wait for it's thread to exit. It cannot be re-started"""
if self.thread and not self.stopped:
self.stopped = SelectorStopped("qpid.messaging thread has been stopped")
self.wakeup()
self.thread.join(timeout)
def dead(self, e):
"""Mark the Selector as dead if it is stopped for any reason. Ensure there any future
attempt to use the selector or any of its connections will throw an exception.
"""
self.exception = e
try:
for sel in self.selectables.copy():
c = sel.connection
for ssn in c.sessions.values():
for l in ssn.senders + ssn.receivers:
disable(l, self.exception)
disable(ssn, self.exception)
disable(c, self.exception)
except Exception, e:
log.error("error stopping qpid.messaging (%s)\n%s", self.exception, format_exc())
try:
self.waiter.close()
except Exception, e:
log.error("error stopping qpid.messaging (%s)\n%s", self.exception, format_exc())
# Disable an object to avoid hangs due to forked mutex locks or a stopped selector thread
import inspect
def disable(obj, exception):
assert(exception)
# Replace methods to raise exception or be a no-op
for m in inspect.getmembers(
obj, predicate=lambda m: inspect.ismethod(m) and not inspect.isbuiltin(m)):
name = m[0]
if name in ["close", "detach", "detach_all"]: # No-ops for these
setattr(obj, name, lambda *args, **kwargs: None)
else: # Raise exception for all others
setattr(obj, name, lambda *args, **kwargs: _check(exception, 1))
|
apache-2.0
| -213,931,680,745,974,880 | 29.949772 | 94 | 0.638241 | false | 3.884241 | false | false | false |
django-inplaceedit/django-inplaceedit
|
inplaceeditform/settings.py
|
1
|
2626
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2013 by Yaco Sistemas <[email protected]>
# 2015 by Pablo Martín <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
INPLACEEDIT_EDIT_EMPTY_VALUE = (getattr(settings, 'INPLACEEDIT_EDIT_EMPTY_VALUE', None) and
_(settings.INPLACEEDIT_EDIT_EMPTY_VALUE) or _('Doubleclick to edit'))
INPLACEEDIT_AUTO_SAVE = getattr(settings, 'INPLACEEDIT_AUTO_SAVE', False)
INPLACEEDIT_EVENT = getattr(settings, 'INPLACEEDIT_EVENT', 'dblclick')
INPLACEEDIT_DISABLE_CLICK = getattr(settings, 'INPLACEEDIT_DISABLE_CLICK', True)
INPLACEEDIT_EDIT_MESSAGE_TRANSLATION = (getattr(settings, 'INPLACEEDIT_EDIT_MESSAGE_TRANSLATION', None) and
_(settings.INPLACEEDIT_EDIT_MESSAGE_TRANSLATION) or _('Write a translation'))
INPLACEEDIT_SUCCESS_TEXT = (getattr(settings, 'INPLACEEDIT_SUCCESS_TEXT', None) and
_(settings.INPLACEEDIT_SUCCESS_TEXT) or _('Successfully saved'))
INPLACEEDIT_UNSAVED_TEXT = (getattr(settings, 'INPLACEEDIT_UNSAVED_TEXT', None) and
_(settings.INPLACEEDIT_UNSAVED_TEXT) or _('You have unsaved changes!'))
INPLACE_ENABLE_CLASS = getattr(settings, 'INPLACE_ENABLE_CLASS', 'enable')
DEFAULT_INPLACE_EDIT_OPTIONS = getattr(settings, "DEFAULT_INPLACE_EDIT_OPTIONS", {})
DEFAULT_INPLACE_EDIT_OPTIONS_ONE_BY_ONE = getattr(settings, 'DEFAULT_INPLACE_EDIT_OPTIONS_ONE_BY_ONE', False)
ADAPTOR_INPLACEEDIT_EDIT = getattr(settings, 'ADAPTOR_INPLACEEDIT_EDIT', None)
ADAPTOR_INPLACEEDIT = getattr(settings, 'ADAPTOR_INPLACEEDIT', {})
INPLACE_GET_FIELD_URL = getattr(settings, 'INPLACE_GET_FIELD_URL', None)
INPLACE_SAVE_URL = getattr(settings, 'INPLACE_SAVE_URL', None)
INPLACE_FIELD_TYPES = getattr(settings, 'INPLACE_FIELD_TYPES', 'input, select, textarea')
INPLACE_FOCUS_WHEN_EDITING = getattr(settings, 'INPLACE_FOCUS_WHEN_EDITING', True)
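# Example override in a project's settings.py (hypothetical values, not the defaults above):
#
#     INPLACEEDIT_EVENT = 'click'          # edit on single click instead of double-click
#     INPLACEEDIT_AUTO_SAVE = True         # enable auto-save
#     INPLACE_ENABLE_CLASS = 'editable'    # CSS class that marks inline-editable elements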
|
lgpl-3.0
| 6,846,049,735,984,986,000 | 60.046512 | 117 | 0.726476 | false | 3.490691 | false | false | false |
kohnle-lernmodule/palama
|
exe/engine/idevicestore.py
|
1
|
36506
|
# ===========================================================================
# eXe
# Copyright 2004-2006, University of Auckland
# Copyright 2004-2008 eXe Project, http://eXeLearning.org/
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ===========================================================================
"""
The collection of iDevices available
"""
from exe.engine import persist
from exe.engine.idevice import Idevice
from exe.engine.field import TextAreaField, FeedbackField
from nevow.flat import flatten
import imp
import sys
import logging
import copy
log = logging.getLogger(__name__)
# ===========================================================================
class IdeviceStore:
"""
The collection of iDevices available
"""
def __init__(self, config):
"""
Initialize
"""
self._nextIdeviceId = 0
self.config = config
self.extended = []
self.generic = []
self.listeners = []
        #JR: Add a list that will hold every available iDevice
self.factoryiDevices = []
def getNewIdeviceId(self):
"""
Returns an iDevice Id which is unique
"""
id_ = unicode(self._nextIdeviceId)
self._nextIdeviceId += 1
return id_
def isGeneric(self, idevice):
"""
        Return True if the iDevice is an instance of GenericIdevice
"""
from exe.engine.genericidevice import GenericIdevice
if isinstance(idevice, GenericIdevice):
return True
else:
return False
def getIdevices(self):
"""
Get the idevices which are applicable for the current node of
this package
In future the idevices which are returned will depend
upon the pedagogical template we are using
"""
return self.extended + self.generic
def getFactoryIdevices(self):
"""
        JR: Return all factory iDevices
"""
return self.factoryiDevices
def __delGenericIdevice(self, idevice):
"""
Delete a generic idevice from idevicestore.
"""
idevice_remove = None
exist = False
for i in self.generic:
if idevice.title == i.title:
idevice_remove = i
exist = True
break
if exist:
self.generic.remove(idevice_remove)
            #JR: Notify the listeners that this iDevice is no longer available
for listener in self.listeners:
listener.delIdevice(idevice_remove)
def __delExtendedIdevice(self, idevice):
"""
Delete an extended idevice from idevicestore.
"""
idevice_remove = None
exist = False
for i in self.extended:
if idevice.title == i.title:
idevice_remove = i
exist = True
break
if exist:
self.extended.remove(idevice_remove)
            #JR: Notify the listeners that this iDevice is no longer available
for listener in self.listeners:
listener.delIdevice(idevice_remove)
def delIdevice(self, idevice):
"""
        JR: Delete an idevice
"""
if not self.isGeneric(idevice):
idevice_remove = None
exist = False
for i in self.extended:
if i.title == idevice.title:
idevice_remove = i
exist = True
break
if exist:
self.__delExtendedIdevice(idevice_remove)
else:
idevice_remove = None
exist = False
for i in self.generic:
if i.title == idevice.title:
idevice_remove = i
exist = True
break
if exist:
self.__delGenericIdevice(idevice_remove)
def register(self, listener):
"""
Register a listener who is interested in changes to the
IdeviceStore.
Created for IdevicePanes, but could be used by other objects
"""
self.listeners.append(listener)
def addIdevice(self, idevice):
"""
Register another iDevice as available
"""
if not self.isGeneric(idevice):
exist = False
for i in self.extended:
if i.title == idevice.title:
exist = True
if not exist:
self.extended.append(idevice)
idevice.edit = True
for listener in self.listeners:
listener.addIdevice(idevice)
else:
exist = False
for i in self.generic:
if i.title == idevice.title:
exist = True
if not exist:
self.generic.append(idevice)
idevice.edit = True
for listener in self.listeners:
listener.addIdevice(idevice)
def load(self):
"""
Load iDevices from the generic iDevices and the extended ones
"""
log.debug("load iDevices")
idevicesDir = self.config.configDir/'idevices'
if not idevicesDir.exists():
idevicesDir.mkdir()
self.__loadExtended()
self.__loadGeneric()
        #JR: notify the listeners about the extended iDevices
for listener in self.listeners:
for idevice in self.getIdevices():
listener.addIdevice(idevice)
def __getIdevicesFPD(self):
"""
        JR: This function returns the FPD iDevices
"""
from exe.engine.reflectionfpdidevice import ReflectionfpdIdevice
from exe.engine.reflectionfpdmodifidevice import ReflectionfpdmodifIdevice
from exe.engine.clozefpdidevice import ClozefpdIdevice
from exe.engine.clozelangfpdidevice import ClozelangfpdIdevice
from exe.engine.parasabermasfpdidevice import ParasabermasfpdIdevice
from exe.engine.debesconocerfpdidevice import DebesconocerfpdIdevice
from exe.engine.citasparapensarfpdidevice import CitasparapensarfpdIdevice
from exe.engine.recomendacionfpdidevice import RecomendacionfpdIdevice
from exe.engine.verdaderofalsofpdidevice import VerdaderofalsofpdIdevice
from exe.engine.seleccionmultiplefpdidevice import SeleccionmultiplefpdIdevice
from exe.engine.eleccionmultiplefpdidevice import EleccionmultiplefpdIdevice
from exe.engine.casopracticofpdidevice import CasopracticofpdIdevice
from exe.engine.ejercicioresueltofpdidevice import EjercicioresueltofpdIdevice
from exe.engine.destacadofpdidevice import DestacadofpdIdevice
from exe.engine.orientacionesalumnadofpdidevice import OrientacionesalumnadofpdIdevice
from exe.engine.orientacionestutoriafpdidevice import OrientacionestutoriafpdIdevice
from exe.engine.freetextfpdidevice import FreeTextfpdIdevice
idevices_FPD = []
idevices_FPD.append(ReflectionfpdIdevice())
idevices_FPD.append(ReflectionfpdmodifIdevice())
idevices_FPD.append(ClozefpdIdevice())
idevices_FPD.append(ClozelangfpdIdevice())
idevices_FPD.append(ParasabermasfpdIdevice())
idevices_FPD.append(DebesconocerfpdIdevice())
idevices_FPD.append(CitasparapensarfpdIdevice())
idevices_FPD.append(RecomendacionfpdIdevice())
idevices_FPD.append(VerdaderofalsofpdIdevice())
idevices_FPD.append(SeleccionmultiplefpdIdevice())
idevices_FPD.append(EleccionmultiplefpdIdevice())
idevices_FPD.append(CasopracticofpdIdevice())
idevices_FPD.append(EjercicioresueltofpdIdevice())
idevices_FPD.append(DestacadofpdIdevice())
#idevices_FPD.append(CorreccionfpdIdevice())
idevices_FPD.append(OrientacionesalumnadofpdIdevice())
idevices_FPD.append(OrientacionestutoriafpdIdevice())
idevices_FPD.append(FreeTextfpdIdevice())
return idevices_FPD
def __getFactoryExtendediDevices(self):
"""
        JR: Load the factory iDevices
"""
from exe.engine.freetextidevice import FreeTextIdevice
from exe.engine.multimediaidevice import MultimediaIdevice
from exe.engine.reflectionidevice import ReflectionIdevice
from exe.engine.casestudyidevice import CasestudyIdevice
from exe.engine.truefalseidevice import TrueFalseIdevice
# converting ImageWithTextIdevice -> FreeTextIdevice:
#from exe.engine.imagewithtextidevice import ImageWithTextIdevice
#from exe.engine.wikipediaidevice import WikipediaIdevice
from exe.engine.attachmentidevice import AttachmentIdevice
from exe.engine.titleidevice import TitleIdevice
from exe.engine.galleryidevice import GalleryIdevice
from exe.engine.clozeidevice import ClozeIdevice
#from exe.engine.clozelangidevice import ClozelangIdevice
from exe.engine.flashwithtextidevice import FlashWithTextIdevice
from exe.engine.externalurlidevice import ExternalUrlIdevice
from exe.engine.imagemagnifieridevice import ImageMagnifierIdevice
# converting Maths Idevice -> FreeTextIdevice:
#from exe.engine.mathidevice import MathIdevice
from exe.engine.multichoiceidevice import MultichoiceIdevice
#from exe.engine.rssidevice import RssIdevice
from exe.engine.multiselectidevice import MultiSelectIdevice
#from exe.engine.appletidevice import AppletIdevice
from exe.engine.flashmovieidevice import FlashMovieIdevice
from exe.engine.quiztestidevice import QuizTestIdevice
        # JR
        # Required for the FPD
from exe.engine.reflectionfpdidevice import ReflectionfpdIdevice
from exe.engine.reflectionfpdmodifidevice import ReflectionfpdmodifIdevice
from exe.engine.clozefpdidevice import ClozefpdIdevice
from exe.engine.clozelangfpdidevice import ClozelangfpdIdevice
from exe.engine.parasabermasfpdidevice import ParasabermasfpdIdevice
from exe.engine.debesconocerfpdidevice import DebesconocerfpdIdevice
from exe.engine.citasparapensarfpdidevice import CitasparapensarfpdIdevice
from exe.engine.recomendacionfpdidevice import RecomendacionfpdIdevice
from exe.engine.verdaderofalsofpdidevice import VerdaderofalsofpdIdevice
from exe.engine.seleccionmultiplefpdidevice import SeleccionmultiplefpdIdevice
from exe.engine.eleccionmultiplefpdidevice import EleccionmultiplefpdIdevice
from exe.engine.casopracticofpdidevice import CasopracticofpdIdevice
from exe.engine.ejercicioresueltofpdidevice import EjercicioresueltofpdIdevice
from exe.engine.destacadofpdidevice import DestacadofpdIdevice
#from exe.engine.correccionfpdidevice import CorreccionfpdIdevice
from exe.engine.orientacionesalumnadofpdidevice import OrientacionesalumnadofpdIdevice
from exe.engine.orientacionestutoriafpdidevice import OrientacionestutoriafpdIdevice
from exe.engine.freetextfpdidevice import FreeTextfpdIdevice
# eXelearningPlus iDevices
from exe.engine.scormclozeidevice import ScormClozeIdevice
from exe.engine.scormmultiselectidevice import ScormMultiSelectIdevice
from exe.engine.scormdropdownidevice import ScormDropDownIdevice
from exe.engine.scormmulticlozeidevice import ScormMultiClozeIdevice
from exe.engine.opinionidevice import OpinionIdevice
from exe.engine.dropdownidevice import DropDownIdevice
from exe.engine.scormmultiselectindfeedbackidevice import ScormMultiSelectIndFeedbackIdevice
factoryExtendedIdevices = []
factoryExtendedIdevices.append(FreeTextIdevice())
factoryExtendedIdevices.append(MultichoiceIdevice())
factoryExtendedIdevices.append(ReflectionIdevice())
factoryExtendedIdevices.append(CasestudyIdevice())
factoryExtendedIdevices.append(TrueFalseIdevice())
defaultImage = unicode(self.config.webDir / "images" / "sunflowers.jpg")
# converting ImageWithTextIdevice -> FreeTextIdevice:
#factoryExtendedIdevices.append(ImageWithTextIdevice(defaultImage))
factoryExtendedIdevices.append(ImageMagnifierIdevice(defaultImage))
defaultImage = unicode(self.config.webDir / "images" / "sunflowers.jpg")
#defaultSite = 'http://%s.wikipedia.org/' % self.config.locale
#factoryExtendedIdevices.append(WikipediaIdevice(defaultSite))
        #JR: Remove this iDevice from the extended set
#factoryExtendedIdevices.append(AttachmentIdevice())
factoryExtendedIdevices.append(GalleryIdevice())
factoryExtendedIdevices.append(ClozeIdevice())
#factoryExtendedIdevices.append(ClozelangIdevice())
        #JR: Remove this iDevice from the extended set
#factoryExtendedIdevices.append(FlashWithTextIdevice())
factoryExtendedIdevices.append(ExternalUrlIdevice())
# converting Maths Idevice -> FreeTextIdevice:
#factoryExtendedIdevices.append(MathIdevice())
        #JR: Remove this iDevice from the extended set
#factoryExtendedIdevices.append(MultimediaIdevice())
#factoryExtendedIdevices.append(RssIdevice())
factoryExtendedIdevices.append(MultiSelectIdevice())
#factoryExtendedIdevices.append(AppletIdevice())
        #JR: Remove this iDevice from the extended set
#factoryExtendedIdevices.append(FlashMovieIdevice())
#modification lernmodule.net
#factoryExtendedIdevices.append(QuizTestIdevice())
#end modification lernmodule.net
        # JR
        # iDevices for the FPD
factoryExtendedIdevices.append(ReflectionfpdIdevice())
factoryExtendedIdevices.append(ReflectionfpdmodifIdevice())
factoryExtendedIdevices.append(ClozefpdIdevice())
factoryExtendedIdevices.append(ClozelangfpdIdevice())
factoryExtendedIdevices.append(ParasabermasfpdIdevice())
factoryExtendedIdevices.append(DebesconocerfpdIdevice())
factoryExtendedIdevices.append(CitasparapensarfpdIdevice())
factoryExtendedIdevices.append(RecomendacionfpdIdevice())
factoryExtendedIdevices.append(VerdaderofalsofpdIdevice())
factoryExtendedIdevices.append(SeleccionmultiplefpdIdevice())
factoryExtendedIdevices.append(EleccionmultiplefpdIdevice())
factoryExtendedIdevices.append(CasopracticofpdIdevice())
factoryExtendedIdevices.append(EjercicioresueltofpdIdevice())
factoryExtendedIdevices.append(DestacadofpdIdevice())
#factoryExtendedIdevices.append(CorreccionfpdIdevice())
factoryExtendedIdevices.append(OrientacionesalumnadofpdIdevice())
factoryExtendedIdevices.append(OrientacionestutoriafpdIdevice())
factoryExtendedIdevices.append(FreeTextfpdIdevice())
# eXelearningPlus
factoryExtendedIdevices.append(ScormClozeIdevice())
factoryExtendedIdevices.append(ScormMultiSelectIdevice())
factoryExtendedIdevices.append(ScormDropDownIdevice())
factoryExtendedIdevices.append(ScormMultiClozeIdevice())
factoryExtendedIdevices.append(OpinionIdevice())
factoryExtendedIdevices.append(DropDownIdevice())
factoryExtendedIdevices.append(ScormMultiSelectIndFeedbackIdevice())
return factoryExtendedIdevices
def __loadExtended(self):
"""
Load the Extended iDevices (iDevices coded in Python)
        JR: Modified this function so that it also loads the factory extended idevices
"""
self.__loadUserExtended()
        #JR: If the extended.data file exists, load the extended iDevices from it
extendedPath = self.config.configDir/'idevices'/'extended.data'
log.debug("load extended iDevices from "+extendedPath)
self.factoryiDevices = self.__getFactoryExtendediDevices()
if extendedPath.exists():
self.extended = persist.decodeObject(extendedPath.bytes())
else:
self.extended = copy.deepcopy(self.factoryiDevices)
#self.extended = self.factoryiDevices
for idevice in self.__getIdevicesFPD():
self.delIdevice(idevice)
# generate new ids for these iDevices, to avoid any clashes
for idevice in self.extended:
idevice.id = self.getNewIdeviceId()
def __loadUserExtended(self):
"""
Load the user-created extended iDevices which are in the idevices
directory
"""
idevicePath = self.config.configDir/'idevices'
log.debug("load extended iDevices from "+idevicePath)
if not idevicePath.exists():
idevicePath.makedirs()
sys.path = [idevicePath] + sys.path
# Add to the list of extended idevices
for path in idevicePath.listdir("*idevice.py"):
log.debug("loading "+path)
moduleName = path.basename().splitext()[0]
module = __import__(moduleName, globals(), locals(), [])
module.register(self)
# Register the blocks for rendering the idevices
for path in idevicePath.listdir("*block.py"):
log.debug("loading "+path)
moduleName = path.basename().splitext()[0]
module = __import__(moduleName, globals(), locals(), [])
module.register()
def __loadGeneric(self):
"""
Load the Generic iDevices from the appdata directory
"""
genericPath = self.config.configDir/'idevices'/'generic.data'
log.debug("load generic iDevices from "+genericPath)
if genericPath.exists():
self.generic = persist.decodeObject(genericPath.bytes())
self.__upgradeGeneric()
self.factoryiDevices += self.__createGeneric()
else:
self.generic = self.__createGeneric()
self.factoryiDevices += self.generic
# generate new ids for these iDevices, to avoid any clashes
for idevice in self.generic:
idevice.id = self.getNewIdeviceId()
def __upgradeGeneric(self):
"""
Upgrades/removes obsolete generic idevices from before
"""
# We may have two reading activites,
# one problably has the wrong title,
# the other is redundant
readingActivitiesFound = 0
for idevice in self.generic:
if idevice.class_ == 'reading':
if readingActivitiesFound == 0:
# Rename the first one we find
idevice.title = x_(u"Reading Activity")
# and also upgrade its feedback field from using a simple
# string, to a subclass of TextAreaField.
# While this will have been initially handled by the
# field itself, and if not, then by the genericidevice's
# upgrade path, this is included here as a possibly
# painfully redundant safety check due to the extra
# special handing of generic idevices w/ generic.dat
for field in idevice.fields:
if isinstance(field, FeedbackField):
# must check for the upgrade manually, since
# persistence versions not used here.
# (but note that the persistence versioning
# will probably have ALREADY happened anyway!)
if not hasattr(field,"content"):
# this FeedbackField has NOT been upgraded:
field.content = field.feedback
field.content_w_resourcePaths = field.content
field.content_wo_resourcePaths = field.content
else:
# Destroy the second
self.generic.remove(idevice)
readingActivitiesFound += 1
if readingActivitiesFound == 2:
break
self.save()
def __createGeneric(self):
"""
Create the Generic iDevices which you get for free
(not created using the iDevice editor, but could have been)
Called when we can't find 'generic.data', generates an initial set of
free/builtin idevices and writes the new 'generic.data' file
        JR: Modified this method to accept another parameter, the list to which
        the generic idevices are added
"""
idevices = []
from exe.engine.genericidevice import GenericIdevice
readingAct = GenericIdevice(_(u"Reading Activity"),
u"reading",
_(u"University of Auckland"),
x_(u"""<p>The Reading Activity will primarily
be used to check a learner's comprehension of a given text. This can be done
by asking the learner to reflect on the reading and respond to questions about
the reading, or by having them complete some other possibly more physical task
based on the reading.</p>"""),
x_(u"<p>Teachers should keep the following "
"in mind when using this iDevice: </p>"
"<ol>"
"<li>"
"Think about the number of "
"different types of activity "
"planned for your resource that "
"will be visually signalled in the "
"content. Avoid using too many "
"different types or classification "
"of activities otherwise learner "
"may become confused. Usually three "
"or four different types are more "
"than adequate for a teaching "
"resource."
"</li>"
"<li>"
"From a visual design "
"perspective, avoid having two "
"iDevices immediately following "
"each other without any text in "
"between. If this is required, "
"rather collapse two questions or "
"events into one iDevice. "
"</li>"
"<li>"
"Think "
"about activities where the "
"perceived benefit of doing the "
"activity outweighs the time and "
"effort it will take to complete "
"the activity. "
"</li>"
"</ol>"))
readingAct.emphasis = Idevice.SomeEmphasis
readingAct.addField(TextAreaField(_(u"What to read"),
_(u"""Enter the details of the reading including reference details. The
referencing style used will depend on the preference of your faculty or
department.""")))
readingAct.addField(TextAreaField(_(u"Activity"),
_(u"""Describe the tasks related to the reading learners should undertake.
This helps demonstrate relevance for learners.""")))
readingAct.addField(FeedbackField(_(u"Feedback"),
_(u"""Use feedback to provide a summary of the points covered in the reading,
or as a starting point for further analysis of the reading by posing a question
or providing a statement to begin a debate.""")))
#idevices.append(readingAct)
objectives = GenericIdevice(_(u"Objectives"),
u"objectives",
_(u"University of Auckland"),
_(u"""Objectives describe the expected outcomes of the learning and should
define what the learners will be able to do when they have completed the
learning tasks."""),
u"")
objectives.emphasis = Idevice.SomeEmphasis
objectives.addField(TextAreaField(_(u"Objectives"),
_(u"""Type the learning objectives for this resource.""")))
#idevices.append(objectives)
#added kthamm summary idevice 111027
devsummary = GenericIdevice(_(u"Summary"),
u"devsummary",
_(u"University of Auckland"),
_(u"""Provide a summary of the learning resource."""),
u"")
devsummary.emphasis = Idevice.SomeEmphasis
devsummary.addField(TextAreaField(_(u"Summary"),
_(u"""Type a brief summary for this resource.""")))
idevices.append(devsummary)
#end added
#added kthamm preview idevice 111028
devpreview = GenericIdevice(_(u"Preview"),
u"devpreview",
_(u"University of Auckland"),
_(u"""A preview to introduce the learning resource"""),
u"")
devpreview.emphasis = Idevice.SomeEmphasis
devpreview.addField(TextAreaField(_(u"Preview"),
_(u"""Type the learning objectives for this resource.""")))
idevices.append(devpreview)
#end added
#added kthamm 111028 resource idevice
devresource = GenericIdevice(_(u"Resource"),
u"devresource",
_(u"University of Auckland"),
x_(u""" """),
x_(u" "))
devresource.emphasis = Idevice.SomeEmphasis
devresource.addField(TextAreaField(_(u"Resource"),
_(u"""Enter an URL to a resource, you want to provide. Mark the URL and click on the link button in the editor""")))
# devresource.addField(TextAreaField(_(u"Activity"),
#_(u"""Describe the tasks related to the reading learners should undertake.
#This helps demonstrate relevance for learners.""")))
#
# devresource.addField(FeedbackField(_(u"Feedback"),
#_(u"""Use feedback to provide a summary of the points covered in the reading,
#or as a starting point for further analysis of the reading by posing a question
#or providing a statement to begin a debate.""")))
idevices.append(devresource)
#end added
#added kthamm 111028 discussion idevice
devdiscussion = GenericIdevice(_(u"Discussion"),
u"devdiscussion",
_(u"University of Auckland"),
x_(u""" """),
x_(u" "))
devdiscussion.emphasis = Idevice.SomeEmphasis
devdiscussion.addField(TextAreaField(_(u"Discussion"),
_(u"""Enter the details of the reading including reference details. The
referencing style used will depend on the preference of your faculty or
department.""")))
devdiscussion.addField(TextAreaField(_(u"Activity"),
_(u"""Describe the tasks related to the reading learners should undertake.
This helps demonstrate relevance for learners.""")))
idevices.append(devdiscussion)
#end added
preknowledge = GenericIdevice(_(u"Preknowledge"),
u"preknowledge",
"",
_(u"""Prerequisite knowledge refers to the knowledge learners should already
have in order to be able to effectively complete the learning. Examples of
pre-knowledge can be: <ul>
<li> Learners must have level 4 English </li>
<li> Learners must be able to assemble standard power tools </li></ul>
"""), u"")
preknowledge.emphasis = Idevice.SomeEmphasis
preknowledge.addField(TextAreaField(_(u"Preknowledge"),
_(u"""Describe the prerequisite knowledge learners should have to effectively
complete this learning.""")))
#idevices.append(preknowledge)
activity = GenericIdevice(_(u"Activity"),
u"activity",
_(u"University of Auckland"),
_(u"""An activity can be defined as a task or set of tasks a learner must
complete. Provide a clear statement of the task and consider any conditions
that may help or hinder the learner in the performance of the task."""),
u"")
activity.emphasis = Idevice.SomeEmphasis
activity.addField(TextAreaField(_(u"Activity"),
_(u"""Describe the tasks the learners should complete.""")))
#idevices.append(activity)
self.save()
return idevices
def __createReading011(self):
"""
Create the Reading Activity 0.11
We do this only once when the user first runs eXe 0.11
"""
from exe.engine.genericidevice import GenericIdevice
readingAct = GenericIdevice(_(u"Reading Activity 0.11"),
u"reading",
_(u"University of Auckland"),
x_(u"""<p>The reading activity, as the name
suggests, should ask the learner to perform some form of activity. This activity
should be directly related to the text the learner has been asked to read.
Feedback to the activity where appropriate, can provide the learner with some
reflective guidance.</p>"""),
x_(u"Teachers should keep the following "
"in mind when using this iDevice: "
"<ol>"
"<li>"
"Think about the number of "
"different types of activity "
"planned for your resource that "
"will be visually signalled in the "
"content. Avoid using too many "
"different types or classification "
"of activities otherwise learner "
"may become confused. Usually three "
"or four different types are more "
"than adequate for a teaching "
"resource."
"</li>"
"<li>"
"From a visual design "
"perspective, avoid having two "
"iDevices immediately following "
"each other without any text in "
"between. If this is required, "
"rather collapse two questions or "
"events into one iDevice. "
"</li>"
"<li>"
"Think "
"about activities where the "
"perceived benefit of doing the "
"activity outweighs the time and "
"effort it will take to complete "
"the activity. "
"</li>"
"</ol>"))
readingAct.emphasis = Idevice.SomeEmphasis
readingAct.addField(TextAreaField(_(u"What to read"),
_(u"""Enter the details of the reading including reference details. The
referencing style used will depend on the preference of your faculty or
department.""")))
readingAct.addField(TextAreaField(_(u"Activity"),
_(u"""Describe the tasks related to the reading learners should undertake.
This helps demonstrate relevance for learners.""")))
readingAct.addField(FeedbackField(_(u"Feedback"),
_(u"""Use feedback to provide a summary of the points covered in the reading,
or as a starting point for further analysis of the reading by posing a question
or providing a statement to begin a debate.""")))
objectives = GenericIdevice(_(u"Objectives"),
u"objectives",
_(u"University of Auckland"),
_(u"""Objectives describe the expected outcomes of the learning and should
define what the learners will be able to do when they have completed the
learning tasks."""),
u"")
objectives.emphasis = Idevice.SomeEmphasis
objectives.addField(TextAreaField(_(u"Objectives"),
_(u"""Type the learning objectives for this resource.""")))
self.generic.append(objectives)
preknowledge = GenericIdevice(_(u"Preknowledge"),
u"preknowledge",
"",
_(u"""Prerequisite knowledge refers to the knowledge learners should already
have in order to be able to effectively complete the learning. Examples of
pre-knowledge can be: <ul>
<li> Learners must have level 4 English </li>
<li> Learners must be able to assemble standard power tools </li></ul>
"""), u"")
preknowledge.emphasis = Idevice.SomeEmphasis
preknowledge.addField(TextAreaField(_(u"Preknowledge"),
_(u"""Describe the prerequisite knowledge learners should have to effectively
complete this learning.""")))
self.generic.append(preknowledge)
activity = GenericIdevice(_(u"Activity"),
u"activity",
_(u"University of Auckland"),
_(u"""An activity can be defined as a task or set of tasks a learner must
complete. Provide a clear statement of the task and consider any conditions
that may help or hinder the learner in the performance of the task."""),
u"")
activity.emphasis = Idevice.SomeEmphasis
activity.addField(TextAreaField(_(u"Activity"),
_(u"""Describe the tasks the learners should complete.""")))
self.generic.append(activity)
self.save()
def save(self):
"""
Save the Generic iDevices to the appdata directory
"""
idevicesDir = self.config.configDir/'idevices'
if not idevicesDir.exists():
idevicesDir.mkdir()
fileOut = open(idevicesDir/'generic.data', 'wb')
fileOut.write(persist.encodeObject(self.generic))
        #JR: Also save the extended iDevices
fileOut = open(idevicesDir/'extended.data', 'wb')
fileOut.write(persist.encodeObject(self.extended))
# ===========================================================================
|
gpl-2.0
| -7,368,021,126,483,378,000 | 45.742638 | 116 | 0.594478 | false | 4.530967 | true | false | false |
seung-lab/neuroglancer
|
python/neuroglancer/tool/agglomeration_split_tool.py
|
1
|
29368
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import collections
import uuid
import copy
import heapq
import json
import re
import sqlite3
import logging
import os
import numpy as np
import six
import neuroglancer
debug_graph = False
verbose_merging = False
def normalize_edge((id_a, id_b)):
if id_a > id_b:
id_a, id_b = id_b, id_a
return id_a, id_b
class GreedyMulticut(object):
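    """Greedy edge contraction over a supervoxel graph: candidate edges are kept in a
    min-heap keyed by edge_priority, parallel edges produced by a contraction are
    combined with combine_edges, and merge() contracts one edge at a time."""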
def __init__(self, combine_edges, edge_priority):
# Contains (score, edge_map_value) tuple values in heap order. The
# edge_map_value is the actual corresponding value in edge_map, not a copy.
self.edge_heap = []
# Maps segment_id -> set of segment_id neighbors.
self.regions = dict()
# Maps (id_a, id_b) -> edge_map_value=[score, key, edge_object]
self.edge_map = dict()
self.combine_edges = combine_edges
self.edge_priority = edge_priority
self.num_valid_edges = 0
self._initialized = False
def add_edge(self, (id_a, id_b), edge):
id_a, id_b = normalize_edge((id_a, id_b))
self.regions.setdefault(id_a, set()).add(id_b)
self.regions.setdefault(id_b, set()).add(id_a)
key = (id_a, id_b)
entry = self.edge_map.get(key, None)
if entry is not None:
edge_data = entry[2] = self.combine_edges(entry[0], edge)
entry[0] = self.edge_priority(edge_data)
else:
entry = self.edge_map[key] = [self.edge_priority(edge), key, edge]
self.num_valid_edges += 1
if self._initialized:
self._add_to_heap(entry)
def _initialize_heap(self):
if self._initialized:
return
for key in self.edge_map:
entry = self.edge_map[key]
self._add_to_heap(entry)
self._initialized = True
def _add_to_heap(self, entry):
heapq.heappush(self.edge_heap, (entry[0], entry))
def remove_edge_from_heap(self, segment_ids):
"""Remove an edge from the heap."""
self._initialize_heap()
key = normalize_edge(segment_ids)
if key in self.edge_map:
self.edge_map[key][0] = None
self.num_valid_edges -= 1
def check_consistency(self):
self._initialize_heap()
expected_regions = dict()
for key, entry in six.viewitems(self.edge_map):
assert entry[1] == key
expected_regions.setdefault(key[0], set()).add(key[1])
expected_regions.setdefault(key[1], set()).add(key[0])
assert expected_regions == self.regions
num_valid_edges = 0
for e in self.edge_heap:
if self._is_valid_heap_entry(e):
num_valid_edges += 1
assert num_valid_edges == self.num_valid_edges
def merge(self, (id_a, id_b)):
self._initialize_heap()
id_a, id_b = normalize_edge((id_a, id_b))
if (id_a, id_b) not in self.edge_map:
raise KeyError
for neighbor in self.regions[id_b]:
if neighbor == id_a:
continue
expired_ids = normalize_edge((neighbor, id_b))
new_ids = normalize_edge((neighbor, id_a))
new_edge = self.edge_map.get(new_ids)
expired_edge = self.edge_map[expired_ids]
if new_edge is not None:
edge_data = new_edge[2] = self.combine_edges(new_edge[2], expired_edge[2])
if new_edge[0] is not None:
self.num_valid_edges -= 1
if expired_edge[0] is not None:
self.num_valid_edges -= 1
self.num_valid_edges += 1
new_edge[0] = self.edge_priority(edge_data)
self._add_to_heap(new_edge)
else:
self.regions[neighbor].add(id_a)
self.regions[id_a].add(neighbor)
self.edge_map[new_ids] = expired_edge
expired_edge[1] = new_ids
# No need to add to heap, since score hasn't changed.
del self.edge_map[expired_ids]
self.regions[neighbor].remove(id_b)
del self.regions[id_b]
self.regions[id_a].remove(id_b)
del self.edge_map[(id_a, id_b)]
self.num_valid_edges -= 1
def _is_valid_heap_entry(self, heap_entry):
score, entry = heap_entry
expected_entry = self.edge_map.get(entry[1])
if entry is not expected_entry or entry[0] is not score:
return None
else:
return entry
def get_next_edge(self):
self._initialize_heap()
while True:
if self.num_valid_edges == 0:
return None
heap_entry = self.edge_heap[0]
entry = self._is_valid_heap_entry(heap_entry)
if entry is None:
heapq.heappop(self.edge_heap)
else:
return entry
Edge = collections.namedtuple('Edge', ['segment_ids', 'score', 'position'])
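# Graph edge between two supervoxels: segment_ids=(segment_a, segment_b), score used to
# order greedy merges (lower scores merge first), position=(x, y, z) coordinates stored
# alongside the edge.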
def load_edges(path):
edges = []
with open(path, 'r') as f:
f.readline()
for line in f:
parts = line.split(',')
segment_a = int(parts[0].strip())
segment_b = int(parts[1].strip())
score = float(parts[2].strip())
position = (int(parts[3].strip()), int(parts[4].strip()), int(parts[5].strip()))
edges.append(Edge(segment_ids=(segment_a, segment_b), score=score, position=position))
return edges
def load_split_seeds(path):
with open(path, 'r') as f:
raw_seeds = json.loads(f.read())
seeds = collections.OrderedDict()
for component in raw_seeds:
seeds.setdefault(component['label'], []).extend(component['supervoxels'])
return seeds
def build_graph(edges):
logging.info('Building graph with %d edges', len(edges))
def combine_edges(a, b):
return a + b
def edge_priority(x):
return x
greedy_multicut = GreedyMulticut(
combine_edges=combine_edges,
edge_priority=edge_priority,
)
for edge in edges:
greedy_multicut.add_edge(edge.segment_ids, edge.score)
return greedy_multicut
class AgglomerationGraph(object):
def __init__(self, conn):
self.conn = conn
self.agglo_members_cache = dict()
self.agglo_edges_cache = dict()
def get_agglo_id(self, supervoxel_id):
c = self.conn.cursor()
c.execute('SELECT agglo_id FROM supervoxels WHERE supervoxel_id=?', (int(supervoxel_id), ))
result = c.fetchone()
if result is None:
return supervoxel_id
else:
return result[0]
def get_agglo_members(self, agglo_id):
result = self.agglo_members_cache.get(agglo_id)
if result is not None:
return result
c = self.conn.cursor()
c.execute('SELECT supervoxel_id FROM supervoxels WHERE agglo_id=?', (int(agglo_id), ))
result = [row[0] for row in c.fetchall()]
self.agglo_members_cache[agglo_id] = result
return result
def get_agglo_edges(self, agglo_id):
result = self.agglo_edges_cache.get(agglo_id)
if result is not None:
return result
c = self.conn.cursor()
c.execute('SELECT segment_a, segment_b, score, x, y, z FROM edges WHERE agglo_id=?',
(int(agglo_id), ))
result = [
Edge(segment_ids=(row[0], row[1]), score=row[2], position=(row[3], row[4], row[5]))
for row in c.fetchall()
]
self.agglo_edges_cache[agglo_id] = result
return result
def _make_supervoxel_map(graph, split_seeds, need_agglo_ids):
supervoxel_map = dict()
agglo_ids = dict()
for label in [0, 1]:
for seed in split_seeds[label]:
supervoxel_id = seed['supervoxel_id']
if need_agglo_ids:
agglo_id = graph.get_agglo_id(supervoxel_id)
if agglo_id == 0:
continue
agglo_ids.setdefault(agglo_id, []).append((label, seed))
supervoxel_map.setdefault(supervoxel_id, set()).add(label)
return agglo_ids, supervoxel_map
def do_split(graph, split_seeds, agglo_id=None, supervoxels=None):
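    """Split one agglomerated component: build a graph from its edges, then greedily
    re-merge supervoxels in order of increasing edge score, skipping any edge whose
    endpoints carry conflicting seed labels. Returns the chosen agglo_id, the resulting
    equivalence map, and the per-supervoxel seed-label map."""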
agglo_ids, supervoxel_map = _make_supervoxel_map(graph, split_seeds, need_agglo_ids=agglo_id is None)
if agglo_id is None:
agglo_id_counts = {
agglo_id: sum(z[1]['count'] for z in seeds)
for agglo_id, seeds in six.viewitems(agglo_ids)
}
agglo_id = max(agglo_ids, key=lambda x: agglo_id_counts[x])
if len(agglo_ids) > 1:
logging.info('Warning: more than one agglomerated component. ' +
'Choosing component %d with maximum number of seed points.', agglo_id)
logging.info('agglo_id_counts = %r', agglo_id_counts)
input_edges = graph.get_agglo_edges(agglo_id)
if supervoxels is not None:
input_edges = [x for x in input_edges if x.segment_ids[0] in supervoxels and x.segment_ids[1] in supervoxels]
graph = build_graph(input_edges)
if debug_graph:
graph.check_consistency()
cur_eqs = neuroglancer.EquivalenceMap()
logging.info('Agglomerating')
threshold = float('inf')
while True:
entry = graph.get_next_edge()
if entry is None:
if verbose_merging:
logging.info('Stopping because entry is None')
break
if entry[0] > threshold:
if verbose_merging:
logging.info('Stopping because edge score %r is > threshold %r', entry[0],
threshold)
break
segment_ids = entry[1]
seeds_a = supervoxel_map.get(segment_ids[0])
seeds_b = supervoxel_map.get(segment_ids[1])
if ((seeds_a is not None and len(seeds_a) > 1) or (seeds_b is not None and len(seeds_b) > 1)
or (seeds_a is not None and seeds_b is not None and seeds_a != seeds_b)):
if verbose_merging:
logging.info('Excluding edge %r because of seeds: %r %r', segment_ids, seeds_a,
seeds_b)
graph.remove_edge_from_heap(segment_ids)
continue
if verbose_merging:
logging.info('Merging %r with score %r', segment_ids, entry[0])
graph.merge(segment_ids)
if debug_graph:
graph.check_consistency()
new_id = cur_eqs.union(*segment_ids)
new_seeds = seeds_a or seeds_b
if new_seeds:
supervoxel_map[new_id] = new_seeds
return dict(agglo_id=agglo_id, cur_eqs=cur_eqs, supervoxel_map=supervoxel_map)
def display_split_result(graph, agglo_id, cur_eqs, supervoxel_map, split_seeds, image_url,
segmentation_url):
agglo_members = set(graph.get_agglo_members(agglo_id))
state = neuroglancer.ViewerState()
state.layers.append(name='image', layer=neuroglancer.ImageLayer(source=image_url))
state.layers.append(
name='original',
layer=neuroglancer.SegmentationLayer(
source=segmentation_url,
segments=agglo_members,
),
visible=False,
)
state.layers.append(
name='isolated-supervoxels',
layer=neuroglancer.SegmentationLayer(
source=segmentation_url,
segments=set(x for x, seeds in six.viewitems(supervoxel_map) if len(seeds) > 1),
),
visible=False,
)
state.layers.append(
name='split',
layer=neuroglancer.SegmentationLayer(
source=segmentation_url,
equivalences=cur_eqs,
segments=set(cur_eqs[x] for x in agglo_members),
))
for label, component in six.viewitems(split_seeds):
state.layers.append(
name='seed%d' % label,
layer=neuroglancer.PointAnnotationLayer(
points=[seed['position'] for seed in component],
),
)
state.show_slices = False
state.layout = '3d'
all_seed_points = [
seed['position'] for component in six.viewvalues(split_seeds) for seed in component
]
state.voxel_coordinates = np.mean(all_seed_points, axis=0)
state.perspective_zoom = 140
return state
def _set_viewer_seeds(s, seeds):
for inclusive in [False, True]:
layer_name = 'inclusive-seeds' if inclusive else 'exclusive-seeds'
s.layers[layer_name] = neuroglancer.AnnotationLayer(
annotation_color='green' if inclusive else 'red',
annotations=[
dict(
type='point',
id=x['id'],
point=x['position'],
description=str(x['supervoxel_id']),
) for x in seeds[inclusive]
],
)
def _get_viewer_seeds(s):
seeds = [[], []]
for inclusive in [False, True]:
layer_name = 'inclusive-seeds' if inclusive else 'exclusive-seeds'
try:
layer = s.layers[layer_name]
except KeyError:
            continue  # layer not present; skip it (avoids a NameError in the loop below)
for x in layer.annotations:
seeds[inclusive].append(
dict(
id=x.id,
supervoxel_id=int(x.description),
position=tuple(map(int, x.point)),
))
return seeds
class ComponentState(object):
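    """One component of the split being built interactively: the set of supervoxels
    currently assigned to it and its seed lists (index 0 = exclusive, 1 = inclusive)."""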
def __init__(self, data=None):
self.supervoxels = set()
self.seeds = [[], []]
if data is not None:
self.load(data)
def load(self, data):
self.supervoxels = set(data['supervoxels'])
self.seeds = data['seeds']
def to_json(self):
return {
'supervoxels': sorted(self.supervoxels),
'seeds': self.seeds,
}
class InteractiveState(object):
def __init__(self, path):
self.unused_supervoxels = set()
self.components = []
self.path = path
self.selected_component = None
def load(self):
with open(self.path, 'r') as f:
data = json.load(f)
self.unused_supervoxels = set(data['unused_supervoxels'])
self.components = map(ComponentState, data['components'])
self.selected_component = data['selected_component']
def initialize(self, supervoxel_ids):
self.unused_supervoxels = set(supervoxel_ids)
self.components = []
self.selected_component = None
def to_json(self):
return {
'unused_supervoxels': sorted(self.unused_supervoxels),
'components': [x.to_json() for x in self.components],
'selected_component': self.selected_component,
}
def save(self):
if self.path is None:
return
tmp_path = self.path + '.tmp'
with open(tmp_path, 'w') as f:
f.write(json.dumps(self.to_json()))
os.rename(tmp_path, self.path)
def make_new_component(self):
c = ComponentState()
c.supervoxels = self.unused_supervoxels
self.unused_supervoxels = set()
self.selected_component = len(self.components)
self.components.append(c)
def cycle_selected_component(self, amount):
if len(self.components) == 0:
return
if self.selected_component is None:
if amount > 0:
self.selected_component = 0
else:
self.selected_component = len(self.components) - 1
else:
self.selected_component = (
self.selected_component + amount + len(self.components)) % len(self.components)
def add_seed(self, supervoxel_id, position, inclusive):
if self.selected_component is None:
return
c = self.components[self.selected_component]
c.seeds[inclusive].append(
dict(
supervoxel_id=supervoxel_id,
position=position,
id=uuid.uuid4().hex))
class CachedSplitResult(object):
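    """Caches the most recent do_split() result for the selected component; update()
    recomputes it only when the selection, its supervoxels, or its seeds change, and
    returns whether anything changed."""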
def __init__(self, state, graph, agglo_id):
self.state = state
self.graph = graph
self.agglo_id = agglo_id
self.reset()
def reset(self):
self.selected_component = None
self.seeds = [[], []]
self.supervoxels = set()
self.split_result = None
def update(self):
selected_component = self.state.selected_component
if selected_component is None:
if self.selected_component is None:
return False
self.reset()
return True
component = self.state.components[selected_component]
if selected_component == self.selected_component:
if self.supervoxels == component.supervoxels:
if self.seeds == component.seeds:
return False
self.selected_component = self.state.selected_component
self.seeds = copy.deepcopy(component.seeds)
self.supervoxels = set(component.supervoxels)
print('Recomputing split result')
self.split_result = do_split(
graph=self.graph, split_seeds=self.seeds, agglo_id=self.agglo_id,
supervoxels=self.supervoxels)
print('Done recomputing split result')
return True
class InteractiveSplitter(object):
def __init__(self, graph, agglo_id, image_url, segmentation_url, state_path):
self.graph = graph
self.agglo_id = agglo_id
self.image_url = image_url
self.segmentation_url = segmentation_url
self.state = InteractiveState(state_path)
self.cached_split_result = CachedSplitResult(
state=self.state, graph=self.graph, agglo_id=self.agglo_id)
self.agglo_members = set(self.graph.get_agglo_members(agglo_id))
if state_path is not None and os.path.exists(state_path):
self.state.load()
else:
self.state.initialize(self.agglo_members)
viewer = self.viewer = neuroglancer.Viewer()
viewer.actions.add('inclusive-seed', self._add_inclusive_seed)
viewer.actions.add('exclusive-seed', self._add_exclusive_seed)
viewer.actions.add('next-component', self._next_component)
viewer.actions.add('prev-component', self._prev_component)
viewer.actions.add('new-component', self._make_new_component)
viewer.actions.add('exclude-component', self._exclude_component)
viewer.actions.add('exclude-all-but-component', self._exclude_all_but_component)
key_bindings = [
['bracketleft', 'prev-component'],
['bracketright', 'next-component'],
['at:dblclick0', 'exclude-component'],
['at:shift+mousedown2', 'exclude-all-but-component'],
['at:control+mousedown0', 'inclusive-seed'],
['at:shift+mousedown0', 'exclusive-seed'],
['enter', 'new-component'],
]
with viewer.txn() as s:
s.perspective_zoom = 140
s.layers.append(
name='image',
layer=neuroglancer.ImageLayer(source=self.image_url),
)
s.layers.append(
name='original',
layer=neuroglancer.SegmentationLayer(
source=self.segmentation_url,
segments=self.agglo_members,
),
)
s.layers.append(
name='unused',
layer=neuroglancer.SegmentationLayer(source=self.segmentation_url,
),
visible=False,
)
s.layers.append(
name='split-result',
layer=neuroglancer.SegmentationLayer(
source=self.segmentation_url,
segments=self.agglo_members,
),
)
s.concurrent_downloads = 256
self._update_state(s)
with viewer.config_state.txn() as s:
s.status_messages['help'] = ('KEYS: ' + ' | '.join('%s=%s' % (key, command)
for key, command in key_bindings))
for key, command in key_bindings:
s.input_event_bindings.viewer[key] = command
s.input_event_bindings.slice_view[key] = command
s.input_event_bindings.perspective_view[key] = command
self._update_config_state(s)
viewer.shared_state.add_changed_callback(
lambda: viewer.defer_callback(self._handle_state_changed))
def _add_inclusive_seed(self, s):
self._add_seed(s, True)
def _add_exclusive_seed(self, s):
self._add_seed(s, False)
def _exclude_component(self, s):
if self.state.selected_component is None:
return
component = self.state.components[self.state.selected_component]
supervoxel_id = self._get_mouse_supervoxel(s)
if supervoxel_id is None:
return
self.cached_split_result.update()
members = set(self.cached_split_result.split_result['cur_eqs'].members(supervoxel_id))
component.supervoxels = set(x for x in component.supervoxels if x not in members)
self.state.unused_supervoxels.update(members)
self._update_view()
def _exclude_all_but_component(self, s):
if self.state.selected_component is None:
return
component = self.state.components[self.state.selected_component]
supervoxel_id = self._get_mouse_supervoxel(s)
if supervoxel_id is None:
return
self.cached_split_result.update()
members = set(self.cached_split_result.split_result['cur_eqs'].members(supervoxel_id))
new_unused = set(x for x in component.supervoxels if x not in members)
component.supervoxels = members
self.state.unused_supervoxels.update(new_unused)
self._update_view()
def _make_new_component(self, s):
self.state.make_new_component()
self._update_view()
def _next_component(self, s):
self.state.cycle_selected_component(1)
self._update_view()
def _prev_component(self, s):
self.state.cycle_selected_component(-1)
self._update_view()
def _handle_state_changed(self):
if self.state.selected_component is None:
return
seeds = _get_viewer_seeds(self.viewer.state)
component = self.state.components[self.state.selected_component]
if seeds == component.seeds:
return
component.seeds = seeds
with self.viewer.txn() as s:
self._update_state(s)
def _get_mouse_supervoxel(self, s):
supervoxel_id = s.selected_values['original']
if supervoxel_id is None:
m = s.selected_values['split-result']
if m is not None:
if isinstance(m, neuroglancer.MapEntry):
supervoxel_id = m.key
else:
supervoxel_id = m
if supervoxel_id is None or supervoxel_id == 0:
return None
return supervoxel_id
def _add_seed(self, s, inclusive):
supervoxel_id = self._get_mouse_supervoxel(s)
mouse_voxel_coordinates = s.mouse_voxel_coordinates
if mouse_voxel_coordinates is None or supervoxel_id is None:
return
position = tuple(int(x) for x in mouse_voxel_coordinates)
self.state.add_seed(supervoxel_id, position, inclusive)
self._update_view()
def _update_view(self):
with self.viewer.txn() as s:
self._update_state(s)
with self.viewer.config_state.txn() as s:
self._update_config_state(s)
def _update_config_state(self, s):
if self.state.selected_component is None:
msg = '[No component selected] %d unused supervoxels' % len(
self.state.unused_supervoxels)
else:
selected_component = self.state.selected_component
msg = '[Component %d/%d] : %d supervoxels, %d connected components, %d unused' % (
selected_component, len(self.state.components),
len(self.cached_split_result.supervoxels),
len(self.cached_split_result.split_result['cur_eqs'].sets()), len(self.state.unused_supervoxels))
s.status_messages['status'] = msg
def _update_state(self, s):
self.cached_split_result.update()
self.state.save()
_set_viewer_seeds(s, self.cached_split_result.seeds)
s.layers['unused'].segments = self.state.unused_supervoxels
s.layers['original'].segments = self.cached_split_result.supervoxels
s.layers['split-result'].segments = self.cached_split_result.supervoxels
split_result = self.cached_split_result.split_result
if split_result is not None:
self._show_split_result(
s,
cur_eqs=split_result['cur_eqs'],
supervoxel_map=split_result['supervoxel_map'],
)
s.layout = neuroglancer.row_layout([
neuroglancer.LayerGroupViewer(
layout='3d',
layers=['image', 'original', 'unused', 'inclusive-seeds', 'exclusive-seeds']),
neuroglancer.LayerGroupViewer(
layout='3d', layers=['image', 'split-result', 'inclusive-seeds',
'exclusive-seeds']),
])
def _show_split_result(self, s, cur_eqs, supervoxel_map):
split_layer = s.layers['split-result']
split_layer.equivalences = cur_eqs
split_layer.segments = set(cur_eqs[x] for x in self.cached_split_result.supervoxels)
def run_batch(args, graph):
for path in args.split_seeds:
split_seeds = load_split_seeds(path)
split_result = do_split(graph=graph, split_seeds=split_seeds, agglo_id=args.agglo_id)
state = display_split_result(
graph=graph,
split_seeds=split_seeds,
image_url=args.image_url,
segmentation_url=args.segmentation_url,
**split_result)
print('<p><a href="%s">%s</a></p>' % (neuroglancer.to_url(state), path))
def run_interactive(args, graph):
# Make splitter a global variable so that it is accessible from the
# interactive `python -i` shell.
global splitter
if args.bind_address:
neuroglancer.set_server_bind_address(args.bind_address)
if args.static_content_url:
neuroglancer.set_static_content_source(url=args.static_content_url)
splitter = InteractiveSplitter(
graph,
agglo_id=args.agglo_id,
image_url=args.image_url,
segmentation_url=args.segmentation_url,
state_path=args.state)
print(splitter.viewer)
def open_graph(path, agglo_id):
# Check if graph_db is sharded
graph_db = path
m = re.match('(.*)@([0-9]+)((?:\..*)?)$', graph_db)
if m is not None:
num_shards = int(m.group(2))
shard = agglo_id % num_shards
graph_db = m.group(1) + ('-%05d-of-%05d' % (shard, num_shards)) + m.group(3)
return AgglomerationGraph(sqlite3.connect(graph_db, check_same_thread=False))
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument('-v', '--verbose', action='store_true', help='Display verbose log messages.')
common_ap = argparse.ArgumentParser(add_help=False)
common_ap.add_argument(
'--graph-db', required=True, help='Path to sqlite3 database specifying agglomeration graph')
common_ap.add_argument(
'--image-url', required=True, help='Neuroglancer data source URL for image')
common_ap.add_argument(
'--segmentation-url', required=True, help='Neuroglancer data source URL for segmentation')
sub_aps = ap.add_subparsers(help='command to run')
interactive_ap = sub_aps.add_parser(
'interactive', help='Interactively split an aglomerated component', parents=[common_ap])
batch_ap = sub_aps.add_parser(
'batch', help='Split based on pre-specified seed files', parents=[common_ap])
interactive_ap.add_argument(
'--agglo-id', type=int, required=True, help='Agglomerated component id to split')
interactive_ap.add_argument('--split-seeds', help='Path to JSON file specifying split seeds')
interactive_ap.add_argument('--state', help='Path to JSON state file.')
interactive_ap.add_argument(
'-a',
'--bind-address',
help='Bind address for Python web server. Use 127.0.0.1 (the default) to restrict access '
'to browers running on the local machine, use 0.0.0.0 to permit access from remote browsers.'
)
interactive_ap.add_argument(
'--static-content-url', help='Obtain the Neuroglancer client code from the specified URL.')
interactive_ap.set_defaults(func=run_interactive)
batch_ap.add_argument(
'--split-seeds', nargs='+', help='Path to JSON file specifying split seeds')
batch_ap.add_argument('--agglo-id', type=int, help='Agglomerated component id to split')
batch_ap.set_defaults(func=run_batch)
args = ap.parse_args()
graph = open_graph(args.graph_db, args.agglo_id)
if args.verbose:
logging.basicConfig(level=logging.INFO)
args.func(args, graph)
|
apache-2.0
| -3,544,483,489,328,351,700 | 35.078624 | 117 | 0.583662 | false | 3.648652 | false | false | false |
kenny1352/ratemyhistory
|
project/server.py
|
1
|
16356
|
import os
import uuid
import psycopg2
import psycopg2.extras
import crypt, getpass, pwd
import time
import smtplib, json
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from datetime import date
from flask import Flask, redirect, url_for,session, render_template, jsonify, request
from flask.ext.socketio import SocketIO, emit
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)
messages = []
users = {}
def connectToDB():
#change connection to session db
connectionString = 'dbname=ratemyhistory user=assist password=assist host=localhost'
print connectionString
try:
print("connected!")
return psycopg2.connect(connectionString)
except:
print("Can't connect to database")
@socketio.on('connect', namespace='/iss')
def makeConnection():
conn = connectToDB()
cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
if 'username' in session:
print session['username']
print session['logged']
        session['logged'] = 1
emit('logged', {'logged_in' : session['logged'], 'username' : session['username'] })
print('connected')
try:
print "before query in connect"
query =cur.mogrify("SELECT c.message, s.sender FROM chat AS c CROSS JOIN usersChat AS s WHERE c.chat_id = s.chat_id")
print "after query"
cur.execute(query)
print query
messages = cur.fetchall()
print messages
for message in messages:
tmp = {'name': message[1], 'text': message[0]}
print(message)
emit('message', tmp)
except:
print("Error in database")
@socketio.on('message', namespace='/iss')
def new_message(message):
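    # Persist the incoming chat message and its sender, then broadcast it to all clients.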
conn = connectToDB()
cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
senderUser = session['username']
try:
print('message: ' + str(message))
print('senderUser: ' + str(senderUser))
userQuery = cur.mogrify("INSERT INTO usersChat (sender) VALUES (%s);", (senderUser,))
msgQuery = cur.mogrify("INSERT INTO chat (message) VALUES (%s);", (message,))
print userQuery
print msgQuery
cur.execute(userQuery)
cur.execute(msgQuery)
print("message added to database")
conn.commit()
tmp = {'text': message, 'name': senderUser}
emit('message', tmp, broadcast=True)
except Exception as e:
print type(e)
print("Error inserting")
conn.rollback()
# I added
# @socketio.on('message', namespace='/iss')
# def new_message(message):
# print "IN MESSAGE!"
# conn = connectToDB()
# cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
# print "CONNECTED IN MESSAGE", message, " " , users[session['uuid']]['username']
# tmp ={'text': message, 'name': users[session['uuid']]['username']}
# cur.execute("""INSERT INTO userschat ( chat_id, users) VALUES(%s, %s); """,
# (users[session['uuid']]['id'], users[session['uuid']]['username']))
# conn.commit()
# print("tmp: ",tmp)
# print ("message: ", message, "ID: ",users[session['uuid']]['id'] )
# messages.append(tmp)
# emit('message', tmp, broadcast=True)
# # end I added
print ("before app route")
#for displaying html pages
@app.route('/')
def mainIndex():
print 'in hello world'
#print session['username']
# not sure we need this, but might be helpful later on
logged = 0
if 'username' in session:
logged = 1
conn = connectToDB()
cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    rows = []
    try:
profQuery = cur.mogrify("SELECT name, views from events UNION SELECT name, views from people ORDER BY views desc LIMIT 10;")
cur.execute(profQuery)
rows = cur.fetchall()
print profQuery
except:
print("Error executing SELECT statement")
return render_template('index.html', SelectedMenu = 'Index', topten = rows)
@app.route('/index.html')
def dashIndex():
print 'in hello world'
conn = connectToDB()
cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    rows = []
    try:
profQuery = cur.mogrify("SELECT name, views from events UNION SELECT name, views from people ORDER BY views desc LIMIT 10;")
cur.execute(profQuery)
rows = cur.fetchall()
print profQuery
except:
print("Error executing SELECT statement")
return render_template('index.html', SelectedMenu = 'Index', topten = rows)
@app.route('/SuggestEvent.html', methods=['GET','POST'])
def suggestEvent():
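    # Build an HTML email from the submitted form fields and send it to the
    # maintainers through Gmail's SMTP server.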
print 'in forms'
if request.method == 'POST':
eventName = request.form['eventName']
eventLoc = request.form['eventLoc']
email = request.form['senderEmail']
# file upload request
# 2 options requests
importance = request.form['importance']
time = request.form['timePeriod']
eventDesc = request.form['eventDesc']
receiver=['[email protected]']
sender = ['[email protected]']
message = "<p>Here is a suggested Event:<br /><br />"
message += "<b>Event Name: </b>" + eventName + "<br />"
message += "<b>Event Location: </b>" + eventLoc + "<br />"
message += "<b>Importance: </b>" + importance + "<br />"
message += "<b>Time: </b>" + time + "<br />"
message += "<b>Description: </b>" + eventDesc + "<br />"
message += "<b>User Email: </b>" + email + "<br />"
print(message)
message += "<br /><br />Thank you, <br />Rate My History User"
msg = MIMEMultipart('alternative')
emailMsg = MIMEText(message, 'html')
msg.attach(emailMsg)
msg['Subject'] = 'Suggest Event'
msg['From'] = '[email protected]'
msg['To'] = '[email protected]'
try:
smtpObj = smtplib.SMTP("smtp.gmail.com", 587)
smtpObj.ehlo()
smtpObj.starttls()
smtpObj.login('[email protected]', 'zacharski350')
smtpObj.sendmail(sender, receiver, msg.as_string())
smtpObj.quit()
print "Successfully sent email"
complete = True
except Exception as e:
print(e)
return render_template('SuggestEvent.html', SelectedMenu = 'SuggestEvent')
@app.route('/SuggestPerson.html', methods=['GET','POST'])
def suggestPerson():
print 'in forms'
return render_template('SuggestPerson.html', SelectedMenu = 'SuggestPerson')
@app.route('/profile.html')
def profile():
print 'in profile'
    if session.get('logged') == 1:
uEmail = session['email']
print uEmail
conn = connectToDB()
cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
try:
profQuery = cur.mogrify("SELECT Firstname, Lastname, Address, Company, Job, Fax, Email, Phone FROM users WHERE Email = %s LIMIT 1;", (uEmail,))
cur.execute(profQuery)
print profQuery
except:
print("Error executing SELECT statement")
pageStuff = cur.fetchall()
entry = pageStuff[0]
print entry[1]
else:
print "Error: Not logged in"
return render_template('index.html', SelectedMenu = 'Index')
return render_template('anotherProfile.html', pageInfo=entry, SelectedMenu = 'Profile')
# @app.route('/charts.html')
# def charts():
# print 'in charts'
# return render_template('charts.html', SelectedMenu = 'Charts')
# @app.route('/tables.html')
# def tables():
# print 'in tables'
# return render_template('tables.html', SelectedMenu = 'Tables')
@app.route('/register.html', methods=['GET','POST'])
def register():
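    # Reject the registration if the email is already taken or the passwords don't
    # match; otherwise store the user with a password hashed by pgcrypto's
    # crypt/gen_salt('bf').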
print 'in register'
conn = connectToDB()
cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
rows = []
if request.method == 'POST':
email = request.form['email']
regQuery = cur.mogrify("SELECT Email FROM users WHERE Email = %s", (email,))
print (regQuery)
cur.execute(regQuery)
rows=cur.fetchall()
print ("rows")
if (rows == []):
check = request.form['password']
check2 = request.form['pwConfirm']
if (check == check2):
# THIS CAN STILL BE USED, BUT CURRENTLY DOESNT SUPPORT 3NF TABLES AND THE DATA OUR TABLE NEEDS
# regAddQuery = cur.mogrify("""INSERT INTO users (Username, Email, Password, Firstname, Lastname, Company, Job, Address, City, Country, Phone, Fax)
# VALUES(%s, %s, crypt(%s, gen_salt('bf')), %s, %s, %s, %s, %s, %s, %s, %s, %s);""", (request.form['userName'],request.form['email'],request.form['password'],
# request.form['firstName'],request.form['lastName'],request.form['comp'],request.form['prof'],request.form['address'],request.form['city'],
# request.form['country'],request.form['phoneNumber'],request.form['faxNumber']))
regAddQuery = cur.mogrify("INSERT INTO users (Username, Email, Password) VALUES(%s, %s, crypt(%s, gen_salt('bf')));",(request.form['userName'],request.form['email'],request.form['password'],))
print (regAddQuery)
cur.execute(regAddQuery)
print("after add execute")
#commented commit until I know the query is printing right
conn.commit()
print("person registered")
return redirect(url_for('mainIndex'))
else:
print("passwords dont match, cant register")
return redirect(url_for('register'))
else:
print ("email is taken so user exists")
return render_template('register.html', SelectedMenu = 'Register')
@app.route('/AddEvent.html', methods=['GET','POST'])
def addEvent():
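    # Insert a new event row (name, location, description, year) from the submitted form.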
print 'in event addition'
conn = connectToDB()
cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
if request.method == 'POST':
print ("in requests")
# eventName = request.form['addEventName']
# eventLoc = request.form['addEventLoc']
# email = request.form['addSenderEmail']
# # file upload request
# eventDesc = request.form['addEventDesc']
# # 2 options requests
# importance = request.form['addImportance']
# date = request.form['year']
print (request.form["addEventName"])
print (request.form["addEventLoc"])
print (request.form["addEventDesc"])
print (request.form["year"])
addEventQuery=cur.mogrify("""INSERT INTO events (Name, Location, Description, Year) Values(%s, %s, %s, %s);""", (request.form['addEventName'],request.form['addEventLoc'],request.form['addEventDesc'], request.form['year'],))
print addEventQuery
cur.execute(addEventQuery)
conn.commit()
return render_template('AddEvent.html', SelectedMenu = 'AddEvent')
@app.route('/AddPerson.html', methods=['GET','POST'])
def addPerson():
print 'in forms'
return render_template('AddPerson.html', SelectedMenu = 'AddPerson')
@app.route('/timeline.html')
def timeline():
print 'in timeline'
return render_template('timeline.html', SelectedMenu = 'Timeline')
@app.route('/search.html')
def search():
print 'in search'
return render_template('search.html', SelectedMenu = 'searchengine')
@socketio.on('identify', namespace='/iss')
def on_identify(message):
pass
@socketio.on('userLogin', namespace='/iss')
def on_login(data):
print "in logincheck"
# pw = data['password']
username = data['username']
logging = data['logged']
print username
if (logging==1):
emit ('logged',{'logged_in' : session['logged'] })
#print (user)
# print (userEmail)
# print 'login ' + pw
# #session['logged'] = 0
# conn = connectToDB()
# cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
# print('connected')
# # userQuery = cur.mogrify("select email from users where email = %s", (userEmail,))
# # cur.execute(userQuery)
# # userResult = cur.fetchone()
# print 'already there'
# loginQuery = cur.mogrify("select Username, Email from users WHERE Email = %s AND Password = crypt(%s, Password)" , (userEmail, pw,))
# cur.execute(loginQuery)
# print ('query executed')
# result = cur.fetchone()
# print result
# if result:
# print('logged in!')
# print('saving information to the session...')
# #needs work to pass to javascript to limit the message send function
# #session['logged'] = json.dumps('true')
# session['loggedIn'] = 1
# session['username'] = result[0]
# print session['username']
# emit('logged', {'logged_in' : session['logged'] })
# #return redirect(url_for('mainIndex'))
# else:
# print ('incorrect login information')
# session['loggedIn'] = 0
# emit ('logged',{'logged_in' : session['logged'] })
#return redirect(url_for('login'))
# def loggedIn(logged):
# log = logged
# return log
#updateRoster()
@socketio.on('logout', namespace='/iss')
def on_disconnect(data):
print("i am here")
session['loggedIn'] = 0
emit('logged', {'logged_in' : session['logged']})
print 'user disconnected'
# need to log in in the app.route('/login') handler and create a session variable there so that it carries across sessions
# ng-init LOOK THIS UPs
@app.route('/login.html', methods=['GET', 'POST'])
def login():
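    # Compare the submitted credentials against the stored hash (crypt comparison in
    # SQL) and, on success, record the username/email in the session before redirecting.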
print 'in login'
conn = connectToDB()
cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
if request.method == 'POST':
print "in request login"
email = request.form['email']
password = request.form['password']
print email
print password
try:
loginQuery = cur.mogrify("SELECT Username, Email FROM users WHERE Email = %s AND Password = crypt(%s, Password);" , (email, password,))
print loginQuery
cur.execute(loginQuery)
print "EXECUTED: ", loginQuery
result = cur.fetchone()
#result = result
print('logged in')
# print('name = ', result['username'])
session['username'] = result['username']
session['logged'] = 1
session['email'] = result['email']
print ("username is : ", session['username'])
#socketio.emit('userLogin', {'logged_in' : session['logged'], 'username' : session['username']})
return redirect(url_for('mainIndex'))
except Exception as e:
print(e)
#print "passwords didnt match"
print "error logging in"
session['logged'] = 0
return redirect(url_for('login'))
return render_template('login.html', SelectedMenu = 'Login')
@app.route('/logout.html')
def logout():
print('removing session variables')
if 'username' in session:
del session['username']
session['loggedIn'] = 0
#print session['userName']
#session['userName'].close()
return redirect(url_for('mainIndex'))
#probably remove these later, but added them just to see what things could look like
@app.route('/bootstrap-elements')
def bootstrap():
print 'in tables'
return render_template('bootstrap-elements.html', SelectedMenu = 'Bootstrap-elements')
@app.route('/bootstrap-grid')
def bootstrap2():
print 'in tables'
return render_template('bootstrap-grid.html', SelectedMenu = 'Bootstrap-grid')
# start the server
if __name__ == '__main__':
socketio.run(app, host=os.getenv('IP', '0.0.0.0'), port =int(os.getenv('PORT', 8080)), debug=True)
|
apache-2.0
| 4,584,796,409,708,986,000 | 32.587269 | 231 | 0.590548 | false | 3.920422 | false | false | false |
josenavas/QiiTa
|
qiita_pet/handlers/study_handlers/sample_template.py
|
1
|
18227
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from os.path import basename
from json import loads, dumps
from tornado.web import authenticated, HTTPError
from natsort import natsorted
from qiita_core.qiita_settings import r_client
from qiita_pet.handlers.base_handlers import BaseHandler
from qiita_db.util import get_files_from_uploads_folders
from qiita_db.study import Study
from qiita_db.metadata_template.sample_template import SampleTemplate
from qiita_db.metadata_template.util import looks_like_qiime_mapping_file
from qiita_db.software import Software, Parameters
from qiita_db.processing_job import ProcessingJob
from qiita_db.exceptions import QiitaDBUnknownIDError
from qiita_pet.handlers.api_proxy import (
data_types_get_req, sample_template_samples_get_req,
prep_template_samples_get_req, study_prep_get_req,
sample_template_meta_cats_get_req, sample_template_category_get_req,
get_sample_template_processing_status,
check_fp)
SAMPLE_TEMPLATE_KEY_FORMAT = 'sample_template_%s'
def sample_template_checks(study_id, user, check_exists=False):
"""Performs different checks and raises errors if any of the checks fail
Parameters
----------
study_id : int
The study id
user : qiita_db.user.User
The user trying to access the study
check_exists : bool, optional
If true, check if the sample template exists
Raises
------
HTTPError
404 if the study does not exist
403 if the user does not have access to the study
404 if check_exists == True and the sample template doesn't exist
"""
try:
study = Study(int(study_id))
except QiitaDBUnknownIDError:
raise HTTPError(404, 'Study does not exist')
if not study.has_access(user):
raise HTTPError(403, 'User does not have access to study')
# Check if the sample template exists
if check_exists and not SampleTemplate.exists(study_id):
raise HTTPError(404, "Study %s doesn't have sample information"
% study_id)
def sample_template_handler_post_request(study_id, user, filepath,
data_type=None):
"""Creates a new sample template
Parameters
----------
study_id: int
The study to add the sample information
user: qiita_db.user import User
The user performing the request
filepath: str
The path to the sample template file
data_type: str, optional
If filepath is a QIIME mapping file, the data type of the prep
information file
Returns
-------
dict of {'job': str}
job: the id of the job adding the sample information to the study
Raises
------
HTTPError
404 if the filepath doesn't exist
"""
# Check if the current user has access to the study
sample_template_checks(study_id, user)
# Check if the file exists
fp_rsp = check_fp(study_id, filepath)
if fp_rsp['status'] != 'success':
raise HTTPError(404, 'Filepath not found')
filepath = fp_rsp['file']
is_mapping_file = looks_like_qiime_mapping_file(filepath)
if is_mapping_file and not data_type:
raise HTTPError(400, 'Please, choose a data type if uploading a '
'QIIME mapping file')
qiita_plugin = Software.from_name_and_version('Qiita', 'alpha')
cmd = qiita_plugin.get_command('create_sample_template')
params = Parameters.load(
cmd, values_dict={'fp': filepath, 'study_id': study_id,
'is_mapping_file': is_mapping_file,
'data_type': data_type})
job = ProcessingJob.create(user, params, True)
r_client.set(SAMPLE_TEMPLATE_KEY_FORMAT % study_id,
dumps({'job_id': job.id}))
job.submit()
return {'job': job.id}
def sample_template_handler_patch_request(user, req_op, req_path,
req_value=None, req_from=None):
"""Patches the sample template
Parameters
----------
user: qiita_db.user.User
The user performing the request
req_op : str
The operation to perform on the sample template
req_path : str
The path to the attribute to patch
req_value : str, optional
The new value
req_from : str, optional
The original path of the element
Returns
-------
    dict of {'job': str}
        job: the id of the job applying the patch to the sample information
    Raises
------
HTTPError
        400 If the path parameter doesn't follow the expected format
400 If the given operation is not supported
"""
req_path = [v for v in req_path.split('/') if v]
# At this point we know the path should be at least length 2
if len(req_path) < 2:
raise HTTPError(400, 'Incorrect path parameter')
study_id = int(req_path[0])
# Check if the current user has access to the study and if the sample
# template exists
sample_template_checks(study_id, user, check_exists=True)
if req_op == 'remove':
# Path format
# column: study_id/columns/column_name
# sample: study_id/samples/sample_id
if len(req_path) != 3:
raise HTTPError(400, 'Incorrect path parameter')
attribute = req_path[1]
attr_id = req_path[2]
qiita_plugin = Software.from_name_and_version('Qiita', 'alpha')
cmd = qiita_plugin.get_command('delete_sample_or_column')
params = Parameters.load(
cmd, values_dict={'obj_class': 'SampleTemplate',
'obj_id': study_id,
'sample_or_col': attribute,
'name': attr_id})
job = ProcessingJob.create(user, params, True)
# Store the job id attaching it to the sample template id
r_client.set(SAMPLE_TEMPLATE_KEY_FORMAT % study_id,
dumps({'job_id': job.id}))
job.submit()
return {'job': job.id}
elif req_op == 'replace':
        # WARNING: Although the patch operation is a replace, it is not a full
        # true replace. A replace is in theory equivalent to a remove + add.
        # In this case, the replace operation doesn't necessarily remove
        # anything (e.g. when only new columns/samples are being added to the
        # sample information).
# Path format: study_id/data
# Forcing to specify data for extensibility. In the future we may want
# to use this function to replace other elements of the sample
# information
if len(req_path) != 2:
raise HTTPError(400, 'Incorrect path parameter')
attribute = req_path[1]
if attribute == 'data':
# Update the sample information
if req_value is None:
raise HTTPError(400, "Value is required when updating "
"sample information")
# Check if the file exists
fp_rsp = check_fp(study_id, req_value)
if fp_rsp['status'] != 'success':
raise HTTPError(404, 'Filepath not found')
filepath = fp_rsp['file']
qiita_plugin = Software.from_name_and_version('Qiita', 'alpha')
cmd = qiita_plugin.get_command('update_sample_template')
params = Parameters.load(
cmd, values_dict={'study': study_id,
'template_fp': filepath})
job = ProcessingJob.create(user, params, True)
# Store the job id attaching it to the sample template id
r_client.set(SAMPLE_TEMPLATE_KEY_FORMAT % study_id,
dumps({'job_id': job.id}))
job.submit()
return {'job': job.id}
else:
raise HTTPError(404, 'Attribute %s not found' % attribute)
else:
raise HTTPError(400, 'Operation %s not supported. Current supported '
'operations: remove, replace' % req_op)
def sample_template_handler_delete_request(study_id, user):
"""Deletes the sample template
Parameters
----------
study_id: int
The study to delete the sample information
user: qiita_db.user
The user performing the request
Returns
-------
dict of {'job': str}
job: the id of the job deleting the sample information to the study
Raises
------
HTTPError
404 If the sample template doesn't exist
"""
# Check if the current user has access to the study and if the sample
# template exists
sample_template_checks(study_id, user, check_exists=True)
qiita_plugin = Software.from_name_and_version('Qiita', 'alpha')
cmd = qiita_plugin.get_command('delete_sample_template')
params = Parameters.load(cmd, values_dict={'study': int(study_id)})
job = ProcessingJob.create(user, params, True)
    # Store the job id of the job deleting the sample template
r_client.set(SAMPLE_TEMPLATE_KEY_FORMAT % study_id,
dumps({'job_id': job.id}))
job.submit()
return {'job': job.id}
class SampleTemplateHandler(BaseHandler):
@authenticated
def get(self):
study_id = self.get_argument('study_id')
# Check if the current user has access to the study
sample_template_checks(study_id, self.current_user)
self.render('study_ajax/sample_summary.html', study_id=study_id)
@authenticated
def post(self):
study_id = int(self.get_argument('study_id'))
filepath = self.get_argument('filepath')
data_type = self.get_argument('data_type')
self.write(sample_template_handler_post_request(
study_id, self.current_user, filepath, data_type=data_type))
@authenticated
def patch(self):
req_op = self.get_argument('op')
req_path = self.get_argument('path')
req_value = self.get_argument('value', None)
req_from = self.get_argument('from', None)
self.write(sample_template_handler_patch_request(
self.current_user, req_op, req_path, req_value, req_from))
@authenticated
def delete(self):
study_id = int(self.get_argument('study_id'))
self.write(sample_template_handler_delete_request(
study_id, self.current_user))
def sample_template_overview_handler_get_request(study_id, user):
# Check if the current user has access to the sample template
sample_template_checks(study_id, user)
# Check if the sample template exists
exists = SampleTemplate.exists(study_id)
# The following information should always be provided:
# The files that have been uploaded to the system and can be a
# sample template file
files = [f for _, f in get_files_from_uploads_folders(study_id)
if f.endswith(('txt', 'tsv'))]
# If there is a job associated with the sample information, the job id
job = None
job_info = r_client.get(SAMPLE_TEMPLATE_KEY_FORMAT % study_id)
if job_info:
job = loads(job_info)['job_id']
# Specific information if it exists or not:
data_types = []
st_fp_id = None
old_files = []
num_samples = 0
num_cols = 0
if exists:
# If it exists we need to provide:
# The id of the sample template file so the user can download it and
# the list of old filepaths
st = SampleTemplate(study_id)
all_st_files = st.get_filepaths()
# The current sample template file is the first one in the list
# (pop(0)) and we are interested only in the id ([0])
st_fp_id = all_st_files.pop(0)[0]
# For the old filepaths we are only interested in their basename
old_files = [basename(fp) for _, fp in all_st_files]
# The number of samples - this is a space efficient way of counting
# the number of samples. Doing len(list(st.keys())) creates a list
# that we are not using
num_samples = sum(1 for _ in st.keys())
# The number of columns
num_cols = len(st.categories())
else:
# It doesn't exist, we also need to provide the data_types in case
# the user uploads a QIIME mapping file
data_types = sorted(data_types_get_req()['data_types'])
return {'exists': exists,
'uploaded_files': files,
'data_types': data_types,
'user_can_edit': Study(study_id).can_edit(user),
'job': job,
'download_id': st_fp_id,
'old_files': old_files,
'num_samples': num_samples,
'num_columns': num_cols}
class SampleTemplateOverviewHandler(BaseHandler):
@authenticated
def get(self):
study_id = int(self.get_argument('study_id'))
self.write(
sample_template_overview_handler_get_request(
study_id, self.current_user))
def sample_template_summary_get_req(study_id, user):
"""Returns a summary of the sample template metadata columns
Parameters
----------
study_id: int
The study to retrieve the sample information summary
user: qiita_db.user
The user performing the request
Returns
-------
dict of {str: object}
Keys are metadata categories and the values are list of tuples. Each
tuple is an observed value in the category and the number of times
it's seen.
Raises
------
HTTPError
404 If the sample template doesn't exist
"""
# Check if the current user has access to the study and if the sample
# template exists
sample_template_checks(study_id, user, check_exists=True)
st = SampleTemplate(study_id)
df = st.to_dataframe()
# Drop the study_id column if it exists
if 'study_id' in df.columns:
df.drop('study_id', axis=1, inplace=True)
res = {}
for column in df.columns:
counts = df[column].value_counts()
res[str(column)] = [(str(key), counts[key])
for key in natsorted(
counts.index,
key=lambda x: unicode(x, errors='ignore'))]
return res
class SampleTemplateSummaryHandler(BaseHandler):
@authenticated
def get(self):
"""Send formatted summary page of sample template"""
study_id = int(self.get_argument('study_id'))
self.write(
sample_template_summary_get_req(study_id, self.current_user))
def _build_sample_summary(study_id, user_id):
"""Builds the initial table of samples associated with prep templates
Parameters
----------
study_id : int
Study to get samples from
user_id : str
User requesting the information
Returns
-------
columns : list of dict
SlickGrid formatted list of columns
samples_table : list of dict
SlickGrid formatted table information
"""
# Load all samples available into dictionary and set
samps_table = {s: {'sample': s} for s in
sample_template_samples_get_req(
study_id, user_id)['samples']}
all_samps = set(samps_table.keys())
columns = [{"id": "sample", "name": "Sample", "field": "sample",
"width": 240, "sortable": False}]
# Add one column per prep template highlighting what samples exist
preps = study_prep_get_req(study_id, user_id)["info"]
for dt in preps:
for prep in preps[dt]:
col_field = "prep%d" % prep["id"]
col_name = "%s - %d" % (prep["name"], prep["id"])
columns.append({"id": col_field,
"name": col_name,
"field": col_field,
"sortable": False,
"width": 240})
prep_samples = prep_template_samples_get_req(
prep['id'], user_id)['samples']
# Empty cell for samples not in the prep template
for s in all_samps.difference(prep_samples):
samps_table[s][col_field] = ""
# X in cell for samples in the prep template
for s in all_samps.intersection(prep_samples):
samps_table[s][col_field] = "X"
return columns, samps_table.values()
class SampleAJAX(BaseHandler):
@authenticated
def get(self):
"""Show the sample summary page"""
study_id = self.get_argument('study_id')
res = sample_template_meta_cats_get_req(
int(study_id), self.current_user.id)
if res['status'] == 'error':
if 'does not exist' in res['message']:
raise HTTPError(404, res['message'])
elif 'User does not have access to study' in res['message']:
raise HTTPError(403, res['message'])
else:
raise HTTPError(500, res['message'])
meta_cats = res['categories']
cols, samps_table = _build_sample_summary(study_id,
self.current_user.id)
_, alert_type, alert_msg = get_sample_template_processing_status(
study_id)
self.render('study_ajax/sample_prep_summary.html',
table=samps_table, cols=cols, meta_available=meta_cats,
study_id=study_id, alert_type=alert_type,
alert_message=alert_msg,
user_can_edit=Study(study_id).can_edit(self.current_user))
@authenticated
def post(self):
study_id = int(self.get_argument('study_id'))
meta_col = self.get_argument('meta_col')
values = sample_template_category_get_req(meta_col, study_id,
self.current_user.id)
if values['status'] != 'success':
self.write(values)
else:
self.write({'status': 'success',
'message': '',
'values': values['values']
})
|
bsd-3-clause
| -9,172,197,313,224,403,000 | 34.80943 | 79 | 0.594009 | false | 4.098718 | false | false | false |
verificarlo/verificarlo
|
src/tools/ci/vfc_ci_report/inspect_runs.py
|
1
|
24408
|
#############################################################################
# #
# This file is part of Verificarlo. #
# #
# Copyright (c) 2015-2021 #
# Verificarlo contributors #
# Universite de Versailles St-Quentin-en-Yvelines #
# CMLA, Ecole Normale Superieure de Cachan #
# #
# Verificarlo is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# Verificarlo is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with Verificarlo. If not, see <http://www.gnu.org/licenses/>. #
# #
#############################################################################
# Manage the view comparing the variables of a run
# Manage the view comparing a variable over different runs
# At its creation, an InspectRuns object will create all the needed Bokeh widgets
# and plots, setup the callback functions (either server side or client side),
# initialize widgets selection, and from this selection generate the first plots.
# Then, when callback functions are triggered, widgets selections are updated,
# and plots are re-generated with the newly selected data.
from math import pi
from functools import partial
import pandas as pd
import numpy as np
from bokeh.plotting import figure, curdoc
from bokeh.embed import components
from bokeh.models import Select, ColumnDataSource, Panel, Tabs, HoverTool,\
RadioButtonGroup, CheckboxGroup, CustomJS
import helper
import plot
##########################################################################
class InspectRuns:
# Helper functions related to InspectRun
def gen_runs_selection(self):
'''
Returns a dictionary mapping user-readable strings to all run timestamps
'''
runs_dict = {}
# Iterate over timestamp rows (runs) and fill dict
for row in self.metadata.iloc:
# The syntax used by pandas makes this part a bit tricky :
# row.name is the index of metadata (so it refers to the
# timestamp), whereas row["name"] is the column called "name"
# (which is the display string used for the run)
# runs_dict[run's name] = run's timestamp
runs_dict[row["name"]] = row.name
return runs_dict
def gen_boxplot_tooltips(self, prefix):
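        # Hover tooltip entries (label, data field) for a boxplot whose columns use the
        # given prefix; numeric fields are rendered with printf-style scientific notation.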
return [
("Name", "@%s_x" % prefix),
("Min", "@" + prefix + "_min{%0.18e}"),
("Max", "@" + prefix + "_max{%0.18e}"),
("1st quartile", "@" + prefix + "_quantile25{%0.18e}"),
("Median", "@" + prefix + "_quantile50{%0.18e}"),
("3rd quartile", "@" + prefix + "_quantile75{%0.18e}"),
("μ", "@" + prefix + "_mu{%0.18e}"),
("Number of samples (tests)", "@nsamples")
]
def gen_boxplot_tooltips_formatters(self, prefix):
return {
"@%s_min" % prefix: "printf",
"@%s_max" % prefix: "printf",
"@%s_quantile25" % prefix: "printf",
"@%s_quantile50" % prefix: "printf",
"@%s_quantile75" % prefix: "printf",
"@%s_mu" % prefix: "printf"
}
# Data processing helper
# (computes new distributions for sigma, s2, s10)
def data_processing(self, dataframe):
        # Compute aggregated mu
dataframe["mu"] = np.vectorize(
np.average)(
dataframe["mu"],
weights=dataframe["nsamples"])
# nsamples is the number of aggregated elements (as well as the number
# of samples for our new sigma and s distributions)
dataframe["nsamples"] = dataframe["nsamples"].apply(lambda x: len(x))
dataframe["mu_x"] = dataframe.index
        # Make sure that strings don't exceed a certain length
dataframe["mu_x"] = dataframe["mu_x"].apply(
lambda x: x[:17] + "[...]" + x[-17:] if len(x) > 39 else x
)
# Get quantiles and mu for sigma, s10, s2
for prefix in ["sigma", "s10", "s2"]:
dataframe["%s_x" % prefix] = dataframe["mu_x"]
dataframe[prefix] = dataframe[prefix].apply(np.sort)
dataframe["%s_min" % prefix] = dataframe[prefix].apply(np.min)
dataframe["%s_quantile25" % prefix] = dataframe[prefix].apply(
np.quantile, args=(0.25,))
dataframe["%s_quantile50" % prefix] = dataframe[prefix].apply(
np.quantile, args=(0.50,))
dataframe["%s_quantile75" % prefix] = dataframe[prefix].apply(
np.quantile, args=(0.75,))
dataframe["%s_max" % prefix] = dataframe[prefix].apply(np.max)
dataframe["%s_mu" % prefix] = dataframe[prefix].apply(np.average)
del dataframe[prefix]
return dataframe
# Plots update function
def update_plots(self):
groupby_display = self.widgets["groupby_radio"].labels[
self.widgets["groupby_radio"].active
]
groupby = self.factors_dict[groupby_display]
filterby_display = self.widgets["filterby_radio"].labels[
self.widgets["filterby_radio"].active
]
filterby = self.factors_dict[filterby_display]
# Groupby and aggregate lines belonging to the same group in lists
groups = self.run_data[
self.run_data.index.isin(
[self.widgets["select_filter"].value],
level=filterby
)
].groupby(groupby)
groups = groups.agg({
"sigma": lambda x: x.tolist(),
"s10": lambda x: x.tolist(),
"s2": lambda x: x.tolist(),
"mu": lambda x: x.tolist(),
# Used for mu weighted average first, then will be replaced
"nsamples": lambda x: x.tolist()
})
# Compute the new distributions, ...
groups = self.data_processing(groups).to_dict("list")
# Update source
# Assign each ColumnDataSource, starting with the boxplots
for prefix in ["sigma", "s10", "s2"]:
dict = {
"%s_x" % prefix: groups["%s_x" % prefix],
"%s_min" % prefix: groups["%s_min" % prefix],
"%s_quantile25" % prefix: groups["%s_quantile25" % prefix],
"%s_quantile50" % prefix: groups["%s_quantile50" % prefix],
"%s_quantile75" % prefix: groups["%s_quantile75" % prefix],
"%s_max" % prefix: groups["%s_max" % prefix],
"%s_mu" % prefix: groups["%s_mu" % prefix],
"nsamples": groups["nsamples"]
}
# Filter outliers if the box is checked
if len(self.widgets["outliers_filtering_inspect"].active) > 0:
# Boxplots will be filtered by max then min
top_outliers = helper.detect_outliers(dict["%s_max" % prefix])
helper.remove_boxplot_outliers(dict, top_outliers, prefix)
bottom_outliers = helper.detect_outliers(
dict["%s_min" % prefix])
helper.remove_boxplot_outliers(dict, bottom_outliers, prefix)
self.sources["%s_source" % prefix].data = dict
# Finish with the mu plot
dict = {
"mu_x": groups["mu_x"],
"mu": groups["mu"],
"nsamples": groups["nsamples"]
}
self.sources["mu_source"].data = dict
# Filter outliers if the box is checked
if len(self.widgets["outliers_filtering_inspect"].active) > 0:
mu_outliers = helper.detect_outliers(groups["mu"])
groups["mu"] = helper.remove_outliers(groups["mu"], mu_outliers)
groups["mu_x"] = helper.remove_outliers(
groups["mu_x"], mu_outliers)
# Update plots axis/titles
# Get display string of the last (unselected) factor
factors_dict = self.factors_dict.copy()
del factors_dict[groupby_display]
del factors_dict[filterby_display]
for_all = list(factors_dict.keys())[0]
# Update all display strings for plot title (remove caps, plural)
groupby_display = groupby_display.lower()
filterby_display = filterby_display.lower()[:-1]
for_all = for_all.lower()
self.plots["mu_inspect"].title.text = \
"Empirical average μ of %s (groupped by %s, for all %s)" \
% (filterby_display, groupby_display, for_all)
self.plots["sigma_inspect"].title.text = \
"Standard deviation σ of %s (groupped by %s, for all %s)" \
% (filterby_display, groupby_display, for_all)
self.plots["s10_inspect"].title.text = \
"Significant digits s of %s (groupped by %s, for all %s)" \
% (filterby_display, groupby_display, for_all)
self.plots["s2_inspect"].title.text = \
"Significant digits s of %s (groupped by %s, for all %s)" \
% (filterby_display, groupby_display, for_all)
helper.reset_x_range(self.plots["mu_inspect"], groups["mu_x"])
helper.reset_x_range(self.plots["sigma_inspect"], groups["sigma_x"])
helper.reset_x_range(self.plots["s10_inspect"], groups["s10_x"])
helper.reset_x_range(self.plots["s2_inspect"], groups["s2_x"])
# Widets' callback functions
# Run selector callback
def update_run(self, attrname, old, new):
filterby = self.widgets["filterby_radio"].labels[
self.widgets["filterby_radio"].active
]
filterby = self.factors_dict[filterby]
# Update run selection (by using dict mapping)
self.current_run = self.runs_dict[new]
# Update run data
self.run_data = self.data[self.data["timestamp"] == self.current_run]
# Save old selected option
old_value = self.widgets["select_filter"].value
# Update filter options
options = self.run_data.index\
.get_level_values(filterby).drop_duplicates().tolist()
self.widgets["select_filter"].options = options
if old_value not in self.widgets["select_filter"].options:
self.widgets["select_filter"].value = options[0]
# The update_var callback will be triggered by the assignment
else:
# Trigger the callback manually (since the plots need to be updated
# anyway)
self.update_filter("", "", old_value)
# "Group by" radio
def update_groupby(self, attrname, old, new):
# Update "Filter by" radio list
filterby_list = list(self.factors_dict.keys())
del filterby_list[self.widgets["groupby_radio"].active]
self.widgets["filterby_radio"].labels = filterby_list
filterby = self.widgets["filterby_radio"].labels[
self.widgets["filterby_radio"].active
]
filterby = self.factors_dict[filterby]
# Save old selected option
old_value = self.widgets["select_filter"].value
# Update filter options
options = self.run_data.index\
.get_level_values(filterby).drop_duplicates().tolist()
self.widgets["select_filter"].options = options
if old_value not in self.widgets["select_filter"].options:
self.widgets["select_filter"].value = options[0]
# The update_var callback will be triggered by the assignment
else:
# Trigger the callback manually (since the plots need to be updated
# anyway)
self.update_filter("", "", old_value)
# "Filter by" radio
def update_filterby(self, attrname, old, new):
filterby = self.widgets["filterby_radio"].labels[
self.widgets["filterby_radio"].active
]
filterby = self.factors_dict[filterby]
# Save old selected option
old_value = self.widgets["select_filter"].value
# Update filter selector options
options = self.run_data.index\
.get_level_values(filterby).drop_duplicates().tolist()
self.widgets["select_filter"].options = options
if old_value not in self.widgets["select_filter"].options:
self.widgets["select_filter"].value = options[0]
# The update_var callback will be triggered by the assignment
else:
# Trigger the callback manually (since the plots need to be updated
# anyway)
self.update_filter("", "", old_value)
# Filter selector callback
def update_filter(self, attrname, old, new):
self.update_plots()
# Filter outliers checkbox callback
def update_outliers_filtering(self, attrname, old, new):
# The status (checked/unchecked) of the checkbox is also verified inside
# self.update_plots(), so calling this function is enough
self.update_plots()
# Bokeh setup functions
# (for both variable and backend selection at once)
def setup_plots(self):
tools = "pan, wheel_zoom, xwheel_zoom, ywheel_zoom, reset, save"
# Tooltips and formatters
dotplot_tooltips = [
("Name", "@mu_x"),
("μ", "@mu{%0.18e}"),
("Number of samples (tests)", "@nsamples")
]
dotplot_formatters = {
"@mu": "printf"
}
sigma_boxplot_tooltips = self.gen_boxplot_tooltips("sigma")
sigma_boxplot_tooltips_formatters = self.gen_boxplot_tooltips_formatters(
"sigma")
s10_boxplot_tooltips = self.gen_boxplot_tooltips("s10")
s10_boxplot_tooltips_formatters = self.gen_boxplot_tooltips_formatters(
"s10")
s2_boxplot_tooltips = self.gen_boxplot_tooltips("s2")
s2_boxplot_tooltips_formatters = self.gen_boxplot_tooltips_formatters(
"s2")
# Plots
# Mu plot
self.plots["mu_inspect"] = figure(
name="mu_inspect",
title="",
plot_width=900, plot_height=400, x_range=[""],
tools=tools, sizing_mode="scale_width"
)
plot.fill_dotplot(
self.plots["mu_inspect"], self.sources["mu_source"], "mu",
tooltips=dotplot_tooltips,
tooltips_formatters=dotplot_formatters
)
self.doc.add_root(self.plots["mu_inspect"])
# Sigma plot
self.plots["sigma_inspect"] = figure(
name="sigma_inspect",
title="",
plot_width=900, plot_height=400, x_range=[""],
tools=tools, sizing_mode="scale_width"
)
plot.fill_boxplot(
self.plots["sigma_inspect"],
self.sources["sigma_source"],
prefix="sigma",
tooltips=sigma_boxplot_tooltips,
tooltips_formatters=sigma_boxplot_tooltips_formatters)
self.doc.add_root(self.plots["sigma_inspect"])
# s plots
self.plots["s10_inspect"] = figure(
name="s10_inspect",
title="",
plot_width=900, plot_height=400, x_range=[""],
tools=tools, sizing_mode='scale_width'
)
plot.fill_boxplot(
self.plots["s10_inspect"],
self.sources["s10_source"],
prefix="s10",
tooltips=s10_boxplot_tooltips,
tooltips_formatters=s10_boxplot_tooltips_formatters)
s10_tab_inspect = Panel(
child=self.plots["s10_inspect"],
title="Base 10")
self.plots["s2_inspect"] = figure(
name="s2_inspect",
title="",
plot_width=900, plot_height=400, x_range=[""],
tools=tools, sizing_mode='scale_width'
)
plot.fill_boxplot(
self.plots["s2_inspect"], self.sources["s2_source"], prefix="s2",
tooltips=s2_boxplot_tooltips,
tooltips_formatters=s2_boxplot_tooltips_formatters
)
s2_tab_inspect = Panel(child=self.plots["s2_inspect"], title="Base 2")
s_tabs_inspect = Tabs(
name="s_tabs_inspect",
tabs=[s10_tab_inspect, s2_tab_inspect], tabs_location="below"
)
self.doc.add_root(s_tabs_inspect)
def setup_widgets(self):
# Generation of selectable items
# Dict contains all inspectable runs (maps display strings to timestamps)
# The dict structure allows to get the timestamp from the display string
# in O(1)
self.runs_dict = self.gen_runs_selection()
# Dict maps display strings to column names for the different factors
# (var, backend, test)
self.factors_dict = {
"Variables": "variable",
"Backends": "vfc_backend",
"Tests": "test"
}
# Run selection
# Contains all options strings
runs_display = list(self.runs_dict.keys())
# Will be used when updating plots (contains actual number)
self.current_run = self.runs_dict[runs_display[-1]]
# Contains the selected option string, used to update current_n_runs
current_run_display = runs_display[-1]
# This contains only entries matching the run
self.run_data = self.data[self.data["timestamp"] == self.current_run]
change_run_callback_js = "updateRunMetadata(cb_obj.value);"
self.widgets["select_run"] = Select(
name="select_run", title="Run :",
value=current_run_display, options=runs_display
)
self.doc.add_root(self.widgets["select_run"])
self.widgets["select_run"].on_change("value", self.update_run)
self.widgets["select_run"].js_on_change("value", CustomJS(
code=change_run_callback_js,
args=(dict(
metadata=helper.metadata_to_dict(
helper.get_metadata(self.metadata, self.current_run)
)
))
))
# Factors selection
# "Group by" radio
self.widgets["groupby_radio"] = RadioButtonGroup(
name="groupby_radio",
labels=list(self.factors_dict.keys()), active=0
)
self.doc.add_root(self.widgets["groupby_radio"])
# The functions are defined inside the template to avoid writing too
# much JS server side
self.widgets["groupby_radio"].on_change(
"active",
self.update_groupby
)
# "Filter by" radio
# Get all possible factors, and remove the one selected in "Group by"
filterby_list = list(self.factors_dict.keys())
del filterby_list[self.widgets["groupby_radio"].active]
self.widgets["filterby_radio"] = RadioButtonGroup(
name="filterby_radio",
labels=filterby_list, active=0
)
self.doc.add_root(self.widgets["filterby_radio"])
# The functions are defined inside the template to avoid writing too
# much JS server side
self.widgets["filterby_radio"].on_change(
"active",
self.update_filterby
)
# Filter selector
filterby = self.widgets["filterby_radio"].labels[
self.widgets["filterby_radio"].active
]
filterby = self.factors_dict[filterby]
options = self.run_data.index\
.get_level_values(filterby).drop_duplicates().tolist()
self.widgets["select_filter"] = Select(
# We need a different name to avoid collision in the template with
# the runs comparison's widget
name="select_filter", title="Select a filter :",
value=options[0], options=options
)
self.doc.add_root(self.widgets["select_filter"])
self.widgets["select_filter"]\
.on_change("value", self.update_filter)
# Toggle for outliers filtering
self.widgets["outliers_filtering_inspect"] = CheckboxGroup(
name="outliers_filtering_inspect",
labels=["Filter outliers"], active=[]
)
self.doc.add_root(self.widgets["outliers_filtering_inspect"])
self.widgets["outliers_filtering_inspect"]\
.on_change("active", self.update_outliers_filtering)
# Communication methods
# (to send/receive messages to/from master)
def change_repo(self, new_data, new_metadata):
'''
When received, update data and metadata with the new repo, and update
everything
'''
self.data = new_data
self.metadata = new_metadata
self.runs_dict = self.gen_runs_selection()
runs_display = list(self.runs_dict.keys())
current_run_display = runs_display[-1]
# Update widget (and trigger its callback)
self.widgets["select_run"].options = runs_display
self.widgets["select_run"].value = current_run_display
filterby = self.widgets["filterby_radio"].labels[
self.widgets["filterby_radio"].active
]
filterby = self.factors_dict[filterby]
self.run_data = self.data[self.data["timestamp"] == self.current_run]
options = self.run_data.index\
.get_level_values(filterby).drop_duplicates().tolist()
# Update widget (and trigger its callback)
self.widgets["select_filter"].options = options
self.widgets["select_filter"].value = options[0]
def switch_view(self, run_name):
'''When received, switch selected run to run_name'''
# This will trigger the widget's callback
self.widgets["select_run"].value = run_name
# Constructor
def __init__(self, master, doc, data, metadata):
'''
Here are the most important attributes of the InspectRuns class
master : reference to the ViewMaster class
doc : an object provided by Bokeh to add elements to the HTML document
data : pandas dataframe containing all the tests data
metadata : pandas dataframe containing all the tests metadata
sources : ColumnDataSource object provided by Bokeh, contains current
data for the plots (inside the .data attribute)
plots : dictionary of Bokeh plots
widgets : dictionary of Bokeh widgets
'''
self.master = master
self.doc = doc
self.data = data
self.metadata = metadata
self.sources = {
"mu_source": ColumnDataSource(data={}),
"sigma_source": ColumnDataSource(data={}),
"s10_source": ColumnDataSource(data={}),
"s2_source": ColumnDataSource(data={})
}
self.plots = {}
self.widgets = {}
# Setup Bokeh objects
self.setup_plots()
self.setup_widgets()
# Pass the initial metadata to the template (will be updated in CustomJS
# callbacks). This is required because metadata is not displayed in a
# Bokeh widget, so we can't update this with a server callback.
initial_run = helper.get_metadata(self.metadata, self.current_run)
self.doc.template_variables["initial_timestamp"] = initial_run.name
self.doc.template_variables["initial_repo"] = initial_run.repo_name
# At this point, everything should have been initialized, so we can
# show the plots for the first time
self.update_plots()
|
gpl-3.0
| -3,457,770,372,378,908,000 | 36.602465 | 81 | 0.56585 | false | 4.164505 | false | false | false |
lhfei/spark-in-action
|
spark-2.x/src/main/python/mllib/binary_classification_metrics_example.py
|
1
|
2177
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Binary Classification Metrics Example.
"""
from __future__ import print_function
from pyspark import SparkContext
# $example on$
from pyspark.mllib.classification import LogisticRegressionWithLBFGS
from pyspark.mllib.evaluation import BinaryClassificationMetrics
from pyspark.mllib.util import MLUtils
# $example off$
if __name__ == "__main__":
sc = SparkContext(appName="BinaryClassificationMetricsExample")
# $example on$
# Several of the methods available in scala are currently missing from pyspark
# Load training data in LIBSVM format
data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_binary_classification_data.txt")
# Split data into training (60%) and test (40%)
training, test = data.randomSplit([0.6, 0.4], seed=11)
training.cache()
# Run training algorithm to build the model
model = LogisticRegressionWithLBFGS.train(training)
# Compute raw scores on the test set
predictionAndLabels = test.map(lambda lp: (float(model.predict(lp.features)), lp.label))
# Instantiate metrics object
metrics = BinaryClassificationMetrics(predictionAndLabels)
# Area under precision-recall curve
print("Area under PR = %s" % metrics.areaUnderPR)
# Area under ROC curve
print("Area under ROC = %s" % metrics.areaUnderROC)
# $example off$
sc.stop()
|
apache-2.0
| -8,696,846,894,260,542,000 | 36.875 | 92 | 0.725769 | false | 4.054004 | false | false | false |
UITools/saleor
|
tests/dashboard/test_staff.py
|
1
|
7623
|
from django.contrib.auth.tokens import default_token_generator
from django.core import mail
from django.templatetags.static import static
from django.urls import reverse
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from templated_email import send_templated_mail
from saleor.account.models import User
from saleor.core.utils import build_absolute_uri
from saleor.dashboard.staff.forms import StaffForm
from saleor.dashboard.staff.utils import remove_staff_member
from saleor.settings import DEFAULT_FROM_EMAIL
def test_remove_staff_member_with_orders(
staff_user, permission_manage_products, order):
order.user = staff_user
order.save()
staff_user.user_permissions.add(permission_manage_products)
remove_staff_member(staff_user)
staff_user = User.objects.get(pk=staff_user.pk)
assert not staff_user.is_staff
assert not staff_user.user_permissions.exists()
def test_remove_staff_member(staff_user):
remove_staff_member(staff_user)
assert not User.objects.filter(pk=staff_user.pk).exists()
def test_staff_form_not_valid(staff_user):
data = {'user_permissions': 1}
form = StaffForm(data=data, user=staff_user)
assert not form.is_valid()
def test_staff_form_create_valid(
admin_client, staff_user, permission_manage_products):
assert staff_user.user_permissions.count() == 0
url = reverse('dashboard:staff-details', kwargs={'pk': staff_user.pk})
data = {
'email': '[email protected]', 'is_staff': True,
'user_permissions': permission_manage_products.pk}
admin_client.post(url, data)
staff_user = User.objects.get(pk=staff_user.pk)
assert staff_user.user_permissions.count() == 1
def test_staff_form_create_not_valid(admin_client, staff_user):
url = reverse('dashboard:staff-details', kwargs={'pk': staff_user.pk})
data = {'csrf': 'examplecsfr'}
admin_client.post(url, data)
staff_user = User.objects.get(pk=staff_user.pk)
assert staff_user.user_permissions.count() == 0
def test_admin_cant_change_his_permissions(admin_client, admin_user):
assert admin_user.is_active
assert admin_user.is_staff
url = reverse('dashboard:staff-details', kwargs={'pk': admin_user.pk})
data = {'is_active': False, 'is_staff': False}
response = admin_client.post(url, data)
admin_user = User.objects.get(pk=admin_user.pk)
assert response.status_code == 200
assert admin_user.is_active
assert admin_user.is_staff
def test_staff_form_remove_permissions_after_unassign_is_staff(
admin_client, staff_user, permission_manage_products):
staff_user.user_permissions.add(permission_manage_products)
assert staff_user.is_active
assert staff_user.is_staff
assert staff_user.user_permissions.count() == 1
url = reverse('dashboard:staff-details', kwargs={'pk': staff_user.pk})
data = {
'email': staff_user.email, 'is_active': True, 'is_staff': False,
'user_permissions': permission_manage_products.pk}
response = admin_client.post(url, data)
staff_user.refresh_from_db()
assert response.status_code == 302
assert staff_user.is_active
assert not staff_user.is_staff
assert staff_user.user_permissions.count() == 0
def test_delete_staff(admin_client, staff_user):
user_count = User.objects.all().count()
url = reverse('dashboard:staff-delete', kwargs={'pk': staff_user.pk})
data = {'pk': staff_user.pk}
response = admin_client.post(url, data)
assert User.objects.all().count() == user_count - 1
assert response['Location'] == reverse('dashboard:staff-list')
def test_delete_staff_no_post(admin_client, staff_user):
user_count = User.objects.all().count()
url = reverse('dashboard:staff-delete', kwargs={'pk': staff_user.pk})
admin_client.get(url)
assert User.objects.all().count() == user_count
def test_delete_staff_with_orders(admin_client, staff_user, order):
order.user = staff_user
order.save()
user_count = User.objects.all().count()
url = reverse('dashboard:staff-delete', kwargs={'pk': staff_user.pk})
data = {'pk': staff_user.pk}
response = admin_client.post(url, data)
    # Staff placed some orders in the past, so their account should not be deleted
assert User.objects.all().count() == user_count
staff_user.refresh_from_db()
# Instead, his privileges are taken away
assert not staff_user.is_staff
assert response['Location'] == reverse('dashboard:staff-list')
def test_staff_create_email_with_set_link_password(admin_client):
user_count = User.objects.count()
mail_outbox_count = len(mail.outbox)
url = reverse('dashboard:staff-create')
data = {'email': '[email protected]', 'is_staff': True}
response = admin_client.post(url, data)
assert User.objects.count() == user_count + 1
assert len(mail.outbox) == mail_outbox_count + 1
assert response['Location'] == reverse('dashboard:staff-list')
def test_send_set_password_email(staff_user, site_settings):
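    # Render and send the set-password email, then check that the generated reset
    # link appears in the outgoing message body.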
site = site_settings.site
uid = urlsafe_base64_encode(force_bytes(staff_user.pk))
token = default_token_generator.make_token(staff_user)
logo_url = build_absolute_uri(static('images/logo-light.svg'))
password_set_url = build_absolute_uri(
reverse(
'account:reset-password-confirm',
kwargs={'token': token, 'uidb64': uid}))
ctx = {
'logo_url': logo_url,
'password_set_url': password_set_url,
'site_name': site.name}
send_templated_mail(
template_name='dashboard/staff/set_password',
from_email=DEFAULT_FROM_EMAIL,
recipient_list=[staff_user.email],
context=ctx)
assert len(mail.outbox) == 1
generated_link = reverse(
'account:reset-password-confirm',
kwargs={
'uidb64': uid,
'token': token})
absolute_generated_link = build_absolute_uri(generated_link)
    sent_message = mail.outbox[0].body
    assert absolute_generated_link in sent_message
def test_create_staff_and_set_password(admin_client):
url = reverse('dashboard:staff-create')
data = {
'first_name': 'Jan', 'last_name': 'Nowak',
'email': '[email protected]', 'is_staff': True}
response = admin_client.post(url, data)
assert response.status_code == 302
new_user = User.objects.get(email='[email protected]')
assert new_user.first_name == 'Jan'
assert new_user.last_name == 'Nowak'
assert not new_user.password
uid = urlsafe_base64_encode(force_bytes(new_user.pk))
token = default_token_generator.make_token(new_user)
response = admin_client.get(
reverse(
'account:reset-password-confirm',
kwargs={
'uidb64': uid,
'token': token}))
assert response.status_code == 302
post_data = {'new_password1': 'password', 'new_password2': 'password'}
response = admin_client.post(response['Location'], post_data)
assert response.status_code == 302
assert response['Location'] == reverse('account:reset-password-complete')
new_user = User.objects.get(email='[email protected]')
assert new_user.has_usable_password()
def test_create_staff_from_customer(
admin_client, customer_user, permission_manage_products):
url = reverse('dashboard:staff-create')
data = {
'email': customer_user.email, 'is_staff': True,
'user_permissions': permission_manage_products.pk}
admin_client.post(url, data)
customer_user.refresh_from_db()
assert customer_user.is_staff
|
bsd-3-clause
| 4,133,092,150,845,388,000 | 36.737624 | 77 | 0.681228 | false | 3.501608 | true | false | false |
coleifer/scout
|
scout/validator.py
|
1
|
2599
|
import json
import sys
from flask import request
from scout.constants import PROTECTED_KEYS
from scout.exceptions import error
from scout.models import Index
if sys.version_info[0] == 2:
json_load = lambda d: json.loads(d)
else:
json_load = lambda d: json.loads(d.decode('utf-8') if isinstance(d, bytes)
else d)
class RequestValidator(object):
def parse_post(self, required_keys=None, optional_keys=None):
"""
Clean and validate POSTed JSON data by defining sets of required and
optional keys.
"""
if request.headers.get('content-type') == 'application/json':
data = request.data
elif 'data' not in request.form:
error('Missing correct content-type or missing "data" field.')
else:
data = request.form['data']
if data:
try:
data = json_load(data)
except ValueError:
error('Unable to parse JSON data from request.')
else:
data = {}
required = set(required_keys or ())
optional = set(optional_keys or ())
all_keys = required | optional
keys_present = set(key for key in data if data[key] not in ('', None))
missing = required - keys_present
if missing:
error('Missing required fields: %s' % ', '.join(sorted(missing)))
invalid_keys = keys_present - all_keys
if invalid_keys:
error('Invalid keys: %s' % ', '.join(sorted(invalid_keys)))
return data
def validate_indexes(self, data, required=True):
if data.get('index'):
index_names = (data['index'],)
elif data.get('indexes'):
index_names = data['indexes']
elif ('index' in data or 'indexes' in data) and not required:
return ()
else:
return None
indexes = list(Index.select().where(Index.name << index_names))
# Validate that all the index names exist.
observed_names = set(index.name for index in indexes)
invalid_names = []
for index_name in index_names:
if index_name not in observed_names:
invalid_names.append(index_name)
if invalid_names:
error('The following indexes were not found: %s.' %
', '.join(invalid_names))
return indexes
def extract_get_params(self):
return dict(
(key, request.args.getlist(key))
for key in request.args
if key not in PROTECTED_KEYS)
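# Editor's addition: a hedged usage sketch, not part of the original module.
# It assumes a Flask request context and index names that already exist in the
# database; the field names 'content', 'identifier' and 'metadata' are
# hypothetical.
def _example_validate_document_post():
    """Illustrative only: parse a POST body, then resolve its target indexes."""
    validator = RequestValidator()
    data = validator.parse_post(
        required_keys=['content', 'index'],
        optional_keys=['identifier', 'metadata'])
    # validate_indexes() aborts with error() if any named index is unknown.
    indexes = validator.validate_indexes(data)
    return data, indexes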
|
mit
| 5,230,470,322,012,655,000 | 30.313253 | 78 | 0.569065 | false | 4.246732 | false | false | false |
ov1d1u/tv-maxe-ng
|
tv-maxe/tvmaxe.py
|
1
|
3058
|
#!/usr/bin/env python3
import sys
import os
import logger
import logging
import argparse
from os.path import isfile, join, splitext
from importlib import import_module
from PyQt5 import QtCore, QtWidgets, uic
from PyQt5.QtGui import QIcon, QPixmap
from settingsmanager import SettingsManager
from mainwindow import TVMaxeMainWindow
log = logging.getLogger(__name__)
class TVMaxe(QtWidgets.QApplication):
protocol_plugins = {}
def __init__(self, argv):
super(QtWidgets.QApplication, self).__init__(argv)
self.setApplicationName("TV-Maxe")
self.setApplicationVersion("0.1a")
self.setOrganizationDomain("org.tv-maxe.app")
self.setOrganizationName("TV-Maxe")
log.info('{0} {1}'.format(self.applicationName(), self.applicationVersion()))
self.settings_manager = SettingsManager()
self.init_plugins()
log.debug('Current localization: {0}'.format(QtCore.QLocale.system().name()))
translator = QtCore.QTranslator()
translator.load("i18n/{0}.qm".format(QtCore.QLocale.system().name()))
self.installTranslator(translator)
self.mainw = TVMaxeMainWindow(None)
self.mainw.show()
def init_plugins(self):
log.debug('Initializing plugins:')
protocols_dir = 'protocols'
sys.path.insert(0, 'protocols/')
protocol_modules = [f for f in os.listdir(protocols_dir) if isfile(join(protocols_dir, f))]
for filename in protocol_modules:
if filename == '__init__.py' or filename == '__init__.pyc':
continue
file, extension = splitext(filename)
if extension == '.py':
protocol_module = import_module(file)
protocol_class = protocol_module.__classname__
log.debug('- Plugin found: {0} {1} ({2})'.format(
protocol_module.__classname__.name,
protocol_module.__classname__.version,
protocol_module.__classname__)
)
for protocol in protocol_class.protocols:
self.protocol_plugins[protocol] = protocol_class
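# Editor's addition: a hedged sketch of the plugin-module shape that
# init_plugins() above appears to expect -- a file under protocols/ whose
# module-level __classname__ points at a class exposing `name`, `version`
# and a `protocols` list. All names below are hypothetical.
#
# class HttpProtocol:
#     name = 'Example HTTP'
#     version = '0.1'
#     protocols = ['http', 'https']
#
# __classname__ = HttpProtocol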
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--log-level",
help="Sets the logger verbosity",
choices=["debug", "warn", "info"]
)
args = parser.parse_args()
if args.log_level:
if args.log_level == 'debug':
logger.set_logging_level(logging.DEBUG)
elif args.log_level == 'warn':
logger.set_logging_level(logging.WARNING)
else:
logger.set_logging_level(logging.INFO)
else:
logger.set_logging_level(logging.INFO)
if __name__ == '__main__':
parse_args()
if getattr(sys, 'frozen', False):
os.chdir(sys._MEIPASS)
else:
os.chdir(os.path.dirname(os.path.realpath(__file__)))
log.debug('Current working directory: {0}'.format(os.getcwd()))
app = TVMaxe(sys.argv)
sys.exit(app.exec_())
log.debug('Exiting app...')
|
lgpl-3.0
| 2,025,557,249,018,245,600 | 32.615385 | 99 | 0.611511 | false | 4.018397 | false | false | false |
projecthamster/experiments
|
pulse.py
|
1
|
3779
|
#!/usr/bin/env python
# - coding: utf-8 -
# Copyright (C) 2010 Toms Bauģis <toms.baugis at gmail.com>
"""
Demo of a a timer based ripple running through nodes and initiating
sub-animations. Not sure where this could come handy.
"""
from gi.repository import Gtk as gtk
from gi.repository import Gdk as gdk
from lib import graphics
from lib.pytweener import Easing
from random import random
import math
class Node(graphics.Sprite):
def __init__(self, angle, distance):
graphics.Sprite.__init__(self)
self.angle = angle
self.distance = distance
self.base_angle = 0
self.distance_scale = 1
self.radius = 4.0
self.phase = 0
self.connect("on-render", self.on_render)
def on_render(self, sprite):
self.graphics.clear()
self.x = math.cos(self.angle + self.base_angle) * self.distance * self.distance_scale
self.y = math.sin(self.angle + self.base_angle) * self.distance * self.distance_scale
self.graphics.circle(0, 0, self.radius)
self.graphics.fill("#aaa")
class Scene(graphics.Scene):
def __init__(self):
graphics.Scene.__init__(self)
self.nodes = []
self.tick = 0
self.phase = 0
self.container = graphics.Sprite()
self.add_child(self.container)
self.framerate = 30
self.connect("on-enter-frame", self.on_enter_frame)
self.connect("on-mouse-move", self.on_mouse_move)
def on_mouse_move(self, scene, event):
if gdk.ModifierType.BUTTON1_MASK & event.state:
# rotate and scale on mouse
base_angle = math.pi * 2 * ((self.width / 2 - event.x) / self.width) / 3
distance_scale = math.sqrt((self.width / 2 - event.x) ** 2 + (self.height / 2 - event.y) ** 2) \
/ math.sqrt((self.width / 2) ** 2 + (self.height / 2) ** 2)
for node in self.nodes:
node.base_angle = base_angle
node.distance_scale = distance_scale
def on_enter_frame(self, scene, context):
self.container.x = self.width / 2
self.container.y = self.height / 2
if len(self.nodes) < 100:
for i in range(100 - len(self.nodes)):
angle = random() * math.pi * 2
distance = random() * 500
node = Node(angle, distance)
node.phase = self.phase
self.container.add_child(node)
self.nodes.append(node)
if not self.tick:
self.phase +=1
self.animate(self,
tick = 550,
duration = 3,
on_complete = self.reset_tick,
easing = Easing.Expo.ease_in_out)
for node in self.nodes:
if node.phase < self.phase and node.distance < self.tick:
node.phase = self.phase
self.tweener.kill_tweens(node)
self.animate(node,
duration = 0.5,
radius = 20,
easing = Easing.Expo.ease_in,
on_complete = self.slide_back)
def reset_tick(self, target):
self.tick = 0
def slide_back(self, node):
self.animate(node,
radius = 4,
duration = 0.5,
easing = Easing.Expo.ease_out)
class BasicWindow:
def __init__(self):
window = gtk.Window()
window.set_size_request(600, 500)
window.connect("delete_event", lambda *args: gtk.main_quit())
window.add(Scene())
window.show_all()
example = BasicWindow()
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL) # gtk3 screws up ctrl+c
gtk.main()
|
mit
| 8,402,965,764,535,398,000 | 31.568966 | 108 | 0.543409 | false | 3.770459 | false | false | false |
yephper/django
|
django/bin/roub/home/migrations/0007_auto_20160414_1100.py
|
1
|
2138
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-04-14 03:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0006_auto_20160414_1055'),
]
operations = [
migrations.CreateModel(
name='Commodity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('gname', models.CharField(max_length=100, unique=True, verbose_name='商品名称')),
('gdescription', models.CharField(max_length=200, unique=True, verbose_name='商品描述')),
('gcontent', models.TextField(verbose_name='商品详情')),
('cid', models.IntegerField(verbose_name='商品分类id')),
('sid', models.IntegerField(verbose_name='分店ID')),
('goprice', models.FloatField(max_length=10, unique=True, verbose_name='原价')),
('gdprice', models.FloatField(max_length=10, unique=True, verbose_name='折扣价')),
('gstock', models.IntegerField(max_length=10, unique=True, verbose_name='库存')),
('gimg', models.CharField(max_length=200, unique=True, verbose_name='商品图片')),
('pictureset', models.TextField(verbose_name='商品图片集')),
('gorder', models.IntegerField(max_length=5, unique=True, verbose_name='商品排序')),
('gtype', models.IntegerField(max_length=1, unique=True, verbose_name='消费类型')),
('gstatus', models.IntegerField(max_length=1, unique=True, verbose_name='商品状态')),
('gvrebate', models.FloatField(max_length=10, verbose_name='VIP会员返现金额')),
('printid', models.CharField(max_length=32, unique=True, verbose_name='打印机ID')),
('isboutique', models.IntegerField(max_length=1, unique=True, verbose_name='是否精品')),
],
),
migrations.DeleteModel(
name='Goods',
),
]
|
bsd-3-clause
| -564,066,498,514,451,000 | 49.5 | 114 | 0.588119 | false | 3.447099 | false | false | false |
cechrist/cardoon
|
cardoon/devices/autoThermal.py
|
1
|
6405
|
"""
Generates an electrothermal device from a nonlinear device class
One assumption is that the base class defines numTerms directly in the
class definition and it is not changed in process_params().
-------------------------------------------------------------------
Copyright Carlos Christoffersen <[email protected]>
This file is part of the cardoon electronic circuit simulator.
Cardoon is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 or later:
http://www.gnu.org/licenses/gpl.html
"""
import numpy as np
from cardoon.globalVars import glVar
# For automatic differentiation:
import cppaddev as ad
def thermal_device(nle):
class ThermalDevice(nle):
"""
Generic electrothermal nonlinear element
Inherits from a regular nonlinear device class (nle). nle can
be a linear device (like resistor.py) but it must implement
all nonlinear functions to use this template. The
electrothermal device is always nonlinear.
Adds one thermal port (pair of terminals) connected after the
regular terminals. Temperature in this port is the difference
with ambient temperature in degrees C. A current source
proportional to the instantaneous power dissipated in device
is connected to the thermal port.
"""
# devtype is the 'model' name
devType = nle.devType + '_t'
# Force nonlinear behaviour (even if base class is linear, see
# resistor.py)
isNonlinear = True
def __init__(self, instanceName):
nle.__init__(self, instanceName)
self.__addThermalPorts = True
if nle.numTerms:
# Add two thermal terminals
self.numTerms = nle.numTerms + 2
self.__varTerms = False
else:
self.__varTerms = True
def process_params(self):
"""
Process parameters as in base class
Add extra thermal terminals if __addThermalPorts is True
"""
# Process parameters in base class
nle.process_params(self, thermal = True)
# Add thermal terminals to control and output tuples only
# if needed. Base class must reset __addThermalPorts to
# True if needed.
if self.__addThermalPorts:
# Add units to thermal port
self.connection[self.numTerms-1].unit = \
'+{0} C'.format(glVar.temp)
self.connection[self.numTerms-2].unit = \
'+{0} C'.format(glVar.temp)
self.csOutPorts = self.csOutPorts + [(self.numTerms-1,
self.numTerms-2)]
self.controlPorts = self.controlPorts + [(self.numTerms-2,
self.numTerms-1)]
# Thermal output number
self.__ton = len(self.csOutPorts) - 1
# Thermal control port number
self.__tpn = len(self.controlPorts) - 1
self.__addThermalPorts = False
# Initial guess for input ports:
try:
# Consider time-delayed ports come after regular control ports
if len(self.vPortGuess) < len(self.controlPorts) + self.nDelays:
self.vPortGuess = np.insert(self.vPortGuess, self.__tpn, 0.)
except AttributeError:
# Ignore if vPortGuess not provided
pass
def eval_cqs(self, vPort, getOP = False):
"""
vPort is a vector with control voltages (last port is thermal)
"""
if getOP:
# assume that temperature is not passed in vPort: that
# is because only nle can call this function with
# getOP = True
opDict = nle.eval_cqs(self, vPort, True)
return opDict
# set temperature in base class first
self.temp = vPort[self.__tpn] + glVar.temp
nle.set_temp_vars(self, self.temp)
# Remove thermal port from vPort (needed in case
# time-delayed ports follow regular control ports)
vPort1 = np.delete(vPort, self.__tpn)
# now calculate currents and charges
(iVec1, qVec) = nle.eval_cqs(self, vPort1)
# Calculate instantaneous power
pout = nle.power(self, vPort1, iVec1)
# Re-arrange output vector
iVec = np.append(iVec1, pout)
return (iVec, qVec)
# Create these using the AD facility
eval_and_deriv = ad.eval_and_deriv
eval = ad.eval
def power(self, vPort, ioutV):
"""
Calculate total instantaneous power
Input: control voltages and currents from eval_cqs()
It works OK even if ioutV includes charges
"""
# Power is already stored in ioutV
return ioutV[self.__ton]
# Return template class
return ThermalDevice
# No need to override this: too much overhead and not much advantage
#
# def get_OP(self, vPort):
# """
# Calculates operating point information
#
# Input: vPort (port voltages, including thermal)
# Output: dictionary with OP variables
# """
# # set temperature in base class first
# temp = vPort[self.__tpn] + glVar.temp
# nle.set_temp_vars(self, temp)
#
# # Remove thermal port from vPort (needed in case
# # time-delayed ports follow regular control ports)
# vPort1 = np.delete(vPort, self.__tpn)
# # now calculate currents and charges
# (iVec1, qVec) = nle.eval_cqs(self, vPort1)
# # Calculate instantaneous power
# pout = nle.power(self, vPort1, iVec1)
#
# # Get operating point dictionary from base class
# opDict = nle.get_OP(self, vPort)
# # Add temperature / power
# opDict.update({'Temp': temp,
# 'Power': pout})
# return opDict
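# Editor's addition: a hedged usage sketch. thermal_device() is a class
# factory, so wrapping an existing nonlinear device class would look roughly
# like the following; `BaseDiode` is a hypothetical device class.
#
# ThermalDiode = thermal_device(BaseDiode)  # devType becomes BaseDiode.devType + '_t'
# d1 = ThermalDiode('d1')                   # gains one thermal port (two terminals)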
|
gpl-3.0
| -7,248,309,319,612,847,000 | 37.353293 | 80 | 0.560812 | false | 4.208279 | false | false | false |
jesuscript/topo-mpi
|
topo/sheet/lissom.py
|
1
|
18697
|
"""
LISSOM and related sheet classes.
$Id$
"""
__version__='$Revision$'
from numpy import zeros,ones
import copy
import param
import topo
from topo.base.projection import Projection
from topo.base.sheet import activity_type
from topo.base.simulation import EPConnectionEvent
from topo.transferfn.basic import PiecewiseLinear
from topo.sheet import JointNormalizingCFSheet
class LISSOM(JointNormalizingCFSheet):
"""
A Sheet class implementing the LISSOM algorithm
(Sirosh and Miikkulainen, Biological Cybernetics 71:66-78, 1994).
A LISSOM sheet is a JointNormalizingCFSheet slightly modified to
enforce a fixed number of settling steps. Settling is controlled
by the tsettle parameter; once that number of settling steps has
been reached, an external input is required before the sheet will
activate again.
"""
strict_tsettle = param.Parameter(default = None,doc="""
If non-None, delay sending output until activation_count reaches this value.""")
mask_init_time=param.Integer(default=5,bounds=(0,None),doc="""
Determines when a new mask is initialized in each new iteration.
The mask is reset whenever new input comes in. Once the
activation_count (see tsettle) reaches mask_init_time, the mask
is initialized to reflect the current activity profile.""")
tsettle=param.Integer(default=8,bounds=(0,None),doc="""
Number of times to activate the LISSOM sheet for each external input event.
A counter is incremented each time an input is received from any
source, and once the counter reaches tsettle, the last activation
step is skipped so that there will not be any further recurrent
activation. The next external (i.e., afferent or feedback)
event will then start the counter over again.""")
continuous_learning = param.Boolean(default=False, doc="""
Whether to modify the weights after every settling step.
If false, waits until settling is completed before doing learning.""")
output_fns = param.HookList(default=[PiecewiseLinear(lower_bound=0.1,upper_bound=0.65)])
precedence = param.Number(0.6)
post_initialization_weights_output_fns = param.HookList([],doc="""
If not empty, weights output_fns that will replace the
existing ones after an initial normalization step.""")
beginning_of_iteration = param.HookList(default=[],instantiate=False,doc="""
List of callables to be executed at the beginning of each iteration.""")
end_of_iteration = param.HookList(default=[],instantiate=False,doc="""
List of callables to be executed at the end of each iteration.""")
def __init__(self,**params):
super(LISSOM,self).__init__(**params)
self.__counter_stack=[]
self.activation_count = 0
self.new_iteration = True
def start(self):
self._normalize_weights(active_units_mask=False)
if len(self.post_initialization_weights_output_fns)>0:
for proj in self.in_connections:
if not isinstance(proj,Projection):
self.debug("Skipping non-Projection ")
else:
proj.weights_output_fns=self.post_initialization_weights_output_fns
def input_event(self,conn,data):
# On a new afferent input, clear the activity
if self.new_iteration:
for f in self.beginning_of_iteration: f()
self.new_iteration = False
self.activity *= 0.0
for proj in self.in_connections:
proj.activity *= 0.0
self.mask.reset()
super(LISSOM,self).input_event(conn,data)
### JABALERT! There should be some sort of warning when
### tsettle times the input delay is larger than the input period.
### Right now it seems to do strange things in that case (does it
### settle at all after the first iteration?), but of course that
### is arguably an error condition anyway (and should thus be
### flagged).
# CEBALERT: there is at least one bug in here for tsettle==0: see
# CB/JAB email "LISSOM tsettle question", 2010/03/22.
def process_current_time(self):
"""
Pass the accumulated stimulation through self.output_fns and
send it out on the default output port.
"""
if self.new_input:
self.new_input = False
if self.activation_count == self.mask_init_time:
self.mask.calculate()
if self.tsettle == 0:
# Special case: behave just like a CFSheet
self.activate()
self.learn()
elif self.activation_count == self.tsettle:
# Once we have been activated the required number of times
# (determined by tsettle), reset various counters, learn
# if appropriate, and avoid further activation until an
# external event arrives.
for f in self.end_of_iteration: f()
self.activation_count = 0
self.new_iteration = True # used by input_event when it is called
if (self.plastic and not self.continuous_learning):
self.learn()
else:
self.activate()
self.activation_count += 1
if (self.plastic and self.continuous_learning):
self.learn()
# print the weights of a unit
def printwts(self,x,y):
for proj in self.in_connections:
print proj.name, x, y
print proj.cfs[x,y].weights
def state_push(self,**args):
super(LISSOM,self).state_push(**args)
self.__counter_stack.append((self.activation_count,self.new_iteration))
def state_pop(self,**args):
super(LISSOM,self).state_pop(**args)
self.activation_count,self.new_iteration=self.__counter_stack.pop()
def send_output(self,src_port=None,data=None):
"""Send some data out to all connections on the given src_port."""
out_conns_on_src_port = [conn for conn in self.out_connections
if self._port_match(conn.src_port,[src_port])]
for conn in out_conns_on_src_port:
if self.strict_tsettle != None:
if self.activation_count < self.strict_tsettle:
if len(conn.dest_port)>2 and conn.dest_port[2] == 'Afferent':
continue
self.verbose("Sending output on src_port %s via connection %s to %s" %
(str(src_port), conn.name, conn.dest.name))
e=EPConnectionEvent(self.simulation._convert_to_time_type(conn.delay)+self.simulation.time(),conn,data)
self.simulation.enqueue_event(e)
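# Editor's addition: a hedged construction sketch. The attributes declared
# above (tsettle, mask_init_time, output_fns, ...) are param declarations, so
# a model script could presumably override them at instantiation time; any
# other arguments a real simulation would pass are omitted here.
#
# v1 = LISSOM(tsettle=9, mask_init_time=5)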
class JointScaling(LISSOM):
"""
LISSOM sheet extended to allow joint auto-scaling of Afferent input projections.
An exponentially weighted average is used to calculate the average
joint activity across all jointly-normalized afferent projections.
This average is then used to calculate a scaling factor for the
current afferent activity and for the afferent learning rate.
The target average activity for the afferent projections depends
on the statistics of the input; if units are activated more often
(e.g. the number of Gaussian patterns on the retina during each
iteration is increased) the target average activity should be
larger in order to maintain a constant average response to similar
inputs in V1. The target activity for learning rate scaling does
not need to change, because the learning rate should be scaled
regardless of what causes the change in average activity.
"""
# ALERT: Should probably be extended to jointly scale different
# groups of projections. Currently only works for the joint
# scaling of projections named "Afferent", grouped together by
# JointNormalize in dest_port.
target = param.Number(default=0.045, doc="""
Target average activity for jointly scaled projections.""")
# JABALERT: I cannot parse the docstring; is it an activity or a learning rate?
target_lr = param.Number(default=0.045, doc="""
Target average activity for jointly scaled projections.
Used for calculating a learning rate scaling factor.""")
smoothing = param.Number(default=0.999, doc="""
Influence of previous activity, relative to current, for computing the average.""")
apply_scaling = param.Boolean(default=True, doc="""Whether to apply the scaling factors.""")
precedence = param.Number(0.65)
def __init__(self,**params):
super(JointScaling,self).__init__(**params)
self.x_avg=None
self.sf=None
self.lr_sf=None
self.scaled_x_avg=None
self.__current_state_stack=[]
def calculate_joint_sf(self, joint_total):
"""
Calculate current scaling factors based on the target and previous average joint activities.
Keeps track of the scaled average for debugging. Could be
overridden by a subclass to calculate the factors differently.
"""
if self.plastic:
self.sf *=0.0
self.lr_sf *=0.0
self.sf += self.target/self.x_avg
self.lr_sf += self.target_lr/self.x_avg
self.x_avg = (1.0-self.smoothing)*joint_total + self.smoothing*self.x_avg
self.scaled_x_avg = (1.0-self.smoothing)*joint_total*self.sf + self.smoothing*self.scaled_x_avg
def do_joint_scaling(self):
"""
Scale jointly normalized projections together.
Assumes that the projections to be jointly scaled are those
that are being jointly normalized. Calculates the joint total
of the grouped projections, and uses this to calculate the
scaling factor.
"""
joint_total = zeros(self.shape, activity_type)
for key,projlist in self._grouped_in_projections('JointNormalize'):
if key is not None:
if key =='Afferent':
for proj in projlist:
joint_total += proj.activity
self.calculate_joint_sf(joint_total)
if self.apply_scaling:
for proj in projlist:
proj.activity *= self.sf
if hasattr(proj.learning_fn,'learning_rate_scaling_factor'):
proj.learning_fn.update_scaling_factor(self.lr_sf)
else:
raise ValueError("Projections to be joint scaled must have a learning_fn that supports scaling, such as CFPLF_PluginScaled")
else:
raise ValueError("Only Afferent scaling currently supported")
def activate(self):
"""
Compute appropriate scaling factors, apply them, and collect resulting activity.
Scaling factors are first computed for each set of jointly
normalized projections, and the resulting activity patterns
are then scaled. Then the activity is collected from each
projection, combined to calculate the activity for this sheet,
and the result is sent out.
"""
self.activity *= 0.0
if self.x_avg is None:
self.x_avg=self.target*ones(self.shape, activity_type)
if self.scaled_x_avg is None:
self.scaled_x_avg=self.target*ones(self.shape, activity_type)
if self.sf is None:
self.sf=ones(self.shape, activity_type)
if self.lr_sf is None:
self.lr_sf=ones(self.shape, activity_type)
#Afferent projections are only activated once at the beginning of each iteration
#therefore we only scale the projection activity and learning rate once.
if self.activation_count == 0:
self.do_joint_scaling()
for proj in self.in_connections:
self.activity += proj.activity
if self.apply_output_fns:
for of in self.output_fns:
of(self.activity)
self.send_output(src_port='Activity',data=self.activity)
def state_push(self,**args):
super(JointScaling,self).state_push(**args)
self.__current_state_stack.append((copy.copy(self.x_avg),copy.copy(self.scaled_x_avg),
copy.copy(self.sf), copy.copy(self.lr_sf)))
def state_pop(self,**args):
super(JointScaling,self).state_pop(**args)
self.x_avg,self.scaled_x_avg, self.sf, self.lr_sf=self.__current_state_stack.pop()
def schedule_events(sheet_str="topo.sim['V1']",st=0.5,aff_name="Afferent",
ids=1.0,ars=1.0,increase_inhibition=False):
"""
Convenience function for scheduling a default set of events
typically used with a LISSOM sheet. The parameters used
are the defaults from Miikkulainen, Bednar, Choe, and Sirosh
(2005), Computational Maps in the Visual Cortex, Springer.
Note that Miikulainen 2005 specifies only one output_fn for the
LISSOM sheet; where these scheduled actions operate on an
output_fn, they do so only on the first output_fn in the sheet's
list of output_fns.
Installs afferent learning rate changes for any projection whose
name contains the keyword specified by aff_name (typically
"Afferent").
The st argument determines the timescale relative to a
20000-iteration simulation, and results in the default
10000-iteration simulation for the default st=0.5.
The ids argument specifies the input density scale, i.e. how much
input there is at each iteration, on average, relative to the
default. The ars argument specifies how much to scale the
afferent learning rate, if necessary.
If increase_inhibition is true, gradually increases the strength
of the inhibitory connection, typically used for natural image
simulations.
"""
# Allow sheet.BoundingBox calls (below) after reloading a snapshot
topo.sim.startup_commands.append("from topo import sheet")
# Lateral excitatory bounds changes
# Convenience variable: excitatory projection
LE=sheet_str+".projections()['LateralExcitatory']"
topo.sim.schedule_command( 200*st,LE+'.change_bounds(sheet.BoundingBox(radius=0.06250))')
topo.sim.schedule_command( 500*st,LE+'.change_bounds(sheet.BoundingBox(radius=0.04375))')
topo.sim.schedule_command( 1000*st,LE+'.change_bounds(sheet.BoundingBox(radius=0.03500))')
topo.sim.schedule_command( 2000*st,LE+'.change_bounds(sheet.BoundingBox(radius=0.02800))')
topo.sim.schedule_command( 3000*st,LE+'.change_bounds(sheet.BoundingBox(radius=0.02240))')
topo.sim.schedule_command( 4000*st,LE+'.change_bounds(sheet.BoundingBox(radius=0.01344))')
topo.sim.schedule_command( 5000*st,LE+'.change_bounds(sheet.BoundingBox(radius=0.00806))')
topo.sim.schedule_command( 6500*st,LE+'.change_bounds(sheet.BoundingBox(radius=0.00484))')
topo.sim.schedule_command( 8000*st,LE+'.change_bounds(sheet.BoundingBox(radius=0.00290))')
topo.sim.schedule_command(20000*st,LE+'.change_bounds(sheet.BoundingBox(radius=0.00174))')
# Lateral excitatory learning rate changes
idss=("" if ids==1 else "/%3.1f"%ids)
estr='%s.learning_rate=%%s%s*%s.n_units'%(LE,idss,LE)
topo.sim.schedule_command( 200*st,estr%'0.12168')
topo.sim.schedule_command( 500*st,estr%'0.06084')
topo.sim.schedule_command( 1000*st,estr%'0.06084')
topo.sim.schedule_command( 2000*st,estr%'0.06084')
topo.sim.schedule_command( 3000*st,estr%'0.06084')
topo.sim.schedule_command( 4000*st,estr%'0.06084')
topo.sim.schedule_command( 5000*st,estr%'0.06084')
topo.sim.schedule_command( 6500*st,estr%'0.06084')
topo.sim.schedule_command( 8000*st,estr%'0.06084')
topo.sim.schedule_command(20000*st,estr%'0.06084')
### Lateral inhibitory learning rate and strength changes
if increase_inhibition:
LI=sheet_str+".projections()['LateralInhibitory']"
istr='%s.learning_rate=%%s%s'%(LI,idss)
topo.sim.schedule_command( 1000*st,istr%'1.80873/5.0*2.0')
topo.sim.schedule_command( 2000*st,istr%'1.80873/5.0*3.0')
topo.sim.schedule_command( 5000*st,istr%'1.80873/5.0*5.0')
topo.sim.schedule_command( 1000*st,LI+'.strength=-2.2')
topo.sim.schedule_command( 2000*st,LI+'.strength=-2.6')
# Afferent learning rate changes (for every Projection named Afferent)
sheet_=eval(sheet_str)
projs = [pn for pn in sheet_.projections().keys() if pn.count(aff_name)]
num_aff=len(projs)
arss="" if ars==1.0 else "*%3.1f"%ars
for pn in projs:
ps="%s.projections()['%s'].learning_rate=%%s%s%s" % \
(sheet_str,pn,idss if num_aff==1 else "%s/%d"%(idss,num_aff),arss)
topo.sim.schedule_command( 500*st,ps%('0.6850'))
topo.sim.schedule_command( 2000*st,ps%('0.5480'))
topo.sim.schedule_command( 4000*st,ps%('0.4110'))
topo.sim.schedule_command(20000*st,ps%('0.2055'))
# Activation function threshold changes
bstr = sheet_str+'.output_fns[0].lower_bound=%5.3f;'+\
sheet_str+'.output_fns[0].upper_bound=%5.3f'
lbi=sheet_.output_fns[0].lower_bound
ubi=sheet_.output_fns[0].upper_bound
topo.sim.schedule_command( 200*st,bstr%(lbi+0.01,ubi+0.01))
topo.sim.schedule_command( 500*st,bstr%(lbi+0.02,ubi+0.02))
topo.sim.schedule_command( 1000*st,bstr%(lbi+0.05,ubi+0.03))
topo.sim.schedule_command( 2000*st,bstr%(lbi+0.08,ubi+0.05))
topo.sim.schedule_command( 3000*st,bstr%(lbi+0.10,ubi+0.08))
topo.sim.schedule_command( 4000*st,bstr%(lbi+0.10,ubi+0.11))
topo.sim.schedule_command( 5000*st,bstr%(lbi+0.11,ubi+0.14))
topo.sim.schedule_command( 6500*st,bstr%(lbi+0.12,ubi+0.17))
topo.sim.schedule_command( 8000*st,bstr%(lbi+0.13,ubi+0.20))
topo.sim.schedule_command(20000*st,bstr%(lbi+0.14,ubi+0.23))
# Just to get more progress reports
topo.sim.schedule_command(12000*st,'pass')
topo.sim.schedule_command(16000*st,'pass')
# Settling steps changes
topo.sim.schedule_command( 2000*st,sheet_str+'.tsettle=10')
topo.sim.schedule_command( 5000*st,sheet_str+'.tsettle=11')
topo.sim.schedule_command( 6500*st,sheet_str+'.tsettle=12')
topo.sim.schedule_command( 8000*st,sheet_str+'.tsettle=13')
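# Editor's addition: a hedged usage sketch of schedule_events(). With st=0.5
# (the default timescale) the commands above target a 10000-iteration run; the
# sheet name 'V1' follows the convention used in the docstring.
#
# schedule_events("topo.sim['V1']", st=0.5, aff_name="Afferent")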
|
bsd-3-clause
| 2,643,544,422,502,129,000 | 42.080645 | 156 | 0.643686 | false | 3.743143 | false | false | false |
jffifa/kyotogang-toolset
|
kotori/gconf.py
|
1
|
3723
|
#!/usr/bin/env python2
# -*- coding: UTF-8 -*-
import sys
import os
import urlparse
import urllib
class GConf:
"""global configuration
"""
GROUP_ID = '10079277'
# encodings
SHELL_ENCODING = sys.stdout.encoding
INTERNAL_ENCODING = 'utf_8'
# debug mode
DEBUG = False
# global dir and file path settings
BASE_DIR = os.path.dirname(__file__)
USER_DATA_PATH = os.path.join(BASE_DIR, 'data', 'user')
KOTORI_ASCII_PATH = 'kotori_ascii'
# global conf for urls
PROTOCOL = 'http'
BASE_URL = 'bbs.saraba1st.com'
# http origin url
ORIGIN_URL = urlparse.urlunparse((PROTOCOL, BASE_URL, '', '', '', ''))
# forum homepage url
FORUM_PATH = '/2b/forum.php'
FORUM_URL = urlparse.urlunparse((PROTOCOL, BASE_URL, FORUM_PATH, '', '', ''))
# ajax login url
LOGIN_PATH = '/2b/member.php'
LOGIN_QUERY = urllib.urlencode({
'mod':'logging',
'action':'login',
'loginsubmit':'yes',
'infloat':'yes',
'lssubmit':'yes',
'inajax':'1',
})
LOGIN_URL = urlparse.urlunparse((PROTOCOL, BASE_URL, LOGIN_PATH, '', LOGIN_QUERY, ''))
# session keeping url
KEEP_CONN_PATH = '/2b/home.php'
KEEP_CONN_QUERY = urllib.urlencode({
'mod':'spacecp',
'ac':'credit',
'showcredit':'1'
})
KEEP_CONN_URL = urlparse.urlunparse((PROTOCOL, BASE_URL, KEEP_CONN_PATH, '', KEEP_CONN_QUERY, ''))
# get rate form url
RATE_LIM_TID = 643316
RATE_LIM_PID = 22412315
RATE_FORM_PATH = '/2b/forum.php'
RATE_FORM_QUERY_DICT = {
'mod':'misc',
'action':'rate',
#'t':'1385395649378',
#'tid':'643316',
#'pid':'22412315',
'infloat':'yes',
'handlekey':'rate',
'inajax':'1',
'ajaxtarget':'fwin_content_rate',
}
RATE_PATH = FORUM_PATH
RATE_QUERY = urllib.urlencode({
'mod':'misc',
'action':'rate',
'ratesubmit':'yes',
'infloat':'yes',
'inajax':'1'
})
RATE_URL = urlparse.urlunparse((
PROTOCOL,
BASE_URL,
FORUM_PATH,
'',
RATE_QUERY,
''))
# fake user agent
    FAKE_UA = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.57 Safari/537.36'
# http header
LOGIN_HEADER = {
'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Language':'zh-CN,zh;q=0.8,ja;q=0.6',
'Cache-Control':'max-age=0',
#'Connection':'keep-alive',
'Connection':'close',
'User-Agent':FAKE_UA,
'Origin':ORIGIN_URL,
'Referer':FORUM_URL ,
}
RATE_FORM_HEADER = {
'Accept':'*/*',
        'Accept-Language':'zh-CN,zh;q=0.8,ja;q=0.6',
#'Connection':'keep-alive',
'Connection':'close',
'User-Agent':FAKE_UA,
#'Referer':'http://bbs.saraba1st.com/2b/forum.php?mod=viewthread&tid=643316',
'X-Requested-With':'XMLHttpRequest',
}
RATE_HEADER = {
        'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Language':'zh-CN,zh;q=0.8,ja;q=0.6',
'Cache-Control':'max-age=0',
#'Connection':'keep-alive',
'Connection':'close',
'User-Agent':FAKE_UA,
'Origin':ORIGIN_URL,
#'Referer':'http://bbs.saraba1st.com/2b/forum.php?mod=viewthread&tid=974473&page=1',
}
# session status
SESSION_STATUS_INIT = 0
SESSION_STATUS_LOGIN = 1
SESSION_STATUS_LOGOUT = 2
SESSION_STATUS_CONN = 3
# max users
MAX_USER = 256
POST_PER_PAGE = 30
MAX_RATE_CONCURRENCY = 256
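# Editor's addition: a hedged sketch showing how RATE_FORM_QUERY_DICT might be
# completed and encoded by a caller (tid/pid/t are intentionally left out of
# the dict above); the helper below is illustrative, not part of the toolset.
def _example_rate_form_url(tid, pid, timestamp):
    query = dict(GConf.RATE_FORM_QUERY_DICT,
                 tid=str(tid), pid=str(pid), t=str(timestamp))
    return urlparse.urlunparse((GConf.PROTOCOL, GConf.BASE_URL,
                                GConf.RATE_FORM_PATH, '',
                                urllib.urlencode(query), ''))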
|
mit
| 5,599,428,392,297,824,000 | 25.978261 | 120 | 0.551974 | false | 3.131203 | false | false | false |
internap/almanach
|
tests/api/test_api_authentication.py
|
1
|
1252
|
# Copyright 2016 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hamcrest import assert_that, equal_to
from tests.api.base_api import BaseApi
class ApiAuthenticationTest(BaseApi):
def setUp(self):
self.prepare()
self.prepare_with_failed_authentication()
def test_with_wrong_authentication(self):
self.controller.should_receive('list_entities').never()
query_string = {'start': '2014-01-01 00:00:00.0000', 'end': '2014-02-01 00:00:00.0000'}
code, result = self.api_get(url='/project/TENANT_ID/entities',
query_string=query_string,
headers={'X-Auth-Token': 'wrong token'})
assert_that(code, equal_to(401))
|
apache-2.0
| -9,071,383,592,736,127,000 | 36.939394 | 95 | 0.676518 | false | 3.900312 | false | false | false |
sjl767/woo
|
scripts/test-OLD/clump-hopper-viscoelastic.py
|
1
|
3251
|
# -*- coding: utf-8 -*-
from woo import utils,pack,export,qt
import gts,os,random,itertools
from numpy import *
import woo.log
#woo.log.setLevel('NewtonIntegrator',woo.log.TRACE)
# Parameters
tc=0.001# collision time
en=.3 # normal restitution coefficient
es=.3 # tangential restitution coefficient
frictionAngle=radians(35)#
density=2700
# facets material
params=utils.getViscoelasticFromSpheresInteraction(tc,en,es)
facetMat=O.materials.append(ViscElMat(frictionAngle=frictionAngle,**params)) # **params sets kn, cn, ks, cs
# default spheres material
dfltSpheresMat=O.materials.append(ViscElMat(density=density,frictionAngle=frictionAngle, **params))
O.dt=.1*tc # time step
Rs=0.05 # particle radius
# Create geometry
x0=0.; y0=0.; z0=0.; ab=.7; at=2.; h=1.; hl=h; al=at*3
zb=z0; x0b=x0-ab/2.; y0b=y0-ab/2.; x1b=x0+ab/2.; y1b=y0+ab/2.
zt=z0+h; x0t=x0-at/2.; y0t=y0-at/2.; x1t=x0+at/2.; y1t=y0+at/2.
zl=z0-hl;x0l=x0-al/2.; y0l=y0-al/2.; x1l=x0+al/2.; y1l=y0+al/2.
left = pack.sweptPolylines2gtsSurface([[Vector3(x0b,y0b,zb),Vector3(x0t,y0t,zt),Vector3(x0t,y1t,zt),Vector3(x0b,y1b,zb)]],capStart=True,capEnd=True)
lftIds=O.bodies.append(pack.gtsSurface2Facets(left.faces(),material=facetMat,color=(0,1,0)))
right = pack.sweptPolylines2gtsSurface([[Vector3(x1b,y0b,zb),Vector3(x1t,y0t,zt),Vector3(x1t,y1t,zt),Vector3(x1b,y1b,zb)]],capStart=True,capEnd=True)
rgtIds=O.bodies.append(pack.gtsSurface2Facets(right.faces(),material=facetMat,color=(0,1,0)))
near = pack.sweptPolylines2gtsSurface([[Vector3(x0b,y0b,zb),Vector3(x0t,y0t,zt),Vector3(x1t,y0t,zt),Vector3(x1b,y0b,zb)]],capStart=True,capEnd=True)
nearIds=O.bodies.append(pack.gtsSurface2Facets(near.faces(),material=facetMat,color=(0,1,0)))
far = pack.sweptPolylines2gtsSurface([[Vector3(x0b,y1b,zb),Vector3(x0t,y1t,zt),Vector3(x1t,y1t,zt),Vector3(x1b,y1b,zb)]],capStart=True,capEnd=True)
farIds=O.bodies.append(pack.gtsSurface2Facets(far.faces(),material=facetMat,color=(0,1,0)))
table = pack.sweptPolylines2gtsSurface([[Vector3(x0l,y0l,zl),Vector3(x0l,y1l,zl),Vector3(x1l,y1l,zl),Vector3(x1l,y0l,zl)]],capStart=True,capEnd=True)
tblIds=O.bodies.append(pack.gtsSurface2Facets(table.faces(),material=facetMat,color=(0,1,0)))
# Create clumps...
clumpColor=(0.0, 0.5, 0.5)
for k,l in itertools.product(arange(0,10),arange(0,10)):
clpId,sphId=O.bodies.appendClumped([utils.sphere(Vector3(x0t+Rs*(k*4+2),y0t+Rs*(l*4+2),i*Rs*2+zt),Rs,color=clumpColor,material=dfltSpheresMat) for i in xrange(4)])
# ... and spheres
#spheresColor=(0.4, 0.4, 0.4)
#for k,l in itertools.product(arange(0,9),arange(0,9)):
#sphAloneId=O.bodies.append( [utils.sphere( Vector3(x0t+Rs*(k*4+4),y0t+Rs*(l*4+4),i*Rs*2.3+zt),Rs,color=spheresColor,material=dfltSpheresMat) for i in xrange(4) ] )
# Create engines
O.engines=[
ForceResetter(),
InsertionSortCollider([Bo1_Sphere_Aabb(),Bo1_Facet_Aabb()]),
InteractionLoop(
[Ig2_Sphere_Sphere_ScGeom(), Ig2_Facet_Sphere_ScGeom()],
[Ip2_ViscElMat_ViscElMat_ViscElPhys()],
[Law2_ScGeom_ViscElPhys_Basic()],
),
GravityEngine(gravity=[0,0,-9.81]),
NewtonIntegrator(damping=0),
#VTKRecorder(virtPeriod=0.01,fileName='/tmp/',recorders=['spheres','velocity','facets'])
]
from woo import qt
qt.View()
O.saveTmp()
|
gpl-2.0
| 8,081,068,913,472,389,000 | 41.776316 | 168 | 0.725008 | false | 2.170227 | false | false | false |
iesugrace/book-reader
|
lib/noter.py
|
1
|
3267
|
from record import Record
from recorder import Recorder
from timeutils import isotime
import time
import interact
import os
class Noter(Recorder):
def __init__(self, db_path, book_name):
Recorder.__init__(self, db_path)
self.book_name = book_name
def make_makers(self):
makers = []
makers.append(('book', (lambda x: self.book_name, None)))
makers.append(('chapter', (interact.readint, 'Chapter: ')))
makers.append(('subject', (interact.readstr, 'Subject: ')))
makers.append(('content', (self.edit_content, None)))
self.makers = makers
def edit(self):
""" change an existing note
"""
cont = self.opendb()
notes = sorted(cont.items(), key=lambda x: int(x[0]))
text_list = []
for time, note in notes:
text = isotime(int(time)) + '\n' + note.content[:80]
text_list.append(text)
idx, junk = interact.printAndPick(text_list)
key = notes[idx][0]
note = notes[idx][1]
prompt = 'Chapter [%s]: ' % note.chapter
note.chapter = interact.readint(prompt, default=note.chapter)
prompt = 'Subject [%s]: ' % note.subject
note.subject = interact.readstr(prompt, default='') or note.subject
note.content = self.edit_content(data=note.content)
self.save(key, note)
def list(self):
cont = self.opendb()
notes = sorted(cont.items(), key=lambda x: int(x[0]))
text_list = []
for time, note in notes:
text = isotime(int(time)) + '\n' + note.content[:80]
text_list.append(text)
res = interact.printAndPick(text_list)
if res:
idx = res[0]
else:
return
key = notes[idx][0]
note = notes[idx][1]
print('-' * 80)
print('Book: %s' % note.book)
print('Chapter: %s' % note.chapter)
print('Subject: %s' % note.subject)
print('Content:\n%s' % note.content)
def delete(self):
assert False, 'not yet implemented'
def add(self):
""" caller must supply the field names and
maker function and arguments for each field.
"""
self.make_makers()
ent = Record()
for (field_name, (func, args)) in self.makers:
setattr(ent, field_name, func(args))
self.save(str(int(time.time())), ent)
def edit_content(self, *junk, data=None):
""" edit (add, change, delete) some data, and return it
as string use temporary file to store the data while creating.
"""
import tempfile
tmpfile = tempfile.NamedTemporaryFile(delete=False)
if data:
tmpfile.write(data.encode())
tmpfile.flush()
self.edit_file(tmpfile.name)
content = open(tmpfile.name).read()
os.unlink(tmpfile.name)
return content
def edit_file(self, path):
""" edit a file of a given name, using the editor
specified in EDITOR environment variable, or vi
if none specified.
"""
default_editor = 'vi'
editor = os.environ.get('EDITOR')
if not editor: editor = default_editor
os.system('%s %s' % (editor, path))
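# Editor's addition: a hedged usage sketch. The database path and book title
# below are hypothetical; add()/list()/edit() are interactive (they prompt on
# stdin and open $EDITOR), so this is illustrative rather than a test.
#
# noter = Noter('/tmp/notes.db', 'Some Book')
# noter.add()    # prompts for chapter and subject, opens an editor for content
# noter.list()   # pick a stored note interactively and print it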
|
gpl-2.0
| -4,272,964,902,325,736,400 | 32.680412 | 75 | 0.565657 | false | 3.776879 | false | false | false |
taion/flask-jsonapiview
|
setup.py
|
1
|
1714
|
from setuptools import setup
EXTRAS_REQUIRE = {
"docs": ("sphinx", "pallets-sphinx-themes"),
"jwt": ("PyJWT>=1.4.0", "cryptography>=2.0.0"),
"tests": ("coverage", "psycopg2-binary", "pytest"),
}
EXTRAS_REQUIRE["dev"] = (
EXTRAS_REQUIRE["docs"] + EXTRAS_REQUIRE["tests"] + ("tox",)
)
setup(
name="Flask-RESTy",
version="1.5.0",
description="Building blocks for REST APIs for Flask",
url="https://github.com/4Catalyzer/flask-resty",
author="4Catalyzer",
author_email="[email protected]",
license="MIT",
python_requires=">=3.6",
classifiers=[
"Development Status :: 3 - Alpha",
"Framework :: Flask",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords="rest flask",
packages=("flask_resty",),
install_requires=(
"Flask>=1.1.0",
"Flask-SQLAlchemy>=1.0",
"marshmallow>=3.0.0",
"SQLAlchemy>=1.0.0",
"Werkzeug>=0.11",
"konch>=4.0",
),
extras_require=EXTRAS_REQUIRE,
entry_points={
"pytest11": ("flask-resty = flask_resty.testing",),
"flask.commands": ("shell = flask_resty.shell:cli",),
},
)
|
mit
| 8,034,599,104,357,237,000 | 31.961538 | 71 | 0.572929 | false | 3.616034 | false | false | false |
giordi91/python_misc
|
widgets/plotWidget/colorWidget.py
|
1
|
11019
|
from PySide import QtGui , QtCore
import math
#this is just a color picker
class ColorWidget(QtGui.QWidget):
colorChangedSignal = QtCore.Signal(int , int ,int)
def __init__(self, parent = None , color = [255,0,0]):
QtGui.QWidget.__init__(self,parent)
self.width = 200
self.height = 100
self.setGeometry(0,0,self.width,self.height)
self.__color = color
@property
def color(self):
return self.__color
@color.setter
def color (self ,color = []):
self.__color = color
self.colorChangedSignal.emit(self.__color[0],self.__color[1],self.__color[2])
self.update()
def mousePressEvent(self, event):
col = QtGui.QColorDialog.getColor()
if col.isValid():
self.color = [col.red() , col.green(), col.blue()]
self.colorChangedSignal.emit(self.__color[0],self.__color[1],self.__color[2])
self.update()
def drawBG(self, qp):
pen= QtGui.QPen()
color = QtGui.QColor(0, 0, 0)
pen.setColor(color)
pen.setWidthF(2)
qp.setPen(pen)
brush = QtGui.QBrush(QtGui.QColor(self.color[0], self.color[1], self.color[2]))
qp.setBrush(brush)
rectangle=QtCore.QRectF (0.0, 0.0, self.width, self.height);
qp.drawRoundedRect(rectangle, 2.0, 2.0);
def paintEvent(self, e):
'''
This procedure draws the widget
'''
qp = QtGui.QPainter()
qp.begin(self)
qp.setRenderHint(QtGui.QPainter.Antialiasing)
self.drawBG(qp)
qp.end()
def resizeEvent(self , event):
posX = event.size().width()
posY = event.size().height()
self.width = posX
self.height = posY
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
#simple ui to set directly on the plotter the colors etc
#nothing fancy not willing to spend too much time in commenting this:P
class OptionColorWidget(QtGui.QDialog):
def __init__(self,parent = None , plotter = None):
QtGui.QDialog.__init__(self, parent )
self.plotter= plotter
self.setObjectName(_fromUtf8("Dialog"))
self.resize(411, 310)
self.verticalLayout = QtGui.QVBoxLayout(self)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.settingsGB = QtGui.QGroupBox(self)
self.settingsGB.setObjectName(_fromUtf8("settingsGB"))
self.gridLayout_7 = QtGui.QGridLayout(self.settingsGB)
self.gridLayout_7.setObjectName(_fromUtf8("gridLayout_7"))
self.bgGB = QtGui.QGroupBox(self.settingsGB)
self.bgGB.setObjectName(_fromUtf8("bgGB"))
self.startBgL = QtGui.QLabel(self.bgGB)
self.startBgL.setGeometry(QtCore.QRect(10, 23, 56, 16))
self.startBgL.setObjectName(_fromUtf8("startBgL"))
self.endBgL = QtGui.QLabel(self.bgGB)
self.endBgL.setGeometry(QtCore.QRect(10, 46, 51, 16))
self.endBgL.setObjectName(_fromUtf8("endBgL"))
self.startBgCL = ColorWidget(self.bgGB)
self.startBgCL.setGeometry(QtCore.QRect(72, 23, 51, 17))
self.startBgCL.setObjectName(_fromUtf8("startBgCL"))
self.endBgCL = ColorWidget(self.bgGB)
self.endBgCL.setGeometry(QtCore.QRect(72, 46, 51, 17))
self.endBgCL.setObjectName(_fromUtf8("endBgCL"))
self.gridLayout_7.addWidget(self.bgGB, 0, 0, 1, 1)
self.grapGB = QtGui.QGroupBox(self.settingsGB)
self.grapGB.setMinimumSize(QtCore.QSize(220, 0))
self.grapGB.setObjectName(_fromUtf8("grapGB"))
self.gridLayout_4 = QtGui.QGridLayout(self.grapGB)
self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4"))
self.graphL = QtGui.QLabel(self.grapGB)
self.graphL.setObjectName(_fromUtf8("graphL"))
self.gridLayout_4.addWidget(self.graphL, 0, 0, 1, 1)
self.frame_4 = ColorWidget(self.grapGB)
self.frame_4.setObjectName(_fromUtf8("frame_4"))
self.gridLayout_4.addWidget(self.frame_4, 0, 1, 1, 1)
self.gTypeL = QtGui.QLabel(self.grapGB)
self.gTypeL.setObjectName(_fromUtf8("gTypeL"))
self.gridLayout_4.addWidget(self.gTypeL, 0, 2, 1, 1)
self.gTypeCB = QtGui.QComboBox(self.grapGB)
self.gTypeCB.setObjectName(_fromUtf8("gTypeCB"))
self.gTypeCB.addItem(_fromUtf8(""))
self.gTypeCB.addItem(_fromUtf8(""))
self.gridLayout_4.addWidget(self.gTypeCB, 0, 3, 1, 1)
self.thickL = QtGui.QLabel(self.grapGB)
self.thickL.setObjectName(_fromUtf8("thickL"))
self.gridLayout_4.addWidget(self.thickL, 1, 0, 1, 2)
self.thickSB = QtGui.QDoubleSpinBox(self.grapGB)
self.thickSB.setProperty("value", 2.0)
self.thickSB.setObjectName(_fromUtf8("thickSB"))
self.gridLayout_4.addWidget(self.thickSB, 1, 2, 1, 2)
self.gridLayout_7.addWidget(self.grapGB, 0, 1, 1, 1)
self.gridGB = QtGui.QGroupBox(self.settingsGB)
self.gridGB.setObjectName(_fromUtf8("gridGB"))
self.gridLayout_6 = QtGui.QGridLayout(self.gridGB)
self.gridLayout_6.setObjectName(_fromUtf8("gridLayout_6"))
self.axisGB = QtGui.QGroupBox(self.gridGB)
self.axisGB.setObjectName(_fromUtf8("axisGB"))
self.gridLayout = QtGui.QGridLayout(self.axisGB)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.axisCK = QtGui.QCheckBox(self.axisGB)
self.axisCK.setObjectName(_fromUtf8("axisCK"))
self.gridLayout.addWidget(self.axisCK, 0, 0, 1, 2)
self.axisL = QtGui.QLabel(self.axisGB)
self.axisL.setObjectName(_fromUtf8("axisL"))
self.gridLayout.addWidget(self.axisL, 1, 0, 1, 1)
self.axisCL = ColorWidget(self.axisGB)
self.axisCL.setObjectName(_fromUtf8("axisCL"))
self.gridLayout.addWidget(self.axisCL, 1, 1, 1, 1)
self.gridLayout_6.addWidget(self.axisGB, 0, 0, 1, 1)
self.gridGB_2 = QtGui.QGroupBox(self.gridGB)
self.gridGB_2.setObjectName(_fromUtf8("gridGB_2"))
self.gridLayout_2 = QtGui.QGridLayout(self.gridGB_2)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.showCK = QtGui.QCheckBox(self.gridGB_2)
self.showCK.setObjectName(_fromUtf8("showCK"))
self.gridLayout_2.addWidget(self.showCK, 0, 0, 1, 2)
self.gridL = QtGui.QLabel(self.gridGB_2)
self.gridL.setObjectName(_fromUtf8("gridL"))
self.gridLayout_2.addWidget(self.gridL, 1, 0, 1, 1)
self.gridCL = ColorWidget(self.gridGB_2)
self.gridCL.setObjectName(_fromUtf8("gridCL"))
self.gridLayout_2.addWidget(self.gridCL, 1, 1, 1, 1)
self.gridLayout_6.addWidget(self.gridGB_2, 0, 1, 1, 1)
self.numbersGB = QtGui.QGroupBox(self.gridGB)
self.numbersGB.setObjectName(_fromUtf8("numbersGB"))
self.gridLayout_3 = QtGui.QGridLayout(self.numbersGB)
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.numbersCK = QtGui.QCheckBox(self.numbersGB)
self.numbersCK.setObjectName(_fromUtf8("numbersCK"))
self.gridLayout_3.addWidget(self.numbersCK, 0, 0, 1, 2)
self.numbersL = QtGui.QLabel(self.numbersGB)
self.numbersL.setObjectName(_fromUtf8("numbersL"))
self.gridLayout_3.addWidget(self.numbersL, 1, 0, 1, 1)
self.numbersCL = ColorWidget(self.numbersGB)
self.numbersCL.setObjectName(_fromUtf8("numbersCL"))
self.gridLayout_3.addWidget(self.numbersCL, 1, 1, 1, 1)
self.gridLayout_6.addWidget(self.numbersGB, 0, 2, 1, 1)
self.stepsL = QtGui.QLabel(self.gridGB)
self.stepsL.setObjectName(_fromUtf8("stepsL"))
self.gridLayout_6.addWidget(self.stepsL, 1, 0, 1, 1)
self.setpSB = QtGui.QSpinBox(self.gridGB)
self.setpSB.setProperty("value", 20)
self.setpSB.setObjectName(_fromUtf8("setpSB"))
self.gridLayout_6.addWidget(self.setpSB, 1, 1, 1, 1)
self.gridLayout_7.addWidget(self.gridGB, 1, 0, 1, 2)
self.donePB = QtGui.QPushButton(self.settingsGB)
self.donePB.setObjectName(_fromUtf8("donePB"))
self.gridLayout_7.addWidget(self.donePB, 2, 0, 1, 2)
self.verticalLayout.addWidget(self.settingsGB)
self.setWindowTitle(_translate("Dialog", "Dialog", None))
self.settingsGB.setTitle(_translate("Dialog", "Settings", None))
self.bgGB.setTitle(_translate("Dialog", "Background", None))
self.startBgL.setText(_translate("Dialog", "start color :", None))
self.endBgL.setText(_translate("Dialog", "end color :", None))
self.grapGB.setTitle(_translate("Dialog", "Graph", None))
self.graphL.setText(_translate("Dialog", "color :", None))
self.gTypeL.setText(_translate("Dialog", "type :", None))
self.gTypeCB.setItemText(0, _translate("Dialog", "dots", None))
self.gTypeCB.setItemText(1, _translate("Dialog", "line", None))
self.thickL.setText(_translate("Dialog", "Thickness :", None))
self.gridGB.setTitle(_translate("Dialog", "Grid", None))
self.axisGB.setTitle(_translate("Dialog", "Axis", None))
self.axisCK.setText(_translate("Dialog", " show", None))
self.axisL.setText(_translate("Dialog", "color:", None))
self.gridGB_2.setTitle(_translate("Dialog", "Grid", None))
self.showCK.setText(_translate("Dialog", " show", None))
self.gridL.setText(_translate("Dialog", "color:", None))
self.numbersGB.setTitle(_translate("Dialog", "Numbers", None))
self.numbersCK.setText(_translate("Dialog", " show", None))
self.numbersL.setText(_translate("Dialog", "color:", None))
self.stepsL.setText(_translate("Dialog", "Grid Step :", None))
self.donePB.setText(_translate("Dialog", "DONE", None))
self.showCK.setChecked(1)
self.axisCK.setChecked(1)
self.numbersCK.setChecked(1)
self.startBgCL.color = self.plotter.startBackgroundColor
self.endBgCL.color = self.plotter.endBackgroundColor
self.startBgCL.colorChangedSignal.connect(self.updateStartBG)
self.endBgCL.colorChangedSignal.connect(self.updateEndBG)
self.donePB.clicked.connect(self.close)
def updateStartBG(self , r,g,b):
self.plotter.startBackgroundColor = [r,g,b]
def updateEndBG(self , r,g,b):
self.plotter.endBackgroundColor = [r,g,b]
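# Editor's addition: a hedged usage sketch of ColorWidget on its own. The
# widget emits colorChangedSignal(r, g, b) after a colour is picked in the
# dialog, so a caller can react to changes like this (names are illustrative):
#
# w = ColorWidget(color=[0, 128, 255])
# w.colorChangedSignal.connect(lambda r, g, b: some_plotter.set_color(r, g, b))
# w.show()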
|
mit
| -1,275,120,345,756,301,000 | 41.544402 | 89 | 0.631909 | false | 3.508118 | false | false | false |
steveblamey/nautilus-archive
|
setup.py
|
1
|
1285
|
#!/usr/bin/env python
# coding: utf-8
from os import path
from distutils.core import setup
PROJECT_DIR = path.dirname(__file__)
extension = [
('/usr/share/nautilus-python/extensions',
[path.join(PROJECT_DIR, 'extension', 'nautilus-archive.py')]),
('/usr/share/icons/hicolor/48x48/emblems',
[path.join(PROJECT_DIR, 'extension', 'emblems', 'emblem-red-tag.png')]),
('/usr/share/icons/hicolor/scalable/emblems',
[path.join(PROJECT_DIR, 'extension', 'emblems', 'emblem-red-tag.svg')]),
('/usr/share/icons/hicolor/48x48/emblems',
[path.join(PROJECT_DIR, 'extension', 'emblems', 'emblem-green-tag.png')]),
('/usr/share/icons/hicolor/scalable/emblems',
[path.join(PROJECT_DIR, 'extension', 'emblems', 'emblem-green-tag.svg')]),
('/usr/sbin',
[path.join(PROJECT_DIR, 'scripts', 'tracker-archive-tagged')]),
]
setup(name='nautilus-archive',
version='0.2',
description='A file archiving extension for the Nautilus file manager',
long_description=open('README.rst').read(),
author='Steve Blamey',
author_email='[email protected]',
url='http://www.python.org/',
license='GPL-3',
platforms=['Linux'],
data_files=extension,
py_modules=['trackertag']
)
|
gpl-3.0
| -5,443,317,509,589,844,000 | 31.948718 | 79 | 0.632685 | false | 3.196517 | false | false | false |
srmagura/potential
|
ps/grid.py
|
1
|
8094
|
import numpy as np
from ps.extend import EType
class PsGrid:
# FIXME old way is deprecated
'''def ps_construct_grids(self, scheme_order):
self.construct_grids(scheme_order)
R = self.R # remove eventually?
a = self.a
self.all_Mplus = {0: set(), 1: set(), 2: set()}
self.all_Mminus = {0: set(), 1: set(), 2: set()}
self.all_gamma = {}
for i, j in self.M0:
r, th = self.get_polar(i, j)
x, y = self.get_coord(i, j)
boundary_r = self.boundary.eval_r(th)
# Segment 0
if th >= a and th <= 2*np.pi:
if r <= boundary_r:
self.all_Mplus[0].add((i, j))
else:
self.all_Mminus[0].add((i, j))
# Segment 1
if 0 <= x and x <= R:
if y <= 0:
if r <= boundary_r:
self.all_Mplus[1].add((i, j))
else:
self.all_Mminus[1].add((i, j))
# Segment 2
x1, y1 = self.get_radius_point(2, x, y)
dist = self.signed_dist_to_radius(2, x, y)
if 0 <= x1 and x1 <= R*np.cos(a):
if dist <= 0:
if r <= boundary_r and y >= 0:
self.all_Mplus[2].add((i, j))
else:
self.all_Mminus[2].add((i, j))
union_gamma_set = set()
for sid in range(3):
Nplus = set()
Nminus = set()
for i, j in self.M0:
Nm = set([(i, j), (i-1, j), (i+1, j), (i, j-1), (i, j+1)])
if scheme_order > 2:
Nm |= set([(i-1, j-1), (i+1, j-1), (i-1, j+1),
(i+1, j+1)])
if (i, j) in self.all_Mplus[sid]:
Nplus |= Nm
elif (i, j) in self.all_Mminus[sid]:
Nminus |= Nm
gamma_set = Nplus & Nminus
self.all_gamma[sid] = list(gamma_set)
union_gamma_set |= gamma_set
self.union_gamma = list(union_gamma_set)
if self.fake_grid:
return self.ps_construct_fake_grid()
def ps_construct_fake_grid(self):
"""
For testing extension test only. Dangerous
"""
R = self.R
a = self.a
h = self.AD_len / self.N
inv = self.get_coord_inv
self.all_gamma = {
0: [
#inv(R+h/2, h/2),
#inv(-R+h/2, h/2),
#inv(-R-h/2, 0),
#inv(R*np.cos(a)+h/2, R*np.sin(a)-h/2),
#inv(R*np.cos(a)-h/2, R*np.sin(a)-h/2),
#inv(R*np.cos(a)-h/2, R*np.sin(a)+h/2),
],
1: [
#inv(R+h/2, h/2),
#inv(R+h/2, -h/2),
],
2: []#inv(R*np.cos(a)+h/2, R*np.sin(a)-h/2)],
}
self.union_gamma = set()
for sid in range(3):
self.union_gamma |= set(self.all_gamma[sid])
self.union_gamma = list(self.union_gamma)
def ps_grid_dist_test(self):
def get_dist(node, setype):
def dformula(x0, y0, _x, _y):
return np.sqrt((x0-_x)**2 + (y0-_y)**2)
x, y = self.get_coord(*node)
dist = -1
R = self.R
a = self.a
if setype == (0, EType.standard):
n, th = self.boundary.get_boundary_coord(
*self.get_polar(*node)
)
dist = abs(n)
elif setype == (0, EType.left):
x0, y0 = (R*np.cos(a), R*np.sin(a))
dist = dformula(x0, y0, x, y)
elif setype == (0, EType.right):
x0, y0 = (R, 0)
dist = dformula(x0, y0, x, y)
elif setype == (1, EType.standard):
dist = abs(y)
elif setype == (1, EType.left):
x0, y0 = (0, 0)
dist = dformula(x0, y0, x, y)
elif setype == (1, EType.right):
x0, y0 = (R, 0)
dist = dformula(x0, y0, x, y)
elif setype == (2, EType.standard):
dist = self.dist_to_radius(2, x, y)
elif setype == (2, EType.left):
x0, y0 = (0, 0)
dist = dformula(x0, y0, x, y)
elif setype == (2, EType.right):
x0, y0 = (R*np.cos(a), R*np.sin(a))
dist = dformula(x0, y0, x, y)
return dist
all_gamma2 = {0: set(), 1: set(), 2: set()}
for node in self.union_gamma:
for sid in (0, 1, 2):
etype = self.get_etype(sid, *node)
setype = (sid, etype)
dist = get_dist(node, setype)
h = self.AD_len / self.N
if dist <= h*np.sqrt(2):
all_gamma2[sid].add(node)
for sid in (0, 1, 2):
print('=== {} ==='.format(sid))
diff = all_gamma2[sid] - set(self.all_gamma[sid])
print('all_gamma2 - all_gamma:', all_gamma2[sid] - set(self.all_gamma[sid]))
for node in diff:
print('{}: x={} y={}'.format(node, *self.get_coord(*node)))
print('all_gamma - all_gamma2:', set(self.all_gamma[sid]) - all_gamma2[sid])
print()
#assert self.all_gamma == all_gamma2
'''
def ps_construct_grids(self, scheme_order):
self.construct_grids(scheme_order)
self.Nplus = set()
self.Nminus = set()
for i, j in self.M0:
Nm = set([(i, j), (i-1, j), (i+1, j), (i, j-1), (i, j+1)])
if scheme_order > 2:
Nm |= set([(i-1, j-1), (i+1, j-1), (i-1, j+1),
(i+1, j+1)])
if (i, j) in self.global_Mplus:
self.Nplus |= Nm
elif (i, j) in self.global_Mminus:
self.Nminus |= Nm
self.union_gamma = list(self.Nplus & self.Nminus)
def get_dist(node, setype):
def dformula(x0, y0, _x, _y):
return np.sqrt((x0-_x)**2 + (y0-_y)**2)
x, y = self.get_coord(*node)
dist = -1
R = self.R
a = self.a
if setype == (0, EType.standard):
n, th = self.boundary.get_boundary_coord(
*self.get_polar(*node)
)
dist = abs(n)
elif setype == (0, EType.left):
x0, y0 = (R*np.cos(a), R*np.sin(a))
dist = dformula(x0, y0, x, y)
elif setype == (0, EType.right):
x0, y0 = (R, 0)
dist = dformula(x0, y0, x, y)
elif setype == (1, EType.standard):
dist = abs(y)
elif setype == (1, EType.left):
x0, y0 = (0, 0)
dist = dformula(x0, y0, x, y)
elif setype == (1, EType.right):
x0, y0 = (R, 0)
dist = dformula(x0, y0, x, y)
elif setype == (2, EType.standard):
dist = self.dist_to_radius(2, x, y)
elif setype == (2, EType.left):
x0, y0 = (0, 0)
dist = dformula(x0, y0, x, y)
elif setype == (2, EType.right):
x0, y0 = (R*np.cos(a), R*np.sin(a))
dist = dformula(x0, y0, x, y)
return dist
self.all_gamma = {0: [], 1: [], 2: []}
for node in self.union_gamma:
r, th = self.get_polar(*node)
placed = False
for sid in (0, 1, 2):
etype = self.get_etype(sid, *node)
setype = (sid, etype)
dist = get_dist(node, setype)
h = self.AD_len / self.N
if dist <= h*np.sqrt(2):
self.all_gamma[sid].append(node)
placed = True
# Every node in union_gamma should go in at least one of the
# all_gamma sets
assert placed
|
gpl-3.0
| -3,562,702,645,237,333,000 | 29.659091 | 88 | 0.407833 | false | 3.222134 | false | false | false |
a358003542/expython
|
expython/pattern/__init__.py
|
1
|
1439
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import UserList
import logging
logger = logging.getLogger(__name__)
class CycleList(UserList):
"""
    An iterable that cycles through its elements indefinitely. When iterating
    on the fly (i.e. removing elements from inside a for loop), use the
    `remove_item` method instead of `remove`.
"""
def __init__(self, data):
super().__init__(data)
def __iter__(self):
self.index = 0
while True:
if self.index == len(self.data):
self.index = 0
yield self.data[self.index]
self.index += 1
def remove_item(self, item):
"""
        Corrects the internal index after removing an item while iterating on
        the fly. If you are not iterating on the fly, simply use the list's
        built-in remove method. To stay consistent with the built-in remove,
        no exception is caught here.
"""
self.data.remove(item)
self.index -= 1
def last_out_game(data, number):
test = CycleList(data)
count = 1
for i in test:
        logger.debug('testing %s', i)
if len(test.data) <= 1:
break
if count == number:
try:
test.remove_item(i)
                logger.debug('removing %s', i)
except ValueError:
pass
count = 0
count += 1
return test.data[0]
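

# A minimal usage sketch (not part of the original module): counting out every
# third player from a circle of five should leave player 4.
if __name__ == '__main__':
    print(last_out_game([1, 2, 3, 4, 5], 3))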
|
mit
| 2,960,627,695,356,223,500 | 20.089286 | 44 | 0.497166 | false | 2.885514 | true | false | false |
p4lang/p4app
|
docker/scripts/mininet/p4_mininet.py
|
1
|
5757
|
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from mininet.net import Mininet
from mininet.node import Switch, Host
from mininet.log import setLogLevel, info, error, debug
from mininet.moduledeps import pathCheck
from sys import exit
import os
import tempfile
import socket
class P4Host(Host):
def config(self, **params):
r = super(P4Host, self).config(**params)
for off in ["rx", "tx", "sg"]:
cmd = "/sbin/ethtool --offload %s %s off" % (self.defaultIntf().name, off)
self.cmd(cmd)
# disable IPv6
self.cmd("sysctl -w net.ipv6.conf.all.disable_ipv6=1")
self.cmd("sysctl -w net.ipv6.conf.default.disable_ipv6=1")
self.cmd("sysctl -w net.ipv6.conf.lo.disable_ipv6=1")
return r
def describe(self, sw_addr=None, sw_mac=None):
print "**********"
print "Network configuration for: %s" % self.name
print "Default interface: %s\t%s\t%s" %(
self.defaultIntf().name,
self.defaultIntf().IP(),
self.defaultIntf().MAC()
)
if sw_addr is not None or sw_mac is not None:
print "Default route to switch: %s (%s)" % (sw_addr, sw_mac)
print "**********"
class P4Switch(Switch):
"""P4 virtual switch"""
device_id = 0
def __init__(self, name, sw_path = None, json_path = None,
log_file = None,
thrift_port = None,
pcap_dump = False,
log_console = False,
verbose = False,
device_id = None,
enable_debugger = False,
**kwargs):
Switch.__init__(self, name, **kwargs)
assert(sw_path)
assert(json_path)
# make sure that the provided sw_path is valid
pathCheck(sw_path)
# make sure that the provided JSON file exists
if not os.path.isfile(json_path):
error("Invalid JSON file.\n")
exit(1)
self.sw_path = sw_path
self.json_path = json_path
self.verbose = verbose
self.log_file = log_file
if self.log_file is None:
self.log_file = "/tmp/p4s.{}.log".format(self.name)
self.output = open(self.log_file, 'w')
self.thrift_port = thrift_port
self.pcap_dump = pcap_dump
self.enable_debugger = enable_debugger
self.log_console = log_console
if device_id is not None:
self.device_id = device_id
P4Switch.device_id = max(P4Switch.device_id, device_id)
else:
self.device_id = P4Switch.device_id
P4Switch.device_id += 1
self.nanomsg = "ipc:///tmp/bm-{}-log.ipc".format(self.device_id)
@classmethod
def setup(cls):
pass
def check_switch_started(self, pid):
"""While the process is running (pid exists), we check if the Thrift
server has been started. If the Thrift server is ready, we assume that
the switch was started successfully. This is only reliable if the Thrift
server is started at the end of the init process"""
while True:
if not os.path.exists(os.path.join("/proc", str(pid))):
return False
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(0.5)
result = sock.connect_ex(("localhost", self.thrift_port))
if result == 0:
return True
def start(self, controllers):
"Start up a new P4 switch"
info("Starting P4 switch {}.\n".format(self.name))
args = [self.sw_path]
for port, intf in self.intfs.items():
if not intf.IP():
args.extend(['-i', str(port) + "@" + intf.name])
if self.pcap_dump:
args.append("--pcap")
# args.append("--useFiles")
if self.thrift_port:
args.extend(['--thrift-port', str(self.thrift_port)])
if self.nanomsg:
args.extend(['--nanolog', self.nanomsg])
args.extend(['--device-id', str(self.device_id)])
P4Switch.device_id += 1
args.append(self.json_path)
if self.enable_debugger:
args.append("--debugger")
if self.log_console:
args.append("--log-console")
info(' '.join(args) + "\n")
pid = None
with tempfile.NamedTemporaryFile() as f:
# self.cmd(' '.join(args) + ' > /dev/null 2>&1 &')
self.cmd(' '.join(args) + ' >' + self.log_file + ' 2>&1 & echo $! >> ' + f.name)
pid = int(f.read())
debug("P4 switch {} PID is {}.\n".format(self.name, pid))
if not self.check_switch_started(pid):
error("P4 switch {} did not start correctly.\n".format(self.name))
exit(1)
info("P4 switch {} has been started.\n".format(self.name))
def stop(self):
"Terminate P4 switch."
self.output.flush()
self.cmd('kill %' + self.sw_path)
self.cmd('wait')
self.deleteIntfs()
def attach(self, intf):
"Connect a data port"
assert(0)
def detach(self, intf):
"Disconnect a data port"
assert(0)
|
apache-2.0
| 1,125,914,588,463,093,400 | 35.436709 | 92 | 0.57061 | false | 3.697495 | false | false | false |
iaz3/ModReader
|
modreader/game/__init__.py
|
1
|
1209
|
#!/usr/bin/env python3
"""
Game Support Modules should be located in this package
Names should be all lowercase, unique, and code should follow the template,
The template represents the bare miniumum API you are required to conform to.
You are allowed to add new files and extend it.
"""
# ====================== GPL License and Copyright Notice ======================
# This file is part of ModReader
# Copyright (C) 2016 Diana Land
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ModReader. If not, see <http://www.gnu.org/licenses/>.
#
# https://github.com/iaz3/ModReader
#
# =============================================================================
VERSION = "0.1.0"
|
gpl-3.0
| 4,860,691,471,080,183,000 | 38 | 80 | 0.679074 | false | 4.212544 | false | false | false |
zhanglab/psamm
|
psamm/gapfill.py
|
1
|
11063
|
# This file is part of PSAMM.
#
# PSAMM is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PSAMM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PSAMM. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2014-2017 Jon Lund Steffensen <[email protected]>
# Copyright 2015-2020 Keith Dufault-Thompson <[email protected]>
"""Identify blocked metabolites and possible reconstructions.
This implements a variant of the algorithms described in [Kumar07]_.
"""
import logging
from six import iteritems, raise_from
from .lpsolver import lp
logger = logging.getLogger(__name__)
class GapFillError(Exception):
"""Indicates an error while running GapFind/GapFill"""
def _find_integer_tolerance(epsilon, v_max, min_tol):
"""Find appropriate integer tolerance for gap-filling problems."""
int_tol = min(epsilon / (10 * v_max), 0.1)
min_tol = max(1e-10, min_tol)
if int_tol < min_tol:
eps_lower = min_tol * 10 * v_max
logger.warning(
'When the maximum flux is {}, it is recommended that'
' epsilon > {} to avoid numerical issues with this'
' solver. Results may be incorrect with'
' the current settings!'.format(v_max, eps_lower))
return min_tol
return int_tol
def gapfind(model, solver, epsilon=0.001, v_max=1000, implicit_sinks=True):
"""Identify compounds in the model that cannot be produced.
Yields all compounds that cannot be produced. This method
assumes implicit sinks for all compounds in the model so
the only factor that influences whether a compound can be
produced is the presence of the compounds needed to produce it.
Epsilon indicates the threshold amount of reaction flux for the products
to be considered non-blocked. V_max indicates the maximum flux.
This method is implemented as a MILP-program. Therefore it may
not be efficient for larger models.
Args:
model: :class:`MetabolicModel` containing core reactions and reactions
that can be added for gap-filling.
solver: MILP solver instance.
epsilon: Threshold amount of a compound produced for it to not be
considered blocked.
v_max: Maximum flux.
implicit_sinks: Whether implicit sinks for all compounds are included
when gap-filling (traditional GapFill uses implicit sinks).
"""
prob = solver.create_problem()
# Set integrality tolerance such that w constraints are correct
min_tol = prob.integrality_tolerance.min
int_tol = _find_integer_tolerance(epsilon, v_max, min_tol)
if int_tol < prob.integrality_tolerance.value:
prob.integrality_tolerance.value = int_tol
# Define flux variables
v = prob.namespace()
for reaction_id in model.reactions:
lower, upper = model.limits[reaction_id]
v.define([reaction_id], lower=lower, upper=upper)
# Define constraints on production of metabolites in reaction
w = prob.namespace(types=lp.VariableType.Binary)
binary_cons_lhs = {compound: 0 for compound in model.compounds}
for spec, value in iteritems(model.matrix):
compound, reaction_id = spec
if value != 0:
w.define([spec])
w_var = w(spec)
lower, upper = (float(x) for x in model.limits[reaction_id])
if value > 0:
dv = v(reaction_id)
else:
dv = -v(reaction_id)
lower, upper = -upper, -lower
prob.add_linear_constraints(
dv <= upper * w_var,
dv >= epsilon + (lower - epsilon) * (1 - w_var))
binary_cons_lhs[compound] += w_var
xp = prob.namespace(model.compounds, types=lp.VariableType.Binary)
objective = xp.sum(model.compounds)
prob.set_objective(objective)
for compound, lhs in iteritems(binary_cons_lhs):
prob.add_linear_constraints(lhs >= xp(compound))
# Define mass balance constraints
massbalance_lhs = {compound: 0 for compound in model.compounds}
for spec, value in iteritems(model.matrix):
compound, reaction_id = spec
massbalance_lhs[compound] += v(reaction_id) * value
for compound, lhs in iteritems(massbalance_lhs):
if implicit_sinks:
# The constraint is merely >0 meaning that we have implicit sinks
# for all compounds.
prob.add_linear_constraints(lhs >= 0)
else:
prob.add_linear_constraints(lhs == 0)
# Solve
try:
result = prob.solve(lp.ObjectiveSense.Maximize)
except lp.SolverError as e:
        raise_from(GapFillError('Failed to solve gapfill: {}'.format(e)), e)
for compound in model.compounds:
if result.get_value(xp(compound)) < 0.5:
yield compound
def gapfill(
model, core, blocked, exclude, solver, epsilon=0.001, v_max=1000,
weights={}, implicit_sinks=True, allow_bounds_expansion=False):
"""Find a set of reactions to add such that no compounds are blocked.
Returns two iterators: first an iterator of reactions not in
core, that were added to resolve the model. Second, an
iterator of reactions in core that had flux bounds expanded (i.e.
irreversible reactions become reversible). Similarly to
GapFind, this method assumes, by default, implicit sinks for all compounds
in the model so the only factor that influences whether a compound
can be produced is the presence of the compounds needed to produce
it. This means that the resulting model will not necessarily be
flux consistent.
This method is implemented as a MILP-program. Therefore it may
not be efficient for larger models.
Args:
model: :class:`MetabolicModel` containing core reactions and reactions
that can be added for gap-filling.
core: The set of core (already present) reactions in the model.
blocked: The compounds to unblock.
exclude: Set of reactions in core to be excluded from gap-filling (e.g.
biomass reaction).
solver: MILP solver instance.
epsilon: Threshold amount of a compound produced for it to not be
considered blocked.
v_max: Maximum flux.
weights: Dictionary of weights for reactions. Weight is the penalty
score for adding the reaction (non-core reactions) or expanding the
flux bounds (all reactions).
implicit_sinks: Whether implicit sinks for all compounds are included
when gap-filling (traditional GapFill uses implicit sinks).
allow_bounds_expansion: Allow flux bounds to be expanded at the cost
of a penalty which can be specified using weights (traditional
GapFill does not allow this). This includes turning irreversible
reactions reversible.
"""
prob = solver.create_problem()
# Set integrality tolerance such that w constraints are correct
min_tol = prob.integrality_tolerance.min
int_tol = _find_integer_tolerance(epsilon, v_max, min_tol)
if int_tol < prob.integrality_tolerance.value:
prob.integrality_tolerance.value = int_tol
# Define flux variables
v = prob.namespace(model.reactions, lower=-v_max, upper=v_max)
# Add binary indicator variables
database_reactions = set(model.reactions).difference(core, exclude)
ym = prob.namespace(model.reactions, types=lp.VariableType.Binary)
yd = prob.namespace(database_reactions, types=lp.VariableType.Binary)
objective = ym.expr(
(rxnid, weights.get(rxnid, 1)) for rxnid in model.reactions)
objective += yd.expr(
(rxnid, weights.get(rxnid, 1)) for rxnid in database_reactions)
prob.set_objective(objective)
# Add constraints on all reactions
for reaction_id in model.reactions:
lower, upper = (float(x) for x in model.limits[reaction_id])
if reaction_id in exclude or not allow_bounds_expansion:
prob.add_linear_constraints(
upper >= v(reaction_id), v(reaction_id) >= lower)
else:
# Allow flux bounds to expand up to v_max with penalty
delta_lower = min(0, -v_max - lower)
delta_upper = max(0, v_max - upper)
prob.add_linear_constraints(
v(reaction_id) >= lower + ym(reaction_id) * delta_lower,
v(reaction_id) <= upper + ym(reaction_id) * delta_upper)
# Add constraints on database reactions
for reaction_id in database_reactions:
lower, upper = model.limits[reaction_id]
prob.add_linear_constraints(
v(reaction_id) >= yd(reaction_id) * -v_max,
v(reaction_id) <= yd(reaction_id) * v_max)
# Define constraints on production of blocked metabolites in reaction
w = prob.namespace(types=lp.VariableType.Binary)
binary_cons_lhs = {compound: 0 for compound in blocked}
for (compound, reaction_id), value in iteritems(model.matrix):
if reaction_id not in exclude and compound in blocked and value != 0:
w.define([(compound, reaction_id)])
w_var = w((compound, reaction_id))
dv = v(reaction_id) if value > 0 else -v(reaction_id)
prob.add_linear_constraints(
dv <= v_max * w_var,
dv >= epsilon + (-v_max - epsilon) * (1 - w_var))
binary_cons_lhs[compound] += w_var
for compound, lhs in iteritems(binary_cons_lhs):
prob.add_linear_constraints(lhs >= 1)
# Define mass balance constraints
massbalance_lhs = {compound: 0 for compound in model.compounds}
for (compound, reaction_id), value in iteritems(model.matrix):
if reaction_id not in exclude:
massbalance_lhs[compound] += v(reaction_id) * value
for compound, lhs in iteritems(massbalance_lhs):
if implicit_sinks:
# The constraint is merely >0 meaning that we have implicit sinks
# for all compounds.
prob.add_linear_constraints(lhs >= 0)
else:
prob.add_linear_constraints(lhs == 0)
# Solve
try:
prob.solve(lp.ObjectiveSense.Minimize)
except lp.SolverError as e:
raise_from(GapFillError('Failed to solve gapfill: {}'.format(e)), e)
def added_iter():
for reaction_id in database_reactions:
if yd.value(reaction_id) > 0.5:
yield reaction_id
def no_bounds_iter():
for reaction_id in model.reactions:
if ym.value(reaction_id) > 0.5:
yield reaction_id
return added_iter(), no_bounds_iter()
|
gpl-3.0
| 1,452,733,391,583,776,800 | 39.375912 | 79 | 0.657417 | false | 3.91472 | false | false | false |
novafloss/ci-formula
|
_states/jenkins_job.py
|
1
|
1441
|
# -*- coding: utf-8 -*-
import os
def _recreate_job_check(old, new):
old_cls = old.splitlines()[-1]
new_cls = new.splitlines()[-1]
return old_cls != new_cls
def present(name, source, template=None, context=None):
update_or_create_xml = __salt__['jenkins.update_or_create_xml'] # noqa
get_file_str = __salt__['cp.get_file_str'] # noqa
get_template = __salt__['cp.get_template'] # noqa
if template:
get_template(source, '/tmp/job.xml', template=template,
context=context)
new = open('/tmp/job.xml').read().strip()
os.unlink('/tmp/job.xml')
else:
new = get_file_str(source)
return update_or_create_xml(
name, new, object_='job', recreate_callback=_recreate_job_check)
def absent(name):
_runcli = __salt__['jenkins.runcli'] # noqa
test = __opts__['test'] # noqa
ret = {
'name': name,
'changes': {},
'result': None if test else True,
'comment': ''
}
try:
_runcli('get-job', name)
except Exception:
ret['comment'] = 'Already removed'
return ret
if not test:
try:
ret['comment'] = _runcli('delete-job', name)
except Exception, e:
ret['comment'] = e.message
ret['result'] = False
return ret
ret['changes'] = {
'old': 'present',
'new': 'absent',
}
return ret
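

# A minimal usage sketch (not part of the original module). From an .sls file,
# the states above would be referenced roughly like this (job name and source
# path are illustrative only):
#
#   my-job:
#     jenkins_job.present:
#       - source: salt://jenkins/jobs/my-job.xml
#
#   old-job:
#     jenkins_job.absent:
#       - name: old-job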
|
mit
| 5,004,029,649,107,946,000 | 23.844828 | 75 | 0.530881 | false | 3.531863 | false | false | false |
VU-Cog-Sci/PRF_experiment
|
ColorMatcherSession.py
|
1
|
3743
|
from __future__ import division
from psychopy import visual, core, misc, event
import numpy as np
from IPython import embed as shell
from math import *
import os, sys, time, pickle
import pygame
from pygame.locals import *
# from pygame import mixer, time
import Quest
sys.path.append( 'exp_tools' )
# sys.path.append( os.environ['EXPERIMENT_HOME'] )
from Session import *
from ColorMatcherTrial import *
from standard_parameters import *
from Staircase import YesNoStaircase
import appnope
appnope.nope()
class ColorMatcherSession(EyelinkSession):
def __init__(self, subject_initials, index_number, scanner, tracker_on):
super(ColorMatcherSession, self).__init__( subject_initials, index_number)
self.create_screen( size = screen_res, full_screen = 0, physical_screen_distance = 159.0, background_color = background_color, physical_screen_size = (70, 40) )
self.standard_parameters = standard_parameters
self.response_button_signs = response_button_signs
self.create_output_file_name()
if tracker_on:
self.create_tracker(auto_trigger_calibration = 1, calibration_type = 'HV9')
if self.tracker_on:
self.tracker_setup()
else:
self.create_tracker(tracker_on = False)
self.scanner = scanner
# trials can be set up independently of the staircases that support their parameters
self.prepare_trials()
self.all_color_values = []
self.exp_start_time = 0.0
self.color_step = 0.02
def prepare_trials(self):
"""docstring for prepare_trials(self):"""
self.RG_offsets = (np.random.rand(self.standard_parameters['num_trials']))
self.phase_durations = np.array([-0.0001,-0.0001, 1.00, -0.0001, 0.001])
# stimuli
self.fixation_rim = visual.PatchStim(self.screen, mask='raisedCos',tex=None, size=12.5, pos = np.array((0.0,0.0)), color = (0,0,0), maskParams = {'fringeWidth':0.4})
self.fixation_outer_rim = visual.PatchStim(self.screen, mask='raisedCos',tex=None, size=17.5, pos = np.array((0.0,0.0)), color = (-1.0,-1.0,-1.0), maskParams = {'fringeWidth':0.4})
self.fixation = visual.PatchStim(self.screen, mask='raisedCos',tex=None, size=9.0, pos = np.array((0.0,0.0)), color = (0,0,0), opacity = 1.0, maskParams = {'fringeWidth':0.4})
screen_width, screen_height = self.screen_pix_size
ecc_mask = filters.makeMask(matrixSize = 2048, shape='raisedCosine', radius=self.standard_parameters['stim_size'] * self.screen_pix_size[1] / self.screen_pix_size[0], center=(0.0, 0.0), range=[1, -1], fringeWidth=0.1 )
self.mask_stim = visual.PatchStim(self.screen, mask=ecc_mask,tex=None, size=(self.screen_pix_size[0], self.screen_pix_size[0]), pos = np.array((0.0,0.0)), color = self.screen.background_color) #
def close(self):
super(ColorMatcherSession, self).close()
text_file = open("data/%s_color_ratios.txt"%self.subject_initials, "w")
text_file.write('Mean RG/BY ratio: %.2f\nStdev RG/BY ratio: %.2f'%(np.mean(np.array(self.all_color_values)/self.standard_parameters['BY_comparison_color']),np.std(np.array(self.all_color_values)/self.standard_parameters['BY_comparison_color'])))
text_file.close()
def run(self):
"""docstring for fname"""
# cycle through trials
for i in range(self.standard_parameters['num_trials']):
# prepare the parameters of the following trial based on the shuffled trial array
this_trial_parameters = self.standard_parameters.copy()
this_trial_parameters['RG_offset'] = self.RG_offsets[i]
these_phase_durations = self.phase_durations.copy()
this_trial = ColorMatcherTrial(this_trial_parameters, phase_durations = these_phase_durations, session = self, screen = self.screen, tracker = self.tracker)
# run the prepared trial
this_trial.run(ID = i)
if self.stopped == True:
break
self.close()
|
mit
| 2,694,495,617,494,793,700 | 40.131868 | 247 | 0.711996 | false | 2.999199 | false | false | false |
systers/postorius
|
copybump.py
|
1
|
2797
|
#! /usr/bin/env python3
import os
import re
import sys
import stat
import datetime
FSF = 'by the Free Software Foundation, Inc.'
this_year = datetime.date.today().year
pyre_c = re.compile(r'# Copyright \(C\) ((?P<start>\d{4})-)?(?P<end>\d{4})')
pyre_n = re.compile(r'# Copyright ((?P<start>\d{4})-)?(?P<end>\d{4})')
new_c = '# Copyright (C) {}-{} {}'
new_n = '# Copyright {}-{} {}'
MODE = (stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
if '--noc' in sys.argv:
pyre = pyre_n
new = new_n
sys.argv.remove('--noc')
else:
pyre = pyre_c
new = new_c
def do_file(path, owner):
permissions = os.stat(path).st_mode & MODE
with open(path) as in_file, open(path + '.out', 'w') as out_file:
try:
for line in in_file:
mo_c = pyre_c.match(line)
mo_n = pyre_n.match(line)
if mo_c is None and mo_n is None:
out_file.write(line)
continue
mo = (mo_n if mo_c is None else mo_c)
start = (mo.group('end')
if mo.group('start') is None
else mo.group('start'))
if int(start) == this_year:
out_file.write(line)
continue
print(new.format(start, this_year, owner), file=out_file) # noqa
print('=>', path)
for line in in_file:
out_file.write(line)
except UnicodeDecodeError:
print('Cannot convert path:', path)
os.remove(path + '.out')
return
os.rename(path + '.out', path)
os.chmod(path, permissions)
def remove(dirs, path):
try:
dirs.remove(path)
except ValueError:
pass
def do_walk():
try:
owner = sys.argv[1]
except IndexError:
owner = FSF
for root, dirs, files in os.walk('.'):
if root == '.':
remove(dirs, '.git')
remove(dirs, '.tox')
remove(dirs, 'bin')
remove(dirs, 'contrib')
remove(dirs, 'develop-eggs')
remove(dirs, 'eggs')
remove(dirs, 'parts')
remove(dirs, 'gnu-COPYING-GPL')
remove(dirs, '.installed.cfg')
remove(dirs, '.bzrignore')
remove(dirs, 'distribute_setup.py')
if root == './src':
remove(dirs, 'postorius.egg-info')
if root == './src/postorius':
remove(dirs, 'messages')
for file_name in files:
if os.path.splitext(file_name)[1] in ('.pyc', '.gz', '.egg'):
continue
path = os.path.join(root, file_name)
if os.path.isfile(path):
do_file(path, owner)
if __name__ == '__main__':
do_walk()
|
gpl-3.0
| 5,930,184,830,484,723,000 | 28.135417 | 80 | 0.490526 | false | 3.478856 | false | false | false |
pewen/ten
|
ten/post.py
|
1
|
2525
|
"""
Post processing tools
"""
import os
import numpy as np
def extrac4dir(dir_path, search):
"""
    Extract the epsilon, mean free path, total time and efficiency from all files in a directory.
Parameters
----------
dir_path : str
        Path to the directory with the output files.
search : list
        List with the numbers of acceptors whose efficiencies are extracted.
Return
------
out : matrix
"""
dirs = os.listdir(path=dir_path)
out = np.zeros((len(dirs), len(search) + 3))
for num_file, file_path in enumerate(dirs):
with open(os.path.join(dir_path, file_path), 'r') as file:
cnt = 0
while True:
line = file.readline()
if 'path' in line:
line_split = line.split()
out[num_file][0] = float(line_split[3])
elif 'Epsilon' in line:
line_split = line.split()
out[num_file][1] = float(line_split[1])
elif 'Total time =' in line:
line_split = line.split()
out[num_file][2] = float(line_split[3])
elif 'Nº acceptors' in line:
line = file.readline()
while True:
line = file.readline()
# Remove all spaces
line_without_space = ''.join(line.split())
line_split = line_without_space.split('|')
# End of file
if '+--------------' in line:
break
if line_split[1] == str(search[cnt]):
out[num_file][cnt + 3] = float(line_split[4])
cnt += 1
break
if '' == line:
break
return out
def diference2(efficience, eff_matrix):
"""
    Squared difference between the simulated efficiencies (matrix) and a given efficiency.
Parameters
----------
efficience : array_like
        Efficiencies to compare against.
eff_matrix : matrix
        Matrix given by extrac4dir.
    Return
    ------
    diff_matrix : matrix
        Copy of eff_matrix with an extra column (index 3) holding the sum of
        squared differences against the given efficiencies.
"""
diff_matrix = np.zeros((eff_matrix.shape[0], eff_matrix.shape[1] + 1))
diff_matrix[:, :3] = eff_matrix[:, :3]
diff_matrix[:, 4:] = eff_matrix[:, 3:]
# Diff
for i in range(diff_matrix.shape[0]):
diff_matrix[i][3] = sum((diff_matrix[i][4:] - efficience)**2)
return diff_matrix
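

if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module); the directory name,
    # acceptor numbers and target efficiencies below are placeholder values.
    eff_matrix = extrac4dir('outputs/', search=[10, 50, 100])
    diff_matrix = diference2(np.array([0.2, 0.5, 0.8]), eff_matrix)
    print(diff_matrix)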
|
mit
| -8,547,275,649,797,289,000 | 27.681818 | 96 | 0.479398 | false | 4.117455 | false | false | false |
AstroFloyd/SolTrack
|
Python/soltrack/riseset.py
|
2
|
8500
|
"""SolTrack: a simple, free, fast and accurate C routine to compute the position of the Sun.
Copyright (c) 2014-2020 Marc van der Sluys, Paul van Kan and Jurgen Reintjes,
Sustainable Energy research group, HAN University of applied sciences, Arnhem, The Netherlands
This file is part of the SolTrack package, see: http://soltrack.sourceforge.net
SolTrack is derived from libTheSky (http://libthesky.sourceforge.net) under the terms of the GPL v.3
This is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General
Public License as published by the Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
This software is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
License for more details.
You should have received a copy of the GNU Lesser General Public License along with this code. If not, see
<http://www.gnu.org/licenses/>.
"""
import math as m
import numpy as np
import soltrack as st
from soltrack.data import PI,TWO_PI, R2D,R2H
from soltrack.dataclasses import Location, Time, RiseSet, copyDataclass
def computeSunRiseSet(location, time, rsAlt=0.0, useDegrees=False, useNorthEqualsZero=False):
"""Compute rise, transit and set times for the Sun, as well as their azimuths/altitude.
Parameters:
location (Location): Dataclass containing the geographic location to compute the Sun's rise and set times for.
time (Time): Dataclass containing date and time to compute the position for, in UT.
rsAlt (float): Altitude to return rise/set data for (radians; optional, default=0.0 meaning actual rise/set). Set rsAlt>pi/2 to compute transit only.
useDegrees (bool): Use degrees for input and output angular variables, rather than radians (optional, default=False).
useNorthEqualsZero (bool): Use the definition where azimuth=0 denotes north, rather than south (optional, default=False).
Returns:
(RiseSet): Dataclass containing the Sun's rise, transit and set data.
Note:
- if rsAlt == 0.0, actual rise and set times are computed
- if rsAlt != 0.0, the routine calculates when alt = rsAlt is reached
- returns times, rise/set azimuth and transit altitude in the dataclass riseSet
See:
- subroutine riset() in riset.f90 from libTheSky (libthesky.sf.net) for more info
"""
tmRad = np.zeros(3)
azalt = np.zeros(3)
alt=0.0; ha=0.0; h0=0.0
    computeRefrEquatorial = True  # Compute refraction-corrected equatorial coordinates (Hour angle, declination).
computeDistance = False # Compute the distance to the Sun in AU.
rsa = -0.8333/R2D # Standard altitude for the Sun in radians
if(abs(rsAlt) > 1.e-9): rsa = rsAlt # Use a user-specified altitude
    # If the user uses degrees, convert the geographic location to radians:
# This was a local variable llocation in C
loc = copyDataclass(Location, location) # Local instance of the Location dataclass, so that it can be changed
if(useDegrees):
loc.longitude /= R2D
loc.latitude /= R2D
# Set date and time to midnight UT for the desired day:
rsTime = Time() # Local instance of the dataclass Time
rsTime.year = time.year
rsTime.month = time.month
rsTime.day = time.day
rsTime.hour = 0
rsTime.minute = 0
rsTime.second = 0.0
# Compute the Sun's position. Returns a Position object:
pos = st.computeSunPosition(loc, rsTime, False, useNorthEqualsZero, computeRefrEquatorial, computeDistance) # useDegrees = False: NEVER use degrees internally!
agst0 = pos.agst # AGST for midnight
evMax = 3 # Compute transit, rise and set times by default (1-3)
cosH0 = (m.sin(rsa)-m.sin(loc.latitude)*m.sin(pos.declination)) / \
(m.cos(loc.latitude)*m.cos(pos.declination))
if(abs(cosH0) > 1.0): # Body never rises/sets
evMax = 1 # Compute transit time and altitude only
else:
h0 = m.acos(cosH0) % PI # Should probably work without %
tmRad[0] = (pos.rightAscension - loc.longitude - pos.agst) % TWO_PI # Transit time in radians; lon0 > 0 for E
if(evMax > 1):
tmRad[1] = (tmRad[0] - h0) % TWO_PI # Rise time in radians
tmRad[2] = (tmRad[0] + h0) % TWO_PI # Set time in radians
accur = 1.0e-5 # Accuracy; 1e-5 rad ~ 0.14s. Don't make this smaller than 1e-16
for evi in range(evMax): # Loop over transit, rise, set
iter = 0
dTmRad = m.inf
while(abs(dTmRad) > accur):
th0 = agst0 + 1.002737909350795*tmRad[evi] # Solar day in sidereal days in 2000
rsTime.second = tmRad[evi]*R2H*3600.0 # Radians -> seconds - w.r.t. midnight (h=0,m=0)
pos = st.computeSunPosition(loc, rsTime, False, useNorthEqualsZero, computeRefrEquatorial, computeDistance) # useDegrees = False: NEVER use degrees internally!
ha = revPI(th0 + loc.longitude - pos.rightAscension) # Hour angle: -PI - +PI
alt = m.asin(m.sin(loc.latitude)*m.sin(pos.declination) +
m.cos(loc.latitude)*m.cos(pos.declination)*m.cos(ha)) # Altitude
# Correction to transit/rise/set times:
if(evi==0): # Transit
dTmRad = -revPI(ha) # -PI - +PI
else: # Rise/set
dTmRad = (alt-rsa)/(m.cos(pos.declination)*m.cos(loc.latitude)*m.sin(ha))
tmRad[evi] = tmRad[evi] + dTmRad
# Print debug output to stdOut:
# print(" %4i %2i %2i %2i %2i %9.3lf " % (rsTime.year,rsTime.month,rsTime.day, rsTime.hour,rsTime.minute,rsTime.second))
# print(" %3i %4i %9.3lf %9.3lf %9.3lf \n" % (evi,iter, tmRad[evi]*24,abs(dTmRad)*24,accur*24))
iter += 1
if(iter > 30): break # while loop doesn't seem to converge
# while(abs(dTmRad) > accur)
if(iter > 30): # Convergence failed
print("\n *** WARNING: riset(): Riset failed to converge: %i %9.3lf ***\n" % (evi,rsAlt))
tmRad[evi] = -m.inf
azalt[evi] = -m.inf
else: # Result converged, store it
if(evi == 0):
azalt[evi] = alt # Transit altitude
else:
azalt[evi] = m.atan2( m.sin(ha), ( m.cos(ha) * m.sin(loc.latitude) -
m.tan(pos.declination) * m.cos(loc.latitude) ) ) # Rise,set hour angle -> azimuth
if(tmRad[evi] < 0.0 and abs(rsAlt) < 1.e-9):
tmRad[evi] = -m.inf
azalt[evi] = -m.inf
# for-loop evi
# Set north to zero radians for azimuth if desired:
if(useNorthEqualsZero):
azalt[1] = (azalt[1] + PI) % TWO_PI # Add PI and fold between 0 and 2pi
azalt[2] = (azalt[2] + PI) % TWO_PI # Add PI and fold between 0 and 2pi
# Convert resulting angles to degrees if desired:
if(useDegrees):
azalt[0] *= R2D # Transit altitude
azalt[1] *= R2D # Rise azimuth
azalt[2] *= R2D # Set azimuth
# Store results:
riseSet = RiseSet() # Instance of the dataclass RiseSet, to store and return the results
riseSet.transitTime = tmRad[0]*R2H # Transit time - radians -> hours
riseSet.riseTime = tmRad[1]*R2H # Rise time - radians -> hours
riseSet.setTime = tmRad[2]*R2H # Set time - radians -> hours
riseSet.transitAltitude = azalt[0] # Transit altitude
riseSet.riseAzimuth = azalt[1] # Rise azimuth
riseSet.setAzimuth = azalt[2] # Set azimuth
return riseSet
def revPI(angle):
"""Fold an angle in radians to take a value between -PI and +PI.
Parameters:
angle (float): Angle to fold (rad).
"""
return ((angle + PI) % TWO_PI) - PI
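

if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module). Filling Location and
    # Time via attribute assignment mirrors how Time is used above, but is an
    # assumption about the dataclasses; the site and date are placeholder values.
    loc = Location()
    loc.longitude, loc.latitude = 5.0, 52.0  # degrees, matching useDegrees=True below
    t = Time()
    t.year, t.month, t.day = 2020, 6, 21
    t.hour = t.minute = 0
    t.second = 0.0
    rs = computeSunRiseSet(loc, t, useDegrees=True)
    print(rs.riseTime, rs.transitTime, rs.setTime)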
|
lgpl-3.0
| 6,757,125,456,409,840,000 | 42.814433 | 173 | 0.597882 | false | 3.451076 | false | false | false |
asd43/Structural-Variation
|
popgen/mergeBD2bed.py
|
1
|
1961
|
#!/usr/bin/env python3
# Copyright (c) 2017 Genome Research Ltd.
# Author: Alistair Dunham
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License , or (at your option) any later
# version.
# This program is distributed in the hope that it will be useful , but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
# You should have received a copy of the GNU General Public License along with
# this program. If not , see <http :// www.gnu.org/licenses/>.
## Script to merge filtered BD call files to bed format for processing
import argparse
import fileinput
parser = argparse.ArgumentParser(description="Merge filtered BreakDancer variant calls into a single bed file for genotyping. "
"If the ID flag is given an ID is expected in the column beyond the normal BD output, otherwise an id is generated.")
parser.add_argument('bd',metavar='B',type=str,help="File containing a list of BreakDancer variant call files (.BD_out format).")
parser.add_argument('--id','-i',action='store_true',help="Use IDs added to the BD_out file on filtering. Otherwise generate IDs enumerating only filtered calls.")
args = parser.parse_args()
bdFiles = []
with fileinput.input(args.bd) as fi:
for i in fi:
bdFiles.append(i.strip())
## Stream through each file and output bed formated versions
for fi in bdFiles:
with fileinput.input(fi) as bd:
if not args.id:
f = fi.split('/')
            # str.strip() removes characters, not a suffix; drop the '.BD_out' extension explicitly
            idBase = f[-1]
            if idBase.endswith('.BD_out'):
                idBase = idBase[:-len('.BD_out')]
n = 1
for li in bd:
if not li[0] == '#':
t = li.strip().split()
if args.id:
ID = t[12]
else:
                    ID = '.'.join(['BD', idBase, str(n)])
n += 1
print(t[0],t[1],t[4],ID,sep='\t')
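
# Example invocation (hypothetical file names):
#   python3 mergeBD2bed.py bd_files.list --id > merged_calls.bed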
|
gpl-3.0
| 1,119,849,092,429,157,500 | 39.020408 | 162 | 0.684345 | false | 3.279264 | false | false | false |
maltsev/LatexWebOffice
|
app/tests/server/views/test_document.py
|
1
|
4826
|
# -*- coding: utf-8 -*-
"""
* Purpose : Tests for the document and project management views (app/view/documents.py)
* Creation Date : 20-11-2014
* Last Modified : Wed 26 Nov 2014 14:58:13 CET
* Author : mattis
* Coauthors : christian
* Sprint number : 2
* Backlog entry : -
"""
from app.common.constants import ERROR_MESSAGES
from app.common import util
from app.tests.server.views.viewtestcase import ViewTestCase
class DocumentsTestClass(ViewTestCase):
def setUp(self):
"""Setup Methode für die einzelnen Tests
Diese Funktion wird vor jeder Testfunktion ausgeführt.
Damit werden die notwendigen Variablen und Modelle für jeden Test neu initialisiert.
Die Methoden hierzu befinden sich im ViewTestCase (viewtestcase.py).
:return: None
"""
self.setUpSingleUser()
def tearDown(self):
"""Freigabe von nicht mehr notwendigen Ressourcen.
Diese Funktion wird nach jeder Testfunktion ausgeführt.
:return: None
"""
pass
def test_Execute(self):
"""Test der execute() Methode des document view
Teste die Verteilfunktion, die die verschiedenen Document-commands den richtigen Methode zuweist.
Testfälle:
- user1 ruft createdir mit fehlendem Parameter id auf -> Fehler
- user1 ruft unbekannten Befehl auf -> Fehler
- user1 ruft createdir mit eine String als id auf -> Fehler
- user1 ruft updatefile auf ohne den content mitzusenden -> Fehler
:return: None
"""
missingpara_id = {'name': 'id', 'type': int}
missingpara_content = {'name': 'content', 'type': str}
missingpara_name = {'name': 'name', 'type': str}
        # Test a call with a missing parameter
        # the createdir command requires the parameters 'id': parentdirid and 'name': directoryname
response = util.documentPoster(self, command='createdir', idpara=None, name='newfolder')
        # expected server response
serveranswer = ERROR_MESSAGES['MISSINGPARAMETER'] % missingpara_id
        # check the server response
        # the status should be failure
        # the server response should match serveranswer
util.validateJsonFailureResponse(self, response.content, serveranswer)
# --------------------------------------------------------------------------------------------------------------
        # Test an unknown command ('command')
response = util.documentPoster(self, command='DOESNOTEXIST')
        # expected server response
serveranswer = ERROR_MESSAGES['COMMANDNOTFOUND']
        # check the server response
        # the status should be failure
        # the server response should match serveranswer
util.validateJsonFailureResponse(self, response.content, serveranswer)
# --------------------------------------------------------------------------------------------------------------
        # Send a request to create a folder with a string as the ID
response = util.documentPoster(self, command='createdir', idpara='noIntID', name='newfolder')
        # expected server response
serveranswer = ERROR_MESSAGES['MISSINGPARAMETER'] % missingpara_id
        # check the server response
        # the status should be failure
        # the server response should match serveranswer
util.validateJsonFailureResponse(self, response.content, serveranswer)
# --------------------------------------------------------------------------------------------------------------
        # Send a request to change the content of a .tex file without sending the content
response = util.documentPoster(self, command='updatefile', idpara=1)
        # expected server response
serveranswer = ERROR_MESSAGES['MISSINGPARAMETER'] % missingpara_content
        # check the server response
        # the status should be failure
        # the server response should match serveranswer
#util.validateJsonFailureResponse(self, response.content, serveranswer)
# --------------------------------------------------------------------------------------------------------------
        # Send a request to rename a file without sending the new name
response = util.documentPoster(self, command='renamefile', idpara=1)
        # expected server response
serveranswer = ERROR_MESSAGES['MISSINGPARAMETER'] % missingpara_name
        # check the server response
        # the status should be failure
        # the server response should match serveranswer
#util.validateJsonFailureResponse(self, response.content, serveranswer)
|
gpl-3.0
| -6,987,938,852,695,280,000 | 37.134921 | 120 | 0.619484 | false | 3.729814 | true | false | false |
francois-berder/PyLetMeCreate
|
letmecreate/click/joystick.py
|
1
|
1291
|
#!/usr/bin/env python3
"""Python binding of Joystick wrapper of LetMeCreate library."""
import ctypes
_LIB = ctypes.CDLL('libletmecreate_click.so')
def get_x():
"""Returns the X position of the joystick.
Note: An exception is thrown if it fails to read the X position from the
chip.
"""
pos_x = ctypes.c_int8(0)
ret = _LIB.joystick_click_get_x(ctypes.byref(pos_x))
if ret < 0:
raise Exception("joystick click get x failed")
return pos_x.value
def get_y():
"""Returns the Y position of the joystick.
Note: An exception is thrown if it fails to read the Y position from the
chip.
"""
pos_y = ctypes.c_int8(0)
ret = _LIB.joystick_click_get_y(ctypes.byref(pos_y))
if ret < 0:
raise Exception("joystick click get y failed")
return pos_y.value
def get_position():
"""Returns the X position of the joystick.
Note: An exception is thrown if it fails to read the position from the
chip.
"""
pos_x = ctypes.c_int8(0)
pos_y = ctypes.c_int8(0)
ret = _LIB.joystick_click_get_position(ctypes.byref(pos_x),
ctypes.byref(pos_y))
if ret < 0:
raise Exception("joystick click get position failed")
return (pos_x.value, pos_y.value)
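

if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module); assumes a Joystick
    # Click board is connected and libletmecreate_click.so is installed.
    x, y = get_position()
    print('joystick position: x={} y={}'.format(x, y))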
|
bsd-3-clause
| -7,252,341,453,528,857,000 | 26.468085 | 76 | 0.625871 | false | 3.34456 | false | false | false |
GoogleCloudPlatform/appengine-config-transformer
|
yaml_conversion/lib/google/appengine/api/yaml_listener.py
|
1
|
7849
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/python2.4
#
# Copyright 2007 Google Inc. All Rights Reserved.
"""PyYAML event listener
Contains class which interprets YAML events and forwards them to
a handler object.
"""
from yaml_conversion.lib.google.appengine.api import yaml_errors
import yaml
# Default mapping of event type to handler method name
_EVENT_METHOD_MAP = {
yaml.events.StreamStartEvent: 'StreamStart',
yaml.events.StreamEndEvent: 'StreamEnd',
yaml.events.DocumentStartEvent: 'DocumentStart',
yaml.events.DocumentEndEvent: 'DocumentEnd',
yaml.events.AliasEvent: 'Alias',
yaml.events.ScalarEvent: 'Scalar',
yaml.events.SequenceStartEvent: 'SequenceStart',
yaml.events.SequenceEndEvent: 'SequenceEnd',
yaml.events.MappingStartEvent: 'MappingStart',
yaml.events.MappingEndEvent: 'MappingEnd',
}
class EventHandler(object):
"""Handler interface for parsing YAML files.
Implement this interface to define specific YAML event handling class.
Implementing classes instances are passed to the constructor of
EventListener to act as a receiver of YAML parse events.
"""
def StreamStart(self, event, loader):
"""Handle start of stream event"""
def StreamEnd(self, event, loader):
"""Handle end of stream event"""
def DocumentStart(self, event, loader):
"""Handle start of document event"""
def DocumentEnd(self, event, loader):
"""Handle end of document event"""
def Alias(self, event, loader):
"""Handle alias event"""
def Scalar(self, event, loader):
"""Handle scalar event"""
def SequenceStart(self, event, loader):
"""Handle start of sequence event"""
def SequenceEnd(self, event, loader):
"""Handle end of sequence event"""
def MappingStart(self, event, loader):
"""Handle start of mappping event"""
def MappingEnd(self, event, loader):
"""Handle end of mapping event"""
class EventListener(object):
"""Helper class to re-map PyYAML events to method calls.
By default, PyYAML generates its events via a Python generator. This class
is a helper that iterates over the events from the PyYAML parser and forwards
them to a handle class in the form of method calls. For simplicity, the
underlying event is forwarded to the handler as a parameter to the call.
This object does not itself produce iterable objects, but is really a mapping
to a given handler instance.
Example use:
    class PrintDocumentHandler(EventHandler):
      def DocumentStart(self, event, loader):
        print "A new document has been started"
    EventListener(PrintDocumentHandler()).Parse('''
    key1: value1
    ---
    key2: value2
    ''')
    >>> A new document has been started
        A new document has been started
In the example above, the implemented handler class (PrintDocumentHandler)
has a single method which reports each time a new document is started within
a YAML file. It is not necessary to subclass the EventListener, merely it
receives a PrintDocumentHandler instance. Every time a new document begins,
PrintDocumentHandler.DocumentStart is called with the PyYAML event passed
in as its parameter..
"""
def __init__(self, event_handler):
"""Initialize PyYAML event listener.
Constructs internal mapping directly from event type to method on actual
handler. This prevents reflection being used during actual parse time.
Args:
event_handler: Event handler that will receive mapped events. Must
implement at least one appropriate handler method named from
the values of the _EVENT_METHOD_MAP.
Raises:
ListenerConfigurationError if event_handler is not an EventHandler.
"""
if not isinstance(event_handler, EventHandler):
raise yaml_errors.ListenerConfigurationError(
'Must provide event handler of type yaml_listener.EventHandler')
self._event_method_map = {}
# For each event type in default method map...
for event, method in _EVENT_METHOD_MAP.iteritems():
# Map event class to actual method
self._event_method_map[event] = getattr(event_handler, method)
def HandleEvent(self, event, loader=None):
"""Handle individual PyYAML event.
Args:
event: Event to forward to method call in method call.
Raises:
IllegalEvent when receives an unrecognized or unsupported event type.
"""
# Must be valid event object
if event.__class__ not in _EVENT_METHOD_MAP:
raise yaml_errors.IllegalEvent(
"%s is not a valid PyYAML class" % event.__class__.__name__)
# Conditionally handle event
if event.__class__ in self._event_method_map:
self._event_method_map[event.__class__](event, loader)
def _HandleEvents(self, events):
"""Iterate over all events and send them to handler.
This method is not meant to be called from the interface.
Only use in tests.
Args:
events: Iterator or generator containing events to process.
raises:
EventListenerParserError when a yaml.parser.ParserError is raised.
EventError when an exception occurs during the handling of an event.
"""
for event in events:
try:
self.HandleEvent(*event)
except Exception, e:
event_object, loader = event
raise yaml_errors.EventError(e, event_object)
def _GenerateEventParameters(self,
stream,
loader_class=yaml.loader.SafeLoader):
"""Creates a generator that yields event, loader parameter pairs.
For use as parameters to HandleEvent method for use by Parse method.
During testing, _GenerateEventParameters is simulated by allowing
the harness to pass in a list of pairs as the parameter.
A list of (event, loader) pairs must be passed to _HandleEvents otherwise
it is not possible to pass the loader instance to the handler.
Also responsible for instantiating the loader from the Loader
parameter.
Args:
stream: String document or open file object to process as per the
yaml.parse method. Any object that implements a 'read()' method which
returns a string document will work.
Loader: Loader class to use as per the yaml.parse method. Used to
instantiate new yaml.loader instance.
Yields:
Tuple(event, loader) where:
event: Event emitted by PyYAML loader.
loader_class: Used for dependency injection.
"""
assert loader_class is not None
try:
loader = loader_class(stream)
while loader.check_event():
yield (loader.get_event(), loader)
except yaml.error.YAMLError, e:
raise yaml_errors.EventListenerYAMLError(e)
def Parse(self, stream, loader_class=yaml.loader.SafeLoader):
"""Call YAML parser to generate and handle all events.
Calls PyYAML parser and sends resulting generator to handle_event method
for processing.
Args:
stream: String document or open file object to process as per the
yaml.parse method. Any object that implements a 'read()' method which
returns a string document will work with the YAML parser.
loader_class: Used for dependency injection.
"""
self._HandleEvents(self._GenerateEventParameters(stream, loader_class))
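

if __name__ == '__main__':
  # Minimal usage sketch (not part of the original module): count the documents
  # in a small YAML stream using the listener machinery defined above.
  class DocumentCounter(EventHandler):
    def __init__(self):
      self.count = 0

    def DocumentStart(self, event, loader):
      self.count += 1

  counter = DocumentCounter()
  EventListener(counter).Parse('key1: value1\n---\nkey2: value2\n')
  print 'documents seen:', counter.count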
|
apache-2.0
| 6,397,356,658,310,916,000 | 33.884444 | 79 | 0.706842 | false | 4.407075 | false | false | false |
shichao-an/leetcode-python
|
product_of_array_except_self/solution4.py
|
1
|
1363
|
"""
Given an array of n integers where n > 1, nums, return an array output such
that output[i] is equal to the product of all the elements of nums except
nums[i].
Solve it without division and in O(n).
For example, given [1,2,3,4], return [24,12,8,6].
Follow up:
Could you solve it with constant space complexity? (Note: The output array
does not count as extra space for the purpose of space complexity analysis.)
"""
class Solution(object):
def productExceptSelf(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
n = len(nums)
res = [1 for i in range(n)]
# Scan from left to right
for i in range(1, n):
# i is from 1 to n - 1
# res[i] is the product accumulated to the left
res[i] = res[i - 1] * nums[i - 1]
# right_product is the product accumulated to the right
right_product = 1
for i in range(1, n):
            # j ranges from n - 2 down to 0
j = n - 1 - i
right_product *= nums[j + 1]
res[j] *= right_product
return res
a0 = [0, 0]
a1 = [1, 2, 3]
a2 = [2, 3, 4]
a3 = [1, 2, 3, 4]
a4 = [2, 3, 4, 5]
s = Solution()
print(s.productExceptSelf(a0))
print(s.productExceptSelf(a1))
print(s.productExceptSelf(a2))
print(s.productExceptSelf(a3))
print(s.productExceptSelf(a4))
|
bsd-2-clause
| -8,392,910,952,196,059,000 | 25.72549 | 76 | 0.58474 | false | 3.111872 | false | false | false |
alok1974/nbCodeLines
|
modules/codeLines.py
|
1
|
7366
|
###########################################################################################
###########################################################################################
## ##
## Nb Code Lines v 1.0 (c) 2015 Alok Gandhi ([email protected]) ##
## ##
## ##
## This file is part of Nb Code Lines. ##
## ##
## Nb Code lines is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License, Version 3, 29 June 2007 ##
## as published by the Free Software Foundation, ##
## ##
## Nb Code Lines is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with Nb Code lines. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###########################################################################################
###########################################################################################
import os
import sys
from datetime import date
from PyQt4 import QtCore, QtGui
import time
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.join(__file__)), '..'))
if ROOT_DIR not in sys.path:
sys.path.append(ROOT_DIR)
from gui.logger import Logger
class CodeLines(object):
def __init__(self, qThread=None, folder="", ext=[], startDate=0, startMonth=0, startYear=0, *args, **kwargs):
super(CodeLines, self).__init__(*args, **kwargs)
# Supplied Arguments
self._qThread = qThread
self._folder = folder
self._ext = ext
self._startDate = startDate
self._startMonth = startMonth
self._startYear = startYear
# Data to Calculate
self._data = []
self._prjStartDate = None
self._nbPrjDays = 0
self._nbTotalLines = 0
self._nbActualLines = 0
self._codeDensity = 0.0
self._avgLinesPerDay = 0
self._avgLinesPerHour = 0
self._hasError = False
self._errStr = ''
self._findAll = False
if '*.*' in self._ext:
self._findAll = True
# Initialization Methods
if not self._qThread:
self._assert()
self._generateData()
def runThread(self):
self._assert()
self._generateData()
return self.getData()
def _assert(self):
if self._folder == '':
self._hasError = True
self._errStr = 'No script folder provided!'
if self._qThread:
self._qThread.emit(QtCore.SIGNAL("errorOccured(PyQt_PyObject)"), self._errStr)
return
if not os.path.exists(self._folder):
self._hasError = True
self._errStr = 'The folder <%s> does not exist!' % self._folder
if self._qThread:
self._qThread.emit(QtCore.SIGNAL("errorOccured(PyQt_PyObject)"), self._errStr)
return
if len(self._ext) == 0:
self._hasError = True
self._errStr = 'No script file extensions provided!'
if self._qThread:
self._qThread.emit(QtCore.SIGNAL("errorOccured(PyQt_PyObject)"), self._errStr)
return
try:
self._prjStartDate = date(self._startYear, self._startMonth, self._startDate)
self._nbPrjDays = (date.today() - self._prjStartDate).days
if self._nbPrjDays <= 0:
self._hasError = True
self._errStr = 'Project Start Date should be smaller than current date !'
if self._qThread:
self._qThread.emit(QtCore.SIGNAL("errorOccured(PyQt_PyObject)"), self._errStr)
except:
self._hasError = True
self._errStr = 'Supplied Date parameters are not valid!'
if self._qThread:
self._qThread.emit(QtCore.SIGNAL("errorOccured(PyQt_PyObject)"), self._errStr)
return
def _generateData(self):
if self._hasError:
return
for root, dirs, files in os.walk(self._folder):
for f in files:
fName, ext = os.path.splitext(f)
openPath = os.path.abspath(os.path.join(root, f))
if self._qThread:
self._qThread.emit(QtCore.SIGNAL("update(PyQt_PyObject)"), str(f))
if not self._findAll:
if ext not in self._ext:
continue
with open(openPath) as file:
lines = file.readlines()
nbLines = len(lines)
n = 0
for line in lines:
if not str(line).strip():
continue
n += 1
self._data.append(((n, nbLines), str(f), str(os.path.join(root, f))))
self._nbTotalLines += nbLines
self._nbActualLines += n
self._data.sort(reverse=True)
if len(self._data) == 0:
self._hasError = True
self._errStr = self._wrap(self._folder, 'No Script files found in the root folder:')
if self._qThread:
self._qThread.emit(QtCore.SIGNAL("errorOccured(PyQt_PyObject)"), self._errStr)
return
self._codeDensity = (round((self._nbActualLines / float(self._nbTotalLines)) * 100, 2))
self._avgLinesPerDay = int(self._nbActualLines / float(self._nbPrjDays))
self._avgLinesPerHour = int(self._avgLinesPerDay / 8.0)
@staticmethod
def _wrap(folderPath, defaultStr):
result = ''
if len(folderPath) > len(defaultStr):
result = folderPath[:len(defaultStr) - 2]
result += '... '
return '%s\n\n%s' % (defaultStr, result)
def getData(self):
return self._data, self._nbPrjDays, self._avgLinesPerDay, self._avgLinesPerHour, self._codeDensity
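

if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module); the folder, extensions
    # and project start date are placeholder values.
    counter = CodeLines(folder='.', ext=['.py'], startDate=1, startMonth=1, startYear=2015)
    data, nb_days, lines_per_day, lines_per_hour, density = counter.getData()
    print(nb_days, lines_per_day, lines_per_hour, density)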
|
gpl-3.0
| 5,580,130,533,473,561,000 | 37.570681 | 113 | 0.435379 | false | 4.900865 | false | false | false |
jleclanche/fireplace
|
fireplace/cards/classic/neutral_common.py
|
1
|
5375
|
from ..utils import *
##
# Free basic minions
class CS2_122:
"""Raid Leader"""
update = Refresh(FRIENDLY_MINIONS - SELF, buff="CS2_122e")
CS2_122e = buff(atk=1)
class CS2_222:
"""Stormwind Champion"""
update = Refresh(FRIENDLY_MINIONS - SELF, buff="CS2_222o")
CS2_222o = buff(+1, +1)
class CS2_226:
"""Frostwolf Warlord"""
play = Buff(SELF, "CS2_226e") * Count(FRIENDLY_MINIONS - SELF)
CS2_226e = buff(+1, +1)
class EX1_011:
"""Voodoo Doctor"""
requirements = {PlayReq.REQ_TARGET_IF_AVAILABLE: 0}
play = Heal(TARGET, 2)
class EX1_015:
"""Novice Engineer"""
play = Draw(CONTROLLER)
class EX1_082:
"""Mad Bomber"""
play = Hit(RANDOM_OTHER_CHARACTER, 1) * 3
class EX1_102:
"""Demolisher"""
events = OWN_TURN_BEGIN.on(Hit(RANDOM_ENEMY_CHARACTER, 2))
class EX1_162:
"""Dire Wolf Alpha"""
update = Refresh(SELF_ADJACENT, buff="EX1_162o")
EX1_162o = buff(atk=1)
class EX1_399:
"""Gurubashi Berserker"""
events = SELF_DAMAGE.on(Buff(SELF, "EX1_399e"))
EX1_399e = buff(atk=3)
class EX1_508:
"""Grimscale Oracle"""
update = Refresh(FRIENDLY_MINIONS + MURLOC - SELF, buff="EX1_508o")
EX1_508o = buff(atk=1)
class EX1_593:
"""Nightblade"""
play = Hit(ENEMY_HERO, 3)
class EX1_595:
"""Cult Master"""
events = Death(FRIENDLY + MINION).on(Draw(CONTROLLER))
##
# Common basic minions
class CS2_117:
"""Earthen Ring Farseer"""
requirements = {PlayReq.REQ_TARGET_IF_AVAILABLE: 0}
play = Heal(TARGET, 3)
class CS2_141:
"""Ironforge Rifleman"""
requirements = {PlayReq.REQ_NONSELF_TARGET: 0, PlayReq.REQ_TARGET_IF_AVAILABLE: 0}
play = Hit(TARGET, 1)
class CS2_146:
"""Southsea Deckhand"""
update = Find(FRIENDLY_WEAPON) & Refresh(SELF, {GameTag.CHARGE: True})
class CS2_147:
"""Gnomish Inventor"""
play = Draw(CONTROLLER)
class CS2_150:
"""Stormpike Commando"""
requirements = {PlayReq.REQ_NONSELF_TARGET: 0, PlayReq.REQ_TARGET_IF_AVAILABLE: 0}
play = Hit(TARGET, 2)
class CS2_151:
"""Silver Hand Knight"""
play = Summon(CONTROLLER, "CS2_152")
class CS2_189:
"""Elven Archer"""
requirements = {PlayReq.REQ_NONSELF_TARGET: 0, PlayReq.REQ_TARGET_IF_AVAILABLE: 0}
play = Hit(TARGET, 1)
class CS2_188:
"""Abusive Sergeant"""
requirements = {PlayReq.REQ_MINION_TARGET: 0, PlayReq.REQ_TARGET_IF_AVAILABLE: 0}
play = Buff(TARGET, "CS2_188o")
CS2_188o = buff(atk=2)
class CS2_196:
"""Razorfen Hunter"""
play = Summon(CONTROLLER, "CS2_boar")
class CS2_203:
"""Ironbeak Owl"""
requirements = {PlayReq.REQ_MINION_TARGET: 0, PlayReq.REQ_TARGET_IF_AVAILABLE: 0}
play = Silence(TARGET)
class CS2_221:
"""Spiteful Smith"""
enrage = Refresh(FRIENDLY_WEAPON, buff="CS2_221e")
CS2_221e = buff(atk=2)
class CS2_227:
"""Venture Co. Mercenary"""
update = Refresh(FRIENDLY_HAND + MINION, {GameTag.COST: +3})
class DS1_055:
"""Darkscale Healer"""
play = Heal(FRIENDLY_CHARACTERS, 2)
class EX1_007:
"""Acolyte of Pain"""
events = SELF_DAMAGE.on(Draw(CONTROLLER))
class EX1_019:
"""Shattered Sun Cleric"""
requirements = {
PlayReq.REQ_FRIENDLY_TARGET: 0,
PlayReq.REQ_MINION_TARGET: 0,
PlayReq.REQ_TARGET_IF_AVAILABLE: 0}
play = Buff(TARGET, "EX1_019e")
EX1_019e = buff(+1, +1)
class EX1_025:
"""Dragonling Mechanic"""
play = Summon(CONTROLLER, "EX1_025t")
class EX1_029:
"""Leper Gnome"""
deathrattle = Hit(ENEMY_HERO, 2)
class EX1_046:
"""Dark Iron Dwarf"""
requirements = {PlayReq.REQ_MINION_TARGET: 0, PlayReq.REQ_TARGET_IF_AVAILABLE: 0}
play = Buff(TARGET, "EX1_046e")
EX1_046e = buff(atk=2)
class EX1_048:
"""Spellbreaker"""
requirements = {
PlayReq.REQ_MINION_TARGET: 0,
PlayReq.REQ_NONSELF_TARGET: 0,
PlayReq.REQ_TARGET_IF_AVAILABLE: 0}
play = Silence(TARGET)
class EX1_049:
"""Youthful Brewmaster"""
requirements = {
PlayReq.REQ_FRIENDLY_TARGET: 0,
PlayReq.REQ_MINION_TARGET: 0,
PlayReq.REQ_NONSELF_TARGET: 0,
PlayReq.REQ_TARGET_IF_AVAILABLE: 0}
play = Bounce(TARGET)
class EX1_057:
"""Ancient Brewmaster"""
requirements = {
PlayReq.REQ_FRIENDLY_TARGET: 0,
PlayReq.REQ_MINION_TARGET: 0,
PlayReq.REQ_NONSELF_TARGET: 0,
PlayReq.REQ_TARGET_IF_AVAILABLE: 0}
play = Bounce(TARGET)
class EX1_066:
"""Acidic Swamp Ooze"""
play = Destroy(ENEMY_WEAPON)
class EX1_096:
"""Loot Hoarder"""
deathrattle = Draw(CONTROLLER)
class EX1_283:
"""Frost Elemental"""
requirements = {PlayReq.REQ_TARGET_IF_AVAILABLE: 0}
play = Freeze(TARGET)
class EX1_390:
"""Tauren Warrior"""
enrage = Refresh(SELF, buff="EX1_390e")
EX1_390e = buff(atk=3)
class EX1_393:
"""Amani Berserker"""
enrage = Refresh(SELF, buff="EX1_393e")
EX1_393e = buff(atk=3)
class EX1_412:
"""Raging Worgen"""
enrage = Refresh(SELF, buff="EX1_412e")
class EX1_412e:
tags = {GameTag.ATK: +1}
windfury = SET(1)
class EX1_506:
"""Murloc Tidehunter"""
play = Summon(CONTROLLER, "EX1_506a")
class EX1_556:
"""Harvest Golem"""
deathrattle = Summon(CONTROLLER, "skele21")
class EX1_583:
"""Priestess of Elune"""
play = Heal(FRIENDLY_HERO, 4)
class NEW1_018:
"""Bloodsail Raider"""
play = Find(FRIENDLY_WEAPON) & Buff(SELF, "NEW1_018e", atk=ATK(FRIENDLY_WEAPON))
class NEW1_022:
"""Dread Corsair"""
cost_mod = -ATK(FRIENDLY_WEAPON)
class tt_004:
"""Flesheating Ghoul"""
events = Death(MINION).on(Buff(SELF, "tt_004o"))
tt_004o = buff(atk=1)
##
# Unused buffs
# Full Strength (Injured Blademaster)
CS2_181e = buff(atk=2)
|
agpl-3.0
| 2,019,998,216,743,704,000 | 16.33871 | 83 | 0.673116 | false | 2.258403 | false | false | false |
matteoredaelli/scrapy_web
|
scrapy_web/spiders/nomi_maschili_femminili_nomix_it.py
|
1
|
1908
|
# -*- coding: utf-8 -*-
# scrapy_web
# Copyright (C) 2016-2017 <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# scrapy crawl nomi-nomix.it -t jsonlines -o nomi.json
import scrapy
class NomiMaschiliFemminiliNomixItSpider(scrapy.Spider):
name = "nomi-nomix.it"
allowed_domains = ["nomix.it"]
start_urls = (
'http://www.nomix.it/nomi-italiani-maschili-e-femminili.php',
)
def parse(self, response):
for nome in response.xpath('//div[@class="pure-g"]/div[1]/table//td/text()').extract():
yield {"word": nome,
"class": "nome proprio",
"sex": "male",
"source": "nomix.com"}
for nome in response.xpath('//div[@class="pure-g"]/div[2]/table//td/text()').extract():
yield {"word": nome,
"class": "nome proprio",
"sex": "female",
"source": "nomix.com"}
# extracting next pages
for next_page in response.xpath('//h2/a/@href').extract():
if next_page is not None and next_page != "#":
next_page = response.urljoin(next_page)
yield scrapy.Request(next_page, callback=self.parse)
|
gpl-3.0
| 1,822,298,830,182,337,500 | 37.16 | 95 | 0.604822 | false | 3.613636 | false | false | false |
stephanehenry27/Sickbeard-anime
|
sickbeard/providers/newznab.py
|
1
|
7885
|
# Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import urllib
import datetime
import re
import os
from xml.dom.minidom import parseString
import sickbeard
import generic
from sickbeard import classes
from sickbeard.helpers import sanitizeSceneName
from sickbeard import scene_exceptions
from sickbeard import encodingKludge as ek
from sickbeard import exceptions
from sickbeard import logger
from sickbeard import tvcache
from sickbeard.exceptions import ex
class NewznabProvider(generic.NZBProvider):
def __init__(self, name, url, key=''):
generic.NZBProvider.__init__(self, name)
self.cache = NewznabCache(self)
self.url = url
self.key = key
# if a provider doesn't need an api key then this can be false
self.needs_auth = True
self.enabled = True
self.supportsBacklog = True
self.default = False
def configStr(self):
return self.name + '|' + self.url + '|' + self.key + '|' + str(int(self.enabled))
def imageName(self):
if ek.ek(os.path.isfile, ek.ek(os.path.join, sickbeard.PROG_DIR, 'data', 'images', 'providers', self.getID()+'.gif')):
return self.getID()+'.gif'
return 'newznab.gif'
def isEnabled(self):
return self.enabled
def _get_season_search_strings(self, show, season=None, scene=False):
if not show:
return [{}]
to_return = []
# add new query strings for exceptions
name_exceptions = scene_exceptions.get_scene_exceptions(show.tvdbid, season) + [show.name]
name_exceptions = set(name_exceptions)
for cur_exception in name_exceptions:
cur_params = {}
# search directly by tvrage id
if show.tvrid:
cur_params['rid'] = show.tvrid
# if we can't then fall back on a very basic name search
else:
cur_params['q'] = sanitizeSceneName(cur_exception).replace('.', '_')
if season != None:
# air-by-date means &season=2010&q=2010.03, no other way to do it atm
if show.air_by_date:
cur_params['season'] = season.split('-')[0]
if 'q' in cur_params:
cur_params['q'] += '.' + season.replace('-', '.')
else:
cur_params['q'] = season.replace('-', '.')
else:
cur_params['season'] = season
# hack to only add a single result if it's a rageid search
if not ('rid' in cur_params and to_return):
to_return.append(cur_params)
return to_return
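        # Illustration only (hypothetical values): the list built above typically looks like
        # [{'rid': 12345, 'season': 5}] for a tvrage-id search, or
        # [{'q': 'Show_Name', 'season': 5}] when falling back to a plain name search.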
def _get_episode_search_strings(self, ep_obj):
params = {}
if not ep_obj:
return [params]
# search directly by tvrage id
if ep_obj.show.tvrid:
params['rid'] = ep_obj.show.tvrid
# if we can't then fall back on a very basic name search
else:
params['q'] = sanitizeSceneName(ep_obj.show.name).replace('.', '_')
if ep_obj.show.air_by_date:
date_str = str(ep_obj.airdate)
params['season'] = date_str.partition('-')[0]
params['ep'] = date_str.partition('-')[2].replace('-','/')
else:
params['season'] = ep_obj.scene_season
params['ep'] = ep_obj.scene_episode
to_return = [params]
# only do exceptions if we are searching by name
if 'q' in params:
# add new query strings for exceptions
name_exceptions = scene_exceptions.get_scene_exceptions(ep_obj.show.tvdbid)
for cur_exception in name_exceptions:
# don't add duplicates
if cur_exception == ep_obj.show.name:
continue
cur_return = params.copy()
cur_return['q'] = sanitizeSceneName(cur_exception).replace('.', '_')
to_return.append(cur_return)
return to_return
def _doGeneralSearch(self, search_string):
return self._doSearch({'q': search_string})
def _checkAuthFromData(self, data):
try:
parsedXML = parseString(data)
except Exception:
return False
if parsedXML.documentElement.tagName == 'error':
code = parsedXML.documentElement.getAttribute('code')
if code == '100':
raise exceptions.AuthException("Your API key for "+self.name+" is incorrect, check your config.")
elif code == '101':
raise exceptions.AuthException("Your account on "+self.name+" has been suspended, contact the administrator.")
elif code == '102':
raise exceptions.AuthException("Your account isn't allowed to use the API on "+self.name+", contact the administrator")
else:
logger.log(u"Unknown error given from "+self.name+": "+parsedXML.documentElement.getAttribute('description'), logger.ERROR)
return False
return True
def _doSearch(self, search_params, show=None):
params = {"t": "tvsearch",
"maxage": sickbeard.USENET_RETENTION,
"limit": 100,
"cat": '5030,5040'}
# hack this in for now
if self.getID() == 'nzbs_org':
params['cat'] += ',5070,5090'
if search_params:
params.update(search_params)
if self.key:
params['apikey'] = self.key
searchURL = self.url + 'api?' + urllib.urlencode(params)
logger.log(u"Search url: " + searchURL, logger.DEBUG)
data = self.getURL(searchURL)
if not data:
return []
# hack this in until it's fixed server side
if not data.startswith('<?xml'):
data = '<?xml version="1.0" encoding="ISO-8859-1" ?>' + data
try:
parsedXML = parseString(data)
items = parsedXML.getElementsByTagName('item')
except Exception, e:
logger.log(u"Error trying to load "+self.name+" RSS feed: "+ex(e), logger.ERROR)
logger.log(u"RSS data: "+data, logger.DEBUG)
return []
if not self._checkAuthFromData(data):
return []
if parsedXML.documentElement.tagName != 'rss':
logger.log(u"Resulting XML from "+self.name+" isn't RSS, not parsing it", logger.ERROR)
return []
results = []
for curItem in items:
(title, url) = self._get_title_and_url(curItem)
if not title or not url:
logger.log(u"The XML returned from the "+self.name+" RSS feed is incomplete, this result is unusable: "+data, logger.ERROR)
continue
results.append(curItem)
return results
def findPropers(self, date=None):
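        # NOTE: proper/repack searching appears to be intentionally disabled here;
        # everything below the early return is unreachable.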
return []
results = []
for curResult in self._doGeneralSearch("proper repack"):
match = re.search('(\w{3}, \d{1,2} \w{3} \d{4} \d\d:\d\d:\d\d) [\+\-]\d{4}', curResult.findtext('pubDate'))
if not match:
continue
resultDate = datetime.datetime.strptime(match.group(1), "%a, %d %b %Y %H:%M:%S")
if date == None or resultDate > date:
results.append(classes.Proper(curResult.findtext('title'), curResult.findtext('link'), resultDate))
return results
class NewznabCache(tvcache.TVCache):
def __init__(self, provider):
tvcache.TVCache.__init__(self, provider)
# only poll newznab providers every 15 minutes max
self.minTime = 15
def _getRSSData(self):
params = {"t": "tvsearch",
"age": sickbeard.USENET_RETENTION,
"cat": '5040,5030'}
# hack this in for now
if self.provider.getID() == 'nzbs_org':
params['cat'] += ',5070,5090'
if self.provider.key:
params['apikey'] = self.provider.key
url = self.provider.url + 'api?' + urllib.urlencode(params)
logger.log(self.provider.name + " cache update URL: "+ url, logger.DEBUG)
data = self.provider.getURL(url)
# hack this in until it's fixed server side
if data and not data.startswith('<?xml'):
data = '<?xml version="1.0" encoding="ISO-8859-1" ?>' + data
return data
def _checkAuth(self, data):
return self.provider._checkAuthFromData(data)
|
gpl-3.0
| 2,714,241,382,934,127,000 | 26.378472 | 127 | 0.674445 | false | 3.202681 | false | false | false |
vericred/vericred-python
|
vericred_client/models/plan_deleted.py
|
1
|
11507
|
# coding: utf-8
"""
Vericred API
Vericred's API allows you to search for Health Plans that a specific doctor
accepts.
## Getting Started
Visit our [Developer Portal](https://developers.vericred.com) to
create an account.
Once you have created an account, you can create one Application for
Production and another for our Sandbox (select the appropriate Plan when
you create the Application).
## SDKs
Our API follows standard REST conventions, so you can use any HTTP client
to integrate with us. You will likely find it easier to use one of our
[autogenerated SDKs](https://github.com/vericred/?query=vericred-),
which we make available for several common programming languages.
## Authentication
To authenticate, pass the API Key you created in the Developer Portal as
a `Vericred-Api-Key` header.
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Versioning
Vericred's API default to the latest version. However, if you need a specific
version, you can request it with an `Accept-Version` header.
The current version is `v3`. Previous versions are `v1` and `v2`.
`curl -H 'Vericred-Api-Key: YOUR_KEY' -H 'Accept-Version: v2' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Pagination
Endpoints that accept `page` and `per_page` parameters are paginated. They expose
four additional fields that contain data about your position in the response,
namely `Total`, `Per-Page`, `Link`, and `Page` as described in [RFC-5988](https://tools.ietf.org/html/rfc5988).
For example, to display 5 results per page and view the second page of a
`GET` to `/networks`, your final request would be `GET /networks?....page=2&per_page=5`.
## Sideloading
When we return multiple levels of an object graph (e.g. `Provider`s and their `State`s
we sideload the associated data. In this example, we would provide an Array of
`State`s and a `state_id` for each provider. This is done primarily to reduce the
payload size since many of the `Provider`s will share a `State`
```
{
providers: [{ id: 1, state_id: 1}, { id: 2, state_id: 1 }],
states: [{ id: 1, code: 'NY' }]
}
```
If you need the second level of the object graph, you can just match the
corresponding id.
## Selecting specific data
All endpoints allow you to specify which fields you would like to return.
This allows you to limit the response to contain only the data you need.
For example, let's take a request that returns the following JSON by default
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890',
field_we_dont_care_about: 'value_we_dont_care_about'
},
states: [{
id: 1,
name: 'New York',
code: 'NY',
field_we_dont_care_about: 'value_we_dont_care_about'
}]
}
```
To limit our results to only return the fields we care about, we specify the
`select` query string parameter for the corresponding fields in the JSON
document.
In this case, we want to select `name` and `phone` from the `provider` key,
so we would add the parameters `select=provider.name,provider.phone`.
We also want the `name` and `code` from the `states` key, so we would
add the parameters `select=states.name,states.code`. The id field of
each document is always returned whether or not it is requested.
Our final request would be `GET /providers/12345?select=provider.name,provider.phone,states.name,states.code`
The response would be
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890'
},
states: [{
id: 1,
name: 'New York',
code: 'NY'
}]
}
```
## Benefits summary format
Benefit cost-share strings are formatted to capture:
* Network tiers
* Compound or conditional cost-share
* Limits on the cost-share
* Benefit-specific maximum out-of-pocket costs
**Example #1**
As an example, we would represent [this Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/33602TX0780032.pdf) as:
* **Hospital stay facility fees**:
- Network Provider: `$400 copay/admit plus 20% coinsurance`
- Out-of-Network Provider: `$1,500 copay/admit plus 50% coinsurance`
- Vericred's format for this benefit: `In-Network: $400 before deductible then 20% after deductible / Out-of-Network: $1,500 before deductible then 50% after deductible`
* **Rehabilitation services:**
- Network Provider: `20% coinsurance`
- Out-of-Network Provider: `50% coinsurance`
- Limitations & Exceptions: `35 visit maximum per benefit period combined with Chiropractic care.`
- Vericred's format for this benefit: `In-Network: 20% after deductible / Out-of-Network: 50% after deductible | limit: 35 visit(s) per Benefit Period`
**Example #2**
In [this other Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/40733CA0110568.pdf), the **specialty_drugs** cost-share has a maximum out-of-pocket for in-network pharmacies.
* **Specialty drugs:**
- Network Provider: `40% coinsurance up to a $500 maximum for up to a 30 day supply`
- Out-of-Network Provider `Not covered`
- Vericred's format for this benefit: `In-Network: 40% after deductible, up to $500 per script / Out-of-Network: 100%`
**BNF**
Here's a description of the benefits summary string, represented as a context-free grammar:
```
root ::= coverage
coverage ::= (simple_coverage | tiered_coverage) (space pipe space coverage_modifier)?
tiered_coverage ::= tier (space slash space tier)*
tier ::= tier_name colon space (tier_coverage | not_applicable)
tier_coverage ::= simple_coverage (space (then | or | and) space simple_coverage)* tier_limitation?
simple_coverage ::= (pre_coverage_limitation space)? coverage_amount (space post_coverage_limitation)? (comma? space coverage_condition)?
coverage_modifier ::= limit_condition colon space (((simple_coverage | simple_limitation) (semicolon space see_carrier_documentation)?) | see_carrier_documentation | waived_if_admitted | shared_across_tiers)
waived_if_admitted ::= ("copay" space)? "waived if admitted"
simple_limitation ::= pre_coverage_limitation space "copay applies"
tier_name ::= "In-Network-Tier-2" | "Out-of-Network" | "In-Network"
limit_condition ::= "limit" | "condition"
tier_limitation ::= comma space "up to" space (currency | (integer space time_unit plural?)) (space post_coverage_limitation)?
coverage_amount ::= currency | unlimited | included | unknown | percentage | (digits space (treatment_unit | time_unit) plural?)
pre_coverage_limitation ::= first space digits space time_unit plural?
post_coverage_limitation ::= (((then space currency) | "per condition") space)? "per" space (treatment_unit | (integer space time_unit) | time_unit) plural?
coverage_condition ::= ("before deductible" | "after deductible" | "penalty" | allowance | "in-state" | "out-of-state") (space allowance)?
allowance ::= upto_allowance | after_allowance
upto_allowance ::= "up to" space (currency space)? "allowance"
after_allowance ::= "after" space (currency space)? "allowance"
see_carrier_documentation ::= "see carrier documentation for more information"
shared_across_tiers ::= "shared across all tiers"
unknown ::= "unknown"
unlimited ::= /[uU]nlimited/
included ::= /[iI]ncluded in [mM]edical/
time_unit ::= /[hH]our/ | (((/[cC]alendar/ | /[cC]ontract/) space)? /[yY]ear/) | /[mM]onth/ | /[dD]ay/ | /[wW]eek/ | /[vV]isit/ | /[lL]ifetime/ | ((((/[bB]enefit/ plural?) | /[eE]ligibility/) space)? /[pP]eriod/)
treatment_unit ::= /[pP]erson/ | /[gG]roup/ | /[cC]ondition/ | /[sS]cript/ | /[vV]isit/ | /[eE]xam/ | /[iI]tem/ | /[sS]tay/ | /[tT]reatment/ | /[aA]dmission/ | /[eE]pisode/
comma ::= ","
colon ::= ":"
semicolon ::= ";"
pipe ::= "|"
slash ::= "/"
plural ::= "(s)" | "s"
then ::= "then" | ("," space) | space
or ::= "or"
and ::= "and"
not_applicable ::= "Not Applicable" | "N/A" | "NA"
first ::= "first"
currency ::= "$" number
percentage ::= number "%"
number ::= float | integer
float ::= digits "." digits
integer ::= /[0-9]/+ (comma_int | under_int)*
comma_int ::= ("," /[0-9]/*3) !"_"
under_int ::= ("_" /[0-9]/*3) !","
digits ::= /[0-9]/+ ("_" /[0-9]/+)*
space ::= /[ \t]/+
```
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class PlanDeleted(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
PlanDeleted - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
}
self.attribute_map = {
}
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
apache-2.0
| 2,301,925,499,552,926,000 | 37.744108 | 228 | 0.62223 | false | 3.612873 | false | false | false |
flaxatives/leetcode
|
minimum_window_substring.py
|
1
|
1883
|
#!/usr/bin/env python
"""
Given strings S and T, finds the minimum substring of S which contains all the
characters in T. Done in O(n) time.
"""
def min_window(S, T):
freq = {}
for letter in T:
freq[letter] = 0
# search S until we find a substring with all chars
start = 0
while start < len(S) and S[start] not in T:
start += 1
    if start >= len(S):
        return ""
end = start
allfound = False
while not allfound and end < len(S):
char = S[end]
if char in T:
freq[char] += 1
allfound = allfound or all((freq[c] > 0 for c in T))
end += 1
    end -= 1
    # if the scan above ended without covering every character of T, there is no valid window
    if not allfound:
        return ""
# search the rest of the string for smaller windows
min_start = start
min_end = end
end += 1
while end < len(S):
# expand on the right side until we match the front char
while end < len(S) and S[start] != S[end]:
if S[end] in freq:
freq[S[end]] += 1
end += 1
if end >= len(S):
break
# remove excess characters from the front
start += 1
while start < end:
char = S[start]
if char in T and freq[char] > 1:
freq[S[start]] -= 1
elif char in T and freq[char] == 1:
break
start += 1
# check if new window is smaller
if end - start < min_end - min_start:
min_start, min_end = start, end
end += 1
return S[min_start:min_end+1]
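# Illustrative check (classic example; not part of the original file):
#   min_window("ADOBECODEBANC", "ABC") yields "BANC".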
if __name__ == "__main__":
import sys
if len(sys.argv) >= 3:
print(min_window(*sys.argv[1:3]))
|
mit
| 4,124,336,163,982,471,700 | 27.104478 | 78 | 0.446097 | false | 4.111354 | false | false | false |
swisscom/cleanerversion
|
versions/descriptors.py
|
1
|
25432
|
from collections import namedtuple
from django import VERSION
from django.core.exceptions import SuspiciousOperation, FieldDoesNotExist
from django.db import router, transaction
from django.db.models.base import Model
from django.db.models.fields.related import (ForwardManyToOneDescriptor,
ReverseManyToOneDescriptor,
ManyToManyDescriptor)
from django.db.models.fields.related_descriptors import \
create_forward_many_to_many_manager
from django.db.models.query_utils import Q
from django.utils.functional import cached_property
from versions.util import get_utc_now
def matches_querytime(instance, querytime):
"""
Checks whether the given instance satisfies the given QueryTime object.
:param instance: an instance of Versionable
:param querytime: QueryTime value to check against
"""
if not querytime.active:
return True
if not querytime.time:
return instance.version_end_date is None
return (instance.version_start_date <= querytime.time and (
instance.version_end_date is None or
instance.version_end_date > querytime.time))
class VersionedForwardManyToOneDescriptor(ForwardManyToOneDescriptor):
"""
The VersionedForwardManyToOneDescriptor is used when pointing another
Model using a VersionedForeignKey;
For example:
class Team(Versionable):
name = CharField(max_length=200)
city = VersionedForeignKey(City, null=True)
``team.city`` is a VersionedForwardManyToOneDescriptor
"""
def get_prefetch_queryset(self, instances, queryset=None):
"""
Overrides the parent method to:
- force queryset to use the querytime of the parent objects
- ensure that the join is done on identity, not id
- make the cache key identity, not id.
"""
if queryset is None:
queryset = self.get_queryset()
queryset._add_hints(instance=instances[0])
# CleanerVersion change 1: force the querytime to be the same as the
# prefetched-for instance.
# This is necessary to have reliable results and avoid extra queries
# for cache misses when accessing the child objects from their
# parents (e.g. choice.poll).
instance_querytime = instances[0]._querytime
if instance_querytime.active:
if queryset.querytime.active and \
queryset.querytime.time != instance_querytime.time:
raise ValueError(
"A Prefetch queryset that specifies an as_of time must "
"match the as_of of the base queryset.")
else:
queryset.querytime = instance_querytime
# CleanerVersion change 2: make rel_obj_attr return a tuple with
# the object's identity.
# rel_obj_attr = self.field.get_foreign_related_value
def versioned_fk_rel_obj_attr(versioned_rel_obj):
return versioned_rel_obj.identity,
rel_obj_attr = versioned_fk_rel_obj_attr
instance_attr = self.field.get_local_related_value
instances_dict = {instance_attr(inst): inst for inst in instances}
# CleanerVersion change 3: fake the related field so that it provides
# a name of 'identity'.
# related_field = self.field.foreign_related_fields[0]
related_field = namedtuple('VersionedRelatedFieldTuple', 'name')(
'identity')
# FIXME: This will need to be revisited when we introduce support for
# composite fields. In the meantime we take this practical approach to
# solve a regression on 1.6 when the reverse manager in hidden
# (related_name ends with a '+'). Refs #21410.
# The check for len(...) == 1 is a special case that allows the query
# to be join-less and smaller. Refs #21760.
if self.field.remote_field.is_hidden() or len(
self.field.foreign_related_fields) == 1:
query = {'%s__in' % related_field.name: set(
instance_attr(inst)[0] for inst in instances)}
else:
query = {'%s__in' % self.field.related_query_name(): instances}
queryset = queryset.filter(**query)
# Since we're going to assign directly in the cache,
# we must manage the reverse relation cache manually.
if not self.field.remote_field.multiple:
rel_obj_cache_name = self.field.remote_field.get_cache_name()
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, rel_obj_cache_name, instance)
if VERSION[:1] < (2,):
return (queryset, rel_obj_attr, instance_attr, True,
self.field.get_cache_name())
else:
return (queryset, rel_obj_attr, instance_attr, True,
self.field.get_cache_name(), False)
def get_queryset(self, **hints):
queryset = self.field.remote_field.model.objects\
.db_manager(hints=hints).all()
if hasattr(queryset, 'querytime'):
if 'instance' in hints:
instance = hints['instance']
if hasattr(instance, '_querytime'):
if instance._querytime.active and \
instance._querytime != queryset.querytime:
queryset = queryset.as_of(instance._querytime.time)
else:
queryset = queryset.as_of(None)
return queryset
def __get__(self, instance, cls=None):
"""
        The getter method returns the object that the instance points to,
e.g. choice.poll returns a Poll instance, whereas the Poll class
defines the ForeignKey.
:param instance: The object on which the property was accessed
        :param cls: The class through which the descriptor was accessed
:return: Returns a Versionable
"""
from versions.models import Versionable
if instance is None:
return self
current_elt = super(self.__class__, self).__get__(instance,
cls)
if not current_elt:
return None
if not isinstance(current_elt, Versionable):
raise TypeError("VersionedForeignKey target is of type " +
str(type(current_elt)) +
", which is not a subclass of Versionable")
if hasattr(instance, '_querytime'):
# If current_elt matches the instance's querytime, there's no
# need to make a database query.
if matches_querytime(current_elt, instance._querytime):
current_elt._querytime = instance._querytime
return current_elt
return current_elt.__class__.objects.as_of(
instance._querytime.time).get(identity=current_elt.identity)
else:
return current_elt.__class__.objects.current.get(
identity=current_elt.identity)
vforward_many_to_one_descriptor_class = VersionedForwardManyToOneDescriptor
class VersionedReverseManyToOneDescriptor(ReverseManyToOneDescriptor):
@cached_property
def related_manager_cls(self):
manager_cls = super(VersionedReverseManyToOneDescriptor,
self).related_manager_cls
rel_field = self.field
class VersionedRelatedManager(manager_cls):
def __init__(self, instance):
super(VersionedRelatedManager, self).__init__(instance)
# This is a hack, in order to get the versioned related objects
for key in self.core_filters.keys():
if '__exact' in key or '__' not in key:
self.core_filters[key] = instance.identity
def get_queryset(self):
from versions.models import VersionedQuerySet
queryset = super(VersionedRelatedManager, self).get_queryset()
# Do not set the query time if it is already correctly set.
# queryset.as_of() returns a clone of the queryset, and this
# will destroy the prefetched objects cache if it exists.
if isinstance(queryset, VersionedQuerySet) \
and self.instance._querytime.active \
and queryset.querytime != self.instance._querytime:
queryset = queryset.as_of(self.instance._querytime.time)
return queryset
def get_prefetch_queryset(self, instances, queryset=None):
"""
Overrides RelatedManager's implementation of
get_prefetch_queryset so that it works nicely with
VersionedQuerySets. It ensures that identities and time-limited
where clauses are used when selecting related reverse foreign
key objects.
"""
if queryset is None:
# Note that this intentionally call's VersionManager's
# get_queryset, instead of simply calling the superclasses'
# get_queryset (as the non-versioned RelatedManager does),
# because what is needed is a simple Versioned queryset
# without any restrictions (e.g. do not apply
# self.core_filters).
from versions.models import VersionManager
queryset = VersionManager.get_queryset(self)
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
instance_querytime = instances[0]._querytime
if instance_querytime.active:
if queryset.querytime.active and \
queryset.querytime.time != \
instance_querytime.time:
raise ValueError(
"A Prefetch queryset that specifies an as_of time "
"must match the as_of of the base queryset.")
else:
queryset.querytime = instance_querytime
rel_obj_attr = rel_field.get_local_related_value
instance_attr = rel_field.get_foreign_related_value
# Use identities instead of ids so that this will work with
# versioned objects.
instances_dict = {(inst.identity,): inst for inst in instances}
identities = [inst.identity for inst in instances]
query = {'%s__identity__in' % rel_field.name: identities}
queryset = queryset.filter(**query)
# Since we just bypassed this class' get_queryset(), we must
# manage the reverse relation manually.
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, rel_field.name, instance)
cache_name = rel_field.related_query_name()
if VERSION[:1] < (2,):
return (queryset, rel_obj_attr, instance_attr, False,
cache_name)
else:
return (queryset, rel_obj_attr, instance_attr, False,
cache_name, False)
def add(self, *objs, **kwargs):
from versions.models import Versionable
cloned_objs = ()
for obj in objs:
if not isinstance(obj, Versionable):
raise TypeError(
"Trying to add a non-Versionable to a "
"VersionedForeignKey relationship")
cloned_objs += (obj.clone(),)
super(VersionedRelatedManager, self).add(*cloned_objs,
**kwargs)
# clear() and remove() are present if the FK is nullable
if 'clear' in dir(manager_cls):
def clear(self, **kwargs):
"""
Overridden to ensure that the current queryset is used,
and to clone objects before they are removed, so that
history is not lost.
"""
bulk = kwargs.pop('bulk', True)
db = router.db_for_write(self.model,
instance=self.instance)
queryset = self.current.using(db)
with transaction.atomic(using=db, savepoint=False):
cloned_pks = [obj.clone().pk for obj in queryset]
update_qs = self.current.filter(pk__in=cloned_pks)
self._clear(update_qs, bulk)
if 'remove' in dir(manager_cls):
def remove(self, *objs, **kwargs):
from versions.models import Versionable
val = rel_field.get_foreign_related_value(self.instance)
cloned_objs = ()
for obj in objs:
# Is obj actually part of this descriptor set?
# Otherwise, silently go over it, since Django
# handles that case
if rel_field.get_local_related_value(obj) == val:
# Silently pass over non-versionable items
if not isinstance(obj, Versionable):
raise TypeError(
"Trying to remove a non-Versionable from "
"a VersionedForeignKey realtionship")
cloned_objs += (obj.clone(),)
super(VersionedRelatedManager, self).remove(*cloned_objs,
**kwargs)
return VersionedRelatedManager
class VersionedManyToManyDescriptor(ManyToManyDescriptor):
@cached_property
def related_manager_cls(self):
model = self.rel.related_model if self.reverse else self.rel.model
return create_versioned_forward_many_to_many_manager(
model._default_manager.__class__,
self.rel,
reverse=self.reverse,
)
def __set__(self, instance, value):
"""
Completely overridden to avoid bulk deletion that happens when the
parent method calls clear().
The parent method's logic is basically: clear all in bulk, then add
the given objects in bulk.
Instead, we figure out which ones are being added and removed, and
call add and remove for these values.
This lets us retain the versioning information.
Since this is a many-to-many relationship, it is assumed here that
the django.db.models.deletion.Collector logic, that is used in
clear(), is not necessary here. Collector collects related models,
e.g. ones that should also be deleted because they have
a ON CASCADE DELETE relationship to the object, or, in the case of
"Multi-table inheritance", are parent objects.
:param instance: The instance on which the getter was called
:param value: iterable of items to set
"""
if not instance.is_current:
raise SuspiciousOperation(
"Related values can only be directly set on the current "
"version of an object")
if not self.field.remote_field.through._meta.auto_created:
opts = self.field.rel.through._meta
raise AttributeError((
"Cannot set values on a ManyToManyField "
"which specifies an intermediary model. "
"Use %s.%s's Manager instead.") % (
opts.app_label, opts.object_name))
manager = self.__get__(instance)
# Below comment is from parent __set__ method. We'll force
# evaluation, too:
# clear() can change expected output of 'value' queryset, we force
# evaluation of queryset before clear; ticket #19816
value = tuple(value)
being_removed, being_added = self.get_current_m2m_diff(instance, value)
timestamp = get_utc_now()
manager.remove_at(timestamp, *being_removed)
manager.add_at(timestamp, *being_added)
def get_current_m2m_diff(self, instance, new_objects):
"""
:param instance: Versionable object
:param new_objects: objects which are about to be associated with
instance
:return: (being_removed id list, being_added id list)
:rtype : tuple
"""
new_ids = self.pks_from_objects(new_objects)
relation_manager = self.__get__(instance)
filter = Q(**{relation_manager.source_field.attname: instance.pk})
qs = self.through.objects.current.filter(filter)
try:
# Django 1.7
target_name = relation_manager.target_field.attname
except AttributeError:
# Django 1.6
target_name = relation_manager.through._meta.get_field_by_name(
relation_manager.target_field_name)[0].attname
current_ids = set(qs.values_list(target_name, flat=True))
being_removed = current_ids - new_ids
being_added = new_ids - current_ids
return list(being_removed), list(being_added)
def pks_from_objects(self, objects):
"""
Extract all the primary key strings from the given objects.
Objects may be Versionables, or bare primary keys.
:rtype : set
"""
return {o.pk if isinstance(o, Model) else o for o in objects}
def create_versioned_forward_many_to_many_manager(superclass, rel,
reverse=None):
many_related_manager_klass = create_forward_many_to_many_manager(
superclass, rel, reverse)
class VersionedManyRelatedManager(many_related_manager_klass):
def __init__(self, *args, **kwargs):
super(VersionedManyRelatedManager, self).__init__(*args, **kwargs)
# Additional core filters are:
# version_start_date <= t &
# (version_end_date > t | version_end_date IS NULL)
# but we cannot work with the Django core filters, since they
# don't support ORing filters, which is a thing we need to
# consider the "version_end_date IS NULL" case;
# So, we define our own set of core filters being applied when
# versioning
try:
_ = self.through._meta.get_field('version_start_date')
_ = self.through._meta.get_field('version_end_date')
except FieldDoesNotExist as e:
fields = [f.name for f in self.through._meta.get_fields()]
print(str(e) + "; available fields are " + ", ".join(fields))
raise e
# FIXME: this probably does not work when auto-referencing
def get_queryset(self):
"""
Add a filter to the queryset, limiting the results to be pointed
by relationship that are valid for the given timestamp (which is
taken at the current instance, or set to now, if not available).
Long story short, apply the temporal validity filter also to the
intermediary model.
"""
queryset = super(VersionedManyRelatedManager, self).get_queryset()
if hasattr(queryset, 'querytime'):
if self.instance._querytime.active and \
self.instance._querytime != queryset.querytime:
queryset = queryset.as_of(self.instance._querytime.time)
return queryset
def _remove_items(self, source_field_name, target_field_name, *objs):
"""
Instead of removing items, we simply set the version_end_date of
the current item to the current timestamp --> t[now].
Like that, there is no more current entry having that identity -
which is equal to not existing for timestamps greater than t[now].
"""
return self._remove_items_at(None, source_field_name,
target_field_name, *objs)
def _remove_items_at(self, timestamp, source_field_name,
target_field_name, *objs):
if objs:
if timestamp is None:
timestamp = get_utc_now()
old_ids = set()
for obj in objs:
if isinstance(obj, self.model):
# The Django 1.7-way is preferred
if hasattr(self, 'target_field'):
fk_val = \
self.target_field \
.get_foreign_related_value(obj)[0]
else:
raise TypeError(
"We couldn't find the value of the foreign "
"key, this might be due to the use of an "
"unsupported version of Django")
old_ids.add(fk_val)
else:
old_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
qs = self.through._default_manager.using(db).filter(**{
source_field_name: self.instance.id,
'%s__in' % target_field_name: old_ids
}).as_of(timestamp)
for relation in qs:
relation._delete_at(timestamp)
if 'add' in dir(many_related_manager_klass):
def add(self, *objs):
if not self.instance.is_current:
raise SuspiciousOperation(
"Adding many-to-many related objects is only possible "
"on the current version")
# The ManyRelatedManager.add() method uses the through model's
# default manager to get a queryset when looking at which
# objects already exist in the database.
# In order to restrict the query to the current versions when
# that is done, we temporarily replace the queryset's using
# method so that the version validity condition can be
# specified.
klass = self.through._default_manager.get_queryset().__class__
__using_backup = klass.using
def using_replacement(self, *args, **kwargs):
qs = __using_backup(self, *args, **kwargs)
return qs.as_of(None)
klass.using = using_replacement
super(VersionedManyRelatedManager, self).add(*objs)
klass.using = __using_backup
def add_at(self, timestamp, *objs):
"""
This function adds an object at a certain point in time
(timestamp)
"""
# First off, define the new constructor
def _through_init(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.version_birth_date = timestamp
self.version_start_date = timestamp
# Through-classes have an empty constructor, so it can easily
# be overwritten when needed;
# This is not the default case, so the overwrite only takes
# place when we "modify the past"
self.through.__init_backup__ = self.through.__init__
self.through.__init__ = _through_init
# Do the add operation
self.add(*objs)
# Remove the constructor again (by replacing it with the
# original empty constructor)
self.through.__init__ = self.through.__init_backup__
del self.through.__init_backup__
add_at.alters_data = True
if 'remove' in dir(many_related_manager_klass):
def remove_at(self, timestamp, *objs):
"""
Performs the act of removing specified relationships at a
specified time (timestamp);
                So it is not the objects themselves that are removed at the
                given time, but their relationship!
"""
self._remove_items_at(timestamp, self.source_field_name,
self.target_field_name, *objs)
# For consistency, also handle the symmetrical case
if self.symmetrical:
self._remove_items_at(timestamp, self.target_field_name,
self.source_field_name, *objs)
remove_at.alters_data = True
return VersionedManyRelatedManager
|
apache-2.0
| 2,862,264,786,888,047,600 | 44.741007 | 79 | 0.557093 | false | 4.863645 | false | false | false |
jrbl/invenio
|
modules/bibauthorid/lib/bibauthorid_cluster_set.py
|
1
|
8369
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from itertools import chain, groupby
from operator import itemgetter
from bibauthorid_matrix_optimization import maximized_mapping
from bibauthorid_backinterface import save_cluster
from bibauthorid_backinterface import get_all_papers_of_pids
from bibauthorid_backinterface import get_bib10x, get_bib70x
from bibauthorid_backinterface import get_all_valid_bibrecs
from bibauthorid_backinterface import get_bibrefrec_subset
from bibauthorid_backinterface import remove_result_cluster
from bibauthorid_name_utils import generate_last_name_cluster_str
class Blob:
def __init__(self, personid_records):
'''
@param personid_records:
A list of tuples: (personid, bibrefrec, flag).
Notice that all bibrefrecs should be the same
since the Blob represents only one bibrefrec.
'''
self.bib = personid_records[0][1]
assert all(p[1] == self.bib for p in personid_records)
self.claimed = set()
self.assigned = set()
self.rejected = set()
for pid, unused, flag in personid_records:
if flag > 1:
self.claimed.add(pid)
elif flag >= -1:
self.assigned.add(pid)
else:
self.rejected.add(pid)
def create_blobs_by_pids(pids):
'''
    Returns a list of blobs for a given set of personids.
Blob is an object which describes all information
for a bibrefrec in the personid table.
@type pids: iterable of integers
'''
all_bibs = get_all_papers_of_pids(pids)
all_bibs = ((x[0], (int(x[1]), x[2], x[3]), x[4]) for x in all_bibs)
bibs_dict = groupby(sorted(all_bibs, key=itemgetter(1)), key=itemgetter(1))
blobs = [Blob(list(bibs)) for unused, bibs in bibs_dict]
return blobs
def group_blobs(blobs):
'''
Separates the blobs into two groups
of objects - those with claims and
those without.
'''
# created from blobs, which are claimed
# [(bibrefrec, personid)]
union = []
# created from blobs, which are not claimed
# [(bibrefrec, personid/None, [personid])]
independent = []
for blob in blobs:
assert len(blob.claimed) + len(blob.assigned) == 1
if len(blob.claimed) > 0:
union.append((blob.bib, list(blob.claimed)[0]))
else:
independent.append((blob.bib, list(blob.assigned)[0], list(blob.rejected)))
return (union, independent)
class Cluster_set:
class Cluster:
def __init__(self, bibs, hate = []):
            # hate is a symmetrical relation
self.bibs = set(bibs)
self.hate = set(hate)
def hates(self, other):
return other in self.hate
def quarrel(self, cl2):
self.hate.add(cl2)
cl2.hate.add(self)
def _debug_test_hate_relation(self):
for cl2 in self.hate:
if not self.hates(cl2) or not cl2.hates(self):
return False
return True
def __init__(self):
self.clusters = []
def create_skeleton(self, personids, last_name):
blobs = create_blobs_by_pids(personids)
self.last_name = last_name
union, independent = group_blobs(blobs)
union_clusters = {}
for uni in union:
union_clusters[uni[1]] = union_clusters.get(uni[1], []) + [uni[0]]
cluster_dict = dict((personid, self.Cluster(bibs)) for personid, bibs in union_clusters.items())
self.clusters = cluster_dict.values()
for i, cl in enumerate(self.clusters):
cl.hate = set(chain(self.clusters[:i], self.clusters[i+1:]))
for ind in independent:
bad_clusters = [cluster_dict[i] for i in ind[2] if i in cluster_dict]
cl = self.Cluster([ind[0]], bad_clusters)
for bcl in bad_clusters:
bcl.hate.add(cl)
self.clusters.append(cl)
# Creates a cluster set, ignoring the claims and the
# rejected papers.
def create_pure(self, personids, last_name):
blobs = create_blobs_by_pids(personids)
self.last_name = last_name
self.clusters = [self.Cluster((blob.bib,)) for blob in blobs]
# no longer used
def create_body(self, blobs):
union, independent = group_blobs(blobs)
arranged_clusters = {}
for cls in chain(union, independent):
arranged_clusters[cls[1]] = arranged_clusters.get(cls[1], []) + [cls[0]]
for pid, bibs in arranged_clusters.items():
cl = self.Cluster(bibs)
cl.personid = pid
self.clusters.append(cl)
    # a *very* slow function checking whether the hate relation is no longer symmetrical
def _debug_test_hate_relation(self):
for cl1 in self.clusters:
if not cl1._debug_test_hate_relation():
return False
return True
# similar to the function above
def _debug_duplicated_recs(self, mapping=None):
for cl in self.clusters:
if mapping:
setty = set(mapping[x][2] for x in cl.bibs)
else:
setty = set(x[2] for x in cl.bibs)
if len(cl.bibs) != len(setty):
return False
return True
# No longer used but it might be handy.
@staticmethod
def match_cluster_sets(cs1, cs2):
"""
        This function tries to generate the best matching
        between cs1 and cs2 according to the shared bibrefrecs.
        It returns a dictionary with keys, clusters in cs1,
and values, clusters in cs2.
@param and type of cs1 and cs2: cluster_set
@return: dictionary with the matching clusters.
@return type: { cluster : cluster }
"""
matr = [[len(cl1.bibs & cl2.bibs) for cl2 in cs2.clusters] for cl1 in cs1.clusters]
mapping = maximized_mapping(matr)
return dict((cs1.clusters[mappy[0]], cs2.clusters[mappy[1]]) for mappy in mapping)
def store(self):
'''
Stores the cluster set in a special table.
This is used to store the results of
tortoise/wedge in a table and later merge them
with personid.
'''
remove_result_cluster("%s." % self.last_name)
named_clusters = (("%s.%d" % (self.last_name, idx), cl) for idx, cl in enumerate(self.clusters))
map(save_cluster, named_clusters)
def cluster_sets_from_marktables():
# { (100, 123) -> name }
ref100 = get_bib10x()
ref700 = get_bib70x()
bibref_2_name = dict([((100, ref), generate_last_name_cluster_str(name)) for ref, name in ref100] +
[((700, ref), generate_last_name_cluster_str(name)) for ref, name in ref700])
all_recs = get_all_valid_bibrecs()
all_bibrefrecs = chain(set((100, ref, rec) for rec, ref in get_bibrefrec_subset(100, all_recs, map(itemgetter(0), ref100))),
set((700, ref, rec) for rec, ref in get_bibrefrec_subset(700, all_recs, map(itemgetter(0), ref700))))
last_name_2_bibs = {}
for bibrefrec in all_bibrefrecs:
table, ref, unused = bibrefrec
name = bibref_2_name[(table, ref)]
last_name_2_bibs[name] = last_name_2_bibs.get(name, []) + [bibrefrec]
cluster_sets = []
for name, bibrecrefs in last_name_2_bibs.items():
new_cluster_set = Cluster_set()
new_cluster_set.clusters = [Cluster_set.Cluster([bib]) for bib in bibrecrefs]
new_cluster_set.last_name = name
cluster_sets.append(new_cluster_set)
return cluster_sets
|
gpl-2.0
| -7,200,099,607,998,172,000 | 33.870833 | 128 | 0.6168 | false | 3.608883 | false | false | false |
chundongwang/Guess2014
|
minimizeJs.py
|
1
|
3325
|
import os
import time
from httplib import HTTPConnection
from urllib import urlencode
files = [
"js/main.js",
"js/service/guesser.js",
"js/service/miner.js",
"js/directive/navbar.js",
"js/directive/notice.js",
"js/directive/footer.js",
"js/directive/matchdiv.js",
"js/directive/betmodal.js",
"js/directive/betmodalextra.js",
"js/directive/eulamodal.js",
"js/directive/chartwin.js",
"js/directive/chartfav.js",
"js/directive/chartleast.js",
"js/directive/spinner.js",
"js/directive/chartallbets.js",
"js/directive/chartpop.js",
"js/directive/chartbetscoredist.js",
"js/directive/chartbetmatchdist.js",
"js/directive/charttopbet.js",
"js/view/topview.js",
"js/view/home.js",
"js/view/date.js",
"js/view/my.js",
"js/view/betanalysis.js",
"js/view/bestbet.js",
"js/view/carlnan.js"
]
raw_files = [
"js/third-party/Chart.min.js",
"js/third-party/moment.min.js"
]
copyright = '/*! GuessWorldCup2014 (c) 2014 */'
index_template = """{%% extends "base.html" %%}
{%% block script %%}
<script src="//ajax.aspnetcdn.com/ajax/jQuery/jquery-1.11.0.min.js"></script>
<script src="//ajax.aspnetcdn.com/ajax/bootstrap/3.1.1/bootstrap.min.js"></script>
<script src="//ajax.googleapis.com/ajax/libs/angularjs/1.2.16/angular.min.js"></script>
<script src="//ajax.googleapis.com/ajax/libs/angularjs/1.2.16/angular-route.min.js"></script>
<script src="//ajax.googleapis.com/ajax/libs/angularjs/1.2.16/angular-animate.js"></script>
<script src="//ajax.googleapis.com/ajax/libs/angularjs/1.2.16/angular-cookies.min.js"></script>
<script src="%s"></script>
{%% endblock %%}"""
def replaceIndexHtml(root,filename):
with open(os.path.join(root, 'templates/index.html'), 'w+') as f:
f.write(index_template % filename)
def minimizeAllJs(root):
minimized_content = minimizeJsHelper(combineFiles(root, files))
raw_content = combineFiles(root, raw_files)
filename = 'js/%s.js'%str(int(time.time()))
with open(os.path.join(root, filename), 'w+') as f:
f.write(raw_content)
f.write('\n'+copyright+'\n')
f.write(minimized_content)
return filename
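# Rough flow, as assumed from the functions above: minimizeAllJs() writes the minimized bundle
# to a timestamp-named file (e.g. js/1402358400.js) and replaceIndexHtml() points
# templates/index.html at that file.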
def combineFiles(root, file_list):
combined_content = ''
for file in file_list:
with open(os.path.join(root,file),'r+') as f:
combined_content += f.read()
combined_content += '\n'
return combined_content
def minimizeJs(path):
js_content = None
with open(path,'r+') as f:
js_content = f.read()
return minimizeJsHelper(js_content)
def minimizeJsHelper(js_content):
headers = {"Content-type": "application/x-www-form-urlencoded",
"Accept": "text/plain"}
params = urlencode({
'js_code': js_content,
'compilation_level': 'SIMPLE_OPTIMIZATIONS',
'output_format': 'text',
'output_info': 'compiled_code'
})
conn = HTTPConnection('closure-compiler.appspot.com');
conn.request('POST', '/compile', params, headers)
r = conn.getresponse()
if r.status == 200:
data = r.read()
if not data.startswith('Error'):
return data
return None
if __name__ == '__main__':
root = os.path.dirname(os.path.abspath(__file__))
replaceIndexHtml(root, minimizeAllJs(root))
|
apache-2.0
| -6,989,141,133,847,485,000 | 31.920792 | 97 | 0.640602 | false | 3.078704 | false | false | false |
bulax41/Commands
|
scripts/cme_decode_pcap.py
|
1
|
1258
|
#!/usr/bin/env python
import dpkt
import argparse
import struct
import sys
import datetime
def main():
parser = argparse.ArgumentParser(description='Read PCAP file, decode CME data and output message sequence gaps')
parser.add_argument('-f','--file',help="PCAP File to read")
args = parser.parse_args()
with open(args.file, 'rb') as f:
pcap = dpkt.pcap.Reader(f)
Packets = 0
Gaps = 0
MsgSeqNum = 0
for timestamp, buf in pcap:
eth = dpkt.ethernet.Ethernet(buf)
ip = eth.data
            # MDP packet header: 4-byte sequence number followed by an 8-byte sending time (12 bytes total)
            (seqnum, pcktime) = struct.unpack_from(">IQ", ip.data.data[:12])
diff = int(seqnum) - MsgSeqNum
if MsgSeqNum == 0:
print "Initial sequence number: %s" % int(seqnum)
elif diff!=1:
Gaps = Gaps + diff - 1
now = datetime.datetime.utcfromtimestamp(timestamp).strftime("%b %d %Y %X.%f")
print "Gapped Detected, %s Packets, Sequence Numbers %s-%s at %s" % (diff-1,MsgSeqNum+1,int(Num)-1,now)
MsgSeqNum = int(seqnum)
Packets = Packets + 1
    # the capture file is closed automatically when the with-block above exits
    print "Ending sequence number: %s, total packets %s, gapped packets %s" % (MsgSeqNum, Packets, Gaps)
if __name__ == '__main__':
main()
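# Typical invocation (hypothetical capture file name):
#   python cme_decode_pcap.py -f cme_feed_capture.pcap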
|
gpl-3.0
| 7,447,482,395,164,316,000 | 29.682927 | 120 | 0.573132 | false | 3.475138 | false | false | false |
ging/horizon
|
openstack_dashboard/dashboards/idm/utils.py
|
1
|
4929
|
# Copyright (C) 2014 Universidad Politecnica de Madrid
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import math
from horizon import exceptions
from django.conf import settings
from django.core import urlresolvers
from openstack_dashboard import api
from openstack_dashboard import fiware_api
from openstack_dashboard.local import local_settings
from django_gravatar.helpers import get_gravatar_url, has_gravatar
LOG = logging.getLogger('idm_logger')
DEFAULT_ORG_MEDIUM_AVATAR = 'dashboard/img/logos/medium/group.png'
DEFAULT_APP_MEDIUM_AVATAR = 'dashboard/img/logos/medium/app.png'
DEFAULT_USER_MEDIUM_AVATAR = 'dashboard/img/logos/medium/user.png'
DEFAULT_ORG_SMALL_AVATAR = 'dashboard/img/logos/small/group.png'
DEFAULT_APP_SMALL_AVATAR = 'dashboard/img/logos/small/app.png'
DEFAULT_USER_SMALL_AVATAR = 'dashboard/img/logos/small/user.png'
AVATAR_SIZE = {'img_small': 25,
'img_medium': 36,
'img_original': 100}
def filter_default(items):
"""Remove from a list the automated created project for a user. This project
is created during the user registration step and is needed for the user to be
able to perform operations in the cloud, as a work around the Keystone-OpenStack
project behaviour. We don't want the user to be able to do any operations to this
project nor even notice it exists.
Also filters other default items we dont want to show, like internal
applications.
"""
filtered = [i for i in items if not getattr(i, 'is_default', False)]
return filtered
def check_elements(elements, valid_elements):
"""Checks a list of elements are present in an allowed elements list"""
invalid_elements = [k for k in elements if k not in valid_elements]
if invalid_elements:
raise TypeError('The elements {0} are not defined \
in {1}'.format(invalid_elements, valid_elements))
def swap_dict(old_dict):
"""Returns a new dictionary in wich the keys are all the values of the old
dictionary and the values are lists of keys that had that value.
Example:
d = { 'a':['c','v','b'], 's':['c','v','d']}
swap_dict(d) -> {'c': ['a', 's'], 'b': ['a'], 'd': ['s'], 'v': ['a', 's']}
"""
new_dict = {}
for key in old_dict:
for value in old_dict[key]:
new_dict[value] = new_dict.get(value, [])
new_dict[value].append(key)
return new_dict
def get_avatar(obj, avatar_type, default_avatar):
"""Gets the object avatar or a default one."""
if type(obj) == dict:
use_gravatar = obj.get('use_gravatar', None)
email = obj.get('name', None)
avatar = obj.get(avatar_type, None)
else:
use_gravatar = getattr(obj, 'use_gravatar', None)
email = getattr(obj, 'name', None)
avatar = getattr(obj, avatar_type, None)
if use_gravatar and has_gravatar(email):
return get_gravatar_url(email, size=AVATAR_SIZE[avatar_type])
if avatar and avatar != '':
return settings.MEDIA_URL + avatar
else:
return settings.STATIC_URL + default_avatar
def get_switch_url(organization, check_switchable=True):
if check_switchable and not getattr(organization, 'switchable', False):
return False
if type(organization) == dict:
organization_id = organization['id']
else:
organization_id = organization.id
return urlresolvers.reverse('switch_tenants',
kwargs={'tenant_id': organization_id})
def page_numbers(elements, page_size):
return range(1, int(math.ceil(float(len(elements))/page_size)) + 1)
def total_pages(elements, page_size):
if not elements:
return 0
return page_numbers(elements, page_size)[-1]
def paginate_list(elements, page_number, page_size):
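    # page_number is 1-based: page 1 maps to elements[0:page_size].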
index = (page_number - 1) * page_size
return elements[index:index + page_size]
class PickleObject():
"""Extremely simple class that holds the very little information we need
    to cache. Keystoneclient resource objects are not picklable.
"""
def __init__(self, **kwds):
self.__dict__.update(kwds)
def obj_to_jsonable_dict(obj, attrs):
"""converts a object into a json-serializable dict, geting the
specified attributes.
"""
as_dict = {}
for attr in attrs:
if hasattr(obj, attr):
as_dict[attr] = getattr(obj, attr)
return as_dict
|
apache-2.0
| 8,689,818,560,495,523,000 | 32.530612 | 86 | 0.673159 | false | 3.694903 | false | false | false |
FLYKingdom/MyCode
|
PycharmProjects/PythonTest/FunctionalProgramming.py
|
1
|
3606
|
# Functional programming
# Higher-order functions
# The map function
def square(x):
    return x * x
r = map(square, [1, 2, 3])
print(list(r))
# The reduce function
from functools import reduce
def fn(x, y):
return x * 10 + y
print(reduce(fn, [1, 2, 3]))
is_str = isinstance(reduce(fn, [1, 2, 3]), int)
print(is_str)
DIGITS = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9}
def str2int(s):
def fn(x, y):
return x * 10 + y
def char2num(s):
return DIGITS[s]
return reduce(fn, map(char2num, s))
def char2num1(s):
return DIGITS[s]
def str2int1(s):
return reduce(lambda x, y: x * 10 + y, map(char2num1, s))
print(str2int('6450131'))
print(str2int1('6450131'))
# Compute the product of a list
def prod(l):
return reduce(lambda x, y: x * y, l)
print(prod([1, 2, 3]))
# filter
def not_empty(s):
return s and s.strip()
l = list(filter(not_empty, ['junjun', None, '', 'A', ' ']))
print(l)
# Prime numbers
def _odd_iter():
n = 1
while True:
n = n + 2
yield n
def _not_divisible(n):
return lambda x: x % n > 0
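# primes() below builds a lazy sieve: yield 2 first, then repeatedly take the next
# odd number and filter its multiples out of the remaining stream.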
def primes():
yield 2
it = _odd_iter()
while True:
n = next(it)
yield n
it = filter(_not_divisible(n), it)
s = ''
for n in primes():
if n < 30:
s = s + str(n) + ','
else:
break
print('s:', s)
# Palindromic numbers
import math
def is_palindrome(n):
strs = str(n)
count = len(strs)
center = math.ceil(count // 2)
i = 0
j = count - 1
while True:
if j <= i:
return True
if strs[i] == strs[j]:
i = i + 1
j = j - 1
else:
return False
output = filter(is_palindrome, range(1, 1000))
print('1~1000:', list(output))
if list(filter(is_palindrome, range(1, 200))) == [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 22, 33, 44, 55, 66, 77, 88, 99, 101,
111, 121, 131, 141, 151, 161, 171, 181, 191]:
    print('Test passed!')
else:
    print('Test failed!')
# Sorting functions
L = [('Bob', 75), ('Adam', 92), ('Bart', 66), ('Lisa', 88)]
def by_name(t):
return t[0]
def by_score(t):
return t[1]
print('sorted t1:', sorted(L, key=by_name))
print('sorted t2:', sorted(L, key=by_score, reverse=True))
# Returning functions and closures (not fully grasped yet; couldn't get the incrementing counter right at first)
# Incrementing integers
def count():
fs = []
for i in range(1, 4):
def f():
return i * i
fs.append(f)
return fs
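# Note: all three closures returned by count() capture the same loop variable i,
# so each f() returns 9 -- the classic late-binding pitfall of closures in a loop.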
def createCounter():
a = 0
def counter():
nonlocal a
a += 1
return a
return counter
counterA = createCounter()
print(counterA(), counterA(), counterA(), counterA(), counterA()) # 1 2 3 4 5
counterB = createCounter()
if [counterB(), counterB(), counterB(), counterB()] == [1, 2, 3, 4]:
    print('Test passed!')
else:
    print('Test failed!')
# Anonymous functions: lambda x: x + 1
counter = lambda x: x + 1
print(counter(1))
# Decorators
# __name__
print('count: name', count.__name__, 'annotations ', count.__annotations__, 'class ', count.__class__, 'code',
count.__code__)
import functools
def log(text):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
print('call %s() text:' % func.__name__, text)
return func(*args, **kw)
return wrapper
return decorator
@log('new text')
def now():
print('2018-8-27')
f = now
f()
print(f.__name__)
# Partial functions
import functools
int2 = functools.partial(int, base=2)
print(int2('100'))
max10 = functools.partial(max, 10)
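# partial(max, 10) prepends 10 as a positional argument, so max10(1, 2, 3) == max(10, 1, 2, 3) == 10.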
print(max10(1, 2, 3))
|
mit
| 1,903,475,979,759,218,700 | 14.594595 | 117 | 0.519931 | false | 2.536264 | false | false | false |
sunqm/pyscf
|
pyscf/tools/dump_mat.py
|
1
|
8525
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyscf.lib.parameters import OUTPUT_DIGITS, OUTPUT_COLS
from pyscf import __config__
BASE = getattr(__config__, 'BASE', 0)
def dump_tri(stdout, c, label=None,
ncol=OUTPUT_COLS, digits=OUTPUT_DIGITS, start=BASE):
''' Format print for the lower triangular part of an array
Args:
stdout : file object
eg sys.stdout, or stdout = open('/path/to/file') or
mol.stdout if mol is an object initialized from :class:`gto.Mole`
c : numpy.ndarray
coefficients
Kwargs:
label : list of strings
Row labels (default is 1,2,3,4,...)
ncol : int
Number of columns in the format output (default 5)
digits : int
Number of digits of precision for floating point output (default 5)
start : int
The number to start to count the index (default 0)
Examples:
>>> import sys, numpy
>>> dm = numpy.eye(3)
>>> dump_tri(sys.stdout, dm)
#0 #1 #2
0 1.00000
1 0.00000 1.00000
2 0.00000 0.00000 1.00000
>>> from pyscf import gto
>>> mol = gto.M(atom='C 0 0 0')
>>> dm = numpy.eye(mol.nao_nr())
>>> dump_tri(sys.stdout, dm, label=mol.ao_labels(), ncol=9, digits=2)
#0 #1 #2 #3 #4 #5 #6 #7 #8
0 C 1s 1.00
0 C 2s 0.00 1.00
0 C 3s 0.00 0.00 1.00
0 C 2px 0.00 0.00 0.00 1.00
0 C 2py 0.00 0.00 0.00 0.00 1.00
0 C 2pz 0.00 0.00 0.00 0.00 0.00 1.00
0 C 3px 0.00 0.00 0.00 0.00 0.00 0.00 1.00
0 C 3py 0.00 0.00 0.00 0.00 0.00 0.00 0.00 1.00
0 C 3pz 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 1.00
'''
nc = c.shape[1]
for ic in range(0, nc, ncol):
dc = c[:,ic:ic+ncol]
m = dc.shape[1]
fmt = (' %%%d.%df'%(digits+4,digits))*m + '\n'
if label is None:
stdout.write(((' '*(digits+3))+'%s\n') %
(' '*(digits)).join(['#%-4d'%i for i in range(start+ic,start+ic+m)]))
for k, v in enumerate(dc[ic:ic+m]):
fmt = (' %%%d.%df'%(digits+4,digits))*(k+1) + '\n'
stdout.write(('%-5d' % (ic+k+start)) + (fmt % tuple(v[:k+1])))
for k, v in enumerate(dc[ic+m:]):
stdout.write(('%-5d' % (ic+m+k+start)) + (fmt % tuple(v)))
else:
stdout.write(((' '*(digits+10))+'%s\n') %
(' '*(digits)).join(['#%-4d'%i for i in range(start+ic,start+ic+m)]))
#stdout.write(' ')
#stdout.write(((' '*(digits)+'#%-5d')*m) % tuple(range(ic+start,ic+m+start)) + '\n')
for k, v in enumerate(dc[ic:ic+m]):
fmt = (' %%%d.%df'%(digits+4,digits))*(k+1) + '\n'
stdout.write(('%12s' % label[ic+k]) + (fmt % tuple(v[:k+1])))
for k, v in enumerate(dc[ic+m:]):
stdout.write(('%12s' % label[ic+m+k]) + (fmt % tuple(v)))
def dump_rec(stdout, c, label=None, label2=None,
ncol=OUTPUT_COLS, digits=OUTPUT_DIGITS, start=BASE):
''' Print an array in rectangular format
Args:
stdout : file object
eg sys.stdout, or stdout = open('/path/to/file') or
mol.stdout if mol is an object initialized from :class:`gto.Mole`
c : numpy.ndarray
coefficients
Kwargs:
label : list of strings
Row labels (default is 1,2,3,4,...)
label2 : list of strings
Col labels (default is 1,2,3,4,...)
ncol : int
Number of columns in the format output (default 5)
digits : int
Number of digits of precision for floating point output (default 5)
start : int
The number to start to count the index (default 0)
Examples:
>>> import sys, numpy
>>> dm = numpy.eye(3)
>>> dump_rec(sys.stdout, dm)
#0 #1 #2
0 1.00000 0.00000 0.00000
1 0.00000 1.00000 0.00000
2 0.00000 0.00000 1.00000
>>> from pyscf import gto
>>> mol = gto.M(atom='C 0 0 0')
>>> dm = numpy.eye(mol.nao_nr())
>>> dump_rec(sys.stdout, dm, label=mol.ao_labels(), ncol=9, digits=2)
#0 #1 #2 #3 #4 #5 #6 #7 #8
0 C 1s 1.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
0 C 2s 0.00 1.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
0 C 3s 0.00 0.00 1.00 0.00 0.00 0.00 0.00 0.00 0.00
0 C 2px 0.00 0.00 0.00 1.00 0.00 0.00 0.00 0.00 0.00
0 C 2py 0.00 0.00 0.00 0.00 1.00 0.00 0.00 0.00 0.00
0 C 2pz 0.00 0.00 0.00 0.00 0.00 1.00 0.00 0.00 0.00
0 C 3px 0.00 0.00 0.00 0.00 0.00 0.00 1.00 0.00 0.00
0 C 3py 0.00 0.00 0.00 0.00 0.00 0.00 0.00 1.00 0.00
0 C 3pz 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 1.00
'''
nc = c.shape[1]
if label2 is None:
fmt = '#%%-%dd' % (digits+3)
label2 = [fmt%i for i in range(start,nc+start)]
else:
fmt = '%%-%ds' % (digits+4)
label2 = [fmt%i for i in label2]
for ic in range(0, nc, ncol):
dc = c[:,ic:ic+ncol]
m = dc.shape[1]
fmt = (' %%%d.%df'%(digits+4,digits))*m + '\n'
if label is None:
stdout.write(((' '*(digits+3))+'%s\n') % ' '.join(label2[ic:ic+m]))
for k, v in enumerate(dc):
stdout.write(('%-5d' % (k+start)) + (fmt % tuple(v)))
else:
stdout.write(((' '*(digits+10))+'%s\n') % ' '.join(label2[ic:ic+m]))
for k, v in enumerate(dc):
stdout.write(('%12s' % label[k]) + (fmt % tuple(v)))
def dump_mo(mol, c, label=None,
ncol=OUTPUT_COLS, digits=OUTPUT_DIGITS, start=BASE):
''' Format print for orbitals
Args:
stdout : file object
eg sys.stdout, or stdout = open('/path/to/file') or
mol.stdout if mol is an object initialized from :class:`gto.Mole`
c : numpy.ndarray
Orbitals, each column is an orbital
Kwargs:
label : list of strings
Row labels (default is AO labels)
Examples:
>>> from pyscf import gto
>>> mol = gto.M(atom='C 0 0 0')
>>> mo = numpy.eye(mol.nao_nr())
>>> dump_mo(mol, mo)
#0 #1 #2 #3 #4 #5 #6 #7 #8
0 C 1s 1.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
0 C 2s 0.00 1.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
0 C 3s 0.00 0.00 1.00 0.00 0.00 0.00 0.00 0.00 0.00
0 C 2px 0.00 0.00 0.00 1.00 0.00 0.00 0.00 0.00 0.00
0 C 2py 0.00 0.00 0.00 0.00 1.00 0.00 0.00 0.00 0.00
0 C 2pz 0.00 0.00 0.00 0.00 0.00 1.00 0.00 0.00 0.00
0 C 3px 0.00 0.00 0.00 0.00 0.00 0.00 1.00 0.00 0.00
0 C 3py 0.00 0.00 0.00 0.00 0.00 0.00 0.00 1.00 0.00
0 C 3pz 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 1.00
'''
if label is None:
label = mol.ao_labels()
dump_rec(mol.stdout, c, label, None, ncol, digits, start)
del(BASE)
if __name__ == '__main__':
import sys
import numpy
c = numpy.random.random((16,16))
label = ['A%5d' % i for i in range(16)]
dump_tri(sys.stdout, c, label, 10, 2, 1)
dump_rec(sys.stdout, c, None, label, start=1)
|
apache-2.0
| -8,020,765,779,473,733,000 | 41.20297 | 96 | 0.482111 | false | 2.876181 | false | false | false |
bayesimpact/bob-emploi
|
frontend/server/asynchronous/test/update_email_sent_status_test.py
|
1
|
9477
|
"""Tests for the update_email_sent_status module."""
import datetime
import typing
import unittest
from unittest import mock
import mailjet_rest
import mongomock
from bob_emploi.frontend.server.asynchronous import update_email_sent_status
from bob_emploi.frontend.server.test import mailjetmock
@mailjetmock.patch()
class MainTestCase(unittest.TestCase):
"""Unit tests for the update_email_sent_status module."""
def setUp(self) -> None:
super().setUp()
self.database = mongomock.MongoClient().test
db_patcher = mock.patch(update_email_sent_status.__name__ + '._DB', self.database)
db_patcher.start()
self.addCleanup(db_patcher.stop)
def _send_email(self, email_address: str = '[email protected]') -> int:
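        # Sends a template email through the (mocked) Mailjet client and returns its Mailjet message ID.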
return typing.cast(int, mailjet_rest.Client(version='v3.1').send.create({'Messages': [{
'To': [{'Email': email_address}],
'TemplateID': 123456,
}]}).json()['Messages'][0]['To'][0]['MessageID'])
def test_with_message_id(self) -> None:
"""Test retrieving info when message ID is present."""
message_id = self._send_email('[email protected]')
self.database.user.insert_one({
'other': 'field',
'profile': {'email': '[email protected]'},
'emailsSent': [{
'sentAt': '2017-09-08T09:25:46.145001Z',
'mailjetMessageId': message_id,
}],
})
# Mark the message as opened.
mailjetmock.get_message(message_id).open()
update_email_sent_status.main(['--disable-sentry'])
updated_data = self.database.user.find_one()
assert updated_data
self.assertEqual('field', updated_data.get('other'))
self.assertEqual(
message_id, int(updated_data.get('emailsSent')[0].get('mailjetMessageId')))
self.assertEqual(
'EMAIL_SENT_OPENED',
updated_data.get('emailsSent')[0].get('status'))
@mock.patch(update_email_sent_status.__name__ + '.now')
def test_refresh_old_status(self, mock_now: mock.MagicMock) -> None:
"""Test refreshing old status."""
# On Nov. the 5th, the email had been opened.
message_id = self._send_email('[email protected]')
mailjetmock.get_message(message_id).open()
mock_now.get.return_value = datetime.datetime(2017, 11, 5, 15, 13)
self.database.user.insert_one({
'other': 'field',
'profile': {'email': '[email protected]'},
'emailsSent': [{
'sentAt': '2017-11-01T09:25:46.145001Z',
'mailjetMessageId': message_id,
}],
})
update_email_sent_status.main(['--disable-sentry'])
# A week later the email link had been clicked.
mock_now.get.return_value = datetime.datetime(2017, 11, 13, 15, 13)
mailjetmock.get_message(message_id).click()
update_email_sent_status.main(['--disable-sentry'])
updated_data = self.database.user.find_one()
assert updated_data
self.assertEqual(
'EMAIL_SENT_CLICKED',
updated_data.get('emailsSent')[0].get('status'))
@mock.patch(update_email_sent_status.mail_blast.__name__ + '.campaign')
def test_campaign_specific(self, mock_campaigns: mock.MagicMock) -> None:
"""Test retrieving info for a specific campaign."""
message_id = self._send_email('[email protected]')
mailjetmock.get_message(message_id).open()
mock_campaigns.list_all_campaigns.return_value = ['this-campaign', 'other-campaign']
self.database.user.insert_many([
{
'profile': {'email': '[email protected]'},
'emailsSent': [
{
'campaignId': 'this-campaign',
'sentAt': '2017-09-08T09:25:46.145001Z',
'mailjetMessageId': message_id,
},
{
'campaignId': 'other-campaign',
'sentAt': '2017-09-08T09:25:46.145001Z',
'mailjetMessageId': self._send_email('[email protected]'),
},
],
},
{
'profile': {'email': '[email protected]'},
'emailsSent': [{
'campaignId': 'other-campaign',
'sentAt': '2017-09-08T09:25:46.145001Z',
'mailjetMessageId': self._send_email('[email protected]'),
}],
},
])
update_email_sent_status.main(['--campaigns', 'this-campaign', '--disable-sentry'])
updated_user = self.database.user.find_one({'profile.email': '[email protected]'})
assert updated_user
self.assertEqual(
'EMAIL_SENT_OPENED',
updated_user.get('emailsSent')[0].get('status'))
self.assertIsNone(updated_user.get('emailsSent')[1].get('status'))
not_updated_user = self.database.user.find_one({'profile.email': '[email protected]'})
assert not_updated_user
self.assertIsNone(not_updated_user.get('emailsSent')[0].get('status'))
@mock.patch(update_email_sent_status.__name__ + '.now')
@mock.patch(update_email_sent_status.__name__ + '.mail_send')
def test_multiple_checks(self, mock_mail: mock.MagicMock, mock_now: mock.MagicMock) -> None:
"""Test checking the status of an email several times."""
# Note that in this test we do not use mailjetmock because what's
# important is to check when calls to Mailjet are made (i.e. not too often).
mock_now.get.return_value = datetime.datetime(2017, 9, 8, 15, 13)
mock_mail.get_message.return_value = {
'ArrivedAt': '2017-09-08T09:25:48Z',
'ID': 6789,
'Comment': 'Right message, arrived 2 seconds after being sent',
'Status': 'opened',
}
self.database.user.insert_one({
'other': 'field',
'profile': {'email': '[email protected]'},
'emailsSent': [{
'sentAt': '2017-09-08T09:25:46.145001Z',
'mailjetMessageId': 6789,
}],
})
update_email_sent_status.main(['--disable-sentry'])
mock_mail.get_message.reset_mock()
# Check again, an hour later.
mock_now.get.return_value = datetime.datetime(2017, 9, 8, 16, 13)
update_email_sent_status.main(['--disable-sentry'])
mock_mail.get_message.assert_called_once()
mock_mail.get_message.reset_mock()
# Check again the next day.
mock_now.get.return_value = datetime.datetime(2017, 9, 9, 17, 13)
update_email_sent_status.main(['--disable-sentry'])
mock_mail.get_message.assert_called_once()
mock_mail.get_message.reset_mock()
# Check again an hour later the next day.
mock_now.get.return_value = datetime.datetime(2017, 9, 9, 18, 13)
update_email_sent_status.main(['--disable-sentry'])
mock_mail.get_message.assert_not_called()
# Check again 15 days later.
mock_now.get.return_value = datetime.datetime(2017, 9, 24, 18, 14)
update_email_sent_status.main(['--disable-sentry'])
mock_mail.get_message.assert_called_once()
mock_mail.get_message.reset_mock()
# Check again the next day.
mock_now.get.return_value = datetime.datetime(2017, 9, 25, 18, 14)
update_email_sent_status.main(['--disable-sentry'])
mock_mail.get_message.assert_not_called()
def test_update_helper(self) -> None:
"""Test updating the sent emails for another collection."""
message_id = self._send_email('[email protected]')
mailjetmock.get_message(message_id).open()
self.database.other_users.insert_one({
'other': 'field',
'profile': {'email': '[email protected]'},
'emailsSent': [{
'sentAt': '2017-09-08T09:25:46.145001Z',
'mailjetMessageId': message_id,
}],
})
update_email_sent_status.main(['--mongo-collection', 'other_users', '--disable-sentry'])
updated_data = self.database.other_users.find_one()
assert updated_data
self.assertEqual('field', updated_data.get('other'))
self.assertEqual(
message_id, int(updated_data.get('emailsSent')[0].get('mailjetMessageId')))
self.assertEqual(
'EMAIL_SENT_OPENED',
updated_data.get('emailsSent')[0].get('status'))
def test_mailjet_unknown(self) -> None:
"""Test retrieving info but MailJet never heard of the message."""
self.database.user.insert_one({
'other': 'field',
'profile': {'email': '[email protected]'},
'emailsSent': [{
'sentAt': '2017-09-08T09:25:46.145001Z',
'mailjetMessageId': 9876554,
}],
})
update_email_sent_status.main(['--disable-sentry'])
updated_data = self.database.user.find_one()
assert updated_data
self.assertEqual('field', updated_data.get('other'))
self.assertEqual(
9876554, int(updated_data.get('emailsSent')[0].get('mailjetMessageId')))
self.assertNotIn('status', updated_data.get('emailsSent')[0])
if __name__ == '__main__':
unittest.main()
|
gpl-3.0
| 1,764,484,004,379,997,400 | 38.65272 | 96 | 0.574549 | false | 3.717929 | true | false | false |
WhatWorksWhenForWhom/nlppln
|
nlppln/commands/save_ner_data.py
|
1
|
1125
|
#!/usr/bin/env python
import click
import os
import codecs
import json
import pandas as pd
from nlppln.utils import create_dirs, get_files
@click.command()
@click.argument('in_dir', type=click.Path(exists=True))
@click.option('--out_dir', '-o', default=os.getcwd(), type=click.Path())
@click.option('--name', '-n', default='ner_stats.csv')
def nerstats(in_dir, out_dir, name):
create_dirs(out_dir)
frames = []
in_files = get_files(in_dir)
for fi in in_files:
with codecs.open(fi, encoding='utf-8') as f:
saf = json.load(f)
data = {}
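        # Keep only tokens that carry an 'ne' (named-entity) annotation.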
data['word'] = [t['word'] for t in saf['tokens'] if 'ne' in t.keys()]
data['ner'] = [t['ne'] for t in saf['tokens'] if 'ne' in t.keys()]
data['w_id'] = [t['id'] for t in saf['tokens'] if 'ne' in t.keys()]
data['text'] = [os.path.basename(fi)
for t in saf['tokens'] if 'ne' in t.keys()]
frames.append(pd.DataFrame(data=data))
df = pd.concat(frames, ignore_index=True)
df.to_csv(os.path.join(out_dir, name), encoding='utf-8')
if __name__ == '__main__':
nerstats()
|
apache-2.0
| -5,148,481,464,431,719,000 | 27.846154 | 77 | 0.580444 | false | 3 | false | false | false |
nictuku/nwu
|
nwu/common/scheduler.py
|
1
|
3753
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2008 Stephan Peijnik ([email protected])
#
# This file is part of NWU.
#
# NWU is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NWU is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NWU. If not, see <http://www.gnu.org/licenses/>.
from threading import Thread, Lock, Event
from time import time
class Task:
""" Task base class. """
TYPE_ONESHOT = 0
TYPE_RECURRING = 1
def __init__(self, name, type, exec_time):
self.name = name
self.type = type
self.exec_time = exec_time
def execute(self):
""" Method that is executed by the scheduler.
Override to add your own code.
"""
pass
class RecurringTask(Task):
""" A recurring task.
    Is executed every <interval> seconds."""
def __init__(self, name, interval):
self.interval = interval
Task.__init__(self, name, Task.TYPE_RECURRING, int(time())+interval)
class OneshotTask(Task):
""" A one shot task.
Is executed at <exec_time>.
"""
def __init__(self, name, exec_time):
Task.__init__(self, name, Task.TYPE_ONESHOT, exec_time)
class Scheduler(Thread):
""" Manages scheduled tasks """
def __init__(self, app, name='Scheduler'):
Thread.__init__(self)
self.setName(name)
self.app = app
self.tasks = []
self.taskLock = Lock()
self.exitEvent = Event()
def init_thread(self):
""" Custom thread initialization code.
This method can be overridden to, for example, establish
a database connection.
"""
pass
def stop(self):
""" Stop the Scheduler. """
self.exitEvent.set()
def add_task(self, task):
""" Add a task to the scheduler """
if self.exitEvent.isSet():
return False
self.taskLock.acquire()
self.tasks.append(task)
self.taskLock.release()
return True
def remove_task(self, task):
""" Remove a task from the scheduler """
if self.exitEvent.isSet():
return False
self.taskLock.acquire()
self.tasks.remove(task)
self.taskLock.release()
return True
def run(self):
""" Thread main loop. """
self.init_thread()
while not self.exitEvent.isSet():
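            # Main polling loop: collect due tasks while holding the lock, execute them
            # outside it, then reschedule recurring tasks and drop finished one-shot tasks.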
exec_tasks = []
# Keep lock time as short as possible!
self.taskLock.acquire()
for ac in self.tasks:
if ac.exec_time <= int(time()):
exec_tasks.append(ac)
self.taskLock.release()
for ac in exec_tasks:
try:
ac.execute()
except Exception, e:
# TODO: Log this rather than printing it
print 'Task %s raised exception: %s' % (ac.name, e)
if ac.type == Task.TYPE_RECURRING:
ac.exec_time = int(time()) + ac.interval
self.taskLock.acquire()
for ac in exec_tasks:
if ac.type == Task.TYPE_ONESHOT:
self.tasks.remove(ac)
self.taskLock.release()
self.exitEvent.wait(0.1)
|
gpl-3.0
| 219,441,531,832,932,300 | 28.320313 | 76 | 0.563016 | false | 4.022508 | false | false | false |
Shu-Ji/multi-supervisord-web-admin
|
src/models.py
|
1
|
2943
|
# coding: u8
from tornado.util import ObjectDict
from sqlalchemy import create_engine
from sqlalchemy import (Column, Integer, Text, String, Boolean)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.attributes import InstrumentedAttribute
import settings
import utils
params = dict(
encoding='utf8',
echo=False,
pool_recycle=7200,
)
conn_str = 'sqlite:///%s' % settings.DB_PATH
engine = create_engine(conn_str, **params)
db_factory = lambda: sessionmaker(bind=engine)()
_Base = declarative_base()
class Base(_Base):
__abstract__ = True
id = Column(Integer, primary_key=True, autoincrement=True)
def as_dict(self):
r = {c: getattr(self, c) for c in self.columns()}
return ObjectDict(r)
@classmethod
def get_columns(cls):
c = {}
for k, v in vars(cls).iteritems():
if type(v) is InstrumentedAttribute:
c[k] = v
return ObjectDict(c)
@classmethod
def columns(cls):
return cls.get_columns().keys()
class User(Base):
__tablename__ = 'user'
name = Column(Text, index=True)
pwd = Column(String(32))
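    # Passwords are stored as utils.md5(...) hex digests (32 characters); see reset_password below.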
@staticmethod
def reset_password(handler, old, new):
db = handler.db
user = db.query(User).filter_by(name=handler.username).first()
if user.pwd != utils.md5(old):
return False
user.pwd = utils.md5(new)
return True
class Host(Base):
__tablename__ = 'host'
user = Column(Text)
pwd = Column(Text)
host = Column(Text)
port = Column(Integer)
is_active = Column(Boolean, server_default='1')
@staticmethod
def delete(db, id):
return bool(db.query(Host).filter_by(id=id).delete())
@staticmethod
def update(db, id, user, pwd, host, port):
return bool(db.query(Host).filter_by(id=id).update(
{'user': user, 'pwd': pwd, 'host': host, 'port': port}
))
@staticmethod
def add(handler, user, pwd, host, port):
db = handler.db
if db.query(Host).filter_by(host=host, port=port).first() is not None:
return False
db.add(Host(user=user, pwd=pwd, host=host, port=port))
return True
@staticmethod
def get_all_active_hosts(handler):
return handler.db.query(Host).filter_by(is_active=True)
@staticmethod
def get_one_host_info_by_id(db, id):
return db.query(Host).filter_by(id=id).first()
@staticmethod
def get_one_host_info(handler, host, port):
return handler.db.query(Host).filter_by(host=host, port=port).first()
@staticmethod
def get_all_hosts(handler):
return handler.db.query(Host)
if __name__ == '__main__':
metadata = Base.metadata
metadata.create_all(engine)
db = db_factory()
db.merge(User(id=1, name='admin', pwd=utils.md5('AdminDemo')))
db.commit()
db.close()
|
unlicense
| -1,953,575,648,705,451,000 | 23.525 | 78 | 0.622834 | false | 3.503571 | false | false | false |
huiyiqun/check_mk
|
cmk/log.py
|
1
|
7362
|
#!/usr/bin/env python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2016 [email protected] |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
import sys
import logging as _logging
# Just for reference, the predefined logging levels:
#
# syslog/CMC Python added to Python
# --------------------------------------------
# emerg 0
# alert 1
# crit 2 CRITICAL 50
# err 3 ERROR 40
# warn 4 WARNING 30 <= default level in Python
# notice 5 <= default level in CMC
# info 6 INFO 20
# VERBOSE 15
# debug 7 DEBUG 10
#
# NOTE: VERBOSE is a bit confusing and suffers from the not-invented-here
# syndrome. If we really insist on 3 verbosity levels (normal, verbose, very
# verbose), we should probably do the following:
#
# * Nuke VERBOSE.
# * Introduce NOTICE (25).
# * Make NOTICE the default level.
# * Optionally introduce EMERGENCY (70) and ALERT (60) for consistency.
#
# This would make our whole logging story much more consistent internally
# (code) and externally (GUI always offers the same levels). Nevertheless, we
# should keep in mind that the Python documentation strongly discourages
# introducing new log levels, at least for libraries. OTOH, with 3 verbosity
# levels, this would force us to log normal stuff with a WARNING level, which
# looks wrong.
# Users should be able to set log levels without importing "logging"
CRITICAL = _logging.CRITICAL
ERROR = _logging.ERROR
WARNING = _logging.WARNING
INFO = _logging.INFO
DEBUG = _logging.DEBUG
# We need an additional log level between INFO and DEBUG to reflect the
# verbose() and vverbose() mechanisms of Check_MK.
VERBOSE = 15
class CMKLogger(_logging.getLoggerClass()):
def __init__(self, name, level=_logging.NOTSET):
super(CMKLogger, self).__init__(name, level)
_logging.addLevelName(VERBOSE, "VERBOSE")
def verbose(self, msg, *args, **kwargs):
if self.is_verbose():
self._log(VERBOSE, msg, args, **kwargs)
def is_verbose(self):
return self.isEnabledFor(VERBOSE)
def is_very_verbose(self):
return self.isEnabledFor(DEBUG)
def set_format(self, fmt):
handler = _logging.StreamHandler(stream=sys.stdout)
handler.setFormatter(get_formatter(fmt))
del self.handlers[:] # Remove all previously existing handlers
self.addHandler(handler)
_logging.setLoggerClass(CMKLogger)
# Set default logging handler to avoid "No handler found" warnings.
# Python 2.7+
logger = _logging.getLogger("cmk")
logger.addHandler(_logging.NullHandler())
logger.setLevel(INFO)
def get_logger(name):
"""This function provides the logging object for client code.
It returns a child logger of the "cmk" main logger, identified
by the given name. The name of the child logger will be prefixed
with "cmk.", for example "cmk.mkeventd" in case of "mkeventd".
"""
return logger.getChild(name)
def get_formatter(format="%(asctime)s [%(levelno)s] [%(name)s %(process)d] %(message)s"):
"""Returns a new message formater instance that uses the standard
Check_MK log format by default. You can also set another format
if you like."""
return _logging.Formatter(format)
def setup_console_logging():
"""This method enables all log messages to be written to the console
without any additional information like date/time, logger-name. Just
the log line is written.
This can be used for existing command line applications which were
using sys.stdout.write() or print() before.
"""
handler = _logging.StreamHandler(stream=sys.stdout)
formatter = _logging.Formatter("%(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def open_log(log_file_path, fallback_to=None):
"""Open logfile and fall back to stderr if this is not successfull
The opened file() object is returned.
"""
if fallback_to is None:
fallback_to = sys.stderr
logfile = None
try:
logfile = file(log_file_path, "a")
logfile.flush()
except Exception, e:
logger.exception("Cannot open log file '%s': %s" % (log_file_path , e))
if fallback_to:
logfile = fallback_to
if logfile:
setup_logging_handler(logfile)
return logfile
def setup_logging_handler(stream):
"""This method enables all log messages to be written to the given
    stream file object. The messages are formatted in Check_MK standard
logging format.
"""
handler = _logging.StreamHandler(stream=stream)
handler.setFormatter(get_formatter("%(asctime)s [%(levelno)s] [%(name)s] %(message)s"))
del logger.handlers[:] # Remove all previously existing handlers
logger.addHandler(handler)
def set_verbosity(verbosity):
"""Values for "verbosity":
0: enables INFO and above
1: enables VERBOSE and above
2: enables DEBUG and above (ALL messages)
"""
if verbosity == 0:
logger.setLevel(INFO)
elif verbosity == 1:
logger.setLevel(VERBOSE)
elif verbosity == 2:
logger.setLevel(DEBUG)
else:
raise NotImplementedError()
# TODO: Experiment. Not yet used.
class LogMixin(object):
"""Inherit from this class to provide logging support.
Makes a logger available via "self.logger" for objects and
"self.cls_logger" for the class.
"""
__parent_logger = None
__logger = None
__cls_logger = None
@property
def _logger(self):
if not self.__logger:
parent = self.__parent_logger or logger
self.__logger = parent.getChild('.'.join([self.__class__.__name__]))
return self.__logger
@classmethod
def _cls_logger(cls):
if not cls.__cls_logger:
parent = cls.__parent_logger or logger
cls.__cls_logger = parent.getChild('.'.join([cls.__name__]))
return cls.__cls_logger
|
gpl-2.0
| -2,892,559,792,175,105,500 | 31.72 | 91 | 0.609617 | false | 3.90557 | false | false | false |