repo_name | path | copies | size | text | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
mlcommons/training | language_model/tensorflow/bert/cleanup_scripts/pick_eval_samples.py | 1 | 3819 | """Script for picking a certain number of samples.
"""
import argparse
import time
import logging
import collections
import tensorflow as tf
parser = argparse.ArgumentParser(
description="Eval sample picker for BERT.")
parser.add_argument(
'--input_tfrecord',
type=str,
default='',
help='Input tfrecord path')
parser.add_argument(
'--output_tfrecord',
type=str,
default='',
help='Output tfrecord path')
parser.add_argument(
'--num_examples_to_pick',
type=int,
default=10000,
help='Number of examples to pick')
parser.add_argument(
'--max_seq_length',
type=int,
default=512,
help='The maximum number of tokens within a sequence.')
parser.add_argument(
'--max_predictions_per_seq',
type=int,
default=76,
help='The maximum number of predictions within a sequence.')
args = parser.parse_args()
max_seq_length = args.max_seq_length
max_predictions_per_seq = args.max_predictions_per_seq
logging.basicConfig(level=logging.INFO)
def decode_record(record):
"""Decodes a record to a TensorFlow example."""
name_to_features = {
"input_ids":
tf.FixedLenFeature([max_seq_length], tf.int64),
"input_mask":
tf.FixedLenFeature([max_seq_length], tf.int64),
"segment_ids":
tf.FixedLenFeature([max_seq_length], tf.int64),
"masked_lm_positions":
tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
"masked_lm_ids":
tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
"masked_lm_weights":
tf.FixedLenFeature([max_predictions_per_seq], tf.float32),
"next_sentence_labels":
tf.FixedLenFeature([1], tf.int64),
}
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def create_int_feature(values):
feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return feature
def create_float_feature(values):
feature = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
return feature
if __name__ == '__main__':
tic = time.time()
tf.enable_eager_execution()
d = tf.data.TFRecordDataset(args.input_tfrecord)
num_examples = 0
records = []
for record in d:
records.append(record)
num_examples += 1
writer = tf.python_io.TFRecordWriter(args.output_tfrecord)
i = 0
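    # Pick records at a fixed stride (pick_ratio) so the selected examples are
    # spread evenly across the input file instead of being taken from the front.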
pick_ratio = num_examples / args.num_examples_to_pick
num_examples_picked = 0
for i in range(args.num_examples_to_pick):
example = decode_record(records[int(i * pick_ratio)])
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(
example["input_ids"].numpy())
features["input_mask"] = create_int_feature(
example["input_mask"].numpy())
features["segment_ids"] = create_int_feature(
example["segment_ids"].numpy())
features["masked_lm_positions"] = create_int_feature(
example["masked_lm_positions"].numpy())
features["masked_lm_ids"] = create_int_feature(
example["masked_lm_ids"].numpy())
features["masked_lm_weights"] = create_float_feature(
example["masked_lm_weights"].numpy())
features["next_sentence_labels"] = create_int_feature(
example["next_sentence_labels"].numpy())
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
num_examples_picked += 1
writer.close()
toc = time.time()
logging.info("Picked %d examples out of %d samples in %.2f sec",
num_examples_picked, num_examples, toc - tic)
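# Example invocation (illustrative; the file names are placeholders):
#   python pick_eval_samples.py --input_tfrecord=eval_all.tfrecord \
#       --output_tfrecord=eval_10k.tfrecord --num_examples_to_pick=10000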
| apache-2.0 | -7,561,222,218,181,708,000 | 29.552 | 79 | 0.66431 | false | 3.481313 | false | false | false |
KhronosGroup/COLLADA-CTS | StandardDataSets/collada/library_visual_scenes/visual_scene/asset/created/created/created.py | 2 | 5414 |
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
# See Core.Logic.FJudgementContext for the information
# of the 'context' parameter.
# This sample judging object does the following:
#
# JudgeBaseline: just verifies that the standard steps did not crash.
# JudgeSuperior: also verifies that the validation steps are not in error.
# JudgeExemplary: same as intermediate badge.
# We import an assistant script that includes the common verification
# methods. The assistant buffers its checks, so that running them again
# does not incur an unnecessary performance hit.
import sys, string, os
from xml.dom import minidom, Node
from datetime import datetime, timedelta
from Core.Common.FUtils import FindXmlChild, GetXmlContent, ParseDate
from StandardDataSets.scripts import JudgeAssistant
# Please feed your node list here:
tagLst = ['library_visual_scenes', 'visual_scene', 'asset', 'created']
attrName = ''
attrVal = ''
dataToCheck = ''
class SimpleJudgingObject:
def __init__(self, _tagLst, _attrName, _attrVal, _data):
self.tagList = _tagLst
self.attrName = _attrName
self.attrVal = _attrVal
self.dataToCheck = _data
self.status_baseline = False
self.status_superior = False
self.status_exemplary = False
self.__assistant = JudgeAssistant.JudgeAssistant()
def CheckDate(self, context):
# Get the <created> time for the input file
root = minidom.parse(context.GetInputFilename()).documentElement
inputCreatedDate = ParseDate(GetXmlContent(FindXmlChild(root, "library_visual_scenes", "visual_scene", "asset", "created")))
if inputCreatedDate == None:
context.Log("FAILED: Couldn't read <created> value from test input file.")
return None
# Get the output file
outputFilenames = context.GetStepOutputFilenames("Export")
if len(outputFilenames) == 0:
context.Log("FAILED: There are no export steps.")
return None
# Get the <created> time for the output file
root = minidom.parse(outputFilenames[0]).documentElement
outputCreatedDate = ParseDate(GetXmlContent(FindXmlChild(root, "library_visual_scenes", "visual_scene", "asset", "created")))
if outputCreatedDate == None:
context.Log("FAILED: Couldn't read <created> value from the exported file.")
return None
if (outputCreatedDate - inputCreatedDate) != timedelta(0):
context.Log("FAILED: <created> is not preserved.")
context.Log("The original <created> time is " + str(inputCreatedDate))
context.Log("The exported <created> time is " + str(outputCreatedDate))
return False
context.Log("PASSED: <created> element is preserved.")
return True
def JudgeBaseline(self, context):
        # No step should crash
self.__assistant.CheckCrashes(context)
# Import/export/validate must exist and pass, while Render must only exist.
self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], [])
self.status_baseline = self.__assistant.GetResults()
return self.status_baseline
    # To pass intermediate you need to pass basic; this object could also include
    # additional tests specific to the intermediate badge.
def JudgeSuperior(self, context):
self.status_superior = self.status_baseline
return self.status_superior
    # To pass advanced you need to pass intermediate; this object could also include
    # additional tests specific to the advanced badge.
def JudgeExemplary(self, context):
# if superior fails, no point in further checking
if (self.status_superior == False):
self.status_exemplary = self.status_superior
return self.status_exemplary
self.status_exemplary = self.CheckDate(context)
return self.status_exemplary
# This is where all the work occurs: "judgingObject" is an absolutely necessary token.
# The dynamic loader looks very specifically for a class instance named "judgingObject".
#
judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck);
| mit | -7,654,057,124,771,957,000 | 49.609524 | 466 | 0.693572 | false | 4.246275 | false | false | false |
jeffh/rpi_courses | rpi_courses/scheduler.py | 1 | 5690 | from pyconstraints import Problem, is_nil, BruteForceSolver
__all__ = ['compute_schedules', 'TimeRange', 'Scheduler']
class TimeRange(object):
"Represents a time range to be restricted."
def __init__(self, start, end, dow):
self.start = start
self.end = end
self.days_of_week = dow
def __repr__(self):
return "<TimeRange: %r to %r on %r>" % (
self.start, self.end, self.days_of_week
)
def days_conflict(self, days):
for day in self.days_of_week:
if day in days:
return True
return False
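    # A (days, start, end) period is "in" this range when it shares at least one
    # day with the range and the two time intervals overlap.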
def __contains__(self, period):
days, start, end = period
return self.days_conflict(days) and (
self.start <= start <= self.end or
start <= self.start <= end or
self.start <= end <= self.end or
start <= self.end <= end
)
def conflicts_with(self, section):
"Returns True if the given section conflicts with this time range."
for p in section.periods:
t = (p.int_days, p.start, p.end)
if t in self:
return True
return False
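# Pairwise solver constraint: two chosen sections are compatible when neither
# conflicts with the other; nil placeholders always satisfy the constraint.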
def section_constraint(section1, section2):
if is_nil(section1) or is_nil(section2):
return True
return not section1.conflicts_with(section2)
class Scheduler(object):
"""High-level API that wraps the course scheduling feature.
    ``free_sections_only``: bool. Determines if only the available sections should be
    used for the provided courses. Defaults to True.
``problem``: Optional problem instance to provide. If None, the default one is created.
"""
def __init__(self, free_sections_only=True, problem=None, constraint=None):
self.p = Problem()
if problem is not None:
self.p = problem
self.free_sections_only = free_sections_only
self.section_constraint = constraint or section_constraint
self.clear_excluded_times()
def clear_excluded_times(self):
"""Clears all previously set excluded times."""
self._excluded_times = []
return self
def exclude_time(self, start, end, days):
"""Added an excluded time by start, end times and the days.
``start`` and ``end`` are in military integer times (e.g. - 1200 1430).
``days`` is a collection of integers or strings of fully-spelt, lowercased days
of the week.
"""
self._excluded_times.append(TimeRange(start, end, days))
return self
def exclude_times(self, *tuples):
"""Adds multiple excluded times by tuple of (start, end, days) or by
TimeRange instance.
``start`` and ``end`` are in military integer times (e.g. - 1200 1430).
``days`` is a collection of integers or strings of fully-spelt, lowercased days
of the week.
"""
for item in tuples:
if isinstance(item, TimeRange):
self._excluded_times.append(item)
else:
self.exclude_time(*item)
return self
def find_schedules(self, courses=None, return_generator=False):
"""Returns all the possible course combinations. Assumes no duplicate courses.
``return_generator``: If True, returns a generator instead of collection. Generators
are friendlier to your memory and save computation time if not all solutions are
used.
"""
self.p.reset()
self.create_variables(courses)
self.create_constraints(courses)
if return_generator:
return self.p.iter_solutions()
return self.p.get_solutions()
# internal methods -- can be overriden for custom use.
def get_sections(self, course):
"""Internal use. Returns the sections to use for the solver for a given course.
"""
return course.available_sections if self.free_sections_only else course.sections
def time_conflict(self, schedule):
"""Internal use. Determines when the given time range conflicts with the set of
excluded time ranges.
"""
if is_nil(schedule):
return True
for timerange in self._excluded_times:
if timerange.conflicts_with(schedule):
return False
return True
def create_variables(self, courses):
"""Internal use. Creates all variables in the problem instance for the given
courses. If given a dict of {course: sections}, will use the provided sections.
"""
has_sections = isinstance(courses, dict)
for course in courses:
self.p.add_variable(course, courses.get(course, []) if has_sections else self.get_sections(course))
def create_constraints(self, courses):
"""Internal use. Creates all constraints in the problem instance for the given
courses.
"""
for i, course1 in enumerate(courses):
for j, course2 in enumerate(courses):
if i <= j:
continue
self.p.add_constraint(self.section_constraint, [course1, course2])
self.p.add_constraint(self.time_conflict, [course1])
def compute_schedules(courses=None, excluded_times=(), free_sections_only=True, problem=None, return_generator=False, section_constraint=None):
"""
Returns all possible schedules for the given courses.
"""
s = Scheduler(free_sections_only, problem, constraint=section_constraint)
s.exclude_times(*tuple(excluded_times))
return s.find_schedules(courses, return_generator)
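# Usage sketch (illustrative only; the course objects come from the rpi_courses
# models and are assumed to expose `sections` / `available_sections`):
#
#   schedules = compute_schedules(
#       courses,
#       excluded_times=[(1200, 1350, ['monday', 'thursday'])],
#       return_generator=True)
#   for schedule in schedules:
#       print schedule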
| mit | -3,827,083,058,477,227,000 | 35.948052 | 143 | 0.614411 | false | 4.310606 | false | false | false |
urinieto/msaf-gpl | setup.py | 1 | 4267 | from setuptools import setup, Extension, find_packages
import glob
import imp
import sys
import numpy.distutils.misc_util
version = imp.load_source('msaf.version', 'msaf/version.py')
# Compile the CC algorithm
extra_compile_flags = ""
extra_linker_flags = ""
if "linux" in sys.platform:
extra_compile_flags = "-std=c++11 -DUSE_PTHREADS"
extra_linker_flags = "-llapack -lblas -lm"
elif "darwin" in sys.platform:
extra_compile_flags = "-DUSE_PTHREADS"
extra_linker_flags = "-framework Accelerate"
cc_path = "msaf/algorithms/cc/"
cc_segmenter = Extension(cc_path + "cc_segmenter",
sources=[cc_path + "base/Pitch.cpp",
cc_path + "dsp/chromagram/Chromagram.cpp",
cc_path + "dsp/chromagram/ConstantQ.cpp",
cc_path + "dsp/mfcc/MFCC.cpp",
cc_path + "dsp/onsets/DetectionFunction.cpp",
cc_path + "dsp/phasevocoder/PhaseVocoder.cpp",
cc_path + "dsp/rateconversion/Decimator.cpp",
cc_path + "dsp/segmentation/cluster_melt.c",
cc_path + "dsp/segmentation/ClusterMeltSegmenter.cpp",
cc_path + "dsp/segmentation/cluster_segmenter.c",
cc_path + "dsp/segmentation/Segmenter.cpp",
cc_path + "dsp/transforms/FFT.cpp",
cc_path + "hmm/hmm.c",
cc_path + "maths/Correlation.cpp",
cc_path + "maths/CosineDistance.cpp",
cc_path + "maths/MathUtilities.cpp",
cc_path + "maths/pca/pca.c",
cc_path + "main.cpp"
],
include_dirs=[cc_path + "dsp/segmentation",
cc_path,
cc_path + "include"],
libraries=["stdc++"],
extra_compile_args=[extra_compile_flags],
extra_link_args=[extra_linker_flags],
language="c++")
# MSAF configuration
setup(
name='msaf',
version=version.version,
description='Python module to discover the structure of music files',
author='Oriol Nieto',
author_email='[email protected]',
url='https://github.com/urinieto/msaf',
download_url='https://github.com/urinieto/msaf/releases',
packages=find_packages(),
package_data={'msaf': ['algorithms/olda/models/*.npy']},
data_files=[('msaf/algorithms/olda/models',
glob.glob('msaf/algorithms/olda/models/*.npy'))],
long_description="""A python module to segment audio into all its """
"""different large-scale sections and label them based on their """
"""acoustic similarity""",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Multimedia :: Sound/Audio :: Analysis",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6"
],
keywords='audio music sound',
license='GPL',
install_requires=[
'audioread',
'cvxopt',
'decorator',
'enum34',
'future',
'jams >= 0.3.0',
'joblib',
'librosa >= 0.6.0',
'mir_eval',
'matplotlib >= 1.5',
'numpy >= 1.8.0',
'pandas',
'scikit-learn >= 0.17.0',
'scipy >= 0.13.0',
'seaborn', # For notebook example (but everyone should have this :-))
'vmo >= 0.3.3'
],
extras_require={
'resample': 'scikits.samplerate>=0.3'
},
ext_modules=[cc_segmenter],
include_dirs=numpy.distutils.misc_util.get_numpy_include_dirs()
)
| gpl-2.0 | 6,130,109,003,087,455,000 | 40.833333 | 88 | 0.511366 | false | 3.98041 | false | false | false |
misan/RepRap-iTopie | odmt/ezdxf/entity.py | 3 | 7057 | # Purpose: entity module
# Created: 11.03.2011
# Copyright (C) 2011, Manfred Moitzi
# License: MIT License
from __future__ import unicode_literals
__author__ = "mozman <[email protected]>"
from .tags import cast_tag_value, DXFTag, DXFStructureError
class DXFNamespace(object):
""" Provides the dxf namespace for GenericWrapper.
"""
__slots__ = ('_setter', '_getter')
def __init__(self, wrapper):
# DXFNamespace.__setattr__ can not set _getter and _setter
super(DXFNamespace, self).__setattr__('_getter', wrapper.get_dxf_attrib)
super(DXFNamespace, self).__setattr__('_setter', wrapper.set_dxf_attrib)
def __getattr__(self, attrib):
"""Returns value of DXF attribute *attrib*. usage: value = GenericWrapper.dxf.attrib
"""
return self._getter(attrib)
def __setattr__(self, attrib, value):
"""Set DXF attribute *attrib* to *value. usage: GenericWrapper.dxf.attrib = value
"""
self._setter(attrib, value)
# noinspection PyUnresolvedReferences
class GenericWrapper(object):
TEMPLATE = None
DXFATTRIBS = {}
def __init__(self, tags):
self.tags = tags
self.dxf = DXFNamespace(self) # all DXF attributes are accessible by the dxf attribute, like entity.dxf.handle
@classmethod
def new(cls, handle, dxfattribs=None, dxffactory=None):
if cls.TEMPLATE is None:
raise NotImplementedError("new() for type %s not implemented." % cls.__name__)
entity = cls(cls.TEMPLATE.clone())
entity.dxf.handle = handle
if dxfattribs is not None:
entity.update_dxf_attribs(dxfattribs)
entity.post_new_hook()
return entity
def post_new_hook(self):
pass
def dxftype(self):
return self.tags.noclass[0].value
def has_dxf_attrib(self, key):
return key in self.DXFATTRIBS
def get_dxf_attrib(self, key, default=ValueError):
try:
dxfattr = self.DXFATTRIBS[key]
return self._get_dxf_attrib(dxfattr)
except KeyError:
raise AttributeError(key)
except ValueError:
if default is ValueError:
raise ValueError("DXFAttrib '%s' does not exist." % key)
else:
return default
def set_dxf_attrib(self, key, value):
try:
dxfattr = self.DXFATTRIBS[key]
except KeyError:
raise AttributeError(key)
        # tags without a subclass marker are stored at subclass index 0
subclasstags = self.tags.subclasses[dxfattr.subclass]
if dxfattr.xtype is not None:
tags = DXFExtendedPointType(subclasstags)
tags.set_value(dxfattr.code, dxfattr.xtype, value)
else:
self._set_tag(subclasstags, dxfattr.code, value)
def clone_dxf_attribs(self):
dxfattribs = {}
for key in self.DXFATTRIBS.keys():
try:
dxfattribs[key] = self.get_dxf_attrib(key)
except ValueError:
pass
return dxfattribs
def update_dxf_attribs(self, dxfattribs):
for key, value in dxfattribs.items():
self.set_dxf_attrib(key, value)
def _get_dxf_attrib(self, dxfattr):
        # tags without a subclass marker are stored at subclass index 0
subclass_tags = self.tags.subclasses[dxfattr.subclass]
if dxfattr.xtype is not None:
tags = DXFExtendedPointType(subclass_tags)
return tags.get_value(dxfattr.code, dxfattr.xtype)
else:
return subclass_tags.get_value(dxfattr.code)
def _get_extended_type(self, code, xtype):
tags = DXFExtendedPointType(self.tags)
return tags.get_value(code, xtype)
def _set_extended_type(self, code, xtype, value):
tags = DXFExtendedPointType(self.tags)
return tags.set_value(code, xtype, value)
@staticmethod
def _set_tag(tags, code, value):
tags.set_first(code, cast_tag_value(code, value))
class DXFExtendedPointType(object):
def __init__(self, tags):
self.tags = tags
def get_value(self, code, xtype):
if xtype == 'Point2D':
return self._get_fix_point(code, axis=2)
elif xtype == 'Point3D':
return self._get_fix_point(code, axis=3)
elif xtype == 'Point2D/3D':
return self._get_flexible_point(code)
else:
raise TypeError('Unknown extended type: %s' % xtype)
def _get_fix_point(self, code, axis):
point = self._get_point(code)
if len(point) != axis:
raise DXFStructureError('Invalid axis count for code: %d' % code)
return point
def _get_point(self, code):
index = self._point_index(code)
return tuple(
(tag.value for x, tag in enumerate(self.tags[index:index + 3])
if tag.code == code + x * 10)
)
def _point_index(self, code):
return self.tags.tag_index(code)
def _get_flexible_point(self, code):
point = self._get_point(code)
if len(point) in (2, 3):
return point
else:
raise DXFStructureError('Invalid axis count for code: %d' % code)
def set_value(self, code, xtype, value):
def set_point(code, axis):
if len(value) != axis:
raise ValueError('%d axis required' % axis)
if self._count_axis(code) != axis:
raise DXFStructureError('Invalid axis count for code: %d' % code)
self._set_point(code, value)
if xtype == 'Point2D':
set_point(code, axis=2)
elif xtype == 'Point3D':
set_point(code, axis=3)
elif xtype == 'Point2D/3D':
self._set_flexible_point(code, value)
else:
raise TypeError('Unknown extended type: %s' % xtype)
def _set_point(self, code, value):
def set_tag(index, tag):
if self.tags[index].code == tag.code:
self.tags[index] = tag
else:
raise DXFStructureError('DXF coordinate error')
index = self._point_index(code)
for x, coord in enumerate(value):
set_tag(index + x, DXFTag(code + x * 10, float(coord)))
def _set_flexible_point(self, code, value):
def append_axis():
index = self._point_index(code)
self.tags.insert(index + 2, DXFTag(code + 20, 0.0))
def remove_axis():
index = self._point_index(code)
self.tags.pop(index + 2)
new_axis = len(value)
if new_axis not in (2, 3):
raise ValueError("2D or 3D point required (tuple).")
old_axis = self._count_axis(code)
if old_axis > 1:
if new_axis == 2 and old_axis == 3:
remove_axis()
elif new_axis == 3 and old_axis == 2:
append_axis()
else:
raise DXFStructureError("Invalid axis count of point.")
self._set_point(code, value)
def _count_axis(self, code):
return len(self._get_point(code))
| gpl-3.0 | 8,453,596,017,149,558,000 | 32.76555 | 119 | 0.582117 | false | 3.671696 | false | false | false |
DropMuse/DropMuse | app/recommendation.py | 1 | 8083 | from lightfm import LightFM
import pickle
import db_utils
import scipy
import numpy as np
import numpy.linalg as la
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
regex_tokenizer = RegexpTokenizer(r'\w+')
eng_stopwords = stopwords.words('english')
NUM_COMPONENTS = 30
NUM_EPOCHS = 20
MODEL_LOCATION = 'lightfm_model.pickle'
def get_interactions(engine):
num_playlists = db_utils.playlist_max_id(engine)
num_songs = db_utils.song_max_id(engine)
interactions = scipy.sparse.lil_matrix((num_playlists+1, num_songs+1))
plist_records = db_utils.get_playlist_interactions(engine)
for r in plist_records:
interaction_value = 2 if r.vote else 1
interactions[r.playlist_id, r.song_id] = interaction_value
return interactions
def get_audio_analysis_features(engine):
features = db_utils.song_audio_features(engine)
num_songs = db_utils.song_max_id(engine)
feature_mat = scipy.sparse.lil_matrix((num_songs+1, 4))
for s in features:
pitch = s.pi or 0
harmonic = s.h or 0
percussive = s.pe or 0
temp = s.t or 0
feature_mat[s.id] = np.array([pitch, harmonic, percussive, temp])
return feature_mat
def artist_matrix(engine):
'''
Returns matrix of shape (num_songs, num_artists)
'''
songs = db_utils.song_artists(engine)
num_songs = db_utils.song_max_id(engine)
artists = set(s.artist for s in songs)
artist_indices = {s: i for i, s in enumerate(artists)}
artist_mat = scipy.sparse.lil_matrix((num_songs+1, len(artists)))
for s in songs:
artist_mat[s.id, artist_indices[s.artist]] = 1
return artist_mat
def get_item_features(engine):
'''
- Resultant matrix is of shape: (num_songs, num_features)
- Matrix can be indexed as (song_id, feature_idx)
'''
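    # The final matrix is the horizontal concatenation of:
    #   - 3 normalized sentiment columns (pos, neu, neg)
    #   - one TF-IDF keyword-weight column per vocabulary word
    #   - one-hot artist indicator columns
    #   - 4 audio-analysis columns (pitch, harmonic, percussive, tempo)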
sentiments = db_utils.song_sentiments(engine)
num_songs = db_utils.song_max_id(engine)
item_features = scipy.sparse.lil_matrix((num_songs+1, 3))
for s in sentiments:
pos = s.pos or 0
neu = s.neu or 0
neg = s.neg or 0
sent_arr = np.array([pos, neu, neg])
norm = la.norm(sent_arr)
if norm > 0:
item_features[s.id] = sent_arr / norm
keywords = keyword_sparse_matrix(engine)
artists = artist_matrix(engine)
audio = get_audio_analysis_features(engine)
results = scipy.sparse.hstack([item_features, keywords, artists, audio])
return results
def train_model(engine):
'''
interactions is of:
shape: (num_users, num_items)
        format: 2 if the interaction has a vote, 1 otherwise (0 = no interaction)
item_features is of:
shape: (num_items, num_features)
format: [pos_sent, neu_sent, neg_sent]
'''
model = load_model()
interactions = get_interactions(engine)
item_features = get_item_features(engine)
model.fit(interactions,
item_features=item_features,
epochs=NUM_EPOCHS)
dump_model(model)
return model
def get_recommendations(engine, playlist_id):
model = train_model(engine)
item_features = get_item_features(engine)
num_items = item_features.shape[0]
predictions = model.predict(playlist_id,
np.arange(num_items),
item_features=item_features)
return [int(i) for i in np.argsort(-predictions)]
def keyword_sparse_matrix(engine):
keyword_list = list(db_utils.all_keywords(engine))
keyword_dict = {}
curr_idx = 0
for k in keyword_list:
if k.word not in keyword_dict:
keyword_dict[k.word] = curr_idx
curr_idx += 1
num_songs = db_utils.song_max_id(engine)
keyword_mat = scipy.sparse.lil_matrix((num_songs + 1, curr_idx + 1))
for k in keyword_list:
keyword_mat[k.song_id, keyword_dict[k.word]] = k.weight
# Normalize rows
for r in range(keyword_mat.shape[0]):
norm = la.norm(keyword_mat.getrow(r).todense())
if norm > 0:
keyword_mat[r] = keyword_mat.getrow(r) / norm
return keyword_mat
def load_model():
'''
Loads LightFM model from file
Returns empty model if no pickled model found
'''
try:
with open(MODEL_LOCATION, 'rb') as f:
return pickle.load(f)
except IOError:
return LightFM(loss='warp',
no_components=NUM_COMPONENTS)
def dump_model(model):
'''
Saves LightFM model to file
'''
with open(MODEL_LOCATION, 'wb') as f:
pickle.dump(model, f)
def word_tokenize_no_punct(sent):
tokens = regex_tokenizer.tokenize(sent)
return [w.lower() for w in tokens
if w.lower() not in eng_stopwords and len(w) > 1]
def songs_to_vocab(songs):
vocab = set()
for s in songs:
if not s.lyrics:
continue
sent_vocab = set(word_tokenize_no_punct(s.lyrics))
vocab |= sent_vocab
return list(vocab)
def tf(mat, doc, term):
s = np.sum(mat.getrow(doc).todense())
if s != 0:
return mat[doc, term] / float(s)
return 0
def idf(mat, term):
s = mat.getcol(term).nnz
if s != 0:
return mat.shape[0] / float(s)
return 0
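# Keyword weights below use a plain tf * idf score:
#   tf(doc, term) = occurrences of term in doc / total term count of doc
#   idf(term)     = num_docs / num_docs containing term  (no log damping)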
def extract_keywords(engine):
'''
- Constructs a TFIDF of all lyrics of all songs
- Extracts the most meaningful keywords of each song
- Updates the keyword table accordingly
'''
songs = db_utils.song_lyrics(engine)
# lyrics = [s.lyrics if s.lyrics else "" for s in songs]
# tfidf = TfidfVectorizer(stop_words='english',
# max_df=0.7)
# tfidf_mat = tfidf.fit_transform(lyrics).toarray()
vocab = songs_to_vocab(songs)
w_indices = {k: idx for idx, k in enumerate(vocab)}
# Construct document term frequency matrix
# matrix
# (word_idx, doc_idx) => word_doc_count
matrix = scipy.sparse.lil_matrix((len(songs), len(vocab)))
for i, s in enumerate(songs):
if not s.lyrics:
continue
for w in word_tokenize_no_punct(s.lyrics):
matrix[i, w_indices[w]] += 1
# Calculate tfidf score for each term
# tfidf
# (word_idx, doc_idx) => word_doc_tfidf_score
tfidf = scipy.sparse.lil_matrix((len(songs), len(vocab)))
nzx, nzy = matrix.nonzero() # Only conerned w/ nonzero term entries
for i in range(len(nzx)):
doc_idx, term_idx = nzx[i], nzy[i]
term_freq = tf(matrix, doc_idx, term_idx)
inv_doc_freq = idf(matrix, term_idx)
tfidf[doc_idx, term_idx] = term_freq * inv_doc_freq
print "Calculated TFIDF for all songs."
# Do insertion for keywords of all songs
for i in range(len(songs)):
print "Inserting keywords ({}/{})".format(i, len(songs))
# Sort tfidf score descending, find 10 most relevant words
max_indices = (-tfidf.getrow(i).toarray()[0]).argsort()[:10]
song_id = songs[i].id
# Delete old keywords
db_utils.delete_song_keywords(engine, song_id)
for term_idx in max_indices:
if tfidf[i, term_idx] == 0:
continue
kw_str = vocab[int(term_idx)]
kw_weight = tfidf[i, term_idx]
# Do insertion into database
db_utils.add_song_keyword(engine,
song_id,
kw_str,
float(kw_weight))
def similar_songs(engine, song_id, num_results=5):
'''
    - Returns the ids of the songs most similar to the given song, ranked by cosine similarity
'''
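    # Cosine similarity between feature vectors u and v: (u . v) / (|u| * |v|);
    # higher values mean the two songs' feature vectors point in similar directions.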
features = get_item_features(engine)
sample_v = np.array(features.getrow(song_id).todense())
sample_norm = la.norm(sample_v)
cos_diffs = []
for i in range(features.shape[0]):
test_v = features.getrow(i).todense().T
norm = sample_norm * la.norm(test_v)
cos_diffs.append(np.dot(sample_v, test_v) / norm if norm != 0 else 0)
most_similar = np.argsort(-np.array(cos_diffs))
    similar_ids = [int(i) for i in most_similar if i != song_id][:num_results]
return similar_ids
| mit | 1,937,414,757,606,261,200 | 30.574219 | 77 | 0.610046 | false | 3.340083 | false | false | false |
mioann47/mobile-app-privacy-analyzer | mypythonscripts/AndroBugs_Framework-master/tools/modified/androguard/decompiler/dad/decompile.py | 7 | 17351 | # This file is part of Androguard.
#
# Copyright (c) 2012 Geoffroy Gueguen <[email protected]>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('./')
import logging
import struct
from collections import defaultdict
import tools.modified.androguard.core.androconf as androconf
import tools.modified.androguard.decompiler.dad.util as util
from tools.modified.androguard.core.analysis import analysis
from tools.modified.androguard.core.bytecodes import apk, dvm
from tools.modified.androguard.decompiler.dad.ast import (JSONWriter, parse_descriptor,
literal_string, literal_null, literal_int, literal_long, literal_float,
literal_double, literal_bool, literal_hex_int, dummy)
from tools.modified.androguard.decompiler.dad.control_flow import identify_structures
from tools.modified.androguard.decompiler.dad.dataflow import (build_def_use,
place_declarations,
dead_code_elimination,
register_propagation,
split_variables)
from tools.modified.androguard.decompiler.dad.graph import construct, simplify, split_if_nodes
from tools.modified.androguard.decompiler.dad.instruction import Param, ThisParam
from tools.modified.androguard.decompiler.dad.writer import Writer
from tools.modified.androguard.util import read
def auto_vm(filename):
ret = androconf.is_android(filename)
if ret == 'APK':
return dvm.DalvikVMFormat(apk.APK(filename).get_dex())
elif ret == 'DEX':
return dvm.DalvikVMFormat(read(filename))
elif ret == 'DEY':
return dvm.DalvikOdexVMFormat(read(filename))
return None
# No separate DvField class currently
def get_field_ast(field):
triple = field.get_class_name()[1:-1], field.get_name(), field.get_descriptor()
expr = None
if field.init_value:
val = field.init_value.value
expr = dummy(str(val))
if val is not None:
if field.get_descriptor() == 'Ljava/lang/String;':
expr = literal_string(val)
elif field.proto == 'B':
expr = literal_hex_int(struct.unpack('<b', val)[0])
return {
'triple': triple,
'type': parse_descriptor(field.get_descriptor()),
'flags': util.get_access_field(field.get_access_flags()),
'expr': expr,
}
class DvMethod(object):
def __init__(self, methanalysis):
method = methanalysis.get_method()
self.method = method
self.start_block = next(methanalysis.get_basic_blocks().get(), None)
self.cls_name = method.get_class_name()
self.name = method.get_name()
self.lparams = []
self.var_to_name = defaultdict()
self.writer = None
self.graph = None
self.ast = None
self.access = util.get_access_method(method.get_access_flags())
desc = method.get_descriptor()
self.type = desc.split(')')[-1]
self.params_type = util.get_params_type(desc)
self.triple = method.get_triple()
self.exceptions = methanalysis.exceptions.exceptions
code = method.get_code()
if code is None:
logger.debug('No code : %s %s', self.name, self.cls_name)
else:
start = code.registers_size - code.ins_size
if 'static' not in self.access:
self.var_to_name[start] = ThisParam(start, self.cls_name)
self.lparams.append(start)
start += 1
num_param = 0
for ptype in self.params_type:
param = start + num_param
self.lparams.append(param)
self.var_to_name[param] = Param(param, ptype)
num_param += util.get_type_size(ptype)
if not __debug__:
from tools.modified.androguard.core import bytecode
bytecode.method2png('/tmp/dad/graphs/%s#%s.png' % \
(self.cls_name.split('/')[-1][:-1], self.name), methanalysis)
def process(self, doAST=False):
logger.debug('METHOD : %s', self.name)
# Native methods... no blocks.
if self.start_block is None:
logger.debug('Native Method.')
if doAST:
self.ast = JSONWriter(None, self).get_ast()
else:
self.writer = Writer(None, self)
self.writer.write_method()
return
graph = construct(self.start_block, self.var_to_name, self.exceptions)
self.graph = graph
if not __debug__:
util.create_png(self.cls_name, self.name, graph, '/tmp/dad/blocks')
use_defs, def_uses = build_def_use(graph, self.lparams)
split_variables(graph, self.var_to_name, def_uses, use_defs)
dead_code_elimination(graph, def_uses, use_defs)
register_propagation(graph, def_uses, use_defs)
place_declarations(graph, self.var_to_name, def_uses, use_defs)
del def_uses, use_defs
# After the DCE pass, some nodes may be empty, so we can simplify the
# graph to delete these nodes.
        # We start by restructuring the graph by splitting the conditional nodes
# into a pre-header and a header part.
split_if_nodes(graph)
# We then simplify the graph by merging multiple statement nodes into
        # a single statement node when possible. This also deletes empty nodes.
simplify(graph)
graph.compute_rpo()
if not __debug__:
util.create_png(self.cls_name, self.name, graph,
'/tmp/dad/pre-structured')
identify_structures(graph, graph.immediate_dominators())
if not __debug__:
util.create_png(self.cls_name, self.name, graph,
'/tmp/dad/structured')
if doAST:
self.ast = JSONWriter(graph, self).get_ast()
else:
self.writer = Writer(graph, self)
self.writer.write_method()
def get_ast(self):
return self.ast
def show_source(self):
print self.get_source()
def get_source(self):
if self.writer:
return '%s' % self.writer
return ''
def get_source_ext(self):
if self.writer:
return self.writer.str_ext()
return []
def __repr__(self):
#return 'Method %s' % self.name
return 'class DvMethod(object): %s' % self.name
class DvClass(object):
def __init__(self, dvclass, vma):
name = dvclass.get_name()
if name.find('/') > 0:
pckg, name = name.rsplit('/', 1)
else:
pckg, name = '', name
self.package = pckg[1:].replace('/', '.')
self.name = name[:-1]
self.vma = vma
self.methods = dvclass.get_methods()
self.fields = dvclass.get_fields()
self.subclasses = {}
self.code = []
self.inner = False
access = dvclass.get_access_flags()
# If interface we remove the class and abstract keywords
if 0x200 & access:
prototype = '%s %s'
if access & 0x400:
access -= 0x400
else:
prototype = '%s class %s'
self.access = util.get_access_class(access)
self.prototype = prototype % (' '.join(self.access), self.name)
self.interfaces = dvclass.get_interfaces()
self.superclass = dvclass.get_superclassname()
self.thisclass = dvclass.get_name()
logger.info('Class : %s', self.name)
logger.info('Methods added :')
for meth in self.methods:
logger.info('%s (%s, %s)', meth.get_method_idx(), self.name, meth.name)
logger.info('')
def add_subclass(self, innername, dvclass):
self.subclasses[innername] = dvclass
dvclass.inner = True
def get_methods(self):
return self.methods
def process_method(self, num, doAST=False):
method = self.methods[num]
if not isinstance(method, DvMethod):
method.set_instructions([i for i in method.get_instructions()])
self.methods[num] = DvMethod(self.vma.get_method(method))
self.methods[num].process(doAST=doAST)
method.set_instructions([])
else:
method.process(doAST=doAST)
def process(self, doAST=False):
for klass in self.subclasses.values():
klass.process(doAST=doAST)
for i in range(len(self.methods)):
try:
self.process_method(i, doAST=doAST)
except Exception as e:
logger.debug(
'Error decompiling method %s: %s', self.methods[i], e)
def get_ast(self):
fields = [get_field_ast(f) for f in self.fields]
methods = [m.get_ast() for m in self.methods if m.ast is not None]
isInterface = 'interface' in self.access
return {
'rawname': self.thisclass[1:-1],
'name': parse_descriptor(self.thisclass),
'super': parse_descriptor(self.superclass),
'flags': self.access,
'isInterface': isInterface,
'interfaces': map(parse_descriptor, self.interfaces),
'fields': fields,
'methods': methods,
}
def get_source(self):
source = []
if not self.inner and self.package:
source.append('package %s;\n' % self.package)
superclass, prototype = self.superclass, self.prototype
if superclass is not None and superclass != 'Ljava/lang/Object;':
superclass = superclass[1:-1].replace('/', '.')
prototype += ' extends %s' % superclass
if len(self.interfaces) > 0:
prototype += ' implements %s' % ', '.join(
[n[1:-1].replace('/', '.') for n in self.interfaces])
source.append('%s {\n' % prototype)
for field in self.fields:
name = field.get_name()
access = util.get_access_field(field.get_access_flags())
f_type = util.get_type(field.get_descriptor())
source.append(' ')
if access:
source.append(' '.join(access))
source.append(' ')
if field.init_value:
value = field.init_value.value
if f_type == 'String':
value = '"%s"' % value
elif field.proto == 'B':
value = '0x%x' % struct.unpack('b', value)[0]
source.append('%s %s = %s;\n' % (f_type, name, value))
else:
source.append('%s %s;\n' % (f_type, name))
for klass in self.subclasses.values():
source.append(klass.get_source())
for method in self.methods:
if isinstance(method, DvMethod):
source.append(method.get_source())
source.append('}\n')
return ''.join(source)
def get_source_ext(self):
source = []
if not self.inner and self.package:
source.append(
('PACKAGE', [('PACKAGE_START', 'package '),
('NAME_PACKAGE', '%s' % self.package),
('PACKAGE_END', ';\n')]))
list_proto = []
list_proto.append(
('PROTOTYPE_ACCESS', '%s class ' % ' '.join(self.access)))
list_proto.append(('NAME_PROTOTYPE', '%s' % self.name, self.package))
superclass = self.superclass
if superclass is not None and superclass != 'Ljava/lang/Object;':
superclass = superclass[1:-1].replace('/', '.')
list_proto.append(('EXTEND', ' extends '))
list_proto.append(('NAME_SUPERCLASS', '%s' % superclass))
if len(self.interfaces) > 0:
list_proto.append(('IMPLEMENTS', ' implements '))
for i, interface in enumerate(self.interfaces):
if i != 0:
list_proto.append(('COMMA', ', '))
list_proto.append(
('NAME_INTERFACE', interface[1:-1].replace('/', '.')))
list_proto.append(('PROTOTYPE_END', ' {\n'))
source.append(("PROTOTYPE", list_proto))
for field in self.fields:
field_access_flags = field.get_access_flags()
access = [util.ACCESS_FLAGS_FIELDS[flag] for flag in
util.ACCESS_FLAGS_FIELDS if flag & field_access_flags]
f_type = util.get_type(field.get_descriptor())
name = field.get_name()
if access:
access_str = ' %s ' % ' '.join(access)
else:
access_str = ' '
source.append(
('FIELD', [('FIELD_ACCESS', access_str),
('FIELD_TYPE', '%s' % f_type),
('SPACE', ' '),
('NAME_FIELD', '%s' % name, f_type, field),
('FIELD_END', ';\n')]))
#TODO: call get_source_ext for each subclass?
for klass in self.subclasses.values():
source.append((klass, klass.get_source()))
for method in self.methods:
if isinstance(method, DvMethod):
source.append(("METHOD", method.get_source_ext()))
source.append(("CLASS_END", [('CLASS_END', '}\n')]))
return source
def show_source(self):
print self.get_source()
def __repr__(self):
if not self.subclasses:
return 'Class(%s)' % self.name
return 'Class(%s) -- Subclasses(%s)' % (self.name, self.subclasses)
class DvMachine(object):
def __init__(self, name):
vm = auto_vm(name)
if vm is None:
raise ValueError('Format not recognised: %s' % name)
self.vma = analysis.uVMAnalysis(vm)
self.classes = dict((dvclass.get_name(), dvclass)
for dvclass in vm.get_classes())
#util.merge_inner(self.classes)
def get_classes(self):
return self.classes.keys()
def get_class(self, class_name):
for name, klass in self.classes.iteritems():
if class_name in name:
if isinstance(klass, DvClass):
return klass
dvclass = self.classes[name] = DvClass(klass, self.vma)
return dvclass
def process(self):
for name, klass in self.classes.iteritems():
logger.info('Processing class: %s', name)
if isinstance(klass, DvClass):
klass.process()
else:
dvclass = self.classes[name] = DvClass(klass, self.vma)
dvclass.process()
def show_source(self):
for klass in self.classes.values():
klass.show_source()
def process_and_show(self):
for name, klass in sorted(self.classes.iteritems()):
logger.info('Processing class: %s', name)
if not isinstance(klass, DvClass):
klass = DvClass(klass, self.vma)
klass.process()
klass.show_source()
logger = logging.getLogger('dad')
sys.setrecursionlimit(5000)
def main():
    # Use logger.setLevel(logging.DEBUG) for debugging output;
    # comment out the line below to disable logging.
logger.setLevel(logging.INFO)
console_hdlr = logging.StreamHandler(sys.stdout)
console_hdlr.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
logger.addHandler(console_hdlr)
default_file = 'examples/android/TestsAndroguard/bin/TestActivity.apk'
if len(sys.argv) > 1:
machine = DvMachine(sys.argv[1])
else:
machine = DvMachine(default_file)
logger.info('========================')
logger.info('Classes:')
for class_name in sorted(machine.get_classes()):
logger.info(' %s', class_name)
logger.info('========================')
cls_name = raw_input('Choose a class: ')
if cls_name == '*':
machine.process_and_show()
else:
cls = machine.get_class(cls_name)
if cls is None:
logger.error('%s not found.', cls_name)
else:
logger.info('======================')
for i, method in enumerate(cls.get_methods()):
logger.info('%d: %s', i, method.name)
logger.info('======================')
meth = raw_input('Method: ')
if meth == '*':
logger.info('CLASS = %s', cls)
cls.process()
else:
cls.process_method(int(meth))
logger.info('Source:')
logger.info('===========================')
cls.show_source()
if __name__ == '__main__':
main()
| gpl-3.0 | -4,954,932,580,214,477,000 | 36.154176 | 94 | 0.55674 | false | 3.955095 | false | false | false |
caveman-dick/ansible | lib/ansible/modules/cloud/amazon/ec2_vpc_nacl.py | 29 | 19862 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = '''
module: ec2_vpc_nacl
short_description: create and delete Network ACLs.
description:
    - Read the AWS documentation for Network ACLs
U(http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html)
version_added: "2.2"
options:
name:
description:
- Tagged name identifying a network ACL.
- One and only one of the I(name) or I(nacl_id) is required.
required: false
nacl_id:
description:
- NACL id identifying a network ACL.
- One and only one of the I(name) or I(nacl_id) is required.
required: false
version_added: "2.4"
vpc_id:
description:
- VPC id of the requesting VPC.
- Required when state present.
required: false
subnets:
description:
- The list of subnets that should be associated with the network ACL.
- Must be specified as a list
- Each subnet can be specified as subnet ID, or its tagged name.
required: false
egress:
description:
- A list of rules for outgoing traffic.
- Each rule must be specified as a list.
required: false
ingress:
description:
- List of rules for incoming traffic.
- Each rule must be specified as a list.
required: false
tags:
description:
- Dictionary of tags to look for and apply when creating a network ACL.
required: false
state:
description:
- Creates or modifies an existing NACL
- Deletes a NACL and reassociates subnets to the default NACL
required: false
choices: ['present', 'absent']
default: present
author: Mike Mochan(@mmochan)
extends_documentation_fragment: aws
requirements: [ botocore, boto3, json ]
'''
EXAMPLES = '''
# Complete example to create and delete a network ACL
# that allows SSH, HTTP and ICMP in, and all traffic out.
- name: "Create and associate production DMZ network ACL with DMZ subnets"
ec2_vpc_nacl:
vpc_id: vpc-12345678
name: prod-dmz-nacl
region: ap-southeast-2
subnets: ['prod-dmz-1', 'prod-dmz-2']
tags:
CostCode: CC1234
Project: phoenix
Description: production DMZ
ingress: [
# rule no, protocol, allow/deny, cidr, icmp_code, icmp_type,
# port from, port to
[100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22],
[200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80],
[300, 'icmp', 'allow', '0.0.0.0/0', 0, 8],
]
egress: [
[100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
]
state: 'present'
- name: "Remove the ingress and egress rules - defaults to deny all"
ec2_vpc_nacl:
vpc_id: vpc-12345678
name: prod-dmz-nacl
region: ap-southeast-2
subnets:
- prod-dmz-1
- prod-dmz-2
tags:
CostCode: CC1234
Project: phoenix
Description: production DMZ
state: present
- name: "Remove the NACL subnet associations and tags"
ec2_vpc_nacl:
vpc_id: 'vpc-12345678'
name: prod-dmz-nacl
region: ap-southeast-2
state: present
- name: "Delete nacl and subnet associations"
ec2_vpc_nacl:
vpc_id: vpc-12345678
name: prod-dmz-nacl
state: absent
- name: "Delete nacl by its id"
ec2_vpc_nacl:
nacl_id: acl-33b4ee5b
state: absent
'''
RETURN = '''
task:
description: The result of the create, or delete action.
returned: success
type: dictionary
'''
try:
import botocore
import boto3
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
# Common fields for the default rule that is contained within every VPC NACL.
DEFAULT_RULE_FIELDS = {
'RuleNumber': 32767,
'RuleAction': 'deny',
'CidrBlock': '0.0.0.0/0',
'Protocol': '-1'
}
DEFAULT_INGRESS = dict(list(DEFAULT_RULE_FIELDS.items()) + [('Egress', False)])
DEFAULT_EGRESS = dict(list(DEFAULT_RULE_FIELDS.items()) + [('Egress', True)])
# VPC-supported IANA protocol numbers
# http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
PROTOCOL_NUMBERS = {'all': -1, 'icmp': 1, 'tcp': 6, 'udp': 17, }
#Utility methods
def icmp_present(entry):
if len(entry) == 6 and entry[1] == 'icmp' or entry[1] == 1:
return True
def load_tags(module):
tags = []
if module.params.get('tags'):
for name, value in module.params.get('tags').items():
tags.append({'Key': name, 'Value': str(value)})
tags.append({'Key': "Name", 'Value': module.params.get('name')})
else:
tags.append({'Key': "Name", 'Value': module.params.get('name')})
return tags
def subnets_removed(nacl_id, subnets, client, module):
results = find_acl_by_id(nacl_id, client, module)
associations = results['NetworkAcls'][0]['Associations']
subnet_ids = [assoc['SubnetId'] for assoc in associations]
return [subnet for subnet in subnet_ids if subnet not in subnets]
def subnets_added(nacl_id, subnets, client, module):
results = find_acl_by_id(nacl_id, client, module)
associations = results['NetworkAcls'][0]['Associations']
subnet_ids = [assoc['SubnetId'] for assoc in associations]
return [subnet for subnet in subnets if subnet not in subnet_ids]
def subnets_changed(nacl, client, module):
changed = False
vpc_id = module.params.get('vpc_id')
nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
subnets = subnets_to_associate(nacl, client, module)
if not subnets:
default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)[0]
subnets = find_subnet_ids_by_nacl_id(nacl_id, client, module)
if subnets:
replace_network_acl_association(default_nacl_id, subnets, client, module)
changed = True
return changed
changed = False
return changed
subs_added = subnets_added(nacl_id, subnets, client, module)
if subs_added:
replace_network_acl_association(nacl_id, subs_added, client, module)
changed = True
subs_removed = subnets_removed(nacl_id, subnets, client, module)
if subs_removed:
default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)[0]
replace_network_acl_association(default_nacl_id, subs_removed, client, module)
changed = True
return changed
def nacls_changed(nacl, client, module):
changed = False
params = dict()
params['egress'] = module.params.get('egress')
params['ingress'] = module.params.get('ingress')
nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
nacl = describe_network_acl(client, module)
entries = nacl['NetworkAcls'][0]['Entries']
    tmp_egress = [entry for entry in entries if entry['Egress'] is True and DEFAULT_EGRESS != entry]
tmp_ingress = [entry for entry in entries if entry['Egress'] is False]
egress = [rule for rule in tmp_egress if DEFAULT_EGRESS != rule]
ingress = [rule for rule in tmp_ingress if DEFAULT_INGRESS != rule]
if rules_changed(egress, params['egress'], True, nacl_id, client, module):
changed = True
if rules_changed(ingress, params['ingress'], False, nacl_id, client, module):
changed = True
return changed
def tags_changed(nacl_id, client, module):
changed = False
tags = dict()
if module.params.get('tags'):
tags = module.params.get('tags')
tags['Name'] = module.params.get('name')
nacl = find_acl_by_id(nacl_id, client, module)
if nacl['NetworkAcls']:
nacl_values = [t.values() for t in nacl['NetworkAcls'][0]['Tags']]
nacl_tags = [item for sublist in nacl_values for item in sublist]
tag_values = [[key, str(value)] for key, value in tags.items()]
tags = [item for sublist in tag_values for item in sublist]
if sorted(nacl_tags) == sorted(tags):
changed = False
return changed
else:
delete_tags(nacl_id, client, module)
create_tags(nacl_id, client, module)
changed = True
return changed
return changed
def rules_changed(aws_rules, param_rules, Egress, nacl_id, client, module):
changed = False
rules = list()
for entry in param_rules:
rules.append(process_rule_entry(entry, Egress))
if rules == aws_rules:
return changed
else:
removed_rules = [x for x in aws_rules if x not in rules]
if removed_rules:
params = dict()
for rule in removed_rules:
params['NetworkAclId'] = nacl_id
params['RuleNumber'] = rule['RuleNumber']
params['Egress'] = Egress
delete_network_acl_entry(params, client, module)
changed = True
added_rules = [x for x in rules if x not in aws_rules]
if added_rules:
for rule in added_rules:
rule['NetworkAclId'] = nacl_id
create_network_acl_entry(rule, client, module)
changed = True
return changed
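# process_rule_entry maps one positional rule entry onto the keyword arguments
# expected by create/replace_network_acl_entry: entry[4] and entry[5] fill the
# ICMP Type/Code pair, and entry[6]/entry[7] fill the port range.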
def process_rule_entry(entry, Egress):
params = dict()
params['RuleNumber'] = entry[0]
params['Protocol'] = str(PROTOCOL_NUMBERS[entry[1]])
params['RuleAction'] = entry[2]
params['Egress'] = Egress
params['CidrBlock'] = entry[3]
if icmp_present(entry):
params['IcmpTypeCode'] = {"Type": int(entry[4]), "Code": int(entry[5])}
else:
if entry[6] or entry[7]:
params['PortRange'] = {"From": entry[6], 'To': entry[7]}
return params
def restore_default_associations(assoc_ids, default_nacl_id, client, module):
if assoc_ids:
params = dict()
params['NetworkAclId'] = default_nacl_id[0]
for assoc_id in assoc_ids:
params['AssociationId'] = assoc_id
restore_default_acl_association(params, client, module)
return True
def construct_acl_entries(nacl, client, module):
for entry in module.params.get('ingress'):
params = process_rule_entry(entry, Egress=False)
params['NetworkAclId'] = nacl['NetworkAcl']['NetworkAclId']
create_network_acl_entry(params, client, module)
for rule in module.params.get('egress'):
params = process_rule_entry(rule, Egress=True)
params['NetworkAclId'] = nacl['NetworkAcl']['NetworkAclId']
create_network_acl_entry(params, client, module)
## Module invocations
def setup_network_acl(client, module):
changed = False
nacl = describe_network_acl(client, module)
if not nacl['NetworkAcls']:
nacl = create_network_acl(module.params.get('vpc_id'), client, module)
nacl_id = nacl['NetworkAcl']['NetworkAclId']
create_tags(nacl_id, client, module)
subnets = subnets_to_associate(nacl, client, module)
replace_network_acl_association(nacl_id, subnets, client, module)
construct_acl_entries(nacl, client, module)
changed = True
return(changed, nacl['NetworkAcl']['NetworkAclId'])
else:
changed = False
nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
subnet_result = subnets_changed(nacl, client, module)
nacl_result = nacls_changed(nacl, client, module)
tag_result = tags_changed(nacl_id, client, module)
if subnet_result is True or nacl_result is True or tag_result is True:
changed = True
return(changed, nacl_id)
return (changed, nacl_id)
def remove_network_acl(client, module):
changed = False
result = dict()
nacl = describe_network_acl(client, module)
if nacl['NetworkAcls']:
nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
vpc_id = nacl['NetworkAcls'][0]['VpcId']
associations = nacl['NetworkAcls'][0]['Associations']
assoc_ids = [a['NetworkAclAssociationId'] for a in associations]
default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)
if not default_nacl_id:
result = {vpc_id: "Default NACL ID not found - Check the VPC ID"}
return changed, result
if restore_default_associations(assoc_ids, default_nacl_id, client, module):
delete_network_acl(nacl_id, client, module)
changed = True
result[nacl_id] = "Successfully deleted"
return changed, result
if not assoc_ids:
delete_network_acl(nacl_id, client, module)
changed = True
result[nacl_id] = "Successfully deleted"
return changed, result
return changed, result
#Boto3 client methods
def create_network_acl(vpc_id, client, module):
try:
if module.check_mode:
nacl = dict(NetworkAcl=dict(NetworkAclId="nacl-00000000"))
else:
nacl = client.create_network_acl(VpcId=vpc_id)
except botocore.exceptions.ClientError as e:
module.fail_json(msg=str(e))
return nacl
def create_network_acl_entry(params, client, module):
try:
if not module.check_mode:
client.create_network_acl_entry(**params)
except botocore.exceptions.ClientError as e:
module.fail_json(msg=str(e))
def create_tags(nacl_id, client, module):
try:
delete_tags(nacl_id, client, module)
if not module.check_mode:
client.create_tags(Resources=[nacl_id], Tags=load_tags(module))
except botocore.exceptions.ClientError as e:
module.fail_json(msg=str(e))
def delete_network_acl(nacl_id, client, module):
try:
if not module.check_mode:
client.delete_network_acl(NetworkAclId=nacl_id)
except botocore.exceptions.ClientError as e:
module.fail_json(msg=str(e))
def delete_network_acl_entry(params, client, module):
try:
if not module.check_mode:
client.delete_network_acl_entry(**params)
except botocore.exceptions.ClientError as e:
module.fail_json(msg=str(e))
def delete_tags(nacl_id, client, module):
try:
if not module.check_mode:
client.delete_tags(Resources=[nacl_id])
except botocore.exceptions.ClientError as e:
module.fail_json(msg=str(e))
def describe_acl_associations(subnets, client, module):
if not subnets:
return []
try:
results = client.describe_network_acls(Filters=[
{'Name': 'association.subnet-id', 'Values': subnets}
])
except botocore.exceptions.ClientError as e:
module.fail_json(msg=str(e))
associations = results['NetworkAcls'][0]['Associations']
return [a['NetworkAclAssociationId'] for a in associations if a['SubnetId'] in subnets]
def describe_network_acl(client, module):
try:
if module.params.get('nacl_id'):
nacl = client.describe_network_acls(Filters=[
{'Name': 'network-acl-id', 'Values': [module.params.get('nacl_id')]}
])
else:
nacl = client.describe_network_acls(Filters=[
{'Name': 'tag:Name', 'Values': [module.params.get('name')]}
])
except botocore.exceptions.ClientError as e:
module.fail_json(msg=str(e))
return nacl
def find_acl_by_id(nacl_id, client, module):
try:
return client.describe_network_acls(NetworkAclIds=[nacl_id])
except botocore.exceptions.ClientError as e:
module.fail_json(msg=str(e))
def find_default_vpc_nacl(vpc_id, client, module):
try:
response = client.describe_network_acls(Filters=[
{'Name': 'vpc-id', 'Values': [vpc_id]}])
except botocore.exceptions.ClientError as e:
module.fail_json(msg=str(e))
nacls = response['NetworkAcls']
return [n['NetworkAclId'] for n in nacls if n['IsDefault'] is True]
def find_subnet_ids_by_nacl_id(nacl_id, client, module):
try:
results = client.describe_network_acls(Filters=[
{'Name': 'association.network-acl-id', 'Values': [nacl_id]}
])
except botocore.exceptions.ClientError as e:
module.fail_json(msg=str(e))
if results['NetworkAcls']:
associations = results['NetworkAcls'][0]['Associations']
return [s['SubnetId'] for s in associations if s['SubnetId']]
else:
return []
def replace_network_acl_association(nacl_id, subnets, client, module):
params = dict()
params['NetworkAclId'] = nacl_id
for association in describe_acl_associations(subnets, client, module):
params['AssociationId'] = association
try:
if not module.check_mode:
client.replace_network_acl_association(**params)
except botocore.exceptions.ClientError as e:
module.fail_json(msg=str(e))
def replace_network_acl_entry(entries, Egress, nacl_id, client, module):
params = dict()
for entry in entries:
params = entry
params['NetworkAclId'] = nacl_id
try:
if not module.check_mode:
client.replace_network_acl_entry(**params)
except botocore.exceptions.ClientError as e:
module.fail_json(msg=str(e))
def restore_default_acl_association(params, client, module):
try:
if not module.check_mode:
client.replace_network_acl_association(**params)
except botocore.exceptions.ClientError as e:
module.fail_json(msg=str(e))
def subnets_to_associate(nacl, client, module):
params = list(module.params.get('subnets'))
if not params:
return []
if params[0].startswith("subnet-"):
try:
subnets = client.describe_subnets(Filters=[
{'Name': 'subnet-id', 'Values': params}])
except botocore.exceptions.ClientError as e:
module.fail_json(msg=str(e))
else:
try:
subnets = client.describe_subnets(Filters=[
{'Name': 'tag:Name', 'Values': params}])
except botocore.exceptions.ClientError as e:
module.fail_json(msg=str(e))
return [s['SubnetId'] for s in subnets['Subnets'] if s['SubnetId']]
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
vpc_id=dict(),
name=dict(),
nacl_id=dict(),
subnets=dict(required=False, type='list', default=list()),
tags=dict(required=False, type='dict'),
ingress=dict(required=False, type='list', default=list()),
egress=dict(required=False, type='list', default=list(),),
state=dict(default='present', choices=['present', 'absent']),
),
)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=[['name', 'nacl_id']],
required_if=[['state', 'present', ['vpc_id']]])
if not HAS_BOTO3:
module.fail_json(msg='json, botocore and boto3 are required.')
state = module.params.get('state').lower()
try:
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
client = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
except botocore.exceptions.NoCredentialsError as e:
module.fail_json(msg="Can't authorize connection - %s" % str(e))
invocations = {
"present": setup_network_acl,
"absent": remove_network_acl
}
(changed, results) = invocations[state](client, module)
module.exit_json(changed=changed, nacl_id=results)
if __name__ == '__main__':
main()
| gpl-3.0 | 886,884,641,792,100,200 | 33.602787 | 126 | 0.627127 | false | 3.643735 | false | false | false |
neuromat/nira | scientific_mission/forms.py | 1 | 4327 | # -*- coding: utf-8 -*-
from cities_light.models import City
from configuration.models import ProcessNumber, PrincipalInvestigator
from dal import autocomplete
from .models import ScientificMission, Route
from django import forms
from django.utils.translation import ugettext_lazy as _
from person.models import Person
from helpers.forms.date_range import DateInput
annex_seven_choices = ((0, '----------------'),
(1, 'transporte aéreo'),
(2, 'transporte terrestre'),
(3, 'seguro saúde'))
class ProcessField(forms.CharField):
def to_python(self, value):
# Return an empty list if no input was given.
return value
def validate(self, value):
"""Check if value consists only of valid emails."""
# Use the parent's handling of required fields, etc.
super(ProcessField, self).validate(value)
def clean(self, value):
return value
class RouteForm(forms.ModelForm):
class Meta:
model = Route
fields = ('origin_city', 'destination_city', 'departure', 'arrival')
widgets = {
'origin_city': autocomplete.ModelSelect2(url='city_autocomplete'),
'destination_city': autocomplete.ModelSelect2(url='city_autocomplete'),
}
class Media:
css = {
'all': ('/static/css/inline_autocomplete.css',)
}
class ScientificMissionForm(forms.ModelForm):
class Meta:
model = ScientificMission
fields = '__all__'
widgets = {
'destination_city': autocomplete.ModelSelect2(url='city_autocomplete'),
}
localized_fields = ('amount_paid',)
class AnnexSixForm(forms.Form):
# process = ProcessNumber.get_solo()
value = forms.DecimalField(label=_('Value'), max_digits=10, decimal_places=2, required=True)
start_date = forms.DateField(label=_('Start date'), widget=DateInput, required=False)
end_date = forms.DateField(label=_('End date'), widget=DateInput, required=False)
# process = ProcessField(label=_('Process'), widget=forms.TextInput(attrs={'placeholder': process.process_number}))
class Media:
css = {
'all': ('/static/css/inline_autocomplete.css',)
}
def clean(self):
cleaned_data = super(AnnexSixForm, self).clean()
daily_stipend = cleaned_data.get('daily_stipend')
process = cleaned_data.get('process')
class AnnexSevenForm(forms.Form):
try:
principal_investigator = PrincipalInvestigator.get_solo()
name = principal_investigator.name
except:
name = None
if name:
CHOICES = (
('1', 'FAPESP'),
('2', name),
)
else:
CHOICES = (
('1', 'FAPESP'),
)
# process = ProcessNumber.get_solo()
choice = forms.ChoiceField(label=_('Provider'), choices=CHOICES, required=True)
start_date = forms.DateField(label=_('Start date'), widget=DateInput, required=False)
end_date = forms.DateField(label=_('End date'), widget=DateInput, required=False)
stretch = forms.CharField(label=_('Stretch'), required=True)
reimbursement = forms.ChoiceField(label=_('Reimbursement'), choices=annex_seven_choices,
required=True)
person = forms.ModelChoiceField(label=_('Person'), queryset=Person.objects.all(),
empty_label="----------", required=True)
value = forms.DecimalField(label=_('Value'), max_digits=10, decimal_places=2, required=True)
# process = ProcessField(label=_('Process'), widget=forms.TextInput(
# attrs={'placeholder': process.process_number}))
class AnnexNineForm(forms.Form):
# process = ProcessNumber.get_solo()
job = forms.CharField(label=_('Job'), required=True)
person = forms.ModelChoiceField(label=_('Service provider'), queryset=Person.objects.all(),
empty_label="----------", required=True)
note = forms.BooleanField(label=_('Note'), initial=True, required=False)
value = forms.DecimalField(label=_('Value'), max_digits=10, decimal_places=2, required=True)
# process = ProcessField(label=_('Process'), widget=forms.TextInput(
# attrs={'placeholder': process.process_number}))
| mpl-2.0 | 8,598,163,446,868,805,000 | 33.325397 | 119 | 0.621734 | false | 4.061033 | false | false | false |
Sorsly/subtle | google-cloud-sdk/lib/surface/deployment_manager/type_providers/create.py | 3 | 3619 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""type-providers create command."""
from googlecloudsdk.api_lib.deployment_manager import dm_labels
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.deployment_manager import dm_beta_base
from googlecloudsdk.command_lib.deployment_manager import dm_write
from googlecloudsdk.command_lib.deployment_manager import flags
from googlecloudsdk.command_lib.deployment_manager import type_providers
from googlecloudsdk.command_lib.util import labels_util
from googlecloudsdk.core import log
def LogResource(request, async):
log.CreatedResource(request.typeProvider.name,
kind='type_provider',
async=async)
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class Create(base.CreateCommand):
"""Create a type provider.
This command inserts (creates) a new type provider based on a provided
configuration file.
"""
detailed_help = {
'DESCRIPTION': '{description}',
'EXAMPLES': """\
To create a new type provider, run:
$ {command} my-type-provider --api-options-file=my-options.yaml --descriptor-url <descriptor URL> --description "My type."
""",
}
@staticmethod
def Args(parser):
"""Args is called by calliope to gather arguments for this command.
Args:
parser: An argparse parser that you can use to add arguments that go
on the command line after this command. Positional arguments are
allowed.
"""
flags.AddAsyncFlag(parser)
type_providers.AddTypeProviderNameFlag(parser)
type_providers.AddApiOptionsFileFlag(parser)
type_providers.AddDescriptionFlag(parser)
type_providers.AddDescriptorUrlFlag(parser)
labels_util.AddCreateLabelsFlags(parser)
def Run(self, args):
"""Run 'type-providers create'.
Args:
args: argparse.Namespace, The arguments that this command was invoked
with.
Raises:
HttpException: An http error response was received while executing api
request.
"""
messages = dm_beta_base.GetMessages()
type_provider_ref = dm_beta_base.GetResources().Parse(
args.provider_name,
collection='deploymentmanager.typeProviders')
update_labels_dict = labels_util.GetUpdateLabelsDictFromArgs(args)
labels = dm_labels.UpdateLabels([],
messages.TypeProviderLabelEntry,
update_labels=update_labels_dict)
type_provider = messages.TypeProvider(
name=type_provider_ref.typeProvider,
description=args.description,
descriptorUrl=args.descriptor_url,
labels=labels)
type_providers.AddOptions(args.api_options_file, type_provider)
request = messages.DeploymentmanagerTypeProvidersInsertRequest(
project=type_provider_ref.project,
typeProvider=type_provider)
dm_write.Execute(request,
args.async,
dm_beta_base.GetClient().typeProviders.Insert,
LogResource)
| mit | -8,182,365,602,064,727,000 | 35.19 | 134 | 0.700193 | false | 4.293001 | false | false | false |
datahuborg/datahub | src/inventory/models.py | 2 | 4354 | from django.conf import settings
from django.db import models
from django.db.utils import IntegrityError
class DataHubLegacyUser(models.Model):
"""DataHub's old User model. Replaced by the Django User model."""
id = models.AutoField(primary_key=True)
email = models.CharField(max_length=100, unique=True)
username = models.CharField(max_length=50, unique=True)
f_name = models.CharField(max_length=50, null=True)
l_name = models.CharField(max_length=50, null=True)
password = models.CharField(max_length=50)
active = models.BooleanField(default=False)
def __unicode__(self):
return self.username
class Meta:
db_table = "datahub_legacy_users"
class Card(models.Model):
id = models.AutoField(primary_key=True)
timestamp = models.DateTimeField(auto_now=True)
repo_base = models.CharField(max_length=50)
repo_name = models.CharField(max_length=50)
card_name = models.CharField(max_length=50)
public = models.BooleanField(default=False)
query = models.TextField()
def __unicode__(self):
return 'card: %s.%s %s' % (self.repo_base,
self.repo_name, self.card_name)
class Meta:
db_table = "cards"
unique_together = ('repo_base', 'repo_name', 'card_name')
class Annotation(models.Model):
id = models.AutoField(primary_key=True)
timestamp = models.DateTimeField(auto_now=True)
url_path = models.CharField(max_length=500, unique=True)
annotation_text = models.TextField()
def __unicode__(self):
return self.url_path
class Meta:
db_table = "annotations"
# Thrift Apps
class App(models.Model):
id = models.AutoField(primary_key=True)
timestamp = models.DateTimeField(auto_now=True)
app_id = models.CharField(max_length=100, unique=True)
app_name = models.CharField(max_length=100)
app_token = models.CharField(max_length=500)
legacy_user = models.ForeignKey('DataHubLegacyUser', null=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True)
def __unicode__(self):
return self.app_name
class Meta:
db_table = "apps"
class Collaborator(models.Model):
id = models.AutoField(primary_key=True)
timestamp = models.DateTimeField(auto_now=True)
# user is the person permission is being granted to.
user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True)
app = models.ForeignKey('App', null=True)
repo_name = models.TextField()
repo_base = models.TextField()
permission = models.TextField() # e.g. 'SELECT, UPDATE, INSERT'
file_permission = models.TextField() # e.g. 'read, write'
license_id = models.IntegerField(default=-1)
def __unicode__(self):
if self.user:
c = self.user
elif self.app:
c = self.app
else:
c = ''
return "{base}.{repo}/{collaborator}".format(
base=self.repo_base, repo=self.repo_name, collaborator=c)
def save(self, *args, **kwargs):
if bool(self.user) == bool(self.app):
raise IntegrityError(
"Collaborator objects must have an associated user or app, "
"not both or neither.")
super(Collaborator, self).save(*args, **kwargs)
class Meta:
db_table = "collaborators"
unique_together = ('repo_name', 'repo_base', 'user', 'app')
class LicenseView(models.Model):
id = models.AutoField(primary_key=True)
start_date = models.DateTimeField(auto_now=True)
end_date = models.DateTimeField(auto_now=True)
# view_sql is the sql used to generate the view from the table
view_sql = models.TextField()
repo_name = models.TextField()
repo_base = models.TextField()
table = models.TextField()
license_id = models.IntegerField()
class Meta:
db_table = "license_views"
unique_together = ('repo_name', 'repo_base', 'table', 'license_id')
def __unicode__(self):
return """
Base: {base}\n Repo: {repo}\n Table: {table}\n
Viewsql: {view_sql}\n LicenseID: {license_id} \n ID: {id}\n
""".format(
base=self.repo_base,
repo=self.repo_name,
table=self.table,
view_sql=self.view_sql,
license_id=self.license_id,
id=self.id)
| mit | 5,546,509,452,174,952,000 | 32.236641 | 76 | 0.634819 | false | 3.640468 | false | false | false |
bpatyi/simpleCRM | crm/migrations/0005_auto_20161023_1605.py | 1 | 4426 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-23 16:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('crm', '0004_auto_20161023_1143'),
]
operations = [
migrations.RenameField(
model_name='inboundcontactaddress',
old_name='city',
new_name='administrative_area',
),
migrations.RenameField(
model_name='inboundcontactaddress',
old_name='address',
new_name='formatted_address',
),
migrations.RenameField(
model_name='individualaddress',
old_name='city',
new_name='administrative_area',
),
migrations.RenameField(
model_name='individualaddress',
old_name='address',
new_name='formatted_address',
),
migrations.RemoveField(
model_name='inboundcontactaddress',
name='zip_code',
),
migrations.RemoveField(
model_name='individualaddress',
name='zip_code',
),
migrations.AddField(
model_name='inboundcontactaddress',
name='county',
field=models.CharField(blank=True, max_length=127),
),
migrations.AddField(
model_name='inboundcontactaddress',
name='final_type',
field=models.CharField(blank=True, max_length=32),
),
migrations.AddField(
model_name='inboundcontactaddress',
name='latitude',
field=models.FloatField(blank=True, default=None),
),
migrations.AddField(
model_name='inboundcontactaddress',
name='locality',
field=models.CharField(blank=True, max_length=127),
),
migrations.AddField(
model_name='inboundcontactaddress',
name='longitude',
field=models.FloatField(blank=True, default=None),
),
migrations.AddField(
model_name='inboundcontactaddress',
name='postal_code',
field=models.CharField(blank=True, max_length=16),
),
migrations.AddField(
model_name='inboundcontactaddress',
name='postal_code_suffix',
field=models.CharField(blank=True, max_length=16),
),
migrations.AddField(
model_name='inboundcontactaddress',
name='route',
field=models.CharField(blank=True, max_length=255),
),
migrations.AddField(
model_name='inboundcontactaddress',
name='street_number',
field=models.IntegerField(blank=True, default=None),
),
migrations.AddField(
model_name='individualaddress',
name='county',
field=models.CharField(blank=True, max_length=127),
),
migrations.AddField(
model_name='individualaddress',
name='final_type',
field=models.CharField(blank=True, max_length=32),
),
migrations.AddField(
model_name='individualaddress',
name='latitude',
field=models.FloatField(blank=True, default=None),
),
migrations.AddField(
model_name='individualaddress',
name='locality',
field=models.CharField(blank=True, max_length=127),
),
migrations.AddField(
model_name='individualaddress',
name='longitude',
field=models.FloatField(blank=True, default=None),
),
migrations.AddField(
model_name='individualaddress',
name='postal_code',
field=models.CharField(blank=True, max_length=16),
),
migrations.AddField(
model_name='individualaddress',
name='postal_code_suffix',
field=models.CharField(blank=True, max_length=16),
),
migrations.AddField(
model_name='individualaddress',
name='route',
field=models.CharField(blank=True, max_length=255),
),
migrations.AddField(
model_name='individualaddress',
name='street_number',
field=models.IntegerField(blank=True, default=None),
),
]
| mit | -7,381,797,444,684,891,000 | 32.278195 | 64 | 0.553999 | false | 4.663857 | false | false | false |
megasan/210-CT | coursework 9.py | 1 | 1511 | """ Adapt the binary search algorithm so that instead of outputting whether a specific value was found, it outputs whether a value within an interval (specified by you) was found. Write the pseudocode and code and give the time complexity of the algorithm using the Big O notation."""
def binarySearch(lower, upper, alist):
"""
    take in a lower bound, upper bound, and a list as input.
    the lower and upper bounds define the interval of values to search for.
    a standard binary search follows, except the search target is the whole
    interval rather than a single value.
"""
bounds = range(lower, upper + 1)
first = 0
last = len(alist) - 1
found = False
while first <= last and not found:
midpoint = (first + last) // 2
if alist[midpoint] in bounds:
found = True
else:
if bounds[0] > alist[midpoint]:
first = midpoint + 1
else:
last = midpoint - 1
return found
"""
take lower bound, upper bound and list as inputs
set search start point, end point
while the list is not empty and element is not found
set a new midpoint using the start point and end point
if the midpoint is in bounds
element is found
else
        if the lower end of the interval is greater than the midpoint element
            move the search start point just past the midpoint
        else
            move the search end point just before the midpoint
return whether or not a matching element was found
Time complexity: O(log N) iterations of the search loop; each interval
membership test scans a range of size k, so the total is O(k log N),
which reduces to O(log N) if the test is done with two comparisons
against the interval bounds.
"""
| mit | -8,335,320,677,247,389,000 | 33.97619 | 310 | 0.619457 | false | 4.22067 | false | false | false |
horoshenkih/rgg | generate-hrg.py | 1 | 1095 | #!/usr/bin/python
import sys
from argparse import ArgumentParser
from collections import Counter
import matplotlib.pyplot as plt
from lib.graph import HypRG
def main():
parser = ArgumentParser(description='Emulate hyperbolic random graph', epilog='Returns only edges (without isolated vertices!)')
parser.add_argument('n', type=int, help='number of vertices')
parser.add_argument('--alpha', type=float, help='alpha', default=1.)
parser.add_argument('-C', type=float, help='C', default=0.)
parser.add_argument('-f', help='outfile')
parser.add_argument('-s', '--seed', help='random seed', type=int)
args = parser.parse_args()
if args.f:
out_f = open(args.f, 'w')
else:
out_f = sys.stdout
n = args.n
alpha = args.alpha
C = args.C
seed = 0 if args.seed is None else args.seed
g = HypRG(n, alpha=alpha, C=C, seed=seed)
for e in g.edges():
e_fmt = []
for v in e:
e_fmt.append("{0:.3f},{1:.3f}".format(*v))
out_f.write(' '.join(e_fmt) + '\n')
if __name__ == '__main__':
main()
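# Illustrative invocation (hypothetical values for the options defined above):
#   python generate-hrg.py 1000 --alpha 0.75 -C 1.0 -s 42 -f edges.txt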
| mit | 67,064,435,395,282,450 | 27.076923 | 132 | 0.610046 | false | 3.318182 | false | false | false |
ircwaves/gips | gips/inventory/__init__.py | 1 | 16895 | #!/usr/bin/env python
################################################################################
# GIPS: Geospatial Image Processing System
#
# AUTHOR: Matthew Hanson
# EMAIL: [email protected]
#
# Copyright (C) 2014-2018 Applied Geosolutions
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
################################################################################
import sys
import os
from datetime import datetime as dt
import traceback
import numpy
from copy import deepcopy
from collections import defaultdict
import gippy
from gips.tiles import Tiles
from gips.utils import VerboseOut, Colors
from gips import utils
from gips.mapreduce import MapReduce
from . import dbinv, orm
class Inventory(object):
""" Base class for inventories """
_colors = [Colors.PURPLE, Colors.RED, Colors.GREEN, Colors.BLUE, Colors.YELLOW]
def __init__(self):
pass
def __getitem__(self, date):
""" Indexing operator for class """
return self.data[date]
def __len__(self):
""" Length of inventory (# of dates) """
return len(self.dates)
def get_subset(self, dates):
""" Return subset of inventory """
inv = deepcopy(self)
for d in inv.dates:
if d not in dates:
del inv.data[d]
return inv
@property
def sensor_set(self):
sset = set()
for date in self.dates:
sset.update(self.data[date].sensor_set)
return sorted(sset)
@property
def dates(self):
""" Get sorted list of dates """
return sorted(self.data.keys())
@property
def numfiles(self):
""" Total number of files in inventory """
return sum([len(dat) for dat in self.data.values()])
@property
def datestr(self):
return '%s dates (%s - %s)' % (len(self.dates), self.dates[0], self.dates[-1])
def color(self, sensor):
""" Return color for sensor """
return self._colors[list(self.sensor_set).index(sensor)]
def pprint(self, md=False, size=False):
""" Print the inventory """
if len(self.data) == 0:
print 'No matching files in inventory'
return
self.data[self.data.keys()[0]].pprint_asset_header()
dformat = '%m-%d' if md else '%j'
oldyear = 0
formatstr = '{:<12}\n'
colors = {k: self.color(k) for k in self.sensor_set}
for date in self.dates:
# if new year then write out the year
if date.year != oldyear:
sys.stdout.write(Colors.BOLD + formatstr.format(date.year) + Colors.OFF)
self.data[date].pprint(dformat, colors)
oldyear = date.year
if self.numfiles != 0:
VerboseOut("\n\n%s files on %s dates" % (self.numfiles, len(self.dates)), 1)
if size:
filelist_gen = (
tile.filenames.values() + [a.filename for a in tile.assets.values()]
for tiles in self.data.values()
for tile in tiles.tiles.values()
)
total_size = sum(
sum(os.stat(f).st_size for f in fl)
for fl in filelist_gen
)
sitename = self.spatial.sitename
if sitename == 'tiles':
sitename += str(self.spatial.tiles)
print('{} includes {:.0f} Mebibytes of local gips archive data'
.format(sitename, total_size / 2 ** 20))
class ProjectInventory(Inventory):
""" Inventory of project directory (collection of Data class) """
def __init__(self, projdir='', products=[]):
""" Create inventory of a GIPS project directory """
self.projdir = os.path.abspath(projdir)
if not os.path.exists(self.projdir):
raise Exception('Directory %s does not exist!' % self.projdir)
self.data = {}
product_set = set()
sensor_set = set()
with utils.error_handler("Project directory error for " + self.projdir):
# can't import Data at module scope due to circular dependencies
from gips.data.core import Data
for dat in Data.discover(self.projdir):
self.data[dat.date] = dat
# All products and sensors used across all dates
product_set = product_set.union(dat.product_set)
sensor_set = sensor_set.union(dat.sensor_set)
if not products:
products = list(product_set)
self.requested_products = products
self.sensors = sensor_set
def products(self, date=None):
""" Intersection of available products and requested products for this date """
if date is not None:
return set(self.data[date].products).intersection(set(self.requested_products))
else:
products = {}
for date in self.dates:
products[date] = set(self.data[date].products).intersection(set(self.requested_products))
return products
def new_image(self, filename, dtype=gippy.GDT_Byte, numbands=1, nodata=None):
""" Create new image with the same template as the files in project """
img = gippy.GeoImage(self.data[self.dates[0]].open(self.requested_products[0]))
imgout = gippy.GeoImage(filename, img, dtype, numbands)
img = None
if nodata is not None:
imgout.SetNoData(nodata)
return imgout
def data_size(self):
""" Get 'shape' of inventory: #products x rows x columns """
img = gippy.GeoImage(self.data[self.dates[0]].open(self.requested_products[0]))
sz = (len(self.requested_products), img.YSize(), img.XSize())
return sz
def get_data(self, dates=None, products=None, chunk=None):
""" Read all files as time series, stacking all products """
# TODO - change to absolute dates
if dates is None:
dates = self.dates
days = numpy.array([int(d.strftime('%j')) for d in dates])
imgarr = []
if products is None:
products = self.requested_products
for p in products:
gimg = self.get_timeseries(p, dates=dates)
# TODO - move numpy.squeeze into swig interface file?
ch = gippy.Recti(chunk[0], chunk[1], chunk[2], chunk[3])
arr = numpy.squeeze(gimg.TimeSeries(days.astype('float64'), ch))
arr[arr == gimg[0].NoDataValue()] = numpy.nan
if len(days) == 1:
dims = arr.shape
arr = arr.reshape(1, dims[0], dims[1])
imgarr.append(arr)
data = numpy.vstack(tuple(imgarr))
return data
def get_location(self):
# this is a terrible hack to get the name of the feature associated with the inventory
data = self.data[self.dates[0]]
location = os.path.split(os.path.split(data.filenames.values()[0])[0])[1]
return location
def get_timeseries(self, product='', dates=None):
""" Read all files as time series """
if dates is None:
dates = self.dates
# TODO - multiple sensors
filenames = [self.data[date][product] for date in dates]
img = gippy.GeoImage(filenames)
return img
def map_reduce(self, func, numbands=1, products=None, readfunc=None, nchunks=100, **kwargs):
""" Apply func to inventory to generate an image with numdim output bands """
if products is None:
products = self.requested_products
if readfunc is None:
readfunc = lambda x: self.get_data(products=products, chunk=x)
inshape = self.data_size()
outshape = [numbands, inshape[1], inshape[2]]
mr = MapReduce(inshape, outshape, readfunc, func, **kwargs)
mr.run(nchunks=nchunks)
return mr.assemble()
class DataInventory(Inventory):
""" Manager class for data inventories (collection of Tiles class) """
def __init__(self, dataclass, spatial, temporal, products=None,
fetch=False, update=False, **kwargs):
""" Create a new inventory
:dataclass: The Data class to use (e.g., LandsatData, ModisData)
:spatial: The SpatialExtent requested
:temporal: The temporal extent requested
:products: List of requested products of interest
:fetch: bool indicated if missing data should be downloaded
"""
VerboseOut('Retrieving inventory for site %s for date range %s' % (spatial.sitename, temporal) , 2)
self.dataclass = dataclass
Repository = dataclass.Asset.Repository
self.spatial = spatial
self.temporal = temporal
self.products = dataclass.RequestedProducts(products)
self.update = update
if fetch:
# command-line arguments could have lists, which lru_cache chokes
            # on due to being unhashable. Also tiles is passed in, which
# conflicts with the explicit tiles argument.
fetch_kwargs = {k: v for (k, v) in
utils.prune_unhashable(kwargs).items() if k != 'tiles'}
archived_assets = dataclass.fetch(self.products.base,
self.spatial.tiles, self.temporal, self.update, **fetch_kwargs)
if orm.use_orm():
# save metadata about the fetched assets in the database
driver = dataclass.name.lower()
for a in archived_assets:
dbinv.update_or_add_asset(
asset=a.asset, sensor=a.sensor, tile=a.tile, date=a.date,
name=a.archived_filename, driver=driver)
# if the new asset comes with any "free" products, save that info:
for (prod_type, fp) in a.products.items():
dbinv.update_or_add_product(
product=prod_type, sensor=a.sensor, tile=a.tile, date=a.date,
name=fp, driver=driver)
# Build up the inventory: One Tiles object per date. Each contains one Data object. Each
# of those contain one or more Asset objects.
self.data = {}
dates = self.temporal.prune_dates(spatial.available_dates)
if orm.use_orm():
# populate the object tree under the DataInventory (Tiles, Data, Asset) by querying the
# DB quick-like then assigning things we iterate: The DB is a flat table of data; we
# have to hierarchy-ize it. Do this by setting up a temporary collection of objects
# for populating Data instances: the collection is basically a simple version of the
# complicated hierarchy that GIPS constructs on its own:
# collection = {
# (tile, date): {'a': [asset, asset, asset],
# 'p': [product, product, product]},
# (tile, date): {'a': [asset, asset, asset],
# 'p': [product, product, product]},
# }
collection = defaultdict(lambda: {'a': [], 'p': []})
def add_to_collection(date, tile, kind, item):
key = (date, str(tile)) # str() to avoid possible unicode trouble
collection[key][kind].append(item)
search_criteria = { # same for both Assets and Products
'driver': Repository.name.lower(),
'tile__in': spatial.tiles,
'date__in': dates,
}
for p in dbinv.product_search(**search_criteria).order_by('date', 'tile'):
add_to_collection(p.date, p.tile, 'p', str(p.name))
for a in dbinv.asset_search(**search_criteria).order_by('date', 'tile'):
add_to_collection(a.date, a.tile, 'a', str(a.name))
# the collection is now complete so use it to populate the GIPS object hierarchy
for k, v in collection.items():
(date, tile) = k
# find or else make a Tiles object
if date not in self.data:
self.data[date] = Tiles(dataclass, spatial, date, self.products, **kwargs)
tiles_obj = self.data[date]
# add a Data object (should not be in tiles_obj.tiles already)
assert tile not in tiles_obj.tiles # sanity check
data_obj = dataclass(tile, date, search=False)
# add assets and products
[data_obj.add_asset(dataclass.Asset(a)) for a in v['a']]
data_obj.ParseAndAddFiles(v['p'])
# add the new Data object to the Tiles object if it checks out
if data_obj.valid and data_obj.filter(**kwargs):
tiles_obj.tiles[tile] = data_obj
return
# Perform filesystem search since user wants that. Data object instantiation results
# in filesystem search (thanks to search=True).
self.data = {} # clear out data dict in case it has partial results
for date in dates:
tiles_obj = Tiles(dataclass, spatial, date, self.products, **kwargs)
for t in spatial.tiles:
data_obj = dataclass(t, date, search=True)
if data_obj.valid and data_obj.filter(**kwargs):
tiles_obj.tiles[t] = data_obj
if len(tiles_obj) > 0:
self.data[date] = tiles_obj
@property
def sensor_set(self):
""" The set of all sensors used in this inventory """
return sorted(self.dataclass.Asset._sensors.keys())
def process(self, *args, **kwargs):
""" Process assets into requested products """
# TODO - some check on if any processing was done
start = dt.now()
VerboseOut('Processing [%s] on %s dates (%s files)' % (self.products, len(self.dates), self.numfiles), 3)
if len(self.products.standard) > 0:
for date in self.dates:
with utils.error_handler(continuable=True):
self.data[date].process(*args, **kwargs)
if len(self.products.composite) > 0:
self.dataclass.process_composites(self, self.products.composite, **kwargs)
VerboseOut('Processing completed in %s' % (dt.now() - start), 2)
def mosaic(self, datadir='./', tree=False, **kwargs):
""" Create project files for data in inventory """
# make sure products have been processed first
self.process(overwrite=False)
start = dt.now()
VerboseOut('Creating mosaic project %s' % datadir, 2)
VerboseOut(' Dates: %s' % self.datestr)
VerboseOut(' Products: %s' % self.products)
dout = datadir
for d in self.dates:
if tree:
dout = os.path.join(datadir, d.strftime('%Y%j'))
self.data[d].mosaic(dout, **kwargs)
VerboseOut('Completed mosaic project in %s' % (dt.now() - start), 2)
# def warptiles(self):
# """ Just copy or warp all tiles in the inventory """
def pprint(self, **kwargs):
""" Print inventory """
print
if self.spatial.site is not None:
print Colors.BOLD + 'Asset Coverage for site %s' % (self.spatial.sitename) + Colors.OFF
self.spatial.print_tile_coverage()
print
else:
# constructor makes it safe to assume there is only one tile when
# self.spatial.site is None, but raise an error anyway just in case
if len(self.spatial.tiles) > 1:
raise RuntimeError('Expected 1 tile but got ' + repr(self.spatial.tiles))
print Colors.BOLD + 'Asset Holdings for tile ' + self.spatial.tiles[0] + Colors.OFF
super(DataInventory, self).pprint(**kwargs)
print Colors.BOLD + '\nSENSORS' + Colors.OFF
_sensors = self.dataclass.Asset._sensors
for key in sorted(self.sensor_set):
if key in _sensors:
desc = _sensors[key]['description']
scode = key + ': ' if key != '' else ''
else:
desc = ''
scode = key
print self.color(key) + '%s%s' % (scode, desc) + Colors.OFF
| gpl-2.0 | 829,240,851,418,279,600 | 41.664141 | 113 | 0.576088 | false | 4.067164 | false | false | false |
tonghuashuai/42qu-notepad | lib/txt.py | 1 | 1143 | #coding:utf-8
def cnenlen(s):
if type(s) is str:
s = s.decode('utf-8', 'ignore')
return len(s.encode('gb18030', 'ignore')) // 2
def cnencut(s, length):
ts = type(s)
if ts is str:
s = s.decode('utf-8', 'ignore')
s = s.encode('gb18030', 'ignore')[:length*2].decode('gb18030', 'ignore')
if ts is str:
s = s.encode('utf-8', 'ignore')
return s
def cnenoverflow(s, length):
txt = cnencut(s , length)
if txt != s:
txt = '%s ...' % txt.rstrip()
has_more = True
else:
has_more = False
return txt, has_more
def txt_rsrtip(txt):
return '\n'.join(
map(
str.rstrip,
txt.replace('\r\n', '\n')\
.replace('\r', '\n').rstrip('\n ')\
.split('\n')
)
)
def make_tag_list(tag_txt):
_tag_list = txt_rsrtip(tag_txt).split('\n')
result = []
for i in _tag_list:
tag = i.strip()
if not tag:
continue
if tag not in result:
result.append(tag)
return result
if __name__ == '__main__':
pass
print repr(txt_rsrtip('b\r\nx'))
| mit | -5,451,762,747,738,168,000 | 22.326531 | 76 | 0.488189 | false | 3.048 | false | false | false |
roman-kachanovsky/cmd.fm-python | commands/play.py | 1 | 2267 | from __future__ import unicode_literals, absolute_import
import random
import time
from .base import Command
from utils.colorize import colorize, Colors
from player.player import Player
class Play(Command):
name = 'play'
pattern = 'play {genre}'
example = ('play chillout', 'p jazz',)
description = 'Use this command to play genres and resume paused track.'
@staticmethod
def handle(self, *args):
arg = args[0] if args else ''
if not arg:
if self.player and self.player.is_paused:
self.player.play()
return self.INDENT + colorize(Colors.BLUE, '\u25B6 ' + self.client.active_station['name'])
self.stdout_print(self.INDENT + colorize(Colors.GRAY, 'Pick random genre...'))
arg = random.choice([genre.get('title', '') for genre in self.client.genres])
genre = self.client.search_genre(arg)
genre_id = genre.get('id') if genre else None
if genre_id is None:
return self.INDENT + colorize(Colors.RED, 'Genre ') + arg + colorize(Colors.RED, ' not found.')
self.stdout_print(self.INDENT + colorize(Colors.GREEN, 'Tuning in...'))
self.stdout_print(self.INDENT + colorize(Colors.GREEN, 'Starting genre: ') + genre.get('title', ''))
num_of_tries = 0
while num_of_tries < 3:
num_of_tries += 1
stream = self.client.get_stream(genre_id, renew_active_station=True)
if not stream:
return self.INDENT + colorize(Colors.RED, 'No active stations found... Please, try another genre.')
if self.player:
self.player.stop()
self.player = Player(stream)
self.player.play()
num_of_checks = 0
while num_of_checks < 5:
num_of_checks += 1
time.sleep(1)
if self.player.is_playing:
return self.INDENT + colorize(Colors.BLUE, '\u25B6 ' + self.client.active_station['name'])
return self.INDENT + colorize(Colors.RED, 'No active stations found... Please, try another genre.')
class P(Play):
name = 'p'
pattern = 'p {genre}'
example = ('p chillout', 'play jazz',)
show_in_main_help = False
| bsd-3-clause | -1,280,759,512,750,446,800 | 34.421875 | 115 | 0.591972 | false | 3.816498 | false | false | false |
geniusproject/ramses | ramses/registry.py | 5 | 2063 | """
Naive registry that is just a subclass of a python dictionary.
It is meant to be used to store objects and retrieve them when needed.
The registry is recreated on each app launch and is best suited to store some
dynamic or short-term data.
Storing an object should be performed by using the `add` function, and
retrieving it by using the `get` function.
Examples:
Register a function under a function name::
from ramses import registry
@registry.add
def foo():
print 'In foo'
assert registry.get('foo') is foo
Register a function under a different name::
from ramses import registry
@registry.add('bar')
def foo():
print 'In foo'
assert registry.get('bar') is foo
Register an arbitrary object::
from ramses import registry
myvar = 'my awesome var'
registry.add('my_stored_var', myvar)
assert registry.get('my_stored_var') == myvar
Register and get an object by namespace::
from ramses import registry
myvar = 'my awesome var'
registry.add('Foo.my_stored_var', myvar)
assert registry.mget('Foo') == {'my_stored_var': myvar}
"""
import six
class Registry(dict):
pass
registry = Registry()
def add(*args):
def decorator(function):
registry[name] = function
return function
if len(args) == 1 and six.callable(args[0]):
function = args[0]
name = function.__name__
return decorator(function)
elif len(args) == 2:
registry[args[0]] = args[1]
else:
name = args[0]
return decorator
def get(name):
try:
return registry[name]
except KeyError:
raise KeyError(
"Object named '{}' is not registered in ramses "
"registry".format(name))
def mget(namespace):
namespace = namespace.lower() + '.'
data = {}
for key, val in registry.items():
key = key.lower()
if not key.startswith(namespace):
continue
clean_key = key.split(namespace)[-1]
data[clean_key] = val
return data
| apache-2.0 | -1,219,513,007,815,364,400 | 20.268041 | 77 | 0.632574 | false | 3.922053 | false | false | false |
huggingface/transformers | examples/research_projects/distillation/utils.py | 2 | 4280 | # coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Utils to train DistilBERT
adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM)
"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log(folder_path: str):
"""
Log commit info.
"""
repo = git.Repo(search_parent_directories=True)
repo_infos = {
"repo_id": str(repo),
"repo_sha": str(repo.head.object.hexsha),
"repo_branch": str(repo.active_branch),
}
with open(os.path.join(folder_path, "git_log.json"), "w") as f:
json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
"""
Handle single and multi-GPU / multi-node.
"""
if params.n_gpu <= 0:
params.local_rank = 0
params.master_port = -1
params.is_master = True
params.multi_gpu = False
return
assert torch.cuda.is_available()
logger.info("Initializing GPUs")
if params.n_gpu > 1:
assert params.local_rank != -1
params.world_size = int(os.environ["WORLD_SIZE"])
params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
params.global_rank = int(os.environ["RANK"])
# number of nodes / node ID
params.n_nodes = params.world_size // params.n_gpu_per_node
params.node_id = params.global_rank // params.n_gpu_per_node
params.multi_gpu = True
assert params.n_nodes == int(os.environ["N_NODES"])
assert params.node_id == int(os.environ["NODE_RANK"])
# local job (single GPU)
else:
assert params.local_rank == -1
params.n_nodes = 1
params.node_id = 0
params.local_rank = 0
params.global_rank = 0
params.world_size = 1
params.n_gpu_per_node = 1
params.multi_gpu = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
params.is_master = params.node_id == 0 and params.local_rank == 0
params.multi_node = params.n_nodes > 1
# summary
PREFIX = f"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
logger.info(PREFIX + "Node ID : %i" % params.node_id)
logger.info(PREFIX + "Local rank : %i" % params.local_rank)
logger.info(PREFIX + "World size : %i" % params.world_size)
logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node)
logger.info(PREFIX + "Master : %s" % str(params.is_master))
logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node))
logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu))
logger.info(PREFIX + "Hostname : %s" % socket.gethostname())
# set GPU device
torch.cuda.set_device(params.local_rank)
# initialize multi-GPU
if params.multi_gpu:
logger.info("Initializing PyTorch distributed")
torch.distributed.init_process_group(
init_method="env://",
backend="nccl",
)
def set_seed(args):
"""
Set the random seed.
"""
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
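# Minimal sketch of how these helpers are typically wired together in a
# training script (the argument names below are assumptions for illustration,
# not taken from this file):
#
#   parser = argparse.ArgumentParser()
#   parser.add_argument("--n_gpu", type=int, default=1)
#   parser.add_argument("--local_rank", type=int, default=-1)
#   parser.add_argument("--seed", type=int, default=56)
#   args = parser.parse_args()
#   init_gpu_params(args)  # sets up single- or multi-GPU state on args
#   set_seed(args)         # seeds numpy and torch (and CUDA when n_gpu > 0)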
| apache-2.0 | 6,582,052,665,545,975,000 | 31.180451 | 90 | 0.628505 | false | 3.404932 | false | false | false |
ngageoint/scale | scale/storage/delete_files_job.py | 1 | 1370 | """Defines the functions necessary to delete a file from a workspace"""
from __future__ import unicode_literals
import logging
import os
import sys
from error.exceptions import ScaleError, get_error_by_exception
logger = logging.getLogger(__name__)
GENERAL_FAIL_EXIT_CODE = 1
def delete_files(files, volume_path, broker):
"""Deletes the given files within a workspace.
:param files: List of named tuples containing path and ID of the file to delete.
:type files: [collections.namedtuple]
:param volume_path: Absolute path to the local container location onto which the volume file system was mounted,
None if this broker does not use a container volume
:type volume_path: string
:param broker: The storage broker
:type broker: `storage.brokers.broker.Broker`
"""
logger.info('Deleting %i files', len(files))
try:
broker.delete_files(volume_path=volume_path, files=files, update_model=False)
except ScaleError as err:
err.log()
sys.exit(err.exit_code)
except Exception as ex:
exit_code = GENERAL_FAIL_EXIT_CODE
err = get_error_by_exception(ex.__class__.__name__)
if err:
err.log()
exit_code = err.exit_code
else:
logger.exception('Error performing delete_files steps')
sys.exit(exit_code)
return
| apache-2.0 | -7,106,730,222,914,185,000 | 29.444444 | 116 | 0.671533 | false | 4.041298 | false | false | false |
fanchao01/spider | mini_spider/scheduler.py | 1 | 3735 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
This module provides classes to manage the spider threads.
Classes:
    SpiderThread, the worker thread in which a spider runs
    Scheduler, manages all the spider threads
Date: 2014/10/29 17:23:06
"""
import os
import logging
import threading
import htmlsaver
import utils
import errors
import interface
import spider
class SpiderThread(threading.Thread, interface.Worker):
"""the spider thread
Args:
spider, the spider worker
crawl_interval, the work interval of the spider
        args, positional arguments passed on to threading.Thread
        kwargs, keyword arguments passed on to threading.Thread
"""
def __init__(self, spider, crawl_interval, *args, **kwargs):
self.spider = spider
self.stopped = threading.Event()
self.crawl_interval = crawl_interval
super(SpiderThread, self).__init__(*args, **kwargs)
self.daemon = True
def run(self):
while True:
if self.spider.stopped:
logging.info('spider thread will be exiting by spider')
break
if not self.stopped.wait(self.crawl_interval):
self.spider.run()
else:
self.spider.stop()
logging.info('spider thread will be exiting by thread')
def terminate(self):
self.stopped.set()
logging.info('spider thread will be exiting by terminate')
class Scheduler(object):
"""manage all the threads
Args:
arg_parser, the argument parser
"""
def __init__(self, arg_parser):
self.thread_num = arg_parser.get_spider_option('thread_count', int)
self.max_level = arg_parser.get_spider_option('max_depth', int)
self.output_dir = arg_parser.get_spider_option('output_directory', dir)
self.target_url_regx = arg_parser.get_spider_option('target_url', str)
self.crawl_interval = arg_parser.get_spider_option('crawl_interval', float)
self.urls_cache = utils.UrlQueue(self.thread_num, self.max_level)
self.saver = htmlsaver.HtmlSaver(self.output_dir)
self.workers = [ ]
self._put_level_zero_urls(arg_parser.get_spider_option('url_list_files', str))
def _put_level_zero_urls(self, urls_file):
if not os.path.exists(urls_file):
raise errors.ArgumentFileError(
                'file does not exist: {0}'.format(urls_file))
try:
with open(urls_file, 'r') as infile:
for url in infile:
self.urls_cache.put(utils.UrlLevelTuple(url=url.strip(), level=0))
except IOError as e:
            logging.warn('file can not be read (%s): %s', urls_file, e)
if self.urls_cache.empty():
            raise errors.QueueEmptyError('no urls at first')
def init(self):
"""initial method to prepare the environmetn"""
for i in range(self.thread_num):
worker = spider.Spider(self.urls_cache, self.saver, self.target_url_regx)
self.workers.append(SpiderThread(worker, self.crawl_interval))
def execute(self):
"""start all threads and run"""
self.saver.start()
for worker in self.workers:
worker.start()
while self.workers:
for worker in self.workers[:]:
worker.join(self.crawl_interval)
if not worker.is_alive():
self.workers.remove(worker)
logging.info('worker thread is removed: %d', worker.ident)
self.saver.terminate()
logging.info('all worker thread exited, exit now')
def terminate(self):
for worker in self.workers:
worker.terminate()
if __name__ == '__main__':
Scheduler()
| gpl-2.0 | 1,711,785,014,132,505,300 | 28.88 | 86 | 0.601874 | false | 4.029126 | false | false | false |
mistercrunch/airflow | airflow/example_dags/example_trigger_controller_dag.py | 5 | 1617 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example usage of the TriggerDagRunOperator. This example holds 2 DAGs:
1. 1st DAG (example_trigger_controller_dag) holds a TriggerDagRunOperator, which will trigger the 2nd DAG
2. 2nd DAG (example_trigger_target_dag) which will be triggered by the TriggerDagRunOperator in the 1st DAG
"""
from airflow import DAG
from airflow.operators.trigger_dagrun import TriggerDagRunOperator
from airflow.utils.dates import days_ago
dag = DAG(
dag_id="example_trigger_controller_dag",
default_args={"owner": "airflow"},
start_date=days_ago(2),
schedule_interval="@once",
tags=['example'],
)
trigger = TriggerDagRunOperator(
task_id="test_trigger_dagrun",
trigger_dag_id="example_trigger_target_dag", # Ensure this equals the dag_id of the DAG to trigger
conf={"message": "Hello World"},
dag=dag,
)
| apache-2.0 | -1,910,846,741,003,032,300 | 38.439024 | 107 | 0.75201 | false | 3.887019 | false | false | false |
michimussato/pypelyne2 | pypelyne2/payload/rr/7.0.29__installer/files/render_apps/scripts/houdini_render.py | 1 | 3430 | #python
# -*- coding: cp1252 -*-
######################################################################
#
# Royal Render Render script for Houdini
# Author: Royal Render, Holger Schoenberger, Binary Alchemy
# Version v 7.0.11
# Copyright (c) Holger Schoenberger - Binary Alchemy
#
######################################################################
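# Illustrative invocation (hypothetical paths; Royal Render normally launches
# this through Houdini's Python, with the scene file as the last argument):
#   hython houdini_render.py -frames 1 10 1 -driver /out/mantra1 /path/scene.hip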
import sys
import traceback
def formatExceptionInfo(maxTBlevel=5):
cla, exc, trbk = sys.exc_info()
excName = cla.__name__
try:
excArgs = exc.__dict__["args"]
except KeyError:
excArgs = "<no args>"
excTb = traceback.format_tb(trbk, maxTBlevel)
return (excName, excArgs, excTb)
try:
print( "RR - start " )
idx = sys.argv.index( "-frames" )
seqStart = int(sys.argv[ idx + 1 ])
seqEnd = int(sys.argv[ idx + 2 ])
seqStep = int(sys.argv[ idx + 3 ])
if "-fileName" not in sys.argv:
fileName = None
else:
idx = sys.argv.index( "-fileName" )
fileName = sys.argv[ idx + 1 ]
if "-fileExt" not in sys.argv:
fileExt = None
else:
idx = sys.argv.index( "-fileExt" )
fileExt = sys.argv[ idx + 1 ]
if "-filePadding" not in sys.argv:
filePadding = None
else:
idx = sys.argv.index( "-filePadding" )
filePadding = int(sys.argv[ idx + 1 ])
imgRes = ()
if "-res" in sys.argv:
idx = sys.argv.index( "-res" )
width = int( sys.argv[ idx + 1 ] )
height = int( sys.argv[ idx + 2 ] )
imgRes = (width,height)
idx = sys.argv.index( "-driver" )
driver = sys.argv[ idx + 1 ]
inputFile = sys.argv[ len(sys.argv) - 1 ]
try:
hou.hipFile.load( inputFile, True )
except hou.LoadWarning, e:
print( "Error: LoadWarning (probably wrong houdini version)")
print( e)
rop = hou.node( driver )
if rop == None:
print( "Error: Driver node \"" + driver + "\" does not exist" )
else:
if "-threads" in sys.argv:
idx = sys.argv.index( "-threads" )
threadCount = sys.argv[ idx + 1 ]
if (rop.type().name() != "arnold"):
try:
usemaxthread=rop.parm('vm_usemaxthreads')
usemaxthread.set(0)
threadCount = int(threadCount)
threadCountParam=rop.parm('vm_threadcount')
threadCountParam.set(threadCount)
except hou.LoadWarning, e:
print( "Error: Unable to set thread count")
print( e)
for fr in range( seqStart, seqEnd + 1 ):
print( "Rendering Frame #" + str(fr) +" ...")
if fileName != None:
if filePadding != None:
pad = "%0*d" % (filePadding,fr)
filenameComplete = fileName+pad+fileExt
else:
filenameComplete = fileName+fileExt
else:
filenameComplete = None
print( filenameComplete )
rop.render( (fr,fr,seqStep), imgRes, filenameComplete, fileExt )
print( "Frame Rendered #" + str(fr) )
except hou.OperationFailed, e:
print( "Error: OperationFailed")
print( e)
print( formatExceptionInfo())
except:
print( "Error: Error executing script")
print( formatExceptionInfo())
| gpl-2.0 | -989,458,583,689,781,900 | 31.056075 | 76 | 0.508163 | false | 3.806881 | false | false | false |
uclmr/inferbeddings | inferbeddings/models/training/corrupt.py | 1 | 2333 | # -*- coding: utf-8 -*-
import abc
import numpy as np
class ACorruptor(metaclass=abc.ABCMeta):
@abc.abstractmethod
def __call__(self, steps, entities):
raise NotImplementedError
class SimpleCorruptor(ACorruptor):
def __init__(self, index_generator=None, candidate_indices=None, corrupt_objects=False):
self.index_generator = index_generator
self.candidate_indices = candidate_indices
self.corrupt_objects = corrupt_objects
def __call__(self, steps, entities):
"""
Generates sets of negative examples, by corrupting the facts (walks) provided as input.
:param steps: [nb_samples, m] matrix containing the walk relation indices.
:param entities: [nb_samples, 2] matrix containing subject and object indices.
        :return: ([nb_samples, m], [nb_samples, 2]) pair containing the corrupted steps and entities.
"""
nb_samples = steps.shape[0]
# Relation indices are not changed. For corrupting them, use a SimpleRelationCorruptor.
negative_steps = steps
# Entity (subject and object) indices are corrupted for generating two new sets of walks
entities_corr = np.copy(entities)
entities_corr[:, 1 if self.corrupt_objects else 0] = self.index_generator(nb_samples, self.candidate_indices)
return negative_steps, entities_corr
class SimpleRelationCorruptor(ACorruptor):
def __init__(self, index_generator=None, candidate_indices=None):
self.index_generator = index_generator
self.candidate_indices = candidate_indices
def __call__(self, steps, entities):
"""
Generates sets of negative examples, by corrupting the facts (walks) provided as input.
:param steps: [nb_samples, m] matrix containing the walk relation indices.
:param entities: [nb_samples, 2] matrix containing subject and object indices.
        :return: ([nb_samples, m], [nb_samples, 2]) pair containing the corrupted steps and entities.
"""
nb_samples = steps.shape[0]
# Corrupting the relation indices
negative_steps = np.copy(steps)
negative_steps[:, 0] = self.index_generator(nb_samples, self.candidate_indices)
# We leave entities unchanged
entities_corr = entities
return negative_steps, entities_corr
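# Illustrative usage sketch (the uniform index generator below is an assumption
# made for this example and is not defined in this module):
#
#   uniform = lambda n, candidates: np.random.choice(candidates, n)
#   corruptor = SimpleCorruptor(index_generator=uniform,
#                               candidate_indices=np.arange(nb_entities),
#                               corrupt_objects=True)
#   neg_steps, neg_entities = corruptor(steps, entities)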
| mit | -7,507,990,518,301,316,000 | 37.883333 | 117 | 0.673382 | false | 4.181004 | false | false | false |
martinjrobins/hobo | pints/_mcmc/_relativistic.py | 1 | 15546 | #
# Relativistic MCMC method
#
# This file is part of PINTS.
# Copyright (c) 2017-2019, University of Oxford.
# For licensing information, see the LICENSE file distributed with the PINTS
# software package.
#
from __future__ import absolute_import, division
from __future__ import print_function, unicode_literals
import pints
import numpy as np
class RelativisticMCMC(pints.SingleChainMCMC):
r"""
Implements Relativistic Monte Carlo as described in [1]_.
Uses a physical analogy of a particle moving across a landscape under
Hamiltonian dynamics to aid efficient exploration of parameter space.
    Introduces an auxiliary variable -- the momentum (``p_i``) of a particle
moving in dimension ``i`` of negative log posterior space -- which
supplements the position (``q_i``) of the particle in parameter space. The
particle's motion is dictated by solutions to Hamilton's equations,
.. math::
dq_i/dt &= \partial H/\partial p_i\\
dp_i/dt &= - \partial H/\partial q_i.
The Hamiltonian is given by,
.. math::
H(q,p) &= U(q) + KE(p)\\
               &= -\log(p(q|X)p(q)) +
                    mc^2 (\sum_{i=1}^{d} p_i^2 / (mc^2) + 1)^{0.5}
    where ``d`` is the dimensionality of the model, ``m`` is the scalar 'mass'
given to each particle (chosen to be 1 as default) and ``c`` is the
speed of light (chosen to be 10 by default).
To numerically integrate Hamilton's equations, it is essential to use a
    symplectic discretisation routine, of which the most typical approach is
the leapfrog method,
.. math::
p_i(t + \epsilon/2) &= p_i(t) - (\epsilon/2) d U(q_i(t))/dq_i\\
q_i(t + \epsilon) &= q_i(t) +
\epsilon M^{-1}(p_i(t + \epsilon/2)) p_i(t + \epsilon/2)\\
p_i(t + \epsilon) &= p_i(t + \epsilon/2) -
(\epsilon/2) d U(q_i(t + \epsilon))/dq_i
where relativistic mass (a scalar) is,
.. math::
        M(p) = m (\sum_{i=1}^{d} p_i^2 / (mc^2) + 1)^{0.5}
In particular, the algorithm we implement follows eqs. in section 2.1 of
[1]_.
Extends :class:`SingleChainMCMC`.
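    A minimal usage sketch (``log_pdf`` and ``x0`` are assumed to be a
    :class:`pints.LogPDF` supporting first-order sensitivities and a starting
    point; they are illustrative and not defined by this class)::
        mcmc = pints.MCMCController(log_pdf, 1, [x0],
                                    method=pints.RelativisticMCMC)
        mcmc.set_max_iterations(2000)
        chains = mcmc.run()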
References
----------
.. [1] "Relativistic Monte Carlo". Xiaoyu Lu, Valerio Perrone,
Leonard Hasenclever, Yee Whye Teh, Sebastian J. Vollmer,
2017, Proceedings of Machine Learning Research.
"""
def __init__(self, x0, sigma0=None):
super(RelativisticMCMC, self).__init__(x0, sigma0)
# Set initial state
self._running = False
self._ready_for_tell = False
# Current point in the Markov chain
self._current = None # Aka current_q in the chapter
self._current_energy = None # Aka U(current_q) = -log_pdf
self._current_gradient = None
self._current_momentum = None # Aka current_p
# Current point in the leapfrog iterations
self._momentum = None # Aka p in the chapter
self._position = None # Aka q in the chapter
self._gradient = None # Aka grad_U(q) in the chapter
# Iterations, acceptance monitoring, and leapfrog iterations
self._mcmc_iteration = 0
self._mcmc_acceptance = 0
self._frog_iteration = 0
# Default number of leapfrog iterations
self._n_frog_iterations = 20
# Default integration step size for leapfrog algorithm
self._epsilon = 0.1
self._step_size = None
self._mass = 1
self._c = 10
self.set_leapfrog_step_size(np.diag(self._sigma0))
# Divergence checking
# Create a vector of divergent iterations
self._divergent = np.asarray([], dtype='int')
# Default threshold for Hamiltonian divergences
# (currently set to match Stan)
self._hamiltonian_threshold = 10**3
def ask(self):
""" See :meth:`SingleChainMCMC.ask()`. """
# Check ask/tell pattern
if self._ready_for_tell:
raise RuntimeError('Ask() called when expecting call to tell().')
# Initialise on first call
if not self._running:
self._running = True
self._mc2 = self._mass * self._c**2
# Notes:
# Ask is responsible for updating the position, which is the point
# returned to the user
# Tell is then responsible for updating the momentum, which uses the
# gradient at this new point
# The MCMC step happens in tell, and does not require any new
# information (it uses the log_pdf and gradient of the final point
# in the leapfrog run).
# Very first iteration
if self._current is None:
# Ask for the pdf and gradient of x0
self._ready_for_tell = True
return np.array(self._x0, copy=True)
# First iteration of a run of leapfrog iterations
if self._frog_iteration == 0:
# Sample random momentum for current point using identity cov
self._current_momentum = np.random.multivariate_normal(
np.zeros(self._n_parameters), np.eye(self._n_parameters))
# First leapfrog position is the current sample in the chain
self._position = np.array(self._current, copy=True)
self._gradient = np.array(self._current_gradient, copy=True)
self._momentum = np.array(self._current_momentum, copy=True)
# Perform a half-step before starting iteration 0 below
self._momentum -= self._scaled_epsilon * self._gradient * 0.5
# Perform a leapfrog step for the position
squared = np.sum(np.array(self._momentum)**2)
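        # Relativistic mass M(p) = m * sqrt(|p|^2 / (m c^2) + 1), as given in the class docstring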
relativistic_mass = self._mass * np.sqrt(squared / self._mc2 + 1)
self._position += (
self._scaled_epsilon * self._momentum / relativistic_mass)
# Ask for the pdf and gradient of the current leapfrog position
# Using this, the leapfrog step for the momentum is performed in tell()
self._ready_for_tell = True
return np.array(self._position, copy=True)
def current_log_pdf(self):
""" See :meth:`SingleChainMCMC.current_log_pdf()`. """
return -self._current_energy
def divergent_iterations(self):
"""
Returns the iteration number of any divergent iterations
"""
return self._divergent
def epsilon(self):
"""
Returns epsilon used in leapfrog algorithm
"""
return self._epsilon
def hamiltonian_threshold(self):
"""
        Returns the threshold difference in Hamiltonian value from one iteration
        to the next which determines whether an iteration is divergent.
"""
return self._hamiltonian_threshold
def leapfrog_steps(self):
"""
Returns the number of leapfrog steps to carry out for each iteration.
"""
return self._n_frog_iterations
def leapfrog_step_size(self):
"""
Returns the step size for the leapfrog algorithm.
"""
return self._step_size
def _log_init(self, logger):
""" See :meth:`Loggable._log_init()`. """
logger.add_float('Accept.')
def _log_write(self, logger):
""" See :meth:`Loggable._log_write()`. """
logger.log(self._mcmc_acceptance)
def _kinetic_energy(self, momentum):
"""
Kinetic energy of relativistic particle, which is defined in [1]_.
"""
squared = np.sum(np.array(momentum)**2)
return self._mc2 * (squared / self._mc2 + 1)**0.5
def mass(self):
""" Returns ``mass`` which is the rest mass of particle. """
return self._mass
def n_hyper_parameters(self):
""" See :meth:`TunableMethod.n_hyper_parameters()`. """
return 4
def name(self):
""" See :meth:`pints.MCMCSampler.name()`. """
return 'Relativistic MCMC'
def needs_sensitivities(self):
""" See :meth:`pints.MCMCSampler.needs_sensitivities()`. """
return True
def scaled_epsilon(self):
"""
Returns scaled epsilon used in leapfrog algorithm
"""
return self._scaled_epsilon
def _set_scaled_epsilon(self):
"""
Rescales epsilon along the dimensions of step_size
"""
self._scaled_epsilon = np.zeros(self._n_parameters)
for i in range(self._n_parameters):
self._scaled_epsilon[i] = self._epsilon * self._step_size[i]
def set_epsilon(self, epsilon):
"""
Sets epsilon for the leapfrog algorithm
"""
epsilon = float(epsilon)
if epsilon <= 0:
raise ValueError('epsilon must be positive for leapfrog algorithm')
self._epsilon = epsilon
self._set_scaled_epsilon()
def set_hamiltonian_threshold(self, hamiltonian_threshold):
"""
        Sets the threshold difference in Hamiltonian value from one iteration
        to the next which determines whether an iteration is divergent.
"""
if hamiltonian_threshold < 0:
raise ValueError('Threshold for divergent iterations must be ' +
'non-negative.')
self._hamiltonian_threshold = hamiltonian_threshold
def set_hyper_parameters(self, x):
"""
The hyper-parameter vector is ``[leapfrog_steps, leapfrog_step_size,
mass, c]``.
See :meth:`TunableMethod.set_hyper_parameters()`.
"""
self.set_leapfrog_steps(x[0])
self.set_leapfrog_step_size(x[1])
self.set_mass(x[2])
self.set_speed_of_light(x[3])
def set_leapfrog_steps(self, steps):
"""
Sets the number of leapfrog steps to carry out for each iteration.
"""
steps = int(steps)
if steps < 1:
raise ValueError('Number of steps must exceed 0.')
self._n_frog_iterations = steps
def set_leapfrog_step_size(self, step_size):
"""
Sets the step size for the leapfrog algorithm.
"""
a = np.atleast_1d(step_size)
if len(a[a < 0]) > 0:
raise ValueError(
                'Step size for leapfrog algorithm must ' +
'be greater than zero.'
)
if len(a) == 1:
step_size = np.repeat(step_size, self._n_parameters)
elif not len(step_size) == self._n_parameters:
raise ValueError(
                'Step size should either be of length 1 or equal to the ' +
'number of parameters'
)
self._step_size = step_size
self._set_scaled_epsilon()
def set_mass(self, mass):
""" Sets scalar mass. """
if isinstance(mass, list):
raise ValueError('Mass must be scalar.')
if mass <= 0:
raise ValueError('Mass must be positive.')
self._mass = mass
def set_speed_of_light(self, c):
""" Sets `speed of light`. """
if c <= 0:
raise ValueError('Speed of light must be positive.')
self._c = c
def speed_of_light(self):
""" Returns `speed of light`. """
return self._c
def tell(self, reply):
""" See :meth:`pints.SingleChainMCMC.tell()`. """
if not self._ready_for_tell:
raise RuntimeError('Tell called before proposal was set.')
self._ready_for_tell = False
# Unpack reply
energy, gradient = reply
# Check reply, copy gradient
energy = float(energy)
gradient = pints.vector(gradient)
assert(gradient.shape == (self._n_parameters, ))
# Energy = -log_pdf, so flip both signs!
energy = -energy
gradient = -gradient
# Very first call
if self._current is None:
# Check first point is somewhere sensible
if not np.isfinite(energy):
raise ValueError(
'Initial point for MCMC must have finite logpdf.')
# Set current sample, energy, and gradient
self._current = self._x0
self._current_energy = energy
self._current_gradient = gradient
# Increase iteration count
self._mcmc_iteration += 1
# Mark current as read-only, so it can be safely returned
self._current.setflags(write=False)
# Return first point in chain
return self._current
# Set gradient of current leapfrog position
self._gradient = gradient
# Update the leapfrog iteration count
self._frog_iteration += 1
# Not the last iteration? Then perform a leapfrog step and return
if self._frog_iteration < self._n_frog_iterations:
self._momentum -= self._scaled_epsilon * self._gradient
# Return None to indicate there is no new sample for the chain
return None
# Final leapfrog iteration: only do half a step
self._momentum -= self._scaled_epsilon * self._gradient * 0.5
# Before starting accept/reject procedure, check if the leapfrog
# procedure has led to a finite momentum and logpdf. If not, reject.
accept = 0
if np.isfinite(energy) and np.all(np.isfinite(self._momentum)):
# Evaluate potential and kinetic energies at start and end of
# leapfrog trajectory
current_U = self._current_energy
current_K = self._kinetic_energy(self._current_momentum)
proposed_U = energy
proposed_K = self._kinetic_energy(self._momentum)
# Check for divergent iterations by testing whether the
# Hamiltonian difference is above a threshold
div = proposed_U + proposed_K - (self._current_energy + current_K)
if np.abs(div) > self._hamiltonian_threshold: # pragma: no cover
self._divergent = np.append(
self._divergent, self._mcmc_iteration)
self._momentum = self._position = self._gradient = None
self._frog_iteration = 0
# Update MCMC iteration count
self._mcmc_iteration += 1
# Update acceptance rate (only used for output!)
self._mcmc_acceptance = (
(self._mcmc_iteration * self._mcmc_acceptance + accept) /
(self._mcmc_iteration + 1))
self._current.setflags(write=False)
return self._current
# Accept/reject
else:
r = np.exp(current_U - proposed_U + current_K - proposed_K)
if np.random.uniform(0, 1) < r:
accept = 1
self._current = self._position
self._current_energy = energy
self._current_gradient = gradient
# Mark current as read-only, so it can be safely returned
self._current.setflags(write=False)
# Reset leapfrog mechanism
self._momentum = self._position = self._gradient = None
self._frog_iteration = 0
# Update MCMC iteration count
self._mcmc_iteration += 1
# Update acceptance rate (only used for output!)
self._mcmc_acceptance = (
(self._mcmc_iteration * self._mcmc_acceptance + accept) /
(self._mcmc_iteration + 1))
# Return current position as next sample in the chain
return self._current
| bsd-3-clause | 4,326,371,463,587,282,000 | 34.903002 | 79 | 0.580021 | false | 4.026418 | false | false | false |
sputnick-dev/weboob | modules/dresdenwetter/pages.py | 7 | 2368 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2012 Romain Bignon, Florent Fourcot
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.browser.pages import HTMLPage
from weboob.browser.elements import ListElement, ItemElement, method
from weboob.browser.filters.standard import CleanText, Regexp, Field, Filter, debug
from weboob.capabilities.gauge import GaugeMeasure, GaugeSensor
from weboob.capabilities.base import NotAvailable
class Split(Filter):
def __init__(self, selector, mode):
super(Split, self).__init__(selector)
self.mode = mode
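        # mode selects what filter() returns: 0 -> numeric value, 1 -> unit string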
@debug()
def filter(self, txt):
if u"Temperatur" in txt:
value = txt.split(': ')[1].split(u'°')[0]
unit = u'°C'
else:
value = txt.split(':')[-1].split()[0]
unit = txt.split(':')[-1].split()[1]
if unit == u"W/m":
unit = u"W/m²"
try:
value = float(value)
except ValueError:
value = NotAvailable
return [value, unit][self.mode]
class StartPage(HTMLPage):
@method
class get_sensors_list(ListElement):
item_xpath = '//p[@align="center"]'
class item(ItemElement):
klass = GaugeSensor
obj_name = Regexp(CleanText('.'), '(.*?) {0,}: .*', "\\1")
obj_id = CleanText(Regexp(Field('name'), '(.*)', "dd-\\1"), " .():")
obj_gaugeid = u"wetter"
obj_forecast = NotAvailable
obj_unit = Split(CleanText('.'), 1)
def obj_lastvalue(self):
lastvalue = GaugeMeasure()
lastvalue.level = Split(CleanText('.'), 0)(self)
lastvalue.alarm = NotAvailable
return lastvalue
| agpl-3.0 | 4,160,871,167,166,075,000 | 33.779412 | 83 | 0.614376 | false | 3.90264 | false | false | false |
ananthonline/grpc | src/python/grpcio/grpc/beta/_stub.py | 10 | 5555 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Beta API stub implementation."""
import threading
from grpc._links import invocation
from grpc.framework.core import implementations as _core_implementations
from grpc.framework.crust import implementations as _crust_implementations
from grpc.framework.foundation import logging_pool
from grpc.framework.interfaces.links import utilities
_DEFAULT_POOL_SIZE = 6
class _AutoIntermediary(object):
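  # Context-manager wrapper: builds its delegate via ``up`` on __enter__,
  # proxies attribute access to it, and tears it down via ``down`` on exit.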
def __init__(self, up, down, delegate):
self._lock = threading.Lock()
self._up = up
self._down = down
self._in_context = False
self._delegate = delegate
def __getattr__(self, attr):
with self._lock:
if self._delegate is None:
raise AttributeError('No useful attributes out of context!')
else:
return getattr(self._delegate, attr)
def __enter__(self):
with self._lock:
if self._in_context:
raise ValueError('Already in context!')
elif self._delegate is None:
self._delegate = self._up()
self._in_context = True
return self
def __exit__(self, exc_type, exc_val, exc_tb):
with self._lock:
if not self._in_context:
raise ValueError('Not in context!')
self._down()
self._in_context = False
self._delegate = None
return False
def __del__(self):
with self._lock:
if self._delegate is not None:
self._down()
self._delegate = None
class _StubAssemblyManager(object):
def __init__(
self, thread_pool, thread_pool_size, end_link, grpc_link, stub_creator):
self._thread_pool = thread_pool
self._pool_size = thread_pool_size
self._end_link = end_link
self._grpc_link = grpc_link
self._stub_creator = stub_creator
self._own_pool = None
def up(self):
if self._thread_pool is None:
self._own_pool = logging_pool.pool(
_DEFAULT_POOL_SIZE if self._pool_size is None else self._pool_size)
assembly_pool = self._own_pool
else:
assembly_pool = self._thread_pool
self._end_link.join_link(self._grpc_link)
self._grpc_link.join_link(self._end_link)
self._end_link.start()
self._grpc_link.start()
return self._stub_creator(self._end_link, assembly_pool)
def down(self):
self._end_link.stop(0).wait()
self._grpc_link.stop()
self._end_link.join_link(utilities.NULL_LINK)
self._grpc_link.join_link(utilities.NULL_LINK)
if self._own_pool is not None:
self._own_pool.shutdown(wait=True)
self._own_pool = None
def _assemble(
channel, host, metadata_transformer, request_serializers,
response_deserializers, thread_pool, thread_pool_size, stub_creator):
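  # Wires an invocation end link to a gRPC invocation link and hands both to a
  # _StubAssemblyManager, returning a wrapper that assembles the stub on demand.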
end_link = _core_implementations.invocation_end_link()
grpc_link = invocation.invocation_link(
channel, host, metadata_transformer, request_serializers,
response_deserializers)
stub_assembly_manager = _StubAssemblyManager(
thread_pool, thread_pool_size, end_link, grpc_link, stub_creator)
stub = stub_assembly_manager.up()
return _AutoIntermediary(
stub_assembly_manager.up, stub_assembly_manager.down, stub)
def _dynamic_stub_creator(service, cardinalities):
def create_dynamic_stub(end_link, invocation_pool):
return _crust_implementations.dynamic_stub(
end_link, service, cardinalities, invocation_pool)
return create_dynamic_stub
def generic_stub(
channel, host, metadata_transformer, request_serializers,
response_deserializers, thread_pool, thread_pool_size):
return _assemble(
channel, host, metadata_transformer, request_serializers,
response_deserializers, thread_pool, thread_pool_size,
_crust_implementations.generic_stub)
def dynamic_stub(
channel, host, service, cardinalities, metadata_transformer,
request_serializers, response_deserializers, thread_pool,
thread_pool_size):
return _assemble(
channel, host, metadata_transformer, request_serializers,
response_deserializers, thread_pool, thread_pool_size,
_dynamic_stub_creator(service, cardinalities))
| bsd-3-clause | 1,938,392,540,574,530,300 | 34.83871 | 78 | 0.711791 | false | 3.965025 | false | false | false |
hms-dbmi/higlass | scripts/genes_by_popularity.py | 2 | 6935 | import os.path as op
base_dir = '/scr/fluidspace/pkerp/projects/genbank'
from pyspark.sql import *
sqlContext = SQLContext(sc)
# get the gene_id -> pubmed mapping
gene2pubmed = (sc.textFile(op.join(base_dir, "data/gene2pubmed"))
                .filter(lambda x: x[0] != '#')
.map(lambda x: x.split('\t'))
.map(lambda p: {'taxid': int(p[0]), 'geneid': int(p[1]), 'pmid': int(p[2]), 'count': 1}))
#schemaGene2Pubmed = sqlContext.inferSchema(gene2pubmed)
#schemaGene2Pubmed.registerTempTable("gene2pubmed")
gene2refseq = (sc.textFile(op.join(base_dir, "data/gene2refseq"))
        .filter(lambda x: x[0] != '#')
.map(lambda x: x.split('\t'))
.map(lambda p: { 'taxid': int(p[0]),
'geneid' :int(p[1]),
'start_pos': p[9],
'end_pos': p[10],
'nucleotide_accession': p[7],
'orientation': p[11],
'assembly': p[12]}))
gene_info = (sc.textFile(op.join(base_dir, "data/gene_info"))
             .filter(lambda x: x[0] != '#')
.map(lambda x: x.split('\t'))
.map(lambda x: { 'taxid': int(x[0]),
'geneid': int(x[1]),
'description': x[8],
'symbol': x[2],
'name': x[11]}))
gene_info_keyed = gene_info.map(lambda x: ((x['taxid'], x['geneid']), x))
#schemaGene2Refseq = sqlContext.inferSchema(gene2refseq)
#schemaGene2Refseq.registerTempTable("gene2refseq")
# get the most popular genes
#gene_pubmed = sqlContext.sql("select taxid, geneid, count(*) as cnt from gene2pubmed where taxid = 9606 group by geneid, taxid order by cnt desc")
#gene_pubmed.take(10)
#filtered_refseq = sqlContext.sql("select * from gene2refseq where assembly like '%GRCh38%'")
#filtered_refseq.take(10)
# filter for human genes
human_gene_pubmed = (gene2pubmed.filter(lambda x: x['taxid'] == 9606)
.map(lambda x: ((x['taxid'], x['geneid']), x)))
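# keyed by (taxid, geneid) so the reference counts can later be joined with the refseq records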
def reduce_count(r1, r2):
'''
A reduce function that simply counts the number of elements in the table.
@param r1: A Row
@param r2: A Row
@return: A new Row, equal to the first Row with a summed count.
'''
#print >>sys.stderr, "r1:", r1
r1['count'] += r2['count']
return r1
# count how many references each id has
# ((taxid, geneid), row)
counted_human_gene_pubmed = (human_gene_pubmed.reduceByKey(reduce_count))
counted_human_gene_pubmed.take(1)
def merge_two_dicts(x, y):
'''Given two dicts, merge them into a new dict as a shallow copy.'''
z = x.copy()
z.update(y)
return z
# filter the refseq genes to those in the human GRCh38 assembly
# ((taxid, geneid), row)
human_refseq = (gene2refseq.filter(lambda x: x['assembly'].find('GRCh38') >= 0)
.filter(lambda x: x['nucleotide_accession'].find('NC_') >= 0)
.map(lambda x: ((x['taxid'], x['geneid']), x)))
human_refseq_info = (human_refseq.join(gene_info_keyed)
.map(lambda x: (x[0], merge_two_dicts(x[1][0], x[1][1]))))
# join (K,V) and (K,W) -> (K, (V,W)) pairs
# map (K,(V,W)) -> (K,W)
# join the genes with reference counts with the refseq information
human_refseq_pubmed = (counted_human_gene_pubmed.join(human_refseq)
.map(lambda x: ((x[1][0]['count'], x[0][0], x[0][1]), x[1][1])))
#.map(lambda x: x['start_end_pos'] = (x['nucleotide_accession'], x['orientation'], x['start_pos'], x['end_pos']))
def consolidate_start_and_end(r):
'''
Consolidate the start and end rows
from a row.
:param r: (key, {'start_pos': 1000, 'end_pos': 1010})
:return: (key, {'start_end_pos': set((1000, 1010))}
'''
    r[1]['start_end_pos'] = set([(r[1]['nucleotide_accession'], r[1]['orientation'], int(r[1]['start_pos']), int(r[1]['end_pos']))])
return (r[0], r[1])
def reduce_by_start_end_pos(r1,r2):
'''
Reduce all of the rows by their start / send positions.
:param r: {'start_end_pos': set((1000, 1010))}
'''
#print >>sys.stderr, "r1:", r1
r1['start_end_pos'] = r1['start_end_pos'].union(r2['start_end_pos'])
return r1
reduced_human_refseq_pubmed = (human_refseq_pubmed.map(consolidate_start_and_end)
.reduceByKey(reduce_by_start_end_pos))
reduced_human_refseq_pubmed = reduced_human_refseq_pubmed.sortByKey(ascending=False)  # sortByKey returns a new RDD, so keep it
reduced_human_refseq_pubmed.take(1)
# take every (chr, orientation, start, end) tuple from the set and create one
# big list out of it
# then convert it all to TSV strings
flattened_human_refseq_pubmed = (reduced_human_refseq_pubmed.flatMap(lambda x: [[x[0][0]] + list(y) for y in x[1]['start_end_pos']])
.map(lambda x: "\t".join(map(str, x))))
flattened_human_refseq_pubmed.saveAsTextFile('/scr/fluidspace/pkerp/projects/goomba/output/genes_by_popularity')
'''
gene_pubmed = sqlContext.sql("select geneid, start_pos, count(*) as cnt from gene_starts group by geneid, start_pos order by cnt desc")
gene_pubmed.take(1)
gene_starts = sqlContext.sql('select gene2refseq.geneid, start_pos, pmid from gene2pubmed, gene2refseq where gene2pubmed.geneid = gene2refseq.geneid')
gene_starts.registerTempTable('gene_starts')
genes_sorted = sqlContext.sql("select tax_id, GeneID, count(*) as cnt from gene2refseq order by cnt desc")
gene_pubmed.registerTempTable('gene_pubmed')
gene_starts = sqlContext.sql('select gene2refseq.geneid, start_pos from gene2pubmed, gene2refseq where gene2pubmed.geneid = gene2refseq.geneid')
result.take(1)
gene_info = (sc.textFile(op.join(base_dir, "data/gene_info"))
.filter(lambda x: x[0] !== '#')
.map(lambda x: x.split('\t'))
.map(lambda p: Row(tax_id=int(p[0]),
GeneID=int(p[1]),
Symbol=p[2],
LocusTag=p[3],
Synonyms=p[4],
dbXrefs=p[5],
chromosome=p[6],
map_location=p[7],
description=p[8],
type_of_gene=p[9],
Symbol_from_nomenclature_authority=p[10],
Full_name_from_nomenclature_authority=p[11],
Nomenclature_status=p[12],
Other_designations=p[13],
Modification_date=p[14])))
'''
| mit | -8,093,512,945,961,685,000 | 42.34375 | 151 | 0.537563 | false | 3.237628 | false | false | false |
trianam/tests | python/vtkFileWrite.py | 2 | 1091 | #!/bin/python
import vtk
import vtk.util.colors
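# Build a single-tetrahedron unstructured grid, write it to test.vtu, then render it.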
points = vtk.vtkPoints()
points.InsertNextPoint(0, 0, 0)
points.InsertNextPoint(0, 1, 0)
points.InsertNextPoint(1, 0, 0)
points.InsertNextPoint(0, 0, 1)
unstructuredGrid = vtk.vtkUnstructuredGrid()
unstructuredGrid.SetPoints(points)
unstructuredGrid.InsertNextCell(vtk.VTK_TETRA, 4, [0,1,2,3])
writer = vtk.vtkXMLUnstructuredGridWriter()
writer.SetFileName('test.vtu')
writer.SetInputData(unstructuredGrid)
writer.Update()
writer.Write()
mapper = vtk.vtkDataSetMapper()
mapper.SetInputData(unstructuredGrid)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(vtk.util.colors.banana)
renderer = vtk.vtkRenderer()
renderWindow = vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
renderer.AddActor(actor)
renderer.SetBackground(0.3,0.6,0.3)
renderWindowInteractor.Initialize()
#renderer.ResetCamera()
#renderer.GetActiveCamera().Zoom(1.5)
renderWindow.Render()
renderWindowInteractor.Start()
| gpl-2.0 | -6,554,544,121,934,885,000 | 23.244444 | 60 | 0.80385 | false | 3.030556 | false | true | false |
ThoughtWorksInc/treadmill | treadmill/cli/show.py | 3 | 5434 | """Manage Treadmill app manifest.
"""
import logging
import urllib.request
import urllib.parse
import urllib.error
import click
from treadmill import cli
from treadmill import restclient
from treadmill import context
_LOGGER = logging.getLogger(__name__)
_STATE_FORMATTER = cli.make_formatter(cli.InstanceStatePrettyFormatter)
_ENDPOINT_FORMATTER = cli.make_formatter(cli.EndpointPrettyFormatter)
_APP_FORMATTER = cli.make_formatter(cli.AppPrettyFormatter)
def _show_state(apis, match, finished):
"""Show cell state."""
url = '/state/'
query = []
if match:
query.append(('match', match))
if finished:
query.append(('finished', '1'))
if query:
url += '?' + '&'.join(
[urllib.parse.urlencode([param]) for param in query]
)
response = restclient.get(apis, url)
cli.out(_STATE_FORMATTER(response.json()))
def _show_list(apis, match, states, finished=False):
    """Show list of instances in a given state."""
url = '/state/'
query = []
if match:
query.append(('match', match))
if finished:
query.append(('finished', '1'))
if query:
url += '?' + '&'.join(
[urllib.parse.urlencode([param]) for param in query]
)
response = restclient.get(apis, url)
names = [item['name']
for item in response.json() if item['state'] in states]
for name in names:
print(name)
def _show_endpoints(apis, pattern, endpoint, proto):
"""Show cell endpoints."""
url = '/endpoint/%s' % urllib.parse.quote(pattern)
if endpoint:
if proto:
url += '/' + proto
else:
url += '/*'
url += '/' + endpoint
response = restclient.get(apis, url)
endpoints = [{
'name': end['name'],
'proto': end['proto'],
'endpoint': end['endpoint'],
'hostport': '{0}:{1}'.format(end['host'], end['port'])
} for end in response.json()]
cli.out(_ENDPOINT_FORMATTER(endpoints))
def _show_instance(apis, instance_id):
"""Show instance manifest."""
url = '/instance/%s' % urllib.parse.quote(instance_id)
response = restclient.get(apis, url)
cli.out(_APP_FORMATTER(response.json()))
def init():
"""Return top level command handler."""
ctx = {}
@click.group()
@click.option('--cell', required=True,
envvar='TREADMILL_CELL',
callback=cli.handle_context_opt,
expose_value=False)
@click.option('--api', required=False, help='API url to use.',
metavar='URL',
envvar='TREADMILL_STATEAPI')
def show(api):
"""Show state of scheduled applications."""
ctx['api'] = api
@show.command()
@cli.ON_REST_EXCEPTIONS
@click.option('--match', help='Application name pattern match')
@click.option('--finished', is_flag=True, default=False,
help='Show finished instances.')
def state(match, finished):
"""Show state of Treadmill scheduled instances."""
apis = context.GLOBAL.state_api(ctx['api'])
return _show_state(apis, match, finished)
@show.command()
@cli.ON_REST_EXCEPTIONS
@click.option('--match', help='Application name pattern match')
def pending(match):
"""Show pending instances."""
apis = context.GLOBAL.state_api(ctx['api'])
return _show_list(apis, match, ['pending'])
@show.command()
@cli.ON_REST_EXCEPTIONS
@click.option('--match', help='Application name pattern match')
def running(match):
"""Show running instances."""
apis = context.GLOBAL.state_api(ctx['api'])
return _show_list(apis, match, ['running'])
@show.command()
@cli.ON_REST_EXCEPTIONS
@click.option('--match', help='Application name pattern match')
def finished(match):
"""Show finished instances."""
apis = context.GLOBAL.state_api(ctx['api'])
return _show_list(apis, match, ['finished'], finished=True)
@show.command()
@cli.ON_REST_EXCEPTIONS
@click.option('--match', help='Application name pattern match')
def scheduled(match):
"""Show scheduled instances."""
apis = context.GLOBAL.state_api(ctx['api'])
return _show_list(apis, match, ['running', 'scheduled'])
@show.command(name='all')
@cli.ON_REST_EXCEPTIONS
@click.option('--match', help='Application name pattern match')
def _all(match):
"""Show scheduled instances."""
apis = context.GLOBAL.state_api(ctx['api'])
return _show_list(apis, match, ['pending', 'running', 'scheduled'])
@show.command()
@cli.ON_REST_EXCEPTIONS
@click.argument('pattern')
@click.argument('endpoint', required=False)
@click.argument('proto', required=False)
def endpoints(pattern, endpoint, proto):
"""Show application endpoints."""
apis = context.GLOBAL.state_api(ctx['api'])
return _show_endpoints(apis, pattern, endpoint, proto)
@show.command()
@cli.ON_REST_EXCEPTIONS
@click.argument('instance_id')
def instance(instance_id):
"""Show scheduled instance manifest."""
apis = context.GLOBAL.cell_api(ctx['api'])
return _show_instance(apis, instance_id)
del _all
del running
del scheduled
del pending
del finished
del instance
del state
del endpoints
return show
| apache-2.0 | -5,616,701,384,826,257,000 | 28.058824 | 75 | 0.604343 | false | 3.889764 | false | false | false |
0x1997/webassets | src/webassets/filter/rjsmin/rjsmin.py | 15 | 13044 | #!/usr/bin/env python
# -*- coding: ascii -*-
#
# Copyright 2011
# Andr\xe9 Malo or his licensors, as applicable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
=====================
Javascript Minifier
=====================
Javascript Minifier based on `jsmin.c by Douglas Crockford`_\.
This module is a re-implementation based on the semantics of jsmin.c. Usually
it produces the same results. It differs in the following ways:
- there is no error detection: unterminated string, regex and comment
literals are treated as regular javascript code and minified as such.
- Control characters inside string and regex literals are left untouched; they
are not converted to spaces (nor to \n)
- Newline characters are not allowed inside string and regex literals, except
for line continuations in string literals (ECMA-5).
- rjsmin does not handle streams, but only complete strings. (However, the
module provides a "streamy" interface).
Besides the list above it differs from direct python ports of jsmin.c in
speed. Since most parts of the logic are handled by the regex engine it's way
faster than the original python port by Baruch Even. The speed factor varies
between about 6 and 55 depending on input and python version (it gets faster
the more compressed the input already is). Compared to the speed-refactored
python port by Dave St.Germain the performance gain is less dramatic but still
between 1.2 and 7. See the docs/BENCHMARKS file for details.
rjsmin.c is a reimplementation of rjsmin.py in C and speeds it up even more.
Both python 2 and python 3 are supported.
.. _jsmin.c by Douglas Crockford:
http://www.crockford.com/javascript/jsmin.c
"""
__author__ = "Andr\xe9 Malo"
__author__ = getattr(__author__, 'decode', lambda x: __author__)('latin-1')
__docformat__ = "restructuredtext en"
__license__ = "Apache License, Version 2.0"
__version__ = '1.0.1'
__all__ = ['jsmin', 'jsmin_for_posers']
import re as _re
from webassets.six.moves import map
from webassets.six.moves import zip
def _make_jsmin(extended=True, python_only=False):
"""
Generate JS minifier based on `jsmin.c by Douglas Crockford`_
.. _jsmin.c by Douglas Crockford:
http://www.crockford.com/javascript/jsmin.c
:Parameters:
`extended` : ``bool``
Extended Regexps? (using lookahead and lookbehind). This is faster,
because it can be optimized way more. The regexps used with `extended`
being false are only left here to allow easier porting to platforms
without extended regex features (and for my own reference...)
`python_only` : ``bool``
Use only the python variant. If true, the c extension is not even
tried to be loaded.
:Return: Minifier
:Rtype: ``callable``
"""
# pylint: disable = R0912, R0914, W0612
if not python_only:
try:
import _rjsmin
except ImportError:
pass
else:
return _rjsmin.jsmin
try:
xrange
except NameError:
xrange = range # pylint: disable = W0622
space_chars = r'[\000-\011\013\014\016-\040]'
line_comment = r'(?://[^\r\n]*)'
space_comment = r'(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)'
string1 = \
r'(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|\r)[^\047\\\r\n]*)*\047)'
string2 = r'(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|\r)[^"\\\r\n]*)*")'
strings = r'(?:%s|%s)' % (string1, string2)
charclass = r'(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]*)*\])'
nospecial = r'[^/\\\[\r\n]'
if extended:
regex = r'(?:/(?![\r\n/*])%s*(?:(?:\\[^\r\n]|%s)%s*)*/)' % (
nospecial, charclass, nospecial
)
else:
regex = (
r'(?:/(?:[^*/\\\r\n\[]|%s|\\[^\r\n])%s*(?:(?:\\[^\r\n]|%s)%s*)*/)'
)
regex = regex % (charclass, nospecial, charclass, nospecial)
pre_regex = r'[(,=:\[!&|?{};\r\n]'
space = r'(?:%s|%s)' % (space_chars, space_comment)
newline = r'(?:%s?[\r\n])' % line_comment
def fix_charclass(result):
""" Fixup string of chars to fit into a regex char class """
pos = result.find('-')
if pos >= 0:
result = r'%s%s-' % (result[:pos], result[pos + 1:])
def sequentize(string):
"""
Notate consecutive characters as sequence
(1-4 instead of 1234)
"""
first, last, result = None, None, []
for char in map(ord, string):
if last is None:
first = last = char
elif last + 1 == char:
last = char
else:
result.append((first, last))
first = last = char
if last is not None:
result.append((first, last))
return ''.join(['%s%s%s' % (
chr(first),
last > first + 1 and '-' or '',
last != first and chr(last) or ''
) for first, last in result])
return _re.sub(r'([\000-\040\047])', # for better portability
lambda m: '\\%03o' % ord(m.group(1)), (sequentize(result)
.replace('\\', '\\\\')
.replace('[', '\\[')
.replace(']', '\\]')
)
)
def id_literal_(what):
""" Make id_literal like char class """
match = _re.compile(what).match
result = ''.join([
chr(c) for c in range(127) if not match(chr(c))
])
return '[^%s]' % fix_charclass(result)
def not_id_literal_(keep):
""" Make negated id_literal like char class """
match = _re.compile(id_literal_(keep)).match
result = ''.join([
chr(c) for c in range(127) if not match(chr(c))
])
return r'[%s]' % fix_charclass(result)
if extended:
id_literal = id_literal_(r'[a-zA-Z0-9_$]')
id_literal_open = id_literal_(r'[a-zA-Z0-9_${\[(+-]')
id_literal_close = id_literal_(r'[a-zA-Z0-9_$}\])"\047+-]')
space_sub = _re.compile((
r'([^\047"/\000-\040]+)'
r'|(%(strings)s[^\047"/\000-\040]*)'
r'|(?:(?<=%(pre_regex)s)%(space)s*(%(regex)s[^\047"/\000-\040]*))'
r'|(?<=%(id_literal_close)s)'
r'%(space)s*(?:(%(newline)s)%(space)s*)+'
r'(?=%(id_literal_open)s)'
r'|(?<=%(id_literal)s)(%(space)s)+(?=%(id_literal)s)'
r'|%(space)s+'
r'|(?:%(newline)s%(space)s*)+'
) % locals()).sub
def space_subber(match):
""" Substitution callback """
# pylint: disable = C0321
groups = match.groups()
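            # Alternatives of space_sub above: 0=plain run, 1=string literal,
            # 2=regex literal, 3=newline to keep, 4=space to collapse.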
if groups[0]: return groups[0]
elif groups[1]: return groups[1]
elif groups[2]: return groups[2]
elif groups[3]: return '\n'
elif groups[4]: return ' '
return ''
def jsmin(script): # pylint: disable = W0621
r"""
Minify javascript based on `jsmin.c by Douglas Crockford`_\.
Instead of parsing the stream char by char, it uses a regular
expression approach which minifies the whole script with one big
substitution regex.
.. _jsmin.c by Douglas Crockford:
http://www.crockford.com/javascript/jsmin.c
:Parameters:
`script` : ``str``
Script to minify
:Return: Minified script
:Rtype: ``str``
"""
return space_sub(space_subber, '\n%s\n' % script).strip()
else:
not_id_literal = not_id_literal_(r'[a-zA-Z0-9_$]')
not_id_literal_open = not_id_literal_(r'[a-zA-Z0-9_${\[(+-]')
not_id_literal_close = not_id_literal_(r'[a-zA-Z0-9_$}\])"\047+-]')
space_norm_sub = _re.compile((
r'(%(strings)s)'
r'|(?:(%(pre_regex)s)%(space)s*(%(regex)s))'
r'|(%(space)s)+'
r'|(?:(%(newline)s)%(space)s*)+'
) % locals()).sub
def space_norm_subber(match):
""" Substitution callback """
# pylint: disable = C0321
groups = match.groups()
if groups[0]: return groups[0]
elif groups[1]: return groups[1].replace('\r', '\n') + groups[2]
elif groups[3]: return ' '
elif groups[4]: return '\n'
space_sub1 = _re.compile((
r'[\040\n]?(%(strings)s|%(pre_regex)s%(regex)s)'
r'|\040(%(not_id_literal)s)'
r'|\n(%(not_id_literal_open)s)'
) % locals()).sub
def space_subber1(match):
""" Substitution callback """
groups = match.groups()
return groups[0] or groups[1] or groups[2]
space_sub2 = _re.compile((
r'(%(strings)s)\040?'
r'|(%(pre_regex)s%(regex)s)[\040\n]?'
r'|(%(not_id_literal)s)\040'
r'|(%(not_id_literal_close)s)\n'
) % locals()).sub
def space_subber2(match):
""" Substitution callback """
groups = match.groups()
return groups[0] or groups[1] or groups[2] or groups[3]
def jsmin(script):
r"""
Minify javascript based on `jsmin.c by Douglas Crockford`_\.
Instead of parsing the stream char by char, it uses a regular
expression approach. The script is minified with three passes:
normalization
Control character are mapped to spaces, spaces and newlines
are squeezed and comments are stripped.
space removal 1
Spaces before certain tokens are removed
space removal 2
Spaces after certain tokens are remove
.. _jsmin.c by Douglas Crockford:
http://www.crockford.com/javascript/jsmin.c
:Parameters:
`script` : ``str``
Script to minify
:Return: Minified script
:Rtype: ``str``
"""
return space_sub2(space_subber2,
space_sub1(space_subber1,
space_norm_sub(space_norm_subber, '\n%s\n' % script)
)
).strip()
return jsmin
jsmin = _make_jsmin()
def jsmin_for_posers(script):
r"""
Minify javascript based on `jsmin.c by Douglas Crockford`_\.
Instead of parsing the stream char by char, it uses a regular
expression approach which minifies the whole script with one big
substitution regex.
.. _jsmin.c by Douglas Crockford:
http://www.crockford.com/javascript/jsmin.c
:Warning: This function is the digest of a _make_jsmin() call. It just
utilizes the resulting regex. It's just for fun here and may
vanish any time. Use the `jsmin` function instead.
:Parameters:
`script` : ``str``
Script to minify
:Return: Minified script
:Rtype: ``str``
"""
def subber(match):
""" Substitution callback """
groups = match.groups()
return (
groups[0] or
groups[1] or
groups[2] or
(groups[3] and '\n') or
(groups[4] and ' ') or
''
)
return _re.sub(
r'([^\047"/\000-\040]+)|((?:(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]|\r?'
r'\n|\r)[^\047\\\r\n]*)*\047)|(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|'
r'\r)[^"\\\r\n]*)*"))[^\047"/\000-\040]*)|(?:(?<=[(,=:\[!&|?{};\r\n]'
r')(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/'
r'))*((?:/(?![\r\n/*])[^/\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*'
r'(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]*'
r'))|(?<=[^\000-!#%&(*,./:-@\[\\^`{|~])(?:[\000-\011\013\014\016-\04'
r'0]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*(?:((?:(?://[^\r\n]*)?[\r\n'
r']))(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)'
r'*/))*)+(?=[^\000-#%-\047)*,./:-@\\-^`|-~])|(?<=[^\000-#%-,./:-@\[-'
r'^`{-~-])((?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*'
r']*\*+)*/)))+(?=[^\000-#%-,./:-@\[-^`{-~-])|(?:[\000-\011\013\014\0'
r'16-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))+|(?:(?:(?://[^\r\n]*)'
r'?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]'
r'*\*+)*/))*)+', subber, '\n%s\n' % script
).strip()
if __name__ == '__main__':
import sys as _sys
_sys.stdout.write(jsmin(_sys.stdin.read()))
| bsd-2-clause | 1,364,870,503,790,118,100 | 35.847458 | 78 | 0.511116 | false | 3.395991 | false | false | false |
induane/pazar | src/pazar/webapp/migrations/0001_initial.py | 1 | 5684 | # Generated by Django 2.2.1 on 2019-05-08 12:18
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import pazar.utils.html
import pazar.webapp.models.mixins
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('id', models.UUIDField(db_index=True, default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('public_key', models.TextField()),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'ordering': ('last_name', 'first_name'),
},
bases=(models.Model, pazar.webapp.models.mixins.BaseMixin),
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='Image',
fields=[
('id', models.UUIDField(db_index=True, default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('thumbnail', models.BinaryField()),
('filename', models.CharField(db_index=True, max_length=1024)),
('name', models.CharField(db_index=True, max_length=255)),
('file_hash', models.CharField(db_index=True, max_length=74)),
('_tags', models.CharField(db_index=True, max_length=1024)),
],
options={
'ordering': ('name', 'filename'),
},
bases=(models.Model, pazar.webapp.models.mixins.BaseMixin),
),
migrations.CreateModel(
name='ProductCategory',
fields=[
('id', models.UUIDField(db_index=True, default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('title', models.CharField(db_index=True, max_length=1024)),
('product_type', models.PositiveSmallIntegerField(choices=[(1, 'physical'), (2, 'digital'), (3, 'service')], default=1)),
('description', models.TextField()),
],
bases=(models.Model, pazar.webapp.models.mixins.BaseMixin),
),
migrations.CreateModel(
name='TextHash',
fields=[
('checksum', models.CharField(editable=False, max_length=64, primary_key=True, serialize=False, verbose_name='Content Checksum')),
('_text', models.BinaryField(default=b'')),
],
options={
'unique_together': {('checksum', '_text')},
},
bases=(models.Model, pazar.webapp.models.mixins.BaseMixin),
),
migrations.CreateModel(
name='Listing',
fields=[
('id', models.UUIDField(db_index=True, default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('title', models.CharField(db_index=True, max_length=1024)),
('_page_text', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='webapp.TextHash')),
('product_category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='webapp.ProductCategory')),
('seller', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
bases=(models.Model, pazar.webapp.models.mixins.BaseMixin, pazar.utils.html.RstMixin),
),
]
| gpl-3.0 | 2,336,907,514,226,991,000 | 58.208333 | 329 | 0.614708 | false | 4.232316 | false | false | false |
moagstar/python-uncompyle6 | uncompyle6/parsers/parse15.py | 1 | 1053 | # Copyright (c) 2016 Rocky Bernstein
# Copyright (c) 2000-2002 by hartmut Goebel <[email protected]>
from spark_parser import DEFAULT_DEBUG as PARSER_DEFAULT_DEBUG
from uncompyle6.parser import PythonParserSingle
from uncompyle6.parsers.parse21 import Python21Parser
class Python15Parser(Python21Parser):
def __init__(self, debug_parser=PARSER_DEFAULT_DEBUG):
super(Python15Parser, self).__init__(debug_parser)
self.customized = {}
def p_import15(self, args):
"""
importstmt ::= filler IMPORT_NAME STORE_FAST
importstmt ::= filler IMPORT_NAME STORE_NAME
importfrom ::= filler IMPORT_NAME importlist
importfrom ::= filler filler IMPORT_NAME importlist POP_TOP
importlist ::= importlist IMPORT_FROM
importlist ::= IMPORT_FROM
"""
class Python15ParserSingle(Python21Parser, PythonParserSingle):
pass
if __name__ == '__main__':
# Check grammar
p = Python15Parser()
p.checkGrammar()
p.dumpGrammar()
# local variables:
# tab-width: 4
| mit | 1,067,095,209,950,221,700 | 28.25 | 70 | 0.68471 | false | 3.581633 | false | false | false |
r-o-b-b-i-e/pootle | pootle/apps/pootle_store/models.py | 1 | 47262 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import datetime
import operator
from hashlib import md5
from collections import OrderedDict
from translate.filters.decorators import Category
from translate.storage import base
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.core.validators import MinValueValidator
from django.db import models
from django.db.models import F
from django.template.defaultfilters import truncatechars
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.http import urlquote
from pootle.core.contextmanagers import update_data_after
from pootle.core.delegate import data_tool, format_syncers, format_updaters
from pootle.core.log import (
TRANSLATION_ADDED, TRANSLATION_CHANGED, TRANSLATION_DELETED,
UNIT_ADDED, UNIT_DELETED, UNIT_OBSOLETE, UNIT_RESURRECTED,
STORE_ADDED, STORE_DELETED, STORE_OBSOLETE,
MUTE_QUALITYCHECK, UNMUTE_QUALITYCHECK,
action_log, store_log)
from pootle.core.mixins import CachedTreeItem
from pootle.core.models import Revision
from pootle.core.search import SearchBroker
from pootle.core.signals import update_data
from pootle.core.storage import PootleFileSystemStorage
from pootle.core.url_helpers import (
get_editor_filter, split_pootle_path, to_tp_relative_path)
from pootle.core.utils import dateformat
from pootle.core.utils.aggregate import max_column
from pootle.core.utils.multistring import PLURAL_PLACEHOLDER, SEPARATOR
from pootle.core.utils.timezone import datetime_min, make_aware
from pootle.i18n.gettext import ugettext_lazy as _
from pootle_format.models import Format
from pootle_misc.checks import check_names
from pootle_misc.util import import_func
from pootle_statistics.models import (Submission, SubmissionFields,
SubmissionTypes)
from .constants import (
DEFAULT_PRIORITY, FUZZY, NEW, OBSOLETE, POOTLE_WINS,
TRANSLATED, UNTRANSLATED)
from .fields import MultiStringField, TranslationStoreField
from .managers import StoreManager, SuggestionManager, UnitManager
from .store.deserialize import StoreDeserialization
from .store.serialize import StoreSerialization
from .util import SuggestionStates, vfolders_installed
TM_BROKER = None
def get_tm_broker():
global TM_BROKER
if TM_BROKER is None:
TM_BROKER = SearchBroker()
return TM_BROKER
# # # # # # # # Quality Check # # # # # # #
class QualityCheck(models.Model):
"""Database cache of results of qualitychecks on unit."""
name = models.CharField(max_length=64, db_index=True)
unit = models.ForeignKey("pootle_store.Unit", db_index=True)
category = models.IntegerField(null=False, default=Category.NO_CATEGORY)
message = models.TextField()
false_positive = models.BooleanField(default=False, db_index=True)
def __unicode__(self):
return self.name
@property
def display_name(self):
return check_names.get(self.name, self.name)
@classmethod
def delete_unknown_checks(cls):
unknown_checks = QualityCheck.objects \
.exclude(name__in=check_names.keys())
unknown_checks.delete()
# # # # # # # # # Suggestion # # # # # # # #
class Suggestion(models.Model, base.TranslationUnit):
"""Suggested translation for a :cls:`~pootle_store.models.Unit`, provided
by users or automatically generated after a merge.
"""
target_f = MultiStringField()
target_hash = models.CharField(max_length=32, db_index=True)
unit = models.ForeignKey('pootle_store.Unit')
user = models.ForeignKey(settings.AUTH_USER_MODEL, null=False,
related_name='suggestions', db_index=True)
reviewer = models.ForeignKey(settings.AUTH_USER_MODEL, null=True,
related_name='reviews', db_index=True)
translator_comment_f = models.TextField(null=True, blank=True)
state_choices = [
(SuggestionStates.PENDING, _('Pending')),
(SuggestionStates.ACCEPTED, _('Accepted')),
(SuggestionStates.REJECTED, _('Rejected')),
]
state = models.CharField(max_length=16, default=SuggestionStates.PENDING,
null=False, choices=state_choices, db_index=True)
creation_time = models.DateTimeField(db_index=True, null=True)
review_time = models.DateTimeField(null=True, db_index=True)
objects = SuggestionManager()
# # # # # # # # # # # # # # Properties # # # # # # # # # # # # # # # # # #
@property
def _target(self):
return self.target_f
@_target.setter
def _target(self, value):
self.target_f = value
self._set_hash()
@property
def _source(self):
return self.unit._source
@property
    def translator_comment(self):
return self.translator_comment_f
@translator_comment.setter
def translator_comment(self, value):
self.translator_comment_f = value
self._set_hash()
# # # # # # # # # # # # # # Methods # # # # # # # # # # # # # # # # # # #
def __unicode__(self):
return unicode(self.target)
def _set_hash(self):
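        # The hash covers the target plus, when present, the translator comment.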
string = self.translator_comment_f
if string:
string = self.target_f + SEPARATOR + string
else:
string = self.target_f
self.target_hash = md5(string.encode("utf-8")).hexdigest()
# # # # # # # # Unit # # # # # # # # # #
wordcount_f = import_func(settings.POOTLE_WORDCOUNT_FUNC)
def count_words(strings):
wordcount = 0
for string in strings:
wordcount += wordcount_f(string)
return wordcount
def stringcount(string):
try:
return len(string.strings)
except AttributeError:
return 1
class Unit(models.Model, base.TranslationUnit):
store = models.ForeignKey("pootle_store.Store", db_index=True)
index = models.IntegerField(db_index=True)
unitid = models.TextField(editable=False)
unitid_hash = models.CharField(max_length=32, db_index=True,
editable=False)
source_f = MultiStringField(null=True)
source_hash = models.CharField(max_length=32, db_index=True,
editable=False)
source_wordcount = models.SmallIntegerField(default=0, editable=False)
source_length = models.SmallIntegerField(db_index=True, default=0,
editable=False)
target_f = MultiStringField(null=True, blank=True)
target_wordcount = models.SmallIntegerField(default=0, editable=False)
target_length = models.SmallIntegerField(db_index=True, default=0,
editable=False)
developer_comment = models.TextField(null=True, blank=True)
translator_comment = models.TextField(null=True, blank=True)
locations = models.TextField(null=True, editable=False)
context = models.TextField(null=True, editable=False)
state = models.IntegerField(null=False, default=UNTRANSLATED,
db_index=True)
revision = models.IntegerField(null=False, default=0, db_index=True,
blank=True)
# Metadata
creation_time = models.DateTimeField(auto_now_add=True, db_index=True,
editable=False, null=True)
mtime = models.DateTimeField(auto_now=True, db_index=True, editable=False)
# unit translator
submitted_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True,
db_index=True, related_name='submitted')
submitted_on = models.DateTimeField(db_index=True, null=True)
commented_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True,
db_index=True, related_name='commented')
commented_on = models.DateTimeField(db_index=True, null=True)
# reviewer: who has accepted suggestion or removed FUZZY
# None if translation has been submitted by approved translator
reviewed_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True,
db_index=True, related_name='reviewed')
reviewed_on = models.DateTimeField(db_index=True, null=True)
objects = UnitManager()
simple_objects = models.Manager()
class Meta(object):
unique_together = (
('store', 'unitid_hash'),
("store", "state", "index", "unitid_hash"))
get_latest_by = 'mtime'
index_together = [
["store", "index"],
["store", "revision"],
["store", "mtime"],
["store", "state"]]
# # # # # # # # # # # # # # Properties # # # # # # # # # # # # # # # # # #
@property
def _source(self):
return self.source_f
@_source.setter
def _source(self, value):
self.source_f = value
self._source_updated = True
@property
def _target(self):
return self.target_f
@_target.setter
def _target(self, value):
self.target_f = value
self._target_updated = True
# # # # # # # # # # # # # Class & static methods # # # # # # # # # # # # #
@classmethod
def max_revision(cls):
"""Returns the max revision number across all units."""
return max_column(cls.objects.all(), 'revision', 0)
# # # # # # # # # # # # # # Methods # # # # # # # # # # # # # # # # # # #
def __unicode__(self):
# FIXME: consider using unit id instead?
return unicode(self.source)
def __str__(self):
return str(self.convert())
def __init__(self, *args, **kwargs):
super(Unit, self).__init__(*args, **kwargs)
self._rich_source = None
self._source_updated = False
self._rich_target = None
self._target_updated = False
self._state_updated = False
self._comment_updated = False
self._auto_translated = False
self._encoding = 'UTF-8'
def delete(self, *args, **kwargs):
action_log(user='system', action=UNIT_DELETED,
lang=self.store.translation_project.language.code,
unit=self.id, translation='', path=self.store.pootle_path)
super(Unit, self).delete(*args, **kwargs)
def save(self, *args, **kwargs):
created = self.id is None
source_updated = kwargs.pop("source_updated", None) or self._source_updated
target_updated = kwargs.pop("target_updated", None) or self._target_updated
state_updated = kwargs.pop("state_updated", None) or self._state_updated
auto_translated = (
kwargs.pop("auto_translated", None)
or self._auto_translated)
comment_updated = (
kwargs.pop("comment_updated", None)
or self._comment_updated)
action = kwargs.pop("action", None) or getattr(self, "_save_action", None)
if not hasattr(self, '_log_user'):
User = get_user_model()
self._log_user = User.objects.get_system_user()
user = kwargs.pop("user", self._log_user)
if created:
action = UNIT_ADDED
if source_updated:
# update source related fields
self.source_hash = md5(self.source_f.encode("utf-8")).hexdigest()
self.source_length = len(self.source_f)
self.update_wordcount(auto_translate=True)
if target_updated:
# update target related fields
self.target_wordcount = count_words(self.target_f.strings)
self.target_length = len(self.target_f)
if filter(None, self.target_f.strings):
if self.state == UNTRANSLATED:
self.state = TRANSLATED
action = action or TRANSLATION_ADDED
else:
action = action or TRANSLATION_CHANGED
else:
action = TRANSLATION_DELETED
# if it was TRANSLATED then set to UNTRANSLATED
if self.state > FUZZY:
self.state = UNTRANSLATED
            # Updating a unit from the .po file sets its revision property to
            # a new value (the same for all units during its store update);
            # that change doesn't require further sync, but note that
            # auto_translated units do require further sync
revision = kwargs.pop('revision', None)
if revision is not None and not auto_translated:
self.revision = revision
elif target_updated or state_updated or comment_updated:
self.revision = Revision.incr()
if not created and action:
action_log(
user=self._log_user,
action=action,
lang=self.store.translation_project.language.code,
unit=self.id,
translation=self.target_f,
path=self.store.pootle_path)
was_fuzzy = (
state_updated and self.state == TRANSLATED
and action == TRANSLATION_CHANGED
and not target_updated)
if was_fuzzy:
            # set reviewer data only if FUZZY has been removed and the
            # translation hasn't been updated
self.reviewed_on = timezone.now()
self.reviewed_by = self._log_user
elif self.state == FUZZY:
# clear reviewer data if unit has been marked as FUZZY
self.reviewed_on = None
self.reviewed_by = None
elif self.state == UNTRANSLATED:
# clear reviewer and translator data if translation
# has been deleted
self.reviewed_on = None
self.reviewed_by = None
self.submitted_by = None
self.submitted_on = None
super(Unit, self).save(*args, **kwargs)
if action and action == UNIT_ADDED:
action_log(
user=self._log_user,
action=action,
lang=self.store.translation_project.language.code,
unit=self.id,
translation=self.target_f,
path=self.store.pootle_path)
self.add_initial_submission(user=user)
if source_updated or target_updated:
if not (created and self.state == UNTRANSLATED):
self.update_qualitychecks()
if self.istranslated():
self.update_tmserver()
# done processing source/target update remove flag
self._source_updated = False
self._target_updated = False
self._state_updated = False
self._comment_updated = False
self._auto_translated = False
update_data.send(
self.store.__class__, instance=self.store)
def get_absolute_url(self):
return self.store.get_absolute_url()
def get_translate_url(self):
return (
"%s%s"
% (self.store.get_translate_url(),
'#unit=%s' % unicode(self.id)))
def get_search_locations_url(self):
(proj_code, dir_path,
filename) = split_pootle_path(self.store.pootle_path)[1:]
return u''.join([
reverse('pootle-project-translate',
args=[proj_code, dir_path, filename]),
get_editor_filter(search=self.locations, sfields='locations'),
])
def get_screenshot_url(self):
prefix = self.store.translation_project.\
project.screenshot_search_prefix
if prefix:
return prefix + urlquote(self.source_f)
def is_accessible_by(self, user):
"""Returns `True` if the current unit is accessible by `user`."""
if user.is_superuser:
return True
from pootle_project.models import Project
user_projects = Project.accessible_by_user(user)
return self.store.translation_project.project.code in user_projects
def add_initial_submission(self, user=None):
if self.istranslated() or self.isfuzzy():
Submission.objects.create(
creation_time=self.creation_time,
translation_project=self.store.translation_project,
submitter=user or self._log_user,
unit=self,
store=self.store,
type=SubmissionTypes.UNIT_CREATE,
field=SubmissionFields.TARGET,
new_value=self.target,
)
@cached_property
def unit_syncer(self):
return self.store.syncer.unit_sync_class(self)
def convert(self, unitclass=None):
"""Convert to a unit of type :param:`unitclass` retaining as much
information from the database as the target format can support.
"""
return self.unit_syncer.convert(unitclass)
def sync(self, unit):
"""Sync in file unit with translations from the DB."""
changed = False
if not self.isobsolete() and unit.isobsolete():
unit.resurrect()
changed = True
if unit.target != self.target:
if unit.hasplural():
nplurals = self.store.translation_project.language.nplurals
target_plurals = len(self.target.strings)
strings = self.target.strings
if target_plurals < nplurals:
strings.extend([u'']*(nplurals - target_plurals))
if unit.target.strings != strings:
unit.target = strings
changed = True
else:
unit.target = self.target
changed = True
self_notes = self.getnotes(origin="translator")
unit_notes = unit.getnotes(origin="translator")
if unit_notes != (self_notes or ''):
if self_notes != '':
unit.addnote(self_notes, origin="translator",
position="replace")
else:
unit.removenotes()
changed = True
if unit.isfuzzy() != self.isfuzzy():
unit.markfuzzy(self.isfuzzy())
changed = True
if self.isobsolete() and not unit.isobsolete():
unit.makeobsolete()
changed = True
return changed
def update(self, unit, user=None):
"""Update in-DB translation from the given :param:`unit`.
:param user: User to attribute updates to.
:rtype: bool
:return: True if the new :param:`unit` differs from the current unit.
Two units differ when any of the fields differ (source, target,
translator/developer comments, locations, context, status...).
"""
changed = False
if user is None:
User = get_user_model()
user = User.objects.get_system_user()
update_source = (
self.source != unit.source
or (len(self.source.strings)
!= stringcount(unit.source))
or (self.hasplural()
!= unit.hasplural()))
if update_source:
if unit.hasplural() and len(unit.source.strings) == 1:
self.source = [unit.source, PLURAL_PLACEHOLDER]
else:
self.source = unit.source
changed = True
update_target = (
self.target != unit.target
or (len(self.target.strings)
!= stringcount(unit.target)))
if update_target:
notempty = filter(None, self.target_f.strings)
self.target = unit.target
self.submitted_by = user
self.submitted_on = timezone.now()
if filter(None, self.target_f.strings) or notempty:
                # FIXME: we need to do this because we discard nplurals
                # for empty plurals
changed = True
notes = unit.getnotes(origin="developer")
if (self.developer_comment != notes and
(self.developer_comment or notes)):
self.developer_comment = notes or None
changed = True
notes = unit.getnotes(origin="translator")
if (self.translator_comment != notes and
(self.translator_comment or notes)):
self.translator_comment = notes or None
changed = True
self._comment_updated = True
locations = "\n".join(unit.getlocations())
if self.locations != locations and (self.locations or locations):
self.locations = locations or None
changed = True
context = unit.getcontext()
if self.context != unit.getcontext() and (self.context or context):
self.context = context or None
changed = True
if self.isfuzzy() != unit.isfuzzy():
self.markfuzzy(unit.isfuzzy())
changed = True
if self.isobsolete() != unit.isobsolete():
if unit.isobsolete():
self.makeobsolete()
else:
self.resurrect(unit.isfuzzy())
changed = True
if self.unitid != unit.getid():
self.unitid = unicode(unit.getid()) or unicode(unit.source)
self.unitid_hash = md5(self.unitid.encode("utf-8")).hexdigest()
changed = True
return changed
def update_wordcount(self, auto_translate=False):
"""Updates the source wordcount for a unit.
:param auto_translate: when set to `True`, it will copy the
source string into the target field.
"""
self.source_wordcount = count_words(self.source_f.strings)
if self.source_wordcount == 0:
            # We can't set the actual wordcount to zero, since the unit
            # would essentially disappear from statistics; for such units
            # set the word count to 1 instead
self.source_wordcount = 1
if (auto_translate
and not bool(filter(None, self.target_f.strings))):
# auto-translate untranslated strings
self.target = self.source
self.state = FUZZY
self._auto_translated = True
def update_qualitychecks(self, keep_false_positives=False):
"""Run quality checks and store result in the database.
:param keep_false_positives: when set to `False`, it will activate
(unmute) any existing false positive checks.
:return: `True` if quality checks were updated or `False` if they
            were left unchanged.
"""
unmute_list = []
result = False
checks = self.qualitycheck_set.all()
existing = {}
for check in checks.values('name', 'false_positive', 'id'):
existing[check['name']] = {
'false_positive': check['false_positive'],
'id': check['id'],
}
# no checks if unit is untranslated
if not self.target:
if existing:
self.qualitycheck_set.all().delete()
return True
return False
checker = self.store.translation_project.checker
qc_failures = checker.run_filters(self, categorised=True)
checks_to_add = []
for name in qc_failures.iterkeys():
if name in existing:
# keep false-positive checks if check is active
if (existing[name]['false_positive'] and
not keep_false_positives):
unmute_list.append(name)
del existing[name]
continue
message = qc_failures[name]['message']
category = qc_failures[name]['category']
checks_to_add.append(
QualityCheck(
unit=self,
name=name,
message=message,
category=category))
result = True
if checks_to_add:
self.qualitycheck_set.bulk_create(checks_to_add)
if not keep_false_positives and unmute_list:
self.qualitycheck_set.filter(name__in=unmute_list) \
.update(false_positive=False)
# delete inactive checks
if existing:
self.qualitycheck_set.filter(name__in=existing).delete()
changed = result or bool(unmute_list) or bool(existing)
return changed
def get_qualitychecks(self):
return self.qualitycheck_set.all()
def get_critical_qualitychecks(self):
return self.get_qualitychecks().filter(category=Category.CRITICAL)
def get_active_critical_qualitychecks(self):
return self.get_active_qualitychecks().filter(
category=Category.CRITICAL)
def get_warning_qualitychecks(self):
return self.get_qualitychecks().exclude(category=Category.CRITICAL)
def get_active_qualitychecks(self):
return self.qualitycheck_set.filter(false_positive=False)
# # # # # # # # # # # Related Submissions # # # # # # # # # # # #
def get_edits(self):
return self.submission_set.get_unit_edits()
def get_comments(self):
return self.submission_set.get_unit_comments()
def get_state_changes(self):
return self.submission_set.get_unit_state_changes()
def get_suggestion_reviews(self):
return self.submission_set.get_unit_suggestion_reviews()
# # # # # # # # # # # TranslationUnit # # # # # # # # # # # # # #
def update_tmserver(self):
obj = {
'id': self.id,
# 'revision' must be an integer for statistical queries to work
'revision': self.revision,
'project': self.store.translation_project.project.fullname,
'path': self.store.pootle_path,
'source': self.source,
'target': self.target,
'username': '',
'fullname': '',
'email_md5': '',
}
if self.submitted_on:
obj.update({
'iso_submitted_on': self.submitted_on.isoformat(),
'display_submitted_on': dateformat.format(self.submitted_on),
})
if self.submitted_by:
obj.update({
'username': self.submitted_by.username,
'fullname': self.submitted_by.full_name,
'email_md5': md5(self.submitted_by.email).hexdigest(),
})
get_tm_broker().update(self.store.translation_project.language.code,
obj)
def get_tm_suggestions(self):
return get_tm_broker().search(self)
# # # # # # # # # # # TranslationUnit # # # # # # # # # # # # # #
def getnotes(self, origin=None):
if origin is None:
notes = ''
if self.translator_comment is not None:
notes += self.translator_comment
if self.developer_comment is not None:
notes += self.developer_comment
return notes
elif origin == "translator":
return self.translator_comment or ''
elif origin in ["programmer", "developer", "source code"]:
return self.developer_comment or ''
else:
raise ValueError("Comment type not valid")
def addnote(self, text, origin=None, position="append"):
if not (text and text.strip()):
return
if origin in ["programmer", "developer", "source code"]:
self.developer_comment = text
else:
self.translator_comment = text
def getid(self):
return self.unitid
def setid(self, value):
self.unitid = value
self.unitid_hash = md5(self.unitid.encode("utf-8")).hexdigest()
def getlocations(self):
if self.locations is None:
return []
return filter(None, self.locations.split('\n'))
def addlocation(self, location):
if self.locations is None:
self.locations = ''
self.locations += location + "\n"
def getcontext(self):
return self.context
def setcontext(self, value):
self.context = value
def isfuzzy(self):
return self.state == FUZZY
def markfuzzy(self, value=True):
if self.state <= OBSOLETE:
return
if value != (self.state == FUZZY):
# when Unit toggles its FUZZY state the number of translated words
# also changes
self._state_updated = True
            # that's an additional check,
            # but leave the old value in case _save_action is set
if not hasattr(self, '_save_action'):
self._save_action = TRANSLATION_CHANGED
if value:
self.state = FUZZY
elif self.state <= FUZZY:
if filter(None, self.target_f.strings):
self.state = TRANSLATED
else:
self.state = UNTRANSLATED
# that's additional check
# but leave old value in case _save_action is set
if not hasattr(self, '_save_action'):
self._save_action = TRANSLATION_DELETED
def hasplural(self):
return (self.source is not None and
(len(self.source.strings) > 1 or
hasattr(self.source, "plural") and
self.source.plural))
def isobsolete(self):
return self.state == OBSOLETE
def makeobsolete(self):
if self.state > OBSOLETE:
# when Unit becomes obsolete the cache flags should be updated
self._state_updated = True
self._save_action = UNIT_OBSOLETE
self.state = OBSOLETE
self.index = 0
def resurrect(self, is_fuzzy=False):
if self.state > OBSOLETE:
return
if filter(None, self.target_f.strings):
# when Unit toggles its OBSOLETE state the number of translated
# words or fuzzy words also changes
if is_fuzzy:
self.state = FUZZY
else:
self.state = TRANSLATED
else:
self.state = UNTRANSLATED
self.update_qualitychecks(keep_false_positives=True)
self._state_updated = True
self._save_action = UNIT_RESURRECTED
def istranslated(self):
return self.state >= TRANSLATED
# # # # # # # # # # # Suggestions # # # # # # # # # # # # # # # # #
def get_suggestions(self):
return self.suggestion_set.pending().select_related('user').all()
def has_critical_checks(self):
return self.qualitycheck_set.filter(
category=Category.CRITICAL,
).exists()
def toggle_qualitycheck(self, check_id, false_positive, user):
check = self.qualitycheck_set.get(id=check_id)
if check.false_positive == false_positive:
return
check.false_positive = false_positive
check.save()
self._log_user = user
if false_positive:
self._save_action = MUTE_QUALITYCHECK
else:
self._save_action = UNMUTE_QUALITYCHECK
# create submission
if false_positive:
sub_type = SubmissionTypes.MUTE_CHECK
else:
sub_type = SubmissionTypes.UNMUTE_CHECK
sub = Submission(creation_time=timezone.now(),
translation_project=self.store.translation_project,
submitter=user, field=SubmissionFields.NONE,
unit=self, store=self.store, type=sub_type,
quality_check=check)
sub.save()
# update timestamp
# log user action
self.save()
def get_terminology(self):
"""get terminology suggestions"""
matcher = self.store.translation_project.gettermmatcher()
if matcher is None:
return []
return matcher.matches(self.source)
def get_last_updated_info(self):
return {
"display_datetime": dateformat.format(self.creation_time),
"iso_datetime": self.creation_time.isoformat(),
"creation_time": int(dateformat.format(self.creation_time, 'U')),
"unit_source": truncatechars(self, 50),
"unit_url": self.get_translate_url(),
}
# # # # # # # # # # # Store # # # # # # # # # # # # # #
def validate_no_slashes(value):
if '/' in value:
raise ValidationError('Store name cannot contain "/" characters')
if '\\' in value:
raise ValidationError('Store name cannot contain "\\" characters')
# Needed to alter storage location in tests
fs = PootleFileSystemStorage()
class Store(models.Model, CachedTreeItem, base.TranslationStore):
"""A model representing a translation store (i.e. a PO or XLIFF file)."""
UnitClass = Unit
Name = "Model Store"
is_dir = False
file = TranslationStoreField(max_length=255, storage=fs, db_index=True,
null=False, editable=False)
parent = models.ForeignKey('pootle_app.Directory',
related_name='child_stores', db_index=True,
editable=False)
translation_project_fk = 'pootle_translationproject.TranslationProject'
translation_project = models.ForeignKey(translation_project_fk,
related_name='stores',
db_index=True, editable=False)
filetype = models.ForeignKey(
Format,
related_name='stores',
null=True,
blank=True,
db_index=True)
is_template = models.BooleanField(default=False)
# any changes to the `pootle_path` field may require updating the schema
# see migration 0007_case_sensitive_schema.py
pootle_path = models.CharField(max_length=255, null=False, unique=True,
db_index=True, verbose_name=_("Path"))
tp_path = models.CharField(
max_length=255,
null=True,
blank=True,
db_index=True,
verbose_name=_("Path"))
# any changes to the `name` field may require updating the schema
# see migration 0007_case_sensitive_schema.py
name = models.CharField(max_length=128, null=False, editable=False,
validators=[validate_no_slashes])
file_mtime = models.DateTimeField(default=datetime_min)
state = models.IntegerField(null=False, default=NEW, editable=False,
db_index=True)
creation_time = models.DateTimeField(auto_now_add=True, db_index=True,
editable=False, null=True)
last_sync_revision = models.IntegerField(db_index=True, null=True,
blank=True)
obsolete = models.BooleanField(default=False)
# this is calculated from virtualfolders if installed and linked
priority = models.FloatField(
db_index=True, default=1,
validators=[MinValueValidator(0)])
objects = StoreManager()
simple_objects = models.Manager()
class Meta(object):
ordering = ['pootle_path']
index_together = [
["translation_project", "is_template"],
["translation_project", "pootle_path", "is_template", "filetype"]]
unique_together = (
('parent', 'name'),
("obsolete", "translation_project", "tp_path"))
# # # # # # # # # # # # # # Properties # # # # # # # # # # # # # # # # # #
@property
def code(self):
return self.name.replace('.', '-')
@property
def tp(self):
return self.translation_project
@property
def real_path(self):
return self.file.name
@property
def has_terminology(self):
"""is this a project specific terminology store?"""
# TODO: Consider if this should check if the store belongs to a
# terminology project. Probably not, in case this might be called over
# several files in a project.
return self.name.startswith('pootle-terminology')
@property
def units(self):
return self.unit_set.filter(state__gt=OBSOLETE).order_by('index')
@units.setter
def units(self, value):
"""Null setter to avoid tracebacks if :meth:`TranslationStore.__init__`
is called.
"""
pass
# # # # # # # # # # # # # # Methods # # # # # # # # # # # # # # # # # # #
@cached_property
def path(self):
"""Returns just the path part omitting language and project codes.
If the `pootle_path` of a :cls:`Store` object `store` is
`/af/project/dir1/dir2/file.po`, `store.path` will return
`dir1/dir2/file.po`.
"""
return to_tp_relative_path(self.pootle_path)
def __init__(self, *args, **kwargs):
super(Store, self).__init__(*args, **kwargs)
def __unicode__(self):
return unicode(self.pootle_path)
def __str__(self):
return str(self.syncer.convert())
def save(self, *args, **kwargs):
created = not self.id
self.pootle_path = self.parent.pootle_path + self.name
self.tp_path = self.parent.tp_path + self.name
# Force validation of fields.
self.full_clean()
super(Store, self).save(*args, **kwargs)
if created:
store_log(user='system', action=STORE_ADDED,
path=self.pootle_path, store=self.id)
def delete(self, *args, **kwargs):
store_log(user='system', action=STORE_DELETED,
path=self.pootle_path, store=self.id)
lang = self.translation_project.language.code
for unit in self.unit_set.iterator():
action_log(user='system', action=UNIT_DELETED, lang=lang,
unit=unit.id, translation='', path=self.pootle_path)
super(Store, self).delete(*args, **kwargs)
def calculate_priority(self):
if not vfolders_installed():
return DEFAULT_PRIORITY
from virtualfolder.models import VirtualFolder
vfolders = VirtualFolder.objects
priority = (
vfolders.filter(stores=self)
.aggregate(priority=models.Max("priority"))["priority"])
if priority is None:
return DEFAULT_PRIORITY
return priority
def set_priority(self, priority=None):
priority = (
self.calculate_priority()
if priority is None
else priority)
if priority != self.priority:
Store.objects.filter(pk=self.pk).update(priority=priority)
def makeobsolete(self):
"""Make this store and all its units obsolete."""
store_log(user='system', action=STORE_OBSOLETE,
path=self.pootle_path, store=self.id)
lang = self.translation_project.language.code
unit_query = self.unit_set.filter(state__gt=OBSOLETE)
unit_ids = unit_query.values_list('id', flat=True)
for unit_id in unit_ids:
action_log(user='system', action=UNIT_OBSOLETE, lang=lang,
unit=unit_id, translation='', path=self.pootle_path)
unit_query.update(state=OBSOLETE, index=0)
self.obsolete = True
self.save()
def get_absolute_url(self):
return reverse(
'pootle-tp-store-browse',
args=split_pootle_path(self.pootle_path))
def get_translate_url(self, **kwargs):
return u''.join(
[reverse("pootle-tp-store-translate",
args=split_pootle_path(self.pootle_path)),
get_editor_filter(**kwargs)])
def findid_bulk(self, ids, unit_set=None):
chunks = 200
for i in xrange(0, len(ids), chunks):
units = (unit_set or self.unit_set).filter(id__in=ids[i:i+chunks])
for unit in units.iterator():
yield unit
def get_file_mtime(self):
disk_mtime = datetime.datetime.fromtimestamp(self.file.getpomtime()[0])
# set microsecond to 0 for comparing with a time value without
# microseconds
disk_mtime = make_aware(disk_mtime.replace(microsecond=0))
return disk_mtime
def update_index(self, start, delta):
with update_data_after(self):
Unit.objects.filter(store_id=self.id, index__gte=start).update(
index=operator.add(F('index'), delta))
def mark_units_obsolete(self, uids_to_obsolete, update_revision=None):
"""Marks a bulk of units as obsolete.
:param uids_to_obsolete: UIDs of the units to be marked as obsolete.
:return: The number of units marked as obsolete.
"""
obsoleted = 0
for unit in self.findid_bulk(uids_to_obsolete):
# Use the same (parent) object since units will
# accumulate the list of cache attributes to clear
# in the parent Store object
unit.store = self
if not unit.isobsolete():
unit.makeobsolete()
unit.save(revision=update_revision)
obsoleted += 1
return obsoleted
@cached_property
def data_tool(self):
return data_tool.get(self.__class__)(self)
@cached_property
def updater(self):
updaters = format_updaters.gather()
updater_class = (
updaters.get(self.filetype.name)
or updaters.get("default"))
return updater_class(self)
@cached_property
def syncer(self):
syncers = format_syncers.gather()
syncer_class = (
syncers.get(self.filetype.name)
or syncers.get("default"))
return syncer_class(self)
def record_submissions(self, unit, old_target, old_state, current_time, user,
submission_type=None, **kwargs):
"""Records all applicable submissions for `unit`.
EXTREME HAZARD: this relies on implicit `._<field>_updated` members
being available in `unit`. Let's look into replacing such members with
something saner (#3895).
"""
state_updated = kwargs.get("state_updated") or unit._state_updated
target_updated = kwargs.get("target_updated") or unit._target_updated
comment_updated = kwargs.get("comment_updated") or unit._comment_updated
create_subs = OrderedDict()
if state_updated:
create_subs[SubmissionFields.STATE] = [
old_state,
unit.state]
if target_updated:
create_subs[SubmissionFields.TARGET] = [
old_target,
unit.target_f]
if comment_updated:
create_subs[SubmissionFields.COMMENT] = [
'',
unit.translator_comment or '']
if submission_type is None:
submission_type = SubmissionTypes.SYSTEM
subs_created = []
for field in create_subs:
subs_created.append(
Submission(
creation_time=current_time,
translation_project_id=self.translation_project_id,
submitter=user,
unit=unit,
store_id=self.id,
field=field,
type=submission_type,
old_value=create_subs[field][0],
new_value=create_subs[field][1]))
if subs_created:
unit.submission_set.add(*subs_created, bulk=False)
def update(self, store, user=None, store_revision=None,
submission_type=None, resolve_conflict=POOTLE_WINS,
allow_add_and_obsolete=True):
"""Update DB with units from a ttk Store.
:param store: a source `Store` instance from TTK.
:param store_revision: revision at which the source `Store` was last
synced.
:param user: User to attribute updates to.
:param submission_type: Submission type of saved updates.
        :param allow_add_and_obsolete: whether adding new units and
          obsoleting existing units is allowed
"""
self.updater.update(
store, user=user, store_revision=store_revision,
submission_type=submission_type, resolve_conflict=resolve_conflict,
allow_add_and_obsolete=allow_add_and_obsolete)
def deserialize(self, data):
return StoreDeserialization(self).deserialize(data)
def serialize(self):
return StoreSerialization(self).serialize()
def sync(self, update_structure=False, conservative=True,
user=None, skip_missing=False, only_newer=True):
"""Sync file with translations from DB."""
if skip_missing and not self.file.exists():
return
self.syncer.sync(
update_structure=update_structure,
conservative=conservative,
user=user,
only_newer=only_newer)
# # # # # # # # # # # # TranslationStore # # # # # # # # # # # # #
suggestions_in_format = True
def max_index(self):
"""Largest unit index"""
return max_column(self.unit_set.all(), 'index', -1)
def addunit(self, unit, index=None, user=None, update_revision=None):
if index is None:
index = self.max_index() + 1
newunit = self.UnitClass(store=self, index=index)
newunit.update(unit, user=user)
if self.id:
newunit.save(revision=update_revision, user=user)
return newunit
def findunits(self, source, obsolete=False):
if not obsolete and hasattr(self, "sourceindex"):
return super(Store, self).findunits(source)
# find using hash instead of index
source_hash = md5(source.encode("utf-8")).hexdigest()
units = self.unit_set.filter(source_hash=source_hash)
if obsolete:
units = units.filter(state=OBSOLETE)
else:
units = units.filter(state__gt=OBSOLETE)
if units.count():
return units
def findunit(self, source, obsolete=False):
units = self.findunits(source, obsolete)
if units:
return units[0]
def findid(self, id):
if hasattr(self, "id_index"):
return self.id_index.get(id, None)
unitid_hash = md5(id.encode("utf-8")).hexdigest()
try:
return self.unit_set.get(unitid_hash=unitid_hash)
except Unit.DoesNotExist:
return None
def header(self):
# FIXME: we should store some metadata in db
if self.file and hasattr(self.file.store, 'header'):
return self.file.store.header()
def get_max_unit_revision(self):
return max_column(self.unit_set.all(), 'revision', 0)
# # # TreeItem
def get_parents(self):
if self.parent.is_translationproject():
return [self.translation_project]
return [self.parent]
# # # /TreeItem
# # # # # # # # # # # # # # # # Translation # # # # # # # # # # # # # # #
| gpl-3.0 | 2,638,046,119,793,675,000 | 34.08686 | 83 | 0.583217 | false | 4.111169 | false | false | false |
codesmart-co/bit | bit/migrations/versions/4ad33f99723a_.py | 1 | 2133 | """empty message
Revision ID: 4ad33f99723a
Revises: 743a0a1b5bc9
Create Date: 2017-08-17 14:36:49.229000
"""
# revision identifiers, used by Alembic.
revision = '4ad33f99723a'
down_revision = '743a0a1b5bc9'
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('bit_facebook_daily_ad_insights_impression_device',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('account_id', sa.String(length=255), nullable=True),
sa.Column('campaign_id', sa.String(length=255), nullable=True),
sa.Column('adset_id', sa.String(length=255), nullable=True),
sa.Column('campaign_name', sa.String(length=255), nullable=True),
sa.Column('spend', sa.Numeric(), nullable=True),
sa.Column('cost_per_unique_click', sa.Numeric(), nullable=True),
sa.Column('unique_clicks', sa.Integer(), nullable=True),
sa.Column('unique_impressions', sa.Integer(), nullable=True),
sa.Column('unique_social_clicks', sa.Integer(), nullable=True),
sa.Column('unique_social_impressions', sa.Integer(), nullable=True),
sa.Column('website_clicks', sa.Integer(), nullable=True),
sa.Column('date_start', sa.DateTime(), nullable=True),
sa.Column('date_stop', sa.DateTime(), nullable=True),
sa.Column('impression_device', sa.String(length=255), nullable=True),
sa.Column('ad_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['ad_id'], ['bit_facebook_ad.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_bit_facebook_daily_ad_insights_impression_device_impression_device'), 'bit_facebook_daily_ad_insights_impression_device', ['impression_device'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_bit_facebook_daily_ad_insights_impression_device_impression_device'), table_name='bit_facebook_daily_ad_insights_impression_device')
op.drop_table('bit_facebook_daily_ad_insights_impression_device')
# ### end Alembic commands ###
| apache-2.0 | 6,579,955,067,525,217,000 | 43.4375 | 187 | 0.700422 | false | 3.226929 | false | false | false |
bhargavz/py-twitter-sentiment-analysis | data/db/base/friendObj.py | 1 | 1835 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# FILE: friendObj.py
#
# An object that mirrors the friends table in the database
#
# Copyright by Author. All rights reserved. Not for reuse without
# express permissions.
#
#
import copy
class friendObj(object):
def __init__(self):
self.rid = None
self.user = u""
self.friend = u""
self.user_id = 0
self.friend_id = 0
self.user_local_id = 0
self.friend_local_id = 0
def to_dict(self):
rec = {}
if( self.rid > 0 ):
rec['rid'] = self.rid
rec['user'] = self.user
rec['friend'] = self.friend
rec['user_id'] = self.user_id
rec['friend_id'] = self.friend_id
rec['user_local_id'] = self.user_local_id
rec['friend_local_id'] = self.friend_local_id
return rec
def from_dict(self, rec):
nobj = friendObj()
if( rec ):
nobj.user = rec['user']
nobj.friend = rec['friend']
nobj.user_id = rec['user_id']
nobj.friend_id = rec['friend_id']
nobj.user_local_id = rec['user_local_id']
nobj.friend_local_id = rec['friend_local_id']
return nobj
def clone(self):
nobj = friendObj()
if( self.rid > 0 ):
nobj.rid = self.rid
nobj.user = self.user
nobj.friend = self.friend
nobj.user_id = self.user_id
nobj.friend_id = self.friend_id
nobj.user_local_id = self.user_local_id
nobj.friend_local_id = self.friend_local_id
return nobj
def __repr__(self):
return "<friendObj('%s','%s','%s','%s','%s','%s','%s')>"%(str(self.rid),str(self.user),str(self.friend),str(self.user_id),str(self.friend_id),str(self.user_local_id),str(self.friend_local_id))
| mit | -2,967,404,588,936,243,000 | 28.596774 | 200 | 0.53951 | false | 3.224956 | false | false | false |
johnmartinsson/bird-species-classification | scripts/split_data.py | 1 | 1398 | import glob
import os
import tqdm
import numpy as np
import shutil
from bird import loader as l
source_dir = "/disk/martinsson-spring17/birdClef2016Subset"
classes = os.listdir(os.path.join(source_dir, "train"))
percentage_validation_samples = 0.10
progress = tqdm.tqdm(range(len(classes)))
class_segmentss = [(c, glob.glob(os.path.join(source_dir, "train", c, "*.wav"))) for c
        in classes]
unique_samples = [(c, l.group_segments(class_segments)) for (c, class_segments) in
        class_segmentss]
print("Found ", sum([len(segs) for (c, segs) in unique_samples]), " unique samples")
for ((c, segments), p) in zip(unique_samples, progress):
    nb_samples = len(segments)
    nb_validation_samples = int(np.ceil(nb_samples * percentage_validation_samples))
valid_class_path = os.path.join(source_dir, "valid", c)
if not os.path.exists(valid_class_path):
#print("os.makedirs("+valid_class_path+")")
os.makedirs(valid_class_path)
i_valid_samples = np.random.choice(range(len(segments)),
nb_validation_samples, replace=False)
valid_samples = [segments[i] for i in i_valid_samples]
for sample in valid_samples:
#print(c, "validation")
for segment in sample:
#print("shutil.move("+segment+","+valid_class_path+")")
shutil.move(segment, valid_class_path)
| mit | 5,780,680,124,122,952,000 | 35.789474 | 86 | 0.648069 | false | 3.304965 | false | false | false |
MSMBA/msmba-workflow | msmba-workflow/srclib/wax/examples/colored-buttondemo-1.py | 1 | 2145 | # colored-buttondemo-1.py
import sys
sys.path.append("../..")
from wax import *
COLORS = ['orange', 'chartreuse', 'papayawhip', 'dark blue', 'gold',
'red', 'yellow green', 'snow', 'hotpink', 'cadet blue']
# stick a custom event in Button
def MyOnClick(self, event):
print 'U clicked the button with label', `self.GetLabel()`
Button.OnClick = MyOnClick
class MainFrame(Frame):
def Body(self):
self.AddComponent(Button(self, "one"), stretch=1)
self.AddComponent(Button(self, "two"), expand=1, stretch=1)
self.AddComponent(Button(self, "three"), stretch=1)
# adding a panel, using a class
class Panel1(Panel):
def Body(self):
self.AddComponent(Button(self, "AAA"), stretch=1)
self.AddComponent(Button(self, "BBB"), expand=1, stretch=1)
self.AddComponent(Button(self, "CCC"), stretch=1)
panel1 = Panel1(self, direction="HORIZONTAL")
panel1.Pack()
self.AddComponent(panel1, stretch=1)
# adding two nested panels
panel2 = Panel(self, direction="H")
panel2.AddComponent(Button(panel2, "DD"), expand=1, stretch=1)
panel2.AddComponent(Button(panel2, "EE"), expand=1, stretch=1)
panel3 = Panel(panel2, direction="V")
panel3.AddComponent(Button(panel3, "999"), stretch=1)
b = Button(panel3, "888")
panel3.AddComponent(b, expand=1, stretch=1)
panel3.Pack()
panel2.AddComponent(panel3, stretch=1)
panel2.Pack()
self.AddComponent(panel2, expand=1, stretch=1)
self.Pack()
# override event for this button
def my_event(event):
print "Wahey!"
b.OnClick = my_event
# color these buttons, using the GetAllChildren() method (new as of
# 0.2.7)
all_buttons = [widget for widget in self.GetAllChildren()
if isinstance(widget, Button)]
for color, button in zip(COLORS, all_buttons):
button.SetBackgroundColor(color)
app = Application(MainFrame, direction='vertical', title="Test test...")
app.MainLoop()
| gpl-2.0 | 1,223,521,510,981,702,100 | 32.515625 | 75 | 0.610723 | false | 3.569052 | false | false | false |
fogbow/fogbow-dashboard | openstack_dashboard/dashboards/fogbow/instance/tabs.py | 1 | 3301 | from django.utils.translation import ugettext_lazy as _
from horizon import tabs
import openstack_dashboard.models as fogbow_models
COMPUTE_TERM = fogbow_models.FogbowConstants.COMPUTE_TERM
STATE_TERM = fogbow_models.FogbowConstants.STATE_TERM
SHH_PUBLIC_KEY_TERM = fogbow_models.FogbowConstants.SHH_PUBLIC_KEY_TERM
CONSOLE_VNC_TERM = fogbow_models.FogbowConstants.CONSOLE_VNC_TERM
MEMORY_TERM = fogbow_models.FogbowConstants.MEMORY_TERM
CORES_TERM = fogbow_models.FogbowConstants.CORES_TERM
IMAGE_SCHEME = fogbow_models.FogbowConstants.IMAGE_SCHEME
EXTRA_PORT_SCHEME = fogbow_models.FogbowConstants.EXTRA_PORT_SCHEME
class InstanceDetailTabInstancePanel(tabs.Tab):
name = _("Instance details")
slug = "instance_details"
template_name = ("fogbow/instance/_detail_instance.html")
def get_context_data(self, request):
instanceId = self.tab_group.kwargs['instance_id']
response = fogbow_models.doRequest('get', COMPUTE_TERM + instanceId,
None, request)
instance = None
try:
instance = getInstancePerResponse(instanceId, response)
except Exception:
instance = {'instanceId': '-' , 'state': '-', 'sshPublic': '-',
'extra' : '-', 'memory' : '-', 'cores' : '-',
'image' : '-', 'extraPorts': '-'}
return {'instance' : instance}
def getInstancePerResponse(instanceId, response):
if instanceId == 'null':
instanceId = '-'
instanceDetails = response.text.split('\n')
state,sshPublic,console_vnc,memory,cores,image,extraPort = '-', '-', '-', '-', '-', '-', '-'
for detail in instanceDetails:
if STATE_TERM in detail:
state = normalizeAttributes(detail, STATE_TERM)
elif SHH_PUBLIC_KEY_TERM in detail:
sshPublic = normalizeAttributes(detail, SHH_PUBLIC_KEY_TERM)
elif MEMORY_TERM in detail:
memory = normalizeAttributes(detail, MEMORY_TERM)
elif CORES_TERM in detail:
cores = normalizeAttributes(detail, CORES_TERM)
elif IMAGE_SCHEME in detail:
image = getFeatureInCategoryPerScheme('title', detail)
elif EXTRA_PORT_SCHEME in detail:
extraPort = normalizeAttributes(detail, EXTRA_PORT_SCHEME)
return {'instanceId': instanceId , 'state': state, 'sshPublic':sshPublic,
'extra' : instanceDetails, 'memory' : memory, 'cores' : cores,
'image' : image, 'extraPorts': extraPort}
def normalizeAttributes(propertie, term):
try:
return propertie.split(term)[1].replace('=', '').replace('"', '')
except:
return ''
def getFeatureInCategoryPerScheme(featureName, features):
try:
features = features.split(';')
for feature in features:
if featureName in feature:
return feature.replace(featureName + '=', '') \
.replace('"','').replace('Image:','') \
.replace(' image', '')
return ''
except Exception:
return '-'
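# Illustrative note (an assumption about the OCCI category format, not taken
# from the original code): given a category string such as
#   'scheme="http://.../os_tpl#"; class="mixin"; title="Image: ubuntu-14.04"'
# getFeatureInCategoryPerScheme('title', ...) strips the quotes and the
# "Image:" prefix and returns the bare image name used for display.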
class InstanceDetailTabGroupInstancePanel(tabs.TabGroup):
slug = "instance_details"
tabs = (InstanceDetailTabInstancePanel,)
| apache-2.0 | 8,149,906,809,736,603,000 | 40.78481 | 97 | 0.617692 | false | 4.080346 | false | false | false |
joepetrini/bike-counter | webapp/main/logic.py | 1 | 4298 | import random
from datetime import datetime
from django.db import transaction
from django.utils.timezone import now
from .models import *
def csv_for_appt(appt):
out = ''
# Headers
out += "Time,Bike,Direction,"
for m in appt.organization.organizationmetrics_set.all():
out += "%s," % m.metric.name
out = out[:-1] + "\n"
# Detail
for s in appt.survey_set.all():
out += "%s,%s,%s," % (s.created, s.is_bicycle, s.direction)
for sv in s.surveyvalue_set.all():
out += "%s," % sv.value.stored_value
out = out[:-1] + "\n"
return out
def stats_for_appt(appt):
stat = {}
stat['total'] = appt.survey_set.all().count()
metrics = {}
min = {}
for i in range(0, ((appt.actual_end - appt.actual_start).seconds / 60)):
min[i] = 0
metrics[-1] = {'name': 'direction', 'stats': {}}
# List of metrics
for m in appt.organization.organizationmetrics_set.filter(report=True):
metrics[m.metric.id] = {'name': m.metric.name, 'stats': {}}
# Value counts across all recorded info
for s in appt.survey_set.all():
# Direction
try:
metrics[-1]['stats'][s.direction] += 1
except KeyError:
metrics[-1]['stats'][s.direction] = 1
minutes_in = (s.recorded_at - appt.actual_start).seconds / 60
try:
min[minutes_in] += 1
except KeyError:
min[minutes_in] = 1
for sv in s.surveyvalue_set.select_related().all():
# Not in reportable metrics
if sv.metric.id not in metrics.keys():
continue
try:
metrics[sv.metric.id]['stats'][sv.value.display_value] += 1
except KeyError:
metrics[sv.metric.id]['stats'][sv.value.display_value] = 1
print min
stat['metrics'] = metrics
stat['minutes'] = min
return stat
def sim_appt(appt, avg_time=25):
with transaction.atomic():
# Clear data
appt.reset()
#for s in appt.survey_set.all():
# SurveyValue.objects.filter(survey=s).delete()
#Survey.objects.filter(appointment=appt).delete()
start = now()
total_time = 0
while True:
sec = random.randint(0, avg_time * 2)
total_time += sec
t = start + datetime.timedelta(seconds=total_time)
s = Survey.objects.create(appointment=appt, recorded_at=t)
for m in appt.organization.organizationmetrics_set.all():
metric = m.metric
if metric.value_set.system_name == 'direction':
val = random.choice(list(appt.location.directions()))
else:
val = random.choice(list(m.metric.value_set.value_set.all()))
# TODO handle defaults
has_def = m.metric.value_set.value_set.filter(is_default=True).count()
sv = SurveyValue.objects.create(survey=s, metric=metric, value=val)
# TODO Add events
if total_time > appt.organization.session_length * 60:
break
appt.actual_start = start
appt.actual_end = start + datetime.timedelta(0, total_time)
appt.time_taken = total_time
appt.save()
def get_appts_choices(theOrg, theYear=None):
all_appts_choices = [('default', '--Pick--'),('ALL', 'Download All Appointments')]
if theYear is not None:
all_appts_choices += [(a['id'],
(str(a['id']) + ' - ' + str(a['location__name'])) )
for a in Appointment.objects.filter(scheduled_start__year = theYear, organization = Organization.objects.get(slug=theOrg)).order_by('id').values('id', 'location__name') ]
else:
all_appts_choices += [(a['id'],
(str(a['id']) + ' - ' + str(a['location__name'])) )
for a in Appointment.objects.filter(organization = Organization.objects.get(slug=theOrg)).order_by('id').values('id', 'location__name') ]
    # for the count year drop-down, pull down all unique start_date years for the appts in the DB
    # to accommodate potential DB incompatibilities with django's distinct() function (only postgreSQL works), I'll do the unique year filtering myself
return all_appts_choices
| mit | 9,043,612,495,408,714,000 | 32.84252 | 178 | 0.577013 | false | 3.657872 | false | false | false |
frascoweb/frasco-tasks | frasco_tasks.py | 1 | 6915 | from frasco import Feature, action, execute_action, command, current_app, import_string, signal, copy_extra_feature_options, has_app_context
from celery import Celery
from celery.bin.worker import worker as celery_worker
from celery.bin.beat import beat as celery_beat
from celery.schedules import crontab
def pack_task_args(data):
"""Traverse data and converts every object with a __taskdump__() method
"""
if hasattr(data, "__taskdump__"):
cls, state = data.__taskdump__()
if not cls:
cls = data.__class__.__module__ + "." + data.__class__.__name__
return {"$taskobj": [cls, state]}
if isinstance(data, (list, tuple)):
lst = []
for item in data:
lst.append(pack_task_args(item))
return lst
if isinstance(data, dict):
dct = {}
for k, v in data.iteritems():
dct[k] = pack_task_args(v)
return dct
return data
def unpack_task_args(data):
"""Traverse data and transforms back objects which where dumped
using __taskdump()
"""
if isinstance(data, (list, tuple)):
lst = []
for item in data:
lst.append(unpack_task_args(item))
return lst
if isinstance(data, dict):
if "$taskobj" in data:
cls = import_string(data["$taskobj"][0])
return cls.__taskload__(data["$taskobj"][1])
else:
dct = {}
for k, v in data.iteritems():
dct[k] = unpack_task_args(v)
return dct
return data
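# Minimal sketch (an assumption, not part of the original module) of the
# __taskdump__/__taskload__ protocol that pack_task_args/unpack_task_args rely
# on: a hypothetical argument type rebuilt on the worker from a small state.
class _ExampleTaskArg(object):
    def __init__(self, pk):
        self.pk = pk
    def __taskdump__(self):
        # (dotted class path, or None to use this class's own path; plus state)
        return None, self.pk
    @classmethod
    def __taskload__(cls, state):
        return cls(state)
# pack_task_args({'obj': _ExampleTaskArg(42)}) would yield roughly
# {'obj': {'$taskobj': ['frasco_tasks._ExampleTaskArg', 42]}}, and
# unpack_task_args() reverses it by importing the class and calling __taskload__.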
def run_action(name, **kwargs):
"""Instanciates and executes an action from current_app.
This is the actual function which will be queued.
"""
kwargs = unpack_task_args(kwargs)
current_user = None
if '_current_user' in kwargs:
current_user = kwargs.pop('_current_user')
current_app.features.users.start_user_context(current_user)
try:
current_app.features.tasks.before_task_event.send(name=name)
action = current_app.actions[name](kwargs)
rv = execute_action(action)
current_app.features.tasks.after_task_event.send(name=name)
finally:
if current_user:
current_app.features.users.stop_user_context()
return rv
class TasksFeature(Feature):
"""Enqueue tasks to process them in the background
"""
name = "tasks"
command_group = False
defaults = {"broker_url": None,
"result_backend": None,
"accept_content": ['json', 'msgpack', 'yaml'],
"task_serializer": "json",
"result_serializer": "json",
"schedule": {},
"delay_if_models_transaction": False,
"run_beat_with_worker": True}
before_task_event = signal("before_task")
after_task_event = signal("after_task")
task_enqueued_event = signal("task_enqueued")
def init_app(self, app):
self.app = app
broker = self.options["broker_url"]
backend = self.options["result_backend"]
if not broker:
if app.features.exists("redis"):
broker = app.features.redis.options["url"]
else:
broker = "redis://localhost"
if not backend:
backend = broker
self.celery = Celery(__name__, broker=broker, backend=backend)
self.celery.conf["CELERY_ACCEPT_CONTENT"] = self.options["accept_content"]
self.celery.conf["CELERY_TASK_SERIALIZER"] = self.options["task_serializer"]
self.celery.conf["CELERY_RESULT_SERIALIZER"] = self.options["result_serializer"]
self.celery.conf["CELERYBEAT_SCHEDULE_FILENAME"] = ".celerybeat-schedule"
copy_extra_feature_options(self, self.celery.conf, "CELERY_")
TaskBase = self.celery.Task
class ContextTask(TaskBase):
abstract = True
def __call__(self, *args, **kwargs):
if has_app_context():
# useful for testing if running tasks synchronously
return TaskBase.__call__(self, *args, **kwargs)
else:
with app.app_context():
return TaskBase.__call__(self, *args, **kwargs)
self.celery.Task = ContextTask
self.celery.conf["CELERYBEAT_SCHEDULE"] = {}
if self.options["schedule"]:
for action, schedule in self.options["schedule"].iteritems():
self.schedule_action(action, schedule)
self.run_action_task = self.celery.task(name="frasco_run_action")(run_action)
app.processes.append(("worker", ["frasco", "worker"]))
if not self.options['run_beat_with_worker']:
app.processes.append(("scheduler", ["frasco", "scheduler"]))
def add_task(self, func, **kwargs):
return self.celery.task(**kwargs)(func)
def send_task(self, *args, **kwargs):
return self.celery.send_task(*args, **kwargs)
def schedule_task(self, schedule_name, name, schedule, **kwargs):
if isinstance(schedule, dict):
schedule = crontab(**schedule)
elif isinstance(schedule, str):
schedule = crontab(*schedule.split(" "))
self.celery.conf["CELERYBEAT_SCHEDULE"][schedule_name] = dict(
task=name, schedule=schedule, **kwargs)
def schedule_action(self, action, schedule, name=None):
if not name:
name = "scheduled_%s" % action
self.schedule_task(name, "frasco_run_action", schedule,
args=(action,))
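    # Illustrative calls (assumptions, not from the original code) showing the
    # two schedule formats accepted above: a dict of celery crontab keyword
    # arguments, or a space-separated string passed positionally to crontab():
    #   app.features.tasks.schedule_action('cleanup', {'hour': 3, 'minute': 0})
    #   app.features.tasks.schedule_action('cleanup', '0 3 * * *')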
@action(default_option="action")
def enqueue(self, action, **kwargs):
if current_app.features.exists('models') and current_app.features.models.delayed_tx_calls.top is not None \
and self.options['delay_if_models_transaction']:
current_app.features.models.delayed_tx_calls.call(self.enqueue, (action,), kwargs)
return
if current_app.features.exists('users') and current_app.features.users.logged_in():
kwargs.setdefault('_current_user', current_app.features.users.current)
result = self.run_action_task.apply_async(args=(action,), kwargs=pack_task_args(kwargs))
self.task_enqueued_event.send(self, action=action, result=result)
return result
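    # Illustrative usage (an assumption, not part of the original code): another
    # action or view could enqueue a registered action and poll its celery result:
    #   result = current_app.features.tasks.enqueue('send_welcome_email', user_id=1)
    #   current_app.features.tasks.get_result(result.id).state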
def get_result(self, id):
return self.run_action_task.AsyncResult(id)
@command(with_reloader=True, with_app_ctx=False)
def worker(self, hostname=None):
options = {'hostname': hostname, 'beat': False}
if self.options['run_beat_with_worker'] and self.celery.conf["CELERYBEAT_SCHEDULE"]:
options['beat'] = True
if self.app.debug:
options['concurrency'] = 1
w = celery_worker(self.celery)
w.run(**options)
@command(with_reloader=True, with_app_ctx=False)
def scheduler(self):
b = celery_beat(self.celery)
b.run()
| mit | 7,540,645,045,358,001,000 | 38.289773 | 140 | 0.597831 | false | 3.935686 | false | false | false |
alorchhota/bioinformatics-algorithms-1 | week9/code/5.LongestSharedSubstring.py | 1 | 6261 | import os
import csv
import sys
import re
import importlib
import networkx as nx
# settings
curDir = 'E:/GitHub/bioinformatics-algorithms-1/week9'
#curDir = 'D:/Copy/Coursera/Bioinformatics Algorithms (part-I)/MyPrograms/week9'
inputFile = './data/5.LongestSharedSubstring-2.txt'
inputFile = 'C:/Users/Ashis/Downloads/dataset_296_5.txt'
outputFile = './results/5.LongestSharedSubstring.txt'
# set current directory
os.chdir(curDir)
## read input
with open(inputFile) as f:
inputs = f.readlines()
genome1 = inputs[0].strip() + '$'
genome2 = inputs[1].strip() + '$'
## function to find longest common prefix
def longestCommonPrefix(str1, str2):
n = min([len(str1), len(str2)])
i = 0
while i < n and str1[i]==str2[i]:
i += 1
prefix = str1[0:i]
return prefix
## function to build a suffix tree from a genome
def suffixTree(genome):
## build suffix tree
g = nx.DiGraph()
g.add_node(1) # add root with id 1
# two required function
neighborsWithLabelPrefix = lambda node, prefix: [e[1] for e in g.edges_iter(node, data=True) if genome[e[2]['labelIdx'][0]] == prefix]
getNewNode = lambda : len(g.nodes())+1
#print(longestCommonPrefix('abc','ab'))
genomeLen = len(genome)
for idx in range(genomeLen):
# traverse as long as pattern matches
curNode = 1
i = idx
while(i < genomeLen):
# find the edge with the first prefix character
nextNode = neighborsWithLabelPrefix(curNode, genome[i])
# if there is no edge with the first prefix character,
# it must be a new edge with the rest of the string.
if len(nextNode) == 0:
newNode = getNewNode()
g.add_edge(curNode, newNode, {'labelIdx':[i,genomeLen]})
g.node[newNode]['startIdx'] = idx
break
# get the edge label
nextNode = nextNode[0]
edgeLabelIndices = g.edge[curNode][nextNode]['labelIdx']
edgeLabel = genome[edgeLabelIndices[0]:edgeLabelIndices[1]]
edgeLabelLen = len(edgeLabel)
# if the rest of the string starts with edgeLabel,
# move to the next node
if genome[i:i+edgeLabelLen] == edgeLabel:
curNode = nextNode
i += edgeLabelLen
else:
# edgeLabel matches partially
prefix = longestCommonPrefix(genome[i:i+edgeLabelLen], edgeLabel)
prefixLen = len(prefix)
# create two new node, one intermediate, another for unmatched string
intermediateNode = getNewNode()
unmatchedNode = intermediateNode + 1
# remove existing edge from curNode to nextNode
g.remove_edge(curNode, nextNode)
# add edge from curNode to intermediateNode
g.add_edge(curNode, intermediateNode, {'labelIdx':(edgeLabelIndices[0],edgeLabelIndices[0]+prefixLen)})
# add edge from intermediateNode to nextNode
g.add_edge(intermediateNode, nextNode, {'labelIdx':(edgeLabelIndices[0]+prefixLen, edgeLabelIndices[1])})
# add edge from intermediateNode to unmatchedNode
g.add_edge(intermediateNode, unmatchedNode, {'labelIdx':(i+prefixLen, genomeLen)})
g.node[unmatchedNode]['startIdx'] = idx
break
return g
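## Illustrative example (not part of the original script): suffixTree("abab$")
## returns a networkx DiGraph rooted at node 1; every suffix of "abab$" can be
## read along the edge labels of some root-to-leaf path, and each leaf stores
## the starting index of its suffix in a 'startIdx' node attribute.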
## build two suffix trees, one for each genome
g1 = suffixTree(genome1)
g2 = suffixTree(genome2)
## function to find edges with prefix from a node
neighborsWithLabelPrefix = lambda genome, g, node, prefix:\
[e[1] for e in g.edges_iter(node, data=True) \
if genome[e[2]['labelIdx'][0]] == prefix]
## function to find edge label from suffix tree
edgeLabelInSuffixTree = lambda genome, g, startNode, endNode:\
genome[g.edge[startNode][endNode]['labelIdx'][0]:\
g.edge[startNode][endNode]['labelIdx'][1]]
def longestSharedSubstring(root1, root2):
longestSubstr = ''
## create matched edge pairs
n1 = nx.neighbors(g1, root1)
if len(n1) == 0:
return longestSubstr
edges1 = [(edgeLabelInSuffixTree(genome1, g1, root1, node), node) for node in n1]
edgePairs = []
for e in edges1:
n2 = neighborsWithLabelPrefix(genome2, g2, root2, e[0][0])
if len(n2)>0:
e2 = (edgeLabelInSuffixTree(genome2, g2, root2, n2[0]), n2[0])
edgePairs += [(e,e2)]
if len(edgePairs) == 0:
return longestSubstr
## traverse each edge pairs and update longest substr
for ep in edgePairs:
## find substr in each pair
substr = ''
cur1 = root1
cur2 = root2
next1 = ep[0][1]
next2 = ep[1][1]
edge1 = ep[0][0]
edge2 = ep[1][0]
while True:
if edge1 == edge2:
substr = edge1 + longestSharedSubstring(next1, next2)
break
# update substr with prefix
prefix = longestCommonPrefix(edge1, edge2)
substr += prefix
if len(edge1) < len(edge2):
edge2 = edge2[len(prefix):]
cur1 = next1
next1 = neighborsWithLabelPrefix(genome1, g1, cur1, edge2[0])
if len(next1) == 0:
break
next1 = next1[0]
edge1 = edgeLabelInSuffixTree(genome1, g1, cur1, next1)
else:
edge1 = edge1[len(prefix):]
cur2 = next2
next2 = neighborsWithLabelPrefix(genome2, g2, cur2, edge1[0])
if len(next2) == 0:
break
next2 = next2[0]
edge2 = edgeLabelInSuffixTree(genome2, g2, cur2, next2)
# update longest substring
if len(substr) > len(longestSubstr):
longestSubstr = substr
return longestSubstr
## find longest shared substring
lss = longestSharedSubstring(1,1)
print(lss)
## output
with open(outputFile, "w") as f:
f.writelines(lss)
print('done.')
| gpl-2.0 | -353,684,072,726,431,040 | 31.952632 | 138 | 0.575307 | false | 3.776236 | false | false | false |
chinchliff/autophy | other_scripts/get_family_order_distributions.py | 1 | 2722 | import sys,sqlite3,os
from Bio import SeqIO
if __name__ == "__main__":
    if len(sys.argv) not in (3, 4):
print "python get_family_order_distributions.py db outfile [working dir]"
sys.exit(0)
conn = sqlite3.connect(sys.argv[1])
c = conn.cursor()
try:
os.chdir(sys.argv[3])
except IndexError:
pass
# init variables to store processed data
genes = {}
allo = []
for i in os.listdir("."):
# for every final phlawd output file, open it, create an empty dict to store info
if i[-9:] == "FINAL.aln":
print i
infile = open(i,"r")
genes[i] = {}
# for every sequence in this file
for j in SeqIO.parse(infile,"fasta"):
# get left and right values for the otu, if we can't find it in the db, use zeroes
sql = "SELECT left_value,right_value from taxonomy where ncbi_id = "+str(j.id)+";"
c.execute(sql)
left = 0
right = 0
for h in c:
left = h[0]
right = h[1]
# get the family for this otu
sql = "SELECT name from taxonomy where left_value < "+str(left)+" and right_value > "+ \
str(right)+" and node_rank = 'family' and name_class = 'scientific name';"
c.execute(sql)
# if we can't find a family (wtf?) substitute this otu id. apparently for some
# ncbi taxa, no family has been assigned (e.g. Foetidia, ncbi_id = 79568)
nm = ""
for h in c:
nm = str(h[0])
# print nm
if len(nm) == 0:
nm = j.id
# if we haven't seen this family/unassigned otu id yet,
# record it, set the count to zero
if nm not in allo:
allo.append(nm)
if nm not in genes[i]:
genes[i][nm] = 0
genes[i][nm] += 1
# done with this gene
infile.close()
# done counting records
conn.close()
outfile = open(sys.argv[2],"w")
# build/write the header line (names of families/unassigned otus)
st = ""
for i in allo:
st += "\t"+i
outfile.write(st+"\n")
# write gene name, then family/otu counts for each gene
for i in genes:
outfile.write(i)
for j in allo:
if j in genes[i]:
outfile.write("\t"+str(genes[i][j]))
else:
outfile.write("\t0")
outfile.write("\n")
# done
outfile.close()
| gpl-3.0 | -8,521,853,416,352,856,000 | 29.58427 | 104 | 0.477223 | false | 3.91092 | false | false | false |
juanjosegzl/learningpygame | bullet.py | 1 | 1307 | import pygame
from os.path import join as path_join
from vector import Vector
from entity import Entity
from constants import COLOR_BLACK
class Bullet(Entity):
""" Make the bullets independant"""
SPEED = 500
IMAGE_FILENAME = path_join('assets', "images", "laser.png")
def __init__(self, world, flip=False, location=None, direction=None):
sprite = pygame.image.load(Bullet.IMAGE_FILENAME).convert()
sprite.set_colorkey(COLOR_BLACK)
super(Bullet, self).__init__(
world, 'Bullet', sprite,
flip=flip,
speed=Bullet.SPEED,
location=location
)
self.direction = direction
def process(self, time_passed):
if not self.get_destination():
x, y = self.get_location().x, self.get_location().y
if self.direction == 'up':
y = 0 - self.get_height()
elif self.direction == 'down':
y = self.world.get_world_limits()[1] + self.get_height()
elif self.direction == 'left':
x = 0 - self.get_width()
elif self.direction == 'right':
x = self.world.get_world_limits()[0] + self.get_width()
self.set_destination(Vector(x, y))
super(Bullet, self).process(time_passed)
| gpl-3.0 | 257,684,221,911,288,420 | 33.394737 | 73 | 0.575363 | false | 3.821637 | false | false | false |
google/deepvariant | deepvariant/resources.py | 1 | 5776 | # Copyright 2017 Google LLC. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Library to gather runtime performance metrics.
This module exposes the ResourceMonitor class, which client code can use to
gather resource usage metrics about their program. An example usage would look
something like:
with ResourceMonitor() as monitor:
... do work ...
metrics = monitor.metrics()
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import platform
import resource
import time
import psutil
from deepvariant.protos import resources_pb2
class ResourceMonitor(object):
"""Class for collecting resource usage info from this or child process."""
def __init__(self):
"""Constructs a ResourceMonitor object."""
self.wall_start = None
self.metrics_pb = self._initial_metrics_protobuf()
def _initial_metrics_protobuf(self):
"""Returns an initialized ResourceMetrics proto.
This function also fills in the "constant" fields of the ResourceMetrics
proto that don't depend on the actual running commands, such as host_name.
Returns:
learning.genomics.deepvariant.ResourceMetrics proto.
"""
return resources_pb2.ResourceMetrics(
host_name=_get_host_name(),
cpu_frequency_mhz=_get_cpu_frequency(),
physical_core_count=_get_cpu_count(),
total_memory_mb=_get_total_memory())
def __enter__(self):
return self.start()
def __exit__(self, unused_type, unused_value, unused_traceback):
pass
def start(self):
"""Starts timers associated with resource collection.
This method must be called before metrics().
Returns:
self to enable the idiom `monitor = ResourceMonitor().start()`.
"""
self.wall_start = time.time()
return self
def metrics(self):
"""Collects and return runtime metrics as a ResourceMetrics proto.
This method can be called multiple times, but wall clock time is always
reckoned from the time of the last start() call.
Returns:
A learning.genomics.deepvariant.ResourceMetrics proto message.
Raises:
RuntimeError: if start() was not called previously.
"""
if self.wall_start is None:
raise RuntimeError('start() must be called prior to metrics()')
self.metrics_pb.wall_time_seconds = time.time() - self.wall_start
# Consider using psutil.cpu_times() instead to get more detailed information
# about the usage in self and all children.
try:
rusage = resource.getrusage(resource.RUSAGE_SELF)
self.metrics_pb.cpu_user_time_seconds = rusage.ru_utime
self.metrics_pb.cpu_system_time_seconds = rusage.ru_stime
self.metrics_pb.memory_peak_rss_mb = int(rusage.ru_maxrss / 1024)
except resource.error:
# The OS call to get rusage failed, so just don't set the field values,
      # leaving them as the default values of 0.
pass
# Create a psutil.Process pointed at the current process.
process = psutil.Process()
io_counters = process.io_counters()
self.metrics_pb.read_bytes = io_counters.read_bytes
self.metrics_pb.write_bytes = io_counters.write_bytes
return self.metrics_pb
# ------------------------------------------------------------------------------
# Simple functions for getting host_name, cpu count, etc. Isolated here to make
# them mockable.
# ------------------------------------------------------------------------------
def _get_host_name():
"""Gets the host name of this machine."""
return platform.node()
def _get_cpu_count():
"""Gets the number of physical cores in this machine.
Returns:
int >= 1 if the call to get the cpu_count succeeded, or 0 if not.
"""
return psutil.cpu_count(logical=False) or 0
def _get_cpu_frequency():
"""Gets the frequency in MHz of the cpus in this machine.
Returns:
float > 0 if the call to get the cpu_frequency succeeded. This information
may not be available on all systems, in which case we return 0.0.
"""
try:
freq = psutil.cpu_freq()
return freq.current if freq is not None else 0.0
except NotImplementedError:
return 0.0
def _get_total_memory():
"""Gets the total memory in megabytes in this machine."""
return int(psutil.virtual_memory().total / (1024 * 1024))
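# Illustrative usage sketch (editor addition, not part of the original module):
# it times a small piece of work with the context-manager form described in the
# module docstring and prints the resulting ResourceMetrics proto. The workload
# below is an arbitrary placeholder.
if __name__ == '__main__':
  with ResourceMonitor() as monitor:
    _ = sum(i * i for i in range(1000000))
  print(monitor.metrics())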
| bsd-3-clause | -8,756,887,809,240,900,000 | 33.586826 | 80 | 0.701004 | false | 4.240822 | false | false | false |
leongold/lago | lago/config.py | 1 | 7126 | #
# Copyright 2016 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import os
import re
from collections import defaultdict
from io import StringIO
from warnings import warn
import configparser
from xdg import BaseDirectory as base_dirs
from lago.constants import CONFS_PATH
from lago.utils import argparse_to_ini
def _get_configs_path():
"""Get a list of possible configuration files, from the following
sources:
    1. All files that exist in constants.CONFS_PATH.
2. All XDG standard config files for "lago.conf", in reversed
order of importance.
Returns:
list(str): list of files
"""
paths = []
xdg_paths = [
path for path in base_dirs.load_config_paths('lago', 'lago.conf')
]
paths.extend([path for path in CONFS_PATH if os.path.exists(path)])
paths.extend(reversed(xdg_paths))
return paths
def get_env_dict(root_section):
"""Read all Lago variables from the environment.
The lookup format is:
LAGO_VARNAME - will land into 'lago' section
LAGO__SECTION1__VARNAME - will land into 'section1' section, notice
the double '__'.
LAGO__LONG_SECTION_NAME__VARNAME - will land into 'long_section_name'
Returns:
dict: dict of section configuration dicts
Examples:
>>> os.environ['LAGO_GLOBAL_VAR'] = 'global'
>>> os.environ['LAGO__INIT__REPO_PATH'] = '/tmp/store'
>>>
>>> config.get_env_dict()
{'init': {'repo_path': '/tmp/store'}, 'lago': {'global_var': 'global'}}
"""
env_lago = defaultdict(dict)
decider = re.compile(
(
r'^{0}(?:_(?!_)|(?P<has>__))'
r'(?(has)(?P<section>.+?)__)'
r'(?P<name>.+)$'
).format(root_section.upper())
)
for key, value in os.environ.iteritems():
match = decider.match(key)
if not match:
continue
if not match.group('name') or not value:
warn(
'empty environment variable definition:'
'{0}, ignoring.'.format(key)
)
else:
section = match.group('section') or root_section
env_lago[section.lower()][match.group('name').lower()] = value
return dict(env_lago)
class ConfigLoad(object):
"""Merges configuration parameters from 3 different sources:
    1. Environment variables
2. config files in .INI format
3. argparse.ArgumentParser
    The assumed (but not mandatory) order of calls is:
load() - load from config files and environment variables
update_parser(parser) - update from the declared argparse parser
update_args(args) - update from passed arguments to the parser
"""
def __init__(self, root_section='lago'):
"""__init__
Args:
root_section (str):
"""
self.root_section = root_section
self._config = defaultdict(dict)
self._config.update(self.load())
self._parser = None
def load(self):
"""Load all configuration from INI format files and ENV, always
preferring the last read. Order of loading is:
1) Custom paths as defined in constants.CONFS_PATH
2) XDG standard paths
3) Environment variables
Returns:
dict: dict of section configuration dicts
"""
configp = configparser.ConfigParser()
for path in _get_configs_path():
try:
with open(path, 'r') as config_file:
configp.read_file(config_file)
except IOError:
pass
configp.read_dict(get_env_dict(self.root_section))
return {s: dict(configp.items(s)) for s in configp.sections()}
def update_args(self, args):
"""Update config dictionary with parsed args, as resolved by argparse.
        Only root-section arguments that already exist will be overridden.
Args:
args (namespace): args parsed by argparse
"""
for arg in vars(args):
if self.get(arg):
self._config[self.root_section][arg] = getattr(args, arg)
def update_parser(self, parser):
"""Update config dictionary with declared arguments in an argparse.parser
New variables will be created, and existing ones overridden.
Args:
parser (argparse.ArgumentParser): parser to read variables from
"""
self._parser = parser
ini_str = argparse_to_ini(parser)
configp = configparser.ConfigParser(allow_no_value=True)
configp.read_dict(self._config)
configp.read_string(ini_str)
self._config.update(
{s: dict(configp.items(s))
for s in configp.sections()}
)
def get(self, *args):
"""Get a variable from the default section
Args:
*args (args): dict.get() args
Returns:
str: config variable
"""
return self._config[self.root_section].get(*args)
def __getitem__(self, key):
"""Get a variable from the default section, good for fail-fast
if key does not exists.
Args:
key (str): key
Returns:
str: config variable
"""
return self._config[self.root_section][key]
def get_section(self, *args):
"""get a section dictionary
Args:
Returns:
dict: section config dictionary
"""
return self._config.get(*args)
def get_ini(self, defaults_only=False, incl_unset=False):
"""Return the config dictionary in INI format
Args:
defaults_only (bool): if set, will ignore arguments set by the CLI.
Returns:
str: string of the config file in INI format
"""
if self._parser:
if not defaults_only:
self._parser.set_defaults(
**self.get_section(self.root_section)
)
return argparse_to_ini(parser=self._parser, incl_unset=incl_unset)
else:
configp = configparser.ConfigParser(allow_no_value=True)
configp.read_dict(self._config)
with StringIO() as out_ini:
configp.write(out_ini)
return out_ini.getvalue()
def __repr__(self):
return self._config.__repr__()
def __str__(self):
return self._config.__str__()
config = ConfigLoad()
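# Illustrative usage sketch (editor addition, not part of the original module):
# it feeds one setting through the LAGO_* environment-variable convention that
# get_env_dict() documents and reads it back. The variable name is invented for
# the demo.
if __name__ == '__main__':
    os.environ['LAGO_DEMO_SETTING'] = 'demo-value'
    demo_config = ConfigLoad()
    print(demo_config.get('demo_setting'))    # 'demo-value'
    print(demo_config.get_section('lago'))    # the whole root section as a dict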
| gpl-2.0 | 1,298,325,999,725,875,200 | 28.691667 | 81 | 0.60174 | false | 4.164816 | true | false | false |
wesm/statsmodels | scikits/statsmodels/base/wrapper.py | 1 | 2951 | import inspect
import functools
import types
import numpy as np
class ResultsWrapper(object):
"""
Class which wraps a statsmodels estimation Results class and steps in to
reattach metadata to results (if available)
"""
_wrap_attrs = {}
_wrap_methods = {}
def __init__(self, results):
self._results = results
self.__doc__ = results.__doc__
def __dir__(self):
return [x for x in dir(self._results)]
def __getattribute__(self, attr):
get = lambda name: object.__getattribute__(self, name)
results = get('_results')
try:
return get(attr)
except AttributeError:
pass
obj = getattr(results, attr)
data = results.model._data
how = self._wrap_attrs.get(attr)
if how:
obj = data.wrap_output(obj, how=how)
return obj
def union_dicts(*dicts):
result = {}
for d in dicts:
result.update(d)
return result
def make_wrapper(func, how):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
results = object.__getattribute__(self, '_results')
data = results.model._data
return data.wrap_output(func(results, *args, **kwargs), how)
argspec = inspect.getargspec(func)
formatted = inspect.formatargspec(argspec.args, varargs=argspec.varargs,
defaults=argspec.defaults)
wrapper.__doc__ = "%s%s\n%s" % (func.im_func.func_name, formatted,
wrapper.__doc__)
return wrapper
def populate_wrapper(klass, wrapping):
for meth, how in klass._wrap_methods.iteritems():
if not hasattr(wrapping, meth):
continue
func = getattr(wrapping, meth)
wrapper = make_wrapper(func, how)
setattr(klass, meth, wrapper)
if __name__ == '__main__':
import scikits.statsmodels.api as sm
from pandas import DataFrame
data = sm.datasets.longley.load()
df = DataFrame(data.exog, columns=data.exog_name)
y = data.endog
# data.exog = sm.add_constant(data.exog)
df['intercept'] = 1.
olsresult = sm.OLS(y, df).fit()
rlmresult = sm.RLM(y, df).fit()
# olswrap = RegressionResultsWrapper(olsresult)
# rlmwrap = RLMResultsWrapper(rlmresult)
data = sm.datasets.wfs.load()
# get offset
offset = np.log(data.exog[:,-1])
exog = data.exog[:,:-1]
# convert dur to dummy
exog = sm.tools.categorical(exog, col=0, drop=True)
# drop reference category
# convert res to dummy
exog = sm.tools.categorical(exog, col=0, drop=True)
# convert edu to dummy
exog = sm.tools.categorical(exog, col=0, drop=True)
# drop reference categories and add intercept
exog = sm.add_constant(exog[:,[1,2,3,4,5,7,8,10,11,12]])
endog = np.round(data.endog)
mod = sm.GLM(endog, exog, family=sm.families.Poisson()).fit()
# glmwrap = GLMResultsWrapper(mod)
| bsd-3-clause | -7,016,303,070,008,334,000 | 28.217822 | 76 | 0.604541 | false | 3.66129 | false | false | false |
inuitwallet/NuBippy | encrypt/bip38.py | 1 | 3003 | import hashlib
import binascii
import encrypt.aes as aes
import encrypt.scrypt as scrypt
import num.enc as enc
def encrypt(privK, Baddress, Saddress, passphrase):
"""
BIP0038 private key encryption, Non-EC
"""
# 1. take the first four bytes of SHA256(SHA256(address)) of it. Let's call this "addresshash".
addresshash = hashlib.sha256(hashlib.sha256(Baddress + Saddress).digest()).digest()[:4]
#2. Derive a key from the passphrase using scrypt
# a. Parameters: passphrase is the passphrase itself encoded in UTF-8.
# addresshash came from the earlier step, n=16384, r=8, p=8, length=64
# (n, r, p are provisional and subject to consensus)
key = scrypt.hash(passphrase, addresshash, 16384, 8, 8)
#Let's split the resulting 64 bytes in half, and call them derivedhalf1 and derivedhalf2.
derivedhalf1 = key[0:32]
derivedhalf2 = key[32:64]
#3. Do AES256Encrypt(bitcoinprivkey[0...15] xor derivedhalf1[0...15], derivedhalf2), call the 16-byte result encryptedhalf1
Aes = aes.Aes(derivedhalf2)
encryptedhalf1 = Aes.enc(enc.sxor(privK[:16], derivedhalf1[:16]))
#4. Do AES256Encrypt(bitcoinprivkey[16...31] xor derivedhalf1[16...31], derivedhalf2), call the 16-byte result encryptedhalf2
encryptedhalf2 = Aes.enc(enc.sxor(privK[16:32], derivedhalf1[16:32]))
#5. The encrypted private key is the Base58Check-encoded concatenation of the following, which totals 39 bytes without Base58 checksum:
# 0x01 0x42 + flagbyte + salt + encryptedhalf1 + encryptedhalf2
flagbyte = chr(0b11100000) # 11 no-ec 1 compressed-pub 00 future 0 ec only 00 future
privkey = ('\x01\x42' + flagbyte + addresshash + encryptedhalf1 + encryptedhalf2)
check = hashlib.sha256(hashlib.sha256(privkey).digest()).digest()[:4]
return enc.b58encode(privkey + check)
def decrypt(encrypted_privkey, passphrase):
# 1. Collect encrypted private key and passphrase from user.
# passed as parameters
data = enc.b58decode(encrypted_privkey)
flagbyte = data[2:3]
check = data[-4:]
if check != hashlib.sha256(hashlib.sha256(data[:-4]).digest()).digest()[:4]:
return False, 'checksum'
addresshash = data[3:7]
encryptedhalf1 = data[7:23]
encryptedhalf2 = data[23:39]
    #3. Derive derivedhalf1 and derivedhalf2 by running scrypt on the passphrase and addresshash
key = scrypt.hash(passphrase, addresshash, 16384, 8, 8)
derivedhalf1 = key[0:32]
derivedhalf2 = key[32:64]
    #4. Decrypt encryptedhalf2 with AES256Decrypt(derivedhalf2) to recover the last 16 bytes of the XORed private key.
Aes = aes.Aes(derivedhalf2)
decryptedhalf2 = Aes.dec(encryptedhalf2)
    #5. Decrypt encryptedhalf1 to recover the first 16 bytes of the XORed private key.
decryptedhalf1 = Aes.dec(encryptedhalf1)
priv = decryptedhalf1 + decryptedhalf2
priv = binascii.unhexlify('%064x' % (long(binascii.hexlify(priv), 16) ^ long(binascii.hexlify(derivedhalf1), 16)))
return priv, addresshash
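if __name__ == '__main__':
    # Illustrative round-trip sketch (editor addition, not part of the original
    # module). The key bytes and address strings below are placeholders, not
    # real Bitcoin/NuBits credentials; the addresses only feed the addresshash salt.
    import os
    demo_priv = os.urandom(32)
    demo_baddress = 'B_demo_address_placeholder'
    demo_saddress = 'S_demo_address_placeholder'
    demo_passphrase = 'correct horse battery staple'
    encrypted = encrypt(demo_priv, demo_baddress, demo_saddress, demo_passphrase)
    recovered, salt = decrypt(encrypted, demo_passphrase)
    assert recovered == demo_priv
    print('round trip ok: ' + encrypted)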
| mit | 267,483,785,632,943,680 | 41.295775 | 139 | 0.708292 | false | 3.366592 | false | false | false |
ryfeus/lambda-packs | Tensorflow_Pandas_Numpy/source3.6/tensorflow/contrib/opt/python/training/nadam_optimizer.py | 57 | 4017 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Nadam for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import adam
from tensorflow.python.training import training_ops
class NadamOptimizer(adam.AdamOptimizer):
"""Optimizer that implements the Nadam algorithm.
See [Dozat, T., 2015](http://cs229.stanford.edu/proj2015/054_report.pdf).
"""
def _apply_dense(self, grad, var):
m = self.get_slot(var, "m")
v = self.get_slot(var, "v")
return training_ops.apply_adam(
var,
m,
v,
math_ops.cast(self._beta1_power, var.dtype.base_dtype),
math_ops.cast(self._beta2_power, var.dtype.base_dtype),
math_ops.cast(self._lr_t, var.dtype.base_dtype),
math_ops.cast(self._beta1_t, var.dtype.base_dtype),
math_ops.cast(self._beta2_t, var.dtype.base_dtype),
math_ops.cast(self._epsilon_t, var.dtype.base_dtype),
grad,
use_locking=self._use_locking,
use_nesterov=True).op
def _resource_apply_dense(self, grad, var):
m = self.get_slot(var, "m")
v = self.get_slot(var, "v")
return training_ops.resource_apply_adam(
var.handle,
m.handle,
v.handle,
math_ops.cast(self._beta1_power, grad.dtype.base_dtype),
math_ops.cast(self._beta2_power, grad.dtype.base_dtype),
math_ops.cast(self._lr_t, grad.dtype.base_dtype),
math_ops.cast(self._beta1_t, grad.dtype.base_dtype),
math_ops.cast(self._beta2_t, grad.dtype.base_dtype),
math_ops.cast(self._epsilon_t, grad.dtype.base_dtype),
grad,
use_locking=self._use_locking,
use_nesterov=True)
def _apply_sparse_shared(self, grad, var, indices, scatter_add):
beta1_power = math_ops.cast(self._beta1_power, var.dtype.base_dtype)
beta2_power = math_ops.cast(self._beta2_power, var.dtype.base_dtype)
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
# m_t = beta1 * m + (1 - beta1) * g_t
m = self.get_slot(var, "m")
m_scaled_g_values = grad * (1 - beta1_t)
m_t = state_ops.assign(m, m * beta1_t, use_locking=self._use_locking)
with ops.control_dependencies([m_t]):
m_t = scatter_add(m, indices, m_scaled_g_values)
# m_bar = (1 - beta1) * g_t + beta1 * m_t
m_bar = m_scaled_g_values + beta1_t * m_t
# v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
v = self.get_slot(var, "v")
v_scaled_g_values = (grad * grad) * (1 - beta2_t)
v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
with ops.control_dependencies([v_t]):
v_t = scatter_add(v, indices, v_scaled_g_values)
v_sqrt = math_ops.sqrt(v_t)
var_update = state_ops.assign_sub(
var, lr * m_bar / (v_sqrt + epsilon_t), use_locking=self._use_locking)
return control_flow_ops.group(*[var_update, m_bar, v_t])
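# Illustrative usage sketch (editor addition, not part of the original module):
# it minimizes a toy quadratic with NadamOptimizer in TensorFlow 1.x graph mode.
# The variable, loss and step count below are made up for the demo.
if __name__ == '__main__':
  import tensorflow as tf

  w = tf.Variable(5.0, name='w')
  loss = tf.square(w - 3.0)  # minimum at w == 3
  train_op = NadamOptimizer(learning_rate=0.1).minimize(loss)

  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(200):
      sess.run(train_op)
    print('w after training:', sess.run(w))  # should be close to 3.0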
| mit | 7,756,948,106,105,243,000 | 42.193548 | 80 | 0.643266 | false | 3.066412 | false | false | false |
Zaharid/reportengine | src/reportengine/templateparser.py | 1 | 4814 | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 27 14:58:12 2015
@author: zah
"""
import functools
import contextlib
import jinja2
import jinja2.runtime
from jinja2.exceptions import TemplateError
from reportengine.resourcebuilder import ResourceError
class TemplateRecordError(ResourceError, TemplateError): pass
@functools.total_ordering
class TargetRecord:
def __init__(self, recorder, name, args=None, kwargs=None):
self.name = name
self.recorder = recorder
self.args = args
self.kwargs = kwargs
def __call__(self, *args, **kwargs):
for val in (*args, *kwargs.values()):
if isinstance(val, TargetRecord):
raise TemplateRecordError("Cannot determine the value of "
"parameter inside a top"
"level template: %s" % val.name)
return type(self)(recorder=self.recorder, name=self.name,
args=args, kwargs=kwargs)
def __iter__(self):
raise TemplateRecordError("Cannot iterate a resource inside a top "
"level template: %s" % self.name)
def __eq__(self, other):
raise TemplateRecordError("Cannot compare resources inside a top "
"level template: %s" % self.name)
def __lt__(self, other):
raise TemplateRecordError("Cannot compare resources inside a top "
"level template: %s" % self.name)
def __bool__(self):
raise TemplateRecordError("Cannot determine boolean value of a "
"resource inside a top "
"level template: %s" % self.name)
def __str__(self):
"""Do not call this!"""
#This is dangerous as it will produce wrong results if called
        #outside the template. Maybe it would be better to use some other
        #name, and overwrite builtins.str in the template code context.
if self.args is not None and self.kwargs is not None:
if self.args:
msg = ("Error at {0.name}. Positional arguments like {0.args} "
"are not aloowed inside a top-level template. "
"Use keyword arguments, such as "
"{0.name}(argname={0.args[0]},...)").format(self)
raise TemplateRecordError(msg)
target = {self.name: self.kwargs}
else:
target = self.name
env_targets = self.recorder.environment.targets
env_targets.append(target)
return "<Provider {} args={} kwargs={}>".format(self.name, self.args,
self.kwargs)
class TargetSubs(TargetRecord):
def __str__(self):
return str(next(self.recorder.environment.results))
class TargetRecorder(jinja2.runtime.Context):
record_class = TargetRecord
def resolve(self, item):
return self[item]
def __contains__(self, item):
return True
def __getitem__(self, item):
        # TODO: Make sure we are not overwriting this
if item in self.environment.globals:
return self.environment.globals[item]
record = self.record_class(self, item)
return record
class TargetSubstituter(TargetRecorder):
record_class = TargetSubs
class Environment(jinja2.Environment):
"""This class is the same as `jinja2.Environment` except that is adds a
`fetch_mode` context manager, where the rendered templates register the
variables and functions (with parameters) that will be called to
render the template. This is used to extract the target resources and
perform the corresponding checks. Also it imposes some restrictions on
what the template can do, which is OK because we don't want a lot of
logic in the user templates (we can always use another environment to
render complex objects like figures)."""
@contextlib.contextmanager
def _change_context(self, context_class):
past_context = self.context_class
self.context_class = context_class
try:
yield
finally:
self.context_class = past_context
@contextlib.contextmanager
def fetch_mode(self):
self.targets = []
with self._change_context(TargetRecorder):
yield
@contextlib.contextmanager
def subs_mode(self, results):
self.results = iter(results)
with self._change_context(TargetSubstituter):
yield
def render_with_targets(self, template):
with self.fetch_mode():
template.render()
results = yield self.targets
with self.subs_mode(results):
yield template.render()
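# Illustrative usage sketch (editor addition, not part of the original module):
# it drives the two-step generator protocol of render_with_targets(): first
# collect the target names recorded during fetch_mode, then send back one
# computed result per target and obtain the final rendering. The template text
# and the result value are invented, and the sketch assumes the jinja2 version
# this module was written against (where Context.resolve() is consulted).
if __name__ == '__main__':
    env = Environment()
    template = env.from_string("mean value: {{ mean_value }}")
    gen = env.render_with_targets(template)
    targets = next(gen)        # e.g. ['mean_value']
    results = [42]             # one result per recorded target, in order
    print(gen.send(results))   # "mean value: 42"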
| gpl-2.0 | -8,748,257,079,778,189,000 | 34.659259 | 79 | 0.60241 | false | 4.520188 | false | false | false |
kohr-h/odl | examples/solvers/pdhg_denoising_L1_HuberTV.py | 2 | 2689 | """Total variation denoising using PDHG.
This example solves the L1-HuberTV problem
min_{x >= 0} ||x - d||_1
+ lam * sum_i eta_gamma(||grad(x)_i||_2)
where ``grad`` is the spatial gradient and ``d`` is given noisy data. Here
``eta_gamma`` denotes the Huber function. For more details, see the Huber
documentation.
For further details and a description of the solution method used, see
https://odlgroup.github.io/odl/guide/pdhg_guide.html in the ODL documentation.
"""
import numpy as np
import odl
import matplotlib.pyplot as plt
# Define ground truth, space and noisy data
shape = [100, 100]
space = odl.uniform_discr([0, 0], shape, shape)
orig = odl.phantom.smooth_cuboid(space)
d = odl.phantom.salt_pepper_noise(orig, fraction=0.2)
# Define objective functional
op = odl.Gradient(space) # operator
norm_op = np.sqrt(8) + 1e-2 # norm with forward differences is well-known
lam = 2 # Regularization parameter
const = 0.5
f = const / lam * odl.solvers.L1Norm(space).translated(d) # data fit
g = const * odl.solvers.Huber(op.range, gamma=.01) # regularization
obj_fun = f + g * op # combined functional
mu_g = 1 / g.grad_lipschitz # Strong convexity of "f*"
# Define algorithm parameters
class CallbackStore(odl.solvers.Callback): # Callback to store function values
def __init__(self):
self.iteration_count = 0
self.iteration_counts = []
self.obj_function_values = []
def __call__(self, x):
self.iteration_count += 1
self.iteration_counts.append(self.iteration_count)
self.obj_function_values.append(obj_fun(x))
def reset(self):
self.iteration_count = 0
self.iteration_counts = []
self.obj_function_values = []
callback = odl.solvers.CallbackPrintIteration(step=10) & CallbackStore()
niter = 500 # Number of iterations
tau = 1.0 / norm_op # Step size for primal variable
sigma = 1.0 / norm_op # Step size for dual variable
# Run algorithm
x = space.zero()
callback(x) # store values for initialization
odl.solvers.pdhg(x, f, g, op, niter, tau, sigma, gamma_dual=mu_g,
callback=callback)
obj = callback.callbacks[1].obj_function_values
# %% Display results
# Show images
clim = [0, 1]
cmap = 'gray'
orig.show('Original', clim=clim, cmap=cmap)
d.show('Noisy', clim=clim, cmap=cmap)
x.show('Denoised', clim=clim, cmap=cmap)
# Show convergence rate
def rel_fun(x):
x = np.array(x)
return (x - min(x)) / (x[0] - min(x))
i = np.array(callback.callbacks[1].iteration_counts)
plt.figure()
plt.loglog(i, rel_fun(obj), label='PDHG')
plt.loglog(i[1:], 20. / i[1:] ** 2, ':', label='$O(1/k^2)$')
plt.title('Function Values')
plt.legend()
| mpl-2.0 | 9,206,175,589,211,194,000 | 28.877778 | 79 | 0.671625 | false | 3.001116 | false | false | false |
discos/discos-backend | src/timediscos.py | 1 | 1629 |
#
#
# Copyright 2015 Marco Bartolini, [email protected]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from decimal import Decimal
from astropy.time import TimeUnix, Time
import astropy._erfa as erfa
import time
CENTINANOSECONDS = 10000000
class TimeDiscos(TimeUnix):
"""
Acs Time: centinanoseconds from 1970-01-01 00:00:00 UTC
"""
name = 'discos'
unit = 1.0 / (erfa.DAYSEC * CENTINANOSECONDS)
def __init__(self, val1, val2, scale, precision,
in_subfmt, out_subfmt, from_jd=False):
super(TimeDiscos, self).__init__(val1, val2, scale, 7,
in_subfmt, out_subfmt, from_jd)
def parse_unix_time(unix_timestamp_string):
int_timestamp = int(Decimal(unix_timestamp_string) * CENTINANOSECONDS)
return Time(int_timestamp,
format = 'discos',
scale = 'utc',
precision = 7)
def get_acs_now():
return Time(time.time() * CENTINANOSECONDS, format="discos")
def unix_to_acs_time(unix_timestamp):
return Time(unix_timestamp * CENTINANOSECONDS, format="discos")
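# Illustrative usage sketch (editor addition, not part of the original module):
# it round-trips a unix timestamp through the 'discos' format. The timestamp
# value is arbitrary.
if __name__ == '__main__':
    t = parse_unix_time('1436190600.0000001')
    print(t.value)              # centinanoseconds since 1970-01-01 00:00:00 UTC
    print(t.unix)               # back to unix seconds
    print(get_acs_now().value)  # current time in centinanoseconds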
| apache-2.0 | 1,550,545,692,869,218,600 | 30.326923 | 76 | 0.667281 | false | 3.436709 | false | false | false |
rcbops/keystone-buildpackage | keystone/backends/ldap/api/user.py | 1 | 4236 | import ldap
import ldap.filter
import keystone.backends.backendutils as utils
from keystone.backends.api import BaseUserAPI
from keystone.backends.sqlalchemy.api.user import UserAPI as SQLUserAPI
from .. import models
from .base import BaseLdapAPI, add_redirects
class UserAPI(BaseLdapAPI, BaseUserAPI):
DEFAULT_TREE_DN = 'ou=Users,dc=example,dc=com'
DEFAULT_STRUCTURAL_CLASSES = ['keystoneUidObject']
DEFAULT_ID_ATTR = 'uid'
options_name = 'user'
object_class = 'keystoneUser'
model = models.User
attribute_mapping = {
'password': 'userPassword',
'email': 'mail',
'enabled': 'keystoneEnabled',
}
attribute_ignore = ['tenant_id']
def _ldap_res_to_model(self, res):
obj = super(UserAPI, self)._ldap_res_to_model(res)
tenants = self.api.tenant.get_user_tenants(obj.id, False)
if len(tenants) > 0:
obj.tenant_id = tenants[0].id
return obj
def get_by_name(self, name, filter=None):
return self.get(name, filter)
def create(self, values):
# Persist the 'name' as the UID
values['id'] = values['name']
delattr(values, 'name')
utils.set_hashed_password(values)
values = super(UserAPI, self).create(values)
if values['tenant_id'] is not None:
self.api.tenant.add_user(values['tenant_id'], values['id'])
return values
def update(self, id, values):
old_obj = self.get(id)
try:
new_tenant = values['tenant_id']
except KeyError:
pass
else:
if old_obj.tenant_id != new_tenant:
if old_obj.tenant_id:
self.api.tenant.remove_user(old_obj.tenant_id, id)
if new_tenant:
self.api.tenant.add_user(new_tenant, id)
utils.set_hashed_password(values)
super(UserAPI, self).update(id, values, old_obj)
def delete(self, id):
super(UserAPI, self).delete(id)
for ref in self.api.role.ref_get_all_global_roles(id):
self.api.role.ref_delete(ref.id)
for ref in self.api.role.ref_get_all_tenant_roles(id):
self.api.role.ref_delete(ref.id)
def get_by_email(self, email):
users = self.get_all('(mail=%s)' % \
(ldap.filter.escape_filter_chars(email),))
try:
return users[0]
except IndexError:
return None
def user_roles_by_tenant(self, user_id, tenant_id):
return self.api.role.ref_get_all_tenant_roles(user_id, tenant_id)
def get_by_tenant(self, id, tenant_id):
user_dn = self._id_to_dn(id)
user = self.get(id)
tenant = self.api.tenant._ldap_get(tenant_id,
'(member=%s)' % (user_dn,))
if tenant is not None:
return user
else:
if self.api.role.ref_get_all_tenant_roles(id, tenant_id):
return user
return None
def delete_tenant_user(self, id, tenant_id):
self.api.tenant.remove_user(tenant_id, id)
self.delete(id)
def user_role_add(self, values):
return self.api.role.add_user(values.role_id, values.user_id,
values.tenant_id)
def user_get_update(self, id):
return self.get(id)
def users_get_page(self, marker, limit):
return self.get_page(marker, limit)
def users_get_page_markers(self, marker, limit):
return self.get_page_markers(marker, limit)
def users_get_by_tenant_get_page(self, tenant_id, marker, limit):
return self._get_page(marker, limit,
self.api.tenant.get_users(tenant_id))
def users_get_by_tenant_get_page_markers(self, tenant_id, marker, limit):
return self._get_page_markers(marker, limit,
self.api.tenant.get_users(tenant_id))
def check_password(self, user, password):
return utils.check_password(password, user.password)
add_redirects(locals(), SQLUserAPI, ['get_by_group', 'tenant_group',
'tenant_group_delete', 'user_groups_get_all',
'users_tenant_group_get_page', 'users_tenant_group_get_page_markers'])
| apache-2.0 | 6,520,816,330,600,987,000 | 34.008264 | 78 | 0.594901 | false | 3.524126 | false | false | false |
Distrotech/scons | test/site_scons/override.py | 3 | 1992 | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that a tool module in site_tools overrides the base tool.
Use 'm4' as test tool since it's likely to be found,
and not commonly overridden by platform-specific stuff the way cc is.
"""
import TestSCons
test = TestSCons.TestSCons()
test.subdir('site_scons', ['site_scons', 'site_tools'])
test.write(['site_scons', 'site_tools', 'm4.py'], """
import SCons.Tool
def generate(env):
env['M4']='my_m4'
env['M4_MINE']=1
def exists(env):
return 1
""")
test.write('SConstruct', """
e=Environment()
print e.subst('M4 is $M4, M4_MINE is $M4_MINE')
""")
test.run(arguments = '-Q .',
stdout = """M4 is my_m4, M4_MINE is 1
scons: `.' is up to date.\n""")
test.pass_test()
# end of file
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit | 8,095,686,159,124,409,000 | 28.294118 | 73 | 0.712851 | false | 3.538188 | true | false | false |
BenjamenMeyer/aiohttp-mock | aiohttp_mock/router.py | 1 | 4282 | # Copyright 2015 by Benjamen R. Meyer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from aiohttp_mock.exceptions import *
from aiohttp.client_reqrep import ClientResponse
from aiohttp_mock.utils import cidict
class ConnectionRouterHandler(object):
"""Handler for a given URI
This class handles all the HTTP Verbs for a given URI.
"""
def __init__(self, uri):
self.uri = uri
self._method_handlers = {}
def add_method_handler(self, method, handler):
"""Add or update the Method handler
:param method: string - HTTP Verb
:param handler: ClientResponse object or callable
that will be used to respond to
the request
"""
self._method_handlers[method] = handler
def handle(self, method, request):
"""Handle a request
:param method: string - HTTP Verb
:param request: aiohttp.client_reqrep.ClientRequest
:returns: aiohttp.client_reqrep.ClientResponse
Note: Returns an HTTP 405 if the HTTP Verb is not
supported
"""
# If the method has a registered handler, then
# return it. Otherwise, create a 405 response
if method in self._method_handlers:
handler = self._method_handlers[method]
# Callbacks must be callables
if hasattr(handler, '__call__'):
                return self._method_handlers[method](request)
else:
return handler
else:
response = ClientResponse(method, self.uri, host='aiohttp_mock')
response.status = 405
response.reason = 'Method Not Supported'
response._should_close = False
response._headers = cidict({
'x-agent': 'aiohttp-mock',
'content-length': 0
})
return response
class ConnectionRouter(object):
def __init__(self):
self._routes = {}
def reset(self):
"""Reset all the routes
"""
self._routes = {}
def add_route(self, uri):
"""Add a route to be managed
:param uri: string - URI to be handled
"""
if uri not in self._routes:
self._routes[uri] = ConnectionRouterHandler(uri)
def get_route(self, uri):
"""Access the handler for a URI
:param uri: string - URI of the request
:returns: ConnectionRouterHandler instance managing the route
:raises: RouteNotHandled if the route is not handled
"""
if uri in self._routes:
return self._routes[uri]
else:
raise RouteNotHandled('{0} not handled'.format(uri))
def add_route_handler(self, uri, method, handler):
"""Add an HTTP Verb handler to the URI
:param uri: string - URI that the handler is for
:param method: string - HTTP Verb the handler is for
        :param handler: ClientResponse or callable that will handle the request
"""
try:
router = self.get_route(uri)
except RouteNotHandled:
self.add_route(uri)
router = self.get_route(uri)
router.add_method_handler(method, handler)
def handle(self, method, uri, request):
"""Handle a request and create a response
:param method: string - HTTP Method the request is calling
:param uri: string - URI the request is for
:param request: aiohttp.client_reqreq.ClientRequest instance
for the request
:returns: aiohttp.client_reqrep.ClientResponse instance
:raises: RouteNotHandled if the route is not handled
"""
router = self.get_route(uri)
return router.handle(method, request)
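# Illustrative usage sketch (editor addition, not part of the original module):
# it registers a canned ClientResponse for a GET route and dispatches to it.
# The URI is a placeholder, and no real ClientRequest is needed because a
# non-callable handler is returned as-is.
if __name__ == '__main__':
    canned = ClientResponse('GET', 'http://mocked.example/ping', host='aiohttp_mock')
    canned.status = 200
    canned.reason = 'OK'
    canned._should_close = False
    canned._headers = cidict({'x-agent': 'aiohttp-mock', 'content-length': 0})

    router = ConnectionRouter()
    router.add_route_handler('http://mocked.example/ping', 'GET', canned)
    response = router.handle('GET', 'http://mocked.example/ping', request=None)
    print(response.status, response.reason)  # 200 OK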
| apache-2.0 | -2,916,611,160,968,730,000 | 31.195489 | 78 | 0.613031 | false | 4.516878 | false | false | false |
tartavull/google-cloud-python | core/nox.py | 2 | 3004 | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import nox
@nox.session
@nox.parametrize('python_version', ['2.7', '3.4', '3.5', '3.6'])
def unit_tests(session, python_version):
"""Run the unit test suite."""
# Run unit tests against all supported versions of Python.
session.interpreter = 'python{}'.format(python_version)
# Set the virtualenv dirname.
session.virtualenv_dirname = 'unit-' + python_version
# Install all test dependencies, then install this package in-place.
session.install(
'mock',
'pytest',
'pytest-cov',
'grpcio >= 1.0.2',
)
session.install('-e', '.')
# Run py.test against the unit tests.
session.run(
'py.test',
'--quiet',
'--cov=google.cloud',
'--cov=google.api.core',
'--cov=tests.unit',
'--cov-append',
'--cov-config=.coveragerc',
'--cov-report=',
'--cov-fail-under=97',
os.path.join('tests', 'unit'),
*session.posargs
)
@nox.session
def lint(session):
"""Run linters.
Returns a failure if the linters find linting errors or sufficiently
serious code quality issues.
"""
session.interpreter = 'python3.6'
session.install(
'flake8', 'flake8-import-order', 'pylint', 'gcp-devrel-py-tools')
session.install('.')
session.run('flake8', 'google', 'tests')
session.run(
'gcp-devrel-py-tools', 'run-pylint',
'--config', 'pylint.config.py',
'--library-filesets', 'google',
'--test-filesets', 'tests',
# Temporarily allow this to fail.
success_codes=range(0, 100))
@nox.session
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
session.interpreter = 'python3.6'
# Set the virtualenv dirname.
session.virtualenv_dirname = 'setup'
session.install('docutils', 'Pygments')
session.run(
'python', 'setup.py', 'check', '--restructuredtext', '--strict')
@nox.session
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.interpreter = 'python3.6'
session.install('coverage', 'pytest-cov')
session.run('coverage', 'report', '--show-missing', '--fail-under=100')
session.run('coverage', 'erase')
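# Editor note (not part of the original noxfile): with this file in place the
# sessions above are typically run from the command line, for example
#
#   nox -s lint cover
#   nox -s "unit_tests(python_version='3.6')"
#
# using nox's standard session-selection syntax; the exact invocations are an
# assumption, not something prescribed by this repository.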
| apache-2.0 | 4,031,518,551,961,741,000 | 28.742574 | 75 | 0.638815 | false | 3.750312 | true | false | false |
hamogu/Cepheids | snapshotprogram/detection.py | 1 | 5252 | import numpy as np
from astropy.table import vstack
from photutils import DAOStarFinder
from astropy.nddata import NDData
from photutils.psf import extract_stars
from astropy.io import fits
from astropy.table import Table
datadir = '/melkor/d1/guenther/downdata/HST/CepMASTfull/'
# I blinked through the images in ds9 to find single, isolated well-exposed
# stars not too far from the center but outside of the Cepheid PSF and not on
# any of the diffraction spikes.
prflist = [
['ibg405010_drz.fits', 340, 38],
['ibg416010_drz.fits', 443, 215],
['ibg418010_drz.fits', 112, 945],
['ibg418010_drz.fits', 112, 945],
['ibg422010_drz.fits', 895, 319],
['ibg426010_drz.fits', 385, 93],
['ibg436010_drz.fits', 342, 877],
['ibg438010_drz.fits', 416, 401],
['ibg440010_drz.fits', 211, 337],
['ibg443010_drz.fits', 359, 288],
['ibg444010_drz.fits', 328, 345],
['ibg444010_drz.fits', 725, 723],
['ibg446010_drz.fits', 276, 500],
['ibg453010_drz.fits', 812, 845],
['ibg453010_drz.fits', 333, 188],
['ibg455010_drz.fits', 263, 444],
['ibg456010_drz.fits', 529, 696],
['ibg458010_drz.fits', 161, 806],
['ibg459010_drz.fits', 374, 166],
['ibg465010_drz.fits', 588, 723],
['ibg468010_drz.fits', 150, 508],
['ibg471010_drz.fits', 600, 685],
['ibg471010_drz.fits', 892, 511],
]
#prflist = [['ibg402010_drz.fits', 612, 209],
# ['ibg402010_drz.fits', 1007, 951],
# ['ibg402010_drz.fits', 488, 705], # GAIA bad
# ['ibg403010_drz.fits', 597, 385],
# ['ibg405010_drz.fits', 570, 701], # GAIA bad
# ['ibg455010_drz.fits', 263, 444],
# ['ibg456010_drz.fits', 530, 696],
# ['ibg456010_drz.fits', 549, 462], # GAIA bad
# ['ibg456010_drz.fits', 860, 408],
# ['ibg456010_drz.fits', 911, 115],
# ['ibg465010_drz.fits', 588, 723],
# ['ibg471010_drz.fits', 600, 685],
# ['ibg471010_drz.fits', 892, 511],
#]
# -1 because the above positions are measured in ds9, which counts from (1,1)
# while the python code counts from (0,0)
stars621 = extract_stars([NDData(fits.open(datadir + row[0])[1].data) for row in prflist],
[Table({'x': [row[1] - 1], 'y': [row[2] - 1]}) for row in prflist],
size=25)
stars845 = extract_stars([NDData(fits.open(datadir + row[0].replace('10_', '20_'))[1].data) for row in prflist],
[Table({'x': [row[1] - 1], 'y': [row[2] - 1]}) for row in prflist],
size=25)
def check_matching_source_exists(l1, l2, d,
xname='xcentroid', yname='ycentroid'):
    '''Check, for each source in l1, whether one or more sources in l2 are close.
This is not the most efficient way to do things, but very quick to code and
runtime is not a concern for this.
Parameters
----------
l1, l2: two source lists
d : float
maximal distance in pix, `None` means that all input sources are returned
Returns
-------
ind1 : array
Array of indices for l1. All elements listed in this index have at least one
source in l2 within the given distance ``d``.
'''
ind1 = []
for i, s in enumerate(l1):
dsquared = (s[xname] - l2[xname])**2 + (s[yname] - l2[yname])**2
if (d is None) or (np.min(dsquared) < d**2):
ind1.append(i)
return ind1
def combine_source_tables(list621, list845, names, dmax=10, **kwargs):
    '''Combine source tables. Inputs are two lists of tables in different bands.
    This function:
    - Only keeps sources that have a matching source in the other band within ``dmax`` pixels.
    - Adds a table column with the target name (from input ``names``)
    - stacks everything in one big table.
'''
finallist = []
for i in range(len(list621)):
l1 = list621[i]
l2 = list845[i]
if len(l1) > 0:
l1['filter'] = 'F621M'
l1['TARGNAME'] = names[i]
if len(l2) > 0:
l2['filter'] = 'F845M'
l2['TARGNAME'] = names[i]
if (dmax is not None) and len(l1) > 0 and len(l2) > 0:
l1short = l1[check_matching_source_exists(l1, l2, dmax, **kwargs)]
l2short = l2[check_matching_source_exists(l2, l1, dmax, **kwargs)]
l1 = l1short
l2 = l2short
finallist.append(vstack([l1, l2]))
return vstack(finallist)
class DAOStarAutoThresholdFinder(DAOStarFinder):
    '''A DAOStarFinder variant that scales the detection threshold to
    ``threshold_scale`` times the standard deviation of each input image.
    '''
def __init__(self, threshold_scale=5, **kwargs):
self.threshold_in = threshold_scale
        # Need to set threshold in super().__init__ but the value is overwritten in __call__ anyway
super().__init__(threshold=1, **kwargs)
def __call__(self, data, *args, **kwargs):
self.threshold = self.threshold_in * np.std(data)
self.threshold_eff = self.threshold * self.kernel.relerr
return super().__call__(data, *args, **kwargs)
initial_finder = DAOStarAutoThresholdFinder(fwhm=2.5, threshold_scale=5.,
sharplo=0.55, sharphi=.75,
roundlo=-0.6, roundhi=0.6)
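# Illustrative usage sketch (editor addition, not part of the original script):
# run the auto-threshold finder on the first F621M frame listed in prflist and
# keep only detections that also appear in the matching F845M frame. The dmax
# value is the module default; everything else reuses objects defined above.
if __name__ == '__main__':
    frame_621 = fits.open(datadir + prflist[0][0])[1].data
    frame_845 = fits.open(datadir + prflist[0][0].replace('10_', '20_'))[1].data
    sources_621 = initial_finder(frame_621)
    sources_845 = initial_finder(frame_845)
    matched = combine_source_tables([sources_621], [sources_845],
                                    names=[prflist[0][0]], dmax=10)
    print(matched)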
| mit | -3,056,475,336,787,686,000 | 37.335766 | 112 | 0.581302 | false | 2.935718 | false | false | false |
erudit/zenon | tests/functional/apps/public/journal/test_views.py | 1 | 135649 | from bs4 import BeautifulSoup
from collections import OrderedDict
from lxml import etree as et
import datetime as dt
import io
import os
import pikepdf
import unittest.mock
import subprocess
import itertools
from hashlib import md5
from unittest.mock import PropertyMock
from django.contrib.auth.models import AnonymousUser
from django.core.exceptions import PermissionDenied
from django.urls import reverse
from django.http.response import HttpResponseRedirect
from django.test import Client
from django.test import TestCase, RequestFactory
from django.conf import settings
from django.test.utils import override_settings
import pytest
from apps.public.journal.viewmixins import SolrDataMixin
from core.subscription.test.utils import generate_casa_token
from erudit.models import JournalType, Issue, Article, Journal
from erudit.test.factories import ArticleFactory
from erudit.test.factories import CollectionFactory
from erudit.test.factories import DisciplineFactory
from erudit.test.factories import IssueFactory
from erudit.test.factories import EmbargoedIssueFactory
from erudit.test.factories import OpenAccessIssueFactory
from erudit.test.factories import JournalFactory
from erudit.test.factories import JournalInformationFactory
from erudit.test.solr import FakeSolrData
from erudit.fedora.objects import JournalDigitalObject
from erudit.fedora.objects import ArticleDigitalObject
from erudit.fedora.objects import MediaDigitalObject
from erudit.fedora import repository
from erudit.solr.models import Article as SolrArticle
from base.test.factories import UserFactory
from core.subscription.test.factories import JournalAccessSubscriptionFactory
from core.subscription.models import UserSubscriptions
from core.subscription.test.factories import JournalManagementSubscriptionFactory
from core.metrics.conf import settings as metrics_settings
from apps.public.journal.views import ArticleMediaView
from apps.public.journal.views import ArticleRawPdfView
from apps.public.journal.views import ArticleRawPdfFirstPageView
FIXTURE_ROOT = os.path.join(os.path.dirname(__file__), 'fixtures')
pytestmark = pytest.mark.django_db
def journal_detail_url(journal):
return reverse('public:journal:journal_detail', kwargs={'code': journal.code})
def issue_detail_url(issue):
return reverse('public:journal:issue_detail', args=[
issue.journal.code, issue.volume_slug, issue.localidentifier])
def article_detail_url(article):
return reverse('public:journal:article_detail', kwargs={
'journal_code': article.issue.journal.code,
'issue_slug': article.issue.volume_slug,
'issue_localid': article.issue.localidentifier,
'localid': article.localidentifier,
})
def article_raw_pdf_url(article):
issue = article.issue
journal_id = issue.journal.localidentifier
issue_id = issue.localidentifier
article_id = article.localidentifier
return reverse('public:journal:article_raw_pdf', args=(
journal_id, issue.volume_slug, issue_id, article_id
))
class TestJournalListView:
@pytest.fixture(autouse=True)
def setup(self):
self.client = Client()
self.user = UserFactory.create(username='foobar')
self.user.set_password('notsecret')
self.user.save()
def test_upcoming_journals_are_hidden_from_list(self):
# Create 6 journals
journals = JournalFactory.create_batch(6)
# Create an issue for the first 5 journals
for journal in journals[:5]:
IssueFactory(journal=journal)
url = reverse('public:journal:journal_list')
# Run
response = self.client.get(url)
displayed_journals = set(response.context['journals'])
assert displayed_journals == set(journals[:5])
assert journals[5] not in displayed_journals
def test_can_sort_journals_by_name(self):
# Setup
collection = CollectionFactory.create()
journal_1 = JournalFactory.create_with_issue(collection=collection, name='ABC journal')
journal_2 = JournalFactory.create_with_issue(collection=collection, name='ACD journal')
journal_3 = JournalFactory.create_with_issue(collection=collection, name='DEF journal')
journal_4 = JournalFactory.create_with_issue(collection=collection, name='GHI journal')
journal_5 = JournalFactory.create_with_issue(collection=collection, name='GIJ journal')
journal_6 = JournalFactory.create_with_issue(collection=collection, name='GJK journal')
url = reverse('public:journal:journal_list')
# Run
response = self.client.get(url)
# Check
assert response.status_code == 200
assert len(response.context['sorted_objects']) == 3
assert response.context['sorted_objects'][0]['key'] == 'A'
assert response.context['sorted_objects'][0]['objects'] == [
journal_1, journal_2, ]
assert response.context['sorted_objects'][1]['key'] == 'D'
assert response.context['sorted_objects'][1]['objects'] == [journal_3, ]
assert response.context['sorted_objects'][2]['key'] == 'G'
assert response.context['sorted_objects'][2]['objects'] == [
journal_4, journal_5, journal_6, ]
def test_can_sort_journals_by_disciplines(self):
# Setup
collection = CollectionFactory.create()
discipline_1 = DisciplineFactory.create(code='abc-discipline', name='ABC')
discipline_2 = DisciplineFactory.create(code='def-discipline', name='DEF')
discipline_3 = DisciplineFactory.create(code='ghi-discipline', name='GHI')
journal_1 = JournalFactory.create_with_issue(collection=collection)
journal_1.disciplines.add(discipline_1)
journal_2 = JournalFactory.create_with_issue(collection=collection)
journal_2.disciplines.add(discipline_1)
journal_3 = JournalFactory.create_with_issue(collection=collection)
journal_3.disciplines.add(discipline_2)
journal_4 = JournalFactory.create_with_issue(collection=collection)
journal_4.disciplines.add(discipline_3)
journal_5 = JournalFactory.create_with_issue(collection=collection)
journal_5.disciplines.add(discipline_3)
journal_6 = JournalFactory.create_with_issue(collection=collection)
journal_6.disciplines.add(discipline_3)
url = reverse('public:journal:journal_list')
# Run
response = self.client.get(url, {'sorting': 'disciplines'})
# Check
assert response.status_code == 200
assert len(response.context['sorted_objects']) == 3
assert response.context['sorted_objects'][0]['key'] == discipline_1.code
assert response.context['sorted_objects'][0]['collections'][0]['key'] == collection
assert response.context['sorted_objects'][0]['collections'][0]['objects'] == [
journal_1, journal_2, ]
assert response.context['sorted_objects'][1]['key'] == discipline_2.code
assert response.context['sorted_objects'][1]['collections'][0]['key'] == collection
assert response.context['sorted_objects'][1]['collections'][0]['objects'] == [journal_3, ]
assert response.context['sorted_objects'][2]['key'] == discipline_3.code
assert response.context['sorted_objects'][2]['collections'][0]['key'] == collection
assert set(response.context['sorted_objects'][2]['collections'][0]['objects']) == set([
journal_4, journal_5, journal_6, ])
def test_only_main_collections_are_shown_by_default(self):
collection = CollectionFactory.create()
main_collection = CollectionFactory.create(is_main_collection=True)
JournalFactory.create_with_issue(collection=collection)
journal2 = JournalFactory.create_with_issue(collection=main_collection)
url = reverse('public:journal:journal_list')
response = self.client.get(url)
assert list(response.context['journals']) == [journal2]
def test_can_filter_the_journals_by_open_access(self):
# Setup
collection = CollectionFactory.create()
journal_1 = JournalFactory.create_with_issue(collection=collection, open_access=True)
JournalFactory.create(collection=collection, open_access=False)
url = reverse('public:journal:journal_list')
# Run
response = self.client.get(url, data={'open_access': True})
# Check
assert list(response.context['journals']) == [journal_1, ]
def test_can_filter_the_journals_by_types(self):
# Setup
collection = CollectionFactory.create()
jtype_1 = JournalType.objects.create(code='T1', name='T1')
jtype_2 = JournalType.objects.create(code='T2', name='T2')
JournalFactory.create(collection=collection, type=jtype_1)
journal_2 = JournalFactory.create_with_issue(collection=collection, type=jtype_2)
url = reverse('public:journal:journal_list')
# Run
response = self.client.get(url, data={'types': ['T2', ]})
# Check
assert list(response.context['journals']) == [journal_2, ]
def test_can_filter_the_journals_by_collections(self):
# Setup
col_1 = CollectionFactory(code='col1')
col_2 = CollectionFactory(code='col2')
JournalFactory.create_with_issue(collection=col_1)
journal_2 = JournalFactory.create_with_issue(collection=col_2)
url = reverse('public:journal:journal_list')
# Run
response = self.client.get(url, data={'collections': ['col2', ]})
# Check
assert list(response.context['journals']) == [journal_2, ]
def test_can_filter_the_journals_by_disciplines(self):
j1 = JournalFactory.create_with_issue(disciplines=['d1', 'd2'])
j2 = JournalFactory.create_with_issue(disciplines=['d2'])
j3 = JournalFactory.create_with_issue(disciplines=['d3'])
JournalFactory.create_with_issue(disciplines=['d4'])
url = reverse('public:journal:journal_list')
response = self.client.get(url, data={'disciplines': ['d2', 'd3']})
assert set(response.context['journals']) == {j1, j2, j3}
def test_new_journal_titles_are_not_uppercased(self):
journal = JournalFactory(is_new=True, name='Enjeux et société')
url = reverse('public:journal:journal_list')
html = self.client.get(url).content.decode()
dom = BeautifulSoup(html, 'html.parser')
journals_list = dom.find('div', {'class': 'journals-list'})
assert 'Enjeux et société' in journals_list.decode()
assert 'Enjeux Et Société' not in journals_list.decode()
def test_journal_year_of_addition_is_displayed(self):
journal = JournalFactory(is_new=True, year_of_addition='2020')
url = reverse('public:journal:journal_list')
html = self.client.get(url).content.decode()
dom = BeautifulSoup(html, 'html.parser')
journals_list = dom.find('div', {'class': 'journals-list'})
assert '(nouveauté 2020)' in journals_list.decode()
@pytest.mark.parametrize('logo, expected_logo_display', [
('logo.png', True),
(False, False),
])
def test_do_not_display_non_existent_journal_logo_on_list_per_disciplines(
self, logo, expected_logo_display,
):
journal = JournalFactory.create_with_issue(code='journal', name='Journal')
journal.disciplines.add(DisciplineFactory())
if logo:
repository.api.register_datastream(
journal.get_full_identifier(),
'/LOGO/content',
open(settings.MEDIA_ROOT + '/' + logo, 'rb').read(),
)
url = reverse('public:journal:journal_list')
html = self.client.get(url, {'sorting': 'disciplines'}).content.decode()
logo = '<img\n ' \
'src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQV' \
'R42mNkYAAAAAYAAjCB0C8AAAAASUVORK5CYII="\n ' \
'data-src="/logo/journal/20110811144159.jpg"\n ' \
'alt="Logo pour Inter"\n ' \
'class="lazyload img-responsive card__figure"\n ' \
'/>'
if expected_logo_display:
assert logo in html
else:
assert logo not in html
class TestJournalDetailView:
@pytest.fixture(autouse=True)
def setup(self, settings):
settings.DEBUG = True
self.client = Client()
self.user = UserFactory.create(username='foobar')
self.user.set_password('notsecret')
self.user.save()
def test_main_title_is_always_in_context(self):
journal = JournalFactory()
response = self.client.get(journal_detail_url(journal))
assert 'main_title' in response.context.keys()
def test_can_embed_the_journal_information_in_the_context_if_available(self):
# Setup
journal_info = JournalInformationFactory(journal=JournalFactory())
url_1 = journal_detail_url(journal_info.journal)
journal_2 = JournalFactory()
url_2 = journal_detail_url(journal_2)
# Run
response_1 = self.client.get(url_1)
response_2 = self.client.get(url_2)
# Check
assert response_1.status_code == response_2.status_code == 200
assert response_1.context['journal_info'] == journal_info
assert response_2.context['journal_info'] == {'updated': None}
def test_can_display_when_issues_have_a_space_in_their_number(self, monkeypatch):
monkeypatch.setattr(Issue, 'erudit_object', unittest.mock.MagicMock())
issue = IssueFactory(number='2 bis')
url_1 = journal_detail_url(issue.journal)
# Run
response_1 = self.client.get(url_1)
assert response_1.status_code == 200
def test_can_embed_the_published_issues_in_the_context(self):
# Setup
journal = JournalFactory(collection=CollectionFactory(localidentifier='erudit'))
issue = IssueFactory(journal=journal)
IssueFactory(journal=journal, is_published=False)
url = journal_detail_url(journal)
# Run
response = self.client.get(url)
# Check
assert response.status_code == 200
assert list(response.context['issues']) == [issue]
def test_can_embed_the_current_issue_in_the_context(self):
issue1 = IssueFactory.create()
issue2 = IssueFactory.create_published_after(issue1)
url = journal_detail_url(issue1.journal)
response = self.client.get(url)
assert response.status_code == 200
assert response.context['current_issue'] == issue2
def test_can_embed_the_current_issue_external_url_in_the_context(self):
        # If the latest issue has an external URL, its link properly reflects that (proper href,
        # blank target).
external_url = 'https://example.com'
issue1 = IssueFactory.create()
issue2 = IssueFactory.create_published_after(issue1, external_url=external_url)
url = journal_detail_url(issue1.journal)
response = self.client.get(url)
assert response.status_code == 200
assert response.context['current_issue'] == issue2
link_attrs = response.context['current_issue'].extra.detail_link_attrs()
assert external_url in link_attrs
assert '_blank' in link_attrs
def test_external_issues_are_never_locked(self):
        # When an issue has an external URL, we never show the little lock icon next to it.
external_url = 'https://example.com'
collection = CollectionFactory.create(code='erudit')
journal = JournalFactory(open_access=False, collection=collection) # embargoed
issue1 = IssueFactory.create(journal=journal, external_url=external_url)
url = journal_detail_url(issue1.journal)
response = self.client.get(url)
assert not response.context['current_issue'].extra.is_locked()
def test_embeds_subscription_info_to_context(self):
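        # An individual subscription for the logged-in user should grant content access and
        # expose the subscription type in the template context.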
subscription = JournalAccessSubscriptionFactory(
type='individual',
user=self.user,
valid=True,
)
self.client.login(username='foobar', password='notsecret')
url = journal_detail_url(subscription.journal_management_subscription.journal)
response = self.client.get(url)
assert response.status_code == 200
assert response.context['content_access_granted']
assert response.context['subscription_type'] == 'individual'
def test_journal_detail_has_elements_for_anchors(self):
issue = IssueFactory()
url = journal_detail_url(issue.journal)
response = self.client.get(url)
content = response.content
assert b'<li role="presentation"' in content
assert b'<section role="tabpanel"' in content
assert b'<li role="presentation" id="journal-info-about-li"' not in content
assert b'<section role="tabpanel" class="tab-pane journal-info-block" id="journal-info-about"' not in content
@pytest.mark.parametrize('charges_apc', (True, False))
def test_journal_detail_has_no_apc_mention_if_it_charges_apc(self, charges_apc):
journal = JournalFactory(charges_apc=charges_apc)
url = journal_detail_url(journal)
response = self.client.get(url)
content = response.content
if not charges_apc:
assert b'Frais de publication' in content
else:
assert b'Frais de publication' not in content
@pytest.mark.parametrize('localidentifier', ('journal', 'previous_journal'))
def test_journal_notes_with_previous_journal(self, localidentifier):
journal = JournalFactory(
localidentifier=localidentifier,
notes=[
{
'pid': 'erudit:erudit.journal',
'langue': 'fr',
'content': 'Note pour journal',
},
{
'pid': 'erudit:erudit.previous_journal',
'langue': 'fr',
'content': 'Note pour previous_journal',
},
],
)
IssueFactory(journal=journal)
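        # Only the note whose pid matches the journal's own localidentifier should be displayed.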
html = self.client.get(journal_detail_url(journal)).content.decode()
if localidentifier == 'journal':
assert 'Note pour journal' in html
assert 'Note pour previous_journal' not in html
elif localidentifier == 'previous_journal':
assert 'Note pour journal' not in html
assert 'Note pour previous_journal' in html
class TestJournalAuthorsListView:
def test_provides_only_authors_for_the_first_available_letter_by_default(self):
issue_1 = IssueFactory.create(date_published=dt.datetime.now())
ArticleFactory.create(issue=issue_1, authors=['btest', 'ctest1', 'ctest2'])
url = reverse('public:journal:journal_authors_list', kwargs={'code': issue_1.journal.code})
response = Client().get(url)
assert response.status_code == 200
assert set(response.context['authors_dicts'].keys()) == {'btest', }
def test_only_provides_authors_for_the_given_letter(self):
issue_1 = IssueFactory.create(date_published=dt.datetime.now())
ArticleFactory.create(issue=issue_1, authors=['btest', 'ctest1'])
url = reverse('public:journal:journal_authors_list', kwargs={'code': issue_1.journal.code})
response = Client().get(url, letter='b')
assert response.status_code == 200
authors_dicts = response.context['authors_dicts']
assert len(authors_dicts) == 1
assert authors_dicts.keys() == {'btest', }
def test_can_provide_contributors_of_article(self):
issue_1 = IssueFactory.create(date_published=dt.datetime.now())
ArticleFactory.create(issue=issue_1, authors=['btest', 'ctest1'])
url = reverse('public:journal:journal_authors_list', kwargs={'code': issue_1.journal.code})
response = Client().get(url, letter='b')
assert response.status_code == 200
authors_dicts = response.context['authors_dicts']
contributors = authors_dicts['btest'][0]['contributors']
assert contributors == ['ctest1']
def test_dont_show_unpublished_articles(self):
issue1 = IssueFactory.create(is_published=False)
issue2 = IssueFactory.create(journal=issue1.journal, is_published=True)
ArticleFactory.create(issue=issue1, authors=['foo'])
ArticleFactory.create(issue=issue2, authors=['foo'])
        # Unpublished articles aren't in Solr.
url = reverse('public:journal:journal_authors_list', kwargs={'code': issue1.journal.code})
response = Client().get(url, letter='f')
authors_dicts = response.context['authors_dicts']
        # Only one of the two articles should be there.
assert len(authors_dicts['foo']) == 1
def test_can_filter_by_article_type(self):
issue_1 = IssueFactory.create(date_published=dt.datetime.now())
ArticleFactory.create(issue=issue_1, type='article', authors=['btest'])
ArticleFactory.create(issue=issue_1, type='compterendu', authors=['btest'])
url = reverse('public:journal:journal_authors_list', kwargs={'code': issue_1.journal.code})
response = Client().get(url, article_type='article')
assert response.status_code == 200
authors_dicts = response.context['authors_dicts']
assert len(authors_dicts) == 1
def test_can_filter_by_article_type_when_no_article_of_type(self):
issue_1 = IssueFactory.create(date_published=dt.datetime.now())
ArticleFactory.create(issue=issue_1, type='article', authors=['atest'])
url = reverse('public:journal:journal_authors_list', kwargs={'code': issue_1.journal.code})
response = Client().get(url, {"article_type": 'compterendu'})
assert response.status_code == 200
def test_only_letters_with_results_are_active(self):
""" Test that for a given selection in the authors list view, only the letters for which
results are present are shown """
issue_1 = IssueFactory.create(journal=JournalFactory(), date_published=dt.datetime.now())
ArticleFactory.create(issue=issue_1, type='article', authors=['atest'])
ArticleFactory.create(issue=issue_1, type='compterendu', authors=['btest'])
url = reverse('public:journal:journal_authors_list', kwargs={'code': issue_1.journal.code})
response = Client().get(url, {"article_type": 'compterendu'})
assert response.status_code == 200
assert not response.context['letters_exists'].get('A')
def test_do_not_fail_when_user_requests_a_letter_with_no_articles(self):
issue_1 = IssueFactory.create(date_published=dt.datetime.now())
ArticleFactory.create(issue=issue_1, type='article', authors=['btest'])
url = reverse('public:journal:journal_authors_list', kwargs={'code': issue_1.journal.code})
response = Client().get(url, {"article_type": 'compterendu', 'letter': 'A'})
assert response.status_code == 200
def test_inserts_the_current_letter_in_the_context(self):
issue_1 = IssueFactory.create(date_published=dt.datetime.now())
ArticleFactory.create(issue=issue_1, authors=['btest', 'ctest1', 'ctest2'])
url = reverse('public:journal:journal_authors_list', kwargs={'code': issue_1.journal.code})
response_1 = Client().get(url)
response_2 = Client().get(url, {'letter': 'C'})
response_3 = Client().get(url, {'letter': 'invalid'})
        assert response_1.status_code == 200
        assert response_2.status_code == 200
        assert response_3.status_code == 200
assert response_1.context['letter'] == 'B'
assert response_2.context['letter'] == 'C'
assert response_3.context['letter'] == 'B'
def test_inserts_a_dict_with_the_letters_counts_in_the_context(self):
issue_1 = IssueFactory.create(date_published=dt.datetime.now())
ArticleFactory.create(issue=issue_1, authors=['btest', 'ctest1', 'ctest2'])
url = reverse('public:journal:journal_authors_list', kwargs={'code': issue_1.journal.code})
response = Client().get(url)
assert response.status_code == 200
assert len(response.context['letters_exists']) == 26
assert response.context['letters_exists']['B']
assert response.context['letters_exists']['C']
for letter in 'adefghijklmnopqrstuvwxyz':
assert not response.context['letters_exists'][letter.upper()]
@pytest.mark.parametrize('article_type,expected', [('compterendu', True), ('article', False)])
def test_view_has_multiple_article_types(self, article_type, expected):
article1 = ArticleFactory.create(type='article', authors=['btest'])
ArticleFactory.create(issue=article1.issue, type=article_type, authors=['btest'])
url = reverse(
'public:journal:journal_authors_list',
kwargs={'code': article1.issue.journal.code})
response = Client().get(url)
assert response.context['view'].has_multiple_article_types == expected
def test_no_duplicate_authors_with_lowercase_and_uppercase_names(self):
issue = IssueFactory(journal__code='journal')
ArticleFactory.create(issue=issue, localidentifier='article1', authors=['FOO, BAR'])
ArticleFactory.create(issue=issue, localidentifier='article2', authors=['FOO, Bar'])
ArticleFactory.create(issue=issue, localidentifier='article3', authors=['Foo, Bar'])
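        # All three case variants of the same author name should be grouped under a single
        # slugified key ('foo-bar') in the authors dict.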
url = reverse('public:journal:journal_authors_list', kwargs={'code': 'journal'})
response = Client().get(url)
assert response.context['authors_dicts'] == OrderedDict({
'foo-bar': [
{
'author': 'FOO, BAR',
'contributors': [],
'id': 'article1',
'title': 'Robert Southey, Writing and Romanticism',
'url': None,
'year': '2',
}, {
'author': 'FOO, Bar',
'contributors': [],
'id': 'article2',
'title': 'Robert Southey, Writing and Romanticism',
'url': None,
'year': '2',
}, {
'author': 'Foo, Bar',
'contributors': [],
'id': 'article3',
'title': 'Robert Southey, Writing and Romanticism',
'url': None,
'year': '2',
},
],
})
class TestIssueDetailView:
def test_works_with_pks(self):
issue = IssueFactory.create(date_published=dt.datetime.now())
url = issue_detail_url(issue)
response = Client().get(url)
assert response.status_code == 200
@pytest.mark.parametrize("is_published,has_ticket,expected_code", [
(True, False, 200),
(True, True, 200),
(False, False, 302),
(False, True, 200),
])
def test_can_accept_prepublication_ticket(self, is_published, has_ticket, expected_code):
localidentifier = "espace03368"
issue = IssueFactory(localidentifier=localidentifier, is_published=is_published)
url = issue_detail_url(issue)
data = None
if has_ticket:
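            # The prepublication ticket is the MD5 hex digest of the issue's localidentifier.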
ticket = md5(localidentifier.encode()).hexdigest()
data = {'ticket': ticket}
response = Client().get(url, data=data)
assert response.status_code == expected_code
def test_works_with_localidentifiers(self):
issue = IssueFactory.create(
date_published=dt.datetime.now(), localidentifier='test')
url = issue_detail_url(issue)
response = Client().get(url)
assert response.status_code == 200
def test_fedora_issue_with_external_url_redirects(self):
# When we have an issue with a fedora localidentifier *and* external_url set, we redirect
# to that external url when we hit the detail view.
# ref #1651
issue = IssueFactory.create(
date_published=dt.datetime.now(), localidentifier='test',
external_url='http://example.com')
url = issue_detail_url(issue)
response = Client().get(url)
assert response.status_code == 302
assert response.url == 'http://example.com'
def test_can_render_issue_summary_when_db_contains_articles_not_in_summary(self):
# Articles in the issue view are ordered according to the list specified in the erudit
# object. If an article isn't referenced in the erudit object list, then it will not be
# shown. We rely on the fact that the default patched issue points to liberte1035607
# ref support#216
issue = IssueFactory.create()
a1 = ArticleFactory.create(issue=issue, localidentifier='31492ac')
a2 = ArticleFactory.create(issue=issue, localidentifier='31491ac')
ArticleFactory.create(issue=issue, localidentifier='not-there', add_to_fedora_issue=False)
url = issue_detail_url(issue)
response = Client().get(url)
articles = response.context['articles']
assert articles == [a1, a2]
@pytest.mark.parametrize("factory, expected_lock", [
(EmbargoedIssueFactory, True),
(OpenAccessIssueFactory, False),
])
def test_embargo_lock_icon(self, factory, expected_lock):
issue = factory(is_published=False)
url = issue_detail_url(issue)
response = Client().get(url, {'ticket': issue.prepublication_ticket})
# The embargo lock icon should never be displayed when a prepublication ticket is provided.
assert b'ion-ios-lock' not in response.content
issue.is_published = True
issue.save()
response = Client().get(url)
# The embargo lock icon should only be displayed on embargoed issues.
assert (b'ion-ios-lock' in response.content) == expected_lock
def test_article_items_are_not_cached_for_unpublished_issues(self):
issue = IssueFactory(is_published=False)
article = ArticleFactory(issue=issue, title="thisismyoldtitle")
url = issue_detail_url(issue)
resp = Client().get(url, {'ticket': issue.prepublication_ticket})
assert "thisismyoldtitle" in resp.content.decode('utf-8')
with repository.api.open_article(article.pid) as wrapper:
wrapper.set_title('thisismynewtitle')
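        # Since the issue is unpublished, the view must re-read the article from Fedora and pick
        # up the new title instead of serving a cached rendering.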
resp = Client().get(url, {'ticket': issue.prepublication_ticket})
assert "thisismynewtitle" in resp.content.decode('utf-8')
@override_settings(CACHES=settings.LOCMEM_CACHES)
def test_article_items_are_cached_for_published_issues(self):
issue = IssueFactory(is_published=True)
article = ArticleFactory(issue=issue, title="thisismyoldtitle")
url = issue_detail_url(issue)
resp = Client().get(url)
assert "thisismyoldtitle" in resp.content.decode('utf-8')
with repository.api.open_article(article.pid) as wrapper:
wrapper.set_title('thisismynewtitle')
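        # Since the issue is published, the cached rendering is served and the old title remains.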
resp = Client().get(url, {'ticket': issue.prepublication_ticket})
assert "thisismyoldtitle" in resp.content.decode('utf-8')
def test_can_return_404_when_issue_doesnt_exist(self):
issue = IssueFactory(
localidentifier='test',
)
issue.localidentifier = 'fail'
url = issue_detail_url(issue)
response = Client().get(url)
assert response.status_code == 404
@pytest.mark.parametrize('publication_allowed', (True, False))
def test_publication_allowed_article(self, publication_allowed):
issue = IssueFactory(journal__open_access=True)
article = ArticleFactory(issue=issue, publication_allowed=publication_allowed)
url = reverse('public:journal:issue_detail', kwargs={
'journal_code': issue.journal.code,
'issue_slug': issue.volume_slug,
'localidentifier': issue.localidentifier,
})
html = Client().get(url).content.decode()
dom = BeautifulSoup(html, 'html.parser')
toolbox = dom.find('ul', {'class': 'toolbox'})
summary_link = dom.find('p', {'class': 'bib-record__record-link'})
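        # When publication of the article is not allowed, neither the toolbox nor the summary
        # link should be rendered.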
if publication_allowed:
assert toolbox
assert summary_link
else:
assert not toolbox
assert not summary_link
@override_settings(CACHES=settings.LOCMEM_CACHES)
@pytest.mark.parametrize('language_code, expected_link', (
('fr', '<a class="tool-btn" href="/fr/revues/journal/2000-issue/article.pdf" '
'target="_blank" title="Télécharger">'),
('en', '<a class="tool-btn" href="/en/journals/journal/2000-issue/article.pdf" '
'target="_blank" title="Download">'),
))
    def test_article_pdf_url_is_cached_with_the_right_language(
self, language_code, expected_link,
):
article = ArticleFactory(
issue__journal__code='journal',
issue__year='2000',
issue__localidentifier='issue',
localidentifier='article',
with_pdf=True,
)
with override_settings(LANGUAGE_CODE=language_code):
url = reverse('public:journal:issue_detail', kwargs={
'journal_code': article.issue.journal.code,
'issue_slug': article.issue.volume_slug,
'localidentifier': article.issue.localidentifier,
})
html = Client().get(url).content.decode()
dom = BeautifulSoup(html, 'html.parser')
toolbox = dom.find('ul', {'class': 'toolbox'})
assert expected_link in toolbox.decode()
def test_journal_titles_and_subtitles_are_displayed_in_all_languages(self):
issue = IssueFactory(journal__code='journal')
repository.api.set_publication_xml(
issue.get_full_identifier(),
open('tests/fixtures/issue/im03868.xml', 'rb').read(),
)
url = reverse('public:journal:issue_detail', kwargs={
'journal_code': issue.journal.code,
'issue_slug': issue.volume_slug,
'localidentifier': issue.localidentifier,
})
html = Client().get(url).content.decode()
dom = BeautifulSoup(html, 'html.parser')
title1 = dom.find('p', {'class': 'main-header__meta'}).decode()
assert title1 == '<p class="main-header__meta">\n' \
'<a href="/fr/revues/journal/" title="Consulter la revue">\n ' \
'Intermédialités\n \n \n ' \
'<span class="hint--bottom-left hint--no-animate" ' \
'data-hint="Tous les articles de cette revue sont soumis à un processus ' \
'd’évaluation par les pairs.">\n' \
'<i class="icon ion-ios-checkmark-circle"></i>\n' \
'</span>\n<br/>\n' \
'<span class="journal-subtitle">Histoire et théorie des arts, ' \
'des lettres et des techniques</span>\n<br/>\n ' \
'Intermediality\n \n \n <br/>\n' \
'<span class="journal-subtitle">History and Theory of the Arts, ' \
'Literature and Technologies</span>\n</a>\n</p>'
title2 = dom.find('div', {'class': 'latest-issue'}).find('h2').decode()
assert title2 == '<h2>\n<a href="/fr/revues/journal/" title="Consulter la revue">\n ' \
'Intermédialités\n \n <br/>\n' \
'<span class="journal-subtitle">Histoire et théorie des arts, ' \
'des lettres et des techniques</span>\n<br/>\n ' \
'Intermediality\n \n \n <br/>\n' \
'<span class="journal-subtitle">History and Theory of the Arts, ' \
'Literature and Technologies</span>\n</a>\n</h2>'
class TestArticleDetailView:
@pytest.fixture(autouse=True)
def article_detail_solr_data(self, monkeypatch):
monkeypatch.setattr(SolrDataMixin, 'solr_data', FakeSolrData())
@pytest.mark.parametrize('method', [
'get', 'options'
])
def test_can_render_erudit_articles(self, monkeypatch, eruditarticle, method):
        # The goal of this test is to verify that our erudit article mechanism doesn't crash for
        # all kinds of articles. We have many articles in our fixtures and the `eruditarticle`
        # argument is parametrized, which causes this test to run once for each fixture we have.
monkeypatch.setattr(metrics_settings, 'ACTIVATED', False)
monkeypatch.setattr(Article, 'get_erudit_object', lambda *a, **kw: eruditarticle)
journal = JournalFactory.create(open_access=True)
issue = IssueFactory.create(
journal=journal, date_published=dt.datetime.now(), localidentifier='test_issue')
article = ArticleFactory.create(issue=issue, localidentifier='test_article')
url = article_detail_url(article)
response = getattr(Client(), method)(url)
assert response.status_code == 200
@pytest.mark.parametrize("is_published,has_ticket,expected_code", [
(True, False, 200),
(True, True, 200),
(False, False, 302),
(False, True, 200),
])
def test_can_accept_prepublication_ticket(self, is_published, has_ticket, expected_code):
localidentifier = "espace03368"
issue = IssueFactory(localidentifier=localidentifier, is_published=is_published)
article = ArticleFactory(issue=issue)
url = article_detail_url(article)
data = None
if has_ticket:
ticket = md5(localidentifier.encode()).hexdigest()
data = {'ticket': ticket}
response = Client().get(url, data=data)
assert response.status_code == expected_code
@pytest.mark.parametrize("is_published,ticket_expected", [
(True, False),
(False, True),
])
def test_prepublication_ticket_is_propagated_to_other_pages(self, is_published, ticket_expected):
localidentifier = "espace03368"
issue = IssueFactory(localidentifier=localidentifier, is_published=is_published)
articles = ArticleFactory.create_batch(issue=issue, size=3)
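        # Use the middle article so that both previous and next pagination arrows are rendered.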
article = articles[1]
url = article_detail_url(article)
ticket = md5(localidentifier.encode()).hexdigest()
response = Client().get(url, data={'ticket': ticket})
from io import StringIO
tree = et.parse(StringIO(response.content.decode()), et.HTMLParser())
# Test that the ticket is in the breadcrumbs
bc_hrefs = [e.get('href') for e in tree.findall('.//nav[@id="breadcrumbs"]//a')]
pa_hrefs = [e.get('href') for e in tree.findall('.//div[@class="pagination-arrows"]/a')]
# This is easier to debug than a generator
for href in bc_hrefs + pa_hrefs:
assert ('ticket' in href) == ticket_expected
def test_dont_cache_html_of_articles_of_unpublished_issues(self):
issue = IssueFactory.create(is_published=False)
article = ArticleFactory.create(issue=issue, title='thiswillendupinhtml')
url = '{}?ticket={}'.format(article_detail_url(article), issue.prepublication_ticket)
response = Client().get(url)
assert response.status_code == 200
assert b'thiswillendupinhtml' in response.content
with repository.api.open_article(article.pid) as wrapper:
wrapper.set_title('thiswillreplaceoldinhtml')
response = Client().get(url)
assert response.status_code == 200
assert b'thiswillendupinhtml' not in response.content
assert b'thiswillreplaceoldinhtml' in response.content
def test_dont_cache_fedora_objects_of_articles_of_unpublished_issues(self):
with unittest.mock.patch('erudit.fedora.modelmixins.cache') as cache_mock:
cache_mock.get.return_value = None
issue = IssueFactory.create(is_published=False)
article = ArticleFactory.create(issue=issue)
url = '{}?ticket={}'.format(article_detail_url(article), issue.prepublication_ticket)
response = Client().get(url)
assert response.status_code == 200
            # Assert that the cache has not been called.
assert cache_mock.get.call_count == 0
def test_allow_ephemeral_articles(self):
# When receiving a request for an article that doesn't exist in the DB, try querying fedora
# for the requested PID before declaring a failure.
issue = IssueFactory.create()
article_localidentifier = 'foo'
repository.api.register_article(
'{}.{}'.format(issue.get_full_identifier(), article_localidentifier)
)
url = reverse('public:journal:article_detail', kwargs={
'journal_code': issue.journal.code, 'issue_slug': issue.volume_slug,
'issue_localid': issue.localidentifier, 'localid': article_localidentifier})
response = Client().get(url)
assert response.status_code == 200
@unittest.mock.patch('pikepdf.open')
@unittest.mock.patch('eulfedora.models.FileDatastreamObject._get_content')
@pytest.mark.parametrize('content_access_granted,has_abstracts,should_fetch_pdf', (
(True, True, False),
(True, False, False),
(False, True, False),
(False, False, True)
))
def test_do_not_fetch_pdfs_if_not_necessary(
self, mock_pikepdf, mock_content, content_access_granted, has_abstracts, should_fetch_pdf
):
""" Test that the PDF is only fetched on ArticleDetailView when the the user is not subscribed
and the article has no abstract
"""
article = ArticleFactory(with_pdf=True)
client = Client()
if has_abstracts:
with repository.api.open_article(article.pid) as wrapper:
wrapper.set_abstracts([{'lang': 'fr', 'content': 'Résumé français'}])
if content_access_granted:
subscription = JournalAccessSubscriptionFactory(
pk=1,
user__password='password',
post__valid=True,
post__journals=[article.issue.journal],
organisation=None, # TODO implement IndividualJournalAccessSubscriptionFactory
)
client.login(username=subscription.user.username, password="password")
url = article_detail_url(article)
response = client.get(url)
if should_fetch_pdf:
assert mock_content.call_count == 1
else:
assert mock_content.call_count == 0
assert response.status_code == 200
def test_querystring_doesnt_mess_media_urls(self):
journal = JournalFactory(open_access=True) # so we see the whole article
issue = IssueFactory(journal=journal)
article = ArticleFactory(issue=issue, from_fixture='1003446ar') # this article has media
url = '{}?foo=bar'.format(article_detail_url(article))
response = Client().get(url)
# we have some media urls
assert b'media/' in response.content
        # We don't have any messed up media URLs, that is, a URL with our querystring in the
        # middle.
assert b'barmedia/' not in response.content
@unittest.mock.patch('erudit.fedora.cache.cache')
@unittest.mock.patch('erudit.fedora.cache.get_datastream_file_cache')
@unittest.mock.patch('erudit.fedora.cache.get_cached_datastream_content')
@pytest.mark.parametrize('is_published, expected_count', [
# When an issue is not published, we should not get any cache.get() calls when displaying
# an article's PDF.
(False, 0),
        # When an issue is published, we should get one cache.get() call when displaying an
        # article's PDF.
(True, 1),
])
def test_pdf_datastream_caching(self, mock_cache, mock_get_datastream_file_cache,
mock_get_cached_datastream_content, is_published,
expected_count):
mock_cache.get.return_value = None
mock_get_datastream_file_cache.return_value = mock_cache
mock_get_cached_datastream_content.return_value = None
article = ArticleFactory(
issue__is_published=is_published,
issue__journal__open_access=True,
)
url = reverse('public:journal:article_raw_pdf', kwargs={
'journal_code': article.issue.journal.code,
'issue_slug': article.issue.volume_slug,
'issue_localid': article.issue.localidentifier,
'localid': article.localidentifier,
})
mock_cache.get.reset_mock()
response = Client().get(url, {
'ticket': article.issue.prepublication_ticket,
})
assert mock_cache.get.call_count == expected_count
@unittest.mock.patch('erudit.fedora.modelmixins.cache')
@pytest.mark.parametrize('is_published, expected_count', [
# When an issue is not published, we should not get any cache.get() calls when displaying
# an article's XML.
(False, 0),
        # When an issue is published, we should get one cache.get() call when displaying an
        # article's XML.
(True, 1),
])
def test_xml_datastream_caching(self, mock_cache, is_published, expected_count):
mock_cache.get.return_value = None
article = ArticleFactory(
issue__is_published=is_published,
issue__journal__open_access=True,
)
url = reverse('public:journal:article_raw_xml', kwargs={
'journal_code': article.issue.journal.code,
'issue_slug': article.issue.volume_slug,
'issue_localid': article.issue.localidentifier,
'localid': article.localidentifier,
})
mock_cache.get.reset_mock()
response = Client().get(url, {
'ticket': article.issue.prepublication_ticket,
})
assert mock_cache.get.call_count == expected_count
def test_that_article_titles_are_truncated_in_breadcrumb(self):
article = ArticleFactory(
from_fixture='1056823ar',
localidentifier='article',
issue__localidentifier='issue',
issue__year='2000',
issue__journal__code='journal',
)
url = article_detail_url(article)
response = Client().get(url)
html = response.content.decode()
assert '<a href="/fr/revues/journal/2000-issue/article/">Jean-Guy Desjardins, Traité de ' \
'l’évaluation foncière, Montréal, Wilson & Lafleur …</a>' in html
def test_keywords_html_tags(self):
article = ArticleFactory(from_fixture='1055883ar')
url = article_detail_url(article)
response = Client().get(url)
html = response.content.decode()
# Check that HTML tags are displayed in the body.
assert '<ul>\n<li class="keyword">Charles Baudelaire, </li>\n<li class="keyword">\n' \
'<em>Fleurs du Mal</em>, </li>\n<li class="keyword">Seine, </li>\n' \
'<li class="keyword">mythe et réalité de Paris, </li>\n' \
'<li class="keyword">poétique du miroir</li>\n</ul>' in html
# Check that HTML tags are not displayed in the head.
assert '<meta name="citation_keywords" lang="fr" content="Charles Baudelaire, Fleurs du ' \
'Mal, Seine, mythe et réalité de Paris, poétique du miroir" />' in html
def test_article_pdf_links(self):
article = ArticleFactory(
with_pdf=True,
from_fixture='602354ar',
localidentifier='602354ar',
issue__year='2000',
issue__localidentifier='issue',
issue__is_published=False,
issue__journal__code='journal',
issue__journal__open_access=True,
)
url = article_detail_url(article)
response = Client().get(url, {
'ticket': article.issue.prepublication_ticket if not article.issue.is_published else '',
})
html = response.content.decode()
# Check that the PDF download button URL has the prepublication ticket if the issue is not
# published.
assert '<a class="tool-btn tool-download" ' \
'data-href="/fr/revues/journal/2000-issue/602354ar.pdf?' \
'ticket=0aae4c8f3cc35693d0cbbe631f2e8b52"><span class="toolbox-pdf">PDF</span>' \
'<span class="tools-label">Télécharger</span></a>' in html
# Check that the PDF menu link URL has the prepublication ticket if the issue is not
# published.
assert '<a href="#pdf-viewer" id="pdf-viewer-menu-link">Texte intégral (PDF)</a>' \
'<a href="/fr/revues/journal/2000-issue/602354ar.pdf?' \
'ticket=0aae4c8f3cc35693d0cbbe631f2e8b52" id="pdf-download-menu-link" ' \
'target="_blank">Texte intégral (PDF)</a>' in html
# Check that the embeded PDF URL has the prepublication ticket if the issue is not
# published.
assert '<object id="pdf-viewer" data="/fr/revues/journal/2000-issue/602354ar.pdf?' \
'embed&ticket=0aae4c8f3cc35693d0cbbe631f2e8b52" type="application/pdf" ' \
'style="width: 100%; height: 700px;"></object>' in html
# Check that the PDF download link URL has the prepublication ticket if the issue is not
# published.
assert '<a href="/fr/revues/journal/2000-issue/602354ar.pdf?' \
'ticket=0aae4c8f3cc35693d0cbbe631f2e8b52" class="btn btn-secondary" ' \
'target="_blank">Télécharger</a>' in html
article.issue.is_published = True
article.issue.save()
response = Client().get(url)
html = response.content.decode()
# Check that the PDF download button URL does not have the prepublication ticket if the
# issue is published.
assert '<a class="tool-btn tool-download" data-href="/fr/revues/journal/2000-issue/' \
'602354ar.pdf"><span class="toolbox-pdf">PDF</span><span ' \
'class="tools-label">Télécharger</span></a>' in html
# Check that the PDF menu link URL does not have the prepublication ticket if the issue
# is published.
assert '<a href="#pdf-viewer" id="pdf-viewer-menu-link">Texte intégral (PDF)</a>' \
'<a href="/fr/revues/journal/2000-issue/602354ar.pdf" id="pdf-download-menu-link" ' \
'target="_blank">Texte intégral (PDF)</a>' in html
# Check that the embeded PDF URL does not have the prepublication ticket if the issue is
# published.
assert '<object id="pdf-viewer" data="/fr/revues/journal/2000-issue/602354ar.pdf?' \
'embed" type="application/pdf" style="width: 100%; height: 700px;"></object>' in html
# Check that the PDF download link URL does not have the prepublication ticket if the issue
# is published.
assert '<a href="/fr/revues/journal/2000-issue/602354ar.pdf" class="btn btn-secondary" ' \
'target="_blank">Télécharger</a>' in html
@pytest.mark.parametrize('kwargs, nonce_count, authorized', (
# Valid token
({}, 1, True),
# Badly formed token
({'token_separator': '!'}, 1, False),
# Invalid nonce
({'invalid_nonce': True}, 1, False),
# Invalid message
({'invalid_message': True}, 1, False),
# Invalid signature
({'invalid_signature': True}, 1, False),
# Nonce seen more than 3 times
({}, 4, False),
# Badly formatted payload
({'payload_separator': '!'}, 1, False),
# Expired token
({'time_delta': 3600000001}, 1, False),
# Wrong IP
({'ip_subnet': '8.8.8.0/24'}, 1, False),
# Invalid subscription
({'subscription_id': 2}, 1, False),
))
@pytest.mark.parametrize('url_name', (
('public:journal:article_detail'),
('public:journal:article_raw_pdf'),
))
@unittest.mock.patch('core.subscription.middleware.SubscriptionMiddleware._nonce_count')
@override_settings(GOOGLE_CASA_KEY='74796E8FF6363EFF91A9308D1D05335E')
def test_article_detail_with_google_casa_token(self, mock_nonce_count, url_name, kwargs,
nonce_count, authorized):
mock_nonce_count.return_value = nonce_count
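        # Google Scholar CASA (Campus Activated Subscriber Access) tokens grant temporary
        # off-campus access to subscribed content; an invalid or replayed token should fall back
        # to the restricted 600-word preview asserted below.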
article = ArticleFactory()
JournalAccessSubscriptionFactory(
pk=1,
post__valid=True,
post__journals=[article.issue.journal],
)
url = reverse(url_name, kwargs={
'journal_code': article.issue.journal.code,
'issue_slug': article.issue.volume_slug,
'issue_localid': article.issue.localidentifier,
'localid': article.localidentifier,
})
response = Client().get(url, {
'casa_token': generate_casa_token(**kwargs),
}, follow=True)
html = response.content.decode()
if authorized:
assert 'Seuls les 600 premiers mots du texte seront affichés.' not in html
else:
assert 'Seuls les 600 premiers mots du texte seront affichés.' in html
@pytest.mark.parametrize('url_name, fixture, display_biblio, display_pdf_first_page', (
# Complete treatment articles should always display a bibliography
('public:journal:article_biblio', '009256ar', 1, 0),
('public:journal:article_summary', '009256ar', 1, 0),
('public:journal:article_detail', '009256ar', 1, 0),
# Retro minimal treatment articles should only display a bibliography in article_biblio view
('public:journal:article_biblio', '1058447ar', 1, 0),
('public:journal:article_summary', '1058447ar', 0, 1),
('public:journal:article_detail', '1058447ar', 0, 1),
# Bibliography should not be displayed on TOC page.
('public:journal:article_toc', '009256ar', 0, 0),
('public:journal:article_toc', '1058447ar', 0, 0),
))
def test_biblio_references_display(self, url_name, fixture, display_biblio,
display_pdf_first_page):
article = ArticleFactory(
from_fixture=fixture,
with_pdf=True,
)
url = reverse(url_name, kwargs={
'journal_code': article.issue.journal.code,
'issue_slug': article.issue.volume_slug,
'issue_localid': article.issue.localidentifier,
'localid': article.localidentifier,
})
html = Client().get(url).content.decode()
assert html.count('<section id="grbiblio" class="article-section grbiblio" '
'role="complementary">') == display_biblio
# Minimal treatment articles should not display PDF first page when displaying references.
assert html.count('<object id="pdf-viewer"') == display_pdf_first_page
@pytest.mark.parametrize('open_access', (True, False))
@pytest.mark.parametrize('url_name', (
('public:journal:article_biblio'),
('public:journal:article_summary'),
('public:journal:article_detail'),
('public:journal:article_toc'),
))
def test_display_citation_fulltext_world_readable_metatag_only_for_open_access_articles(
self, url_name, open_access
):
article = ArticleFactory(issue__journal__open_access=open_access)
url = reverse(url_name, kwargs={
'journal_code': article.issue.journal.code,
'issue_slug': article.issue.volume_slug,
'issue_localid': article.issue.localidentifier,
'localid': article.localidentifier,
})
html = Client().get(url).content.decode()
# The citation_fulltext_world_readable metatag should only be displayed for open access
# articles. Otherwise, some Google Scholar services won't work (eg. CASA).
if open_access:
assert '<meta name="citation_fulltext_world_readable" content="" />' in html
else:
assert '<meta name="citation_fulltext_world_readable" content="" />' not in html
@pytest.mark.parametrize('publication_allowed', (True, False))
@pytest.mark.parametrize('url_name', (
('public:journal:article_biblio'),
('public:journal:article_summary'),
('public:journal:article_detail'),
('public:journal:article_toc'),
))
def test_publication_allowed_text_display(self, url_name, publication_allowed):
article = ArticleFactory(
publication_allowed=publication_allowed,
issue__journal__open_access=True,
)
url = reverse(url_name, kwargs={
'journal_code': article.issue.journal.code,
'issue_slug': article.issue.volume_slug,
'issue_localid': article.issue.localidentifier,
'localid': article.localidentifier,
})
html = Client().get(url).content.decode()
dom = BeautifulSoup(html, 'html.parser')
if publication_allowed:
assert 'Plan de l’article' in dom.decode()
assert 'Boîte à outils' in dom.decode()
if url_name != 'public:journal:article_detail':
assert 'Lire le texte intégral' in dom.decode()
if url_name not in ['public:journal:article_biblio', 'public:journal:article_toc']:
assert 'In October 1800 the poet, travel-writer and polemicist Robert Southey ' \
'was in Portugal.' in dom.decode()
else:
assert 'Plan de l’article' not in dom.decode()
assert 'Boîte à outils' not in dom.decode()
assert 'Lire le texte intégral' not in dom.decode()
assert 'In October 1800 the poet, travel-writer and polemicist Robert Southey was in ' \
'Portugal.' not in dom.decode()
def test_article_detail_marquage_in_toc_nav(self):
issue = IssueFactory(
journal__code='journal',
localidentifier='issue',
year='2000',
)
ArticleFactory(
from_fixture='1054008ar',
localidentifier='prev_article',
issue=issue,
)
article = ArticleFactory(
issue=issue,
)
ArticleFactory(
from_fixture='1054008ar',
localidentifier='next_article',
issue=issue,
)
url = article_detail_url(article)
response = Client().get(url)
html = response.content.decode()
# Check that TOC navigation titles include converted marquage.
assert '<a href="/fr/revues/journal/2000-issue/prev_article/" class="toc-nav__prev" ' \
'title="Article précédent"><span class="toc-nav__arrow">' \
'<span class="arrow arrow-bar is-left"></span></span>' \
'<h4 class="toc-nav__title">\n L’action et le verbe dans ' \
'<em>Feuillets d’Hypnos</em>\n</h4></a>' in html
assert '<a href="/fr/revues/journal/2000-issue/next_article/" class="toc-nav__next" ' \
'title="Article suivant"><span class="toc-nav__arrow">' \
'<span class="arrow arrow-bar is-right"></span></span><h4 ' \
'class="toc-nav__title">\n L’action et le verbe dans ' \
'<em>Feuillets d’Hypnos</em>\n</h4></a>' in html
def test_surtitre_not_split_in_multiple_spans(self):
article = ArticleFactory(
from_fixture='1056389ar',
)
url = article_detail_url(article)
response = Client().get(url)
html = response.content.decode()
assert '<span class="surtitre">Cahier commémoratif : ' \
'25<sup>e</sup> anniversaire</span>' in html
def test_title_and_paral_title_are_displayed(self):
article = ArticleFactory(
from_fixture='1058368ar',
)
url = article_detail_url(article)
response = Client().get(url)
html = response.content.decode()
assert '<span class="titre">Les Parcs Nationaux de Roumanie : considérations sur les ' \
'habitats Natura 2000 et sur les réserves IUCN</span>' in html
assert '<span class="titreparal">The National Parks of Romania: considerations on Natura ' \
'2000 habitats and IUCN reserves</span>' in html
def test_article_detail_view_with_untitled_article(self):
article = ArticleFactory(
from_fixture='1042058ar',
localidentifier='article',
issue__year='2000',
issue__localidentifier='issue',
issue__journal__code='journal',
issue__journal__name='Revue',
)
url = article_detail_url(article)
html = Client().get(url).content.decode()
# Check that "[Article sans titre]" is displayed in the header title.
assert '<title>[Article sans titre] – Inter – Érudit</title>' in html
# Check that "[Article sans titre]" is displayed in the body title.
assert '<h1 class="doc-head__title"><span class="titre">[Article sans titre]</span></h1>' in html
# Check that "[Article sans titre]" is displayed in the breadcrumbs.
assert '<li>\n <a href="/fr/revues/journal/2000-issue/article/">[Article sans titre]</a>' \
'\n</li>' in html
def test_article_authors_with_suffixes(self):
article = ArticleFactory(
from_fixture='1058611ar',
)
url = article_detail_url(article)
html = Client().get(url).content.decode()
        # Check that authors' suffixes are not displayed in the author list under the article
        # title.
assert '<li class="auteur doc-head__author">\n<span class="nompers">André\n ' \
'Ngamini-Ngui</span> et </li>' in html
        # Check that authors' suffixes are displayed in the 'more information' section.
assert '<li class="auteur-affiliation"><p><strong>André\n Ngamini-Ngui, †</strong>' \
'</p></li>' in html
def test_figure_groups_source_display(self):
article = ArticleFactory(
from_fixture='1058470ar',
localidentifier='article',
issue__year='2000',
issue__localidentifier='issue',
issue__journal__code='journal',
issue__journal__open_access=True,
)
url = article_detail_url(article)
html = Client().get(url).content.decode()
dom = BeautifulSoup(html, 'html.parser')
grfigure = dom.find('div', {'class': 'grfigure', 'id': 'gf1'})
# Check that the source is displayed under both figures 1 & 2 which are in the same figure group.
fi1 = grfigure.find('figure', {'id': 'fi1'}).decode()
fi2 = grfigure.find('figure', {'id': 'fi2'}).decode()
assert fi1 == '<figure class="figure" id="fi1"><figcaption></figcaption><div ' \
'class="figure-wrapper">\n<div class="figure-object"><a class="lightbox ' \
'objetmedia" href="/fr/revues/journal/2000-issue/article/media/" title="">' \
'<img alt="" class="lazyload img-responsive" data-aspectratio="/" ' \
'data-srcset="/fr/revues/journal/2000-issue/article/media/ w" height="" ' \
'src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAA' \
'AC0lEQVR42mNkYAAAAAYAAjCB0C8AAAAASUVORK5CYII=" width=""/></a></div>\n' \
'<div class="figure-legende-notes-source"><cite class="source">Avec ' \
'l’aimable autorisation de l’artiste et kamel mennour, Paris/London. © ' \
'<em>ADAGP Mohamed Bourouissa</em></cite></div>\n</div></figure>'
assert fi2 == '<figure class="figure" id="fi2"><figcaption></figcaption><div ' \
'class="figure-wrapper">\n<div class="figure-object"><a class="lightbox ' \
'objetmedia" href="/fr/revues/journal/2000-issue/article/media/" title="">' \
'<img alt="" class="lazyload img-responsive" data-aspectratio="/" ' \
'data-srcset="/fr/revues/journal/2000-issue/article/media/ w" height="" ' \
'src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAA' \
'AC0lEQVR42mNkYAAAAAYAAjCB0C8AAAAASUVORK5CYII=" width=""/></a></div>\n' \
'<div class="figure-legende-notes-source"><cite class="source">Avec ' \
'l’aimable autorisation de l’artiste et kamel mennour, Paris/London. © ' \
'<em>ADAGP Mohamed Bourouissa</em></cite></div>\n</div></figure>'
# Check that the figure list link is displayed.
voirliste = grfigure.find('p', {'class': 'voirliste'})
assert voirliste.decode() == '<p class="voirliste"><a href="#ligf1">-> Voir la liste ' \
'des figures</a></p>'
@unittest.mock.patch.object(ArticleDigitalObject, 'infoimg')
def test_figure_with_float_dimensions(self, mock_infoimg):
article = ArticleFactory(
from_fixture='1068859ar',
localidentifier='article',
issue__year='2000',
issue__localidentifier='issue',
issue__journal__code='journal',
issue__journal__open_access=True,
)
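        # The mocked infoimg datastream reports float dimensions (863.0 x 504.0); the view is
        # expected to render them as integer width/height attributes on the <img> tag.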
mock_infoimg.content = unittest.mock.MagicMock()
mock_infoimg.content.serialize = unittest.mock.MagicMock(
return_value="""
<infoDoc>
<im id="img-05-01.png">
<imPlGr>
<nomImg>2135184.png</nomImg>
<dimx>863.0</dimx>
<dimy>504.0</dimy>
<taille>246ko</taille>
</imPlGr>
</im>
</infoDoc>
"""
)
url = article_detail_url(article)
html = Client().get(url).content.decode()
dom = BeautifulSoup(html, 'html.parser')
fi1 = dom.find('figure', {'id': 'fi1'}).find('img').decode()
assert '<img alt="Modèle intégrateur : les mécanismes du façonnement des normes par la ' \
'sphère médiatique" class="lazyload img-responsive" data-aspectratio="863/504" ' \
'data-srcset="/fr/revues/journal/2000-issue/article/media/2135184.png 863w" ' \
'height="504" src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1H' \
'AwCAAAAC0lEQVR42mNkYAAAAAYAAjCB0C8AAAAASUVORK5CYII=" width="863"/>' == fi1
def test_table_groups_display(self):
article = ArticleFactory(
from_fixture='1061713ar',
localidentifier='article',
issue__year='2000',
issue__localidentifier='issue',
issue__journal__code='journal',
issue__journal__open_access=True,
)
url = article_detail_url(article)
html = Client().get(url).content.decode()
dom = BeautifulSoup(html, 'html.parser')
grtableau = dom.find_all('div', {'class': 'grtableau'})[0]
figures = grtableau.find_all('figure')
# Check that the table group is displayed.
assert grtableau.attrs.get('id') == 'gt1'
# Check that the tables are displayed inside the table group.
assert figures[0].attrs.get('id') == 'ta2'
assert figures[1].attrs.get('id') == 'ta3'
assert figures[2].attrs.get('id') == 'ta4'
# Check that the table images are displayed inside the tables.
assert len(figures[0].find_all('img', {'class': 'img-responsive'})) == 1
assert len(figures[1].find_all('img', {'class': 'img-responsive'})) == 1
assert len(figures[2].find_all('img', {'class': 'img-responsive'})) == 1
# Check that the table legends are displayed inside the tables.
assert len(figures[0].find_all('p', {'class': 'alinea'})) == 1
assert len(figures[1].find_all('p', {'class': 'alinea'})) == 2
assert len(figures[2].find_all('p', {'class': 'alinea'})) == 4
def test_table_groups_display_with_table_no(self):
article = ArticleFactory(
from_fixture='1060065ar',
localidentifier='article',
issue__year='2000',
issue__localidentifier='issue',
issue__journal__code='journal',
issue__journal__open_access=True,
)
url = article_detail_url(article)
html = Client().get(url).content.decode()
dom = BeautifulSoup(html, 'html.parser')
grtableau = dom.find_all('div', {'class': 'grtableau'})[0]
figures = grtableau.find_all('figure')
# Check that the table group is displayed.
assert grtableau.attrs.get('id') == 'gt1'
# Check that the tables are displayed inside the table group.
assert figures[0].attrs.get('id') == 'ta2'
assert figures[1].attrs.get('id') == 'ta3'
# Check that the table numbers are displayed.
assert figures[0].find_all('p', {'class': 'no'})[0].text == '2A'
assert figures[1].find_all('p', {'class': 'no'})[0].text == '2B'
def test_figure_back_arrow_is_displayed_when_theres_no_number_or_title(self):
article = ArticleFactory(
from_fixture='1031003ar',
issue__journal__open_access=True,
)
url = article_detail_url(article)
html = Client().get(url).content.decode()
        # Check that the arrow to go back to the figure is present even if there's no figure
        # number or caption.
assert '<figure class="tableau" id="lita7"><figcaption><p class="allertexte">' \
'<a href="#ta7"><span class="arrow arrow-bar is-top"></span></a></p>' \
'</figcaption>' in html
def test_figure_groups_numbers_display_in_figure_list(self):
article = ArticleFactory(
from_fixture='1058470ar',
localidentifier='article',
issue__year='2000',
issue__localidentifier='issue',
issue__journal__code='journal',
issue__journal__open_access=True,
)
url = article_detail_url(article)
html = Client().get(url).content.decode()
# Check that the figure numbers are displayed in the figure list for figure groups.
assert '<div class="grfigure" id="ligf1">\n<div class="grfigure-caption">\n' \
'<p class="allertexte"><a href="#gf1"><span class="arrow arrow-bar is-top"></span>' \
'</a></p>\n<p class="no">Figures 1 - 2</p>' in html
def test_figcaption_display_for_figure_groups_and_figures(self):
article = ArticleFactory(
from_fixture='1060169ar',
issue__journal__open_access=True,
)
url = article_detail_url(article)
html = Client().get(url).content.decode()
# Check that figure group caption and the figure captions are displayed.
assert '<div class="grfigure-caption">\n<p class="allertexte"><a href="#gf1">' \
'<span class="arrow arrow-bar is-top"></span></a></p>\n' \
'<p class="no">Figure 1</p>\n<div class="legende"><p class="legende">' \
'<strong class="titre">RMF frequencies in German data</strong>' \
'</p></div>\n</div>' in html
assert '<figcaption><p class="legende"><strong class="titre">German non-mediated</strong>' \
'</p></figcaption>' in html
assert '<figcaption><p class="legende"><strong class="titre">German interpreted' \
'</strong></p></figcaption>' in html
def test_article_multilingual_titles(self):
article = ArticleFactory(
from_fixture='1059303ar',
)
url = article_detail_url(article)
html = Client().get(url).content.decode()
# Check that paral titles are displayed in the article header.
assert '<span class="titreparal">Détection d’ADN d’<em>Ophiostoma ulmi</em> ' \
'introgressé naturellement dans les régions entourant les loci contrôlant ' \
'la pathogénie et le type sexuel chez <em>O. novo-ulmi</em></span>' in html
# Check that paral titles are not displayed in summary section.
assert '<h4><span class="title">Détection d’ADN d’<em>Ophiostoma ulmi</em> introgressé ' \
'naturellement dans les régions entourant les loci contrôlant la pathogénie et le ' \
'type sexuel chez <em>O. novo-ulmi</em></span></h4>' not in html
def test_authors_more_information_for_author_with_suffix_and_no_affiliation(self):
article = ArticleFactory(
from_fixture='1059571ar',
)
url = article_detail_url(article)
html = Client().get(url).content.decode()
        # Check that the 'more information' akkordion is displayed for an author with a suffix
        # and no affiliation.
assert '<ul class="akkordion-content unstyled"><li class="auteur-affiliation"><p>' \
'<strong>Guy\n Sylvestre, o.c.</strong></p></li></ul>' in html
def test_journal_multilingual_titles_in_citations(self):
issue = IssueFactory(year="2019")
repository.api.set_publication_xml(
issue.get_full_identifier(),
open('tests/fixtures/issue/ri04376.xml', 'rb').read(),
)
article = ArticleFactory(
localidentifier='article',
issue=issue,
)
url = article_detail_url(article)
html = Client().get(url).content.decode()
# Check that the journal name is displayed in French and English (Relations industrielles
# / Industrial Relations).
assert '<dd id="id_cite_mla_article" class="cite-mla">\n Pratt, Lynda. ' \
'« Robert Southey, Writing and Romanticism. » <em>Relations ' \
'industrielles / Industrial Relations</em>, volume 73, numéro 4, automne 2018. ' \
'https://doi.org/10.7202/009255ar\n </dd>' in html
assert '<dd id="id_cite_apa_article" class="cite-apa">\n ' \
'Pratt, L. (2019). Robert Southey, Writing and Romanticism. ' \
'<em>Relations industrielles / Industrial Relations</em>. ' \
'https://doi.org/10.7202/009255ar\n </dd>' in html
assert '<dd id="id_cite_chicago_article" class="cite-chicago">\n ' \
'Pratt, Lynda « Robert Southey, Writing and Romanticism ». ' \
'<em>Relations industrielles / Industrial Relations</em> (2019). ' \
'https://doi.org/10.7202/009255ar\n </dd>' in html
@pytest.mark.parametrize('fixture, url_name, expected_result', (
# Multilingual journals should have all titles in citations.
('ri04376', 'public:journal:article_citation_enw',
'%J Relations industrielles / Industrial Relations'),
('ri04376', 'public:journal:article_citation_ris',
'JO - Relations industrielles / Industrial Relations'),
('ri04376', 'public:journal:article_citation_bib',
'journal="Relations industrielles / Industrial Relations",'),
# Sub-titles should not be in citations.
('im03868', 'public:journal:article_citation_enw', '%J Intermédialités / Intermediality'),
('im03868', 'public:journal:article_citation_ris',
'JO - Intermédialités / Intermediality'),
('im03868', 'public:journal:article_citation_bib',
'journal="Intermédialités / Intermediality'),
))
def test_journal_multilingual_titles_in_article_citation_views(self, fixture, url_name,
expected_result):
issue = IssueFactory()
repository.api.set_publication_xml(
issue.get_full_identifier(),
open('tests/fixtures/issue/{}.xml'.format(fixture), 'rb').read(),
)
article = ArticleFactory(
issue=issue,
)
url = reverse(url_name, kwargs={
'journal_code': article.issue.journal.code,
'issue_slug': article.issue.volume_slug,
'issue_localid': article.issue.localidentifier,
'localid': article.localidentifier,
})
citation = Client().get(url).content.decode()
# Check that the journal name is displayed in French and English (Relations industrielles /
# Industrial Relations).
assert expected_result in citation
def test_doi_with_extra_space(self):
article = ArticleFactory(
from_fixture='1009368ar',
)
url = article_detail_url(article)
html = Client().get(url).content.decode()
# Check that extra space around DOIs is stripped.
assert '<meta name="citation_doi" content="https://doi.org/10.7202/1009368ar" />' in html
assert '<a href="https://doi.org/10.7202/1009368ar" class="clipboard-data">' in html
def test_unicode_combining_characters(self):
article = ArticleFactory(
from_fixture='1059577ar',
)
url = article_detail_url(article)
html = Client().get(url).content.decode()
# Pre-combined character is present (ă = ă)
assert '<em>Studii de lingvistică</em>' in html
# Combining character is not present (ă = a + ˘)
assert '<em>Studii de lingvistică</em>' not in html
def test_acknowledgements_and_footnotes_sections_order(self):
article = ArticleFactory(
from_fixture='1060048ar',
issue__journal__open_access=True,
)
url = article_detail_url(article)
html = Client().get(url).content.decode()
dom = BeautifulSoup(html, 'html.parser')
partiesann = dom.find_all('section', {'class': 'partiesann'})[0]
sections = partiesann.find_all('section')
# Check that acknowledgements are displayed before footnotes.
assert sections[0].attrs['id'] == 'merci'
assert sections[1].attrs['id'] == 'grnote'
def test_abstracts_and_keywords(self):
article = ArticleFactory()
with repository.api.open_article(article.pid) as wrapper:
wrapper.set_abstracts([{'lang': 'fr', 'content': 'Résumé français'}])
wrapper.set_abstracts([{'lang': 'en', 'content': 'English abstract'}])
wrapper.add_keywords('es', ['Palabra clave en español'])
wrapper.add_keywords('fr', ['Mot-clé français'])
url = article_detail_url(article)
html = Client().get(url).content.decode()
dom = BeautifulSoup(html, 'html.parser')
grresume = dom.find_all('section', {'class': 'grresume'})[0]
resumes = grresume.find_all('section', {'class': 'resume'})
keywords = grresume.find_all('div', {'class': 'keywords'})
# Make sure the main abstract (English) appears first, even though it's in second position
# in the XML.
assert resumes[0].decode() == '<section class="resume" id="resume-en"><h3>Abstract</h3>\n' \
'<p class="alinea"><em>English abstract</em></p></section>'
# Make sure the French keywords appear in the French abstract section.
assert resumes[1].decode() == '<section class="resume" id="resume-fr"><h3>Résumé</h3>\n' \
'<p class="alinea"><em>Résumé français</em></p>\n' \
'<div class="keywords">\n<p><strong>Mots-clés :</strong>' \
'</p>\n<ul><li class="keyword">Mot-clé français</li></ul>' \
'\n</div></section>'
        # Make sure the French keywords appear first since there are no English keywords and no
        # Spanish abstract.
assert keywords[0].decode() == '<div class="keywords">\n<p><strong>Mots-clés :</strong>' \
'</p>\n<ul><li class="keyword">Mot-clé français</li>' \
'</ul>\n</div>'
# Make sure the Spanish keywords are displayed even though there is no Spanish abstract.
assert keywords[1].decode() == '<div class="keywords">\n<p><strong>Palabras clave:' \
'</strong></p>\n<ul><li class="keyword">Palabra clave en ' \
'español</li></ul>\n</div>'
@pytest.mark.parametrize('article_type, expected_string', (
('compterendu', 'Un compte rendu de la revue'),
('article', 'Un article de la revue'),
))
def test_review_article_explanatory_note(self, article_type, expected_string):
article = ArticleFactory(type=article_type)
url = article_detail_url(article)
html = Client().get(url).content.decode()
dom = BeautifulSoup(html, 'html.parser')
div = dom.find_all('div', {'class': 'doc-head__metadata'})[1]
note = 'Ce document est le compte-rendu d\'une autre oeuvre tel qu\'un livre ou un ' \
'film. L\'oeuvre originale discutée ici n\'est pas disponible sur cette plateforme.'
assert expected_string in div.decode()
if article_type == 'compterendu':
assert note in div.decode()
else:
assert note not in div.decode()
def test_verbatim_poeme_lines(self):
article = ArticleFactory(
from_fixture='1062061ar',
issue__journal__open_access=True,
)
url = article_detail_url(article)
html = Client().get(url).content.decode()
dom = BeautifulSoup(html, 'html.parser')
poeme = dom.find('blockquote', {'class': 'verbatim poeme'})
        # Check that poem lines are displayed in <p> elements.
assert poeme.decode() == '<blockquote class="verbatim poeme">\n<div class="bloc">\n<p ' \
'class="ligne">Jour de larme, </p>\n<p class="ligne">jour où ' \
'les coupables se réveilleront</p>\n<p class="ligne">pour ' \
'entendre leur jugement,</p>\n<p class="ligne">alors, ô Dieu, ' \
'pardonne-leur et leur donne le repos.</p>\n<p class="ligne">' \
'Jésus, accorde-leur le repos.</p>\n</div>\n</blockquote>'
def test_verbatim_poeme_horizontal_align(self):
article = ArticleFactory(
from_fixture='1070671ar',
issue__journal__open_access=True,
)
url = article_detail_url(article)
html = Client().get(url).content.decode()
dom = BeautifulSoup(html, 'html.parser')
poeme = dom.find('blockquote', {'class': 'verbatim poeme'}).decode()
        # Check that poem lines are centered (align-center).
assert poeme == '<blockquote class="verbatim poeme">\n' \
'<div class="bloc align align-center">\n' \
'<p class="ligne">On the land</p>\n' \
'</div>\n' \
'<div class="bloc align align-center">\n' \
'<p class="ligne">On the water</p>\n' \
'</div>\n' \
'<div class="bloc align align-center">\n' \
'<p class="ligne">Held in <span class="majuscule">Senćoŧen\n' \
' </span>kinship</p>\n' \
'</div>\n' \
'<div class="bloc align align-center">\n' \
'<p class="ligne">Today is the future</p>\n' \
'</div>\n' \
'<div class="bloc align align-center">\n' \
'<p class="ligne">It belongs to the next generations</p>\n' \
'</div>\n' \
'<div class="bloc align align-center">\n' \
'<p class="ligne">of learners — dreamers — healers</p>\n' \
'</div>\n' \
'<div class="bloc align align-center">\n' \
'<p class="ligne">Maybe one day we will move beyond territorial\n' \
' acknowledgement</p>\n' \
'</div>\n' \
'<div class="bloc align align-center">\n' \
'<p class="ligne">and gather here in a good way</p>\n' \
'</div>\n' \
'<div class="bloc align align-center">\n' \
'<p class="ligne">so that the land and their kin</p>\n' \
'</div>\n' \
'<div class="bloc align align-center">\n' \
'<p class="ligne">can introduce themselves.</p>\n' \
'</div>\n' \
'</blockquote>'
def test_grfigure_caption_position(self):
article = ArticleFactory(
from_fixture='1062105ar',
issue__journal__open_access=True,
)
url = article_detail_url(article)
html = Client().get(url).content.decode()
dom = BeautifulSoup(html, 'html.parser')
grfigure = dom.find('div', {'id': 'gf1'})
grfigure_caption = grfigure.find_all('div', {'class': 'grfigure-caption'})[0]
grfigure_legende = grfigure.find_all('div', {'class': 'grfigure-legende'})[0]
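        # The figure number should end up in the caption block, while the explanatory notes go
        # in the separate legend block.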
assert grfigure_caption.decode() == '<div class="grfigure-caption">\n<p class="no">' \
'Figure 1</p>\n<div class="legende"></div>\n</div>'
assert grfigure_legende.decode() == '<div class="grfigure-legende">\n<p class="alinea">' \
'<sup>a</sup> Hommes et femmes des générations ' \
'enquêtées (1930-1950 résidant en ' \
'Île-de-France en 1999) et leurs parents.</p>\n' \
'<p class="alinea"><sup>b</sup> L’interprétation de ' \
'cette figure se fait par exemple de la ' \
'manière suivante : Parmi les Ego hommes de ' \
'profession « indépendants », 44 % ont déclaré ' \
'que la profession principale de leur père ' \
'était indépendant, 22,5 % ouvrier, 11,9 % cadre, ' \
'etc. L’origine « père indépendant » est ' \
'nettement surreprésentée chez les Ego hommes ' \
'indépendants. C’est aussi l’origine la plus ' \
'fréquente pour les Ego femmes indépendantes ' \
'(31,5 %), suivie par un père cadre (28,7 %).</p>\n' \
'</div>'
def test_no_liensimple_in_toc_heading(self):
article = ArticleFactory(
from_fixture='1062434ar',
issue__journal__open_access=True,
)
url = article_detail_url(article)
html = Client().get(url).content.decode()
dom = BeautifulSoup(html, 'html.parser')
li = dom.find('li', {'class': 'article-toc--body'}).find('ul').find_all('li')
# Check that liensimple nodes are not displayed as links in TOC headings.
assert li[1].decode() == '<li><a href="#s1n4">«\xa0D’une vaine dispute – La Musique ' \
'plaisir de l’esprit ou jouissance sensuelle\xa0» par ' \
'Charles Koechlin, (<em><span class="souligne">La Revue ' \
'musicale, 1921</span></em>)</a></li>'
assert li[2].decode() == '<li><a href="#s1n6">« Réponse à quelques objections » par ' \
'Désiré Pâque (<em><span class="souligne">La Revue ' \
'musicale, 1935</span></em>)</a></li>'
def test_related_articles(self, monkeypatch):
journal = JournalFactory()
article_1 = ArticleFactory(issue__journal=journal)
article_2 = ArticleFactory(issue__journal=journal)
article_3 = ArticleFactory(issue__journal=journal)
article_4 = ArticleFactory(issue__journal=journal)
# Mock return value for get_journal_related_articles().
journal_related_articles = []
for article in [article_1, article_2, article_3, article_4]:
journal_related_articles.append(SolrArticle({
'RevueID': article.issue.journal.localidentifier,
'NumeroID': article.issue.localidentifier,
'ID': article.localidentifier,
}))
# Simulate a Solr result with an issue that is not in Fedora.
journal_related_articles.append(SolrArticle({
'RevueID': journal.localidentifier,
'NumeroID': 'not_in_fedora',
'ID': 'not_in_fedora',
}))
# Patch get_journal_related_articles() so it returns our mocked return value.
monkeypatch.setattr(
FakeSolrData,
'get_journal_related_articles',
unittest.mock.Mock(
return_value=journal_related_articles,
),
)
# Create the current article, which should not appear in the related articles.
current_article = ArticleFactory(
issue__journal=journal,
localidentifier='current_article',
)
# Get the response.
url = article_detail_url(current_article)
html = Client().get(url).content
# Get the HTML.
dom = BeautifulSoup(html, 'html.parser')
footer = dom.find('footer', {'class': 'container'})
# There should only be 4 related articles.
assert len(footer.find_all('article')) == 4
# The current article should not be in the related articles.
assert 'current_article' not in footer.decode()
# An article with no issue should not be in related articles.
assert 'not_in_fedora' not in footer.decode()
@pytest.mark.parametrize('with_pdf, pages, has_abstracts, open_access, expected_result', (
# If there's no PDF, there's no need to include `can_display_first_pdf_page` in the context.
(False, [], False, True, False),
# If the article has abstracts, there's no need to include `can_display_first_pdf_page` in
# the context.
(True, [1, 2], True, True, False),
# If content access is granted, `can_display_first_pdf_page` should always be True.
(True, [1], False, True, True),
(True, [1, 2], False, True, True),
# If content access is not granted, `can_display_first_pdf_page` should only be True if the
# PDF has more than one page.
(True, [1], False, False, False),
(True, [1, 2], False, False, True),
))
def test_can_display_first_pdf_page(
self, with_pdf, pages, has_abstracts, open_access, expected_result, monkeypatch,
):
monkeypatch.setattr(pikepdf._qpdf.Pdf, 'pages', pages)
article = ArticleFactory(
issue__journal__open_access=open_access,
with_pdf=with_pdf,
)
if has_abstracts:
with repository.api.open_article(article.pid) as wrapper:
wrapper.set_abstracts([{'lang': 'fr', 'content': 'Résumé'}])
url = article_detail_url(article)
response = Client().get(url)
if not with_pdf or has_abstracts:
assert 'can_display_first_pdf_page' not in response.context.keys()
else:
assert response.context['can_display_first_pdf_page'] == expected_result
@pytest.mark.parametrize('open_access', (True, False))
@pytest.mark.parametrize('url_name', (
'public:journal:article_detail',
'public:journal:article_summary',
))
def test_complete_processing_article_with_abstracts(self, url_name, open_access):
article = ArticleFactory(
from_fixture='1058611ar',
issue__journal__open_access=open_access,
)
url = reverse(url_name, kwargs={
'journal_code': article.issue.journal.code,
'issue_slug': article.issue.volume_slug,
'issue_localid': article.issue.localidentifier,
'localid': article.localidentifier,
})
html = Client().get(url).content.decode()
dom = BeautifulSoup(html, 'html.parser')
full_article = dom.find('div', {'class': 'full-article'})
# Abstracts should be displayed in all cases.
assert full_article.find_all('section', {'id': 'resume'})
# The article body should only be displayed on detail page if content access is granted.
if open_access and url_name == 'public:journal:article_detail':
assert full_article.find_all('section', {'id': 'corps'})
else:
assert not full_article.find_all('section', {'id': 'corps'})
# PDF, PDF first page or 600 first words should never be displayed because we have complete
# processing with abstracts.
assert not full_article.find_all('section', {'id': 'pdf'})
assert not full_article.find_all('section', {'id': 'first-pdf-page'})
assert not full_article.find_all('section', {'id': 'first-600-words'})
@pytest.mark.parametrize('open_access', (True, False))
@pytest.mark.parametrize('url_name', (
'public:journal:article_detail',
'public:journal:article_summary',
))
def test_complete_processing_article_without_abstracts(self, url_name, open_access):
article = ArticleFactory(
from_fixture='1005860ar',
issue__journal__open_access=open_access,
)
url = reverse(url_name, kwargs={
'journal_code': article.issue.journal.code,
'issue_slug': article.issue.volume_slug,
'issue_localid': article.issue.localidentifier,
'localid': article.localidentifier,
})
html = Client().get(url).content.decode()
dom = BeautifulSoup(html, 'html.parser')
full_article = dom.find('div', {'class': 'full-article'})
# Abstracts should not be displayed because we have none.
assert not full_article.find_all('section', {'id': 'resume'})
# The article body should only be displayed on detail page if content access is granted.
if open_access and url_name == 'public:journal:article_detail':
assert full_article.find_all('section', {'id': 'corps'})
else:
assert not full_article.find_all('section', {'id': 'corps'})
# The first 600 words should only be displayed on summary page or if content access is not
# granted.
if not open_access or url_name == 'public:journal:article_summary':
assert full_article.find_all('section', {'id': 'first-600-words'})
else:
assert not full_article.find_all('section', {'id': 'first-600-words'})
# PDF or PDF first page should never be displayed because we have complete processing.
assert not full_article.find_all('section', {'id': 'pdf'})
assert not full_article.find_all('section', {'id': 'first-pdf-page'})
@pytest.mark.parametrize('open_access', (True, False))
@pytest.mark.parametrize('url_name', (
'public:journal:article_detail',
'public:journal:article_summary',
))
def test_minimal_processing_article_with_abstracts(self, url_name, open_access):
article = ArticleFactory(
from_fixture='602354ar',
issue__journal__open_access=open_access,
with_pdf=True,
)
url = reverse(url_name, kwargs={
'journal_code': article.issue.journal.code,
'issue_slug': article.issue.volume_slug,
'issue_localid': article.issue.localidentifier,
'localid': article.localidentifier,
})
html = Client().get(url).content.decode()
dom = BeautifulSoup(html, 'html.parser')
full_article = dom.find('div', {'class': 'full-article'})
# Abstracts should be displayed in all cases.
assert full_article.find_all('section', {'id': 'resume'})
# The article PDF should only be displayed on detail page if content access is granted.
if open_access and url_name == 'public:journal:article_detail':
assert full_article.find_all('section', {'id': 'pdf'})
else:
assert not full_article.find_all('section', {'id': 'pdf'})
# Article body, 600 first words or PDF first page should never be displayed because we have
# minimal processing with abstracts.
assert not full_article.find_all('section', {'id': 'corps'})
assert not full_article.find_all('section', {'id': 'first-600-words'})
assert not full_article.find_all('section', {'id': 'first-pdf-page'})
@pytest.mark.parametrize('open_access', (True, False))
@pytest.mark.parametrize('url_name', (
'public:journal:article_detail',
'public:journal:article_summary',
))
@pytest.mark.parametrize('pages', ([1], [1, 2]))
def test_minimal_processing_article_without_abstracts(self, pages, url_name, open_access):
article = ArticleFactory(
from_fixture='1056823ar',
issue__journal__open_access=open_access,
with_pdf=True,
)
url = reverse(url_name, kwargs={
'journal_code': article.issue.journal.code,
'issue_slug': article.issue.volume_slug,
'issue_localid': article.issue.localidentifier,
'localid': article.localidentifier,
})
html = Client().get(url).content.decode()
dom = BeautifulSoup(html, 'html.parser')
full_article = dom.find('div', {'class': 'full-article'})
# Abstracts should not be displayed because we have none.
assert not full_article.find_all('section', {'id': 'resume'})
# The article PDF should only be displayed on detail page if content access is granted.
if open_access and url_name == 'public:journal:article_detail':
assert full_article.find_all('section', {'id': 'pdf'})
else:
assert not full_article.find_all('section', {'id': 'pdf'})
# The article PDF first page should only be displayed on summary page or if content access
# is not granted.
if not open_access or url_name == 'public:journal:article_summary':
assert full_article.find_all('section', {'id': 'first-pdf-page'})
else:
assert not full_article.find_all('section', {'id': 'first-pdf-page'})
# Article body or 600 first words should never be displayed because we have minimal
# processing.
assert not full_article.find_all('section', {'id': 'corps'})
assert not full_article.find_all('section', {'id': 'first-600-words'})
@pytest.mark.parametrize('open_access', (True, False))
@pytest.mark.parametrize('url_name', (
'public:journal:article_detail',
'public:journal:article_summary',
))
def test_minimal_processing_article_without_abstracts_and_with_only_one_page(
self, url_name, open_access, monkeypatch
):
monkeypatch.setattr(pikepdf._qpdf.Pdf, 'pages', [1])
article = ArticleFactory(
from_fixture='1056823ar',
issue__journal__open_access=open_access,
with_pdf=True,
)
url = reverse(url_name, kwargs={
'journal_code': article.issue.journal.code,
'issue_slug': article.issue.volume_slug,
'issue_localid': article.issue.localidentifier,
'localid': article.localidentifier,
})
html = Client().get(url).content.decode()
dom = BeautifulSoup(html, 'html.parser')
full_article = dom.find('div', {'class': 'full-article'})
# Abstracts should not be displayed because we have none.
assert not full_article.find_all('section', {'id': 'resume'})
# The article PDF should only be displayed on detail page if content access is granted.
if open_access and url_name == 'public:journal:article_detail':
assert full_article.find_all('section', {'id': 'pdf'})
else:
assert not full_article.find_all('section', {'id': 'pdf'})
# The article PDF first page should only be displayed on summary page if content access is
# granted because the PDF has only one page.
if open_access and url_name == 'public:journal:article_summary':
assert full_article.find_all('section', {'id': 'first-pdf-page'})
else:
assert not full_article.find_all('section', {'id': 'first-pdf-page'})
# Article body or 600 first words should never be displayed because we have minimal
# processing.
assert not full_article.find_all('section', {'id': 'corps'})
assert not full_article.find_all('section', {'id': 'first-600-words'})
@pytest.mark.parametrize('has_abstracts, expected_alert', (
(True, 'Seul le résumé sera affiché.'),
(False, 'Seuls les 600 premiers mots du texte seront affichés.'),
))
def test_complete_processing_article_content_access_not_granted_alert(
self, has_abstracts, expected_alert,
):
article = ArticleFactory(issue__journal__open_access=False)
if has_abstracts:
with repository.api.open_article(article.pid) as wrapper:
wrapper.set_abstracts([{'lang': 'fr', 'content': 'Résumé'}])
url = article_detail_url(article)
html = Client().get(url).content.decode()
assert expected_alert in html
@pytest.mark.parametrize('has_abstracts, pages, expected_alert', (
(True, [1, 2], 'Seul le résumé sera affiché.'),
(True, [1], 'Seul le résumé sera affiché.'),
(False, [1, 2], 'Seule la première page du PDF sera affichée.'),
(False, [1], 'Seule la première page du PDF sera affichée.'),
))
def test_minimal_processing_article_content_access_not_granted_alert(
self, has_abstracts, pages, expected_alert, monkeypatch,
):
monkeypatch.setattr(pikepdf._qpdf.Pdf, 'pages', pages)
article = ArticleFactory(
from_fixture='1056823ar',
issue__journal__open_access=False,
with_pdf=True,
)
if has_abstracts:
with repository.api.open_article(article.pid) as wrapper:
wrapper.set_abstracts([{'lang': 'fr', 'content': 'Résumé'}])
url = article_detail_url(article)
html = Client().get(url).content.decode()
# The expected alert should only be displayed if there's abstracts or if the PDF has more
# than one page.
if has_abstracts or len(pages) > 1:
assert expected_alert in html
else:
assert expected_alert not in html
@pytest.mark.parametrize('fixture, section_id, expected_title', (
# Articles without specified titles in the XML, default values should be used.
('1054008ar', 'grnotebio', 'Note biographique'),
('1054008ar', 'grnote', 'Notes'),
('1059303ar', 'merci', 'Acknowledgements'),
# Articles with specified titles in the XML.
('009676ar', 'grnotebio', 'Collaboratrice'),
('009381ar', 'grnote', 'Notas'),
('1040250ar', 'merci', 'Remerciements et financement'),
))
def test_article_annex_section_titles(self, fixture, section_id, expected_title):
article = ArticleFactory(
from_fixture=fixture,
issue__journal__open_access=True,
)
url = article_detail_url(article)
html = Client().get(url).content.decode()
dom = BeautifulSoup(html, 'html.parser')
article_toc = dom.find('nav', {'class': 'article-table-of-contents'})
section = dom.find('section', {'id': section_id})
assert article_toc.find('a', {'href': '#' + section_id}).text == expected_title
assert section.find('h2').text == expected_title
@pytest.mark.parametrize('fixture, expected_title', (
('009676ar', 'Bibliographie'),
('1070621ar', 'Bibliography'),
('1054008ar', 'Références'),
))
def test_article_grbiblio_section_titles(self, fixture, expected_title):
article = ArticleFactory(
from_fixture=fixture,
issue__journal__open_access=True,
)
url = article_detail_url(article)
html = Client().get(url).content.decode()
dom = BeautifulSoup(html, 'html.parser')
article_toc = dom.find('nav', {'class': 'article-table-of-contents'})
section = dom.find('section', {'id': 'grbiblio'})
assert article_toc.find('a', {'href': '#biblio-1'}).text == expected_title
assert section.find('h2').text == expected_title
def test_media_object_source(self):
article = ArticleFactory(
from_fixture='1065018ar',
issue__journal__open_access=True,
)
url = article_detail_url(article)
html = Client().get(url).content.decode()
dom = BeautifulSoup(html, 'html.parser')
media_object = dom.find('div', {'class': 'media'})
assert media_object.find('cite', {'class': 'source'}).text == 'Courtesy of La compagnie'
def test_media_object_padding_bottom_based_on_aspect_ratio(self):
article = ArticleFactory(
from_fixture='1065018ar',
issue__journal__open_access=True,
)
url = article_detail_url(article)
html = Client().get(url).content.decode()
dom = BeautifulSoup(html, 'html.parser')
media_object = dom.find('div', {'class': 'embed-responsive'})
assert media_object.get('style') == 'padding-bottom: 56.563%'
@pytest.mark.parametrize('fixture, expected_section_titles', (
('1054008ar', [
'<h2>Suspension du verbe</h2>',
'<h2>Une éthique de l’action</h2>',
'<h3>1– Nécessité de limiter l’action.</h3>',
'<h3>2– Nécessité de simplifier, c’est-à-dire de réduire à l’essentiel.</h3>',
'<h3>3– Nécessité (pour l’homme) de se transformer.</h3>',
'<h2>Une «\xa0poéthique\xa0»</h2>',
'<h2>L’en avant de la parole</h2>',
]),
('1062105ar', [
'<h2><span class="majuscule">Introduction</span></h2>',
'<h2><span class="majuscule">1. La mesure de la mobilitÉ sociale en France et '
'au QuÉbec</span></h2>',
'<h2><span class="majuscule">2. MÉthodes</span></h2>',
'<h3>2.1 Présentation des deux enquêtes et des variables professionnelles '
'sélectionnées</h3>',
'<h3>2.2 Les codages effectués pour mesurer les transmissions '
'professionnelles</h3>',
'<h4><em>2.2.1 Genre et niveau de compétences</em></h4>',
'<h4><em>2.2.2 Catégories socioprofessionnelles</em></h4>',
'<h2><span class="majuscule">3. Évolution de la structure socioprofessionnelle '
'des emplois et transmissions professionnelles au sein des lignÉes</span></h2>',
'<h3>3.1 Répartition des positions socioprofessionnelles dans les lignées des '
'générations enquêtées</h3>',
'<h3>3.2 Transmissions professionnelles dans les lignées</h3>',
'<h2><span class="majuscule">Conclusion</span></h2>',
]),
))
def test_article_toc_view(self, fixture, expected_section_titles):
article = ArticleFactory(
from_fixture=fixture,
issue__journal__open_access=True,
)
url = reverse('public:journal:article_toc', kwargs={
'journal_code': article.issue.journal.code,
'issue_slug': article.issue.volume_slug,
'issue_localid': article.issue.localidentifier,
'localid': article.localidentifier,
})
html = Client().get(url).content.decode()
for section_title in expected_section_titles:
assert section_title in html
@pytest.mark.parametrize('mock_is_external, mock_url, expected_status_code', [
(False, None, 200),
(True, 'http://www.example.com', 301),
])
def test_get_external_issues_are_redirected(self, mock_is_external, mock_url, expected_status_code, monkeypatch):
monkeypatch.setattr(Article, 'is_external', mock_is_external)
monkeypatch.setattr(Article, 'url', mock_url)
article = ArticleFactory()
url = article_detail_url(article)
response = Client().get(url)
assert response.status_code == expected_status_code
if mock_url:
assert response.url == mock_url
def test_marquage_in_affiliations(self):
article = ArticleFactory(from_fixture='1066010ar')
url = article_detail_url(article)
html = Client().get(url).content.decode()
assert '<li class="auteur-affiliation"><p><strong>Benoit\n Vaillancourt</strong><br>' \
'<span class="petitecap">C</span><span class="petitecap">élat</span>' \
'<span class="petitecap">, Ipac, </span>Université Laval</p></li>' in html
@pytest.mark.parametrize('fixture, expected_link', (
# `https://` should be added to URLs that starts with `www`.
('1038424ar', '<a href="https://www.inspq.qc.ca/pdf/publications/1177_RelGazSchisteSante' \
'%20PubRapPreliminaire.pdf" id="ls3" target="_blank">www.inspq.qc.ca/pdf/' \
'publications/1177_RelGazSchisteSante PubRapPreliminaire.pdf</a>'),
# `https://` should not be added to email addresses.
('1038424ar', '<a href="mailto:[email protected]" id="ls1" ' \
'target="_blank">[email protected]</a>'),
# Complete URLs should not be altered.
('1038424ar', '<a href="http://www.nytimes.com/2014/12/18/nyregion/cuomo-to-ban-fracking-' \
'in-new-york-state-citing-health-risks.html?_r=0" id="ls4" target="_blank">' \
'http://www.nytimes.com/2014/12/18/nyregion/cuomo-to-ban-fracking-' \
'in-new-york-state-citing-health-risks.html?_r=0</a>'),
# Links to `http://www.erudit.org` should not have target="_blank".
('009256ar', '<a href="http://www.erudit.org/revue/ron/1998/v/n9" id="ls1">' \
'http://www.erudit.org/revue/ron/1998/v/n9</a>'),
))
def test_liensimple_urls(self, fixture, expected_link):
article = ArticleFactory(from_fixture=fixture)
url = article_detail_url(article)
html = Client().get(url).content.decode()
assert expected_link in html
def test_no_white_spaces_around_objetmedia(self):
article = ArticleFactory(
from_fixture='1067517ar',
localidentifier='article',
issue__year='2020',
issue__localidentifier='issue',
issue__journal__code='journal',
issue__journal__open_access=True,
)
url = article_detail_url(article)
html = Client().get(url).content.decode()
        # No unwanted extra spaces in addition to the wanted non-breaking spaces inside quotes.
assert '«\xa0<img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwC' \
'AAAAC0lEQVR42mNkYAAAAAYAAjCB0C8AAAAASUVORK5CYII=" ' \
'data-srcset="/fr/revues/journal/2020-issue/article/media/2127962n.jpg 16w" ' \
'data-aspectratio="0.941176470588235" width="16" height="17" class="lazyload" ' \
'id="im10" alt="forme: forme pleine grandeur">\xa0U+1F469 woman\xa0»' in html
# No unwanted extra spaces inside parentheses.
assert '(<img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAA' \
'C0lEQVR42mNkYAAAAAYAAjCB0C8AAAAASUVORK5CYII=" ' \
'data-srcset="/fr/revues/journal/2020-issue/article/media/2127980n.jpg 17w" ' \
'data-aspectratio="1.307692307692308" width="17" height="13" class="lazyload" ' \
'id="im34" alt="forme: forme pleine grandeur">)' in html
# No unwanted extra spaces after hashtag.
assert '#<img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAA' \
'C0lEQVR42mNkYAAAAAYAAjCB0C8AAAAASUVORK5CYII=" ' \
'data-srcset="/fr/revues/journal/2020-issue/article/media/2127981n.jpg 32w" ' \
'data-aspectratio="1.684210526315789" width="32" height="19" class="lazyload" ' \
'id="im35" alt="forme: forme pleine grandeur">' in html
def test_footnote_in_bibliography_title(self):
article = ArticleFactory(from_fixture='1068385ar')
url = article_detail_url(article)
html = Client().get(url).content.decode()
assert '<h2 id="biblio-1">Bibliographie sélective<a href="#no49" id="re1no49" class="norenvoi" ' \
'title="La bibliographie recense exclusivement les travaux cités dans l’article. ' \
'En complément, la base de données des logiciels et projets (cf.\xa0note 2) ' \
'propose pour l’ensemble des logicie[…]">[49]</a>\n</h2>' in html
assert '<li><a href="#biblio-1">Bibliographie sélective</a></li>' in html
    def test_organisation_as_author_is_displayed_in_bold(self):
article = ArticleFactory(from_fixture='1068900ar')
url = article_detail_url(article)
html = Client().get(url).content.decode()
assert '<li class="auteur-affiliation">' \
'<p><strong>The MAP Research Team</strong></p>' \
'</li>' in html
def test_appendices_titles_language(self):
article = ArticleFactory(
from_fixture='1069092ar',
issue__journal__open_access=True,
)
url = article_detail_url(article)
html = Client().get(url).content.decode()
dom = BeautifulSoup(html, 'html.parser')
sections = dom.find_all('section', {'class': 'grnotebio'})
assert len(sections) == 3
assert sections[0].find('h2').decode() == '<h2>Notes biographiques</h2>'
assert sections[1].find('h2').decode() == '<h2>Biographical notes</h2>'
assert sections[2].find('h2').decode() == '<h2>Notas biograficas</h2>'
class TestArticleRawPdfView:
@unittest.mock.patch.object(JournalDigitalObject, 'logo')
@unittest.mock.patch.object(ArticleDigitalObject, 'pdf')
@unittest.mock.patch.object(subprocess, 'check_call')
def test_can_retrieve_the_pdf_of_existing_articles(self, mock_check_call, mock_pdf, mock_logo):
with open(os.path.join(FIXTURE_ROOT, 'dummy.pdf'), 'rb') as f:
mock_pdf.content = io.BytesIO()
mock_pdf.content.write(f.read())
with open(os.path.join(FIXTURE_ROOT, 'logo.jpg'), 'rb') as f:
mock_logo.content = io.BytesIO()
mock_logo.content.write(f.read())
journal = JournalFactory()
issue = IssueFactory.create(
journal=journal, year=2010,
date_published=dt.datetime.now() - dt.timedelta(days=1000))
IssueFactory.create(
journal=journal, year=2010,
date_published=dt.datetime.now())
article = ArticleFactory.create(issue=issue)
journal_id = journal.localidentifier
issue_id = issue.localidentifier
article_id = article.localidentifier
url = article_raw_pdf_url(article)
request = RequestFactory().get(url)
request.user = AnonymousUser()
request.session = {}
request.subscriptions = UserSubscriptions()
response = ArticleRawPdfView.as_view()(
request, journal_code=journal_id, issue_slug=issue.volume_slug, issue_localid=issue_id,
localid=article_id)
assert response.status_code == 200
assert response['Content-Type'] == 'application/pdf'
def test_cannot_retrieve_the_pdf_of_inexistant_articles(self):
# Note: as there is no Erudit fedora repository used during the
        # test, any attempt to retrieve the PDF of an article should
# fail.
journal_id = 'dummy139'
issue_slug = 'test'
issue_id = 'dummy1515298'
article_id = '1001942du'
url = reverse('public:journal:article_raw_pdf', args=(
journal_id, issue_slug, issue_id, article_id
))
response = Client().get(url)
assert response.status_code == 404
@unittest.mock.patch.object(ArticleDigitalObject, 'pdf')
@unittest.mock.patch.object(subprocess, 'check_call')
@pytest.mark.parametrize('pages, expected_exception', [
([], True),
([1], True),
([1, 2], False),
])
def test_can_retrieve_the_firstpage_pdf_of_existing_articles(self, mock_check_call, mock_pdf, pages, expected_exception, monkeypatch):
monkeypatch.setattr(pikepdf._qpdf.Pdf, 'pages', pages)
with open(os.path.join(FIXTURE_ROOT, 'dummy.pdf'), 'rb') as f:
mock_pdf.content = io.BytesIO()
mock_pdf.content.write(f.read())
journal = JournalFactory()
issue = IssueFactory.create(
journal=journal, year=2010,
date_published=dt.datetime.now() - dt.timedelta(days=1000))
IssueFactory.create(
journal=journal, year=2010,
date_published=dt.datetime.now())
article = ArticleFactory.create(issue=issue)
journal_id = journal.localidentifier
issue_id = issue.localidentifier
article_id = article.localidentifier
url = article_raw_pdf_url(article)
request = RequestFactory().get(url)
request.user = AnonymousUser()
request.session = {}
request.subscriptions = UserSubscriptions()
# Raise exception if PDF has less than 2 pages.
if expected_exception:
with pytest.raises(PermissionDenied):
response = ArticleRawPdfFirstPageView.as_view()(
request, journal_code=journal_id, issue_slug=issue.volume_slug, issue_localid=issue_id,
localid=article_id)
else:
response = ArticleRawPdfFirstPageView.as_view()(
request, journal_code=journal_id, issue_slug=issue.volume_slug, issue_localid=issue_id,
localid=article_id)
assert response.status_code == 200
assert response['Content-Type'] == 'application/pdf'
def test_cannot_be_accessed_if_the_article_is_not_in_open_access(self):
journal = JournalFactory(open_access=False)
issue = IssueFactory.create(
journal=journal, year=dt.datetime.now().year, date_published=dt.datetime.now())
article = ArticleFactory.create(issue=issue)
journal_code = journal.code
issue_id = issue.localidentifier
article_id = article.localidentifier
url = article_raw_pdf_url(article)
request = RequestFactory().get(url)
request.user = AnonymousUser()
request.session = {}
request.subscriptions = UserSubscriptions()
response = ArticleRawPdfView.as_view()(
request, journal_code=journal_code, issue_slug=issue.volume_slug,
issue_localid=issue_id, localid=article_id)
assert isinstance(response, HttpResponseRedirect)
assert response.url == article_detail_url(article)
def test_cannot_be_accessed_if_the_publication_of_the_article_is_not_allowed_by_its_authors(self): # noqa
journal = JournalFactory(open_access=False)
issue = IssueFactory.create(
journal=journal, year=2010, date_published=dt.datetime.now())
article = ArticleFactory.create(issue=issue, publication_allowed=False)
journal_code = journal.code
issue_id = issue.localidentifier
article_id = article.localidentifier
url = article_raw_pdf_url(article)
request = RequestFactory().get(url)
request.user = AnonymousUser()
request.session = {}
request.subscriptions = UserSubscriptions()
response = ArticleRawPdfView.as_view()(
request, journal_code=journal_code, issue_slug=issue.volume_slug,
issue_localid=issue_id, localid=article_id)
assert isinstance(response, HttpResponseRedirect)
assert response.url == article_detail_url(article)
class TestLegacyUrlsRedirection:
def test_can_redirect_issue_support_only_volume_and_year(self):
journal = JournalFactory(code='test')
issue = IssueFactory(journal=journal, volume="1", number="1", year="2017")
IssueFactory(journal=issue.journal, volume="1", number="2", year="2017")
article = ArticleFactory()
article.issue.volume = "1"
article.issue.number = "1"
article.issue.year = "2017"
article.issue.save()
article2 = ArticleFactory()
article2.issue.journal = article.issue.journal
article2.issue.volume = "1"
article2.issue.number = "2"
article2.issue.year = "2017"
article2.issue.save()
url = "/revue/{journal_code}/{year}/v{volume}/n/".format(
journal_code=article.issue.journal.code,
year=article.issue.year,
volume=article.issue.volume,
)
resp = Client().get(url)
assert resp.url == reverse('public:journal:issue_detail', kwargs=dict(
journal_code=article2.issue.journal.code,
issue_slug=article2.issue.volume_slug,
localidentifier=article2.issue.localidentifier,
))
def test_can_redirect_issue_detail_with_empty_volume(self):
issue = IssueFactory(number="1", volume="1", year="2017")
issue2 = IssueFactory(journal=issue.journal, volume="2", number="1", year="2017")
url = "/revue/{journal_code}/{year}/v/n{number}/".format(
journal_code=issue.journal.code,
number=issue.number,
year=issue.year,
)
resp = Client().get(url)
assert resp.url == reverse('public:journal:issue_detail', kwargs=dict(
journal_code=issue2.journal.code,
issue_slug=issue2.volume_slug,
localidentifier=issue2.localidentifier,
))
def test_can_redirect_article_from_legacy_urls(self):
from django.utils.translation import deactivate_all
article = ArticleFactory()
article.issue.volume = "1"
article.issue.save()
url = '/revue/{journal_code}/{issue_year}/v{issue_volume}/n/{article_localidentifier}.html'.format( # noqa
journal_code=article.issue.journal.code,
issue_year=article.issue.year,
issue_volume=article.issue.volume,
article_localidentifier=article.localidentifier
)
resp = Client().get(url)
assert resp.status_code == 301
url = '/revue/{journal_code}/{issue_year}/v/n/{article_localidentifier}.html'.format( # noqa
journal_code=article.issue.journal.code,
issue_year=article.issue.year,
article_localidentifier=article.localidentifier
)
resp = Client().get(url)
assert resp.status_code == 301
url = '/revue/{journal_code}/{issue_year}/v/n{issue_number}/{article_localidentifier}.html'.format( # noqa
journal_code=article.issue.journal.code,
issue_year=article.issue.year,
issue_number=article.issue.number,
article_localidentifier=article.localidentifier
)
resp = Client().get(url)
assert resp.url == article_detail_url(article)
assert "/fr/" in resp.url
assert resp.status_code == 301
deactivate_all()
resp = Client().get(url + "?lang=en")
assert resp.url == article_detail_url(article)
assert "/en/" in resp.url
assert resp.status_code == 301
url = '/en/revue/{journal_code}/{issue_year}/v/n{issue_number}/{article_localidentifier}.html'.format( # noqa
journal_code=article.issue.journal.code,
issue_year=article.issue.year,
issue_number=article.issue.number,
article_localidentifier=article.localidentifier
)
deactivate_all()
resp = Client().get(url)
assert resp.url == article_detail_url(article)
assert "/en/" in resp.url
assert resp.status_code == 301
@pytest.mark.parametrize("pattern", (
"/revue/{journal_code}/{year}/v{volume}/n{number}/",
"/culture/{journal_localidentifier}/{issue_localidentifier}/index.html"
))
def test_can_redirect_issues_from_legacy_urls(self, pattern):
article = ArticleFactory()
article.issue.volume = "1"
article.issue.number = "1"
article.issue.save()
url = pattern.format(
journal_code=article.issue.journal.code,
year=article.issue.year,
volume=article.issue.volume,
number=article.issue.number,
journal_localidentifier=article.issue.journal.localidentifier,
issue_localidentifier=article.issue.localidentifier,
            article_localidentifier=article.localidentifier,
)
resp = Client().get(url)
assert resp.url == reverse('public:journal:issue_detail', kwargs=dict(
journal_code=article.issue.journal.code,
issue_slug=article.issue.volume_slug,
localidentifier=article.issue.localidentifier
))
assert resp.status_code == 301
def test_can_redirect_journals_from_legacy_urls(self):
article = ArticleFactory()
article.issue.volume = "1"
article.issue.number = "1"
article.issue.save()
url = "/revue/{code}/".format(
code=article.issue.journal.code,
)
resp = Client().get(url)
assert resp.url == journal_detail_url(article.issue.journal)
assert resp.status_code == 301
class TestArticleFallbackRedirection:
@pytest.fixture(params=itertools.product(
[{'code': 'nonexistent'}],
[
'legacy_journal:legacy_journal_detail',
'legacy_journal:legacy_journal_detail_index',
'legacy_journal:legacy_journal_authors',
'legacy_journal:legacy_journal_detail_culture',
'legacy_journal:legacy_journal_detail_culture_index',
'legacy_journal:legacy_journal_authors_culture'
]
))
def journal_url(self, request):
kwargs = request.param[0]
url = request.param[1]
return reverse(url, kwargs=kwargs)
@pytest.fixture(params=itertools.chain(
itertools.product(
[{
'journal_code': 'nonexistent',
'year': "1974",
'v': "7",
'n': "1",
}],
["legacy_journal:legacy_issue_detail", "legacy_journal:legacy_issue_detail_index"]
),
itertools.product(
[{
'journal_code': 'nonexistent',
'year': "1974",
'v': "7",
'n': "",
}],
[
"legacy_journal:legacy_issue_detail",
"legacy_journal:legacy_issue_detail_index"
],
),
itertools.product(
[{
'journal_code': 'nonexistent',
'year': "1974",
'v': "7",
'n': "",
}],
[
"legacy_journal:legacy_issue_detail",
"legacy_journal:legacy_issue_detail_index"
],
),
itertools.product([{
'journal_code': 'nonexistent',
'localidentifier': 'nonexistent'
}], ["legacy_journal:legacy_issue_detail_culture",
"legacy_journal:legacy_issue_detail_culture_index"],
)
))
def issue_url(self, request):
kwargs = request.param[0]
url = request.param[1]
return reverse(url, kwargs=kwargs)
@pytest.fixture(params=itertools.chain(
itertools.product(
[{
'journal_code': 'nonexistent', 'year': 2004, 'v': 1, 'issue_number': 'nonexistent',
'localid': 'nonexistent', 'format_identifier': 'html', 'lang': 'fr'
}],
[
"legacy_journal:legacy_article_detail",
"legacy_journal:legacy_article_detail_culture"
],
),
[
({'localid': 'nonexistent'}, 'legacy_journal:legacy_article_id'),
({'journal_code': 'nonexistent',
'issue_localid': 'nonexistent', 'localid': 'nonexistent',
'format_identifier': 'html'},
'legacy_journal:legacy_article_detail_culture_localidentifier')
]),
)
def article_url(self, request):
kwargs = request.param[0]
url = request.param[1]
return reverse(url, kwargs=kwargs)
def test_legacy_url_for_nonexistent_journals_404s(self, journal_url):
response = Client().get(journal_url, follow=True)
assert response.status_code == 404
def test_legacy_url_for_nonexistent_issues_404s(self, issue_url):
response = Client().get(issue_url, follow=True)
assert response.status_code == 404
def test_legacy_url_for_nonexistent_articles_404s(self, article_url):
response = Client().get(article_url, follow=True)
assert response.status_code == 404
class TestArticleXmlView:
def test_can_retrieve_xml_of_existing_articles(self):
journal = JournalFactory(open_access=True)
issue = IssueFactory.create(
journal=journal, year=2010, is_published=True,
date_published=dt.datetime.now() - dt.timedelta(days=1000))
article = ArticleFactory.create(issue=issue)
journal_id = issue.journal.localidentifier
issue_id = issue.localidentifier
article_id = article.localidentifier
url = reverse('public:journal:article_raw_xml', args=(
journal_id, issue.volume_slug, issue_id, article_id
))
response = Client().get(url)
assert response.status_code == 200
assert response['Content-Type'] == 'application/xml'
class TestArticleMediaView(TestCase):
@unittest.mock.patch.object(MediaDigitalObject, 'content')
def test_can_retrieve_the_pdf_of_existing_articles(self, mock_content):
# Setup
with open(os.path.join(FIXTURE_ROOT, 'pixel.png'), 'rb') as f:
mock_content.content = io.BytesIO()
mock_content.content.write(f.read())
mock_content.mimetype = 'image/png'
issue = IssueFactory.create(date_published=dt.datetime.now())
article = ArticleFactory.create(issue=issue)
issue_id = issue.localidentifier
article_id = article.localidentifier
request = RequestFactory().get('/')
# Run
response = ArticleMediaView.as_view()(
request, journal_code=issue.journal.code, issue_localid=issue_id,
localid=article_id, media_localid='test')
# Check
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'image/png')
class TestExternalURLRedirectViews:
def test_can_redirect_to_issue_external_url(self):
issue = IssueFactory.create(
date_published=dt.datetime.now(),
external_url="http://www.erudit.org"
)
response = Client().get(
reverse(
'public:journal:issue_external_redirect',
kwargs={'localidentifier': issue.localidentifier}
)
)
assert response.status_code == 302
def test_can_redirect_to_journal_external_url(self):
journal = JournalFactory(code='journal1', external_url='http://www.erudit.org')
response = Client().get(
reverse(
'public:journal:journal_external_redirect',
kwargs={'code': journal.code}
)
)
assert response.status_code == 302
@pytest.mark.parametrize('export_type', ['bib', 'enw', 'ris'])
def test_article_citation_doesnt_html_escape(export_type):
# citations exports don't HTML-escape values (they're not HTML documents).
    # TODO: test authors' names. Templates directly refer to `erudit_object` and we don't have
# a proper mechanism in the upcoming fake fedora API to fake values on the fly yet.
title = "rock & rollin'"
article = ArticleFactory.create(title=title)
issue = article.issue
url = reverse('public:journal:article_citation_{}'.format(export_type), kwargs={
'journal_code': issue.journal.code, 'issue_slug': issue.volume_slug,
'issue_localid': issue.localidentifier, 'localid': article.localidentifier})
response = Client().get(url)
content = response.content.decode()
assert title in content
@pytest.mark.parametrize("view_name", (
"article_detail",
"article_summary",
"article_biblio",
"article_toc",
))
def test_no_html_in_structured_data(view_name):
article = ArticleFactory(
from_fixture="038686ar",
localidentifier="article",
issue__localidentifier="issue",
issue__year="2019",
issue__journal__code="journal",
)
url = reverse(f"public:journal:{view_name}", kwargs={
"journal_code": article.issue.journal.code,
"issue_slug": article.issue.volume_slug,
"issue_localid": article.issue.localidentifier,
"localid": article.localidentifier,
})
response = Client().get(url)
content = response.content.decode()
expected = '{\n ' \
'"@type": "ListItem",\n ' \
'"position": 5,\n ' \
'"item": {\n ' \
'"@id": "http://example.com/fr/revues/journal/2019-issue/article/",\n ' \
'"name": "Constantin, François (dir.), Les biens publics mondiaux. ' \
'Un mythe légitimateur pour l’action collective\xa0?, ' \
'coll. Logiques politiques, Paris, L’Harmattan, 2002, 385\xa0p."\n ' \
'}\n ' \
'}'
assert expected in content
| gpl-3.0 | 6,611,205,042,664,009,000 | 46.901239 | 138 | 0.603824 | false | 3.793905 | true | false | false |
macosforge/ccs-calendarserver | txdav/who/vcard.py | 1 | 13053 | ##
# Copyright (c) 2006-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Utilities for converting a Record to a vCard
"""
__all__ = [
"vCardFromRecord"
]
from pycalendar.vcard.adr import Adr
from pycalendar.vcard.n import N
from twext.python.log import Logger
from twext.who.idirectory import FieldName, RecordType
from twisted.internet.defer import inlineCallbacks, returnValue
from twistedcaldav.config import config
from twistedcaldav.vcard import Component, Property, vCardProductID
from txdav.who.idirectory import FieldName as CalFieldName, \
RecordType as CalRecordType
from txweb2.dav.util import joinURL
log = Logger()
recordTypeToVCardKindMap = {
RecordType.user: "individual",
RecordType.group: "group",
CalRecordType.location: "location",
CalRecordType.resource: "device",
}
vCardKindToRecordTypeMap = {
"individual": RecordType.user,
"group": RecordType.group,
"org": RecordType.group,
"location": CalRecordType.location,
"device": CalRecordType.resource,
}
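# Note: the two maps above are deliberately not strict inverses -- both the
# "group" and "org" vCard kinds fold back into RecordType.group, since no
# separate organisation record type is defined here.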
# all possible generated parameters.
vCardPropToParamMap = {
# "PHOTO": {"ENCODING": ("B",), "TYPE": ("JPEG",), },
"ADR": {"TYPE": ("WORK", "PREF", "POSTAL", "PARCEL",),
"LABEL": None, "GEO": None, },
"LABEL": {"TYPE": ("POSTAL", "PARCEL",)},
# "TEL": {"TYPE": None, }, # None means param value can be anything
"EMAIL": {"TYPE": None, },
# "KEY": {"ENCODING": ("B",), "TYPE": ("PGPPUBILICKEY", "USERCERTIFICATE", "USERPKCS12DATA", "USERSMIMECERTIFICATE",)},
# "URL": {"TYPE": ("WEBLOG", "HOMEPAGE",)},
# "IMPP": {"TYPE": ("PREF",), "X-SERVICE-TYPE": None, },
# "X-ABRELATEDNAMES": {"TYPE": None, },
# "X-AIM": {"TYPE": ("PREF",), },
# "X-JABBER": {"TYPE": ("PREF",), },
# "X-MSN": {"TYPE": ("PREF",), },
# "X-ICQ": {"TYPE": ("PREF",), },
}
vCardConstantProperties = {
# ====================================================================
# 3.6 EXPLANATORY TYPES http://tools.ietf.org/html/rfc2426#section-3.6
# ====================================================================
# 3.6.3 PRODID
"PRODID": vCardProductID,
# 3.6.9 VERSION
"VERSION": "3.0",
}
@inlineCallbacks
def vCardFromRecord(record, forceKind=None, addProps=None, parentURI=None):
def isUniqueProperty(newProperty, ignoredParameters={}):
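        # A new property counts as a duplicate if it matches an existing
        # property of the same name once the values in ignoredParameters
        # (e.g. TYPE=PREF) are stripped from the existing property, so an
        # otherwise-identical "preferred" variant is not added twice.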
existingProperties = vcard.properties(newProperty.name())
for existingProperty in existingProperties:
if ignoredParameters:
existingProperty = existingProperty.duplicate()
for paramName, paramValues in ignoredParameters.iteritems():
for paramValue in paramValues:
existingProperty.removeParameterValue(paramName, paramValue)
if existingProperty == newProperty:
return False
return True
def addUniqueProperty(newProperty, ignoredParameters=None):
if isUniqueProperty(newProperty, ignoredParameters):
vcard.addProperty(newProperty)
else:
log.info(
"Ignoring property {prop!r} it is a duplicate",
prop=newProperty
)
# =======================================================================
# start
# =======================================================================
log.debug(
"vCardFromRecord: record={record}, forceKind={forceKind}, addProps={addProps}, parentURI={parentURI}",
record=record, forceKind=forceKind, addProps=addProps, parentURI=parentURI)
if forceKind is None:
kind = recordTypeToVCardKindMap.get(record.recordType, "individual")
else:
kind = forceKind
constantProperties = vCardConstantProperties.copy()
if addProps:
for key, value in addProps.iteritems():
if key not in constantProperties:
constantProperties[key] = value
# create vCard
vcard = Component("VCARD")
# add constant properties
for key, value in constantProperties.items():
vcard.addProperty(Property(key, value))
# ===========================================================================
# 2.1 Predefined Type Usage
# ===========================================================================
# 2.1.4 SOURCE Type http://tools.ietf.org/html/rfc2426#section-2.1.4
if parentURI:
uri = joinURL(parentURI, record.fields[FieldName.uid].encode("utf-8") + ".vcf")
# seems like this should be in some standard place.
if (config.EnableSSL or config.BehindTLSProxy) and config.SSLPort:
if config.SSLPort == 443:
source = "https://{server}{uri}".format(server=config.ServerHostName, uri=uri)
else:
source = "https://{server}:{port}{uri}".format(server=config.ServerHostName, port=config.SSLPort, uri=uri)
else:
if config.HTTPPort == 80:
source = "https://{server}{uri}".format(server=config.ServerHostName, uri=uri)
else:
source = "https://{server}:{port}{uri}".format(server=config.ServerHostName, port=config.HTTPPort, uri=uri)
vcard.addProperty(Property("SOURCE", source))
# =======================================================================
# 3.1 IDENTIFICATION TYPES http://tools.ietf.org/html/rfc2426#section-3.1
# =======================================================================
# 3.1.1 FN
vcard.addProperty(Property("FN", record.fields[FieldName.fullNames][0].encode("utf-8")))
# 3.1.2 N
# TODO: Better parsing
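    # Current heuristic: with two or more tokens the first token is treated as
    # the given name and the last as the family name; a middle name is only
    # picked up when the full name has exactly three tokens. Prefixes and
    # suffixes are never extracted.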
fullNameParts = record.fields[FieldName.fullNames][0].split()
first = fullNameParts[0] if len(fullNameParts) >= 2 else None
last = fullNameParts[len(fullNameParts) - 1]
middle = fullNameParts[1] if len(fullNameParts) == 3 else None
prefix = None
suffix = None
nameObject = N(
first=first.encode("utf-8") if first else None,
last=last.encode("utf-8") if last else None,
middle=middle.encode("utf-8") if middle else None,
prefix=prefix.encode("utf-8") if prefix else None,
suffix=suffix.encode("utf-8") if suffix else None,
)
vcard.addProperty(Property("N", nameObject))
# 3.1.3 NICKNAME
nickname = record.fields.get(CalFieldName.abbreviatedName)
if nickname:
vcard.addProperty(Property("NICKNAME", nickname.encode("utf-8")))
# UNIMPLEMENTED
# 3.1.4 PHOTO
# 3.1.5 BDAY
# ============================================================================
# 3.2 Delivery Addressing Types http://tools.ietf.org/html/rfc2426#section-3.2
# ============================================================================
# 3.2.1 ADR
#
# Experimental:
# Use vCard 4.0 ADR: http://tools.ietf.org/html/rfc6350#section-6.3.1
params = {}
geo = record.fields.get(CalFieldName.geographicLocation)
if geo:
params["GEO"] = geo.encode("utf-8")
label = record.fields.get(CalFieldName.streetAddress)
if label:
params["LABEL"] = label.encode("utf-8")
#
extended = record.fields.get(CalFieldName.floor)
# TODO: Parse?
street = record.fields.get(CalFieldName.streetAddress)
city = None
region = None
postalcode = None
country = None
if extended or street or city or region or postalcode or country or params:
params["TYPE"] = ["WORK", "PREF", "POSTAL", "PARCEL", ]
vcard.addProperty(
Property(
"ADR", Adr(
# pobox = box,
extended=extended.encode("utf-8") if extended else None,
street=street.encode("utf-8") if street else None,
locality=city.encode("utf-8") if city else None,
region=region.encode("utf-8") if region else None,
postalcode=postalcode.encode("utf-8") if postalcode else None,
country=country.encode("utf-8") if country else None,
),
params=params
)
)
# 3.2.2 LABEL
# label = record.fields.get(CalFieldName.streetAddress)
if label:
vcard.addProperty(Property("LABEL", label.encode("utf-8"), params={"TYPE": ["POSTAL", "PARCEL", ]}))
# ======================================================================================
# 3.3 TELECOMMUNICATIONS ADDRESSING TYPES http://tools.ietf.org/html/rfc2426#section-3.3
# ======================================================================================
#
# UNIMPLEMENTED
# 3.3.1 TEL
# 3.3.2 EMAIL
preferredWorkParams = {"TYPE": ["WORK", "PREF", "INTERNET", ], }
workParams = {"TYPE": ["WORK", "INTERNET", ], }
params = preferredWorkParams
for emailAddress in record.fields.get(FieldName.emailAddresses, ()):
addUniqueProperty(Property("EMAIL", emailAddress.encode("utf-8"), params=params), ignoredParameters={"TYPE": ["PREF", ]})
params = workParams
# UNIMPLEMENTED:
# 3.3.3 MAILER
#
# =====================================================================
# 3.4 GEOGRAPHICAL TYPES http://tools.ietf.org/html/rfc2426#section-3.4
# =====================================================================
#
# UNIMPLEMENTED:
# 3.4.1 TZ
#
# 3.4.2 GEO
geographicLocation = record.fields.get(CalFieldName.geographicLocation)
if geographicLocation:
vcard.addProperty(Property("GEO", geographicLocation.encode("utf-8")))
# =======================================================================
# 3.5 ORGANIZATIONAL TYPES http://tools.ietf.org/html/rfc2426#section-3.5
# =======================================================================
#
# UNIMPLEMENTED:
# 3.5.1 TITLE
# 3.5.2 ROLE
# 3.5.3 LOGO
# 3.5.4 AGENT
# 3.5.5 ORG
#
# ====================================================================
# 3.6 EXPLANATORY TYPES http://tools.ietf.org/html/rfc2426#section-3.6
# ====================================================================
#
# UNIMPLEMENTED:
# 3.6.1 CATEGORIES
# 3.6.2 NOTE
#
    # ADDED WITH CONSTANT PROPERTIES:
# 3.6.3 PRODID
#
# UNIMPLEMENTED:
# 3.6.5 SORT-STRING
# 3.6.6 SOUND
# 3.6.7 UID
vcard.addProperty(Property("UID", record.fields[FieldName.uid].encode("utf-8")))
# UNIMPLEMENTED:
# 3.6.8 URL
    # ADDED WITH CONSTANT PROPERTIES:
# 3.6.9 VERSION
# ===================================================================
# 3.7 SECURITY TYPES http://tools.ietf.org/html/rfc2426#section-3.7
# ===================================================================
# UNIMPLEMENTED:
# 3.7.1 CLASS
# 3.7.2 KEY
# ===================================================================
# X Properties
# ===================================================================
# UNIMPLEMENTED:
# X-<instant messaging type> such as:
# "AIM", "FACEBOOK", "GAGU-GAGU", "GOOGLE TALK", "ICQ", "JABBER", "MSN", "QQ", "SKYPE", "YAHOO",
# X-MAIDENNAME
# X-PHONETIC-FIRST-NAME
# X-PHONETIC-MIDDLE-NAME
# X-PHONETIC-LAST-NAME
# X-ABRELATEDNAMES
# X-ADDRESSBOOKSERVER-KIND
if kind == "group":
vcard.addProperty(Property("X-ADDRESSBOOKSERVER-KIND", kind))
# add members
# FIXME: members() is a deferred, so all of vCardFromRecord is deferred.
for memberRecord in (yield record.members()):
cua = memberRecord.canonicalCalendarUserAddress(False)
if cua:
vcard.addProperty(Property("X-ADDRESSBOOKSERVER-MEMBER", cua.encode("utf-8")))
# ===================================================================
# vCard 4.0 http://tools.ietf.org/html/rfc6350
# ===================================================================
# UNIMPLEMENTED:
# 6.4.3 IMPP http://tools.ietf.org/html/rfc6350#section-6.4.3
#
# 6.1.4 KIND http://tools.ietf.org/html/rfc6350#section-6.1.4
#
# see also: http://www.iana.org/assignments/vcard-elements/vcard-elements.xml
#
vcard.addProperty(Property("KIND", kind))
# one more X- related to kind
if kind == "org":
vcard.addProperty(Property("X-ABShowAs", "COMPANY"))
log.debug("vCardFromRecord: vcard=\n{vcard}", vcard=vcard)
returnValue(vcard)
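# Usage sketch (illustrative only; the callback shown is a hypothetical
# caller, not part of this module). vCardFromRecord returns a Deferred
# because group membership is fetched asynchronously:
#
#     d = vCardFromRecord(record, parentURI="/addressbooks/")
#     d.addCallback(lambda vcard: str(vcard))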
| apache-2.0 | -1,211,219,899,084,550,100 | 36.616715 | 129 | 0.535203 | false | 3.862977 | true | false | false |
ratoaq2/deluge | deluge/ui/console/cmdline/commands/move.py | 1 | 3154 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Nick Lanham <[email protected]>
#
# This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with
# the additional special exception to link portions of this program with the OpenSSL library.
# See LICENSE for more details.
#
from __future__ import unicode_literals
import logging
import os.path
import deluge.component as component
from deluge.ui.client import client
from . import BaseCommand
log = logging.getLogger(__name__)
class Command(BaseCommand):
"""Move torrents' storage location"""
def add_arguments(self, parser):
parser.add_argument('torrent_ids', metavar='<torrent-id>', nargs='+', help=_('One or more torrent ids'))
parser.add_argument('path', metavar='<path>', help=_('The path to move the torrents to'))
def handle(self, options):
self.console = component.get('ConsoleUI')
if os.path.exists(options.path) and not os.path.isdir(options.path):
self.console.write('{!error!}Cannot Move Download Folder: %s exists and is not a directory' % options.path)
return
ids = []
names = []
for t_id in options.torrent_ids:
tid = self.console.match_torrent(t_id)
ids.extend(tid)
names.append(self.console.get_torrent_name(tid))
def on_move(res):
msg = 'Moved "%s" to %s' % (', '.join(names), options.path)
self.console.write(msg)
log.info(msg)
d = client.core.move_storage(ids, options.path)
d.addCallback(on_move)
return d
def complete(self, line):
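        # Console path completion: given a partial path, return the candidate
        # completions; directories are suffixed with '/' so completion can keep
        # descending into them.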
line = os.path.abspath(os.path.expanduser(line))
ret = []
if os.path.exists(line):
# This is a correct path, check to see if it's a directory
if os.path.isdir(line):
# Directory, so we need to show contents of directory
# ret.extend(os.listdir(line))
for f in os.listdir(line):
# Skip hidden
if f.startswith('.'):
continue
f = os.path.join(line, f)
if os.path.isdir(f):
f += '/'
ret.append(f)
else:
# This is a file, but we could be looking for another file that
# shares a common prefix.
for f in os.listdir(os.path.dirname(line)):
if f.startswith(os.path.split(line)[1]):
ret.append(os.path.join(os.path.dirname(line), f))
else:
            # This path does not exist, so let's do a listdir on its parent
# and find any matches.
ret = []
if os.path.isdir(os.path.dirname(line)):
for f in os.listdir(os.path.dirname(line)):
if f.startswith(os.path.split(line)[1]):
p = os.path.join(os.path.dirname(line), f)
if os.path.isdir(p):
p += '/'
ret.append(p)
return ret
| gpl-3.0 | 7,143,375,092,837,900,000 | 35.252874 | 119 | 0.54629 | false | 4.064433 | false | false | false |
umitproject/openmonitor-desktop-agent | umit/icm/agent/core/PeerInfo.py | 1 | 4244 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 S2S Network Consultoria e Tecnologia da Informacao LTDA
#
# Author: Zhongjie Wang <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import cPickle
from umit.icm.agent.logger import g_logger
from umit.icm.agent.Global import *
from umit.icm.agent.Application import theApp
from umit.icm.agent.Version import PEER_TYPE
"""
This class contains information about the peer. It is not used to represent
other peers; it should only be used to represent the connected peer.
"""
class PeerInfo(object):
""""""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.ID = None
self.Type = PEER_TYPE # normal peer by default
self.Username = ''
self.Password = ''
self.Email = ''
self.CipheredPublicKeyHash = None
self.AuthToken = None
self.local_ip = ''
self.internet_ip = ''
self.is_registered = False
self.is_logged_in = False
self.get_local_ip()
self.get_internet_ip()
def load_from_db(self):
rs = g_db_helper.select('select * from peer_info')
if not rs:
g_logger.info("No peer info in db.")
else:
if len(rs) > 1:
g_logger.warning("More than one record in user_info. " \
"Use the first one.")
g_logger.debug(rs[0])
self.ID = rs[0][0]
self.Username = rs[0][1]
self.Password = rs[0][2]
self.Email = rs[0][3]
self.CipheredPublicKeyHash = rs[0][4]
self.Type = rs[0][5]
self.is_registered = True
def save_to_db(self):
if self.is_registered:
sql_str = "insert or replace into peer_info values " \
"('%s', '%s', '%s', '%s', '%s', %d)" % \
(self.ID, self.Username, self.Password, self.Email,
self.CipheredPublicKeyHash, self.Type)
g_logger.info("[save_to_db]:save %s into DB"%sql_str)
g_db_helper.execute(sql_str)
g_db_helper.commit()
def clear_db(self):
g_db_helper.execute("delete from peer_info")
g_db_helper.commit()
def get_local_ip(self):
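        # Determine the local outbound address by "connecting" a UDP socket to
        # a public host: connect() on SOCK_DGRAM sends no packets, but it makes
        # the kernel pick a source address, which getsockname() then reports.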
from socket import socket, SOCK_DGRAM, AF_INET
ip_urls = ["www.google.com", "www.baidu.com"]
for each in ip_urls:
try:
s = socket(AF_INET, SOCK_DGRAM)
s.settimeout(3)
s.connect((each, 0))
ip = s.getsockname()[0]
self.local_ip = ip
#print(each, ip)
break
except:
pass
def get_internet_ip(self):
from twisted.web.client import getPage
ip_urls = ["http://whereismyip.com/", "http://www.whereismyip.org/",
"http://myip.eu/"]
for each in ip_urls:
getPage(each).addCallback(self._handle_get_internet_ip)
    def _handle_get_internet_ip(self, data):
        import re
        # Run the regex once and reuse the match object
        match = re.search(r'\d+\.\d+\.\d+\.\d+', data)
        if match is None:
            return
        #print(data, match.group(0))
        self.internet_ip = match.group(0)
if __name__ == "__main__":
pi = PeerInfo()
pi.load_from_db() | gpl-2.0 | -8,304,608,539,564,153,000 | 33.680672 | 102 | 0.53935 | false | 3.739207 | false | false | false |
ducted/duct | duct/sources/linux/sensors.py | 1 | 7282 | """
.. module:: sensors
:platform: unix
:synopsis: Provides checks for system sensors and SMART devices
.. moduleauthor:: Colin Alston <[email protected]>
"""
import os
from zope.interface import implementer
from twisted.internet import defer
from duct.interfaces import IDuctSource
from duct.objects import Source
@implementer(IDuctSource)
class Sensors(Source):
"""Returns hwmon sensors info
    Note: there is no transformation done on the values; they may be in
    thousands.
**Metrics:**
:(service name).(adapter).(sensor): Sensor value
"""
def _find_sensors(self):
path = '/sys/class/hwmon'
sensors = {}
# Find adapters
if os.path.exists(path):
monitors = os.listdir(path)
for hwmons in monitors:
mon_path = os.path.join(path, hwmons)
name_path = os.path.join(mon_path, 'name')
if os.path.exists(name_path):
with open(name_path, 'rt') as name_file:
name = name_file.read().strip()
else:
name = None
if name not in sensors:
sensors[name] = {}
sensor_map = {}
# Find sensors in this adapter
for mon_file in os.listdir(mon_path):
if mon_file.startswith('temp') or mon_file.startswith(
'fan'):
tn = mon_file.split('_')[0]
sensor_path = os.path.join(mon_path, mon_file)
if tn not in sensor_map:
sensor_map[tn] = [None, 0]
if mon_file.endswith('_input'):
with open(sensor_path, 'rt') as value_file:
value = int(value_file.read().strip())
if mon_file.startswith('temp'):
value = value / 1000.0
sensor_map[tn][1] = value
if mon_file.endswith('_label'):
with open(sensor_path, 'rt') as value_file:
sensor_name = value_file.read().strip()
sensor_map[tn][0] = sensor_name
for sensor_name, value in sensor_map.values():
if sensor_name:
filtered_name = sensor_name.lower().replace(' ', '_')
sensors[name][filtered_name] = value
return sensors
def get(self):
sensors = self._find_sensors()
events = []
for adapter, v in sensors.items():
for sensor, val in v.items():
events.append(
self.createEvent('ok',
'Sensor %s:%s - %s' % (
adapter, sensor, val),
val,
prefix='%s.%s' % (adapter, sensor,)))
return events
@implementer(IDuctSource)
class LMSensors(Source):
"""Returns lm-sensors output
This does the exact same thing as the Sensors class but uses lm-sensors.
**Metrics:**
:(service name).(adapter).(sensor): Sensor value
"""
ssh = True
@defer.inlineCallbacks
def _get_sensors(self):
out, _err, code = yield self.fork('/usr/bin/sensors')
if code == 0:
defer.returnValue(out.strip('\n').split('\n'))
else:
defer.returnValue([])
def _parse_sensors(self, sensors):
adapters = {}
adapter = None
for i in sensors:
l = i.strip()
if not l:
continue
if ':' in l:
n, v = l.split(':')
vals = v.strip().split()
if n == 'Adapter':
continue
if '\xc2\xb0' in vals[0]:
val = vals[0].split('\xc2\xb0')[0]
elif len(vals) > 1:
val = vals[0]
else:
continue
val = float(val)
adapters[adapter][n] = val
else:
adapter = l
adapters[adapter] = {}
return adapters
@defer.inlineCallbacks
def get(self):
sensors = yield self._get_sensors()
adapters = self._parse_sensors(sensors)
events = []
for adapter, v in adapters.items():
for sensor, val in v.items():
events.append(
self.createEvent('ok',
'Sensor %s:%s - %s' % (
adapter, sensor, val),
val,
prefix='%s.%s' % (adapter, sensor,)))
defer.returnValue(events)
@implementer(IDuctSource)
class SMART(Source):
"""Returns SMART output for all disks
**Metrics:**
:(service name).(disk).(sensor): Sensor value
"""
ssh = True
def __init__(self, *a, **kw):
Source.__init__(self, *a, **kw)
self.devices = []
@defer.inlineCallbacks
def _get_disks(self):
out, _err, code = yield self.fork('/usr/sbin/smartctl',
args=('--scan',))
if code != 0:
defer.returnValue([])
out = out.strip('\n').split('\n')
devices = []
for ln in out:
if '/dev' in ln:
devices.append(ln.split()[0])
defer.returnValue(devices)
@defer.inlineCallbacks
def _get_smart(self, device):
out, _err, code = yield self.fork('/usr/sbin/smartctl',
args=('-A', device))
if code == 0:
defer.returnValue(out.strip('\n').split('\n'))
else:
defer.returnValue([])
def _parse_smart(self, smart):
mark = False
attributes = {}
for l in smart:
ln = l.strip('\n').strip()
if not ln:
continue
if mark:
(_id, attribute, _flag, _val, _worst, _thresh, _type, _u, _wf,
raw) = ln.split(None, 9)
try:
raw = int(raw.split()[0])
attributes[attribute.replace('_', ' ')] = raw
except:
pass
if ln[:3] == 'ID#':
mark = True
return attributes
@defer.inlineCallbacks
def get(self):
if not self.devices:
self.devices = yield self._get_disks()
events = []
for disk in self.devices:
smart = yield self._get_smart(disk)
stats = self._parse_smart(smart)
for sensor, val in stats.items():
events.append(
self.createEvent('ok',
'Attribute %s:%s - %s' % (
disk, sensor, val),
val,
prefix='%s.%s' % (disk, sensor,))
)
defer.returnValue(events)
| mit | -3,479,305,473,948,432,400 | 27.896825 | 78 | 0.440538 | false | 4.571249 | false | false | false |
helicontech/zoo | Zoocmd/cmdline/argument_parser.py | 1 | 4696 | # -*- coding: utf-8 -*-
import argparse
"""Description of arguments of command line interface"""
def create_parser():
"""Create command line arguments parser"""
parser = argparse.ArgumentParser(description='Helicon Zoo command line')
# print settings from settings.yaml
parser.add_argument('--get-settings', action='store_true', help='get current settings')
# write settings to settings.yaml
parser.add_argument('--set-settings', dest="set_settings", nargs="+", help='set settings')
# set urls of additional feeds
parser.add_argument('--feed-urls', dest='urls', nargs='*', default='', help='feed urls to load')
# print installed products
parser.add_argument('--list-installed', action='store_true', dest='show_installed',
help='show all installed programs')
# print installed products
parser.add_argument('--run-tests', action='store_true', dest='run_test',
help='run tests over software')
# print all products
parser.add_argument('--list', action='store_true', dest='list_products',
help='list latest versions of all available products')
# custom settings path
parser.add_argument('--settings', dest='settings', default=None, help='search the settings in custom directory')
# custom data dir
parser.add_argument('--data-dir', dest='data_dir', default=None, help='default data directory')
# search products
parser.add_argument('--search', dest='search', help='search products for name and descriptions')
# search installed products and write they to current.yaml
parser.add_argument('--sync', action='store_true', dest='sync', help='synchronize installed version of products from system')
# set products to install pr uninstall
parser.add_argument('--products', dest='products', nargs="*", help='product names to install/uninstall')
# make intstall
parser.add_argument('--install', dest='install', action='store_true', help='install of specified products')
# set install parameters for products to install
parser.add_argument('--parameters', dest='parameters', nargs="?", help="application install parameters\n\
Format: --parameters param1=val1 product2@param2=val2 ...")
# set install parameters for products to install
parser.add_argument('-pj', '--data-parameters-json',
dest='json_params',
nargs="?",
help="install with parameters in file json format")
# set install parameters for products to install
parser.add_argument('-py', '--data-parameters',
dest='yml_params',
nargs="?",
help="install with parameters in file yaml format")
# make uninstall
parser.add_argument('--uninstall', action='store_true', dest='uninstall', help='uninstall a program')
    # quiet mode
parser.add_argument('-q', '--quiet', action='store_true', dest='quiet', default=False,
help='don\'t print anything to stdout')
# allow communicate with user during install process
parser.add_argument('-i', '--interactive', action='store_true', dest='interactive', default=False,
help='allow to ask install parameters if needed')
# ignore any errors
parser.add_argument('-f', '--force', action='store_true', dest='force', default=False, help='ignore exit code')
# set log level
parser.add_argument('--log-level', dest='log_level', default=None,
help='set log level (debug, warning, info, error, critical)')
# start ui web server
parser.add_argument('--run-server', dest='run_server', action='store_true', help='run web ui at http://localhost:7799/')
# set ui server port
parser.add_argument('--run-server-addr', dest='run_server_addr', default='7799', help='bind web ui server to "addr:port" or port')
# start install/unstall task
parser.add_argument('--start-install-worker', dest='worker_task_id', type=int, help='start supervisour worker')
parser.add_argument('-l', '--task-log', dest='task_log',
nargs="?", default=None,
help='specify log for task if not specified will print to stdout')
parser.add_argument('--task-work', dest='task_id', type=int, help='start installer worker with task by id')
# compile zoo feed from dest to src
parser.add_argument('--compile-repository', nargs=2, dest="zoo_compile",
                        help='compile zoo feed; first argument is the source feed directory, second the destination feed')
return parser
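# Illustrative usage sketch (added example, not part of the original module): exercising
# the parser built above without touching sys.argv. The argument strings below are
# assumptions chosen purely for demonstration; the real zoocmd entry point consumes the
# parsed namespace elsewhere.
if __name__ == '__main__':
    _parser = create_parser()
    _args = _parser.parse_args(['--list', '--log-level', 'debug'])
    print('list_products=%s log_level=%s' % (_args.list_products, _args.log_level))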
| apache-2.0 | 5,404,428,320,272,622,000 | 40.557522 | 134 | 0.640332 | false | 4.421846 | false | false | false |
lenards/pretty-doge | doge/image.py | 1 | 1650 | """
Magically manipulate and return the doge image.
"""
from math import floor
import numpy as np
from PIL import Image
from colorsys import rgb_to_hls, hls_to_rgb
LIGHT = (172, 143, 239)
DARK = (134, 30, 214)
def scrub_rgb(c):
return (int(floor(c[0] * 255)),
int(floor(c[1] * 255)),
int(floor(c[2] * 255)))
def get_color_pallete(c):
"""
    Given a color c (RGB), return a new (light, dark) color pair (RGB).
"""
hls_d = rgb_to_hls(c[0]/255., c[1]/255., c[2]/255.)
# Magic numbers are the diff from hls(DARK) and hls(LIGHT).
hls = (hls_d[0] - 0.04385, hls_d[1] + 0.27059, hls_d[2])
new_dark = scrub_rgb(hls_to_rgb(hls_d[0], hls_d[1], hls_d[2]))
new_light = scrub_rgb(hls_to_rgb(hls[0], hls[1], hls[2]))
return new_light, new_dark
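# Illustrative sketch (added example, not part of the original module): derive the
# light/dark pair that ImageManager substitutes for the template's LIGHT and DARK
# pixels. The sample RGB value is an assumption used only for demonstration.
def _demo_palette(sample_rgb=(220, 120, 40)):
    light, dark = get_color_pallete(sample_rgb)
    return {"light": light, "dark": dark}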
class ImageManager(object):
"""
Manages the doge template and does the conversion.
"""
_light = LIGHT
_dark = DARK
def __init__(self, image=None, light=None, dark=None):
if light:
self._light = light
if dark:
self._dark = dark
if not image:
self._image = Image.open("doge.png").convert("RGB")
else:
self._image = image.convert("RGB")
self._data = np.array(self._image.convert("RGB"))
def put_color(self, c):
new_light, new_dark = get_color_pallete(c)
return self.put(new_light, new_dark)
def put(self, new_light, new_dark):
data = np.copy(self._data)
data[(data == LIGHT).all(axis=-1)] = new_light
data[(data == DARK).all(axis=-1)] = new_dark
return Image.fromarray(data, mode="RGB")
| bsd-3-clause | 4,863,042,658,398,833,000 | 26.04918 | 66 | 0.569697 | false | 2.941176 | false | false | false |
datamindedbe/train-occupancy | ingest.py | 1 | 2244 | import argparse
from datetime import datetime, timedelta
import dateutil.relativedelta
from config import CONNECTION_STRING
from ingestion.etl import Etl
from ingestion.ingest import Ingest
CONNECTIONS_URL = 'http://graph.spitsgids.be/connections/?departureTime='
STATIONS_URL = 'https://irail.be/stations/NMBS'
FEEDBACK_URL = 'https://gtfs.irail.be/nmbs/feedback/occupancy-until-20161029.newlinedelimitedjsonobjects'
def valid_date(s):
try:
return datetime.strptime(s, "%Y-%m-%d")
except ValueError:
msg = "Not a valid date: '{0}'.".format(s)
raise argparse.ArgumentTypeError(msg)
parser = argparse.ArgumentParser(description='Parse ingest options')
# Switch
parser.add_argument('-w', '--wipe', action='store_const', const=True,
help='Wipe the database. Will drop all tables. Default is FALSE')
parser.add_argument('-f', '--forceIngest', action='store_const', const=True,
help="Don't skip existing ingest files. Default is FALSE")
parser.add_argument('-s', "--startDate", required=False, type=valid_date,
help="The Start Date - format YYYY-MM-DD. Default is 1 month ago.")
parser.add_argument('-e', "--endDate", required=False, type=valid_date,
help="The End Date - format YYYY-MM-DD. Default is now()")
parser.add_argument('-o', "--outputFolder", required=False,
help="The folder in which to store the files. Default is 'data/'")
args = parser.parse_args()
if args.endDate is not None:
END = args.endDate
else:
END = datetime.now()
END = END - timedelta(minutes=END.minute % 10, seconds=END.second, microseconds=END.microsecond)
if args.startDate is not None:
START = args.startDate
else:
START = END - dateutil.relativedelta.relativedelta(months=1)
WIPE = args.wipe if args.wipe is not None else False
FOLDER = args.outputFolder if args.outputFolder is not None else 'data'
FORCE_INGEST = args.forceIngest if args.forceIngest is not None else False
print "Ingesting from %s to %s. Initialize=%s" % (START, END, WIPE)
ingest = Ingest(CONNECTIONS_URL, STATIONS_URL, FEEDBACK_URL, START, END, FOLDER, FORCE_INGEST)
ingest.run()
etl = Etl(CONNECTION_STRING, FOLDER, WIPE)
etl.run()
| apache-2.0 | -4,994,390,314,920,525,000 | 39.071429 | 105 | 0.698752 | false | 3.415525 | false | false | false |
OmkarPathak/pygorithm | imgs/test_geometry/test_extrapolated_intersection/af_test_line_line_touching.py | 1 | 1371 | from utils import create_newfig, create_moving_line, create_still_segment, run_or_export
func_code = 'af'
func_name = 'test_line_line_touching'
def setup_fig01():
fig, ax, renderer = create_newfig('{}01'.format(func_code))
create_moving_line(fig, ax, renderer, (1, 3), (2, 3), (3, -3), 'top')
create_still_segment(fig, ax, renderer, (3, 3), (5, 0), 'topright')
return fig, ax, '{}01_{}'.format(func_code, func_name)
def setup_fig02():
fig, ax, renderer = create_newfig('{}02'.format(func_code))
create_moving_line(fig, ax, renderer, (1, 1), (2, 1), (1, 1), 'bot')
create_still_segment(fig, ax, renderer, (3, 2), (3, 3), 'right')
return fig, ax, '{}02_{}'.format(func_code, func_name)
def setup_fig03():
fig, ax, renderer = create_newfig('{}03'.format(func_code))
create_moving_line(fig, ax, renderer, (1, 1), (2, 1), (2, 2), 'bot')
create_still_segment(fig, ax, renderer, (2, 3), (3, 3), 'top')
return fig, ax, '{}03_{}'.format(func_code, func_name)
def setup_fig04():
fig, ax, renderer = create_newfig('{}04'.format(func_code))
create_moving_line(fig, ax, renderer, (1, 1), (2, 1), (0, 2), 'bot')
create_still_segment(fig, ax, renderer, (2, 3), (3, 3), 'top')
return fig, ax, '{}04_{}'.format(func_code, func_name)
run_or_export(setup_fig01, setup_fig02, setup_fig03, setup_fig04) | mit | 4,029,336,342,482,772,000 | 39.352941 | 88 | 0.603939 | false | 2.747495 | false | false | false |
ezequielpereira/Time-Line | specs/FileTimeline.py | 2 | 5917 | # Copyright (C) 2009, 2010, 2011 Rickard Lindberg, Roger Lindberg
#
# This file is part of Timeline.
#
# Timeline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Timeline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Timeline. If not, see <http://www.gnu.org/licenses/>.
import datetime
import unittest
from specs.utils import TmpDirTestCase
from timelinelib.db.backends.file import dequote
from timelinelib.db.backends.file import FileTimeline
from timelinelib.db.backends.file import quote
from timelinelib.db.backends.file import split_on_semicolon
from timelinelib.db.exceptions import TimelineIOError
from timelinelib.db.objects import TimePeriod
from timelinelib.drawing.viewproperties import ViewProperties
from timelinelib.time import PyTimeType
class FileTimelineSpec(TmpDirTestCase):
IO = True
def testCorruptData(self):
"""
Scenario: You open a timeline that contains corrupt data.
Expected result: You get an exception and you can not use the timeline.
"""
self.assertRaises(TimelineIOError, FileTimeline, self.corrupt_file)
def testMissingEOF(self):
"""
Scenario: A timeline is opened that contains no corrupt data. However,
no end of file marker is found.
Expected result: The timeline should be treated as corrupt.
"""
self.assertRaises(TimelineIOError, FileTimeline, self.missingeof_file)
def testAddingEOF(self):
"""
Scenario: You open an old timeline < 0.3.0 with a client >= 0.3.0.
Expected result: The timeline does not contain the EOF marker but since
it is an old file, no exception should be raised.
"""
FileTimeline(self._021_file)
def testInvalidTimePeriod(self):
"""
Scenario: You open a timeline that has a PREFERRED-PERIOD of length 0.
Expected result: Even if this is a valid value for a TimePeriod it
should not be a valid PREFERRED-PERIOD. The length must be > 0. So we
should get an error when trying to read this.
"""
self.assertRaises(TimelineIOError, FileTimeline,
self.invalid_time_period_file)
def testSettingInvalidPreferredPeriod(self):
"""
Scenario: You try to assign a preferred period whose length is 0.
Expected result: You should get an error.
"""
timeline = FileTimeline(self.valid_file)
now = datetime.datetime.now()
zero_tp = TimePeriod(PyTimeType(), now, now)
vp = ViewProperties()
vp.displayed_period = zero_tp
self.assertRaises(TimelineIOError, timeline.save_view_properties, vp)
def setUp(self):
TmpDirTestCase.setUp(self)
# Create temporary dir and names
self.corrupt_file = self.get_tmp_path("corrupt.timeline")
self.missingeof_file = self.get_tmp_path("missingeof.timeline")
self._021_file = self.get_tmp_path("021.timeline")
self.invalid_time_period_file = self.get_tmp_path("invalid_time_period.timeline")
self.valid_file = self.get_tmp_path("valid.timeline")
# Write content to files
HEADER_030 = "# Written by Timeline 0.3.0 on 2009-7-23 9:40:33"
HEADER_030_DEV = "# Written by Timeline 0.3.0dev on 2009-7-23 9:40:33"
HEADER_021 = "# Written by Timeline 0.2.1 on 2009-7-23 9:40:33"
self.write_timeline(self.corrupt_file, ["corrupt data here"])
self.write_timeline(self.missingeof_file, ["# valid data"])
self.write_timeline(self._021_file, [HEADER_021])
invalid_time_period = [
"# Written by Timeline 0.5.0dev785606221dc2 on 2009-9-22 19:1:10",
"PREFERRED-PERIOD:2008-12-9 11:32:26;2008-12-9 11:32:26",
"CATEGORY:Work;173,216,230;True",
"CATEGORY:Private;200,200,200;True",
"EVENT:2009-7-13 0:0:0;2009-7-18 0:0:0;Programming course;Work",
"EVENT:2009-7-10 14:30:0;2009-7-10 14:30:0;Go to dentist;Private",
"EVENT:2009-7-20 0:0:0;2009-7-27 0:0:0;Vacation;Private",
"# END",
]
self.write_timeline(self.invalid_time_period_file, invalid_time_period)
valid = [
"# Written by Timeline 0.5.0 on 2009-9-22 19:1:10",
"# END",
]
self.write_timeline(self.valid_file, valid)
def write_timeline(self, path, lines):
f = file(path, "w")
f.write("\n".join(lines))
f.close()
class FileTimelineQuoteFunctionsSpec(unittest.TestCase):
def testQuote(self):
# None
self.assertEqual(quote("plain"), "plain")
# Single
self.assertEqual(quote("foo;bar"), "foo\\;bar")
self.assertEqual(quote("foo\nbar"), "foo\\nbar")
self.assertEqual(quote("foo\\bar"), "foo\\\\bar")
self.assertEqual(quote("foo\\nbar"), "foo\\\\nbar")
self.assertEqual(quote("\\;"), "\\\\\\;")
# Mixed
self.assertEqual(quote("foo\nbar\rbaz\\n;;"),
"foo\\nbar\\rbaz\\\\n\\;\\;")
def testDequote(self):
self.assertEqual(dequote("\\\\n"), "\\n")
def testQuoteDequote(self):
for s in ["simple string", "with; some;; semicolons",
"with\r\n some\n\n newlines\n"]:
self.assertEqual(s, dequote(quote(s)))
def testSplit(self):
self.assertEqual(split_on_semicolon("one;two\\;;three"),
["one", "two\\;", "three"])
| gpl-3.0 | 8,495,560,757,091,305,000 | 38.446667 | 89 | 0.641541 | false | 3.693508 | true | false | false |
indico/cephalopod | mereswine/cli.py | 1 | 2467 | import click
import logging
import mondrian
from celery.bin.celery import CeleryCommand, command_classes
from flask import current_app
from flask.cli import FlaskGroup, with_appcontext
# XXX: Do not import any mereswine modules here!
# If any import from this module triggers an exception the dev server
# will die while an exception only happening during app creation will
# be handled gracefully.
def _create_app(info):
from .factory import make_app
return make_app()
def shell_ctx():
from .core import db
ctx = {'db': db}
ctx.update((name, cls) for name, cls in db.Model._decl_class_registry.items() if hasattr(cls, '__table__'))
return ctx
def register_shell_ctx(app):
app.shell_context_processor(shell_ctx)
@click.group(cls=FlaskGroup, create_app=_create_app)
@with_appcontext
def cli():
"""
This script lets you control various aspects of Mereswine from the
command line.
"""
logger = logging.getLogger()
mondrian.setup(excepthook=True)
logger.setLevel(logging.DEBUG if current_app.debug else logging.INFO)
@cli.group(name='db')
def db_cli():
"""DB management commands"""
pass
@db_cli.command()
def drop():
"""Drop all database tables"""
from .core import db
if click.confirm('Are you sure you want to lose all your data?'):
db.drop_all()
@db_cli.command()
def create():
"""Create database tables"""
from .core import db
db.create_all()
@db_cli.command()
def recreate():
"""Recreate database tables (same as issuing 'drop' and then 'create')"""
from .core import db
if click.confirm('Are you sure you want to lose all your data?'):
db.drop_all()
db.create_all()
@cli.command()
@click.option('--uuid', help="UUID of server to crawl")
def crawl(uuid):
"""Crawl all instances, or a given UUID if passed"""
from .crawler import crawl_instance, crawl_all
if uuid is not None:
crawl_instance(uuid)
else:
crawl_all()
@cli.command(context_settings={'ignore_unknown_options': True, 'allow_extra_args': True}, add_help_option=False)
@click.pass_context
def celery(ctx):
"""Manage the Celery task daemon."""
from .tasks import celery
# remove the celery shell command
next(funcs for group, funcs, _ in command_classes if group == 'Main').remove('shell')
del CeleryCommand.commands['shell']
CeleryCommand(celery).execute_from_commandline(['mereswine celery'] + ctx.args)
| gpl-3.0 | -7,071,746,143,821,515,000 | 25.815217 | 112 | 0.684232 | false | 3.570188 | false | false | false |
malkoto1/just_cook | backend/sources/utils/skeleton.py | 1 | 1144 | __author__ = 'Vojda'
class User:
"""
This is the user class
"""
@classmethod
def from_dict(cls, object_dict):
return User(object_dict['username'], object_dict['password'], object_dict['admin'])
def __init__(self, username, password, admin=False):
self.username = username
self.password = password
self.admin = admin
def to_json(self):
return "{}"
class Recipe:
"""
Recipe class representing the recipes in the db
"""
@classmethod
def from_dict(cls, object_dict):
return Recipe(object_dict['name'], object_dict['products'], object_dict['description'], object_dict['checked'], object_dict['_id'])
def __init__(self, name, products, description, checked=False, idd=None):
self.name = name
self.products = products
self.description = description
self.checked = checked
self._id = idd
class Cookie:
"""
A cookie representation:
{"hash": "SOME HASH",
"Expires": miliseconds
}
"""
def __init__(self, hash, expires):
self.hash = hash
self.expires = expires | gpl-2.0 | -8,236,627,754,515,718,000 | 22.367347 | 139 | 0.590909 | false | 4 | false | false | false |
miccrun/brewer | auth/views.py | 1 | 1348 |
from django.contrib.auth import login, logout
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django.utils.decorators import method_decorator
from django.views.generic import (
FormView,
View,
)
from auth.forms import (
LoginForm,
RegisterForm,
)
class LoginRequiredMixin(object):
@method_decorator(login_required(redirect_field_name='redirect'))
def dispatch(self, *args, **kwargs):
return super(LoginRequiredMixin, self).dispatch(*args, **kwargs)
class AuthView(FormView):
def form_valid(self, form):
user = form.save()
login(self.request, user)
return super(AuthView, self).form_valid(form)
class LoginView(AuthView):
template_name = 'auth/login.html'
form_class = LoginForm
def get_success_url(self):
return self.request.POST.get('redirect', '/')
def get_context_data(self, **kwargs):
context = super(LoginView, self).get_context_data(**kwargs)
context['redirect'] = self.request.GET.get('redirect', '/')
return context
class RegisterView(AuthView):
template_name = 'auth/register.html'
form_class = RegisterForm
success_url = '/'
class LogoutView(View):
def get(self, request):
logout(request)
return HttpResponseRedirect('/')
| mit | 7,860,899,273,064,127,000 | 23.962963 | 72 | 0.683976 | false | 3.884726 | false | false | false |
dcf21/4most-4gp-scripts | src/helper_code/base_synthesizer.py | 1 | 14736 | # -*- coding: utf-8 -*-
"""
Framework for code to synthesise a library of spectra.
"""
import argparse
import hashlib
import json
import logging
import os
import re
import sqlite3
import time
from os import path as os_path
from fourgp_speclib import SpectrumLibrarySqlite, Spectrum
from fourgp_specsynth import TurboSpectrum
from fourgp_telescope_data import FourMost
class Synthesizer:
# Convenience function to provide dictionary access to rows of an astropy table
@staticmethod
def astropy_row_to_dict(x):
return dict([(i, x[i]) for i in x.columns])
# Read input parameters
def __init__(self, library_name, logger, docstring, root_path="../../../..", spectral_resolution=50000):
self.logger = logger
self.our_path = os_path.split(os_path.abspath(__file__))[0]
self.root_path = os_path.abspath(os_path.join(self.our_path, root_path, ".."))
self.pid = os.getpid()
self.spectral_resolution = spectral_resolution
parser = argparse.ArgumentParser(description=docstring)
parser.add_argument('--output-library',
required=False,
default="turbospec_{}".format(library_name),
dest="library",
help="Specify the name of the SpectrumLibrary we are to feed synthesized spectra into.")
parser.add_argument('--workspace', dest='workspace', default="",
help="Directory where we expect to find spectrum libraries.")
parser.add_argument('--create',
required=False,
action='store_true',
dest="create",
help="Create a clean SpectrumLibrary to feed synthesized spectra into")
parser.add_argument('--no-create',
required=False,
action='store_false',
dest="create",
help="Do not create a clean SpectrumLibrary to feed synthesized spectra into")
parser.set_defaults(create=True)
parser.add_argument('--log-dir',
required=False,
default="/tmp/turbospec_{}_{}".format(library_name, self.pid),
dest="log_to",
help="Specify a log directory where we log our progress and configuration files.")
parser.add_argument('--dump-to-sqlite-file',
required=False,
default="",
dest="sqlite_out",
help="Specify an sqlite3 filename where we dump the stellar parameters of the stars.")
parser.add_argument('--line-lists-dir',
required=False,
default=self.root_path,
dest="lines_dir",
help="Specify a directory where line lists for TurboSpectrum can be found.")
parser.add_argument('--elements',
required=False,
default="",
dest="elements",
help="Only read the abundances of a comma-separated list of elements, and use scaled-solar "
"abundances for everything else.")
parser.add_argument('--binary-path',
required=False,
default=self.root_path,
dest="binary_path",
help="Specify a directory where Turbospectrum and Interpol packages are installed.")
parser.add_argument('--every',
required=False,
default=1,
type=int,
dest="every",
help="Only process every nth spectrum. "
"This is useful when parallelising this script across multiple processes.")
parser.add_argument('--skip',
required=False,
default=0,
type=int,
dest="skip",
help="Skip n spectra before starting to process every nth. "
"This is useful when parallelising this script across multiple processes.")
parser.add_argument('--limit',
required=False,
default=0,
type=int,
dest="limit",
help="Only process a maximum of n spectra.")
self.args = parser.parse_args()
logging.info("Synthesizing {} to <{}>".format(library_name, self.args.library))
# Set path to workspace where we create libraries of spectra
self.workspace = (self.args.workspace if self.args.workspace else
os_path.abspath(os_path.join(self.our_path, root_path, "workspace")))
os.system("mkdir -p {}".format(self.workspace))
def set_star_list(self, star_list):
self.star_list = star_list
        # Ensure that every star has a name; number stars if not
for i, item in enumerate(self.star_list):
if 'name' not in item:
item['name'] = "star_{:08d}".format(i)
# Ensure that every star has free_abundances and extra metadata
for i, item in enumerate(self.star_list):
if 'free_abundances' not in item:
item['free_abundances'] = {}
if 'extra_metadata' not in item:
item['extra_metadata'] = {}
if 'microturbulence' not in item:
item['microturbulence'] = 1
# Ensure that we have a table of input data to dump to SQLite, if requested
for item in self.star_list:
if 'input_data' not in item:
item['input_data'] = {'name': item['name'],
'Teff': item['Teff'],
'[Fe/H]': item['[Fe/H]'],
'logg': item['logg']}
item['input_data'].update(item['free_abundances'])
item['input_data'].update(item['extra_metadata'])
if 'name' not in item['input_data']:
item['input_data']['name'] = item['name']
def dump_stellar_parameters_to_sqlite(self):
# Output data into sqlite3 db
if self.args.sqlite_out:
os.system("rm -f {}".format(self.args.sqlite_out))
conn = sqlite3.connect(self.args.sqlite_out)
c = conn.cursor()
columns = []
for col_name, col_value in list(self.star_list[0]['input_data'].items()):
col_type_str = isinstance(col_value, str)
columns.append("{} {}".format(col_name, "TEXT" if col_type_str else "REAL"))
c.execute("CREATE TABLE stars (uid INTEGER PRIMARY KEY, {});".format(",".join(columns)))
for i, item in enumerate(self.star_list):
print(("Writing sqlite parameter dump: %5d / %5d" % (i, len(self.star_list))))
c.execute("INSERT INTO stars (name) VALUES (?);", (item['input_data']['name'],))
uid = c.lastrowid
for col_name in item['input_data']:
if col_name == "name":
continue
arguments = (
str(item['input_data'][col_name]) if isinstance(item['input_data'][col_name], str)
else float(item['input_data'][col_name]),
uid
)
c.execute("UPDATE stars SET %s=? WHERE uid=?;" % col_name, arguments)
conn.commit()
conn.close()
def create_spectrum_library(self):
# Create new SpectrumLibrary
self.library_name = re.sub("/", "_", self.args.library)
self.library_path = os_path.join(self.workspace, self.library_name)
self.library = SpectrumLibrarySqlite(path=self.library_path, create=self.args.create)
# Invoke FourMost data class. Ensure that the spectra we produce are much higher resolution than 4MOST.
# We down-sample them later to whatever resolution we actually want.
self.FourMostData = FourMost()
self.lambda_min = self.FourMostData.bands["LRS"]["lambda_min"]
self.lambda_max = self.FourMostData.bands["LRS"]["lambda_max"]
self.line_lists_path = self.FourMostData.bands["LRS"]["line_lists_edvardsson"]
# Invoke a TurboSpectrum synthesizer instance
self.synthesizer = TurboSpectrum(
turbospec_path=os_path.join(self.args.binary_path, "turbospectrum-15.1/exec-gf-v15.1"),
interpol_path=os_path.join(self.args.binary_path, "interpol_marcs"),
line_list_paths=[os_path.join(self.args.lines_dir, self.line_lists_path)],
marcs_grid_path=os_path.join(self.args.binary_path, "fromBengt/marcs_grid"))
self.synthesizer.configure(lambda_min=self.lambda_min,
lambda_max=self.lambda_max,
lambda_delta=float(self.lambda_min) / self.spectral_resolution,
line_list_paths=[os_path.join(self.args.lines_dir, self.line_lists_path)],
stellar_mass=1)
self.counter_output = 0
# Start making log output
os.system("mkdir -p {}".format(self.args.log_to))
self.logfile = os.path.join(self.args.log_to, "synthesis.log")
def do_synthesis(self):
# Iterate over the spectra we're supposed to be synthesizing
with open(self.logfile, "w") as result_log:
for star in self.star_list:
star_name = star['name']
unique_id = hashlib.md5(os.urandom(32)).hexdigest()[:16]
metadata = {
"Starname": str(star_name),
"uid": str(unique_id),
"Teff": float(star['Teff']),
"[Fe/H]": float(star['[Fe/H]']),
"logg": float(star['logg']),
"microturbulence": float(star["microturbulence"])
}
# User can specify that we should only do every nth spectrum, if we're running in parallel
self.counter_output += 1
if (self.args.limit > 0) and (self.counter_output > self.args.limit):
break
if (self.counter_output - self.args.skip) % self.args.every != 0:
continue
# Pass list of the abundances of individual elements to TurboSpectrum
free_abundances = dict(star['free_abundances'])
for element, abundance in list(free_abundances.items()):
metadata["[{}/H]".format(element)] = float(abundance)
# Propagate all ionisation states into metadata
metadata.update(star['extra_metadata'])
# Configure Turbospectrum with the stellar parameters of the next star
self.synthesizer.configure(
t_eff=float(star['Teff']),
metallicity=float(star['[Fe/H]']),
log_g=float(star['logg']),
stellar_mass=1 if "stellar_mass" not in star else star["stellar_mass"],
turbulent_velocity=1 if "microturbulence" not in star else star["microturbulence"],
free_abundances=free_abundances
)
# Make spectrum
time_start = time.time()
turbospectrum_out = self.synthesizer.synthesise()
time_end = time.time()
# Log synthesizer status
logfile_this = os.path.join(self.args.log_to, "{}.log".format(star_name))
open(logfile_this, "w").write(json.dumps(turbospectrum_out))
# Check for errors
errors = turbospectrum_out['errors']
if errors:
result_log.write("[{}] {:6.0f} sec {}: {}\n".format(time.asctime(),
time_end - time_start,
star_name,
errors))
logging.warn("Star <{}> could not be synthesised. Errors were: {}".
format(star_name, errors))
result_log.flush()
continue
else:
logging.info("Synthesis completed without error.")
# Fetch filename of the spectrum we just generated
filepath = os_path.join(turbospectrum_out["output_file"])
# Insert spectrum into SpectrumLibrary
try:
filename = "spectrum_{:08d}".format(self.counter_output)
# First import continuum-normalised spectrum, which is in columns 1 and 2
metadata['continuum_normalised'] = 1
spectrum = Spectrum.from_file(filename=filepath, metadata=metadata, columns=(0, 1), binary=False)
self.library.insert(spectra=spectrum, filenames=filename)
# Then import version with continuum, which is in columns 1 and 3
metadata['continuum_normalised'] = 0
spectrum = Spectrum.from_file(filename=filepath, metadata=metadata, columns=(0, 2), binary=False)
self.library.insert(spectra=spectrum, filenames=filename)
except (ValueError, IndexError):
result_log.write("[{}] {:6.0f} sec {}: {}\n".format(time.asctime(), time_end - time_start,
star_name, "Could not read bsyn output"))
result_log.flush()
continue
# Update log file to show our progress
result_log.write("[{}] {:6.0f} sec {}: {}\n".format(time.asctime(), time_end - time_start,
star_name, "OK"))
result_log.flush()
def clean_up(self):
logging.info("Synthesized {:d} spectra.".format(self.counter_output))
# Close TurboSpectrum synthesizer instance
self.synthesizer.close()
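# Illustrative driver sketch (added example, not part of the original module). A concrete
# synthesis script is assumed to call the class roughly in this order; the library name,
# logger and the single placeholder star below are demonstration values only, and running
# it for real requires the 4GP packages plus the Turbospectrum/MARCS installations.
def _example_driver():
    import logging
    synthesizer = Synthesizer(library_name="demo_grid",
                              logger=logging.getLogger(__name__),
                              docstring="Synthesize a tiny demonstration grid.")
    synthesizer.set_star_list([{'Teff': 5777, '[Fe/H]': 0.0, 'logg': 4.44}])
    synthesizer.dump_stellar_parameters_to_sqlite()  # no-op unless --dump-to-sqlite-file is given
    synthesizer.create_spectrum_library()
    synthesizer.do_synthesis()
    synthesizer.clean_up()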
| mit | 4,591,461,098,104,170,500 | 49.293515 | 120 | 0.514929 | false | 4.543941 | false | false | false |
binarybin/MPS_Proba | src/projectionboys.py | 1 | 5741 | """
Program name: MPS-Proba
Program purpose: The Alpha version of the APC 524 project.
File name: projectionboys.py
File purpose: the projection boys model
Responsible person: Bin Xu
"""
from numpy import zeros
import numpy as np
from model import Model
class ProjectionBoys(Model):
"""
    A probabilistic model that describes the model in human language and gives some parameters.
"""
def __init__(self, size, p0, p1, q1, q2, init_state):
super(ProjectionBoys, self).__init__()
self.size = size
self.p0 = p0
self.p1 = p1
self.q1 = q1
self.q2 = q2
self.init_state = init_state
self.model_type = "ProjectionBoys"
self.hamiltonian = r"H = p0 I + \sum_{i=1}^{n-1}\frac{p 1}{n-1}\sigma_i^x\otimes\sigma_{i+1}^x + \frac{q 1}{n-1}\pi_i^+\otimes\pi_{i+1}^- + \frac{q 2}{n-1}\pi_i^+\otimes\pi_{i+1}^-"
self.normalizeTMat()
def normalizeTMat(self):
totalproba = self.p0 + self.p1 + self.q1 + self.q2
self.p0 /= totalproba
self.p1 /= totalproba
self.p1 /= self.size - 1
self.q1 /= totalproba
self.q1 /= self.size - 1
self.q2 /= totalproba
self.q2 /= self.size - 1
def prepareMpo(self):
#initialize the MPO
self.mpo = []
mpo_left = zeros(shape = (2, 2, 1, 5), dtype = float)
mpo_middle = zeros(shape = (2, 2, 5, 5), dtype = float)
mpo_right = zeros(shape = (2, 2, 5, 1), dtype = float)
# remember our convention: phys_in, phys_out, aux_l, aux_r
# mpo_left = [p0 I, p1 Sx, q1 Pi+, q2 Pi-, I]
mpo_left[:, :, 0, 0] = self.p0 * self.I
mpo_left[:, :, 0, 1] = self.p1 * self.sigma_x
mpo_left[:, :, 0, 2] = self.q1 * self.pi_plus
mpo_left[:, :, 0, 3] = self.q2 * self.pi_minus
mpo_left[:, :, 0, 4] = self.I
# mpo_middle = [I, 0, 0, 0, 0]
# [Sx, 0, 0, 0, 0]
# [pi+, 0, 0, 0, 0]
# [pi-, 0, 0, 0, 0]
# [0, p1 Sx, q1 pi+, q2 pi-, I]
mpo_middle[:, :, 0, 0] = self.I
mpo_middle[:, :, 1, 0] = self.sigma_x
mpo_middle[:, :, 2, 0] = self.pi_plus
mpo_middle[:, :, 3, 0] = self.pi_minus
mpo_middle[:, :, 4, 1] = self.p1 * self.sigma_x
mpo_middle[:, :, 4, 2] = self.q1 * self.pi_plus
mpo_middle[:, :, 4, 3] = self.q2 * self.pi_minus
mpo_middle[:, :, 4, 4] = self.I
# mpo_right = [I, Sx, pi+, pi-, 0].transpose
mpo_right[:, :, 0, 0] = self.I
mpo_right[:, :, 1, 0] = self.sigma_x
mpo_right[:, :, 2, 0] = self.pi_plus
mpo_right[:, :, 3, 0] = self.pi_minus
# store the list of mpo's
self.mpo.append(mpo_left)
for i in range(self.size-2):
self.mpo.append(mpo_middle)
self.mpo.append(mpo_right)
def prepareMps(self):
self.mps = []
if self.init_state == "all down":
for i in range(self.size):
new_mps = zeros(shape = (2, 1, 1), dtype = float)
new_mps[0, 0, 0] = 1
self.mps.append(new_mps)
elif type(self.init_state) == list:
if len(self.init_state) != self.size:
raise Exception("The size of the initial condition does not match with the size of the model.")
for i in range(self.size):
new_mps = zeros(shape = (2, 1, 1), dtype = float)
if self.init_state[i] == 0:
new_mps[0, 0, 0] = 1
elif self.init_state[i] == 1:
new_mps[1, 0, 0] = 1
else:
raise Exception("Initial condition can only have 0 or 1 for this model.")
self.mps.append(new_mps)
else:
raise Exception("Initial condition not supported!")
def prepareTransitionalMat(self):
#create sigma_x matrix
sigmax = np.matrix(self.sigma_x)
pi_plus = np.matrix(self.pi_plus).T
pi_minus = np.matrix(self.pi_minus).T
#non changing channel
self.H = self.p0*np.identity(2**self.size) # not changing states
# sigma_x channel
for i in range(self.size-1):
Tmatrix = np.identity(1)
for j in range(self.size):
if j == i or j == i+1:
Tmatrix = np.kron(Tmatrix, sigmax)
else:
Tmatrix = np.kron(Tmatrix, np.identity(2))
self.H = np.add(self.H, Tmatrix * self.p1)
# pi+ channel
for i in range(self.size-1):
Tmatrix = np.identity(1)
for j in range(self.size):
if j == i or j == i+1:
Tmatrix = np.kron(Tmatrix, pi_plus)
else:
Tmatrix = np.kron(Tmatrix, np.identity(2))
self.H = np.add(self.H, Tmatrix * self.q1)
# pi- channel
for i in range(self.size-1):
Tmatrix = np.identity(1)
for j in range(self.size):
if j == i or j == i+1:
Tmatrix = np.kron(Tmatrix, pi_minus)
else:
Tmatrix = np.kron(Tmatrix, np.identity(2))
self.H = np.add(self.H, Tmatrix * self.q2)
def prepareExactInitState(self):
self.init_exact = np.zeros((2**self.size, 1))
if self.init_state == "all down":
self.init_exact[0] = 1
else:
raise Exception("Init state not supported!")
    def __repr__(self):
        # p0 is the probability of remaining unchanged; str() on init_state also handles list-valued initial conditions.
        return ("Hamiltonian: " + self.hamiltonian + "\nSystem length = " + str(self.size)
                + "\nremain_proba = " + str(self.p0) + "\ninitial state: " + str(self.init_state))
| gpl-2.0 | 312,906,142,778,674,500 | 36.03871 | 189 | 0.503048 | false | 3.150933 | false | false | false |
fabiocaccamo/django-kway | kway/cache.py | 1 | 1533 | # -*- coding: utf-8 -*-
from django.core.cache import get_cache
from kway import settings, utils
def get_value_for_key(key, default_value = None):
cache = get_cache(settings.KWAY_CACHE_NAME)
localized_key = utils.get_localized_key(key)
value = None
if cache:
value = cache.get(localized_key, None)
if value:
cache.set(localized_key, value)
cache.close()
return value or default_value
def set_value_for_key(key, value):
cache = get_cache(settings.KWAY_CACHE_NAME)
localized_key = utils.get_localized_key(key)
if cache:
if value:
cache.set(localized_key, value)
else:
cache.delete(localized_key)
cache.close()
return value
def update_values_post_save(sender, instance, **kwargs):
if kwargs['created']:
return
cache = get_cache(settings.KWAY_CACHE_NAME)
if cache:
for language in settings.KWAY_LANGUAGES:
language_code = language[0]
localized_key = utils.get_localized_key(instance.key, language_code)
localized_value_field_name = utils.get_localized_value_field_name(language_code)
localized_value = getattr(instance, localized_value_field_name)
cache.set(localized_key, localized_value)
cache.close()
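# Illustrative usage sketch (added example, not part of the original module): the helpers
# above behave as a localized write-through/read-through pair. The key and value below are
# assumptions used only for demonstration.
def _example_round_trip():
    set_value_for_key('site_title', 'My Site')
    return get_value_for_key('site_title', default_value='My Site (fallback)')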
| mit | 5,651,205,504,249,544,000 | 22.6 | 92 | 0.551207 | false | 4.246537 | false | false | false |
pugpe/pugpe | apps/cert/migrations/0002_auto__add_attendee.py | 1 | 5385 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Attendee'
db.create_table('cert_attendee', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('pub_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=80)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=254)),
))
db.send_create_signal('cert', ['Attendee'])
# Adding M2M table for field events on 'Attendee'
db.create_table('cert_attendee_events', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('attendee', models.ForeignKey(orm['cert.attendee'], null=False)),
('event', models.ForeignKey(orm['events.event'], null=False))
))
db.create_unique('cert_attendee_events', ['attendee_id', 'event_id'])
def backwards(self, orm):
# Deleting model 'Attendee'
db.delete_table('cert_attendee')
# Removing M2M table for field events on 'Attendee'
db.delete_table('cert_attendee_events')
models = {
'cert.attendee': {
'Meta': {'object_name': 'Attendee'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '254'}),
'events': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['events.Event']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'cert.signature': {
'Meta': {'object_name': 'Signature'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'signature': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'events.event': {
'Meta': {'object_name': 'Event'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'full_description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['geo.Location']"}),
'partners': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['events.Partner']", 'null': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'signature': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cert.Signature']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'submission_deadline': ('django.db.models.fields.DateTimeField', [], {})
},
'events.partner': {
'Meta': {'object_name': 'Partner'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'geo.location': {
'Meta': {'object_name': 'Location'},
'city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': "'50'"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'district': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'map': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': "'15'"}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': "'50'"}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'reference': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': "'50'"}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['cert'] | mit | -4,034,517,740,122,692,600 | 58.844444 | 170 | 0.551346 | false | 3.693416 | false | false | false |
sevagas/macro_pack | src/modules/excel_dde.py | 1 | 2672 | #!/usr/bin/env python
# encoding: utf-8
# Only enabled on windows
import sys
from collections import OrderedDict
if sys.platform == "win32":
# Download and install pywin32 from https://sourceforge.net/projects/pywin32/files/pywin32/
import win32com.client # @UnresolvedImport
import logging
from modules.excel_gen import ExcelGenerator
from common import utils
class ExcelDDE(ExcelGenerator):
"""
    Module used to generate an MS Excel file with a DDE object attack
"""
def run(self):
logging.info(" [+] Generating MS Excel with DDE document...")
try:
# Get command line
paramDict = OrderedDict([("Cmd_Line",None)])
self.fillInputParams(paramDict)
command = paramDict["Cmd_Line"]
logging.info(" [-] Open document...")
            # open up an instance of Excel with the win32com driver
excel = win32com.client.Dispatch("Excel.Application")
# do the operation in background without actually opening Excel
#excel.Visible = False
workbook = excel.Workbooks.Open(self.outputFilePath)
logging.info(" [-] Inject DDE field (Answer 'No' to popup)...")
ddeCmd = r"""=MSEXCEL|'\..\..\..\Windows\System32\cmd.exe /c %s'!A1""" % command.rstrip()
excel.Cells(1, 26).Formula = ddeCmd
excel.Cells(1, 26).FormulaHidden = True
# Remove Informations
logging.info(" [-] Remove hidden data and personal info...")
xlRDIAll=99
workbook.RemoveDocumentInformation(xlRDIAll)
logging.info(" [-] Save Document...")
excel.DisplayAlerts=False
excel.Workbooks(1).Close(SaveChanges=1)
excel.Application.Quit()
# garbage collection
del excel
logging.info(" [-] Generated %s file path: %s" % (self.outputFileType, self.outputFilePath))
except Exception:
logging.exception(" [!] Exception caught!")
logging.error(" [!] Hints: Check if MS office is really closed and Antivirus did not catch the files")
logging.error(" [!] Attempt to force close MS Excel applications...")
objExcel = win32com.client.Dispatch("Excel.Application")
objExcel.Application.Quit()
del objExcel
# If it Application.Quit() was not enough we force kill the process
if utils.checkIfProcessRunning("Excel.exe"):
utils.forceProcessKill("Excel.exe")
| apache-2.0 | -653,021,444,565,893,400 | 38.308824 | 114 | 0.583832 | false | 4.268371 | false | false | false |
ChimeraCoder/GOctober | july/people/migrations/0008_auto__add_field_project_updated_on.py | 2 | 9706 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from pytz import UTC
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Project.updated_on'
db.add_column(u'people_project', 'updated_on',
self.gf('django.db.models.fields.DateTimeField')(auto_now=True, default=datetime.datetime(2013, 7, 1, 0, 0, tzinfo=UTC), blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Project.updated_on'
db.delete_column(u'people_project', 'updated_on')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'july.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'location_members'", 'null': 'True', 'to': u"orm['people.Location']"}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'picture_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['people.Project']", 'null': 'True', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'team_members'", 'null': 'True', 'to': u"orm['people.Team']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'people.achievedbadge': {
'Meta': {'object_name': 'AchievedBadge'},
'achieved_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['people.Badge']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['july.User']", 'null': 'True', 'blank': 'True'})
},
u'people.badge': {
'Meta': {'object_name': 'Badge'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '2024', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
u'people.commit': {
'Meta': {'ordering': "['-timestamp']", 'object_name': 'Commit'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'files': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '2024', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['people.Project']", 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['july.User']", 'null': 'True', 'blank': 'True'})
},
u'people.language': {
'Meta': {'object_name': 'Language'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
u'people.location': {
'Meta': {'object_name': 'Location'},
'approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'primary_key': 'True'}),
'total': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'people.project': {
'Meta': {'object_name': 'Project'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'forked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'forks': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'parent_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'repo_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'service': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '30', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'watchers': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'people.team': {
'Meta': {'object_name': 'Team'},
'approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'primary_key': 'True'}),
'total': ('django.db.models.fields.IntegerField', [], {'default': '0'})
}
}
complete_apps = ['people']
| mit | 4,708,723,472,445,294,000 | 73.661538 | 187 | 0.541315 | false | 3.604159 | false | false | false |
ODoSE/odose.nl | align_trim_orthologs.py | 1 | 9906 | #!/usr/bin/env python
"""Module to align and trim orthologs after the OrthoMCL step."""
from __future__ import division
from Bio import AlignIO
from shared import create_directory, extract_archive_of_files, create_archive_of_files, parse_options, \
CODON_TABLE_ID
from scatterplot import scatterplot
from versions import TRANSLATORX
from operator import itemgetter
from subprocess import check_call, STDOUT
import logging as log
import os
import shutil
import sys
import tempfile
__author__ = "Tim te Beek"
__copyright__ = "Copyright 2011, Netherlands Bioinformatics Centre"
__license__ = "MIT"
def _align_sicos(run_dir, sico_files):
"""Align all SICO files given as argument in parallel and return the resulting alignment files."""
log.info('Aligning {0} SICO genes using TranslatorX & muscle.'.format(len(sico_files)))
# We'll multiplex this embarrassingly parallel task using a pool of workers
return [_run_translatorx((run_dir, sico_file)) for sico_file in sico_files]
def _run_translatorx((run_dir, sico_file), translation_table=CODON_TABLE_ID):
"""Run TranslatorX to create DNA level alignment file of protein level aligned DNA sequences within sico_file."""
assert os.path.exists(TRANSLATORX) and os.access(TRANSLATORX, os.X_OK), 'Could not find or run ' + TRANSLATORX
# Determine output file name
sico_base = os.path.splitext(os.path.split(sico_file)[1])[0]
alignment_dir = create_directory('alignments/' + sico_base, inside_dir=run_dir)
# Created output file
file_base = os.path.join(alignment_dir, sico_base)
dna_alignment = file_base + '.nt_ali.fasta'
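    # For a hypothetical input file 'sico_001.ffn' this resolves to
    # <run_dir>/alignments/sico_001/sico_001.nt_ali.fasta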
# Actually run the TranslatorX program
command = [TRANSLATORX,
'-i', sico_file,
'-c', str(translation_table),
'-o', file_base]
check_call(command, stdout=open('/dev/null', 'w'), stderr=STDOUT)
assert os.path.isfile(dna_alignment) and 0 < os.path.getsize(dna_alignment), \
'Alignment file should exist and have some content now: {0}'.format(dna_alignment)
return dna_alignment
def _trim_alignments(run_dir, dna_alignments, retained_threshold, max_indel_length, stats_file, scatterplot_file):
"""Trim all DNA alignments using _trim_alignment (singular), and calculate some statistics about the trimming."""
log.info('Trimming {0} DNA alignments from first non-gap codon to last non-gap codon'.format(len(dna_alignments)))
# Create directory here, to prevent race-condition when folder does not exist, but is then created by another process
trimmed_dir = create_directory('trimmed', inside_dir=run_dir)
# Trim all the alignments
trim_tpls = [_trim_alignment((trimmed_dir, dna_alignment, max_indel_length)) for dna_alignment in dna_alignments]
remaining_percts = [tpl[3] for tpl in trim_tpls]
trimmed_alignments = [tpl[0] for tpl in trim_tpls if retained_threshold <= tpl[3]]
misaligned = [tpl[0] for tpl in trim_tpls if retained_threshold > tpl[3]]
# Write trim statistics to file in such a way that they're easily converted to a graph in Galaxy
with open(stats_file, mode='w') as append_handle:
msg = '{0:6} sequence alignments trimmed'.format(len(trim_tpls))
log.info(msg)
append_handle.write('#' + msg + '\n')
average_retained = sum(remaining_percts) / len(remaining_percts)
msg = '{0:5.1f}% sequence retained on average overall'.format(average_retained)
log.info(msg)
append_handle.write('#' + msg + '\n')
filtered = len(misaligned)
msg = '{0:6} orthologs filtered because less than {1}% sequence retained or because of indel longer than {2} '\
.format(filtered, str(retained_threshold), max_indel_length)
log.info(msg)
append_handle.write('#' + msg + '\n')
append_handle.write('# Trimmed file\tOriginal length\tTrimmed length\tPercentage retained\n')
for tpl in sorted(trim_tpls, key=itemgetter(3)):
append_handle.write(os.path.split(tpl[0])[1] + '\t')
append_handle.write(str(tpl[1]) + '\t')
append_handle.write(str(tpl[2]) + '\t')
append_handle.write('{0:.2f}\n'.format(tpl[3]))
# Create scatterplot using trim_tuples
scatterplot(retained_threshold, trim_tpls, scatterplot_file)
return sorted(trimmed_alignments), sorted(misaligned)
def _trim_alignment((trimmed_dir, dna_alignment, max_indel_length)):
"""Trim alignment to retain first & last non-gapped codons across alignment, and everything in between (+gaps!).
Return trimmed file, original length, trimmed length and percentage retained as tuple"""
# Read single alignment from fasta file
alignment = AlignIO.read(dna_alignment, 'fasta')
# print '\n'.join([str(seqr.seq) for seqr in alignment])
# Total alignment should be just as long as first seqr of alignment
alignment_length = len(alignment[0])
# After using protein alignment only for CDS, all alignment lengths should be multiples of three
    assert alignment_length % 3 == 0, 'Length not a multiple of three: {0} \n{1}'.format(alignment_length, alignment)
# Assert all codons are either full length codons or gaps, but not a mix of gaps and letters such as AA- or A--
for index in range(0, alignment_length, 3):
for ali in alignment:
codon = ali.seq[index:index + 3]
assert not ('-' in codon and str(codon) != '---'), '{0} at {1} in \n{2}'.format(codon, index, alignment)
# Loop over alignment, taking 3 DNA characters each time, representing a single codon
first_full_codon_start = None
last_full_codon_end = None
for index in range(0, alignment_length, 3):
codon_concatemer = ''.join([str(seqr.seq) for seqr in alignment[:, index:index + 3]])
if '-' in codon_concatemer:
continue
if first_full_codon_start is None:
first_full_codon_start = index
else:
last_full_codon_end = index + 3
# Create sub alignment consisting of all trimmed sequences from full alignment
trimmed = alignment[:, first_full_codon_start:last_full_codon_end]
trimmed_length = len(trimmed[0])
    assert trimmed_length % 3 == 0, 'Length not a multiple of three: {0} \n{1}'.format(trimmed_length, trimmed)
# Write out trimmed alignment file
trimmed_file = os.path.join(trimmed_dir, os.path.split(dna_alignment)[1])
with open(trimmed_file, mode='w') as write_handle:
AlignIO.write(trimmed, write_handle, 'fasta')
# Assert file now exists with content
assert os.path.isfile(trimmed_file) and os.path.getsize(trimmed_file), \
'Expected trimmed alignment file to exist with some content now: {0}'.format(trimmed_file)
# Filter out those alignment that contain an indel longer than N: return zero (0) as trimmed length & % retained
if any('-' * max_indel_length in str(seqr.seq) for seqr in trimmed):
return trimmed_file, alignment_length, 0, 0
return trimmed_file, alignment_length, trimmed_length, trimmed_length / alignment_length * 100
def main(args):
"""Main function called when run from command line or as part of pipeline."""
usage = """
Usage: align_trim_orthologs.py
--orthologs-zip=FILE archive of orthologous genes in FASTA format
--retained-threshold=PERC filter orthologs that retain less than PERC % of sequence after trimming alignment
--max-indel-length=NUMBER filter orthologs that contain insertions / deletions longer than N in middle of alignment
--aligned-zip=FILE destination file path for archive of aligned orthologous genes
--misaligned-zip=FILE destination file path for archive of misaligned orthologous genes
--trimmed-zip=FILE destination file path for archive of aligned & trimmed orthologous genes
--stats=FILE destination file path for ortholog trimming statistics file
--scatterplot=FILE destination file path for scatterplot of retained and filtered sequences by length
"""
options = ['orthologs-zip', 'retained-threshold', 'max-indel-length',
'aligned-zip', 'misaligned-zip', 'trimmed-zip', 'stats', 'scatterplot']
orthologs_zip, retained_threshold, max_indel_length, \
aligned_zip, misaligned_zip, trimmed_zip, target_stats_path, target_scatterplot = \
parse_options(usage, options, args)
# Convert retained threshold to integer, so we can fail fast if argument value format was wrong
retained_threshold = int(retained_threshold)
max_indel_length = int(max_indel_length)
# Run filtering in a temporary folder, to prevent interference from simultaneous runs
run_dir = tempfile.mkdtemp(prefix='align_trim_')
# Extract files from zip archive
temp_dir = create_directory('orthologs', inside_dir=run_dir)
sico_files = extract_archive_of_files(orthologs_zip, temp_dir)
# Align SICOs so all sequences become equal length sequences
aligned_files = _align_sicos(run_dir, sico_files)
# Filter orthologs that retain less than PERC % of sequence after trimming alignment
trimmed_files, misaligned_files = _trim_alignments(run_dir, aligned_files, retained_threshold, max_indel_length,
target_stats_path, target_scatterplot)
# Create archives of files on command line specified output paths
create_archive_of_files(aligned_zip, aligned_files)
create_archive_of_files(misaligned_zip, misaligned_files)
create_archive_of_files(trimmed_zip, trimmed_files)
# Remove unused files to free disk space
shutil.rmtree(run_dir)
# Exit after a comforting log message
log.info('Produced: \n%s', '\n'.join((aligned_zip, misaligned_zip, trimmed_zip,
target_stats_path, target_scatterplot)))
if __name__ == '__main__':
main(sys.argv[1:])
| mit | 1,495,519,883,376,409,900 | 48.283582 | 121 | 0.687967 | false | 3.627243 | false | false | false |
jenholm/MontePy | Sim3.py | 1 | 1660 | #our unit of time here, is going to be
#one minute, and we're going to run for one week
SIM_TIME=7*24*60
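# 7 days * 24 hours * 60 minutes = 10080 simulated minutes in total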
DOW=["Sun","Mon","Tue","Wed","Thu","Fri","Sat"]
hour_array=["00","01", "02", "03", "04", "05", "06",
"07","08", "09", "10", "11", "12", "13",
"14","15", "16", "17", "18", "19", "20",
"21","22", "23"]
current_day_hour_minute=None
class DayHourMinute(object):
def __init__(self, day_string, hour_string, minute_string):
self.day=day_string
self.hour=hour_string
self.minute=minute_string
class ScheduleHour(object):
def __init__(self, day, hour, index):
self.day = day
self.hour = hour
self.index = index
####START SIM RUN
hour=0
schedule = []
h=0
for this_day in DOW:
for this_hour in hour_array:
temp_hour = ScheduleHour(this_day, this_hour, h)
schedule.append(temp_hour)
h += 1
for i in range(1, SIM_TIME):
if i % 60 == 0:
print("Another hour has passed. Last hour %d" % hour)
hour+=1
print("This hour: %d" % hour)
day_index = DOW.index(schedule[hour].day)
current_day_hour_minute = DayHourMinute(schedule[hour].day,
schedule[hour].hour, str(i - int(schedule[hour].hour) * 60
- (1440 * day_index)))
print("Day %s Hour %s Minute %s " % (current_day_hour_minute.day,
current_day_hour_minute.hour,
current_day_hour_minute.minute))
| gpl-3.0 | 4,859,559,912,248,309,000 | 28.740741 | 102 | 0.493976 | false | 3.408624 | false | false | false |
eneldoserrata/marcos_openerp | addons/report_geraldo/lib/geraldo/site/newsite/django_1_0/django/template/defaulttags.py | 9 | 38786 | """Default tags used by the template system, available to all templates."""
import sys
import re
from itertools import cycle as itertools_cycle
try:
reversed
except NameError:
from django.utils.itercompat import reversed # Python 2.3 fallback
from django.template import Node, NodeList, Template, Context, Variable
from django.template import TemplateSyntaxError, VariableDoesNotExist, BLOCK_TAG_START, BLOCK_TAG_END, VARIABLE_TAG_START, VARIABLE_TAG_END, SINGLE_BRACE_START, SINGLE_BRACE_END, COMMENT_TAG_START, COMMENT_TAG_END
from django.template import get_library, Library, InvalidTemplateLibrary
from django.conf import settings
from django.utils.encoding import smart_str, smart_unicode
from django.utils.itercompat import groupby
from django.utils.safestring import mark_safe
register = Library()
class AutoEscapeControlNode(Node):
"""Implements the actions of the autoescape tag."""
def __init__(self, setting, nodelist):
self.setting, self.nodelist = setting, nodelist
def render(self, context):
old_setting = context.autoescape
context.autoescape = self.setting
output = self.nodelist.render(context)
context.autoescape = old_setting
if self.setting:
return mark_safe(output)
else:
return output
class CommentNode(Node):
def render(self, context):
return ''
class CycleNode(Node):
def __init__(self, cyclevars, variable_name=None):
self.cycle_iter = itertools_cycle([Variable(v) for v in cyclevars])
self.variable_name = variable_name
def render(self, context):
value = self.cycle_iter.next().resolve(context)
if self.variable_name:
context[self.variable_name] = value
return value
class DebugNode(Node):
def render(self, context):
from pprint import pformat
output = [pformat(val) for val in context]
output.append('\n\n')
output.append(pformat(sys.modules))
return ''.join(output)
class FilterNode(Node):
def __init__(self, filter_expr, nodelist):
self.filter_expr, self.nodelist = filter_expr, nodelist
def render(self, context):
output = self.nodelist.render(context)
# Apply filters.
context.update({'var': output})
filtered = self.filter_expr.resolve(context)
context.pop()
return filtered
class FirstOfNode(Node):
def __init__(self, vars):
self.vars = map(Variable, vars)
def render(self, context):
for var in self.vars:
try:
value = var.resolve(context)
except VariableDoesNotExist:
continue
if value:
return smart_unicode(value)
return u''
class ForNode(Node):
def __init__(self, loopvars, sequence, is_reversed, nodelist_loop):
self.loopvars, self.sequence = loopvars, sequence
self.is_reversed = is_reversed
self.nodelist_loop = nodelist_loop
def __repr__(self):
reversed_text = self.is_reversed and ' reversed' or ''
return "<For Node: for %s in %s, tail_len: %d%s>" % \
(', '.join(self.loopvars), self.sequence, len(self.nodelist_loop),
reversed_text)
def __iter__(self):
for node in self.nodelist_loop:
yield node
def get_nodes_by_type(self, nodetype):
nodes = []
if isinstance(self, nodetype):
nodes.append(self)
nodes.extend(self.nodelist_loop.get_nodes_by_type(nodetype))
return nodes
def render(self, context):
nodelist = NodeList()
if 'forloop' in context:
parentloop = context['forloop']
else:
parentloop = {}
context.push()
try:
values = self.sequence.resolve(context, True)
except VariableDoesNotExist:
values = []
if values is None:
values = []
if not hasattr(values, '__len__'):
values = list(values)
len_values = len(values)
if self.is_reversed:
values = reversed(values)
unpack = len(self.loopvars) > 1
# Create a forloop value in the context. We'll update counters on each
# iteration just below.
loop_dict = context['forloop'] = {'parentloop': parentloop}
for i, item in enumerate(values):
# Shortcuts for current loop iteration number.
loop_dict['counter0'] = i
loop_dict['counter'] = i+1
# Reverse counter iteration numbers.
loop_dict['revcounter'] = len_values - i
loop_dict['revcounter0'] = len_values - i - 1
# Boolean values designating first and last times through loop.
loop_dict['first'] = (i == 0)
loop_dict['last'] = (i == len_values - 1)
if unpack:
# If there are multiple loop variables, unpack the item into
# them.
context.update(dict(zip(self.loopvars, item)))
else:
context[self.loopvars[0]] = item
for node in self.nodelist_loop:
nodelist.append(node.render(context))
if unpack:
# The loop variables were pushed on to the context so pop them
# off again. This is necessary because the tag lets the length
# of loopvars differ to the length of each set of items and we
# don't want to leave any vars from the previous loop on the
# context.
context.pop()
context.pop()
return nodelist.render(context)
class IfChangedNode(Node):
def __init__(self, nodelist_true, nodelist_false, *varlist):
self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false
self._last_seen = None
self._varlist = map(Variable, varlist)
self._id = str(id(self))
def render(self, context):
if 'forloop' in context and self._id not in context['forloop']:
self._last_seen = None
context['forloop'][self._id] = 1
try:
if self._varlist:
# Consider multiple parameters. This automatically behaves
# like an OR evaluation of the multiple variables.
compare_to = [var.resolve(context) for var in self._varlist]
else:
compare_to = self.nodelist_true.render(context)
except VariableDoesNotExist:
compare_to = None
if compare_to != self._last_seen:
firstloop = (self._last_seen == None)
self._last_seen = compare_to
context.push()
context['ifchanged'] = {'firstloop': firstloop}
content = self.nodelist_true.render(context)
context.pop()
return content
elif self.nodelist_false:
return self.nodelist_false.render(context)
return ''
class IfEqualNode(Node):
def __init__(self, var1, var2, nodelist_true, nodelist_false, negate):
self.var1, self.var2 = Variable(var1), Variable(var2)
self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false
self.negate = negate
def __repr__(self):
return "<IfEqualNode>"
def render(self, context):
try:
val1 = self.var1.resolve(context)
except VariableDoesNotExist:
val1 = None
try:
val2 = self.var2.resolve(context)
except VariableDoesNotExist:
val2 = None
if (self.negate and val1 != val2) or (not self.negate and val1 == val2):
return self.nodelist_true.render(context)
return self.nodelist_false.render(context)
class IfNode(Node):
def __init__(self, bool_exprs, nodelist_true, nodelist_false, link_type):
self.bool_exprs = bool_exprs
self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false
self.link_type = link_type
def __repr__(self):
return "<If node>"
def __iter__(self):
for node in self.nodelist_true:
yield node
for node in self.nodelist_false:
yield node
def get_nodes_by_type(self, nodetype):
nodes = []
if isinstance(self, nodetype):
nodes.append(self)
nodes.extend(self.nodelist_true.get_nodes_by_type(nodetype))
nodes.extend(self.nodelist_false.get_nodes_by_type(nodetype))
return nodes
def render(self, context):
if self.link_type == IfNode.LinkTypes.or_:
for ifnot, bool_expr in self.bool_exprs:
try:
value = bool_expr.resolve(context, True)
except VariableDoesNotExist:
value = None
if (value and not ifnot) or (ifnot and not value):
return self.nodelist_true.render(context)
return self.nodelist_false.render(context)
else:
for ifnot, bool_expr in self.bool_exprs:
try:
value = bool_expr.resolve(context, True)
except VariableDoesNotExist:
value = None
if not ((value and not ifnot) or (ifnot and not value)):
return self.nodelist_false.render(context)
return self.nodelist_true.render(context)
class LinkTypes:
and_ = 0,
or_ = 1
class RegroupNode(Node):
def __init__(self, target, expression, var_name):
self.target, self.expression = target, expression
self.var_name = var_name
def render(self, context):
obj_list = self.target.resolve(context, True)
if obj_list == None:
# target variable wasn't found in context; fail silently.
context[self.var_name] = []
return ''
# List of dictionaries in the format:
# {'grouper': 'key', 'list': [list of contents]}.
context[self.var_name] = [
{'grouper': key, 'list': list(val)}
for key, val in
groupby(obj_list, lambda v, f=self.expression.resolve: f(v, True))
]
return ''
def include_is_allowed(filepath):
for root in settings.ALLOWED_INCLUDE_ROOTS:
if filepath.startswith(root):
return True
return False
class SsiNode(Node):
def __init__(self, filepath, parsed):
self.filepath, self.parsed = filepath, parsed
def render(self, context):
if not include_is_allowed(self.filepath):
if settings.DEBUG:
return "[Didn't have permission to include file]"
else:
return '' # Fail silently for invalid includes.
try:
fp = open(self.filepath, 'r')
output = fp.read()
fp.close()
except IOError:
output = ''
if self.parsed:
try:
t = Template(output, name=self.filepath)
return t.render(context)
except TemplateSyntaxError, e:
if settings.DEBUG:
return "[Included template had syntax error: %s]" % e
else:
return '' # Fail silently for invalid included templates.
return output
class LoadNode(Node):
def render(self, context):
return ''
class NowNode(Node):
def __init__(self, format_string):
self.format_string = format_string
def render(self, context):
from datetime import datetime
from django.utils.dateformat import DateFormat
df = DateFormat(datetime.now())
return df.format(self.format_string)
class SpacelessNode(Node):
def __init__(self, nodelist):
self.nodelist = nodelist
def render(self, context):
from django.utils.html import strip_spaces_between_tags
return strip_spaces_between_tags(self.nodelist.render(context).strip())
class TemplateTagNode(Node):
mapping = {'openblock': BLOCK_TAG_START,
'closeblock': BLOCK_TAG_END,
'openvariable': VARIABLE_TAG_START,
'closevariable': VARIABLE_TAG_END,
'openbrace': SINGLE_BRACE_START,
'closebrace': SINGLE_BRACE_END,
'opencomment': COMMENT_TAG_START,
'closecomment': COMMENT_TAG_END,
}
def __init__(self, tagtype):
self.tagtype = tagtype
def render(self, context):
return self.mapping.get(self.tagtype, '')
class URLNode(Node):
def __init__(self, view_name, args, kwargs):
self.view_name = view_name
self.args = args
self.kwargs = kwargs
def render(self, context):
from django.core.urlresolvers import reverse, NoReverseMatch
args = [arg.resolve(context) for arg in self.args]
kwargs = dict([(smart_str(k,'ascii'), v.resolve(context))
for k, v in self.kwargs.items()])
try:
return reverse(self.view_name, args=args, kwargs=kwargs)
except NoReverseMatch:
try:
project_name = settings.SETTINGS_MODULE.split('.')[0]
return reverse(project_name + '.' + self.view_name,
args=args, kwargs=kwargs)
except NoReverseMatch:
return ''
class WidthRatioNode(Node):
def __init__(self, val_expr, max_expr, max_width):
self.val_expr = val_expr
self.max_expr = max_expr
self.max_width = max_width
def render(self, context):
try:
value = self.val_expr.resolve(context)
maxvalue = self.max_expr.resolve(context)
except VariableDoesNotExist:
return ''
try:
value = float(value)
maxvalue = float(maxvalue)
ratio = (value / maxvalue) * int(self.max_width)
except (ValueError, ZeroDivisionError):
return ''
return str(int(round(ratio)))
class WithNode(Node):
def __init__(self, var, name, nodelist):
self.var = var
self.name = name
self.nodelist = nodelist
def __repr__(self):
return "<WithNode>"
def render(self, context):
val = self.var.resolve(context)
context.push()
context[self.name] = val
output = self.nodelist.render(context)
context.pop()
return output
#@register.tag
def autoescape(parser, token):
"""
Force autoescape behaviour for this block.
"""
args = token.contents.split()
if len(args) != 2:
raise TemplateSyntaxError("'Autoescape' tag requires exactly one argument.")
arg = args[1]
if arg not in (u'on', u'off'):
raise TemplateSyntaxError("'Autoescape' argument should be 'on' or 'off'")
nodelist = parser.parse(('endautoescape',))
parser.delete_first_token()
return AutoEscapeControlNode((arg == 'on'), nodelist)
autoescape = register.tag(autoescape)
#@register.tag
def comment(parser, token):
"""
Ignores everything between ``{% comment %}`` and ``{% endcomment %}``.
"""
parser.skip_past('endcomment')
return CommentNode()
comment = register.tag(comment)
#@register.tag
def cycle(parser, token):
"""
Cycles among the given strings each time this tag is encountered.
Within a loop, cycles among the given strings each time through
the loop::
{% for o in some_list %}
<tr class="{% cycle 'row1' 'row2' %}">
...
</tr>
{% endfor %}
Outside of a loop, give the values a unique name the first time you call
    it, then use that name each successive time through::
<tr class="{% cycle 'row1' 'row2' 'row3' as rowcolors %}">...</tr>
<tr class="{% cycle rowcolors %}">...</tr>
<tr class="{% cycle rowcolors %}">...</tr>
You can use any number of values, separated by spaces. Commas can also
be used to separate values; if a comma is used, the cycle values are
interpreted as literal strings.
"""
# Note: This returns the exact same node on each {% cycle name %} call;
# that is, the node object returned from {% cycle a b c as name %} and the
# one returned from {% cycle name %} are the exact same object. This
# shouldn't cause problems (heh), but if it does, now you know.
#
# Ugly hack warning: This stuffs the named template dict into parser so
# that names are only unique within each template (as opposed to using
# a global variable, which would make cycle names have to be unique across
# *all* templates.
args = token.split_contents()
if len(args) < 2:
raise TemplateSyntaxError("'cycle' tag requires at least two arguments")
if ',' in args[1]:
# Backwards compatibility: {% cycle a,b %} or {% cycle a,b as foo %}
# case.
args[1:2] = ['"%s"' % arg for arg in args[1].split(",")]
if len(args) == 2:
# {% cycle foo %} case.
name = args[1]
if not hasattr(parser, '_namedCycleNodes'):
raise TemplateSyntaxError("No named cycles in template. '%s' is not defined" % name)
if not name in parser._namedCycleNodes:
raise TemplateSyntaxError("Named cycle '%s' does not exist" % name)
return parser._namedCycleNodes[name]
if len(args) > 4 and args[-2] == 'as':
name = args[-1]
node = CycleNode(args[1:-2], name)
if not hasattr(parser, '_namedCycleNodes'):
parser._namedCycleNodes = {}
parser._namedCycleNodes[name] = node
else:
node = CycleNode(args[1:])
return node
cycle = register.tag(cycle)
def debug(parser, token):
"""
Outputs a whole load of debugging information, including the current
context and imported modules.
Sample usage::
<pre>
{% debug %}
</pre>
"""
return DebugNode()
debug = register.tag(debug)
#@register.tag(name="filter")
def do_filter(parser, token):
"""
Filters the contents of the block through variable filters.
Filters can also be piped through each other, and they can have
arguments -- just like in variable syntax.
Sample usage::
{% filter force_escape|lower %}
This text will be HTML-escaped, and will appear in lowercase.
{% endfilter %}
"""
_, rest = token.contents.split(None, 1)
filter_expr = parser.compile_filter("var|%s" % (rest))
for func, unused in filter_expr.filters:
if getattr(func, '_decorated_function', func).__name__ in ('escape', 'safe'):
raise TemplateSyntaxError('"filter %s" is not permitted. Use the "autoescape" tag instead.' % func.__name__)
nodelist = parser.parse(('endfilter',))
parser.delete_first_token()
return FilterNode(filter_expr, nodelist)
do_filter = register.tag("filter", do_filter)
#@register.tag
def firstof(parser, token):
"""
Outputs the first variable passed that is not False.
Outputs nothing if all the passed variables are False.
Sample usage::
{% firstof var1 var2 var3 %}
This is equivalent to::
{% if var1 %}
{{ var1 }}
{% else %}{% if var2 %}
{{ var2 }}
{% else %}{% if var3 %}
{{ var3 }}
{% endif %}{% endif %}{% endif %}
but obviously much cleaner!
You can also use a literal string as a fallback value in case all
passed variables are False::
{% firstof var1 var2 var3 "fallback value" %}
"""
bits = token.split_contents()[1:]
if len(bits) < 1:
raise TemplateSyntaxError("'firstof' statement requires at least one"
" argument")
return FirstOfNode(bits)
firstof = register.tag(firstof)
#@register.tag(name="for")
def do_for(parser, token):
"""
Loops over each item in an array.
For example, to display a list of athletes given ``athlete_list``::
<ul>
{% for athlete in athlete_list %}
<li>{{ athlete.name }}</li>
{% endfor %}
</ul>
You can loop over a list in reverse by using
``{% for obj in list reversed %}``.
You can also unpack multiple values from a two-dimensional array::
{% for key,value in dict.items %}
{{ key }}: {{ value }}
{% endfor %}
The for loop sets a number of variables available within the loop:
========================== ================================================
Variable Description
========================== ================================================
``forloop.counter`` The current iteration of the loop (1-indexed)
``forloop.counter0`` The current iteration of the loop (0-indexed)
``forloop.revcounter`` The number of iterations from the end of the
loop (1-indexed)
``forloop.revcounter0`` The number of iterations from the end of the
loop (0-indexed)
``forloop.first`` True if this is the first time through the loop
``forloop.last`` True if this is the last time through the loop
``forloop.parentloop`` For nested loops, this is the loop "above" the
current one
========================== ================================================
"""
bits = token.contents.split()
if len(bits) < 4:
raise TemplateSyntaxError("'for' statements should have at least four"
" words: %s" % token.contents)
is_reversed = bits[-1] == 'reversed'
in_index = is_reversed and -3 or -2
if bits[in_index] != 'in':
raise TemplateSyntaxError("'for' statements should use the format"
" 'for x in y': %s" % token.contents)
loopvars = re.sub(r' *, *', ',', ' '.join(bits[1:in_index])).split(',')
for var in loopvars:
if not var or ' ' in var:
raise TemplateSyntaxError("'for' tag received an invalid argument:"
" %s" % token.contents)
sequence = parser.compile_filter(bits[in_index+1])
nodelist_loop = parser.parse(('endfor',))
parser.delete_first_token()
return ForNode(loopvars, sequence, is_reversed, nodelist_loop)
do_for = register.tag("for", do_for)
def do_ifequal(parser, token, negate):
bits = list(token.split_contents())
if len(bits) != 3:
raise TemplateSyntaxError, "%r takes two arguments" % bits[0]
end_tag = 'end' + bits[0]
nodelist_true = parser.parse(('else', end_tag))
token = parser.next_token()
if token.contents == 'else':
nodelist_false = parser.parse((end_tag,))
parser.delete_first_token()
else:
nodelist_false = NodeList()
return IfEqualNode(bits[1], bits[2], nodelist_true, nodelist_false, negate)
#@register.tag
def ifequal(parser, token):
"""
Outputs the contents of the block if the two arguments equal each other.
Examples::
{% ifequal user.id comment.user_id %}
...
{% endifequal %}
{% ifnotequal user.id comment.user_id %}
...
{% else %}
...
{% endifnotequal %}
"""
return do_ifequal(parser, token, False)
ifequal = register.tag(ifequal)
#@register.tag
def ifnotequal(parser, token):
"""
Outputs the contents of the block if the two arguments are not equal.
See ifequal.
"""
return do_ifequal(parser, token, True)
ifnotequal = register.tag(ifnotequal)
#@register.tag(name="if")
def do_if(parser, token):
"""
The ``{% if %}`` tag evaluates a variable, and if that variable is "true"
(i.e., exists, is not empty, and is not a false boolean value), the
contents of the block are output:
::
{% if athlete_list %}
Number of athletes: {{ athlete_list|count }}
{% else %}
No athletes.
{% endif %}
In the above, if ``athlete_list`` is not empty, the number of athletes will
be displayed by the ``{{ athlete_list|count }}`` variable.
As you can see, the ``if`` tag can take an option ``{% else %}`` clause
that will be displayed if the test fails.
``if`` tags may use ``or``, ``and`` or ``not`` to test a number of
variables or to negate a given variable::
{% if not athlete_list %}
There are no athletes.
{% endif %}
{% if athlete_list or coach_list %}
There are some athletes or some coaches.
{% endif %}
{% if athlete_list and coach_list %}
            Both athletes and coaches are available.
{% endif %}
{% if not athlete_list or coach_list %}
There are no athletes, or there are some coaches.
{% endif %}
{% if athlete_list and not coach_list %}
There are some athletes and absolutely no coaches.
{% endif %}
``if`` tags do not allow ``and`` and ``or`` clauses with the same tag,
    because the order of logic would be ambiguous. For example, this is
invalid::
{% if athlete_list and coach_list or cheerleader_list %}
If you need to combine ``and`` and ``or`` to do advanced logic, just use
nested if tags. For example::
{% if athlete_list %}
{% if coach_list or cheerleader_list %}
We have athletes, and either coaches or cheerleaders!
{% endif %}
{% endif %}
"""
bits = token.contents.split()
del bits[0]
if not bits:
raise TemplateSyntaxError("'if' statement requires at least one argument")
# Bits now looks something like this: ['a', 'or', 'not', 'b', 'or', 'c.d']
bitstr = ' '.join(bits)
boolpairs = bitstr.split(' and ')
boolvars = []
if len(boolpairs) == 1:
link_type = IfNode.LinkTypes.or_
boolpairs = bitstr.split(' or ')
else:
link_type = IfNode.LinkTypes.and_
if ' or ' in bitstr:
raise TemplateSyntaxError, "'if' tags can't mix 'and' and 'or'"
for boolpair in boolpairs:
if ' ' in boolpair:
try:
not_, boolvar = boolpair.split()
except ValueError:
raise TemplateSyntaxError, "'if' statement improperly formatted"
if not_ != 'not':
raise TemplateSyntaxError, "Expected 'not' in if statement"
boolvars.append((True, parser.compile_filter(boolvar)))
else:
boolvars.append((False, parser.compile_filter(boolpair)))
nodelist_true = parser.parse(('else', 'endif'))
token = parser.next_token()
if token.contents == 'else':
nodelist_false = parser.parse(('endif',))
parser.delete_first_token()
else:
nodelist_false = NodeList()
return IfNode(boolvars, nodelist_true, nodelist_false, link_type)
do_if = register.tag("if", do_if)
#@register.tag
def ifchanged(parser, token):
"""
Checks if a value has changed from the last iteration of a loop.
The 'ifchanged' block tag is used within a loop. It has two possible uses.
1. Checks its own rendered contents against its previous state and only
displays the content if it has changed. For example, this displays a
list of days, only displaying the month if it changes::
<h1>Archive for {{ year }}</h1>
{% for date in days %}
{% ifchanged %}<h3>{{ date|date:"F" }}</h3>{% endifchanged %}
<a href="{{ date|date:"M/d"|lower }}/">{{ date|date:"j" }}</a>
{% endfor %}
2. If given a variable, check whether that variable has changed.
For example, the following shows the date every time it changes, but
only shows the hour if both the hour and the date have changed::
{% for date in days %}
{% ifchanged date.date %} {{ date.date }} {% endifchanged %}
{% ifchanged date.hour date.date %}
{{ date.hour }}
{% endifchanged %}
{% endfor %}
"""
bits = token.contents.split()
nodelist_true = parser.parse(('else', 'endifchanged'))
token = parser.next_token()
if token.contents == 'else':
nodelist_false = parser.parse(('endifchanged',))
parser.delete_first_token()
else:
nodelist_false = NodeList()
return IfChangedNode(nodelist_true, nodelist_false, *bits[1:])
ifchanged = register.tag(ifchanged)
#@register.tag
def ssi(parser, token):
"""
Outputs the contents of a given file into the page.
Like a simple "include" tag, the ``ssi`` tag includes the contents
of another file -- which must be specified using an absolute path --
in the current page::
{% ssi /home/html/ljworld.com/includes/right_generic.html %}
If the optional "parsed" parameter is given, the contents of the included
file are evaluated as template code, with the current context::
{% ssi /home/html/ljworld.com/includes/right_generic.html parsed %}
"""
bits = token.contents.split()
parsed = False
if len(bits) not in (2, 3):
raise TemplateSyntaxError("'ssi' tag takes one argument: the path to"
" the file to be included")
if len(bits) == 3:
if bits[2] == 'parsed':
parsed = True
else:
raise TemplateSyntaxError("Second (optional) argument to %s tag"
" must be 'parsed'" % bits[0])
return SsiNode(bits[1], parsed)
ssi = register.tag(ssi)
#@register.tag
def load(parser, token):
"""
Loads a custom template tag set.
For example, to load the template tags in
``django/templatetags/news/photos.py``::
{% load news.photos %}
"""
bits = token.contents.split()
for taglib in bits[1:]:
# add the library to the parser
try:
lib = get_library("django.templatetags.%s" % taglib)
parser.add_library(lib)
except InvalidTemplateLibrary, e:
raise TemplateSyntaxError("'%s' is not a valid tag library: %s" %
(taglib, e))
return LoadNode()
load = register.tag(load)
#@register.tag
def now(parser, token):
"""
Displays the date, formatted according to the given string.
Uses the same format as PHP's ``date()`` function; see http://php.net/date
for all the possible values.
Sample usage::
It is {% now "jS F Y H:i" %}
"""
bits = token.contents.split('"')
if len(bits) != 3:
raise TemplateSyntaxError, "'now' statement takes one argument"
format_string = bits[1]
return NowNode(format_string)
now = register.tag(now)
#@register.tag
def regroup(parser, token):
"""
Regroups a list of alike objects by a common attribute.
This complex tag is best illustrated by use of an example: say that
``people`` is a list of ``Person`` objects that have ``first_name``,
``last_name``, and ``gender`` attributes, and you'd like to display a list
that looks like:
* Male:
* George Bush
* Bill Clinton
* Female:
* Margaret Thatcher
            * Condoleezza Rice
* Unknown:
* Pat Smith
The following snippet of template code would accomplish this dubious task::
{% regroup people by gender as grouped %}
<ul>
{% for group in grouped %}
<li>{{ group.grouper }}
<ul>
{% for item in group.list %}
<li>{{ item }}</li>
{% endfor %}
</ul>
{% endfor %}
</ul>
As you can see, ``{% regroup %}`` populates a variable with a list of
objects with ``grouper`` and ``list`` attributes. ``grouper`` contains the
item that was grouped by; ``list`` contains the list of objects that share
that ``grouper``. In this case, ``grouper`` would be ``Male``, ``Female``
and ``Unknown``, and ``list`` is the list of people with those genders.
    Note that ``{% regroup %}`` does not work when the list to be grouped is not
sorted by the key you are grouping by! This means that if your list of
people was not sorted by gender, you'd need to make sure it is sorted
before using it, i.e.::
{% regroup people|dictsort:"gender" by gender as grouped %}
"""
firstbits = token.contents.split(None, 3)
if len(firstbits) != 4:
raise TemplateSyntaxError, "'regroup' tag takes five arguments"
target = parser.compile_filter(firstbits[1])
if firstbits[2] != 'by':
raise TemplateSyntaxError("second argument to 'regroup' tag must be 'by'")
lastbits_reversed = firstbits[3][::-1].split(None, 2)
if lastbits_reversed[1][::-1] != 'as':
raise TemplateSyntaxError("next-to-last argument to 'regroup' tag must"
" be 'as'")
expression = parser.compile_filter(lastbits_reversed[2][::-1])
var_name = lastbits_reversed[0][::-1]
return RegroupNode(target, expression, var_name)
regroup = register.tag(regroup)
def spaceless(parser, token):
"""
Removes whitespace between HTML tags, including tab and newline characters.
Example usage::
{% spaceless %}
<p>
<a href="foo/">Foo</a>
</p>
{% endspaceless %}
This example would return this HTML::
<p><a href="foo/">Foo</a></p>
Only space between *tags* is normalized -- not space between tags and text.
In this example, the space around ``Hello`` won't be stripped::
{% spaceless %}
<strong>
Hello
</strong>
{% endspaceless %}
"""
nodelist = parser.parse(('endspaceless',))
parser.delete_first_token()
return SpacelessNode(nodelist)
spaceless = register.tag(spaceless)
#@register.tag
def templatetag(parser, token):
"""
Outputs one of the bits used to compose template tags.
Since the template system has no concept of "escaping", to display one of
the bits used in template tags, you must use the ``{% templatetag %}`` tag.
The argument tells which template bit to output:
================== =======
Argument Outputs
================== =======
``openblock`` ``{%``
``closeblock`` ``%}``
``openvariable`` ``{{``
``closevariable`` ``}}``
``openbrace`` ``{``
``closebrace`` ``}``
``opencomment`` ``{#``
``closecomment`` ``#}``
================== =======
"""
bits = token.contents.split()
if len(bits) != 2:
raise TemplateSyntaxError, "'templatetag' statement takes one argument"
tag = bits[1]
if tag not in TemplateTagNode.mapping:
raise TemplateSyntaxError("Invalid templatetag argument: '%s'."
" Must be one of: %s" %
(tag, TemplateTagNode.mapping.keys()))
return TemplateTagNode(tag)
templatetag = register.tag(templatetag)
def url(parser, token):
"""
Returns an absolute URL matching given view with its parameters.
This is a way to define links that aren't tied to a particular URL
configuration::
{% url path.to.some_view arg1,arg2,name1=value1 %}
The first argument is a path to a view. It can be an absolute python path
or just ``app_name.view_name`` without the project name if the view is
located inside the project. Other arguments are comma-separated values
that will be filled in place of positional and keyword arguments in the
URL. All arguments for the URL should be present.
For example if you have a view ``app_name.client`` taking client's id and
the corresponding line in a URLconf looks like this::
('^client/(\d+)/$', 'app_name.client')
and this app's URLconf is included into the project's URLconf under some
path::
('^clients/', include('project_name.app_name.urls'))
then in a template you can create a link for a certain client like this::
{% url app_name.client client.id %}
The URL will look like ``/clients/client/123/``.
"""
bits = token.contents.split(' ', 2)
if len(bits) < 2:
raise TemplateSyntaxError("'%s' takes at least one argument"
" (path to a view)" % bits[0])
args = []
kwargs = {}
if len(bits) > 2:
for arg in bits[2].split(','):
if '=' in arg:
k, v = arg.split('=', 1)
k = k.strip()
kwargs[k] = parser.compile_filter(v)
else:
args.append(parser.compile_filter(arg))
return URLNode(bits[1], args, kwargs)
url = register.tag(url)
#@register.tag
def widthratio(parser, token):
"""
For creating bar charts and such, this tag calculates the ratio of a given
value to a maximum value, and then applies that ratio to a constant.
For example::
<img src='bar.gif' height='10' width='{% widthratio this_value max_value 100 %}' />
        Above, if ``this_value`` is 175 and ``max_value`` is 200, the image in
the above example will be 88 pixels wide (because 175/200 = .875;
.875 * 100 = 87.5 which is rounded up to 88).
"""
bits = token.contents.split()
if len(bits) != 4:
raise TemplateSyntaxError("widthratio takes three arguments")
tag, this_value_expr, max_value_expr, max_width = bits
try:
max_width = int(max_width)
except ValueError:
raise TemplateSyntaxError("widthratio final argument must be an integer")
return WidthRatioNode(parser.compile_filter(this_value_expr),
parser.compile_filter(max_value_expr), max_width)
widthratio = register.tag(widthratio)
#@register.tag
def do_with(parser, token):
"""
Adds a value to the context (inside of this block) for caching and easy
access.
For example::
{% with person.some_sql_method as total %}
{{ total }} object{{ total|pluralize }}
{% endwith %}
"""
bits = list(token.split_contents())
if len(bits) != 4 or bits[2] != "as":
raise TemplateSyntaxError("%r expected format is 'value as name'" %
bits[0])
var = parser.compile_filter(bits[1])
name = bits[3]
nodelist = parser.parse(('endwith',))
parser.delete_first_token()
return WithNode(var, name, nodelist)
do_with = register.tag('with', do_with)
| agpl-3.0 | -7,235,047,295,472,682,000 | 33.910891 | 213 | 0.58155 | false | 4.125292 | false | false | false |
gandrewstone/yadog | PyHtmlGen/js.py | 1 | 4604 | import os.path
import os
from document import *
from module import *
def moduledir():
return os.path.dirname(os.path.abspath(__file__))
hidden = Marker("hidden")
# Modules: ( name, marker, [ (marker,[insert before marker,...] ), (marker,...), ...] )
jsm = Marker("js")
# Modules: ( name, marker, [ (marker,[insert before marker,...] ), (marker,...), ...] )
jsModule = Module("js",jsm,[("head",["<script language='JavaScript'> /* <![CDATA[ */\n",jsm,"//]]>\n</script>\n"]) ])
AnOlderToggleShowImpl = """
function toggleShow(itemId){
var ctnt = document.getElementById(itemId);
curStyle = ctnt.getAttribute("style");
if (curStyle == 'display: none;') {
ctnt.setAttribute("style",ctnt.getAttribute("origstyle"));
}
else {
ctnt.setAttribute("origstyle",ctnt.getAttribute("style"));
ctnt.setAttribute("style","display: none;");
}
}
"""
showHideModule = Module("showhide",hidden, [("js",["""function toggleShow(itemId) {
var ctnt = document.getElementById(itemId);
if ((ctnt.style.display == "none") || (ctnt.style.display == "")) {
if (ctnt.getAttribute("actualdisplay"))
ctnt.style.display = ctnt.getAttribute("actualdisplay");
else
ctnt.style.display = "block";
}
else {
ctnt.setAttribute("actualdisplay",ctnt.style.display);
ctnt.style.display = "none";
}
}
function SwapContent(contentId, toId, hideId){
var ctnt = document.getElementById(contentId);
var hide = document.getElementById(hideId) ;
var tgt = document.getElementById(toId);
kids = tgt.childNodes;
for (var i = 0; i < kids.length; i++) {
hide.appendChild(kids[i]);
}
tgt.appendChild(ctnt);
}
function MoveContent(contentId,toId){
var ctnt = document.getElementById(contentId);
var tgt = document.getElementById(toId);
tgt.appendChild(ctnt);
}
function CopyContent(contentId,toId,remExisting){
var ctnt = document.getElementById(contentId);
var tgt = document.getElementById(toId);
var copy = ctnt.cloneNode(true);
copy.removeAttribute('id');
if (remExisting) while( tgt.hasChildNodes() ) { tgt.removeChild( tgt.lastChild ); }
tgt.appendChild(copy);
}
"""] ),
("style",["#hidden { display:none }\n"]),("body",["<div id='hidden'>",hidden,"</div>"]) ])
# ("style",["#hidden { position:absolute; bottom:0px ; right:0px ; height:1px ; width:1px ; z-index:-10000 ; overflow:hidden; clip:auto }\n"]),("body",["<div id='hidden'>",hidden,"</div>"]) ])
delayLoadModule = Module("delayLoad",None,[("js",["""
function delayLoadImg(imId,href){
var img = document.getElementById(imId);
img.src = href;
}
"""])])
faderModule = Module("delayLoad",None,[("js",[file(moduledir() + os.sep + "fader.js","r").read()])])
""" Example use of styleRow
<tr>
<td>
<div onClick="styleRow(this,'background-color:red')">
New
</div>
</td>
</tr>
"""
styleRowModule = Module("styleRow",None,[("js",["""
function styleRow(elemInRow,newStyle){
var row = elemInRow;
while ((row != document)&&(row.tagName != "TR")) { row = row.parentNode; }
if (row != document) row.setAttribute('style', newStyle);
}
"""])])
newRowModule = Module("newRow",None,[("js",["""
function newRow(anyElemInTable,budId){
var table = anyElemInTable;
while ((table != document)&&(table.tagName != "TABLE")) { table = table.parentNode; }
if (table != document) {
var copy = document.getElementById(budId).cloneNode(true);
copy.removeAttribute('id');
table.appendChild(copy);
}
}
"""])])
""" Example use of makeEditable: note I have to removeAttribute('onClick'), or when you click to edit it will make another.
<form>
<table><tr>
<td onClick="this.removeAttribute('onClick'); makeEditable(this,'textbox1')">
New Hampshire
</td>
</tr>
</table>
<input id='textbox1' name="Start" type="text" value="Start" />
</form>
"""
makeEditableModule = Module("makeEditable",None,[("js",[r"""
function makeEditable(elem,editBoxBudId, newId){
var newEditBox = document.getElementById(editBoxBudId).cloneNode(true);
var data = elem.firstChild.data;
var i=0;
while ((data[i] == ' ')||(data[i] == '\n')) i++; /* Wipe preceding whitespace */
data = data.substring(i,data.length);
newEditBox.setAttribute('value',data);
if (newId != "") newEditBox.setAttribute('id',newId);
newEditBox.setAttribute('name',newId);
elem.replaceChild(newEditBox, elem.firstChild);
newEditBox.focus();
}
"""])])
# styleRow(anyElemInTable,'background-color:blue')
| gpl-3.0 | 6,350,722,387,476,507,000 | 28.139241 | 230 | 0.631842 | false | 3.290922 | false | false | false |
erickeller/edi | edi/commands/lxccommands/importcmd.py | 1 | 3325 | # -*- coding: utf-8 -*-
# Copyright (C) 2016 Matthias Luescher
#
# Authors:
# Matthias Luescher
#
# This file is part of edi.
#
# edi is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# edi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with edi. If not, see <http://www.gnu.org/licenses/>.
import logging
import subprocess
from edi.commands.lxc import Lxc
from edi.commands.imagecommands.imagelxc import Lxc as LxcImageCommand
from edi.lib.shellhelpers import run
from edi.lib.helpers import print_success
class Import(Lxc):
@classmethod
def advertise(cls, subparsers):
help_text = "import an edi image into the LXD image store"
description_text = "Import an edi image into the LXD image store."
parser = subparsers.add_parser(cls._get_short_command_name(),
help=help_text,
description=description_text)
cls._require_config_file(parser)
def run_cli(self, cli_args):
self.run(cli_args.config_file)
def run(self, config_file):
self._setup_parser(config_file)
if self._is_in_image_store():
logging.info(("{0} is already in image store. "
"Delete it to regenerate it."
).format(self._result()))
return self._result()
image = LxcImageCommand().run(config_file)
print("Going to import lxc image into image store.")
self._import_image(image)
print_success("Imported lxc image into image store as {}.".format(self._result()))
return self._result()
def clean(self, config_file):
self._setup_parser(config_file)
if self._is_in_image_store():
logging.info(("Removing '{}' from image store."
).format(self._result()))
self._delete_image()
print_success("Removed {} from image store.".format(self._result()))
def _result(self):
return "{}_{}".format(self.config.get_project_name(),
self._get_command_file_name_prefix())
def _is_in_image_store(self):
cmd = []
cmd.append("lxc")
cmd.append("image")
cmd.append("show")
cmd.append("local:{}".format(self._result()))
result = run(cmd, check=False, stderr=subprocess.PIPE)
return result.returncode == 0
def _import_image(self, image):
cmd = []
cmd.append("lxc")
cmd.append("image")
cmd.append("import")
cmd.append(image)
cmd.append("local:")
cmd.extend(["--alias", self._result()])
run(cmd)
def _delete_image(self):
cmd = []
cmd.append("lxc")
cmd.append("image")
cmd.append("delete")
cmd.append("local:{}".format(self._result()))
run(cmd)
| lgpl-3.0 | -2,792,770,826,113,229,300 | 31.920792 | 90 | 0.603008 | false | 4.040097 | true | false | false |
tmwbook/nhs-honors-chemistry-2013 | Python/Molarity.py | 1 | 1634 | '''
@author: Thomas
'''
# M = moles solute / liters
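# Worked example: 2.0 mol of solute dissolved in 0.5 L of solvent gives M = 2.0 / 0.5 = 4.0 M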
def calculateMolarity():
    molesSolute = float(raw_input('How many moles of the solute do you have? '))
litersSolvent = float(raw_input('How many liters of the solvent do you have? '))
return float(molesSolute) / float(litersSolvent)
def calculateLiters():
molarity = float(raw_input('What is the molarity of the solution? '))
molesSolute = float(raw_input('How many moles are there dissolved in the solute? '))
return molesSolute / molarity
def calculateMoles():
    molarity = float(raw_input('What is the molarity of the solution? '))
litersSolvent = float(raw_input('How many liters of the solvent do you have? '))
molesSolute = molarity * litersSolvent
return molesSolute
def setBool(a):
if a.lower() == 'y':
return True
else:
return False
def typeOfProblem():
molesCheck = raw_input('Do you know the amount of moles in the solution?(y/n) ')
litersCheck = raw_input('Do you know the amount of liters in the solution?(y/n) ')
molesCheck = setBool(molesCheck)
litersCheck = setBool(litersCheck)
if molesCheck and litersCheck:
print "M = " + str(calculateMolarity())
elif molesCheck and not litersCheck:
print str(calculateLiters()) + " L"
else:
print str(calculateMoles()) + " mol"
if __name__ == "__main__":
while True:
typeOfProblem()
option = raw_input('Do you need to solve another problem?(y/n) ')
if option.lower() == "y":
continue
else:
break | mit | 4,177,069,595,224,972,000 | 29.461538 | 88 | 0.623623 | false | 3.536797 | false | false | false |
fundthmcalculus/propresenter-conversion | directoryconversiongui.py | 1 | 3130 | from tkinter import *
from tkinter import ttk
from tkinter import filedialog
from os.path import expanduser
from propresenterconverter import propresenterconverter
class directoryconversiongui:
def __init__(self):
# Create the gui.
self.window = Tk()
self.window.title("Directory Converter")
# Set the variables.
self.inputdirectory = StringVar(value="")
self.outputdirectory = StringVar(value="")
# Add the variables.
self.mainframe = ttk.Frame(self.window, padding="3 3 12 12")
self.mainframe.grid(column=0, row=0, sticky=(N, W, E, S))
self.mainframe.columnconfigure(0, weight=1)
self.mainframe.rowconfigure(0, weight=1)
        # Keep widget references on self; calling .grid() inline would store None instead of the widget.
        self.inputdirbutton = ttk.Button(self.mainframe, text="Input Directory", command=self.inputdirbutton_click)
        self.inputdirbutton.grid(column=1, row=1, sticky=(W, E))
        self.outputdirbutton = ttk.Button(self.mainframe, text="Output Directory", command=self.outputdirbutton_click)
        self.outputdirbutton.grid(column=1, row=2, sticky=(W, E))
        self.processbutton = ttk.Button(self.mainframe, text="Convert!", command=self.processbutton_click)
        self.processbutton.grid(column=1, row=3, sticky=(W, E))
        self.inputdirlabel = ttk.Label(self.mainframe, textvariable=self.inputdirectory)
        self.inputdirlabel.grid(column=2, columnspan=2, row=1, sticky=(W, E))
        self.outputdirlabel = ttk.Label(self.mainframe, textvariable=self.outputdirectory)
        self.outputdirlabel.grid(column=2, columnspan=2, row=2, sticky=(W, E))
# Minimum width for the label.
self.mainframe.columnconfigure(2, minsize=200)
# Options for opening a directory.
self.dir_opt = options = {}
options['initialdir'] = expanduser("~")
options['mustexist'] = False
options['parent'] = self.mainframe
options['title'] = 'Choose Folder'
def inputdirbutton_click(self):
# Show the folder choice dialog.
self.dir_opt['title'] = 'Choose Input Directory'
inputdir = filedialog.askdirectory(**self.dir_opt)
if inputdir is None:
inputdir = ""
self.inputdirectory.set(inputdir)
self.mainframe.update_idletasks()
def outputdirbutton_click(self):
# Show the folder choice dialog.
self.dir_opt['title'] = 'Choose Input Directory'
outputdir = filedialog.askdirectory(**self.dir_opt)
if outputdir is None:
outputdir = ""
self.outputdirectory.set(outputdir)
self.mainframe.update_idletasks()
def processbutton_click(self):
        # Run the conversion with the chosen input/output directories.
ppconv = propresenterconverter(arglist=['-inputdir', self.inputdirectory.get(), '-outputdir',
self.outputdirectory.get()])
ppconv.convert()
return
def show(self):
# Start running the main loop.
self.window.mainloop()
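
# Minimal usage sketch (run directly or after importing the class):
#   gui = directoryconversiongui()
#   gui.show()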
| mit | -5,231,695,585,284,080,000 | 39.128205 | 120 | 0.600958 | false | 4.173333 | false | false | false |
crmccreary/openerp_server | openerp/addons/l10n_be/company.py | 8 | 1901 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv
class res_company(osv.osv):
_inherit = "res.company"
_description = 'Company'
def _get_default_ad(self, addresses):
name = email = phone = city = post_code = address = country_code = ""
for ads in addresses:
if ads.type == 'default':
city = ads.city or ""
post_code = ads.zip or ""
if ads.street:
address = ads.street or ""
if ads.street2:
address += " " + ads.street2
if ads.country_id:
country_code = ads.country_id and ads.country_id.code or ""
name = ads.name or ""
email = ads.email or ""
phone = ads.phone or ""
return name, email, phone, city, post_code, address, country_code
res_company()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,675,750,205,088,862,000 | 41.244444 | 79 | 0.559705 | false | 4.233853 | false | false | false |
amirhmoin/recodev | RecoDev-Prototype/extend_data/extend.py | 1 | 1108 | '''
import sys
sys.path.append('..')
'''
from master_utils import *
import numpy as np
import os
import MySQLdb
from lda import LDA
from sklearn.decomposition import LatentDirichletAllocation
import datetime
import lda_t1
class BugEvent:
def __init__(self, start, end, topic, id, final):
self.start_date = start
self.end_date = end
self.bug_id = id
self.final_fixer = final
class TossingItem:
def __init__(self, time, user):
self.sq_timestamp = time
self.sq_user = user
self.time_passed = 0 #in seconds
def ComputeTime(self, prev_tossing):
time_n = datetime.datetime.fromtimestamp(float(self.sq_timestamp))
time_p = datetime.datetime.fromtimestamp(float(prev_tossing.sq_timestamp))
length = time_n - time_p
self.time_passed = length.total_seconds()
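        # Example: epoch strings '1400000000' (prev) and '1400000060' (this item)
        # yield time_passed = 60.0 seconds.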
def __str__(self):
s = self.sq_timestamp + '##' + self.sq_user + '##' + str(self.time_passed) + '#;;#'
return s
| bsd-3-clause | -4,493,428,429,327,756,300 | 23.622222 | 91 | 0.564079 | false | 3.78157 | false | false | false |
rubenvb/skia | infra/bots/buildstats/make_treemap.py | 11 | 1250 | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates a .tar.gz file containing an HTML treemap displaying the codesize.
Requires docker to be installed.
Example usage:
python make_treemap.py $SKIA_ROOT/out/Release/skottie_tool /tmp/size
"""
import os
import subprocess
import sys
import tempfile
DOCKER_IMAGE = 'gcr.io/skia-public/binary-size:v1'
DOCKER_SCRIPT = '/opt/binary_size/src/run_binary_size_analysis.py'
def main():
input_file = sys.argv[1]
out_dir = sys.argv[2]
input_base = os.path.basename(input_file)
input_dir = os.path.dirname(input_file)
temp_out = tempfile.mkdtemp('treemap')
subprocess.check_call(['docker', 'run', '--volume', '%s:/IN' % input_dir,
'--volume', '%s:/OUT' % temp_out,
DOCKER_IMAGE, DOCKER_SCRIPT,
'--library', '/IN/%s' % input_base,
'--destdir', '/OUT'])
subprocess.check_call(['tar', '--directory=%s' % temp_out, '-zcf',
'%s/%s_tree.tar.gz' % (out_dir, input_base),
'.'])
if __name__ == '__main__':
main()
| bsd-3-clause | 3,144,961,997,505,883,600 | 27.409091 | 77 | 0.5928 | false | 3.443526 | false | false | false |
renatahodovan/fuzzinator | tests/call/test_subprocess_call.py | 2 | 1891 | # Copyright (c) 2016-2018 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
import os
import pytest
import sys
import fuzzinator
from common_call import blinesep, resources_dir
@pytest.mark.parametrize('command, cwd, env, no_exit_code, test, exp', [
('%s %s --print-args {test}' % (sys.executable, os.path.join(resources_dir, 'mock_tool.py')), None, None, None, 'foo', fuzzinator.call.NonIssue({'stdout': b'foo' + blinesep, 'stderr': b'', 'exit_code': 0})),
('%s %s --print-args --exit-code 1 {test}' % (sys.executable, os.path.join(resources_dir, 'mock_tool.py')), None, None, None, 'foo', {'stdout': b'foo' + blinesep, 'stderr': b'', 'exit_code': 1}),
('%s %s --print-args --to-stderr --exit-code 1 {test}' % (sys.executable, os.path.join(resources_dir, 'mock_tool.py')), None, None, None, 'foo', {'stdout': b'', 'stderr': b'foo' + blinesep, 'exit_code': 1}),
('%s %s --print-args --exit-code 1 {test}' % (sys.executable, os.path.join('.', 'mock_tool.py')), resources_dir, None, None, 'foo', {'stdout': b'foo' + blinesep, 'stderr': b'', 'exit_code': 1}),
('%s %s --print-env BAR --print-args --exit-code 1 {test}' % (sys.executable, os.path.join('.', 'mock_tool.py')), resources_dir, '{"BAR": "baz"}', None, 'foo', {'stdout': b'foo' + blinesep + b'baz' + blinesep, 'stderr': b'', 'exit_code': 1}),
('%s %s --print-args --exit-code 0 {test}' % (sys.executable, os.path.join(resources_dir, 'mock_tool.py')), None, None, 'True', 'foo', {'stdout': b'foo' + blinesep, 'stderr': b'', 'exit_code': 0}),
])
def test_subprocess_call(command, cwd, env, no_exit_code, test, exp):
assert fuzzinator.call.SubprocessCall(command, cwd=cwd, env=env, no_exit_code=no_exit_code, test=test) == exp
| bsd-3-clause | 1,759,386,165,031,792,400 | 71.730769 | 246 | 0.638287 | false | 2.904762 | true | false | false |
lehins/django-smartfields | smartfields/processors/video.py | 1 | 1968 | import re
import six
from smartfields.processors.base import ExternalFileProcessor
from smartfields.utils import ProcessingError
from smartfields.processors.mixin import CloudExternalFileProcessorMixin
__all__ = [
'FFMPEGProcessor', 'CloudFFMEGPRocessor'
]
class FFMPEGProcessor(ExternalFileProcessor):
duration_re = re.compile(r'Duration: (?P<hours>\d+):(?P<minutes>\d+):(?P<seconds>\d+)')
progress_re = re.compile(r'time=(?P<hours>\d+):(?P<minutes>\d+):(?P<seconds>\d+)')
error_re = re.compile(r'Invalid data found when processing input')
cmd_template = "ffmpeg -i {input} -y -codec:v {vcodec} -b:v {vbitrate} " \
"-maxrate {maxrate} -bufsize {bufsize} -vf " \
"scale={width}:{height} -threads {threads} -c:a {acodec} {output}"
def stdout_handler(self, line, duration=None):
if duration is None:
duration_time = self.duration_re.search(line)
if duration_time:
duration = self.timedict_to_seconds(duration_time.groupdict())
elif duration != 0:
current_time = self.progress_re.search(line)
if current_time:
seconds = self.timedict_to_seconds(current_time.groupdict())
progress = float(seconds)/duration
progress = progress if progress < 1 else 0.99
self.set_progress(progress)
elif self.error_re.search(line):
raise ProcessingError("Invalid video file or unknown video format.")
return (duration,)
def timedict_to_seconds(self, timedict):
seconds = 0
for key, t in six.iteritems(timedict):
if key == 'seconds':
seconds+= int(t)
elif key == 'minutes':
seconds+= int(t)*60
elif key == 'hours':
seconds+= int(t)*3600
return seconds
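# Illustrative behaviour of the parsing helpers above (sample line and values,
# not taken from the original module): an ffmpeg stderr line such as
# "  Duration: 00:01:30.00, start: 0.000000, bitrate: 128 kb/s" is matched by
# duration_re, and timedict_to_seconds({'hours': '00', 'minutes': '01',
# 'seconds': '30'}) returns 90, which stdout_handler then uses for progress.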
class CloudFFMEGPRocessor(CloudExternalFileProcessorMixin, FFMPEGProcessor):
pass | mit | -6,904,748,000,513,170,000 | 40.020833 | 91 | 0.613313 | false | 3.858824 | false | false | false |
undeadpixel/mallet | scripts/roc_curve.py | 1 | 2248 | #!/usr/bin/env python
import sys, math
# add the parent directory to the import path
sys.path.append("..")
import mallet.viterbi as viterbi
import mallet.input.sequence_parser as seq_parser
import mallet.input.tgf_parser as tgf_parser
import mallet.safe_math as safe_math
STEPS = 100
def frange(start, end, step):
end += step # HACK: prevent floating point errors in end
current = start
while current <= end:
yield current
current += step
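# Illustrative call (hypothetical values): list(frange(0.0, 1.0, 0.5)) yields
# [0.0, 0.5, 1.0, 1.5]; the end padding above means the generator can run one
# step past `end`, which in this script at most adds one extra threshold to
# the ROC sweep.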
def float_floor(value, decimals = 1):
value = value*(10**decimals)
return math.floor(value)/(10**decimals)
def float_ceil(value, decimals = 1):
value = value*(10**decimals)
return math.ceil(value)/(10**decimals)
def evaluate_alignment(alignment):
return (alignment, alignment.state_path.sequence[50] == "*")
def accuracy_metrics(evaluated_alignments, threshold):
tp, tn, fp, fn = (0,0,0,0)
for alignment,is_correct in evaluated_alignments:
if alignment.score >= threshold:
if is_correct:
tp += 1
else:
fp += 1
else:
if is_correct:
fn += 1
else:
tn += 1
return (tp, tn, fp, fn)
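# Worked example (hypothetical data): for evaluated alignments
# [(a1, True), (a2, False)] with a1.score = 0.9 and a2.score = 0.2, a
# threshold of 0.5 gives (tp, tn, fp, fn) == (1, 1, 0, 0).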
def print_roc_data_in_tsv(roc_data):
print "score\ttpr\tfpr\tppv\ttp\ttn\tfp\tfn"
for score,metrics in sorted(roc_data.items()):
print "{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}".format(score, *metrics)
hmm = tgf_parser.parse(sys.argv[1])
sequences = seq_parser.parse(sys.argv[2])
alignments = viterbi.viterbi_all(hmm, sequences)
evaluated_alignments = map(evaluate_alignment, alignments)
max_score = max(alignments, key = lambda align: align.score).score
min_score = min(alignments, key = lambda align: align.score).score
roc_data = {}
step_size = (max_score - min_score)/STEPS
scores_iterator = frange(float_floor(min_score), float_ceil(max_score), step_size)
for score in scores_iterator:
tp, tn, fp, fn = accuracy_metrics(evaluated_alignments, score)
tpr = safe_math.div(float(tp), float(tp+fn))
fpr = safe_math.div(float(fp), float(fp+tn))
ppv = safe_math.div(float(tp), float(tp+fp))
roc_data[score] = (tpr, fpr, ppv, tp, tn, fp, fn)
print_roc_data_in_tsv(roc_data)
| mit | -7,365,806,780,185,346,000 | 27.455696 | 102 | 0.63968 | false | 3.005348 | false | false | false |
tatemura/strudel | exp/cluster/bin/tokumx.py | 1 | 1624 | #!/usr/bin/python
#
# python script that deploys and runs tokumx
# arg: hostname (start|stop)
#
# NOTE
# TokuMX will not run with transparent huge pages enabled.
# To disable:
# (echo never > /sys/kernel/mm/transparent_hugepage/enabled)
import subprocess
import os
import sys
import json
import shutil
import re
import expsystem as es
mongos_port = 27017
class TokumxServer:
def __init__(self, param, host, script_home):
self.param = param
self.host = host
self.dir = es.DataRoot(param).dir('tokumx-' + es.getuser())
self.mongobin = param.get('tokumx_bin')
if (self.mongobin is None or self.mongobin == ''):
self.mongobin = os.path.join(os.path.dirname(script_home),
'tokumx', 'bin')
def install(self):
es.cleandir(self.dir)
def start(self, install = True):
if (install):
self.install()
slog = es.SysLog('tokumx')
slog.rotate(10)
subprocess.check_call("{0} --fork --dbpath {1} --logpath {2}".format(
os.path.join(self.mongobin, 'mongod'), self.dir, slog.getpath()),
shell=True)
def stop(self):
subprocess.call([os.path.join(self.mongobin, 'mongod'), '--shutdown',
'--dbpath', self.dir])
data = json.load(sys.stdin)
if len(sys.argv) > 2:
cmd = sys.argv[2]
elif len(sys.argv) > 1:
cmd = sys.argv[1]
else:
cmd = 'start'
sd = es.SysDirs()
server = TokumxServer(data, host = sys.argv[1], script_home = sd.script_home)
if (cmd == 'stop'):
server.stop()
else:
server.start()
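# Illustrative invocation (hypothetical host name and parameter file): the
# script reads its JSON parameters from stdin, takes the host as argv[1] and
# an optional command as argv[2], e.g.
#   cat params.json | ./tokumx.py node01 start
#   cat params.json | ./tokumx.py node01 stop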
| apache-2.0 | 8,159,668,917,509,453,000 | 25.622951 | 93 | 0.592365 | false | 3.178082 | false | false | false |
jteehan/cfme_tests | cfme/tests/configure/test_access_control.py | 1 | 29148 | # -*- coding: utf-8 -*-
import fauxfactory
import pytest
import traceback
from cfme.configure.access_control import User, Group, Role, Tenant, Project
from utils import error
import cfme.fixtures.pytest_selenium as sel
from cfme import test_requirements
from cfme.base.credential import Credential
from cfme.automate.explorer import AutomateExplorer # NOQA
from cfme.base import Server
from cfme.control.explorer import ControlExplorer # NOQA
from cfme.exceptions import OptionNotAvailable
from cfme.common.provider import base_types
from cfme.infrastructure import virtual_machines as vms
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.services.myservice import MyService
from cfme.web_ui import flash, Table, InfoBlock, toolbar as tb
from cfme.configure import tasks
from fixtures.provider import setup_one_or_skip
from utils.appliance.implementations.ui import navigate_to
from utils.blockers import BZ
from utils.log import logger
from utils.providers import ProviderFilter
from utils.update import update
from utils import version
records_table = Table("//div[@id='main_div']//table")
usergrp = Group(description='EvmGroup-user')
group_table = Table("//div[@id='main_div']//table")
pytestmark = test_requirements.rbac
@pytest.fixture(scope='module')
def a_provider(request):
prov_filter = ProviderFilter(classes=[VMwareProvider])
return setup_one_or_skip(request, filters=[prov_filter])
def new_credential():
return Credential(principal='uid' + fauxfactory.gen_alphanumeric(), secret='redhat')
def new_user(group=usergrp):
return User(
name='user' + fauxfactory.gen_alphanumeric(),
credential=new_credential(),
email='[email protected]',
group=group,
cost_center='Workload',
value_assign='Database')
def new_group(role='EvmRole-approver'):
return Group(
description='grp' + fauxfactory.gen_alphanumeric(),
role=role)
def new_role():
return Role(
name='rol' + fauxfactory.gen_alphanumeric(),
vm_restriction='None')
def get_tag():
return InfoBlock('Smart Management', 'My Company Tags').text
@pytest.fixture(scope='function')
def check_item_visibility(tag):
def _check_item_visibility(item, user_restricted):
category_name = ' '.join((tag.category.display_name, '*'))
item.edit_tags(category_name, tag.display_name)
with user_restricted:
assert item.exists
item.remove_tag(category_name, tag.display_name)
with user_restricted:
assert not item.exists
return _check_item_visibility
# User test cases
@pytest.mark.tier(2)
def test_user_crud():
user = new_user()
user.create()
with update(user):
user.name = user.name + "edited"
copied_user = user.copy()
copied_user.delete()
user.delete()
# @pytest.mark.meta(blockers=[1035399]) # work around instead of skip
@pytest.mark.tier(2)
def test_user_login():
user = new_user()
user.create()
try:
with user:
navigate_to(Server, 'Dashboard')
finally:
user.appliance.server.login_admin()
@pytest.mark.tier(3)
def test_user_duplicate_name(appliance):
region = appliance.server_region
nu = new_user()
nu.create()
msg = version.pick({
version.LOWEST: "Userid has already been taken",
'5.8': "Userid is not unique within region {}".format(region)
})
with error.expected(msg):
nu.create()
group_user = Group("EvmGroup-user")
@pytest.mark.tier(3)
def test_username_required_error_validation():
user = User(
name="",
credential=new_credential(),
email='[email protected]',
group=group_user)
with error.expected("Name can't be blank"):
user.create()
@pytest.mark.tier(3)
def test_userid_required_error_validation():
user = User(
name='user' + fauxfactory.gen_alphanumeric(),
credential=Credential(principal='', secret='redhat'),
email='[email protected]',
group=group_user)
with error.expected("Userid can't be blank"):
user.create()
@pytest.mark.tier(3)
def test_user_password_required_error_validation():
user = User(
name='user' + fauxfactory.gen_alphanumeric(),
credential=Credential(principal='uid' + fauxfactory.gen_alphanumeric(), secret=None),
email='[email protected]',
group=group_user)
if version.current_version() < "5.5":
check = "Password_digest can't be blank"
else:
check = "Password can't be blank"
with error.expected(check):
user.create()
@pytest.mark.tier(3)
def test_user_group_error_validation():
user = User(
name='user' + fauxfactory.gen_alphanumeric(),
credential=new_credential(),
email='[email protected]',
group='')
with error.expected("A User must be assigned to a Group"):
user.create()
@pytest.mark.tier(3)
def test_user_email_error_validation():
user = User(
name='user' + fauxfactory.gen_alphanumeric(),
credential=new_credential(),
email='xyzdhat.com',
group=group_user)
with error.expected("Email must be a valid email address"):
user.create()
@pytest.mark.tier(2)
def test_user_edit_tag():
user = new_user()
user.create()
user.edit_tags("Cost Center *", "Cost Center 001")
assert get_tag() == "Cost Center: Cost Center 001", "User edit tag failed"
user.delete()
@pytest.mark.tier(3)
def test_user_remove_tag():
user = new_user()
user.create()
user.edit_tags("Department", "Engineering")
user.remove_tag("Department", "Engineering")
navigate_to(user, 'Details')
assert get_tag() != "Department: Engineering", "Remove User tag failed"
user.delete()
@pytest.mark.tier(3)
def test_delete_default_user():
"""Test for deleting default user Administrator.
Steps:
* Login as Administrator user
* Try deleting the user
"""
user = User(name='Administrator')
navigate_to(User, 'All')
column = version.pick({version.LOWEST: "Name",
"5.4": "Full Name"})
row = records_table.find_row_by_cells({column: user.name})
sel.check(sel.element(".//input[@type='checkbox']", root=row[0]))
tb.select('Configuration', 'Delete selected Users', invokes_alert=True)
sel.handle_alert()
flash.assert_message_match('Default EVM User "{}" cannot be deleted'.format(user.name))
@pytest.mark.tier(3)
@pytest.mark.meta(automates=[BZ(1090877)])
@pytest.mark.meta(blockers=[BZ(1408479)], forced_streams=["5.7", "upstream"])
@pytest.mark.uncollectif(lambda: version.current_version() >= "5.7")
def test_current_user_login_delete(request):
"""Test for deleting current user login.
Steps:
* Login as Admin user
* Create a new user
* Login with the new user
* Try deleting the user
"""
group_user = Group("EvmGroup-super_administrator")
user = User(
name='user' + fauxfactory.gen_alphanumeric(),
credential=new_credential(),
email='[email protected]',
group=group_user)
user.create()
request.addfinalizer(user.delete)
request.addfinalizer(user.appliance.server.login_admin)
with user:
if version.current_version() >= '5.7':
navigate_to(user, 'Details')
menu_item = ('Configuration', 'Delete this User')
assert tb.exists(*menu_item) and tb.is_greyed(*menu_item), "Delete User is not dimmed"
else:
with error.expected("Current EVM User \"{}\" cannot be deleted".format(user.name)):
user.delete()
@pytest.mark.tier(3)
def test_tagvis_user(user_restricted, check_item_visibility):
""" Tests if group honour tag visibility feature
Prerequirement:
Catalog, tag, role, group and restricted user should be created
Steps:
1. As admin add tag to group
2. Login as restricted user, group is visible for user
3. As admin remove tag from group
4. Login as restricted user, group is not visible for user
"""
check_item_visibility(user_restricted, user_restricted)
# Group test cases
@pytest.mark.tier(2)
def test_group_crud():
group = new_group()
group.create()
with update(group):
group.description = group.description + "edited"
group.delete()
@pytest.mark.tier(2)
def test_group_crud_with_tag(a_provider, category, tag):
"""Test for verifying group create with tag defined
Steps:
* Login as Admin user
* Navigate to add group page
* Fill all fields
* Set tag
* Save group
"""
group = Group(
description='grp{}'.format(fauxfactory.gen_alphanumeric()),
role='EvmRole-approver',
tag=[category.display_name, tag.display_name],
host_cluster=[a_provider.data['name']],
vm_template=[a_provider.data['name'], a_provider.data['datacenters'][0],
'Discovered virtual machine']
)
group.create()
with update(group):
group.tag = [tag.category.display_name, tag.display_name]
group.host_cluster = [a_provider.data['name']]
group.vm_template = [a_provider.data['name'], a_provider.data['datacenters'][0],
'Discovered virtual machine']
group.delete()
@pytest.mark.tier(3)
def test_group_duplicate_name(appliance):
region = appliance.server_region
group = new_group()
group.create()
msg = version.pick({
version.LOWEST: "Description has already been taken",
'5.8': "Description is not unique within region {}".format(region)
})
with error.expected(msg):
group.create()
@pytest.mark.tier(2)
def test_group_edit_tag():
group = new_group()
group.create()
group.edit_tags("Cost Center *", "Cost Center 001")
assert get_tag() == "Cost Center: Cost Center 001", "Group edit tag failed"
group.delete()
@pytest.mark.tier(2)
def test_group_remove_tag():
group = new_group()
group.create()
navigate_to(group, 'Edit')
group.edit_tags("Department", "Engineering")
group.remove_tag("Department", "Engineering")
assert get_tag() != "Department: Engineering", "Remove Group tag failed"
group.delete()
@pytest.mark.tier(3)
def test_group_description_required_error_validation():
error_text = "Description can't be blank"
group = Group(description=None, role='EvmRole-approver')
with error.expected(error_text):
group.create()
flash.dismiss()
@pytest.mark.tier(3)
def test_delete_default_group():
flash_msg = "EVM Group \"{}\": Error during delete: A read only group cannot be deleted."
group = Group(description='EvmGroup-administrator')
view = navigate_to(Group, 'All')
row = group_table.find_row_by_cells({'Name': group.description})
sel.check(sel.element(".//input[@type='checkbox']", root=row[0]))
view.configuration.item_select('Delete selected Groups', handle_alert=True)
view.flash.assert_message(flash_msg.format(group.description))
@pytest.mark.tier(3)
def test_delete_group_with_assigned_user():
flash_msg = version.pick({
'5.6': ("EVM Group \"{}\": Error during delete: Still has users assigned"),
'5.5': ("EVM Group \"{}\": Error during \'destroy\': Still has users assigned")})
group = new_group()
group.create()
user = new_user(group=group)
user.create()
with error.expected(flash_msg.format(group.description)):
group.delete()
@pytest.mark.tier(3)
def test_edit_default_group():
flash_msg = 'Read Only EVM Group "{}" can not be edited'
group = Group(description='EvmGroup-approver')
navigate_to(Group, 'All')
row = group_table.find_row_by_cells({'Name': group.description})
sel.check(sel.element(".//input[@type='checkbox']", root=row[0]))
tb.select('Configuration', 'Edit the selected Group')
flash.assert_message_match(flash_msg.format(group.description))
@pytest.mark.tier(3)
def test_edit_sequence_usergroups(request):
"""Test for editing the sequence of user groups for LDAP lookup.
Steps:
* Login as Administrator user
* create a new group
* Edit the sequence of the new group
* Verify the changed sequence
"""
group = new_group()
group.create()
request.addfinalizer(group.delete)
view = navigate_to(Group, 'All')
row = view.table.row(name=group.description)
original_sequence = row.sequence.text
group.set_group_order(group.description)
row = view.table.row(name=group.description)
changed_sequence = row.sequence.text
assert original_sequence != changed_sequence, "Edit Sequence Failed"
@pytest.mark.tier(3)
def test_tagvis_group(user_restricted, group_with_tag, check_item_visibility):
""" Tests if group honour tag visibility feature
Prerequirement:
Catalog, tag, role, group and restricted user should be created
Steps:
1. As admin add tag to group
2. Login as restricted user, group is visible for user
3. As admin remove tag from group
4. Login as restricted user, group is not visible for user
"""
check_item_visibility(group_with_tag, user_restricted)
# Role test cases
@pytest.mark.tier(2)
def test_role_crud():
role = new_role()
role.create()
with update(role):
role.name = role.name + "edited"
copied_role = role.copy()
copied_role.delete()
role.delete()
@pytest.mark.tier(3)
def test_rolename_required_error_validation():
role = Role(
name=None,
vm_restriction='Only User Owned')
with error.expected("Name can't be blank"):
role.create()
@pytest.mark.tier(3)
def test_rolename_duplicate_validation():
role = new_role()
role.create()
with error.expected("Name has already been taken"):
role.create()
@pytest.mark.tier(3)
def test_delete_default_roles():
flash_msg = version.pick({
'5.6': ("Role \"{}\": Error during delete: Cannot delete record "
"because of dependent entitlements"),
'5.5': ("Role \"{}\": Error during \'destroy\': Cannot delete record "
"because of dependent miq_groups")})
role = Role(name='EvmRole-approver')
with error.expected(flash_msg.format(role.name)):
role.delete()
@pytest.mark.tier(3)
def test_edit_default_roles():
role = Role(name='EvmRole-auditor')
navigate_to(role, 'Edit')
flash.assert_message_match("Read Only Role \"{}\" can not be edited" .format(role.name))
@pytest.mark.tier(3)
def test_delete_roles_with_assigned_group():
flash_msg = version.pick({
'5.6': ("Role \"{}\": Error during delete: Cannot delete record "
"because of dependent entitlements"),
'5.5': ("Role \"{}\": Error during \'destroy\': Cannot delete record "
"because of dependent miq_groups")})
role = new_role()
role.create()
group = new_group(role=role.name)
group.create()
with error.expected(flash_msg.format(role.name)):
role.delete()
@pytest.mark.tier(3)
def test_assign_user_to_new_group():
role = new_role() # call function to get role
role.create()
group = new_group(role=role.name)
group.create()
user = new_user(group=group)
user.create()
def _test_vm_provision():
logger.info("Checking for provision access")
navigate_to(vms.Vm, 'VMsOnly')
vms.lcl_btn("Provision VMs")
def _test_vm_power_on():
"""Ensures power button is shown for a VM"""
logger.info("Checking for power button")
vm_name = vms.Vm.get_first_vm_title()
logger.debug("VM " + vm_name + " selected")
if not vms.is_pwr_option_visible(vm_name, option=vms.Vm.POWER_ON):
raise OptionNotAvailable("Power button does not exist")
def _test_vm_removal():
logger.info("Testing for VM removal permission")
vm_name = vms.get_first_vm()
logger.debug("VM " + vm_name + " selected")
vms.remove(vm_name, cancel=True)
@pytest.mark.tier(3)
@pytest.mark.parametrize(
'product_features, action',
[(
{version.LOWEST: [['Everything', 'Infrastructure', 'Virtual Machines', 'Accordions'],
['Everything', 'Access Rules for all Virtual Machines', 'VM Access Rules', 'Modify',
'Provision VMs']],
'5.6': [['Everything', 'Compute', 'Infrastructure', 'Virtual Machines', 'Accordions'],
['Everything', 'Access Rules for all Virtual Machines', 'VM Access Rules', 'Modify',
'Provision VMs']]},
_test_vm_provision)])
def test_permission_edit(appliance, request, product_features, action):
"""
Ensures that changes in permissions are enforced on next login
"""
product_features = version.pick(product_features)
request.addfinalizer(appliance.server.login_admin)
role_name = fauxfactory.gen_alphanumeric()
role = Role(name=role_name,
vm_restriction=None,
product_features=[(['Everything'], False)] + # role_features
[(k, True) for k in product_features])
role.create()
group = new_group(role=role.name)
group.create()
user = new_user(group=group)
user.create()
with user:
try:
action()
except Exception:
pytest.fail('Incorrect permissions set')
appliance.server.login_admin()
role.update({'product_features': [(['Everything'], True)] +
[(k, False) for k in product_features]
})
with user:
try:
with error.expected(Exception):
action()
except error.UnexpectedSuccessException:
pytest.fail('Permissions have not been updated')
def _mk_role(name=None, vm_restriction=None, product_features=None):
"""Create a thunk that returns a Role object to be used for perm
testing. name=None will generate a random name
"""
name = name or fauxfactory.gen_alphanumeric()
return lambda: Role(name=name,
vm_restriction=vm_restriction,
product_features=product_features)
def _go_to(cls, dest='All'):
"""Create a thunk that navigates to the given destination"""
return lambda: navigate_to(cls, dest)
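# Illustrative use of the two thunk helpers above (arguments are arbitrary):
#   make_role = _mk_role(vm_restriction='Only User Owned')
#   role = make_role()      # the Role object is only built when the thunk is called
#   _go_to(MyService)()     # navigates to the MyService 'All' destination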
cat_name = "Settings"
@pytest.mark.tier(3)
@pytest.mark.parametrize(
'role,allowed_actions,disallowed_actions',
[[_mk_role(product_features=[[['Everything'], False], # minimal permission
[['Everything', cat_name, 'Tasks'], True]]),
{'tasks': lambda: sel.click(tasks.buttons.default)}, # can only access one thing
{
'my services': _go_to(MyService),
'chargeback': _go_to(Server, 'Chargeback'),
'clouds providers': _go_to(base_types()['cloud']),
'infrastructure providers': _go_to(base_types()['infra']),
'control explorer': _go_to(Server, 'ControlExplorer'),
'automate explorer': _go_to(Server, 'AutomateExplorer')}],
[_mk_role(product_features=[[['Everything'], True]]), # full permissions
{
'my services': _go_to(MyService),
'chargeback': _go_to(Server, 'Chargeback'),
'clouds providers': _go_to(base_types()['cloud']),
'infrastructure providers': _go_to(base_types()['infra']),
'control explorer': _go_to(Server, 'ControlExplorer'),
'automate explorer': _go_to(Server, 'AutomateExplorer')},
{}]])
@pytest.mark.meta(blockers=[1262759])
def test_permissions(appliance, role, allowed_actions, disallowed_actions):
# create a user and role
role = role() # call function to get role
role.create()
group = new_group(role=role.name)
group.create()
user = new_user(group=group)
user.create()
fails = {}
try:
with user:
appliance.server.login_admin()
for name, action_thunk in allowed_actions.items():
try:
action_thunk()
except Exception:
fails[name] = "{}: {}".format(name, traceback.format_exc())
for name, action_thunk in disallowed_actions.items():
try:
with error.expected(Exception):
action_thunk()
except error.UnexpectedSuccessException:
fails[name] = "{}: {}".format(name, traceback.format_exc())
if fails:
message = ''
for failure in fails.values():
message = "{}\n\n{}".format(message, failure)
raise Exception(message)
finally:
appliance.server.login_admin()
def single_task_permission_test(appliance, product_features, actions):
"""Tests that action succeeds when product_features are enabled, and
fail when everything but product_features are enabled"""
test_permissions(appliance, _mk_role(name=fauxfactory.gen_alphanumeric(),
product_features=[(['Everything'], False)] +
[(f, True) for f in product_features]),
actions,
{})
test_permissions(appliance, _mk_role(name=fauxfactory.gen_alphanumeric(),
product_features=[(['Everything'], True)] +
[(f, False) for f in product_features]),
{},
actions)
@pytest.mark.tier(3)
@pytest.mark.meta(blockers=[1262764])
def test_permissions_role_crud(appliance):
single_task_permission_test(appliance,
[['Everything', cat_name, 'Configuration'],
['Everything', 'Services', 'Catalogs Explorer']],
{'Role CRUD': test_role_crud})
@pytest.mark.tier(3)
def test_permissions_vm_provisioning(appliance):
features = version.pick({
version.LOWEST: [
['Everything', 'Infrastructure', 'Virtual Machines', 'Accordions'],
['Everything', 'Access Rules for all Virtual Machines', 'VM Access Rules', 'Modify',
'Provision VMs']
],
'5.6': [
['Everything', 'Compute', 'Infrastructure', 'Virtual Machines', 'Accordions'],
['Everything', 'Access Rules for all Virtual Machines', 'VM Access Rules', 'Modify',
'Provision VMs']
]})
single_task_permission_test(
appliance,
features,
{'Provision VM': _test_vm_provision}
)
# This test is disabled until it has been rewritten
# def test_permissions_vm_power_on_access(appliance):
# # Ensure VMs exist
# if not vms.get_number_of_vms():
# logger.debug("Setting up providers")
# infra_provider
# logger.debug("Providers setup")
# single_task_permission_test(
# appliance,
# [
# ['Infrastructure', 'Virtual Machines', 'Accordions'],
# ['Infrastructure', 'Virtual Machines', 'VM Access Rules', 'Operate', 'Power On']
# ],
# {'VM Power On': _test_vm_power_on}
# )
# This test is disabled until it has been rewritten
# def test_permissions_vm_remove(appliance):
# # Ensure VMs exist
# if not vms.get_number_of_vms():
# logger.debug("Setting up providers")
# setup_infrastructure_providers()
# logger.debug("Providers setup")
# single_task_permission_test(
# appliance,
# [
# ['Infrastructure', 'Virtual Machines', 'Accordions'],
# ['Infrastructure', 'Virtual Machines', 'VM Access Rules', 'Modify', 'Remove']
# ],
# {'Remove VM': _test_vm_removal}
# )
# commenting this out, there is validation around the 'no group selected'and we have a test for it
# @pytest.mark.meta(blockers=[1154112])
# def test_user_add_button_should_be_disabled_without_group(soft_assert):
# from cfme.web_ui import fill, form_buttons
# navigate_to(User, 'Add')
# pw = fauxfactory.gen_alphanumeric()
# fill(User.user_form, {
# "name_txt": fauxfactory.gen_alphanumeric(),
# "userid_txt": fauxfactory.gen_alphanumeric(),
# "password_txt": pw,
# "password_verify_txt": pw,
# "email_txt": "[email protected]"
# })
# assert not sel.is_displayed(form_buttons.add), "The Add button should not be displayed!"
@pytest.mark.tier(2)
def test_user_change_password(appliance, request):
user = User(
name="user {}".format(fauxfactory.gen_alphanumeric()),
credential=Credential(
principal="user_principal_{}".format(fauxfactory.gen_alphanumeric()),
secret="very_secret",
verify_secret="very_secret"
),
email="[email protected]",
group=usergrp,
)
user.create()
request.addfinalizer(user.delete)
request.addfinalizer(appliance.server.login_admin)
with user:
appliance.server.logout()
appliance.server.login(user)
assert appliance.server.current_full_name() == user.name
appliance.server.login_admin()
with update(user):
user.credential = Credential(
principal=user.credential.principal,
secret="another_very_secret",
verify_secret="another_very_secret",
)
with user:
appliance.server.logout()
appliance.server.login(user)
assert appliance.server.current_full_name() == user.name
# Tenant/Project test cases
@pytest.mark.tier(3)
def test_superadmin_tenant_crud(request):
"""Test suppose to verify CRUD operations for CFME tenants
Prerequisities:
* This test is not depending on any other test and can be executed against fresh appliance.
Steps:
* Create tenant
* Update description of tenant
* Update name of tenant
* Delete tenant
"""
tenant = Tenant(
name='tenant1' + fauxfactory.gen_alphanumeric(),
description='tenant1 description')
@request.addfinalizer
def _delete_tenant():
if tenant.exists:
tenant.delete()
tenant.create()
with update(tenant):
tenant.description = tenant.description + "edited"
with update(tenant):
tenant.name = tenant.name + "edited"
tenant.delete()
@pytest.mark.tier(3)
@pytest.mark.meta(blockers=[BZ(1387088, forced_streams=['5.7', 'upstream'])])
def test_superadmin_tenant_project_crud(request):
"""Test suppose to verify CRUD operations for CFME projects
Prerequisities:
* This test is not depending on any other test and can be executed against fresh appliance.
Steps:
* Create tenant
* Create project as child to tenant
* Update description of project
* Update name of project
* Delete project
* Delete tenant
"""
tenant = Tenant(
name='tenant1' + fauxfactory.gen_alphanumeric(),
description='tenant1 description')
project = Project(
name='project1' + fauxfactory.gen_alphanumeric(),
description='project1 description',
parent_tenant=tenant)
@request.addfinalizer
def _delete_tenant_and_project():
for item in [project, tenant]:
if item.exists:
item.delete()
tenant.create()
project.create()
with update(project):
project.description = project.description + "edited"
with update(project):
project.name = project.name + "edited"
project.delete()
tenant.delete()
@pytest.mark.tier(3)
@pytest.mark.parametrize('number_of_childrens', [5])
def test_superadmin_child_tenant_crud(request, number_of_childrens):
"""Test CRUD operations for CFME child tenants, where several levels of tenants are created.
Prerequisites:
* This test does not depend on any other test and can be executed against a fresh appliance.
Steps:
* Create 5 tenants where the next tenant is always child to the previous one
* Update description of tenant(N-1)_* in the tree
* Update name of tenant(N-1)_*
* Delete all created tenants in reversed order
"""
tenant = None
tenant_list = []
@request.addfinalizer
def _delete_tenants():
# reversed because we need to go from the last one
for tenant in reversed(tenant_list):
if tenant.exists:
tenant.delete()
for i in range(1, number_of_childrens + 1):
new_tenant = Tenant(
name="tenant{}_{}".format(i, fauxfactory.gen_alpha(4)),
description=fauxfactory.gen_alphanumeric(16),
parent_tenant=tenant)
tenant_list.append(new_tenant)
new_tenant.create()
tenant = new_tenant
tenant_update = tenant.parent_tenant
with update(tenant_update):
tenant_update.description = tenant_update.description + "edited"
with update(tenant_update):
tenant_update.name = tenant_update.name + "edited"
for tenant_item in reversed(tenant_list):
tenant_item.delete()
assert not tenant_item.exists
| gpl-2.0 | -3,983,837,271,983,107,600 | 32.273973 | 99 | 0.628619 | false | 3.800261 | true | false | false |
Nexenta/lfs | setup.py | 1 | 1495 | #!/usr/bin/python
# Copyright 2011-2012 Nexenta Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
from swift_lfs import __version__ as version
name = 'swift_lfs'
setup(
name=name,
version=version,
description='Swift LFS middleware',
license='Apache License (2.0)',
author='Nexenta Systems Inc',
author_email='[email protected]',
url='https://github.com/Nexenta/lfs',
packages=find_packages(exclude=['test_lfs']),
test_suite='nose.collector',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.6',
'Environment :: No Input/Output (Daemon)',
],
requires=['swift(>=1.4.7)'],
entry_points={
'paste.filter_factory': [
'swift_lfs=swift_lfs.lfs:filter_factory',
],
},
)
| apache-2.0 | -4,710,356,204,277,387,000 | 30.145833 | 69 | 0.665552 | false | 3.813776 | false | false | false |
flomotlik/formica | tests/unit/test_s3.py | 1 | 2780 | from formica.s3 import temporary_bucket
import pytest
from .constants import STACK
@pytest.fixture
def bucket(mocker, boto_resource, boto_client):
return boto_resource.return_value.Bucket
STRING_BODY = "string"
# MD5 hash of body
STRING_KEY = "b45cffe084dd3d20d928bee85e7b0f21"
BINARY_BODY = "binary".encode()
BINARY_KEY = "9d7183f16acce70658f686ae7f1a4d20"
BUCKET_NAME = "formica-deploy-88dec80484e3155b2c8cf023b635fb31"
FILE_NAME = "testfile"
FILE_BODY = "file-body"
FILE_KEY = "de858a1b070b29a579e2d8861b53ad20"
def test_s3_bucket_context(mocker, bucket, uuid4, boto_client):
bucket.return_value.objects.all.return_value = [mocker.Mock(key=STRING_KEY), mocker.Mock(key=BINARY_KEY)]
boto_client.return_value.meta.region_name = "eu-central-1"
boto_client.return_value.get_caller_identity.return_value = {'Account': '1234'}
mock_open = mocker.mock_open(read_data=FILE_BODY.encode())
mocker.patch('formica.s3.open', mock_open)
with temporary_bucket(seed=STACK) as temp_bucket:
string_return = temp_bucket.add(STRING_BODY)
binary_return = temp_bucket.add(BINARY_BODY)
file_return = temp_bucket.add_file(FILE_NAME)
temp_bucket.upload()
bucket_name = temp_bucket.name
assert string_return == STRING_KEY
assert binary_return == BINARY_KEY
assert file_return == FILE_KEY
assert bucket_name == BUCKET_NAME
bucket.assert_called_once_with(BUCKET_NAME)
assert bucket.call_count == 1
assert mock_open.call_count == 2
location_parameters = {'CreateBucketConfiguration': dict(LocationConstraint='eu-central-1')}
calls = [mocker.call(Body=STRING_BODY.encode(), Key=STRING_KEY), mocker.call(Body=BINARY_BODY, Key=BINARY_KEY), mocker.call(Body=mock_open(), Key=FILE_KEY)]
bucket.return_value.create.assert_called_once_with(**location_parameters)
bucket.return_value.put_object.assert_has_calls(calls)
assert bucket.return_value.put_object.call_count == 3
bucket.return_value.delete_objects.assert_called_once_with(
Delete={'Objects': [{'Key': STRING_KEY}, {'Key': BINARY_KEY}]})
bucket.return_value.delete.assert_called_once_with()
def test_does_not_delete_objects_if_empty(bucket):
bucket.return_value.objects.all.return_value = []
with temporary_bucket(seed=STACK):
pass
bucket.return_value.delete_objects.assert_not_called()
def test_does_not_use_s3_api_when_planning(bucket):
bucket.return_value.objects.all.return_value = []
with temporary_bucket(seed=STACK) as temp_bucket:
temp_bucket.add(STRING_BODY)
temp_bucket.add(BINARY_BODY)
bucket.return_value.create.assert_not_called()
bucket.return_value.put_object.assert_not_called()
bucket.return_value.delete_objects.assert_not_called()
| mit | -8,662,619,227,889,192,000 | 37.611111 | 160 | 0.721942 | false | 3.169897 | true | false | false |
ryaninhust/Press-Start-Button | tools/cut_edge.py | 1 | 2359 | #!/usr/bin/env python
#encoding: utf-8
import numpy as np
from PIL import Image
def create_data(file):
data_list = []
with open(file, 'r') as f:
for line in f:
line = line.rstrip('\n').split(' ')
feature_dict = {}
for l in line[1:]:
ls = l.split(':')
feature_dict[ls[0]] = ls[1]
data_list.append([line[0], feature_dict])
return data_list
def find_m_index(n):
"""find matrix index giving feature index"""
return (n - 1) / 105, (n - 1) % 105
def find_f_index(x, col):
"""find feature index giving matrix index"""
return x[0] * col + x[1] + 1
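# Quick sanity check of the mapping above (illustrative, assumes the fixed
# 105-column feature layout used in this script):
#   find_m_index(1)   == (0, 0)
#   find_m_index(106) == (1, 0)
#   find_f_index((1, 0), 105) == 106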
def cut_blank(image, filename):
feature = image[1]
# find matrix index and remove noise
matrix_index = {find_m_index(int(f)):float(feature[f]) for f in feature
if float(feature[f]) > 0.35}
if matrix_index:
row_index = [m[0] for m in matrix_index]
col_index = [m[1] for m in matrix_index]
matrix_cut = {(m[0] - min(row_index),m[1] - min(col_index)):matrix_index[m]
for m in matrix_index}
col_range = max(col_index) - min(col_index) + 1
row_range = max(row_index) - min(row_index) + 1
create_image(filename, matrix_cut, row_range, col_range)
else:
create_image(filename, matrix_index, 60, 60)
def create_image(filename, matrix_index, nrow, ncol, normalize = False, t = 0):
matrix_init = np.zeros((nrow, ncol))
for i in matrix_index:
if normalize:
if float(matrix_index[i]) > t:
matrix_init[i[0]][i[1]] = 255
else:
matrix_init[i[0]][i[1]] = float(matrix_index[i]) * 255
im = Image.fromarray(matrix_init)
image_name = 'image/' + filename + '.jpg'
im.convert('RGB').save(image_name)
def image_preprocessing(image_data, dir_name):
#image_valid = [image for image in image_data if len(image_data[1])>0]
for idx, image in enumerate(image_data):
filename = dir_name + str(idx) + '_' + str(image[0])
print filename
cut_blank(image, filename)
if __name__ == "__main__":
image_train = create_data('ml14fall_train.dat')
image_test = create_data('ml14fall_test1_no_answer.dat')
image_preprocessing(image_train, 'train/')
image_preprocessing(image_test, 'test/')
| mit | 7,847,049,955,560,052,000 | 32.7 | 83 | 0.574396 | false | 3.222678 | false | false | false |
artofhuman/django-clever-pages | page/views.py | 1 | 1468 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import get_object_or_404
from django.views.generic.detail import DetailView
from .models import Page
from meta.views import MetadataMixin
class MetaTagsMexin(MetadataMixin):
""" Mixin for show meta tegs from django-meta """
def get_meta_description(self, context):
return self.page.meta_description
def get_meta_keywords(self, context):
keywords_str = self.page.meta_keywords
if keywords_str:
return [c.strip() for c in keywords_str.split(',')]
def get_meta_title(self, context):
return self.page.meta_title or self.page.name
class HomePageView(MetaTagsMexin, DetailView):
model = Page
context_object_name = 'page'
template_name = 'page/homepage.html'
def get_context_data(self, **kwargs):
context = super(HomePageView, self).get_context_data(**kwargs)
return context
def get_object(self):
page = get_object_or_404(self.model, slug='home')
self.page = page
return page
class PageDetailView(MetaTagsMexin, DetailView):
model = Page
context_object_name = 'page'
template_name = 'page/default.html'
def get_object(self):
page = get_object_or_404(self.model, path=self.request.path, active=1)
self.page = page
return page
def get_template_names(self):
return ["page/%s.html" % self.page.template]
| mit | -7,776,898,867,387,568,000 | 26.185185 | 78 | 0.664169 | false | 3.651741 | false | false | false |
bozokyzoltan/Disordered_conformers | disconf/predict/pre.py | 1 | 6087 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created by Zoltan Bozoky on 2014.07.02.
Under GPL licence.
Purpose:
========
Calculate PRE distances.
Note:
=====
The actual pre data is stored as a power of -6 in the bcd file!
"""
# Built-ins
# 3rd party modules
import numpy as np
# Project modules
from disconf.predict.predictor import Predictor
from disconf import fileio
def distance2pre(distance_data):
"""
Convert distance data to a PRE value stored as a power of -6.
Note:
PRE distance: value = pow(distance, -6) <=> distance(value, -1.0/6.0)
"""
#
return np.power(distance_data, -6)
### ======================================================================== ###
def pre2distance(pre_data):
"""
Convert a stored PRE value, saved as a power of -6, back to a distance in angstroms.
Note:
PRE distance: value = pow(distance, -6) <=> distance(value, -1.0/6.0)
"""
#
return np.power(pre_data, -1.0 / 6.0)
### ======================================================================== ###
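# Illustrative round trip of the -6 power convention above (values are
# arbitrary, not taken from a bcd file): a 10 angstrom distance is stored as
# distance2pre(10.0) == 1e-06, and pre2distance(1e-06) recovers 10.0.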
class Pre(Predictor):
"""
PRE distance calculations.
"""
def __init__(self, kwarg):
"""
Parameters:
===========
No extra argument is required!
"""
# ---------------------------------
# Initialize general parameters
# ---------------------------------
Predictor.__init__(self, kwarg)
# ---------------------------------
# Define restraint name
# ---------------------------------
self._name = 'pre'
#
return None
### ==================================================================== ###
def predict(self, pdb_filename, **kwarg):
"""
Calculates the atom - atom distance for PRE fitting
Parameters:
===========
* labels
* coordinates
"""
print ' >>> PARAMAGNETIC RELAXATION ENHANCEMENT BACK CALCULATION'
# ---------------------------------
# Get the structure data
# ---------------------------------
if (('labels' in kwarg) and ('coordinates' in kwarg)):
labels = kwarg['labels']
coordinates = kwarg['coordinates']
else:
# Get the coordinates from the pdb file
labels, coordinates = fileio.read_pdb_file(pdb_filename)
# ---------------------------------
# Extract the residue number and atom name and put into a dictionary
# ---------------------------------
residue_number_atom_name = {}
for i in xrange(len(labels)):
atom_name = labels[i][12:16].strip()
residue_number = int(labels[i][22:26])
residue_number_atom_name[(residue_number, atom_name)] = i
# ---------------------------------
# Container to store the back calculated data
# ---------------------------------
pre_data = np.empty(shape = (self.experimental_data[self.name]['size']),
dtype = np.float32)
# ---------------------------------
# Iterate through all experimental datapoints
# ---------------------------------
for i in xrange(self.experimental_data[self.name]['size']):
resi1 = self.experimental_data[self.name]['resi1'][i]
atom1 = self.experimental_data[self.name]['atom1'][i]
resi2 = self.experimental_data[self.name]['resi2'][i]
atom2 = self.experimental_data[self.name]['atom2'][i]
# ---------------------------------
# If there is a "#" character indicating the ambiguity in atom1
# ---------------------------------
if '#' in atom1:
# ---------------------------------
# coordinate_1 is the average position of all possible atoms
# ---------------------------------
coordinate_1 = np.zeros(3, dtype = np.float32)
num = 0
for index in ['1', '2', '3', '4']:
if (resi1, atom1.replace('#', index)) in residue_number_atom_name:
coordinate_1 += coordinates[residue_number_atom_name[(resi1, atom1.replace('#', index))]]
num += 1.0
coordinate_1 /= num
else:
coordinate_1 = coordinates[
residue_number_atom_name[(resi1, atom1)]]
# ---------------------------------
# If there is a "#" character indicating the ambiguity in atom2
# ---------------------------------
if '#' in atom2:
# ---------------------------------
# coordinate_2 is the average position of all possible atoms
# ---------------------------------
coordinate_2 = np.zeros(3, dtype = np.float32)
num = 0
for index in ['1', '2', '3', '4']:
if (resi2, atom2.replace('#', index)) in residue_number_atom_name:
coordinate_2 += coordinates[residue_number_atom_name[(resi2, atom2.replace('#', index))]]
num += 1.0
coordinate_2 /= num
else:
coordinate_2 = coordinates[
residue_number_atom_name[(resi2, atom2)]]
# ---------------------------------
# Calculate the distance between the two coordinates and put on -6
# power
# ---------------------------------
pre_data[i] = distance2pre(np.linalg.norm(
coordinate_1 - coordinate_2))
#
print pdb_filename, ':', len(pre_data), 'distance information extracted'
#
return {'pre': pre_data}
### ==================================================================== ###
### ==================================================================== ###
### ==================================================================== ###
| gpl-2.0 | -1,215,891,602,983,140,600 | 38.525974 | 113 | 0.405126 | false | 4.861821 | false | false | false |
googleads/google-ads-python | google/ads/googleads/v8/errors/types/function_parsing_error.py | 1 | 1516 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v8.errors",
marshal="google.ads.googleads.v8",
manifest={"FunctionParsingErrorEnum",},
)
class FunctionParsingErrorEnum(proto.Message):
r"""Container for enum describing possible function parsing
errors.
"""
class FunctionParsingError(proto.Enum):
r"""Enum describing possible function parsing errors."""
UNSPECIFIED = 0
UNKNOWN = 1
NO_MORE_INPUT = 2
EXPECTED_CHARACTER = 3
UNEXPECTED_SEPARATOR = 4
UNMATCHED_LEFT_BRACKET = 5
UNMATCHED_RIGHT_BRACKET = 6
TOO_MANY_NESTED_FUNCTIONS = 7
MISSING_RIGHT_HAND_OPERAND = 8
INVALID_OPERATOR_NAME = 9
FEED_ATTRIBUTE_OPERAND_ARGUMENT_NOT_INTEGER = 10
NO_OPERANDS = 11
TOO_MANY_OPERANDS = 12
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | -208,529,365,072,351,650 | 30.583333 | 74 | 0.682058 | false | 3.947917 | false | false | false |
jigarkb/FullContact | fullcontact.py | 1 | 1560 | import requests
import json
class FullContact(object):
def __init__(self, api_key):
self.api_key = api_key
self.url = "https://api.fullcontact.com/v2/person.json"
def get(self, **kwargs):
if 'apiKey' not in kwargs:
kwargs['apiKey'] = self.api_key
r = requests.get(self.url, params=kwargs)
return json.loads(r.text)
def change_keys(self,d):
modified_response={}
for keys in d:
key=keys[49:]
modified_response[key]=d[keys]
return modified_response
def post(self, email_list):
data_dict={}
counter=0
chunks=[]
person_url="https://api.fullcontact.com/v2/person.json?email="
for i in range(len(email_list)):
email_list[i]=person_url+email_list[i]
while counter<len(email_list):
chunks.append(email_list[counter:counter+20])
counter+=20
for request_urls in chunks:
post_data = json.dumps({'requests' : request_urls})
r = requests.post(
'https://api.fullcontact.com/v2/batch.json',
params={'apiKey': self.api_key},
headers={'content-type': 'application/json'},
data=post_data).json
json_data=json.loads(r.im_self.content)
modified_data = self.change_keys(json_data["responses"])
data_dict=dict(data_dict.items()+modified_data.items())
return data_dict
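# Example usage (illustrative only; the API key and addresses are placeholders):
#   fc = FullContact('YOUR_API_KEY')
#   person = fc.get(email='[email protected]')                    # single lookup
#   people = fc.post(['[email protected]', '[email protected]'])     # batched lookups, 20 per request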
| apache-2.0 | 826,975,456,134,007,600 | 35.27907 | 70 | 0.542949 | false | 3.880597 | false | false | false |
lilsweetcaligula/Online-Judges | leetcode/easy/implement_stack_using_queues/py/solution.py | 1 | 1370 | import collections
class MyStack(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self._queue = collections.deque()
def push(self, x):
"""
Push element x onto stack.
:type x: int
:rtype: void
"""
self._queue.append(x)
def pop(self):
"""
Removes the element on top of the stack and returns that element.
:rtype: int
"""
tempQueue = collections.deque()
while len(self._queue) > 1:
x = self._queue.popleft()
tempQueue.append(x)
result = self._queue.pop()
while len(tempQueue) > 0:
x = tempQueue.popleft()
self._queue.append(x)
return result
def top(self):
"""
Get the top element.
:rtype: int
"""
result = self.pop()
self._queue.append(result)
return result
def empty(self):
"""
Returns whether the stack is empty.
:rtype: bool
"""
return len(self._queue) == 0
# Your MyStack object will be instantiated and called as such:
# obj = MyStack()
# obj.push(x)
# param_2 = obj.pop()
# param_3 = obj.top()
# param_4 = obj.empty()
| mit | -327,564,662,910,520,800 | 19.147059 | 73 | 0.475182 | false | 4.308176 | false | false | false |
vladimir-ipatov/ganeti | lib/mcpu.py | 1 | 18556 | #
#
# Copyright (C) 2006, 2007, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Module implementing the logic behind the cluster operations
This module implements the logic for doing operations in the cluster. There
are two kinds of classes defined:
- logical units, which know how to deal with their specific opcode only
- the processor, which dispatches the opcodes to their logical units
"""
import sys
import logging
import random
import time
import itertools
import traceback
from ganeti import opcodes
from ganeti import opcodes_base
from ganeti import constants
from ganeti import errors
from ganeti import hooksmaster
from ganeti import cmdlib
from ganeti import locking
from ganeti import utils
from ganeti import compat
_OP_PREFIX = "Op"
_LU_PREFIX = "LU"
#: LU classes which don't need to acquire the node allocation lock
#: (L{locking.NAL}) when they acquire all node or node resource locks
_NODE_ALLOC_WHITELIST = frozenset([])
#: LU classes which don't need to acquire the node allocation lock
#: (L{locking.NAL}) in the same mode (shared/exclusive) as the node
#: or node resource locks
_NODE_ALLOC_MODE_WHITELIST = compat.UniqueFrozenset([
cmdlib.LUBackupExport,
cmdlib.LUBackupRemove,
cmdlib.LUOobCommand,
])
class LockAcquireTimeout(Exception):
"""Exception to report timeouts on acquiring locks.
"""
def _CalculateLockAttemptTimeouts():
"""Calculate timeouts for lock attempts.
"""
result = [constants.LOCK_ATTEMPTS_MINWAIT]
running_sum = result[0]
# Wait for a total of at least LOCK_ATTEMPTS_TIMEOUT before doing a
# blocking acquire
while running_sum < constants.LOCK_ATTEMPTS_TIMEOUT:
timeout = (result[-1] * 1.05) ** 1.25
# Cap max timeout. This gives other jobs a chance to run even if
# we're still trying to get our locks, before finally moving to a
# blocking acquire.
timeout = min(timeout, constants.LOCK_ATTEMPTS_MAXWAIT)
# And also cap the lower boundary for safety
timeout = max(timeout, constants.LOCK_ATTEMPTS_MINWAIT)
result.append(timeout)
running_sum += timeout
return result
class LockAttemptTimeoutStrategy(object):
"""Class with lock acquire timeout strategy.
"""
__slots__ = [
"_timeouts",
"_random_fn",
"_time_fn",
]
_TIMEOUT_PER_ATTEMPT = _CalculateLockAttemptTimeouts()
def __init__(self, _time_fn=time.time, _random_fn=random.random):
"""Initializes this class.
@param _time_fn: Time function for unittests
@param _random_fn: Random number generator for unittests
"""
object.__init__(self)
self._timeouts = iter(self._TIMEOUT_PER_ATTEMPT)
self._time_fn = _time_fn
self._random_fn = _random_fn
def NextAttempt(self):
"""Returns the timeout for the next attempt.
"""
try:
timeout = self._timeouts.next()
except StopIteration:
# No more timeouts, do blocking acquire
timeout = None
if timeout is not None:
# Add a small variation (-/+ 5%) to timeout. This helps in situations
# where two or more jobs are fighting for the same lock(s).
variation_range = timeout * 0.1
timeout += ((self._random_fn() * variation_range) -
(variation_range * 0.5))
return timeout
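# Minimal usage sketch (illustrative only, not part of the original module):
# each acquisition attempt asks the strategy for the next timeout and falls
# back to a blocking acquire once None is returned.
#   strategy = LockAttemptTimeoutStrategy()
#   timeout = strategy.NextAttempt()   # float with roughly +/-5% jitter, or None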
class OpExecCbBase: # pylint: disable=W0232
"""Base class for OpCode execution callbacks.
"""
def NotifyStart(self):
"""Called when we are about to execute the LU.
This function is called when we're about to start the lu's Exec() method,
that is, after we have acquired all locks.
"""
def Feedback(self, *args):
"""Sends feedback from the LU code to the end-user.
"""
def CurrentPriority(self): # pylint: disable=R0201
"""Returns current priority or C{None}.
"""
return None
def SubmitManyJobs(self, jobs):
"""Submits jobs for processing.
See L{jqueue.JobQueue.SubmitManyJobs}.
"""
raise NotImplementedError
def _LUNameForOpName(opname):
"""Computes the LU name for a given OpCode name.
"""
assert opname.startswith(_OP_PREFIX), \
"Invalid OpCode name, doesn't start with %s: %s" % (_OP_PREFIX, opname)
return _LU_PREFIX + opname[len(_OP_PREFIX):]
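# For instance, _LUNameForOpName("OpClusterVerify") returns "LUClusterVerify"
# (the opcode name here is only an illustration).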
def _ComputeDispatchTable():
"""Computes the opcode-to-lu dispatch table.
"""
return dict((op, getattr(cmdlib, _LUNameForOpName(op.__name__)))
for op in opcodes.OP_MAPPING.values()
if op.WITH_LU)
def _SetBaseOpParams(src, defcomment, dst):
"""Copies basic opcode parameters.
@type src: L{opcodes.OpCode}
@param src: Source opcode
@type defcomment: string
@param defcomment: Comment to specify if not already given
@type dst: L{opcodes.OpCode}
@param dst: Destination opcode
"""
if hasattr(src, "debug_level"):
dst.debug_level = src.debug_level
if (getattr(dst, "priority", None) is None and
hasattr(src, "priority")):
dst.priority = src.priority
if not getattr(dst, opcodes_base.COMMENT_ATTR, None):
dst.comment = defcomment
def _ProcessResult(submit_fn, op, result):
"""Examines opcode result.
If necessary, additional processing on the result is done.
"""
if isinstance(result, cmdlib.ResultWithJobs):
# Copy basic parameters (e.g. priority)
map(compat.partial(_SetBaseOpParams, op,
"Submitted by %s" % op.OP_ID),
itertools.chain(*result.jobs))
# Submit jobs
job_submission = submit_fn(result.jobs)
# Build dictionary
result = result.other
assert constants.JOB_IDS_KEY not in result, \
"Key '%s' found in additional return values" % constants.JOB_IDS_KEY
result[constants.JOB_IDS_KEY] = job_submission
return result
def _FailingSubmitManyJobs(_):
"""Implementation of L{OpExecCbBase.SubmitManyJobs} to raise an exception.
"""
raise errors.ProgrammerError("Opcodes processed without callbacks (e.g."
" queries) can not submit jobs")
def _VerifyLocks(lu, glm, _mode_whitelist=_NODE_ALLOC_MODE_WHITELIST,
_nal_whitelist=_NODE_ALLOC_WHITELIST):
"""Performs consistency checks on locks acquired by a logical unit.
@type lu: L{cmdlib.LogicalUnit}
@param lu: Logical unit instance
@type glm: L{locking.GanetiLockManager}
@param glm: Lock manager
"""
if not __debug__:
return
have_nal = glm.check_owned(locking.LEVEL_NODE_ALLOC, locking.NAL)
for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
# TODO: Verify using actual lock mode, not using LU variables
if level in lu.needed_locks:
share_node_alloc = lu.share_locks[locking.LEVEL_NODE_ALLOC]
share_level = lu.share_locks[level]
if lu.__class__ in _mode_whitelist:
assert share_node_alloc != share_level, \
"LU is whitelisted to use different modes for node allocation lock"
else:
assert bool(share_node_alloc) == bool(share_level), \
("Node allocation lock must be acquired using the same mode as nodes"
" and node resources")
if lu.__class__ in _nal_whitelist:
assert not have_nal, \
"LU is whitelisted for not acquiring the node allocation lock"
elif lu.needed_locks[level] == locking.ALL_SET or glm.owning_all(level):
assert have_nal, \
("Node allocation lock must be used if an LU acquires all nodes"
" or node resources")
class Processor(object):
"""Object which runs OpCodes"""
DISPATCH_TABLE = _ComputeDispatchTable()
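# DISPATCH_TABLE maps opcode classes to their LU classes and is computed
# once, at class-definition time.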
def __init__(self, context, ec_id, enable_locks=True):
"""Constructor for Processor
@type context: GanetiContext
@param context: global Ganeti context
@type ec_id: string
@param ec_id: execution context identifier
@type enable_locks: bool
@param enable_locks: whether this processor may acquire locks; if False,
  opcodes whose LU requires the BGL are rejected
"""
self.context = context
self._ec_id = ec_id
self._cbs = None
self.rpc = context.rpc
self.hmclass = hooksmaster.HooksMaster
self._enable_locks = enable_locks
def _CheckLocksEnabled(self):
"""Checks if locking is enabled.
@raise errors.ProgrammerError: In case locking is not enabled
"""
if not self._enable_locks:
raise errors.ProgrammerError("Attempted to use disabled locks")
def _AcquireLocks(self, level, names, shared, opportunistic, timeout):
"""Acquires locks via the Ganeti lock manager.
@type level: int
@param level: Lock level
@type names: list or string
@param names: Lock names
@type shared: bool
@param shared: Whether the locks should be acquired in shared mode
@type opportunistic: bool
@param opportunistic: Whether to acquire opportunistically
@type timeout: None or float
@param timeout: Timeout for acquiring the locks
@raise LockAcquireTimeout: In case locks couldn't be acquired in specified
amount of time
"""
self._CheckLocksEnabled()
if self._cbs:
priority = self._cbs.CurrentPriority()
else:
priority = None
acquired = self.context.glm.acquire(level, names, shared=shared,
timeout=timeout, priority=priority,
opportunistic=opportunistic)
if acquired is None:
raise LockAcquireTimeout()
return acquired
def _ExecLU(self, lu):
"""Logical Unit execution sequence.
"""
write_count = self.context.cfg.write_count
lu.CheckPrereq()
hm = self.BuildHooksManager(lu)
h_results = hm.RunPhase(constants.HOOKS_PHASE_PRE)
lu.HooksCallBack(constants.HOOKS_PHASE_PRE, h_results,
self.Log, None)
if getattr(lu.op, "dry_run", False):
# in this mode, no post-hooks are run, and the config is not
# written (as it might have been modified by another LU, and we
# shouldn't do a writeout on behalf of other threads)
self.LogInfo("dry-run mode requested, not actually executing"
" the operation")
return lu.dry_run_result
if self._cbs:
submit_mj_fn = self._cbs.SubmitManyJobs
else:
submit_mj_fn = _FailingSubmitManyJobs
try:
result = _ProcessResult(submit_mj_fn, lu.op, lu.Exec(self.Log))
h_results = hm.RunPhase(constants.HOOKS_PHASE_POST)
result = lu.HooksCallBack(constants.HOOKS_PHASE_POST, h_results,
self.Log, result)
finally:
# FIXME: This needs locks if not lu_class.REQ_BGL
if write_count != self.context.cfg.write_count:
hm.RunConfigUpdate()
return result
def BuildHooksManager(self, lu):
return self.hmclass.BuildFromLu(lu.rpc.call_hooks_runner, lu)
def _LockAndExecLU(self, lu, level, calc_timeout):
"""Execute a Logical Unit, with the needed locks.
This is a recursive function that starts locking the given level, and
proceeds up, till there are no more locks to acquire. Then it executes the
given LU and its opcodes.
"""
glm = self.context.glm
adding_locks = level in lu.add_locks
acquiring_locks = level in lu.needed_locks
if level not in locking.LEVELS:
_VerifyLocks(lu, glm)
if self._cbs:
self._cbs.NotifyStart()
try:
result = self._ExecLU(lu)
except AssertionError, err:
# this is a bit ugly, as we don't know from which phase
# (prereq, exec) this comes; but it's better than an exception
# with no information
(_, _, tb) = sys.exc_info()
err_info = traceback.format_tb(tb)
del tb
logging.exception("Detected AssertionError")
raise errors.OpExecError("Internal assertion error: please report"
" this as a bug.\nError message: '%s';"
" location:\n%s" % (str(err), err_info[-1]))
elif adding_locks and acquiring_locks:
# We could both acquire and add locks at the same level, but for now we
# don't need this, so we'll avoid the complicated code needed.
raise NotImplementedError("Can't declare locks to acquire when adding"
" others")
elif adding_locks or acquiring_locks:
self._CheckLocksEnabled()
lu.DeclareLocks(level)
share = lu.share_locks[level]
opportunistic = lu.opportunistic_locks[level]
try:
assert adding_locks ^ acquiring_locks, \
"Locks must be either added or acquired"
if acquiring_locks:
# Acquiring locks
needed_locks = lu.needed_locks[level]
self._AcquireLocks(level, needed_locks, share, opportunistic,
calc_timeout())
else:
# Adding locks
add_locks = lu.add_locks[level]
lu.remove_locks[level] = add_locks
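# Remember the added locks so they can be removed again in the 'finally'
# clause below once the LU has finished.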
try:
glm.add(level, add_locks, acquired=1, shared=share)
except errors.LockError:
logging.exception("Detected lock error in level %s for locks"
" %s, shared=%s", level, add_locks, share)
raise errors.OpPrereqError(
"Couldn't add locks (%s), most likely because of another"
" job who added them first" % add_locks,
errors.ECODE_NOTUNIQUE)
try:
result = self._LockAndExecLU(lu, level + 1, calc_timeout)
finally:
if level in lu.remove_locks:
glm.remove(level, lu.remove_locks[level])
finally:
if glm.is_owned(level):
glm.release(level)
else:
result = self._LockAndExecLU(lu, level + 1, calc_timeout)
return result
# pylint: disable=R0201
def _CheckLUResult(self, op, result):
"""Check the LU result against the contract in the opcode.
"""
resultcheck_fn = op.OP_RESULT
if not (resultcheck_fn is None or resultcheck_fn(result)):
logging.error("Expected opcode result matching %s, got %s",
resultcheck_fn, result)
if not getattr(op, "dry_run", False):
# FIXME: LUs should still behave in dry_run mode, or
# alternately we should have OP_DRYRUN_RESULT; in the
# meantime, we simply skip the OP_RESULT check in dry-run mode
raise errors.OpResultError("Opcode result does not match %s: %s" %
(resultcheck_fn, utils.Truncate(result, 80)))
def ExecOpCode(self, op, cbs, timeout=None):
"""Execute an opcode.
@type op: an OpCode instance
@param op: the opcode to be executed
@type cbs: L{OpExecCbBase}
@param cbs: Runtime callbacks
@type timeout: float or None
@param timeout: Maximum time to acquire all locks, None for no timeout
@raise LockAcquireTimeout: In case locks couldn't be acquired in specified
amount of time
"""
if not isinstance(op, opcodes.OpCode):
raise errors.ProgrammerError("Non-opcode instance passed"
" to ExecOpcode (%s)" % type(op))
lu_class = self.DISPATCH_TABLE.get(op.__class__, None)
if lu_class is None:
raise errors.OpCodeUnknown("Unknown opcode")
if timeout is None:
calc_timeout = lambda: None
else:
calc_timeout = utils.RunningTimeout(timeout, False).Remaining
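# calc_timeout() presumably returns the time remaining out of the overall
# 'timeout' budget each time it is called, so the total time spent acquiring
# locks across all levels is bounded by that single budget.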
self._cbs = cbs
try:
if self._enable_locks:
# Acquire the Big Ganeti Lock exclusively if this LU requires it,
# and in a shared fashion otherwise (to prevent concurrent run with
# an exclusive LU).
self._AcquireLocks(locking.LEVEL_CLUSTER, locking.BGL,
not lu_class.REQ_BGL, False, calc_timeout())
elif lu_class.REQ_BGL:
raise errors.ProgrammerError("Opcode '%s' requires BGL, but locks are"
" disabled" % op.OP_ID)
try:
lu = lu_class(self, op, self.context, self.rpc)
lu.ExpandNames()
assert lu.needed_locks is not None, "needed_locks not set by LU"
try:
result = self._LockAndExecLU(lu, locking.LEVEL_CLUSTER + 1,
calc_timeout)
finally:
if self._ec_id:
self.context.cfg.DropECReservations(self._ec_id)
finally:
# Release BGL if owned
if self.context.glm.is_owned(locking.LEVEL_CLUSTER):
assert self._enable_locks
self.context.glm.release(locking.LEVEL_CLUSTER)
finally:
self._cbs = None
self._CheckLUResult(op, result)
return result
def Log(self, *args):
"""Forward call to feedback callback function.
"""
if self._cbs:
self._cbs.Feedback(*args)
def LogStep(self, current, total, message):
"""Log a change in LU execution progress.
"""
logging.debug("Step %d/%d %s", current, total, message)
self.Log("STEP %d/%d %s" % (current, total, message))
def LogWarning(self, message, *args, **kwargs):
"""Log a warning to the logs and the user.
The optional keyword argument is 'hint' and can be used to show a
hint to the user (presumably related to the warning). If the
message is empty, it will not be printed at all, allowing one to
show only a hint.
"""
assert not kwargs or (len(kwargs) == 1 and "hint" in kwargs), \
"Invalid keyword arguments for LogWarning (%s)" % str(kwargs)
if args:
message = message % tuple(args)
if message:
logging.warning(message)
self.Log(" - WARNING: %s" % message)
if "hint" in kwargs:
self.Log(" Hint: %s" % kwargs["hint"])
def LogInfo(self, message, *args):
"""Log an informational message to the logs and the user.
"""
if args:
message = message % tuple(args)
logging.info(message)
self.Log(" - INFO: %s" % message)
def GetECId(self):
"""Returns the current execution context ID.
"""
if not self._ec_id:
raise errors.ProgrammerError("Tried to use execution context id when"
" not set")
return self._ec_id
| gpl-2.0 | 515,231,502,783,152,700 | 29.978297 | 80 | 0.645398 | false | 3.833884 | false | false | false |