repo_name (string, lengths 5 to 100) | path (string, lengths 4 to 299) | copies (string, 990 classes) | size (string, lengths 4 to 7) | content (string, lengths 666 to 1.03M) | license (string, 15 classes) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17 to 100) | line_max (int64, 7 to 1k) | alpha_frac (float64, 0.25 to 0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---
wido/cloudstack | tools/ngui/api.py | 7 | 1481 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing,
#software distributed under the License is distributed on an
#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
#KIND, either express or implied. See the License for the
#specific language governing permissions and limitations
#under the License.
from requester import make_request
from precache import apicache
from config import *
import re
def get_error_code(error):
return int(re.findall("\d{3}",error)[0]) #Find the error code by regular expression
# return int(error[11:14]) #Ugly
def get_command(verb, subject):
commandlist = apicache.get(verb, None)
if commandlist is not None:
command = commandlist.get(subject, None)
if command is not None:
return command["name"]
return None
def apicall(command, data ):
response, error = make_request(command, data, None, host, port, apikey, secretkey, protocol, path)
if error is not None:
return error, get_error_code(error)
return response
| apache-2.0 | 5,381,673,089,288,709,000 | 39.027027 | 102 | 0.740041 | false |
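The get_error_code helper above pulls an HTTP-style status code out of an error string with a regular expression. A minimal standalone sketch of the same idea, with an invented sample message and a guard for the no-match case (the original indexes the first match directly):

import re

def get_error_code(error):
    # Pull the first three-digit run (e.g. an HTTP status code) out of the message.
    match = re.search(r"\d{3}", error)
    return int(match.group(0)) if match else None

print(get_error_code("HTTP Error 401: unable to verify user credentials"))  # -> 401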
Belgabor/django | django/contrib/gis/gdal/prototypes/geom.py | 315 | 4821 | import re
from datetime import date
from ctypes import c_char, c_char_p, c_double, c_int, c_ubyte, c_void_p, POINTER
from django.contrib.gis.gdal.envelope import OGREnvelope
from django.contrib.gis.gdal.libgdal import lgdal, GEOJSON
from django.contrib.gis.gdal.prototypes.errcheck import check_bool, check_envelope
from django.contrib.gis.gdal.prototypes.generation import \
const_string_output, double_output, geom_output, int_output, \
srs_output, string_output, void_output
### Generation routines specific to this module ###
def env_func(f, argtypes):
"For getting OGREnvelopes."
f.argtypes = argtypes
f.restype = None
f.errcheck = check_envelope
return f
def pnt_func(f):
"For accessing point information."
return double_output(f, [c_void_p, c_int])
def topology_func(f):
f.argtypes = [c_void_p, c_void_p]
f.restype = c_int
f.errcheck = check_bool
return f
### OGR_G ctypes function prototypes ###
# GeoJSON routines, if supported.
if GEOJSON:
from_json = geom_output(lgdal.OGR_G_CreateGeometryFromJson, [c_char_p])
to_json = string_output(lgdal.OGR_G_ExportToJson, [c_void_p], str_result=True)
to_kml = string_output(lgdal.OGR_G_ExportToKML, [c_void_p, c_char_p], str_result=True)
else:
from_json = False
to_json = False
to_kml = False
# GetX, GetY, GetZ all return doubles.
getx = pnt_func(lgdal.OGR_G_GetX)
gety = pnt_func(lgdal.OGR_G_GetY)
getz = pnt_func(lgdal.OGR_G_GetZ)
# Geometry creation routines.
from_wkb = geom_output(lgdal.OGR_G_CreateFromWkb, [c_char_p, c_void_p, POINTER(c_void_p), c_int], offset=-2)
from_wkt = geom_output(lgdal.OGR_G_CreateFromWkt, [POINTER(c_char_p), c_void_p, POINTER(c_void_p)], offset=-1)
create_geom = geom_output(lgdal.OGR_G_CreateGeometry, [c_int])
clone_geom = geom_output(lgdal.OGR_G_Clone, [c_void_p])
get_geom_ref = geom_output(lgdal.OGR_G_GetGeometryRef, [c_void_p, c_int])
get_boundary = geom_output(lgdal.OGR_G_GetBoundary, [c_void_p])
geom_convex_hull = geom_output(lgdal.OGR_G_ConvexHull, [c_void_p])
geom_diff = geom_output(lgdal.OGR_G_Difference, [c_void_p, c_void_p])
geom_intersection = geom_output(lgdal.OGR_G_Intersection, [c_void_p, c_void_p])
geom_sym_diff = geom_output(lgdal.OGR_G_SymmetricDifference, [c_void_p, c_void_p])
geom_union = geom_output(lgdal.OGR_G_Union, [c_void_p, c_void_p])
# Geometry modification routines.
add_geom = void_output(lgdal.OGR_G_AddGeometry, [c_void_p, c_void_p])
import_wkt = void_output(lgdal.OGR_G_ImportFromWkt, [c_void_p, POINTER(c_char_p)])
# Destroys a geometry
destroy_geom = void_output(lgdal.OGR_G_DestroyGeometry, [c_void_p], errcheck=False)
# Geometry export routines.
to_wkb = void_output(lgdal.OGR_G_ExportToWkb, None, errcheck=True) # special handling for WKB.
to_wkt = string_output(lgdal.OGR_G_ExportToWkt, [c_void_p, POINTER(c_char_p)])
to_gml = string_output(lgdal.OGR_G_ExportToGML, [c_void_p], str_result=True)
get_wkbsize = int_output(lgdal.OGR_G_WkbSize, [c_void_p])
# Geometry spatial-reference related routines.
assign_srs = void_output(lgdal.OGR_G_AssignSpatialReference, [c_void_p, c_void_p], errcheck=False)
get_geom_srs = srs_output(lgdal.OGR_G_GetSpatialReference, [c_void_p])
# Geometry properties
get_area = double_output(lgdal.OGR_G_GetArea, [c_void_p])
get_centroid = void_output(lgdal.OGR_G_Centroid, [c_void_p, c_void_p])
get_dims = int_output(lgdal.OGR_G_GetDimension, [c_void_p])
get_coord_dim = int_output(lgdal.OGR_G_GetCoordinateDimension, [c_void_p])
set_coord_dim = void_output(lgdal.OGR_G_SetCoordinateDimension, [c_void_p, c_int], errcheck=False)
get_geom_count = int_output(lgdal.OGR_G_GetGeometryCount, [c_void_p])
get_geom_name = const_string_output(lgdal.OGR_G_GetGeometryName, [c_void_p])
get_geom_type = int_output(lgdal.OGR_G_GetGeometryType, [c_void_p])
get_point_count = int_output(lgdal.OGR_G_GetPointCount, [c_void_p])
get_point = void_output(lgdal.OGR_G_GetPoint, [c_void_p, c_int, POINTER(c_double), POINTER(c_double), POINTER(c_double)], errcheck=False)
geom_close_rings = void_output(lgdal.OGR_G_CloseRings, [c_void_p], errcheck=False)
# Topology routines.
ogr_contains = topology_func(lgdal.OGR_G_Contains)
ogr_crosses = topology_func(lgdal.OGR_G_Crosses)
ogr_disjoint = topology_func(lgdal.OGR_G_Disjoint)
ogr_equals = topology_func(lgdal.OGR_G_Equals)
ogr_intersects = topology_func(lgdal.OGR_G_Intersects)
ogr_overlaps = topology_func(lgdal.OGR_G_Overlaps)
ogr_touches = topology_func(lgdal.OGR_G_Touches)
ogr_within = topology_func(lgdal.OGR_G_Within)
# Transformation routines.
geom_transform = void_output(lgdal.OGR_G_Transform, [c_void_p, c_void_p])
geom_transform_to = void_output(lgdal.OGR_G_TransformTo, [c_void_p, c_void_p])
# For retrieving the envelope of the geometry.
get_envelope = env_func(lgdal.OGR_G_GetEnvelope, [c_void_p, POINTER(OGREnvelope)])
| bsd-3-clause | 5,107,185,897,152,000,000 | 44.481132 | 137 | 0.728272 | false |
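The module above is built from small prototype factories that configure each GDAL C function's argtypes, restype and errcheck via ctypes. A self-contained sketch of that pattern against the standard C math library instead of GDAL (assumes a Unix-like system where ctypes.util.find_library can locate libm):

import ctypes
import ctypes.util

# Load the C math library; find_library needs a Unix-like system here.
libm = ctypes.CDLL(ctypes.util.find_library("m"))

def double_output(func, argtypes):
    # Configure a foreign function that takes the given argtypes and returns a C double.
    func.argtypes = argtypes
    func.restype = ctypes.c_double
    return func

fabs = double_output(libm.fabs, [ctypes.c_double])
print(fabs(-3.5))  # -> 3.5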
apdjustino/DRCOG_Urbansim | src/opus_core/resource_factory.py | 1 | 2293 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.resources import Resources
from opus_core.storage_factory import StorageFactory
class ResourceFactory(object):
""" Class for creating a Resource object.
"""
def get_resources_for_dataset(self,
dataset_name,
in_storage,
out_storage,
resources={},
in_table_name_pair=(None,None),
out_table_name_pair=(None,None),
attributes_pair=(None,None),
id_name_pair=(None,None),
nchunks_pair=(None,None),
debug_pair=(None,None)
):
"""Create an object of class Resources to be used in a Dataset object.
The created resources are merged with the resources given as an argument 'resources'.
The first element
of each tuple of the remaining arguments contains the desired value, the second element contains
the default value which is used if the first element is None.
Entries in resources with the same name as the arguments are overwritten if one of the
tuple values is not None.
"""
# merge resources with arguments
local_resources = Resources(resources)
local_resources.merge_if_not_None({
"in_storage":in_storage,
"out_storage":out_storage,
"nchunks":nchunks_pair[0], "attributes":attributes_pair[0],
"in_table_name": in_table_name_pair[0], "out_table_name": out_table_name_pair[0],
"id_name":id_name_pair[0], "debug":debug_pair[0],
"dataset_name":dataset_name})
# merge resources with default values
local_resources.merge_with_defaults({
"nchunks":nchunks_pair[1], "attributes":attributes_pair[1],
"in_table_name":in_table_name_pair[1], "out_table_name":out_table_name_pair[1],
"id_name":id_name_pair[1], "debug":debug_pair[1],
"dataset_name":dataset_name})
return local_resources | agpl-3.0 | -7,942,508,891,477,226,000 | 44.88 | 105 | 0.58744 | false |
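get_resources_for_dataset above layers explicit values over existing entries and then falls back to defaults. A rough stand-in for those two merge steps using a plain dict rather than opus_core's Resources class (names and sample values here are invented):

def merge_if_not_none(resources, overrides):
    # Overwrite entries only when an explicit (non-None) value was supplied.
    for key, value in overrides.items():
        if value is not None:
            resources[key] = value
    return resources

def merge_with_defaults(resources, defaults):
    # Fill in defaults only for keys that are still missing.
    for key, value in defaults.items():
        resources.setdefault(key, value)
    return resources

resources = {"nchunks": 2}
merge_if_not_none(resources, {"in_table_name": "households", "nchunks": None})
merge_with_defaults(resources, {"nchunks": 1, "debug": 0})
# nchunks stays 2, in_table_name is added, debug falls back to its default of 0
print(resources)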
andrewpollock/grpc | tools/gcp/stress_test/run_server.py | 37 | 5970 | #!/usr/bin/env python2.7
# Copyright 2015-2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
import os
import resource
import select
import subprocess
import sys
import time
from stress_test_utils import BigQueryHelper
from stress_test_utils import EventType
def run_server():
"""This is a wrapper around the interop server and performs the following:
1) Create a 'Summary table' in Big Query to record events like the server
started, completed successfully or failed. NOTE: This also creates
another table called the QPS table which is currently NOT needed on the
server (it is needed on the stress test clients)
2) Start the server process and add a row in Big Query summary table
3) Wait for the server process to terminate. The server process does not
terminate unless there is an error.
If the server process terminated with a failure, add a row in Big Query
and wait forever.
NOTE: This script typically runs inside a GKE pod which means that the
pod gets destroyed when the script exits. However, in case the server
process fails, we would not want the pod to be destroyed (since we
might want to connect to the pod for examining logs). This is the
reason why the script waits forever in case of failures.
"""
# Set the 'core file' size to 'unlimited' so that 'core' files are generated
# if the server crashes (Note: This is not relevant for Java and Go servers)
resource.setrlimit(resource.RLIMIT_CORE,
(resource.RLIM_INFINITY, resource.RLIM_INFINITY))
# Read the parameters from environment variables
env = dict(os.environ)
run_id = env['RUN_ID'] # The unique run id for this test
image_type = env['STRESS_TEST_IMAGE_TYPE']
stress_server_cmd = env['STRESS_TEST_CMD'].split()
args_str = env['STRESS_TEST_ARGS_STR']
pod_name = env['POD_NAME']
project_id = env['GCP_PROJECT_ID']
dataset_id = env['DATASET_ID']
summary_table_id = env['SUMMARY_TABLE_ID']
qps_table_id = env['QPS_TABLE_ID']
# The following parameter is to inform us whether the server runs forever
# until forcefully stopped or will it naturally stop after sometime.
# This way, we know that the process should not terminate (even if it does
# with a success exit code) and flag any termination as a failure.
will_run_forever = env.get('WILL_RUN_FOREVER', '1')
logfile_name = env.get('LOGFILE_NAME')
print('pod_name: %s, project_id: %s, run_id: %s, dataset_id: %s, '
'summary_table_id: %s, qps_table_id: %s') % (pod_name, project_id,
run_id, dataset_id,
summary_table_id,
qps_table_id)
bq_helper = BigQueryHelper(run_id, image_type, pod_name, project_id,
dataset_id, summary_table_id, qps_table_id)
bq_helper.initialize()
# Create BigQuery Dataset and Tables: Summary Table and Metrics Table
if not bq_helper.setup_tables():
print 'Error in creating BigQuery tables'
return
start_time = datetime.datetime.now()
logfile = None
details = 'Logging to stdout'
if logfile_name is not None:
print 'Opening log file: ', logfile_name
logfile = open(logfile_name, 'w')
details = 'Logfile: %s' % logfile_name
stress_cmd = stress_server_cmd + [x for x in args_str.split()]
details = '%s, Stress server command: %s' % (details, str(stress_cmd))
# Update status that the test is starting (in the status table)
bq_helper.insert_summary_row(EventType.STARTING, details)
print 'Launching process %s ...' % stress_cmd
stress_p = subprocess.Popen(args=stress_cmd,
stdout=logfile,
stderr=subprocess.STDOUT)
# Update the status to running if subprocess.Popen launched the server
if stress_p.poll() is None:
bq_helper.insert_summary_row(EventType.RUNNING, '')
# Wait for the server process to terminate
returncode = stress_p.wait()
if will_run_forever == '1' or returncode != 0:
end_time = datetime.datetime.now().isoformat()
event_type = EventType.FAILURE
details = 'Returncode: %d; End time: %s' % (returncode, end_time)
bq_helper.insert_summary_row(event_type, details)
print 'Waiting indefinitely..'
select.select([], [], [])
return returncode
if __name__ == '__main__':
run_server()
| bsd-3-clause | -7,807,885,494,732,641,000 | 42.26087 | 80 | 0.689615 | false |
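run_server above boils down to launching the server with subprocess.Popen, redirecting output to a log file, and inspecting poll()/wait(). A stripped-down sketch of that flow with a placeholder command and log name (assumes a Unix echo binary):

import subprocess

def run_and_wait(cmd, logfile_name):
    # Send stdout and stderr to the log file, then block until the process exits.
    logfile = open(logfile_name, "w")
    proc = subprocess.Popen(cmd, stdout=logfile, stderr=subprocess.STDOUT)
    if proc.poll() is None:
        print("process launched, waiting for it to finish...")
    returncode = proc.wait()
    logfile.close()
    return returncode

print(run_and_wait(["echo", "hello"], "run.log"))  # -> 0 on success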
lukas-krecan/tensorflow | tensorflow/python/ops/linalg_grad.py | 17 | 2430 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in linalg_ops.py.
Useful reference for derivative formulas is
An extended collection of matrix derivative results for forward and reverse
mode algorithmic differentiation by Mike Giles:
http://eprints.maths.ox.ac.uk/1079/1/NA-08-01.pdf
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
@ops.RegisterGradient("MatrixInverse")
def _MatrixInverseGrad(op, grad):
"""Gradient for MatrixInverse."""
ainv = op.outputs[0]
return -math_ops.matmul(ainv,
math_ops.matmul(grad,
ainv,
transpose_b=True),
transpose_a=True)
@ops.RegisterGradient("BatchMatrixInverse")
def _BatchMatrixInverseGrad(op, grad):
"""Gradient for BatchMatrixInverse."""
ainv = op.outputs[0]
return -math_ops.batch_matmul(ainv,
math_ops.batch_matmul(grad,
ainv,
adj_y=True),
adj_x=True)
@ops.RegisterGradient("MatrixDeterminant")
def _MatrixDeterminantGrad(op, grad):
"""Gradient for MatrixDeterminant.
Returns:
gradient
Args:
op: op
grad: grad
"""
a = op.inputs[0]
c = op.outputs[0]
ainv = linalg_ops.matrix_inverse(a)
return grad * c * array_ops.transpose(ainv)
| apache-2.0 | -3,058,256,088,958,562,300 | 33.714286 | 80 | 0.635391 | false |
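The MatrixInverse gradient above implements d/dA of sum(G * inv(A)) = -(inv(A))^T G (inv(A))^T. A quick numerical spot-check of that identity using numpy rather than TensorFlow (the matrix and upstream gradient are arbitrary):

import numpy as np

np.random.seed(0)
a = np.random.rand(3, 3) + 3 * np.eye(3)  # keep the matrix well conditioned
g = np.random.rand(3, 3)                  # stand-in for the upstream gradient
ainv = np.linalg.inv(a)

# Analytic gradient: -A^-T G A^-T, matching the matmul/transpose calls above.
analytic = -ainv.T.dot(g).dot(ainv.T)

# Central-difference approximation of d/dA sum(g * inv(A)).
eps = 1e-6
numeric = np.zeros_like(a)
for i in range(3):
    for j in range(3):
        da = np.zeros_like(a)
        da[i, j] = eps
        numeric[i, j] = (np.sum(g * np.linalg.inv(a + da)) -
                         np.sum(g * np.linalg.inv(a - da))) / (2 * eps)

print(np.allclose(analytic, numeric, atol=1e-5))  # -> True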
pombredanne/MOG | tools/install_venv_common.py | 15 | 7428 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides methods needed by installation script for OpenStack development
virtual environments.
Since this script is used to bootstrap a virtualenv from the system's Python
environment, it should be kept strictly compatible with Python 2.6.
Synced in from openstack-common
"""
from __future__ import print_function
import optparse
import os
import subprocess
import sys
class InstallVenv(object):
def __init__(self, root, venv, requirements,
test_requirements, py_version,
project):
self.root = root
self.venv = venv
self.requirements = requirements
self.test_requirements = test_requirements
self.py_version = py_version
self.project = project
def die(self, message, *args):
print(message % args, file=sys.stderr)
sys.exit(1)
def check_python_version(self):
if sys.version_info < (2, 6):
self.die("Need Python Version >= 2.6")
def run_command_with_code(self, cmd, redirect_output=True,
check_exit_code=True):
"""Runs a command in an out-of-process shell.
Returns the output of that command. Working directory is self.root.
"""
if redirect_output:
stdout = subprocess.PIPE
else:
stdout = None
proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout)
output = proc.communicate()[0]
if check_exit_code and proc.returncode != 0:
self.die('Command "%s" failed.\n%s', ' '.join(cmd), output)
return (output, proc.returncode)
def run_command(self, cmd, redirect_output=True, check_exit_code=True):
return self.run_command_with_code(cmd, redirect_output,
check_exit_code)[0]
def get_distro(self):
if (os.path.exists('/etc/fedora-release') or
os.path.exists('/etc/redhat-release')):
return Fedora(
self.root, self.venv, self.requirements,
self.test_requirements, self.py_version, self.project)
else:
return Distro(
self.root, self.venv, self.requirements,
self.test_requirements, self.py_version, self.project)
def check_dependencies(self):
self.get_distro().install_virtualenv()
def create_virtualenv(self, no_site_packages=True):
"""Creates the virtual environment and installs PIP.
Creates the virtual environment and installs PIP only into the
virtual environment.
"""
if not os.path.isdir(self.venv):
print('Creating venv...', end=' ')
if no_site_packages:
self.run_command(['virtualenv', '-q', '--no-site-packages',
self.venv])
else:
self.run_command(['virtualenv', '-q', self.venv])
print('done.')
else:
print("venv already exists...")
pass
def pip_install(self, *args):
self.run_command(['tools/with_venv.sh',
'pip', 'install', '--upgrade'] + list(args),
redirect_output=False)
def install_dependencies(self):
print('Installing dependencies with pip (this can take a while)...')
# First things first, make sure our venv has the latest pip and
# setuptools and pbr
self.pip_install('pip>=1.4')
self.pip_install('setuptools')
self.pip_install('pbr')
self.pip_install('-r', self.requirements, '-r', self.test_requirements)
def post_process(self):
self.get_distro().post_process()
def parse_args(self, argv):
"""Parses command-line arguments."""
parser = optparse.OptionParser()
parser.add_option('-n', '--no-site-packages',
action='store_true',
help="Do not inherit packages from global Python "
"install")
return parser.parse_args(argv[1:])[0]
class Distro(InstallVenv):
def check_cmd(self, cmd):
return bool(self.run_command(['which', cmd],
check_exit_code=False).strip())
def install_virtualenv(self):
if self.check_cmd('virtualenv'):
return
if self.check_cmd('easy_install'):
print('Installing virtualenv via easy_install...', end=' ')
if self.run_command(['easy_install', 'virtualenv']):
print('Succeeded')
return
else:
print('Failed')
self.die('ERROR: virtualenv not found.\n\n%s development'
' requires virtualenv, please install it using your'
' favorite package management tool' % self.project)
def post_process(self):
"""Any distribution-specific post-processing gets done here.
In particular, this is useful for applying patches to code inside
the venv.
"""
pass
class Fedora(Distro):
"""This covers all Fedora-based distributions.
Includes: Fedora, RHEL, CentOS, Scientific Linux
"""
def check_pkg(self, pkg):
return self.run_command_with_code(['rpm', '-q', pkg],
check_exit_code=False)[1] == 0
def apply_patch(self, originalfile, patchfile):
self.run_command(['patch', '-N', originalfile, patchfile],
check_exit_code=False)
def install_virtualenv(self):
if self.check_cmd('virtualenv'):
return
if not self.check_pkg('python-virtualenv'):
self.die("Please install 'python-virtualenv'.")
super(Fedora, self).install_virtualenv()
def post_process(self):
"""Workaround for a bug in eventlet.
This currently affects RHEL6.1, but the fix can safely be
applied to all RHEL and Fedora distributions.
This can be removed when the fix is applied upstream.
Nova: https://bugs.launchpad.net/nova/+bug/884915
Upstream: https://bitbucket.org/eventlet/eventlet/issue/89
RHEL: https://bugzilla.redhat.com/958868
"""
if os.path.exists('contrib/redhat-eventlet.patch'):
# Install "patch" program if it's not there
if not self.check_pkg('patch'):
self.die("Please install 'patch'.")
# Apply the eventlet patch
self.apply_patch(os.path.join(self.venv, 'lib', self.py_version,
'site-packages',
'eventlet/green/subprocess.py'),
'contrib/redhat-eventlet.patch')
| apache-2.0 | -9,019,303,833,624,986,000 | 33.873239 | 79 | 0.58643 | false |
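check_cmd and check_pkg above probe for a tool before relying on it. A minimal version of that probe, shelling out to which (Unix assumption); the virtualenv lookup is only an example:

import subprocess

def check_cmd(cmd):
    # 'which' prints the path and exits 0 when the command exists.
    proc = subprocess.Popen(["which", cmd], stdout=subprocess.PIPE)
    output = proc.communicate()[0]
    return bool(output.strip())

if not check_cmd("virtualenv"):
    print("virtualenv not found; install it before creating the venv")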
lbjay/cds-invenio | modules/bibformat/lib/elements/bfe_photos.py | 4 | 5570 | # -*- coding: utf-8 -*-
##
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Print photos of the record (if bibdoc file)
"""
import cgi
from invenio.bibdocfile import BibRecDocs
from invenio.urlutils import create_html_link
def format(bfo, separator=" ", style='', img_style='', text_style='font-size:small', print_links='yes', max_photos='',
show_comment='yes', img_max_width='250px', display_all_version_links='yes'):
"""
Lists the photos of a record, displaying the icon version linked to
its original version.
This element works for photos appended to a record as BibDoc
files, for which a preview icon has been generated. If there are
several formats for one photo, use the first one found.
@param separator: separator between each photo
@param print_links: if 'yes', print links to the original photo
@param style: style attributes of the whole image block. Eg: "padding:2px;border:1px"
@param img_style: style attributes of the images. Eg: "width:50px;border:none"
@param text_style: style attributes of the text. Eg: "font-size:small"
@param max_photos: the maximum number of photos to display
@param show_comment: if 'yes', display the comment of each photo
@param display_all_version_links: if 'yes', print links to additional (sub)formats
"""
photos = []
bibarchive = BibRecDocs(bfo.recID)
bibdocs = bibarchive.list_bibdocs()
if max_photos.isdigit():
max_photos = int(max_photos)
else:
max_photos = len(bibdocs)
for doc in bibdocs[:max_photos]:
found_icons = []
found_url = ''
for docfile in doc.list_latest_files():
if docfile.is_icon():
found_icons.append((docfile.get_size(), docfile.get_url()))
else:
found_url = docfile.get_url()
found_icons.sort()
if found_icons:
additional_links = ''
name = doc.get_docname()
comment = doc.list_latest_files()[0].get_comment()
preview_url = None
if len(found_icons) > 1:
preview_url = found_icons[1][1]
additional_urls = [(docfile.get_size(), docfile.get_url(), \
docfile.get_superformat(), docfile.get_subformat()) \
for docfile in doc.list_latest_files() if not docfile.is_icon()]
additional_urls.sort()
additional_links = [create_html_link(url, urlargd={}, \
linkattrd={'style': 'font-size:x-small'}, \
link_label="%s %s (%s)" % (format.strip('.').upper(), subformat, format_size(size))) \
for (size, url, format, subformat) in additional_urls]
img = '<img src="%(icon_url)s" alt="%(name)s" style="max-width:%(img_max_width)s;_width:%(img_max_width)s;%(img_style)s" />' % \
{'icon_url': cgi.escape(found_icons[0][1], True),
'name': cgi.escape(name, True),
'img_style': img_style,
'img_max_width': img_max_width}
if print_links.lower() == 'yes':
img = '<a href="%s">%s</a>' % (cgi.escape(preview_url or found_url, True), img)
if display_all_version_links.lower() == 'yes' and additional_links:
img += '<br />' + ' '.join(additional_links) + '<br />'
if show_comment.lower() == 'yes' and comment:
img += '<div style="margin-auto;text-align:center;%(text_style)s">%(comment)s</div>' % \
{'comment': comment.replace('\n', '<br/>'),
'text_style': text_style}
img = '<div style="vertical-align: middle;text-align:center;display:inline-block;display: -moz-inline-stack;zoom: 1;*display: inline;max-width:%(img_max_width)s;_width:%(img_max_width)s;text-align:center;%(style)s">%(img)s</div>' % \
{'img_max_width': img_max_width,
'style': style,
'img': img}
photos.append(img)
return '<div>' + separator.join(photos) + '</div>'
def escape_values(bfo):
"""
Called by BibFormat in order to check if output of this element
should be escaped.
"""
return 0
def format_size(size):
"""
Get human-readable string for the given size in Bytes
"""
if size < 1024:
return "%d byte%s" % (size, size != 1 and 's' or '')
elif size < 1024 * 1024:
return "%.1f KB" % (size / 1024)
elif size < 1024 * 1024 * 1024:
return "%.1f MB" % (size / (1024 * 1024))
else:
return "%.1f GB" % (size / (1024 * 1024 * 1024))
| gpl-2.0 | 595,953,332,581,147,400 | 44.284553 | 245 | 0.582406 | false |
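format_size at the end of the element uses the usual byte/KB/MB/GB laddering, but under Python 2 its integer division truncates the fractional part (1536 bytes prints as 1.0 KB). A standalone variant using float division, for comparison:

def format_size(size):
    # Same laddering as above, but with float division so fractions survive on Python 2 too.
    if size < 1024:
        return "%d byte%s" % (size, "" if size == 1 else "s")
    elif size < 1024 ** 2:
        return "%.1f KB" % (size / 1024.0)
    elif size < 1024 ** 3:
        return "%.1f MB" % (size / (1024.0 * 1024))
    return "%.1f GB" % (size / (1024.0 * 1024 * 1024))

print(format_size(1536))  # -> 1.5 KB (the element's version prints 1.0 KB under Python 2)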
purepitch/trove | tests/test_view.py | 1 | 2089 | # -*- coding: utf-8 -*-
import unittest
import StringIO
import sys
import re
from modules.view import View
class TestView(unittest.TestCase):
def setUp(self):
self.view = View()
def testPrintInfoExpectsArgument(self):
with self.assertRaises(TypeError):
self.view.print_line()
def testPrintInfoPrintsInput(self):
message = "test message"
old_stdout = sys.stdout
sys.stdout = StringIO.StringIO()
self.view.print_line(message)
received_stdout = sys.stdout.getvalue().strip()
sys.stdout = old_stdout
self.assertEqual(received_stdout, message)
def testPrintBoldExpectsArgument(self):
with self.assertRaises(TypeError):
self.view.print_bold()
def testPrintBoldPrintsMessageInAnsiBold(self):
message = "bold message"
old_stdout = sys.stdout
sys.stdout = StringIO.StringIO()
self.view.print_bold(message)
received_stdout = sys.stdout.getvalue().strip()
sys.stdout = old_stdout
bold_message = "\x1b[01m" + message + "\x1b[0m"
self.assertEqual(received_stdout, bold_message)
def testPrintErrorExpectsArgument(self):
with self.assertRaises(TypeError):
self.view.print_error()
def testPrintErrorPrintsMessageInAnsiBold(self):
message = "error message"
old_stdout = sys.stdout
sys.stdout = StringIO.StringIO()
self.view.print_error(message)
received_stdout = sys.stdout.getvalue()
sys.stdout = old_stdout
formatted_message = "\x1b[01m" + message + "\x1b[0m\n"
self.assertEqual(received_stdout, formatted_message)
def testPrintHelpPrintsAvailableCommandsToStdout(self):
old_stdout = sys.stdout
sys.stdout = StringIO.StringIO()
self.view.print_help()
received_stdout = sys.stdout.getvalue()
sys.stdout = old_stdout
regexp = re.compile('Available commands:')
self.assertRegexpMatches(received_stdout, regexp)
# vim: expandtab shiftwidth=4 softtabstop=4
| gpl-3.0 | -1,116,729,715,636,350,300 | 31.138462 | 62 | 0.653423 | false |
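The tests above capture printed output by swapping sys.stdout for a StringIO buffer and restoring it afterwards. The same capture pattern as a small reusable helper (Python 2 StringIO to match the file; io.StringIO is the Python 3 counterpart):

import sys
import StringIO

def capture_stdout(func, *args):
    # Swap sys.stdout for an in-memory buffer, run the callable, then restore it.
    old_stdout = sys.stdout
    sys.stdout = StringIO.StringIO()
    try:
        func(*args)
        return sys.stdout.getvalue()
    finally:
        sys.stdout = old_stdout

def greet(name):
    print("hello, %s" % name)

print(capture_stdout(greet, "trove").strip())  # -> hello, trove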
databrary/curation | tools/scripts/utils/openproject/update.py | 1 | 5971 | #!/usr/bin/env python2.7
import sys, os
'''smoother more informative version check'''
if sys.version_info >= (3, 0, 0):
sys.exit("You need to run this with python 2.7, exiting now so you can get your stuff together")
'''Run from ../scripts/ with `python -m utils.openproject.update`'''
import json
from .. import dbclient
from config import conn as c
import requests
import argparse
from pprint import pprint
'''quick and dirty command line tool that automatically updates new volumes
to openproject via the openproject api'''
_QUERIES = {
"db_volumes":"select v.id, volume_creation(v.id), v.name, owners from volume v left join volume_owners o ON v.id = o.volume where v.id > 3 order by v.id;",
"op_parties":"select wp.id, wp.subject, cv.* from work_packages wp left join custom_values cv on cv.customized_id = wp.id where cv.customized_type = 'WorkPackage' and wp.type_id = 6 and wp.project_id = 12 and cv.custom_field_id = 29 order by wp.id asc;",
"op_workpackages": "select wp.id, wp.type_id, wp.project_id, wp.parent_id, wp.category_id, wp.created_at, wp.start_date, cv.* from work_packages wp left join custom_values cv on cv.customized_id = wp.id where cv.customized_type = 'WorkPackage' and project_id = 14 and cv.custom_field_id = 29 order by wp.id asc;"
}
def wp_vols(data):
return sorted([d[11] for d in data if d[11] != None and d[11] != ''], key=lambda x: float(x))
def getnew(op_vols, db_vols):
return [z for z in db_vols if str(z[0]) not in op_vols]
def getdel(op_vols, db_vols):
vols_only = []
for d in db_vols:
vols_only.append(d[0])
return [z for z in op_vols if int(z) not in vols_only]
def getData(vol_data, party_data):
nl = []
for v in vol_data:
d = {}
owner_id = v[3][0].split(':')[0] if v[3] is not None else None
if owner_id is not None:
parent_id = [p[0] for p in party_data if p[6]==owner_id]
else:
parent_id = None
d["owner_id"] = owner_id
d["parent_id"] = parent_id
d["volume_id"] = v[0]
d["start_date"] = v[1]
d["title"] = v[2]
nl.append(d)
return nl
def prepareData(data):
fresh_data = []
for i in data:
record = {
"customField37": True,
"description": {
"format": "textile",
"raw": ""
},
"_links": {
"type": {"href":"project/api/v3/types/16"},
"status":{"href":"project/api/v3/statuses/1"},
"priority":{"href":"project/api/v3/priorities/3"}
}
}
if type(i['parent_id']) == list:
if i['parent_id'] != []:
pid = int(i['parent_id'][0])
else:
pid = None
else:
pid = None
desc = "Opened: %s" % (i['start_date'].strftime('%Y-%m-%d'))
record['subject'] = i['title']
record['description']['raw'] = desc
record['parentId'] = pid
record['customField29'] = int(i['volume_id'])
fresh_data.append(record)
return fresh_data
def prepareDel(del_vols, workpackages):
return [w[0] for w in workpackages if w[11] in del_vols]
def insert_vols(data):
data = json.dumps(data)
return requests.post(c.API_POST_TARGET, auth=("apikey", c.API_KEY), data=data, headers={"Content-Type": "application/json"})
if __name__ == '__main__':
######################### /command line argument handling ################################
parser = argparse.ArgumentParser(description='quick and dirty command line tool that automatically updates new volumes to openproject via the openproject api')
parser.add_argument('-r', '--runnow', help='Arg to make update run now', required=False, action='store_true')
args = vars(parser.parse_args())
RUNNOW = args['runnow']
######################### /command line argument handling ################################
db_DB = dbclient.DB(c.db['HOST'], c.db['DATABASE'], c.db['USER'], c.db['PASSWORD'], c.db['PORT'])
op_DB = dbclient.DB(c.op['HOST'], c.op['DATABASE'], c.op['USER'], c.op['PASSWORD'], c.op['PORT'])
#
# 1 go to dbrary and get all volumes (id, owner id)
#
db_volumes = db_DB.query(_QUERIES['db_volumes'])
#
# 2 get all wp from op for the volumes project with volume id
#
op_workpackages = op_DB.query(_QUERIES['op_workpackages'])
# - index the volumes in already
volumes_in_op = wp_vols(op_workpackages)
#
# 3a compare data and determine all of the volumes in dbrary that need to be added
#
vols_to_add = getnew(volumes_in_op, db_volumes)
print("%s new volumes to be added" % str(len(vols_to_add)))
#
# 3b determine which volumes have been added to op, but no longer exist in db
#
vols_to_del = getdel(volumes_in_op, db_volumes)
print("%s volumes have been deleted" % str(len(vols_to_del)))
#
# 4a prepare data for adding to wp
# - get user information same way as get volume info from wp as #2
op_parties = op_DB.query(_QUERIES['op_parties'])
raw_data = getData(vols_to_add, op_parties)
ready_data = prepareData(raw_data)
print("To be added:")
pprint(ready_data)
#
#
# 4b prepare which volumes we need to edit, got back to op_workpackages and get wp ids by vol in vols_to_del
#
del_data = prepareDel(vols_to_del, op_workpackages)
print("the following workpackages should be deleted: %s" % str(del_data))
#
# 5 insert these records via the api (POST - /project/api/v3/projects/volumes/work_packages)
#
if RUNNOW:
print("sending volumes to tickets...")
for r in ready_data:
insert_vols(r)
else:
print("Run with `-r` to insert outstanding volumes into ticket flow")
#
# 6 remove the deleted volumes
# if op api actually allowed this to happen
#
# 7 close up data base, die
#
del db_DB
del op_DB
| gpl-3.0 | -2,786,448,086,821,386,000 | 33.12 | 316 | 0.596885 | false |
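The sync script's core is two comparisons: database volumes not yet mirrored in OpenProject, and OpenProject ids whose volumes were deleted. A toy version of getnew/getdel with made-up ids:

# Volumes present in the database (id, creation date) and ids already mirrored in OpenProject.
db_volumes = [(4, "2015-01-01"), (5, "2015-02-01"), (7, "2015-03-01")]
op_volume_ids = ["4", "6"]

def get_new(op_ids, db_rows):
    # Database rows whose id has no matching OpenProject work package yet.
    return [row for row in db_rows if str(row[0]) not in op_ids]

def get_deleted(op_ids, db_rows):
    # OpenProject ids whose volume no longer exists in the database.
    present = set(row[0] for row in db_rows)
    return [v for v in op_ids if int(v) not in present]

print(get_new(op_volume_ids, db_volumes))      # -> [(5, '2015-02-01'), (7, '2015-03-01')]
print(get_deleted(op_volume_ids, db_volumes))  # -> ['6']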
kustodian/ansible | lib/ansible/modules/source_control/bitbucket/bitbucket_pipeline_key_pair.py | 37 | 6115 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Evgeniy Krysanov <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community',
}
DOCUMENTATION = r'''
---
module: bitbucket_pipeline_key_pair
short_description: Manages Bitbucket pipeline SSH key pair
description:
- Manages Bitbucket pipeline SSH key pair.
version_added: "2.8"
author:
- Evgeniy Krysanov (@catcombo)
options:
client_id:
description:
- OAuth consumer key.
- If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used.
type: str
client_secret:
description:
- OAuth consumer secret.
- If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used.
type: str
repository:
description:
- The repository name.
type: str
required: true
username:
description:
- The repository owner.
type: str
required: true
public_key:
description:
- The public key.
type: str
private_key:
description:
- The private key.
type: str
state:
description:
- Indicates desired state of the key pair.
type: str
required: true
choices: [ absent, present ]
notes:
- Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth.
- Check mode is supported.
'''
EXAMPLES = r'''
- name: Create or update SSH key pair
bitbucket_pipeline_key_pair:
repository: 'bitbucket-repo'
username: bitbucket_username
public_key: '{{lookup("file", "bitbucket.pub") }}'
private_key: '{{lookup("file", "bitbucket") }}'
state: present
- name: Remove SSH key pair
bitbucket_pipeline_key_pair:
repository: bitbucket-repo
username: bitbucket_username
state: absent
'''
RETURN = r''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.source_control.bitbucket import BitbucketHelper
error_messages = {
'invalid_params': 'Account, repository or SSH key pair was not found',
'required_keys': '`public_key` and `private_key` are required when the `state` is `present`',
}
BITBUCKET_API_ENDPOINTS = {
'ssh-key-pair': '%s/2.0/repositories/{username}/{repo_slug}/pipelines_config/ssh/key_pair' % BitbucketHelper.BITBUCKET_API_URL,
}
def get_existing_ssh_key_pair(module, bitbucket):
"""
Retrieves an existing SSH key pair from the repository
specified in the module param `repository`
:param module: instance of the :class:`AnsibleModule`
:param bitbucket: instance of the :class:`BitbucketHelper`
:return: existing key pair or None if not found
:rtype: dict or None
Return example::
{
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ...2E8HAeT",
"type": "pipeline_ssh_key_pair"
}
"""
api_url = BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format(
username=module.params['username'],
repo_slug=module.params['repository'],
)
info, content = bitbucket.request(
api_url=api_url,
method='GET',
)
if info['status'] == 404:
# Account, repository or SSH key pair was not found.
return None
return content
def update_ssh_key_pair(module, bitbucket):
info, content = bitbucket.request(
api_url=BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format(
username=module.params['username'],
repo_slug=module.params['repository'],
),
method='PUT',
data={
'private_key': module.params['private_key'],
'public_key': module.params['public_key'],
},
)
if info['status'] == 404:
module.fail_json(msg=error_messages['invalid_params'])
if info['status'] != 200:
module.fail_json(msg='Failed to create or update pipeline ssh key pair : {0}'.format(info))
def delete_ssh_key_pair(module, bitbucket):
info, content = bitbucket.request(
api_url=BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format(
username=module.params['username'],
repo_slug=module.params['repository'],
),
method='DELETE',
)
if info['status'] == 404:
module.fail_json(msg=error_messages['invalid_params'])
if info['status'] != 204:
module.fail_json(msg='Failed to delete pipeline ssh key pair: {0}'.format(info))
def main():
argument_spec = BitbucketHelper.bitbucket_argument_spec()
argument_spec.update(
repository=dict(type='str', required=True),
username=dict(type='str', required=True),
public_key=dict(type='str'),
private_key=dict(type='str', no_log=True),
state=dict(type='str', choices=['present', 'absent'], required=True),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
bitbucket = BitbucketHelper(module)
state = module.params['state']
public_key = module.params['public_key']
private_key = module.params['private_key']
# Check parameters
if ((public_key is None) or (private_key is None)) and (state == 'present'):
module.fail_json(msg=error_messages['required_keys'])
# Retrieve access token for authorized API requests
bitbucket.fetch_access_token()
# Retrieve existing ssh key
key_pair = get_existing_ssh_key_pair(module, bitbucket)
changed = False
# Create or update key pair
if (not key_pair or (key_pair.get('public_key') != public_key)) and (state == 'present'):
if not module.check_mode:
update_ssh_key_pair(module, bitbucket)
changed = True
# Delete key pair
elif key_pair and (state == 'absent'):
if not module.check_mode:
delete_ssh_key_pair(module, bitbucket)
changed = True
module.exit_json(changed=changed)
if __name__ == '__main__':
main()
| gpl-3.0 | -3,603,212,077,573,208,000 | 27.70892 | 131 | 0.641864 | false |
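The module's main() is an idempotency check: compare the existing key pair against the desired state and only update or delete when they differ. A stripped-down sketch of just that decision, with no Bitbucket calls and invented key strings:

def decide_action(existing, desired_public_key, state):
    # Returns "update", "delete", or None when nothing needs to change.
    if state == "present":
        if existing is None or existing.get("public_key") != desired_public_key:
            return "update"
    elif state == "absent" and existing is not None:
        return "delete"
    return None

print(decide_action(None, "ssh-rsa AAAA...", "present"))                               # -> update
print(decide_action({"public_key": "ssh-rsa AAAA..."}, "ssh-rsa AAAA...", "present"))  # -> None
print(decide_action({"public_key": "ssh-rsa AAAA..."}, None, "absent"))                # -> delete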
FescueFungiShare/hydroshare | hs_core/views/discovery_json_view.py | 1 | 3195 | import json
from django.http import HttpResponse
from haystack.generic_views import FacetedSearchView
from hs_core.discovery_form import DiscoveryForm
# View class for generating JSON data format from Haystack
# returned JSON objects array is used for building the map view
class DiscoveryJsonView(FacetedSearchView):
# set facet fields
facet_fields = ['creators', 'subjects', 'resource_type', 'public', 'owners_names', 'discoverable', 'published']
# declare form class to use in this view
form_class = DiscoveryForm
# overwrite Haystack generic_view.py form_valid() function to generate JSON response
def form_valid(self, form):
# initialize an empty array for holding the result objects with coordinate values
coor_values = []
# get query set
self.queryset = form.search()
# When we have a GET request with search query, build our JSON objects array
if len(self.request.GET):
# iterate all the search results
for result in self.get_queryset():
# initialize a null JSON object
json_obj = {}
# assign title and url values to the object
json_obj['title'] = result.object.metadata.title.value
json_obj['resource_type'] = result.object.verbose_name
json_obj['get_absolute_url'] = result.object.get_absolute_url()
json_obj['first_author'] = result.object.first_creator.name
if result.object.first_creator.description:
json_obj['first_author_description'] = result.object.first_creator.description
# iterate all the coverage values
for coverage in result.object.metadata.coverages.all():
# if coverage type is point, assign 'east' and 'north' coordinates to the object
if coverage.type == 'point':
json_obj['coverage_type'] = coverage.type
json_obj['east'] = coverage.value['east']
json_obj['north'] = coverage.value['north']
# elif coverage type is box, assign 'northlimit', 'eastlimit', 'southlimit' and 'westlimit' coordinates to the object
elif coverage.type == 'box':
json_obj['coverage_type'] = coverage.type
json_obj['northlimit'] = coverage.value['northlimit']
json_obj['eastlimit'] = coverage.value['eastlimit']
json_obj['southlimit'] = coverage.value['southlimit']
json_obj['westlimit'] = coverage.value['westlimit']
# else, skip
else:
continue
# encode object to JSON format
coor_obj = json.dumps(json_obj)
# add the JSON object to the results array
coor_values.append(coor_obj)
# encode the results array as a JSON array
the_data = json.dumps(coor_values)
# return JSON response
return HttpResponse(the_data, content_type='application/json')
| bsd-3-clause | 1,549,041,135,639,713,000 | 50.532258 | 137 | 0.58748 | false |
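Note that the view above runs json.dumps on each record and then again on the whole list, so the response is a JSON array of JSON-encoded strings rather than an array of objects; whether that is what the map client expects is up to the consumer. A short illustration of the difference, with an invented record:

import json

record = {"title": "Sample resource", "east": -111.9, "north": 40.7}

double_encoded = json.dumps([json.dumps(record)])  # array of JSON strings, as in the view above
single_encoded = json.dumps([record])              # array of JSON objects

print(double_encoded)
print(single_encoded)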
dushu1203/chromium.src | chrome/common/extensions/docs/server2/jsc_view_test.py | 22 | 15249 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import unittest
from jsc_view import GetEventByNameFromEvents
from api_schema_graph import APISchemaGraph
from availability_finder import AvailabilityFinder, AvailabilityInfo
from branch_utility import BranchUtility, ChannelInfo
from compiled_file_system import CompiledFileSystem
from extensions_paths import CHROME_EXTENSIONS
from fake_host_file_system_provider import FakeHostFileSystemProvider
from fake_url_fetcher import FakeUrlFetcher
from features_bundle import FeaturesBundle
from future import Future
from host_file_system_iterator import HostFileSystemIterator
from jsc_view import CreateJSCView, _JSCViewBuilder, _FormatValue
from object_store_creator import ObjectStoreCreator
from schema_processor import SchemaProcessorFactoryForTest
from servlet import Request
from server_instance import ServerInstance
from test_data.api_data_source.canned_master_fs import CANNED_MASTER_FS_DATA
from test_data.canned_data import CANNED_API_FILE_SYSTEM_DATA
from test_data.object_level_availability.tabs import TABS_SCHEMA_BRANCHES
from test_file_system import TestFileSystem
from test_util import Server2Path
class _FakeTemplateCache(object):
def GetFromFile(self, key):
return Future(value='motemplate %s' % key)
class _FakeFeaturesBundle(object):
def GetAPIFeatures(self):
return Future(value={
'bluetooth': {'value': True},
'contextMenus': {'value': True},
'jsonStableAPI': {'value': True},
'idle': {'value': True},
'input.ime': {'value': True},
'tabs': {'value': True}
})
class _FakeAvailabilityFinder(object):
def __init__(self, fake_availability):
self._fake_availability = fake_availability
def GetAPIAvailability(self, api_name):
return self._fake_availability
def GetAPINodeAvailability(self, api_name):
schema_graph = APISchemaGraph()
api_graph = APISchemaGraph(json.loads(
CANNED_MASTER_FS_DATA['api'][api_name + '.json']))
# Give the graph fake ChannelInfo; it's not used in tests.
channel_info = ChannelInfo('stable', '28', 28)
schema_graph.Update(api_graph, lambda _: channel_info)
return schema_graph
class JSCViewTest(unittest.TestCase):
def setUp(self):
self._base_path = Server2Path('test_data', 'test_json')
server_instance = ServerInstance.ForTest(
TestFileSystem(CANNED_MASTER_FS_DATA, relative_to=CHROME_EXTENSIONS))
file_system = server_instance.host_file_system_provider.GetMaster()
self._json_cache = server_instance.compiled_fs_factory.ForJson(file_system)
self._features_bundle = FeaturesBundle(file_system,
server_instance.compiled_fs_factory,
server_instance.object_store_creator,
'extensions')
self._api_models = server_instance.platform_bundle.GetAPIModels(
'extensions')
self._fake_availability = AvailabilityInfo(ChannelInfo('stable', '396', 5))
def _ReadLocalFile(self, filename):
with open(os.path.join(self._base_path, filename), 'r') as f:
return f.read()
def _LoadJSON(self, filename):
return json.loads(self._ReadLocalFile(filename))
def _FakeLoadAddRulesSchema(self):
events = self._LoadJSON('add_rules_def_test.json')
return Future(value=GetEventByNameFromEvents(events))
def testFormatValue(self):
self.assertEquals('1,234,567', _FormatValue(1234567))
self.assertEquals('67', _FormatValue(67))
self.assertEquals('234,567', _FormatValue(234567))
def testGetEventByNameFromEvents(self):
events = {}
# Missing 'types' completely.
self.assertRaises(AssertionError, GetEventByNameFromEvents, events)
events['types'] = []
# No type 'Event' defined.
self.assertRaises(AssertionError, GetEventByNameFromEvents, events)
events['types'].append({ 'name': 'Event',
'functions': []})
add_rules = { "name": "addRules" }
events['types'][0]['functions'].append(add_rules)
self.assertEqual(add_rules,
GetEventByNameFromEvents(events)['addRules'])
events['types'][0]['functions'].append(add_rules)
# Duplicates are an error.
self.assertRaises(AssertionError, GetEventByNameFromEvents, events)
def testCreateId(self):
fake_avail_finder = _FakeAvailabilityFinder(self._fake_availability)
dict_ = CreateJSCView(
self._api_models.GetContentScriptAPIs().Get(),
self._api_models.GetModel('tester').Get(),
fake_avail_finder,
self._json_cache,
_FakeTemplateCache(),
self._features_bundle,
None,
'extensions',
[],
Request.ForTest(''))
self.assertEquals('type-TypeA', dict_['types'][0]['id'])
self.assertEquals('property-TypeA-b',
dict_['types'][0]['properties'][0]['id'])
self.assertEquals('method-get', dict_['functions'][0]['id'])
self.assertEquals('event-EventA', dict_['events'][0]['id'])
# TODO(kalman): re-enable this when we have a rebase option.
def DISABLED_testToDict(self):
fake_avail_finder = _FakeAvailabilityFinder(self._fake_availability)
expected_json = self._LoadJSON('expected_tester.json')
dict_ = CreateJSCView(
self._api_models.GetContentScriptAPIs().Get(),
self._api_models.GetModel('tester').Get(),
fake_avail_finder,
self._json_cache,
_FakeTemplateCache(),
self._features_bundle,
None,
'extensions',
[],
Request.ForTest(''))
self.assertEquals(expected_json, dict_)
def testAddRules(self):
fake_avail_finder = _FakeAvailabilityFinder(self._fake_availability)
dict_ = CreateJSCView(
self._api_models.GetContentScriptAPIs().Get(),
self._api_models.GetModel('add_rules_tester').Get(),
fake_avail_finder,
self._json_cache,
_FakeTemplateCache(),
self._features_bundle,
self._FakeLoadAddRulesSchema(),
'extensions',
[],
Request.ForTest(''))
# Check that the first event has the addRulesFunction defined.
self.assertEquals('add_rules_tester', dict_['name'])
self.assertEquals('rules', dict_['events'][0]['name'])
self.assertEquals('notable_name_to_check_for',
dict_['events'][0]['byName']['addRules'][
'parameters'][0]['name'])
# Check that the second event has addListener defined.
self.assertEquals('noRules', dict_['events'][1]['name'])
self.assertEquals('add_rules_tester', dict_['name'])
self.assertEquals('noRules', dict_['events'][1]['name'])
self.assertEquals('callback',
dict_['events'][0]['byName']['addListener'][
'parameters'][0]['name'])
def testGetIntroList(self):
fake_avail_finder = _FakeAvailabilityFinder(self._fake_availability)
model = _JSCViewBuilder(
self._api_models.GetContentScriptAPIs().Get(),
self._api_models.GetModel('tester').Get(),
fake_avail_finder,
self._json_cache,
_FakeTemplateCache(),
self._features_bundle,
None,
'extensions',
[])
expected_list = [
{ 'title': 'Description',
'content': [
{ 'text': 'a test api' }
]
},
{ 'title': 'Availability',
'content': [
{ 'partial': 'motemplate chrome/common/extensions/docs/' +
'templates/private/intro_tables/stable_message.html',
'version': 5,
'scheduled': None
}
]
},
{ 'title': 'Permissions',
'content': [
{ 'class': 'override',
'text': '"tester"'
},
{ 'text': 'is an API for testing things.' }
]
},
{ 'title': 'Manifest',
'content': [
{ 'class': 'code',
'text': '"tester": {...}'
}
]
},
{ 'title': 'Content Scripts',
'content': [
{
'partial': 'motemplate chrome/common/extensions/docs' +
'/templates/private/intro_tables/content_scripts.html',
'contentScriptSupport': {
'name': 'tester',
'restrictedTo': None
}
}
]
},
{ 'title': 'Learn More',
'content': [
{ 'link': 'https://tester.test.com/welcome.html',
'text': 'Welcome!'
}
]
}
]
self.assertEquals(model._GetIntroTableList(), expected_list)
# Tests the same data with a scheduled availability.
fake_avail_finder = _FakeAvailabilityFinder(
AvailabilityInfo(ChannelInfo('beta', '1453', 27), scheduled=28))
model = _JSCViewBuilder(
self._api_models.GetContentScriptAPIs().Get(),
self._api_models.GetModel('tester').Get(),
fake_avail_finder,
self._json_cache,
_FakeTemplateCache(),
self._features_bundle,
None,
'extensions',
[])
expected_list[1] = {
'title': 'Availability',
'content': [
{ 'partial': 'motemplate chrome/common/extensions/docs/' +
'templates/private/intro_tables/beta_message.html',
'version': 27,
'scheduled': 28
}
]
}
self.assertEquals(model._GetIntroTableList(), expected_list)
class JSCViewWithoutNodeAvailabilityTest(unittest.TestCase):
def setUp(self):
server_instance = ServerInstance.ForTest(
file_system_provider=FakeHostFileSystemProvider(
CANNED_API_FILE_SYSTEM_DATA))
self._api_models = server_instance.platform_bundle.GetAPIModels(
'extensions')
self._json_cache = server_instance.compiled_fs_factory.ForJson(
server_instance.host_file_system_provider.GetMaster())
self._avail_finder = server_instance.platform_bundle.GetAvailabilityFinder(
'extensions')
def testGetAPIAvailability(self):
api_availabilities = {
'bluetooth': 31,
'contextMenus': 'master',
'jsonStableAPI': 20,
'idle': 5,
'input.ime': 18,
'tabs': 18
}
for api_name, availability in api_availabilities.iteritems():
model_dict = CreateJSCView(
self._api_models.GetContentScriptAPIs().Get(),
self._api_models.GetModel(api_name).Get(),
self._avail_finder,
self._json_cache,
_FakeTemplateCache(),
_FakeFeaturesBundle(),
None,
'extensions',
[],
Request.ForTest(''))
self.assertEquals(availability,
model_dict['introList'][1]['content'][0]['version'])
class JSCViewWithNodeAvailabilityTest(unittest.TestCase):
def setUp(self):
tabs_unmodified_versions = (16, 20, 23, 24)
self._branch_utility = BranchUtility(
os.path.join('branch_utility', 'first.json'),
os.path.join('branch_utility', 'second.json'),
FakeUrlFetcher(Server2Path('test_data')),
ObjectStoreCreator.ForTest())
self._node_fs_creator = FakeHostFileSystemProvider(TABS_SCHEMA_BRANCHES)
self._node_fs_iterator = HostFileSystemIterator(self._node_fs_creator,
self._branch_utility)
test_object_store = ObjectStoreCreator.ForTest()
self._avail_finder = AvailabilityFinder(
self._branch_utility,
CompiledFileSystem.Factory(test_object_store),
self._node_fs_iterator,
self._node_fs_creator.GetMaster(),
test_object_store,
'extensions',
SchemaProcessorFactoryForTest())
server_instance = ServerInstance.ForTest(
file_system_provider=FakeHostFileSystemProvider(
TABS_SCHEMA_BRANCHES))
self._api_models = server_instance.platform_bundle.GetAPIModels(
'extensions')
self._json_cache = server_instance.compiled_fs_factory.ForJson(
server_instance.host_file_system_provider.GetMaster())
# Imitate the actual SVN file system by incrementing the stats for paths
# where an API schema has changed.
last_stat = type('last_stat', (object,), {'val': 0})
def stat_paths(file_system, channel_info):
if channel_info.version not in tabs_unmodified_versions:
last_stat.val += 1
# HACK: |file_system| is a MockFileSystem backed by a TestFileSystem.
# Increment the TestFileSystem stat count.
file_system._file_system.IncrementStat(by=last_stat.val)
# Continue looping. The iterator will stop after 'master' automatically.
return True
# Use the HostFileSystemIterator created above to change global stat values
# for the TestFileSystems that it creates.
self._node_fs_iterator.Ascending(
# The earliest version represented with the tabs' test data is 13.
self._branch_utility.GetStableChannelInfo(13),
stat_paths)
def testGetAPINodeAvailability(self):
def assertEquals(node, actual):
node_availabilities = {
'tabs.Tab': None,
'tabs.fakeTabsProperty1': None,
'tabs.get': None,
'tabs.onUpdated': None,
'tabs.InjectDetails': 25,
'tabs.fakeTabsProperty2': 15,
'tabs.getCurrent': 19,
'tabs.onActivated': 30
}
self.assertEquals(node_availabilities[node], actual)
model_dict = CreateJSCView(
self._api_models.GetContentScriptAPIs().Get(),
self._api_models.GetModel('tabs').Get(),
self._avail_finder,
self._json_cache,
_FakeTemplateCache(),
_FakeFeaturesBundle(),
None,
'extensions',
[],
Request.ForTest(''))
# Test nodes that have the same availability as their parent.
# Test type.
assertEquals('tabs.Tab', model_dict['types'][0]['availability'])
# Test property.
assertEquals('tabs.fakeTabsProperty1',
model_dict['properties'][0]['availability'])
# Test function.
assertEquals('tabs.get', model_dict['functions'][1]['availability'])
# Test event.
assertEquals('tabs.onUpdated', model_dict['events'][1]['availability'])
# Test nodes with varying availabilities.
# Test type.
assertEquals('tabs.InjectDetails',
model_dict['types'][1]['availability']['version'])
# Test property.
assertEquals('tabs.fakeTabsProperty2',
model_dict['properties'][2]['availability']['version'])
# Test function.
assertEquals('tabs.getCurrent',
model_dict['functions'][0]['availability']['version'])
# Test event.
assertEquals('tabs.onActivated',
model_dict['events'][0]['availability']['version'])
# Test a node that became deprecated.
self.assertEquals({
'scheduled': None,
'version': 26,
'partial': 'motemplate chrome/common/extensions/docs/templates/' +
'private/intro_tables/deprecated_message.html'
}, model_dict['types'][2]['availability'])
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 3,795,724,604,769,226,000 | 34.964623 | 80 | 0.627058 | false |
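These tests lean on small duck-typed fakes (_FakeTemplateCache, _FakeAvailabilityFinder, _FakeFeaturesBundle) rather than a mocking library: any object with the right method names can stand in for the real dependency. A tiny, hypothetical example of the same idea (FakeTemplateCache and render_intro below are invented for illustration):

class FakeTemplateCache(object):
    # Any object exposing GetFromFile with the same shape can stand in for the real cache.
    def GetFromFile(self, key):
        return "motemplate %s" % key

def render_intro(template_cache, path):
    return template_cache.GetFromFile(path).upper()

print(render_intro(FakeTemplateCache(), "intro_tables/stable_message.html"))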
google/lkml-gerrit-bridge | src/message.py | 1 | 2859 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import email
import re
from typing import List, Optional, Tuple
def lore_link(message_id: str) -> str:
# We store message ids enclosed in <>, so trim those off.
return 'https://lore.kernel.org/linux-kselftest/' + message_id[1:-1]
class Message(object):
def __init__(self, id, subject, from_, in_reply_to, content, archive_hash) -> None:
self.id = id
self.subject = subject
self.from_ = from_
self.in_reply_to = in_reply_to
self.content = content
self.change_id = None
self.archive_hash = archive_hash
self.children = [] # type: List[Message]
def is_patch(self) -> bool:
if re.match(r'\[.+\] .+', self.subject):
return True
return False
def patch_index(self) -> Tuple[int, int]:
if not self.is_patch():
raise ValueError(f'Missing patch index in subject: {self.subject}')
match = re.match(r'\[.+ (\d+)/(\d+)\] .+', self.subject)
if match:
return int(match.group(1)), int(match.group(2))
return (1, 1)
def __str__(self) -> str:
in_reply_to = self.in_reply_to or ''
return ('{\n' +
'id = ' + self.id + '\n' +
'subject = ' + self.subject + '\n' +
'in_reply_to = ' + in_reply_to + '\n' +
# 'content = ' + self.content + '\n' +
'}')
def __repr__(self) -> str:
return str(self)
def debug_info(self) -> str:
return (f'Message ID: {self.id}\n'
f'Lore Link: {lore_link(self.id)}\n'
f'Commit Hash: {self.archive_hash}')
def parse_message_from_str(raw_email: str, archive_hash: str) -> Message:
"""Parses a Message from a raw email."""
compiled_email = email.message_from_string(raw_email)
content = []
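    # Multipart emails yield a list of payload bodies; single-part emails yield a single string.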
if compiled_email.is_multipart():
for payload in compiled_email.get_payload():
content.append(payload.get_payload())
else:
content = compiled_email.get_payload()
return Message(compiled_email['Message-Id'],
compiled_email['subject'],
compiled_email['from'],
compiled_email['In-Reply-To'],
content,
archive_hash)
| apache-2.0 | -6,645,432,135,169,708,000 | 35.189873 | 87 | 0.578874 | false |
hgl888/chromium-crosswalk | chrome/common/extensions/docs/server2/app_yaml_helper_test.py | 53 | 6154 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from app_yaml_helper import AppYamlHelper
from extensions_paths import SERVER2
from host_file_system_provider import HostFileSystemProvider
from mock_file_system import MockFileSystem
from object_store_creator import ObjectStoreCreator
from test_file_system import MoveTo, TestFileSystem
from test_util import DisableLogging
_ExtractVersion, _IsGreater, _GenerateAppYaml = (
AppYamlHelper.ExtractVersion,
AppYamlHelper.IsGreater,
AppYamlHelper.GenerateAppYaml)
class AppYamlHelperTest(unittest.TestCase):
def testExtractVersion(self):
def run_test(version):
self.assertEqual(version, _ExtractVersion(_GenerateAppYaml(version)))
run_test('0')
run_test('0-0')
run_test('0-0-0')
run_test('1')
run_test('1-0')
run_test('1-0-0')
run_test('1-0-1')
run_test('1-1-0')
run_test('1-1-1')
run_test('2-0-9')
run_test('2-0-12')
run_test('2-1')
run_test('2-1-0')
run_test('2-11-0')
run_test('3-1-0')
run_test('3-1-3')
run_test('3-12-0')
def testIsGreater(self):
def assert_is_greater(lhs, rhs):
self.assertTrue(_IsGreater(lhs, rhs), '%s is not > %s' % (lhs, rhs))
self.assertFalse(_IsGreater(rhs, lhs),
'%s should not be > %s' % (rhs, lhs))
assert_is_greater('0-0', '0')
assert_is_greater('0-0-0', '0')
assert_is_greater('0-0-0', '0-0')
assert_is_greater('1', '0')
assert_is_greater('1', '0-0')
assert_is_greater('1', '0-0-0')
assert_is_greater('1-0', '0-0')
assert_is_greater('1-0-0-0', '0-0-0')
assert_is_greater('2-0-12', '2-0-9')
assert_is_greater('2-0-12', '2-0-9-0')
assert_is_greater('2-0-12-0', '2-0-9')
assert_is_greater('2-0-12-0', '2-0-9-0')
assert_is_greater('2-1', '2-0-9')
assert_is_greater('2-1', '2-0-12')
assert_is_greater('2-1-0', '2-0-9')
assert_is_greater('2-1-0', '2-0-12')
assert_is_greater('3-1-0', '2-1')
assert_is_greater('3-1-0', '2-1-0')
assert_is_greater('3-1-0', '2-11-0')
assert_is_greater('3-1-3', '3-1-0')
assert_is_greater('3-12-0', '3-1-0')
assert_is_greater('3-12-0', '3-1-3')
assert_is_greater('3-12-0', '3-1-3-0')
@DisableLogging('warning')
def testInstanceMethods(self):
test_data = {
'app.yaml': _GenerateAppYaml('1-0'),
'app_yaml_helper.py': 'Copyright notice etc'
}
updates = []
# Pass a specific file system at head to the HostFileSystemProvider so that
# we know it's always going to be backed by a MockFileSystem. The Provider
# may decide to wrap it in caching etc.
file_system_at_head = MockFileSystem(
TestFileSystem(test_data, relative_to=SERVER2))
def apply_update(update):
update = MoveTo(SERVER2, update)
file_system_at_head.Update(update)
updates.append(update)
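    # Simulates checking out the file system at a given commit by replaying the first N recorded updates.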
def host_file_system_constructor(branch, commit=None):
self.assertEqual('master', branch)
self.assertTrue(commit is not None)
return MockFileSystem.Create(
TestFileSystem(test_data, relative_to=SERVER2), updates[:int(commit)])
object_store_creator = ObjectStoreCreator.ForTest()
host_file_system_provider = HostFileSystemProvider(
object_store_creator,
default_master_instance=file_system_at_head,
constructor_for_test=host_file_system_constructor)
helper = AppYamlHelper(object_store_creator, host_file_system_provider)
def assert_is_up_to_date(version):
self.assertTrue(helper.IsUpToDate(version),
'%s is not up to date' % version)
self.assertRaises(ValueError,
helper.GetFirstRevisionGreaterThan, version)
self.assertEqual(0, helper.GetFirstRevisionGreaterThan('0-5-0'))
assert_is_up_to_date('1-0-0')
assert_is_up_to_date('1-5-0')
# Revision 1.
apply_update({
'app.yaml': _GenerateAppYaml('1-5-0')
})
self.assertEqual(0, helper.GetFirstRevisionGreaterThan('0-5-0'))
self.assertEqual(1, helper.GetFirstRevisionGreaterThan('1-0-0'))
assert_is_up_to_date('1-5-0')
assert_is_up_to_date('2-5-0')
# Revision 2.
apply_update({
'app_yaml_helper.py': 'fixed a bug'
})
self.assertEqual(0, helper.GetFirstRevisionGreaterThan('0-5-0'))
self.assertEqual(1, helper.GetFirstRevisionGreaterThan('1-0-0'))
assert_is_up_to_date('1-5-0')
assert_is_up_to_date('2-5-0')
# Revision 3.
apply_update({
'app.yaml': _GenerateAppYaml('1-6-0')
})
self.assertEqual(0, helper.GetFirstRevisionGreaterThan('0-5-0'))
self.assertEqual(1, helper.GetFirstRevisionGreaterThan('1-0-0'))
self.assertEqual(3, helper.GetFirstRevisionGreaterThan('1-5-0'))
assert_is_up_to_date('2-5-0')
# Revision 4.
apply_update({
'app.yaml': _GenerateAppYaml('1-8-0')
})
# Revision 5.
apply_update({
'app.yaml': _GenerateAppYaml('2-0-0')
})
# Revision 6.
apply_update({
'app.yaml': _GenerateAppYaml('2-2-0')
})
# Revision 7.
apply_update({
'app.yaml': _GenerateAppYaml('2-4-0')
})
# Revision 8.
apply_update({
'app.yaml': _GenerateAppYaml('2-6-0')
})
self.assertEqual(0, helper.GetFirstRevisionGreaterThan('0-5-0'))
self.assertEqual(1, helper.GetFirstRevisionGreaterThan('1-0-0'))
self.assertEqual(3, helper.GetFirstRevisionGreaterThan('1-5-0'))
self.assertEqual(5, helper.GetFirstRevisionGreaterThan('1-8-0'))
self.assertEqual(6, helper.GetFirstRevisionGreaterThan('2-0-0'))
self.assertEqual(6, helper.GetFirstRevisionGreaterThan('2-1-0'))
self.assertEqual(7, helper.GetFirstRevisionGreaterThan('2-2-0'))
self.assertEqual(7, helper.GetFirstRevisionGreaterThan('2-3-0'))
self.assertEqual(8, helper.GetFirstRevisionGreaterThan('2-4-0'))
self.assertEqual(8, helper.GetFirstRevisionGreaterThan('2-5-0'))
assert_is_up_to_date('2-6-0')
assert_is_up_to_date('2-7-0')
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 7,926,481,948,426,720,000 | 33.379888 | 80 | 0.643646 | false |
tchernomax/ansible | lib/ansible/modules/cloud/openstack/os_group.py | 34 | 4758 | #!/usr/bin/python
# Copyright (c) 2016 IBM
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_group
short_description: Manage OpenStack Identity Groups
extends_documentation_fragment: openstack
version_added: "2.1"
author: "Monty Taylor (@emonty), David Shrewsbury (@Shrews)"
description:
- Manage OpenStack Identity Groups. Groups can be created, deleted or
updated. Only the I(description) value can be updated.
options:
name:
description:
- Group name
required: true
description:
description:
- Group description
domain_id:
description:
- Domain id to create the group in if the cloud supports domains.
version_added: "2.3"
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
- Ignored. Present for backwards compatibility
requirements:
- "python >= 2.7"
- "openstacksdk"
'''
EXAMPLES = '''
# Create a group named "demo"
- os_group:
cloud: mycloud
state: present
name: demo
description: "Demo Group"
domain_id: demoid
# Update the description on existing "demo" group
- os_group:
cloud: mycloud
state: present
name: demo
description: "Something else"
domain_id: demoid
# Delete group named "demo"
- os_group:
cloud: mycloud
state: absent
name: demo
'''
RETURN = '''
group:
description: Dictionary describing the group.
returned: On success when I(state) is 'present'.
type: complex
contains:
id:
description: Unique group ID
type: string
sample: "ee6156ff04c645f481a6738311aea0b0"
name:
description: Group name
type: string
sample: "demo"
description:
description: Group description
type: string
sample: "Demo Group"
domain_id:
description: Domain for the group
type: string
sample: "default"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _system_state_change(state, description, group):
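    # Returns True when the requested state differs from the existing group (used to support check mode).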
if state == 'present' and not group:
return True
if state == 'present' and description is not None and group.description != description:
return True
if state == 'absent' and group:
return True
return False
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=True),
description=dict(required=False, default=None),
domain_id=dict(required=False, default=None),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
name = module.params.get('name')
description = module.params.get('description')
state = module.params.get('state')
domain_id = module.params.pop('domain_id')
sdk, cloud = openstack_cloud_from_module(module)
try:
if domain_id:
group = cloud.get_group(name, filters={'domain_id': domain_id})
else:
group = cloud.get_group(name)
if module.check_mode:
module.exit_json(changed=_system_state_change(state, description, group))
if state == 'present':
if group is None:
group = cloud.create_group(
name=name, description=description, domain=domain_id)
changed = True
else:
if description is not None and group.description != description:
group = cloud.update_group(
group.id, description=description)
changed = True
else:
changed = False
module.exit_json(changed=changed, group=group)
elif state == 'absent':
if group is None:
changed = False
else:
cloud.delete_group(group.id)
changed = True
module.exit_json(changed=changed)
except sdk.exceptions.OpenStackCloudException as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| gpl-3.0 | -2,193,676,315,343,591,700 | 27.491018 | 125 | 0.603405 | false |
MattsFleaMarket/python-for-android | python3-alpha/python3-src/Lib/lib2to3/fixes/fix_map.py | 170 | 3058 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that changes map(F, ...) into list(map(F, ...)) unless there
exists a 'from future_builtins import map' statement in the top-level
namespace.
As a special case, map(None, X) is changed into list(X). (This is
necessary because the semantics are changed in this case -- the new
map(None, X) is equivalent to [(x,) for x in X].)
We avoid the transformation (except for the special case mentioned
above) if the map() call is directly contained in iter(<>), list(<>),
tuple(<>), sorted(<>), ...join(<>), or for V in <>:.
NOTE: This is still not correct if the original code was depending on
map(F, X, Y, ...) to go on until the longest argument is exhausted,
substituting None for missing values -- like zip(), it now stops as
soon as the shortest argument is exhausted.
"""
# Local imports
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, ListComp, in_special_context
from ..pygram import python_symbols as syms
class FixMap(fixer_base.ConditionalFix):
BM_compatible = True
PATTERN = """
map_none=power<
'map'
trailer< '(' arglist< 'None' ',' arg=any [','] > ')' >
>
|
map_lambda=power<
'map'
trailer<
'('
arglist<
lambdef< 'lambda'
(fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any
>
','
it=any
>
')'
>
>
|
power<
'map' trailer< '(' [arglist=any] ')' >
>
"""
skip_on = 'future_builtins.map'
def transform(self, node, results):
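        # Wrap bare map() calls in list(...) unless the surrounding context (iter, list,
        # tuple, sorted, join, or a for loop) already consumes the iterator.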
if self.should_skip(node):
return
if node.parent.type == syms.simple_stmt:
self.warning(node, "You should use a for loop here")
new = node.clone()
new.prefix = ""
new = Call(Name("list"), [new])
elif "map_lambda" in results:
new = ListComp(results["xp"].clone(),
results["fp"].clone(),
results["it"].clone())
else:
if "map_none" in results:
new = results["arg"].clone()
else:
if "arglist" in results:
args = results["arglist"]
if args.type == syms.arglist and \
args.children[0].type == token.NAME and \
args.children[0].value == "None":
self.warning(node, "cannot convert map(None, ...) "
"with multiple arguments because map() "
"now truncates to the shortest sequence")
return
if in_special_context(node):
return None
new = node.clone()
new.prefix = ""
new = Call(Name("list"), [new])
new.prefix = node.prefix
return new
| apache-2.0 | -1,656,485,600,551,944,400 | 32.604396 | 78 | 0.516024 | false |
skoczen/correlationbot | tests/test_post.py | 1 | 1928 | from bot_tests import BotTests
class ValidPostTests(BotTests):
def test_two_column_works(self):
resp = self.app.post_json('/', {
"data": [
[1,2,3,4,6,7,8,9],
[2,4,6,8,10,12,13,15],
]
})
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.json, {
"correlations": [
{
"column_1": 1,
"column_2": 2,
"correlation": 0.9953500135553002,
"pearson": 0.9953500135553002,
# "spearman": 0.4,
# "kendall": 0.2,
},
]
})
def test_3_column_works(self):
resp = self.app.post_json('/', {
"data": [
[1, 2, 3],
[412, 5, 6],
[45, -125, 6.334],
]
})
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.json, {
"correlations": [
{
"column_1": 1,
"column_2": 2,
"correlation": -0.86495821895559954,
"pearson": -0.86495821895559954,
# "spearman": 0.4,
# "kendall": 0.2,
},
{
"column_1": 1,
"column_2": 3,
"correlation": -0.21695628247970969,
"pearson": -0.21695628247970969,
# "spearman": 0.4,
# "kendall": 0.2,
},
{
"column_1": 2,
"column_2": 3,
"correlation": 0.67754874098457696,
"pearson": 0.67754874098457696,
# "spearman": 0.4,
# "kendall": 0.2,
}
]
}) | mit | 1,592,703,209,701,723,100 | 29.619048 | 56 | 0.34751 | false |
liberation/django-elasticsearch | test_project/test_app/models.py | 6 | 4045 | from datetime import datetime
from django.db import models
from django.contrib.auth.models import User
from django_elasticsearch.models import EsIndexable
from django_elasticsearch.serializers import EsJsonSerializer
class TestSerializer(EsJsonSerializer):
# Note: i want this field to be null instead of u''
def serialize_email(self, instance, field_name):
val = getattr(instance, field_name)
if val == u'':
return None
return val
def serialize_date_joined_exp(self, instance, field_name):
# abstract
return {'iso': instance.date_joined.isoformat(),
'timestamp': instance.date_joined.strftime('%s.%f')}
class TestModel(User, EsIndexable):
class Elasticsearch(EsIndexable.Elasticsearch):
index = 'django-test'
doc_type = 'test-doc-type'
mappings = {
"username": {"index": "not_analyzed"},
"date_joined_exp": {"type": "object"}
}
serializer_class = TestSerializer
class Meta:
proxy = True
ordering = ('id',)
class Dummy(models.Model):
foo = models.CharField(max_length=256, null=True)
class Test2Serializer(EsJsonSerializer):
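    # These overrides appear to follow the serializer's naming convention:
    # serialize_<field_name> / serialize_type_<field class>, with matching deserialize_ hooks.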
def serialize_type_datetimefield(self, instance, field_name):
d = getattr(instance, field_name)
# a rather typical api output
return {
'iso': d and d.isoformat(),
'date': d and d.date().isoformat(),
'time': d and d.time().isoformat()[:5]
}
def deserialize_type_datetimefield(self, instance, field_name):
return datetime.strptime(instance.get(field_name)['iso'],
'%Y-%m-%dT%H:%M:%S.%f')
def serialize_abstract_method(self, instance, field_name):
return 'woot'
def serialize_bigint(self, instance, field_name):
return 42
def deserialize_bigint(self, source, field_name):
return 45
class Test2Model(EsIndexable):
# string
char = models.CharField(max_length=256, null=True)
text = models.TextField(null=True)
email = models.EmailField(null=True)
filef = models.FileField(null=True, upload_to='f/')
# img = models.ImageField(null=True) # would need pillow
filepf = models.FilePathField(null=True)
ipaddr = models.IPAddressField(null=True)
genipaddr = models.GenericIPAddressField(null=True)
slug = models.SlugField(null=True)
url = models.URLField(null=True)
# numeric
intf = models.IntegerField(null=True)
bigint = models.BigIntegerField(null=True)
intlist = models.CommaSeparatedIntegerField(max_length=256, null=True)
floatf = models.FloatField(null=True)
dec = models.DecimalField(max_digits=5, decimal_places=2, null=True)
posint = models.PositiveIntegerField(null=True)
smint = models.SmallIntegerField(null=True)
possmint = models.PositiveSmallIntegerField(null=True)
# dj1.8
# binary = models.BinaryField()
# bool
boolf = models.BooleanField(null=False, default=True)
nullboolf = models.NullBooleanField(null=True)
# datetime
datef = models.DateField(null=True)
datetf = models.DateTimeField(null=True, auto_now_add=True)
timef = models.TimeField(null=True)
# related
fk = models.ForeignKey(Dummy, null=True, related_name="tofk")
oto = models.OneToOneField(Dummy, null=True, related_name="toto")
fkself = models.ForeignKey('self', null=True, related_name="toselffk") # a bit of a special case
mtm = models.ManyToManyField(Dummy, related_name="tomtm")
class Elasticsearch(EsIndexable.Elasticsearch):
index = 'django-test'
doc_type = 'test-doc-type'
serializer_class = Test2Serializer
# Note: we need to specify this field since the value returned
# by the serializer does not correspond to it's default mapping
# see: Test2Serializer.serialize_type_datetimefield
mappings = {'datetf': {'type': 'object'}}
@property
def abstract_prop(self):
return 'weez'
| mit | -206,594,376,945,400,420 | 32.991597 | 101 | 0.659827 | false |
hale36/SRTV | lib/sqlalchemy/testing/plugin/plugin_base.py | 76 | 14817 | # plugin/plugin_base.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Testing extensions.
this module is designed to work as a testing-framework-agnostic library,
so that we can continue to support nose and also begin adding new functionality
via py.test.
"""
from __future__ import absolute_import
try:
# unitttest has a SkipTest also but pytest doesn't
# honor it unless nose is imported too...
from nose import SkipTest
except ImportError:
from _pytest.runner import Skipped as SkipTest
import sys
import re
py3k = sys.version_info >= (3, 0)
if py3k:
import configparser
else:
import ConfigParser as configparser
# late imports
fixtures = None
engines = None
exclusions = None
warnings = None
profiling = None
assertions = None
requirements = None
config = None
testing = None
util = None
file_config = None
logging = None
db_opts = {}
options = None
def setup_options(make_option):
make_option("--log-info", action="callback", type="string", callback=_log,
help="turn on info logging for <LOG> (multiple OK)")
make_option("--log-debug", action="callback", type="string", callback=_log,
help="turn on debug logging for <LOG> (multiple OK)")
make_option("--db", action="append", type="string", dest="db",
help="Use prefab database uri. Multiple OK, "
"first one is run by default.")
make_option('--dbs', action='callback', callback=_list_dbs,
help="List available prefab dbs")
make_option("--dburi", action="append", type="string", dest="dburi",
help="Database uri. Multiple OK, first one is run by default.")
make_option("--dropfirst", action="store_true", dest="dropfirst",
help="Drop all tables in the target database first")
make_option("--backend-only", action="store_true", dest="backend_only",
help="Run only tests marked with __backend__")
make_option("--mockpool", action="store_true", dest="mockpool",
help="Use mock pool (asserts only one connection used)")
make_option("--low-connections", action="store_true", dest="low_connections",
help="Use a low number of distinct connections - i.e. for Oracle TNS"
)
make_option("--reversetop", action="store_true", dest="reversetop", default=False,
help="Use a random-ordering set implementation in the ORM (helps "
"reveal dependency issues)")
make_option("--requirements", action="callback", type="string",
callback=_requirements_opt,
help="requirements class for testing, overrides setup.cfg")
make_option("--with-cdecimal", action="store_true", dest="cdecimal", default=False,
help="Monkeypatch the cdecimal library into Python 'decimal' for all tests")
make_option("--serverside", action="callback", callback=_server_side_cursors,
help="Turn on server side cursors for PG")
make_option("--mysql-engine", action="store", dest="mysql_engine", default=None,
help="Use the specified MySQL storage engine for all tables, default is "
"a db-default/InnoDB combo.")
make_option("--tableopts", action="append", dest="tableopts", default=[],
help="Add a dialect-specific table option, key=value")
make_option("--write-profiles", action="store_true", dest="write_profiles", default=False,
help="Write/update profiling data.")
def read_config():
global file_config
file_config = configparser.ConfigParser()
file_config.read(['setup.cfg', 'test.cfg'])
def pre_begin(opt):
"""things to set up early, before coverage might be setup."""
global options
options = opt
for fn in pre_configure:
fn(options, file_config)
def set_coverage_flag(value):
options.has_coverage = value
def post_begin():
"""things to set up later, once we know coverage is running."""
# Lazy setup of other options (post coverage)
for fn in post_configure:
fn(options, file_config)
# late imports, has to happen after config as well
# as nose plugins like coverage
global util, fixtures, engines, exclusions, \
assertions, warnings, profiling,\
config, testing
from sqlalchemy import testing
from sqlalchemy.testing import fixtures, engines, exclusions, \
assertions, warnings, profiling, config
from sqlalchemy import util
def _log(opt_str, value, parser):
global logging
if not logging:
import logging
logging.basicConfig()
if opt_str.endswith('-info'):
logging.getLogger(value).setLevel(logging.INFO)
elif opt_str.endswith('-debug'):
logging.getLogger(value).setLevel(logging.DEBUG)
def _list_dbs(*args):
print("Available --db options (use --dburi to override)")
for macro in sorted(file_config.options('db')):
print("%20s\t%s" % (macro, file_config.get('db', macro)))
sys.exit(0)
def _server_side_cursors(opt_str, value, parser):
db_opts['server_side_cursors'] = True
def _requirements_opt(opt_str, value, parser):
_setup_requirements(value)
pre_configure = []
post_configure = []
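# Hooks registered with @pre run in pre_begin() before coverage is set up; @post hooks run later in post_begin().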
def pre(fn):
pre_configure.append(fn)
return fn
def post(fn):
post_configure.append(fn)
return fn
@pre
def _setup_options(opt, file_config):
global options
options = opt
@pre
def _monkeypatch_cdecimal(options, file_config):
if options.cdecimal:
import cdecimal
sys.modules['decimal'] = cdecimal
@post
def _engine_uri(options, file_config):
from sqlalchemy.testing import engines, config
from sqlalchemy import testing
if options.dburi:
db_urls = list(options.dburi)
else:
db_urls = []
if options.db:
for db_token in options.db:
for db in re.split(r'[,\s]+', db_token):
if db not in file_config.options('db'):
raise RuntimeError(
"Unknown URI specifier '%s'. Specify --dbs for known uris."
% db)
else:
db_urls.append(file_config.get('db', db))
if not db_urls:
db_urls.append(file_config.get('db', 'default'))
for db_url in db_urls:
eng = engines.testing_engine(db_url, db_opts)
eng.connect().close()
config.Config.register(eng, db_opts, options, file_config, testing)
config.db_opts = db_opts
@post
def _engine_pool(options, file_config):
if options.mockpool:
from sqlalchemy import pool
db_opts['poolclass'] = pool.AssertionPool
@post
def _requirements(options, file_config):
requirement_cls = file_config.get('sqla_testing', "requirement_cls")
_setup_requirements(requirement_cls)
def _setup_requirements(argument):
from sqlalchemy.testing import config
from sqlalchemy import testing
if config.requirements is not None:
return
modname, clsname = argument.split(":")
# importlib.import_module() only introduced in 2.7, a little
# late
mod = __import__(modname)
for component in modname.split(".")[1:]:
mod = getattr(mod, component)
req_cls = getattr(mod, clsname)
config.requirements = testing.requires = req_cls()
@post
def _prep_testing_database(options, file_config):
from sqlalchemy.testing import config
from sqlalchemy import schema, inspect
if options.dropfirst:
for cfg in config.Config.all_configs():
e = cfg.db
inspector = inspect(e)
try:
view_names = inspector.get_view_names()
except NotImplementedError:
pass
else:
for vname in view_names:
e.execute(schema._DropView(schema.Table(vname, schema.MetaData())))
if config.requirements.schemas.enabled_for_config(cfg):
try:
view_names = inspector.get_view_names(schema="test_schema")
except NotImplementedError:
pass
else:
for vname in view_names:
e.execute(schema._DropView(
schema.Table(vname,
schema.MetaData(), schema="test_schema")))
for tname in reversed(inspector.get_table_names(order_by="foreign_key")):
e.execute(schema.DropTable(schema.Table(tname, schema.MetaData())))
if config.requirements.schemas.enabled_for_config(cfg):
for tname in reversed(inspector.get_table_names(
order_by="foreign_key", schema="test_schema")):
e.execute(schema.DropTable(
schema.Table(tname, schema.MetaData(), schema="test_schema")))
@post
def _set_table_options(options, file_config):
from sqlalchemy.testing import schema
table_options = schema.table_options
for spec in options.tableopts:
key, value = spec.split('=')
table_options[key] = value
if options.mysql_engine:
table_options['mysql_engine'] = options.mysql_engine
@post
def _reverse_topological(options, file_config):
if options.reversetop:
from sqlalchemy.orm.util import randomize_unitofwork
randomize_unitofwork()
@post
def _post_setup_options(opt, file_config):
from sqlalchemy.testing import config
config.options = options
config.file_config = file_config
@post
def _setup_profiling(options, file_config):
from sqlalchemy.testing import profiling
profiling._profile_stats = profiling.ProfileStatsFile(
file_config.get('sqla_testing', 'profile_file'))
def want_class(cls):
if not issubclass(cls, fixtures.TestBase):
return False
elif cls.__name__.startswith('_'):
return False
elif config.options.backend_only and not getattr(cls, '__backend__', False):
return False
else:
return True
def generate_sub_tests(cls, module):
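    # For classes marked __backend__, emit one subclass per configured database so every backend is exercised.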
if getattr(cls, '__backend__', False):
for cfg in config.Config.all_configs():
name = "%s_%s_%s" % (cls.__name__, cfg.db.name, cfg.db.driver)
subcls = type(
name,
(cls, ),
{
"__only_on__": ("%s+%s" % (cfg.db.name, cfg.db.driver)),
"__backend__": False}
)
setattr(module, name, subcls)
yield subcls
else:
yield cls
def start_test_class(cls):
_do_skips(cls)
_setup_engine(cls)
def stop_test_class(cls):
engines.testing_reaper._stop_test_ctx()
if not options.low_connections:
assertions.global_cleanup_assertions()
_restore_engine()
def _restore_engine():
config._current.reset(testing)
def _setup_engine(cls):
if getattr(cls, '__engine_options__', None):
eng = engines.testing_engine(options=cls.__engine_options__)
config._current.push_engine(eng, testing)
def before_test(test, test_module_name, test_class, test_name):
# like a nose id, e.g.:
# "test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause"
name = test_class.__name__
suffix = "_%s_%s" % (config.db.name, config.db.driver)
if name.endswith(suffix):
name = name[0:-(len(suffix))]
id_ = "%s.%s.%s" % (test_module_name, name, test_name)
warnings.resetwarnings()
profiling._current_test = id_
def after_test(test):
engines.testing_reaper._after_test_ctx()
warnings.resetwarnings()
def _do_skips(cls):
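    # Narrow the candidate configs by requirements, backend and version specs; skip the class entirely if none remain.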
all_configs = set(config.Config.all_configs())
reasons = []
if hasattr(cls, '__requires__'):
requirements = config.requirements
for config_obj in list(all_configs):
for requirement in cls.__requires__:
check = getattr(requirements, requirement)
if check.predicate(config_obj):
all_configs.remove(config_obj)
if check.reason:
reasons.append(check.reason)
break
if hasattr(cls, '__prefer_requires__'):
non_preferred = set()
requirements = config.requirements
for config_obj in list(all_configs):
for requirement in cls.__prefer_requires__:
check = getattr(requirements, requirement)
if check.predicate(config_obj):
non_preferred.add(config_obj)
if all_configs.difference(non_preferred):
all_configs.difference_update(non_preferred)
if cls.__unsupported_on__:
spec = exclusions.db_spec(*cls.__unsupported_on__)
for config_obj in list(all_configs):
if spec(config_obj):
all_configs.remove(config_obj)
if getattr(cls, '__only_on__', None):
spec = exclusions.db_spec(*util.to_list(cls.__only_on__))
for config_obj in list(all_configs):
if not spec(config_obj):
all_configs.remove(config_obj)
if getattr(cls, '__skip_if__', False):
for c in getattr(cls, '__skip_if__'):
if c():
raise SkipTest("'%s' skipped by %s" % (
cls.__name__, c.__name__)
)
for db_spec, op, spec in getattr(cls, '__excluded_on__', ()):
for config_obj in list(all_configs):
if exclusions.skip_if(
exclusions.SpecPredicate(db_spec, op, spec)
).predicate(config_obj):
all_configs.remove(config_obj)
if not all_configs:
raise SkipTest(
"'%s' unsupported on DB implementation %s%s" % (
cls.__name__,
", ".join("'%s' = %s" % (
config_obj.db.name,
config_obj.db.dialect.server_version_info)
for config_obj in config.Config.all_configs()
),
", ".join(reasons)
)
)
elif hasattr(cls, '__prefer_backends__'):
non_preferred = set()
spec = exclusions.db_spec(*util.to_list(cls.__prefer_backends__))
for config_obj in all_configs:
if not spec(config_obj):
non_preferred.add(config_obj)
if all_configs.difference(non_preferred):
all_configs.difference_update(non_preferred)
if config._current not in all_configs:
_setup_config(all_configs.pop(), cls)
def _setup_config(config_obj, ctx):
config._current.push(config_obj, testing)
| gpl-3.0 | 5,509,093,584,656,215,000 | 31.564835 | 94 | 0.604306 | false |
alistairlow/tensorflow | tensorflow/examples/tutorials/mnist/input_data.py | 165 | 1107 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for downloading and reading MNIST data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import tempfile
import numpy
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
| apache-2.0 | -7,825,658,111,054,377,000 | 37.172414 | 80 | 0.721771 | false |
jonatanblue/flask | scripts/flaskext_compat.py | 153 | 5038 | # -*- coding: utf-8 -*-
"""
flaskext_compat
~~~~~~~~~~~~~~~
Implements the ``flask.ext`` virtual package for versions of Flask
older than 0.7. This module is a noop if Flask 0.8 was detected.
Usage::
import flaskext_compat
flaskext_compat.activate()
from flask.ext import foo
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import types
import sys
import os
class ExtensionImporter(object):
"""This importer redirects imports from this submodule to other locations.
This makes it possible to transition from the old flaskext.name to the
newer flask_name without people having a hard time.
"""
def __init__(self, module_choices, wrapper_module):
self.module_choices = module_choices
self.wrapper_module = wrapper_module
self.prefix = wrapper_module + '.'
self.prefix_cutoff = wrapper_module.count('.') + 1
def __eq__(self, other):
return self.__class__.__module__ == other.__class__.__module__ and \
self.__class__.__name__ == other.__class__.__name__ and \
self.wrapper_module == other.wrapper_module and \
self.module_choices == other.module_choices
def __ne__(self, other):
return not self.__eq__(other)
def install(self):
sys.meta_path[:] = [x for x in sys.meta_path if self != x] + [self]
def find_module(self, fullname, path=None):
if fullname.startswith(self.prefix):
return self
def load_module(self, fullname):
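        # Try each candidate location (e.g. flask_foo, then flaskext.foo) and alias the first one that imports successfully.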
if fullname in sys.modules:
return sys.modules[fullname]
modname = fullname.split('.', self.prefix_cutoff)[self.prefix_cutoff]
for path in self.module_choices:
realname = path % modname
try:
__import__(realname)
except ImportError:
exc_type, exc_value, tb = sys.exc_info()
# since we only establish the entry in sys.modules at the
# end this seems to be redundant, but if recursive imports
# happen we will call into the move import a second time.
# On the second invocation we still don't have an entry for
# fullname in sys.modules, but we will end up with the same
# fake module name and that import will succeed since this
# one already has a temporary entry in the modules dict.
# Since this one "succeeded" temporarily that second
# invocation now will have created a fullname entry in
# sys.modules which we have to kill.
sys.modules.pop(fullname, None)
# If it's an important traceback we reraise it, otherwise
# we swallow it and try the next choice. The skipped frame
# is the one from __import__ above which we don't care about.
if self.is_important_traceback(realname, tb):
raise exc_type, exc_value, tb.tb_next
continue
module = sys.modules[fullname] = sys.modules[realname]
if '.' not in modname:
setattr(sys.modules[self.wrapper_module], modname, module)
return module
raise ImportError('No module named %s' % fullname)
def is_important_traceback(self, important_module, tb):
"""Walks a traceback's frames and checks if any of the frames
originated in the given important module. If that is the case then we
were able to import the module itself but apparently something went
wrong when the module was imported. (Eg: import of an import failed).
"""
while tb is not None:
if self.is_important_frame(important_module, tb):
return True
tb = tb.tb_next
return False
def is_important_frame(self, important_module, tb):
"""Checks a single frame if it's important."""
g = tb.tb_frame.f_globals
if '__name__' not in g:
return False
module_name = g['__name__']
# Python 2.7 Behavior. Modules are cleaned up late so the
# name shows up properly here. Success!
if module_name == important_module:
return True
# Some python versions will clean up modules so early that the
# module name at that point is no longer set. Try guessing from
# the filename then.
filename = os.path.abspath(tb.tb_frame.f_code.co_filename)
test_string = os.path.sep + important_module.replace('.', os.path.sep)
return test_string + '.py' in filename or \
test_string + os.path.sep + '__init__.py' in filename
def activate():
import flask
ext_module = types.ModuleType('flask.ext')
ext_module.__path__ = []
flask.ext = sys.modules['flask.ext'] = ext_module
importer = ExtensionImporter(['flask_%s', 'flaskext.%s'], 'flask.ext')
importer.install()
| bsd-3-clause | 1,676,566,584,735,214,000 | 39.304 | 78 | 0.597261 | false |
SirAnthony/marvin-xmpp | plugins/urlhead.py | 1 | 1360 | # -*- coding: utf-8 -*-
import lxml.html
import re
from functions import goUrl
class UrlHead:
_marvinModule = True
public = ['urlhead']
def urlhead():
None
def _urlhead(self,message):
    if 'http://' in message.text or 'https://' in message.text:
try:
foo = re.findall(r'(htt[p|ps]s?://\S+)', message.text)
for url in foo:
# dirty code for cyrillic domains
# TODO: punycode converter
if u'.рф' in url:
encodedHtml = goUrl('http://idnaconv.phlymail.de/index.php?decoded=' + url.encode('utf-8') + '&idn_version=2008&encode=Encode+>>')
temp = re.search(r'(https?://xn--\S+)', encodedHtml)
url = temp.group(0).replace('"','')
# end of dirty code
try:
html = lxml.html.fromstring(goUrl(url).decode('utf-8'))
except:
html = lxml.html.fromstring(goUrl(url))
title = " ".join(html.find('.//title').text.split())
message.reply('Title: ' + title)
except IOError:
message.reply('Link broken. Can\'t get header!')
except AttributeError:
pass
else:
return None
| mit | -2,215,610,532,836,480,000 | 35.702703 | 154 | 0.469809 | false |
kalaspuff/tomodachi | tomodachi/discovery/dummy_registry.py | 1 | 1160 | import logging
from typing import Any, Dict
# An example discovery class which would could be extended to register which
# the started service' HTTP endpoints are.
class DummyRegistry(object):
http_endpoints: Dict = {}
@classmethod
async def add_http_endpoint(cls, service: Any, host: str, port: int, method: str, pattern: str) -> None:
cls.http_endpoints[service] = cls.http_endpoints.get(service, [])
cls.http_endpoints[service].append((host, port, method, pattern))
@classmethod
async def _register_service(cls, service: Any) -> None:
logging.getLogger("discovery.dummy_registry").info(
'Registering service "{}" [id: {}]'.format(service.name, service.uuid)
)
for host, port, method, pattern in cls.http_endpoints.get(service, []):
pass
@classmethod
async def _deregister_service(cls, service: Any) -> None:
logging.getLogger("discovery.dummy_registry").info(
'Deregistering service "{}" [id: {}]'.format(service.name, service.uuid)
)
for host, port, method, pattern in cls.http_endpoints.pop(service, []):
pass
| mit | -3,238,731,071,140,974,600 | 39 | 108 | 0.65 | false |
davidwtbuxton/search | search/tests/test_ql.py | 3 | 3162 | import datetime
import unittest
from search.ql import Query, Q, GeoQueryArguments
from search.fields import TextField, GeoField, DateField
from search.indexes import DocumentModel
class FakeDocument(DocumentModel):
foo = TextField()
bar = DateField()
class FakeGeoDocument(DocumentModel):
my_loc = GeoField()
class TestKeywordQuery(unittest.TestCase):
def test_basic_keywords(self):
query = Query(FakeDocument)
query.add_keywords("foo bar")
self.assertEqual(
u"foo bar",
unicode(query))
class TestQuery(unittest.TestCase):
def test_basic_keywords(self):
query = Query(FakeDocument)
query.add_q(Q(foo__gt=42))
self.assertEqual(
u"(foo > 42)",
unicode(query))
def test_add_q_or(self):
"""Test that two Q objects can be added to a query without needing to wrap them in
another Q object
"""
query = Query(FakeDocument)
q_1 = Q(foo=42)
q_2 = Q(foo=128)
query.add_q(q_1)
query.add_q(q_2, conn=Q.OR)
self.assertEqual(
u'((foo:"42") OR (foo:"128"))',
unicode(query))
class TestGeoQuery(unittest.TestCase):
def test_geosearch(self):
query = Query(FakeGeoDocument)
query.add_q(Q(my_loc__geo=GeoQueryArguments(3.14, 6.28, 20)))
self.assertEqual(
u"(distance(my_loc, geopoint(3.140000, 6.280000)) < 20)",
unicode(query))
def test_geosearch_lt(self):
query = Query(FakeGeoDocument)
query.add_q(Q(my_loc__geo_lt=GeoQueryArguments(3.14, 6.28, 20)))
self.assertEqual(
u"(distance(my_loc, geopoint(3.140000, 6.280000)) < 20)",
unicode(query))
def test_geosearch_lte(self):
query = Query(FakeGeoDocument)
query.add_q(Q(my_loc__geo_lte=GeoQueryArguments(3.14, 6.28, 20)))
self.assertEqual(
u"(distance(my_loc, geopoint(3.140000, 6.280000)) <= 20)",
unicode(query))
def test_geosearch_gt(self):
query = Query(FakeGeoDocument)
query.add_q(Q(my_loc__geo_gt=GeoQueryArguments(3.14, 6.28, 20)))
self.assertEqual(
u"(distance(my_loc, geopoint(3.140000, 6.280000)) > 20)",
unicode(query))
def test_geosearch_gte(self):
query = Query(FakeGeoDocument)
query.add_q(Q(my_loc__geo_gte=GeoQueryArguments(3.14, 6.28, 20)))
self.assertEqual(
u"(distance(my_loc, geopoint(3.140000, 6.280000)) >= 20)",
unicode(query))
class TestDateQuery(unittest.TestCase):
def test_before(self):
query = Query(FakeDocument)
today = datetime.date.today()
query.add_q(Q(bar__lt=today))
self.assertEqual(
u"(bar < {0})".format(today.isoformat()),
unicode(query))
def test_after(self):
query = Query(FakeDocument)
today = datetime.date.today()
query.add_q(Q(bar__gt=today))
self.assertEqual(
u"(bar > {0} AND NOT bar:{1})".format(today.isoformat(), DateField().none_value()),
unicode(query))
| mit | -846,240,630,540,917,800 | 27.745455 | 95 | 0.590133 | false |
mdrio/pydoop | test/backward_compatibility/test_task_context.py | 3 | 1995 | # BEGIN_COPYRIGHT
#
# Copyright 2009-2015 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
import unittest
import pydoop
from pydoop.pipes import InputSplit
example_input_splits = [
('/hdfs://localhost:9000/user/zag/in-dir/FGCS-1.ps\x00\x00\x00\x00\x00'
'\x08h(\x00\x00\x00\x00\x00\x08h\x05',
'hdfs://localhost:9000/user/zag/in-dir/FGCS-1.ps', 550952, 550917),
('/hdfs://localhost:9000/user/zag/in-dir/FGCS-1.ps\x00\x00\x00\x00\x00'
'\x00\x00\x00\x00\x00\x00\x00\x00\x08h(',
'hdfs://localhost:9000/user/zag/in-dir/FGCS-1.ps', 0, 550952),
('1hdfs://localhost:9000/user/zag/in-dir/images_list\x00\x00\x00\x00\x00'
'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00$',
'hdfs://localhost:9000/user/zag/in-dir/images_list', 0, 36)
]
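# Hadoop versions without variable-length input split encoding expect an extra leading byte in the raw split data.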
if not pydoop.hadoop_version_info().has_variable_isplit_encoding():
example_input_splits = [("\x00" + raw_split, fn, o, l)
for (raw_split, fn, o, l) in example_input_splits]
class taskcontext_tc(unittest.TestCase):
def test_input_split(self):
for s in example_input_splits:
i = InputSplit(s[0])
self.assertEqual(i.filename, s[1])
self.assertEqual(i.offset, s[2])
self.assertEqual(i.length, s[3])
def suite():
suite = unittest.TestSuite()
suite.addTest(taskcontext_tc('test_input_split'))
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner(verbosity=2)
runner.run((suite()))
| apache-2.0 | -950,733,306,138,611,800 | 34 | 78 | 0.67218 | false |
joshblum/django-with-audit | django/contrib/gis/geos/point.py | 403 | 4253 | from ctypes import c_uint
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.geos import prototypes as capi
class Point(GEOSGeometry):
_minlength = 2
_maxlength = 3
def __init__(self, x, y=None, z=None, srid=None):
"""
The Point object may be initialized with either a tuple, or individual
parameters.
For Example:
>>> p = Point((5, 23)) # 2D point, passed in as a tuple
>>> p = Point(5, 23, 8) # 3D point, passed in with individual parameters
"""
if isinstance(x, (tuple, list)):
# Here a tuple or list was passed in under the `x` parameter.
ndim = len(x)
coords = x
elif isinstance(x, (int, float, long)) and isinstance(y, (int, float, long)):
# Here X, Y, and (optionally) Z were passed in individually, as parameters.
if isinstance(z, (int, float, long)):
ndim = 3
coords = [x, y, z]
else:
ndim = 2
coords = [x, y]
else:
raise TypeError('Invalid parameters given for Point initialization.')
point = self._create_point(ndim, coords)
# Initializing using the address returned from the GEOS
# createPoint factory.
super(Point, self).__init__(point, srid=srid)
def _create_point(self, ndim, coords):
"""
Create a coordinate sequence, set X, Y, [Z], and create point
"""
if ndim < 2 or ndim > 3:
raise TypeError('Invalid point dimension: %s' % str(ndim))
cs = capi.create_cs(c_uint(1), c_uint(ndim))
i = iter(coords)
capi.cs_setx(cs, 0, i.next())
capi.cs_sety(cs, 0, i.next())
if ndim == 3: capi.cs_setz(cs, 0, i.next())
return capi.create_point(cs)
def _set_list(self, length, items):
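        # Reassigning the coordinate list builds a new GEOS point and swaps it in for the old geometry.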
ptr = self._create_point(length, items)
if ptr:
capi.destroy_geom(self.ptr)
self._ptr = ptr
self._set_cs()
else:
# can this happen?
raise GEOSException('Geometry resulting from slice deletion was invalid.')
def _set_single(self, index, value):
self._cs.setOrdinate(index, 0, value)
def __iter__(self):
"Allows iteration over coordinates of this Point."
for i in xrange(len(self)):
yield self[i]
def __len__(self):
"Returns the number of dimensions for this Point (either 0, 2 or 3)."
if self.empty: return 0
if self.hasz: return 3
else: return 2
def _get_single_external(self, index):
if index == 0:
return self.x
elif index == 1:
return self.y
elif index == 2:
return self.z
_get_single_internal = _get_single_external
def get_x(self):
"Returns the X component of the Point."
return self._cs.getOrdinate(0, 0)
def set_x(self, value):
"Sets the X component of the Point."
self._cs.setOrdinate(0, 0, value)
def get_y(self):
"Returns the Y component of the Point."
return self._cs.getOrdinate(1, 0)
def set_y(self, value):
"Sets the Y component of the Point."
self._cs.setOrdinate(1, 0, value)
def get_z(self):
"Returns the Z component of the Point."
if self.hasz:
return self._cs.getOrdinate(2, 0)
else:
return None
def set_z(self, value):
"Sets the Z component of the Point."
if self.hasz:
self._cs.setOrdinate(2, 0, value)
else:
raise GEOSException('Cannot set Z on 2D Point.')
# X, Y, Z properties
x = property(get_x, set_x)
y = property(get_y, set_y)
z = property(get_z, set_z)
### Tuple setting and retrieval routines. ###
def get_coords(self):
"Returns a tuple of the point."
return self._cs.tuple
def set_coords(self, tup):
"Sets the coordinates of the point with the given tuple."
self._cs[0] = tup
# The tuple and coords properties
tuple = property(get_coords, set_coords)
coords = tuple
| bsd-3-clause | 6,507,393,299,635,238,000 | 30.503704 | 87 | 0.563837 | false |
mmetak/streamlink | docs/ext_argparse.py | 1 | 4619 | """Convert an argparse parser to option directives.
Inspired by sphinxcontrib.autoprogram but with a few differences:
- Instead of relying on private argparse structures uses hooking
to extract information from a argparse parser.
- Contains some simple pre-processing on the help messages to make
the Sphinx version a bit prettier.
"""
import argparse
import re
from collections import namedtuple
from textwrap import dedent
from docutils import nodes
from docutils.parsers.rst.directives import unchanged
from docutils.statemachine import ViewList
from sphinx.util.nodes import nested_parse_with_titles
from sphinx.util.compat import Directive
_ArgumentParser = argparse.ArgumentParser
_Argument = namedtuple("Argument", ["args", "options"])
_block_re = re.compile(r":\n{2}\s{2}")
_default_re = re.compile(r"Default is (.+)\.\n")
_note_re = re.compile(r"Note: (.*)\n\n", re.DOTALL)
_option_re = re.compile(r"(--[\w-]+)")
class ArgumentParser(object):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.groups = []
self.arguments = []
def add_argument(self, *args, **options):
if not options.get("help") == argparse.SUPPRESS:
self.arguments.append(_Argument(args, options))
def add_argument_group(self, *args, **options):
group = ArgumentParser(*args, **options)
self.groups.append(group)
return group
def get_parser(module_name, attr):
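    # Temporarily swap in the stub ArgumentParser above so importing the module records its arguments instead of building a real parser.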
argparse.ArgumentParser = ArgumentParser
module = __import__(module_name, globals(), locals(), [attr])
argparse.ArgumentParser = _ArgumentParser
return getattr(module, attr)
def indent(value, length=4):
space = " " * length
return "\n".join(space + line for line in value.splitlines())
class ArgparseDirective(Directive):
has_content = True
option_spec = {
"module": unchanged,
"attr": unchanged,
}
def process_help(self, help):
# Dedent the help to make sure we are always dealing with
# non-indented text.
help = dedent(help)
# Create simple blocks.
help = _block_re.sub("::\n\n ", help)
# Boldify the default value.
help = _default_re.sub(r"Default is: **\1**.\n", help)
# Create note directives from "Note: " paragraphs.
help = _note_re.sub(
lambda m: ".. note::\n\n" + indent(m.group(1)) + "\n\n",
help
)
# Replace option references with links.
help = _option_re.sub(
lambda m: (
":option:`{0}`".format(m.group(1))
if m.group(1) in self._available_options
else m.group(1)
),
help
)
return indent(help)
def generate_group_rst(self, group):
for arg in group.arguments:
help = arg.options.get("help")
metavar = arg.options.get("metavar")
if isinstance(metavar, tuple):
metavar = " ".join(metavar)
if metavar:
options = []
for a in arg.args:
if a.startswith("-"):
if arg.options.get("nargs") == "?":
metavar = "[{0}]".format(metavar)
options.append("{0} {1}".format(a, metavar))
else:
options.append(metavar)
else:
options = arg.args
yield ".. option:: {0}".format(", ".join(options))
yield ""
for line in self.process_help(help).split("\n"):
yield line
yield ""
def generate_parser_rst(self, parser):
for group in parser.groups:
title = group.args[0]
yield ""
yield title
yield "^" * len(title)
for line in self.generate_group_rst(group):
yield line
def run(self):
module = self.options.get("module")
attr = self.options.get("attr")
parser = get_parser(module, attr)
self._available_options = []
for group in parser.groups:
for arg in group.arguments:
self._available_options += arg.args
node = nodes.section()
node.document = self.state.document
result = ViewList()
for line in self.generate_parser_rst(parser):
result.append(line, "argparse")
nested_parse_with_titles(self.state, result, node)
return node.children
def setup(app):
app.add_directive("argparse", ArgparseDirective)
| bsd-2-clause | 7,007,146,270,116,127,000 | 28.608974 | 68 | 0.572851 | false |
deepakantony/sms-tools | software/transformations_interface/harmonicTransformations_function.py | 20 | 5398 | # function call to the transformation functions of relevance for the hpsModel
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import get_window
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../transformations/'))
import sineModel as SM
import harmonicModel as HM
import sineTransformations as ST
import harmonicTransformations as HT
import utilFunctions as UF
def analysis(inputFile='../../sounds/vignesh.wav', window='blackman', M=1201, N=2048, t=-90,
minSineDur=0.1, nH=100, minf0=130, maxf0=300, f0et=7, harmDevSlope=0.01):
"""
Analyze a sound with the harmonic model
inputFile: input sound file (monophonic with sampling rate of 44100)
window: analysis window type (rectangular, hanning, hamming, blackman, blackmanharris)
M: analysis window size
N: fft size (power of two, bigger or equal than M)
t: magnitude threshold of spectral peaks
minSineDur: minimum duration of sinusoidal tracks
nH: maximum number of harmonics
minf0: minimum fundamental frequency in sound
maxf0: maximum fundamental frequency in sound
f0et: maximum error accepted in f0 detection algorithm
harmDevSlope: allowed deviation of harmonic tracks, higher harmonics have higher allowed deviation
	returns inputFile: input file name; fs: sampling rate of input file; hfreq,
	hmag: harmonic frequencies and magnitudes
"""
# size of fft used in synthesis
Ns = 512
# hop size (has to be 1/4 of Ns)
H = 128
# read input sound
fs, x = UF.wavread(inputFile)
# compute analysis window
w = get_window(window, M)
# compute the harmonic model of the whole sound
hfreq, hmag, hphase = HM.harmonicModelAnal(x, fs, w, N, H, t, nH, minf0, maxf0, f0et, harmDevSlope, minSineDur)
# synthesize the sines without original phases
y = SM.sineModelSynth(hfreq, hmag, np.array([]), Ns, H, fs)
# output sound file (monophonic with sampling rate of 44100)
outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_harmonicModel.wav'
# write the sound resulting from the inverse stft
UF.wavwrite(y, fs, outputFile)
# create figure to show plots
plt.figure(figsize=(12, 9))
# frequency range to plot
maxplotfreq = 5000.0
# plot the input sound
plt.subplot(3,1,1)
plt.plot(np.arange(x.size)/float(fs), x)
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('input sound: x')
if (hfreq.shape[1] > 0):
plt.subplot(3,1,2)
tracks = np.copy(hfreq)
numFrames = tracks.shape[0]
frmTime = H*np.arange(numFrames)/float(fs)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks)
plt.axis([0, x.size/float(fs), 0, maxplotfreq])
plt.title('frequencies of harmonic tracks')
# plot the output sound
plt.subplot(3,1,3)
plt.plot(np.arange(y.size)/float(fs), y)
plt.axis([0, y.size/float(fs), min(y), max(y)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('output sound: y')
plt.tight_layout()
plt.show(block=False)
return inputFile, fs, hfreq, hmag
def transformation_synthesis(inputFile, fs, hfreq, hmag, freqScaling = np.array([0, 2.0, 1, .3]),
freqStretching = np.array([0, 1, 1, 1.5]), timbrePreservation = 1,
timeScaling = np.array([0, .0, .671, .671, 1.978, 1.978+1.0])):
"""
Transform the analysis values returned by the analysis function and synthesize the sound
inputFile: name of input file
fs: sampling rate of input file
	hfreq, hmag: harmonic frequencies and magnitudes
freqScaling: frequency scaling factors, in time-value pairs
freqStretchig: frequency stretching factors, in time-value pairs
timbrePreservation: 1 preserves original timbre, 0 it does not
timeScaling: time scaling factors, in time-value pairs
"""
# size of fft used in synthesis
Ns = 512
# hop size (has to be 1/4 of Ns)
H = 128
# frequency scaling of the harmonics
yhfreq, yhmag = HT.harmonicFreqScaling(hfreq, hmag, freqScaling, freqStretching, timbrePreservation, fs)
# time scale the sound
yhfreq, yhmag = ST.sineTimeScaling(yhfreq, yhmag, timeScaling)
# synthesis
y = SM.sineModelSynth(yhfreq, yhmag, np.array([]), Ns, H, fs)
# write output sound
outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_harmonicModelTransformation.wav'
UF.wavwrite(y, fs, outputFile)
# create figure to plot
plt.figure(figsize=(12, 6))
# frequency range to plot
maxplotfreq = 15000.0
# plot the transformed sinusoidal frequencies
plt.subplot(2,1,1)
if (yhfreq.shape[1] > 0):
tracks = np.copy(yhfreq)
tracks = tracks*np.less(tracks, maxplotfreq)
tracks[tracks<=0] = np.nan
numFrames = int(tracks[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.plot(frmTime, tracks)
plt.title('transformed harmonic tracks')
plt.autoscale(tight=True)
# plot the output sound
plt.subplot(2,1,2)
plt.plot(np.arange(y.size)/float(fs), y)
plt.axis([0, y.size/float(fs), min(y), max(y)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('output sound: y')
plt.tight_layout()
plt.show()
if __name__ == "__main__":
# analysis
inputFile, fs, hfreq, hmag = analysis()
# transformation and synthesis
transformation_synthesis (inputFile, fs, hfreq, hmag)
plt.show()
| agpl-3.0 | -4,697,103,074,808,371,000 | 30.940828 | 147 | 0.705632 | false |
CoolProp/CoolProp | dev/Tickets/524.py | 2 | 1561 | import CoolProp
from CoolProp.CoolProp import PropsSI
from CoolProp.CoolProp import set_reference_state
print("CoolProp version %s" % CoolProp.__version__)
print("CoolProp revision %s" % CoolProp.__gitrevision__)
REF = 'R134a'
T0 = 273.15
RefState = 'IIR'
set_reference_state(REF, RefState)
print(REF, RefState)
print("HMass at %s C %s" % (T0 - 273.15, "C", PropsSI('HMASS', 'T', T0, 'Q', 0, REF) / 1000))
print("SMass at %s C %s" % (T0 - 273.15, "C", PropsSI('SMASS', 'T', T0, 'Q', 0, REF) / 1000))
T0 = 273.15 - 40
RefState = 'ASHRAE'
set_reference_state(REF, RefState)
print(REF, RefState)
print("HMass at %s C %s" % (T0 - 273.15, PropsSI('HMASS', 'T', T0, 'Q', 0, REF) / 1000))
print("SMass at %s C %s" % (T0 - 273.15, PropsSI('SMASS', 'T', T0, 'Q', 0, REF) / 1000))
P0 = 101325
RefState = 'NBP'
set_reference_state(REF, RefState)
print(REF, RefState)
print("HMass at %s Pa %s" % (P0, PropsSI('HMASS', 'P', P0, 'Q', 0, REF) / 1000))
print("SMass at %s Pa %s" % (P0, PropsSI('SMASS', 'P', P0, 'Q', 0, REF) / 1000))
T0 = 273.15
RefState = 'IIR'
set_reference_state(REF, RefState)
print(REF, RefState)
print("HMass at %s C %s" % (T0 - 273.15, PropsSI('HMASS', 'T', T0, 'Q', 0, REF) / 1000))
print("SMass at %s C %s" % (T0 - 273.15, PropsSI('SMASS', 'T', T0, 'Q', 0, REF) / 1000))
T0 = 273.15
RefState = 'DEF'
set_reference_state(REF, RefState)
print(REF, RefState)
print("HMass at %s C %s" % (T0 - 273.15, "C", PropsSI('HMASS', 'T', T0, 'Q', 0, REF) / 1000))
print("SMass at %s C %s" % (T0 - 273.15, "C", PropsSI('SMASS', 'T', T0, 'Q', 0, REF) / 1000))
| mit | -7,684,209,428,663,868,000 | 35.302326 | 93 | 0.606662 | false |
sbalde/edx-platform | lms/envs/aws_migrate.py | 288 | 1256 | """
A Django settings file for use on AWS while running
database migrations, since we don't want to normally run the
LMS with enough privileges to modify the database schema.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
# Import everything from .aws so that our settings are based on those.
from .aws import *
import os
from django.core.exceptions import ImproperlyConfigured
DB_OVERRIDES = dict(
PASSWORD=os.environ.get('DB_MIGRATION_PASS', None),
ENGINE=os.environ.get('DB_MIGRATION_ENGINE', DATABASES['default']['ENGINE']),
USER=os.environ.get('DB_MIGRATION_USER', DATABASES['default']['USER']),
NAME=os.environ.get('DB_MIGRATION_NAME', DATABASES['default']['NAME']),
HOST=os.environ.get('DB_MIGRATION_HOST', DATABASES['default']['HOST']),
PORT=os.environ.get('DB_MIGRATION_PORT', DATABASES['default']['PORT']),
)
if DB_OVERRIDES['PASSWORD'] is None:
raise ImproperlyConfigured("No database password was provided for running "
"migrations. This is fatal.")
for override, value in DB_OVERRIDES.iteritems():
DATABASES['default'][override] = value
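# Example (hypothetical values): export DB_MIGRATION_USER=migrator and
# DB_MIGRATION_PASS=s3cret in the environment before running Django's migrate
# command with this settings module; the ENGINE/USER/NAME/HOST/PORT overrides
# fall back to the values already configured by .aws above, while a missing
# password raises ImproperlyConfigured, as enforced above.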
| agpl-3.0 | -4,782,532,784,825,021,000 | 40.866667 | 81 | 0.718949 | false |
automatthias/aubio | waflib/Tools/flex.py | 314 | 1057 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import waflib.TaskGen,os,re
def decide_ext(self,node):
if'cxx'in self.features:
return['.lex.cc']
return['.lex.c']
def flexfun(tsk):
env=tsk.env
bld=tsk.generator.bld
wd=bld.variant_dir
def to_list(xx):
if isinstance(xx,str):return[xx]
return xx
tsk.last_cmd=lst=[]
lst.extend(to_list(env['FLEX']))
lst.extend(to_list(env['FLEXFLAGS']))
inputs=[a.path_from(bld.bldnode)for a in tsk.inputs]
if env.FLEX_MSYS:
inputs=[x.replace(os.sep,'/')for x in inputs]
lst.extend(inputs)
lst=[x for x in lst if x]
txt=bld.cmd_and_log(lst,cwd=wd,env=env.env or None,quiet=0)
tsk.outputs[0].write(txt.replace('\r\n','\n').replace('\r','\n'))
waflib.TaskGen.declare_chain(name='flex',rule=flexfun,ext_in='.l',decider=decide_ext,)
def configure(conf):
conf.find_program('flex',var='FLEX')
conf.env.FLEXFLAGS=['-t']
if re.search(r"\\msys\\[0-9.]+\\bin\\flex.exe$",conf.env.FLEX):
conf.env.FLEX_MSYS=True
| gpl-3.0 | -3,755,693,672,671,253,000 | 32.03125 | 102 | 0.689688 | false |
wkeyword/pip | tests/functional/test_uninstall.py | 17 | 13733 | from __future__ import with_statement
import textwrap
import os
import sys
import pytest
from os.path import join, normpath
from tempfile import mkdtemp
from mock import patch
from tests.lib import assert_all_changes, pyversion
from tests.lib.local_repos import local_repo, local_checkout
from pip.utils import rmtree
@pytest.mark.network
def test_simple_uninstall(script):
"""
Test simple install and uninstall.
"""
result = script.pip('install', 'INITools==0.2')
assert join(script.site_packages, 'initools') in result.files_created, (
sorted(result.files_created.keys())
)
# the import forces the generation of __pycache__ if the version of python
# supports it
script.run('python', '-c', "import initools")
result2 = script.pip('uninstall', 'INITools', '-y')
assert_all_changes(result, result2, [script.venv / 'build', 'cache'])
def test_simple_uninstall_distutils(script):
"""
Test simple install and uninstall.
"""
script.scratch_path.join("distutils_install").mkdir()
pkg_path = script.scratch_path / 'distutils_install'
pkg_path.join("setup.py").write(textwrap.dedent("""
from distutils.core import setup
setup(
name='distutils-install',
version='0.1',
)
"""))
result = script.run('python', pkg_path / 'setup.py', 'install')
result = script.pip('list')
assert "distutils-install (0.1)" in result.stdout
script.pip('uninstall', 'distutils_install', '-y', expect_stderr=True)
result2 = script.pip('list')
assert "distutils-install (0.1)" not in result2.stdout
@pytest.mark.network
def test_uninstall_with_scripts(script):
"""
Uninstall an easy_installed package with scripts.
"""
result = script.run('easy_install', 'PyLogo', expect_stderr=True)
easy_install_pth = script.site_packages / 'easy-install.pth'
pylogo = sys.platform == 'win32' and 'pylogo' or 'PyLogo'
assert(pylogo in result.files_updated[easy_install_pth].bytes)
result2 = script.pip('uninstall', 'pylogo', '-y')
assert_all_changes(
result,
result2,
[script.venv / 'build', 'cache', easy_install_pth],
)
@pytest.mark.network
def test_uninstall_easy_install_after_import(script):
"""
Uninstall an easy_installed package after it's been imported
"""
result = script.run('easy_install', 'INITools==0.2', expect_stderr=True)
# the import forces the generation of __pycache__ if the version of python
# supports it
script.run('python', '-c', "import initools")
result2 = script.pip('uninstall', 'INITools', '-y')
assert_all_changes(
result,
result2,
[
script.venv / 'build',
'cache',
script.site_packages / 'easy-install.pth',
]
)
@pytest.mark.network
def test_uninstall_namespace_package(script):
"""
Uninstall a distribution with a namespace package without clobbering
the namespace and everything in it.
"""
result = script.pip('install', 'pd.requires==0.0.3', expect_error=True)
assert join(script.site_packages, 'pd') in result.files_created, (
sorted(result.files_created.keys())
)
result2 = script.pip('uninstall', 'pd.find', '-y', expect_error=True)
assert join(script.site_packages, 'pd') not in result2.files_deleted, (
sorted(result2.files_deleted.keys())
)
assert join(script.site_packages, 'pd', 'find') in result2.files_deleted, (
sorted(result2.files_deleted.keys())
)
def test_uninstall_overlapping_package(script, data):
"""
Uninstalling a distribution that adds modules to a pre-existing package
should only remove those added modules, not the rest of the existing
package.
See: GitHub issue #355 (pip uninstall removes things it didn't install)
"""
parent_pkg = data.packages.join("parent-0.1.tar.gz")
child_pkg = data.packages.join("child-0.1.tar.gz")
result1 = script.pip('install', parent_pkg, expect_error=False)
assert join(script.site_packages, 'parent') in result1.files_created, (
sorted(result1.files_created.keys())
)
result2 = script.pip('install', child_pkg, expect_error=False)
assert join(script.site_packages, 'child') in result2.files_created, (
sorted(result2.files_created.keys())
)
assert normpath(
join(script.site_packages, 'parent/plugins/child_plugin.py')
) in result2.files_created, sorted(result2.files_created.keys())
# The import forces the generation of __pycache__ if the version of python
# supports it
script.run('python', '-c', "import parent.plugins.child_plugin, child")
result3 = script.pip('uninstall', '-y', 'child', expect_error=False)
assert join(script.site_packages, 'child') in result3.files_deleted, (
sorted(result3.files_created.keys())
)
assert normpath(
join(script.site_packages, 'parent/plugins/child_plugin.py')
) in result3.files_deleted, sorted(result3.files_deleted.keys())
assert join(script.site_packages, 'parent') not in result3.files_deleted, (
sorted(result3.files_deleted.keys())
)
# Additional check: uninstalling 'child' should return things to the
# previous state, without unintended side effects.
assert_all_changes(result2, result3, [])
@pytest.mark.network
def test_uninstall_console_scripts(script):
"""
Test uninstalling a package with more files (console_script entry points,
extra directories).
"""
args = ['install']
args.append('discover')
result = script.pip(*args, **{"expect_error": True})
assert script.bin / 'discover' + script.exe in result.files_created, (
sorted(result.files_created.keys())
)
result2 = script.pip('uninstall', 'discover', '-y', expect_error=True)
assert_all_changes(result, result2, [script.venv / 'build', 'cache'])
@pytest.mark.network
def test_uninstall_easy_installed_console_scripts(script):
"""
Test uninstalling package with console_scripts that is easy_installed.
"""
args = ['easy_install']
args.append('discover')
result = script.run(*args, **{"expect_stderr": True})
assert script.bin / 'discover' + script.exe in result.files_created, (
sorted(result.files_created.keys())
)
result2 = script.pip('uninstall', 'discover', '-y')
assert_all_changes(
result,
result2,
[
script.venv / 'build',
'cache',
script.site_packages / 'easy-install.pth',
]
)
@pytest.mark.network
def test_uninstall_editable_from_svn(script, tmpdir):
"""
Test uninstalling an editable installation from svn.
"""
result = script.pip(
'install', '-e',
'%s#egg=initools-dev' % local_checkout(
'svn+http://svn.colorstudy.com/INITools/trunk',
tmpdir.join("cache"),
),
)
result.assert_installed('INITools')
result2 = script.pip('uninstall', '-y', 'initools')
assert (script.venv / 'src' / 'initools' in result2.files_after)
assert_all_changes(
result,
result2,
[
script.venv / 'src',
script.venv / 'build',
script.site_packages / 'easy-install.pth'
],
)
@pytest.mark.network
def test_uninstall_editable_with_source_outside_venv(script, tmpdir):
"""
Test uninstalling editable install from existing source outside the venv.
"""
cache_dir = tmpdir.join("cache")
try:
temp = mkdtemp()
tmpdir = join(temp, 'pip-test-package')
_test_uninstall_editable_with_source_outside_venv(
script,
tmpdir,
cache_dir,
)
finally:
rmtree(temp)
def _test_uninstall_editable_with_source_outside_venv(
script, tmpdir, cache_dir):
result = script.run(
'git', 'clone',
local_repo(
'git+git://github.com/pypa/pip-test-package',
cache_dir,
),
tmpdir,
expect_stderr=True,
)
result2 = script.pip('install', '-e', tmpdir)
assert join(
script.site_packages, 'pip-test-package.egg-link'
) in result2.files_created, list(result2.files_created.keys())
result3 = script.pip('uninstall', '-y',
'pip-test-package', expect_error=True)
assert_all_changes(
result,
result3,
[script.venv / 'build', script.site_packages / 'easy-install.pth'],
)
@pytest.mark.network
def test_uninstall_from_reqs_file(script, tmpdir):
"""
Test uninstall from a requirements file.
"""
script.scratch_path.join("test-req.txt").write(
textwrap.dedent("""
-e %s#egg=initools-dev
# and something else to test out:
PyLogo<0.4
""") %
local_checkout(
'svn+http://svn.colorstudy.com/INITools/trunk',
tmpdir.join("cache")
)
)
result = script.pip('install', '-r', 'test-req.txt')
script.scratch_path.join("test-req.txt").write(
textwrap.dedent("""
# -f, -i, and --extra-index-url should all be ignored by uninstall
-f http://www.example.com
-i http://www.example.com
--extra-index-url http://www.example.com
-e %s#egg=initools-dev
# and something else to test out:
PyLogo<0.4
""") %
local_checkout(
'svn+http://svn.colorstudy.com/INITools/trunk',
tmpdir.join("cache")
)
)
result2 = script.pip('uninstall', '-r', 'test-req.txt', '-y')
assert_all_changes(
result,
result2,
[
script.venv / 'build',
script.venv / 'src',
script.scratch / 'test-req.txt',
script.site_packages / 'easy-install.pth',
],
)
def test_uninstall_as_egg(script, data):
"""
Test uninstall package installed as egg.
"""
to_install = data.packages.join("FSPkg")
result = script.pip('install', to_install, '--egg', expect_error=False)
fspkg_folder = script.site_packages / 'fspkg'
egg_folder = script.site_packages / 'FSPkg-0.1.dev0-py%s.egg' % pyversion
assert fspkg_folder not in result.files_created, str(result.stdout)
assert egg_folder in result.files_created, str(result)
result2 = script.pip('uninstall', 'FSPkg', '-y')
assert_all_changes(
result,
result2,
[
script.venv / 'build',
'cache',
script.site_packages / 'easy-install.pth',
],
)
def test_uninstallpathset_no_paths(caplog):
"""
Test UninstallPathSet logs notification when there are no paths to
uninstall
"""
from pip.req.req_uninstall import UninstallPathSet
from pkg_resources import get_distribution
test_dist = get_distribution('pip')
# ensure that the distribution is "local"
with patch("pip.req.req_uninstall.dist_is_local") as mock_dist_is_local:
mock_dist_is_local.return_value = True
uninstall_set = UninstallPathSet(test_dist)
uninstall_set.remove() # with no files added to set
assert (
"Can't uninstall 'pip'. No files were found to uninstall."
in caplog.text()
)
def test_uninstallpathset_non_local(caplog):
"""
Test UninstallPathSet logs notification and returns (with no exception)
when dist is non-local
"""
nonlocal_path = os.path.abspath("/nonlocal")
from pip.req.req_uninstall import UninstallPathSet
from pkg_resources import get_distribution
test_dist = get_distribution('pip')
test_dist.location = nonlocal_path
# ensure that the distribution is "non-local"
# setting location isn't enough, due to egg-link file checking for
# develop-installs
with patch("pip.req.req_uninstall.dist_is_local") as mock_dist_is_local:
mock_dist_is_local.return_value = False
uninstall_set = UninstallPathSet(test_dist)
# with no files added to set; which is the case when trying to remove
# non-local dists
uninstall_set.remove()
assert (
"Not uninstalling pip at %s, outside environment %s"
% (nonlocal_path, sys.prefix)
in caplog.text()
)
def test_uninstall_wheel(script, data):
"""
Test uninstalling a wheel
"""
package = data.packages.join("simple.dist-0.1-py2.py3-none-any.whl")
result = script.pip('install', package, '--no-index')
dist_info_folder = script.site_packages / 'simple.dist-0.1.dist-info'
assert dist_info_folder in result.files_created
result2 = script.pip('uninstall', 'simple.dist', '-y')
assert_all_changes(result, result2, [])
def test_uninstall_setuptools_develop_install(script, data):
"""Try uninstall after setup.py develop followed of setup.py install"""
pkg_path = data.packages.join("FSPkg")
script.run('python', 'setup.py', 'develop',
expect_stderr=True, cwd=pkg_path)
script.run('python', 'setup.py', 'install',
expect_stderr=True, cwd=pkg_path)
list_result = script.pip('list')
assert "FSPkg (0.1.dev0)" in list_result.stdout
# Uninstall both develop and install
uninstall = script.pip('uninstall', 'FSPkg', '-y')
assert any(filename.endswith('.egg')
for filename in uninstall.files_deleted.keys())
uninstall2 = script.pip('uninstall', 'FSPkg', '-y')
assert join(
script.site_packages, 'FSPkg.egg-link'
) in uninstall2.files_deleted, list(uninstall2.files_deleted.keys())
list_result2 = script.pip('list')
assert "FSPkg" not in list_result2.stdout
| mit | 5,465,819,814,812,617,000 | 32.413625 | 79 | 0.628559 | false |
hlange/LogSoCR | .waf/waflib/Tools/xlcxx.py | 1 | 1468 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2016 (ita)
# Ralf Habacker, 2006 (rh)
# Yinon Ehrlich, 2009
# Michael Kuhn, 2009
from waflib.Tools import ccroot, ar
from waflib.Configure import conf
@conf
def find_xlcxx(conf):
"""
Detects the Aix C++ compiler
"""
cxx = conf.find_program(['xlc++_r', 'xlc++'], var='CXX')
conf.get_xlc_version(cxx)
conf.env.CXX_NAME = 'xlc++'
@conf
def xlcxx_common_flags(conf):
"""
Flags required for executing the Aix C++ compiler
"""
v = conf.env
v.CXX_SRC_F = []
v.CXX_TGT_F = ['-c', '-o']
if not v.LINK_CXX:
v.LINK_CXX = v.CXX
v.CXXLNK_SRC_F = []
v.CXXLNK_TGT_F = ['-o']
v.CPPPATH_ST = '-I%s'
v.DEFINES_ST = '-D%s'
v.LIB_ST = '-l%s' # template for adding libs
v.LIBPATH_ST = '-L%s' # template for adding libpaths
v.STLIB_ST = '-l%s'
v.STLIBPATH_ST = '-L%s'
v.RPATH_ST = '-Wl,-rpath,%s'
v.SONAME_ST = []
v.SHLIB_MARKER = []
v.STLIB_MARKER = []
v.LINKFLAGS_cxxprogram= ['-Wl,-brtl']
v.cxxprogram_PATTERN = '%s'
v.CXXFLAGS_cxxshlib = ['-fPIC']
v.LINKFLAGS_cxxshlib = ['-G', '-Wl,-brtl,-bexpfull']
v.cxxshlib_PATTERN = 'lib%s.so'
v.LINKFLAGS_cxxstlib = []
v.cxxstlib_PATTERN = 'lib%s.a'
def configure(conf):
conf.find_xlcxx()
conf.find_ar()
conf.xlcxx_common_flags()
conf.cxx_load_tools()
conf.cxx_add_flags()
conf.link_add_flags()
| agpl-3.0 | 7,030,532,117,857,186,000 | 21.584615 | 62 | 0.568801 | false |
mysteryjeans/doorsale-demo | doorstep/accounts/models.py | 2 | 5736 | from __future__ import unicode_literals
from datetime import timedelta
from django.db import models
from django.contrib.auth import get_user_model
from django.utils import timezone
from django.core.exceptions import ValidationError
from django.contrib.auth.models import AbstractUser as _AbstractUser, UserManager as _UserManager
from doorstep.exceptions import DoorstepError
class UserManager(_UserManager):
def register(self, first_name, last_name, email, gender, username, password, is_verified=False, **extra_fields):
"""
        Creates a new user in the database, and also marks the first user as staff and superuser
"""
        # is_verified can later be used to verify the user's email address
verify_code = None
if not is_verified:
verify_code = self.make_random_password(length=20)
if self.filter(email__iexact=email).count() > 0:
raise ValidationError("User with this Email address already exists.")
# First user will automatically become super user and staff member
if self.count() == 0:
user = self.create_superuser(username=username,
first_name=first_name,
last_name=last_name,
email=email,
password=password,
gender=gender,
is_verified=is_verified,
verify_code=verify_code,
updated_by=username,
created_by=username,
**extra_fields)
else:
user = self.create_user(username=username,
first_name=first_name,
last_name=last_name,
email=email,
password=password,
gender=gender,
is_verified=is_verified,
verify_code=verify_code,
updated_by=username,
created_by=username,
**extra_fields)
return user
def get_reset_code(self, email):
"""
        Generates a new password reset code and returns the user
"""
try:
user = self.get(email__iexact=email)
user.reset_code = self.make_random_password(length=20)
user.reset_code_expire = timezone.now() + timedelta(days=2)
user.save()
return user
except get_user_model().DoesNotExist:
raise DoorstepError('We can\'t find that email address, sorry!')
def reset_password(self, user_id, reset_code, password):
"""
Set new password for the user
"""
if not password:
raise DoorstepError('New password can\'t be blank.')
try:
user = self.get(id=user_id)
if not user.reset_code or user.reset_code != reset_code or user.reset_code_expire < timezone.now():
raise DoorstepError('Password reset code is invalid or expired.')
# Password reset code shouldn't be used again
user.reset_code = None
user.set_password(password)
user.save()
except get_user_model().DoesNotExist:
raise DoorstepError('Password reset code is invalid or expired.')
def change_password(self, user, current_password, password):
"""
Updates user's current password
"""
if not password:
raise DoorstepError('New password can\'t be blank.')
# Changing user's password if old password verifies
user = self.get(id=user.id)
if not user.check_password(current_password):
raise DoorstepError('Your current password is wrong.')
user.set_password(password)
user.save()
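# Illustrative usage of the manager above (names and values are made up):
#   user = User.objects.register('Jane', 'Doe', 'jane@example.com', 'F',
#                                'jane', 's3cret', is_verified=False)
#   user = User.objects.get_reset_code('jane@example.com')
#   User.objects.reset_password(user.id, user.reset_code, 'new-passw0rd')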
class AbstractUser(_AbstractUser):
"""
An abstract class extending Django authentication user model for Doorstep.
"""
MALE = 'M'
FEMALE = 'F'
GENDERS = ((MALE, 'Male'),
(FEMALE, 'Female'))
birth_date = models.DateField(null=True, blank=True)
gender = models.CharField(max_length=1, choices=GENDERS, default=None)
is_verified = models.BooleanField(default=True)
verify_code = models.CharField(max_length=512, blank=True, null=True,
help_text='User account verification code.', editable=False)
reset_code = models.CharField(max_length=512, blank=True, null=True,
help_text='Password reset code.', editable=False)
reset_code_expire = models.DateTimeField(max_length=512, blank=True, null=True,
help_text='Password reset code expire date.', editable=False)
updated_on = models.DateTimeField(auto_now=True)
updated_by = models.CharField(max_length=100)
created_on = models.DateTimeField(auto_now_add=True)
created_by = models.CharField(max_length=100)
objects = UserManager()
REQUIRED_FIELDS = ['email', 'gender', 'updated_by', 'created_by']
class Meta:
abstract = True
@classmethod
def get_by_username(cls, username):
"""
        Returns the user for the specified username or raises a DoesNotExist exception
"""
return cls.objects.get(username__iexact=username)
class User(AbstractUser):
"""
Extends Django authentication user model for Doorstep.
"""
| gpl-2.0 | 841,217,162,521,892,700 | 36.986755 | 116 | 0.558229 | false |
Protocol-X/plugin.video.funimationnow | resources/lib/modules/trakt.py | 1 | 8240 | # -*- coding: utf-8 -*-
'''
Funimation|Now Add-on
Copyright (C) 2016 Funimation|Now
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re;
import json;
import urlparse;
import time;
import logging;
#from resources.lib.modules import cache
from resources.lib.modules import control;
#from resources.lib.modules import cleandate
from resources.lib.modules import client;
from resources.lib.modules import utils;
#getTrakt('/sync/history', {"shows": [{"seasons": [{"episodes": [{"number": episode}], "number": season}], "ids": {"tvdb": tvdb}}]})
#mark as watched
#getTrakt('search/tvdb/:72070')
logger = logging.getLogger('funimationnow');
def getTrakt(url, post=None):
try:
url = urlparse.urljoin('http://api-v2launch.trakt.tv/', url);
headers = {
'Content-Type': 'application/json',
'trakt-api-key': '49e7f57ee0c22e6ca39649a9255f6097d10cbdb708a5f1c3dc196e615cce6549',
'trakt-api-version': '2'
};
if not post == None:
post = json.dumps(post);
if getTraktCredentialsInfo() == False:
result = client.request(url, post=post, headers=headers);
return result;
headers.update({'Authorization': 'Bearer %s' % utils.setting('trakt.token')});
result = client.request(url, post=post, headers=headers, output='response', error=True);
if not (result[0] == '401' or result[0] == '405'):
return result[1];
oauth = 'http://api-v2launch.trakt.tv/oauth/token';
opost = {
'client_id': '49e7f57ee0c22e6ca39649a9255f6097d10cbdb708a5f1c3dc196e615cce6549',
'client_secret': '49288059527f042ac3e953419ecfd8c6783438ae966cd0b05982423e7fbb0259',
'redirect_uri': 'urn:ietf:wg:oauth:2.0:oob',
'grant_type': 'refresh_token',
'refresh_token': utils.setting('trakt.refresh')
};
result = client.request(oauth, post=json.dumps(opost), headers=headers);
result = json.loads(result);
token = result['access_token'];
refresh = result['refresh_token'];
utils.setting('trakt.token', token);
        utils.setting('trakt.refresh', refresh);
headers.update({'Authorization': 'Bearer %s' % token});
result = client.request(url, post=post, headers=headers);
return result;
except:
pass;
def authTrakt():
try:
if getTraktCredentialsInfo() == True:
if control.yesnoDialog(control.lang(32700).encode('utf-8'), control.lang(32701).encode('utf-8'), '', 'Trakt'):
utils.setting('trakt.user', '');
utils.setting('trakt.token', '');
utils.setting('trakt.refresh', '');
raise Exception();
result = getTrakt('/oauth/device/code', {'client_id': '49e7f57ee0c22e6ca39649a9255f6097d10cbdb708a5f1c3dc196e615cce6549'});
result = json.loads(result);
verification_url = (control.lang(32702) % result['verification_url']).encode('utf-8');
user_code = (control.lang(32703) % result['user_code']).encode('utf-8');
expires_in = int(result['expires_in']);
device_code = result['device_code'];
interval = result['interval'];
progressDialog = control.progressDialog;
progressDialog.create('Trakt', verification_url, user_code);
for i in range(0, expires_in):
try:
if progressDialog.iscanceled():
break;
time.sleep(1);
if not float(i) % interval == 0:
raise Exception();
r = getTrakt('/oauth/device/token', {'client_id': '49e7f57ee0c22e6ca39649a9255f6097d10cbdb708a5f1c3dc196e615cce6549', 'client_secret': '49288059527f042ac3e953419ecfd8c6783438ae966cd0b05982423e7fbb0259', 'code': device_code});
r = json.loads(r);
if 'access_token' in r:
break;
except:
pass;
try:
progressDialog.close();
except:
pass;
token = r['access_token'];
refresh = r['refresh_token'];
headers = {
'Content-Type': 'application/json',
'trakt-api-key': '49e7f57ee0c22e6ca39649a9255f6097d10cbdb708a5f1c3dc196e615cce6549',
'trakt-api-version': '2',
'Authorization': 'Bearer %s' % token
};
result = client.request('http://api-v2launch.trakt.tv/users/me', headers=headers);
result = json.loads(result);
user = result['username'];
utils.setting('trakt.user', user);
utils.setting('trakt.token', token);
utils.setting('trakt.refresh', refresh);
raise Exception();
except:
control.openSettings('3.1');
def getTraktCredentialsInfo():
user = utils.setting('trakt.user').strip();
token = utils.setting('trakt.token');
refresh = utils.setting('trakt.refresh');
if (user == '' or token == '' or refresh == ''):
return False;
return True;
def markEpisodeAsWatched(tvdb, season, episode):
season = int('%01d' % int(season));
episode = int('%01d' % int(episode));
return getTrakt('/sync/history', {"shows": [{"seasons": [{"episodes": [{"number": episode}], "number": season}], "ids": {"tvdb": tvdb}}]});
def markEpisodeAsNotWatched(tvdb, season, episode):
season = int('%01d' % int(season));
episode = int('%01d' % int(episode));
return getTrakt('/sync/history/remove', {"shows": [{"seasons": [{"episodes": [{"number": episode}], "number": season}], "ids": {"tvdb": tvdb}}]});
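# Illustrative call (the TVDB id is made up): markEpisodeAsWatched(257655, 2, 5)
# posts season 2 / episode 5 of the show with that TVDB id to /sync/history,
# while markEpisodeAsNotWatched() sends the same payload to /sync/history/remove.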
def startProgress(content, show_id, asset_id, currentTime, totalTime):
if content.title() == 'Episode':
try:
trakt_data = utils.fetchtraktprogressdata(show_id, asset_id);
if trakt_data is not None:
(title, season, number) = trakt_data;
trakt_progress = round(((float(currentTime) / abs(totalTime)) * 100.00), 4);
getTrakt('/scrobble/start', {"show": {"title": title}, "episode": {"season": season, "number": number}, "progress": trakt_progress, "app_version": "1.0", "app_date": "2016-08-02"});
except Exception as inst:
logger.error(inst);
pass;
def pauseProgress(content, show_id, asset_id, currentTime, totalTime):
if content.title() == 'Episode':
try:
trakt_data = utils.fetchtraktprogressdata(show_id, asset_id);
if trakt_data is not None:
(title, season, number) = trakt_data;
trakt_progress = round(((float(currentTime) / abs(totalTime)) * 100.00), 4);
getTrakt('/scrobble/pause', {"show": {"title": title}, "episode": {"season": season, "number": number}, "progress": trakt_progress, "app_version": "1.0", "app_date": "2016-08-02"});
except Exception as inst:
logger.error(inst);
pass;
def stopProgress(content, show_id, asset_id, trakt_progress):
if content.title() == 'Episode':
try:
trakt_data = utils.fetchtraktprogressdata(show_id, asset_id);
if trakt_data is not None:
(title, season, number) = trakt_data;
getTrakt('/scrobble/stop', {"show": {"title": title}, "episode": {"season": season, "number": number}, "progress": trakt_progress, "app_version": "1.0", "app_date": "2016-08-02"});
except Exception as inst:
logger.error(inst);
pass; | gpl-3.0 | 5,781,380,733,623,459,000 | 29.981203 | 241 | 0.593811 | false |
ivano666/tensorflow | tensorflow/contrib/util/__init__.py | 5 | 1447 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for dealing with Tensors.
## Miscellaneous Utility Functions
@@constant_value
@@make_tensor_proto
@@make_ndarray
@@ops_used_by_graph_def
@@stripped_op_list_for_graph
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.framework.tensor_util import constant_value
from tensorflow.python.framework.tensor_util import make_tensor_proto
from tensorflow.python.framework.tensor_util import MakeNdarray as make_ndarray
from tensorflow.python.training.saver import ops_used_by_graph_def
from tensorflow.python.training.saver import stripped_op_list_for_graph
from tensorflow.python.util.all_util import make_all
__all__ = make_all(__name__)
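# Illustrative use of the utilities re-exported above (assumes numpy is available):
#   import numpy as np
#   from tensorflow.contrib import util
#   proto = util.make_tensor_proto(np.eye(2))  # ndarray -> TensorProto
#   arr = util.make_ndarray(proto)             # TensorProto -> ndarray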
| apache-2.0 | 1,646,377,162,740,159,500 | 34.292683 | 80 | 0.733932 | false |
chauhanmohit/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/w3c/test_importer.py | 115 | 20224 | #!/usr/bin/env python
# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
"""
This script imports a directory of W3C CSS tests into WebKit.
You must have checked out the W3C repository to your local drive.
This script will import the tests into WebKit following these rules:
- Only tests that are approved or officially submitted awaiting review are imported
- All tests are imported into LayoutTests/csswg
- If the tests are approved, they'll be imported into a directory tree that
mirrors the CSS Mercurial repo. For example, <csswg_repo_root>/approved/css2.1 is brought in
as LayoutTests/csswg/approved/css2.1, maintaining the entire directory structure under that
- If the tests are submitted, they'll be brought in as LayoutTests/csswg/submitted and will also
maintain their directory structure under that. For example, everything under
<csswg_repo_root>/contributors/adobe/submitted is brought into submitted, mirroring its
directory structure in the csswg repo
- If the import directory specified is just a contributor folder, only the submitted folder
for that contributor is brought in. For example, to import all of Mozilla's tests, either
<csswg_repo_root>/contributors/mozilla or <csswg_repo_root>/contributors/mozilla/submitted
will work and are equivalent
- For the time being, this script won't work if you try to import the full set of submitted
tests under contributors/*/submitted. Since these are awaiting review, this is just a small
control mechanism to enforce carefully selecting what non-approved tests are imported.
It can obviously and easily be changed.
- By default, only reftests and jstest are imported. This can be overridden with a -a or --all
argument
- Also by default, if test files by the same name already exist in the destination directory,
they are overwritten with the idea that running this script would refresh files periodically.
This can also be overridden by a -n or --no-overwrite flag
- All files are converted to work in WebKit:
1. Paths to testharness.js files are modified point to Webkit's copy of them in
LayoutTests/resources, using the correct relative path from the new location
2. All CSS properties requiring the -webkit-vendor prefix are prefixed - this current
list of what needs prefixes is read from Source/WebCore/CSS/CSSProperties.in
3. Each reftest has its own copy of its reference file following the naming conventions
new-run-webkit-tests expects
4. If a reference files lives outside the directory of the test that uses it, it is checked
for paths to support files as it will be imported into a different relative position to the
test file (in the same directory)
- Upon completion, script outputs the total number tests imported, broken down by test type
- Also upon completion, each directory where files are imported will have w3c-import.log written
with a timestamp, the W3C Mercurial changeset if available, the list of CSS properties used that
require prefixes, the list of imported files, and guidance for future test modification and
maintenance.
- On subsequent imports, this file is read to determine if files have been removed in the newer changesets.
The script removes these files accordingly.
"""
# FIXME: Change this file to use the Host abstractions rather than os, sys, shutil, etc.
import datetime
import logging
import mimetypes
import optparse
import os
import shutil
import sys
from webkitpy.common.host import Host
from webkitpy.common.webkit_finder import WebKitFinder
from webkitpy.common.system.executive import ScriptError
from webkitpy.w3c.test_parser import TestParser
from webkitpy.w3c.test_converter import W3CTestConverter
TEST_STATUS_UNKNOWN = 'unknown'
TEST_STATUS_APPROVED = 'approved'
TEST_STATUS_SUBMITTED = 'submitted'
CHANGESET_NOT_AVAILABLE = 'Not Available'
_log = logging.getLogger(__name__)
def main(_argv, _stdout, _stderr):
options, args = parse_args()
import_dir = args[0]
if len(args) == 1:
repo_dir = os.path.dirname(import_dir)
else:
repo_dir = args[1]
if not os.path.exists(import_dir):
sys.exit('Source directory %s not found!' % import_dir)
if not os.path.exists(repo_dir):
sys.exit('Repository directory %s not found!' % repo_dir)
if not repo_dir in import_dir:
sys.exit('Repository directory %s must be a parent of %s' % (repo_dir, import_dir))
configure_logging()
test_importer = TestImporter(Host(), import_dir, repo_dir, options)
test_importer.do_import()
def configure_logging():
class LogHandler(logging.StreamHandler):
def format(self, record):
if record.levelno > logging.INFO:
return "%s: %s" % (record.levelname, record.getMessage())
return record.getMessage()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = LogHandler()
handler.setLevel(logging.INFO)
logger.addHandler(handler)
return handler
def parse_args():
parser = optparse.OptionParser(usage='usage: %prog [options] w3c_test_directory [repo_directory]')
parser.add_option('-n', '--no-overwrite', dest='overwrite', action='store_false', default=True,
help='Flag to prevent duplicate test files from overwriting existing tests. By default, they will be overwritten')
parser.add_option('-a', '--all', action='store_true', default=False,
help='Import all tests including reftests, JS tests, and manual/pixel tests. By default, only reftests and JS tests are imported')
options, args = parser.parse_args()
if len(args) not in (1, 2):
parser.error('Incorrect number of arguments')
return options, args
class TestImporter(object):
def __init__(self, host, source_directory, repo_dir, options):
self.host = host
self.source_directory = source_directory
self.options = options
self.filesystem = self.host.filesystem
webkit_finder = WebKitFinder(self.filesystem)
self._webkit_root = webkit_finder.webkit_base()
self.repo_dir = repo_dir
subdirs = os.path.dirname(os.path.relpath(source_directory, repo_dir))
self.destination_directory = webkit_finder.path_from_webkit_base("LayoutTests", 'w3c', *subdirs)
self.changeset = CHANGESET_NOT_AVAILABLE
self.test_status = TEST_STATUS_UNKNOWN
self.import_list = []
def do_import(self):
self.find_importable_tests(self.source_directory)
self.load_changeset()
self.import_tests()
def load_changeset(self):
"""Returns the current changeset from mercurial or "Not Available"."""
try:
self.changeset = self.host.executive.run_command(['hg', 'tip']).split('changeset:')[1]
except (OSError, ScriptError):
self.changeset = CHANGESET_NOT_AVAILABLE
def find_importable_tests(self, directory):
# FIXME: use filesystem
for root, dirs, files in os.walk(directory):
_log.info('Scanning ' + root + '...')
total_tests = 0
reftests = 0
jstests = 0
# "archive" and "data" dirs are internal csswg things that live in every approved directory.
# FIXME: skip 'incoming' tests for now, but we should rework the 'test_status' concept and
# support reading them as well.
DIRS_TO_SKIP = ('.git', '.hg', 'data', 'archive', 'incoming')
for d in DIRS_TO_SKIP:
if d in dirs:
dirs.remove(d)
copy_list = []
for filename in files:
# FIXME: This block should really be a separate function, but the early-continues make that difficult.
if filename.startswith('.') or filename.endswith('.pl'):
continue # For some reason the w3c repo contains random perl scripts we don't care about.
fullpath = os.path.join(root, filename)
mimetype = mimetypes.guess_type(fullpath)
if not 'html' in str(mimetype[0]) and not 'xml' in str(mimetype[0]):
copy_list.append({'src': fullpath, 'dest': filename})
continue
test_parser = TestParser(vars(self.options), filename=fullpath)
test_info = test_parser.analyze_test()
if test_info is None:
continue
if 'reference' in test_info.keys():
reftests += 1
total_tests += 1
test_basename = os.path.basename(test_info['test'])
# Add the ref file, following WebKit style.
# FIXME: Ideally we'd support reading the metadata
# directly rather than relying on a naming convention.
# Using a naming convention creates duplicate copies of the
# reference files.
ref_file = os.path.splitext(test_basename)[0] + '-expected'
ref_file += os.path.splitext(test_basename)[1]
copy_list.append({'src': test_info['reference'], 'dest': ref_file})
copy_list.append({'src': test_info['test'], 'dest': filename})
# Update any support files that need to move as well to remain relative to the -expected file.
if 'refsupport' in test_info.keys():
for support_file in test_info['refsupport']:
source_file = os.path.join(os.path.dirname(test_info['reference']), support_file)
source_file = os.path.normpath(source_file)
# Keep the dest as it was
to_copy = {'src': source_file, 'dest': support_file}
# Only add it once
if not(to_copy in copy_list):
copy_list.append(to_copy)
elif 'jstest' in test_info.keys():
jstests += 1
total_tests += 1
copy_list.append({'src': fullpath, 'dest': filename})
else:
total_tests += 1
copy_list.append({'src': fullpath, 'dest': filename})
if not total_tests:
# We can skip the support directory if no tests were found.
if 'support' in dirs:
dirs.remove('support')
if copy_list:
# Only add this directory to the list if there's something to import
self.import_list.append({'dirname': root, 'copy_list': copy_list,
'reftests': reftests, 'jstests': jstests, 'total_tests': total_tests})
def import_tests(self):
converter = W3CTestConverter()
total_imported_tests = 0
total_imported_reftests = 0
total_imported_jstests = 0
total_prefixed_properties = {}
for dir_to_copy in self.import_list:
total_imported_tests += dir_to_copy['total_tests']
total_imported_reftests += dir_to_copy['reftests']
total_imported_jstests += dir_to_copy['jstests']
prefixed_properties = []
if not dir_to_copy['copy_list']:
continue
orig_path = dir_to_copy['dirname']
subpath = os.path.relpath(orig_path, self.repo_dir)
new_path = os.path.join(self.destination_directory, subpath)
if not(os.path.exists(new_path)):
os.makedirs(new_path)
copied_files = []
for file_to_copy in dir_to_copy['copy_list']:
# FIXME: Split this block into a separate function.
orig_filepath = os.path.normpath(file_to_copy['src'])
if os.path.isdir(orig_filepath):
# FIXME: Figure out what is triggering this and what to do about it.
_log.error('%s refers to a directory' % orig_filepath)
continue
if not(os.path.exists(orig_filepath)):
_log.warning('%s not found. Possible error in the test.', orig_filepath)
continue
new_filepath = os.path.join(new_path, file_to_copy['dest'])
if not(os.path.exists(os.path.dirname(new_filepath))):
os.makedirs(os.path.dirname(new_filepath))
if not self.options.overwrite and os.path.exists(new_filepath):
_log.info('Skipping import of existing file ' + new_filepath)
else:
# FIXME: Maybe doing a file diff is in order here for existing files?
# In other words, there's no sense in overwriting identical files, but
# there's no harm in copying the identical thing.
_log.info('Importing: %s', orig_filepath)
_log.info(' As: %s', new_filepath)
# Only html, xml, or css should be converted
# FIXME: Eventually, so should js when support is added for this type of conversion
mimetype = mimetypes.guess_type(orig_filepath)
if 'html' in str(mimetype[0]) or 'xml' in str(mimetype[0]) or 'css' in str(mimetype[0]):
converted_file = converter.convert_for_webkit(new_path, filename=orig_filepath)
if not converted_file:
shutil.copyfile(orig_filepath, new_filepath) # The file was unmodified.
else:
for prefixed_property in converted_file[0]:
total_prefixed_properties.setdefault(prefixed_property, 0)
total_prefixed_properties[prefixed_property] += 1
prefixed_properties.extend(set(converted_file[0]) - set(prefixed_properties))
outfile = open(new_filepath, 'wb')
outfile.write(converted_file[1])
outfile.close()
else:
shutil.copyfile(orig_filepath, new_filepath)
copied_files.append(new_filepath.replace(self._webkit_root, ''))
self.remove_deleted_files(new_path, copied_files)
self.write_import_log(new_path, copied_files, prefixed_properties)
_log.info('Import complete')
_log.info('IMPORTED %d TOTAL TESTS', total_imported_tests)
_log.info('Imported %d reftests', total_imported_reftests)
_log.info('Imported %d JS tests', total_imported_jstests)
_log.info('Imported %d pixel/manual tests', total_imported_tests - total_imported_jstests - total_imported_reftests)
_log.info('')
_log.info('Properties needing prefixes (by count):')
for prefixed_property in sorted(total_prefixed_properties, key=lambda p: total_prefixed_properties[p]):
_log.info(' %s: %s', prefixed_property, total_prefixed_properties[prefixed_property])
def setup_destination_directory(self):
""" Creates a destination directory that mirrors that of the source approved or submitted directory """
self.update_test_status()
start = self.source_directory.find(self.test_status)
new_subpath = self.source_directory[len(self.repo_dir):]
destination_directory = os.path.join(self.destination_directory, new_subpath)
if not os.path.exists(destination_directory):
os.makedirs(destination_directory)
_log.info('Tests will be imported into: %s', destination_directory)
def update_test_status(self):
""" Sets the test status to either 'approved' or 'submitted' """
status = TEST_STATUS_UNKNOWN
if 'approved' in self.source_directory.split(os.path.sep):
status = TEST_STATUS_APPROVED
elif 'submitted' in self.source_directory.split(os.path.sep):
status = TEST_STATUS_SUBMITTED
self.test_status = status
def remove_deleted_files(self, import_directory, new_file_list):
""" Reads an import log in |import_directory|, compares it to the |new_file_list|, and removes files not in the new list."""
previous_file_list = []
import_log_file = os.path.join(import_directory, 'w3c-import.log')
if not os.path.exists(import_log_file):
return
import_log = open(import_log_file, 'r')
contents = import_log.readlines()
        if 'List of files:\n' in contents:
list_index = contents.index('List of files:\n') + 1
previous_file_list = [filename.strip() for filename in contents[list_index:]]
deleted_files = set(previous_file_list) - set(new_file_list)
for deleted_file in deleted_files:
_log.info('Deleting file removed from the W3C repo: %s', deleted_file)
deleted_file = os.path.join(self._webkit_root, deleted_file)
os.remove(deleted_file)
import_log.close()
def write_import_log(self, import_directory, file_list, prop_list):
""" Writes a w3c-import.log file in each directory with imported files. """
now = datetime.datetime.now()
import_log = open(os.path.join(import_directory, 'w3c-import.log'), 'w')
import_log.write('The tests in this directory were imported from the W3C repository.\n')
import_log.write('Do NOT modify these tests directly in Webkit. Instead, push changes to the W3C CSS repo:\n\n')
import_log.write('http://hg.csswg.org/test\n\n')
import_log.write('Then run the Tools/Scripts/import-w3c-tests in Webkit to reimport\n\n')
import_log.write('Do NOT modify or remove this file\n\n')
import_log.write('------------------------------------------------------------------------\n')
import_log.write('Last Import: ' + now.strftime('%Y-%m-%d %H:%M') + '\n')
import_log.write('W3C Mercurial changeset: ' + self.changeset + '\n')
import_log.write('Test status at time of import: ' + self.test_status + '\n')
import_log.write('------------------------------------------------------------------------\n')
import_log.write('Properties requiring vendor prefixes:\n')
if prop_list:
for prop in prop_list:
import_log.write(prop + '\n')
else:
import_log.write('None\n')
import_log.write('------------------------------------------------------------------------\n')
import_log.write('List of files:\n')
for item in file_list:
import_log.write(item + '\n')
import_log.close()
| bsd-3-clause | -8,548,164,923,579,631,000 | 43.942222 | 138 | 0.620748 | false |
dondieselkopf/amgcl | examples/make_poisson.py | 1 | 2435 | #!/usr/bin/python
import numpy as np
from scipy.sparse import csr_matrix
def numba_jit_if_available():
try:
from numba import jit
return jit
except ImportError:
return lambda f: f
#----------------------------------------------------------------------------
# Assemble matrix for Poisson problem in a unit cube
#----------------------------------------------------------------------------
@numba_jit_if_available()
def make_poisson(n=64):
nnz = 7 * n**3 - 6 * n**2
ptr = np.zeros(n**3+1, dtype=np.int32)
col = np.zeros(nnz, dtype=np.int32)
val = np.zeros(nnz, dtype=np.float64)
rhs = np.ones (n**3, dtype=np.float64)
idx = 0
head = 0
for k in range(0, n):
for j in range(0, n):
for i in range(0, n):
if k > 0:
col[head] = idx - n**2
val[head] = -1.0/6.0
head += 1
if j > 0:
col[head] = idx - n
val[head] = -1.0/6.0
head += 1
if i > 0:
col[head] = idx - 1
val[head] = -1.0/6.0
head += 1
col[head] = idx
val[head] = 1.0
head += 1
if i + 1 < n:
col[head] = idx + 1
val[head] = -1.0/6.0
head += 1
if j + 1 < n:
col[head] = idx + n
val[head] = -1.0/6.0
head += 1
if k + 1 < n:
col[head] = idx + n**2
val[head] = -1.0/6.0
head += 1
idx += 1
ptr[idx] = head
return ( csr_matrix( (val, col, ptr) ), rhs )
if __name__ == "__main__":
import sys
import argparse
from scipy.io import mmwrite
parser = argparse.ArgumentParser(sys.argv[0])
parser.add_argument('-n,--size', dest='n', default='256', help='Size of problem to generate')
parser.add_argument('-A,--matrix', dest='A', default='A', help='Output matrix filename')
parser.add_argument('-b,--rhs', dest='b', default='b', help='Output rhs filename')
args = parser.parse_args(sys.argv[1:])
(A, b) = make_poisson(int(args.n))
mmwrite(args.A, A)
mmwrite(args.b, b.reshape((A.shape[0],1)))
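# Illustrative follow-up (not part of the original script): the assembled system
# can also be solved directly with SciPy, e.g.
#   from scipy.sparse.linalg import cg
#   A, b = make_poisson(32)
#   x, info = cg(A, b, tol=1e-8)  # info == 0 indicates convergence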
| mit | -6,715,250,416,436,728,000 | 26.988506 | 99 | 0.408624 | false |
wasn-lab/visual-positioning | cpp/scons/scons-local-2.0.0.final.0/SCons/SConf.py | 34 | 39052 | """SCons.SConf
Autoconf-like configuration support.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/SConf.py 5023 2010/06/14 22:05:46 scons"
import SCons.compat
import io
import os
import re
import sys
import traceback
import SCons.Action
import SCons.Builder
import SCons.Errors
import SCons.Job
import SCons.Node.FS
import SCons.Taskmaster
import SCons.Util
import SCons.Warnings
import SCons.Conftest
from SCons.Debug import Trace
# Turn off the Conftest error logging
SCons.Conftest.LogInputFiles = 0
SCons.Conftest.LogErrorMessages = 0
# Set
build_type = None
build_types = ['clean', 'help']
def SetBuildType(type):
global build_type
build_type = type
# to be set, if we are in dry-run mode
dryrun = 0
AUTO=0 # use SCons dependency scanning for up-to-date checks
FORCE=1 # force all tests to be rebuilt
CACHE=2 # force all tests to be taken from cache (raise an error, if necessary)
cache_mode = AUTO
def SetCacheMode(mode):
"""Set the Configure cache mode. mode must be one of "auto", "force",
or "cache"."""
global cache_mode
if mode == "auto":
cache_mode = AUTO
elif mode == "force":
cache_mode = FORCE
elif mode == "cache":
cache_mode = CACHE
else:
raise ValueError("SCons.SConf.SetCacheMode: Unknown mode " + mode)
progress_display = SCons.Util.display # will be overwritten by SCons.Script
def SetProgressDisplay(display):
"""Set the progress display to use (called from SCons.Script)"""
global progress_display
progress_display = display
SConfFS = None
_ac_build_counter = 0 # incremented, whenever TryBuild is called
_ac_config_logs = {} # all config.log files created in this build
_ac_config_hs = {} # all config.h files created in this build
sconf_global = None # current sconf object
def _createConfigH(target, source, env):
t = open(str(target[0]), "w")
defname = re.sub('[^A-Za-z0-9_]', '_', str(target[0]).upper())
t.write("""#ifndef %(DEFNAME)s_SEEN
#define %(DEFNAME)s_SEEN
""" % {'DEFNAME' : defname})
t.write(source[0].get_contents())
t.write("""
#endif /* %(DEFNAME)s_SEEN */
""" % {'DEFNAME' : defname})
t.close()
def _stringConfigH(target, source, env):
return "scons: Configure: creating " + str(target[0])
def CreateConfigHBuilder(env):
"""Called just before the building targets phase begins."""
if len(_ac_config_hs) == 0:
return
action = SCons.Action.Action(_createConfigH,
_stringConfigH)
sconfigHBld = SCons.Builder.Builder(action=action)
env.Append( BUILDERS={'SConfigHBuilder':sconfigHBld} )
for k in _ac_config_hs.keys():
env.SConfigHBuilder(k, env.Value(_ac_config_hs[k]))
class SConfWarning(SCons.Warnings.Warning):
pass
SCons.Warnings.enableWarningClass(SConfWarning)
# some error definitions
class SConfError(SCons.Errors.UserError):
def __init__(self,msg):
SCons.Errors.UserError.__init__(self,msg)
class ConfigureDryRunError(SConfError):
"""Raised when a file or directory needs to be updated during a Configure
process, but the user requested a dry-run"""
def __init__(self,target):
if not isinstance(target, SCons.Node.FS.File):
msg = 'Cannot create configure directory "%s" within a dry-run.' % str(target)
else:
msg = 'Cannot update configure test "%s" within a dry-run.' % str(target)
SConfError.__init__(self,msg)
class ConfigureCacheError(SConfError):
"""Raised when a use explicitely requested the cache feature, but the test
is run the first time."""
def __init__(self,target):
SConfError.__init__(self, '"%s" is not yet built and cache is forced.' % str(target))
# define actions for building text files
def _createSource( target, source, env ):
fd = open(str(target[0]), "w")
fd.write(source[0].get_contents())
fd.close()
def _stringSource( target, source, env ):
return (str(target[0]) + ' <-\n |' +
source[0].get_contents().replace( '\n', "\n |" ) )
class SConfBuildInfo(SCons.Node.FS.FileBuildInfo):
"""
Special build info for targets of configure tests. Additional members
are result (did the builder succeed last time?) and string, which
contains messages of the original build phase.
"""
result = None # -> 0/None -> no error, != 0 error
string = None # the stdout / stderr output when building the target
def set_build_result(self, result, string):
self.result = result
self.string = string
class Streamer(object):
"""
'Sniffer' for a file-like writable object. Similar to the unix tool tee.
"""
def __init__(self, orig):
self.orig = orig
self.s = io.StringIO()
def write(self, str):
if self.orig:
self.orig.write(str)
self.s.write(str)
def writelines(self, lines):
for l in lines:
self.write(l + '\n')
def getvalue(self):
"""
Return everything written to orig since the Streamer was created.
"""
return self.s.getvalue()
def flush(self):
if self.orig:
self.orig.flush()
self.s.flush()
class SConfBuildTask(SCons.Taskmaster.AlwaysTask):
"""
This is almost the same as SCons.Script.BuildTask. Handles SConfErrors
correctly and knows about the current cache_mode.
"""
def display(self, message):
if sconf_global.logstream:
sconf_global.logstream.write("scons: Configure: " + message + "\n")
def display_cached_string(self, bi):
"""
Logs the original builder messages, given the SConfBuildInfo instance
bi.
"""
if not isinstance(bi, SConfBuildInfo):
SCons.Warnings.warn(SConfWarning,
"The stored build information has an unexpected class: %s" % bi.__class__)
else:
self.display("The original builder output was:\n" +
(" |" + str(bi.string)).replace("\n", "\n |"))
def failed(self):
# check, if the reason was a ConfigureDryRunError or a
# ConfigureCacheError and if yes, reraise the exception
exc_type = self.exc_info()[0]
if issubclass(exc_type, SConfError):
raise
elif issubclass(exc_type, SCons.Errors.BuildError):
# we ignore Build Errors (occurs, when a test doesn't pass)
# Clear the exception to prevent the contained traceback
# to build a reference cycle.
self.exc_clear()
else:
self.display('Caught exception while building "%s":\n' %
self.targets[0])
try:
excepthook = sys.excepthook
except AttributeError:
# Earlier versions of Python don't have sys.excepthook...
def excepthook(type, value, tb):
traceback.print_tb(tb)
print type, value
excepthook(*self.exc_info())
return SCons.Taskmaster.Task.failed(self)
def collect_node_states(self):
# returns (is_up_to_date, cached_error, cachable)
# where is_up_to_date is 1, if the node(s) are up_to_date
# cached_error is 1, if the node(s) are up_to_date, but the
# build will fail
# cachable is 0, if some nodes are not in our cache
T = 0
changed = False
cached_error = False
cachable = True
for t in self.targets:
if T: Trace('%s' % (t))
bi = t.get_stored_info().binfo
if isinstance(bi, SConfBuildInfo):
if T: Trace(': SConfBuildInfo')
if cache_mode == CACHE:
t.set_state(SCons.Node.up_to_date)
if T: Trace(': set_state(up_to-date)')
else:
if T: Trace(': get_state() %s' % t.get_state())
if T: Trace(': changed() %s' % t.changed())
if (t.get_state() != SCons.Node.up_to_date and t.changed()):
changed = True
if T: Trace(': changed %s' % changed)
cached_error = cached_error or bi.result
else:
if T: Trace(': else')
# the node hasn't been built in a SConf context or doesn't
# exist
cachable = False
changed = ( t.get_state() != SCons.Node.up_to_date )
if T: Trace(': changed %s' % changed)
if T: Trace('\n')
return (not changed, cached_error, cachable)
def execute(self):
if not self.targets[0].has_builder():
return
sconf = sconf_global
is_up_to_date, cached_error, cachable = self.collect_node_states()
if cache_mode == CACHE and not cachable:
raise ConfigureCacheError(self.targets[0])
elif cache_mode == FORCE:
is_up_to_date = 0
if cached_error and is_up_to_date:
self.display("Building \"%s\" failed in a previous run and all "
"its sources are up to date." % str(self.targets[0]))
binfo = self.targets[0].get_stored_info().binfo
self.display_cached_string(binfo)
raise SCons.Errors.BuildError # will be 'caught' in self.failed
elif is_up_to_date:
self.display("\"%s\" is up to date." % str(self.targets[0]))
binfo = self.targets[0].get_stored_info().binfo
self.display_cached_string(binfo)
elif dryrun:
raise ConfigureDryRunError(self.targets[0])
else:
# note stdout and stderr are the same here
s = sys.stdout = sys.stderr = Streamer(sys.stdout)
try:
env = self.targets[0].get_build_env()
if cache_mode == FORCE:
# Set up the Decider() to force rebuilds by saying
# that every source has changed. Note that we still
# call the environment's underlying source decider so
# that the correct .sconsign info will get calculated
# and keep the build state consistent.
def force_build(dependency, target, prev_ni,
env_decider=env.decide_source):
env_decider(dependency, target, prev_ni)
return True
if env.decide_source.func_code is not force_build.func_code:
env.Decider(force_build)
env['PSTDOUT'] = env['PSTDERR'] = s
try:
sconf.cached = 0
self.targets[0].build()
finally:
sys.stdout = sys.stderr = env['PSTDOUT'] = \
env['PSTDERR'] = sconf.logstream
except KeyboardInterrupt:
raise
except SystemExit:
exc_value = sys.exc_info()[1]
raise SCons.Errors.ExplicitExit(self.targets[0],exc_value.code)
except Exception, e:
for t in self.targets:
binfo = t.get_binfo()
binfo.__class__ = SConfBuildInfo
binfo.set_build_result(1, s.getvalue())
sconsign_entry = SCons.SConsign.SConsignEntry()
sconsign_entry.binfo = binfo
#sconsign_entry.ninfo = self.get_ninfo()
# We'd like to do this as follows:
# t.store_info(binfo)
# However, we need to store it as an SConfBuildInfo
# object, and store_info() will turn it into a
# regular FileNodeInfo if the target is itself a
# regular File.
sconsign = t.dir.sconsign()
sconsign.set_entry(t.name, sconsign_entry)
sconsign.merge()
raise e
else:
for t in self.targets:
binfo = t.get_binfo()
binfo.__class__ = SConfBuildInfo
binfo.set_build_result(0, s.getvalue())
sconsign_entry = SCons.SConsign.SConsignEntry()
sconsign_entry.binfo = binfo
#sconsign_entry.ninfo = self.get_ninfo()
# We'd like to do this as follows:
# t.store_info(binfo)
# However, we need to store it as an SConfBuildInfo
# object, and store_info() will turn it into a
# regular FileNodeInfo if the target is itself a
# regular File.
sconsign = t.dir.sconsign()
sconsign.set_entry(t.name, sconsign_entry)
sconsign.merge()
class SConfBase(object):
"""This is simply a class to represent a configure context. After
creating a SConf object, you can call any tests. After finished with your
tests, be sure to call the Finish() method, which returns the modified
environment.
Some words about caching: in most cases, it is not necessary to cache
test results explicitly. Instead, we use the scons dependency checking
mechanism. For example, if one wants to compile a test program
(SConf.TryLink), the compiler is only called if the program dependencies
have changed. However, if the program could not be compiled in a former
SConf run, we need to cache this error explicitly.
"""
def __init__(self, env, custom_tests = {}, conf_dir='$CONFIGUREDIR',
log_file='$CONFIGURELOG', config_h = None, _depth = 0):
"""Constructor. Pass additional tests in the custom_tests-dictinary,
e.g. custom_tests={'CheckPrivate':MyPrivateTest}, where MyPrivateTest
defines a custom test.
Note also the conf_dir and log_file arguments (you may want to
build tests in the VariantDir, not in the SourceDir)
"""
global SConfFS
if not SConfFS:
SConfFS = SCons.Node.FS.default_fs or \
SCons.Node.FS.FS(env.fs.pathTop)
if sconf_global is not None:
raise SCons.Errors.UserError
self.env = env
if log_file is not None:
log_file = SConfFS.File(env.subst(log_file))
self.logfile = log_file
self.logstream = None
self.lastTarget = None
self.depth = _depth
self.cached = 0 # will be set, if all test results are cached
# add default tests
default_tests = {
'CheckCC' : CheckCC,
'CheckCXX' : CheckCXX,
'CheckSHCC' : CheckSHCC,
'CheckSHCXX' : CheckSHCXX,
'CheckFunc' : CheckFunc,
'CheckType' : CheckType,
'CheckTypeSize' : CheckTypeSize,
'CheckDeclaration' : CheckDeclaration,
'CheckHeader' : CheckHeader,
'CheckCHeader' : CheckCHeader,
'CheckCXXHeader' : CheckCXXHeader,
'CheckLib' : CheckLib,
'CheckLibWithHeader' : CheckLibWithHeader,
}
self.AddTests(default_tests)
self.AddTests(custom_tests)
self.confdir = SConfFS.Dir(env.subst(conf_dir))
if config_h is not None:
config_h = SConfFS.File(config_h)
self.config_h = config_h
self._startup()
def Finish(self):
"""Call this method after finished with your tests:
env = sconf.Finish()
"""
self._shutdown()
return self.env
def Define(self, name, value = None, comment = None):
"""
Define a preprocessor symbol name, with the optional given value, in the
current config header.
If value is None (default), then #define name is written. If value is not
None, then #define name value is written.
comment is a string which will be put as a C comment in the
header, to explain the meaning of the value (appropriate C comment markers /* and
*/ will be added automatically)."""
lines = []
if comment:
comment_str = "/* %s */" % comment
lines.append(comment_str)
if value is not None:
define_str = "#define %s %s" % (name, value)
else:
define_str = "#define %s" % name
lines.append(define_str)
lines.append('')
self.config_h_text = self.config_h_text + '\n'.join(lines)
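# Illustrative sketch only (the symbol name and comment are assumptions): a call
# such as
#
#   sconf.Define('HAVE_FOO_H', 1, comment='defined when foo.h is usable')
#
# would append roughly the following to the generated config header:
#
#   /* defined when foo.h is usable */
#   #define HAVE_FOO_H 1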
def BuildNodes(self, nodes):
"""
Tries to build the given nodes immediately. Returns 1 on success,
0 on error.
"""
if self.logstream is not None:
# override stdout / stderr to write in log file
oldStdout = sys.stdout
sys.stdout = self.logstream
oldStderr = sys.stderr
sys.stderr = self.logstream
# the engine assumes the current path is the SConstruct directory ...
old_fs_dir = SConfFS.getcwd()
old_os_dir = os.getcwd()
SConfFS.chdir(SConfFS.Top, change_os_dir=1)
# Because we take responsibility here for writing out our
# own .sconsign info (see SConfBuildTask.execute(), above),
# we override the store_info() method with a null place-holder
# so we really control how it gets written.
for n in nodes:
n.store_info = n.do_not_store_info
ret = 1
try:
# ToDo: use user options for calc
save_max_drift = SConfFS.get_max_drift()
SConfFS.set_max_drift(0)
tm = SCons.Taskmaster.Taskmaster(nodes, SConfBuildTask)
# we don't want to build tests in parallel
jobs = SCons.Job.Jobs(1, tm )
jobs.run()
for n in nodes:
state = n.get_state()
if (state != SCons.Node.executed and
state != SCons.Node.up_to_date):
# the node could not be built. we return 0 in this case
ret = 0
finally:
SConfFS.set_max_drift(save_max_drift)
os.chdir(old_os_dir)
SConfFS.chdir(old_fs_dir, change_os_dir=0)
if self.logstream is not None:
# restore stdout / stderr
sys.stdout = oldStdout
sys.stderr = oldStderr
return ret
def pspawn_wrapper(self, sh, escape, cmd, args, env):
"""Wrapper function for handling piped spawns.
This looks to the calling interface (in Action.py) like a "normal"
spawn, but associates the call with the PSPAWN variable from
the construction environment and with the streams to which we
want the output logged. This gets slid into the construction
environment as the SPAWN variable so Action.py doesn't have to
know or care whether it's spawning a piped command or not.
"""
return self.pspawn(sh, escape, cmd, args, env, self.logstream, self.logstream)
def TryBuild(self, builder, text = None, extension = ""):
"""Low level TryBuild implementation. Normally you don't need to
call this - you can use TryCompile / TryLink / TryRun instead.
"""
global _ac_build_counter
# Make sure we have a PSPAWN value, and save the current
# SPAWN value.
try:
self.pspawn = self.env['PSPAWN']
except KeyError:
raise SCons.Errors.UserError('Missing PSPAWN construction variable.')
try:
save_spawn = self.env['SPAWN']
except KeyError:
raise SCons.Errors.UserError('Missing SPAWN construction variable.')
nodesToBeBuilt = []
f = "conftest_" + str(_ac_build_counter)
pref = self.env.subst( builder.builder.prefix )
suff = self.env.subst( builder.builder.suffix )
target = self.confdir.File(pref + f + suff)
try:
# Slide our wrapper into the construction environment as
# the SPAWN function.
self.env['SPAWN'] = self.pspawn_wrapper
sourcetext = self.env.Value(text)
if text is not None:
textFile = self.confdir.File(f + extension)
textFileNode = self.env.SConfSourceBuilder(target=textFile,
source=sourcetext)
nodesToBeBuilt.extend(textFileNode)
source = textFileNode
else:
source = None
nodes = builder(target = target, source = source)
if not SCons.Util.is_List(nodes):
nodes = [nodes]
nodesToBeBuilt.extend(nodes)
result = self.BuildNodes(nodesToBeBuilt)
finally:
self.env['SPAWN'] = save_spawn
_ac_build_counter = _ac_build_counter + 1
if result:
self.lastTarget = nodes[0]
else:
self.lastTarget = None
return result
def TryAction(self, action, text = None, extension = ""):
"""Tries to execute the given action with optional source file
contents <text> and optional source file extension <extension>.
Returns the status (0 : failed, 1 : ok) and the contents of the
output file.
"""
builder = SCons.Builder.Builder(action=action)
self.env.Append( BUILDERS = {'SConfActionBuilder' : builder} )
ok = self.TryBuild(self.env.SConfActionBuilder, text, extension)
del self.env['BUILDERS']['SConfActionBuilder']
if ok:
outputStr = self.lastTarget.get_contents()
return (1, outputStr)
return (0, "")
def TryCompile( self, text, extension):
"""Compiles the program given in text to an env.Object, using extension
as file extension (e.g. '.c'). Returns 1, if compilation was
successful, 0 otherwise. The target is saved in self.lastTarget (for
further processing).
"""
return self.TryBuild(self.env.Object, text, extension)
def TryLink( self, text, extension ):
"""Compiles the program given in text to an executable env.Program,
using extension as file extension (e.g. '.c'). Returns 1, if
compilation was successful, 0 otherwise. The target is saved in
self.lastTarget (for further processing).
"""
return self.TryBuild(self.env.Program, text, extension )
def TryRun(self, text, extension ):
"""Compiles and runs the program given in text, using extension
as file extension (e.g. '.c'). Returns (1, outputStr) on success,
(0, '') otherwise. The target (a file containing the program's stdout)
is saved in self.lastTarget (for further processing).
"""
ok = self.TryLink(text, extension)
if( ok ):
prog = self.lastTarget
pname = prog.path
output = self.confdir.File(os.path.basename(pname)+'.out')
node = self.env.Command(output, prog, [ [ pname, ">", "${TARGET}"] ])
ok = self.BuildNodes(node)
if ok:
outputStr = output.get_contents()
return( 1, outputStr)
return (0, "")
class TestWrapper(object):
"""A wrapper around Tests (to ensure sanity)"""
def __init__(self, test, sconf):
self.test = test
self.sconf = sconf
def __call__(self, *args, **kw):
if not self.sconf.active:
raise SCons.Errors.UserError
context = CheckContext(self.sconf)
ret = self.test(context, *args, **kw)
if self.sconf.config_h is not None:
self.sconf.config_h_text = self.sconf.config_h_text + context.config_h
context.Result("error: no result")
return ret
def AddTest(self, test_name, test_instance):
"""Adds test_class to this SConf instance. It can be called with
self.test_name(...)"""
setattr(self, test_name, SConfBase.TestWrapper(test_instance, self))
def AddTests(self, tests):
"""Adds all the tests given in the tests dictionary to this SConf
instance
"""
for name in tests.keys():
self.AddTest(name, tests[name])
def _createDir( self, node ):
dirName = str(node)
if dryrun:
if not os.path.isdir( dirName ):
raise ConfigureDryRunError(dirName)
else:
if not os.path.isdir( dirName ):
os.makedirs( dirName )
node._exists = 1
def _startup(self):
"""Private method. Set up logstream, and set the environment
variables necessary for a piped build
"""
global _ac_config_logs
global sconf_global
global SConfFS
self.lastEnvFs = self.env.fs
self.env.fs = SConfFS
self._createDir(self.confdir)
self.confdir.up().add_ignore( [self.confdir] )
if self.logfile is not None and not dryrun:
# truncate logfile, if SConf.Configure is called for the first time
# in a build
if self.logfile in _ac_config_logs:
log_mode = "a"
else:
_ac_config_logs[self.logfile] = None
log_mode = "w"
fp = open(str(self.logfile), log_mode)
self.logstream = SCons.Util.Unbuffered(fp)
# logfile may stay in a build directory, so we tell
# the build system not to override it with an eventually
# existing file with the same name in the source directory
self.logfile.dir.add_ignore( [self.logfile] )
tb = traceback.extract_stack()[-3-self.depth]
old_fs_dir = SConfFS.getcwd()
SConfFS.chdir(SConfFS.Top, change_os_dir=0)
self.logstream.write('file %s,line %d:\n\tConfigure(confdir = %s)\n' %
(tb[0], tb[1], str(self.confdir)) )
SConfFS.chdir(old_fs_dir)
else:
self.logstream = None
# we use a special builder to create source files from TEXT
action = SCons.Action.Action(_createSource,
_stringSource)
sconfSrcBld = SCons.Builder.Builder(action=action)
self.env.Append( BUILDERS={'SConfSourceBuilder':sconfSrcBld} )
self.config_h_text = _ac_config_hs.get(self.config_h, "")
self.active = 1
# only one SConf instance should be active at a time ...
sconf_global = self
def _shutdown(self):
"""Private method. Reset to non-piped spawn"""
global sconf_global, _ac_config_hs
if not self.active:
raise SCons.Errors.UserError("Finish may be called only once!")
if self.logstream is not None and not dryrun:
self.logstream.write("\n")
self.logstream.close()
self.logstream = None
# remove the SConfSourceBuilder from the environment
blds = self.env['BUILDERS']
del blds['SConfSourceBuilder']
self.env.Replace( BUILDERS=blds )
self.active = 0
sconf_global = None
if not self.config_h is None:
_ac_config_hs[self.config_h] = self.config_h_text
self.env.fs = self.lastEnvFs
class CheckContext(object):
"""Provides a context for configure tests. Defines how a test writes to the
screen and log file.
A typical test is just a callable with an instance of CheckContext as
first argument:
def CheckCustom(context, ...)
context.Message('Checking my weird test ... ')
ret = myWeirdTestFunction(...)
context.Result(ret)
Often, myWeirdTestFunction will be one of
context.TryCompile/context.TryLink/context.TryRun. The results of
those are cached, so they are only rebuilt if the dependencies have
changed.
"""
def __init__(self, sconf):
"""Constructor. Pass the corresponding SConf instance."""
self.sconf = sconf
self.did_show_result = 0
# for Conftest.py:
self.vardict = {}
self.havedict = {}
self.headerfilename = None
self.config_h = "" # config_h text will be stored here
# we don't regenerate the config.h file after each test. That means
# that tests won't be able to include the config.h file, and so
# they can't do an #ifdef HAVE_XXX_H. This shouldn't be a major
# issue, though. If it turns out that we need to include config.h
# in tests, we must ensure that the dependencies are worked out
# correctly. Note that we can't use Conftest.py's support for config.h,
# because we would need to specify a builder for the config.h file ...
def Message(self, text):
"""Inform about what we are doing right now, e.g.
'Checking for SOMETHING ... '
"""
self.Display(text)
self.sconf.cached = 1
self.did_show_result = 0
def Result(self, res):
"""Inform about the result of the test. res may be an integer or a
string. In case of an integer, the written text will be 'yes' or 'no'.
The result is only displayed when self.did_show_result is not set.
"""
if isinstance(res, (int, bool)):
if res:
text = "yes"
else:
text = "no"
elif isinstance(res, str):
text = res
else:
raise TypeError("Expected string, int or bool, got " + str(type(res)))
if self.did_show_result == 0:
# Didn't show result yet, do it now.
self.Display(text + "\n")
self.did_show_result = 1
def TryBuild(self, *args, **kw):
return self.sconf.TryBuild(*args, **kw)
def TryAction(self, *args, **kw):
return self.sconf.TryAction(*args, **kw)
def TryCompile(self, *args, **kw):
return self.sconf.TryCompile(*args, **kw)
def TryLink(self, *args, **kw):
return self.sconf.TryLink(*args, **kw)
def TryRun(self, *args, **kw):
return self.sconf.TryRun(*args, **kw)
def __getattr__( self, attr ):
if( attr == 'env' ):
return self.sconf.env
elif( attr == 'lastTarget' ):
return self.sconf.lastTarget
else:
raise AttributeError("CheckContext instance has no attribute '%s'" % attr)
#### Stuff used by Conftest.py (look there for explanations).
def BuildProg(self, text, ext):
self.sconf.cached = 1
# TODO: should use self.vardict for $CC, $CPPFLAGS, etc.
return not self.TryBuild(self.env.Program, text, ext)
def CompileProg(self, text, ext):
self.sconf.cached = 1
# TODO: should use self.vardict for $CC, $CPPFLAGS, etc.
return not self.TryBuild(self.env.Object, text, ext)
def CompileSharedObject(self, text, ext):
self.sconf.cached = 1
# TODO: should use self.vardict for $SHCC, $CPPFLAGS, etc.
return not self.TryBuild(self.env.SharedObject, text, ext)
def RunProg(self, text, ext):
self.sconf.cached = 1
# TODO: should use self.vardict for $CC, $CPPFLAGS, etc.
st, out = self.TryRun(text, ext)
return not st, out
def AppendLIBS(self, lib_name_list):
oldLIBS = self.env.get( 'LIBS', [] )
self.env.Append(LIBS = lib_name_list)
return oldLIBS
def PrependLIBS(self, lib_name_list):
oldLIBS = self.env.get( 'LIBS', [] )
self.env.Prepend(LIBS = lib_name_list)
return oldLIBS
def SetLIBS(self, val):
oldLIBS = self.env.get( 'LIBS', [] )
self.env.Replace(LIBS = val)
return oldLIBS
def Display(self, msg):
if self.sconf.cached:
# We assume that Display is called twice for each test here:
# once for the "Checking for ..." message and once for the result.
# The self.sconf.cached flag can only be set between those calls.
msg = "(cached) " + msg
self.sconf.cached = 0
progress_display(msg, append_newline=0)
self.Log("scons: Configure: " + msg + "\n")
def Log(self, msg):
if self.sconf.logstream is not None:
self.sconf.logstream.write(msg)
#### End of stuff used by Conftest.py.
def SConf(*args, **kw):
if kw.get(build_type, True):
kw['_depth'] = kw.get('_depth', 0) + 1
for bt in build_types:
try:
del kw[bt]
except KeyError:
pass
return SConfBase(*args, **kw)
else:
return SCons.Util.Null()
def CheckFunc(context, function_name, header = None, language = None):
res = SCons.Conftest.CheckFunc(context, function_name, header = header, language = language)
context.did_show_result = 1
return not res
def CheckType(context, type_name, includes = "", language = None):
res = SCons.Conftest.CheckType(context, type_name,
header = includes, language = language)
context.did_show_result = 1
return not res
def CheckTypeSize(context, type_name, includes = "", language = None, expect = None):
res = SCons.Conftest.CheckTypeSize(context, type_name,
header = includes, language = language,
expect = expect)
context.did_show_result = 1
return res
def CheckDeclaration(context, declaration, includes = "", language = None):
res = SCons.Conftest.CheckDeclaration(context, declaration,
includes = includes,
language = language)
context.did_show_result = 1
return not res
def createIncludesFromHeaders(headers, leaveLast, include_quotes = '""'):
# used by CheckHeader and CheckLibWithHeader to produce C - #include
# statements from the specified header (list)
if not SCons.Util.is_List(headers):
headers = [headers]
l = []
if leaveLast:
lastHeader = headers[-1]
headers = headers[:-1]
else:
lastHeader = None
for s in headers:
l.append("#include %s%s%s\n"
% (include_quotes[0], s, include_quotes[1]))
return ''.join(l), lastHeader
def CheckHeader(context, header, include_quotes = '<>', language = None):
"""
A test for a C or C++ header file.
"""
prog_prefix, hdr_to_check = \
createIncludesFromHeaders(header, 1, include_quotes)
res = SCons.Conftest.CheckHeader(context, hdr_to_check, prog_prefix,
language = language,
include_quotes = include_quotes)
context.did_show_result = 1
return not res
def CheckCC(context):
res = SCons.Conftest.CheckCC(context)
context.did_show_result = 1
return not res
def CheckCXX(context):
res = SCons.Conftest.CheckCXX(context)
context.did_show_result = 1
return not res
def CheckSHCC(context):
res = SCons.Conftest.CheckSHCC(context)
context.did_show_result = 1
return not res
def CheckSHCXX(context):
res = SCons.Conftest.CheckSHCXX(context)
context.did_show_result = 1
return not res
# Bram: Make this function obsolete? CheckHeader() is more generic.
def CheckCHeader(context, header, include_quotes = '""'):
"""
A test for a C header file.
"""
return CheckHeader(context, header, include_quotes, language = "C")
# Bram: Make this function obsolete? CheckHeader() is more generic.
def CheckCXXHeader(context, header, include_quotes = '""'):
"""
A test for a C++ header file.
"""
return CheckHeader(context, header, include_quotes, language = "C++")
def CheckLib(context, library = None, symbol = "main",
header = None, language = None, autoadd = 1):
"""
A test for a library. See also CheckLibWithHeader.
Note that library may also be None to test whether the given symbol
compiles without flags.
"""
if library == []:
library = [None]
if not SCons.Util.is_List(library):
library = [library]
# ToDo: accept path for the library
res = SCons.Conftest.CheckLib(context, library, symbol, header = header,
language = language, autoadd = autoadd)
context.did_show_result = 1
return not res
# XXX
# Bram: Can only include one header and can't use #ifdef HAVE_HEADER_H.
def CheckLibWithHeader(context, libs, header, language,
call = None, autoadd = 1):
# ToDo: accept path for library. Support system header files.
"""
Another (more sophisticated) test for a library.
Checks if library and header are available for language (may be 'C'
or 'CXX'). call may be a valid expression _with_ a trailing ';'.
As in CheckLib, we support library=None, to test if the call compiles
without extra link flags.
"""
prog_prefix, dummy = \
createIncludesFromHeaders(header, 0)
if libs == []:
libs = [None]
if not SCons.Util.is_List(libs):
libs = [libs]
res = SCons.Conftest.CheckLib(context, libs, None, prog_prefix,
call = call, language = language, autoadd = autoadd)
context.did_show_result = 1
return not res
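# Hedged usage example (the library and header names are assumptions, not taken
# from this file), as called from an SConstruct:
#
#   conf = Configure(env)
#   if conf.CheckLibWithHeader('z', 'zlib.h', 'C', call='zlibVersion();'):
#       env.Append(CPPDEFINES=['HAVE_ZLIB'])
#   env = conf.Finish()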
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 | 756,569,871,858,430,600 | 36.914563 | 96 | 0.58353 | false |
joelddiaz/openshift-tools | ansible/roles/lib_openshift_3.2/build/src/oadm_manage_node.py | 9 | 6045 | # pylint: skip-file
class ManageNodeException(Exception):
''' manage-node exception class '''
pass
class ManageNodeConfig(OpenShiftCLIConfig):
''' ManageNodeConfig is a DTO for the manage-node command.'''
def __init__(self, kubeconfig, node_options):
super(ManageNodeConfig, self).__init__(None, None, kubeconfig, node_options)
# pylint: disable=too-many-instance-attributes
class ManageNode(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
# pylint allows 5
# pylint: disable=too-many-arguments
def __init__(self,
config,
verbose=False):
''' Constructor for OCVolume '''
super(ManageNode, self).__init__(None, config.kubeconfig)
self.config = config
def evacuate(self):
''' formulate the params and run oadm manage-node '''
return self._evacuate(node=self.config.config_options['node']['value'],
selector=self.config.config_options['selector']['value'],
pod_selector=self.config.config_options['pod_selector']['value'],
dry_run=self.config.config_options['dry_run']['value'],
grace_period=self.config.config_options['grace_period']['value'],
force=self.config.config_options['force']['value'],
)
def get_nodes(self, node=None, selector=''):
'''perform oc get node'''
_node = None
_sel = None
if node:
_node = node
if selector:
_sel = selector
results = self._get('node', rname=_node, selector=_sel)
if results['returncode'] != 0:
return results
nodes = []
items = None
if results['results'][0]['kind'] == 'List':
items = results['results'][0]['items']
else:
items = results['results']
for node in items:
_node = {}
_node['name'] = node['metadata']['name']
_node['schedulable'] = True
if node['spec'].has_key('unschedulable'):
_node['schedulable'] = False
nodes.append(_node)
return nodes
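# Illustration only (hostnames are made up): get_nodes() returns entries of the
# form
#
#   [{'name': 'node1.example.com', 'schedulable': True},
#    {'name': 'node2.example.com', 'schedulable': False}]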
def get_pods_from_node(self, node, pod_selector=None):
'''return pods for a node'''
results = self._list_pods(node=[node], pod_selector=pod_selector)
if results['returncode'] != 0:
return results
# When a selector or node is matched it is returned along with the json.
# We are going to split the results based on the regexp and then
# load the json for each matching node.
# Before we return we are going to loop over the results and pull out the node names.
# {'node': [pod, pod], 'node': [pod, pod]}
# 3.2 includes the following lines in stdout: "Listing matched pods on node:"
all_pods = []
if "Listing matched" in results['results']:
listing_match = re.compile('\n^Listing matched.*$\n', flags=re.MULTILINE)
pods = listing_match.split(results['results'])
for pod in pods:
if pod:
all_pods.extend(json.loads(pod)['items'])
# 3.3 specific
else:
# this is gross but I filed a bug...
# build our own json from the output.
all_pods = json.loads(results['results'])['items']
if all_pods is None:
all_pods = []
return all_pods
def list_pods(self):
''' run oadm manage-node --list-pods'''
_nodes = self.config.config_options['node']['value']
_selector = self.config.config_options['selector']['value']
_pod_selector = self.config.config_options['pod_selector']['value']
if not _nodes:
_nodes = self.get_nodes(selector=_selector)
else:
_nodes = [{'name': name} for name in _nodes]
all_pods = {}
for node in _nodes:
results = self.get_pods_from_node(node['name'], pod_selector=_pod_selector)
if isinstance(results, dict):
return results
all_pods[node['name']] = results
results = {}
results['nodes'] = all_pods
results['returncode'] = 0
return results
def schedulable(self):
'''oadm manage-node call for making nodes unschedulable'''
nodes = self.config.config_options['node']['value']
selector = self.config.config_options['selector']['value']
if not nodes:
nodes = self.get_nodes(selector=selector)
else:
tmp_nodes = []
for name in nodes:
tmp_result = self.get_nodes(name)
if isinstance(tmp_result, dict):
tmp_nodes.append(tmp_result)
continue
tmp_nodes.extend(self.get_nodes(name))
nodes = tmp_nodes
for node in nodes:
if isinstance(node, dict) and node.has_key('returncode'):
return {'results': nodes, 'returncode': node['returncode']}
if isinstance(node, list) and node[0].has_key('returncode'):
return {'results': nodes, 'returncode': node[0]['returncode']}
# check all the nodes that were returned and verify they are:
# node['schedulable'] == self.config.config_options['schedulable']['value']
if any([node['schedulable'] != self.config.config_options['schedulable']['value'] for node in nodes]):
return self._schedulable(node=self.config.config_options['node']['value'],
selector=self.config.config_options['selector']['value'],
schedulable=self.config.config_options['schedulable']['value'],
)
results = {}
results['returncode'] = 0
results['changed'] = False
results['nodes'] = nodes
return results
| apache-2.0 | -9,021,958,956,466,719,000 | 37.75 | 110 | 0.54756 | false |
ibmsoe/tensorflow | tensorflow/contrib/input_pipeline/python/ops/input_pipeline_ops.py | 49 | 4117 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrapper for input_pipeline_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
from tensorflow.contrib.input_pipeline.ops import gen_input_pipeline_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import resource_loader
_input_pipeline_ops = loader.load_op_library(
resource_loader.get_path_to_datafile("_input_pipeline_ops.so"))
def obtain_next(string_list_tensor, counter):
"""Basic wrapper for the ObtainNextOp.
Args:
string_list_tensor: A tensor that is a list of strings
counter: an int64 ref tensor to keep track of which element is returned.
Returns:
An op that produces the element at counter + 1 in the list, round
robin style.
"""
return gen_input_pipeline_ops.obtain_next(string_list_tensor, counter)
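# Rough sketch (not from the library; the tensor and variable names are
# assumptions) of driving obtain_next directly with a counter variable:
#
#   files = constant_op.constant(["a.tfrecord", "b.tfrecord"])
#   counter = variable_scope.get_variable(
#       "my_obtain_next_counter",
#       initializer=constant_op.constant(-1, dtype=dtypes.int64),
#       dtype=dtypes.int64)
#   next_file = obtain_next(files, counter)   # round-robins through the list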
def _maybe_randomize_list(string_list, shuffle):
if shuffle:
random.shuffle(string_list)
return string_list
def _create_list(string_list, shuffle, seed, num_epochs):
if shuffle and seed:
random.seed(seed)
expanded_list = _maybe_randomize_list(string_list, shuffle)
if num_epochs:
for _ in range(num_epochs - 1):
expanded_list.extend(_maybe_randomize_list(string_list, shuffle))
return expanded_list
def seek_next(string_list, shuffle=False, seed=None, num_epochs=None):
"""Returns an op that seeks the next element in a list of strings.
Seeking happens in a round robin fashion. This op creates a variable called
obtain_next_counter that is initialized to -1 and is used to keep track of
which element in the list was returned, and a variable
obtain_next_expanded_list to hold the list. If num_epochs is not None, then we
limit the number of times we go around the string_list before OutOfRangeError
is thrown. It creates a variable to keep track of this.
Args:
string_list: A list of strings.
shuffle: If true, we shuffle the string_list differently for each epoch.
seed: Seed used for shuffling.
num_epochs: Returns OutOfRangeError once string_list has been repeated
num_epochs times. If unspecified, then it keeps on looping.
Returns:
An op that produces the next element in the provided list.
"""
expanded_list = _create_list(string_list, shuffle, seed, num_epochs)
with variable_scope.variable_scope("obtain_next"):
counter = variable_scope.get_variable(
name="obtain_next_counter",
initializer=constant_op.constant(
-1, dtype=dtypes.int64),
dtype=dtypes.int64)
with ops.colocate_with(counter):
string_tensor = variable_scope.get_variable(
name="obtain_next_expanded_list",
initializer=constant_op.constant(expanded_list),
dtype=dtypes.string)
if num_epochs:
filename_counter = variable_scope.get_variable(
name="obtain_next_filename_counter",
initializer=constant_op.constant(
0, dtype=dtypes.int64),
dtype=dtypes.int64)
c = filename_counter.count_up_to(len(expanded_list))
with ops.control_dependencies([c]):
return obtain_next(string_tensor, counter)
else:
return obtain_next(string_tensor, counter)
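# Hypothetical usage (file names assumed), evaluated in a TF1-style session loop:
#
#   get_next = seek_next(["f1.txt", "f2.txt"], shuffle=True, seed=7, num_epochs=2)
#   # Each evaluation of get_next yields the next element; after two shuffled
#   # passes an OutOfRangeError is raised via the epoch counter above.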
| apache-2.0 | -7,125,936,997,226,870,000 | 37.12037 | 80 | 0.712655 | false |
bayolau/seqan | apps/ngs_roi/tool_shed/rois.py | 18 | 1820 | #!/usr/bin/env python
class RoiRecord(object):
"""Represent one record in a ROI file."""
def __init__(self, ref, start_pos, end_pos, region_name, region_length,
strand, max_count, data, points):
"""Initialize RoiRecord."""
self.ref = ref
self.start_pos = start_pos
self.end_pos = end_pos
self.strand = strand
self.region_length = region_length
self.region_name = region_name
self.max_count = max_count
self.data = data
self.points = points
def __str__(self):
return 'RoiRecord(%s, %s, %s, %s, %s, %s, %s, %s, len([...])==%s)' % \
(repr(self.ref), self.start_pos, self.end_pos,
self.region_name, self.region_length, repr(self.strand),
self.max_count, self.data, len(self.points))
def __repr__(self):
return self.__str__()
def loadRoi(path, max_count=0):
"""Load ROI file and return it as a list of RoiRecord objects.
NA values are translated to 0.
"""
data_keys = []
result = []
i = 0
with open(path, 'rb') as f:
for line in f:
if line.startswith('##'):
data_keys = line[2:].split('\t')[7:-1]
if line[0] == '#':
continue
if max_count > 0 and i >= max_count:
break
i += 1
vals = line.split()
region_length = int(vals[4])
data = vals[7:-1]
points = [int(x) for x in vals[-1].split(',')]
r = RoiRecord(vals[0], int(vals[1]) - 1, int(vals[2]), vals[3],
region_length, vals[5], int(vals[6]), data, points)
result.append(r)
#print ' => Loaded %d records.' % len(result)
return data_keys, result
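# Example sketch (the path is an assumption): read a ROI file and inspect records.
#
#   keys, records = loadRoi('example.roi', max_count=100)
#   for rec in records:
#       print rec.region_name, rec.max_count, len(rec.points)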
| bsd-3-clause | -6,175,257,125,299,301,000 | 33.339623 | 78 | 0.5 | false |
TaskEvolution/Task-Coach-Evolution | taskcoach/taskcoachlib/i18n/po2dict.py | 1 | 2930 | #! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
"""Generate python dictionaries catalog from textual translation description.
This program converts a textual Uniforum-style message catalog (.po file) into
a Python dictionary.
Based on msgfmt.py by Martin v. Löwis <[email protected]>
"""
import sys, re, os
MESSAGES = {}
# pylint: disable=W0602,W0603
def add(id_, string, fuzzy):
"Add a non-fuzzy translation to the dictionary."
global MESSAGES
if not fuzzy and string:
MESSAGES[id_] = string
def generateDict():
"Return the generated dictionary"
global MESSAGES
metadata = MESSAGES['']
del MESSAGES['']
encoding = re.search(r'charset=(\S*)\n', metadata).group(1)
return "# -*- coding: %s -*-\n#This is generated code - do not edit\nencoding = '%s'\ndict = %s"%(encoding, encoding, MESSAGES)
def make(filename, outfile=None):
ID = 1
STR = 2
global MESSAGES
MESSAGES = {}
# Compute .py name from .po name and arguments
if filename.endswith('.po'):
infile = filename
else:
infile = filename + '.po'
if outfile is None:
outfile = os.path.splitext(infile)[0] + '.py'
try:
lines = open(infile).readlines()
except IOError, msg:
print >> sys.stderr, msg
sys.exit(1)
section = None
fuzzy = 0
# Parse the catalog
lno = 0
for l in lines:
lno += 1
# If we get a comment line after a msgstr, this is a new entry
if l[0] == '#' and section == STR:
add(msgid, msgstr, fuzzy) # pylint: disable=E0601
section = None
fuzzy = 0
# Record a fuzzy mark
if l[:2] == '#,' and l.find('fuzzy'):
fuzzy = 1
# Skip comments
if l[0] == '#':
continue
# Now we are in a msgid section, output previous section
if l.startswith('msgid'):
if section == STR:
add(msgid, msgstr, fuzzy)
section = ID
l = l[5:]
msgid = msgstr = ''
# Now we are in a msgstr section
elif l.startswith('msgstr'):
section = STR
l = l[6:]
# Skip empty lines
l = l.strip()
if not l:
continue
# XXX: Does this always follow Python escape semantics? # pylint: disable=W0511
l = eval(l)
if section == ID:
msgid += l
elif section == STR:
msgstr += l
else:
print >> sys.stderr, 'Syntax error on %s:%d' % (infile, lno), \
'before:'
print >> sys.stderr, l
sys.exit(1)
# Add last entry
if section == STR:
add(msgid, msgstr, fuzzy)
# Compute output
output = generateDict()
try:
open(outfile,"wb").write(output)
except IOError,msg:
print >> sys.stderr, msg
return outfile
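# Usage sketch (the .po path is an assumption): convert a catalog and use the
# generated module.
#
#   outfile = make('i18n/nl.po')    # writes i18n/nl.py
#   # i18n/nl.py then exposes `encoding` and `dict` as generated above.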
| gpl-3.0 | 2,397,016,724,270,267,400 | 25.396396 | 131 | 0.548805 | false |
SamplesAndDemos/division42pos1 | src/division42pos/division42pos/main/models.py | 1 | 1269 | import uuid
from django.db import models
from django.contrib.auth.models import User
STATE_CODE_CHOICES = (
('AL','Alabama'),
('AK','Alaska'),
('CO','Colorado'),
('CT','Connecticut'),
('DE','Delaware'),
('FL','Florida'),
('GA','Georgia'),
('LA','Lousiana'),
('MA','Massachusetts'),
('ME','Maine'),
('MI','Michigan'),
('MO','Missouri'),
('NC','North Carolina'),
('ND','North Dakota'),
('NH','New Hampshire'),
('NJ','New Jersey'),
('NY','New York'),
)
# Create your models here.
class Customer(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
first_name = models.CharField(max_length=100, verbose_name="First Name", help_text="Please enter a first name for this customer.")
last_name = models.CharField(max_length=100)
address1 = models.CharField(max_length=100, blank=True)
address2 = models.CharField(max_length=100, blank=True)
city = models.CharField(max_length=100, blank=True)
state_code = models.CharField(max_length=2, default='FL', choices=STATE_CODE_CHOICES, blank=True)
postal_code = models.CharField(max_length=100, blank=True)
created_by = models.ForeignKey(User)
def __str__(self):
return "{0}, {1} ({2}, {3})".format(self.last_name,
self.first_name, self.city, self.state_code) | mit | -5,278,518,399,757,831,000 | 31.564103 | 131 | 0.676123 | false |
dcrosta/ansible | lib/ansible/playbook/block.py | 30 | 13215 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleParserError
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.become import Become
from ansible.playbook.conditional import Conditional
from ansible.playbook.helpers import load_list_of_tasks
from ansible.playbook.role import Role
from ansible.playbook.taggable import Taggable
class Block(Base, Become, Conditional, Taggable):
_block = FieldAttribute(isa='list', default=[])
_rescue = FieldAttribute(isa='list', default=[])
_always = FieldAttribute(isa='list', default=[])
_delegate_to = FieldAttribute(isa='list')
# for future consideration? this would be functionally
# similar to the 'else' clause for exceptions
#_otherwise = FieldAttribute(isa='list')
def __init__(self, play=None, parent_block=None, role=None, task_include=None, use_handlers=False, implicit=False):
self._play = play
self._role = role
self._task_include = task_include
self._parent_block = parent_block
self._use_handlers = use_handlers
self._implicit = implicit
self._dep_chain = []
super(Block, self).__init__()
def get_vars(self):
'''
Blocks do not store variables directly, however they may be a member
of a role or task include which does, so return those if present.
'''
all_vars = self.vars.copy()
if self._role:
all_vars.update(self._role.get_vars(self._dep_chain))
if self._parent_block:
all_vars.update(self._parent_block.get_vars())
if self._task_include:
all_vars.update(self._task_include.get_vars())
return all_vars
@staticmethod
def load(data, play=None, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
implicit = not Block.is_block(data)
b = Block(play=play, parent_block=parent_block, role=role, task_include=task_include, use_handlers=use_handlers, implicit=implicit)
return b.load_data(data, variable_manager=variable_manager, loader=loader)
@staticmethod
def is_block(ds):
is_block = False
if isinstance(ds, dict):
for attr in ('block', 'rescue', 'always'):
if attr in ds:
is_block = True
break
return is_block
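# Illustration (the task datastructures are assumptions): a datastructure counts
# as a block when it carries any of the block/rescue/always keys.
#
#   Block.is_block({'block': [{'command': '/bin/true'}]})   # -> True
#   Block.is_block({'command': '/bin/true'})                # -> False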
def preprocess_data(self, ds):
'''
If a simple task is given, an implicit block for that single task
is created, which goes in the main portion of the block
'''
if not Block.is_block(ds):
if isinstance(ds, list):
return super(Block, self).preprocess_data(dict(block=ds))
else:
return super(Block, self).preprocess_data(dict(block=[ds]))
return super(Block, self).preprocess_data(ds)
def _load_block(self, attr, ds):
try:
return load_list_of_tasks(
ds,
play=self._play,
block=self,
role=self._role,
task_include=self._task_include,
variable_manager=self._variable_manager,
loader=self._loader,
use_handlers=self._use_handlers,
)
except AssertionError:
raise AnsibleParserError("A malformed block was encountered.", obj=self._ds)
def _load_rescue(self, attr, ds):
try:
return load_list_of_tasks(
ds,
play=self._play,
block=self,
role=self._role,
task_include=self._task_include,
variable_manager=self._variable_manager,
loader=self._loader,
use_handlers=self._use_handlers,
)
except AssertionError:
raise AnsibleParserError("A malformed block was encountered.", obj=self._ds)
def _load_always(self, attr, ds):
try:
return load_list_of_tasks(
ds,
play=self._play,
block=self,
role=self._role,
task_include=self._task_include,
variable_manager=self._variable_manager,
loader=self._loader,
use_handlers=self._use_handlers,
)
except AssertionError:
raise AnsibleParserError("A malformed block was encountered.", obj=self._ds)
def copy(self, exclude_parent=False, exclude_tasks=False):
def _dupe_task_list(task_list, new_block):
new_task_list = []
for task in task_list:
if isinstance(task, Block):
new_task = task.copy(exclude_parent=True)
new_task._parent_block = new_block
else:
new_task = task.copy(exclude_block=True)
new_task._block = new_block
new_task_list.append(new_task)
return new_task_list
new_me = super(Block, self).copy()
new_me._play = self._play
new_me._use_handlers = self._use_handlers
new_me._dep_chain = self._dep_chain[:]
if not exclude_tasks:
new_me.block = _dupe_task_list(self.block or [], new_me)
new_me.rescue = _dupe_task_list(self.rescue or [], new_me)
new_me.always = _dupe_task_list(self.always or [], new_me)
new_me._parent_block = None
if self._parent_block and not exclude_parent:
new_me._parent_block = self._parent_block.copy(exclude_tasks=exclude_tasks)
new_me._role = None
if self._role:
new_me._role = self._role
new_me._task_include = None
if self._task_include:
new_me._task_include = self._task_include.copy()
return new_me
def serialize(self):
'''
Override of the default serialize method, since when we're serializing
a task we don't want to include the attribute list of tasks.
'''
data = dict()
for attr in self._get_base_attributes():
if attr not in ('block', 'rescue', 'always'):
data[attr] = getattr(self, attr)
data['dep_chain'] = self._dep_chain
if self._role is not None:
data['role'] = self._role.serialize()
if self._task_include is not None:
data['task_include'] = self._task_include.serialize()
if self._parent_block is not None:
data['parent_block'] = self._parent_block.copy(exclude_tasks=True).serialize()
return data
def deserialize(self, data):
'''
Override of the default deserialize method, to match the above overridden
serialize method
'''
from ansible.playbook.task import Task
# we don't want the full set of attributes (the task lists), as that
# would lead to a serialize/deserialize loop
for attr in self._get_base_attributes():
if attr in data and attr not in ('block', 'rescue', 'always'):
setattr(self, attr, data.get(attr))
self._dep_chain = data.get('dep_chain', [])
# if there was a serialized role, unpack it too
role_data = data.get('role')
if role_data:
r = Role()
r.deserialize(role_data)
self._role = r
# if there was a serialized task include, unpack it too
ti_data = data.get('task_include')
if ti_data:
ti = Task()
ti.deserialize(ti_data)
self._task_include = ti
pb_data = data.get('parent_block')
if pb_data:
pb = Block()
pb.deserialize(pb_data)
self._parent_block = pb
def evaluate_conditional(self, templar, all_vars):
if len(self._dep_chain):
for dep in self._dep_chain:
if not dep.evaluate_conditional(templar, all_vars):
return False
if self._task_include is not None:
if not self._task_include.evaluate_conditional(templar, all_vars):
return False
if self._parent_block is not None:
if not self._parent_block.evaluate_conditional(templar, all_vars):
return False
elif self._role is not None:
if not self._role.evaluate_conditional(templar, all_vars):
return False
return super(Block, self).evaluate_conditional(templar, all_vars)
def set_loader(self, loader):
self._loader = loader
if self._parent_block:
self._parent_block.set_loader(loader)
elif self._role:
self._role.set_loader(loader)
if self._task_include:
self._task_include.set_loader(loader)
for dep in self._dep_chain:
dep.set_loader(loader)
def _get_parent_attribute(self, attr, extend=False):
'''
Generic logic to get the attribute or parent attribute for a block value.
'''
value = None
try:
value = self._attributes[attr]
if self._parent_block and (value is None or extend):
parent_value = getattr(self._parent_block, attr)
if extend:
value = self._extend_value(value, parent_value)
else:
value = parent_value
if self._task_include and (value is None or extend):
parent_value = getattr(self._task_include, attr)
if extend:
value = self._extend_value(value, parent_value)
else:
value = parent_value
if self._role and (value is None or extend):
parent_value = getattr(self._role, attr)
if extend:
value = self._extend_value(value, parent_value)
else:
value = parent_value
if len(self._dep_chain) and (not value or extend):
reverse_dep_chain = self._dep_chain[:]
reverse_dep_chain.reverse()
for dep in reverse_dep_chain:
dep_value = getattr(dep, attr)
if extend:
value = self._extend_value(value, dep_value)
else:
value = dep_value
if value is not None and not extend:
break
if self._play and (value is None or extend):
parent_value = getattr(self._play, attr)
if extend:
value = self._extend_value(value, parent_value)
else:
value = parent_value
except KeyError:
pass
return value
def _get_attr_environment(self):
'''
Override for the 'tags' getattr fetcher, used from Base.
'''
environment = self._attributes['environment']
if environment is None:
environment = self._get_parent_attribute('environment', extend=True)
return environment
def filter_tagged_tasks(self, play_context, all_vars):
'''
Creates a new block, with task lists filtered based on the tags contained
within the play_context object.
'''
def evaluate_and_append_task(target):
tmp_list = []
for task in target:
if isinstance(task, Block):
tmp_list.append(evaluate_block(task))
elif task.action in ('meta', 'include') or task.evaluate_tags(play_context.only_tags, play_context.skip_tags, all_vars=all_vars):
tmp_list.append(task)
return tmp_list
def evaluate_block(block):
new_block = self.copy()
new_block.block = evaluate_and_append_task(block.block)
new_block.rescue = evaluate_and_append_task(block.rescue)
new_block.always = evaluate_and_append_task(block.always)
return new_block
return evaluate_block(self)
def has_tasks(self):
return len(self.block) > 0 or len(self.rescue) > 0 or len(self.always) > 0
| gpl-3.0 | -992,071,965,753,633,400 | 36.120787 | 145 | 0.568067 | false |
aimas/TuniErp-8.0 | addons/account/account_cash_statement.py | 283 | 15868 | # encoding: utf-8
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2008 PC Solutions (<http://pcsol.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools import float_compare
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class account_cashbox_line(osv.osv):
""" Cash Box Details """
_name = 'account.cashbox.line'
_description = 'CashBox Line'
_rec_name = 'pieces'
def _sub_total(self, cr, uid, ids, name, arg, context=None):
""" Calculates Sub total
@param name: Names of fields.
@param arg: User defined arguments
@return: Dictionary of values.
"""
res = {}
for obj in self.browse(cr, uid, ids, context=context):
res[obj.id] = {
'subtotal_opening' : obj.pieces * obj.number_opening,
'subtotal_closing' : obj.pieces * obj.number_closing,
}
return res
def on_change_sub_opening(self, cr, uid, ids, pieces, number, *a):
""" Compute the subtotal for the opening """
return {'value' : {'subtotal_opening' : (pieces * number) or 0.0 }}
def on_change_sub_closing(self, cr, uid, ids, pieces, number, *a):
""" Compute the subtotal for the closing """
return {'value' : {'subtotal_closing' : (pieces * number) or 0.0 }}
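# Worked example (figures are illustrative): counting 12 coins of 0.50 in the
# closing cashbox gives on_change_sub_closing(..., pieces=0.50, number=12)
# -> {'value': {'subtotal_closing': 6.0}}.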
_columns = {
'pieces': fields.float('Unit of Currency', digits_compute=dp.get_precision('Account')),
'number_opening' : fields.integer('Number of Units', help='Opening Unit Numbers'),
'number_closing' : fields.integer('Number of Units', help='Closing Unit Numbers'),
'subtotal_opening': fields.function(_sub_total, string='Opening Subtotal', type='float', digits_compute=dp.get_precision('Account'), multi='subtotal'),
'subtotal_closing': fields.function(_sub_total, string='Closing Subtotal', type='float', digits_compute=dp.get_precision('Account'), multi='subtotal'),
'bank_statement_id' : fields.many2one('account.bank.statement', ondelete='cascade'),
}
class account_cash_statement(osv.osv):
_inherit = 'account.bank.statement'
def _update_balances(self, cr, uid, ids, context=None):
"""
Set starting and ending balances according to pieces count
"""
res = {}
for statement in self.browse(cr, uid, ids, context=context):
if (statement.journal_id.type not in ('cash',)):
continue
if not statement.journal_id.cash_control:
prec = self.pool['decimal.precision'].precision_get(cr, uid, 'Account')
if float_compare(statement.balance_end_real, statement.balance_end, precision_digits=prec):
statement.write({'balance_end_real' : statement.balance_end})
continue
start = end = 0
for line in statement.details_ids:
start += line.subtotal_opening
end += line.subtotal_closing
data = {
'balance_start': start,
'balance_end_real': end,
}
res[statement.id] = data
super(account_cash_statement, self).write(cr, uid, [statement.id], data, context=context)
return res
def _get_sum_entry_encoding(self, cr, uid, ids, name, arg, context=None):
""" Find encoding total of statements "
@param name: Names of fields.
@param arg: User defined arguments
@return: Dictionary of values.
"""
res = {}
for statement in self.browse(cr, uid, ids, context=context):
res[statement.id] = sum((line.amount for line in statement.line_ids), 0.0)
return res
def _get_company(self, cr, uid, context=None):
user_pool = self.pool.get('res.users')
company_pool = self.pool.get('res.company')
user = user_pool.browse(cr, uid, uid, context=context)
company_id = user.company_id
if not company_id:
company_id = company_pool.search(cr, uid, [])
return company_id and company_id[0] or False
def _get_statement_from_line(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('account.bank.statement.line').browse(cr, uid, ids, context=context):
result[line.statement_id.id] = True
return result.keys()
def _compute_difference(self, cr, uid, ids, fieldnames, args, context=None):
result = dict.fromkeys(ids, 0.0)
for obj in self.browse(cr, uid, ids, context=context):
result[obj.id] = obj.balance_end_real - obj.balance_end
return result
def _compute_last_closing_balance(self, cr, uid, ids, fieldnames, args, context=None):
result = dict.fromkeys(ids, 0.0)
for obj in self.browse(cr, uid, ids, context=context):
if obj.state == 'draft':
statement_ids = self.search(cr, uid,
[('journal_id', '=', obj.journal_id.id),('state', '=', 'confirm')],
order='create_date desc',
limit=1,
context=context
)
if not statement_ids:
continue
else:
st = self.browse(cr, uid, statement_ids[0], context=context)
result[obj.id] = st.balance_end_real
return result
def onchange_journal_id(self, cr, uid, ids, journal_id, context=None):
result = super(account_cash_statement, self).onchange_journal_id(cr, uid, ids, journal_id)
if not journal_id:
return result
statement_ids = self.search(cr, uid,
[('journal_id', '=', journal_id),('state', '=', 'confirm')],
order='create_date desc',
limit=1,
context=context
)
opening_details_ids = self._get_cash_open_box_lines(cr, uid, journal_id, context)
if opening_details_ids:
result['value']['opening_details_ids'] = opening_details_ids
if not statement_ids:
return result
st = self.browse(cr, uid, statement_ids[0], context=context)
result.setdefault('value', {}).update({'last_closing_balance' : st.balance_end_real})
return result
_columns = {
'total_entry_encoding': fields.function(_get_sum_entry_encoding, string="Total Transactions",
store = {
'account.bank.statement': (lambda self, cr, uid, ids, context=None: ids, ['line_ids','move_line_ids'], 10),
'account.bank.statement.line': (_get_statement_from_line, ['amount'], 10),
},
help="Total of cash transaction lines."),
'closing_date': fields.datetime("Closed On"),
'details_ids' : fields.one2many('account.cashbox.line', 'bank_statement_id', string='CashBox Lines', copy=True),
'opening_details_ids' : fields.one2many('account.cashbox.line', 'bank_statement_id', string='Opening Cashbox Lines'),
'closing_details_ids' : fields.one2many('account.cashbox.line', 'bank_statement_id', string='Closing Cashbox Lines'),
'user_id': fields.many2one('res.users', 'Responsible', required=False),
'difference' : fields.function(_compute_difference, method=True, string="Difference", type="float", help="Difference between the theoretical closing balance and the real closing balance."),
'last_closing_balance' : fields.function(_compute_last_closing_balance, method=True, string='Last Closing Balance', type='float'),
}
_defaults = {
'state': 'draft',
'date': lambda self, cr, uid, context={}: context.get('date', time.strftime("%Y-%m-%d %H:%M:%S")),
'user_id': lambda self, cr, uid, context=None: uid,
}
def _get_cash_open_box_lines(self, cr, uid, journal_id, context):
details_ids = []
if not journal_id:
return details_ids
journal = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context)
if journal and (journal.type == 'cash'):
last_pieces = None
if journal.with_last_closing_balance == True:
domain = [('journal_id', '=', journal.id),
('state', '=', 'confirm')]
last_bank_statement_ids = self.search(cr, uid, domain, limit=1, order='create_date desc', context=context)
if last_bank_statement_ids:
last_bank_statement = self.browse(cr, uid, last_bank_statement_ids[0], context=context)
last_pieces = dict(
(line.pieces, line.number_closing) for line in last_bank_statement.details_ids
)
for value in journal.cashbox_line_ids:
nested_values = {
'number_closing' : 0,
'number_opening' : last_pieces.get(value.pieces, 0) if isinstance(last_pieces, dict) else 0,
'pieces' : value.pieces
}
details_ids.append([0, False, nested_values])
return details_ids
def create(self, cr, uid, vals, context=None):
journal_id = vals.get('journal_id')
if journal_id and not vals.get('opening_details_ids'):
vals['opening_details_ids'] = vals.get('opening_details_ids') or self._get_cash_open_box_lines(cr, uid, journal_id, context)
res_id = super(account_cash_statement, self).create(cr, uid, vals, context=context)
self._update_balances(cr, uid, [res_id], context)
return res_id
def write(self, cr, uid, ids, vals, context=None):
"""
Update the record(s) given in {ids} with the new values given in {vals}.
Return True on success, False otherwise.
@param cr: cursor to database
@param user: id of current user
@param ids: list of record ids to be update
@param vals: dict of new values to be set
@param context: context arguments, like lang, time zone
@return: True on success, False otherwise
"""
if vals.get('journal_id', False):
cashbox_line_obj = self.pool.get('account.cashbox.line')
cashbox_ids = cashbox_line_obj.search(cr, uid, [('bank_statement_id', 'in', ids)], context=context)
cashbox_line_obj.unlink(cr, uid, cashbox_ids, context)
res = super(account_cash_statement, self).write(cr, uid, ids, vals, context=context)
self._update_balances(cr, uid, ids, context)
return res
def _user_allow(self, cr, uid, statement_id, context=None):
return True
def button_open(self, cr, uid, ids, context=None):
""" Changes statement state to Running.
@return: True
"""
obj_seq = self.pool.get('ir.sequence')
if context is None:
context = {}
statement_pool = self.pool.get('account.bank.statement')
for statement in statement_pool.browse(cr, uid, ids, context=context):
vals = {}
if not self._user_allow(cr, uid, statement.id, context=context):
raise osv.except_osv(_('Error!'), (_('You do not have rights to open this %s journal!') % (statement.journal_id.name, )))
if statement.name and statement.name == '/':
c = {'fiscalyear_id': statement.period_id.fiscalyear_id.id}
if statement.journal_id.sequence_id:
st_number = obj_seq.next_by_id(cr, uid, statement.journal_id.sequence_id.id, context=c)
else:
st_number = obj_seq.next_by_code(cr, uid, 'account.cash.statement', context=c)
vals.update({
'name': st_number
})
vals.update({
'state': 'open',
})
self.write(cr, uid, [statement.id], vals, context=context)
return True
def statement_close(self, cr, uid, ids, journal_type='bank', context=None):
if journal_type == 'bank':
return super(account_cash_statement, self).statement_close(cr, uid, ids, journal_type, context)
vals = {
'state':'confirm',
'closing_date': time.strftime("%Y-%m-%d %H:%M:%S")
}
return self.write(cr, uid, ids, vals, context=context)
def check_status_condition(self, cr, uid, state, journal_type='bank'):
if journal_type == 'bank':
return super(account_cash_statement, self).check_status_condition(cr, uid, state, journal_type)
return state=='open'
def button_confirm_cash(self, cr, uid, ids, context=None):
absl_proxy = self.pool.get('account.bank.statement.line')
TABLES = ((_('Profit'), 'profit_account_id'), (_('Loss'), 'loss_account_id'),)
for obj in self.browse(cr, uid, ids, context=context):
if obj.difference == 0.0:
continue
elif obj.difference < 0.0:
account = obj.journal_id.loss_account_id
name = _('Loss')
if not obj.journal_id.loss_account_id:
raise osv.except_osv(_('Error!'), _('There is no Loss Account on the journal %s.') % (obj.journal_id.name,))
else: # obj.difference > 0.0
account = obj.journal_id.profit_account_id
name = _('Profit')
if not obj.journal_id.profit_account_id:
raise osv.except_osv(_('Error!'), _('There is no Profit Account on the journal %s.') % (obj.journal_id.name,))
values = {
'statement_id' : obj.id,
'journal_id' : obj.journal_id.id,
'account_id' : account.id,
'amount' : obj.difference,
'name' : name,
}
absl_proxy.create(cr, uid, values, context=context)
return super(account_cash_statement, self).button_confirm_bank(cr, uid, ids, context=context)
class account_journal(osv.osv):
_inherit = 'account.journal'
def _default_cashbox_line_ids(self, cr, uid, context=None):
# Return a list of coins in Euros.
result = [
dict(pieces=value) for value in [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100, 200, 500]
]
return result
_columns = {
'cashbox_line_ids' : fields.one2many('account.journal.cashbox.line', 'journal_id', 'CashBox', copy=True),
}
_defaults = {
'cashbox_line_ids' : _default_cashbox_line_ids,
}
class account_journal_cashbox_line(osv.osv):
_name = 'account.journal.cashbox.line'
_rec_name = 'pieces'
_columns = {
'pieces': fields.float('Values', digits_compute=dp.get_precision('Account')),
'journal_id' : fields.many2one('account.journal', 'Journal', required=True, select=1, ondelete="cascade"),
}
_order = 'pieces asc'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 2,267,754,697,502,580,000 | 42.593407 | 197 | 0.580287 | false |
zhaochl/python-utils | verify_code/Imaging-1.1.7/build/lib.linux-x86_64-2.7/ImtImagePlugin.py | 40 | 2203 | #
# The Python Imaging Library.
# $Id$
#
# IM Tools support for PIL
#
# history:
# 1996-05-27 fl Created (read 8-bit images only)
# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.2)
#
# Copyright (c) Secret Labs AB 1997-2001.
# Copyright (c) Fredrik Lundh 1996-2001.
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.2"
import re
import Image, ImageFile
#
# --------------------------------------------------------------------
field = re.compile(r"([a-z]*) ([^ \r\n]*)")
##
# Image plugin for IM Tools images.
class ImtImageFile(ImageFile.ImageFile):
format = "IMT"
format_description = "IM Tools"
def _open(self):
# Quick rejection: if there's not a LF among the first
# 100 bytes, this is (probably) not a text header.
if not "\n" in self.fp.read(100):
raise SyntaxError, "not an IM file"
self.fp.seek(0)
xsize = ysize = 0
while 1:
s = self.fp.read(1)
if not s:
break
if s == chr(12):
# image data begins
self.tile = [("raw", (0,0)+self.size,
self.fp.tell(),
(self.mode, 0, 1))]
break
else:
# read key/value pair
# FIXME: dangerous, may read whole file
s = s + self.fp.readline()
if len(s) == 1 or len(s) > 100:
break
if s[0] == "*":
continue # comment
m = field.match(s)
if not m:
break
k, v = m.group(1,2)
if k == "width":
xsize = int(v)
self.size = xsize, ysize
elif k == "height":
ysize = int(v)
self.size = xsize, ysize
elif k == "pixel" and v == "n8":
self.mode = "L"
#
# --------------------------------------------------------------------
Image.register_open("IMT", ImtImageFile)
#
# no extension registered (".im" is simply too common)
| apache-2.0 | -3,973,355,818,431,233,000 | 22.688172 | 70 | 0.433954 | false |
darkryder/django | tests/admin_views/models.py | 4 | 25377 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import os
import tempfile
import uuid
from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Section(models.Model):
"""
A simple section that links to articles, to test linking to related items
in admin views.
"""
name = models.CharField(max_length=100)
def __str__(self):
return self.name
@property
def name_property(self):
"""
A property that simply returns the name. Used to test #24461
"""
return self.name
@python_2_unicode_compatible
class Article(models.Model):
"""
A simple article to test admin views. Test backwards compatibility.
"""
title = models.CharField(max_length=100)
content = models.TextField()
date = models.DateTimeField()
section = models.ForeignKey(Section, models.CASCADE, null=True, blank=True)
another_section = models.ForeignKey(Section, models.CASCADE, null=True, blank=True, related_name='+')
sub_section = models.ForeignKey(Section, models.SET_NULL, null=True, blank=True, related_name='+')
def __str__(self):
return self.title
def model_year(self):
return self.date.year
model_year.admin_order_field = 'date'
model_year.short_description = ''
def model_year_reversed(self):
return self.date.year
model_year_reversed.admin_order_field = '-date'
model_year_reversed.short_description = ''
@python_2_unicode_compatible
class Book(models.Model):
"""
A simple book that has chapters.
"""
name = models.CharField(max_length=100, verbose_name='¿Name?')
def __str__(self):
return self.name
@python_2_unicode_compatible
class Promo(models.Model):
name = models.CharField(max_length=100, verbose_name='¿Name?')
book = models.ForeignKey(Book, models.CASCADE)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Chapter(models.Model):
title = models.CharField(max_length=100, verbose_name='¿Title?')
content = models.TextField()
book = models.ForeignKey(Book, models.CASCADE)
def __str__(self):
return self.title
class Meta:
# Use a utf-8 bytestring to ensure it works (see #11710)
verbose_name = '¿Chapter?'
@python_2_unicode_compatible
class ChapterXtra1(models.Model):
chap = models.OneToOneField(Chapter, models.CASCADE, verbose_name='¿Chap?')
xtra = models.CharField(max_length=100, verbose_name='¿Xtra?')
def __str__(self):
return '¿Xtra1: %s' % self.xtra
@python_2_unicode_compatible
class ChapterXtra2(models.Model):
chap = models.OneToOneField(Chapter, models.CASCADE, verbose_name='¿Chap?')
xtra = models.CharField(max_length=100, verbose_name='¿Xtra?')
def __str__(self):
return '¿Xtra2: %s' % self.xtra
class RowLevelChangePermissionModel(models.Model):
name = models.CharField(max_length=100, blank=True)
class CustomArticle(models.Model):
content = models.TextField()
date = models.DateTimeField()
@python_2_unicode_compatible
class ModelWithStringPrimaryKey(models.Model):
string_pk = models.CharField(max_length=255, primary_key=True)
def __str__(self):
return self.string_pk
def get_absolute_url(self):
return '/dummy/%s/' % self.string_pk
@python_2_unicode_compatible
class Color(models.Model):
value = models.CharField(max_length=10)
warm = models.BooleanField(default=False)
def __str__(self):
return self.value
# we replicate Color to register with another ModelAdmin
class Color2(Color):
class Meta:
proxy = True
@python_2_unicode_compatible
class Thing(models.Model):
title = models.CharField(max_length=20)
color = models.ForeignKey(Color, models.CASCADE, limit_choices_to={'warm': True})
pub_date = models.DateField(blank=True, null=True)
def __str__(self):
return self.title
@python_2_unicode_compatible
class Actor(models.Model):
name = models.CharField(max_length=50)
age = models.IntegerField()
title = models.CharField(max_length=50, null=True, blank=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Inquisition(models.Model):
expected = models.BooleanField(default=False)
leader = models.ForeignKey(Actor, models.CASCADE)
country = models.CharField(max_length=20)
def __str__(self):
return "by %s from %s" % (self.leader, self.country)
@python_2_unicode_compatible
class Sketch(models.Model):
title = models.CharField(max_length=100)
inquisition = models.ForeignKey(
Inquisition,
models.CASCADE,
limit_choices_to={
'leader__name': 'Palin',
'leader__age': 27,
'expected': False,
},
)
defendant0 = models.ForeignKey(
Actor,
models.CASCADE,
limit_choices_to={'title__isnull': False},
related_name='as_defendant0',
)
defendant1 = models.ForeignKey(
Actor,
models.CASCADE,
limit_choices_to={'title__isnull': True},
related_name='as_defendant1',
)
def __str__(self):
return self.title
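# Callables used as ``limit_choices_to`` below so the date filter is evaluated
# lazily (whenever a form is built) instead of once at model definition time.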
def today_callable_dict():
return {"last_action__gte": datetime.datetime.today()}
def today_callable_q():
return models.Q(last_action__gte=datetime.datetime.today())
@python_2_unicode_compatible
class Character(models.Model):
username = models.CharField(max_length=100)
last_action = models.DateTimeField()
def __str__(self):
return self.username
@python_2_unicode_compatible
class StumpJoke(models.Model):
variation = models.CharField(max_length=100)
most_recently_fooled = models.ForeignKey(
Character,
models.CASCADE,
limit_choices_to=today_callable_dict,
related_name="+",
)
has_fooled_today = models.ManyToManyField(Character, limit_choices_to=today_callable_q, related_name="+")
def __str__(self):
return self.variation
class Fabric(models.Model):
NG_CHOICES = (
('Textured', (
('x', 'Horizontal'),
('y', 'Vertical'),
)),
('plain', 'Smooth'),
)
surface = models.CharField(max_length=20, choices=NG_CHOICES)
@python_2_unicode_compatible
class Person(models.Model):
GENDER_CHOICES = (
(1, "Male"),
(2, "Female"),
)
name = models.CharField(max_length=100)
gender = models.IntegerField(choices=GENDER_CHOICES)
age = models.IntegerField(default=21)
alive = models.BooleanField(default=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Persona(models.Model):
"""
A simple persona associated with accounts, to test inlining of related
accounts which inherit from a common accounts class.
"""
name = models.CharField(blank=False, max_length=80)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Account(models.Model):
"""
A simple, generic account encapsulating the information shared by all
types of accounts.
"""
username = models.CharField(blank=False, max_length=80)
persona = models.ForeignKey(Persona, models.CASCADE, related_name="accounts")
servicename = 'generic service'
def __str__(self):
return "%s: %s" % (self.servicename, self.username)
class FooAccount(Account):
"""A service-specific account of type Foo."""
servicename = 'foo'
class BarAccount(Account):
"""A service-specific account of type Bar."""
servicename = 'bar'
@python_2_unicode_compatible
class Subscriber(models.Model):
name = models.CharField(blank=False, max_length=80)
email = models.EmailField(blank=False, max_length=175)
def __str__(self):
return "%s (%s)" % (self.name, self.email)
class ExternalSubscriber(Subscriber):
pass
class OldSubscriber(Subscriber):
pass
class Media(models.Model):
name = models.CharField(max_length=60)
class Podcast(Media):
release_date = models.DateField()
class Meta:
ordering = ('release_date',) # overridden in PodcastAdmin
class Vodcast(Media):
media = models.OneToOneField(Media, models.CASCADE, primary_key=True, parent_link=True)
released = models.BooleanField(default=False)
class Parent(models.Model):
name = models.CharField(max_length=128)
def clean(self):
if self.name == '_invalid':
raise ValidationError('invalid')
class Child(models.Model):
parent = models.ForeignKey(Parent, models.CASCADE, editable=False)
name = models.CharField(max_length=30, blank=True)
def clean(self):
if self.name == '_invalid':
raise ValidationError('invalid')
@python_2_unicode_compatible
class EmptyModel(models.Model):
def __str__(self):
return "Primary key = %s" % self.id
temp_storage = FileSystemStorage(tempfile.mkdtemp())
UPLOAD_TO = os.path.join(temp_storage.location, 'test_upload')
class Gallery(models.Model):
name = models.CharField(max_length=100)
class Picture(models.Model):
name = models.CharField(max_length=100)
image = models.FileField(storage=temp_storage, upload_to='test_upload')
gallery = models.ForeignKey(Gallery, models.CASCADE, related_name="pictures")
class Language(models.Model):
iso = models.CharField(max_length=5, primary_key=True)
name = models.CharField(max_length=50)
english_name = models.CharField(max_length=50)
shortlist = models.BooleanField(default=False)
class Meta:
ordering = ('iso',)
# a base class for Recommender and Recommendation
class Title(models.Model):
pass
class TitleTranslation(models.Model):
title = models.ForeignKey(Title, models.CASCADE)
text = models.CharField(max_length=100)
class Recommender(Title):
pass
class Recommendation(Title):
the_recommender = models.ForeignKey(Recommender, models.CASCADE)
class Collector(models.Model):
name = models.CharField(max_length=100)
class Widget(models.Model):
owner = models.ForeignKey(Collector, models.CASCADE)
name = models.CharField(max_length=100)
class DooHickey(models.Model):
code = models.CharField(max_length=10, primary_key=True)
owner = models.ForeignKey(Collector, models.CASCADE)
name = models.CharField(max_length=100)
class Grommet(models.Model):
code = models.AutoField(primary_key=True)
owner = models.ForeignKey(Collector, models.CASCADE)
name = models.CharField(max_length=100)
class Whatsit(models.Model):
index = models.IntegerField(primary_key=True)
owner = models.ForeignKey(Collector, models.CASCADE)
name = models.CharField(max_length=100)
class Doodad(models.Model):
name = models.CharField(max_length=100)
class FancyDoodad(Doodad):
owner = models.ForeignKey(Collector, models.CASCADE)
expensive = models.BooleanField(default=True)
@python_2_unicode_compatible
class Category(models.Model):
collector = models.ForeignKey(Collector, models.CASCADE)
order = models.PositiveIntegerField()
class Meta:
ordering = ('order',)
def __str__(self):
return '%s:o%s' % (self.id, self.order)
def link_posted_default():
return datetime.date.today() - datetime.timedelta(days=7)
class Link(models.Model):
posted = models.DateField(default=link_posted_default)
url = models.URLField()
post = models.ForeignKey("Post", models.CASCADE)
readonly_link_content = models.TextField()
class PrePopulatedPost(models.Model):
title = models.CharField(max_length=100)
published = models.BooleanField(default=False)
slug = models.SlugField()
class PrePopulatedSubPost(models.Model):
post = models.ForeignKey(PrePopulatedPost, models.CASCADE)
subtitle = models.CharField(max_length=100)
subslug = models.SlugField()
class Post(models.Model):
title = models.CharField(max_length=100, help_text="Some help text for the title (with unicode ŠĐĆŽćžšđ)")
content = models.TextField(help_text="Some help text for the content (with unicode ŠĐĆŽćžšđ)")
readonly_content = models.TextField()
posted = models.DateField(
default=datetime.date.today,
help_text="Some help text for the date (with unicode ŠĐĆŽćžšđ)"
)
public = models.NullBooleanField()
def awesomeness_level(self):
return "Very awesome."
# Proxy model to test overridden fields attrs on Post model so as not to
# interfere with other tests.
class FieldOverridePost(Post):
class Meta:
proxy = True
@python_2_unicode_compatible
class Gadget(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Villain(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class SuperVillain(Villain):
pass
@python_2_unicode_compatible
class FunkyTag(models.Model):
"Because we all know there's only one real use case for GFKs."
name = models.CharField(max_length=25)
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
def __str__(self):
return self.name
@python_2_unicode_compatible
class Plot(models.Model):
name = models.CharField(max_length=100)
team_leader = models.ForeignKey(Villain, models.CASCADE, related_name='lead_plots')
contact = models.ForeignKey(Villain, models.CASCADE, related_name='contact_plots')
tags = GenericRelation(FunkyTag)
def __str__(self):
return self.name
@python_2_unicode_compatible
class PlotDetails(models.Model):
details = models.CharField(max_length=100)
plot = models.OneToOneField(Plot, models.CASCADE, null=True, blank=True)
def __str__(self):
return self.details
class PlotProxy(Plot):
class Meta:
proxy = True
@python_2_unicode_compatible
class SecretHideout(models.Model):
""" Secret! Not registered with the admin! """
location = models.CharField(max_length=100)
villain = models.ForeignKey(Villain, models.CASCADE)
def __str__(self):
return self.location
@python_2_unicode_compatible
class SuperSecretHideout(models.Model):
""" Secret! Not registered with the admin! """
location = models.CharField(max_length=100)
supervillain = models.ForeignKey(SuperVillain, models.CASCADE)
def __str__(self):
return self.location
@python_2_unicode_compatible
class Bookmark(models.Model):
name = models.CharField(max_length=60)
tag = GenericRelation(FunkyTag, related_query_name='bookmark')
def __str__(self):
return self.name
@python_2_unicode_compatible
class CyclicOne(models.Model):
name = models.CharField(max_length=25)
two = models.ForeignKey('CyclicTwo', models.CASCADE)
def __str__(self):
return self.name
@python_2_unicode_compatible
class CyclicTwo(models.Model):
name = models.CharField(max_length=25)
one = models.ForeignKey(CyclicOne, models.CASCADE)
def __str__(self):
return self.name
class Topping(models.Model):
name = models.CharField(max_length=20)
class Pizza(models.Model):
name = models.CharField(max_length=20)
toppings = models.ManyToManyField('Topping', related_name='pizzas')
class Album(models.Model):
owner = models.ForeignKey(User, models.SET_NULL, null=True, blank=True)
title = models.CharField(max_length=30)
class Employee(Person):
code = models.CharField(max_length=20)
class WorkHour(models.Model):
datum = models.DateField()
employee = models.ForeignKey(Employee, models.CASCADE)
class Question(models.Model):
question = models.CharField(max_length=20)
posted = models.DateField(default=datetime.date.today)
@python_2_unicode_compatible
class Answer(models.Model):
question = models.ForeignKey(Question, models.PROTECT)
answer = models.CharField(max_length=20)
def __str__(self):
return self.answer
class Reservation(models.Model):
start_date = models.DateTimeField()
price = models.IntegerField()
DRIVER_CHOICES = (
('bill', 'Bill G'),
('steve', 'Steve J'),
)
RESTAURANT_CHOICES = (
('indian', 'A Taste of India'),
('thai', 'Thai Pography'),
('pizza', 'Pizza Mama'),
)
class FoodDelivery(models.Model):
reference = models.CharField(max_length=100)
driver = models.CharField(max_length=100, choices=DRIVER_CHOICES, blank=True)
restaurant = models.CharField(max_length=100, choices=RESTAURANT_CHOICES, blank=True)
class Meta:
unique_together = (("driver", "restaurant"),)
@python_2_unicode_compatible
class CoverLetter(models.Model):
author = models.CharField(max_length=30)
date_written = models.DateField(null=True, blank=True)
def __str__(self):
return self.author
class Paper(models.Model):
title = models.CharField(max_length=30)
author = models.CharField(max_length=30, blank=True, null=True)
class ShortMessage(models.Model):
content = models.CharField(max_length=140)
timestamp = models.DateTimeField(null=True, blank=True)
@python_2_unicode_compatible
class Telegram(models.Model):
title = models.CharField(max_length=30)
date_sent = models.DateField(null=True, blank=True)
def __str__(self):
return self.title
class Story(models.Model):
title = models.CharField(max_length=100)
content = models.TextField()
class OtherStory(models.Model):
title = models.CharField(max_length=100)
content = models.TextField()
class ComplexSortedPerson(models.Model):
name = models.CharField(max_length=100)
age = models.PositiveIntegerField()
is_employee = models.NullBooleanField()
class PluggableSearchPerson(models.Model):
name = models.CharField(max_length=100)
age = models.PositiveIntegerField()
class PrePopulatedPostLargeSlug(models.Model):
"""
Regression test for #15938: a large max_length for the slugfield must not
be localized in prepopulated_fields_js.html or it might end up breaking
the javascript (ie, using THOUSAND_SEPARATOR ends up with maxLength=1,000)
"""
title = models.CharField(max_length=100)
published = models.BooleanField(default=False)
# `db_index=False` because MySQL cannot index large CharField (#21196).
slug = models.SlugField(max_length=1000, db_index=False)
class AdminOrderedField(models.Model):
order = models.IntegerField()
stuff = models.CharField(max_length=200)
class AdminOrderedModelMethod(models.Model):
order = models.IntegerField()
stuff = models.CharField(max_length=200)
def some_order(self):
return self.order
some_order.admin_order_field = 'order'
class AdminOrderedAdminMethod(models.Model):
order = models.IntegerField()
stuff = models.CharField(max_length=200)
class AdminOrderedCallable(models.Model):
order = models.IntegerField()
stuff = models.CharField(max_length=200)
@python_2_unicode_compatible
class Report(models.Model):
title = models.CharField(max_length=100)
def __str__(self):
return self.title
class MainPrepopulated(models.Model):
name = models.CharField(max_length=100)
pubdate = models.DateField()
status = models.CharField(
max_length=20,
choices=(('option one', 'Option One'),
('option two', 'Option Two')))
slug1 = models.SlugField(blank=True)
slug2 = models.SlugField(blank=True)
slug3 = models.SlugField(blank=True, allow_unicode=True)
class RelatedPrepopulated(models.Model):
parent = models.ForeignKey(MainPrepopulated, models.CASCADE)
name = models.CharField(max_length=75)
pubdate = models.DateField()
status = models.CharField(
max_length=20,
choices=(('option one', 'Option One'),
('option two', 'Option Two')))
slug1 = models.SlugField(max_length=50)
slug2 = models.SlugField(max_length=60)
class UnorderedObject(models.Model):
"""
Model without any defined `Meta.ordering`.
Refs #16819.
"""
name = models.CharField(max_length=255)
bool = models.BooleanField(default=True)
class UndeletableObject(models.Model):
"""
Model whose show_delete in admin change_view has been disabled
Refs #10057.
"""
name = models.CharField(max_length=255)
class UnchangeableObject(models.Model):
"""
Model whose change_view is disabled in admin
Refs #20640.
"""
class UserMessenger(models.Model):
"""
Dummy class for testing message_user functions on ModelAdmin
"""
class Simple(models.Model):
"""
Simple model with nothing on it for use in testing
"""
class Choice(models.Model):
choice = models.IntegerField(
blank=True, null=True,
choices=((1, 'Yes'), (0, 'No'), (None, 'No opinion')),
)
class ParentWithDependentChildren(models.Model):
"""
Issue #20522
Model where the validation of child foreign-key relationships depends
on validation of the parent
"""
some_required_info = models.PositiveIntegerField()
family_name = models.CharField(max_length=255, blank=False)
class DependentChild(models.Model):
"""
Issue #20522
Model that depends on validation of the parent class for one of its
fields to validate during clean
"""
parent = models.ForeignKey(ParentWithDependentChildren, models.CASCADE)
family_name = models.CharField(max_length=255)
class _Manager(models.Manager):
def get_queryset(self):
return super(_Manager, self).get_queryset().filter(pk__gt=1)
class FilteredManager(models.Model):
def __str__(self):
return "PK=%d" % self.pk
pk_gt_1 = _Manager()
objects = models.Manager()
class EmptyModelVisible(models.Model):
""" See ticket #11277. """
class EmptyModelHidden(models.Model):
""" See ticket #11277. """
class EmptyModelMixin(models.Model):
""" See ticket #11277. """
class State(models.Model):
name = models.CharField(max_length=100)
class City(models.Model):
state = models.ForeignKey(State, models.CASCADE)
name = models.CharField(max_length=100)
def get_absolute_url(self):
return '/dummy/%s/' % self.pk
class Restaurant(models.Model):
city = models.ForeignKey(City, models.CASCADE)
name = models.CharField(max_length=100)
def get_absolute_url(self):
return '/dummy/%s/' % self.pk
class Worker(models.Model):
work_at = models.ForeignKey(Restaurant, models.CASCADE)
name = models.CharField(max_length=50)
surname = models.CharField(max_length=50)
# Models for #23329
class ReferencedByParent(models.Model):
name = models.CharField(max_length=20, unique=True)
class ParentWithFK(models.Model):
fk = models.ForeignKey(
ReferencedByParent,
models.CASCADE,
to_field='name',
related_name='hidden+',
)
class ChildOfReferer(ParentWithFK):
pass
# Models for #23431
class ReferencedByInline(models.Model):
name = models.CharField(max_length=20, unique=True)
class InlineReference(models.Model):
fk = models.ForeignKey(
ReferencedByInline,
models.CASCADE,
to_field='name',
related_name='hidden+',
)
class InlineReferer(models.Model):
refs = models.ManyToManyField(InlineReference)
# Models for #23604 and #23915
class Recipe(models.Model):
rname = models.CharField(max_length=20, unique=True)
class Ingredient(models.Model):
iname = models.CharField(max_length=20, unique=True)
recipes = models.ManyToManyField(Recipe, through='RecipeIngredient')
class RecipeIngredient(models.Model):
ingredient = models.ForeignKey(Ingredient, models.CASCADE, to_field='iname')
recipe = models.ForeignKey(Recipe, models.CASCADE, to_field='rname')
# Model for #23839
class NotReferenced(models.Model):
# Don't point any FK at this model.
pass
# Models for #23934
class ExplicitlyProvidedPK(models.Model):
name = models.IntegerField(primary_key=True)
class ImplicitlyGeneratedPK(models.Model):
name = models.IntegerField(unique=True)
# Models for #25622
class ReferencedByGenRel(models.Model):
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
class GenRelReference(models.Model):
references = GenericRelation(ReferencedByGenRel)
class ParentWithUUIDPK(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
title = models.CharField(max_length=100)
def __str__(self):
return str(self.id)
class RelatedWithUUIDPKModel(models.Model):
parent = models.ForeignKey(ParentWithUUIDPK, on_delete=models.SET_NULL, null=True, blank=True)
| bsd-3-clause | -6,062,716,396,123,983,000 | 24.886619 | 110 | 0.687527 | false |
Versent/ansible-modules-core | cloud/amazon/ec2_eip.py | 1 | 9973 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_eip
short_description: associate an EC2 elastic IP with an instance.
description:
- This module associates AWS EC2 elastic IP addresses with instances
version_added: "1.4"
options:
instance_id:
description:
- The EC2 instance id
required: false
public_ip:
description:
- The elastic IP address to associate with the instance.
- If absent, allocate a new address
required: false
state:
description:
- If present, associate the IP with the instance.
- If absent, disassociate the IP with the instance.
required: false
choices: ['present', 'absent']
default: present
region:
description:
- the EC2 region to use
required: false
default: null
aliases: [ ec2_region ]
in_vpc:
description:
- allocate an EIP inside a VPC or not
required: false
default: false
version_added: "1.4"
reuse_existing_ip_allowed:
description:
- Reuse an EIP that is not associated to an instance (when available),'''
''' instead of allocating a new one.
required: false
default: false
version_added: "1.6"
extends_documentation_fragment: aws
author: "Lorin Hochstein (@lorin) <[email protected]>"
notes:
- This module will return C(public_ip) on success, which will contain the
public IP address associated with the instance.
- There may be a delay between the time the Elastic IP is assigned and when
the cloud instance is reachable via the new address. Use wait_for and
pause to delay further playbook execution until the instance is reachable,
if necessary.
'''
EXAMPLES = '''
- name: associate an elastic IP with an instance
ec2_eip: instance_id=i-1212f003 ip=93.184.216.119
- name: disassociate an elastic IP from an instance
ec2_eip: instance_id=i-1212f003 ip=93.184.216.119 state=absent
- name: allocate a new elastic IP and associate it with an instance
ec2_eip: instance_id=i-1212f003
- name: allocate a new elastic IP without associating it to anything
action: ec2_eip
register: eip
- name: output the IP
debug: msg="Allocated IP is {{ eip.public_ip }}"
- name: another way of allocating an elastic IP without associating it to anything
ec2_eip: state='present'
- name: provision new instances with ec2
ec2: keypair=mykey instance_type=c1.medium image=emi-40603AD1 wait=yes'''
''' group=webserver count=3
register: ec2
- name: associate new elastic IPs with each of the instances
ec2_eip: "instance_id={{ item }}"
with_items: ec2.instance_ids
- name: allocate a new elastic IP inside a VPC in us-west-2
ec2_eip: region=us-west-2 in_vpc=yes
register: eip
- name: output the IP
debug: msg="Allocated IP inside a VPC is {{ eip.public_ip }}"
'''
try:
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
class EIPException(Exception):
pass
def associate_ip_and_instance(ec2, address, instance_id, check_mode):
if address_is_associated_with_instance(ec2, address, instance_id):
return {'changed': False}
# If we're in check mode, nothing else to do
if not check_mode:
if address.domain == 'vpc':
res = ec2.associate_address(instance_id,
allocation_id=address.allocation_id)
else:
res = ec2.associate_address(instance_id,
public_ip=address.public_ip)
if not res:
raise EIPException('association failed')
return {'changed': True}
def disassociate_ip_and_instance(ec2, address, instance_id, check_mode):
if not address_is_associated_with_instance(ec2, address, instance_id):
return {'changed': False}
# If we're in check mode, nothing else to do
if not check_mode:
if address.domain == 'vpc':
res = ec2.disassociate_address(
association_id=address.association_id)
else:
res = ec2.disassociate_address(public_ip=address.public_ip)
if not res:
raise EIPException('disassociation failed')
return {'changed': True}
def _find_address_by_ip(ec2, public_ip):
try:
return ec2.get_all_addresses([public_ip])[0]
except boto.exception.EC2ResponseError as e:
if "Address '{}' not found.".format(public_ip) not in e.message:
raise
def _find_address_by_instance_id(ec2, instance_id):
addresses = ec2.get_all_addresses(None, {'instance-id': instance_id})
if addresses:
return addresses[0]
def find_address(ec2, public_ip, instance_id):
""" Find an existing Elastic IP address """
if public_ip:
return _find_address_by_ip(ec2, public_ip)
elif instance_id:
return _find_address_by_instance_id(ec2, instance_id)
def address_is_associated_with_instance(ec2, address, instance_id):
""" Check if the elastic IP is currently associated with the instance """
    return bool(address) and address.instance_id == instance_id
def allocate_address(ec2, domain, reuse_existing_ip_allowed):
""" Allocate a new elastic IP address (when needed) and return it """
if reuse_existing_ip_allowed:
domain_filter = {'domain': domain or 'standard'}
all_addresses = ec2.get_all_addresses(filters=domain_filter)
unassociated_addresses = [a for a in all_addresses
if not a.instance_id]
if unassociated_addresses:
return unassociated_addresses[0]
return ec2.allocate_address(domain=domain)
def release_address(ec2, address, check_mode):
""" Release a previously allocated elastic IP address """
# If we're in check mode, nothing else to do
if not check_mode:
if not address.release():
            raise EIPException('release failed')
return {'changed': True}
def find_instance(ec2, instance_id):
""" Attempt to find the EC2 instance and return it """
reservations = ec2.get_all_reservations(instance_ids=[instance_id])
if len(reservations) == 1:
instances = reservations[0].instances
if len(instances) == 1:
return instances[0]
raise EIPException("could not find instance" + instance_id)
def ensure_present(ec2, domain, address, instance_id,
reuse_existing_ip_allowed, check_mode):
changed = False
    # Allocate a new address only if an existing one was not provided or found
if not address:
if check_mode:
return {'changed': True}
address = allocate_address(ec2, domain, reuse_existing_ip_allowed)
changed = True
if instance_id:
        # Look up the instance so we can tell whether it lives in a VPC
instance = find_instance(ec2, instance_id)
if instance.vpc_id:
domain = 'vpc'
# Associate address object (provided or allocated) with instance
assoc_result = associate_ip_and_instance(ec2, address, instance_id,
check_mode)
changed = changed or assoc_result['changed']
return {'changed': changed, 'public_ip': address.public_ip}
def ensure_absent(ec2, domain, address, instance_id, check_mode):
if not address:
return {'changed': False}
# disassociating address from instance
if instance_id:
return disassociate_ip_and_instance(ec2, address, instance_id,
check_mode)
# releasing address
else:
return release_address(ec2, address, check_mode)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
instance_id=dict(required=False),
public_ip=dict(required=False, aliases=['ip']),
state=dict(required=False, default='present',
choices=['present', 'absent']),
in_vpc=dict(required=False, type='bool', default=False),
reuse_existing_ip_allowed=dict(required=False, type='bool',
default=False),
wait_timeout=dict(default=300),
))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
ec2 = ec2_connect(module)
instance_id = module.params.get('instance_id')
public_ip = module.params.get('public_ip')
state = module.params.get('state')
in_vpc = module.params.get('in_vpc')
domain = 'vpc' if in_vpc else None
reuse_existing_ip_allowed = module.params.get('reuse_existing_ip_allowed')
try:
address = find_address(ec2, public_ip, instance_id)
if state == 'present':
result = ensure_present(ec2, domain, address, instance_id,
reuse_existing_ip_allowed,
module.check_mode)
else:
result = ensure_absent(ec2, domain, address, instance_id, module.check_mode)
except (boto.exception.EC2ResponseError, EIPException) as e:
module.fail_json(msg=str(e))
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import * # noqa
from ansible.module_utils.ec2 import * # noqa
if __name__ == '__main__':
main()
| gpl-3.0 | 7,650,753,511,779,120,000 | 31.275081 | 88 | 0.650857 | false |
nitzmahone/ansible | test/units/modules/network/f5/test_bigip_smtp.py | 21 | 5016 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_smtp import ApiParameters
from library.modules.bigip_smtp import ModuleParameters
from library.modules.bigip_smtp import ModuleManager
from library.modules.bigip_smtp import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_smtp import ApiParameters
from ansible.modules.network.f5.bigip_smtp import ModuleParameters
from ansible.modules.network.f5.bigip_smtp import ModuleManager
from ansible.modules.network.f5.bigip_smtp import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
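# Fixture JSON files are read once and memoized in fixture_data so repeated
# lookups during the tests do not hit the disk again.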
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
name='foo',
smtp_server='1.1.1.1',
smtp_server_port='25',
smtp_server_username='admin',
smtp_server_password='password',
local_host_name='smtp.mydomain.com',
encryption='tls',
update_password='always',
from_address='[email protected]',
authentication=True,
)
p = ModuleParameters(params=args)
assert p.name == 'foo'
assert p.smtp_server == '1.1.1.1'
assert p.smtp_server_port == 25
assert p.smtp_server_username == 'admin'
assert p.smtp_server_password == 'password'
assert p.local_host_name == 'smtp.mydomain.com'
assert p.encryption == 'tls'
assert p.update_password == 'always'
assert p.from_address == '[email protected]'
assert p.authentication_disabled is None
assert p.authentication_enabled is True
def test_api_parameters(self):
p = ApiParameters(params=load_fixture('load_sys_smtp_server.json'))
assert p.name == 'foo'
assert p.smtp_server == 'mail.foo.bar'
assert p.smtp_server_port == 465
assert p.smtp_server_username == 'admin'
assert p.smtp_server_password == '$M$Ch$this-is-encrypted=='
assert p.local_host_name == 'mail-host.foo.bar'
assert p.encryption == 'ssl'
assert p.from_address == '[email protected]'
assert p.authentication_disabled is None
assert p.authentication_enabled is True
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create_monitor(self, *args):
set_module_args(dict(
name='foo',
smtp_server='1.1.1.1',
smtp_server_port='25',
smtp_server_username='admin',
smtp_server_password='password',
local_host_name='smtp.mydomain.com',
encryption='tls',
update_password='always',
from_address='[email protected]',
authentication=True,
partition='Common',
server='localhost',
password='password',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['encryption'] == 'tls'
assert results['smtp_server'] == '1.1.1.1'
assert results['smtp_server_port'] == 25
assert results['local_host_name'] == 'smtp.mydomain.com'
assert results['authentication'] is True
assert results['from_address'] == '[email protected]'
assert 'smtp_server_username' not in results
assert 'smtp_server_password' not in results
| gpl-3.0 | 5,708,921,683,412,216,000 | 32 | 91 | 0.63437 | false |
Pintor95/django-jukebox | includes/json.py | 3 | 1337 | """
This file contains everything needed to send and receive JSON requests.
"""
from django.utils.functional import Promise
from django.utils import simplejson
class JSMessage(object):
"""
    The JSMessage class is used to pass JSON messages to client-side
    JavaScript. Use it as the vessel for JSON payloads instead of building
    and sending JSON text directly.
"""
# This is a general message to show the client or to describe the packet.
message = None
# Boolean to indicate whether the message is describing an error.
is_error = False
# Store strings, dictionary, or list in here for the client to parse.
contents = None
def __init__(self, message, is_error=False, contents=None):
"""
Default constructor. Only required argument is message.
"""
self.message = message
self.is_error = is_error
if contents:
self.contents = contents
def __str__(self):
"""
The to-string method is called when sending JSMessage objects. This
is what the client will see.
"""
# Encode the dictionary and return it for sending.
return simplejson.dumps({"message": self.message,
"is_error": self.is_error,
"contents": self.contents}) | gpl-3.0 | -7,172,712,630,842,558,000 | 35.162162 | 77 | 0.619297 | false |
mistercrunch/airflow | airflow/providers/amazon/aws/operators/glacier.py | 6 | 1894 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.glacier import GlacierHook
from airflow.utils.decorators import apply_defaults
class GlacierCreateJobOperator(BaseOperator):
"""
Initiate an Amazon Glacier inventory-retrieval job
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GlacierCreateJobOperator`
:param aws_conn_id: The reference to the AWS connection details
:type aws_conn_id: str
:param vault_name: the Glacier vault on which job is executed
:type vault_name: str
"""
template_fields = ("vault_name",)
@apply_defaults
def __init__(
self,
*,
aws_conn_id="aws_default",
vault_name: str,
**kwargs,
):
super().__init__(**kwargs)
self.aws_conn_id = aws_conn_id
self.vault_name = vault_name
def execute(self, context):
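        # Initiate an inventory-retrieval job for the configured vault and return
        # the raw service response (it contains the job id); the return value is
        # pushed to XCom for downstream tasks such as a job-status sensor.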
hook = GlacierHook(aws_conn_id=self.aws_conn_id)
response = hook.retrieve_inventory(vault_name=self.vault_name)
return response
| apache-2.0 | -1,496,449,530,701,960,700 | 34.074074 | 83 | 0.704329 | false |
scholer/cadnano2.5 | cadnano/extras/__init__.py | 2 | 1253 | # The MIT License
#
# Copyright (c) 2011 Wyss Institute at Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# http://www.opensource.org/licenses/mit-license.php
"""
__init__.py
Created by Shawn Douglas on 2011-01-23.
"""
| mit | 5,845,931,635,588,238,000 | 40.766667 | 79 | 0.768555 | false |
sumedhasingla/VTK | Filters/Hybrid/Testing/Python/TestGreedyTerrainDecimation.py | 20 | 1948 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
lut = vtk.vtkLookupTable()
lut.SetHueRange(0.6, 0)
lut.SetSaturationRange(1.0, 0)
lut.SetValueRange(0.5, 1.0)
# Read the data: a height field results
demReader = vtk.vtkDEMReader()
demReader.SetFileName(VTK_DATA_ROOT + "/Data/SainteHelens.dem")
demReader.Update()
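# Force the reader to execute now so the elevation scalar range is available
# for the mapper below.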
lo = demReader.GetOutput().GetScalarRange()[0]
hi = demReader.GetOutput().GetScalarRange()[1]
# Decimate the terrain
deci = vtk.vtkGreedyTerrainDecimation()
deci.SetInputConnection(demReader.GetOutputPort())
deci.BoundaryVertexDeletionOn()
# deci.SetErrorMeasureToSpecifiedReduction()
# deci.SetReduction(0.95)
deci.SetErrorMeasureToNumberOfTriangles()
deci.SetNumberOfTriangles(5000)
# deci.SetErrorMeasureToAbsoluteError()
# deci.SetAbsoluteError(25.0)
# deci.SetErrorMeasureToRelativeError()
# deci.SetAbsoluteError(0.01)
normals = vtk.vtkPolyDataNormals()
normals.SetInputConnection(deci.GetOutputPort())
normals.SetFeatureAngle(60)
normals.ConsistencyOn()
normals.SplittingOff()
demMapper = vtk.vtkPolyDataMapper()
demMapper.SetInputConnection(normals.GetOutputPort())
demMapper.SetScalarRange(lo, hi)
demMapper.SetLookupTable(lut)
actor = vtk.vtkLODActor()
actor.SetMapper(demMapper)
# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(actor)
ren1.SetBackground(.1, .2, .4)
iren.SetDesiredUpdateRate(5)
ren1.GetActiveCamera().SetViewUp(0, 0, 1)
ren1.GetActiveCamera().SetPosition(-99900, -21354, 131801)
ren1.GetActiveCamera().SetFocalPoint(41461, 41461, 2815)
ren1.ResetCamera()
ren1.GetActiveCamera().Dolly(1.2)
ren1.ResetCameraClippingRange()
renWin.Render()
iren.Initialize()
#iren.Start()
| bsd-3-clause | 3,052,508,174,115,363,300 | 26.055556 | 63 | 0.791581 | false |
serensoner/CouchPotatoServer | libs/gntp/cli.py | 122 | 4143 | # Copyright: 2013 Paul Traylor
# These sources are released under the terms of the MIT license: see LICENSE
import logging
import os
import sys
from optparse import OptionParser, OptionGroup
from gntp.notifier import GrowlNotifier
from gntp.shim import RawConfigParser
from gntp.version import __version__
DEFAULT_CONFIG = os.path.expanduser('~/.gntp')
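# ~/.gntp is an optional INI-style file overriding the defaults below, e.g.:
#   [gntp]
#   hostname = growl.example.com
#   port = 23053
#   password = mypassword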
config = RawConfigParser({
'hostname': 'localhost',
'password': None,
'port': 23053,
})
config.read([DEFAULT_CONFIG])
if not config.has_section('gntp'):
config.add_section('gntp')
class ClientParser(OptionParser):
def __init__(self):
OptionParser.__init__(self, version="%%prog %s" % __version__)
group = OptionGroup(self, "Network Options")
group.add_option("-H", "--host",
dest="host", default=config.get('gntp', 'hostname'),
help="Specify a hostname to which to send a remote notification. [%default]")
group.add_option("--port",
dest="port", default=config.getint('gntp', 'port'), type="int",
help="port to listen on [%default]")
group.add_option("-P", "--password",
dest='password', default=config.get('gntp', 'password'),
help="Network password")
self.add_option_group(group)
group = OptionGroup(self, "Notification Options")
group.add_option("-n", "--name",
dest="app", default='Python GNTP Test Client',
help="Set the name of the application [%default]")
group.add_option("-s", "--sticky",
dest='sticky', default=False, action="store_true",
help="Make the notification sticky [%default]")
group.add_option("--image",
dest="icon", default=None,
help="Icon for notification (URL or /path/to/file)")
group.add_option("-m", "--message",
dest="message", default=None,
help="Sets the message instead of using stdin")
group.add_option("-p", "--priority",
dest="priority", default=0, type="int",
help="-2 to 2 [%default]")
group.add_option("-d", "--identifier",
dest="identifier",
help="Identifier for coalescing")
group.add_option("-t", "--title",
dest="title", default=None,
help="Set the title of the notification [%default]")
group.add_option("-N", "--notification",
dest="name", default='Notification',
help="Set the notification name [%default]")
group.add_option("--callback",
dest="callback",
help="URL callback")
self.add_option_group(group)
# Extra Options
self.add_option('-v', '--verbose',
dest='verbose', default=0, action='count',
help="Verbosity levels")
def parse_args(self, args=None, values=None):
values, args = OptionParser.parse_args(self, args, values)
if values.message is None:
print('Enter a message followed by Ctrl-D')
try:
message = sys.stdin.read()
except KeyboardInterrupt:
exit()
else:
message = values.message
if values.title is None:
values.title = ' '.join(args)
# If we still have an empty title, use the
# first bit of the message as the title
if values.title == '':
values.title = message[:20]
values.verbose = logging.WARNING - values.verbose * 10
return values, message
def main():
(options, message) = ClientParser().parse_args()
logging.basicConfig(level=options.verbose)
if not os.path.exists(DEFAULT_CONFIG):
logging.info('No config read found at %s', DEFAULT_CONFIG)
growl = GrowlNotifier(
applicationName=options.app,
notifications=[options.name],
defaultNotifications=[options.name],
hostname=options.host,
password=options.password,
port=options.port,
)
result = growl.register()
if result is not True:
exit(result)
# This would likely be better placed within the growl notifier
# class but until I make _checkIcon smarter this is "easier"
if options.icon is not None and not options.icon.startswith('http'):
logging.info('Loading image %s', options.icon)
f = open(options.icon)
options.icon = f.read()
f.close()
result = growl.notify(
noteType=options.name,
title=options.title,
description=message,
icon=options.icon,
sticky=options.sticky,
priority=options.priority,
callback=options.callback,
identifier=options.identifier,
)
if result is not True:
exit(result)
if __name__ == "__main__":
main()
| gpl-3.0 | 4,628,882,713,008,139,000 | 28.382979 | 80 | 0.689114 | false |
benfinke/ns_python | build/lib/nssrc/com/citrix/netscaler/nitro/resource/config/cs/csvserver_filterpolicy_binding.py | 3 | 14851 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class csvserver_filterpolicy_binding(base_resource) :
""" Binding class showing the filterpolicy that can be bound to csvserver.
"""
def __init__(self) :
self._policyname = ""
self._priority = 0
self._sc = ""
self._name = ""
self._targetlbvserver = ""
self._gotopriorityexpression = ""
self._bindpoint = ""
self._invoke = False
self._labeltype = ""
self._labelname = ""
self.___count = 0
@property
def priority(self) :
ur"""Priority for the policy.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
ur"""Priority for the policy.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def bindpoint(self) :
ur"""For a rewrite policy, the bind point to which to bind the policy. Note: This parameter applies only to rewrite policies, because content switching policies are evaluated only at request time.
"""
try :
return self._bindpoint
except Exception as e:
raise e
@bindpoint.setter
def bindpoint(self, bindpoint) :
ur"""For a rewrite policy, the bind point to which to bind the policy. Note: This parameter applies only to rewrite policies, because content switching policies are evaluated only at request time.
"""
try :
self._bindpoint = bindpoint
except Exception as e:
raise e
@property
def policyname(self) :
ur"""Policies bound to this vserver.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
ur"""Policies bound to this vserver.
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def labelname(self) :
ur"""Name of the label to be invoked.
"""
try :
return self._labelname
except Exception as e:
raise e
@labelname.setter
def labelname(self, labelname) :
ur"""Name of the label to be invoked.
"""
try :
self._labelname = labelname
except Exception as e:
raise e
@property
def name(self) :
ur"""Name of the content switching virtual server to which the content switching policy applies.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""Name of the content switching virtual server to which the content switching policy applies.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
ur"""Expression or other value specifying the next policy to be evaluated if the current policy evaluates to TRUE. Specify one of the following values:
* NEXT - Evaluate the policy with the next higher priority number.
* END - End policy evaluation.
* USE_INVOCATION_RESULT - Applicable if this policy invokes another policy label. If the final goto in the invoked policy label has a value of END, the evaluation stops. If the final goto is anything other than END, the current policy label performs a NEXT.
* A default syntax expression that evaluates to a number.
If you specify an expression, the number to which it evaluates determines the next policy to evaluate, as follows:
* If the expression evaluates to a higher numbered priority, the policy with that priority is evaluated next.
* If the expression evaluates to the priority of the current policy, the policy with the next higher numbered priority is evaluated next.
* If the expression evaluates to a priority number that is numerically higher than the highest numbered priority, policy evaluation ends.
An UNDEF event is triggered if:
* The expression is invalid.
* The expression evaluates to a priority number that is numerically lower than the current policy's priority.
* The expression evaluates to a priority number that is between the current policy's priority number (say, 30) and the highest priority number (say, 100), but does not match any configured priority number (for example, the expression evaluates to the number 85). This example assumes that the priority number increments by 10 for every successive policy, and therefore a priority number of 85 does not exist in the policy label.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
ur"""Expression or other value specifying the next policy to be evaluated if the current policy evaluates to TRUE. Specify one of the following values:
* NEXT - Evaluate the policy with the next higher priority number.
* END - End policy evaluation.
* USE_INVOCATION_RESULT - Applicable if this policy invokes another policy label. If the final goto in the invoked policy label has a value of END, the evaluation stops. If the final goto is anything other than END, the current policy label performs a NEXT.
* A default syntax expression that evaluates to a number.
If you specify an expression, the number to which it evaluates determines the next policy to evaluate, as follows:
* If the expression evaluates to a higher numbered priority, the policy with that priority is evaluated next.
* If the expression evaluates to the priority of the current policy, the policy with the next higher numbered priority is evaluated next.
* If the expression evaluates to a priority number that is numerically higher than the highest numbered priority, policy evaluation ends.
An UNDEF event is triggered if:
* The expression is invalid.
* The expression evaluates to a priority number that is numerically lower than the current policy's priority.
* The expression evaluates to a priority number that is between the current policy's priority number (say, 30) and the highest priority number (say, 100), but does not match any configured priority number (for example, the expression evaluates to the number 85). This example assumes that the priority number increments by 10 for every successive policy, and therefore a priority number of 85 does not exist in the policy label.
"""
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
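	# Illustrative note (not part of the generated SDK): typical values assigned
	# to this property are the strings "NEXT", "END", "USE_INVOCATION_RESULT",
	# or an expression evaluating to a priority number, e.g. (hypothetical):
	#   binding.gotopriorityexpression = "END"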
@property
def targetlbvserver(self) :
ur"""Name of the Load Balancing virtual server to which the content is switched, if policy rule is evaluated to be TRUE.
Example: bind cs vs cs1 -policyname pol1 -priority 101 -targetLBVserver lb1
Note: Use this parameter only in case of Content Switching policy bind operations to a CS vserver.
"""
try :
return self._targetlbvserver
except Exception as e:
raise e
@targetlbvserver.setter
def targetlbvserver(self, targetlbvserver) :
ur"""Name of the Load Balancing virtual server to which the content is switched, if policy rule is evaluated to be TRUE.
Example: bind cs vs cs1 -policyname pol1 -priority 101 -targetLBVserver lb1
Note: Use this parameter only in case of Content Switching policy bind operations to a CS vserver
"""
try :
self._targetlbvserver = targetlbvserver
except Exception as e:
raise e
@property
def invoke(self) :
ur"""Invoke a policy label if this policy's rule evaluates to TRUE (valid only for default-syntax policies such as application firewall, transform, integrated cache, rewrite, responder, and content switching).
"""
try :
return self._invoke
except Exception as e:
raise e
@invoke.setter
def invoke(self, invoke) :
ur"""Invoke a policy label if this policy's rule evaluates to TRUE (valid only for default-syntax policies such as application firewall, transform, integrated cache, rewrite, responder, and content switching).
"""
try :
self._invoke = invoke
except Exception as e:
raise e
@property
def labeltype(self) :
ur"""Type of label to be invoked.
"""
try :
return self._labeltype
except Exception as e:
raise e
@labeltype.setter
def labeltype(self, labeltype) :
ur"""Type of label to be invoked.
"""
try :
self._labeltype = labeltype
except Exception as e:
raise e
@property
def sc(self) :
ur"""The state of SureConnect the specified virtual server.<br/>Possible values = ON, OFF.
"""
try :
return self._sc
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(csvserver_filterpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.csvserver_filterpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = csvserver_filterpolicy_binding()
updateresource.name = resource.name
updateresource.policyname = resource.policyname
updateresource.targetlbvserver = resource.targetlbvserver
updateresource.priority = resource.priority
updateresource.gotopriorityexpression = resource.gotopriorityexpression
updateresource.bindpoint = resource.bindpoint
updateresource.invoke = resource.invoke
updateresource.labeltype = resource.labeltype
updateresource.labelname = resource.labelname
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [csvserver_filterpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].policyname = resource[i].policyname
updateresources[i].targetlbvserver = resource[i].targetlbvserver
updateresources[i].priority = resource[i].priority
updateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression
updateresources[i].bindpoint = resource[i].bindpoint
updateresources[i].invoke = resource[i].invoke
updateresources[i].labeltype = resource[i].labeltype
updateresources[i].labelname = resource[i].labelname
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = csvserver_filterpolicy_binding()
deleteresource.name = resource.name
deleteresource.policyname = resource.policyname
deleteresource.bindpoint = resource.bindpoint
deleteresource.priority = resource.priority
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [csvserver_filterpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
deleteresources[i].policyname = resource[i].policyname
deleteresources[i].bindpoint = resource[i].bindpoint
deleteresources[i].priority = resource[i].priority
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
ur""" Use this API to fetch csvserver_filterpolicy_binding resources.
"""
try :
obj = csvserver_filterpolicy_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
ur""" Use this API to fetch filtered set of csvserver_filterpolicy_binding resources.
		Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
obj = csvserver_filterpolicy_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
ur""" Use this API to count csvserver_filterpolicy_binding resources configued on NetScaler.
"""
try :
obj = csvserver_filterpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
ur""" Use this API to count the filtered set of csvserver_filterpolicy_binding resources.
		Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
obj = csvserver_filterpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Sc:
ON = "ON"
OFF = "OFF"
class Bindpoint:
REQUEST = "REQUEST"
RESPONSE = "RESPONSE"
class Labeltype:
reqvserver = "reqvserver"
resvserver = "resvserver"
policylabel = "policylabel"
class csvserver_filterpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.csvserver_filterpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.csvserver_filterpolicy_binding = [csvserver_filterpolicy_binding() for _ in range(length)]
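# Minimal usage sketch (illustrative only, not part of the generated SDK).
# It assumes an authenticated nitro_service session named `client`; the
# virtual server and policy names below are hypothetical:
#
#   binding = csvserver_filterpolicy_binding()
#   binding.name = "cs_vs1"
#   binding.policyname = "filter_pol1"
#   binding.priority = 100
#   csvserver_filterpolicy_binding.add(client, binding)
#   bound = csvserver_filterpolicy_binding.get(client, "cs_vs1")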
| apache-2.0 | -2,206,839,184,825,346,000 | 35.759901 | 430 | 0.730725 | false |
jhgoebbert/cvl-fabric-launcher | pyinstaller-2.1/tests/basic/test_module__file__attribute.py | 14 | 1175 | #-----------------------------------------------------------------------------
# Copyright (c) 2013, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
# Test the value of the __file__ module attribute.
# In frozen mode it is set to
#
#     sys.prefix/package/__init__.pyc   (for a package)
#     sys.prefix/module.pyc             (for a module)
import os
import sys
import shutil as module
import xml.sax as package
correct_mod = os.path.join(sys.prefix, 'shutil.pyc')
correct_pkg = os.path.join(sys.prefix, 'xml', 'sax', '__init__.pyc')
# Print.
print(' mod.__file__: %s' % module.__file__)
print(' mod.__file__: %s' % correct_mod)
print(' pkg.__file__: %s' % package.__file__)
print(' pkg.__file__: %s' % correct_pkg)
# Test correct values.
if not module.__file__ == correct_mod:
raise SystemExit('MODULE.__file__ attribute is wrong.')
if not package.__file__ == correct_pkg:
raise SystemExit('PACKAGE.__file__ attribute is wrong.')
| gpl-3.0 | -5,539,479,761,952,040,000 | 28.375 | 78 | 0.584681 | false |
Sumith1896/sympy | sympy/combinatorics/tensor_can.py | 4 | 40951 | from __future__ import print_function, division
from sympy.core.compatibility import range
from sympy.combinatorics.permutations import Permutation, _af_rmul, \
_af_invert, _af_new
from sympy.combinatorics.perm_groups import PermutationGroup, _orbit, \
_orbit_transversal
from sympy.combinatorics.util import _distribute_gens_by_base, \
_orbits_transversals_from_bsgs
"""
References for tensor canonicalization:
[1] R. Portugal "Algorithmic simplification of tensor expressions",
J. Phys. A 32 (1999) 7779-7789
[2] R. Portugal, B.F. Svaiter "Group-theoretic Approach for Symbolic
Tensor Manipulation: I. Free Indices"
arXiv:math-ph/0107031v1
[3] L.R.U. Manssur, R. Portugal "Group-theoretic Approach for Symbolic
Tensor Manipulation: II. Dummy Indices"
arXiv:math-ph/0107032v1
[4] xperm.c part of XPerm written by J. M. Martin-Garcia
http://www.xact.es/index.html
"""
def dummy_sgs(dummies, sym, n):
"""
Return the strong generators for dummy indices
Parameters
==========
dummies : list of dummy indices
`dummies[2k], dummies[2k+1]` are paired indices
sym : symmetry under interchange of contracted dummies::
* None no symmetry
* 0 commuting
* 1 anticommuting
n : number of indices
in base form the dummy indices are always in consecutive positions
Examples
========
>>> from sympy.combinatorics.tensor_can import dummy_sgs
>>> dummy_sgs(range(2, 8), 0, 8)
[[0, 1, 3, 2, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 5, 4, 6, 7, 8, 9],
[0, 1, 2, 3, 4, 5, 7, 6, 8, 9], [0, 1, 4, 5, 2, 3, 6, 7, 8, 9],
[0, 1, 2, 3, 6, 7, 4, 5, 8, 9]]
"""
if len(dummies) > n:
raise ValueError("List too large")
res = []
# exchange of contravariant and covariant indices
if sym is not None:
for j in dummies[::2]:
a = list(range(n + 2))
if sym == 1:
a[n] = n + 1
a[n + 1] = n
a[j], a[j + 1] = a[j + 1], a[j]
res.append(a)
# rename dummy indices
for j in dummies[:-3:2]:
a = list(range(n + 2))
a[j:j + 4] = a[j + 2], a[j + 3], a[j], a[j + 1]
res.append(a)
return res
def _min_dummies(dummies, sym, indices):
"""
Return list of minima of the orbits of indices in group of dummies
see `double_coset_can_rep` for the description of `dummies` and `sym`
indices is the initial list of dummy indices
Examples
========
>>> from sympy.combinatorics.tensor_can import _min_dummies
>>> _min_dummies([list(range(2, 8))], [0], list(range(10)))
[0, 1, 2, 2, 2, 2, 2, 2, 8, 9]
"""
num_types = len(sym)
m = []
for dx in dummies:
if dx:
m.append(min(dx))
else:
m.append(None)
res = indices[:]
for i in range(num_types):
for c, i in enumerate(indices):
for j in range(num_types):
if i in dummies[j]:
res[c] = m[j]
break
return res
def _trace_S(s, j, b, S_cosets):
"""
Return the representative h satisfying s[h[b]] == j
If there is not such a representative return None
"""
for h in S_cosets[b]:
if s[h[b]] == j:
return h
return None
def _trace_D(gj, p_i, Dxtrav):
"""
Return the representative h satisfying h[gj] == p_i
If there is not such a representative return None
"""
for h in Dxtrav:
if h[gj] == p_i:
return h
return None
def _dumx_remove(dumx, dumx_flat, p0):
"""
remove p0 from dumx
"""
res = []
for dx in dumx:
if p0 not in dx:
res.append(dx)
continue
k = dx.index(p0)
if k % 2 == 0:
p0_paired = dx[k + 1]
else:
p0_paired = dx[k - 1]
dx.remove(p0)
dx.remove(p0_paired)
dumx_flat.remove(p0)
dumx_flat.remove(p0_paired)
res.append(dx)
def transversal2coset(size, base, transversal):
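    # Expand the BSGS transversals into a list of cosets indexed by position:
    # positions in the base get the sorted transversal elements, the other
    # positions get the identity coset; trailing identity entries are trimmed.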
a = []
j = 0
for i in range(size):
if i in base:
a.append(sorted(transversal[j].values()))
j += 1
else:
a.append([list(range(size))])
j = len(a) - 1
while a[j] == [list(range(size))]:
j -= 1
return a[:j + 1]
def double_coset_can_rep(dummies, sym, b_S, sgens, S_transversals, g):
"""
Butler-Portugal algorithm for tensor canonicalization with dummy indices
dummies
list of lists of dummy indices,
one list for each type of index;
the dummy indices are put in order contravariant, covariant
[d0, -d0, d1, -d1, ...].
sym
list of the symmetries of the index metric for each type.
possible symmetries of the metrics
* 0 symmetric
* 1 antisymmetric
* None no symmetry
b_S
base of a minimal slot symmetry BSGS.
sgens
generators of the slot symmetry BSGS.
S_transversals
transversals for the slot BSGS.
g
permutation representing the tensor.
Return 0 if the tensor is zero, else return the array form of
the permutation representing the canonical form of the tensor.
A tensor with dummy indices can be represented in a number
of equivalent ways which typically grows exponentially with
the number of indices. To be able to establish if two tensors
with many indices are equal becomes computationally very slow
    in the absence of an efficient algorithm.
The Butler-Portugal algorithm [3] is an efficient algorithm to
put tensors in canonical form, solving the above problem.
Portugal observed that a tensor can be represented by a permutation,
and that the class of tensors equivalent to it under slot and dummy
symmetries is equivalent to the double coset `D*g*S`
(Note: in this documentation we use the conventions for multiplication
of permutations p, q with (p*q)(i) = p[q[i]] which is opposite
to the one used in the Permutation class)
Using the algorithm by Butler to find a representative of the
double coset one can find a canonical form for the tensor.
To see this correspondence,
let `g` be a permutation in array form; a tensor with indices `ind`
(the indices including both the contravariant and the covariant ones)
can be written as
    `t = T(ind[g[0]], ..., ind[g[n-1]])`,
    where `n = len(ind)`;
`g` has size `n + 2`, the last two indices for the sign of the tensor
(trick introduced in [4]).
A slot symmetry transformation `s` is a permutation acting on the slots
`t -> T(ind[(g*s)[0]],..., ind[(g*s)[n-1]])`
A dummy symmetry transformation acts on `ind`
`t -> T(ind[(d*g)[0]],..., ind[(d*g)[n-1]])`
Being interested only in the transformations of the tensor under
these symmetries, one can represent the tensor by `g`, which transforms
as
`g -> d*g*s`, so it belongs to the coset `D*g*S`.
Let us explain the conventions by an example.
Given a tensor `T^{d3 d2 d1}{}_{d1 d2 d3}` with the slot symmetries
`T^{a0 a1 a2 a3 a4 a5} = -T^{a2 a1 a0 a3 a4 a5}`
`T^{a0 a1 a2 a3 a4 a5} = -T^{a4 a1 a2 a3 a0 a5}`
and symmetric metric, find the tensor equivalent to it which
is the lowest under the ordering of indices:
    lexicographic ordering `d1, d2, d3` and then contravariant index
before covariant index; that is the canonical form of the tensor.
The canonical form is `-T^{d1 d2 d3}{}_{d1 d2 d3}`
obtained using `T^{a0 a1 a2 a3 a4 a5} = -T^{a2 a1 a0 a3 a4 a5}`.
To convert this problem in the input for this function,
use the following labelling of the index names
(- for covariant for short) `d1, -d1, d2, -d2, d3, -d3`
`T^{d3 d2 d1}{}_{d1 d2 d3}` corresponds to `g = [4,2,0,1,3,5,6,7]`
where the last two indices are for the sign
`sgens = [Permutation(0,2)(6,7), Permutation(0,4)(6,7)]`
sgens[0] is the slot symmetry `-(0,2)`
`T^{a0 a1 a2 a3 a4 a5} = -T^{a2 a1 a0 a3 a4 a5}`
sgens[1] is the slot symmetry `-(0,4)`
`T^{a0 a1 a2 a3 a4 a5} = -T^{a4 a1 a2 a3 a0 a5}`
The dummy symmetry group D is generated by the strong base generators
`[(0,1),(2,3),(4,5),(0,1)(2,3),(2,3)(4,5)]`
The dummy symmetry acts from the left
`d = [1,0,2,3,4,5,6,7]` exchange `d1 -> -d1`
`T^{d3 d2 d1}{}_{d1 d2 d3} == T^{d3 d2}{}_{d1}{}^{d1}{}_{d2 d3}`
`g=[4,2,0,1,3,5,6,7] -> [4,2,1,0,3,5,6,7] = _af_rmul(d, g)`
which differs from `_af_rmul(g, d)`.
The slot symmetry acts from the right
`s = [2,1,0,3,4,5,7,6]` exchanges slots 0 and 2 and changes sign
`T^{d3 d2 d1}{}_{d1 d2 d3} == -T^{d1 d2 d3}{}_{d1 d2 d3}`
`g=[4,2,0,1,3,5,6,7] -> [0,2,4,1,3,5,7,6] = _af_rmul(g, s)`
Example in which the tensor is zero, same slot symmetries as above:
`T^{d3}{}_{d1,d2}{}^{d1}{}_{d3}{}^{d2}`
`= -T^{d3}{}_{d1,d3}{}^{d1}{}_{d2}{}^{d2}` under slot symmetry `-(2,4)`;
`= T_{d3 d1}{}^{d3}{}^{d1}{}_{d2}{}^{d2}` under slot symmetry `-(0,2)`;
`= T^{d3}{}_{d1 d3}{}^{d1}{}_{d2}{}^{d2}` symmetric metric;
`= 0` since two of these lines have tensors differ only for the sign.
The double coset D*g*S consists of permutations `h = d*g*s` corresponding
to equivalent tensors; if there are two `h` which are the same apart
from the sign, return zero; otherwise
choose as representative the tensor with indices
ordered lexicographically according to `[d1, -d1, d2, -d2, d3, -d3]`
that is `rep = min(D*g*S) = min([d*g*s for d in D for s in S])`
The indices are fixed one by one; first choose the lowest index
for slot 0, then the lowest remaining index for slot 1, etc.
Doing this one obtains a chain of stabilizers
`S -> S_{b0} -> S_{b0,b1} -> ...` and
`D -> D_{p0} -> D_{p0,p1} -> ...`
where `[b0, b1, ...] = range(b)` is a base of the symmetric group;
the strong base `b_S` of S is an ordered sublist of it;
therefore it is sufficient to compute once the
strong base generators of S using the Schreier-Sims algorithm;
the stabilizers of the strong base generators are the
strong base generators of the stabilizer subgroup.
`dbase = [p0,p1,...]` is not in general in lexicographic order,
so that one must recompute the strong base generators each time;
however this is trivial, there is no need to use the Schreier-Sims
algorithm for D.
The algorithm keeps a TAB of elements `(s_i, d_i, h_i)`
where `h_i = d_i*g*s_i` satisfying `h_i[j] = p_j` for `0 <= j < i`
starting from `s_0 = id, d_0 = id, h_0 = g`.
The equations `h_0[0] = p_0, h_1[1] = p_1,...` are solved in this order,
choosing each time the lowest possible value of p_i
For `j < i`
`d_i*g*s_i*S_{b_0,...,b_{i-1}}*b_j = D_{p_0,...,p_{i-1}}*p_j`
so that for dx in `D_{p_0,...,p_{i-1}}` and sx in
`S_{base[0],...,base[i-1]}` one has `dx*d_i*g*s_i*sx*b_j = p_j`
Search for dx, sx such that this equation holds for `j = i`;
it can be written as `s_i*sx*b_j = J, dx*d_i*g*J = p_j`
`sx*b_j = s_i**-1*J; sx = trace(s_i**-1, S_{b_0,...,b_{i-1}})`
`dx**-1*p_j = d_i*g*J; dx = trace(d_i*g*J, D_{p_0,...,p_{i-1}})`
`s_{i+1} = s_i*trace(s_i**-1*J, S_{b_0,...,b_{i-1}})`
`d_{i+1} = trace(d_i*g*J, D_{p_0,...,p_{i-1}})**-1*d_i`
`h_{i+1}*b_i = d_{i+1}*g*s_{i+1}*b_i = p_i`
`h_n*b_j = p_j` for all j, so that `h_n` is the solution.
Add the found `(s, d, h)` to TAB1.
At the end of the iteration sort TAB1 with respect to the `h`;
if there are two consecutive `h` in TAB1 which differ only for the
sign, the tensor is zero, so return 0;
if there are two consecutive `h` which are equal, keep only one.
Then stabilize the slot generators under `i` and the dummy generators
under `p_i`.
Assign `TAB = TAB1` at the end of the iteration step.
At the end `TAB` contains a unique `(s, d, h)`, since all the slots
of the tensor `h` have been fixed to have the minimum value according
to the symmetries. The algorithm returns `h`.
It is important that the slot BSGS has lexicographic minimal base,
otherwise there is an `i` which does not belong to the slot base
for which `p_i` is fixed by the dummy symmetry only, while `i`
is not invariant from the slot stabilizer, so `p_i` is not in
general the minimal value.
This algorithm differs slightly from the original algorithm [3]:
the canonical form is minimal lexicographically, and
the BSGS has minimal base under lexicographic order.
Equal tensors `h` are eliminated from TAB.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.tensor_can import double_coset_can_rep, get_transversals
>>> gens = [Permutation(x) for x in [[2,1,0,3,4,5,7,6], [4,1,2,3,0,5,7,6]]]
>>> base = [0, 2]
>>> g = Permutation([4,2,0,1,3,5,6,7])
>>> transversals = get_transversals(base, gens)
>>> double_coset_can_rep([list(range(6))], [0], base, gens, transversals, g)
[0, 1, 2, 3, 4, 5, 7, 6]
>>> g = Permutation([4,1,3,0,5,2,6,7])
>>> double_coset_can_rep([list(range(6))], [0], base, gens, transversals, g)
0
"""
size = g.size
g = g.array_form
num_dummies = size - 2
indices = list(range(num_dummies))
all_metrics_with_sym = all([_ is not None for _ in sym])
num_types = len(sym)
dumx = dummies[:]
dumx_flat = []
for dx in dumx:
dumx_flat.extend(dx)
b_S = b_S[:]
sgensx = [h._array_form for h in sgens]
if b_S:
S_transversals = transversal2coset(size, b_S, S_transversals)
# strong generating set for D
dsgsx = []
for i in range(num_types):
dsgsx.extend(dummy_sgs(dumx[i], sym[i], num_dummies))
ginv = _af_invert(g)
idn = list(range(size))
# TAB = list of entries (s, d, h) where h = _af_rmuln(d,g,s)
# for short, in the following d*g*s means _af_rmuln(d,g,s)
TAB = [(idn, idn, g)]
for i in range(size - 2):
b = i
testb = b in b_S and sgensx
if testb:
sgensx1 = [_af_new(_) for _ in sgensx]
deltab = _orbit(size, sgensx1, b)
else:
deltab = set([b])
# p1 = min(IMAGES) = min(Union D_p*h*deltab for h in TAB)
if all_metrics_with_sym:
md = _min_dummies(dumx, sym, indices)
else:
md = [min(_orbit(size, [_af_new(
ddx) for ddx in dsgsx], ii)) for ii in range(size - 2)]
p_i = min([min([md[h[x]] for x in deltab]) for s, d, h in TAB])
dsgsx1 = [_af_new(_) for _ in dsgsx]
Dxtrav = _orbit_transversal(size, dsgsx1, p_i, False, af=True) \
if dsgsx else None
if Dxtrav:
Dxtrav = [_af_invert(x) for x in Dxtrav]
# compute the orbit of p_i
for ii in range(num_types):
if p_i in dumx[ii]:
# the orbit is made by all the indices in dum[ii]
if sym[ii] is not None:
deltap = dumx[ii]
else:
# the orbit is made by all the even indices if p_i
# is even, by all the odd indices if p_i is odd
p_i_index = dumx[ii].index(p_i) % 2
deltap = dumx[ii][p_i_index::2]
break
else:
deltap = [p_i]
TAB1 = []
nTAB = len(TAB)
while TAB:
s, d, h = TAB.pop()
if min([md[h[x]] for x in deltab]) != p_i:
continue
deltab1 = [x for x in deltab if md[h[x]] == p_i]
# NEXT = s*deltab1 intersection (d*g)**-1*deltap
dg = _af_rmul(d, g)
dginv = _af_invert(dg)
sdeltab = [s[x] for x in deltab1]
gdeltap = [dginv[x] for x in deltap]
NEXT = [x for x in sdeltab if x in gdeltap]
# d, s satisfy
# d*g*s*base[i-1] = p_{i-1}; using the stabilizers
# d*g*s*S_{base[0],...,base[i-1]}*base[i-1] =
# D_{p_0,...,p_{i-1}}*p_{i-1}
# so that to find d1, s1 satisfying d1*g*s1*b = p_i
# one can look for dx in D_{p_0,...,p_{i-1}} and
# sx in S_{base[0],...,base[i-1]}
# d1 = dx*d; s1 = s*sx
# d1*g*s1*b = dx*d*g*s*sx*b = p_i
for j in NEXT:
if testb:
# solve s1*b = j with s1 = s*sx for some element sx
# of the stabilizer of ..., base[i-1]
# sx*b = s**-1*j; sx = _trace_S(s, j,...)
# s1 = s*trace_S(s**-1*j,...)
s1 = _trace_S(s, j, b, S_transversals)
if not s1:
continue
else:
s1 = [s[ix] for ix in s1]
else:
s1 = s
#assert s1[b] == j # invariant
# solve d1*g*j = p_i with d1 = dx*d for some element dg
# of the stabilizer of ..., p_{i-1}
# dx**-1*p_i = d*g*j; dx**-1 = trace_D(d*g*j,...)
# d1 = trace_D(d*g*j,...)**-1*d
# to save an inversion in the inner loop; notice we did
# Dxtrav = [perm_af_invert(x) for x in Dxtrav] out of the loop
if Dxtrav:
d1 = _trace_D(dg[j], p_i, Dxtrav)
if not d1:
continue
else:
if p_i != dg[j]:
continue
d1 = idn
assert d1[dg[j]] == p_i # invariant
d1 = [d1[ix] for ix in d]
h1 = [d1[g[ix]] for ix in s1]
#assert h1[b] == p_i # invariant
TAB1.append((s1, d1, h1))
# if TAB contains equal permutations, keep only one of them;
# if TAB contains equal permutations up to the sign, return 0
TAB1.sort(key=lambda x: x[-1])
nTAB1 = len(TAB1)
prev = [0] * size
while TAB1:
s, d, h = TAB1.pop()
if h[:-2] == prev[:-2]:
if h[-1] != prev[-1]:
return 0
else:
TAB.append((s, d, h))
prev = h
# stabilize the SGS
sgensx = [h for h in sgensx if h[b] == b]
if b in b_S:
b_S.remove(b)
_dumx_remove(dumx, dumx_flat, p_i)
dsgsx = []
for i in range(num_types):
dsgsx.extend(dummy_sgs(dumx[i], sym[i], num_dummies))
return TAB[0][-1]
def canonical_free(base, gens, g, num_free):
"""
canonicalization of a tensor with respect to free indices
choosing the minimum with respect to lexicographical ordering
in the free indices
``base``, ``gens`` BSGS for slot permutation group
``g`` permutation representing the tensor
``num_free`` number of free indices
The indices must be ordered with first the free indices
see explanation in double_coset_can_rep
The algorithm is a variation of the one given in [2].
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.tensor_can import canonical_free
>>> gens = [[1,0,2,3,5,4], [2,3,0,1,4,5],[0,1,3,2,5,4]]
>>> gens = [Permutation(h) for h in gens]
>>> base = [0, 2]
>>> g = Permutation([2, 1, 0, 3, 4, 5])
>>> canonical_free(base, gens, g, 4)
[0, 3, 1, 2, 5, 4]
Consider the product of Riemann tensors
``T = R^{a}_{d0}^{d1,d2}*R_{d2,d1}^{d0,b}``
The order of the indices is ``[a,b,d0,-d0,d1,-d1,d2,-d2]``
The permutation corresponding to the tensor is
``g = [0,3,4,6,7,5,2,1,8,9]``
In particular ``a`` is position ``0``, ``b`` is in position ``9``.
    Use the slot symmetries to get `T` in a form which is minimal
in lexicographic order in the free indices ``a`` and ``b``, e.g.
``-R^{a}_{d0}^{d1,d2}*R^{b,d0}_{d2,d1}`` corresponding to
``[0, 3, 4, 6, 1, 2, 7, 5, 9, 8]``
>>> from sympy.combinatorics.tensor_can import riemann_bsgs, tensor_gens
>>> base, gens = riemann_bsgs
>>> size, sbase, sgens = tensor_gens(base, gens, [[],[]], 0)
>>> g = Permutation([0,3,4,6,7,5,2,1,8,9])
>>> canonical_free(sbase, [Permutation(h) for h in sgens], g, 2)
[0, 3, 4, 6, 1, 2, 7, 5, 9, 8]
"""
g = g.array_form
size = len(g)
if not base:
return g[:]
transversals = get_transversals(base, gens)
m = len(base)
for x in sorted(g[:-2]):
if x not in base:
base.append(x)
h = g
for i, transv in enumerate(transversals):
b = base[i]
h_i = [size]*num_free
# find the element s in transversals[i] such that
# _af_rmul(h, s) has its free elements with the lowest position in h
s = None
for sk in transv.values():
h1 = _af_rmul(h, sk)
hi = [h1.index(ix) for ix in range(num_free)]
if hi < h_i:
h_i = hi
s = sk
if s:
h = _af_rmul(h, s)
return h
def _get_map_slots(size, fixed_slots):
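    # Map every slot that is not fixed to its index among the non-fixed slots;
    # entries for fixed slots are left as-is and are not used by the callers.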
res = list(range(size))
pos = 0
for i in range(size):
if i in fixed_slots:
continue
res[i] = pos
pos += 1
return res
def _lift_sgens(size, fixed_slots, free, s):
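    # Rebuild a permutation of the full size from the reduced permutation ``s``:
    # the free indices are put back into their fixed slots and the remaining
    # entries of ``s`` are shifted up by the number of free indices.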
a = []
j = k = 0
fd = list(zip(fixed_slots, free))
fd = [y for x, y in sorted(fd)]
num_free = len(free)
for i in range(size):
if i in fixed_slots:
a.append(fd[k])
k += 1
else:
a.append(s[j] + num_free)
j += 1
return a
def canonicalize(g, dummies, msym, *v):
"""
canonicalize tensor formed by tensors
Parameters
==========
g : permutation representing the tensor
dummies : list representing the dummy indices
it can be a list of dummy indices of the same type
or a list of lists of dummy indices, one list for each
type of index;
the dummy indices must come after the free indices,
and put in order contravariant, covariant
[d0, -d0, d1,-d1,...]
msym : symmetry of the metric(s)
it can be an integer or a list;
in the first case it is the symmetry of the dummy index metric;
in the second case it is the list of the symmetries of the
index metric for each type
v : list, (base_i, gens_i, n_i, sym_i) for tensors of type `i`
base_i, gens_i : BSGS for tensors of this type.
The BSGS should have minimal base under lexicographic ordering;
        if not, an attempt is made to get the minimal BSGS;
in case of failure,
canonicalize_naive is used, which is much slower.
n_i : number of tensors of type `i`.
sym_i : symmetry under exchange of component tensors of type `i`.
Both for msym and sym_i the cases are
* None no symmetry
* 0 commuting
* 1 anticommuting
Returns
=======
0 if the tensor is zero, else return the array form of
the permutation representing the canonical form of the tensor.
Algorithm
=========
First one uses canonical_free to get the minimum tensor under
lexicographic order, using only the slot symmetries.
    If the component tensors do not have a minimal BSGS, an attempt is made
    to find one; if the attempt fails, canonicalize_naive
is used instead.
Compute the residual slot symmetry keeping fixed the free indices
using tensor_gens(base, gens, list_free_indices, sym).
Reduce the problem eliminating the free indices.
Then use double_coset_can_rep and lift back the result reintroducing
the free indices.
Examples
========
one type of index with commuting metric;
`A_{a b}` and `B_{a b}` antisymmetric and commuting
`T = A_{d0 d1} * B^{d0}{}_{d2} * B^{d2 d1}`
`ord = [d0,-d0,d1,-d1,d2,-d2]` order of the indices
g = [1,3,0,5,4,2,6,7]
`T_c = 0`
>>> from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, canonicalize, bsgs_direct_product
>>> from sympy.combinatorics import Permutation
>>> base2a, gens2a = get_symmetric_group_sgs(2, 1)
>>> t0 = (base2a, gens2a, 1, 0)
>>> t1 = (base2a, gens2a, 2, 0)
>>> g = Permutation([1,3,0,5,4,2,6,7])
>>> canonicalize(g, range(6), 0, t0, t1)
0
same as above, but with `B_{a b}` anticommuting
`T_c = -A^{d0 d1} * B_{d0}{}^{d2} * B_{d1 d2}`
can = [0,2,1,4,3,5,7,6]
>>> t1 = (base2a, gens2a, 2, 1)
>>> canonicalize(g, range(6), 0, t0, t1)
[0, 2, 1, 4, 3, 5, 7, 6]
two types of indices `[a,b,c,d,e,f]` and `[m,n]`, in this order,
both with commuting metric
`f^{a b c}` antisymmetric, commuting
`A_{m a}` no symmetry, commuting
`T = f^c{}_{d a} * f^f{}_{e b} * A_m{}^d * A^{m b} * A_n{}^a * A^{n e}`
ord = [c,f,a,-a,b,-b,d,-d,e,-e,m,-m,n,-n]
g = [0,7,3, 1,9,5, 11,6, 10,4, 13,2, 12,8, 14,15]
The canonical tensor is
`T_c = -f^{c a b} * f^{f d e} * A^m{}_a * A_{m d} * A^n{}_b * A_{n e}`
can = [0,2,4, 1,6,8, 10,3, 11,7, 12,5, 13,9, 15,14]
>>> base_f, gens_f = get_symmetric_group_sgs(3, 1)
>>> base1, gens1 = get_symmetric_group_sgs(1)
>>> base_A, gens_A = bsgs_direct_product(base1, gens1, base1, gens1)
>>> t0 = (base_f, gens_f, 2, 0)
>>> t1 = (base_A, gens_A, 4, 0)
>>> dummies = [range(2, 10), range(10, 14)]
>>> g = Permutation([0,7,3,1,9,5,11,6,10,4,13,2,12,8,14,15])
>>> canonicalize(g, dummies, [0, 0], t0, t1)
[0, 2, 4, 1, 6, 8, 10, 3, 11, 7, 12, 5, 13, 9, 15, 14]
"""
from sympy.combinatorics.testutil import canonicalize_naive
if not isinstance(msym, list):
if not msym in [0, 1, None]:
raise ValueError('msym must be 0, 1 or None')
num_types = 1
else:
num_types = len(msym)
if not all(msymx in [0, 1, None] for msymx in msym):
raise ValueError('msym entries must be 0, 1 or None')
if len(dummies) != num_types:
raise ValueError(
'dummies and msym must have the same number of elements')
size = g.size
num_tensors = 0
v1 = []
for i in range(len(v)):
base_i, gens_i, n_i, sym_i = v[i]
# check that the BSGS is minimal;
# this property is used in double_coset_can_rep;
# if it is not minimal use canonicalize_naive
if not _is_minimal_bsgs(base_i, gens_i):
mbsgs = get_minimal_bsgs(base_i, gens_i)
if not mbsgs:
can = canonicalize_naive(g, dummies, msym, *v)
return can
base_i, gens_i = mbsgs
v1.append((base_i, gens_i, [[]] * n_i, sym_i))
num_tensors += n_i
if num_types == 1 and not isinstance(msym, list):
dummies = [dummies]
msym = [msym]
flat_dummies = []
for dumx in dummies:
flat_dummies.extend(dumx)
if flat_dummies and flat_dummies != list(range(flat_dummies[0], flat_dummies[-1] + 1)):
raise ValueError('dummies is not valid')
# slot symmetry of the tensor
size1, sbase, sgens = gens_products(*v1)
if size != size1:
raise ValueError(
'g has size %d, generators have size %d' % (size, size1))
free = [i for i in range(size - 2) if i not in flat_dummies]
num_free = len(free)
# g1 minimal tensor under slot symmetry
g1 = canonical_free(sbase, sgens, g, num_free)
if not flat_dummies:
return g1
# save the sign of g1
sign = 0 if g1[-1] == size - 1 else 1
# the free indices are kept fixed.
# Determine free_i, the list of slots of tensors which are fixed
# since they are occupied by free indices, which are fixed.
start = 0
for i in range(len(v)):
free_i = []
base_i, gens_i, n_i, sym_i = v[i]
len_tens = gens_i[0].size - 2
        # for each component tensor get a list of fixed slots
for j in range(n_i):
# get the elements corresponding to the component tensor
h = g1[start:(start + len_tens)]
fr = []
# get the positions of the fixed elements in h
for k in free:
if k in h:
fr.append(h.index(k))
free_i.append(fr)
start += len_tens
v1[i] = (base_i, gens_i, free_i, sym_i)
# BSGS of the tensor with fixed free indices
# if tensor_gens fails in gens_product, use canonicalize_naive
size, sbase, sgens = gens_products(*v1)
# reduce the permutations getting rid of the free indices
pos_dummies = [g1.index(x) for x in flat_dummies]
pos_free = [g1.index(x) for x in range(num_free)]
size_red = size - num_free
g1_red = [x - num_free for x in g1 if x in flat_dummies]
if sign:
g1_red.extend([size_red - 1, size_red - 2])
else:
g1_red.extend([size_red - 2, size_red - 1])
map_slots = _get_map_slots(size, pos_free)
sbase_red = [map_slots[i] for i in sbase if i not in pos_free]
sgens_red = [_af_new([map_slots[i] for i in y._array_form if i not in pos_free]) for y in sgens]
dummies_red = [[x - num_free for x in y] for y in dummies]
transv_red = get_transversals(sbase_red, sgens_red)
g1_red = _af_new(g1_red)
g2 = double_coset_can_rep(
dummies_red, msym, sbase_red, sgens_red, transv_red, g1_red)
if g2 == 0:
return 0
# lift to the case with the free indices
g3 = _lift_sgens(size, pos_free, free, g2)
return g3
def perm_af_direct_product(gens1, gens2, signed=True):
"""
direct products of the generators gens1 and gens2
Examples
========
>>> from sympy.combinatorics.tensor_can import perm_af_direct_product
>>> gens1 = [[1,0,2,3], [0,1,3,2]]
>>> gens2 = [[1,0]]
>>> perm_af_direct_product(gens1, gens2, False)
[[1, 0, 2, 3, 4, 5], [0, 1, 3, 2, 4, 5], [0, 1, 2, 3, 5, 4]]
>>> gens1 = [[1,0,2,3,5,4], [0,1,3,2,4,5]]
>>> gens2 = [[1,0,2,3]]
>>> perm_af_direct_product(gens1, gens2, True)
[[1, 0, 2, 3, 4, 5, 7, 6], [0, 1, 3, 2, 4, 5, 6, 7], [0, 1, 2, 3, 5, 4, 6, 7]]
"""
gens1 = [list(x) for x in gens1]
gens2 = [list(x) for x in gens2]
s = 2 if signed else 0
n1 = len(gens1[0]) - s
n2 = len(gens2[0]) - s
start = list(range(n1))
end = list(range(n1, n1 + n2))
if signed:
gens1 = [gen[:-2] + end + [gen[-2] + n2, gen[-1] + n2]
for gen in gens1]
gens2 = [start + [x + n1 for x in gen] for gen in gens2]
else:
gens1 = [gen + end for gen in gens1]
gens2 = [start + [x + n1 for x in gen] for gen in gens2]
res = gens1 + gens2
return res
def bsgs_direct_product(base1, gens1, base2, gens2, signed=True):
"""
direct product of two BSGS
base1 base of the first BSGS.
gens1 strong generating sequence of the first BSGS.
base2, gens2 similarly for the second BSGS.
signed flag for signed permutations.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.tensor_can import (get_symmetric_group_sgs, bsgs_direct_product)
>>> Permutation.print_cyclic = True
>>> base1, gens1 = get_symmetric_group_sgs(1)
>>> base2, gens2 = get_symmetric_group_sgs(2)
>>> bsgs_direct_product(base1, gens1, base2, gens2)
([1], [Permutation(4)(1, 2)])
"""
s = 2 if signed else 0
n1 = gens1[0].size - s
base = list(base1)
base += [x + n1 for x in base2]
gens1 = [h._array_form for h in gens1]
gens2 = [h._array_form for h in gens2]
gens = perm_af_direct_product(gens1, gens2, signed)
size = len(gens[0])
id_af = list(range(size))
gens = [h for h in gens if h != id_af]
if not gens:
gens = [id_af]
return base, [_af_new(h) for h in gens]
def get_symmetric_group_sgs(n, antisym=False):
"""
Return base, gens of the minimal BSGS for (anti)symmetric tensor
``n`` rank of the tensor
``antisym = False`` symmetric tensor
``antisym = True`` antisymmetric tensor
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.tensor_can import get_symmetric_group_sgs
>>> Permutation.print_cyclic = True
>>> get_symmetric_group_sgs(3)
([0, 1], [Permutation(4)(0, 1), Permutation(4)(1, 2)])
"""
if n == 1:
return [], [_af_new(list(range(3)))]
gens = [Permutation(n - 1)(i, i + 1)._array_form for i in range(n - 1)]
if antisym == 0:
gens = [x + [n, n + 1] for x in gens]
else:
gens = [x + [n + 1, n] for x in gens]
base = list(range(n - 1))
return base, [_af_new(h) for h in gens]
riemann_bsgs = [0, 2], [Permutation(0, 1)(4, 5), Permutation(2, 3)(4, 5),
Permutation(5)(0, 2)(1, 3)]
def get_transversals(base, gens):
"""
Return transversals for the group with BSGS base, gens
"""
if not base:
return []
stabs = _distribute_gens_by_base(base, gens)
orbits, transversals = _orbits_transversals_from_bsgs(base, stabs)
transversals = [dict((x, h._array_form) for x, h in y.items()) for y in
transversals]
return transversals
def _is_minimal_bsgs(base, gens):
"""
    Check if the BSGS has minimal base under lexicographic order.
base, gens BSGS
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.tensor_can import riemann_bsgs, _is_minimal_bsgs
>>> _is_minimal_bsgs(*riemann_bsgs)
True
>>> riemann_bsgs1 = ([2, 0], ([Permutation(5)(0,1)(4,5), Permutation(5)(0,2)(1,3)]))
>>> _is_minimal_bsgs(*riemann_bsgs1)
False
"""
base1 = []
sgs1 = gens[:]
size = gens[0].size
for i in range(size):
if not all(h._array_form[i] == i for h in sgs1):
base1.append(i)
sgs1 = [h for h in sgs1 if h._array_form[i] == i]
return base1 == base
def get_minimal_bsgs(base, gens):
"""
    Compute a minimal BSGS
base, gens BSGS
If base, gens is a minimal BSGS return it; else return a minimal BSGS
if it fails in finding one, it returns None
    TODO: use baseswap in the case in which it fails in finding a
minimal BSGS
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.tensor_can import get_minimal_bsgs
>>> Permutation.print_cyclic = True
>>> riemann_bsgs1 = ([2, 0], ([Permutation(5)(0,1)(4,5), Permutation(5)(0,2)(1,3)]))
>>> get_minimal_bsgs(*riemann_bsgs1)
([0, 2], [Permutation(0, 1)(4, 5), Permutation(5)(0, 2)(1, 3), Permutation(2, 3)(4, 5)])
"""
G = PermutationGroup(gens)
base, gens = G.schreier_sims_incremental()
if not _is_minimal_bsgs(base, gens):
return None
return base, gens
def tensor_gens(base, gens, list_free_indices, sym=0):
"""
Returns size, res_base, res_gens BSGS for n tensors of the same type
base, gens BSGS for tensors of this type
list_free_indices list of the slots occupied by fixed indices
for each of the tensors
sym symmetry under commutation of two tensors
sym None no symmetry
sym 0 commuting
sym 1 anticommuting
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.tensor_can import tensor_gens, get_symmetric_group_sgs
>>> Permutation.print_cyclic = True
two symmetric tensors with 3 indices without free indices
>>> base, gens = get_symmetric_group_sgs(3)
>>> tensor_gens(base, gens, [[], []])
(8, [0, 1, 3, 4], [Permutation(7)(0, 1), Permutation(7)(1, 2), Permutation(7)(3, 4), Permutation(7)(4, 5), Permutation(7)(0, 3)(1, 4)(2, 5)])
two symmetric tensors with 3 indices with free indices in slot 1 and 0
>>> tensor_gens(base, gens, [[1],[0]])
(8, [0, 4], [Permutation(7)(0, 2), Permutation(7)(4, 5)])
four symmetric tensors with 3 indices, two of which with free indices
"""
def _get_bsgs(G, base, gens, free_indices):
"""
return the BSGS for G.pointwise_stabilizer(free_indices)
"""
if not free_indices:
return base[:], gens[:]
else:
H = G.pointwise_stabilizer(free_indices)
base, sgs = H.schreier_sims_incremental()
return base, sgs
# if not base there is no slot symmetry for the component tensors
# if list_free_indices.count([]) < 2 there is no commutation symmetry
# so there is no resulting slot symmetry
if not base and list_free_indices.count([]) < 2:
n = len(list_free_indices)
size = gens[0].size
size = n * (gens[0].size - 2) + 2
return size, [], [_af_new(list(range(size)))]
# if any(list_free_indices) one needs to compute the pointwise
# stabilizer, so G is needed
if any(list_free_indices):
G = PermutationGroup(gens)
else:
G = None
# no_free list of lists of indices for component tensors without fixed
# indices
no_free = []
size = gens[0].size
id_af = list(range(size))
num_indices = size - 2
if not list_free_indices[0]:
no_free.append(list(range(num_indices)))
res_base, res_gens = _get_bsgs(G, base, gens, list_free_indices[0])
for i in range(1, len(list_free_indices)):
base1, gens1 = _get_bsgs(G, base, gens, list_free_indices[i])
res_base, res_gens = bsgs_direct_product(res_base, res_gens,
base1, gens1, 1)
if not list_free_indices[i]:
no_free.append(list(range(size - 2, size - 2 + num_indices)))
size += num_indices
nr = size - 2
res_gens = [h for h in res_gens if h._array_form != id_af]
# if sym there are no commuting tensors stop here
if sym is None or not no_free:
if not res_gens:
res_gens = [_af_new(id_af)]
return size, res_base, res_gens
    # if the component tensors have a minimal BSGS, so does their direct
# product P; the slot symmetry group is S = P*C, where C is the group
# to (anti)commute the component tensors with no free indices
# a stabilizer has the property S_i = P_i*C_i;
# the BSGS of P*C has SGS_P + SGS_C and the base is
# the ordered union of the bases of P and C.
# If P has minimal BSGS, so has S with this base.
base_comm = []
for i in range(len(no_free) - 1):
ind1 = no_free[i]
ind2 = no_free[i + 1]
a = list(range(ind1[0]))
a.extend(ind2)
a.extend(ind1)
base_comm.append(ind1[0])
a.extend(list(range(ind2[-1] + 1, nr)))
if sym == 0:
a.extend([nr, nr + 1])
else:
a.extend([nr + 1, nr])
res_gens.append(_af_new(a))
res_base = list(res_base)
# each base is ordered; order the union of the two bases
for i in base_comm:
if i not in res_base:
res_base.append(i)
res_base.sort()
if not res_gens:
res_gens = [_af_new(id_af)]
return size, res_base, res_gens
def gens_products(*v):
"""
Returns size, res_base, res_gens BSGS for n tensors of different types
v is a sequence of (base_i, gens_i, free_i, sym_i)
where
base_i, gens_i BSGS of tensor of type `i`
free_i list of the fixed slots for each of the tensors
of type `i`; if there are `n_i` tensors of type `i`
and none of them have fixed slots, `free = [[]]*n_i`
sym 0 (1) if the tensors of type `i` (anti)commute among themselves
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, gens_products
>>> Permutation.print_cyclic = True
>>> base, gens = get_symmetric_group_sgs(2)
>>> gens_products((base,gens,[[],[]],0))
(6, [0, 2], [Permutation(5)(0, 1), Permutation(5)(2, 3), Permutation(5)(0, 2)(1, 3)])
>>> gens_products((base,gens,[[1],[]],0))
(6, [2], [Permutation(5)(2, 3)])
"""
res_size, res_base, res_gens = tensor_gens(*v[0])
for i in range(1, len(v)):
size, base, gens = tensor_gens(*v[i])
res_base, res_gens = bsgs_direct_product(res_base, res_gens, base,
gens, 1)
res_size = res_gens[0].size
id_af = list(range(res_size))
res_gens = [h for h in res_gens if h != id_af]
if not res_gens:
res_gens = [id_af]
return res_size, res_base, res_gens
| bsd-3-clause | -114,056,885,535,940,300 | 33.586993 | 145 | 0.562306 | false |
danny200309/anaconda | anaconda_lib/linting/anaconda_mccabe.py | 9 | 1577 | # -*- coding: utf8 -*-
# Copyright (C) 2013 - Oscar Campos <[email protected]>
# This program is Free Software see LICENSE file for details
"""
Anaconda McCabe
"""
import ast
from .mccabe import McCabeChecker
class AnacondaMcCabe(object):
"""Wrapper object around McCabe python script
"""
checker = McCabeChecker
def __init__(self, code, filename):
self.code = code
self.filename = filename
@property
def tree(self):
"""Compile and send back an AST if buffer is able to be parsed
"""
try:
code = self.code.encode('utf8') + b'\n'
return compile(code, self.filename, 'exec', ast.PyCF_ONLY_AST)
except SyntaxError:
return None
def get_code_complexity(self, threshold=7):
"""Get the code complexity for the current buffer and return it
"""
if self.tree is not None:
self.checker.max_complexity = threshold
return self.parse(self.checker(self.tree, self.filename).run())
return None
def parse(self, complexities):
"""
Parse the given list of complexities to something that anaconda
        understands and is able to handle
"""
errors = []
for complexity in complexities:
errors.append({
'line': int(complexity[0]),
'offset': int(complexity[1] + 1),
'code': complexity[2].split(' ', 1)[0],
'message': complexity[2].split(' ', 1)[1]
})
return errors
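# Minimal usage sketch (illustrative; ``source`` stands for the buffer text):
#
#   checker = AnacondaMcCabe(source, 'buffer.py')
#   issues = checker.get_code_complexity(threshold=7)
#
# ``issues`` is a list of dicts with 'line', 'offset', 'code' and 'message'
# keys, or None when the buffer cannot be parsed.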
| gpl-3.0 | 8,689,864,748,901,092,000 | 24.852459 | 75 | 0.574509 | false |
linjk/mysql-5.6 | xtrabackup/test/kewpie/percona_tests/xtrabackup_disabled/bug606981_test.py | 19 | 5429 | #! /usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*-
# vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
#
# Copyright (C) 2011 Patrick Crews
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import shutil
import tarfile
from lib.util.mysqlBaseTestCase import mysqlBaseTestCase
server_requirements = [[]]
servers = []
server_manager = None
test_executor = None
# we explicitly use the --no-timestamp option
# here. We will be using a generic / vanilla backup dir
backup_path = None
class basicTest(mysqlBaseTestCase):
def setUp(self):
master_server = servers[0] # assumption that this is 'master'
backup_path = os.path.join(master_server.vardir, '_xtrabackup')
# remove backup paths
for del_path in [backup_path]:
if os.path.exists(del_path):
shutil.rmtree(del_path)
def test_ib_stream(self):
self.servers = servers
innobackupex = test_executor.system_manager.innobackupex_path
xtrabackup = test_executor.system_manager.xtrabackup_path
master_server = servers[0] # assumption that this is 'master'
backup_path = os.path.join(master_server.vardir, '_xtrabackup')
tar_file_path = os.path.join(backup_path,'out.tar')
output_path = os.path.join(master_server.vardir, 'innobackupex.out')
exec_path = os.path.dirname(innobackupex)
# populate our server with a test bed
test_cmd = "./gentest.pl --gendata=conf/percona/percona.zz"
retcode, output = self.execute_randgen(test_cmd, test_executor, master_server)
# Add desired option to config file
config_file = open(master_server.cnf_file,'a')
config_file.write("innodb_flush_method=O_DIRECT\n")
config_file.close()
# take a backup
try:
os.mkdir(backup_path)
except OSError:
pass
cmd = [ innobackupex
, "--defaults-file=%s" %master_server.cnf_file
, "--stream=tar"
, "--user=root"
, "--port=%d" %master_server.master_port
, "--host=127.0.0.1"
, "--no-timestamp"
, "--ibbackup=%s" %xtrabackup
, "%s > %s" %(backup_path,tar_file_path)
]
cmd = " ".join(cmd)
retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
self.assertTrue(retcode==0,output)
expected_output = "xtrabackup: using O_DIRECT"
self.assertTrue(expected_output in output, msg=output)
# stop the server
master_server.stop()
# extract our backup tarball
cmd = "tar -ivxf %s" %tar_file_path
retcode, output = self.execute_cmd(cmd, output_path, backup_path, True)
self.assertEqual(retcode,0,output)
# Check for Bug 723318 - seems quicker than separate test case
self.assertTrue('xtrabackup_binary' in os.listdir(backup_path)
, msg = "Bug723318: xtrabackup_binary not included in tar archive when streaming")
# do prepare on backup
cmd = [ innobackupex
, "--apply-log"
, "--no-timestamp"
, "--use-memory=500M"
, "--ibbackup=%s" %xtrabackup
, backup_path
]
cmd = " ".join(cmd)
retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
self.assertEqual(retcode,0,output)
# remove old datadir
shutil.rmtree(master_server.datadir)
os.mkdir(master_server.datadir)
# restore from backup
cmd = [ innobackupex
, "--defaults-file=%s" %master_server.cnf_file
, "--copy-back"
, "--ibbackup=%s" %(xtrabackup)
, backup_path
]
cmd = " ".join(cmd)
retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
self.assertEqual(retcode,0, output)
# restart server (and ensure it doesn't crash)
master_server.start()
self.assertEqual(master_server.status,1, 'Server failed restart from restored datadir...')
# Check the server is ok
query = "SELECT COUNT(*) FROM test.DD"
expected_output = ((100L,),)
retcode, output = self.execute_query(query, master_server)
self.assertEqual(output, expected_output, msg = "%s || %s" %(output, expected_output))
| gpl-2.0 | -9,108,359,535,533,477,000 | 39.514925 | 110 | 0.577639 | false |
sfam/home-assistant | homeassistant/components/device_tracker/demo.py | 9 | 1257 | """
homeassistant.components.device_tracker.demo
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Demo platform for the device tracker.
device_tracker:
platform: demo
"""
import random
from homeassistant.components.device_tracker import DOMAIN
def setup_scanner(hass, config, see):
""" Set up a demo tracker. """
def offset():
""" Return random offset. """
return (random.randrange(500, 2000)) / 2e5 * random.choice((-1, 1))
def random_see(dev_id, name):
""" Randomize a sighting. """
see(
dev_id=dev_id,
host_name=name,
gps=(hass.config.latitude + offset(),
hass.config.longitude + offset()),
gps_accuracy=random.randrange(50, 150),
battery=random.randrange(10, 90)
)
def observe(call=None):
""" Observe three entities. """
random_see('demo_paulus', 'Paulus')
random_see('demo_anne_therese', 'Anne Therese')
observe()
see(
dev_id='demo_home_boy',
host_name='Home Boy',
gps=[hass.config.latitude - 0.00002, hass.config.longitude + 0.00002],
gps_accuracy=20,
battery=53
)
hass.services.register(DOMAIN, 'demo', observe)
return True
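# Note (illustrative, not part of the original component): once set up, the
# demo sightings can be refreshed by invoking the registered service, e.g.
#   hass.services.call('device_tracker', 'demo')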
| mit | -884,261,330,498,163,300 | 24.653061 | 78 | 0.564837 | false |
OpenSTC-Eleger/stc-achats | account.py | 1 | 6485 | # -*- coding: utf-8 -*-
##############################################################################
# Copyright (C) 2012 SICLIC http://siclic.fr
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
#############################################################################
from osv import osv, fields
from tools.translate import _
from openbase.openbase_core import OpenbaseCore
#class account_analytic_account(OpenbaseCore):
# _inherit = "account.analytic.account"
# _name = "account.analytic.account"
#
# _columns = {
# 'code_antenne':fields.char('Antenne Code', size=16, help='Antenne code from CIRIL instance'),
# }
#
#account_analytic_account()
class account_invoice_line(OpenbaseCore):
_inherit = "account.invoice.line"
_name = "account.invoice.line"
_columns = {
'merge_line_ids':fields.one2many('openstc.merge.line.ask', 'invoice_line_id','Regroupement des Besoins')
}
def _check_qte_merge_qte_invoice(self, cr, uid, ids, context=None):
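        # Constraint helper: the invoiced quantity must cover the summed
        # remaining quantity of the merge lines linked to this invoice line,
        # and every linked merge line must reference the same product.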
for line in self.browse(cr, uid, ids, context):
qte_ref = line.quantity
qte = 0
for merge_line in line.merge_line_ids:
if not merge_line.product_id or merge_line.product_id.id == line.product_id.id:
qte += merge_line.qty_remaining
else:
                    raise osv.except_osv(_('Error'),_('you have associated merge lines that do not relate to the order line product'))
if qte_ref < qte:
return False
else:
return True
return False
_constraints = [(_check_qte_merge_qte_invoice,_('Error, product qty is lower than product qty of summed merge lines you have associated to this order line'),['product_id','merge_line_ids'])]
account_invoice_line()
class account_invoice(OpenbaseCore):
_inherit = "account.invoice"
_name = "account.invoice"
_columns = {
}
    # For the merge lines, record the move_line associated with the invoice line
def action_number(self, cr, uid, ids, context=None):
if super(account_invoice, self).action_number(cr, uid, ids, context):
move_line_obj = self.pool.get("account.move.line")
for inv in self.browse(cr, uid, ids, context):
analytic_lines_by_prod = {}
                # Link each product to the move_line it is associated with
for move_line in inv.move_id.line_id:
                    # TOCHECK: only take the debit move_lines? (corresponding to the purchase?)
if move_line.debit > 0.0:
analytic_lines_by_prod.update({move_line.product_id.id:move_line.id})
                # Then, for each invoice line of the current record, link the move_id to the associated merge lines
for line in inv.invoice_line:
if line.product_id.id in analytic_lines_by_prod:
                        # If a need is referenced by the invoice, link it to the move_line; otherwise it is a regular move_line entry
if line.merge_line_ids:
move_line_obj.write(cr, uid, analytic_lines_by_prod[line.product_id.id], {'merge_line_ids':[(4,x.id) for x in line.merge_line_ids]}, context)
return True
return False
account_invoice()
class account_move_line(OpenbaseCore):
_inherit = "account.move.line"
_name = "account.move.line"
_columns = {
'merge_line_ids':fields.one2many('openstc.merge.line.ask','move_line_id','Besoins en Fournitures associés'),
}
def _check_prod(self, cr, uid, ids, context=None):
for move_line in self.browse(cr, uid, ids, context):
for merge_line in move_line.merge_line_ids:
if not merge_line.product_id or merge_line.product_id.id <> move_line.product_id.id:
return False
return True
return True
    _constraints = [(_check_prod,_('Error, all merge lines associated to this move line do not match the same product as this move line'),['product_id','merge_line_ids'])]
account_move_line()
class account_tax(OpenbaseCore):
_inherit = "account.tax"
_name = "account.tax"
_columns = {
        'code_tax_ciril':fields.char('Ciril Tax Code', size=8, help="this field refers to the Tax Code from the Ciril instance"),
}
account_tax()
class account_analytic_account(OpenbaseCore):
_inherit = "account.analytic.account"
_actions = {
'delete': lambda self,cr,uid,record,groups_code: not record.crossovered_budget_line,
'update': lambda self,cr,uid,record,groups_code: True
}
account_analytic_account()
class account_account(OpenbaseCore):
_inherit = "account.account"
def _get_complete_name(self, cr, uid, ids, name, args, context=None):
ret = {}.fromkeys(ids, '')
for account in self.browse(cr, uid, ids, context=context):
ret[account.id] = account.name_get()[0][1]
return ret
_columns = {
'complete_name': fields.function(_get_complete_name, method=True, type='char', store=True)
}
    # add the analytic purchase journal to the purchase journal (m2o field)
def init_stc_achat_accounting(self, cr, uid, analytic_journal_id, context=None):
if analytic_journal_id:
journal_id = self.pool.get("account.journal").search(cr, uid, [('type','=','purchase')])
if journal_id:
self.pool.get("account.journal").write(cr, uid, journal_id, {'analytic_journal_id':analytic_journal_id})
return True
print "Error, purchase journal not found"
return False
print "Error, analytic purchase journal not found"
return False
| agpl-3.0 | 6,554,240,706,995,700,000 | 41.89404 | 194 | 0.60593 | false |
pinax/pinax-announcements | setup.py | 2 | 3593 | from setuptools import find_packages, setup
VERSION = "4.0.0"
LONG_DESCRIPTION = """
.. image:: http://pinaxproject.com/pinax-design/patches/pinax-announcements.svg
:target: https://pypi.python.org/pypi/pinax-announcements/
===================
Pinax Announcements
===================
.. image:: https://img.shields.io/pypi/v/pinax-announcements.svg
:target: https://pypi.python.org/pypi/pinax-announcements/
\
.. image:: https://img.shields.io/circleci/project/github/pinax/pinax-announcements.svg
:target: https://circleci.com/gh/pinax/pinax-announcements
.. image:: https://img.shields.io/codecov/c/github/pinax/pinax-announcements.svg
:target: https://codecov.io/gh/pinax/pinax-announcements
.. image:: https://img.shields.io/github/contributors/pinax/pinax-announcements.svg
:target: https://github.com/pinax/pinax-announcements/graphs/contributors
.. image:: https://img.shields.io/github/issues-pr/pinax/pinax-announcements.svg
:target: https://github.com/pinax/pinax-announcements/pulls
.. image:: https://img.shields.io/github/issues-pr-closed/pinax/pinax-announcements.svg
:target: https://github.com/pinax/pinax-announcements/pulls?q=is%3Apr+is%3Aclosed
\
.. image:: http://slack.pinaxproject.com/badge.svg
:target: http://slack.pinaxproject.com/
.. image:: https://img.shields.io/badge/license-MIT-blue.svg
:target: https://opensource.org/licenses/MIT/
\
``pinax-announcements`` is a well tested, documented, and proven solution
for any site wanting announcements for its users.
Announcements have title and content, with options for filtering their display:
* ``site_wide`` - True or False
* ``members_only`` - True or False
* ``publish_start`` - date/time or none
* ``publish_end`` - date/time or none
``pinax-announcements`` has three options for dismissing an announcement:
* ``DISMISSAL_NO`` - always visible
* ``DISMISSAL_SESSION`` - dismiss for the session
* ``DISMISSAL_PERMANENT`` - dismiss forever
Supported Django and Python Versions
------------------------------------
+-----------------+-----+-----+-----+
| Django / Python | 3.6 | 3.7 | 3.8 |
+=================+=====+=====+=====+
| 2.2 | * | * | * |
+-----------------+-----+-----+-----+
| 3.0 | * | * | * |
+-----------------+-----+-----+-----+
"""
setup(
author="Pinax Team",
author_email="[email protected]",
description="a Django announcements app",
name="pinax-announcements",
long_description=LONG_DESCRIPTION,
version=VERSION,
url="http://github.com/pinax/pinax-announcements/",
license="MIT",
packages=find_packages(),
package_data={
"announcements": []
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Django",
"Framework :: Django :: 2.2",
"Framework :: Django :: 3.0",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Software Development :: Libraries :: Python Modules",
],
install_requires=[
"django>=2.2",
],
tests_require=[
"django-test-plus>=1.0.22",
"pinax-templates>=1.0.4",
"mock>=2.0.0",
],
test_suite="runtests.runtests",
zip_safe=False
)
| mit | 9,047,119,632,050,888,000 | 33.548077 | 87 | 0.618146 | false |
danmergens/mi-instrument | mi/core/driver_scheduler.py | 5 | 15720 | #!/usr/bin/env python
"""
@package mi.core.driver_scheduler Event Scheduler used in drivers
@file mi/core/driver_scheduler.py
@author Bill French
@brief Provides task/event scheduling for drivers
uses the PolledScheduler and provides a common, simplified interface
for instrument and platform drivers.
The scheduler is configured by passing a configuration dictionary
to the constructor or by calling add_config. Calling add_config
more than once creates new schedulers, but leaves older schedulers
in place.
Note: All schedulers with trigger type of 'polled' are required
to have unique names. An exception is thrown if you try to add
duplicate names.
Configuration Dict:
config = {
# Scheduler for an absolute time. All parameters are required.
'absolute_job': {
DriverSchedulerConfigKey.TRIGGER: {
DriverSchedulerConfigKey.TRIGGER_TYPE: TriggerType.ABSOLUTE,
DriverSchedulerConfigKey.DATE: some_date_object_or_string
}
DriverSchedulerConfigKey.CALLBACK: self._callback_method
},
# Scheduler using a cron style syntax, cron parameters are optional,
# but at least one must be explicitly specified.
'cron_job': {
DriverSchedulerConfigKey.TRIGGER: {
DriverSchedulerConfigKey.TRIGGER_TYPE: TriggerType.CRON,
DriverSchedulerConfigKey.YEAR: '*',
DriverSchedulerConfigKey.MONTH: '*',
DriverSchedulerConfigKey.DAY: '*',
DriverSchedulerConfigKey.WEEK: '1',
DriverSchedulerConfigKey.DAY_OF_WEEK: '*',
DriverSchedulerConfigKey.HOUR: '*/3',
DriverSchedulerConfigKey.MINUTE: '*',
DriverSchedulerConfigKey.SECOND: '*'
},
DriverSchedulerConfigKey.CALLBACK: self._callback_method
},
# Scheduler using a interval style job, all parameters are optional,
# but at least one must be explicitly specified. Default value is 0
'interval_job': {
DriverSchedulerConfigKey.TRIGGER: {
DriverSchedulerConfigKey.TRIGGER_TYPE: TriggerType.INTERVAL,
DriverSchedulerConfigKey.WEEKS: '1',
DriverSchedulerConfigKey.DAYS: '1',
DriverSchedulerConfigKey.HOURS: '1',
DriverSchedulerConfigKey.MINUTES: '1',
DriverSchedulerConfigKey.SECONDS: '1'
}
},
# Scheduler using a polled interval style job. This type of job
# will automatically be triggered to run at a rate of 'maximum interval'
# since the last run time. The job can also be coerced in to running
# before the automatically triggered time by manually requesting the
# process be run using the run_job(job_name) method. When manually
# triggering the job it will only run if the current time is >= to
# the last run time + the minimum interval.
#
# Minimum interval is a required parameter and just like the interval
# job, at least one parameter in the interval configuration is required
# to be set.
#
# Maximum interval is an optional parameter. If set the job will
# be triggered to run automatically if it has not run from the
# max interval duration. If not given the job must be manually
# polled to cause the trigger to fire.
'polled_interval_job': {
DriverSchedulerConfigKey.TRIGGER: {
DriverSchedulerConfigKey.TRIGGER_TYPE: TriggerType.POLLED_INTERVAL,
DriverSchedulerConfigKey.MINIMAL_INTERVAL: {
DriverSchedulerConfigKey.WEEKS: '1',
DriverSchedulerConfigKey.DAYS: '1',
DriverSchedulerConfigKey.HOURS: '1',
DriverSchedulerConfigKey.MINUTES: '1',
DriverSchedulerConfigKey.SECONDS: '1'
},
DriverSchedulerConfigKey.MAXIMUM_INTERVAL: {
DriverSchedulerConfigKey.WEEKS: '1',
DriverSchedulerConfigKey.DAYS: '1',
DriverSchedulerConfigKey.HOURS: '1',
DriverSchedulerConfigKey.MINUTES: '1',
DriverSchedulerConfigKey.SECONDS: '1'
}
}
}
}
USAGE:
scheduler = DriverScheduler(config)
-or-
scheduler = DriverScheduler()
scheduler.add_config(config)
# To run polled jobs
job_name = 'polled_interval_job'
try:
if(scheduler.run_job(job_name)):
log.info("Job has been triggered to run")
else:
log.info("Job has not reached the minimum runtime yet")
except LookupError:
log.error("No job found with that name")
"""
__author__ = 'Bill French'
__license__ = 'Apache 2.0'
import inspect
from mi.core.log import get_logger; log = get_logger()
from mi.core.common import BaseEnum
from mi.core.scheduler import PolledScheduler
from mi.core.exceptions import SchedulerException
class TriggerType(BaseEnum):
ABSOLUTE = 'absolute'
INTERVAL = 'interval'
CRON = 'cron'
POLLED_INTERVAL = 'polled'
class DriverSchedulerConfigKey(BaseEnum):
# Common Config Constants
TRIGGER = 'trigger'
CALLBACK = 'callback'
###
# Trigger Specific Constants
###
TRIGGER_TYPE = 'type'
# Absolute Date
DATE = 'date'
# Polled Interval
MINIMAL_INTERVAL = 'minimum_interval'
MAXIMUM_INTERVAL = 'maximum_interval'
# Interval and Polled Interval
WEEKS = 'weeks'
DAYS = 'days'
HOURS = 'hours'
MINUTES = 'minutes'
SECONDS = 'seconds'
# Cron
YEAR = 'year'
MONTH = 'month'
DAY = 'day'
WEEK = 'week'
DAY_OF_WEEK = 'day_of_week'
HOUR = 'hour'
MINUTE = 'minute'
SECOND = 'second'
class DriverScheduler(object):
"""
    Class to facilitate event scheduling in drivers.
"""
def __init__(self, config = None):
"""
config structure:
{
test_name: {
trigger: {}
callback: some_function
}
}
@param config: job configuration structure.
"""
self._scheduler = PolledScheduler()
if(config):
self.add_config(config)
def shutdown(self):
self._scheduler.shutdown()
def run_job(self, name):
"""
Try to run a polled job with the passed in name. If it
runs then return true, otherwise false.
@param name: name of the job
@raise LookupError if we fail to find the job
"""
return self._scheduler.run_polled_job(name)
def add_config(self, config):
"""
Add new jobs to the scheduler using the passed in config
config structure:
{
test_name: {
trigger: {}
callback: some_function
}
}
@param config: job configuration structure.
@raise SchedulerException if we fail to add the job
"""
if(not isinstance(config, dict)):
raise SchedulerException("scheduler config not a dict")
if(len(config.keys()) == 0):
raise SchedulerException("scheduler config empty")
for (name, config) in config.items():
try:
self._add_job(name, config)
except ValueError as e:
raise SchedulerException("failed to schedule job: %s" % e)
except TypeError as e:
raise SchedulerException("failed to schedule job: %s" % e)
if(not self._scheduler.running):
self._scheduler.start()
def remove_job(self, callback):
self._scheduler.unschedule_func(callback)
def _add_job(self, name, config):
"""
Add a new job to the scheduler based on the trigger configuration
@param name: name of the job
@param config: job configuration
@raise SchedulerError if we fail to add the job
"""
log.debug(" Config name: %s value: %s" % (name, config))
if(config == None):
raise SchedulerException("job config empty")
if(not isinstance(config, dict)):
raise SchedulerException("job config not a dict")
trigger = self._get_trigger_from_config(config)
trigger_type = trigger.get(DriverSchedulerConfigKey.TRIGGER_TYPE)
if(trigger_type == None):
raise SchedulerException("trigger type missing")
if(trigger_type == TriggerType.ABSOLUTE):
self._add_job_absolute(name, config)
elif(trigger_type == TriggerType.CRON):
self._add_job_cron(name, config)
elif(trigger_type == TriggerType.INTERVAL):
self._add_job_interval(name, config)
elif(trigger_type == TriggerType.POLLED_INTERVAL):
self._add_job_polled_interval(name, config)
else:
raise SchedulerException("unknown trigger type '%s'" % trigger_type)
def _get_trigger_from_config(self, config):
"""
get and validate the trigger dictionary from the config object.
@param config: configuration object to inspect
@return: dictionary from the config for the trigger config
"""
trigger = config.get(DriverSchedulerConfigKey.TRIGGER)
if(trigger == None):
raise SchedulerException("trigger definition missing")
if(not isinstance(trigger, dict)):
raise SchedulerException("config missing trigger definition")
return trigger
def _get_callback_from_config(self, config):
"""
get and verify the callback parameter from a job config.
@param config: configuration object to inspect
@return: callback method from the config for the trigger config
"""
callback = config.get(DriverSchedulerConfigKey.CALLBACK)
if(callback == None):
raise SchedulerException("callback definition missing")
if(not callable(callback)):
raise SchedulerException("callback incorrect type: '%s'" % type(callback))
return callback
def _add_job_absolute(self, name, config):
"""
Add a new job to the scheduler based on the trigger configuration
@param name: name of the job
@param config: job configuration
@raise SchedulerError if we fail to add the job
"""
if(not isinstance(config, dict)):
raise SchedulerException("config not a dict")
callback = self._get_callback_from_config(config)
trigger = self._get_trigger_from_config(config)
dt = trigger.get(DriverSchedulerConfigKey.DATE)
if(dt == None):
raise SchedulerException("trigger missing parameter: %s" % DriverSchedulerConfigKey.DATE)
self._scheduler.add_date_job(callback, dt)
def _add_job_cron(self, name, config):
"""
Add a new job to the scheduler based on the trigger configuration
@param name: name of the job
@param config: job configuration
@raise SchedulerError if we fail to add the job
"""
if(not isinstance(config, dict)):
raise SchedulerException("config not a dict")
callback = self._get_callback_from_config(config)
trigger = self._get_trigger_from_config(config)
year = trigger.get(DriverSchedulerConfigKey.YEAR)
month = trigger.get(DriverSchedulerConfigKey.MONTH)
day = trigger.get(DriverSchedulerConfigKey.DAY)
week = trigger.get(DriverSchedulerConfigKey.WEEK)
day_of_week = trigger.get(DriverSchedulerConfigKey.DAY_OF_WEEK)
hour = trigger.get(DriverSchedulerConfigKey.HOUR)
minute = trigger.get(DriverSchedulerConfigKey.MINUTE)
second = trigger.get(DriverSchedulerConfigKey.SECOND)
if(year==None and month==None and day==None and week==None and
day_of_week==None and hour==None and minute==None and second==None):
raise SchedulerException("at least one cron parameter required!")
self._scheduler.add_cron_job(callback, year=year, month=month, day=day, week=week,
day_of_week=day_of_week, hour=hour, minute=minute, second=second)
def _add_job_interval(self, name, config):
"""
Add a new job to the scheduler based on the trigger configuration
@param name: name of the job
@param config: job configuration
@raise SchedulerError if we fail to add the job
"""
if(not isinstance(config, dict)):
raise SchedulerException("config not a dict")
callback = self._get_callback_from_config(config)
trigger = self._get_trigger_from_config(config)
weeks = trigger.get(DriverSchedulerConfigKey.WEEKS, 0)
days = trigger.get(DriverSchedulerConfigKey.DAYS, 0)
hours = trigger.get(DriverSchedulerConfigKey.HOURS, 0)
minutes = trigger.get(DriverSchedulerConfigKey.MINUTES, 0)
seconds = trigger.get(DriverSchedulerConfigKey.SECONDS, 0)
if(not (weeks or days or hours or minutes or seconds)):
raise SchedulerException("at least interval parameter required!")
self._scheduler.add_interval_job(callback, weeks=weeks, days=days, hours=hours,
minutes=minutes, seconds=seconds)
def _add_job_polled_interval(self, name, config):
"""
Add a new job to the scheduler based on the trigger configuration
@param name: name of the job
@param config: job configuration
@raise SchedulerError if we fail to add the job
"""
if(not isinstance(config, dict)):
raise SchedulerException("config not a dict")
callback = self._get_callback_from_config(config)
trigger = self._get_trigger_from_config(config)
min_interval = trigger.get(DriverSchedulerConfigKey.MINIMAL_INTERVAL)
max_interval = trigger.get(DriverSchedulerConfigKey.MAXIMUM_INTERVAL)
if(min_interval == None):
raise SchedulerException("%s missing from trigger configuration" % DriverSchedulerConfigKey.MINIMAL_INTERVAL)
if(not isinstance(min_interval, dict)):
raise SchedulerException("%s trigger configuration not a dict" % DriverSchedulerConfigKey.MINIMAL_INTERVAL)
min_weeks = min_interval.get(DriverSchedulerConfigKey.WEEKS, 0)
min_days = min_interval.get(DriverSchedulerConfigKey.DAYS, 0)
min_hours = min_interval.get(DriverSchedulerConfigKey.HOURS, 0)
min_minutes = min_interval.get(DriverSchedulerConfigKey.MINUTES, 0)
min_seconds = min_interval.get(DriverSchedulerConfigKey.SECONDS, 0)
if(not (min_weeks or min_days or min_hours or min_minutes or min_seconds)):
raise SchedulerException("at least interval parameter required!")
min_interval_obj = self._scheduler.interval(min_weeks, min_days, min_hours, min_minutes, min_seconds)
max_interval_obj = None
if(max_interval != None):
if(not isinstance(max_interval, dict)):
raise SchedulerException("%s trigger configuration not a dict" % DriverSchedulerConfigKey.MINIMAL_INTERVAL)
max_weeks = max_interval.get(DriverSchedulerConfigKey.WEEKS, 0)
max_days = max_interval.get(DriverSchedulerConfigKey.DAYS, 0)
max_hours = max_interval.get(DriverSchedulerConfigKey.HOURS, 0)
max_minutes = max_interval.get(DriverSchedulerConfigKey.MINUTES, 0)
max_seconds = max_interval.get(DriverSchedulerConfigKey.SECONDS, 0)
if(max_weeks or max_days or max_hours or max_minutes or max_seconds):
max_interval_obj = self._scheduler.interval(max_weeks, max_days, max_hours, max_minutes, max_seconds)
self._scheduler.add_polled_job(callback, name, min_interval_obj, max_interval_obj)
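# --- Illustrative usage sketch; not part of the original driver module. ---
# It shows a minimal interval-job configuration built from the classes defined
# above. The job name and callback are invented for illustration only, and the
# block assumes the mi.core dependencies imported at the top are importable.
if __name__ == '__main__':
    def _demo_callback():
        log.info("demo interval job fired")

    _demo_config = {
        'demo_interval_job': {
            DriverSchedulerConfigKey.TRIGGER: {
                DriverSchedulerConfigKey.TRIGGER_TYPE: TriggerType.INTERVAL,
                DriverSchedulerConfigKey.SECONDS: 5,
            },
            DriverSchedulerConfigKey.CALLBACK: _demo_callback,
        }
    }
    _scheduler = DriverScheduler(_demo_config)   # schedules the job and starts the scheduler
    _scheduler.shutdown()                        # stop the underlying scheduler again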
| bsd-2-clause | 8,924,218,871,820,338,000 | 36.339667 | 123 | 0.644466 | false |
blaze33/django | django/conf/locale/ko/formats.py | 107 | 2105 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'Y년 n월 j일'
TIME_FORMAT = 'A g:i:s'
DATETIME_FORMAT = 'Y년 n월 j일 g:i:s A'
YEAR_MONTH_FORMAT = 'Y년 F월'
MONTH_DAY_FORMAT = 'F월 j일'
SHORT_DATE_FORMAT = 'Y-n-j.'
SHORT_DATETIME_FORMAT = 'Y-n-j H:i'
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
# '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
# '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
# '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
# '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
'%Y년 %m월 %d일', # '2006년 10월 25일', with localized suffix.
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
'%H시 %M분 %S초', # '14시 30분 59초'
'%H시 %M분', # '14시 30분'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
'%Y년 %m월 %d일 %H시 %M분 %S초', # '2006년 10월 25일 14시 30분 59초'
'%Y년 %m월 %d일 %H시 %M분', # '2006년 10월 25일 14시 30분'
)
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
| bsd-3-clause | 2,224,589,443,596,827,100 | 38.392157 | 81 | 0.507218 | false |
Jacobichou/electron | script/cpplint.py | 153 | 1850 | #!/usr/bin/env python
import fnmatch
import os
import sys
from lib.util import execute
IGNORE_FILES = [
os.path.join('atom', 'app', 'atom_main.cc'),
os.path.join('atom', 'browser', 'mac', 'atom_application.h'),
os.path.join('atom', 'browser', 'mac', 'atom_application_delegate.h'),
os.path.join('atom', 'browser', 'native_window_mac.h'),
os.path.join('atom', 'browser', 'resources', 'win', 'resource.h'),
os.path.join('atom', 'browser', 'ui', 'cocoa', 'event_processing_window.h'),
os.path.join('atom', 'browser', 'ui', 'cocoa', 'atom_menu_controller.h'),
os.path.join('atom', 'browser', 'ui', 'gtk', 'gtk_custom_menu.cc'),
os.path.join('atom', 'browser', 'ui', 'gtk', 'gtk_custom_menu_item.cc'),
os.path.join('atom', 'common', 'api', 'api_messages.h'),
os.path.join('atom', 'common', 'api', 'atom_extensions.h'),
os.path.join('atom', 'common', 'atom_version.h'),
os.path.join('atom', 'common', 'common_message_generator.cc'),
os.path.join('atom', 'common', 'common_message_generator.h'),
os.path.join('atom', 'common', 'swap_or_assign.h'),
]
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
def main():
os.chdir(SOURCE_ROOT)
files = list_files(['app', 'browser', 'common', 'renderer'],
['*.cc', '*.h'])
call_cpplint(list(set(files) - set(IGNORE_FILES)))
def list_files(directories, filters):
matches = []
for directory in directories:
    for root, _, filenames in os.walk(os.path.join('atom', directory)):
for f in filters:
for filename in fnmatch.filter(filenames, f):
matches.append(os.path.join(root, filename))
return matches
def call_cpplint(files):
cpplint = os.path.join(SOURCE_ROOT, 'vendor', 'depot_tools', 'cpplint.py')
execute([sys.executable, cpplint] + files)
if __name__ == '__main__':
sys.exit(main())
| mit | -7,144,347,197,517,798,000 | 33.90566 | 78 | 0.630811 | false |
sbellem/django-rest-framework | rest_framework/utils/humanize_datetime.py | 144 | 1285 | """
Helper functions that convert strftime formats into more readable representations.
"""
from rest_framework import ISO_8601
def datetime_formats(formats):
format = ', '.join(formats).replace(
ISO_8601,
'YYYY-MM-DDThh:mm[:ss[.uuuuuu]][+HH:MM|-HH:MM|Z]'
)
return humanize_strptime(format)
def date_formats(formats):
format = ', '.join(formats).replace(ISO_8601, 'YYYY[-MM[-DD]]')
return humanize_strptime(format)
def time_formats(formats):
format = ', '.join(formats).replace(ISO_8601, 'hh:mm[:ss[.uuuuuu]]')
return humanize_strptime(format)
def humanize_strptime(format_string):
# Note that we're missing some of the locale specific mappings that
# don't really make sense.
mapping = {
"%Y": "YYYY",
"%y": "YY",
"%m": "MM",
"%b": "[Jan-Dec]",
"%B": "[January-December]",
"%d": "DD",
"%H": "hh",
"%I": "hh", # Requires '%p' to differentiate from '%H'.
"%M": "mm",
"%S": "ss",
"%f": "uuuuuu",
"%a": "[Mon-Sun]",
"%A": "[Monday-Sunday]",
"%p": "[AM|PM]",
"%z": "[+HHMM|-HHMM]"
}
for key, val in mapping.items():
format_string = format_string.replace(key, val)
return format_string
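# Illustrative sketch; not part of the original module. A quick check of the
# mapping above, run only when this file is executed directly: '%Y-%m-%d %H:%M'
# humanizes to 'YYYY-MM-DD hh:mm'.
if __name__ == '__main__':
    print(humanize_strptime('%Y-%m-%d %H:%M'))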
| bsd-2-clause | -9,189,681,227,853,836,000 | 26.340426 | 82 | 0.54786 | false |
bohlian/erpnext | erpnext/controllers/status_updater.py | 22 | 14617 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, comma_or, nowdate, getdate
from frappe import _
from frappe.model.document import Document
def validate_status(status, options):
if status not in options:
frappe.throw(_("Status must be one of {0}").format(comma_or(options)))
status_map = {
"Lead": [
["Lost Quotation", "has_lost_quotation"],
["Opportunity", "has_opportunity"],
["Quotation", "has_quotation"],
["Converted", "has_customer"],
],
"Opportunity": [
["Lost", "eval:self.status=='Lost'"],
["Lost", "has_lost_quotation"],
["Quotation", "has_active_quotation"],
["Converted", "has_ordered_quotation"],
["Closed", "eval:self.status=='Closed'"]
],
"Quotation": [
["Draft", None],
["Submitted", "eval:self.docstatus==1"],
["Lost", "eval:self.status=='Lost'"],
["Ordered", "has_sales_order"],
["Cancelled", "eval:self.docstatus==2"],
],
"Sales Order": [
["Draft", None],
["To Deliver and Bill", "eval:self.per_delivered < 100 and self.per_billed < 100 and self.docstatus == 1"],
["To Bill", "eval:self.per_delivered == 100 and self.per_billed < 100 and self.docstatus == 1"],
["To Deliver", "eval:self.per_delivered < 100 and self.per_billed == 100 and self.docstatus == 1"],
["Completed", "eval:self.per_delivered == 100 and self.per_billed == 100 and self.docstatus == 1"],
["Completed", "eval:self.order_type == 'Maintenance' and self.per_billed == 100 and self.docstatus == 1"],
["Cancelled", "eval:self.docstatus==2"],
["Closed", "eval:self.status=='Closed'"],
],
"Sales Invoice": [
["Draft", None],
["Submitted", "eval:self.docstatus==1"],
["Return", "eval:self.is_return==1 and self.docstatus==1"],
["Paid", "eval:self.outstanding_amount<=0 and self.docstatus==1 and self.is_return==0"],
["Credit Note Issued", "eval:self.outstanding_amount < 0 and self.docstatus==1 and self.is_return==0 and get_value('Sales Invoice', {'is_return': 1, 'return_against': self.name, 'docstatus': 1})"],
["Unpaid", "eval:self.outstanding_amount > 0 and getdate(self.due_date) >= getdate(nowdate()) and self.docstatus==1"],
["Overdue", "eval:self.outstanding_amount > 0 and getdate(self.due_date) < getdate(nowdate()) and self.docstatus==1"],
["Cancelled", "eval:self.docstatus==2"],
],
"Purchase Invoice": [
["Draft", None],
["Submitted", "eval:self.docstatus==1"],
["Return", "eval:self.is_return==1 and self.docstatus==1"],
["Paid", "eval:self.outstanding_amount<=0 and self.docstatus==1 and self.is_return==0"],
["Debit Note Issued", "eval:self.outstanding_amount < 0 and self.docstatus==1 and self.is_return==0 and get_value('Purchase Invoice', {'is_return': 1, 'return_against': self.name, 'docstatus': 1})"],
["Unpaid", "eval:self.outstanding_amount > 0 and getdate(self.due_date) >= getdate(nowdate()) and self.docstatus==1"],
["Overdue", "eval:self.outstanding_amount > 0 and getdate(self.due_date) < getdate(nowdate()) and self.docstatus==1"],
["Cancelled", "eval:self.docstatus==2"],
],
"Purchase Order": [
["Draft", None],
["To Receive and Bill", "eval:self.per_received < 100 and self.per_billed < 100 and self.docstatus == 1"],
["To Bill", "eval:self.per_received == 100 and self.per_billed < 100 and self.docstatus == 1"],
["To Receive", "eval:self.per_received < 100 and self.per_billed == 100 and self.docstatus == 1"],
["Completed", "eval:self.per_received == 100 and self.per_billed == 100 and self.docstatus == 1"],
["Delivered", "eval:self.status=='Delivered'"],
["Cancelled", "eval:self.docstatus==2"],
["Closed", "eval:self.status=='Closed'"],
],
"Delivery Note": [
["Draft", None],
["To Bill", "eval:self.per_billed < 100 and self.docstatus == 1"],
["Completed", "eval:self.per_billed == 100 and self.docstatus == 1"],
["Cancelled", "eval:self.docstatus==2"],
["Closed", "eval:self.status=='Closed'"],
],
"Purchase Receipt": [
["Draft", None],
["To Bill", "eval:self.per_billed < 100 and self.docstatus == 1"],
["Completed", "eval:self.per_billed == 100 and self.docstatus == 1"],
["Cancelled", "eval:self.docstatus==2"],
["Closed", "eval:self.status=='Closed'"],
],
"Material Request": [
["Draft", None],
["Stopped", "eval:self.status == 'Stopped'"],
["Cancelled", "eval:self.docstatus == 2"],
["Pending", "eval:self.status != 'Stopped' and self.per_ordered == 0 and self.docstatus == 1"],
["Partially Ordered", "eval:self.status != 'Stopped' and self.per_ordered < 100 and self.per_ordered > 0 and self.docstatus == 1"],
["Ordered", "eval:self.status != 'Stopped' and self.per_ordered == 100 and self.docstatus == 1 and self.material_request_type == 'Purchase'"],
["Transferred", "eval:self.status != 'Stopped' and self.per_ordered == 100 and self.docstatus == 1 and self.material_request_type == 'Material Transfer'"],
["Issued", "eval:self.status != 'Stopped' and self.per_ordered == 100 and self.docstatus == 1 and self.material_request_type == 'Material Issue'"]
]
}
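# Illustrative note; not part of the original file. set_status() below walks a
# doctype's rule list in reverse and stops at the first rule that matches, so a
# hypothetical minimal entry would look like:
#
#   "My DocType": [
#       ["Draft", None],                          # falsy condition: fallback status
#       ["Submitted", "eval:self.docstatus==1"],  # "eval:" rules are python expressions
#       ["Converted", "has_customer"],            # bare names call a method on the document
#   ]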
class StatusUpdater(Document):
"""
Updates the status of the calling records
Delivery Note: Update Delivered Qty, Update Percent and Validate over delivery
Sales Invoice: Update Billed Amt, Update Percent and Validate over billing
Installation Note: Update Installed Qty, Update Percent Qty and Validate over installation
"""
def update_prevdoc_status(self):
self.update_qty()
self.validate_qty()
def set_status(self, update=False, status=None, update_modified=True):
if self.is_new():
if self.get('amended_from'):
self.status = 'Draft'
return
if self.doctype in status_map:
_status = self.status
if status and update:
self.db_set("status", status)
sl = status_map[self.doctype][:]
sl.reverse()
for s in sl:
if not s[1]:
self.status = s[0]
break
elif s[1].startswith("eval:"):
if frappe.safe_eval(s[1][5:], None, { "self": self.as_dict(), "getdate": getdate,
"nowdate": nowdate, "get_value": frappe.db.get_value }):
self.status = s[0]
break
elif getattr(self, s[1])():
self.status = s[0]
break
if self.status != _status and self.status not in ("Cancelled", "Partially Ordered",
"Ordered", "Issued", "Transferred"):
self.add_comment("Label", _(self.status))
if update:
self.db_set('status', self.status, update_modified = update_modified)
def validate_qty(self):
"""Validates qty at row level"""
self.tolerance = {}
self.global_tolerance = None
for args in self.status_updater:
if "target_ref_field" not in args:
# if target_ref_field is not specified, the programmer does not want to validate qty / amount
continue
# get unique transactions to update
for d in self.get_all_children():
if d.doctype == args['source_dt'] and d.get(args["join_field"]):
args['name'] = d.get(args['join_field'])
# get all qty where qty > target_field
item = frappe.db.sql("""select item_code, `{target_ref_field}`,
`{target_field}`, parenttype, parent from `tab{target_dt}`
where `{target_ref_field}` < `{target_field}`
and name=%s and docstatus=1""".format(**args),
args['name'], as_dict=1)
if item:
item = item[0]
item['idx'] = d.idx
item['target_ref_field'] = args['target_ref_field'].replace('_', ' ')
# if not item[args['target_ref_field']]:
# msgprint(_("Note: System will not check over-delivery and over-booking for Item {0} as quantity or amount is 0").format(item.item_code))
if args.get('no_tolerance'):
item['reduce_by'] = item[args['target_field']] - item[args['target_ref_field']]
if item['reduce_by'] > .01:
self.limits_crossed_error(args, item)
elif item[args['target_ref_field']]:
self.check_overflow_with_tolerance(item, args)
def check_overflow_with_tolerance(self, item, args):
"""
		Checks if there is overflow, considering a relaxation tolerance
"""
# check if overflow is within tolerance
tolerance, self.tolerance, self.global_tolerance = get_tolerance_for(item['item_code'],
self.tolerance, self.global_tolerance)
overflow_percent = ((item[args['target_field']] - item[args['target_ref_field']]) /
item[args['target_ref_field']]) * 100
if overflow_percent - tolerance > 0.01:
item['max_allowed'] = flt(item[args['target_ref_field']] * (100+tolerance)/100)
item['reduce_by'] = item[args['target_field']] - item['max_allowed']
self.limits_crossed_error(args, item)
def limits_crossed_error(self, args, item):
'''Raise exception for limits crossed'''
frappe.throw(_('This document is over limit by {0} {1} for item {4}. Are you making another {3} against the same {2}?')
.format(
frappe.bold(_(item["target_ref_field"].title())),
frappe.bold(item["reduce_by"]),
frappe.bold(_(args.get('target_dt'))),
frappe.bold(_(self.doctype)),
frappe.bold(item.get('item_code'))
) + '<br><br>' +
_('To allow over-billing or over-ordering, update "Allowance" in Stock Settings or the Item.'),
title = _('Limit Crossed'))
def update_qty(self, update_modified=True):
"""Updates qty or amount at row level
:param update_modified: If true, updates `modified` and `modified_by` for target parent doc
"""
for args in self.status_updater:
			# condition to include the current record (if submitted) or exclude it (if cancelled)
			if self.docstatus == 1:
				args['cond'] = ' or parent="%s"' % self.name.replace('"', '\\"')
			else:
				args['cond'] = ' and parent!="%s"' % self.name.replace('"', '\\"')
self._update_children(args, update_modified)
if "percent_join_field" in args:
self._update_percent_field_in_targets(args, update_modified)
def _update_children(self, args, update_modified):
"""Update quantities or amount in child table"""
for d in self.get_all_children():
if d.doctype != args['source_dt']:
continue
self._update_modified(args, update_modified)
# updates qty in the child table
args['detail_id'] = d.get(args['join_field'])
args['second_source_condition'] = ""
if args.get('second_source_dt') and args.get('second_source_field') \
and args.get('second_join_field'):
if not args.get("second_source_extra_cond"):
args["second_source_extra_cond"] = ""
args['second_source_condition'] = """ + ifnull((select sum(%(second_source_field)s)
from `tab%(second_source_dt)s`
where `%(second_join_field)s`="%(detail_id)s"
and (`tab%(second_source_dt)s`.docstatus=1) %(second_source_extra_cond)s), 0) """ % args
if args['detail_id']:
if not args.get("extra_cond"): args["extra_cond"] = ""
frappe.db.sql("""update `tab%(target_dt)s`
set %(target_field)s = (
(select ifnull(sum(%(source_field)s), 0)
from `tab%(source_dt)s` where `%(join_field)s`="%(detail_id)s"
and (docstatus=1 %(cond)s) %(extra_cond)s)
%(second_source_condition)s
)
%(update_modified)s
where name='%(detail_id)s'""" % args)
def _update_percent_field_in_targets(self, args, update_modified=True):
"""Update percent field in parent transaction"""
distinct_transactions = set([d.get(args['percent_join_field'])
for d in self.get_all_children(args['source_dt'])])
for name in distinct_transactions:
if name:
args['name'] = name
self._update_percent_field(args, update_modified)
def _update_percent_field(self, args, update_modified=True):
"""Update percent field in parent transaction"""
self._update_modified(args, update_modified)
if args.get('target_parent_field'):
frappe.db.sql("""update `tab%(target_parent_dt)s`
set %(target_parent_field)s = round(
ifnull((select
ifnull(sum(if(%(target_ref_field)s > %(target_field)s, abs(%(target_field)s), abs(%(target_ref_field)s))), 0)
/ sum(abs(%(target_ref_field)s)) * 100
from `tab%(target_dt)s` where parent="%(name)s"), 0), 2)
%(update_modified)s
where name='%(name)s'""" % args)
# update field
if args.get('status_field'):
frappe.db.sql("""update `tab%(target_parent_dt)s`
set %(status_field)s = if(%(target_parent_field)s<0.001,
'Not %(keyword)s', if(%(target_parent_field)s>=99.99,
'Fully %(keyword)s', 'Partly %(keyword)s'))
where name='%(name)s'""" % args)
if update_modified:
target = frappe.get_doc(args["target_parent_dt"], args["name"])
target.set_status(update=True)
target.notify_update()
def _update_modified(self, args, update_modified):
args['update_modified'] = ''
if update_modified:
args['update_modified'] = ', modified = now(), modified_by = "{0}"'\
.format(frappe.db.escape(frappe.session.user))
def update_billing_status_for_zero_amount_refdoc(self, ref_dt):
ref_fieldname = ref_dt.lower().replace(" ", "_")
zero_amount_refdoc = []
all_zero_amount_refdoc = frappe.db.sql_list("""select name from `tab%s`
where docstatus=1 and base_net_total = 0""" % ref_dt)
for item in self.get("items"):
if item.get(ref_fieldname) \
and item.get(ref_fieldname) in all_zero_amount_refdoc \
and item.get(ref_fieldname) not in zero_amount_refdoc:
zero_amount_refdoc.append(item.get(ref_fieldname))
if zero_amount_refdoc:
self.update_billing_status(zero_amount_refdoc, ref_dt, ref_fieldname)
def update_billing_status(self, zero_amount_refdoc, ref_dt, ref_fieldname):
for ref_dn in zero_amount_refdoc:
ref_doc_qty = flt(frappe.db.sql("""select ifnull(sum(qty), 0) from `tab%s Item`
where parent=%s""" % (ref_dt, '%s'), (ref_dn))[0][0])
billed_qty = flt(frappe.db.sql("""select ifnull(sum(qty), 0)
from `tab%s Item` where %s=%s and docstatus=1""" %
(self.doctype, ref_fieldname, '%s'), (ref_dn))[0][0])
per_billed = ((ref_doc_qty if billed_qty > ref_doc_qty else billed_qty)\
/ ref_doc_qty)*100
ref_doc = frappe.get_doc(ref_dt, ref_dn)
ref_doc.db_set("per_billed", per_billed)
ref_doc.set_status(update=True)
def get_tolerance_for(item_code, item_tolerance={}, global_tolerance=None):
"""
Returns the tolerance for the item, if not set, returns global tolerance
"""
if item_tolerance.get(item_code):
return item_tolerance[item_code], item_tolerance, global_tolerance
tolerance = flt(frappe.db.get_value('Item',item_code,'tolerance') or 0)
if not tolerance:
if global_tolerance == None:
global_tolerance = flt(frappe.db.get_value('Stock Settings', None, 'tolerance'))
tolerance = global_tolerance
item_tolerance[item_code] = tolerance
return tolerance, item_tolerance, global_tolerance
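# Illustrative sketch; not part of the original file. A controller mixing in
# StatusUpdater is expected to define ``self.status_updater`` as a list of
# mapping dicts before update_qty()/validate_qty() are called. The doctype and
# field names below are hypothetical and only show the keys read above.
#
#   self.status_updater = [{
#       'source_dt': 'Delivery Note Item',
#       'target_dt': 'Sales Order Item',
#       'join_field': 'so_detail',
#       'source_field': 'qty',
#       'target_field': 'delivered_qty',
#       'target_ref_field': 'qty',
#       'target_parent_dt': 'Sales Order',
#       'target_parent_field': 'per_delivered',
#       'percent_join_field': 'against_sales_order',
#       'status_field': 'delivery_status',
#       'keyword': 'Delivered',
#   }]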
| gpl-3.0 | 5,669,877,712,528,662,000 | 40.174648 | 201 | 0.652665 | false |
LukeC92/iris | lib/iris/tests/integration/plot/test_netcdftime.py | 3 | 2243 | # (C) British Crown Copyright 2016 - 2017, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Test plot of time coord with non-gregorian calendar.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
import netcdftime
import numpy as np
from iris.coords import AuxCoord
from cf_units import Unit
if tests.NC_TIME_AXIS_AVAILABLE:
from nc_time_axis import CalendarDateTime
# Run tests in no graphics mode if matplotlib is not available.
if tests.MPL_AVAILABLE:
import matplotlib.pyplot as plt
import iris.plot as iplt
@tests.skip_nc_time_axis
@tests.skip_plot
class Test(tests.GraphicsTest):
def test_360_day_calendar(self):
n = 360
calendar = '360_day'
time_unit = Unit('days since 1970-01-01 00:00', calendar=calendar)
time_coord = AuxCoord(np.arange(n), 'time', units=time_unit)
times = [time_unit.num2date(point) for point in time_coord.points]
times = [netcdftime.datetime(atime.year, atime.month, atime.day,
atime.hour, atime.minute, atime.second)
for atime in times]
expected_ydata = np.array([CalendarDateTime(time, calendar)
for time in times])
line1, = iplt.plot(time_coord)
result_ydata = line1.get_ydata()
self.assertArrayEqual(expected_ydata, result_ydata)
if __name__ == "__main__":
tests.main()
| lgpl-3.0 | -6,989,007,306,716,357,000 | 33.507692 | 76 | 0.691039 | false |
Yannig/ansible | lib/ansible/modules/inventory/add_host.py | 16 | 2112 | # -*- mode: python -*-
#
# Copyright: Ansible Team
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: add_host
short_description: add a host (and alternatively a group) to the ansible-playbook in-memory inventory
description:
- Use variables to create new hosts and groups in inventory for use in later plays of the same playbook.
Takes variables so you can define the new hosts more fully.
- This module is also supported for Windows targets.
version_added: "0.9"
options:
name:
aliases: [ 'hostname', 'host' ]
description:
- The hostname/ip of the host to add to the inventory, can include a colon and a port number.
required: true
groups:
aliases: [ 'groupname', 'group' ]
description:
- The groups to add the hostname to, comma separated.
required: false
notes:
- This module bypasses the play host loop and only runs once for all the hosts in the play, if you need it
to iterate use a with\_ directive.
- This module is also supported for Windows targets.
- The alias 'host' of the parameter 'name' is only available on >=2.4
author:
- "Ansible Core Team"
- "Seth Vidal"
'''
EXAMPLES = '''
# add host to group 'just_created' with variable foo=42
- add_host:
name: "{{ ip_from_ec2 }}"
groups: just_created
foo: 42
# add a host with a non-standard port local to your machines
- add_host:
name: "{{ new_ip }}:{{ new_port }}"
# add a host alias that we reach through a tunnel (Ansible <= 1.9)
- add_host:
hostname: "{{ new_ip }}"
ansible_ssh_host: "{{ inventory_hostname }}"
ansible_ssh_port: "{{ new_port }}"
# add a host alias that we reach through a tunnel (Ansible >= 2.0)
- add_host:
hostname: "{{ new_ip }}"
ansible_host: "{{ inventory_hostname }}"
ansible_port: "{{ new_port }}"
'''
| gpl-3.0 | 7,055,968,463,526,600,000 | 30.522388 | 110 | 0.653883 | false |
hefen1/chromium | chrome/test/data/nacl/gdb_rsp.py | 99 | 2431 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file is based on gdb_rsp.py file from NaCl repository.
import re
import socket
import time
def RspChecksum(data):
checksum = 0
for char in data:
checksum = (checksum + ord(char)) % 0x100
return checksum
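# Illustrative note; not part of the original script. RSP packets are framed as
# '$<payload>#<two-hex-digit checksum>'. For the payload 'g' (a standard
# "read registers" request, used here purely as an example) RspChecksum('g')
# returns 0x67, so RspSendOnly() below would put '$g#67' on the wire.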
class GdbRspConnection(object):
def __init__(self, addr):
self._socket = self._Connect(addr)
def _Connect(self, addr):
# We have to poll because we do not know when sel_ldr has
# successfully done bind() on the TCP port. This is inherently
# unreliable.
# TODO(mseaborn): Add a more reliable connection mechanism to
# sel_ldr's debug stub.
timeout_in_seconds = 10
poll_time_in_seconds = 0.1
for i in xrange(int(timeout_in_seconds / poll_time_in_seconds)):
# On Mac OS X, we have to create a new socket FD for each retry.
sock = socket.socket()
try:
sock.connect(addr)
except socket.error:
# Retry after a delay.
time.sleep(poll_time_in_seconds)
else:
return sock
raise Exception('Could not connect to sel_ldr\'s debug stub in %i seconds'
% timeout_in_seconds)
def _GetReply(self):
reply = ''
while True:
data = self._socket.recv(1024)
if len(data) == 0:
raise AssertionError('EOF on socket reached with '
'incomplete reply message: %r' % reply)
reply += data
if '#' in data:
break
match = re.match('\+\$([^#]*)#([0-9a-fA-F]{2})$', reply)
if match is None:
raise AssertionError('Unexpected reply message: %r' % reply)
reply_body = match.group(1)
checksum = match.group(2)
expected_checksum = '%02x' % RspChecksum(reply_body)
if checksum != expected_checksum:
raise AssertionError('Bad RSP checksum: %r != %r' %
(checksum, expected_checksum))
# Send acknowledgement.
self._socket.send('+')
return reply_body
# Send an rsp message, but don't wait for or expect a reply.
def RspSendOnly(self, data):
msg = '$%s#%02x' % (data, RspChecksum(data))
return self._socket.send(msg)
def RspRequest(self, data):
self.RspSendOnly(data)
return self._GetReply()
def RspInterrupt(self):
self._socket.send('\x03')
return self._GetReply()
| bsd-3-clause | 8,271,119,997,895,116,000 | 29.772152 | 78 | 0.625668 | false |
Akasurde/bodhi | bodhi/services/overrides.py | 1 | 8799 | # This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import math
from cornice import Service
from pyramid.exceptions import HTTPNotFound
from sqlalchemy import func, distinct
from sqlalchemy.sql import or_
from bodhi import log
from bodhi.models import Build, BuildrootOverride, Package, Release, User
import bodhi.schemas
import bodhi.services.errors
from bodhi.validators import (
validate_override_builds,
validate_expiration_date,
validate_packages,
validate_releases,
validate_username,
)
override = Service(name='override', path='/overrides/{nvr}',
description='Buildroot Overrides',
cors_origins=bodhi.security.cors_origins_ro)
overrides = Service(name='overrides', path='/overrides/',
description='Buildroot Overrides',
                    # Note, this 'rw' is not a typo. The @overrides service has
                    # a ``post`` section at the bottom.
cors_origins=bodhi.security.cors_origins_rw)
@override.get(accept=("application/json", "text/json"), renderer="json",
error_handler=bodhi.services.errors.json_handler)
@override.get(accept=("application/javascript"), renderer="jsonp",
error_handler=bodhi.services.errors.jsonp_handler)
@override.get(accept=("text/html"), renderer="override.html",
error_handler=bodhi.services.errors.html_handler)
def get_override(request):
db = request.db
nvr = request.matchdict.get('nvr')
build = Build.get(nvr, db)
if not build:
request.errors.add('url', 'nvr', 'No such build')
request.errors.status = HTTPNotFound.code
return
if not build.override:
request.errors.add('url', 'nvr',
'No buildroot override for this build')
request.errors.status = HTTPNotFound.code
return
return dict(override=build.override)
@overrides.get(schema=bodhi.schemas.ListOverrideSchema,
accept=("application/json", "text/json"), renderer="json",
error_handler=bodhi.services.errors.json_handler,
validators=(validate_packages, validate_releases,
validate_username)
)
@overrides.get(schema=bodhi.schemas.ListOverrideSchema,
accept=("application/javascript"), renderer="jsonp",
error_handler=bodhi.services.errors.jsonp_handler,
validators=(validate_packages, validate_releases,
validate_username)
)
@overrides.get(schema=bodhi.schemas.ListOverrideSchema,
accept=('application/atom+xml'), renderer='rss',
error_handler=bodhi.services.errors.html_handler,
validators=(validate_packages, validate_releases,
validate_username)
)
@overrides.get(schema=bodhi.schemas.ListOverrideSchema,
accept=('text/html'), renderer='overrides.html',
error_handler=bodhi.services.errors.html_handler,
validators=(validate_packages, validate_releases,
validate_username)
)
def query_overrides(request):
db = request.db
data = request.validated
query = db.query(BuildrootOverride)
expired = data.get('expired')
if expired is not None:
if expired:
query = query.filter(BuildrootOverride.expired_date!=None)
else:
query = query.filter(BuildrootOverride.expired_date==None)
packages = data.get('packages')
if packages is not None:
query = query.join(BuildrootOverride.build).join(Build.package)
query = query.filter(or_(*[Package.name==pkg.name for pkg in packages]))
releases = data.get('releases')
if releases is not None:
query = query.join(BuildrootOverride.build).join(Build.release)
query = query.filter(or_(*[Release.name==r.name for r in releases]))
like = data.get('like')
if like is not None:
query = query.join(BuildrootOverride.build)
query = query.filter(or_(*[
Build.nvr.like('%%%s%%' % like)
]))
submitter = data.get('user')
if submitter is not None:
query = query.filter(BuildrootOverride.submitter==submitter)
query = query.order_by(BuildrootOverride.submission_date.desc())
# We can't use ``query.count()`` here because it is naive with respect to
# all the joins that we're doing above.
count_query = query.with_labels().statement\
.with_only_columns([func.count(distinct(BuildrootOverride.id))])\
.order_by(None)
total = db.execute(count_query).scalar()
page = data.get('page')
rows_per_page = data.get('rows_per_page')
pages = int(math.ceil(total / float(rows_per_page)))
query = query.offset(rows_per_page * (page - 1)).limit(rows_per_page)
return dict(
overrides=query.all(),
page=page,
pages=pages,
rows_per_page=rows_per_page,
total=total,
chrome=data.get('chrome'),
display_user=data.get('display_user'),
)
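# Illustrative note; not part of the original service code. The filters above
# map directly onto query-string parameters, e.g. a hypothetical request like
# ``GET /overrides/?packages=kernel&expired=false&user=alice&page=2`` narrows
# the listing by package, expiry state and submitter, while ``page`` and
# ``rows_per_page`` drive the pagination block computed above.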
@overrides.post(schema=bodhi.schemas.SaveOverrideSchema,
acl=bodhi.security.packagers_allowed_acl,
accept=("application/json", "text/json"), renderer='json',
error_handler=bodhi.services.errors.json_handler,
validators=(
validate_override_builds,
validate_expiration_date,
))
@overrides.post(schema=bodhi.schemas.SaveOverrideSchema,
acl=bodhi.security.packagers_allowed_acl,
accept=("application/javascript"), renderer="jsonp",
error_handler=bodhi.services.errors.jsonp_handler,
validators=(
validate_override_builds,
validate_expiration_date,
))
def save_override(request):
"""Save a buildroot override
This entails either creating a new buildroot override, or editing an
existing one. To edit an existing buildroot override, the buildroot
override's original id needs to be specified in the ``edited`` parameter.
"""
data = request.validated
edited = data.pop("edited")
caveats = []
try:
submitter = User.get(request.user.name, request.db)
if edited is None:
builds = data['builds']
overrides = []
if len(builds) > 1:
caveats.append({
'name': 'nvrs',
'description': 'Your override submission was '
'split into %i.' % len(builds)
})
for build in builds:
log.info("Creating a new buildroot override: %s" % build.nvr)
overrides.append(BuildrootOverride.new(
request,
build=build,
submitter=submitter,
notes=data['notes'],
expiration_date=data['expiration_date'],
))
if len(builds) > 1:
result = dict(overrides=overrides)
else:
result = overrides[0]
else:
log.info("Editing buildroot override: %s" % edited)
edited = Build.get(edited, request.db)
if edited is None:
request.errors.add('body', 'edited', 'No such build')
return
result = BuildrootOverride.edit(
request, edited=edited, submitter=submitter,
notes=data["notes"], expired=data["expired"],
expiration_date=data["expiration_date"]
)
if not result:
# Some error inside .edit(...)
return
except Exception as e:
log.exception(e)
request.errors.add('body', 'override',
'Unable to save buildroot override: %s' % e)
return
if not isinstance(result, dict):
result = result.__json__()
result['caveats'] = caveats
return result
| gpl-2.0 | -112,274,493,101,357,650 | 35.970588 | 81 | 0.603364 | false |
phdowling/scikit-learn | sklearn/utils/tests/test_linear_assignment.py | 421 | 1349 | # Author: Brian M. Clapper, G Varoquaux
# License: BSD
import numpy as np
# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian
def test_hungarian():
matrices = [
# Square
([[400, 150, 400],
[400, 450, 600],
[300, 225, 300]],
850 # expected cost
),
# Rectangular variant
([[400, 150, 400, 1],
[400, 450, 600, 2],
[300, 225, 300, 3]],
452 # expected cost
),
# Square
([[10, 10, 8],
[9, 8, 1],
[9, 7, 4]],
18
),
# Rectangular variant
([[10, 10, 8, 11],
[9, 8, 1, 1],
[9, 7, 4, 10]],
15
),
# n == 2, m == 0 matrix
([[], []],
0
),
]
for cost_matrix, expected_total in matrices:
cost_matrix = np.array(cost_matrix)
indexes = _hungarian(cost_matrix)
total_cost = 0
for r, c in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
indexes = _hungarian(cost_matrix.T)
total_cost = 0
for c, r in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
| bsd-3-clause | -2,570,489,902,927,458,000 | 21.483333 | 55 | 0.447739 | false |
lekum/ansible-modules-extras | packaging/os/opkg.py | 114 | 5208 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Patrick Pelletier <[email protected]>
# Based on pacman (Afterburn) and pkgin (Shaun Zinck) modules
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: opkg
author: "Patrick Pelletier (@skinp)"
short_description: Package manager for OpenWrt
description:
- Manages OpenWrt packages
version_added: "1.1"
options:
name:
description:
- name of package to install/remove
required: true
state:
description:
- state of the package
choices: [ 'present', 'absent' ]
required: false
default: present
force:
description:
- opkg --force parameter used
choices: ["", "depends", "maintainer", "reinstall", "overwrite", "downgrade", "space", "postinstall", "remove", "checksum", "removal-of-dependent-packages"]
required: false
default: absent
version_added: "2.0"
update_cache:
description:
- update the package db first
required: false
default: "no"
choices: [ "yes", "no" ]
notes: []
'''
EXAMPLES = '''
- opkg: name=foo state=present
- opkg: name=foo state=present update_cache=yes
- opkg: name=foo state=absent
- opkg: name=foo,bar state=absent
- opkg: name=foo state=present force=overwrite
'''
import pipes
def update_package_db(module, opkg_path):
""" Updates packages list. """
rc, out, err = module.run_command("%s update" % opkg_path)
if rc != 0:
module.fail_json(msg="could not update package db")
def query_package(module, opkg_path, name, state="present"):
""" Returns whether a package is installed or not. """
if state == "present":
rc, out, err = module.run_command("%s list-installed | grep -q \"^%s \"" % (pipes.quote(opkg_path), pipes.quote(name)), use_unsafe_shell=True)
if rc == 0:
return True
return False
def remove_packages(module, opkg_path, packages):
""" Uninstalls one or more packages if installed. """
p = module.params
force = p["force"]
if force:
force = "--force-%s" % force
remove_c = 0
# Using a for loop incase of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
if not query_package(module, opkg_path, package):
continue
rc, out, err = module.run_command("%s remove %s %s" % (opkg_path, force, package))
if query_package(module, opkg_path, package):
module.fail_json(msg="failed to remove %s: %s" % (package, out))
remove_c += 1
if remove_c > 0:
module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, opkg_path, packages):
""" Installs one or more packages if not already installed. """
p = module.params
force = p["force"]
if force:
force = "--force-%s" % force
install_c = 0
for package in packages:
if query_package(module, opkg_path, package):
continue
rc, out, err = module.run_command("%s install %s %s" % (opkg_path, force, package))
if not query_package(module, opkg_path, package):
module.fail_json(msg="failed to install %s: %s" % (package, out))
install_c += 1
if install_c > 0:
module.exit_json(changed=True, msg="installed %s package(s)" % (install_c))
module.exit_json(changed=False, msg="package(s) already present")
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(aliases=["pkg"], required=True),
state = dict(default="present", choices=["present", "installed", "absent", "removed"]),
force = dict(default="", choices=["", "depends", "maintainer", "reinstall", "overwrite", "downgrade", "space", "postinstall", "remove", "checksum", "removal-of-dependent-packages"]),
update_cache = dict(default="no", aliases=["update-cache"], type='bool')
)
)
opkg_path = module.get_bin_path('opkg', True, ['/bin'])
p = module.params
if p["update_cache"]:
update_package_db(module, opkg_path)
pkgs = p["name"].split(",")
if p["state"] in ["present", "installed"]:
install_packages(module, opkg_path, pkgs)
elif p["state"] in ["absent", "removed"]:
remove_packages(module, opkg_path, pkgs)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 | 1,351,075,541,198,791,400 | 29.816568 | 194 | 0.625384 | false |
damien-dg/horizon | openstack_dashboard/dashboards/project/stacks/mappings.py | 12 | 15003 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template.defaultfilters import register # noqa
from django.utils import html
from django.utils import safestring
import six
import six.moves.urllib.parse as urlparse
from openstack_dashboard.api import swift
LOG = logging.getLogger(__name__)
resource_urls = {
"AWS::AutoScaling::AutoScalingGroup": {
'link': 'horizon:project:stacks:detail'},
"AWS::CloudFormation::Stack": {
'link': 'horizon:project:stacks:detail'},
"AWS::EC2::Instance": {
'link': 'horizon:project:instances:detail'},
"AWS::EC2::InternetGateway": {
'link': 'horizon:project:networks:ports:detail'},
"AWS::EC2::NetworkInterface": {
'link': 'horizon:project:networks:ports:detail'},
"AWS::EC2::RouteTable": {
'link': 'horizon:project:routers:detail'},
"AWS::EC2::SecurityGroup": {
'link': 'horizon:project:access_and_security:index'},
"AWS::EC2::Subnet": {
'link': 'horizon:project:networks:subnets:detail'},
"AWS::EC2::Volume": {
'link': 'horizon:project:volumes:volumes:detail'},
"AWS::EC2::VPC": {
'link': 'horizon:project:networks:detail'},
"AWS::S3::Bucket": {
'link': 'horizon:project:containers:index'},
"OS::Cinder::Volume": {
'link': 'horizon:project:volumes:volumes:detail'},
"OS::Heat::AccessPolicy": {
'link': 'horizon:project:stacks:detail'},
"OS::Heat::AutoScalingGroup": {
'link': 'horizon:project:stacks:detail'},
"OS::Heat::CloudConfig": {
'link': 'horizon:project:stacks:detail'},
"OS::Neutron::Firewall": {
'link': 'horizon:project:firewalls:firewalldetails'},
"OS::Neutron::FirewallPolicy": {
'link': 'horizon:project:firewalls:policydetails'},
"OS::Neutron::FirewallRule": {
'link': 'horizon:project:firewalls:ruledetails'},
"OS::Heat::HARestarter": {
'link': 'horizon:project:stacks:detail'},
"OS::Heat::InstanceGroup": {
'link': 'horizon:project:stacks:detail'},
"OS::Heat::MultipartMime": {
'link': 'horizon:project:stacks:detail'},
"OS::Heat::ResourceGroup": {
'link': 'horizon:project:stacks:detail'},
"OS::Heat::SoftwareConfig": {
'link': 'horizon:project:stacks:detail'},
"OS::Heat::StructuredConfig": {
'link': 'horizon:project:stacks:detail'},
"OS::Heat::StructuredDeployment": {
'link': 'horizon:project:stacks:detail'},
"OS::Heat::Stack": {
'link': 'horizon:project:stacks:detail'},
"OS::Heat::WaitCondition": {
'link': 'horizon:project:stacks:detail'},
"OS::Heat::WaitConditionHandle": {
'link': 'horizon:project:stacks:detail'},
"OS::Neutron::HealthMonitor": {
'link': 'horizon:project:loadbalancers:monitordetails'},
"OS::Neutron::IKEPolicy": {
'link': 'horizon:project:vpn:ikepolicydetails'},
"OS::Neutron::IPsecPolicy": {
'link': 'horizon:project:vpn:ipsecpolicydetails'},
"OS::Neutron::IPsecSiteConnection": {
'link': 'horizon:project:vpn:ipsecsiteconnectiondetails'},
"OS::Neutron::Net": {
'link': 'horizon:project:networks:detail'},
"OS::Neutron::Pool": {
'link': 'horizon:project:loadbalancers:pooldetails'},
"OS::Neutron::PoolMember": {
'link': 'horizon:project:loadbalancers:memberdetails'},
"OS::Neutron::Port": {
'link': 'horizon:project:networks:ports:detail'},
"OS::Neutron::Router": {
'link': 'horizon:project:routers:detail'},
"OS::Neutron::Subnet": {
'link': 'horizon:project:networks:subnets:detail'},
"OS::Neutron::VPNService": {
'link': 'horizon:project:vpn:vpnservicedetails'},
"OS::Nova::KeyPair": {
'link': 'horizon:project:access_and_security:index'},
"OS::Nova::Server": {
'link': 'horizon:project:instances:detail'},
"OS::Swift::Container": {
'link': 'horizon:project:containers:index',
'format_pattern': '%s' + swift.FOLDER_DELIMITER},
}
def resource_to_url(resource):
if not resource or not resource.physical_resource_id:
return None
mapping = resource_urls.get(resource.resource_type, {})
try:
if 'link' not in mapping:
return None
format_pattern = mapping.get('format_pattern') or '%s'
rid = format_pattern % resource.physical_resource_id
url = reverse(mapping['link'], args=(rid,))
except Exception as e:
LOG.exception(e)
return None
return url
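# Illustrative note (added comment, not in the upstream module): for a resource
# of type "OS::Nova::Server", resource_to_url() reverses
# 'horizon:project:instances:detail' with the resource's physical_resource_id
# as the single URL argument; types missing from resource_urls yield None.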
@register.filter
def stack_output(output):
if not output:
return u''
if isinstance(output, six.string_types):
parts = urlparse.urlsplit(output)
if parts.netloc and parts.scheme in ('http', 'https'):
url = html.escape(output)
safe_link = u'<a href="%s" target="_blank">%s</a>' % (url, url)
return safestring.mark_safe(safe_link)
if isinstance(output, dict) or isinstance(output, list):
output = json.dumps(output, indent=2)
return safestring.mark_safe(u'<pre>%s</pre>' % html.escape(output))
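# Illustrative note (added comment): a plain URL output such as
# "http://example.com" is rendered as a safe <a target="_blank"> link, while
# dict/list outputs are pretty-printed with json.dumps and wrapped in <pre>.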
static_url = getattr(settings, "STATIC_URL", "/static/")
resource_images = {
'LB_FAILED': static_url + 'dashboard/img/lb-red.svg',
'LB_DELETE': static_url + 'dashboard/img/lb-red.svg',
'LB_IN_PROGRESS': static_url + 'dashboard/img/lb-gray.gif',
'LB_INIT': static_url + 'dashboard/img/lb-gray.svg',
'LB_COMPLETE': static_url + 'dashboard/img/lb-green.svg',
'DB_FAILED': static_url + 'dashboard/img/db-red.svg',
'DB_DELETE': static_url + 'dashboard/img/db-red.svg',
'DB_IN_PROGRESS': static_url + 'dashboard/img/db-gray.gif',
'DB_INIT': static_url + 'dashboard/img/db-gray.svg',
'DB_COMPLETE': static_url + 'dashboard/img/db-green.svg',
'STACK_FAILED': static_url + 'dashboard/img/stack-red.svg',
'STACK_DELETE': static_url + 'dashboard/img/stack-red.svg',
'STACK_IN_PROGRESS': static_url + 'dashboard/img/stack-gray.gif',
'STACK_INIT': static_url + 'dashboard/img/stack-gray.svg',
'STACK_COMPLETE': static_url + 'dashboard/img/stack-green.svg',
'SERVER_FAILED': static_url + 'dashboard/img/server-red.svg',
'SERVER_DELETE': static_url + 'dashboard/img/server-red.svg',
'SERVER_IN_PROGRESS': static_url + 'dashboard/img/server-gray.gif',
'SERVER_INIT': static_url + 'dashboard/img/server-gray.svg',
'SERVER_COMPLETE': static_url + 'dashboard/img/server-green.svg',
'ALARM_FAILED': static_url + 'dashboard/img/alarm-red.svg',
'ALARM_DELETE': static_url + 'dashboard/img/alarm-red.svg',
'ALARM_IN_PROGRESS': static_url + 'dashboard/img/alarm-gray.gif',
'ALARM_INIT': static_url + 'dashboard/img/alarm-gray.svg',
'ALARM_COMPLETE': static_url + 'dashboard/img/alarm-green.svg',
'VOLUME_FAILED': static_url + 'dashboard/img/volume-red.svg',
'VOLUME_DELETE': static_url + 'dashboard/img/volume-red.svg',
'VOLUME_IN_PROGRESS': static_url + 'dashboard/img/volume-gray.gif',
'VOLUME_INIT': static_url + 'dashboard/img/volume-gray.svg',
'VOLUME_COMPLETE': static_url + 'dashboard/img/volume-green.svg',
'IMAGE_FAILED': static_url + 'dashboard/img/image-red.svg',
'IMAGE_DELETE': static_url + 'dashboard/img/image-red.svg',
'IMAGE_IN_PROGRESS': static_url + 'dashboard/img/image-gray.gif',
'IMAGE_INIT': static_url + 'dashboard/img/image-gray.svg',
'IMAGE_COMPLETE': static_url + 'dashboard/img/image-green.svg',
'WAIT_FAILED': static_url + 'dashboard/img/wait-red.svg',
'WAIT_DELETE': static_url + 'dashboard/img/wait-red.svg',
'WAIT_IN_PROGRESS': static_url + 'dashboard/img/wait-gray.gif',
'WAIT_INIT': static_url + 'dashboard/img/wait-gray.svg',
'WAIT_COMPLETE': static_url + 'dashboard/img/wait-green.svg',
'FIREWALL_FAILED': static_url + 'dashboard/img/firewall-red.svg',
'FIREWALL_DELETE': static_url + 'dashboard/img/firewall-red.svg',
'FIREWALL_IN_PROGRESS': static_url + 'dashboard/img/firewall-gray.gif',
'FIREWALL_INIT': static_url + 'dashboard/img/firewall-gray.svg',
'FIREWALL_COMPLETE': static_url + 'dashboard/img/firewall-green.svg',
'FLOATINGIP_FAILED': static_url + 'dashboard/img/floatingip-red.svg',
'FLOATINGIP_DELETE': static_url + 'dashboard/img/floatingip-red.svg',
'FLOATINGIP_IN_PROGRESS': static_url + 'dashboard/img/floatingip-gray.gif',
'FLOATINGIP_INIT': static_url + 'dashboard/img/floatingip-gray.svg',
'FLOATINGIP_COMPLETE': static_url + 'dashboard/img/floatingip-green.svg',
'ROUTER_FAILED': static_url + 'dashboard/img/router-red.svg',
'ROUTER_DELETE': static_url + 'dashboard/img/router-red.svg',
'ROUTER_IN_PROGRESS': static_url + 'dashboard/img/router-gray.gif',
'ROUTER_INIT': static_url + 'dashboard/img/router-gray.svg',
'ROUTER_COMPLETE': static_url + 'dashboard/img/router-green.svg',
'POLICY_FAILED': static_url + 'dashboard/img/policy-red.svg',
'POLICY_DELETE': static_url + 'dashboard/img/policy-red.svg',
'POLICY_IN_PROGRESS': static_url + 'dashboard/img/policy-gray.gif',
'POLICY_INIT': static_url + 'dashboard/img/policy-gray.svg',
'POLICY_COMPLETE': static_url + 'dashboard/img/policy-green.svg',
'CONFIG_FAILED': static_url + 'dashboard/img/config-red.svg',
'CONFIG_DELETE': static_url + 'dashboard/img/config-red.svg',
'CONFIG_IN_PROGRESS': static_url + 'dashboard/img/config-gray.gif',
'CONFIG_INIT': static_url + 'dashboard/img/config-gray.svg',
'CONFIG_COMPLETE': static_url + 'dashboard/img/config-green.svg',
'NETWORK_FAILED': static_url + 'dashboard/img/network-red.svg',
'NETWORK_DELETE': static_url + 'dashboard/img/network-red.svg',
'NETWORK_IN_PROGRESS': static_url + 'dashboard/img/network-gray.gif',
'NETWORK_INIT': static_url + 'dashboard/img/network-gray.svg',
'NETWORK_COMPLETE': static_url + 'dashboard/img/network-green.svg',
'PORT_FAILED': static_url + 'dashboard/img/port-red.svg',
'PORT_DELETE': static_url + 'dashboard/img/port-red.svg',
'PORT_IN_PROGRESS': static_url + 'dashboard/img/port-gray.gif',
'PORT_INIT': static_url + 'dashboard/img/port-gray.svg',
'PORT_COMPLETE': static_url + 'dashboard/img/port-green.svg',
'SECURITYGROUP_FAILED': static_url + 'dashboard/img/securitygroup-red.svg',
'SECURITYGROUP_DELETE': static_url + 'dashboard/img/securitygroup-red.svg',
'SECURITYGROUP_IN_PROGRESS':
static_url + 'dashboard/img/securitygroup-gray.gif',
'SECURITYGROUP_INIT': static_url + 'dashboard/img/securitygroup-gray.svg',
'SECURITYGROUP_COMPLETE':
static_url + 'dashboard/img/securitygroup-green.svg',
'VPN_FAILED': static_url + 'dashboard/img/vpn-red.svg',
'VPN_DELETE': static_url + 'dashboard/img/vpn-red.svg',
'VPN_IN_PROGRESS': static_url + 'dashboard/img/vpn-gray.gif',
'VPN_INIT': static_url + 'dashboard/img/vpn-gray.svg',
'VPN_COMPLETE': static_url + 'dashboard/img/vpn-green.svg',
'FLAVOR_FAILED': static_url + 'dashboard/img/flavor-red.svg',
'FLAVOR_DELETE': static_url + 'dashboard/img/flavor-red.svg',
'FLAVOR_IN_PROGRESS': static_url + 'dashboard/img/flavor-gray.gif',
'FLAVOR_INIT': static_url + 'dashboard/img/flavor-gray.svg',
'FLAVOR_COMPLETE': static_url + 'dashboard/img/flavor-green.svg',
'KEYPAIR_FAILED': static_url + 'dashboard/img/keypair-red.svg',
'KEYPAIR_DELETE': static_url + 'dashboard/img/keypair-red.svg',
'KEYPAIR_IN_PROGRESS': static_url + 'dashboard/img/keypair-gray.gif',
'KEYPAIR_INIT': static_url + 'dashboard/img/keypair-gray.svg',
'KEYPAIR_COMPLETE': static_url + 'dashboard/img/keypair-green.svg',
'UNKNOWN_FAILED': static_url + 'dashboard/img/unknown-red.svg',
'UNKNOWN_DELETE': static_url + 'dashboard/img/unknown-red.svg',
'UNKNOWN_IN_PROGRESS': static_url + 'dashboard/img/unknown-gray.gif',
'UNKNOWN_INIT': static_url + 'dashboard/img/unknown-gray.svg',
'UNKNOWN_COMPLETE': static_url + 'dashboard/img/unknown-green.svg',
}
resource_types = {
# LB
'LoadBalance': 'LB',
'HealthMonitor': 'LB',
'PoolMember': 'LB',
'Pool': 'LB',
# DB
'DBInstance': 'DB',
'Database': 'DB',
# SERVER
'Instance': 'SERVER',
'Server': 'SERVER',
# ALARM
'Alarm': 'ALARM',
'CombinationAlarm': 'ALARM',
'CWLiteAlarm': 'ALARM',
# VOLUME
'Volume': 'VOLUME',
'VolumeAttachment': 'VOLUME',
# STACK
'stack': 'STACK',
'AutoScalingGroup': 'STACK',
'InstanceGroup': 'STACK',
'ServerGroup': 'STACK',
'ResourceGroup': 'STACK',
# IMAGE
'Image': 'IMAGE',
# WAIT
'WaitCondition': 'WAIT',
'WaitConditionHandle': 'WAIT',
'UpdateWaitConditionHandle': 'WAIT',
# FIREWALL
'Firewall': 'FIREWALL',
'FirewallPolicy': 'FIREWALL',
'FirewallRule': 'FIREWALL',
# FLOATINGIP
'FloatingIP': 'FLOATINGIP',
'FloatingIPAssociation': 'FLOATINGIP',
# ROUTER
'Router': 'ROUTER',
'RouterGateway': 'ROUTER',
'RouterInterface': 'ROUTER',
# POLICY
'ScalingPolicy': 'POLICY',
# CONFIG
'CloudConfig': 'CONFIG',
'MultipartMime': 'CONFIG',
'SoftwareConfig': 'CONFIG',
'SoftwareDeployment': 'CONFIG',
'StructuredConfig': 'CONFIG',
'StructuredDeployment': 'CONFIG',
# NETWORK
'Net': 'NETWORK',
'Subnet': 'NETWORK',
'NetworkGateway': 'NETWORK',
'ProviderNet': 'NETWORK',
# PORT
'Port': 'PORT',
# SECURITYGROUP
'SecurityGroup': 'SECURITYGROUP',
# VPN
'VPNService': 'VPN',
# FLAVOR
'Flavor': 'FLAVOR',
# KEYPAIR
'KeyPair': 'KEYPAIR',
}
def get_resource_type(type):
for key, value in six.iteritems(resource_types):
if key in type:
return value
return 'UNKNOWN'
def get_resource_status(status):
if ('IN_PROGRESS' in status):
return 'IN_PROGRESS'
elif ('FAILED' in status):
return 'FAILED'
elif ('DELETE' in status):
return 'DELETE'
elif ('INIT' in status):
return 'INIT'
else:
return 'COMPLETE'
def get_resource_image(status, type):
"""Sets the image url and in_progress action sw based on status."""
resource_type = get_resource_type(type)
resource_status = get_resource_status(status)
resource_state = resource_type + "_" + resource_status
    return resource_images.get(resource_state)
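# Illustrative sketch (added comment): a hypothetical resource of type
# "OS::Nova::Server" in state "CREATE_IN_PROGRESS" resolves as
#     get_resource_type("OS::Nova::Server")      -> 'SERVER'
#     get_resource_status("CREATE_IN_PROGRESS")  -> 'IN_PROGRESS'
# so get_resource_image() returns resource_images['SERVER_IN_PROGRESS'],
# i.e. the gray "in progress" server image.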
| apache-2.0 | -4,652,282,223,652,116,000 | 41.381356 | 79 | 0.649937 | false |
putcn/Paddle | tools/aws_benchmarking/client/cluster_launcher.py | 7 | 12777 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import time
import math
import logging
import copy
import netaddr
import boto3
import namesgenerator
import paramiko
from scp import SCPClient
import requests
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--key_name', type=str, default="", help="required, key pair name")
parser.add_argument(
'--security_group_id',
type=str,
default="",
help="required, the security group id associated with your VPC")
parser.add_argument(
'--vpc_id',
type=str,
default="",
help="The VPC in which you wish to run test")
parser.add_argument(
'--subnet_id',
type=str,
default="",
help="The Subnet_id in which you wish to run test")
parser.add_argument(
'--pserver_instance_type',
type=str,
default="c5.2xlarge",
help="your pserver instance type, c5.2xlarge by default")
parser.add_argument(
'--trainer_instance_type',
type=str,
default="p2.8xlarge",
help="your trainer instance type, p2.8xlarge by default")
parser.add_argument(
'--task_name',
type=str,
default="",
help="the name you want to identify your job")
parser.add_argument(
'--pserver_image_id',
type=str,
default="ami-da2c1cbf",
help="ami id for system image, default one has nvidia-docker ready, \
use ami-1ae93962 for us-east-2")
parser.add_argument(
'--pserver_command',
type=str,
default="",
help="pserver start command, format example: python,vgg.py,batch_size:128,is_local:yes"
)
parser.add_argument(
'--trainer_image_id',
type=str,
default="ami-da2c1cbf",
help="ami id for system image, default one has nvidia-docker ready, \
use ami-1ae93962 for us-west-2")
parser.add_argument(
'--trainer_command',
type=str,
default="",
help="trainer start command, format example: python,vgg.py,batch_size:128,is_local:yes"
)
parser.add_argument(
'--availability_zone',
type=str,
default="us-east-2a",
help="aws zone id to place ec2 instances")
parser.add_argument(
'--trainer_count', type=int, default=1, help="Trainer count")
parser.add_argument(
'--pserver_count', type=int, default=1, help="Pserver count")
parser.add_argument(
'--action', type=str, default="create", help="create|cleanup|status")
parser.add_argument('--pem_path', type=str, help="private key file")
parser.add_argument(
'--pserver_port', type=str, default="5436", help="pserver port")
parser.add_argument(
'--docker_image', type=str, default="busybox", help="training docker image")
parser.add_argument(
'--master_server_port', type=int, default=5436, help="master server port")
parser.add_argument(
'--master_server_public_ip', type=str, help="master server public ip")
parser.add_argument(
'--master_docker_image',
type=str,
default="putcn/paddle_aws_master:latest",
help="master docker image id")
parser.add_argument(
'--no_clean_up',
type=str2bool,
default=False,
help="whether to clean up after training")
args = parser.parse_args()
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
ec2client = boto3.client('ec2')
def print_arguments():
print('----------- Configuration Arguments -----------')
for arg, value in sorted(vars(args).iteritems()):
print('%s: %s' % (arg, value))
print('------------------------------------------------')
def create_subnet():
# if no vpc id provided, list vpcs
logging.info("start creating subnet")
if not args.vpc_id:
logging.info("no vpc provided, trying to find the default one")
vpcs_desc = ec2client.describe_vpcs(
Filters=[{
"Name": "isDefault",
"Values": ["true", ]
}], )
if len(vpcs_desc["Vpcs"]) == 0:
raise ValueError('No default VPC')
args.vpc_id = vpcs_desc["Vpcs"][0]["VpcId"]
vpc_cidrBlock = vpcs_desc["Vpcs"][0]["CidrBlock"]
logging.info("default vpc fount with id %s and CidrBlock %s" %
(args.vpc_id, vpc_cidrBlock))
if not vpc_cidrBlock:
logging.info("trying to find cidrblock for vpc")
vpcs_desc = ec2client.describe_vpcs(
Filters=[{
"Name": "vpc-id",
"Values": [args.vpc_id, ],
}], )
if len(vpcs_desc["Vpcs"]) == 0:
raise ValueError('No VPC found')
vpc_cidrBlock = vpcs_desc["Vpcs"][0]["CidrBlock"]
logging.info("cidrblock for vpc is %s" % vpc_cidrBlock)
# list subnets in vpc in order to create a new one
logging.info("trying to find ip blocks for new subnet")
subnets_desc = ec2client.describe_subnets(
Filters=[{
"Name": "vpc-id",
"Values": [args.vpc_id, ],
}], )
ips_taken = []
for subnet_dec in subnets_desc["Subnets"]:
ips_taken.append(subnet_dec["CidrBlock"])
ip_blocks_avaliable = netaddr.IPSet(
[vpc_cidrBlock]) ^ netaddr.IPSet(ips_taken)
# adding 10 addresses as buffer
cidr_prefix = 32 - math.ceil(
math.log(args.pserver_count + args.trainer_count + 10, 2))
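    # Worked example (added comment): with 1 pserver, 1 trainer and the
    # 10-address buffer, log2(12) is ~3.58 and ceil() gives 4, so
    # cidr_prefix = 32 - 4 = 28, i.e. a /28 subnet with 16 addresses.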
if cidr_prefix <= 16:
raise ValueError('Too many nodes to fit in current VPC')
    subnet_cidr = None
    for ipnetwork in ip_blocks_avaliable.iter_cidrs():
try:
subnet_cidr = ipnetwork.subnet(int(cidr_prefix)).next()
logging.info("subnet ip block found %s" % (subnet_cidr))
break
except Exception:
pass
if not subnet_cidr:
raise ValueError(
            'No available subnet to fit required nodes in current VPC')
logging.info("trying to create subnet")
subnet_desc = ec2client.create_subnet(
CidrBlock=str(subnet_cidr),
VpcId=args.vpc_id,
AvailabilityZone=args.availability_zone)
subnet_id = subnet_desc["Subnet"]["SubnetId"]
subnet_waiter = ec2client.get_waiter('subnet_available')
# sleep for 1s before checking its state
time.sleep(1)
subnet_waiter.wait(SubnetIds=[subnet_id, ])
logging.info("subnet created")
logging.info("adding tags to newly created subnet")
ec2client.create_tags(
Resources=[subnet_id, ],
Tags=[{
"Key": "Task_name",
'Value': args.task_name
}])
return subnet_id
def run_instances(image_id, instance_type, count=1, role="MASTER", cmd=""):
response = ec2client.run_instances(
ImageId=image_id,
InstanceType=instance_type,
MaxCount=count,
MinCount=count,
UserData=cmd,
DryRun=False,
InstanceInitiatedShutdownBehavior="stop",
KeyName=args.key_name,
Placement={'AvailabilityZone': args.availability_zone},
NetworkInterfaces=[{
'DeviceIndex': 0,
'SubnetId': args.subnet_id,
"AssociatePublicIpAddress": True,
'Groups': args.security_group_ids
}],
TagSpecifications=[{
'ResourceType': "instance",
'Tags': [{
"Key": 'Task_name',
"Value": args.task_name + "_master"
}, {
"Key": 'Role',
"Value": role
}]
}])
instance_ids = []
for instance in response["Instances"]:
instance_ids.append(instance["InstanceId"])
if len(instance_ids) > 0:
logging.info(str(len(instance_ids)) + " instance(s) created")
else:
logging.info("no instance created")
#create waiter to make sure it's running
logging.info("waiting for instance to become accessible")
waiter = ec2client.get_waiter('instance_status_ok')
waiter.wait(
Filters=[{
"Name": "instance-status.status",
"Values": ["ok"]
}, {
"Name": "instance-status.reachability",
"Values": ["passed"]
}, {
"Name": "instance-state-name",
"Values": ["running"]
}],
InstanceIds=instance_ids)
instances_response = ec2client.describe_instances(InstanceIds=instance_ids)
return instances_response["Reservations"][0]["Instances"]
def generate_task_name():
return namesgenerator.get_random_name()
def init_args():
if not args.task_name:
args.task_name = generate_task_name()
logging.info("task name generated %s" % (args.task_name))
if not args.pem_path:
args.pem_path = os.path.expanduser("~") + "/" + args.key_name + ".pem"
if args.security_group_id:
args.security_group_ids = (args.security_group_id, )
def create():
init_args()
# create subnet
if not args.subnet_id:
args.subnet_id = create_subnet()
# create master node
master_instance_response = run_instances(
image_id="ami-7a05351f", instance_type="t2.nano")
logging.info("master server started")
args.master_server_public_ip = master_instance_response[0][
"PublicIpAddress"]
args.master_server_ip = master_instance_response[0]["PrivateIpAddress"]
logging.info("master server started, master_ip=%s, task_name=%s" %
(args.master_server_public_ip, args.task_name))
# cp config file and pems to master node
ssh_key = paramiko.RSAKey.from_private_key_file(args.pem_path)
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_client.connect(
hostname=args.master_server_public_ip, username="ubuntu", pkey=ssh_key)
with SCPClient(ssh_client.get_transport()) as scp:
scp.put(os.path.expanduser("~") + "/" + ".aws",
recursive=True,
remote_path='/home/ubuntu/')
scp.put(args.pem_path,
remote_path='/home/ubuntu/' + args.key_name + ".pem")
logging.info("credentials and pem copied to master")
# set arguments and start docker
kick_off_cmd = "docker run -d -v /home/ubuntu/.aws:/root/.aws/"
kick_off_cmd += " -v /home/ubuntu/" + args.key_name + ".pem:/root/" + args.key_name + ".pem"
kick_off_cmd += " -v /home/ubuntu/logs/:/root/logs/"
kick_off_cmd += " -p " + str(args.master_server_port) + ":" + str(
args.master_server_port)
kick_off_cmd += " " + args.master_docker_image
args_to_pass = copy.copy(args)
args_to_pass.action = "serve"
del args_to_pass.pem_path
del args_to_pass.security_group_ids
del args_to_pass.master_docker_image
del args_to_pass.master_server_public_ip
for arg, value in sorted(vars(args_to_pass).iteritems()):
if value:
kick_off_cmd += ' --%s %s' % (arg, value)
logging.info(kick_off_cmd)
stdin, stdout, stderr = ssh_client.exec_command(command=kick_off_cmd)
return_code = stdout.channel.recv_exit_status()
logging.info(return_code)
if return_code != 0:
raise Exception("Error while kicking off master")
logging.info(
"master server finished init process, visit %s to check master log" %
(get_master_web_url("/status")))
def cleanup():
print requests.post(get_master_web_url("/cleanup")).text
def status():
print requests.post(get_master_web_url("/status")).text
def get_master_web_url(path):
return "http://" + args.master_server_public_ip + ":" + str(
args.master_server_port) + path
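# Illustrative note (added comment): with the default --master_server_port of
# 5436, get_master_web_url("/status") yields
# http://<master_server_public_ip>:5436/status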
if __name__ == "__main__":
print_arguments()
if args.action == "create":
if not args.key_name or not args.security_group_id:
raise ValueError("key_name and security_group_id are required")
create()
elif args.action == "cleanup":
if not args.master_server_public_ip:
raise ValueError("master_server_public_ip is required")
cleanup()
elif args.action == "status":
if not args.master_server_public_ip:
raise ValueError("master_server_public_ip is required")
status()
| apache-2.0 | 7,141,779,924,866,873,000 | 29.787952 | 96 | 0.616498 | false |
siddhartha-chandra/i_cake_python | 20.py | 1 | 1719 | # You want to be able to access the largest element in a stack.
# You've already implemented this Stack class:
class Stack:
# initialize an empty list
def __init__(self):
self.items = []
# push a new item to the last index
def push(self, item):
self.items.append(item)
# remove the last item
def pop(self):
# if the stack is empty, return None
# (it would also be reasonable to throw an exception)
if not self.items:
return None
return self.items.pop()
# see what the last item is
def peek(self):
if not self.items:
return None
return self.items[-1]
# Now, use your Stack class to implement a new class MaxStack with a function get_max() that returns the largest element in the stack. get_max() should not remove the item.
#
# Your stacks will contain only integers.
class MaxStack:
def __init__(self):
self.stack = Stack()
self.maxes_stack = Stack()
# Add a new item to the top of our stack. If the item is greater
# than or equal to the last item in maxes_stack, it's
# the new max! So we'll add it to maxes_stack.
def push(self, item):
self.stack.push(item)
        if self.maxes_stack.peek() is None or item >= self.maxes_stack.peek():
self.maxes_stack.push(item)
# Remove and return the top item from our stack. If it equals
# the top item in maxes_stack, they must have been pushed in together.
# So we'll pop it out of maxes_stack too.
def pop(self):
item = self.stack.pop()
if item == self.maxes_stack.peek():
self.maxes_stack.pop()
return item
def get_max(self):
return self.maxes_stack.peek()
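# Minimal usage sketch (added for illustration; the expected values assume the
# classes defined above):
if __name__ == '__main__':
    max_stack = MaxStack()
    max_stack.push(5)
    max_stack.push(7)
    max_stack.push(2)
    print(max_stack.get_max())  # 7
    print(max_stack.pop())      # 2
    print(max_stack.pop())      # 7
    print(max_stack.get_max())  # 5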
| gpl-3.0 | 8,218,064,899,164,200,000 | 29.157895 | 172 | 0.62071 | false |
kellinm/anaconda | pyanaconda/ui/gui/xkl_wrapper.py | 3 | 15362 | #
# Copyright (C) 2012-2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Vratislav Podzimek <[email protected]>
#
"""
This module includes functions and classes for dealing with multiple layouts in
Anaconda. It wraps the libxklavier functionality to protect Anaconda from
dealing with its "nice" API that looks like a Lisp-influenced "good old C", and
it also wraps the systemd-localed functionality.
It provides a XklWrapper class with several methods that can be used for listing
and various modifications of keyboard layouts settings.
"""
import gi
gi.require_version("GdkX11", "3.0")
gi.require_version("Xkl", "1.0")
from gi.repository import GdkX11, Xkl
import threading
import gettext
from collections import namedtuple
from pyanaconda import flags
from pyanaconda import iutil
from pyanaconda.constants import DEFAULT_KEYBOARD
from pyanaconda.keyboard import join_layout_variant, parse_layout_variant, KeyboardConfigError, InvalidLayoutVariantSpec
from pyanaconda.ui.gui.utils import gtk_action_wait
import logging
log = logging.getLogger("anaconda")
Xkb_ = lambda x: gettext.translation("xkeyboard-config", fallback=True).gettext(x)
iso_ = lambda x: gettext.translation("iso_639", fallback=True).gettext(x)
# namedtuple for information about a keyboard layout (its language and description)
LayoutInfo = namedtuple("LayoutInfo", ["lang", "desc"])
class XklWrapperError(KeyboardConfigError):
"""Exception class for reporting libxklavier-related problems"""
pass
class XklWrapper(object):
"""
Class wrapping the libxklavier functionality
Use this class as a singleton class because it provides read-only data
    and initialization (that takes quite a lot of time) always reads the
    same data. It doesn't make sense to create multiple instances.
"""
_instance = None
_instance_lock = threading.Lock()
@staticmethod
def get_instance():
with XklWrapper._instance_lock:
if not XklWrapper._instance:
XklWrapper._instance = XklWrapper()
return XklWrapper._instance
def __init__(self):
#initialize Xkl-related stuff
display = GdkX11.x11_get_default_xdisplay()
self._engine = Xkl.Engine.get_instance(display)
self._rec = Xkl.ConfigRec()
if not self._rec.get_from_server(self._engine):
raise XklWrapperError("Failed to get configuration from server")
#X is probably initialized to the 'us' layout without any variant and
#since we want to add layouts with variants we need the layouts and
#variants lists to have the same length. Add "" padding to variants.
#See docstring of the add_layout method for details.
diff = len(self._rec.layouts) - len(self._rec.variants)
if diff > 0 and flags.can_touch_runtime_system("activate layouts"):
self._rec.set_variants(self._rec.variants + (diff * [""]))
if not self._rec.activate(self._engine):
# failed to activate layouts given e.g. by a kickstart (may be
# invalid)
lay_var_str = ",".join(map(join_layout_variant,
self._rec.layouts,
self._rec.variants))
log.error("Failed to activate layouts: '%s', "
"falling back to default %s", lay_var_str, DEFAULT_KEYBOARD)
self._rec.set_layouts([DEFAULT_KEYBOARD])
self._rec.set_variants([""])
if not self._rec.activate(self._engine):
# failed to activate even the default layout, something is
# really wrong
raise XklWrapperError("Failed to initialize layouts")
#needed also for Gkbd.KeyboardDrawingDialog
self.configreg = Xkl.ConfigRegistry.get_instance(self._engine)
self.configreg.load(False)
self._layout_infos = dict()
self._layout_infos_lock = threading.RLock()
self._switch_opt_infos = dict()
self._switch_opt_infos_lock = threading.RLock()
#this might take quite a long time
self.configreg.foreach_language(self._get_language_variants, None)
self.configreg.foreach_country(self._get_country_variants, None)
#'grp' means that we want layout (group) switching options
self.configreg.foreach_option('grp', self._get_switch_option, None)
def _get_lang_variant(self, c_reg, item, subitem, lang):
if subitem:
name = item.get_name() + " (" + subitem.get_name() + ")"
description = subitem.get_description()
else:
name = item.get_name()
description = item.get_description()
#if this layout has already been added for some other language,
#do not add it again (would result in duplicates in our lists)
if name not in self._layout_infos:
with self._layout_infos_lock:
self._layout_infos[name] = LayoutInfo(lang, description)
def _get_country_variant(self, c_reg, item, subitem, country):
if subitem:
name = item.get_name() + " (" + subitem.get_name() + ")"
description = subitem.get_description()
else:
name = item.get_name()
description = item.get_description()
# if the layout was not added with any language, add it with a country
if name not in self._layout_infos:
with self._layout_infos_lock:
self._layout_infos[name] = LayoutInfo(country, description)
def _get_language_variants(self, c_reg, item, user_data=None):
lang_name, lang_desc = item.get_name(), item.get_description()
c_reg.foreach_language_variant(lang_name, self._get_lang_variant, lang_desc)
def _get_country_variants(self, c_reg, item, user_data=None):
country_name, country_desc = item.get_name(), item.get_description()
c_reg.foreach_country_variant(country_name, self._get_country_variant,
country_desc)
def _get_switch_option(self, c_reg, item, user_data=None):
"""Helper function storing layout switching options in foreach cycle"""
desc = item.get_description()
name = item.get_name()
with self._switch_opt_infos_lock:
self._switch_opt_infos[name] = desc
def get_current_layout(self):
"""
Get current activated X layout and variant
:return: current activated X layout and variant (e.g. "cz (qwerty)")
"""
# ported from the widgets/src/LayoutIndicator.c code
self._engine.start_listen(Xkl.EngineListenModes.TRACK_KEYBOARD_STATE)
state = self._engine.get_current_state()
cur_group = state.group
num_groups = self._engine.get_num_groups()
# BUG?: if the last layout in the list is activated and removed,
# state.group may be equal to n_groups
if cur_group >= num_groups:
cur_group = num_groups - 1
layout = self._rec.layouts[cur_group] # pylint: disable=unsubscriptable-object
try:
variant = self._rec.variants[cur_group] # pylint: disable=unsubscriptable-object
except IndexError:
# X server may have forgotten to add the "" variant for its default layout
variant = ""
self._engine.stop_listen(Xkl.EngineListenModes.TRACK_KEYBOARD_STATE)
return join_layout_variant(layout, variant)
def get_available_layouts(self):
"""A list of layouts"""
with self._layout_infos_lock:
return list(self._layout_infos.keys())
def get_switching_options(self):
"""Method returning list of available layout switching options"""
with self._switch_opt_infos_lock:
return list(self._switch_opt_infos.keys())
def get_layout_variant_description(self, layout_variant, with_lang=True, xlated=True):
"""
Get description of the given layout-variant.
:param layout_variant: layout-variant specification (e.g. 'cz (qwerty)')
:type layout_variant: str
:param with_lang: whether to include language of the layout-variant (if defined)
in the description or not
:type with_lang: bool
        :param xlated: whether to return the translated or the English version of the description
:type xlated: bool
:return: description of the layout-variant specification (e.g. 'Czech (qwerty)')
:rtype: str
"""
layout_info = self._layout_infos[layout_variant]
# translate language and upcase its first letter, translate the
# layout-variant description
if xlated:
lang = iutil.upcase_first_letter(iso_(layout_info.lang))
description = Xkb_(layout_info.desc)
else:
lang = iutil.upcase_first_letter(layout_info.lang)
description = layout_info.desc
if with_lang and lang and not description.startswith(lang):
return "%s (%s)" % (lang, description)
else:
return description
def get_switch_opt_description(self, switch_opt):
"""
Get description of the given layout switching option.
:param switch_opt: switching option name/ID (e.g. 'grp:alt_shift_toggle')
:type switch_opt: str
:return: description of the layout switching option (e.g. 'Alt + Shift')
:rtype: str
"""
# translate the description of the switching option
return Xkb_(self._switch_opt_infos[switch_opt])
@gtk_action_wait
def activate_default_layout(self):
"""
Activates default layout (the first one in the list of configured
layouts).
"""
self._engine.lock_group(0)
def is_valid_layout(self, layout):
"""Return if given layout is valid layout or not"""
return layout in self._layout_infos
@gtk_action_wait
def add_layout(self, layout):
"""
Method that tries to add a given layout to the current X configuration.
The X layouts configuration is handled by two lists. A list of layouts
and a list of variants. Index-matching items in these lists (as if they
were zipped) are used for the construction of real layouts (e.g.
'cz (qwerty)').
:param layout: either 'layout' or 'layout (variant)'
:raise XklWrapperError: if the given layout is invalid or cannot be added
"""
try:
#we can get 'layout' or 'layout (variant)'
(layout, variant) = parse_layout_variant(layout)
except InvalidLayoutVariantSpec as ilverr:
raise XklWrapperError("Failed to add layout: %s" % ilverr)
        #do not add the same layout-variant combination multiple times
if (layout, variant) in list(zip(self._rec.layouts, self._rec.variants)):
return
self._rec.set_layouts(self._rec.layouts + [layout])
self._rec.set_variants(self._rec.variants + [variant])
if not self._rec.activate(self._engine):
raise XklWrapperError("Failed to add layout '%s (%s)'" % (layout,
variant))
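    # Illustrative note (added comment): with self._rec.layouts == ['us', 'cz']
    # and self._rec.variants == ['', 'qwerty'], the index-matched pairs describe
    # the real layouts 'us' and 'cz (qwerty)' mentioned in the docstring above.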
@gtk_action_wait
def remove_layout(self, layout):
"""
Method that tries to remove a given layout from the current X
configuration.
See also the documentation for the add_layout method.
:param layout: either 'layout' or 'layout (variant)'
:raise XklWrapperError: if the given layout cannot be removed
"""
#we can get 'layout' or 'layout (variant)'
(layout, variant) = parse_layout_variant(layout)
layouts_variants = list(zip(self._rec.layouts, self._rec.variants))
if not (layout, variant) in layouts_variants:
msg = "'%s (%s)' not in the list of added layouts" % (layout,
variant)
raise XklWrapperError(msg)
idx = layouts_variants.index((layout, variant))
new_layouts = self._rec.layouts[:idx] + self._rec.layouts[(idx + 1):] # pylint: disable=unsubscriptable-object
new_variants = self._rec.variants[:idx] + self._rec.variants[(idx + 1):] # pylint: disable=unsubscriptable-object
self._rec.set_layouts(new_layouts)
self._rec.set_variants(new_variants)
if not self._rec.activate(self._engine):
raise XklWrapperError("Failed to remove layout '%s (%s)'" % (layout,
variant))
@gtk_action_wait
def replace_layouts(self, layouts_list):
"""
Method that replaces the layouts defined in the current X configuration
with the new ones given.
:param layouts_list: list of layouts defined as either 'layout' or
'layout (variant)'
:raise XklWrapperError: if layouts cannot be replaced with the new ones
"""
new_layouts = list()
new_variants = list()
for layout_variant in layouts_list:
(layout, variant) = parse_layout_variant(layout_variant)
new_layouts.append(layout)
new_variants.append(variant)
self._rec.set_layouts(new_layouts)
self._rec.set_variants(new_variants)
if not self._rec.activate(self._engine):
msg = "Failed to replace layouts with: %s" % ",".join(layouts_list)
raise XklWrapperError(msg)
@gtk_action_wait
def set_switching_options(self, options):
"""
Method that sets options for layout switching. It replaces the old
options with the new ones.
:param options: layout switching options to be set
:type options: list or generator
:raise XklWrapperError: if the old options cannot be replaced with the
new ones
"""
#preserve old "non-switching options"
new_options = [opt for opt in self._rec.options if "grp:" not in opt] # pylint: disable=not-an-iterable
new_options += options
self._rec.set_options(new_options)
if not self._rec.activate(self._engine):
msg = "Failed to set switching options to: %s" % ",".join(options)
raise XklWrapperError(msg)
| gpl-2.0 | -6,848,959,228,302,923,000 | 37.891139 | 121 | 0.629931 | false |
broferek/ansible | lib/ansible/modules/notification/bearychat.py | 52 | 5908 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Jiangge Zhang <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = """
module: bearychat
short_description: Send BearyChat notifications
description:
- The M(bearychat) module sends notifications to U(https://bearychat.com)
via the Incoming Robot integration.
version_added: "2.4"
author: "Jiangge Zhang (@tonyseek)"
options:
url:
description:
- BearyChat WebHook URL. This authenticates you to the bearychat
service. It looks like
C(https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60).
required: true
text:
description:
- Message to send.
markdown:
description:
- If C(yes), text will be parsed as markdown.
default: 'yes'
type: bool
channel:
description:
- Channel to send the message to. If absent, the message goes to the
default channel selected by the I(url).
attachments:
description:
- Define a list of attachments. For more information, see
https://github.com/bearyinnovative/bearychat-tutorial/blob/master/robots/incoming.md#attachments
"""
EXAMPLES = """
- name: Send notification message via BearyChat
local_action:
module: bearychat
url: |
https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60
text: "{{ inventory_hostname }} completed"
- name: Send notification message via BearyChat all options
local_action:
module: bearychat
url: |
https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60
text: "{{ inventory_hostname }} completed"
markdown: no
channel: "#ansible"
attachments:
- title: "Ansible on {{ inventory_hostname }}"
text: "May the Force be with you."
color: "#ffffff"
images:
- http://example.com/index.png
"""
RETURN = """
msg:
description: execution result
returned: success
type: str
sample: "OK"
"""
try:
from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
HAS_URLPARSE = True
except Exception:
HAS_URLPARSE = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
def build_payload_for_bearychat(module, text, markdown, channel, attachments):
payload = {}
if text is not None:
payload['text'] = text
if markdown is not None:
payload['markdown'] = markdown
if channel is not None:
payload['channel'] = channel
if attachments is not None:
payload.setdefault('attachments', []).extend(
build_payload_for_bearychat_attachment(
module, item.get('title'), item.get('text'), item.get('color'),
item.get('images'))
for item in attachments)
payload = 'payload=%s' % module.jsonify(payload)
return payload
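# Illustrative sketch (added comment, assumed values): calling
# build_payload_for_bearychat(module, 'hi', True, '#ansible', None) produces a
# string of the form
#     payload={"text": "hi", "markdown": true, "channel": "#ansible"}
# where the JSON part comes from module.jsonify() (key order may differ).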
def build_payload_for_bearychat_attachment(module, title, text, color, images):
attachment = {}
if title is not None:
attachment['title'] = title
if text is not None:
attachment['text'] = text
if color is not None:
attachment['color'] = color
if images is not None:
target_images = attachment.setdefault('images', [])
if not isinstance(images, (list, tuple)):
images = [images]
for image in images:
if isinstance(image, dict) and 'url' in image:
image = {'url': image['url']}
elif hasattr(image, 'startswith') and image.startswith('http'):
image = {'url': image}
else:
module.fail_json(
msg="BearyChat doesn't have support for this kind of "
"attachment image")
target_images.append(image)
return attachment
def do_notify_bearychat(module, url, payload):
response, info = fetch_url(module, url, data=payload)
if info['status'] != 200:
url_info = urlparse(url)
obscured_incoming_webhook = urlunparse(
(url_info.scheme, url_info.netloc, '[obscured]', '', '', ''))
module.fail_json(
msg=" failed to send %s to %s: %s" % (
payload, obscured_incoming_webhook, info['msg']))
def main():
module = AnsibleModule(argument_spec={
'url': dict(type='str', required=True, no_log=True),
'text': dict(type='str'),
'markdown': dict(default='yes', type='bool'),
'channel': dict(type='str'),
'attachments': dict(type='list'),
})
if not HAS_URLPARSE:
module.fail_json(msg='urlparse is not installed')
url = module.params['url']
text = module.params['text']
markdown = module.params['markdown']
channel = module.params['channel']
attachments = module.params['attachments']
payload = build_payload_for_bearychat(
module, text, markdown, channel, attachments)
do_notify_bearychat(module, url, payload)
module.exit_json(msg="OK")
if __name__ == '__main__':
main()
| gpl-3.0 | -5,230,945,677,076,728,000 | 31.461538 | 104 | 0.640657 | false |
uwdata/termite-data-server | web2py/gluon/languages.py | 9 | 35572 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
| This file is part of the web2py Web Framework
| Copyrighted by Massimo Di Pierro <[email protected]>
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
| Plural subsystem is created by Vladyslav Kozlovskyy (Ukraine) <[email protected]>
Translation system
--------------------------------------------
"""
import os
import re
import sys
import pkgutil
import logging
from cgi import escape
from threading import RLock
try:
import copyreg as copy_reg # python 3
except ImportError:
import copy_reg # python 2
from gluon.portalocker import read_locked, LockedFile
from utf8 import Utf8
from gluon.fileutils import listdir
from gluon.cfs import getcfs
from gluon.html import XML, xmlescape
from gluon.contrib.markmin.markmin2html import render, markmin_escape
from string import maketrans
__all__ = ['translator', 'findT', 'update_all_languages']
ostat = os.stat
oslistdir = os.listdir
pjoin = os.path.join
pexists = os.path.exists
pdirname = os.path.dirname
isdir = os.path.isdir
DEFAULT_LANGUAGE = 'en'
DEFAULT_LANGUAGE_NAME = 'English'
# DEFAULT PLURAL-FORMS RULES:
# language doesn't use plural forms
DEFAULT_NPLURALS = 1
# only one singular/plural form is used
DEFAULT_GET_PLURAL_ID = lambda n: 0
# word is unchangeable
DEFAULT_CONSTRUCT_PLURAL_FORM = lambda word, plural_id: word
NUMBERS = (int, long, float)
# pattern to find T(blah blah blah) expressions
PY_STRING_LITERAL_RE = r'(?<=[^\w]T\()(?P<name>'\
+ r"[uU]?[rR]?(?:'''(?:[^']|'{1,2}(?!'))*''')|"\
+ r"(?:'(?:[^'\\]|\\.)*')|" + r'(?:"""(?:[^"]|"{1,2}(?!"))*""")|'\
+ r'(?:"(?:[^"\\]|\\.)*"))'
regex_translate = re.compile(PY_STRING_LITERAL_RE, re.DOTALL)
regex_param = re.compile(r'{(?P<s>.+?)}')
# pattern for a valid accept_language
regex_language = \
re.compile('([a-z]{2,3}(?:\-[a-z]{2})?(?:\-[a-z]{2})?)(?:[,;]|$)')
regex_langfile = re.compile('^[a-z]{2,3}(-[a-z]{2})?\.py$')
regex_backslash = re.compile(r"\\([\\{}%])")
regex_plural = re.compile('%({.+?})')
regex_plural_dict = re.compile('^{(?P<w>[^()[\]][^()[\]]*?)\((?P<n>[^()\[\]]+)\)}$') # %%{word(varname or number)}
regex_plural_tuple = re.compile(
'^{(?P<w>[^[\]()]+)(?:\[(?P<i>\d+)\])?}$') # %%{word[index]} or %%{word}
regex_plural_file = re.compile('^plural-[a-zA-Z]{2}(-[a-zA-Z]{2})?\.py$')
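# Illustrative note (added comment): regex_plural_dict matches markers such as
# '{book(n)}' (a word plus the variable or number it depends on), while
# regex_plural_tuple matches '{book[2]}' or a plain '{book}' used inside
# %%{...} plural tags.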
def is_writable():
""" returns True if and only if the filesystem is writable """
from gluon.settings import global_settings
return not global_settings.web2py_runtime_gae
def safe_eval(text):
if text.strip():
try:
import ast
return ast.literal_eval(text)
except ImportError:
return eval(text, {}, {})
return None
# used as default filter in translator.M()
def markmin(s):
def markmin_aux(m):
return '{%s}' % markmin_escape(m.group('s'))
return render(regex_param.sub(markmin_aux, s),
sep='br', autolinks=None, id_prefix='')
# UTF8 helper functions
def upper_fun(s):
return unicode(s, 'utf-8').upper().encode('utf-8')
def title_fun(s):
return unicode(s, 'utf-8').title().encode('utf-8')
def cap_fun(s):
return unicode(s, 'utf-8').capitalize().encode('utf-8')
ttab_in = maketrans("\\%{}", '\x1c\x1d\x1e\x1f')
ttab_out = maketrans('\x1c\x1d\x1e\x1f', "\\%{}")
# cache of translated messages:
# global_language_cache:
# { 'languages/xx.py':
# ( {"def-message": "xx-message",
# ...
# "def-message": "xx-message"}, lock_object )
# 'languages/yy.py': ( {dict}, lock_object )
# ...
# }
global_language_cache = {}
def get_from_cache(cache, val, fun):
lang_dict, lock = cache
lock.acquire()
try:
result = lang_dict.get(val)
finally:
lock.release()
if result:
return result
lock.acquire()
try:
result = lang_dict.setdefault(val, fun())
finally:
lock.release()
return result
def clear_cache(filename):
cache = global_language_cache.setdefault(
filename, ({}, RLock()))
lang_dict, lock = cache
lock.acquire()
try:
lang_dict.clear()
finally:
lock.release()
def read_dict_aux(filename):
lang_text = read_locked(filename).replace('\r\n', '\n')
clear_cache(filename)
try:
return safe_eval(lang_text) or {}
except Exception:
e = sys.exc_info()[1]
status = 'Syntax error in %s (%s)' % (filename, e)
logging.error(status)
return {'__corrupted__': status}
def read_dict(filename):
""" Returns dictionary with translation messages
"""
return getcfs('lang:' + filename, filename,
lambda: read_dict_aux(filename))
def read_possible_plural_rules():
"""
Creates list of all possible plural rules files
The result is cached in PLURAL_RULES dictionary to increase speed
"""
plurals = {}
try:
import gluon.contrib.plural_rules as package
for importer, modname, ispkg in pkgutil.iter_modules(package.__path__):
if len(modname) == 2:
module = __import__(package.__name__ + '.' + modname,
fromlist=[modname])
lang = modname
pname = modname + '.py'
nplurals = getattr(module, 'nplurals', DEFAULT_NPLURALS)
get_plural_id = getattr(
module, 'get_plural_id',
DEFAULT_GET_PLURAL_ID)
construct_plural_form = getattr(
module, 'construct_plural_form',
DEFAULT_CONSTRUCT_PLURAL_FORM)
plurals[lang] = (lang, nplurals, get_plural_id,
construct_plural_form)
except ImportError:
e = sys.exc_info()[1]
logging.warn('Unable to import plural rules: %s' % e)
return plurals
PLURAL_RULES = read_possible_plural_rules()
def read_possible_languages_aux(langdir):
def get_lang_struct(lang, langcode, langname, langfile_mtime):
if lang == 'default':
real_lang = langcode.lower()
else:
real_lang = lang
(prules_langcode,
nplurals,
get_plural_id,
construct_plural_form
) = PLURAL_RULES.get(real_lang[:2], ('default',
DEFAULT_NPLURALS,
DEFAULT_GET_PLURAL_ID,
DEFAULT_CONSTRUCT_PLURAL_FORM))
if prules_langcode != 'default':
(pluraldict_fname,
pluraldict_mtime) = plurals.get(real_lang,
plurals.get(real_lang[:2],
('plural-%s.py' % real_lang, 0)))
else:
pluraldict_fname = None
pluraldict_mtime = 0
return (langcode, # language code from !langcode!
langname,
# language name in national spelling from !langname!
langfile_mtime, # m_time of language file
                pluraldict_fname, # name of plural dictionary file or None (when default.py does not exist)
                pluraldict_mtime, # m_time of plural dictionary file or 0 if the file does not exist
prules_langcode, # code of plural rules language or 'default'
nplurals, # nplurals for current language
get_plural_id, # get_plural_id() for current language
construct_plural_form) # construct_plural_form() for current language
plurals = {}
flist = oslistdir(langdir) if isdir(langdir) else []
# scan languages directory for plural dict files:
for pname in flist:
if regex_plural_file.match(pname):
plurals[pname[7:-3]] = (pname,
ostat(pjoin(langdir, pname)).st_mtime)
langs = {}
# scan languages directory for langfiles:
for fname in flist:
if regex_langfile.match(fname) or fname == 'default.py':
fname_with_path = pjoin(langdir, fname)
d = read_dict(fname_with_path)
lang = fname[:-3]
langcode = d.get('!langcode!', lang if lang != 'default'
else DEFAULT_LANGUAGE)
langname = d.get('!langname!', langcode)
langfile_mtime = ostat(fname_with_path).st_mtime
langs[lang] = get_lang_struct(lang, langcode,
langname, langfile_mtime)
if 'default' not in langs:
# if default.py is not found,
# add DEFAULT_LANGUAGE as default language:
langs['default'] = get_lang_struct('default', DEFAULT_LANGUAGE,
DEFAULT_LANGUAGE_NAME, 0)
deflang = langs['default']
deflangcode = deflang[0]
if deflangcode not in langs:
# create language from default.py:
langs[deflangcode] = deflang[:2] + (0,) + deflang[3:]
return langs
def read_possible_languages(langpath):
return getcfs('langs:' + langpath, langpath,
lambda: read_possible_languages_aux(langpath))
def read_plural_dict_aux(filename):
lang_text = read_locked(filename).replace('\r\n', '\n')
try:
return eval(lang_text) or {}
except Exception:
e = sys.exc_info()[1]
status = 'Syntax error in %s (%s)' % (filename, e)
logging.error(status)
return {'__corrupted__': status}
def read_plural_dict(filename):
return getcfs('plurals:' + filename, filename,
lambda: read_plural_dict_aux(filename))
def write_plural_dict(filename, contents):
if '__corrupted__' in contents:
return
fp = None
try:
fp = LockedFile(filename, 'w')
fp.write('#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n{\n# "singular form (0)": ["first plural form (1)", "second plural form (2)", ...],\n')
for key in sorted(contents, sort_function):
forms = '[' + ','.join([repr(Utf8(form))
for form in contents[key]]) + ']'
fp.write('%s: %s,\n' % (repr(Utf8(key)), forms))
fp.write('}\n')
except (IOError, OSError):
if is_writable():
logging.warning('Unable to write to file %s' % filename)
return
finally:
if fp:
fp.close()
def sort_function(x,y):
return cmp(unicode(x, 'utf-8').lower(), unicode(y, 'utf-8').lower())
def write_dict(filename, contents):
if '__corrupted__' in contents:
return
fp = None
try:
fp = LockedFile(filename, 'w')
fp.write('# -*- coding: utf-8 -*-\n{\n')
for key in sorted(contents, sort_function):
fp.write('%s: %s,\n' % (repr(Utf8(key)),
repr(Utf8(contents[key]))))
fp.write('}\n')
except (IOError, OSError):
if is_writable():
logging.warning('Unable to write to file %s' % filename)
return
finally:
if fp:
fp.close()
class lazyT(object):
"""
Never to be called explicitly, returned by
translator.__call__() or translator.M()
"""
m = s = T = f = t = None
M = is_copy = False
def __init__(
self,
message,
symbols={},
T=None,
filter=None,
ftag=None,
M=False
):
if isinstance(message, lazyT):
self.m = message.m
self.s = message.s
self.T = message.T
self.f = message.f
self.t = message.t
self.M = message.M
self.is_copy = True
else:
self.m = message
self.s = symbols
self.T = T
self.f = filter
self.t = ftag
self.M = M
self.is_copy = False
def __repr__(self):
return "<lazyT %s>" % (repr(Utf8(self.m)), )
def __str__(self):
return str(self.T.apply_filter(self.m, self.s, self.f, self.t) if self.M else
self.T.translate(self.m, self.s))
def __eq__(self, other):
return str(self) == str(other)
def __ne__(self, other):
return str(self) != str(other)
def __add__(self, other):
return '%s%s' % (self, other)
def __radd__(self, other):
return '%s%s' % (other, self)
def __mul__(self, other):
return str(self) * other
def __cmp__(self, other):
return cmp(str(self), str(other))
def __hash__(self):
return hash(str(self))
def __getattr__(self, name):
return getattr(str(self), name)
def __getitem__(self, i):
return str(self)[i]
def __getslice__(self, i, j):
return str(self)[i:j]
def __iter__(self):
for c in str(self):
yield c
def __len__(self):
return len(str(self))
def xml(self):
return str(self) if self.M else escape(str(self))
def encode(self, *a, **b):
return str(self).encode(*a, **b)
def decode(self, *a, **b):
return str(self).decode(*a, **b)
def read(self):
return str(self)
def __mod__(self, symbols):
if self.is_copy:
return lazyT(self)
return lazyT(self.m, symbols, self.T, self.f, self.t, self.M)
def pickle_lazyT(c):
return str, (c.xml(),)
copy_reg.pickle(lazyT, pickle_lazyT)
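# Illustrative note (added comment): a lazyT defers the actual lookup: the
# message and symbols are stored, and the translation happens only when the
# object is rendered via str()/xml(), using the language active at that moment;
# pickle_lazyT above makes pickling emit the rendered string instead of the
# lazy object.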
class translator(object):
"""
This class is instantiated by gluon.compileapp.build_environment
as the T object
Example:
T.force(None) # turns off translation
T.force('fr, it') # forces web2py to translate using fr.py or it.py
T("Hello World") # translates "Hello World" using the selected file
Note:
- there is no need to force since, by default, T uses
http_accept_language to determine a translation file.
- en and en-en are considered different languages!
- if language xx-yy is not found force() probes other similar languages
      using the following algorithm: `xx-yy.py -> xx.py -> xx-yy*.py -> xx*.py`
"""
def __init__(self, langpath, http_accept_language):
self.langpath = langpath
self.http_accept_language = http_accept_language
# filled in self.force():
#------------------------
# self.cache
# self.accepted_language
# self.language_file
# self.plural_language
# self.nplurals
# self.get_plural_id
# self.construct_plural_form
# self.plural_file
# self.plural_dict
# self.requested_languages
#----------------------------------------
# filled in self.set_current_languages():
#----------------------------------------
# self.default_language_file
# self.default_t
# self.current_languages
self.set_current_languages()
self.lazy = True
self.otherTs = {}
self.filter = markmin
self.ftag = 'markmin'
self.ns = None
def get_possible_languages_info(self, lang=None):
"""
Returns info for selected language or dictionary with all
possible languages info from `APP/languages/*.py`
It Returns:
- a tuple containing::
langcode, langname, langfile_mtime,
pluraldict_fname, pluraldict_mtime,
prules_langcode, nplurals,
get_plural_id, construct_plural_form
or None
- if *lang* is NOT defined a dictionary with all possible
languages::
{ langcode(from filename):
( langcode, # language code from !langcode!
langname,
# language name in national spelling from !langname!
langfile_mtime, # m_time of language file
                pluraldict_fname,# name of plural dictionary file or None (when default.py does not exist)
                pluraldict_mtime,# m_time of plural dictionary file or 0 if the file does not exist
prules_langcode, # code of plural rules language or 'default'
nplurals, # nplurals for current language
get_plural_id, # get_plural_id() for current language
construct_plural_form) # construct_plural_form() for current language
}
Args:
lang (str): language
"""
info = read_possible_languages(self.langpath)
if lang:
info = info.get(lang)
return info
def get_possible_languages(self):
""" Gets list of all possible languages for current application """
return list(set(self.current_languages +
[lang for lang in read_possible_languages(self.langpath).iterkeys()
if lang != 'default']))
def set_current_languages(self, *languages):
"""
Sets current AKA "default" languages
        Setting one of these languages makes the force() function turn
        translation off
"""
if len(languages) == 1 and isinstance(languages[0], (tuple, list)):
languages = languages[0]
if not languages or languages[0] is None:
# set default language from default.py/DEFAULT_LANGUAGE
pl_info = self.get_possible_languages_info('default')
if pl_info[2] == 0: # langfile_mtime
# if languages/default.py is not found
self.default_language_file = self.langpath
self.default_t = {}
self.current_languages = [DEFAULT_LANGUAGE]
else:
self.default_language_file = pjoin(self.langpath,
'default.py')
self.default_t = read_dict(self.default_language_file)
self.current_languages = [pl_info[0]] # !langcode!
else:
self.current_languages = list(languages)
self.force(self.http_accept_language)
def plural(self, word, n):
""" Gets plural form of word for number *n*
invoked from T()/T.M() in `%%{}` tag
Args:
word (str): word in singular
            n (numeric): the number the plural form is created for
Returns:
word (str): word in appropriate singular/plural form
Note:
"word" MUST be defined in current language (T.accepted_language)
"""
if int(n) == 1:
return word
elif word:
id = self.get_plural_id(abs(int(n)))
# id = 0 singular form
# id = 1 first plural form
# id = 2 second plural form
# etc.
if id != 0:
forms = self.plural_dict.get(word, [])
if len(forms) >= id:
# have this plural form:
return forms[id - 1]
else:
# guessing this plural form
forms += [''] * (self.nplurals - len(forms) - 1)
form = self.construct_plural_form(word, id)
forms[id - 1] = form
self.plural_dict[word] = forms
if is_writable() and self.plural_file:
write_plural_dict(self.plural_file,
self.plural_dict)
return form
return word
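    # Illustrative note (added comment): plural() is a no-op when n == 1 or when
    # get_plural_id() maps n to 0; otherwise the form is taken from
    # self.plural_dict, and a missing form is guessed with
    # construct_plural_form() and written back to the plural-*.py file when the
    # filesystem is writable.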
def force(self, *languages):
"""
Selects language(s) for translation
if a list of languages is passed as a parameter,
the first language from this list that matches the ones
from the possible_languages dictionary will be
selected
default language will be selected if none
of them matches possible_languages.
"""
pl_info = read_possible_languages(self.langpath)
def set_plural(language):
"""
initialize plural forms subsystem
"""
lang_info = pl_info.get(language)
if lang_info:
(pname,
pmtime,
self.plural_language,
self.nplurals,
self.get_plural_id,
self.construct_plural_form
) = lang_info[3:]
pdict = {}
if pname:
pname = pjoin(self.langpath, pname)
if pmtime != 0:
pdict = read_plural_dict(pname)
self.plural_file = pname
self.plural_dict = pdict
else:
self.plural_language = 'default'
self.nplurals = DEFAULT_NPLURALS
self.get_plural_id = DEFAULT_GET_PLURAL_ID
self.construct_plural_form = DEFAULT_CONSTRUCT_PLURAL_FORM
self.plural_file = None
self.plural_dict = {}
language = ''
if len(languages) == 1 and isinstance(languages[0], str):
languages = regex_language.findall(languages[0].lower())
elif not languages or languages[0] is None:
languages = []
self.requested_languages = languages = tuple(languages)
if languages:
all_languages = set(lang for lang in pl_info.iterkeys()
if lang != 'default') \
| set(self.current_languages)
for lang in languages:
# compare "aa-bb" | "aa" from *language* parameter
# with strings from langlist using such alghorythm:
# xx-yy.py -> xx.py -> xx*.py
lang5 = lang[:5]
if lang5 in all_languages:
language = lang5
else:
lang2 = lang[:2]
if len(lang5) > 2 and lang2 in all_languages:
language = lang2
else:
for l in all_languages:
if l[:2] == lang2:
language = l
if language:
if language in self.current_languages:
break
self.language_file = pjoin(self.langpath, language + '.py')
self.t = read_dict(self.language_file)
self.cache = global_language_cache.setdefault(
self.language_file,
({}, RLock()))
set_plural(language)
self.accepted_language = language
return languages
self.accepted_language = language
if not language:
if self.current_languages:
self.accepted_language = self.current_languages[0]
else:
self.accepted_language = DEFAULT_LANGUAGE
self.language_file = self.default_language_file
self.cache = global_language_cache.setdefault(self.language_file,
({}, RLock()))
self.t = self.default_t
set_plural(self.accepted_language)
return languages
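    # Illustrative sketch (added comment; the file names are assumptions):
    # given a languages/it.py next to the default dictionary,
    #
    #   T.force('it')          # loads it.py plus the Italian plural rules
    #   T.force('es-ar', 'it') # walks the list; the first code that matches
    #                          # an available language ('it' here) wins
    #   T.force(None)          # falls back to the default/current language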
def __call__(self, message, symbols={}, language=None, lazy=None, ns=None):
"""
        Gets a cached translated plain text message with inserted parameters (symbols)
        if lazy==True a lazyT object is returned
"""
if lazy is None:
lazy = self.lazy
if not language and not ns:
if lazy:
return lazyT(message, symbols, self)
else:
return self.translate(message, symbols)
else:
if ns:
if ns != self.ns:
self.langpath = os.path.join(self.langpath, ns)
if self.ns is None:
self.ns = ns
otherT = self.__get_otherT__(language, ns)
return otherT(message, symbols, lazy=lazy)
def __get_otherT__(self, language=None, namespace=None):
if not language and not namespace:
raise Exception('Incorrect parameters')
if namespace:
if language:
index = '%s/%s' % (namespace, language)
else:
index = namespace
else:
index = language
try:
otherT = self.otherTs[index]
except KeyError:
otherT = self.otherTs[index] = translator(self.langpath, \
self.http_accept_language)
if language:
otherT.force(language)
return otherT
def apply_filter(self, message, symbols={}, filter=None, ftag=None):
def get_tr(message, prefix, filter):
s = self.get_t(message, prefix)
return filter(s) if filter else self.filter(s)
if filter:
prefix = '@' + (ftag or 'userdef') + '\x01'
else:
prefix = '@' + self.ftag + '\x01'
message = get_from_cache(
self.cache, prefix + message,
lambda: get_tr(message, prefix, filter))
if symbols or symbols == 0 or symbols == "":
if isinstance(symbols, dict):
symbols.update(
(key, xmlescape(value).translate(ttab_in))
for key, value in symbols.iteritems()
if not isinstance(value, NUMBERS))
else:
if not isinstance(symbols, tuple):
symbols = (symbols,)
symbols = tuple(
value if isinstance(value, NUMBERS)
else xmlescape(value).translate(ttab_in)
for value in symbols)
message = self.params_substitution(message, symbols)
return XML(message.translate(ttab_out))
def M(self, message, symbols={}, language=None,
lazy=None, filter=None, ftag=None, ns=None):
"""
        Gets cached translated markmin-message with inserted parameters
        if lazy==True a lazyT object is returned
"""
if lazy is None:
lazy = self.lazy
if not language and not ns:
if lazy:
return lazyT(message, symbols, self, filter, ftag, True)
else:
return self.apply_filter(message, symbols, filter, ftag)
else:
if ns:
self.langpath = os.path.join(self.langpath, ns)
otherT = self.__get_otherT__(language, ns)
return otherT.M(message, symbols, lazy=lazy)
def get_t(self, message, prefix=''):
"""
Use ## to add a comment into a translation string
        the comment can be useful to discriminate between different possible
translations for the same string (for example different locations)::
T(' hello world ') -> ' hello world '
T(' hello world ## token') -> ' hello world '
T('hello ## world## token') -> 'hello ## world'
the ## notation is ignored in multiline strings and strings that
start with ##. This is needed to allow markmin syntax to be translated
"""
if isinstance(message, unicode):
message = message.encode('utf8')
if isinstance(prefix, unicode):
prefix = prefix.encode('utf8')
key = prefix + message
mt = self.t.get(key, None)
if mt is not None:
return mt
# we did not find a translation
if message.find('##') > 0 and not '\n' in message:
# remove comments
message = message.rsplit('##', 1)[0]
# guess translation same as original
self.t[key] = mt = self.default_t.get(key, message)
            # update language file for later translation
if is_writable() and \
self.language_file != self.default_language_file:
write_dict(self.language_file, self.t)
return regex_backslash.sub(
lambda m: m.group(1).translate(ttab_in), mt)
def params_substitution(self, message, symbols):
"""
Substitutes parameters from symbols into message using %.
        It also parses `%%{}` placeholders for plural-forms processing.
Returns:
string with parameters
Note:
            *symbols* MUST be either a tuple OR a dict of parameters!
"""
def sub_plural(m):
"""String in `%{}` is transformed by this rules:
If string starts with `\\`, `!` or `?` such transformations
take place::
"!string of words" -> "String of word" (Capitalize)
"!!string of words" -> "String Of Word" (Title)
"!!!string of words" -> "STRING OF WORD" (Upper)
"\\!string of words" -> "!string of word"
(remove \\ and disable transformations)
"?word?number" -> "word" (return word, if number == 1)
"?number" or "??number" -> "" (remove number,
if number == 1)
"?word?number" -> "number" (if number != 1)
"""
def sub_tuple(m):
""" word[number], !word[number], !!word[number], !!!word[number]
word, !word, !!word, !!!word, ?word?number, ??number, ?number
?word?word[number], ?word?[number], ??word[number]
"""
w, i = m.group('w', 'i')
c = w[0]
if c not in '!?':
return self.plural(w, symbols[int(i or 0)])
elif c == '?':
(p1, sep, p2) = w[1:].partition("?")
part1 = p1 if sep else ""
(part2, sep, part3) = (p2 if sep else p1).partition("?")
if not sep:
part3 = part2
if i is None:
# ?[word]?number[?number] or ?number
if not part2:
return m.group(0)
num = int(part2)
else:
# ?[word]?word2[?word3][number]
num = int(symbols[int(i or 0)])
return part1 if num == 1 else part3 if num == 0 else part2
elif w.startswith('!!!'):
word = w[3:]
fun = upper_fun
elif w.startswith('!!'):
word = w[2:]
fun = title_fun
else:
word = w[1:]
fun = cap_fun
if i is not None:
return fun(self.plural(word, symbols[int(i)]))
return fun(word)
def sub_dict(m):
""" word(var), !word(var), !!word(var), !!!word(var)
word(num), !word(num), !!word(num), !!!word(num)
?word2(var), ?word1?word2(var), ?word1?word2?word0(var)
?word2(num), ?word1?word2(num), ?word1?word2?word0(num)
"""
w, n = m.group('w', 'n')
c = w[0]
n = int(n) if n.isdigit() else symbols[n]
if c not in '!?':
return self.plural(w, n)
elif c == '?':
# ?[word1]?word2[?word0](var or num), ?[word1]?word2(var or num) or ?word2(var or num)
(p1, sep, p2) = w[1:].partition("?")
part1 = p1 if sep else ""
(part2, sep, part3) = (p2 if sep else p1).partition("?")
if not sep:
part3 = part2
num = int(n)
return part1 if num == 1 else part3 if num == 0 else part2
elif w.startswith('!!!'):
word = w[3:]
fun = upper_fun
elif w.startswith('!!'):
word = w[2:]
fun = title_fun
else:
word = w[1:]
fun = cap_fun
return fun(self.plural(word, n))
s = m.group(1)
part = regex_plural_tuple.sub(sub_tuple, s)
if part == s:
part = regex_plural_dict.sub(sub_dict, s)
if part == s:
return m.group(0)
return part
message = message % symbols
message = regex_plural.sub(sub_plural, message)
return message
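    # Worked example (added comment; assumes the active plural rules map
    # ('book', 2) to 'books'):
    #
    #   self.params_substitution('%s %%{book[0]}', (2,))
    #
    # first becomes '2 %{book[0]}' through ordinary % substitution, then
    # sub_plural/sub_tuple resolve the placeholder, giving '2 books'.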
def translate(self, message, symbols):
"""
        Gets cached translated message with inserted parameters (symbols)
"""
message = get_from_cache(self.cache, message,
lambda: self.get_t(message))
if symbols or symbols == 0 or symbols == "":
if isinstance(symbols, dict):
symbols.update(
(key, str(value).translate(ttab_in))
for key, value in symbols.iteritems()
if not isinstance(value, NUMBERS))
else:
if not isinstance(symbols, tuple):
symbols = (symbols,)
symbols = tuple(
value if isinstance(value, NUMBERS)
else str(value).translate(ttab_in)
for value in symbols)
message = self.params_substitution(message, symbols)
return message.translate(ttab_out)
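    # Illustrative sketch (added comment; the dictionary entry is an
    # assumption): with 'hello %s' translated as 'ciao %s' in self.t,
    #
    #   self.translate('hello %s', ('world',))   # -> 'ciao world'
    #
    # i.e. the cached translation is fetched first and the symbols are
    # substituted afterwards by params_substitution().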
def findT(path, language=DEFAULT_LANGUAGE):
"""
Note:
Must be run by the admin app
"""
lang_file = pjoin(path, 'languages', language + '.py')
sentences = read_dict(lang_file)
mp = pjoin(path, 'models')
cp = pjoin(path, 'controllers')
vp = pjoin(path, 'views')
mop = pjoin(path, 'modules')
for filename in \
listdir(mp, '^.+\.py$', 0) + listdir(cp, '^.+\.py$', 0)\
+ listdir(vp, '^.+\.html$', 0) + listdir(mop, '^.+\.py$', 0):
data = read_locked(filename)
items = regex_translate.findall(data)
for item in items:
try:
message = safe_eval(item)
except:
                continue # silently ignore improperly formatted strings
if not message.startswith('#') and not '\n' in message:
tokens = message.rsplit('##', 1)
else:
# this allows markmin syntax in translations
tokens = [message]
if len(tokens) == 2:
message = tokens[0].strip() + '##' + tokens[1].strip()
if message and not message in sentences:
sentences[message] = message
if not '!langcode!' in sentences:
sentences['!langcode!'] = (
DEFAULT_LANGUAGE if language in ('default', DEFAULT_LANGUAGE) else language)
if not '!langname!' in sentences:
sentences['!langname!'] = (
DEFAULT_LANGUAGE_NAME if language in ('default', DEFAULT_LANGUAGE)
else sentences['!langcode!'])
write_dict(lang_file, sentences)
def update_all_languages(application_path):
"""
Note:
Must be run by the admin app
"""
path = pjoin(application_path, 'languages/')
for language in oslistdir(path):
if regex_langfile.match(language):
findT(application_path, language[:-3])
if __name__ == '__main__':
import doctest
doctest.testmod()
| bsd-3-clause | -6,423,560,203,809,788,000 | 34.895055 | 153 | 0.514168 | false |
dmoliveira/networkx | networkx/algorithms/operators/unary.py | 30 | 1747 | """Unary operations on graphs"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
__author__ = """\n""".join(['Aric Hagberg <[email protected]>',
'Pieter Swart ([email protected])',
'Dan Schult([email protected])'])
__all__ = ['complement', 'reverse']
def complement(G, name=None):
"""Return the graph complement of G.
Parameters
----------
G : graph
A NetworkX graph
name : string
Specify name for new graph
Returns
-------
GC : A new graph.
Notes
------
Note that complement() does not create self-loops and also
does not produce parallel edges for MultiGraphs.
Graph, node, and edge data are not propagated to the new graph.
"""
if name is None:
name = "complement(%s)" % (G.name)
R = G.__class__()
R.name = name
R.add_nodes_from(G)
R.add_edges_from(((n, n2)
for n, nbrs in G.adjacency_iter()
for n2 in G if n2 not in nbrs
if n != n2))
return R
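# Illustrative usage (added comment only, not part of the original module):
#
#   >>> G = nx.path_graph(3)        # edges (0, 1) and (1, 2)
#   >>> sorted(complement(G).edges())
#   [(0, 2)]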
def reverse(G, copy=True):
"""Return the reverse directed graph of G.
Parameters
----------
G : directed graph
A NetworkX directed graph
copy : bool
If True, then a new graph is returned. If False, then the graph is
reversed in place.
Returns
-------
H : directed graph
The reversed G.
"""
if not G.is_directed():
raise nx.NetworkXError("Cannot reverse an undirected graph.")
else:
return G.reverse(copy=copy)
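# Illustrative usage (added comment only, not part of the original module):
#
#   >>> D = nx.DiGraph([(1, 2)])
#   >>> list(reverse(D).edges())
#   [(2, 1)]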
| bsd-3-clause | -4,466,940,595,108,524,500 | 24.318841 | 74 | 0.556955 | false |
norayr/unisubs | apps/videos/migrations/0039_auto__add_field_subtitleversion_is_forked__add_field_subtitlelanguage_.py | 5 | 20292 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'SubtitleVersion.is_forked'
db.add_column('videos_subtitleversion', 'is_forked', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True), keep_default=False)
# Adding field 'SubtitleLanguage.is_forked'
db.add_column('videos_subtitlelanguage', 'is_forked', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'SubtitleVersion.is_forked'
db.delete_column('videos_subtitleversion', 'is_forked')
# Deleting field 'SubtitleLanguage.is_forked'
db.delete_column('videos_subtitlelanguage', 'is_forked')
models = {
'auth.customuser': {
'Meta': {'object_name': 'CustomUser', '_ormbases': ['auth.User']},
'autoplay_preferences': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'award_points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'changes_notification': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'preferred_language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'valid_email': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'comments.comment': {
'Meta': {'object_name': 'Comment'},
'content': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_comment'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_pk': ('django.db.models.fields.TextField', [], {}),
'reply_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['comments.Comment']", 'null': 'True', 'blank': 'True'}),
'submit_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'videos.action': {
'Meta': {'object_name': 'Action'},
'action_type': ('django.db.models.fields.IntegerField', [], {}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['comments.Comment']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']", 'null': 'True', 'blank': 'True'}),
'new_video_title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"})
},
'videos.nullsubtitles': {
'Meta': {'unique_together': "(('video', 'user', 'language'),)", 'object_name': 'NullSubtitles'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_original': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"})
},
'videos.nulltranslations': {
'Meta': {'object_name': 'NullTranslations'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"})
},
'videos.nullvideocaptions': {
'Meta': {'object_name': 'NullVideoCaptions'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"})
},
'videos.stopnotification': {
'Meta': {'object_name': 'StopNotification'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"})
},
'videos.subtitle': {
'Meta': {'unique_together': "(('version', 'subtitle_id'),)", 'object_name': 'Subtitle'},
'end_time': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'null_subtitles': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.NullSubtitles']", 'null': 'True'}),
'start_time': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'subtitle_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'subtitle_order': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'subtitle_text': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleVersion']", 'null': 'True'})
},
'videos.subtitlelanguage': {
'Meta': {'unique_together': "(('video', 'language'),)", 'object_name': 'SubtitleLanguage'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_forked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_original': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"}),
'was_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True'}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'videos.subtitleversion': {
'Meta': {'unique_together': "(('language', 'version_no'),)", 'object_name': 'SubtitleVersion'},
'datetime_started': ('django.db.models.fields.DateTimeField', [], {}),
'finished': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_forked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']"}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'notification_sent': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'text_change': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'time_change': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True'}),
'version_no': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'videos.translation': {
'Meta': {'object_name': 'Translation'},
'caption_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'null_translations': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.NullTranslations']", 'null': 'True'}),
'translation_text': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.TranslationVersion']", 'null': 'True'})
},
'videos.translationlanguage': {
'Meta': {'object_name': 'TranslationLanguage'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_translated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"}),
'was_translated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True'}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'videos.translationversion': {
'Meta': {'object_name': 'TranslationVersion'},
'datetime_started': ('django.db.models.fields.DateTimeField', [], {}),
'finished': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.TranslationLanguage']"}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'notification_sent': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'text_change': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'time_change': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'version_no': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'videos.usertestresult': {
'Meta': {'object_name': 'UserTestResult'},
'browser': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'get_updates': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'task1': ('django.db.models.fields.TextField', [], {}),
'task2': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'task3': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'videos.video': {
'Meta': {'object_name': 'Video'},
'allow_community_edits': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'bliptv_fileid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'bliptv_flv_url': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'dailymotion_videoid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'duration': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'subtitles_fetched_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'video_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'video_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'video_url': ('django.db.models.fields.URLField', [], {'max_length': '2048', 'blank': 'True'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'vimeo_videoid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'was_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'widget_views_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'writelock_owners'", 'null': 'True', 'to': "orm['auth.CustomUser']"}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'youtube_videoid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'})
},
'videos.videocaption': {
'Meta': {'object_name': 'VideoCaption'},
'caption_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'caption_text': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'end_time': ('django.db.models.fields.FloatField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'null_captions': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.NullVideoCaptions']", 'null': 'True'}),
'start_time': ('django.db.models.fields.FloatField', [], {}),
'sub_order': ('django.db.models.fields.FloatField', [], {}),
'version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.VideoCaptionVersion']", 'null': 'True'})
},
'videos.videocaptionversion': {
'Meta': {'object_name': 'VideoCaptionVersion'},
'datetime_started': ('django.db.models.fields.DateTimeField', [], {}),
'finished': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'notification_sent': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'text_change': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'time_change': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'version_no': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"})
}
}
complete_apps = ['videos']
| agpl-3.0 | 8,544,339,602,614,510,000 | 78.889764 | 172 | 0.555293 | false |
dischinator/pyload | module/plugins/hooks/SkipRev.py | 4 | 3016 | # -*- coding: utf-8 -*-
import re
from module.PyFile import PyFile
from module.plugins.internal.Addon import Addon
class SkipRev(Addon):
__name__ = "SkipRev"
__type__ = "hook"
__version__ = "0.37"
__status__ = "testing"
__config__ = [("activated", "bool" , "Activated" , False ),
("mode" , "Auto;Manual", "Choose recovery archives to skip" , "Auto"),
("revtokeep", "int" , "Number of recovery archives to keep for package", 0 )]
__description__ = """Skip recovery archives (.rev)"""
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "[email protected]")]
def _name(self, pyfile):
return pyfile.pluginclass.get_info(pyfile.url)['name']
def _create_pyFile(self, data):
pylink = self.pyload.api._convertPyFile(data)
return PyFile(self.pyload.files,
pylink.fid,
pylink.url,
pylink.name,
pylink.size,
pylink.status,
pylink.error,
pylink.plugin,
pylink.packageID,
pylink.order)
def download_preparing(self, pyfile):
name = self._name(pyfile)
if pyfile.statusname == "unskipped" or not name.endswith(".rev") or not ".part" in name:
return
revtokeep = -1 if self.config.get('mode') == "Auto" else self.config.get('revtokeep')
if revtokeep:
status_list = (1, 4, 8, 9, 14) if revtokeep < 0 else (1, 3, 4, 8, 9, 14)
pyname = re.compile(r'%s\.part\d+\.rev$' % name.rsplit('.', 2)[0].replace('.', '\.'))
queued = [True for fid, fdata in pyfile.package().getChildren().items() \
if fdata['status'] not in status_list and pyname.match(fdata['name'])].count(True)
if not queued or queued < revtokeep: #: Keep one rev at least in auto mode
return
pyfile.setCustomStatus("SkipRev", "skipped")
def download_failed(self, pyfile):
if pyfile.name.rsplit('.', 1)[-1].strip() not in ("rar", "rev"):
return
revtokeep = -1 if self.config.get('mode') == "Auto" else self.config.get('revtokeep')
if not revtokeep:
return
pyname = re.compile(r'%s\.part\d+\.rev$' % pyfile.name.rsplit('.', 2)[0].replace('.', '\.'))
for fid, fdata in pyfile.package().getChildren().items():
if fdata['status'] == 4 and pyname.match(fdata['name']):
pyfile_new = self._create_pyFile(fdata)
if revtokeep > -1 or pyfile.name.endswith(".rev"):
pyfile_new.setStatus("queued")
else:
pyfile_new.setCustomStatus(_("unskipped"), "queued")
self.pyload.files.save()
pyfile_new.release()
return
| gpl-3.0 | -8,606,556,976,885,780,000 | 34.482353 | 106 | 0.507626 | false |
fentas/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/style/checkers/cmake.py | 123 | 7236 | # Copyright (C) 2012 Intel Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Supports checking WebKit style in cmake files.(.cmake, CMakeLists.txt)"""
import re
from common import TabChecker
class CMakeChecker(object):
"""Processes CMake lines for checking style."""
# NO_SPACE_CMDS list are based on commands section of CMake document.
# Now it is generated from
# http://www.cmake.org/cmake/help/v2.8.10/cmake.html#section_Commands.
# Some commands are from default CMake modules such as pkg_check_modules.
# Please keep list in alphabet order.
#
    # For commands in this list, spaces should not be added between the command
    # and its parentheses, e.g. message("testing"), not message ("testing")
#
# The conditional commands like if, else, endif, foreach, endforeach,
# while, endwhile and break are listed in ONE_SPACE_CMDS
NO_SPACE_CMDS = [
'add_custom_command', 'add_custom_target', 'add_definitions',
'add_dependencies', 'add_executable', 'add_library',
'add_subdirectory', 'add_test', 'aux_source_directory',
'build_command',
'cmake_minimum_required', 'cmake_policy', 'configure_file',
'create_test_sourcelist',
'define_property',
'enable_language', 'enable_testing', 'endfunction', 'endmacro',
'execute_process', 'export',
'file', 'find_file', 'find_library', 'find_package', 'find_path',
'find_program', 'fltk_wrap_ui', 'function',
'get_cmake_property', 'get_directory_property',
'get_filename_component', 'get_property', 'get_source_file_property',
'get_target_property', 'get_test_property',
'include', 'include_directories', 'include_external_msproject',
'include_regular_expression', 'install',
'link_directories', 'list', 'load_cache', 'load_command',
'macro', 'mark_as_advanced', 'math', 'message',
'option',
#From FindPkgConfig.cmake
'pkg_check_modules',
'project',
'qt_wrap_cpp', 'qt_wrap_ui',
'remove_definitions', 'return',
'separate_arguments', 'set', 'set_directory_properties', 'set_property',
'set_source_files_properties', 'set_target_properties',
'set_tests_properties', 'site_name', 'source_group', 'string',
'target_link_libraries', 'try_compile', 'try_run',
'unset',
'variable_watch',
]
    # CMake conditional commands require one space between the command and
    # its parentheses, such as "if (", "foreach (", etc.
ONE_SPACE_CMDS = [
'if', 'else', 'elseif', 'endif',
'foreach', 'endforeach',
'while', 'endwhile',
'break',
]
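    # Illustrative lines and how the checks below treat them (added comment):
    #
    #   set(FOO bar)    - accepted: NO_SPACE_CMDS take no space before "("
    #   set (FOO bar)   - flagged by _check_no_space_cmds
    #   if (FOO)        - accepted: ONE_SPACE_CMDS take exactly one space
    #   if(FOO)         - flagged by _check_one_space_cmds
    #   IF (FOO)        - flagged by _check_non_lowercase_cmd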
def __init__(self, file_path, handle_style_error):
self._handle_style_error = handle_style_error
self._tab_checker = TabChecker(file_path, handle_style_error)
def check(self, lines):
self._tab_checker.check(lines)
self._num_lines = len(lines)
for l in xrange(self._num_lines):
self._process_line(l + 1, lines[l])
def _process_line(self, line_number, line_content):
if re.match('(^|\ +)#', line_content):
# ignore comment line
return
l = line_content.expandtabs(4)
# check command like message( "testing")
if re.search('\(\ +', l):
self._handle_style_error(line_number, 'whitespace/parentheses', 5,
'No space after "("')
# check command like message("testing" )
if re.search('\ +\)', l) and not re.search('^\ +\)$', l):
self._handle_style_error(line_number, 'whitespace/parentheses', 5,
'No space before ")"')
self._check_trailing_whitespace(line_number, l)
self._check_no_space_cmds(line_number, l)
self._check_one_space_cmds(line_number, l)
self._check_indent(line_number, line_content)
def _check_trailing_whitespace(self, line_number, line_content):
line_content = line_content.rstrip('\n') # chr(10), newline
line_content = line_content.rstrip('\r') # chr(13), carriage return
line_content = line_content.rstrip('\x0c') # chr(12), form feed, ^L
stripped = line_content.rstrip()
if line_content != stripped:
self._handle_style_error(line_number, 'whitespace/trailing', 5,
'No trailing spaces')
def _check_no_space_cmds(self, line_number, line_content):
# check command like "SET (" or "Set("
for t in self.NO_SPACE_CMDS:
self._check_non_lowercase_cmd(line_number, line_content, t)
if re.search('(^|\ +)' + t.lower() + '\ +\(', line_content):
msg = 'No space between command "' + t.lower() + '" and its parentheses, should be "' + t + '("'
self._handle_style_error(line_number, 'whitespace/parentheses', 5, msg)
def _check_one_space_cmds(self, line_number, line_content):
# check command like "IF (" or "if(" or "if (" or "If ()"
for t in self.ONE_SPACE_CMDS:
self._check_non_lowercase_cmd(line_number, line_content, t)
if re.search('(^|\ +)' + t.lower() + '(\(|\ \ +\()', line_content):
msg = 'One space between command "' + t.lower() + '" and its parentheses, should be "' + t + ' ("'
self._handle_style_error(line_number, 'whitespace/parentheses', 5, msg)
def _check_non_lowercase_cmd(self, line_number, line_content, cmd):
if re.search('(^|\ +)' + cmd + '\ *\(', line_content, flags=re.IGNORECASE) and \
(not re.search('(^|\ +)' + cmd.lower() + '\ *\(', line_content)):
msg = 'Use lowercase command "' + cmd.lower() + '"'
self._handle_style_error(line_number, 'command/lowercase', 5, msg)
def _check_indent(self, line_number, line_content):
#TODO (halton): add indent checking
pass
| bsd-3-clause | -3,950,742,356,907,028,000 | 47.24 | 114 | 0.621338 | false |
stackforge/cloudbase-init | cloudbaseinit/tests/metadata/services/test_nocloudservice.py | 1 | 9767 | # Copyright 2020 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import importlib
import os
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
from cloudbaseinit.metadata.services import base
from cloudbaseinit.models import network as nm
from cloudbaseinit.tests import testutils
from cloudbaseinit.utils import serialization
MODULE_PATH = "cloudbaseinit.metadata.services.nocloudservice"
@ddt.ddt
class TestNoCloudNetworkConfigV1Parser(unittest.TestCase):
def setUp(self):
module = importlib.import_module(MODULE_PATH)
self._parser = module.NoCloudNetworkConfigV1Parser()
self.snatcher = testutils.LogSnatcher(MODULE_PATH)
@ddt.data(('', ('Network configuration is empty', None)),
('{t: 1}',
("Network config '{'t': 1}' is not a list", None)),
('["1"]',
("Network config item '1' is not a dictionary",
nm.NetworkDetailsV2(links=[], networks=[], services=[]))),
('[{"type": "router"}]',
("Network config type 'router' is not supported",
nm.NetworkDetailsV2(links=[], networks=[], services=[]))))
@ddt.unpack
def test_parse_empty_result(self, input, expected_result):
with self.snatcher:
result = self._parser.parse(serialization.parse_json_yaml(input))
self.assertEqual(True, expected_result[0] in self.snatcher.output[0])
self.assertEqual(result, expected_result[1])
def test_network_details_v2(self):
expected_bond = nm.Bond(
members=["gbe0", "gbe1"],
type=nm.BOND_TYPE_ACTIVE_BACKUP,
lb_algorithm=None,
lacp_rate=None,
)
expected_link_bond = nm.Link(
id='bond0',
name='bond0',
type=nm.LINK_TYPE_BOND,
enabled=True,
mac_address="52:54:00:12:34:00",
mtu=1450,
bond=expected_bond,
vlan_link=None,
vlan_id=None,
)
expected_link = nm.Link(
id='interface0',
name='interface0',
type=nm.LINK_TYPE_PHYSICAL,
enabled=True,
mac_address="52:54:00:12:34:00",
mtu=1450,
bond=None,
vlan_link=None,
vlan_id=None,
)
expected_link_vlan = nm.Link(
id='vlan0',
name='vlan0',
type=nm.LINK_TYPE_VLAN,
enabled=True,
mac_address="52:54:00:12:34:00",
mtu=1450,
bond=None,
vlan_link='eth1',
vlan_id=150,
)
expected_network = nm.Network(
link='interface0',
address_cidr='192.168.1.10/24',
dns_nameservers=['192.168.1.11'],
routes=[
nm.Route(network_cidr='0.0.0.0/0',
gateway="192.168.1.1")
]
)
expected_network_bond = nm.Network(
link='bond0',
address_cidr='192.168.1.10/24',
dns_nameservers=['192.168.1.11'],
routes=[],
)
expected_network_vlan = nm.Network(
link='vlan0',
address_cidr='192.168.1.10/24',
dns_nameservers=['192.168.1.11'],
routes=[],
)
expected_nameservers = nm.NameServerService(
addresses=['192.168.23.2', '8.8.8.8'],
search='acme.local')
parser_data = """
- type: physical
name: interface0
mac_address: "52:54:00:12:34:00"
mtu: 1450
subnets:
- type: static
address: 192.168.1.10
netmask: 255.255.255.0
gateway: 192.168.1.1
dns_nameservers:
- 192.168.1.11
- type: bond
name: bond0
bond_interfaces:
- gbe0
- gbe1
mac_address: "52:54:00:12:34:00"
params:
bond-mode: active-backup
bond-lacp-rate: false
mtu: 1450
subnets:
- type: static
address: 192.168.1.10
netmask: 255.255.255.0
dns_nameservers:
- 192.168.1.11
- type: vlan
name: vlan0
vlan_link: eth1
vlan_id: 150
mac_address: "52:54:00:12:34:00"
mtu: 1450
subnets:
- type: static
address: 192.168.1.10
netmask: 255.255.255.0
dns_nameservers:
- 192.168.1.11
- type: nameserver
address:
- 192.168.23.2
- 8.8.8.8
search: acme.local
"""
result = self._parser.parse(
serialization.parse_json_yaml(parser_data))
self.assertEqual(result.links[0], expected_link)
self.assertEqual(result.networks[0], expected_network)
self.assertEqual(result.links[1], expected_link_bond)
self.assertEqual(result.networks[1], expected_network_bond)
self.assertEqual(result.links[2], expected_link_vlan)
self.assertEqual(result.networks[2], expected_network_vlan)
self.assertEqual(result.services[0], expected_nameservers)
@ddt.ddt
class TestNoCloudConfigDriveService(unittest.TestCase):
def setUp(self):
self._win32com_mock = mock.MagicMock()
self._ctypes_mock = mock.MagicMock()
self._ctypes_util_mock = mock.MagicMock()
self._win32com_client_mock = mock.MagicMock()
self._pywintypes_mock = mock.MagicMock()
self._module_patcher = mock.patch.dict(
'sys.modules',
{'win32com': self._win32com_mock,
'ctypes': self._ctypes_mock,
'ctypes.util': self._ctypes_util_mock,
'win32com.client': self._win32com_client_mock,
'pywintypes': self._pywintypes_mock})
self._module_patcher.start()
self.addCleanup(self._module_patcher.stop)
self.configdrive_module = importlib.import_module(MODULE_PATH)
self._config_drive = (
self.configdrive_module.NoCloudConfigDriveService())
self.snatcher = testutils.LogSnatcher(MODULE_PATH)
@mock.patch('os.path.normpath')
@mock.patch('os.path.join')
def test_get_data(self, mock_join, mock_normpath):
fake_path = os.path.join('fake', 'path')
with mock.patch('six.moves.builtins.open',
mock.mock_open(read_data='fake data'), create=True):
response = self._config_drive._get_data(fake_path)
self.assertEqual('fake data', response)
mock_join.assert_called_with(
self._config_drive._metadata_path, fake_path)
mock_normpath.assert_called_once_with(mock_join.return_value)
@mock.patch('shutil.rmtree')
def test_cleanup(self, mock_rmtree):
fake_path = os.path.join('fake', 'path')
self._config_drive._metadata_path = fake_path
mock_mgr = mock.Mock()
self._config_drive._mgr = mock_mgr
mock_mgr.target_path = fake_path
self._config_drive.cleanup()
mock_rmtree.assert_called_once_with(fake_path,
ignore_errors=True)
self.assertEqual(None, self._config_drive._metadata_path)
@mock.patch(MODULE_PATH + '.NoCloudConfigDriveService._get_meta_data')
def test_get_public_keys(self, mock_get_metadata):
fake_key = 'fake key'
expected_result = [fake_key]
mock_get_metadata.return_value = {
'public-keys': {
'0': {
'openssh-key': fake_key
}
}
}
result = self._config_drive.get_public_keys()
self.assertEqual(result, expected_result)
@ddt.data(('', ('V2 network metadata is empty', None)),
('1', ('V2 network metadata is not a dictionary', None)),
('{}', ('V2 network metadata is empty', None)),
('{}}', ('V2 network metadata could not be deserialized', None)),
('{version: 2}', ("Network data version '2' is not supported",
None)),
(base.NotExistingMetadataException('exc'),
('V2 network metadata not found', True)))
@ddt.unpack
@mock.patch(MODULE_PATH + '.NoCloudConfigDriveService._get_cache_data')
def test_network_details_v2_empty_result(self, input, expected_result,
mock_get_cache_data):
if expected_result[1]:
mock_get_cache_data.side_effect = [input]
else:
mock_get_cache_data.return_value = input
with self.snatcher:
result = self._config_drive.get_network_details_v2()
self.assertEqual(True, expected_result[0] in self.snatcher.output[0])
self.assertEqual(result, None)
mock_get_cache_data.assert_called_with(
"network-config", decode=True)
| apache-2.0 | 2,213,590,478,950,928,000 | 34.908088 | 79 | 0.549503 | false |
zenmoto/splunk-ref-pas-code | spikes/googledrive_addon/bin/splunklib/searchcommands/decorators.py | 5 | 10708 | # Copyright 2011-2014 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from inspect import getmembers, isclass, isfunction
from types import FunctionType, MethodType
from json import JSONEncoder
try:
from collections import OrderedDict # must be python 2.7
except ImportError:
from ordereddict import OrderedDict # must be python 2.6
from .search_command_internals import ConfigurationSettingsType
from .validators import OptionName
class Configuration(object):
""" Defines the configuration settings for a search command.
Documents, validates, and ensures that only relevant configuration settings
are applied. Adds a :code:`name` class variable to search command classes
that don't have one. The :code:`name` is derived from the name of the class.
By convention command class names end with the word "Command". To derive
:code:`name` the word "Command" is removed from the end of the class name
and then converted to lower case for conformance with the `Search command
style guide <http://docs.splunk.com/Documentation/Splunk/6.0/Search/Searchcommandstyleguide>`_
"""
def __init__(self, **kwargs):
self.settings = kwargs
def __call__(self, o):
if isfunction(o):
# We must wait to finalize configuration as the class containing
            # this function is under construction at the time of this call to
            # decorate a member function. This will be handled in the call to
# o.ConfigurationSettings.fix_up(o), below.
o._settings = self.settings
elif isclass(o):
name = o.__name__
if name.endswith('Command'):
name = name[:-len('Command')]
o.name = name.lower()
if self.settings is not None:
o.ConfigurationSettings = ConfigurationSettingsType(
module='.'.join((o.__module__, o.__name__)),
name='ConfigurationSettings',
bases=(o.ConfigurationSettings,),
settings=self.settings)
o.ConfigurationSettings.fix_up(o)
Option.fix_up(o)
else:
raise TypeError(
'Incorrect usage: Configuration decorator applied to %s'
% (type(o), o.__name__))
return o
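# Illustrative usage sketch (added comment; the setting name and base class
# are assumptions, they are not defined in this module):
#
#   @Configuration(retainsevents=True)
#   class CountMatchesCommand(StreamingCommand):
#       ...
#
# The decorator derives the search-time name by dropping the trailing
# "Command" and lower-casing, so the class above would run as "countmatches".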
class Option(property):
""" Represents a search command option.
Required options must be specified on the search command line.
**Example:**
Short form (recommended). When you are satisfied with built-in or custom
validation behaviors.
.. code-block:: python
:linenos:
total = Option(
doc=''' **Syntax:** **total=***<fieldname>*
**Description:** Name of the field that will hold the computed
sum''',
require=True, validate=validator.Fieldname())
**Example:**
Long form. Useful when you wish to manage the option value and its deleter/
getter/setter side-effects yourself. You must provide a getter and a
setter. If your :code:`Option` requires `destruction <http://docs.python.org/reference/datamodel.html#object.__del__>`_
you must also provide a deleter. You must be prepared to accept a value of
:const:`None` which indicates that your :code:`Option` is unset.
.. code-block:: python
:linenos:
@Option()
def logging_configuration(self):
\""" **Syntax:** logging_configuration=<path>
**Description:** Loads an alternative logging configuration file for
a command invocation. The logging configuration file must be in
Python ConfigParser-format. The *<path>* name and all path names
specified in configuration are relative to the app root directory.
\"""
return self._logging_configuration
@logging_configuration.setter
def logging_configuration(self, value):
if value is not None
logging.configure(value)
self._logging_configuration = value
def __init__(self)
self._logging_configuration = None
"""
def __init__(self, fget=None, fset=None, fdel=None, doc=None, name=None,
default=None, require=None, validate=None):
super(Option, self).__init__(fget, fset, fdel, doc)
self.name = None if name is None else OptionName()(name)
self.default = default
self.require = bool(require)
self.validate = validate
def __call__(self, function):
return self.getter(function)
#region Methods
@classmethod
def fix_up(cls, command):
is_option = lambda attribute: isinstance(attribute, Option)
command.option_definitions = getmembers(command, is_option)
member_number = 0
for member_name, option in command.option_definitions:
if option.name is None:
option.name = member_name
if option.fget is None and option.fset is None:
field_name = '_' + member_name
def new_getter(name):
def getter(self):
return getattr(self, name, None)
return getter
fget = new_getter(field_name)
fget = FunctionType(
fget.func_code, fget.func_globals, member_name, None,
fget.func_closure)
fget = MethodType(fget, None, command)
option = option.getter(fget)
def new_setter(name):
def setter(self, value):
setattr(self, name, value)
return setter
fset = new_setter(field_name)
fset = FunctionType(
fset.func_code, fset.func_globals, member_name, None,
fset.func_closure)
fset = MethodType(fset, None, command)
option = option.setter(fset)
setattr(command, member_name, option)
command.option_definitions[member_number] = member_name, option
member_number += 1
return
def deleter(self, function):
deleter = super(Option, self).deleter(function)
return self._reset(deleter, function)
def getter(self, function):
getter = super(Option, self).getter(function)
return self._reset(getter)
def setter(self, function):
f = lambda s, v: function(s, self.validate(v) if self.validate else v)
setter = super(Option, self).setter(f)
return self._reset(setter)
def _reset(self, other):
other.name = self.name
other.default = self.default
other.require = self.require
other.validate = self.validate
return other
#endregion
#region Types
class Encoder(JSONEncoder):
def __init__(self, item):
super(Option.Encoder, self).__init__()
self.item = item
def default(self, o):
# Convert the value of a type unknown to the JSONEncoder
validator = self.item.validator
if validator is None:
return str(o)
return validator.format(o)
class Item(object):
""" Presents an instance/class view over a search command `Option`.
"""
def __init__(self, command, option):
self._command = command
self._option = option
self._is_set = False
def __repr__(self):
return str(self)
def __str__(self):
encoder = Option.Encoder(self)
text = '='.join([self.name, encoder.encode(self.value)])
return text
#region Properties
@property
def is_required(self):
return bool(self._option.require)
@property
def is_set(self):
""" Indicates whether an option value was provided as argument.
"""
return self._is_set
@property
def name(self):
return self._option.name
@property
def validator(self):
return self._option.validate
@property
def value(self):
return self._option.__get__(self._command)
@value.setter
def value(self, value):
self._option.__set__(self._command, value)
self._is_set = True
def reset(self):
self._option.__set__(self._command, self._option.default)
self._is_set = False
#endif
class View(object):
""" Presents a view of the set of `Option` arguments to a search command.
"""
def __init__(self, command):
self._items = OrderedDict([
(option.name, Option.Item(command, option))
for member_name, option in type(command).option_definitions])
return
def __contains__(self, name):
return name in self._items
def __getitem__(self, name):
return self._items[name]
def __iter__(self):
return self._items.__iter__()
def __len__(self):
return len(self._items)
def __repr__(self):
text = ''.join([
'Option.View(',
','.join([repr(item) for item in self.itervalues()]),
')'])
return text
def __str__(self):
text = ' '.join(
[str(item) for item in self.itervalues() if item.is_set])
return text
#region Methods
def get_missing(self):
missing = [
item.name for item in self._items.itervalues()
if item.is_required and not item.is_set]
return missing if len(missing) > 0 else None
def iteritems(self):
return self._items.iteritems()
def iterkeys(self):
return self.__iter__()
def itervalues(self):
return self._items.itervalues()
def reset(self):
for value in self.itervalues():
value.reset()
return
#endif
#endif
| apache-2.0 | 5,928,008,961,594,241,000 | 32.151703 | 123 | 0.577792 | false |
cgroll/j_r_docker | launcher/windows/python/Lib/filecmp.py | 110 | 9588 | """Utilities for comparing files and directories.
Classes:
dircmp
Functions:
cmp(f1, f2, shallow=1) -> int
cmpfiles(a, b, common) -> ([], [], [])
"""
import os
import stat
from itertools import ifilter, ifilterfalse, imap, izip
__all__ = ["cmp","dircmp","cmpfiles"]
_cache = {}
BUFSIZE=8*1024
def cmp(f1, f2, shallow=1):
"""Compare two files.
Arguments:
f1 -- First file name
f2 -- Second file name
shallow -- Just check stat signature (do not read the files).
defaults to 1.
Return value:
True if the files are the same, False otherwise.
This function uses a cache for past comparisons and the results,
with a cache invalidation mechanism relying on stale signatures.
"""
s1 = _sig(os.stat(f1))
s2 = _sig(os.stat(f2))
if s1[0] != stat.S_IFREG or s2[0] != stat.S_IFREG:
return False
if shallow and s1 == s2:
return True
if s1[1] != s2[1]:
return False
outcome = _cache.get((f1, f2, s1, s2))
if outcome is None:
outcome = _do_cmp(f1, f2)
if len(_cache) > 100: # limit the maximum size of the cache
_cache.clear()
_cache[f1, f2, s1, s2] = outcome
return outcome
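# Example (added comment; the file names are placeholders):
#
#   >>> import filecmp
#   >>> filecmp.cmp('a.txt', 'a.txt')
#   True
#
# With the default shallow=1, files whose os.stat() signatures match are
# reported equal without reading their contents.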
def _sig(st):
return (stat.S_IFMT(st.st_mode),
st.st_size,
st.st_mtime)
def _do_cmp(f1, f2):
bufsize = BUFSIZE
with open(f1, 'rb') as fp1, open(f2, 'rb') as fp2:
while True:
b1 = fp1.read(bufsize)
b2 = fp2.read(bufsize)
if b1 != b2:
return False
if not b1:
return True
# Directory comparison class.
#
class dircmp:
"""A class that manages the comparison of 2 directories.
dircmp(a,b,ignore=None,hide=None)
A and B are directories.
IGNORE is a list of names to ignore,
defaults to ['RCS', 'CVS', 'tags'].
HIDE is a list of names to hide,
defaults to [os.curdir, os.pardir].
High level usage:
x = dircmp(dir1, dir2)
x.report() -> prints a report on the differences between dir1 and dir2
or
x.report_partial_closure() -> prints report on differences between dir1
and dir2, and reports on common immediate subdirectories.
x.report_full_closure() -> like report_partial_closure,
but fully recursive.
Attributes:
left_list, right_list: The files in dir1 and dir2,
filtered by hide and ignore.
common: a list of names in both dir1 and dir2.
left_only, right_only: names only in dir1, dir2.
common_dirs: subdirectories in both dir1 and dir2.
common_files: files in both dir1 and dir2.
common_funny: names in both dir1 and dir2 where the type differs between
dir1 and dir2, or the name is not stat-able.
same_files: list of identical files.
diff_files: list of filenames which differ.
funny_files: list of files which could not be compared.
subdirs: a dictionary of dircmp objects, keyed by names in common_dirs.
"""
def __init__(self, a, b, ignore=None, hide=None): # Initialize
self.left = a
self.right = b
if hide is None:
self.hide = [os.curdir, os.pardir] # Names never to be shown
else:
self.hide = hide
if ignore is None:
self.ignore = ['RCS', 'CVS', 'tags'] # Names ignored in comparison
else:
self.ignore = ignore
def phase0(self): # Compare everything except common subdirectories
self.left_list = _filter(os.listdir(self.left),
self.hide+self.ignore)
self.right_list = _filter(os.listdir(self.right),
self.hide+self.ignore)
self.left_list.sort()
self.right_list.sort()
def phase1(self): # Compute common names
a = dict(izip(imap(os.path.normcase, self.left_list), self.left_list))
b = dict(izip(imap(os.path.normcase, self.right_list), self.right_list))
self.common = map(a.__getitem__, ifilter(b.__contains__, a))
self.left_only = map(a.__getitem__, ifilterfalse(b.__contains__, a))
self.right_only = map(b.__getitem__, ifilterfalse(a.__contains__, b))
def phase2(self): # Distinguish files, directories, funnies
self.common_dirs = []
self.common_files = []
self.common_funny = []
for x in self.common:
a_path = os.path.join(self.left, x)
b_path = os.path.join(self.right, x)
ok = 1
try:
a_stat = os.stat(a_path)
except os.error, why:
# print 'Can\'t stat', a_path, ':', why[1]
ok = 0
try:
b_stat = os.stat(b_path)
except os.error, why:
# print 'Can\'t stat', b_path, ':', why[1]
ok = 0
if ok:
a_type = stat.S_IFMT(a_stat.st_mode)
b_type = stat.S_IFMT(b_stat.st_mode)
if a_type != b_type:
self.common_funny.append(x)
elif stat.S_ISDIR(a_type):
self.common_dirs.append(x)
elif stat.S_ISREG(a_type):
self.common_files.append(x)
else:
self.common_funny.append(x)
else:
self.common_funny.append(x)
def phase3(self): # Find out differences between common files
xx = cmpfiles(self.left, self.right, self.common_files)
self.same_files, self.diff_files, self.funny_files = xx
def phase4(self): # Find out differences between common subdirectories
# A new dircmp object is created for each common subdirectory,
# these are stored in a dictionary indexed by filename.
# The hide and ignore properties are inherited from the parent
self.subdirs = {}
for x in self.common_dirs:
a_x = os.path.join(self.left, x)
b_x = os.path.join(self.right, x)
self.subdirs[x] = dircmp(a_x, b_x, self.ignore, self.hide)
def phase4_closure(self): # Recursively call phase4() on subdirectories
self.phase4()
for sd in self.subdirs.itervalues():
sd.phase4_closure()
def report(self): # Print a report on the differences between a and b
# Output format is purposely lousy
print 'diff', self.left, self.right
if self.left_only:
self.left_only.sort()
print 'Only in', self.left, ':', self.left_only
if self.right_only:
self.right_only.sort()
print 'Only in', self.right, ':', self.right_only
if self.same_files:
self.same_files.sort()
print 'Identical files :', self.same_files
if self.diff_files:
self.diff_files.sort()
print 'Differing files :', self.diff_files
if self.funny_files:
self.funny_files.sort()
print 'Trouble with common files :', self.funny_files
if self.common_dirs:
self.common_dirs.sort()
print 'Common subdirectories :', self.common_dirs
if self.common_funny:
self.common_funny.sort()
print 'Common funny cases :', self.common_funny
def report_partial_closure(self): # Print reports on self and on subdirs
self.report()
for sd in self.subdirs.itervalues():
print
sd.report()
def report_full_closure(self): # Report on self and subdirs recursively
self.report()
for sd in self.subdirs.itervalues():
print
sd.report_full_closure()
methodmap = dict(subdirs=phase4,
same_files=phase3, diff_files=phase3, funny_files=phase3,
common_dirs = phase2, common_files=phase2, common_funny=phase2,
common=phase1, left_only=phase1, right_only=phase1,
left_list=phase0, right_list=phase0)
def __getattr__(self, attr):
if attr not in self.methodmap:
raise AttributeError, attr
self.methodmap[attr](self)
return getattr(self, attr)
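# Note: dircmp attributes are computed lazily. Accessing, say, d.diff_files goes
# through __getattr__, which runs phase3(); phase3 touches common_files, which in
# turn triggers phase2 -> phase1 -> phase0 as needed, and the results are stored
# as instance attributes. Illustrative usage with hypothetical directories:
#
#   >>> d = dircmp('/tmp/dir_a', '/tmp/dir_b')
#   >>> d.diff_files    # computed on first access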
def cmpfiles(a, b, common, shallow=1):
"""Compare common files in two directories.
a, b -- directory names
common -- list of file names found in both directories
shallow -- if true, do comparison based solely on stat() information
Returns a tuple of three lists:
files that compare equal
files that are different
filenames that aren't regular files.
"""
res = ([], [], [])
for x in common:
ax = os.path.join(a, x)
bx = os.path.join(b, x)
res[_cmp(ax, bx, shallow)].append(x)
return res
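# Illustrative call; the directories and file names are hypothetical:
#
#   >>> equal, different, funny = cmpfiles('/tmp/a', '/tmp/b', ['x.txt', 'y.txt'])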
# Compare two files.
# Return:
# 0 for equal
# 1 for different
# 2 for funny cases (can't stat, etc.)
#
def _cmp(a, b, sh, abs=abs, cmp=cmp):
try:
return not abs(cmp(a, b, sh))
except (os.error, IOError):
return 2
# Return a copy with items that occur in skip removed.
#
def _filter(flist, skip):
return list(ifilterfalse(skip.__contains__, flist))
# Demonstration and testing.
#
def demo():
import sys
import getopt
options, args = getopt.getopt(sys.argv[1:], 'r')
if len(args) != 2:
raise getopt.GetoptError('need exactly two args', None)
dd = dircmp(args[0], args[1])
if ('-r', '') in options:
dd.report_full_closure()
else:
dd.report()
if __name__ == '__main__':
demo()
| mit | 6,159,925,190,720,770,000 | 31.391892 | 84 | 0.572695 | false |
kenglishhi/gae-django-sandbox | django/contrib/sessions/backends/base.py | 90 | 9240 | import base64
import os
import random
import sys
import time
from datetime import datetime, timedelta
try:
import cPickle as pickle
except ImportError:
import pickle
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.utils.hashcompat import md5_constructor
# Use the system (hardware-based) random number generator if it exists.
if hasattr(random, 'SystemRandom'):
randrange = random.SystemRandom().randrange
else:
randrange = random.randrange
MAX_SESSION_KEY = 18446744073709551616L # 2 << 63
class CreateError(Exception):
"""
Used internally as a consistent exception type to catch from save (see the
docstring for SessionBase.save() for details).
"""
pass
class SessionBase(object):
"""
Base class for all Session classes.
"""
TEST_COOKIE_NAME = 'testcookie'
TEST_COOKIE_VALUE = 'worked'
def __init__(self, session_key=None):
self._session_key = session_key
self.accessed = False
self.modified = False
def __contains__(self, key):
return key in self._session
def __getitem__(self, key):
return self._session[key]
def __setitem__(self, key, value):
self._session[key] = value
self.modified = True
def __delitem__(self, key):
del self._session[key]
self.modified = True
def keys(self):
return self._session.keys()
def items(self):
return self._session.items()
def get(self, key, default=None):
return self._session.get(key, default)
def pop(self, key, *args):
self.modified = self.modified or key in self._session
return self._session.pop(key, *args)
def setdefault(self, key, value):
if key in self._session:
return self._session[key]
else:
self.modified = True
self._session[key] = value
return value
def set_test_cookie(self):
self[self.TEST_COOKIE_NAME] = self.TEST_COOKIE_VALUE
def test_cookie_worked(self):
return self.get(self.TEST_COOKIE_NAME) == self.TEST_COOKIE_VALUE
def delete_test_cookie(self):
del self[self.TEST_COOKIE_NAME]
def encode(self, session_dict):
"Returns the given session dictionary pickled and encoded as a string."
pickled = pickle.dumps(session_dict, pickle.HIGHEST_PROTOCOL)
pickled_md5 = md5_constructor(pickled + settings.SECRET_KEY).hexdigest()
return base64.encodestring(pickled + pickled_md5)
def decode(self, session_data):
encoded_data = base64.decodestring(session_data)
pickled, tamper_check = encoded_data[:-32], encoded_data[-32:]
if md5_constructor(pickled + settings.SECRET_KEY).hexdigest() != tamper_check:
raise SuspiciousOperation("User tampered with session cookie.")
try:
return pickle.loads(pickled)
# Unpickling can cause a variety of exceptions. If something happens,
# just return an empty dictionary (an empty session).
except:
return {}
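    # Round-trip sketch: the session dict is pickled, suffixed with
    # md5(pickled + SECRET_KEY), and base64-encoded; decode() recomputes the digest
    # to detect tampering. 'SessionStore' stands for any concrete backend subclass
    # (e.g. django.contrib.sessions.backends.db.SessionStore).
    #
    #   >>> s = SessionStore()
    #   >>> blob = s.encode({'user_id': 42})
    #   >>> s.decode(blob)
    #   {'user_id': 42}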
def update(self, dict_):
self._session.update(dict_)
self.modified = True
def has_key(self, key):
return self._session.has_key(key)
def values(self):
return self._session.values()
def iterkeys(self):
return self._session.iterkeys()
def itervalues(self):
return self._session.itervalues()
def iteritems(self):
return self._session.iteritems()
def clear(self):
# To avoid unnecessary persistent storage accesses, we set up the
# internals directly (loading data wastes time, since we are going to
# set it to an empty dict anyway).
self._session_cache = {}
self.accessed = True
self.modified = True
def _get_new_session_key(self):
"Returns session key that isn't being used."
# The random module is seeded when this Apache child is created.
# Use settings.SECRET_KEY as added salt.
try:
pid = os.getpid()
except AttributeError:
# No getpid() in Jython, for example
pid = 1
while 1:
session_key = md5_constructor("%s%s%s%s"
% (randrange(0, MAX_SESSION_KEY), pid, time.time(),
settings.SECRET_KEY)).hexdigest()
if not self.exists(session_key):
break
return session_key
def _get_session_key(self):
if self._session_key:
return self._session_key
else:
self._session_key = self._get_new_session_key()
return self._session_key
def _set_session_key(self, session_key):
self._session_key = session_key
session_key = property(_get_session_key, _set_session_key)
def _get_session(self, no_load=False):
"""
Lazily loads session from storage (unless "no_load" is True, when only
an empty dict is stored) and stores it in the current instance.
"""
self.accessed = True
try:
return self._session_cache
except AttributeError:
if self._session_key is None or no_load:
self._session_cache = {}
else:
self._session_cache = self.load()
return self._session_cache
_session = property(_get_session)
def get_expiry_age(self):
"""Get the number of seconds until the session expires."""
expiry = self.get('_session_expiry')
if not expiry: # Checks both None and 0 cases
return settings.SESSION_COOKIE_AGE
if not isinstance(expiry, datetime):
return expiry
delta = expiry - datetime.now()
return delta.days * 86400 + delta.seconds
def get_expiry_date(self):
"""Get session the expiry date (as a datetime object)."""
expiry = self.get('_session_expiry')
if isinstance(expiry, datetime):
return expiry
if not expiry: # Checks both None and 0 cases
expiry = settings.SESSION_COOKIE_AGE
return datetime.now() + timedelta(seconds=expiry)
def set_expiry(self, value):
"""
Sets a custom expiration for the session. ``value`` can be an integer,
a Python ``datetime`` or ``timedelta`` object or ``None``.
If ``value`` is an integer, the session will expire after that many
seconds of inactivity. If set to ``0`` then the session will expire on
browser close.
If ``value`` is a ``datetime`` or ``timedelta`` object, the session
will expire at that specific future time.
If ``value`` is ``None``, the session uses the global session expiry
policy.
"""
if value is None:
# Remove any custom expiration for this session.
try:
del self['_session_expiry']
except KeyError:
pass
return
if isinstance(value, timedelta):
value = datetime.now() + value
self['_session_expiry'] = value
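    # Illustrative usage, assuming a view's request object with a session backend:
    #
    #   request.session.set_expiry(300)                # expire after 300s of inactivity
    #   request.session.set_expiry(timedelta(days=1))  # expire at a fixed future time
    #   request.session.set_expiry(0)                  # expire on browser close
    #   request.session.set_expiry(None)               # revert to the global policy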
def get_expire_at_browser_close(self):
"""
Returns ``True`` if the session is set to expire when the browser
closes, and ``False`` if there's an expiry date. Use
``get_expiry_date()`` or ``get_expiry_age()`` to find the actual expiry
date/age, if there is one.
"""
if self.get('_session_expiry') is None:
return settings.SESSION_EXPIRE_AT_BROWSER_CLOSE
return self.get('_session_expiry') == 0
def flush(self):
"""
Removes the current session data from the database and regenerates the
key.
"""
self.clear()
self.delete()
self.create()
def cycle_key(self):
"""
Creates a new session key, whilst retaining the current session data.
"""
data = self._session_cache
key = self.session_key
self.create()
self._session_cache = data
self.delete(key)
# Methods that child classes must implement.
def exists(self, session_key):
"""
Returns True if the given session_key already exists.
"""
raise NotImplementedError
def create(self):
"""
Creates a new session instance. Guaranteed to create a new object with
a unique key and will have saved the result once (with empty data)
before the method returns.
"""
raise NotImplementedError
def save(self, must_create=False):
"""
Saves the session data. If 'must_create' is True, a new session object
is created (otherwise a CreateError exception is raised). Otherwise,
save() can update an existing object with the same key.
"""
raise NotImplementedError
def delete(self, session_key=None):
"""
Deletes the session data under this key. If the key is None, the
current session key value is used.
"""
raise NotImplementedError
def load(self):
"""
Loads the session data and returns a dictionary.
"""
raise NotImplementedError
| apache-2.0 | 1,615,816,144,211,927,300 | 31.195122 | 86 | 0.604545 | false |
LeeYiFang/Carkinos | src/probes/views.py | 1 | 122212 | from django.shortcuts import render,render_to_response
from django.http import HttpResponse, Http404,JsonResponse
from django.views.decorators.http import require_GET
from .models import Dataset, CellLine, ProbeID, Sample, Platform, Clinical_Dataset,Clinical_sample,Gene
from django.template import RequestContext
from django.utils.html import mark_safe
import json
import pandas as pd
import numpy as np
from pathlib import Path
import sklearn
from sklearn.decomposition import PCA
from scipy import stats
import os
import matplotlib as mpl
mpl.use('agg')
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.colors import LinearSegmentedColormap
import uuid
from rpy2.robjects.packages import importr
import rpy2.robjects as ro
r=ro.r
#lumi= importr('lumi')
from rpy2.robjects import pandas2ri
pandas2ri.activate()
import csv
#import logging
#logger = logging.getLogger("__name__")
show_row=4000 # results with more rows than this are returned in downloadable-file mode instead of being shown
def generate_samples():
d=Dataset.objects.all()
cell_d_name=list(d.values_list('name',flat=True))
same_name=[]
    cell_datasets=[] # [[dataset_name, alias, [Sample, ...], [[primary_site, [cell line, ...]], ...]], ...]
for i in cell_d_name:
if i=="Sanger Cell Line Project":
alias='sanger'
same_name.append('sanger')
elif i=="NCI60":
alias='nci'
same_name.append('nci')
elif i=="GSE36133":
alias='ccle'
same_name.append('ccle')
else:
alias=i
same_name.append(i)
sample=Sample.objects.filter(dataset_id__name=i).order_by('cell_line_id__primary_site').select_related('cell_line_id')
cell_datasets.append([i,alias,list(sample),[]])
sites=list(sample.values_list('cell_line_id__primary_site',flat=True))
hists=list(sample.values_list('cell_line_id__name',flat=True))
dis_prim=list(sample.values_list('cell_line_id__primary_site',flat=True).distinct())
hists=list(hists)
id_counter=0
for p in range(0,len(dis_prim)):
temp=sites.count(dis_prim[p])
cell_datasets[-1][3].append([dis_prim[p],list(set(hists[id_counter:id_counter+temp]))])
id_counter+=temp
d=Clinical_Dataset.objects.all()
d_name=list(d.values_list('name',flat=True))
datasets=[] #[[dataset_name,[[primary_site,[primary_histology]]],[[filter_type,[filter_choice]]]]
primarys=[] #[[primary_site,[primary_hist]]]
primh_filter=[] #[[filter_type,[filter_choice]]]
f_type=['age','gender','ethnic','grade','stage','stageT','stageN','stageM','metastatic']
for i in d_name:
same_name.append(i)
datasets.append([i,[],[]])
sample=Clinical_sample.objects.filter(dataset_id__name=i).order_by('primary_site')
sites=list(sample.values_list('primary_site',flat=True))
hists=list(sample.values_list('primary_hist',flat=True))
dis_prim=list(sample.values_list('primary_site',flat=True).distinct())
hists=list(hists)
id_counter=0
for p in range(0,len(dis_prim)):
temp=sites.count(dis_prim[p])
datasets[-1][1].append([dis_prim[p],list(set(hists[id_counter:id_counter+temp]))])
id_counter+=temp
for f in f_type:
temp=list(set(sample.values_list(f,flat=True)))
datasets[-1][2].append([f,temp])
sample=Clinical_sample.objects.all().order_by('primary_site')
sites=list(sample.values_list('primary_site',flat=True))
hists=list(sample.values_list('primary_hist',flat=True))
dis_prim=list(sample.values_list('primary_site',flat=True).distinct())
hists=list(hists)
id_counter=0
for p in range(0,len(dis_prim)):
temp=sites.count(dis_prim[p])
primarys.append([dis_prim[p],list(set(hists[id_counter:id_counter+temp]))])
id_counter+=temp
s=Clinical_sample.objects.all()
for f in f_type:
temp=list(set(s.values_list(f,flat=True)))
primh_filter.append([f,temp])
all_full_name=cell_d_name+d_name
return {
'all_full_name':mark_safe(json.dumps(all_full_name)), #full name of all datasets
'same_name':mark_safe(json.dumps(same_name)), #short name for all datasets
'cell_d_name':mark_safe(json.dumps(cell_d_name)), #cell line dataset name(full)
'cell_datasets':cell_datasets,
'd_name': mark_safe(json.dumps(d_name)), #clinical dataset name
'datasets': datasets,
'primarys': primarys,
'primh_filter':primh_filter,
}
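# Shape of the structures returned above (the cell line and histology values are
# hypothetical examples):
#   cell_datasets -> [['NCI60', 'nci', [<Sample>, ...],
#                      [['lung', ['A549', 'NCI-H23']], ...]], ...]
#   datasets      -> [[dataset_name, [['lung', ['adenocarcinoma', ...]], ...],
#                      [['age', [...]], ['gender', [...]], ...]], ...]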
def sample_microarray(request):
d=Clinical_Dataset.objects.all()
d_name=list(d.values_list('name',flat=True))
datasets=[] #[[dataset_name,[[primary_site,[primary_histology]]],[[filter_type,[filter_choice]]]]
primarys=[] #[[primary_site,[primary_hist]]]
primh_filter=[] #[[filter_type,[filter_choice]]]
f_type=['age','gender','ethnic','grade','stage','stageT','stageN','stageM','metastatic']
for i in d_name:
datasets.append([i,[],[]])
sample=Clinical_sample.objects.filter(dataset_id__name=i).order_by('primary_site')
sites=list(sample.values_list('primary_site',flat=True))
hists=list(sample.values_list('primary_hist',flat=True))
dis_prim=list(sample.values_list('primary_site',flat=True).distinct())
hists=list(hists)
id_counter=0
for p in range(0,len(dis_prim)):
temp=sites.count(dis_prim[p])
datasets[-1][1].append([dis_prim[p],list(set(hists[id_counter:id_counter+temp]))])
id_counter+=temp
for f in f_type:
temp=list(set(sample.values_list(f,flat=True)))
datasets[-1][2].append([f,temp])
sample=Clinical_sample.objects.all().order_by('primary_site')
sites=list(sample.values_list('primary_site',flat=True))
hists=list(sample.values_list('primary_hist',flat=True))
dis_prim=list(sample.values_list('primary_site',flat=True).distinct())
hists=list(hists)
id_counter=0
for p in range(0,len(dis_prim)):
temp=sites.count(dis_prim[p])
primarys.append([dis_prim[p],list(set(hists[id_counter:id_counter+temp]))])
id_counter+=temp
s=Clinical_sample.objects.all()
for f in f_type:
temp=list(set(s.values_list(f,flat=True)))
primh_filter.append([f,temp])
return render(request, 'sample_microarray.html', {
'd_name': mark_safe(json.dumps(d_name)),
'datasets': datasets,
'primarys': primarys,
'primh_filter':primh_filter,
})
def user_pca(request):
    # load the ranking file and the probe table first, then open the data files
    pform=request.POST['data_platform']
    uni=[] # stores valid probe offsets used to pull the correct rows from the data
uni_probe=[]
gene_flag=0
if(pform=="others"): #gene level
gene_flag=1
if(request.POST['ngs']=="ngs_u133a"):
pform="U133A"
else:
pform="PLUS2"
elif (pform=="U133A"):
quantile=list(np.load('ranking_u133a.npy'))
probe_path=Path('../').resolve().joinpath('src','Affy_U133A_probe_info.csv')
probe_list = pd.read_csv(probe_path.as_posix())
uni_probe=pd.unique(probe_list['PROBEID'])
else:
quantile=np.load('ranking_u133plus2.npy')
probe_path=Path('../').resolve().joinpath('src','Affy_U133plus2_probe_info.csv')
probe_list = pd.read_csv(probe_path.as_posix())
uni_probe=pd.unique(probe_list['PROBEID'])
propotion=0
table_propotion=0
show=request.POST['show_type'] #get the pca show type
nci_size=Sample.objects.filter(dataset_id__name__in=["NCI60"]).count()
gse_size=Sample.objects.filter(dataset_id__name__in=["GSE36133"]).count()
group_counter=1
user_out_group=[]
s_group_dict={} #store sample
offset_group_dict={} #store offset
cell_line_dict={}
#this part is for selecting cell lines base on dataset
#count how many group
group_counter=1
while True:
temp_name='dataset_g'+str(group_counter)
if temp_name in request.POST:
group_counter=group_counter+1
else:
group_counter=group_counter-1
break
s_group_dict={} #store sample
group_name=[]
offset_group_dict={} #store offset
clinic=list(Clinical_Dataset.objects.all().values_list('name',flat=True))
clline=list(Dataset.objects.all().values_list('name',flat=True))
all_exist_dataset=[]
for i in range(1,group_counter+1):
dname='dataset_g'+str(i)
all_exist_dataset=all_exist_dataset+request.POST.getlist(dname)
all_exist_dataset=list(set(all_exist_dataset))
all_base=[0]
for i in range(0,len(all_exist_dataset)-1):
if all_exist_dataset[i] in clline:
all_base.append(all_base[i]+Sample.objects.filter(dataset_id__name__in=[all_exist_dataset[i]]).count())
else:
all_base.append(all_base[i]+Clinical_sample.objects.filter(dataset_id__name__in=[all_exist_dataset[i]]).count())
all_c=[]
for i in range(1,group_counter+1):
s_group_dict['g'+str(i)]=[]
offset_group_dict['g'+str(i)]=[]
cell_line_dict['g'+str(i)]=[]
dname='dataset_g'+str(i)
datasets=request.POST.getlist(dname)
group_name.append('g'+str(i))
for dn in datasets:
if dn=='Sanger Cell Line Project':
c='select_sanger_g'+str(i)
elif dn=='NCI60':
c='select_nci_g'+str(i)
elif dn=='GSE36133':
c='select_ccle_g'+str(i)
if dn in clline:
temp=list(set(request.POST.getlist(c)))
if 'd_sample' in show:
if all_c==[]:
all_c=all_c+temp
uni=temp
else:
uni=list(set(temp)-set(all_c))
all_c=all_c+uni
else:
                    uni=list(temp) # duplicate inputs are not filtered in the select+centroid display mode
s=Sample.objects.filter(cell_line_id__name__in=uni,dataset_id__name__in=[dn]).order_by('dataset_id'
).select_related('cell_line_id__name','cell_line_id__primary_site','cell_line_id__primary_hist','dataset_id','dataset_id__name')
cell_line_dict['g'+str(i)]=cell_line_dict['g'+str(i)]+list(s.values_list('cell_line_id__name',flat=True))
s_group_dict['g'+str(i)]=s_group_dict['g'+str(i)]+list(s)
offset_group_dict['g'+str(i)]=offset_group_dict['g'+str(i)]+list(np.add(list(s.values_list('offset',flat=True)),all_base[all_exist_dataset.index(dn)]))
else: #dealing with clinical sample datasets
com_hists=list(set(request.POST.getlist('primd_'+dn+'_g'+str(i)))) #can I get this by label to reduce number of queries?
com_hists=[w1 for segments in com_hists for w1 in segments.split('/')]
#print(com_hists)
prims=com_hists[0::2]
hists=com_hists[1::2]
temp=request.POST.getlist('filter_'+dn+'_g'+str(i))
age=[]
gender=[]
ethnic=[]
grade=[]
stage=[]
T=[]
N=[]
M=[]
metas=[]
for t in temp:
if 'stage/' in t:
stage.append(t[6:])
elif 'gender/' in t:
gender.append(t[7:])
elif 'ethnic/' in t:
ethnic.append(t[7:])
elif 'grade/' in t:
grade.append(t[6:])
elif 'stageT/' in t:
T.append(t[7:])
elif 'stageN/' in t:
N.append(t[7:])
elif 'stageM/' in t:
M.append(t[7:])
elif 'metastatic/' in t:
metas.append(t[11:])
'''
if t[11:]=='False':
metas.append(0)
else:
metas.append(1)
'''
else: #"age/"
age.append(t[4:])
#print(len(prims))
#print(len(hists))
for x in range(0,len(prims)):
s=Clinical_sample.objects.filter(dataset_id__name=dn,primary_site=prims[x],
primary_hist=hists[x],
age__in=age,
gender__in=gender,
ethnic__in=ethnic,
stage__in=stage,
grade__in=grade,
stageT__in=T,
stageN__in=N,
stageM__in=M,
metastatic__in=metas,
).select_related('dataset_id').order_by('id')
s_group_dict['g'+str(i)]=s_group_dict['g'+str(i)]+list(s)
cell_line_dict['g'+str(i)]=cell_line_dict['g'+str(i)]+list(s.values_list('name',flat=True))
offset_group_dict['g'+str(i)]=offset_group_dict['g'+str(i)]+list(np.add(list(s.values_list('offset',flat=True)),all_base[all_exist_dataset.index(dn)]))
#return render_to_response('welcome.html',locals())
all_sample=[]
all_cellline=[]
cell_object=[]
all_offset=[]
sample_counter={}
group_cell=[]
g_s_counter=[0]
for i in range(1,group_counter+1):
        all_sample=all_sample+s_group_dict['g'+str(i)] # no duplicate samples occur here when the display type is d_sample
all_offset=all_offset+offset_group_dict['g'+str(i)]
all_cellline=all_cellline+cell_line_dict['g'+str(i)]
g_s_counter.append(g_s_counter[i-1]+len(s_group_dict['g'+str(i)]))
for i in all_sample:
sample_counter[i.name]=1
if str(type(i))=="<class 'probes.models.Sample'>":
##print("i am sample!!")
cell_object.append(i.cell_line_id)
else:
##print("i am clinical!!")
cell_object.append(i)
#read the user file
text=request.FILES.getlist('user_file')
user_counter=len(text)
if(gene_flag==1):
user_counter=1
ugroup=[]
for i in range(user_counter):
ugroup.append(request.POST['ugroup_name'+str(i+1)])
if(ugroup[-1]==''):
ugroup[-1]='User_Group'+str(i+1)
dgroup=[]
for i in range(1,group_counter+1):
dgroup.append(request.POST['group_name'+str(i)])
if(dgroup[-1]==''):
dgroup[-1]='Dataset_Group'+str(i)
user_dict={} #{user group number:user 2d array}
samples=0
nans=[] #to store the probe name that has nan
for x in range(1,user_counter+1):
#check the file format and content here first
filetype=str(text[x-1]).split('.')
if(filetype[-1]!="csv"):
error_reason='You have the wrong file type. Please upload .csv files'
return render_to_response('pca_error.html',RequestContext(request,
{
'error_reason':mark_safe(json.dumps(error_reason)),
}))
if(text[x-1].size>=80000000): #bytes
            error_reason='The file size is too big. Please upload a .csv file smaller than 80 MB.'
return render_to_response('pca_error.html',RequestContext(request,
{
'error_reason':mark_safe(json.dumps(error_reason)),
}))
temp_data = pd.read_csv(text[x-1])
col=list(temp_data.columns.values)
samples=samples+len(col)-1
if(samples==0):
error_reason='The file does not have any samples.'
return render_to_response('pca_error.html',RequestContext(request,
{
'error_reason':mark_safe(json.dumps(error_reason)),
}))
if(gene_flag==0): #probe level check
check_probe=[str(x) for x in list(temp_data.iloc[:,0]) if not str(x).lower().startswith('affx')]
#print(len(check_probe))
if(len(check_probe)!=len(uni_probe)):
error_reason='The probe number does not match with the platform you selected.'
return render_to_response('pca_error.html',RequestContext(request,
{
'error_reason':mark_safe(json.dumps(error_reason)),
}))
if(set(check_probe)!=set(uni_probe)):
error_reason='The probe number or probe name in your file does not match the platform you selected.'
#error_reason+='</br>The probes that are not in the platform: '+str(set(check_probe)-set(uni_probe))[1:-1]
#error_reason+='</br>The probes that are lacking: '+str(set(uni_probe)-set(check_probe))[1:-1]
return render_to_response('pca_error.html',RequestContext(request,
{
'error_reason':mark_safe(json.dumps(error_reason)),
}))
col=list(temp_data.columns.values)
n=pd.isnull(temp_data).any(1).nonzero()[0]
nans += list(temp_data[col[0]][n])
user_dict[x]=temp_data
if 'd_sample' in show:
if((len(all_sample)+samples)<4):
            error_reason='You should have at least 4 samples for PCA. There are not enough samples.<br />'\
'The total number of samples in your uploaded file is '+str(samples)+'.<br />'\
'The number of samples you selected is '+str(len(all_sample))+'.<br />'\
'Total is '+str(len(all_sample)+samples)+'.'
return render_to_response('pca_error.html',RequestContext(request,
{
'error_reason':mark_safe(json.dumps(error_reason)),
}))
else:
s_count=0
sample_list=[]
a_sample=np.array(all_sample)
for i in range(1,group_counter+1):
dis_cellline=list(set(cell_object[g_s_counter[i-1]:g_s_counter[i]]))
a_cell_object=np.array(cell_object)
for c in dis_cellline:
temp1=np.where((a_cell_object==c))[0]
temp2=np.where((temp1>=g_s_counter[i-1])&(temp1<g_s_counter[i]))
total_offset=temp1[temp2]
selected_sample=a_sample[total_offset]
if list(selected_sample) in sample_list: #to prevent two different colors in different group
continue
else:
sample_list.append(list(selected_sample))
s_count=s_count+1
if(s_count>=4): #check this part
break
if(s_count>=4): #check this part
break
if((s_count+samples)<4):
error_reason='Since the display method is [centroid], you should have at least 4 dots for PCA. The total number is not enough.<br />'\
                'The total number of dots in your uploaded file is '+str(samples)+'.<br />'\
'The number of centroid dots you selected is '+str(s_count)+'.<br />'\
'Total is '+str(s_count+samples)+'.'
return render_to_response('pca_error.html',RequestContext(request,
{
'error_reason':mark_safe(json.dumps(error_reason)),
}))
new_name=[]
origin_name=[]
com_gene=[] #for ngs select same gene
nans=list(set(nans))
for x in range(1,user_counter+1):
#temp_data = pd.read_csv(text[x-1])
temp_data=user_dict[x]
col=list(temp_data.columns.values)
col[0]='probe'
temp_data.columns=col
temp_data.index = temp_data['probe']
temp_data.index.name = None
temp_data=temp_data.iloc[:, 1:]
#add "use_" to user's sample names
col_name=list(temp_data.columns.values) #have user's sample name list here
origin_name=origin_name+list(temp_data.columns.values)
col_name=[ "user_"+str(index)+"_"+s for index,s in enumerate(col_name)]
temp_data.columns=col_name
new_name=new_name+col_name
if(gene_flag==0):
try:
temp_data=temp_data.reindex(uni_probe)
except ValueError:
return HttpResponse('The file has probes with the same names, please let them be unique.')
#remove probe that has nan
temp_data=temp_data.drop(nans)
temp_data=temp_data.rank(method='dense')
#this is for quantile
for i in col_name:
for j in range(0,len(temp_data[i])):
#if(not(np.isnan(temp_data[i][j]))):
temp_data[i][j]=quantile[int(temp_data[i][j]-1)]
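            # The rank + lookup steps above perform reference-based quantile
            # normalization: each user sample is converted to dense ranks and every
            # rank is replaced by the value at that rank position in the pre-computed
            # platform reference distribution stored in 'quantile'.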
if x==1:
data=temp_data
else:
data=np.concatenate((data,temp_data), axis=1)
user_dict[x]=np.array(temp_data)
else:
temp_data=temp_data.drop(nans) #drop nan
temp_data=temp_data.loc[~(temp_data==0).all(axis=1)] #drop all rows with 0 here
temp_data=temp_data.groupby(temp_data.index).first() #drop the duplicate gene row
user_dict[x]=temp_data
#print(temp_data)
#delete nan, combine user data to the datasets,transpose matrix
for x in range(0,len(all_exist_dataset)):
if(gene_flag==0):
if all_exist_dataset[x] in clline:
pth=Path('../').resolve().joinpath('src',Dataset.objects.get(name=all_exist_dataset[x]).data_path)
else:
pth=Path('../').resolve().joinpath('src',Clinical_Dataset.objects.get(name=all_exist_dataset[x]).data_path)
else:
if all_exist_dataset[x] in clline:
pth=Path('../').resolve().joinpath('src','gene_'+Dataset.objects.get(name=all_exist_dataset[x]).data_path)
else:
pth=Path('../').resolve().joinpath('src','gene_'+Clinical_Dataset.objects.get(name=all_exist_dataset[x]).data_path)
if x==0:
val=np.load(pth.as_posix())
else:
val=np.hstack((val, np.load(pth.as_posix())))#combine together
#database dataset remove nan probes
if(gene_flag==0):
uni=[]
p_offset=list(ProbeID.objects.filter(platform__name__in=[pform],Probe_id__in=nans).values_list('offset',flat=True))
for n in range(0,len(uni_probe)):
if(n not in p_offset):
uni.append(n)
else:
#deal with ngs uploaded data here
probe_path=Path('../').resolve().joinpath('src','new_human_gene_info.txt')
#probe_list = pd.read_csv(probe_path.as_posix())
#notice duplicate
#get the match gene first, notice the size issue
info=pd.read_csv(probe_path.as_posix(),sep='\t')
col=list(info.columns.values)
col[0]='symbol'
info.columns=col
info.index = info['symbol']
info.index.name = None
info=info.iloc[:, 1:]
data=user_dict[1]
#data=data.groupby(data.index).first() #drop the duplicate gene row
com_gene=list(data.index)
temp_data=data
rloop=divmod(len(com_gene),990)
if(rloop[1]==0):
rloop=(rloop[0]-1,0)
gg=[]
for x in range(0,rloop[0]+1):
gg+=list(Gene.objects.filter(platform__name__in=[pform],symbol__in=com_gene[x*990:(x+1)*990]).order_by('offset'))
exist_gene=[]
uni=[]
for i in gg:
exist_gene.append(i.symbol)
uni.append(i.offset)
info=info.drop(exist_gene,errors='ignore')
new_data=temp_data.loc[data.index.isin(exist_gene)].reindex(exist_gene)
##print(exist_gene)
##print(new_data.index)
#search remain symbol's alias and symbol
search_alias=list(set(com_gene)-set(exist_gene))
for i in search_alias:
re_symbol=list(set(info.loc[info['alias'].isin([i])].index)) #find whether has alias first
if(len(re_symbol)!=0):
re_match=Gene.objects.filter(platform__name__in=[pform],symbol__in=re_symbol).order_by('offset') #check the symbol in database or not
repeat=len(re_match)
if(repeat!=0): #match gene symbol in database
##print(re_match)
for x in re_match:
to_copy=data.loc[i]
to_copy.name=x.symbol
new_data=new_data.append(to_copy)
uni.append(x.offset)
info=info.drop(x.symbol,errors='ignore')
user_dict[1]=np.array(new_data)
##print("length of new data:"+str(len(new_data)))
##print("data:")
##print(data)
##print("new_data:")
##print(new_data)
data=new_data
if 'd_sample' in show:
val=val[np.ix_(uni,all_offset)]
#print(len(val))
user_offset=len(val[0])
if(gene_flag==1):
#do the rank invariant here
#print("sample with ngs data do rank invariant here")
ref_path=Path('../').resolve().joinpath('src','cv_result.txt')
ref=pd.read_csv(ref_path.as_posix())
col=list(ref.columns.values)
col[0]='symbol'
ref.columns=col
ref.index = ref['symbol']
ref.index.name = None
ref=ref.iloc[:, 1:]
ref=ref.iloc[:5000,:] #rank invariant need 5000 genes
same_gene=list(ref.index.intersection(data.index))
#to lowess
rref=pandas2ri.py2ri(ref.loc[same_gene])
rngs=pandas2ri.py2ri(data.loc[same_gene].mean(axis=1))
rall=pandas2ri.py2ri(data)
ro.globalenv['x'] = rngs
ro.globalenv['y'] = rref
ro.globalenv['newx'] = rall
r('x<-as.vector(as.matrix(x))')
r('y<-as.vector(as.matrix(y))')
r('newx<-as.matrix(newx)')
try:
if(request.POST['data_type']=='raw'):
r('y.loess<-loess(2**y~x,span=0.3)')
r('for(z in c(1:ncol(newx))) newx[,z]=log2(as.matrix(predict(y.loess,newx[,z])))')
elif(request.POST['data_type']=='log2'):
r('y.loess<-loess(2**y~2**x,span=0.3)')
r('for(z in c(1:ncol(newx))) newx[,z]=log2(as.matrix(predict(y.loess,2**newx[,z])))')
else:
r('y.loess<-loess(2**y~10**x,span=0.3)')
r('for(z in c(1:ncol(newx))) newx[,z]=log2(as.matrix(predict(y.loess,10**newx[,z])))')
except:
                error_reason='Too few genes matched. Check your gene symbols again; we use NCBI standard gene symbols.'
return render_to_response('pca_error.html',RequestContext(request,
{
'error_reason':mark_safe(json.dumps(error_reason)),
}))
#r('for(z in c(1:ncol(newx))) newx[,z]=log2(as.matrix(predict(y.loess,newx[,z])))')
data=r('newx')
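            # The R code above calibrates the uploaded gene-level values against the
            # microarray reference: a loess curve is fitted between the reference
            # intensities and the user values of the shared (rank-invariant) genes,
            # then used to map every user column onto the reference log2 scale before
            # merging with 'val'. The centroid branch below repeats the same steps.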
#print(data[:10])
#print(type(data))
val=np.hstack((np.array(val), np.array(data)))
val=val[~np.isnan(val).any(axis=1)]
val=np.transpose(val)
else:
#val=np.array(val)
val=val[np.ix_(uni)]
user_offset=len(val[0])
if(gene_flag==1):
#print("sample with ngs data do rank invariant here")
ref_path=Path('../').resolve().joinpath('src','cv_result.txt')
ref=pd.read_csv(ref_path.as_posix())
col=list(ref.columns.values)
col[0]='symbol'
ref.columns=col
ref.index = ref['symbol']
ref.index.name = None
ref=ref.iloc[:, 1:]
ref=ref.iloc[:5000,:] #rank invariant need 5000 genes
same_gene=list(ref.index.intersection(data.index))
#to lowess
rref=pandas2ri.py2ri(ref.loc[same_gene])
rngs=pandas2ri.py2ri(data.loc[same_gene].mean(axis=1))
rall=pandas2ri.py2ri(data)
ro.globalenv['x'] = rngs
ro.globalenv['y'] = rref
ro.globalenv['newx'] = rall
r('x<-as.vector(as.matrix(x))')
r('y<-as.vector(as.matrix(y))')
r('newx<-as.matrix(newx)')
try:
if(request.POST['data_type']=='raw'):
r('y.loess<-loess(2**y~x,span=0.3)')
r('for(z in c(1:ncol(newx))) newx[,z]=log2(as.matrix(predict(y.loess,newx[,z])))')
elif(request.POST['data_type']=='log2'):
r('y.loess<-loess(2**y~2**x,span=0.3)')
r('for(z in c(1:ncol(newx))) newx[,z]=log2(as.matrix(predict(y.loess,2**newx[,z])))')
else:
r('y.loess<-loess(2**y~10**x,span=0.3)')
r('for(z in c(1:ncol(newx))) newx[,z]=log2(as.matrix(predict(y.loess,10**newx[,z])))')
except:
                error_reason='Too few genes matched. Check your gene symbols again; we use NCBI standard gene symbols.'
return render_to_response('pca_error.html',RequestContext(request,
{
'error_reason':mark_safe(json.dumps(error_reason)),
}))
#r('for(z in c(1:ncol(newx))) newx[,z]=log2(as.matrix(predict(y.loess,newx[,z])))')
data=r('newx')
#print(data[:10])
#print(type(data))
val=np.hstack((np.array(val), np.array(data)))
val=val[~np.isnan(val).any(axis=1)]
pca_index=[]
dis_offset=[]
    # PREMISE: within one dataset, a given cell line has exactly one primary site and one primary histology
name1=[]
name2=[]
name3=[]
name4=[]
name5=[]
X1=[]
Y1=[]
Z1=[]
X2=[]
Y2=[]
Z2=[]
X3=[]
Y3=[]
Z3=[]
X4=[]
Y4=[]
Z4=[]
X5=[]
Y5=[]
Z5=[]
    n=4 # number of principal components to compute; TODO: tune to the best value
if 'd_sample' in show:
#count the pca first
pca= PCA(n_components=n)
#combine user sample's offset to all_offset in another variable
Xval = pca.fit_transform(val[:,:]) #cannot get Xval with original offset any more
ratio_temp=pca.explained_variance_ratio_
propotion=sum(ratio_temp[1:n])
table_propotion=sum(ratio_temp[0:n])
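        # With n=4 the plot uses components 2-4 (indices n-3..n-1), so 'propotion' is
        # the variance explained by the displayed components, while 'table_propotion'
        # additionally includes PC1.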
user_new_offset=len(all_offset)
##print(Xval)
max=0
min=10000000000
out_group=[]
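        # The nested loops below compute, for each selected sample, the Euclidean
        # distance to every other sample (and to every uploaded user sample) in the
        # displayed PC2-PC4 subspace; 'min'/'max' track the global range of those
        # pairwise distances.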
exist_cell={}#cell line object:counter
for g in range(1,group_counter+1):
output_cell={}
check={}
for s in range(g_s_counter[g-1],g_s_counter[g]):
if str(type(all_sample[s]))=="<class 'probes.models.Sample'>":
cell=all_sample[s].cell_line_id
else:
cell=all_sample[s]
try:
counter=exist_cell[cell]
exist_cell[cell]=counter+1
except KeyError:
exist_cell[cell]=1
try:
t=output_cell[cell]
except KeyError:
output_cell[cell]=[cell,[]]
check[all_sample[s].name]=[]
sample_counter[all_sample[s].name]=exist_cell[cell]
for i in range(0,len(all_sample)):
if i!=s:
try:
if(all_sample[s].name not in check[all_sample[i].name]):
distance=np.linalg.norm(Xval[i][n-3:n]-Xval[s][n-3:n])
if distance<min:
min=distance
if distance>max:
max=distance
output_cell[cell][1].append([all_cellline[s]+'('+str(exist_cell[cell])+')'
,all_sample[s].name,cell.primary_site,cell.primary_hist,
all_sample[s].dataset_id.name,all_cellline[i],all_sample[i].name,cell_object[i].primary_site
,cell_object[i].primary_hist,all_sample[i].dataset_id.name,distance])
check[all_sample[s].name].append(all_sample[i].name)
except KeyError:
distance=np.linalg.norm(Xval[i][n-3:n]-Xval[s][n-3:n])
if distance<min:
min=distance
if distance>max:
max=distance
output_cell[cell][1].append([all_cellline[s]+'('+str(exist_cell[cell])+')'
,all_sample[s].name,cell.primary_site,cell.primary_hist,
all_sample[s].dataset_id.name,all_cellline[i],all_sample[i].name,cell_object[i].primary_site
,cell_object[i].primary_hist,all_sample[i].dataset_id.name,distance])
check[all_sample[s].name].append(all_sample[i].name)
g_count=1
u_count=len(user_dict[g_count][0]) #sample number in first user file
for i in range(user_new_offset,user_new_offset+len(origin_name)): #remember to prevent empty file uploaded
distance=np.linalg.norm(Xval[i][n-3:n]-Xval[s][n-3:n])
if distance<min:
min=distance
if distance>max:
max=distance
output_cell[cell][1].append([all_cellline[s]+'('+str(exist_cell[cell])+')'
,all_sample[s].name,cell.primary_site,cell.primary_hist,
all_sample[s].dataset_id.name," ",origin_name[i-user_new_offset]," "," ","User Group"+str(g_count),distance])
if ((i-user_new_offset+1)==u_count):
g_count+=1
try:
u_count+=len(user_dict[g_count][0])
except KeyError:
u_count+=0
if(g==1):
name3.append(all_cellline[s]+'('+str(exist_cell[cell])+')'+'<br>'+all_sample[s].name)
X3.append(round(Xval[s][n-3],5))
Y3.append(round(Xval[s][n-2],5))
Z3.append(round(Xval[s][n-1],5))
elif(g==2):
name4.append(all_cellline[s]+'('+str(exist_cell[cell])+')'+'<br>'+all_sample[s].name)
X4.append(round(Xval[s][n-3],5))
Y4.append(round(Xval[s][n-2],5))
Z4.append(round(Xval[s][n-1],5))
elif(g==3):
name5.append(all_cellline[s]+'('+str(exist_cell[cell])+')'+'<br>'+all_sample[s].name)
X5.append(round(Xval[s][n-3],5))
Y5.append(round(Xval[s][n-2],5))
Z5.append(round(Xval[s][n-1],5))
dictlist=[]
for key, value in output_cell.items():
temp = [value]
dictlist+=temp
output_cell=list(dictlist)
out_group.append(["Dataset Group"+str(g),output_cell])
if g==group_counter:
output_cell={}
g_count=1
output_cell[g_count]=[" ",[]]
u_count=len(user_dict[g_count][0])
temp_count=u_count
temp_g=1
before=0
for i in range(user_new_offset,user_new_offset+len(origin_name)):
for x in range(0,len(all_sample)):
distance=np.linalg.norm(Xval[x][n-3:n]-Xval[i][n-3:n])
if distance<min:
min=distance
if distance>max:
max=distance
output_cell[g_count][1].append([origin_name[i-user_new_offset],"User Group"+str(g_count),all_cellline[x]
,all_sample[x].name,cell_object[x].primary_site,cell_object[x].primary_hist,
all_sample[x].dataset_id.name,distance])
temp_g=1
temp_count=len(user_dict[temp_g][0])
for j in range(user_new_offset,user_new_offset+before):
if ((j-user_new_offset)==temp_count):
temp_g+=1
try:
temp_count+=len(user_dict[temp_g][0])
except KeyError:
temp_count+=0
distance=np.linalg.norm(Xval[j][n-3:n]-Xval[i][n-3:n])
if distance<min:
min=distance
if distance>max:
max=distance
output_cell[g_count][1].append([origin_name[i-user_new_offset],"User Group"+str(g_count)
," ",origin_name[j-user_new_offset]," "," ","User Group"+str(temp_g),distance])
temp_g=g_count
temp_count=len(user_dict[g_count][0])
for j in range(i+1,user_new_offset+len(origin_name)):
if ((j-user_new_offset)==temp_count):
temp_g+=1
try:
temp_count+=len(user_dict[temp_g][0])
except KeyError:
temp_count+=0
distance=np.linalg.norm(Xval[j][n-3:n]-Xval[i][n-3:n])
if distance<min:
min=distance
if distance>max:
max=distance
output_cell[g_count][1].append([origin_name[i-user_new_offset],"User Group"+str(g_count)
," ",origin_name[j-user_new_offset]," "," ","User Group"+str(temp_g),distance])
if g_count==1:
name1.append(origin_name[i-user_new_offset])
X1.append(round(Xval[i][n-3],5))
Y1.append(round(Xval[i][n-2],5))
Z1.append(round(Xval[i][n-1],5))
else:
name2.append(origin_name[i-user_new_offset])
X2.append(round(Xval[i][n-3],5))
Y2.append(round(Xval[i][n-2],5))
Z2.append(round(Xval[i][n-1],5))
if ((i-user_new_offset+1)==u_count):
dictlist=[]
for key, value in output_cell.items():
temp = [value]
dictlist+=temp
output_cell=list(dictlist)
user_out_group.append(["User Group"+str(g_count),output_cell])
g_count+=1
before=u_count+before
#print("I am here!!")
try:
u_count+=len(user_dict[g_count][0])
output_cell={}
output_cell[g_count]=[" ",[]]
except KeyError:
u_count+=0
#[g,[group_cell_1 object,[[outputs paired1,......,],[paired2],[paired3]]],[group_cell_2 object,[[pair1],[pair2]]]]
#for xx in origin_name:
#sample_counter[xx]=1
##print(out_group)
element_counter=0
for i in out_group:
for temp_list in i[1]:
element_counter=element_counter+len(temp_list[1])
for temp in temp_list[1]:
if(temp[5]!=" "):
temp[5]=temp[5]+'('+str(sample_counter[temp[6]])+')'
for i in user_out_group:
for temp_list in i[1]:
for temp in temp_list[1]:
if(temp[2]!=" "):
temp[2]=temp[2]+'('+str(sample_counter[temp[3]])+')'
return_html='user_pca.html'
else:
#This part is for centroid display
return_html='user_pca_center.html'
        # This part selects cell lines per dataset and computes centroids per dataset group:
        # within each group, the centroid is computed per cell line (over that cell line's samples).
location_dict={} #{group number:[[cell object,dataset,new location]]}
combined=[]
sample_list=[]
pca_index=np.array(pca_index)
X_val=[]
val_a=np.array(val)
a_all_offset=np.array(all_offset)
a_sample=np.array(all_sample)
for i in range(1,group_counter+1):
            dis_cellline=list(set(cell_object[g_s_counter[i-1]:g_s_counter[i]])) # cell_object may contain duplicate cell lines, e.g. NCI A + CCLE A ===> [A, A]
location_dict['g'+str(i)]=[]
dataset_dict={}
a_cell_object=np.array(cell_object)
for c in dis_cellline: #dis_cellline may not have the same order as cell_object
temp1=np.where((a_cell_object==c))[0]
temp2=np.where((temp1>=g_s_counter[i-1])&(temp1<g_s_counter[i]))
total_offset=temp1[temp2]
selected_val=val_a[:,a_all_offset[total_offset]]
selected_val=np.transpose(selected_val)
new_loca=(np.mean(selected_val,axis=0,dtype=np.float64,keepdims=True)).tolist()[0]
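                # The centroid is the per-probe mean expression over all samples of
                # this cell line within the current group.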
selected_sample=a_sample[total_offset]
if list(selected_sample) in sample_list: #to prevent two different colors in different group
continue
else:
sample_list.append(list(selected_sample))
d_temp=[]
for s in selected_sample:
d_temp.append(s.dataset_id.name)
dataset_dict[c]="/".join(list(set(d_temp)))
X_val.append(new_loca)
location_dict['g'+str(i)].append([c,dataset_dict[c],len(X_val)-1]) #the last part is the index to get pca result from new_val
combined.append([c,dataset_dict[c],len(X_val)-1]) #all cell line, do not matter order
#run the pca
user_new_offset=len(X_val)
temp_val=np.transpose(val[:,user_offset:])
for x in range(0,len(temp_val)):
X_val.append(list(temp_val[x]))
if(len(X_val)<4):
error_reason='Since the display method is [centroid], you should have at least 4 dots for PCA. The total number is not enough.<br />'\
            'The total number of dots in your uploaded file is '+str(len(temp_val))+'.<br />'\
'The number of centroid dots you selected is '+str(len(X_val)-len(temp_val))+'.<br />'\
'Total is '+str(len(X_val))+'.'
return render_to_response('pca_error.html',RequestContext(request,
{
'error_reason':mark_safe(json.dumps(error_reason)),
}))
X_val=np.matrix(X_val)
pca= PCA(n_components=n)
new_val = pca.fit_transform(X_val[:,:]) #cannot get Xval with original offset any more
ratio_temp=pca.explained_variance_ratio_
propotion=sum(ratio_temp[1:n])
table_propotion=sum(ratio_temp[0:n])
#print(new_val)
out_group=[]
min=10000000000
max=0
element_counter=0
for g in range(1,group_counter+1):
output_cell=[]
exist_cell={}
for group_c in location_dict['g'+str(g)]: #a list of [c,dataset_dict[c],new_val index] in group one
cell=group_c[0]
key_string=cell.name+'/'+cell.primary_site+'/'+cell.primary_hist+'/'+group_c[1]
exist_cell[key_string]=[]
output_cell.append([cell,[]])
#count the distance
for temp_list in combined:
c=temp_list[0]
temp_string=c.name+'/'+c.primary_site+'/'+c.primary_hist+'/'+temp_list[1]
try:
if(key_string not in exist_cell[temp_string]):
distance=np.linalg.norm(np.array(new_val[group_c[2]][n-3:n])-np.array(new_val[temp_list[2]][n-3:n]))
if distance==0:
continue
if distance<min:
min=distance
if distance>max:
max=distance
output_cell[len(output_cell)-1][1].append([cell.name,cell.primary_site,cell.primary_hist
,group_c[1],temp_list[0].name,temp_list[0].primary_site,temp_list[0].primary_hist,temp_list[1],distance])
element_counter=element_counter+1
except KeyError:
distance=np.linalg.norm(np.array(new_val[group_c[2]][n-3:n])-np.array(new_val[temp_list[2]][n-3:n]))
if distance==0:
continue
if distance<min:
min=distance
if distance>max:
max=distance
output_cell[len(output_cell)-1][1].append([cell.name,cell.primary_site,cell.primary_hist
,group_c[1],temp_list[0].name,temp_list[0].primary_site,temp_list[0].primary_hist,temp_list[1],distance])
element_counter=element_counter+1
exist_cell[key_string].append(temp_string)
g_count=1
u_count=len(user_dict[g_count][0]) #sample number in first user file
for i in range(user_new_offset,user_new_offset+len(origin_name)):
distance=np.linalg.norm(np.array(new_val[group_c[2]][n-3:n])-np.array(new_val[i][n-3:n]))
if distance<min:
min=distance
if distance>max:
max=distance
output_cell[len(output_cell)-1][1].append([cell.name,cell.primary_site,cell.primary_hist,group_c[1]
,origin_name[i-user_new_offset]," "," ","User Group"+str(g_count),distance])
element_counter=element_counter+1
if ((i-user_new_offset+1)==u_count):
g_count+=1
try:
u_count+=len(user_dict[g_count][0])
except KeyError:
u_count+=0
if(g==1):
name3.append(cell.name+'<br>'+group_c[1])
X3.append(round(new_val[group_c[2]][n-3],5))
Y3.append(round(new_val[group_c[2]][n-2],5))
Z3.append(round(new_val[group_c[2]][n-1],5))
elif(g==2):
name4.append(cell.name+'<br>'+group_c[1])
X4.append(round(new_val[group_c[2]][n-3],5))
Y4.append(round(new_val[group_c[2]][n-2],5))
Z4.append(round(new_val[group_c[2]][n-1],5))
elif(g==3):
name5.append(cell.name+'<br>'+group_c[1])
X5.append(round(new_val[group_c[2]][n-3],5))
Y5.append(round(new_val[group_c[2]][n-2],5))
Z5.append(round(new_val[group_c[2]][n-1],5))
out_group.append(["Dataset Group"+str(g),output_cell])
if g==group_counter:
output_cell=[]
g_count=1
output_cell.append([" ",[]])
u_count=len(user_dict[g_count][0])
temp_count=u_count
temp_g=1
before=0
for i in range(user_new_offset,user_new_offset+len(origin_name)):
for temp_list in combined:
c=temp_list[0]
distance=np.linalg.norm(np.array(new_val[i][n-3:n])-np.array(new_val[temp_list[2]][n-3:n]))
if distance<min:
min=distance
if distance>max:
max=distance
output_cell[len(output_cell)-1][1].append([origin_name[i-user_new_offset],"User Group"+str(g_count)
,c.name,c.primary_site,c.primary_hist,temp_list[1],distance])
temp_g=1
temp_count=len(user_dict[temp_g][0])
for j in range(user_new_offset,user_new_offset+before):
if ((j-user_new_offset)==temp_count):
temp_g+=1
try:
temp_count+=len(user_dict[temp_g][0])
except KeyError:
temp_count+=0
distance=np.linalg.norm(np.array(new_val[i][n-3:n])-np.array(new_val[j][n-3:n]))
if distance<min:
min=distance
if distance>max:
max=distance
output_cell[len(output_cell)-1][1].append([origin_name[i-user_new_offset],"User Group"+str(g_count)
,origin_name[j-user_new_offset]," "," ","User Group"+str(temp_g),distance])
temp_g=g_count
temp_count=len(user_dict[g_count][0])
for x in range(i+1,user_new_offset+len(origin_name)):
if ((x-user_new_offset)==temp_count):
temp_g+=1
try:
temp_count+=len(user_dict[temp_g][0])
except KeyError:
temp_count+=0
distance=np.linalg.norm(np.array(new_val[i][n-3:n])-np.array(new_val[x][n-3:n]))
if distance<min:
min=distance
if distance>max:
max=distance
output_cell[len(output_cell)-1][1].append([origin_name[i-user_new_offset],"User Group"+str(g_count)
,origin_name[x-user_new_offset]," "," ","User Group"+str(temp_g),distance])
if g_count==1:
name1.append(origin_name[i-user_new_offset])
X1.append(round(new_val[i][n-3],5))
Y1.append(round(new_val[i][n-2],5))
Z1.append(round(new_val[i][n-1],5))
else:
name2.append(origin_name[i-user_new_offset])
X2.append(round(new_val[i][n-3],5))
Y2.append(round(new_val[i][n-2],5))
Z2.append(round(new_val[i][n-1],5))
if ((i-user_new_offset+1)==u_count):
user_out_group.append(["User Group"+str(g_count),output_cell])
g_count+=1
before=u_count+before
#print("I am here!!")
try:
u_count+=len(user_dict[g_count][0])
output_cell=[]
output_cell.append([" ",[]])
except KeyError:
u_count+=0
#print(element_counter)
#print(show_row)
if(element_counter>show_row):
big_flag=1
sid=str(uuid.uuid1())+".csv"
if(return_html=='user_pca.html'):
dataset_header=['Group Cell Line/Clinical Sample','Sample Name','Primary Site','Primary Histology'
,'Dataset','Paired Cell Line name/Clinical Sample','Sample Name','Primary Site','Primary Histology','Dataset','Distance']
user_header=['User Sample Name','Dataset','Paired Cell Line name/Clinical Sample','Sample Name','Primary Site','Primary Histology','Dataset','Distance']
else:
dataset_header=['Group Cell Line/Clinical Sample','Primary Site','Primary Histology'
,'Dataset','Paired Cell Line name/Clinical Sample','Primary Site','Primary Histology','Dataset','Distance']
user_header=['User Sample Name','Dataset','Paired Cell Line name/Clinical Sample','Primary Site','Primary Histology','Dataset','Distance']
P=Path('../').resolve().joinpath('src','static','csv',"dataset_"+sid)
userP=Path('../').resolve().joinpath('src','static','csv',"user_"+sid)
assP=Path('../').resolve().joinpath('src','assets','csv',"dataset_"+sid)
assuserP=Path('../').resolve().joinpath('src','assets','csv',"user_"+sid)
#print("start writing files")
with open(str(assP), "w", newline='') as f:
writer = csv.writer(f)
for index,output_cell in out_group:
writer.writerows([[dgroup[int(index[-1])-1]]])
writer.writerows([dataset_header])
for cell_line,b in output_cell:
writer.writerows(b)
#print("end writing first file")
'''
with open(str(assP), "w", newline='') as ff:
writer = csv.writer(ff)
for index,output_cell in out_group:
writer.writerows([[index]])
writer.writerows([dataset_header])
for cell_line,b in output_cell:
writer.writerows(b)
'''
#print("end writing 2 file")
with open(str(assuserP), "w", newline='') as ff:
writer = csv.writer(ff)
for index,output_cell in user_out_group:
writer.writerows([[ugroup[int(index[-1])-1]]])
writer.writerows([user_header])
for cell_line,b in output_cell:
writer.writerows(b)
#print("end writing 3 file")
'''
with open(str(userP), "w", newline='') as f:
writer = csv.writer(f)
for index,output_cell in user_out_group:
writer.writerows([[index]])
writer.writerows([user_header])
for cell_line,b in output_cell:
writer.writerows(b)
'''
#print("end writing 4 file")
data_file_name="dataset_"+sid
user_file_name="user_"+sid
else:
big_flag=0
data_file_name=0
user_file_name=0
return render_to_response(return_html,RequestContext(request,
{
'ugroup':ugroup,
'dgroup':dgroup,
'min':min,'max':max,
'big_flag':big_flag,
'out_group':out_group,'user_out_group':user_out_group,
'propotion':propotion,
'table_propotion':table_propotion,
'data_file_name':data_file_name,
'user_file_name':user_file_name,
'X1':X1,'name1':mark_safe(json.dumps(name1)),
'Y1':Y1,'name2':mark_safe(json.dumps(name2)),
'Z1':Z1,'name3':mark_safe(json.dumps(name3)),
'X2':X2,'name4':mark_safe(json.dumps(name4)),
'Y2':Y2,'name5':mark_safe(json.dumps(name5)),
'Z2':Z2,
'X3':X3,
'Y3':Y3,
'Z3':Z3,
'X4':X4,
'Y4':Y4,
'Z4':Z4,
'X5':X5,
'Y5':Y5,
'Z5':Z5,
}))
#notice that we need to return a user_pca_center.html, too!!
#return render_to_response('welcome.html',locals())
def express_profiling(request):
return render(request, 'express_profiling.html', generate_samples())
def welcome(request):
return render_to_response('welcome.html',locals())
def help(request):
example_name="CellExpress_Examples.pptx"
tutorial_name="CellExpress_Tutorial.pptx"
return render_to_response('help.html',locals())
def help_similar_assessment(request):
return render_to_response('help_similar_assessment.html',RequestContext(request))
def similar_assessment(request):
return render(request, 'similar_assessment.html', generate_samples())
def gene_signature(request):
return render(request, 'gene_signature.html', generate_samples())
def heatmap(request):
group1=[]
group2=[]
group_count=0
presult={} #{probe object:p value}
expression=[]
probe_out=[]
sample_out=[]
not_found=[]
quantile_flag=0
ratio_flag=0
indata=[]
#get probe from different platform
pform=request.POST.get('data_platform','U133A')
if(pform=="mix_quantile"):
pform="U133A"
quantile_flag=1
if(pform=="mix_ratio"):
pform="U133A"
ratio_flag=1
stop_end=601
return_page_flag=0
user_probe_flag=0
if(request.POST['user_type']=="all"):
all_probe=ProbeID.objects.filter(platform__name=pform).order_by('offset')
probe_offset=list(all_probe.values_list('offset',flat=True))
pro_number=float(request.POST['probe_number']) #significant 0.05 or 0.01
all_probe=list(all_probe)
elif(request.POST['user_type']=="genes"): #for all genes
return_page_flag=1
if(pform=="U133A"):
probe_path=Path('../').resolve().joinpath('src','uni_u133a.txt')
gene_list = pd.read_csv(probe_path.as_posix())
all_probe=list(gene_list['SYMBOL'])
else:
probe_path=Path('../').resolve().joinpath('src','uni_plus2.txt')
gene_list = pd.read_csv(probe_path.as_posix())
all_probe=list(gene_list['SYMBOL'])
pro_number=float(request.POST['probe_number_gene'])
probe_offset=[]
for i in range(0,len(all_probe)):
probe_offset.append(i)
else:
indata=request.POST['keyword']
indata = list(set(indata.split()))
if(request.POST['gtype']=="probeid"):
user_probe_flag=1
all_probe=ProbeID.objects.filter(platform__name=pform,Probe_id__in=indata).order_by('offset')
probe_offset=list(all_probe.values_list('offset',flat=True))
pro_number=float('+inf')
not_found=list(set(set(indata) - set(all_probe.values_list('Probe_id',flat=True))))
all_probe=list(all_probe)
else:
probe_offset=[]
return_page_flag=1
pro_number=float('+inf')
if(pform=="U133A"):
probe_path=Path('../').resolve().joinpath('src','uni_u133a.txt')
gene_list = pd.read_csv(probe_path.as_posix())
gene=list(gene_list['SYMBOL'])
else:
probe_path=Path('../').resolve().joinpath('src','uni_plus2.txt')
gene_list = pd.read_csv(probe_path.as_posix())
gene=list(gene_list['SYMBOL'])
probe_path=Path('../').resolve().joinpath('src','new_human_gene_info.txt')
info=pd.read_csv(probe_path.as_posix(),sep='\t')
col=list(info.columns.values)
col[0]='symbol'
info.columns=col
info.index = info['symbol']
info.index.name = None
info=info.iloc[:, 1:]
all_probe=[]
for i in indata:
try:
probe_offset.append(gene.index(i))
all_probe.append(i)
info=info.drop(i,errors='ignore')
except ValueError:
re_symbol=list(set(info.loc[info['alias'].isin([i])].index)) #find whether has alias first
if(len(re_symbol)!=0):
re_match=Gene.objects.filter(platform__name__in=[pform],symbol__in=re_symbol).order_by('offset') #check the symbol in database or not
repeat=len(re_match)
if(repeat!=0): #match gene symbol in database
##print(re_match)
for x in re_match:
info=info.drop(x.symbol,errors='ignore')
probe_offset.append(x.offset)
all_probe.append(i+"("+x.symbol+")")
else:
not_found.append(i)
else:
not_found.append(i)
#count the number of group
group_counter=1
check_set=[]
while True:
temp_name='dataset_g'+str(group_counter)
if temp_name in request.POST:
group_counter=group_counter+1
else:
group_counter=group_counter-1
break
#get binary data
s_group_dict={} #store sample
val=[] #store value get from binary data
group_name=[]
clinic=list(Clinical_Dataset.objects.all().values_list('name',flat=True))
clline=list(Dataset.objects.all().values_list('name',flat=True))
#print(clline)
opened_name=[]
opened_val=[]
for i in range(1,group_counter+1):
s_group_dict['g'+str(i)]=[]
dname='dataset_g'+str(i)
datasets=request.POST.getlist(dname)
temp_name='g'+str(i)
group_name.append(temp_name)
a_data=np.array([])
for dn in datasets:
if dn=='Sanger Cell Line Project':
c='select_sanger_g'+str(i)
elif dn=='NCI60':
c='select_nci_g'+str(i)
elif dn=='GSE36133':
c='select_ccle_g'+str(i)
if dn in clline:
ACELL=request.POST.getlist(c)
s=Sample.objects.filter(dataset_id__name__in=[dn],cell_line_id__name__in=ACELL).order_by('dataset_id').select_related('cell_line_id__name','dataset_id')
s_group_dict['g'+str(i)]=list(s)+s_group_dict['g'+str(i)]
goffset=list(s.values_list('offset',flat=True))
#print(goffset)
if dn not in opened_name: #check if the file is opened
#print("opend file!!")
opened_name.append(dn)
if(return_page_flag==1):
pth=Path('../').resolve().joinpath('src','gene_'+Dataset.objects.get(name=dn).data_path)
if(quantile_flag==1):
pth=Path('../').resolve().joinpath('src','mix_gene_'+Dataset.objects.get(name=dn).data_path)
elif(ratio_flag==1):
pth=Path('../').resolve().joinpath('src','mix_gene_'+Dataset.objects.get(name=dn).data_path)
gap=[Gene.objects.filter(platform__name=pform,symbol="GAPDH")[0].offset]
else:
pth=Path('../').resolve().joinpath('src',Dataset.objects.get(name=dn).data_path)
if(quantile_flag==1):
pth=Path('../').resolve().joinpath('src','mix_'+Dataset.objects.get(name=dn).data_path)
elif(ratio_flag==1):
pth=Path('../').resolve().joinpath('src','mix_'+Dataset.objects.get(name=dn).data_path)
gap=list(ProbeID.objects.filter(platform__name=pform).filter(Gene_symbol="GAPDH").order_by('id').values_list('offset',flat=True))
raw_val=np.load(pth.as_posix(),mmap_mode='r')
if(ratio_flag==1):
norm=raw_val[np.ix_(gap)]
raw_val=np.subtract(raw_val,np.mean(norm,axis=0, dtype=np.float64,keepdims=True))
opened_val.append(raw_val)
temp=raw_val[np.ix_(probe_offset,list(goffset))]
if (len(a_data)!=0 ) and (len(temp)!=0):
a_data=np.concatenate((a_data,temp),axis=1)
elif (len(temp)!=0):
a_data=raw_val[np.ix_(probe_offset,list(goffset))]
else:
temp=opened_val[opened_name.index(dn)][np.ix_(probe_offset,list(goffset))]
if (len(a_data)!=0 ) and (len(temp)!=0):
a_data=np.concatenate((a_data,temp),axis=1)
elif (len(temp)!=0):
a_data=opened_val[opened_name.index(dn)][np.ix_(probe_offset,list(goffset))]
elif dn in clinic:
#print("I am in clinical part")
com_hists=list(set(request.POST.getlist('primd_'+dn+'_g'+str(i)))) #can I get this by label to reduce number of queries?
com_hists=[w1 for segments in com_hists for w1 in segments.split('/')]
prims=com_hists[0::2]
hists=com_hists[1::2]
temp=request.POST.getlist('filter_'+dn+'_g'+str(i))
age=[]
gender=[]
ethnic=[]
grade=[]
stage=[]
T=[]
N=[]
M=[]
metas=[]
for t in temp:
if 'stage/' in t:
stage.append(t[6:])
elif 'gender/' in t:
gender.append(t[7:])
elif 'ethnic/' in t:
ethnic.append(t[7:])
elif 'grade/' in t:
grade.append(t[6:])
elif 'stageT/' in t:
T.append(t[7:])
elif 'stageN/' in t:
N.append(t[7:])
elif 'stageM/' in t:
M.append(t[7:])
elif 'metastatic/' in t:
metas.append(t[11:])
'''
if t[11:]=='False':
metas.append(0)
else:
metas.append(1)
'''
else: #"age/"
age.append(t[4:])
cgoffset=[]
for x in range(0,len(prims)):
s=Clinical_sample.objects.filter(dataset_id__name=dn,primary_site=prims[x],
primary_hist=hists[x],
age__in=age,
gender__in=gender,
ethnic__in=ethnic,
stage__in=stage,
grade__in=grade,
stageT__in=T,
stageN__in=N,
stageM__in=M,
metastatic__in=metas,
).select_related('dataset_id').order_by('id')
s_group_dict['g'+str(i)]=list(s)+s_group_dict['g'+str(i)]
cgoffset+=list(s.values_list('offset',flat=True))
if dn not in opened_name: #check if the file is opened
#print("opend file!!")
opened_name.append(dn)
if(return_page_flag==1):
pth=Path('../').resolve().joinpath('src','gene_'+Clinical_Dataset.objects.get(name=dn).data_path)
if(quantile_flag==1):
pth=Path('../').resolve().joinpath('src','mix_gene_'+Clinical_Dataset.objects.get(name=dn).data_path)
elif(ratio_flag==1):
pth=Path('../').resolve().joinpath('src','mix_gene_'+Clinical_Dataset.objects.get(name=dn).data_path)
gap=[Gene.objects.filter(platform__name=pform,symbol="GAPDH")[0].offset]
else:
pth=Path('../').resolve().joinpath('src',Clinical_Dataset.objects.get(name=dn).data_path)
if(quantile_flag==1):
pth=Path('../').resolve().joinpath('src','mix_'+Clinical_Dataset.objects.get(name=dn).data_path)
elif(ratio_flag==1):
pth=Path('../').resolve().joinpath('src','mix_'+Clinical_Dataset.objects.get(name=dn).data_path)
gap=list(ProbeID.objects.filter(platform__name=pform).filter(Gene_symbol="GAPDH").order_by('id').values_list('offset',flat=True))
raw_val=np.load(pth.as_posix(),mmap_mode='r')
if(ratio_flag==1):
norm=raw_val[np.ix_(gap)]
raw_val=np.subtract(raw_val,np.mean(norm,axis=0, dtype=np.float64,keepdims=True))
opened_val.append(raw_val)
temp=raw_val[np.ix_(probe_offset,list(cgoffset))]
#print(temp)
if (len(a_data)!=0 ) and (len(temp)!=0):
a_data=np.concatenate((a_data,temp),axis=1)
elif (len(temp)!=0):
a_data=raw_val[np.ix_(probe_offset,list(cgoffset))]
else:
temp=opened_val[opened_name.index(dn)][np.ix_(probe_offset,list(cgoffset))]
if (len(a_data)!=0 ) and (len(temp)!=0):
a_data=np.concatenate((a_data,temp),axis=1)
elif (len(temp)!=0):
a_data=opened_val[opened_name.index(dn)][np.ix_(probe_offset,list(cgoffset))]
val.append(a_data.tolist())
#print(len(val))
#print(len(val[0]))
##print(val)
#run the one way ANOVA test or ttest for every probe base on the platform selected
express={}
#logger.info('run ttest or anova')
if group_counter<=2:
for i in range(0,len(all_probe)): #need to fix if try to run on laptop
presult[all_probe[i]]=stats.ttest_ind(list(val[0][i]),list(val[1][i]),equal_var=False,nan_policy='omit')[1]
express[all_probe[i]]=np.append(val[0][i],val[1][i]).tolist()
else:
for i in range(0,len(all_probe)): #need to fix if try to run on laptop
to_anova=[]
for n in range(0,group_counter):
#val[n]=sum(val[n],[])
to_anova.append(val[n][i])
presult[all_probe[i]]=stats.f_oneway(*to_anova)[1]
express[all_probe[i]]=sum(to_anova,[])
#print("test done")
#sort the dictionary with p-value and need to get the expression data again (top20)
#presult[all_probe[0]]=float('nan')
#presult[all_probe[11]]=float('nan')
#how to deal with all "nan"?
tempf=pd.DataFrame(list(presult.items()), columns=['probe', 'pvalue'])
tempf=tempf.replace(to_replace=float('nan'),value=float('+inf'))
presult=dict(zip(tempf.probe, tempf.pvalue))
sortkey=sorted(presult,key=presult.get) #can optimize here
counter=1
cell_probe_val=[]
for w in sortkey:
#print(presult[w],":",w)
if (presult[w]<pro_number):
cell_probe_val.append([w,presult[w]])
print(cell_probe_val)
express_mean=np.mean(np.array(express[w]))
expression.append(list((np.array(express[w]))-express_mean))
if(return_page_flag==1):
probe_out.append(w)
else:
probe_out.append(w.Probe_id+"("+w.Gene_symbol+")")
counter+=1
else:
break
if counter>=stop_end:
break
n_counter=1
for n in group_name:
sample_counter=1
for s in s_group_dict[n]:
dataset_n=s.dataset_id.name
if dataset_n=="Sanger Cell Line Project":
sample_out.append(s.cell_line_id.name+"(SCLP)(group"+str(n_counter)+"-"+str(sample_counter)+")")
elif dataset_n in clline:
#print(s.cell_line_id.name+"("+s.dataset_id.name+")"+"(group"+str(n_counter)+"-"+str(sample_counter)+")")
sample_out.append(s.cell_line_id.name+"("+s.dataset_id.name+")"+"(group"+str(n_counter)+"-"+str(sample_counter)+")")
else: #what to output for clinical part?
#print(s.name+"("+s.dataset_id.name+")"+"(group"+str(n_counter)+"-"+str(sample_counter)+")")
sample_out.append(s.name+"("+s.dataset_id.name+")"+"(group"+str(n_counter)+"-"+str(sample_counter)+")")
sample_counter+=1
n_counter+=1
#logger.info('finish combine output samples')
sns.set(font="monospace")
test=pd.DataFrame(data=expression,index=probe_out,columns=sample_out)
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.1),
(1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'green': ((0.0, 0.0, 1.0),
(0.5, 0.1, 0.0),
(1.0, 0.0, 0.0))
}
my_cmap = LinearSegmentedColormap('my_colormap',cdict,256)
#test.to_csv('heatmap_text.csv')
try:
#g = sns.clustermap(test,cmap=my_cmap)
if(len(probe_out)<=300):
g = sns.clustermap(test,cmap=my_cmap,xticklabels=list(test.columns),yticklabels=(test.index),figsize=(19.20,48.60))
else:
g = sns.clustermap(test,cmap=my_cmap,xticklabels=list(test.columns),yticklabels=(test.index),figsize=(30.00,100.00))
except:
if((return_page_flag==1) and (probe_out==[])):
probe_out=indata #probe_out is the rows of heatmap
return render_to_response('noprobe.html',RequestContext(request,
{
'user_probe_flag':user_probe_flag,
'return_page_flag':return_page_flag,
'probe_out':probe_out,
'not_found':not_found
}))
'''
plt.setp(g.ax_heatmap.get_yticklabels(), rotation=0)
if counter>=stop_end:
plt.setp(g.ax_heatmap.yaxis.get_majorticklabels(), fontsize=4)
else:
plt.setp(g.ax_heatmap.yaxis.get_majorticklabels(), fontsize=7)
plt.setp(g.ax_heatmap.get_xticklabels(), rotation=270,ha='center')
'''
sid=str(uuid.uuid1())+".png"
#print(sid)
P=Path('../').resolve().joinpath('src','static','image',sid)
assP=Path('../').resolve().joinpath('src','assets','image',sid)
#g.savefig(str(P))
#plt.figure(figsize=(1920/my_dpi, 2160/my_dpi), dpi=100)
#plt.savefig(str(assP), dpi=my_dpi*10)
g.savefig(str(assP),bbox_inches='tight')
file_name=sid
return render_to_response('heatmap.html',RequestContext(request,
{
'user_probe_flag':user_probe_flag,
'return_page_flag':return_page_flag,
'cell_probe_val':cell_probe_val,
'file_name':file_name,
'pro_number':pro_number,
'not_found':not_found
}))
def pca(request):
propotion=0
table_propotion=0
pform=request.POST['data_platform'] #get the platform
show=request.POST['show_type'] #get the pca show type
group_counter=1
cell_line_dict={}
#count how many group
group_counter=1
while True:
temp_name='dataset_g'+str(group_counter)
if temp_name in request.POST:
group_counter=group_counter+1
else:
group_counter=group_counter-1
break
udgroup=[]
s_group_dict={} #store sample
group_name=[]
offset_group_dict={} #store offset
clinic=list(Clinical_Dataset.objects.all().values_list('name',flat=True))
clline=list(Dataset.objects.all().values_list('name',flat=True))
all_exist_dataset=[]
for i in range(1,group_counter+1):
udgroup.append(request.POST['group_name'+str(i)])
#print(udgroup)
if(udgroup[-1]==''):
udgroup[-1]='Group'+str(i)
dname='dataset_g'+str(i)
all_exist_dataset=all_exist_dataset+request.POST.getlist(dname)
all_exist_dataset=list(set(all_exist_dataset))
all_base=[0]
for i in range(0,len(all_exist_dataset)-1):
if all_exist_dataset[i] in clline:
all_base.append(all_base[i]+Sample.objects.filter(dataset_id__name__in=[all_exist_dataset[i]]).count())
else:
all_base.append(all_base[i]+Clinical_sample.objects.filter(dataset_id__name__in=[all_exist_dataset[i]]).count())
all_c=[]
for i in range(1,group_counter+1):
s_group_dict['g'+str(i)]=[]
offset_group_dict['g'+str(i)]=[]
cell_line_dict['g'+str(i)]=[]
dname='dataset_g'+str(i)
datasets=request.POST.getlist(dname)
group_name.append('g'+str(i))
goffset_nci=[]
goffset_gse=[]
for dn in datasets:
if dn=='Sanger Cell Line Project':
c='select_sanger_g'+str(i)
elif dn=='NCI60':
c='select_nci_g'+str(i)
elif dn=='GSE36133':
c='select_ccle_g'+str(i)
if dn in clline:
temp=list(set(request.POST.getlist(c)))
if 'd_sample' in show:
if all_c==[]:
all_c=all_c+temp
uni=temp
else:
uni=list(set(temp)-set(all_c))
all_c=all_c+uni
else:
uni=list(temp) #do not filter duplicate input only when select+centroid
s=Sample.objects.filter(cell_line_id__name__in=uni,dataset_id__name__in=[dn]).order_by('dataset_id'
).select_related('cell_line_id__name','cell_line_id__primary_site','cell_line_id__primary_hist','dataset_id','dataset_id__name')
cell_line_dict['g'+str(i)]=cell_line_dict['g'+str(i)]+list(s.values_list('cell_line_id__name',flat=True))
s_group_dict['g'+str(i)]=s_group_dict['g'+str(i)]+list(s)
offset_group_dict['g'+str(i)]=offset_group_dict['g'+str(i)]+list(np.add(list(s.values_list('offset',flat=True)),all_base[all_exist_dataset.index(dn)]))
else: #dealing with clinical sample datasets
com_hists=list(set(request.POST.getlist('primd_'+dn+'_g'+str(i)))) #can I get this by label to reduce number of queries?
com_hists=[w1 for segments in com_hists for w1 in segments.split('/')]
prims=com_hists[0::2]
hists=com_hists[1::2]
temp=request.POST.getlist('filter_'+dn+'_g'+str(i))
age=[]
gender=[]
ethnic=[]
grade=[]
stage=[]
T=[]
N=[]
M=[]
metas=[]
for t in temp:
if 'stage/' in t:
stage.append(t[6:])
elif 'gender/' in t:
gender.append(t[7:])
elif 'ethnic/' in t:
ethnic.append(t[7:])
elif 'grade/' in t:
grade.append(t[6:])
elif 'stageT/' in t:
T.append(t[7:])
elif 'stageN/' in t:
N.append(t[7:])
elif 'stageM/' in t:
M.append(t[7:])
elif 'metastatic/' in t:
metas.append(t[11:])
'''
if t[11:]=='False':
metas.append(0)
else:
metas.append(1)
'''
else: #"age/"
age.append(t[4:])
for x in range(0,len(prims)):
s=Clinical_sample.objects.filter(dataset_id__name=dn,primary_site=prims[x],
primary_hist=hists[x],
age__in=age,
gender__in=gender,
ethnic__in=ethnic,
stage__in=stage,
grade__in=grade,
stageT__in=T,
stageN__in=N,
stageM__in=M,
metastatic__in=metas,
).select_related('dataset_id').order_by('id')
s_group_dict['g'+str(i)]=s_group_dict['g'+str(i)]+list(s)
cell_line_dict['g'+str(i)]=cell_line_dict['g'+str(i)]+list(s.values_list('name',flat=True))
offset_group_dict['g'+str(i)]=offset_group_dict['g'+str(i)]+list(np.add(list(s.values_list('offset',flat=True)),all_base[all_exist_dataset.index(dn)]))
all_sample=[]
all_cellline=[]
cell_object=[]
all_offset=[]
sample_counter={}
group_cell=[]
g_s_counter=[0]
for i in range(1,group_counter+1):
all_sample=all_sample+s_group_dict['g'+str(i)] #will not exist duplicate sample if d_sample
all_offset=all_offset+offset_group_dict['g'+str(i)]
all_cellline=all_cellline+cell_line_dict['g'+str(i)]
g_s_counter.append(g_s_counter[i-1]+len(s_group_dict['g'+str(i)]))
if 'd_sample' in show:
if((len(all_sample))<4):
            error_reason='You should have at least 4 samples for PCA; the selected samples are not enough.<br />'\
'The number of samples you selected is '+str(len(all_sample))+'.'
return render_to_response('pca_error.html',RequestContext(request,
{
'error_reason':mark_safe(json.dumps(error_reason)),
}))
for i in all_sample:
sample_counter[i.name]=1
if str(type(i))=="<class 'probes.models.Sample'>":
##print("i am sample!!")
cell_object.append(i.cell_line_id)
else:
##print("i am clinical!!")
cell_object.append(i)
#delete nan, transpose matrix
##open file
for x in range(0,len(all_exist_dataset)):
if all_exist_dataset[x] in clline:
pth=Path('../').resolve().joinpath('src',Dataset.objects.get(name=all_exist_dataset[x]).data_path)
else:
pth=Path('../').resolve().joinpath('src',Clinical_Dataset.objects.get(name=all_exist_dataset[x]).data_path)
if x==0:
val=np.load(pth.as_posix())
else:
val=np.hstack((val, np.load(pth.as_posix())))#combine together
if 'd_sample' in show:
val=val[:,all_offset]
#val=val[~np.isnan(val).any(axis=1)]
val=np.transpose(val)
pca_index=[]
dis_offset=[]
#PREMISE:same dataset same cell line will have only one type of primary site and primary histology
name1=[]
name2=[]
name3=[]
name4=[]
name5=[]
X1=[]
Y1=[]
Z1=[]
X2=[]
Y2=[]
Z2=[]
X3=[]
Y3=[]
Z3=[]
X4=[]
Y4=[]
Z4=[]
X5=[]
Y5=[]
Z5=[]
if(len(all_exist_dataset)==1):
n=3 #need to fix to the best one #need to fix proportion
else:
n=4
#logger.info('pca show')
if 'd_sample' in show:
#count the pca first
pca= PCA(n_components=n)
Xval = pca.fit_transform(val[:,:]) #cannot get Xval with original all_offset any more
ratio_temp=pca.explained_variance_ratio_
propotion=sum(ratio_temp[n-3:n])
table_propotion=sum(ratio_temp[0:n])
##print(Xval)
##print(all_cellline)
##print(all_sample)
max=0
min=10000000000
out_group=[]
exist_cell={}#cell line object:counter
for g in range(1,group_counter+1):
output_cell={}
check={}
for s in range(g_s_counter[g-1],g_s_counter[g]):
if str(type(all_sample[s]))=="<class 'probes.models.Sample'>":
cell=all_sample[s].cell_line_id
else:
cell=all_sample[s]
try:
counter=exist_cell[cell]
exist_cell[cell]=counter+1
except KeyError:
exist_cell[cell]=1
try:
t=output_cell[cell]
except KeyError:
output_cell[cell]=[cell,[]]
check[all_sample[s].name]=[]
sample_counter[all_sample[s].name]=exist_cell[cell]
for i in range(0,len(all_sample)):
if i!=s:
try:
if(all_sample[s].name not in check[all_sample[i].name]):
distance=np.linalg.norm(Xval[i][n-3:n]-Xval[s][n-3:n])
if distance<min:
min=distance
if distance>max:
max=distance
output_cell[cell][1].append([all_cellline[s]+'('+str(exist_cell[cell])+')'
,all_sample[s].name,all_sample[s].dataset_id.name,all_cellline[i],all_sample[i].name,all_sample[i].dataset_id.name,distance,cell_object[i]])
check[all_sample[s].name].append(all_sample[i].name)
except KeyError:
distance=np.linalg.norm(Xval[i][n-3:n]-Xval[s][n-3:n])
if distance<min:
min=distance
if distance>max:
max=distance
output_cell[cell][1].append([all_cellline[s]+'('+str(exist_cell[cell])+')'
,all_sample[s].name,all_sample[s].dataset_id.name,all_cellline[i],all_sample[i].name,all_sample[i].dataset_id.name,distance,cell_object[i]])
check[all_sample[s].name].append(all_sample[i].name)
if(g==1):
name1.append(all_cellline[s]+'('+str(exist_cell[cell])+')'+'<br>'+all_sample[s].name)
X1.append(round(Xval[s][n-3],5))
Y1.append(round(Xval[s][n-2],5))
Z1.append(round(Xval[s][n-1],5))
elif(g==2):
name2.append(all_cellline[s]+'('+str(exist_cell[cell])+')'+'<br>'+all_sample[s].name)
X2.append(round(Xval[s][n-3],5))
Y2.append(round(Xval[s][n-2],5))
Z2.append(round(Xval[s][n-1],5))
elif(g==3):
name3.append(all_cellline[s]+'('+str(exist_cell[cell])+')'+'<br>'+all_sample[s].name)
X3.append(round(Xval[s][n-3],5))
Y3.append(round(Xval[s][n-2],5))
Z3.append(round(Xval[s][n-1],5))
elif(g==4):
name4.append(all_cellline[s]+'('+str(exist_cell[cell])+')'+'<br>'+all_sample[s].name)
X4.append(round(Xval[s][n-3],5))
Y4.append(round(Xval[s][n-2],5))
Z4.append(round(Xval[s][n-1],5))
elif(g==5):
name5.append(all_cellline[s]+'('+str(exist_cell[cell])+')'+'<br>'+all_sample[s].name)
X5.append(round(Xval[s][n-3],5))
Y5.append(round(Xval[s][n-2],5))
Z5.append(round(Xval[s][n-1],5))
dictlist=[]
for key, value in output_cell.items():
temp = [value]
dictlist+=temp
output_cell=list(dictlist)
out_group.append([g,output_cell])
element_counter=0
#[g,[[group_cell_line,[paired_cellline,......,]],[],[]]]
for i in out_group:
for temp_list in i[1]:
element_counter+=len(temp_list[1])
for temp in temp_list[1]:
##print(temp)
temp[3]=temp[3]+'('+str(sample_counter[temp[4]])+')'
return_html='pca.html'
else:
#This part is for centroid display
return_html='pca_center.html'
element_counter=0
#val=val[~np.isnan(val).any(axis=1)] #bottle neck???
        #This part selects cell lines based on the chosen datasets and computes centroids per dataset
        #the centroid is computed over the cell lines within each group
#logger.info('pca show centroid with selection')
location_dict={} #{group number:[[cell object,dataset,new location]]}
combined=[]
sample_list=[]
pca_index=np.array(pca_index)
X_val=[]
a_all_offset=np.array(all_offset)
for i in range(1,group_counter+1):
dis_cellline=list(set(cell_object[g_s_counter[i-1]:g_s_counter[i]])) #cell object may have duplicate cell line since:NCI A + CCLE A===>[A,A]
location_dict['g'+str(i)]=[]
dataset_dict={}
a_cell_object=np.array(cell_object)
for c in dis_cellline: #dis_cellline may not have the same order as cell_object
temp1=np.where((a_cell_object==c))[0]
temp2=np.where((temp1>=g_s_counter[i-1])&(temp1<g_s_counter[i]))
total_offset=temp1[temp2]
selected_val=val[:,a_all_offset[total_offset]]
selected_val=np.transpose(selected_val)
new_loca=(np.mean(selected_val,axis=0,dtype=np.float64,keepdims=True)).tolist()[0]
a_sample=np.array(all_sample)
selected_sample=a_sample[total_offset]
if list(selected_sample) in sample_list: #to prevent two different colors in different group
continue
else:
sample_list.append(list(selected_sample))
##print(selected_sample)
d_temp=[]
for s in selected_sample:
d_temp.append(s.dataset_id.name)
dataset_dict[c]="/".join(list(set(d_temp)))
##print(dataset_dict[c])
X_val.append(new_loca)
location_dict['g'+str(i)].append([c,dataset_dict[c],len(X_val)-1]) #the last part is the index to get pca result from new_val
combined.append([c,dataset_dict[c],len(X_val)-1]) #all cell line, do not matter order
#run the pca
##print(len(X_val))
if((len(X_val))<4):
            error_reason='Since the display method is [centroid], you should have at least 4 dots for PCA; the selected dots are not enough.<br />'\
'The number of centroid dots you selected is '+str(len(X_val))+'.'
return render_to_response('pca_error.html',RequestContext(request,
{
'error_reason':mark_safe(json.dumps(error_reason)),
}))
X_val=np.matrix(X_val)
pca= PCA(n_components=n)
new_val = pca.fit_transform(X_val[:,:]) #cannot get Xval with original offset any more
ratio_temp=pca.explained_variance_ratio_
propotion=sum(ratio_temp[n-3:n])
table_propotion=sum(ratio_temp[0:n])
##print(new_val)
out_group=[]
min=10000000000
max=0
for g in range(1,group_counter+1):
output_cell=[]
exist_cell={}
for group_c in location_dict['g'+str(g)]: #a list of [c,dataset_dict[c],new_val index] in group one
cell=group_c[0]
key_string=cell.name+'/'+cell.primary_site+'/'+cell.primary_hist+'/'+group_c[1]
exist_cell[key_string]=[]
output_cell.append([cell,[]])
#count the distance
for temp_list in combined:
c=temp_list[0]
temp_string=c.name+'/'+c.primary_site+'/'+c.primary_hist+'/'+temp_list[1]
try:
if(key_string not in exist_cell[temp_string]):
distance=np.linalg.norm(np.array(new_val[group_c[2]][n-3:n])-np.array(new_val[temp_list[2]][n-3:n]))
if distance==0:
continue
if distance<min:
min=distance
if distance>max:
max=distance
output_cell[len(output_cell)-1][1].append([cell,group_c[1],temp_list[0],temp_list[1],distance])
element_counter+=1
exist_cell[key_string].append(temp_string)
except KeyError:
distance=np.linalg.norm(np.array(new_val[group_c[2]][n-3:n])-np.array(new_val[temp_list[2]][n-3:n]))
if distance==0:
continue
if distance<min:
min=distance
if distance>max:
max=distance
output_cell[len(output_cell)-1][1].append([cell,group_c[1],temp_list[0],temp_list[1],distance])
element_counter+=1
exist_cell[key_string].append(temp_string)
if(g==1):
name1.append(cell.name+'<br>'+group_c[1])
X1.append(round(new_val[group_c[2]][n-3],5))
Y1.append(round(new_val[group_c[2]][n-2],5))
Z1.append(round(new_val[group_c[2]][n-1],5))
elif(g==2):
name2.append(cell.name+'<br>'+group_c[1])
X2.append(round(new_val[group_c[2]][n-3],5))
Y2.append(round(new_val[group_c[2]][n-2],5))
Z2.append(round(new_val[group_c[2]][n-1],5))
elif(g==3):
name3.append(cell.name+'<br>'+group_c[1])
X3.append(round(new_val[group_c[2]][n-3],5))
Y3.append(round(new_val[group_c[2]][n-2],5))
Z3.append(round(new_val[group_c[2]][n-1],5))
elif(g==4):
name4.append(cell.name+'<br>'+group_c[1])
X4.append(round(new_val[group_c[2]][n-3],5))
Y4.append(round(new_val[group_c[2]][n-2],5))
Z4.append(round(new_val[group_c[2]][n-1],5))
elif(g==5):
name5.append(cell.name+'<br>'+group_c[1])
X5.append(round(new_val[group_c[2]][n-3],5))
Y5.append(round(new_val[group_c[2]][n-2],5))
Z5.append(round(new_val[group_c[2]][n-1],5))
out_group.append([g,output_cell])
#logger.info('end pca')
if(element_counter>show_row):
big_flag=1
sid=str(uuid.uuid1())+".csv"
if(return_html=='pca.html'):
dataset_header=['Group Cell Line/Clinical Sample','Sample Name','Primary Site','Primary Histology'
,'Dataset','Paired Cell Line name/Clinical Sample','Sample Name','Primary Site','Primary Histology','Dataset','Distance']
else:
dataset_header=['Group Cell Line/Clinical Sample','Primary Site','Primary Histology'
,'Dataset','Paired Cell Line name/Clinical Sample','Primary Site','Primary Histology','Dataset','Distance']
P=Path('../').resolve().joinpath('src','static','csv',sid)
assP=Path('../').resolve().joinpath('src','assets','csv',sid)
with open(str(assP), "w", newline='') as f:
writer = csv.writer(f)
for index,output_cell in out_group:
writer.writerows([[udgroup[index-1]]])
writer.writerows([dataset_header])
for cell_line,b in output_cell:
temp_b=[]
if(return_html=='pca.html'):
for group_cell,sn,dset,cname,sname,setname,dis,cell_object in b:
temp_b.append([group_cell,sn,cell_line.primary_site,cell_line.primary_hist,dset,cname
,sname,cell_object.primary_site,cell_object.primary_hist,setname,dis])
else:
for group_cell,group_dataset,paired_cell,paired_dataset,dis in b:
temp_b.append([group_cell.name,group_cell.primary_site,group_cell.primary_hist,group_dataset
,paired_cell.name,paired_cell.primary_site,paired_cell.primary_hist,paired_dataset,dis])
writer.writerows(temp_b)
#print('write first file done')
'''
with open(str(assP), "w", newline='') as ff:
writer = csv.writer(ff)
for index,output_cell in out_group:
writer.writerows([[udgroup[index-1]]])
writer.writerows([dataset_header])
for cell_line,b in output_cell:
temp_b=[]
if(return_html=='pca.html'):
for group_cell,sn,dset,cname,sname,setname,dis,cell_object in b:
temp_b.append([group_cell,sn,cell_line.primary_site,cell_line.primary_hist,dset,cname
,sname,cell_object.primary_site,cell_object.primary_hist,setname,dis])
else:
for group_cell,group_dataset,paired_cell,paired_dataset,dis in b:
temp_b.append([group_cell.name,group_cell.primary_site,group_cell.primary_hist,group_dataset
,paired_cell.name,paired_cell.primary_site,paired_cell.primary_hist,paired_dataset,dis])
writer.writerows(temp_b)
'''
#print('write second file done')
data_file_name=sid
else:
big_flag=0
data_file_name=0
return render_to_response(return_html,RequestContext(request,
{
'udgroup':udgroup,
'min':min,'max':max,
'out_group':out_group,
'propotion':propotion,
'big_flag':big_flag,
'data_file_name':data_file_name,
'table_propotion':table_propotion,
'X1':X1,'name1':mark_safe(json.dumps(name1)),
'Y1':Y1,'name2':mark_safe(json.dumps(name2)),
'Z1':Z1,'name3':mark_safe(json.dumps(name3)),
'X2':X2,'name4':mark_safe(json.dumps(name4)),
'Y2':Y2,'name5':mark_safe(json.dumps(name5)),
'Z2':Z2,
'X3':X3,
'Y3':Y3,
'Z3':Z3,
'X4':X4,
'Y4':Y4,
'Z4':Z4,
'X5':X5,
'Y5':Y5,
'Z5':Z5,
}))
def cellline_microarray(request):
# Pre-fetch the cell line field for all samples.
    # Reduce N queries to 1 (N = number of samples).
d=Dataset.objects.all()
d_name=list(d.values_list('name',flat=True))
datasets=[] #[[dataset_name,[[primary_site,[cell line]]]]
an=[]
for i in d_name:
if i=="Sanger Cell Line Project":
alias='sanger'
elif i=="NCI60":
alias='nci'
elif i=="GSE36133":
alias='gse'
else:
alias=i
an.append(alias)
sample=Sample.objects.filter(dataset_id__name=i).order_by('cell_line_id__primary_site').select_related('cell_line_id')
datasets.append([i,alias,list(sample),[]])
sites=list(sample.values_list('cell_line_id__primary_site',flat=True))
hists=list(sample.values_list('cell_line_id__name',flat=True))
dis_prim=list(sample.values_list('cell_line_id__primary_site',flat=True).distinct())
hists=list(hists)
id_counter=0
for p in range(0,len(dis_prim)):
temp=sites.count(dis_prim[p])
datasets[-1][3].append([dis_prim[p],list(set(hists[id_counter:id_counter+temp]))])
id_counter+=temp
return render(request, 'cellline_microarray.html', {
'an':mark_safe(json.dumps(an)),
'd_name':d_name,
'datasets':datasets,
})
def cell_lines(request):
#samples = Sample.objects.all().select_related('cell_line_id','dataset_id')
#lines=CellLine.objects.all().distinct()
#val_pairs = (
# (l, l.fcell_line_id.prefetch_related('dataset_id__name').values_list('dataset_id__name',flat=True).distinct())
# for l in lines
# )
#context['val_pairs']=val_pairs
cell_line_dict={}
context={}
nr_samples=[]
samples=Sample.objects.all().select_related('cell_line_id','dataset_id').order_by('id')
for ss in samples:
name=ss.cell_line_id.name
primary_site=ss.cell_line_id.primary_site
primary_hist=ss.cell_line_id.primary_hist
comb=name+"/"+primary_site+"/"+primary_hist
dataset=ss.dataset_id.name
try:
sets=cell_line_dict[comb]
if (dataset not in sets):
cell_line_dict[comb]=dataset+"/"+sets
except KeyError:
cell_line_dict[comb]=dataset
nr_samples.append(ss)
val_pairs = (
(ss,cell_line_dict[ss.cell_line_id.name+"/"+ss.cell_line_id.primary_site+"/"+ss.cell_line_id.primary_hist])
for ss in nr_samples
)
context['val_pairs']=val_pairs
return render_to_response('cell_line.html', RequestContext(request, context))
def clinical_search(request):
norm_name=[request.POST['normalize']] #get the normalize gene name
#f_type=['age','gender','ethnic','grade','stage','stageT','stageN','stageM','metastatic']
age=[]
gender=[]
ethnic=[]
grade=[]
stage=[]
T=[]
N=[]
M=[]
metas=[]
#get the probe/gene/id keywords
if 'keyword' in request.POST and request.POST['keyword'] != '':
words = request.POST['keyword']
words = list(set(words.split()))
else:
return HttpResponse("<p>where is your keyword?</p>")
plus2_rank=np.load('ranking_u133plus2.npy') #open only plus2 platform rank
sample_probe_val_pairs=[] #for output
if 'gtype' in request.POST and request.POST['gtype'] == 'probeid':
gene = ProbeID.objects.filter(platform__name__in=["PLUS2"]).filter(Probe_id__in=words).order_by('id')
probe=list(gene.values_list('offset',flat=True))
##print(gene)
elif 'gtype' in request.POST and request.POST['gtype'] == 'symbol':
gene = ProbeID.objects.filter(platform__name__in=["PLUS2"]).filter(Gene_symbol__in=words).order_by('id')
probe=list(gene.values_list('offset',flat=True))
else:
gene = ProbeID.objects.filter(platform__name__in=["PLUS2"]).filter(Entrez_id__in=words).order_by('id')
probe=list(gene.values_list('offset',flat=True))
if request.POST['clinical_method'] == 'prim_dataset':
if 'dataset' in request.POST and request.POST['dataset'] != '':
datas=request.POST.getlist('dataset')
else:
d=Clinical_Dataset.objects.all()
datas=d.values_list('name',flat=True)
com_hists=list(set(request.POST.getlist('primhist')))
com_hists=[w1 for segments in com_hists for w1 in segments.split('/')]
prims=com_hists[0::2]
hists=com_hists[1::2]
temp=request.POST.getlist('filter_primh')
for i in temp:
if 'stage/' in i:
stage.append(i[6:])
elif 'gender/' in i:
gender.append(i[7:])
elif 'ethnic/' in i:
ethnic.append(i[7:])
elif 'grade/' in i:
grade.append(i[6:])
elif 'stageT/' in i:
T.append(i[7:])
elif 'stageN/' in i:
N.append(i[7:])
elif 'stageM/' in i:
M.append(i[7:])
elif 'metastatic/' in i:
metas.append(i[11:])
'''
if i[11:]=='False':
metas.append(0)
else:
metas.append(1)
'''
else: #"age/"
age.append(i[4:])
for sets in datas:
samples=[]
offset=[]
if request.POST['clinical_method'] == 'prim_dataset':
com_hists=list(set(request.POST.getlist('primd_'+sets))) #can I get this by label to reduce number of queries?
com_hists=[w1 for segments in com_hists for w1 in segments.split('/')]
prims=com_hists[0::2]
hists=com_hists[1::2]
temp=request.POST.getlist('filter_'+sets)
age=[]
gender=[]
ethnic=[]
grade=[]
stage=[]
T=[]
N=[]
M=[]
metas=[]
for i in temp:
if 'stage/' in i:
stage.append(i[6:])
elif 'gender/' in i:
gender.append(i[7:])
elif 'ethnic/' in i:
ethnic.append(i[7:])
elif 'grade/' in i:
grade.append(i[6:])
elif 'stageT/' in i:
T.append(i[7:])
elif 'stageN/' in i:
N.append(i[7:])
elif 'stageM/' in i:
M.append(i[7:])
elif 'metastatic/' in i:
metas.append(i[11:])
'''
if i[11:]=='False':
metas.append(0)
else:
metas.append(1)
'''
else: #"age/"
age.append(i[4:])
for i in range(0,len(prims)):
#metas=[bool(x) for x in metas]
s=Clinical_sample.objects.filter(dataset_id__name=sets,primary_site=prims[i],
primary_hist=hists[i],
age__in=age,
gender__in=gender,
ethnic__in=ethnic,
stage__in=stage,
grade__in=grade,
stageT__in=T,
stageN__in=N,
stageM__in=M,
metastatic__in=metas
).select_related('dataset_id').order_by('id')
samples+=list(s)
offset+=list(s.values_list('offset',flat=True))
##print(s)
pth=Path('../').resolve().joinpath('src',Clinical_Dataset.objects.get(name=sets).data_path)
val=np.load(pth.as_posix(),mmap_mode='r')
norm_probe=ProbeID.objects.filter(platform__name__in=["PLUS2"]).filter(Gene_symbol__in=norm_name).order_by('id')
probe_offset=list(norm_probe.values_list('offset',flat=True))
temp=val[np.ix_(probe_offset,offset)]
norm=np.mean(temp,axis=0, dtype=np.float64,keepdims=True)
# Make a generator to generate all (cell, probe, val) pairs
if(len(gene)!=0 and len(samples)!=0):
raw_test=val[np.ix_(probe,offset)]
normalize=np.subtract(raw_test,norm)#dimension different!!!!
sample_probe_val_pairs += [
(c, p, raw_test[probe_ix, cell_ix],54614-np.where(plus2_rank==raw_test[probe_ix, cell_ix])[0],normalize[probe_ix, cell_ix])
for probe_ix, p in enumerate(gene)
for cell_ix, c in enumerate(samples)
]
return render(request, 'clinical_search.html', {
'sample_probe_val_pairs': sample_probe_val_pairs,
})
def data(request):
SANGER=[]
sanger_flag=0
NCI=[]
nci_flag=0
GSE=[]
gse_flag=0
cell=[]
ncicell=[]
CCcell=[]
ps_id='0'
pn_id='0'
if request.POST.get('cell_line_method','text') == 'text':
if request.POST['cellline'] =='':
return HttpResponse("<p>please make sure to enter cell line name in Step3.</p>" )
c = request.POST['cellline']
c = list(set(c.split()))
sanger_flag=1
samples=Sample.objects.filter(dataset_id__name__in=['Sanger Cell Line Project']).order_by('id')
cell=samples.select_related('cell_line_id','dataset_id').filter(cell_line_id__name__in=c).order_by('id')
offset=list(cell.values_list('offset',flat=True))
ps_id='1'
nci_flag=1
ncisamples=Sample.objects.filter(dataset_id__name__in=['NCI60']).select_related('cell_line_id','dataset_id').order_by('id')
ncicell=ncisamples.filter(cell_line_id__name__in=c).order_by('id')
ncioffset=list(ncicell.values_list('offset',flat=True))
pn_id='3'
gse_flag=1
CCsamples=Sample.objects.filter(dataset_id__name__in=['GSE36133']).select_related('cell_line_id','dataset_id').order_by('id')
CCcell=CCsamples.filter(cell_line_id__name__in=c).order_by('id')
CCoffset=list(CCcell.values_list('offset',flat=True))
pn_id='3'
else:
if 'dataset' in request.POST and request.POST['dataset'] != '':
datas=request.POST.getlist('dataset')
if 'Sanger Cell Line Project' in datas:
sanger_flag=1
SANGER=list(set(request.POST.getlist('select_sanger')))
samples=Sample.objects.filter(dataset_id__name__in=['Sanger Cell Line Project']).order_by('id')
cell=samples.select_related('cell_line_id','dataset_id').filter(cell_line_id__name__in=SANGER).order_by('id')
offset=list(cell.values_list('offset',flat=True))
ps_id=str(Platform.objects.filter(name__in=["U133A"])[0].id)
if 'NCI60' in datas:
nci_flag=1
NCI=list(set(request.POST.getlist('select_nci')))
ncisamples=Sample.objects.filter(dataset_id__name__in=['NCI60']).select_related('cell_line_id','dataset_id').order_by('id')
ncicell=ncisamples.filter(cell_line_id__name__in=NCI).order_by('id')
ncioffset=list(ncicell.values_list('offset',flat=True))
pn_id=str(Platform.objects.filter(name__in=["PLUS2"])[0].id)
if 'GSE36133' in datas:
gse_flag=1
GSE=list(set(request.POST.getlist('select_gse')))
CCsamples=Sample.objects.filter(dataset_id__name__in=['GSE36133']).select_related('cell_line_id','dataset_id').order_by('id')
CCcell=CCsamples.filter(cell_line_id__name__in=GSE).order_by('id')
CCoffset=list(CCcell.values_list('offset',flat=True))
pn_id=str(Platform.objects.filter(name__in=["PLUS2"])[0].id)
if len(SANGER)==0 and len(NCI)==0 and len(GSE)==0:
return HttpResponse("<p>please select primary sites.</p>" )
else:
return HttpResponse("<p>please check Step3 again.</p>" )
if 'keyword' in request.POST and request.POST['keyword'] != '':
words = request.POST['keyword']
words = list(set(words.split()))
else:
return HttpResponse("<p>where is your keyword?</p>")
#open files
sanger_val_pth=Path('../').resolve().joinpath('src','sanger_cell_line_proj.npy')
nci_val_pth=Path('../').resolve().joinpath('src','nci60.npy')
gse_val_pth=Path('../').resolve().joinpath('src','GSE36133.npy')
sanger_val=np.load(sanger_val_pth.as_posix(),mmap_mode='r')
nci_val=np.load(nci_val_pth.as_posix(),mmap_mode='r')
gse_val=np.load(gse_val_pth.as_posix(),mmap_mode='r')
u133a_rank=np.load('ranking_u133a.npy')
plus2_rank=np.load('ranking_u133plus2.npy')
gene = []
ncigene = []
CCgene = []
context={}
norm_name=[request.POST['normalize']]
if sanger_flag==1:
#if request.POST['normalize']!='NTRK3-AS1':
sanger_g=ProbeID.objects.filter(platform__in=ps_id).filter(Gene_symbol__in=norm_name).order_by('id')
sanger_probe_offset=list(sanger_g.values_list('offset',flat=True))
temp=sanger_val[np.ix_(sanger_probe_offset,offset)]
norm=np.mean(temp,axis=0, dtype=np.float64,keepdims=True)
#else:
# norm=0.0
else:
norm=0.0 #if / should = 1
if nci_flag==1:
nci_g=ProbeID.objects.filter(platform__in=pn_id).filter(Gene_symbol__in=norm_name).order_by('id')
nci_probe_offset=list(nci_g.values_list('offset',flat=True))
temp=nci_val[np.ix_(nci_probe_offset,ncioffset)]
nci_norm=np.mean(temp,axis=0, dtype=np.float64,keepdims=True)
##print(nci_norm)
else:
nci_norm=0.0 #if / should = 1
if gse_flag==1:
CC_g=ProbeID.objects.filter(platform__in=pn_id).filter(Gene_symbol__in=norm_name).order_by('id')
CC_probe_offset=list(CC_g.values_list('offset',flat=True))
temp=gse_val[np.ix_(CC_probe_offset,CCoffset)]
CC_norm=np.mean(temp,axis=0, dtype=np.float64,keepdims=True)
##print(CC_norm)
else:
CC_norm=0.0 #if / should = 1
#dealing with probes
if 'gtype' in request.POST and request.POST['gtype'] == 'probeid':
gene = ProbeID.objects.filter(platform__in=ps_id).filter(Probe_id__in=words).order_by('id')
probe_offset=list(gene.values_list('offset',flat=True))
ncigene = ProbeID.objects.filter(platform__in=pn_id).filter(Probe_id__in=words).order_by('id')
nciprobe_offset=list(ncigene.values_list('offset',flat=True))
#nci60 and ccle use same probe set(ncigene) and nicprobe
# Make a generator to generate all (cell, probe, val) pairs
if(len(gene)!=0 and len(cell)!=0):
raw_test=sanger_val[np.ix_(probe_offset,offset)]
normalize=np.subtract(raw_test,norm)#dimension different!!!!
#normalize=np.around(normalize, decimals=1)
cell_probe_val_pairs = (
(c, p, raw_test[probe_ix, cell_ix],22216-np.where(u133a_rank==raw_test[probe_ix, cell_ix])[0],normalize[probe_ix, cell_ix])
for probe_ix, p in enumerate(gene)
for cell_ix, c in enumerate(cell)
)
else:
cell_probe_val_pairs =()
if(len(ncigene)!=0 and len(ncicell)!=0):
nci_raw_test=nci_val[np.ix_(nciprobe_offset,ncioffset)]
nci_normalize=np.subtract(nci_raw_test,nci_norm)
nci_cell_probe_val_pairs = (
(c, p, nci_raw_test[probe_ix, cell_ix],54614-np.where(plus2_rank==nci_raw_test[probe_ix, cell_ix])[0],nci_normalize[probe_ix, cell_ix])
for probe_ix, p in enumerate(ncigene)
for cell_ix, c in enumerate(ncicell)
)
else:
nci_cell_probe_val_pairs =()
if(len(ncigene)!=0 and len(CCcell)!=0):
CC_raw_test=gse_val[np.ix_(nciprobe_offset,CCoffset)]
CC_normalize=np.subtract(CC_raw_test,CC_norm)
CC_cell_probe_val_pairs = (
(c, p, CC_raw_test[probe_ix, cell_ix],54614-np.where(plus2_rank==CC_raw_test[probe_ix, cell_ix])[0],CC_normalize[probe_ix, cell_ix])
for probe_ix, p in enumerate(ncigene)
for cell_ix, c in enumerate(CCcell)
)
else:
CC_cell_probe_val_pairs =()
context['cell_probe_val_pairs']=cell_probe_val_pairs
context['nci_cell_probe_val_pairs']=nci_cell_probe_val_pairs
context['CC_cell_probe_val_pairs']=CC_cell_probe_val_pairs
return render_to_response('data.html', RequestContext(request,context))
elif 'gtype' in request.POST and request.POST['gtype'] == 'symbol':
gene = ProbeID.objects.filter(platform__in=ps_id).filter(Gene_symbol__in=words).order_by('id')
probe_offset=gene.values_list('offset',flat=True)
ncigene = ProbeID.objects.filter(platform__in=pn_id).filter(Gene_symbol__in=words).order_by('id')
nciprobe_offset=ncigene.values_list('offset',flat=True)
#nci60 and ccle use same probe set(ncigene) and nicprobe
# Make a generator to generate all (cell, probe, val) pairs
if(len(gene)!=0 and len(cell)!=0):
raw_test=sanger_val[np.ix_(probe_offset,offset)]
normalize=np.subtract(raw_test,norm)
cell_probe_val_pairs = (
(c, p, raw_test[probe_ix, cell_ix],22216-np.where(u133a_rank==raw_test[probe_ix, cell_ix])[0],normalize[probe_ix, cell_ix])
for probe_ix, p in enumerate(gene)
for cell_ix, c in enumerate(cell)
)
else:
cell_probe_val_pairs =()
if(len(ncigene)!=0 and len(ncicell)!=0):
nci_raw_test=nci_val[np.ix_(nciprobe_offset,ncioffset)]
nci_normalize=np.subtract(nci_raw_test,nci_norm)
nci_cell_probe_val_pairs = (
(c, p, nci_raw_test[probe_ix, cell_ix],54676-np.where(plus2_rank==nci_raw_test[probe_ix, cell_ix])[0],nci_normalize[probe_ix, cell_ix])
for probe_ix, p in enumerate(ncigene)
for cell_ix, c in enumerate(ncicell)
)
else:
nci_cell_probe_val_pairs =()
if(len(ncigene)!=0 and len(CCcell)!=0):
CC_raw_test=gse_val[np.ix_(nciprobe_offset,CCoffset)]
CC_normalize=np.subtract(CC_raw_test,CC_norm)
CC_cell_probe_val_pairs = (
(c, p, CC_raw_test[probe_ix, cell_ix],54614-np.where(plus2_rank==CC_raw_test[probe_ix, cell_ix])[0],CC_normalize[probe_ix, cell_ix])
for probe_ix, p in enumerate(ncigene)
for cell_ix, c in enumerate(CCcell)
)
else:
CC_cell_probe_val_pairs =()
context['cell_probe_val_pairs']=cell_probe_val_pairs
context['nci_cell_probe_val_pairs']=nci_cell_probe_val_pairs
context['CC_cell_probe_val_pairs']=CC_cell_probe_val_pairs
return render_to_response('data.html', RequestContext(request,context))
elif 'gtype' in request.POST and request.POST['gtype'] == 'entrez':
        gene = ProbeID.objects.filter(platform__in=ps_id).filter(Entrez_id__in=words).order_by('id')  # words is a list of keywords, so use the __in lookup as in the other branches
probe_offset=gene.values_list('offset',flat=True)
ncigene = ProbeID.objects.filter(platform__in=pn_id).filter(Entrez_id__in=words).order_by('id')
nciprobe_offset=ncigene.values_list('offset',flat=True)
#nci60 and ccle use same probe set(ncigene) and nicprobe
# Make a generator to generate all (cell, probe, val) pairs
if(len(gene)!=0 and len(cell)!=0):
raw_test=sanger_val[np.ix_(probe_offset,offset)]
normalize=np.subtract(raw_test,norm)
cell_probe_val_pairs = (
(c, p, raw_test[probe_ix, cell_ix],22216-np.where(u133a_rank==raw_test[probe_ix, cell_ix])[0],normalize[probe_ix, cell_ix])
for probe_ix, p in enumerate(gene)
for cell_ix, c in enumerate(cell)
)
else:
cell_probe_val_pairs =()
if(len(ncigene)!=0 and len(ncicell)!=0):
nci_raw_test=nci_val[np.ix_(nciprobe_offset,ncioffset)]
nci_normalize=np.subtract(nci_raw_test,nci_norm)
nci_cell_probe_val_pairs = (
(c, p, nci_raw_test[probe_ix, cell_ix],54614-np.where(plus2_rank==nci_raw_test[probe_ix, cell_ix])[0],nci_normalize[probe_ix, cell_ix])
for probe_ix, p in enumerate(ncigene)
for cell_ix, c in enumerate(ncicell)
)
else:
nci_cell_probe_val_pairs =()
if(len(ncigene)!=0 and len(CCcell)!=0):
CC_raw_test=gse_val[np.ix_(nciprobe_offset,CCoffset)]
CC_normalize=np.subtract(CC_raw_test,CC_norm)
CC_cell_probe_val_pairs = (
(c, p, CC_raw_test[probe_ix, cell_ix],54614-np.where(plus2_rank==CC_raw_test[probe_ix, cell_ix])[0],CC_normalize[probe_ix, cell_ix])
for probe_ix, p in enumerate(ncigene)
for cell_ix, c in enumerate(CCcell)
)
else:
CC_cell_probe_val_pairs =()
context['cell_probe_val_pairs']=cell_probe_val_pairs
context['nci_cell_probe_val_pairs']=nci_cell_probe_val_pairs
context['CC_cell_probe_val_pairs']=CC_cell_probe_val_pairs
return render_to_response('data.html', RequestContext(request,context))
else:
return HttpResponse(
"<p>keyword type not match with your keyword input</p>"
)
| mit | -1,586,038,723,568,720,100 | 43.075036 | 175 | 0.50397 | false |
chaosk/trinitee | trinitee/wiki/views.py | 1 | 6179 | from datetime import datetime
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404, redirect
from django.contrib import messages
from django.http import Http404, HttpResponseForbidden
from django.template.response import TemplateResponse
from reversion import revision
from reversion.helpers import generate_patch_html
from reversion.models import Version
from wiki.forms import WikiNewForm, WikiEditForm
from wiki.models import WikiPage
def wiki_index(request):
""" Returns Index wiki page """
page, created = WikiPage.objects.get_or_create(slug='Index',
defaults={'title': u'Index'}
)
return TemplateResponse(request, 'wiki/detail.html', {
'page': page,
})
def wiki_detail(request, slug):
try:
page = WikiPage.objects.get(slug=slug)
except WikiPage.DoesNotExist:
deleted_versions = Version.objects.get_deleted(WikiPage)[:5]
deleted_version = None
for version in deleted_versions:
if version.field_dict.get('slug') == slug:
deleted_version = version
break
if not deleted_version:
raise Http404
else:
return redirect(reverse('wiki_restore', kwargs={
'slug': slug,
'rev': deleted_version.id,
}))
return TemplateResponse(request, 'wiki/detail.html', {
'page': page,
})
def wiki_new(request):
if not request.user.has_perm('wiki.add_wikipage'):
return HttpResponseForbidden()
form = WikiNewForm()
if request.method == 'POST':
form = WikiNewForm(request.POST)
if form.is_valid():
new_page = form.save()
messages.success(request, "New page has been added to the wiki.")
revision.comment = "Initial version"
return redirect(new_page.get_absolute_url())
return TemplateResponse(request, 'wiki/new.html', {
'form': form,
})
def wiki_edit(request, slug):
page = get_object_or_404(WikiPage, slug=slug)
if not request.user.has_perm('wiki.edit_wikipage'):
return HttpResponseForbidden()
form = WikiEditForm(instance=page)
if request.method == 'POST':
form = WikiEditForm(request.POST, instance=page)
if form.is_valid():
form.save()
messages.success(request,
"Successfully updated \"{0}\" page.".format(page)
)
revision.comment = form.cleaned_data.get('comment')
return redirect(page.get_absolute_url())
return TemplateResponse(request, 'wiki/edit.html', {
'page': page,
'form': form,
})
def wiki_delete(request, slug):
page = get_object_or_404(WikiPage, slug=slug)
if not request.user.has_perm('wiki.delete_wikipage'):
return HttpResponseForbidden()
if request.method == 'POST':
page.delete()
messages.success(request,
"Successfully removed \"{0}\" page.".format(page)
)
revision.comment = "Page deleted"
return redirect(reverse('wiki_index'))
return TemplateResponse(request, 'wiki/delete.html', {
'page': page,
})
def wiki_list(request):
return TemplateResponse(request, 'wiki/list.html', {
'pages': WikiPage.objects.all(),
'permissions_url': WikiPage().get_permissions_url(), # hackish
})
def wiki_history(request, slug):
page = get_object_or_404(WikiPage, slug=slug)
versions = Version.objects.get_for_object(page).select_related() \
.order_by('-id')
try:
latest_version_id = versions[0].id
except IndexError:
latest_version_id = None
return TemplateResponse(request, 'wiki/history.html', {
'page': page,
'latest_version_id': latest_version_id,
'versions': versions,
})
def wiki_history_detail(request, slug, rev):
page = get_object_or_404(WikiPage, slug=slug)
try:
version = Version.objects.select_related().get(pk=rev)
except Version.DoesNotExist:
raise Http404
if page.id != int(version.object_id):
raise Http404
return TemplateResponse(request, 'wiki/history_detail.html', {
'page': version.field_dict,
'revision': version.revision,
})
def wiki_compare(request, slug, rev_from, rev_to):
page = get_object_or_404(WikiPage, slug=slug)
try:
version_from = Version.objects.select_related().get(pk=rev_from)
version_to = Version.objects.select_related().get(pk=rev_to)
except Version.DoesNotExist:
raise Http404
if page.id != int(version_from.object_id) or \
int(version_from.object_id) != int(version_to.object_id):
messages.error(request,
"You have tried to compare revisions of different pages."
)
return redirect(reverse('wiki_history', kwargs={'slug': slug}))
revision_to = version_to.revision
revision_from = version_from.revision
patch_html = generate_patch_html(version_from, version_to, "content")
return TemplateResponse(request, 'wiki/compare.html', {
'page': page,
'patch_html': patch_html,
'revision_from': revision_from,
'revision_to': revision_to,
})
def wiki_revert(request, slug, rev):
if not request.user.has_perm('wiki.moderate_wikipage'):
return HttpResponseForbidden()
page = get_object_or_404(WikiPage, slug=slug)
version = get_object_or_404(Version.objects.select_related(), pk=rev)
if page.id != int(version.object_id):
messages.error(request,
"You have tried to revert this page to another page object."
)
return redirect(reverse('wiki_history', kwargs={'slug': slug}))
if request.method == 'POST':
version.revert()
messages.success(request,
"Successfully reverted \"{0}\" page to state from {1}.".format(
page,
datetime.strftime(version.revision.date_created,
"%B %d, %Y, %I:%M %p"
)
)
)
revision.comment = "Reverted to #{0}".format(version.id)
return redirect(reverse('wiki_detail', kwargs={'slug': slug}))
return TemplateResponse(request, 'wiki/revert.html', {
'page': page,
'version': version,
})
def wiki_restore(request, slug, rev):
if not request.user.has_perm('wiki.moderate_wikipage'):
return HttpResponseForbidden()
version = get_object_or_404(Version, pk=rev)
if request.method == 'POST':
version.revert()
messages.success(request,
"Successfully restored \"{0}\" page to state from {1}.".format(
version.field_dict.get('title'),
datetime.strftime(version.revision.date_created,
"%B %d, %Y, %I:%M %p"
)
)
)
revision.comment = "Page restored"
return redirect(reverse('wiki_detail', kwargs={'slug': slug}))
return TemplateResponse(request, 'wiki/restore.html', {
'version': version,
})
| bsd-3-clause | 3,111,516,469,836,567,600 | 29.289216 | 70 | 0.70772 | false |
elahejalalpour/ELRyu | ryu/services/protocols/bgp/operator/commands/set.py | 51 | 2182 | import logging
from ryu.services.protocols.bgp.operator.command import Command
from ryu.services.protocols.bgp.operator.command import CommandsResponse
from ryu.services.protocols.bgp.operator.command import STATUS_OK
from ryu.services.protocols.bgp.operator.command import STATUS_ERROR
from ryu.services.protocols.bgp.operator.commands.responses import \
WrongParamResp
class LoggingCmd(Command):
command = 'logging'
help_msg = 'turn on/off logging at current level'
def __init__(self, *args, **kwargs):
super(LoggingCmd, self).__init__(*args, **kwargs)
self.subcommands = {
'on': self.On,
'off': self.Off,
'level': self.Level
}
def action(self, params):
return CommandsResponse(STATUS_ERROR, 'Command incomplete')
class On(Command):
command = 'on'
help_msg = 'turn-on the logging at the current level'
def action(self, params):
logging.getLogger('bgpspeaker').addHandler(self.api.log_handler)
return CommandsResponse(STATUS_OK, True)
class Off(Command):
command = 'off'
help_msg = 'turn-off the logging'
def action(self, params):
logging.getLogger('bgpspeaker').removeHandler(self.api.log_handler)
return CommandsResponse(STATUS_OK, True)
class Level(Command):
command = 'level'
help_msg = 'set logging level'
param_help_msg = '[debug/info/error]'
def action(self, params):
lvls = {
'debug': logging.DEBUG,
'error': logging.ERROR,
'info': logging.INFO
}
if len(params) == 1 and params[0] in lvls:
self.api.log_handler.setLevel(
lvls.get(params[0], logging.ERROR)
)
return CommandsResponse(STATUS_OK, True)
else:
return WrongParamResp()
class SetCmd(Command):
help_msg = 'set runtime settings'
command = 'set'
subcommands = {'logging': LoggingCmd}
def action(self, params):
return CommandsResponse(STATUS_ERROR, 'Command incomplete')
| apache-2.0 | 3,253,670,541,293,088,000 | 30.623188 | 79 | 0.607241 | false |
yencarnacion/jaikuengine | .google_appengine/lib/cherrypy/cherrypy/tutorial/tut10_http_errors.py | 36 | 2826 | """
Tutorial: HTTP errors
HTTPError is used to return an error response to the client.
CherryPy has lots of options regarding how such errors are
logged, displayed, and formatted.
"""
import os
localDir = os.path.dirname(__file__)
curpath = os.path.normpath(os.path.join(os.getcwd(), localDir))
import cherrypy
class HTTPErrorDemo(object):
# Set a custom response for 403 errors.
_cp_config = {'error_page.403' : os.path.join(curpath, "custom_error.html")}
def index(self):
# display some links that will result in errors
tracebacks = cherrypy.request.show_tracebacks
if tracebacks:
trace = 'off'
else:
trace = 'on'
return """
<html><body>
<p>Toggle tracebacks <a href="toggleTracebacks">%s</a></p>
<p><a href="/doesNotExist">Click me; I'm a broken link!</a></p>
<p><a href="/error?code=403">Use a custom error page from a file.</a></p>
<p>These errors are explicitly raised by the application:</p>
<ul>
<li><a href="/error?code=400">400</a></li>
<li><a href="/error?code=401">401</a></li>
<li><a href="/error?code=402">402</a></li>
<li><a href="/error?code=500">500</a></li>
</ul>
<p><a href="/messageArg">You can also set the response body
when you raise an error.</a></p>
</body></html>
""" % trace
index.exposed = True
def toggleTracebacks(self):
# simple function to toggle tracebacks on and off
tracebacks = cherrypy.request.show_tracebacks
cherrypy.config.update({'request.show_tracebacks': not tracebacks})
# redirect back to the index
raise cherrypy.HTTPRedirect('/')
toggleTracebacks.exposed = True
def error(self, code):
# raise an error based on the get query
raise cherrypy.HTTPError(status = code)
error.exposed = True
def messageArg(self):
message = ("If you construct an HTTPError with a 'message' "
"argument, it wil be placed on the error page "
"(underneath the status line by default).")
raise cherrypy.HTTPError(500, message=message)
messageArg.exposed = True
import os.path
tutconf = os.path.join(os.path.dirname(__file__), 'tutorial.conf')
if __name__ == '__main__':
# CherryPy always starts with app.root when trying to map request URIs
# to objects, so we need to mount a request handler root. A request
# to '/' will be mapped to HelloWorld().index().
cherrypy.quickstart(HTTPErrorDemo(), config=tutconf)
else:
# This branch is for the test suite; you can ignore it.
cherrypy.tree.mount(HTTPErrorDemo(), config=tutconf)
| apache-2.0 | -411,551,758,686,231,700 | 33.888889 | 85 | 0.602619 | false |
olapaola/olapaola-android-scripting | python/src/Tools/freeze/winmakemakefile.py | 39 | 4824 | import sys, os
# Template used when the program is a GUI program
WINMAINTEMPLATE = """
#include <windows.h>
int WINAPI WinMain(
HINSTANCE hInstance, // handle to current instance
HINSTANCE hPrevInstance, // handle to previous instance
LPSTR lpCmdLine, // pointer to command line
int nCmdShow // show state of window
)
{
extern int Py_FrozenMain(int, char **);
PyImport_FrozenModules = _PyImport_FrozenModules;
return Py_FrozenMain(__argc, __argv);
}
"""
SERVICETEMPLATE = """
extern int PythonService_main(int, char **);
int main( int argc, char **argv)
{
PyImport_FrozenModules = _PyImport_FrozenModules;
return PythonService_main(argc, argv);
}
"""
subsystem_details = {
# -s flag : (C entry point template), (is it __main__?), (is it a DLL?)
'console' : (None, 1, 0),
'windows' : (WINMAINTEMPLATE, 1, 0),
'service' : (SERVICETEMPLATE, 0, 0),
'com_dll' : ("", 0, 1),
}
def get_custom_entry_point(subsystem):
try:
return subsystem_details[subsystem][:2]
except KeyError:
raise ValueError, "The subsystem %s is not known" % subsystem
def makemakefile(outfp, vars, files, target):
save = sys.stdout
try:
sys.stdout = outfp
realwork(vars, files, target)
finally:
sys.stdout = save
def realwork(vars, moddefns, target):
version_suffix = "%r%r" % sys.version_info[:2]
print "# Makefile for Microsoft Visual C++ generated by freeze.py script"
print
print 'target = %s' % target
print 'pythonhome = %s' % vars['prefix']
print
print 'DEBUG=0 # Set to 1 to use the _d versions of Python.'
print '!IF $(DEBUG)'
print 'debug_suffix=_d'
print 'c_debug=/Zi /Od /DDEBUG /D_DEBUG'
print 'l_debug=/DEBUG'
print 'temp_dir=Build\\Debug'
print '!ELSE'
print 'debug_suffix='
print 'c_debug=/Ox'
print 'l_debug='
print 'temp_dir=Build\\Release'
print '!ENDIF'
print
print '# The following line assumes you have built Python using the standard instructions'
print '# Otherwise fix the following line to point to the library.'
print 'pythonlib = "$(pythonhome)/pcbuild/python%s$(debug_suffix).lib"' % version_suffix
print
# We only ever write one "entry point" symbol - either
# "main" or "WinMain". Therefore, there is no need to
# pass a subsystem switch to the linker as it works it
# out all by itself. However, the subsystem _does_ determine
# the file extension and additional linker flags.
target_link_flags = ""
target_ext = ".exe"
if subsystem_details[vars['subsystem']][2]:
target_link_flags = "-dll"
target_ext = ".dll"
print "# As the target uses Python%s.dll, we must use this compiler option!" % version_suffix
print "cdl = /MD"
print
print "all: $(target)$(debug_suffix)%s" % (target_ext)
print
print '$(temp_dir):'
print ' if not exist $(temp_dir)\. mkdir $(temp_dir)'
print
objects = []
libs = ["shell32.lib", "comdlg32.lib", "wsock32.lib", "user32.lib", "oleaut32.lib"]
for moddefn in moddefns:
print "# Module", moddefn.name
for file in moddefn.sourceFiles:
base = os.path.basename(file)
base, ext = os.path.splitext(base)
objects.append(base + ".obj")
print '$(temp_dir)\%s.obj: "%s"' % (base, file)
print "\t@$(CC) -c -nologo /Fo$* $(cdl) $(c_debug) /D BUILD_FREEZE",
print '"-I$(pythonhome)/Include" "-I$(pythonhome)/PC" \\'
print "\t\t$(cflags) $(cdebug) $(cinclude) \\"
extra = moddefn.GetCompilerOptions()
if extra:
print "\t\t%s \\" % (' '.join(extra),)
print '\t\t"%s"' % file
print
# Add .lib files this module needs
for modlib in moddefn.GetLinkerLibs():
if modlib not in libs:
libs.append(modlib)
print "ADDN_LINK_FILES=",
for addn in vars['addn_link']: print '"%s"' % (addn),
print ; print
print "OBJS=",
for obj in objects: print '"$(temp_dir)\%s"' % (obj),
print ; print
print "LIBS=",
for lib in libs: print '"%s"' % (lib),
print ; print
print "$(target)$(debug_suffix)%s: $(temp_dir) $(OBJS)" % (target_ext)
print "\tlink -out:$(target)$(debug_suffix)%s %s" % (target_ext, target_link_flags),
print "\t$(OBJS) \\"
print "\t$(LIBS) \\"
print "\t$(ADDN_LINK_FILES) \\"
print "\t$(pythonlib) $(lcustom) $(l_debug)\\"
print "\t$(resources)"
print
print "clean:"
print "\t-rm -f *.obj"
print "\t-rm -f $(target).exe"
| apache-2.0 | -7,540,953,272,769,949,000 | 32.041096 | 97 | 0.572968 | false |
insomnia-lab/calibre | src/calibre/ebooks/metadata/lrx.py | 10 | 2432 | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal [email protected]'
__docformat__ = 'restructuredtext en'
'''
Read metadata from LRX files
'''
import struct
from zlib import decompress
from lxml import etree
from calibre.ebooks.metadata import MetaInformation, string_to_authors
def _read(f, at, amount):
f.seek(at)
return f.read(amount)
def word_be(buf):
return struct.unpack('>L', buf)[0]
def word_le(buf):
return struct.unpack('<L', buf)[0]
def short_le(buf):
return struct.unpack('<H', buf)[0]
def short_be(buf):
return struct.unpack('>H', buf)[0]
def get_metadata(f):
read = lambda at, amount: _read(f, at, amount)
f.seek(0)
buf = f.read(12)
if buf[4:] == 'ftypLRX2':
offset = 0
while True:
offset += word_be(buf[:4])
try:
buf = read(offset, 8)
except:
raise ValueError('Not a valid LRX file')
if buf[4:] == 'bbeb':
break
offset += 8
buf = read(offset, 16)
if buf[:8].decode('utf-16-le') != 'LRF\x00':
raise ValueError('Not a valid LRX file')
lrf_version = word_le(buf[8:12])
offset += 0x4c
compressed_size = short_le(read(offset, 2))
offset += 2
if lrf_version >= 800:
offset += 6
compressed_size -= 4
uncompressed_size = word_le(read(offset, 4))
info = decompress(f.read(compressed_size))
if len(info) != uncompressed_size:
raise ValueError('LRX file has malformed metadata section')
root = etree.fromstring(info)
bi = root.find('BookInfo')
title = bi.find('Title')
title_sort = title.get('reading', None)
title = title.text
author = bi.find('Author')
author_sort = author.get('reading', None)
mi = MetaInformation(title, string_to_authors(author.text))
mi.title_sort, mi.author_sort = title_sort, author_sort
author = author.text
publisher = bi.find('Publisher')
mi.publisher = getattr(publisher, 'text', None)
mi.tags = [x.text for x in bi.findall('Category')]
mi.language = root.find('DocInfo').find('Language').text
return mi
elif buf[4:8] == 'LRX':
raise ValueError('Librie LRX format not supported')
else:
raise ValueError('Not a LRX file')
| gpl-3.0 | -1,053,442,505,069,345,700 | 28.658537 | 71 | 0.574836 | false |
JohnGriffiths/nipype | nipype/utils/onetime.py | 16 | 2548 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Descriptor support for NIPY.
Utilities to support special Python descriptors [1,2], in particular the use of
a useful pattern for properties we call 'one time properties'. These are
object attributes which are declared as properties, but become regular
attributes once they've been read the first time. They can thus be evaluated
later in the object's life cycle, but once evaluated they become normal, static
attributes with no function call overhead on access or any other constraints.
References
----------
[1] How-To Guide for Descriptors, Raymond
Hettinger. http://users.rcn.com/python/download/Descriptor.htm
[2] Python data model, http://docs.python.org/reference/datamodel.html
"""
class OneTimeProperty(object):
"""A descriptor to make special properties that become normal attributes.
"""
def __init__(self, func):
"""Create a OneTimeProperty instance.
Parameters
----------
func : method
The method that will be called the first time to compute a value.
Afterwards, the method's name will be a standard attribute holding
the value of this computation.
"""
self.getter = func
self.name = func.func_name
def __get__(self, obj, type=None):
""" Called on attribute access on the class or instance. """
if obj is None:
# Being called on the class, return the original function. This way,
# introspection works on the class.
return self.getter
val = self.getter(obj)
#print "** setattr_on_read - loading '%s'" % self.name # dbg
setattr(obj, self.name, val)
return val
def setattr_on_read(func):
    # XXX - better names for this?
# - cor_property (copy on read property)
# - sor_property (set on read property)
# - prop2attr_on_read
#... ?
"""Decorator to create OneTimeProperty attributes.
Parameters
----------
func : method
The method that will be called the first time to compute a value.
Afterwards, the method's name will be a standard attribute holding the
value of this computation.
Examples
--------
>>> class MagicProp(object):
... @setattr_on_read
... def a(self):
... return 99
...
>>> x = MagicProp()
>>> 'a' in x.__dict__
False
>>> x.a
99
>>> 'a' in x.__dict__
True
"""
return OneTimeProperty(func)
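# Illustrative sketch (not part of the original module): the first read of a
# decorated attribute calls the getter and stores the result as an ordinary
# instance attribute, which shadows the non-data descriptor, so later reads
# involve no function call at all.
#
#     class Lazy(object):
#         @setattr_on_read
#         def data(self):
#             print('computing...')      # runs only on the first access
#             return [1, 2, 3]
#
#     x = Lazy()
#     x.data    # prints 'computing...' and returns [1, 2, 3]
#     x.data    # returns the cached list; the getter is not called again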
| bsd-3-clause | 2,034,723,094,597,288,700 | 30.45679 | 79 | 0.635008 | false |
carlos-ferras/Sequence-ToolKit | view/dialogs/about/ui_credits.py | 1 | 4861 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/krl1to5/Work/FULL/Sequence-ToolKit/2016/resources/ui/dialogs/about/credits.ui'
#
# Created by: PyQt5 UI code generator 5.5.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_credits(object):
def setupUi(self, credits):
credits.setObjectName("credits")
credits.resize(666, 350)
credits.setMinimumSize(QtCore.QSize(666, 350))
self.verticalLayout = QtWidgets.QVBoxLayout(credits)
self.verticalLayout.setObjectName("verticalLayout")
self.credits_label = QtWidgets.QLabel(credits)
font = QtGui.QFont()
font.setPointSize(-1)
self.credits_label.setFont(font)
self.credits_label.setStyleSheet("font-size:34px")
self.credits_label.setAlignment(QtCore.Qt.AlignCenter)
self.credits_label.setObjectName("credits_label")
self.verticalLayout.addWidget(self.credits_label)
self.credits_text = QtWidgets.QTextEdit(credits)
self.credits_text.setReadOnly(True)
self.credits_text.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse|QtCore.Qt.TextSelectableByKeyboard|QtCore.Qt.TextSelectableByMouse)
self.credits_text.setObjectName("credits_text")
self.verticalLayout.addWidget(self.credits_text)
self.retranslateUi(credits)
QtCore.QMetaObject.connectSlotsByName(credits)
def retranslateUi(self, credits):
_translate = QtCore.QCoreApplication.translate
credits.setWindowTitle(_translate("credits", "credits"))
self.credits_label.setText(_translate("credits", "Credits"))
self.credits_text.setHtml(_translate("credits", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Sans Serif\'; font-size:9pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Bitstream Charter\'; font-size:12pt; font-weight:600;\">This application is the result of the collaboration between the department of free software from the University of Informatics Sciences (UCI) in Havana and the Luminescence Dating Laboratory at CEADEN.</span></p>\n"
"<p align=\"center\" style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Bitstream Charter\'; font-size:12pt; font-weight:600;\"><br /></p>\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Bitstream Charter\'; font-size:10pt; font-weight:600;\">Created and Designed by:</span></p>\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Bitstream Charter\'; font-size:12pt; font-weight:600;\">Carlos Manuel Ferrás Hernández</span></p>\n"
"<p align=\"center\" style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Bitstream Charter\'; font-size:12pt; font-weight:600;\"><br /></p>\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Bitstream Charter\'; font-size:10pt; font-weight:600;\">Documented by:</span></p>\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Bitstream Charter\'; font-size:12pt; font-weight:600;\">Yanet Leonor Quesada Hernández</span></p>\n"
"<p align=\"center\" style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Bitstream Charter\'; font-size:12pt; font-weight:600;\"><br /></p>\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Bitstream Charter\'; font-size:10pt; font-weight:600;\">XML structure and concepts:</span></p>\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Bitstream Charter\'; font-size:12pt; font-weight:600;\">Luis Baly Gil</span></p></body></html>"))
| gpl-3.0 | -3,694,349,771,710,376,000 | 90.660377 | 439 | 0.706258 | false |
chrislit/abydos | abydos/distance/_rogot_goldberg.py | 1 | 4654 | # Copyright 2018-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.distance._rogot_goldberg.
Rogot & Goldberg similarity
"""
from typing import Any, Counter as TCounter, Optional, Sequence, Set, Union
from ._token_distance import _TokenDistance
from ..tokenizer import _Tokenizer
__all__ = ['RogotGoldberg']
class RogotGoldberg(_TokenDistance):
r"""Rogot & Goldberg similarity.
For two sets X and Y and a population N, Rogot & Goldberg's
"second index adjusted agreement" :math:`A_2` :cite:`Rogot:1966` is
.. math::
sim_{RogotGoldberg}(X, Y) =
\frac{1}{2}\Bigg(
\frac{2|X \cap Y|}{|X|+|Y|} +
\frac{2|(N \setminus X) \setminus Y|}
{|N \setminus X|+|N \setminus Y|}
\Bigg)
In :ref:`2x2 confusion table terms <confusion_table>`, where a+b+c+d=n,
this is
.. math::
sim_{RogotGoldberg} =
\frac{1}{2}\Bigg(
\frac{2a}{2a+b+c} +
\frac{2d}{2d+b+c}
\Bigg)
.. versionadded:: 0.4.0
"""
def __init__(
self,
alphabet: Optional[
Union[TCounter[str], Sequence[str], Set[str], int]
] = None,
tokenizer: Optional[_Tokenizer] = None,
intersection_type: str = 'crisp',
**kwargs: Any
) -> None:
"""Initialize RogotGoldberg instance.
Parameters
----------
alphabet : Counter, collection, int, or None
This represents the alphabet of possible tokens.
See :ref:`alphabet <alphabet>` description in
:py:class:`_TokenDistance` for details.
tokenizer : _Tokenizer
A tokenizer instance from the :py:mod:`abydos.tokenizer` package
intersection_type : str
Specifies the intersection type, and set type as a result:
See :ref:`intersection_type <intersection_type>` description in
:py:class:`_TokenDistance` for details.
**kwargs
Arbitrary keyword arguments
Other Parameters
----------------
qval : int
The length of each q-gram. Using this parameter and tokenizer=None
will cause the instance to use the QGram tokenizer with this
q value.
metric : _Distance
A string distance measure class for use in the ``soft`` and
``fuzzy`` variants.
threshold : float
A threshold value, similarities above which are counted as
members of the intersection for the ``fuzzy`` variant.
.. versionadded:: 0.4.0
"""
super(RogotGoldberg, self).__init__(
alphabet=alphabet,
tokenizer=tokenizer,
intersection_type=intersection_type,
**kwargs
)
def sim(self, src: str, tar: str) -> float:
"""Return the Rogot & Goldberg similarity of two strings.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
Returns
-------
float
Rogot & Goldberg similarity
Examples
--------
>>> cmp = RogotGoldberg()
>>> cmp.sim('cat', 'hat')
0.7487179487179487
>>> cmp.sim('Niall', 'Neil')
0.6795702691656449
>>> cmp.sim('aluminum', 'Catalan')
0.5539941668876179
>>> cmp.sim('ATCG', 'TAGC')
0.496790757381258
.. versionadded:: 0.4.0
"""
if src == tar:
return 1.0
self._tokenize(src, tar)
a = self._intersection_card()
b = self._src_only_card()
c = self._tar_only_card()
d = self._total_complement_card()
p1 = a / (2 * a + b + c) if a else 0
p2 = d / (2 * d + b + c) if d else 0
return p1 + p2
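# Worked sketch (illustrative, not part of abydos): for a hypothetical 2x2
# confusion table with a=3, b=1, c=2, d=10 the method above computes
#     p1 = a / (2a + b + c) = 3 / 9   ~= 0.333
#     p2 = d / (2d + b + c) = 10 / 23 ~= 0.435
#     sim = p1 + p2                   ~= 0.768
# which is algebraically the same as the docstring's
#     (1/2) * (2a / (2a + b + c) + 2d / (2d + b + c)).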
if __name__ == '__main__':
import doctest
doctest.testmod()
| gpl-3.0 | -1,071,463,639,137,194,000 | 28.0875 | 78 | 0.569188 | false |
ngaranko/bak | bak/projects/management/commands/backup.py | 1 | 1064 | from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from bak.projects.models import Project
from bak.actions.dump_db import dump_database
from bak.actions.dump_directory import rsync_directory
from bak.actions.exceptions import ActionError
class Command(BaseCommand):
can_import_settings = True
def handle(self, *args, **options):
for project in Project.objects.all():
self.stdout.write("Project: {name}".format(name=project.name))
if project.dump_db:
self.stdout.write(" - Dump DB!")
# TODO: Wrap this call
dump_database(project)
if project.dump_base_dir:
self.stdout.write(" - Dump base dir!")
try:
result = rsync_directory(project)
except ActionError, e:
raise CommandError(e)
else:
self.stdout.write(' - rsync result: {result}'.format(
result=result))
| gpl-2.0 | -3,409,112,878,703,964,000 | 34.466667 | 74 | 0.593985 | false |
KiChjang/servo | tests/wpt/web-platform-tests/fetch/api/resources/redirect.py | 7 | 2869 | import time
from six.moves.urllib.parse import urlencode, urlparse
from wptserve.utils import isomorphic_decode, isomorphic_encode
def main(request, response):
stashed_data = {b'count': 0, b'preflight': b"0"}
status = 302
headers = [(b"Content-Type", b"text/plain"),
(b"Cache-Control", b"no-cache"),
(b"Pragma", b"no-cache")]
if b"Origin" in request.headers:
headers.append((b"Access-Control-Allow-Origin", request.headers.get(b"Origin", b"")))
headers.append((b"Access-Control-Allow-Credentials", b"true"))
else:
headers.append((b"Access-Control-Allow-Origin", b"*"))
token = None
if b"token" in request.GET:
token = request.GET.first(b"token")
data = request.server.stash.take(token)
if data:
stashed_data = data
if request.method == u"OPTIONS":
if b"allow_headers" in request.GET:
headers.append((b"Access-Control-Allow-Headers", request.GET[b'allow_headers']))
stashed_data[b'preflight'] = b"1"
#Preflight is not redirected: return 200
if not b"redirect_preflight" in request.GET:
if token:
request.server.stash.put(request.GET.first(b"token"), stashed_data)
return 200, headers, u""
if b"redirect_status" in request.GET:
status = int(request.GET[b'redirect_status'])
stashed_data[b'count'] += 1
if b"location" in request.GET:
url = isomorphic_decode(request.GET[b'location'])
if b"simple" not in request.GET:
scheme = urlparse(url).scheme
if scheme == u"" or scheme == u"http" or scheme == u"https":
url += u"&" if u'?' in url else u"?"
#keep url parameters in location
url_parameters = {}
for item in request.GET.items():
url_parameters[isomorphic_decode(item[0])] = isomorphic_decode(item[1][0])
url += urlencode(url_parameters)
#make sure location changes during redirection loop
url += u"&count=" + str(stashed_data[b'count'])
headers.append((b"Location", isomorphic_encode(url)))
if b"redirect_referrerpolicy" in request.GET:
headers.append((b"Referrer-Policy", request.GET[b'redirect_referrerpolicy']))
if b"delay" in request.GET:
time.sleep(float(request.GET.first(b"delay", 0)) / 1E3)
if token:
request.server.stash.put(request.GET.first(b"token"), stashed_data)
if b"max_count" in request.GET:
max_count = int(request.GET[b'max_count'])
#stop redirecting and return count
if stashed_data[b'count'] > max_count:
# -1 because the last is not a redirection
return str(stashed_data[b'count'] - 1)
return status, headers, u""
| mpl-2.0 | -1,637,259,686,932,726,000 | 39.408451 | 94 | 0.595678 | false |
sumitsourabh/opencog | opencog/python/pln/rules/context_rules_old.py | 32 | 2706 | from opencog.atomspace import types, TruthValue
import formulas
from pln.rule import Rule
# Todo:
# It may be better to use SubsetLinks instead of ContextLinks, or at
# least implicitly convert them.
# (Context C x).tv = (Subset C x).tv
# (Context C: Subset x y).tv = (Subset (x AND C) (y AND C))
# DeductionRule produces
# Context C: Subset x z
# using
# Context C: Subset x y
# Context C: Subset y z
# Context C: y
# Context C: z
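# Illustrative instantiation of the schema above (not from the original file):
# with C = sport, x = ball, y = toy, z = plaything this reads
#     Context sport: Subset ball toy
#     Context sport: Subset toy plaything
#     |- Context sport: Subset ball plaything
# i.e. ordinary deduction carried out entirely inside the context C.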
# Special case for direct evaluation Rules.
# Subset A B requires
# Member x A, Member x B
#
# Context C: Subset A B requires
# Member x A
# Member x B
# Member x C
# or something. and then change the math.
# Or
class ContextualRule(Rule):
def __init__(self, chainer, rule):
self._chainer = chainer
self.name = 'Contextual' + rule.name
self.full_name = 'Contextual' + rule.full_name
self._outputs = rule._outputs
self._inputs = rule._inputs
self.formula = rule.formula
context = chainer.new_variable()
self._outputs = [self.contextlink(context, out)
for out in self._outputs]
is_evaluation_rule = 'EvaluationRule' in rule.name
if is_evaluation_rule:
            raise NotImplementedError("Direct evaluation in a context is not handled yet")
else:
self._inputs = [self.contextlink(context, input)
for input in self._inputs]
print self.name
print self._outputs
print self._inputs
def andlink(self, context, expression):
return self._chainer.link(types.AndLink, [context, expression])
def contextlink(self, context, expression):
return self._chainer.link(types.ContextLink, [context, expression])
def extract_context(self, contextlink):
# Todo: The variable 'context' is never used. Is it supposed to
# be returned instead of 'contextlink'?
context = contextlink.out[0]
expression = contextlink.out[1]
return contextlink, expression
class AndToContextRule(Rule):
"""
(Context C: Subset x y).tv = (Subset (x AND C) (y AND C))
"""
def __init__(self, chainer, link_type):
A = chainer.new_variable()
B = chainer.new_variable()
C = chainer.new_variable()
link = chainer.link(link_type, [A, B])
contextlink = chainer.link(types.ContextLink, [C, link])
andAC = chainer.link(types.AndLink, [A, C])
andBC = chainer.link(types.AndLink, [B, C])
input = chainer.link(link_type, [andAC, andBC])
Rule.__init__(self,
formula=formulas.identityFormula,
outputs=[contextlink],
inputs=[input])
| agpl-3.0 | 800,855,428,334,231,400 | 28.413043 | 75 | 0.613082 | false |