repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|
svp-dev/slcore | slc/tools/slc/mt/mtsparc/regdefs.py | 1 | 15832 | from ..common.regmagic import RegMagic
class RegDefs:
iargregs = 8
ilocalregs = 23
# There is no FP support for now, but we define this to keep the
# common implementation happy.
fargregs = 8
flocalregs = 24
regprefix = '%'
regprefix_fmt = '%%'
canon_is_numeric = False
comprefix = '!'
movinsn = 'mov'
fmovinsn = 'fmovs'
# offset in register window of
# first local register
mt_locals_offset = 1
mt_flocals_offset = 0
legacy_regs = {
# globals
'g1' : 1, # always tmp
'g2' : 2, # app reg
'g3' : 3, # app reg
'g4' : 4, # app reg
'g5' : 5, # OS reg
'g6' : 6, # OS reg
'g7' : 7, # OS reg
# output regs
'o5' : 13,
'o4' : 12,
'o3' : 11,
'o2' : 10,
'o1' : 9,
'o0' : 8,
# address of CALL (RA?)
'o7' : 15,
# locals
'l0' : 16,
'l1' : 17,
'l2' : 18,
'l3' : 19,
'l4' : 20,
'l5' : 21,
'l6' : 22,
'l7' : 23,
# inputs
'i5' : 29,
'i4' : 28,
'i3' : 27,
'i2' : 26,
'i1' : 25,
'i0' : 24,
# RA - 8
'i7' : 31,
# zero
'g0' : 0,
# stack pointer
'sp' : 14,
'o6' : 14,
# frame pointer
'fp' : 30,
'i6' : 30
}
legacy_fregs = dict((('f%d' % i, i) for i in xrange(0,32)))
######################
# GCC Allocation order
######################
# /* This is the order in which to allocate registers normally.
# We put %f0-%f7 last among the float registers, so as to make it more
# likely that a pseudo-register which dies in the float return register
# area will get allocated to the float return register, thus saving a move
# instruction at the end of the function.
# Similarly for integer return value registers.
# We know in this case that we will not end up with a leaf function.
# The register allocator is given the global and out registers first
# because these registers are call clobbered and thus less useful to
# global register allocation.
# Next we list the local and in registers. They are not call clobbered
# and thus very useful for global register allocation. We list the input
# registers before the locals so that it is more likely the incoming
# arguments received in those registers can just stay there and not be
# reloaded. */
# #define REG_ALLOC_ORDER \
# { 1, 2, 3, 4, 5, 6, 7, /* %g1-%g7 */ \
# 13, 12, 11, 10, 9, 8, /* %o5-%o0 */ \
# 15, /* %o7 */ \
# 16, 17, 18, 19, 20, 21, 22, 23, /* %l0-%l7 */ \
# 29, 28, 27, 26, 25, 24, 31, /* %i5-%i0,%i7 */\
# 40, 41, 42, 43, 44, 45, 46, 47, /* %f8-%f15 */ \
# 48, 49, 50, 51, 52, 53, 54, 55, /* %f16-%f23 */ \
# 56, 57, 58, 59, 60, 61, 62, 63, /* %f24-%f31 */ \
# 64, 65, 66, 67, 68, 69, 70, 71, /* %f32-%f39 */ \
# 72, 73, 74, 75, 76, 77, 78, 79, /* %f40-%f47 */ \
# 80, 81, 82, 83, 84, 85, 86, 87, /* %f48-%f55 */ \
# 88, 89, 90, 91, 92, 93, 94, 95, /* %f56-%f63 */ \
# 39, 38, 37, 36, 35, 34, 33, 32, /* %f7-%f0 */ \
# 96, 97, 98, 99, /* %fcc0-3 */ \
# 100, 0, 14, 30, 101} /* %icc, %g0, %o6, %i6, %sfp */
#
#
# Default reg usage:
# /* 1 for registers that have pervasive standard uses
# and are not available for the register allocator.
# On non-v9 systems:
# g1 is free to use as temporary.
# g2-g4 are reserved for applications. Gcc normally uses them as
# temporaries, but this can be disabled via the -mno-app-regs option.
# g5 through g7 are reserved for the operating system.
# On v9 systems:
# g1,g5 are free to use as temporaries, and are free to use between calls
# if the call is to an external function via the PLT.
# g4 is free to use as a temporary in the non-embedded case.
# g4 is reserved in the embedded case.
# g2-g3 are reserved for applications. Gcc normally uses them as
# temporaries, but this can be disabled via the -mno-app-regs option.
# g6-g7 are reserved for the operating system (or application in
# embedded case).
# ??? Register 1 is used as a temporary by the 64 bit sethi pattern, so must
# currently be a fixed register until this pattern is rewritten.
# Register 1 is also used when restoring call-preserved registers in large
# stack frames.
# */
# #define FIXED_REGISTERS \
# {1, 0, 2, 2, 2, 2, 1, 1, \
# 0, 0, 0, 0, 0, 0, 1, 0, \
# 0, 0, 0, 0, 0, 0, 0, 0, \
# 0, 0, 0, 0, 0, 0, 1, 1, \
# \
# 0, 0, 0, 0, 0, 0, 0, 0, \
# 0, 0, 0, 0, 0, 0, 0, 0, \
# 0, 0, 0, 0, 0, 0, 0, 0, \
# 0, 0, 0, 0, 0, 0, 0, 0, \
# \
# 0, 0, 0, 0, 0, 0, 0, 0, \
# 0, 0, 0, 0, 0, 0, 0, 0, \
# 0, 0, 0, 0, 0, 0, 0, 0, \
# 0, 0, 0, 0, 0, 0, 0, 0, \
# \
# 0, 0, 0, 0, 0, 1}
# /* 1 for registers not available across function calls.
# These must include the FIXED_REGISTERS and also any
# registers that can be used without being saved.
# The latter must include the registers where values are returned
# and the register where structure-value addresses are passed.
# Aside from that, you can include as many other registers as you like. */
# #define CALL_USED_REGISTERS \
# {1, 1, 1, 1, 1, 1, 1, 1, \
# 1, 1, 1, 1, 1, 1, 1, 1, \
# 0, 0, 0, 0, 0, 0, 0, 0, \
# 0, 0, 0, 0, 0, 0, 1, 1, \
# \
# 1, 1, 1, 1, 1, 1, 1, 1, \
# 1, 1, 1, 1, 1, 1, 1, 1, \
# 1, 1, 1, 1, 1, 1, 1, 1, \
# 1, 1, 1, 1, 1, 1, 1, 1, \
# \
# 1, 1, 1, 1, 1, 1, 1, 1, \
# 1, 1, 1, 1, 1, 1, 1, 1, \
# 1, 1, 1, 1, 1, 1, 1, 1, \
# 1, 1, 1, 1, 1, 1, 1, 1, \
# \
# 1, 1, 1, 1, 1, 1}
# REMINDER:
# SPARC register semantics
# %g0 (r00) always zero
# %g1 (r01) [1] temporary value
# %g2 (r02) [2] global 2
# global %g3 (r03) [2] global 3
# %g4 (r04) [2] global 4
# %g5 (r05) reserved for SPARC ABI
# %g6 (r06) reserved for SPARC ABI
# %g7 (r07) reserved for SPARC ABI
# %o0 (r08) [3] outgoing parameter 0 / return value from callee
# %o1 (r09) [1] outgoing parameter 1
# %o2 (r10) [1] outgoing parameter 2
# out %o3 (r11) [1] outgoing parameter 3
# %o4 (r12) [1] outgoing parameter 4
# %o5 (r13) [1] outgoing parameter 5
# %sp, %o6 (r14) [1] stack pointer
# %o7 (r15) [1] temporary value / address of CALL instruction
# %l0 (r16) [3] local 0
# %l1 (r17) [3] local 1
# %l2 (r18) [3] local 2
# local %l3 (r19) [3] local 3
# %l4 (r20) [3] local 4
# %l5 (r21) [3] local 5
# %l6 (r22) [3] local 6
# %l7 (r23) [3] local 7
# %i0 (r24) [3] incoming parameter 0 / return value to caller
# %i1 (r25) [3] incoming parameter 1
# %i2 (r26) [3] incoming parameter 2
# in %i3 (r27) [3] incoming parameter 3
# %i4 (r28) [3] incoming parameter 4
# %i5 (r29) [3] incoming parameter 5
# %fp, %i6 (r30) [3] frame pointer
# %i7 (r31) [3] return address - 8
# Notes:
# [1] assumed by caller to be destroyed (volatile) across a procedure call
# [2] should not be used by SPARC ABI library code
# [3] assumed by caller to be preserved across a procedure call
# /* This is the order in which to allocate registers for
# leaf functions. If all registers can fit in the global and
# output registers, then we have the possibility of having a leaf
# function.
# The macro actually mentioned the input registers first,
# because they get renumbered into the output registers once
# we know we really do have a leaf function.
# To be more precise, this register allocation order is used
# when %o7 is found to not be clobbered right before register
# allocation. Normally, the reason %o7 would be clobbered is
# due to a call which could not be transformed into a sibling
# call.
# As a consequence, it is possible to use the leaf register
# allocation order and not end up with a leaf function. We will
# not get suboptimal register allocation in that case because by
# definition of being potentially leaf, there were no function
# calls. Therefore, allocation order within the local register
# window is not critical like it is when we do have function calls. */
# #define REG_LEAF_ALLOC_ORDER \
# { 1, 2, 3, 4, 5, 6, 7, /* %g1-%g7 */ \
# 29, 28, 27, 26, 25, 24, /* %i5-%i0 */ \
# 15, /* %o7 */ \
# 13, 12, 11, 10, 9, 8, /* %o5-%o0 */ \
# 16, 17, 18, 19, 20, 21, 22, 23, /* %l0-%l7 */ \
# 40, 41, 42, 43, 44, 45, 46, 47, /* %f8-%f15 */ \
# 48, 49, 50, 51, 52, 53, 54, 55, /* %f16-%f23 */ \
# 56, 57, 58, 59, 60, 61, 62, 63, /* %f24-%f31 */ \
# 64, 65, 66, 67, 68, 69, 70, 71, /* %f32-%f39 */ \
# 72, 73, 74, 75, 76, 77, 78, 79, /* %f40-%f47 */ \
# 80, 81, 82, 83, 84, 85, 86, 87, /* %f48-%f55 */ \
# 88, 89, 90, 91, 92, 93, 94, 95, /* %f56-%f63 */ \
# 39, 38, 37, 36, 35, 34, 33, 32, /* %f7-%f0 */ \
# 96, 97, 98, 99, /* %fcc0-3 */ \
# 100, 0, 14, 30, 31, 101} /* %icc, %g0, %o6, %i6, %i7, %sfp */
reg_mapping = {
# MT Globals
'g0' : 'l7',
'g1' : 'l6',
'g2' : 'l5',
'g3' : 'l4',
'g4' : 'l3',
'g5' : 'l2',
'g6' : 'l1',
'g7' : 'l0',
'gf0' : 'f31',
'gf1' : 'f30',
'gf2' : 'f29',
'gf3' : 'f28',
'gf4' : 'f27',
'gf5' : 'f26',
'gf6' : 'f25',
'gf7' : 'f24',
# MT Shareds
's0' : 'l0',
'd0' : 'l1',
's1' : 'l2',
'd1' : 'l3',
's2' : 'l4',
'd2' : 'l5',
's3' : 'l6',
'd3' : 'l7',
'sf0' : 'f24',
'df0' : 'f25',
'sf1' : 'f26',
'df1' : 'f27',
'sf2' : 'f28',
'df2' : 'f29',
'sf3' : 'f30',
'df3' : 'f31',
# Special locals
'l0' : 'g1', # temp phy 1
'l1' : 'g2', # app reg phy 2
'l2' : 'g3', # app reg phy 3
'l3' : 'g4', # app reg phy 4
'l4' : 'g5', # OS reg phy 5
'l5' : 'g6', # OS reg phy 6
'l6' : 'g7', # OS reg phy 7
'l7' : 'o0', # phy 8
'l8' : 'o1', # phy 9
'l9' : 'o2', # phy 10
'l10' : 'o3', # phy 11
'l11' : 'o4', # phy 12
'l12' : 'o5', # phy 13
'l13' : 'sp', # o6 phy 14
'l14' : 'o7', # CALL HWIRED phy 15
'l15' : 'i0', # phy 16
'l16' : 'i1', # phy 17
'l17' : 'i2', # phy 18
'l18' : 'i3', # phy 19
'l19' : 'i4', # phy 20
'l20' : 'i5', # phy 21
'l21' : 'fp', # i6 phy 22
'l22' : 'i7', # RA-8 phy 23
'l31' : 'g0', # ZERO
'lf0' : 'f0',
'lf1' : 'f1',
'lf2' : 'f2',
'lf3' : 'f3',
'lf4' : 'f4',
'lf5' : 'f5',
'lf6' : 'f6',
'lf7' : 'f7',
'lf8' : 'f8',
'lf9' : 'f9',
'lf10' : 'f10',
'lf11' : 'f11',
'lf12' : 'f12',
'lf13' : 'f13',
'lf14' : 'f14',
'lf15' : 'f15',
'lf16' : 'f16',
'lf17' : 'f17',
'lf18' : 'f18',
'lf19' : 'f19',
'lf20' : 'f20',
'lf21' : 'f21',
'lf22' : 'f22',
'lf23' : 'f23'
}
reg_aliases = {
'tlsp' : 'l13',
'fp' : 'l21',
'idx_init' : 'l0',
'extra_init' : 'l1',
'zero' : 'l31',
'ra' : 'l22',
'ra_leaf' : 'l14',
'callreg' : 'l14'
}
## FIRST IDEA FOR MAPPING
## BASED ON GCC ALLOC ORDER
## -> problem: save/restore uses std/ldd,
## requires "i" locals to lie next to each other
## at even-numbered register slots.
# 'l7' : 'o5',
# 'l8' : 'o4',
# 'l9' : 'o3',
# 'l10' : 'o2',
# 'l11' : 'o1',
# 'l12' : 'o0',
# 'l13' : 'i5',
# 'l14' : 'i4',
# 'l15' : 'o7', # HARD-WIRED INTO CALL: l15 must assemble to %r15 = %o7!
# 'l16' : 'i3',
# 'l17' : 'i2',
# 'l18' : 'i1',
# 'l19' : 'i0',
# 'l20' : 'i7', # RA - 8
# 'l21' : 'sp',
# 'l22' : 'fp',
# def __init__(self):
# import sys
# print >>sys.stderr, "---snip here---\n#! /usr/bin/sed -f"
# rm = self.reg_mapping
# leg = set([rm[k] for k in rm])
# m = {}
# for r in leg:
# m[r] = [k for k in rm if rm[k] == r]
# if m[r][0].startswith('l'):
# m[r][0] += '_' + r
# for k,v in m.items():
# print >>sys.stderr, 's/%%%s/%%t%s/g;' % (k,''.join(v))
# print >>sys.stderr,'s/%tl31/%g0/g;'
# print >>sys.stderr, "---snip here---"
def post_init_regmagic(self, rm):
# all virtual register names, except SP
rm.allregs = set(['$l%d' % x for x in xrange(31 - self.iargregs) if x not in (14,)] + \
['$lf%d' % x for x in xrange(31 - self.fargregs)] + \
['$g%d' % x for x in xrange(self.iargregs)] + \
['$gf%d' % x for x in xrange(self.fargregs)] + \
['$d%d' % x for x in xrange(self.iargregs / 2)] + \
['$df%d' % x for x in xrange(self.fargregs / 2)] + \
['$%d' % x for x in xrange(1,32)])
### Register lists for the compile commands ###
rm.fixed_registers = []
for (v, l) in [("f", rm._freg_inv), ("",rm._reg_inv)]:
fixed = set()
for rl in l:
for r in rl:
if r['cat'] in 'sdg':
fixed.add(r['legnr'])
for r in fixed:
rm.fixed_registers.append('%%%s%d' % (v, r))
### Retain the numeric aliases for all registers
for i in xrange(0, 32):
RegDefs.legacy_regs['r%d' % i] = i
RegDefs.legacy_regs['f%d' % i] = i
regmagic = RegMagic(RegDefs())
__all__ = ['regmagic']
| gpl-3.0 | 2,093,495,678,223,844,000 | 34.981818 | 95 | 0.417256 | false |
google/rekall | rekall-core/rekall/plugins/addrspaces/intel.py | 1 | 25245 | # Rekall Memory Forensics
#
# Copyright 2015 Google Inc. All Rights Reserved.
# Authors:
# Michael Cohen <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""Implement the base translating address spaces.
This is a complete rewrite of the previous translating address spaces
implemented in Rekall. The goals are:
1) To make a system that is provable and traceable - i.e. It should be possible
to trace the address translation process step by step as it is performed by
Rekall so we can verify how it is implemented.
2) The system must be very fast at the same time. Address translation can be an
expensive operation so we need to ensure we are very quick.
3) The system must be extensible and modifiable. Address translation is a
complex algorithm and varies a lot between operating systems and
architectures. Therefore this implementation is generic and tries to
encapsulate all the nuances of address translation in the OS specific
implementation itself.
How does it work?
-----------------
There are a few main entry points into the translating Address Spaces:
1) vtop(): (Virtual to Physical) This method accepts a virtual address and
translates it to the physical address in the base address space. This is the
workhorse method. It is designed to be very fast but does not give too much
information about how the translation was performed.
2) describe_vtop(): This is the describing sister method of vtop(). It returns a
list of AddressTranslationDescriptor() objects. Each of these describes a
specific step in the translation process. If one was to render each step,
this outlines exactly what happened in each step and how the address is
derived. If the address space translation process succeeds the last
descriptor will be a PhysicalAddressDescriptor() instance which describes the
final physical address. Note that the translation process may request files
to be mapped into the physical address space, so the
PhysicalAddressDescriptor() will point at mapped files (i.e. it may not
actually refer to the physical memory image).
3) get_mappings(): This method generates Run instances which encapsulate each
region available in the virtual address space.
The vtop() method and the describe_vtop() method are very similar since they
implement the same algorithms. However, we do not want to implement the same
thing twice because that leads to maintenance problems and subtle
bugs. Therefore vtop() is simply a wrapper around describe_vtop(). To achieve
the required performance vtop() simply looks for the PhysicalAddressDescriptor()
and returns it. This is essentially a noop for any of the other descriptors and
therefore maintains the same speed benefits.
"""
from future import standard_library
standard_library.install_aliases()
from builtins import range
from past.builtins import basestring
from builtins import object
import io
import struct
from rekall import addrspace
from rekall import config
from rekall import obj
from rekall.ui import text as text_renderer
from rekall_lib import utils
config.DeclareOption(
"dtb", group="Autodetection Overrides",
type="IntParser", help="The DTB physical address.")
config.DeclareOption(
"kernel_slide", group="Autodetection Overrides",
type="IntParser", help="Shift for Linux KASLR (see find_kaslr plugin).")
PAGE_SHIFT = 12
PAGE_MASK = ~ 0xFFF
class AddressTranslationDescriptor(object):
"""A descriptor of a step in the translation process.
This is a class because there may be OS specific steps in the address
translation.
"""
object_name = None
def __init__(self, object_name=None, object_value=None, object_address=None,
session=None):
if object_name:
self.object_name = object_name
self.object_value = object_value
self.object_address = object_address
self.session = session
def render(self, renderer):
"""Render this step."""
if self.object_address is not None:
# Properly format physical addresses.
renderer.format(
"{0}@ {1} = {2:addr}\n",
self.object_name,
self.session.physical_address_space.describe(
self.object_address),
self.object_value or 0)
elif self.object_value:
renderer.format("{0} {1}\n",
self.object_name,
self.session.physical_address_space.describe(
self.object_value))
else:
renderer.format("{0}\n", self.object_name)
class CommentDescriptor(object):
def __init__(self, comment, *args, **kwargs):
self.session = kwargs.pop("session", None)
self.comment = comment
self.args = args
def render(self, renderer):
renderer.format(self.comment, *self.args)
class InvalidAddress(CommentDescriptor):
"""Mark an invalid address.
This should be the last descriptor in the collection sequence.
"""
class DescriptorCollection(object):
def __init__(self, session):
self.session = session
self.descriptors = []
def add(self, descriptor_cls, *args, **kwargs):
self.descriptors.append((descriptor_cls, args, kwargs))
def __iter__(self):
for cls, args, kwargs in self.descriptors:
kwargs["session"] = self.session
yield cls(*args, **kwargs)
def __getitem__(self, item):
"""Get a particular descriptor.
Descriptors can be requested by name (e.g. VirtualAddressDescriptor) or
index (e.g. -1).
"""
if isinstance(item, basestring):
for descriptor_cls, args, kwargs in self.descriptors:
if descriptor_cls.__name__ == item:
kwargs["session"] = self.session
return descriptor_cls(*args, **kwargs)
return obj.NoneObject("No descriptor found.")
try:
cls, args, kwargs = self.descriptors[item]
kwargs["session"] = self.session
return cls(*args, **kwargs)
except KeyError:
return obj.NoneObject("No descriptor found.")
def __str__(self):
"""Render ourselves into a string."""
fd = io.StringIO()
ui_renderer = text_renderer.TextRenderer(
session=self.session, fd=fd)
with ui_renderer.start():
for descriptor in self:
descriptor.render(ui_renderer)
return fd.getvalue()
class PhysicalAddressDescriptorCollector(DescriptorCollection):
"""A descriptor collector which only cares about PhysicalAddressDescriptor.
This allows us to reuse all the code in describing the address space
resolution and cheaply implement the standard vtop() method.
"""
physical_address = None
def add(self, descriptor_cls, *_, **kwargs):
if descriptor_cls is PhysicalAddressDescriptor:
address = kwargs.pop("address")
self.physical_address = address
class PhysicalAddressDescriptor(AddressTranslationDescriptor):
"""A descriptor to mark the final physical address resolution."""
def __init__(self, address=0, session=None):
super(PhysicalAddressDescriptor, self).__init__(session=session)
self.address = address
def render(self, renderer):
renderer.format(
"Physical Address {0}\n",
self.session.physical_address_space.describe(self.address))
class VirtualAddressDescriptor(AddressTranslationDescriptor):
"""Mark a virtual address."""
def __init__(self, address=0, dtb=0, session=None):
super(VirtualAddressDescriptor, self).__init__(session=session)
self.dtb = dtb
self.address = address
def render(self, renderer):
renderer.format(
"Virtual Address {0:style=address} (DTB {1:style=address})\n",
self.address, self.dtb)
class IA32PagedMemory(addrspace.PagedReader):
"""Standard x86 32 bit non PAE address space.
Provides an address space for IA32 paged memory, aka the x86
architecture, without Physical Address Extensions (PAE). Allows
callers to map virtual address to offsets in physical memory.
Create a new IA32 address space without PAE to sit on top of
the base address space and a Directory Table Base (CR3 value)
of 'dtb'.
Comments in this class mostly come from the Intel(R) 64 and IA-32
Architectures Software Developer's Manual Volume 3A: System Programming
Guide, Part 1, revision 031, pages 4-8 to 4-15. This book is available
for free at http://www.intel.com/products/processor/manuals/index.htm.
Similar information is also available from Advanced Micro Devices (AMD)
at http://support.amd.com/us/Processor_TechDocs/24593.pdf.
This address space implements paging as described in section "4.3 32-BIT
PAGING" of the above book.
This is simplified from previous versions of rekall, by removing caching
and automated DTB searching (which is now performed by specific plugins in
an OS specific way).
"""
order = 70
valid_mask = 1
def __init__(self, name=None, dtb=None, **kwargs):
"""Instantiate an Intel 32 bit Address space over the layered AS.
Args:
dtb: The dtb address.
"""
super(IA32PagedMemory, self).__init__(**kwargs)
# We must be stacked on someone else:
if self.base is None:
raise TypeError("No base Address Space")
# If the underlying address space already knows about the dtb we use it.
# Allow the dtb to be specified in the session.
self.dtb = dtb or self.session.GetParameter("dtb")
if self.dtb is None:
raise TypeError("No valid DTB specified. Try the find_dtb"
" plugin to search for the dtb.")
self.name = (name or 'Kernel AS') + "@%#x" % self.dtb
# Use a TLB to make this faster.
self._tlb = addrspace.TranslationLookasideBuffer(1000)
self._cache = utils.FastStore(100)
# Some important masks we can use.
# Is the pagesize flags on?
self.page_size_mask = (1 << 7)
def vtop(self, vaddr):
"""Translates virtual addresses into physical offsets.
The function should return either None (no valid mapping)
or the offset in physical memory where the address maps.
This function is simply a wrapper around describe_vtop() which does all
the hard work. You probably never need to override it.
"""
vaddr = int(vaddr)
try:
return self._tlb.Get(vaddr)
except KeyError:
# The TLB accepts only page aligned virtual addresses.
aligned_vaddr = vaddr & self.PAGE_MASK
collection = self.describe_vtop(
aligned_vaddr, PhysicalAddressDescriptorCollector(self.session))
self._tlb.Put(aligned_vaddr, collection.physical_address)
return self._tlb.Get(vaddr)
def vtop_run(self, addr):
phys_addr = self.vtop(addr)
if phys_addr is not None:
return addrspace.Run(
start=addr,
end=addr,
file_offset=phys_addr,
address_space=self.base)
def describe_vtop(self, vaddr, collection=None):
"""A generator of descriptive statements about stages in translation.
While the regular vtop is called very frequently and therefore must be
fast, this variation is used to examine the translation process in
detail. We therefore emit data about each step of the way - potentially
re-implementing the vtop() method above, but yielding intermediate
results.
Args:
vaddr: The address to translate.
collection: An instance of DescriptorCollection() which will receive
the address descriptors. If not provided we create a new collection.
Returns
A list of AddressTranslationDescriptor() instances.
"""
if collection is None:
collection = DescriptorCollection(self.session)
# Bits 31:12 are from CR3.
# Bits 11:2 are bits 31:22 of the linear address.
pde_addr = ((self.dtb & 0xfffff000) |
((vaddr & 0xffc00000) >> 20))
pde_value = self.read_pte(pde_addr, collection=collection)
collection.add(AddressTranslationDescriptor,
object_name="pde", object_value=pde_value,
object_address=pde_addr)
if not pde_value & self.valid_mask:
collection.add(InvalidAddress, "Invalid PDE")
return collection
# Large page PDE.
if pde_value & self.page_size_mask:
# Bits 31:22 are bits 31:22 of the PDE
# Bits 21:0 are from the original linear address
physical_address = (pde_value & 0xffc00000) | (vaddr & 0x3fffff)
collection.add(CommentDescriptor, "Large page mapped\n")
collection.add(PhysicalAddressDescriptor, address=physical_address)
return collection
# Bits 31:12 are from the PDE
# Bits 11:2 are bits 21:12 of the linear address
pte_addr = (pde_value & 0xfffff000) | ((vaddr & 0x3ff000) >> 10)
pte_value = self.read_pte(pte_addr, collection=collection)
self.describe_pte(collection, pte_addr, pte_value, vaddr)
return collection
def describe_pte(self, collection, pte_addr, pte_value, vaddr):
collection.add(AddressTranslationDescriptor,
object_name="pte", object_value=pte_value,
object_address=pte_addr)
if pte_value & self.valid_mask:
# Bits 31:12 are from the PTE
# Bits 11:0 are from the original linear address
phys_addr = ((pte_value & 0xfffff000) |
(vaddr & 0xfff))
collection.add(PhysicalAddressDescriptor, address=phys_addr)
else:
collection.add(InvalidAddress, "Invalid PTE")
return collection
def read_pte(self, addr, collection=None):
"""Read an unsigned 32-bit integer from physical memory.
Note this always succeeds - reads outside mapped addresses in the image
will simply return 0.
"""
_ = collection
string = self.base.read(addr, 4)
return struct.unpack('<I', string)[0]
def get_mappings(self, start=0, end=2**64):
"""Enumerate all valid memory ranges.
Yields:
tuples of (starting virtual address, size) for valid the memory
ranges.
"""
# Pages that hold PDEs and PTEs are 0x1000 bytes each.
# Each PDE and PTE is four bytes. Thus there are 0x1000 / 4 = 0x400
# PDEs and PTEs we must test
for pde in range(0, 0x400):
vaddr = pde << 22
if vaddr > end:
return
next_vaddr = (pde + 1) << 22
if start > next_vaddr:
continue
pde_addr = ((self.dtb & 0xfffff000) |
(vaddr & 0xffc00000) >> 20)
pde_value = self.read_pte(pde_addr)
if not pde_value & self.valid_mask:
continue
# PDE is for a large page.
if pde_value & self.page_size_mask:
yield addrspace.Run(
start=vaddr,
end=vaddr + 0x400000,
file_offset=(pde_value & 0xffc00000) | (vaddr & 0x3fffff),
address_space=self.base)
continue
# This reads the entire PTE table at once - On
# windows where IO is extremely expensive, its
# about 10 times more efficient than reading it
# one value at the time - and this loop is HOT!
pte_table_addr = ((pde_value & 0xfffff000) |
((vaddr & 0x3ff000) >> 10))
data = self.base.read(pte_table_addr, 4 * 0x400)
pte_table = struct.unpack("<" + "I" * 0x400, data)
tmp1 = vaddr
for i, pte_value in enumerate(pte_table):
vaddr = tmp1 | i << 12
if vaddr > end:
return
next_vaddr = tmp1 | ((i + 1) << 12)
if start > next_vaddr:
continue
if pte_value & self.valid_mask:
yield addrspace.Run(
start=vaddr,
end=vaddr + 0x1000,
file_offset=(pte_value & 0xfffff000) | (vaddr & 0xfff),
address_space=self.base)
def __str__(self):
return u"%s@0x%08X (%s)" % (self.__class__.__name__, self.dtb, self.name)
def __eq__(self, other):
return (super(IA32PagedMemory, self).__eq__(other) and
self.dtb == other.dtb and self.base == other.base)
def end(self):
return (2 ** 32) - 1
class IA32PagedMemoryPae(IA32PagedMemory):
"""Standard x86 32 bit PAE address space.
Provides an address space for IA32 paged memory, aka the x86
architecture, with Physical Address Extensions (PAE) enabled. Allows
callers to map virtual address to offsets in physical memory.
Comments in this class mostly come from the Intel(R) 64 and IA-32
Architectures Software Developer's Manual Volume 3A: System Programming
Guide, Part 1, revision 031, pages 4-15 to 4-23. This book is available
for free at http://www.intel.com/products/processor/manuals/index.htm.
Similar information is also available from Advanced Micro Devices (AMD)
at http://support.amd.com/us/Processor_TechDocs/24593.pdf.
This implements the translation described in Section "4.4.2 Linear-Address
Translation with PAE Paging".
"""
order = 80
__pae = True
def describe_vtop(self, vaddr, collection=None):
"""Explain how a specific address was translated.
Returns:
a list of AddressTranslationDescriptor() instances.
"""
if collection is None:
collection = DescriptorCollection(self.session)
# Bits 31:5 come from CR3
# Bits 4:3 come from bits 31:30 of the original linear address
pdpte_addr = ((self.dtb & 0xffffffe0) |
((vaddr & 0xC0000000) >> 27))
pdpte_value = self.read_pte(pdpte_addr)
collection.add(AddressTranslationDescriptor,
object_name="pdpte", object_value=pdpte_value,
object_address=pdpte_addr)
if not pdpte_value & self.valid_mask:
collection.add(InvalidAddress, "Invalid PDPTE")
return collection
# Bits 51:12 are from the PDPTE
# Bits 11:3 are bits 29:21 of the linear address
pde_addr = (pdpte_value & 0xfffff000) | ((vaddr & 0x3fe00000) >> 18)
self._describe_pde(collection, pde_addr, vaddr)
return collection
def _describe_pde(self, collection, pde_addr, vaddr):
pde_value = self.read_pte(pde_addr)
collection.add(AddressTranslationDescriptor,
object_name="pde", object_value=pde_value,
object_address=pde_addr)
if not pde_value & self.valid_mask:
collection.add(InvalidAddress, "Invalid PDE")
# Large page PDE accesses 2mb region.
elif pde_value & self.page_size_mask:
# Bits 51:21 are from the PDE
# Bits 20:0 are from the original linear address
physical_address = ((pde_value & 0xfffffffe00000) |
(vaddr & 0x1fffff))
collection.add(CommentDescriptor, "Large page mapped\n")
collection.add(PhysicalAddressDescriptor, address=physical_address)
else:
# Bits 51:12 are from the PDE
# Bits 11:3 are bits 20:12 of the original linear address
pte_addr = (pde_value & 0xffffffffff000) | ((vaddr & 0x1ff000) >> 9)
pte_value = self.read_pte(pte_addr)
self.describe_pte(collection, pte_addr, pte_value, vaddr)
def describe_pte(self, collection, pte_addr, pte_value, vaddr):
collection.add(AddressTranslationDescriptor,
object_name="pte", object_value=pte_value,
object_address=pte_addr)
if pte_value & self.valid_mask:
# Bits 51:12 are from the PTE
# Bits 11:0 are from the original linear address
physical_address = (pte_value & 0xffffffffff000) | (vaddr & 0xfff)
collection.add(PhysicalAddressDescriptor, address=physical_address)
else:
collection.add(InvalidAddress, "Invalid PTE\n")
return collection
def read_pte(self, addr, collection=None):
'''
Returns an unsigned 64-bit integer from the address addr in
physical memory. If unable to read from that location, returns None.
'''
try:
return self._cache.Get(addr)
except KeyError:
string = self.base.read(addr, 8)
result = struct.unpack('<Q', string)[0]
self._cache.Put(addr, result)
return result
def get_mappings(self, start=0, end=2**64):
"""A generator of address, length tuple for all valid memory regions."""
# Pages that hold PDEs and PTEs are 0x1000 bytes each.
# Each PDE and PTE is eight bytes. Thus there are 0x1000 / 8 = 0x200
# PDEs and PTEs we must test.
for pdpte_index in range(0, 4):
vaddr = pdpte_index << 30
if vaddr > end:
return
next_vaddr = (pdpte_index + 1) << 30
if start >= next_vaddr:
continue
# Bits 31:5 come from CR3
# Bits 4:3 come from bits 31:30 of the original linear address
pdpte_addr = (self.dtb & 0xffffffe0) | ((vaddr & 0xc0000000) >> 27)
pdpte_value = self.read_pte(pdpte_addr)
if not pdpte_value & self.valid_mask:
continue
tmp1 = vaddr
for pde_index in range(0, 0x200):
vaddr = tmp1 | (pde_index << 21)
if vaddr > end:
return
next_vaddr = tmp1 | ((pde_index + 1) << 21)
if start >= next_vaddr:
continue
# Bits 51:12 are from the PDPTE
# Bits 11:3 are bits 29:21 of the linear address
pde_addr = ((pdpte_value & 0xffffffffff000) |
((vaddr & 0x3fe00000) >> 18))
pde_value = self.read_pte(pde_addr)
if not pde_value & self.valid_mask:
continue
if pde_value & self.page_size_mask:
yield addrspace.Run(
start=vaddr,
end=vaddr+0x200000,
file_offset=(pde_value & 0xfffffffe00000) | (
vaddr & 0x1fffff),
address_space=self.base)
continue
# This reads the entire PTE table at once - On
# windows where IO is extremely expensive, its
# about 10 times more efficient than reading it
# one value at the time - and this loop is HOT!
pte_table_addr = ((pde_value & 0xffffffffff000) |
((vaddr & 0x1ff000) >> 9))
data = self.base.read(pte_table_addr, 8 * 0x200)
pte_table = struct.unpack("<" + "Q" * 0x200, data)
tmp2 = vaddr
for i, pte_value in enumerate(pte_table):
if pte_value & self.valid_mask:
vaddr = tmp2 | i << 12
if vaddr > end:
return
next_vaddr = tmp2 | (i + 1) << 12
if start >= next_vaddr:
continue
yield addrspace.Run(
start=vaddr,
end=vaddr+0x1000,
file_offset=((pte_value & 0xffffffffff000) |
(vaddr & 0xfff)),
address_space=self.base)
| gpl-2.0 | 3,458,658,243,991,293,000 | 36.905405 | 81 | 0.605942 | false |
MeGotsThis/BotGotsThis | pkg/channel/library.py | 1 | 2842 | import asyncio
import bot
from bot import utils
from typing import List, Optional, Union # noqa: F401
from lib.data import Send
from lib.data.message import Message
from lib.database import DatabaseMain
async def come(channel: str,
send: Send) -> bool:
bannedWithReason: Optional[str]
priority: Union[float, int]
db: DatabaseMain
async with DatabaseMain.acquire() as db:
bannedWithReason = await db.isChannelBannedReason(channel)
if bannedWithReason is not None:
send(f'Chat {channel} is banned from joining')
return True
priority = await db.getAutoJoinsPriority(channel)
joinResult: bool = utils.joinChannel(channel, priority)
if joinResult:
send(f'Joining {channel}')
else:
send(f'I am already in {channel}')
return True
async def leave(channel: str,
send: Send) -> bool:
if channel == bot.config.botnick:
return False
send(f'Bye {channel}')
await asyncio.sleep(1.0)
utils.partChannel(channel)
return True
async def auto_join(channel: str,
send: Send,
message: Message) -> bool:
db: DatabaseMain
async with DatabaseMain.acquire() as db:
bannedWithReason: Optional[str]
bannedWithReason = await db.isChannelBannedReason(channel)
if bannedWithReason is not None:
send(f'Chat {channel} is banned from joining')
return True
if len(message) >= 2:
removeMsgs: List[str] = ['0', 'false', 'no', 'remove', 'rem',
'delete', 'del', 'leave', 'part']
if message.lower[1] in removeMsgs:
return await auto_join_delete(db, channel, send)
return await auto_join_add(db, channel, send)
async def auto_join_add(db: DatabaseMain,
channel: str,
send: Send) -> bool:
result: bool = await db.saveAutoJoin(channel, 0)
priority: Union[int, float] = await db.getAutoJoinsPriority(channel)
wasInChat: bool = not utils.joinChannel(channel, priority)
if result and not wasInChat:
send(f'''\
Auto join for {channel} is now enabled and joined {channel} chat''')
elif not wasInChat:
send(f'''\
Auto join for {channel} is already enabled but now joined {channel} chat''')
else:
send(f'''\
Auto join for {channel} is already enabled and already in chat''')
return True
async def auto_join_delete(db: DatabaseMain,
channel: str,
send: Send) -> bool:
result: bool = await db.discardAutoJoin(channel)
if result:
send(f'Auto join for {channel} is now disabled')
else:
send(f'Auto join for {channel} was never enabled')
return True
| gpl-3.0 | -3,594,922,672,161,938,000 | 32.023256 | 76 | 0.611268 | false |
gnina/scripts | generate_counterexample_typeslines.py | 1 | 11114 | #!/usr/bin/env python3
'''
This script will generate the lines for a new types file with the iterative poses generated from counterexample_generation_jobs.py
!!WARNING!!
Part of this process is to determine which newly generated poses are NOT REDUNDANT with the previously generated ones.
This requires an O(n^2) calculation to compute the RMSD between every pair of poses...
Ergo, depending on the number of poses in a given pocket, this calculation could take a very long time.
This script also works on all ligands present in the pocket, so there is the potential for multiple O(n^2) calculations to take place.
We have done our best to avoid needless calculations, but this is why we generate the lines for each pocket independently
ASSUMPTIONS:
i) Poses with <2 RMSD to the crystal pose will be labeled as positive poses
ii) you have obrms installed, and can run it from your commandline
iii) the jobfile provided as input contains the full PATH to the files specified.
iv) the gninatypes files (generated by gninatyper) for the poses in args.input have ALREADY BEEN generated.
v) The crystal ligand files are formatted PDBid_LignameLIGSUFFIX
vi) The OLD sdf file with the unique poses is named LignameOLDUNIQUESUFFIX
INPUT:
i) The path to the pocket you are working on
ii) the threshold RMSD to determine if they are the same pose
iii) the name for the txt file that contains the lines to write (will be written in the POCKET DIRECTORY)
iv) the suffix of the NEW sdf file that contains all of the unique poses
v) the commands file generated from counterexample_generation_jobs.py
vi) --OPTIONAL-- the suffix of the OLD sdf file that contains all of the unique poses
OUTPUT:
==Normal==
i) the typesfile lines to add to generate the new types file
ii) A SDF file containing all of the unique poses for a given ligand -- named LignameUNIQUE_SUFFIX
iii) a ___.sdf file which will be the working file for obrms.
'''
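# Example invocation (hypothetical paths and filenames, shown only to illustrate how
# the argparse options defined below fit together):
#   ./generate_counterexample_typeslines.py -p 1abc_pocket -r /data/pockets \
#       -i counterexample_commands.txt -o new_types_lines.txt -a pdbbind2017_affs.txt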
import argparse, re, subprocess, os, sys
import pandas as pd
from rdkit.Chem import AllChem as Chem
def check_exists(filename):
if os.path.isfile(filename) and os.path.getsize(filename)>0:
return True
else:
return False
def get_pocket_lines(filename,pocket):
'''
This function reads the lines from filename, and returns only the lines which contain pocket in them.
'''
all_lines=open(filename).readlines()
lines=[x for x in all_lines if pocket in x]
return lines
def calc_ligand_dic(lines,ligand_suffix):
'''
This function will parse the input list of lines and construct 2 dictionaries
1) ligand name -> [docked files with that ligand]
2) docked_filename -> crystal_file for that pose
'''
data={}
docked_lookup={}
for line in lines:
#1) Getting the crystal ligand file
ligfile=re.split('--autobox_ligand ',line)[1].split()[0]
#2) Getting the name of the ligand ** here we assume the ligfile is PATH/<PDBid>_<ligname><LIGSUFFIX>
ligname=ligfile.split('/')[-1].split(ligand_suffix)[0].split('_')[1]
#3) Check if ligname in data
if ligname not in data:
data[ligname]=[]
#4) grabbing the docked files
outfile=re.split('-o ',line)[1].split()[0]
#5) Adding these files to their corresponding places in the dictionary
data[ligname].append(outfile)
docked_lookup[outfile]=ligfile
return data, docked_lookup
def run_obrms(ligand_file,crystal_file):
'''
This function returns a list of rmsds of the docked ligand file to the crystal file. The list is in the order of the poses.
'''
rmsds=subprocess.check_output(f'obrms {ligand_file} {crystal_file}',shell=True)
rmsds=str(rmsds,'utf-8').rstrip().split('\n')
rmsds=[float(x.split()[-1]) for x in rmsds]
return rmsds
def get_lines_towrite(crystal_lookup,list_of_docked,affinity_lookup,crystal_suffix):
'''
This function will calculate the RMSD of every input pose, to the provided crystal pose.
returns a dictionary of lines --> 'docked pose filename':[lines to write]
'''
lines={}
for docked in list_of_docked:
#Figure out affinity.
affinity=0.0
crystal=crystal_lookup[docked]
cr_lookup=crystal.split(crystal_suffix)[0]
if cr_lookup in affinity_lookup:
affinity=affinity_lookup[cr_lookup]
print(docked,crystal)
rmsds=run_obrms(docked,crystal)
counter=0
lines[docked]=[]
for r in rmsds:
if r < 2:
label='1'
neg_aff=''
else:
label='0'
neg_aff='-'
rec_gninatypes=docked.split('rec')[0]+'rec_0.gninatypes'
lig_gninatypes=docked.replace('.sdf','_'+str(counter)+'.gninatypes')
lines[docked].append(f'{label} {neg_aff}{affinity} {r} {rec_gninatypes} {lig_gninatypes}\n')
counter+=1
return lines
def run_obrms_cross(filename):
'''
This function returns a pandas dataframe of the RMSD between every pose and every other pose, which is generated using obrms -x
'''
csv=subprocess.check_output('obrms -x '+filename,shell=True)
csv=str(csv,'utf-8').rstrip().split('\n')
data=pd.DataFrame([x.split(',')[1:] for x in csv],dtype=float)
return data
parser=argparse.ArgumentParser(description='Create lines to add to types files from counterexample generation. Assumes data file structure is ROOT/POCKET/FILES.')
parser.add_argument('-p','--pocket',type=str,required=True,help='Name of the pocket that you will be generating the lines for.')
parser.add_argument('-r','--root',type=str,required=True,help='PATH to the ROOT of the pockets.')
parser.add_argument('-i','--input',type=str,required=True,help='File that is output from counterexample_generation_jobs.py')
parser.add_argument('-cs','--crystal_suffix',default='_lig.pdb',help='Expression to glob the crystal ligand PDB. Defaults to _lig.pdb. Needs to match what was used with counterexample_generation_jobs.py')
parser.add_argument('--old_unique_suffix',type=str,default=None,help='Suffix for the unique ligand sdf file from a previous run. If set we will load that in and add to it. Default behavior is to generate it from provided input file.')
parser.add_argument('-us','--unique_suffix',type=str,default='_it1___.sdf',help='Suffix for the unique ligand sdf file for this run. Defaults to _it1___.sdf. One will be created for each ligand in the pocket.')
parser.add_argument('--unique_threshold',type=float,default=0.25,help='RMSD threshold for unique poses. I.e. poses with RMSD > thresh are considered unique. Defaults to 0.25.')
parser.add_argument('--lower_confusing_threshold',type=float,default=0.5,help='CNNscore threshold for identifying confusing good poses. Score < thresh & under 2RMSD is kept and labelled 1. 0<thresh<1. Default 0.5')
parser.add_argument('--upper_confusing_threshold',type=float,default=0.9,help='CNNscore threshold for identifying confusing poor poses. If CNNscore > thresh & over 2RMSD pose is kept and labelled 0. lower<thresh<1. Default 0.9')
parser.add_argument('-o','--outname',type=str,required=True,help='Name of the text file to write the new lines in. DO NOT WRITE THE FULL PATH!')
parser.add_argument('-a','--affinity_lookup',default='pdbbind2017_affs.txt',help='File mapping the PDBid and ligname of the ligand to its pK value. Assumes space delimited "PDBid ligname pK". Defaults to pdbbind2017_affs.txt')
args=parser.parse_args()
#Setting the myroot and root remove variable for use in the script
myroot=os.path.join(args.root,args.pocket,'')
root_remove=os.path.join(args.root,'')
#sanity check threshold
assert args.unique_threshold > 0, "Unique RMSD threshold needs to be positive"
assert 0<args.lower_confusing_threshold <1, "Lower_confusing_threshold needs to be in (0,1)"
assert args.lower_confusing_threshold<args.upper_confusing_threshold<1, "Upper_confusing_threshold needs to be in (lower_confusing_threshold,1)"
#generating our affinity lookup dictionary
affinity_lookup={}
with open(args.affinity_lookup) as infile:
for line in infile:
items=line.split()
key=items[0]+'_'+items[1]
val=items[2]
affinity_lookup[key]=val
#first we will generate the dictionary for the ligand - poses we will use.
tocheck=get_pocket_lines(args.input, args.pocket)
datadic, docked_to_crystal_lookup=calc_ligand_dic(tocheck,args.crystal_suffix)
#main loop of the script
with open(myroot+args.outname,'w') as outfile:
#loop over the ligands
for cr_name, list_o_ligs in datadic.items():
if cr_name!='iqz':
continue
#0) Make sure that the working sdf is free.
sdf_name=myroot+'___.sdf'
sdf_tmp=myroot+'___tmp.sdf'
#if this "___sdf" file already exists, we need to delete it and make a new one.
if check_exists(sdf_name):
os.remove(sdf_name)
#1) Figure out ALL of the lines to write
line_dic=get_lines_towrite(crystal_lookup=docked_to_crystal_lookup,list_of_docked=list_o_ligs,affinity_lookup=affinity_lookup,crystal_suffix=args.crystal_suffix)
#2) Set up the 'working sdf' for the obrms -x calculations, consisting of the confusing examples + any possible previously generated examples
# i) iterate over the possible lines for this ligand, keep only the confusing ones,
# and write the confusing poses into the working sdf file.
w=Chem.SDWriter(sdf_name)
keys=list(line_dic.keys())
for key in keys:
kept_lines=[]
supply=Chem.SDMolSupplier(key,sanitize=False)
for i,mol in enumerate(supply):
curr_line=line_dic[key][i]
score=mol.GetProp('CNNscore')
label=curr_line.split()[0]
#if scored "well", but was a bad pose
if float(score) > args.upper_confusing_threshold and label=='0':
kept_lines.append(curr_line)
w.write(mol)
#or if scored "poor", but was a good pose
elif float(score) < args.lower_confusing_threshold and label=='1':
kept_lines.append(curr_line)
w.write(mol)
#after the lines have been checked, we overwrite and only store the lines we kept.
line_dic[key]=kept_lines
w=None
# ii) Prepend ___.sdf with the previously existing unique poses sdf
offset=0
if args.old_unique_suffix:
print('Prepending existing similarity sdf to working sdf file')
old_sdfname=myroot+cr_name+args.old_unique_suffix
supply=Chem.SDMolSupplier(old_sdfname,sanitize=False)
offset=len(supply)
subprocess.check_call('mv %s %s'%(sdf_name,sdf_tmp),shell=True)
subprocess.check_call('cat %s %s > %s'%(old_sdfname,sdf_tmp,sdf_name),shell=True)
#3) run obrms -x working_sdf to calculate the rmsd between each pose. This is the O(n^2) calculation
unique_data=run_obrms_cross(sdf_name)
#4) determine the newly found "unique" poses
assignments={}
for (r,row) in unique_data.iterrows():
if r not in assignments:
for simi in row[row<args.unique_threshold].index:
if simi not in assignments:
assignments[simi]=r
to_remove=set([k for (k,v) in assignments.items() if k!=v])
#5) write the remaining lines for the newly found "unique" poses.
counter=offset
for key in keys:
for line in line_dic[key]:
if counter not in to_remove:
outfile.write(line.replace(root_remove,''))
counter+=1
#6) Write out the new "uniques" sdf file to allow for easier future generation
new_unique_sdfname=myroot+cr_name+args.unique_suffix
w=Chem.SDWriter(new_unique_sdfname)
supply=Chem.SDMolSupplier(sdf_name,sanitize=False)
for i,mol in enumerate(supply):
if i not in to_remove:
w.write(mol)
| bsd-3-clause | -4,488,924,076,670,344,700 | 43.103175 | 234 | 0.735739 | false |
eschendel/gnum | gnlib/scripts/generate_nt_ascii_to_ctable.py | 1 | 2926 | #!/usr/bin/env python
## Copyright (c) 2015, Eric R. Schendel.
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice, this
## list of conditions and the following disclaimer.
##
## - Redistributions in binary form must reproduce the above copyright notice,
## this list of conditions and the following disclaimer in the documentation
## and/or other materials provided with the distribution.
##
## - Neither the name of gnum nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
## DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
## FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
## DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
## SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
## CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
## OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from string import lower
output_type = 'const gn_nucleotide_t'
array_name = 'five_bit_ascii_to_nucleotide'
default_nucleotide = 'A'
four_nucleotides_to_tag = {
'A': 'GN_NT_A',
'C': 'GN_NT_C',
'G': 'GN_NT_G',
'T': 'GN_NT_T',
}
# every listed encoding generates a unique ID with their 5 least significant bits
four_nucleotides_to_upper_ascii_encoding = {
'A': {'A', 'W', 'M', 'R', 'D', 'H', 'V', 'N'},
'C': {'C', 'S', 'Y', 'B'},
'G': {'G', 'K'},
'T': {'T', 'U'},
}
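# Worked example of the 5-bit trick noted above: ord('A') = 0b1000001 and
# ord('a') = 0b1100001 both mask to 0b00001 with `& 0b11111`, so upper- and
# lower-case ASCII letters land in the same table slot, while e.g. 'T'
# (0b1010100 -> 20) and 'U' (0b1010101 -> 21) occupy distinct slots that both
# map to the same nucleotide tag.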
five_bit_ascii_to_nucleotide = {}
for nucleotide, ascii_set in four_nucleotides_to_upper_ascii_encoding.iteritems():
for ascii in ascii_set:
five_bit_ascii = ord(ascii) & 0b11111
print ascii, five_bit_ascii, nucleotide
five_bit_ascii_to_nucleotide[five_bit_ascii] = nucleotide
array_values = []
for index in range(32):
nucleotide = default_nucleotide
if index in five_bit_ascii_to_nucleotide:
nucleotide = five_bit_ascii_to_nucleotide[index]
array_values.append(four_nucleotides_to_tag[nucleotide])
print output_type, array_name+'[32] = {'
count = 0
for value in array_values:
if (count % 8) == 0:
if count != 0:
print ''
print ' ',
if (count+1) == len(array_values):
print value
else:
print str(value)+',',
count += 1
print '};'
| bsd-3-clause | -1,499,942,304,325,796,600 | 34.682927 | 82 | 0.685578 | false |
andgoldschmidt/iEBE | check_prerequisites.py | 1 | 8145 | #! /usr/bin/env python
"""
Print a list of tests to see whether all required tools for Ebe calculations
are present.
"""
from os import getcwd, unlink, path
from subprocess import call
numberOfSpaces = 5
def printWarning(warningString):
print("-"*(numberOfSpaces-2) + "> " + warningString)
def printMsg(message):
print(" "*numberOfSpaces + message)
def checkCommand(cmdString, utilityName=None):
"""
Try to execute "cmdString", then use "utilityName" to echo messages.
"""
tempfile = open("response.txt", 'w')
if not utilityName: utilityName=cmdString
call("%s " % cmdString, shell=True, cwd=getcwd(), stdout = tempfile, stderr = tempfile)
tempfile.close()
if "command not found" in open("response.txt").readline():
printWarning("%s *NOT* installed." % utilityName)
unlink("response.txt")
return False
else:
printMsg("%s installed." % utilityName)
unlink("response.txt")
return True
def checkModule(moduleName):
"""
Try to import "moduleName", then echo messages.
"""
try:
__import__(moduleName)
printMsg("python %s module installed." % moduleName)
return True
except:
printWarning("python %s module *NOT* installed." % moduleName)
return False
def checkEnvironment():
"""
Check if the required compiler and running environment are complete.
Return True if the environment is complete, otherwise return False.
"""
finalMsgs = []
print("Start checking...")
print("-"*80)
# check g++ and icpc
if not checkCommand("g++") and not checkCommand("icpc"):
finalMsgs.append("You need to install icpc or g++.")
# check gfortran and ifort
if not checkCommand("gfortran") and not checkCommand("ifort"):
finalMsgs.append("You need to install ifort or gfortran.")
# check make utility
if not checkCommand("make"):
finalMsgs.append("You need to install the make utility.")
# check gsl
if not checkCommand("gsl-config", "gsl"):
finalMsgs.append("You need to install gsl library.")
# check zip and unzip
if not checkCommand("zip --help", "zip") or not checkCommand("unzip --help", "unzip"):
finalMsgs.append("You need both zip and unzip utilities.")
# check numpy
if not checkModule("numpy"):
finalMsgs.append("You need to install python numpy package.")
# print final messages
print("-"*80)
if not finalMsgs:
print("All essential packages installed. Test passed.")
return True
else:
for msg in finalMsgs: print(msg)
return False
def checkExecutables():
"""
Check if all the executables are present, and compile them if not all of
them are. Return True if all the executables can be successfully
generated.
"""
ebeNodeFolder = "EBE-Node"
executables = (
path.join("superMC", "superMC.e"),
path.join("VISHNew", "VISHNew.e"),
path.join("iSS", "iSS.e"),
path.join("iS", "iS.e"),
path.join("iS", "resonance.e"),
path.join("iS", "iInteSp.e"),
path.join("osc2u", "osc2u.e"),
path.join("urqmd", "urqmd.e"),
path.join("trento","src", "trento.e")
)
# check for existence of all executables
existenceFlag = True
print("Checking existence of executables.")
for exe in executables:
if not path.exists(path.join(ebeNodeFolder, exe)):
print("Executable %s not found." % exe)
existenceFlag = False
break
else:
print("Executable %s found." % exe)
# compile if necessary and check again
tempfile = open(path.join("utilities", "CompileRecord.txt"), "w")
if not existenceFlag:
print("Start building executables...")
call("./compile_all.sh", shell=True, cwd="utilities", stdout = tempfile, stderr = tempfile)
tempfile.close()
unlink(path.join("utilities", "CompileRecord.txt"))
# check for existence of all executables again
existenceFlag = True
print("Checking again existence of executables.")
for exe in executables:
if not path.exists(path.join(ebeNodeFolder, exe)):
print("Executable %s still not found." % exe)
existenceFlag = False
return False
print("All executables found.")
return True
def greetings(selection):
if selection==1:
print(r"""
_______ _________ _ _______
|\ /|( ____ \\__ __/( ( /|/ ___ )
| ) ( || ( \/ ) ( | \ ( |\/ ) |
| (___) || (__ | | | \ | | / )
| ___ || __) | | | (\ \) | / /
| ( ) || ( | | | | \ | / /
| ) ( || (____/\___) (___| ) \ | / (_/\
|/ \|(_______/\_______/|/ )_)(_______/
_______ _______ _______ _______
( ____ \( ____ )( ___ )|\ /|( ____ )
| ( \/| ( )|| ( ) || ) ( || ( )|
| | | (____)|| | | || | | || (____)|
| | ____ | __)| | | || | | || _____)
| | \_ )| (\ ( | | | || | | || (
| (___) || ) \ \__| (___) || (___) || )
(_______)|/ \__/(_______)(_______)|/
""")
elif selection==2:
print(r"""
_ _ _ _ _ _
/ /\ / /\ /\ \ /\ \ /\ \ _ /\ \
/ / / / / // \ \ \ \ \ / \ \ /\_\ / \ \
/ /_/ / / // /\ \ \ /\ \_\ / /\ \ \_/ / /__/ /\ \ \
/ /\ \__/ / // / /\ \_\ / /\/_/ / / /\ \___/ //___/ /\ \ \
/ /\ \___\/ // /_/_ \/_/ / / / / / / \/____/ \___\/ / / /
/ / /\/___/ // /____/\ / / / / / / / / / / / /
/ / / / / // /\____\/ / / / / / / / / / / / / _
/ / / / / // / /______ ___/ / /__ / / / / / / \ \ \__/\_\
/ / / / / // / /_______\/\__\/_/___\/ / / / / / \ \___\/ /
\/_/ \/_/ \/__________/\/_________/\/_/ \/_/ \/___/_/
_ _ _ _ _
/\ \ /\ \ /\ \ /\_\ /\ \
/ \ \ / \ \ / \ \ / / / _ / \ \
/ /\ \_\ / /\ \ \ / /\ \ \\ \ \__ /\_\ / /\ \ \
/ / /\/_/ / / /\ \_\ / / /\ \ \\ \___\ / / // / /\ \_\
/ / / ______ / / /_/ / / / / / \ \_\\__ / / / // / /_/ / /
/ / / /\_____\ / / /__\/ / / / / / / // / / / / // / /__\/ /
/ / / \/____ // / /_____/ / / / / / // / / / / // / /_____/
/ / /_____/ / // / /\ \ \ / / /___/ / // / /___/ / // / /
/ / /______\/ // / / \ \ \/ / /____\/ // / /____\/ // / /
\/___________/ \/_/ \_\/\/_________/ \/_________/ \/_/
""")
elif selection==3:
print(r"""
. __.....__ .--. _..._
.'| .-'' '. |__| .' '.
< | / .-''"'-. `. .--.. .-. .
| | / /________\ \| || ' ' |
| | .'''-. | || || | | |.--------.
| |/.'''. \\ .-------------'| || | | ||____ |
| / | | \ '-.____...---.| || | | | / /
| | | | `. .' |__|| | | | .' /
| | | | `''-...... -' | | | | / /___
| '. | '. .-'''-. | | | || |
'---' '---' ' _ \ '--' '--'|_________|
/ /` '. \ _________ _...._
.--./) . | \ ' \ |.' '-.
/.''\\ .-,.--. | ' | ' \ .'```'. '.
| | | | | .-. |\ \ / / \ | \ \
\`-' / | | | | `. ` ..' /_ _ | | | |
/("'` | | | | '-...-'`| ' / | | \ / .
\ '---. | | '- .' | .' | | |\`'-.-' .'
/'""'.\ | | / | / | | | '-....-'`
|| ||| | | `'. | .' '.
\'. __// |_| ' .'| '/'-----------'
`'---' `-' `--'
""")
if __name__ == '__main__':
checkEnvironment()
| gpl-3.0 | 6,418,206,754,668,124,000 | 36.362385 | 99 | 0.352363 | false |
madscatt/zazzie_1.5 | trunk/sassie/simulate/complex_monte_carlo/nmer_dihedral.py | 1 | 34612 | '''
SASSIE: Copyright (C) 2011 Joseph E. Curtis, Ph.D.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os,sys,string,locale,bisect,random,time,platform
import numpy
import random
try:
import Gnuplot,Gnuplot.PlotItems, Gnuplot.funcutils
except:
pass
import sasmol.sasmol as sasmol
import sassie.simulate.constraints.constraints as constraints
import sassie.simulate.monomer_monte_carlo.dihedral_monte_carlo as dihedral
import sassie.simulate.monomer_monte_carlo.dihedral_rotate as dihedral_rotate
import sassie.simulate.energy.dihedral_energy as energy
import sassie.simulate.monomer_monte_carlo.pairs as pairs
import sassie.simulate.monomer_monte_carlo.step as step
import nmer_overlap_check
import nmer_nrotate
# NMER_DIHEDRAL
#
# 09/26/05 -- gag-dihedral search : jc
# 11/19/05 -- gag-dimer dihedral search : jc
# 06/29/09 -- generalized to nmer : jc/sr
# 11/17/11 -- added sasmol support : jc
#
#LC 1 2 3 4 5 6 7
#LC4567890123456789012345678901234567890123456789012345678901234567890123456789
# * **
'''
NMER_DIHEDRAL is the module that contains the functions
that are used to generate ensembles of structures by varying
protein dihedral angles. This particular version allows multiple
flexible proteins in the presence of non-flexible proteins and
nucleic acids.
This module is called from Protein Complex Dihedral Generation from
the main GUI through the graphical_complex_generate.py script.
This module calls to C / Python extension modules to speed up
calculations.
'''
def unpack_variables(variables):
runname = variables['runname'][0]
dcdfile = variables['dcdfile'][0]
path = variables['path'][0]
pdbfile = variables['pdbfile'][0]
trials = variables['trials'][0]
goback = variables['goback'][0]
temp = variables['temp'][0]
nsegments = variables['nsegments'][0]
segbasis = variables['segbasis'][0]
npsegments = variables['npsegments'][0]
flpsegname = variables['flpsegname'][0]
sseglow = variables['seglow'][0]
sseghigh = variables['seghigh'][0]
#cutoff = variables['cutoff'][0]
lowrg = variables['lowrg'][0]
highrg = variables['highrg'][0]
zflag = variables['zflag'][0]
zcutoff = variables['zcutoff'][0]
cflag = variables['cflag'][0]
confile = variables['confile'][0]
plotflag = variables['plotflag'][0]
directedmc = variables['directedmc'][0]
seed = variables['seed'][0]
return runname,dcdfile,path,pdbfile,trials,goback,temp,nsegments,segbasis,npsegments,flpsegname,sseglow,sseghigh,lowrg,highrg,zflag,zcutoff,cflag,confile,plotflag,directedmc,seed
def print_failure(message,txtOutput):
txtOutput.put("\n\n>>>> RUN FAILURE <<<<\n")
txtOutput.put(">>>> RUN FAILURE <<<<\n")
txtOutput.put(">>>> RUN FAILURE <<<<\n\n")
txtOutput.put(message)
return
def wait(sti=None, prompt='Plot will clear in 2 seconds ...\n'):
'''
WAIT is the function to prompt the user to clear a plot on a screen
'''
if sti is not None:
print sti
try:
if(platform.system() == "Linux"):
import curses
stdscr = curses.initscr()
stdscr.addstr('press a key to continue')
c = stdscr.getch()
curses.endwin()
except:
time.sleep(1)
def alignment_initialization(all_segment_mol,asegs,abasis,flexible_segments,seglow,seghigh):
all_flexible_align_mask = []
all_flexible_coor_sub_m1 = []
all_flexible_com_sub_m1 = []
all_flexible_sub_m2 = []
for i in xrange(len(flexible_segments)):
this_segment = flexible_segments[i]
idx = asegs.index(this_segment)
m1 = all_segment_mol[idx]
if(m1.moltype()[0] == 'protein'):
this_basis = 'CA'
elif(m1.moltype()[0] == 'rna' or m1.moltype()[0] == 'dna'):
this_basis = 'P'
else:
print 'NO ALIGNMENT BASIS ATOM DEFINED FOR SEGNAME'
sys.exit()
### TODO need to handle the exception in complex_filter.py
### ONLY protein and RNA need this alignment
# get alignment sub molecule
align_filter = 'name[i] == "'+this_basis+'" and (segname[i] == "'+this_segment+'") and (resid[i] >= '+str(seglow[i])+' and resid[i] <= '+str(seghigh[i])+')'
error,align_mask = m1.get_subset_mask(align_filter)
all_flexible_align_mask.append(align_mask)
sub_m1=sasmol.SasMol(2)
error = m1.copy_molecule_using_mask(sub_m1,align_mask,0)
com_sub_m1 = sub_m1.calccom(0)
sub_m1.center(0)
coor_sub_m1 = sub_m1.coor()[0]
all_flexible_coor_sub_m1.append(coor_sub_m1)
all_flexible_com_sub_m1.append(com_sub_m1)
sub_m2 = sasmol.SasMol(4)
error = m1.copy_molecule_using_mask(sub_m2,align_mask,0)
all_flexible_sub_m2.append(sub_m2)
return all_flexible_align_mask,all_flexible_coor_sub_m1,all_flexible_com_sub_m1,all_flexible_sub_m2
def run_file_utilities(runname,pdbpath,pdbfile,dcdfile):
direxist=os.path.exists(runname)
if(direxist==0):
os.system('mkdir -p '+runname+'/')
#
# global run administration
#
genpath=runname+'/complex_monte_carlo'
genpaths=genpath+'/'
direxist=os.path.exists(genpath)
if(direxist==0):
os.system('mkdir -p '+genpath)
cpst='cp '+pdbpath+'/'+pdbfile+' '+genpaths
os.system(cpst)
#
# write global run name, pdb, and dcd filenames to .last_sas
#
fileexist=os.path.exists('.last_sas')
if(fileexist==1):
os.system('mv -f .last_sas .last_sas_bu')
lastsasfile=open('./.last_sas','w')
lastsasfile.write('run_name\t'+runname+'\n')
lastsasfile.write('pdb_name\t'+pdbfile+'\n')
lastsasfile.write('dcd_name\t'+dcdfile+'\n')
return lastsasfile,genpaths
def process_input_variables(psegvariables,segbasis,sseglow,sseghigh,flpsegname):
allsith=[]
allsnumranges=[]
allsrlow=[]
allsrnum=[]
allmoltype=[]
for i in range(len(psegvariables)):
allsnumranges.append(psegvariables[i][0])
allsith.append(psegvariables[i][1])
allsrlow.append(psegvariables[i][2])
allsrnum.append(psegvariables[i][3])
allmoltype.append(psegvariables[i][4])
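	# Each psegvariables entry appears to carry five per-segment fields (see
	# the example in __main__ below, e.g. ['1', '30', '2', '30', 'protein']):
	# number of flexible ranges, maximum dihedral step in degrees (clamped to
	# 0-180 further down), starting residue of each flexible range, number of
	# contiguous residues per range, and the molecule type ('protein' or 'rna').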
#abasis=string.split(segbasis,',')
abasis=[item.strip() for item in string.split(segbasis,',')]
#seglow=string.split(sseglow,',')
#seghigh=string.split(sseghigh,',')
aith=[] ; anumranges=[] ; arlow=[] ; arnum=[]; amoltype=[]
for i in range(len(allsith)):
linith=string.split(allsith[i],',')
locith=[]
for i in range(len(linith)):
tith=linith[i]
fith=locale.atof(tith)
if(fith>180.0):
fith=180.0
elif(fith<0.0):
fith=0.0
locith.append(fith)
aith.append(locith)
for i in range(len(allsnumranges)):
nr=locale.atoi(allsnumranges[i])
anumranges.append(nr)
for i in range(len(allsrlow)):
linrlow=string.split(allsrlow[i],',')
linrnum=string.split(allsrnum[i],',')
rlow=[] ; rnum=[]
for k in range(len(linrlow)):
trlow=locale.atoi(linrlow[k])
trnum=locale.atoi(linrnum[k])
rlow.append(trlow)
rnum.append(trnum)
#print 'rlow = ',rlow
#print 'rnum = ',rnum
arlow.append(rlow)
arnum.append(rnum)
for i in range(len(psegvariables)):
moltype=allmoltype[i].strip()
amoltype.append(moltype)
'''
print 'anumranges = ',anumranges
print 'aith = ',aith
print 'arlow = ',arlow
print 'arnum = ',arnum
'''
raw_flexible_segments = string.split(flpsegname,",")
flexible_segments = []
for fp in raw_flexible_segments:
flexible_segments.append(fp.strip())
#print 'flexible_segments = ',flexible_segments
return amoltype,allsith,allsnumranges,allsrlow,allsrnum,abasis,sseglow,sseghigh,anumranges,aith,arlow,arnum,flexible_segments
def initialize_segments(m1,flexible_segments,nsegments,abasis):
segname = m1.segname()
asegs=[]
for tseg in segname:
if(tseg not in asegs):
asegs.append(tseg)
numsegs=len(asegs)
print 'found ',numsegs,' segment names'
first_last_resid = []
all_segment_mask = []
all_segment_full_mask = []
all_segment_basis_full_mask = []
all_segment_mol = []
tmask = ''
keyword_basis = False
if(len(abasis) == 1):
basis = abasis[0].strip()
if(basis.lower() == 'all' or basis.lower() == 'heavy' or basis.lower() == 'backbone'):
keyword_basis = True
for i in xrange(numsegs):
segmol = sasmol.SasMol(0)
error,segment_full_mask = m1.get_subset_mask('segname[i] == "'+asegs[i]+'"')
m1.copy_molecule_using_mask(segmol,segment_full_mask,0)
this_resid = segmol.resid()
first_last_resid.append([this_resid[0],this_resid[-1]])
all_segment_full_mask.append(segment_full_mask)
all_segment_mol.append(segmol)
### this is where abasis is used --> and this is where it matters!
if keyword_basis:
if(basis.lower() == 'all'):
#print 'setting up all atom overlap arrays'
segmol.set_average_vdw()
npairs = segmol.natoms()*(segmol.natoms() - 1)/2
cutoff_array = numpy.zeros(npairs,numpy.float)
pairs.pairs(segmol.atom_vdw(),cutoff_array)
keyword_basis_filter = 'segname[i] == "'+asegs[i]+'" and (not name[i] == "None") '
elif(basis.lower() == 'backbone'):
this_moltype = segmol.moltype()[0]
#print 'this_moltype = ',this_moltype ### check this
if(segmol.moltype()[0] == 'protein'):
keyword_basis_filter = 'segname[i] == "'+asegs[i]+'" and (name[i] == "N" or name[i] == "CA" or name[i] == "C") '
elif(segmol.moltype()[0] == 'rna' or segmol.moltype()[0] == 'dna'):
keyword_basis_filter = 'segname[i] == "'+asegs[i]+'" and (name[i] == "P" or name[i] == "O5\'" or name[i] == "C5\'" or name[i] == "C4\'" or name[i] == "C3\'" or name[i] == "O3\'") '
else:
keyword_basis_filter = 'segname[i] == "'+asegs[i]+'" and (not name[i][0] == "H") '
### TODO --> add to complex_filter so the following hack is not needed
elif(basis.lower() == 'heavy'):
keyword_basis_filter = 'segname[i] == "'+asegs[i]+'" and (not name[i][0] == "H") '
error,segment_basis_mask = m1.get_subset_mask(keyword_basis_filter)
else:
error,segment_basis_mask = m1.get_subset_mask('segname[i] == "'+asegs[i]+'" and name[i] =="'+abasis[i].strip()+'"')
all_segment_basis_full_mask.append(segment_basis_mask)
error,segment_mask = all_segment_mol[i].get_subset_mask('segname[i] == "'+asegs[i]+'"')
all_segment_mask.append(segment_mask)
### TODO ... this is probably why flexible segments need to be first!!
### should just take the NAMES of the flexible segnames to make this
###
### this is also where abasis is used --> but basis_full_mask is ONLY used for zcut
### checking: abasis itself is passed to check_overlap in nmer_nrotate
###
### OPTIONS: use moltype()[0] for each asegs[i] to set the basis (CA--> protein, P --> RNA)
### or better yet, use not hydrogen instead ... as this is ONLY used for z-cut check
###
tmask += '(segname[i] == "'+asegs[i]+'" and (not name[i][0] == "H")) '
#tmask+='segname[i] == "'+asegs[i]+'" and name[i] =="'+abasis[i].strip()+'"'
if i!=len(flexible_segments)-1:
tmask+=' or '
error,basis_full_mask= m1.get_subset_mask(tmask)
#print 'first_last_resid = ',first_last_resid
return asegs,first_last_resid,all_segment_mask,all_segment_full_mask,all_segment_basis_full_mask,basis_full_mask,all_segment_mol,keyword_basis
def initialize_interaction_regions(m1,interpairs,npairs,cutoff,sseglow,asegs,abasis):
if(len(interpairs)>0):
print 'pair distances < cut == ',cutoff,' angstroms between segments have been found'
		print 'these distances will be ignored in overlap check'
print 'interpairs = ',interpairs
else:
print 'all distances between segments are greater than cut == ',cutoff
print 'normal overlap checking will be used'
print 'npairs = ',npairs
### initialize interaction regions in each segment ###
interres=[] ; interatom=[]
for i in range(len(interpairs)):
segnum_1 = interpairs[i][0][0]
segnum_2 = interpairs[i][0][1]
for j in range(len(interpairs[i][1])):
resnum_1 = interpairs[i][1][j][0]
resnum_2 = interpairs[i][1][j][1]
### TODO --> need to match basis here as well
### TODO --> need to match basis here as well
### TODO --> need to match basis here as well
basis_segment_1 = '(segname[i] == "'+asegs[segnum_1]+'" and name[i] =="'+abasis[segnum_1].strip()+'")'
error,basis_mask_segment_1 = m1.get_subset_mask(basis_segment_1)
#idx_1 = numpy.where(basis_mask_segment_1==1.0)[0][resnum_1] # a ugly numpy function
idx_1 = filter(lambda x:basis_mask_segment_1[x]==1.0, range(len(basis_mask_segment_1)))[resnum_1]
basis_segment_2 = '(segname[i] == "'+asegs[segnum_2]+'" and name[i] =="'+abasis[segnum_2].strip()+'")'
error,basis_mask_segment_2 = m1.get_subset_mask(basis_segment_2)
#idx_2 = numpy.where(basis_mask_segment_2==1.0)[0][resnum_2] # a ugly numpy function
idx_2 = filter(lambda x:basis_mask_segment_2[x]==1.0, range(len(basis_mask_segment_2)))[resnum_2]
interres.append([resnum_1,resnum_2])
interatom.append([idx_1,idx_2])
print 'interres = ',interres
print 'interatom = ',interatom
return interatom,interres
def set_up_dihedral_arrays(all_segment_mol,asegs,abasis,amoltype,first_last_resid,flexible_segments,anumranges,arlow,arnum,keyword_basis,txtOutput):
flexible_dihedral_parameters = []
all_flexible_basis_mask = []
for i in xrange(len(flexible_segments)):
this_segname = flexible_segments[i]
idx = asegs.index(this_segname)
m1=all_segment_mol[idx]
### TODO --> need to deal with specific basis here
### TODO --> need to deal with specific basis here
### TODO --> need to deal with specific basis here
if(keyword_basis):
if amoltype[i]=='protein':
basis_atom = "CA"
elif amoltype[i]=='rna':
#basis_atom = "P"
basis_atom = "O5\'"
basis_filter = 'name[i] == "'+basis_atom+'" and segname[i] == "'+this_segname+'"'
else:
basis_filter = 'name[i] == "'+abasis[idx]+'" and segname[i] == "'+this_segname+'"'
error,basis_mask = m1.get_subset_mask(basis_filter)
all_flexible_basis_mask.append(basis_mask)
basis_m1=sasmol.SasMol(1)
error = m1.copy_molecule_using_mask(basis_m1,basis_mask,0)
basis_resname = basis_m1.resname()
basis_resid = basis_m1.resid()
arespsi=[] ; aresphi=[]
numranges=anumranges[i] ; reslow=arlow[i] ; numcont=arnum[i]
if amoltype[i]=='protein':
respsi=[] ; resphi=[]
energy.protein_initialization(respsi,resphi,basis_resid,basis_resname,numranges,reslow,numcont,first_last_resid[idx],txtOutput)
flexible_dihedral_parameters.append([respsi,resphi])
elif amoltype[i]=='rna':
resalpha = [] ; resbeta = [] ; resgamma = [] ; resdelta = [] ; resepsilon = [] ; reseta = []
energy.rna_initialization(resalpha,resbeta,resgamma,resdelta,resepsilon,reseta,basis_resid,basis_resname,numranges,reslow,numcont,first_last_resid[idx],txtOutput)
flexible_dihedral_parameters.append([resalpha,resbeta,resgamma,resdelta,resepsilon,reseta])
return flexible_dihedral_parameters,all_flexible_basis_mask
def set_up_constraints(m1,cflag,confile):
if(cflag == 1):
filter_flag = 0
error,constraint_basis1_array, constraint_basis2_array,distance_array,type_array = constraints.read_constraints(m1,confile,filter_flag)
mask_a_array = [] ; mask_b_array = []
for i in xrange(len(distance_array)):
print constraint_basis1_array[i]
print constraint_basis2_array[i]
print distance_array[i]
print type_array[i]
error,local_mask_a_array = m1.get_subset_mask(constraint_basis1_array[i])
error,local_mask_b_array = m1.get_subset_mask(constraint_basis2_array[i])
mask_a_array.append(local_mask_a_array)
mask_b_array.append(local_mask_b_array)
else:
mask_a_array = [] ; mask_b_array = []
distance_array = [] ; type_array = []
return mask_a_array,mask_b_array,distance_array,type_array
def setup_flexible_residue_mask_arrays(m1,flexible_segments,anumranges,arlow,arnum,amoltype,txtOutput):
all_flexible_residues = []
all_flexible_residue_rotation_indices = []
all_flexible_residue_rotation_mask = []
for i in xrange(len(flexible_segments)):
numranges=anumranges[i] ; reslow=arlow[i] ; numcont=arnum[i]
flexible_residues = dihedral.get_flexible_residues(numranges,reslow,numcont)
all_flexible_residues.append(flexible_residues)
segment_filter = 'segname[i] == "'+flexible_segments[i]+'"'
error,segment_mask = m1.get_subset_mask(segment_filter)
#print 'segment_filter = ',segment_filter
#print 'error = ',error
segment_m1=sasmol.SasMol(98)
error = m1.copy_molecule_using_mask(segment_m1,segment_mask,0)
molecule_type = amoltype[i]
residue_rotation_indices,residue_rotation_mask = dihedral.get_rotation_indices(segment_m1,molecule_type,flexible_residues,txtOutput)
all_flexible_residue_rotation_indices.append(residue_rotation_indices)
all_flexible_residue_rotation_mask.append(residue_rotation_mask)
return all_flexible_residues,all_flexible_residue_rotation_indices,all_flexible_residue_rotation_mask
def evaluate_rg(rg_difference_list,directed_rg_list,accepted_rg_list,this_rg_difference,this_rg,accepted):
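	'''
	Directed-MC bookkeeping (a sketch of what the code below does): the three
	parallel lists track the accepted structures whose Rg is closest to the
	target; when a new structure has a smaller Rg difference than the current
	worst entry, that entry is overwritten in all three lists.
	'''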
maximum_value = max(rg_difference_list)
if(maximum_value > this_rg_difference):
index = rg_difference_list.index(maximum_value)
rg_difference_list[index] = this_rg_difference
directed_rg_list[index] = this_rg
accepted_rg_list[index] = accepted
return
### main method ###
def dihedralgenerate(variables,psegvariables,txtOutput):
#amoltype=['protein','protein']
#amoltype=['rna','protein']
#amoltype=['protein']
#ttxt=time.ctime()
ttxt=time.asctime( time.gmtime( time.time() ) )
st=''.join(['=' for x in xrange(60)])
txtOutput.put("\n%s \n" %(st))
txtOutput.put("DATA FROM RUN: %s \n\n" %(ttxt))
# unpack variables
runname,dcdfile,path,pdbfile,trials,goback,temp,nsegments,segbasis,npsegments,flpsegname,sseglow,sseghigh,lowrg,highrg,zflag,zcutoff,cflag,confile,plotflag,directedmc,seed=unpack_variables(variables)
segbasis.strip()
# process variables
amoltype,allsith,allsnumranges,allsrlow,allsrnum,abasis,seglow,seghigh,anumranges,aith,arlow,arnum,flexible_segments = process_input_variables(psegvariables,segbasis,sseglow,sseghigh,flpsegname)
import pprint; fout = open('a.txt','w'); pprint.pprint(variables,fout); pprint.pprint(psegvariables,fout); pprint.pprint(segbasis,fout); pprint.pprint(seglow,fout); pprint.pprint(seghigh,fout); pprint.pprint(flpsegname,fout); fout.close()
# set up run file I/O
lastsasfile,genpaths = run_file_utilities(runname,path,pdbfile,dcdfile)
kb=1.380658E-23 # J/K
beta=1.0/(temp*kb)
m1 = sasmol.SasMol(0)
m1.read_pdb(path+pdbfile)
nf1=m1.number_of_frames()
#print 'nf1 = %d\n' % nf1
dcdoutfile = m1.open_dcd_write(genpaths+dcdfile)
# set up segment arrays
asegs,first_last_resid,all_segment_mask,all_segment_full_mask,all_segment_basis_full_mask,basis_full_mask,all_segment_mol,keyword_basis = initialize_segments(m1,flexible_segments,nsegments,abasis)
# set up constraints variables
mask_a_array,mask_b_array,distance_array,type_array = set_up_constraints(m1,cflag,confile)
# set up segment alignment coordinates and com arrays
all_flexible_align_mask,all_flexible_coor_sub_m1,all_flexible_com_sub_m1,all_flexible_sub_m2 = alignment_initialization(all_segment_mol,asegs,abasis,flexible_segments,seglow,seghigh)
if(keyword_basis):
if(segbasis.lower() == 'all'):
cutoff = 0.8
elif(segbasis.lower() == 'heavy' or segbasis.lower() == 'backbone'):
cutoff = 0.8
else:
cutoff = 2.0
print 'cutoff = ',cutoff
check_initial_interactions = False
if(check_initial_interactions):
# survey interaction between segments
interpairs,npairs=nmer_overlap_check.nmer_overlap_check(m1,path,pdbfile,cutoff,abasis,keyword_basis)
interatom,interres = initialize_interaction_regions(m1,interpairs,npairs,cutoff,sseglow,asegs,abasis)
else:
interpairs = [] ; npairs = 0
interatom = [] ; interres = []
# set up dihedral parameters for each flexible segment
flexible_dihedral_parameters,all_flexible_basis_mask = set_up_dihedral_arrays(all_segment_mol,asegs,abasis,amoltype,first_last_resid,flexible_segments,anumranges,arlow,arnum,keyword_basis,txtOutput)
if(segbasis.lower() == 'all' or segbasis.lower() == 'heavy' or segbasis.lower() == 'backbone'):
print 'segbasis = ',segbasis,' so I should stop for now\n'
#sys.exit()
else:
print 'segbasis = ',segbasis,' so I should continue\n'
# set up flexible residue rotation mask arrays
all_flexible_residues,all_flexible_residue_rotation_indices,all_flexible_residue_rotation_mask = setup_flexible_residue_mask_arrays(m1,flexible_segments,anumranges,arlow,arnum,amoltype,txtOutput)
step_parameters = step.Setup()
hrg=0.0 ; lowestrg=1000.0
an='psi'
accepted=0 ; over=0 ; badrg=0 ; badz=0 ; badc=0 ; nsteps=0 ; arg=0.0 ; trg=0.0
coor = m1.coor()
frame = 0
# MAIN LOOP
q0=1;th=1.0;seg=asegs[0]
pairdat=[an,q0,th,seg]
all_rg_tally=[] ; accepted_rg_tally=[]
phi_tally=[] ; aphi_tally=[]
psi_tally=[] ; apsi_tally=[]
atpsi_tally=[] ; atphi_tally=[] ; atphipsi_tally=[]
if(plotflag == 1):
graph = Gnuplot.Gnuplot(debug=1)
graph.clear()
graph('set title "Rg Results"')
graph.xlabel('Structure Number')
graph.ylabel('Rg (Angstrom^2)')
nonbondflag = 0
if(seed[0] == 1):
from numpy.random import RandomState
seed_object = RandomState(seed[1])
else:
seed_object = -1
failtally=0 ; acc=0 ; afile=''; accfile=[]
minx=[] ; miny=[] ; minz=[]
maxx=[] ; maxy=[] ; maxz=[]
if(directedmc > 0):
rg_difference_list = []
directed_rg_list = []
accepted_rg_list = []
rg_list_length = 10 ### hardwired
for i in range(trials):
if(seed[0] == 1):
ran_num = seed_object.rand()
tflexsegn = int(len(flexible_segments)*ran_num)
tsegn = asegs.index(flexible_segments[tflexsegn])
else:
tflexsegn = int(len(flexible_segments)*random.random())
tsegn = asegs.index(flexible_segments[tflexsegn])
tseg = asegs[tsegn]
molecule_type = amoltype[tflexsegn]
dtheta = aith[tflexsegn] ; numranges = anumranges[tflexsegn]
reslow = arlow[tflexsegn] ; numcont = arnum[tflexsegn]
segment_full_mask = all_segment_full_mask[tsegn]
error,new_coor = m1.get_coor_using_mask(frame,segment_full_mask)
segment_mol = all_segment_mol[tsegn]
segment_mol.setCoor(new_coor)
'''
if(i<10):
print 'segment_mol.coor()[0,0,0] = ',segment_mol.coor()[0,0,0]
else:
sys.exit()
'''
vdi,vdf,indices,this_mask=step_parameters.chooser(new_coor,segment_mol,pairdat,dtheta,numranges,reslow,numcont,flexible_dihedral_parameters[tflexsegn],beta,all_flexible_residue_rotation_indices[tflexsegn],all_flexible_residue_rotation_mask[tflexsegn],nonbondflag,first_last_resid[tsegn],molecule_type,seed_object)
'''
print 'len(indices) = ',len(indices)
print 'indices[0] = ',indices[0]
print 'indices[-1] = ',indices[-1]
print 'tsegn = ',tsegn
'''
pairdat[3] = tseg
an=pairdat[0] ; q0=pairdat[1] ; th=pairdat[2] ; seg=pairdat[3]
nsteps+=1
re=[0,0,0,0.0,0.0,lowestrg,hrg,0,0,[]]
newafile = nmer_nrotate.rotate(coor,m1,q0,th,an,cutoff,lowrg,highrg,re,accepted,zflag,zcutoff,cflag,dcdoutfile,indices,this_mask,all_flexible_basis_mask[tflexsegn],all_flexible_sub_m2[tflexsegn],all_flexible_align_mask[tflexsegn],all_flexible_coor_sub_m1[tflexsegn],all_flexible_com_sub_m1[tflexsegn],mask_a_array,mask_b_array,distance_array,type_array,first_last_resid[tsegn],molecule_type,all_segment_mask[tsegn],segment_full_mask,all_segment_basis_full_mask,basis_full_mask,all_segment_mol[tsegn],asegs,abasis,interatom,interres)
print '.', ; sys.stdout.flush()
accepted=accepted+re[0] ; over=over+re[1] ; badrg=badrg+re[2] ; rg_value=re[3]
trg=trg+re[3] ; arg=arg+re[4] ; lowestrg=re[5] ; hrg=re[6]
badz=badz+re[7] ; badc=badc+re[8]
if(len(re[9])>0):
minmax=re[9]
minx.append(minmax[0][0]) ; miny.append(minmax[0][1]) ; minz.append(minmax[0][2])
maxx.append(minmax[1][0]) ; maxy.append(minmax[1][1]) ; maxz.append(minmax[1][2])
all_rg_tally.append([i,rg_value])
if(re[0]==1):
accepted_rg_tally.append([i,accepted,rg_value])
if(directedmc > 0):
if(len(rg_difference_list)<= rg_list_length):
this_rg_difference = abs(rg_value - directedmc)
rg_difference_list.append(this_rg_difference)
directed_rg_list.append(rg_value)
accepted_rg_list.append(accepted)
else:
this_rg_difference = abs(rg_value - directedmc)
evaluate_rg(rg_difference_list,directed_rg_list,accepted_rg_list,this_rg_difference,rg_value,accepted)
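		# Rejection handling below (as read from the code): after `goback`
		# consecutive rejected moves, coordinates are reloaded either from a
		# randomly chosen previously accepted frame of the output DCD or, if
		# nothing has been accepted yet (or the draw yields -1), from the
		# original starting PDB.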
if(re[0]==0):
if(failtally==goback):
failtally=0
if(accepted > 0):
if(seed[0] == 1):
ran_num = seed_object.rand()
dum=int(accepted*ran_num)-1
elif(directedmc > 0):
local_rg_list_length = len(directed_rg_list)
ran_num = random.randrange(0,local_rg_list_length)
dum = accepted_rg_list[ran_num]
else:
dum=int(accepted*random.random())-1
if(dum==-1):
print '\nreloading coordinates from original starting structure'
m1.read_pdb(path+pdbfile,fastread=True,saspdbrx_topology=True)
coor=m1.coor()
else:
print '\nreloading coordinates from a previously accepted structure'
m1.read_single_dcd_step(genpaths+dcdfile,dum+1)
#m1.read_single_dcd_step(genpaths+dcdfile,dum)
coor=m1.coor()
else:
print '\n>>>>>reloading coordinates from original starting structure'
m1.read_pdb(path+pdbfile,fastread=True,saspdbrx_topology=True)
coor=m1.coor()
else:
failtally=failtally+1
if(((i+1)%(float(trials)/100.0)==0 or (trials<10))):
fraction_done = (float(i+1)/float(trials))
progress_string='\nCOMPLETED '+str(i+1)+' of '+str(trials)+' : '+str(fraction_done*100.0)+' % done'
print('%s\n' % progress_string)
print accepted,' configurations accepted out of ',nsteps,(float(accepted)/nsteps)*100.0,' %\n\n'
report_string='STATUS\t'+str(fraction_done)
txtOutput.put(report_string)
if(i>9):
if((i+1)%(trials/10)==0 and accepted > 0 and i+1>10):
if(plotflag == 1):
graph.plot(Gnuplot.Data(all_rg_tally,using='1:2 w p ps 4',title='all Rg'),Gnuplot.Data(accepted_rg_tally,using='1:3 w lp pt 5 ps 2',title='accepted'))
fraction_done = (float(i+1)/float(trials))
report_string='STATUS\t'+str(fraction_done)
txtOutput.put(report_string)
elif(accepted > 0):
if(plotflag == 1):
graph.plot(Gnuplot.Data(all_rg_tally,using='1:2 w p ps 4',title='all Rg'),Gnuplot.Data(accepted_rg_tally,using='1:3 w lp pt 5 ps 2',title='accepted'))
fraction_done = (float(i+1)/float(trials))
report_string='STATUS\t'+str(fraction_done)
txtOutput.put(report_string)
m1.close_dcd_write(dcdoutfile)
rgplot=open('./'+runname+'/complex_monte_carlo/'+dcdfile+'.all_rg_results_data.txt','w')
rgplot.write('# structure number (structure 1 = 1; not 0), Rg (all)\n')
for ii in range(len(all_rg_tally)):
rgplot.write('%i\t%f\n' % (all_rg_tally[ii][0]+1,all_rg_tally[ii][1]))
rgplot.close()
rgplot=open('./'+runname+'/complex_monte_carlo/'+dcdfile+'.accepted_rg_results_data.txt','w')
rgplot.write('# structure number (structure 1 = 1; not 0), Rg (accepted)\n')
for ii in range(len(accepted_rg_tally)):
rgplot.write('%i\t%f\t%i\n' % (accepted_rg_tally[ii][1]-1,accepted_rg_tally[ii][2],accepted_rg_tally[ii][0]+1))
rgplot.close()
'''
outfile2=open(genpaths+dcdfile+'.phi','w')
outfile3=open(genpaths+dcdfile+'.psi','w')
outfile5=open(genpaths+dcdfile+'.aphi','w')
outfile6=open(genpaths+dcdfile+'.apsi','w')
outfile7=open(genpaths+dcdfile+'.aphivsapsi','w')
outfile7.write('# ACCEPTED STRUCTURES\n')
outfile7.write('# AA phi psi\n')
for i in range(len(phi_tally)):
outfile2.write('%i\t%f\n' % (phi_tally[i][0],phi_tally[i][1]))
for i in range(len(psi_tally)):
outfile3.write('%i\t%f\n' % (psi_tally[i][0],psi_tally[i][1]))
for i in range(len(aphi_tally)):
outfile5.write('%i\t%f\n' % (aphi_tally[i][0],aphi_tally[i][1]))
for i in range(len(apsi_tally)):
outfile6.write('%i\t%f\n' % (apsi_tally[i][0],apsi_tally[i][1]))
for i in range(len(atphipsi_tally)):
outfile7.write('%i\t%f\t%f\n' % (atphipsi_tally[i][0],atphipsi_tally[i][1],atphipsi_tally[i][2]))
outfile2.close()
outfile3.close()
outfile5.close()
outfile6.close()
outfile7.close()
'''
ttxt=time.ctime()
st=''.join(['=' for x in xrange(60)])
if(accepted > 0):
txtOutput.put("Average accepted rg2 = %lf\n" % (arg/(accepted)))
txtOutput.put("Configurations and statistics saved in %s directory\n" % ('./'+genpaths))
else:
txtOutput.put("Average accepted rg2 = %lf\n" % (0.0))
txtOutput.put("\n NO ACCEPTED MOVES\n\n Statistics saved in %s directory\n" % (genpaths))
outfile7=open(genpaths+dcdfile+'.stats','w')
outfile7.write('%s\t%f\t%s\t%f\n' % ('lowest Rg = ',lowestrg,'highest Rg = ',hrg))
outfile7.write('%s\t%i\t%s\t%i\t%s\t%f%s\n' % ('accepted ',accepted,' out of ',nsteps,' moves : ',(accepted/float(nsteps))*100.0, ' %'))
outfile7.write('%s\t%i\t%s\t%i\t%s\t%f%s\n' % ('overlapped ',over,' out of ',nsteps,' moves : ',(over/float(nsteps))*100.0, ' %'))
outfile7.write('%s\t%i\t%s\t%i\t%s\t%f%s\n' % ('bad rg2 ',badrg,' out of ',nsteps,' moves : ',(badrg/float(nsteps))*100.0, ' %'))
outfile7.write('%s\t%i\t%s\t%i\t%s\t%f%s\n' % ('bad z-filter ',badz,' out of ',nsteps,' moves : ',(badz/float(nsteps))*100.0, ' %'))
	outfile7.write('%s\t%i\t%s\t%i\t%s\t%f%s\n' % ('bad constraints ',badc,' out of ',nsteps,' moves : ',(badc/float(nsteps))*100.0, ' %'))
if(accepted>0):
outfile7.write('%s\t%f\n' % ('average accepted rg2 = ',arg/(accepted)))
else:
outfile7.write('%s\t%f\n' % ('average accepted rg2 = ',0.0))
outfile7.write('%s\t%f\n' % ('average total rg2 of ensemble = ',trg/(nsteps)))
print '\n\nDCD data were written to %s\n' % ('./'+genpaths+dcdfile)
txtOutput.put( "\nDCD data were written to %s\n\n" % ('./'+genpaths+dcdfile))
txtOutput.put("lowest Rg = %lf\t highest Rg = %lf\n" % (lowestrg,hrg))
txtOutput.put("accepted %d out of %d : %lf percent\n" % (accepted,nsteps,(accepted/float(nsteps))*100.0))
txtOutput.put("overlapped %d out of %d moves : %lf percent\n" % (over,nsteps,(float(over)/float(nsteps))*100.0))
txtOutput.put("bad rg2 %d out of %d moves : %lf percent\n" % (badrg,nsteps,(float(badrg)/float(nsteps))*100.0))
if(zflag==1):
txtOutput.put("bad zcut %d out of %d moves : %lf percent\n\n\n" % (badz,nsteps,(float(badz)/float(nsteps))*100.0))
if(cflag==1):
txtOutput.put("constraint filter rejected %d out of %d moves : %lf percent\n\n\n" % (badc,nsteps,(float(badc)/float(nsteps))*100.0))
if(len(minx)>0 and len(miny)>0 and len(minz)>0 and len(maxx)>0 and len(maxy)>0 and len(maxz)>0):
min_x = numpy.min(minx) ; min_y = numpy.min(miny) ; min_z = numpy.min(minz)
max_x = numpy.max(maxx) ; max_y = numpy.max(maxy) ; max_z = numpy.max(maxz)
txtOutput.put("\nminimum x = %lf\t maximum x = %lf -> range: %lf Angstroms\n" % (min_x,max_x,(max_x-min_x)))
txtOutput.put("minimum y = %lf\t maximum y = %lf -> range: %lf Angstroms\n" % (min_y,max_y,(max_y-min_y)))
txtOutput.put("minimum z = %lf\t maximum z = %lf -> range: %lf Angstroms\n\n" % (min_z,max_z,(max_z-min_z)))
outfile7.write("\nminimum x = %lf\t maximum x = %lf -> range: %lf Angstroms\n" % (min_x,max_x,(max_x-min_x)))
outfile7.write("minimum y = %lf\t maximum y = %lf -> range: %lf Angstroms\n" % (min_y,max_y,(max_y-min_y)))
outfile7.write("minimum z = %lf\t maximum z = %lf -> range: %lf Angstroms\n\n" % (min_z,max_z,(max_z-min_z)))
outfile7.close()
else:
outfile7.close()
txtOutput.put("\n%s \n" %(st))
lastsasfile.close()
print 'COMPLEX DIHEDRAL IS DONE'
time.sleep(1.5)
if(plotflag == 1):
wait('\n')
return()
if __name__=='__main__':
runname = 'run_0'
dcdfile = 'run_0.dcd'
path = './'
pdbfile = 'fram601.pdb'
trials = '50'
goback = '50'
nsegments = '2'
npsegments = '2'
flpsegname = 'ENDA,ENDB'
segbasis = 'CA, CA'
#segbasis = 'all'
#segbasis = 'heavy'
#segbasis = 'backbone'
seglow = '95, 95'
seghigh = '110, 110'
temp = '300.0'
lowrg = '20.0'
highrg = '185.0'
zflag = '0'
zcutoff = '0.0'
cflag = '0'
confile = 'constraints.txt'
directedmc = '0'
psffilepath='./'
psffilename = 'refgag.psf'
import sassie.sasconfig as sasconfig
parmfilepath=sasconfig._bin_path+'toppar'
parmfilename = 'par_all27_prot_na.inp'
plotflag = '1'
seed = '0, 123'
svariables={}
svariables['cflag'] = (cflag,'int')
svariables['confile'] = (confile,'string')
svariables['dcdfile'] = (dcdfile,'string')
svariables['directedmc'] = (directedmc,'float')
svariables['flpsegname'] = (flpsegname, 'string')
svariables['goback'] = (goback,'int')
svariables['highrg'] = (highrg,'float')
svariables['lowrg'] = (lowrg,'float')
svariables['npsegments'] = (npsegments,'int')
svariables['nsegments'] = (nsegments,'int')
svariables['parmfilename'] = (parmfilename,'string')
svariables['path'] = (path,'string')
svariables['pdbfile'] = (pdbfile,'string')
svariables['plotflag'] = (plotflag,'int')
svariables['psffilename'] = (psffilename,'string')
svariables['runname'] = (runname,'string')
svariables['seed'] = (seed,'int_array')
svariables['segbasis'] = (segbasis,'string')
svariables['seghigh'] = (seghigh,'int_array')
svariables['seglow'] = (seglow,'int_array')
svariables['temp'] = (temp,'float')
svariables['trials'] = (trials,'int')
svariables['zcutoff'] = (zcutoff,'float')
svariables['zflag'] = (zflag, 'int')
psegvariables= [['1', '30', '2', '30', 'protein'], ['1', '30', '2', '30', 'protein']]
import sassie.interface.input_filter as input_filter
error,variables=input_filter.type_check_and_convert(svariables)
#error=generate_filter.check_protein(variables,eflag,monflag)
if(len(error)>0):
print 'error = ',error
sys.exit()
runname=variables['runname'][0]
import multiprocessing
txtQueue=multiprocessing.JoinableQueue()
dihedralgenerate(variables,psegvariables,txtQueue)
| gpl-3.0 | 4,616,042,028,928,589,000 | 34.830228 | 534 | 0.669941 | false |
ian-r-rose/burnman | burnman/minerals/SLB_2005.py | 1 | 2794 | # This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
"""
SLB_2005
^^^^^^^^
Minerals from Stixrude & Lithgow-Bertelloni 2005 and references therein
"""
from __future__ import absolute_import
from .. import mineral_helpers as helpers
from ..mineral import Mineral
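# Note on the parameter dictionaries below (a reading based on how these
# fields are commonly used in BurnMan; treat the units as an assumption here):
# V_0 is the molar volume [m^3/mol], K_0 the bulk modulus [Pa] with pressure
# derivative Kprime_0, G_0 the shear modulus [Pa] with derivative Gprime_0,
# molar_mass is in [kg/mol], n the number of atoms per formula unit,
# Debye_0 the Debye temperature [K], and grueneisen_0, q_0 and eta_s_0 the
# Grueneisen parameter and its volume / shear strain derivatives.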
class stishovite (Mineral):
def __init__(self):
self.params = {
'equation_of_state': 'slb3',
'V_0': 14.02e-6,
'K_0': 314.0e9,
'Kprime_0': 4.4,
'G_0': 220.0e9,
'Gprime_0': 1.6,
'molar_mass': .0601,
'n': 3,
'Debye_0': 1044.,
'grueneisen_0': 1.34,
'q_0': 2.4,
'eta_s_0': 5.0}
Mineral.__init__(self)
class periclase (Mineral):
def __init__(self):
self.params = {
'equation_of_state': 'slb3',
'V_0': 11.24e-6,
'K_0': 161.0e9,
'Kprime_0': 3.8,
'G_0': 131.0e9,
'Gprime_0': 2.1,
'molar_mass': .0403,
'n': 2,
'Debye_0': 773.,
'grueneisen_0': 1.5,
'q_0': 1.5,
'eta_s_0': 2.8}
Mineral.__init__(self)
class wuestite (Mineral):
def __init__(self):
self.params = {
'equation_of_state': 'slb3',
'V_0': 12.06e-6,
'K_0': 152.0e9,
'Kprime_0': 4.9,
'G_0': 47.0e9,
'Gprime_0': 0.7,
'molar_mass': .0718,
'n': 2,
'Debye_0': 455.,
'grueneisen_0': 1.28,
'q_0': 1.5,
'eta_s_0': 0.8}
Mineral.__init__(self)
class mg_perovskite(Mineral):
def __init__(self):
self.params = {
'equation_of_state': 'slb3',
'V_0': 24.45e-6,
'K_0': 251.0e9,
'Kprime_0': 4.1,
'G_0': 175.0e9,
'Gprime_0': 1.7,
'molar_mass': .1000,
'n': 5,
'Debye_0': 1070.,
'grueneisen_0': 1.48,
'q_0': 1.4,
'eta_s_0': 2.6}
Mineral.__init__(self)
class fe_perovskite(Mineral):
def __init__(self):
self.params = {
'equation_of_state': 'slb3',
'V_0': 25.48e-6,
'K_0': 281.0e9,
'Kprime_0': 4.1,
'G_0': 138.0e9,
'Gprime_0': 1.7,
'molar_mass': .1319,
'n': 5,
'Debye_0': 841.,
'grueneisen_0': 1.48,
'q_0': 1.4,
'eta_s_0': 2.1}
Mineral.__init__(self)
mg_bridgmanite = mg_perovskite
fe_bridgmanite = fe_perovskite
| gpl-2.0 | -8,929,445,697,747,135,000 | 22.880342 | 111 | 0.430208 | false |
RawEvan/sharephotos | sharephotos/tests.py | 1 | 3027 | # coding:utf-8
from django.test import TestCase
from django.http import HttpResponse
from django.core.mail import send_mail
from models import Photo, Tag, Interest, Collect
from users.models import User
import dbControl
import faceControl
import time
class DBTestCase(TestCase):
""" Test for functions of database controling. """
def setUp(self):
test_user = User(email='[email protected]')
test_user.save()
def test_add_interest(self):
tag_list = ['test_tag_1', 'test_tag_2']
got_tag_list = []
test_owner = '[email protected]'
dbControl.add_interest(test_owner, tag_list)
interest_obj = Interest.objects.get(email=test_owner, interested_tag=tag_list[0])
self.assertEqual(interest_obj.degree, 1)
def test_add_collect(self):
import pdb
pdb.set_trace()
email = '[email protected]'
photo_url = 'test_url'
tag = u'tag1、tag2'
dbControl.save_photo_and_tag(photo_url=photo_url, description='no',
tag=tag, person_id_list=[], permission='public', owner=email)
photo_id= Photo.objects.get(photo_url=photo_url).id
dbControl.add_collect(email, photo_id)
collected_times = Photo.objects.get(id=photo_id).collected_times
self.assertTrue(Collect.objects.get(
email=email, photo_id=photo_id))
self.assertEqual(collected_times, 1)
self.assertTrue(Interest.objects.get(email=email, interested_tag='tag2'))
def test_cancel_collect(self):
email = '[email protected]'
photo_url = 'test_url'
tag = u'tag1、tag2'
dbControl.save_photo_and_tag(photo_url=photo_url, description='no',
tag=tag, person_id_list=[], permission='public', owner=email)
photo_id= Photo.objects.get(photo_url=photo_url).id
dbControl.add_collect(email, photo_id)
collected_times = Photo.objects.get(id=photo_id).collected_times
self.assertEqual(collected_times, 1)
dbControl.cancel_collect(email=email, photo_id=photo_id)
collected_times = Photo.objects.get(id=photo_id).collected_times
self.assertEqual(collected_times, 0)
class UploadTestCase(TestCase):
""" TestCase for view: upload. """
def test_upload(self):
pass
class FaceTestCase(TestCase):
""" TestCase for faceControl. """
def test_add_face(self):
got_person_list = faceControl.add_faces(
'url', 'http://cdn.sinacloud.net/sharephotos/%E5%A5%A5%E5%B7%B4%E9%A9%AC%E3%80%81%E7%B1%B3%E6%AD%87_Mon_Apr_18_00%3A16%3A01_2016.jpg')
# It's a photo with two persons, so the length is 2.
self.assertEqual(len(got_person_list), 2)
def EmailTest(request):
""" Test the function of sending Email. It's not a nomal testcase. """
message = '%s, Email from sharephotos.sinaapp.com' % time.ctime()
send_mail('Subject_test', message,
'[email protected]', ['[email protected]'])
return HttpResponse(u'send success')
| gpl-3.0 | 5,717,081,953,570,205,000 | 34.151163 | 146 | 0.647701 | false |
nvbn/django-discover-jenkins | discover_jenkins/results.py | 1 | 8654 | import os
import traceback
from datetime import datetime
from itertools import groupby
from xml.sax.saxutils import XMLGenerator
from xml.sax.xmlreader import AttributesImpl
from django.utils.unittest import TextTestResult
from discover_jenkins.utils import total_seconds
try:
from django.utils.encoding import smart_text
except ImportError:
from django.utils.encoding import smart_unicode as smart_text
STDOUT_LINE = '\nStdout:\n%s'
STDERR_LINE = '\nStderr:\n%s'
class TestInfo(object):
class RESULT(object):
SUCCESS = 0
ERROR = 1
FAILURE = 2
EXPECTED_FAILURE = 3
UNEXPECTED_SUCCESS = 4
SKIPPED = 5
__slots__ = ('test_method', 'start_time', 'end_time',
'err', 'stdout', 'stderr', 'result', 'reason')
def __init__(self, **kwargs):
for slot_name in self.__slots__:
setattr(self, slot_name, None)
for key, value in kwargs.items():
setattr(self, key, value)
class XMLTestResult(TextTestResult):
"""
Dumps xml junit output as well as text
"""
def __init__(self, *args, **kwargs):
super(XMLTestResult, self).__init__(*args, **kwargs)
self.testInfos = []
self.currentTestInfo = None
def startTestRun(self):
"""
Called once before any tests are executed.
"""
super(XMLTestResult, self).startTestRun()
def startTest(self, test):
"""
Called when the given test is about to be run
"""
self.currentTestInfo = TestInfo(test_method=test,
start_time=datetime.now())
super(XMLTestResult, self).startTest(test)
def stopTest(self, test):
"""
Called when the given test has been run
"""
self.currentTestInfo.end_time = datetime.now()
self.currentTestInfo.stdout = self._stdout_buffer.getvalue()
self.currentTestInfo.stderr = self._stderr_buffer.getvalue()
self.testInfos.append(self.currentTestInfo)
super(XMLTestResult, self).stopTest(test)
def addError(self, test, err):
"""
Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info()
"""
self.currentTestInfo.result = TestInfo.RESULT.ERROR
self.currentTestInfo.err = err
super(XMLTestResult, self).addError(test, err)
def addFailure(self, test, err):
"""
Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info().
"""
self.currentTestInfo.result = TestInfo.RESULT.FAILURE
self.currentTestInfo.err = err
super(XMLTestResult, self).addFailure(test, err)
def addSuccess(self, test):
"""
Called when a test has completed successfully
"""
self.currentTestInfo.result = TestInfo.RESULT.SUCCESS
super(XMLTestResult, self).addSuccess(test)
def addSkip(self, test, reason):
"""
Called when a test is skipped.
"""
self.currentTestInfo.result = TestInfo.RESULT.SKIPPED
self.currentTestInfo.reason = reason
super(XMLTestResult, self).addSkip(test, reason)
def addExpectedFailure(self, test, err):
"""
        Called when an expected failure/error occurred.
"""
self.currentTestInfo.result = TestInfo.RESULT.EXPECTED_FAILURE
self.currentTestInfo.err = err
super(XMLTestResult, self).addExpectedFailure(test, err)
def addUnexpectedSuccess(self, test):
"""
        Called when a test was expected to fail, but succeeded.
"""
self.currentTestInfo.result = TestInfo.RESULT.UNEXPECTED_SUCCESS
super(XMLTestResult, self).addUnexpectedSuccess(test)
def _exc_info_to_string(self, err, test):
"""
Converts a sys.exc_info()-style tuple of values into a string.
"""
exctype, value, tb = err
# Skip test runner traceback levels
while tb and self._is_relevant_tb_level(tb):
tb = tb.tb_next
if exctype is test.failureException:
# Skip assert*() traceback levels
length = self._count_relevant_tb_levels(tb)
msgLines = traceback.format_exception(exctype, value, tb, length)
else:
msgLines = traceback.format_exception(exctype, value, tb)
if self.buffer:
output = self._stdout_buffer.getvalue()
error = self._stderr_buffer.getvalue()
if output:
if not output.endswith('\n'):
output += '\n'
msgLines.append(STDOUT_LINE % output)
if error:
if not error.endswith('\n'):
error += '\n'
msgLines.append(STDERR_LINE % error)
return ''.join(msgLines)
def test_method_name(self, test):
"""
Returns the test method name.
"""
test_id = test.id()
return test_id.split('.')[-1]
def test_case_name(self, test):
"""
Returns test case name
"""
testcase = type(test)
module = testcase.__module__ + '.'
if module == '__main__.':
module = ''
return module + testcase.__name__
def dump_xml(self, output_dir):
"""
Dumps test result to xml
"""
self.buffer = False
if not os.path.exists(output_dir):
os.makedirs(output_dir)
with open(os.path.join(output_dir, 'junit.xml'), 'w') as output:
document = XMLGenerator(output, 'utf-8')
document.startDocument()
document.startElement('testsuites', AttributesImpl({}))
suites = groupby(self.testInfos,
key=lambda test_info: self.test_case_name(
test_info.test_method))
for suite_name, suite in suites:
document.startElement('testsuite',
AttributesImpl({'name': suite_name}))
for test_info in suite:
document.startElement('testcase', AttributesImpl({
'classname': suite_name,
'name': self.test_method_name(test_info.test_method),
'time': '%3f' % total_seconds(
test_info.end_time - test_info.start_time)
}))
if test_info.result == TestInfo.RESULT.ERROR:
document.startElement('error', AttributesImpl({
'message': smart_text(test_info.err[1])
}))
document.characters(self._exc_info_to_string(
test_info.err, test_info.test_method))
document.endElement('error')
elif test_info.result == TestInfo.RESULT.FAILURE:
document.startElement('failure', AttributesImpl({
'message': smart_text(test_info.err[1])
}))
document.characters(self._exc_info_to_string(
test_info.err, test_info.test_method))
document.endElement('failure')
elif test_info.result == \
TestInfo.RESULT.UNEXPECTED_SUCCESS:
document.startElement('error', AttributesImpl({
'message': 'Unexpected success'
}))
document.endElement('error')
elif test_info.result == TestInfo.RESULT.SKIPPED:
document.startElement('skipped', AttributesImpl({}))
document.characters(test_info.reason)
document.endElement('skipped')
if test_info.stdout:
document.startElement('system-out', AttributesImpl({}))
document.characters(test_info.stdout)
document.endElement('system-out')
if test_info.stderr:
document.startElement('system-err', AttributesImpl({}))
document.characters(test_info.stderr)
document.endElement('system-err')
document.endElement('testcase')
document.endElement('testsuite')
document.endElement('testsuites')
document.endDocument()
| bsd-3-clause | 1,724,583,923,445,265,000 | 36.141631 | 79 | 0.544835 | false |
dshean/pygeotools | pygeotools/make_stack.py | 1 | 3176 | #! /usr/bin/env python
#Generate stack from input rasters
import os
import sys
import argparse
import numpy as np
from pygeotools.lib import malib
#Hack to work around file open limit
#import resource
#resource.setrlimit(resource.RLIMIT_CORE,(resource.RLIM_INFINITY, resource.RLIM_INFINITY))
def getparser():
parser = argparse.ArgumentParser(description='Utility to warp stacks of rasters to the same res/extent/proj')
parser.add_argument('-tr', default='max', help='Output resolution (default: %(default)s)')
parser.add_argument('-te', default='union', help='Output extent (default: %(default)s)')
parser.add_argument('-t_srs', default='first', help='Output projection (default: %(default)s)')
parser.add_argument('-outdir', default=None, help='Output directory')
parser.add_argument('-stack_fn', default=None, help='Output filename')
parser.add_argument('-min_dt_ptp', type=float, default=np.nan, help='Minimum number of days between first and last obs for trend calculation at each valid pixel')
parser.add_argument('-min_n', type=int, default=2, help='Minimum number of obs for trend calculation at each valid pixel')
parser.add_argument('-n_cpu', default=None, help='Number of parallel processes to use for robust trend calculation')
parser.add_argument('--trend', dest='trend', action='store_true')
parser.add_argument('--robust', dest='robust', action='store_true')
parser.add_argument('--no-trend', dest='trend', action='store_false')
parser.add_argument('--med', dest='med', action='store_true')
parser.add_argument('--no-med', dest='med', action='store_false')
parser.add_argument('--stats', dest='stats', action='store_true')
parser.add_argument('--no-stats', dest='stats', action='store_false')
parser.add_argument('--save', dest='save', action='store_true')
parser.add_argument('--no-save', dest='save', action='store_false')
parser.add_argument('--datestack', dest='datestack', action='store_true')
parser.add_argument('--no-datestack', dest='datestack', action='store_false')
parser.add_argument('--sort', dest='sort', action='store_true')
parser.add_argument('--no-sort', dest='sort', action='store_false')
parser.add_argument('src_fn_list', nargs='+', help='Input filenames (img1.tif img2.tif ...)')
parser.set_defaults(trend=True, med=False, stats=True, save=True, datestack=True, sort=True)
return parser
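# A hypothetical invocation sketch (filenames and flag values are made up;
# the flags are those defined in getparser() above):
#   make_stack.py -tr 30 -te union -outdir stack_out --med dem_2010.tif dem_2012.tif dem_2014.tif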
def main():
parser = getparser()
args = parser.parse_args()
if args.stack_fn is not None:
if os.path.exists(args.stack_fn):
sys.exit("Found existing stack_fn: %s" % args.stack_fn)
#Note: res and extent are passed directly to warplib.memwarp_multi_fn, so can be many types
s = malib.DEMStack(fn_list=args.src_fn_list, stack_fn=args.stack_fn, outdir=args.outdir, \
res=args.tr, extent=args.te, srs=args.t_srs, \
trend=args.trend, robust=args.robust, n_thresh=args.min_n, min_dt_ptp=args.min_dt_ptp, n_cpu=args.n_cpu, \
med=args.med, stats=args.stats, save=args.save, sort=args.sort, datestack=args.datestack)
print(s.stack_fn)
if __name__ == '__main__':
main()
| mit | -5,951,411,335,719,690,000 | 52.830508 | 166 | 0.687657 | false |
CitrineInformatics/python-citrination-client | citrination_client/models/tests/test_service_status.py | 1 | 4405 | from citrination_client.models import ServiceStatus
from citrination_client.base.errors import CitrinationClientError
import pytest
example_status_response_dict = {
"reason": "Please wait for machine learning features to become available",
"ready": True,
"context": "notice",
"event": {
"title": "Initializing machine learning services",
"subtitle": "Doin some other stuff",
"normalizedProgress": 1.0,
"subevent": {
"title": "A slightly more granular description of what were doing",
"subtitle": "An even more granular description of what were doing",
"normalizedProgress": 1.0
}
}
}
def test_can_initialize_from_response_dict():
status = ServiceStatus.from_response_dict(example_status_response_dict)
assert status.is_ready()
assert status.reason == example_status_response_dict["reason"]
assert status.context == example_status_response_dict["context"]
event = status.event
assert event.title == example_status_response_dict["event"]["title"]
assert event.subtitle == example_status_response_dict["event"]["subtitle"]
assert event.normalized_progress == example_status_response_dict["event"]["normalizedProgress"]
subevent = event.subevent
assert subevent.title == example_status_response_dict["event"]["subevent"]["title"]
assert subevent.subtitle == example_status_response_dict["event"]["subevent"]["subtitle"]
assert subevent.normalized_progress == example_status_response_dict["event"]["subevent"]["normalizedProgress"]
example_status_response_dict_without_event = {
"reason": "Please wait for machine learning features to become available",
"ready": True,
"context": "notice"
}
def test_can_initialize_from_response_dict_without_event():
status = ServiceStatus.from_response_dict(example_status_response_dict_without_event)
assert status.is_ready()
assert status.reason == example_status_response_dict_without_event["reason"]
assert status.context == example_status_response_dict_without_event["context"]
assert status.event is None
example_status_response_dict_without_subevent = {
"reason": "Please wait for machine learning features to become available",
"ready": True,
"context": "notice",
"event": {
"title": "Initializing machine learning services",
"subtitle": "Doin some other stuff",
"normalizedProgress": 1.0
}
}
def test_can_initialize_from_response_dict_without_subevent():
status = ServiceStatus.from_response_dict(example_status_response_dict_without_subevent)
assert status.is_ready()
assert status.reason == example_status_response_dict_without_subevent["reason"]
assert status.context == example_status_response_dict_without_subevent["context"]
event = status.event
assert event.title == example_status_response_dict_without_subevent["event"]["title"]
assert event.subtitle == example_status_response_dict_without_subevent["event"]["subtitle"]
assert event.normalized_progress == example_status_response_dict_without_subevent["event"]["normalizedProgress"]
assert event.subevent is None
example_status_response_dict_not_ready = {
"reason": "Please wait for machine learning features to become available",
"ready": False,
"context": "notice",
"event": {
"title": "Initializing machine learning services",
"subtitle": "Doin some other stuff",
"normalizedProgress": 0.33
}
}
def test_can_initialize_from_response_dict_not_ready():
status = ServiceStatus.from_response_dict(example_status_response_dict_not_ready)
assert not status.is_ready()
assert status.reason == example_status_response_dict_not_ready["reason"]
assert status.context == example_status_response_dict_not_ready["context"]
event = status.event
assert event.title == example_status_response_dict_not_ready["event"]["title"]
assert event.subtitle == example_status_response_dict_not_ready["event"]["subtitle"]
assert event.normalized_progress == example_status_response_dict_not_ready["event"]["normalizedProgress"]
assert event.subevent is None
example_status_response_dict_nonsense = {
"reason": "Please wait for machine learning features to become available",
"ready": True,
"context": "notice",
"event": {
"title": "Initializing machine learning services",
"subtitle": "Doin some other stuff",
"normalizedProgress": 0.33
}
}
| apache-2.0 | 4,386,586,981,293,500,400 | 36.649573 | 116 | 0.720999 | false |
txomon/SpockBot | spock/plugins/core/net.py | 1 | 9269 | """
Provides an asynchronous, crypto and compression aware socket for connecting to
servers and processing incoming packet data.
Coordinates with the Timers plugin to honor clock-time timers
"""
import logging
import select
import socket
import time
from Crypto.Cipher import AES
from spock import utils
from spock.mcp import mcdata, mcpacket
from spock.plugins.base import PluginBase
from spock.utils import pl_announce
logger = logging.getLogger('spock')
class AESCipher(object):
def __init__(self, shared_secret):
# Name courtesy of dx
self.encryptifier = AES.new(shared_secret, AES.MODE_CFB,
IV=shared_secret)
self.decryptifier = AES.new(shared_secret, AES.MODE_CFB,
IV=shared_secret)
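        # Reusing the shared secret as the IV, together with PyCrypto's default
        # 8-bit CFB segment size, presumably matches the AES/CFB8 stream cipher
        # that the Minecraft protocol expects for packet encryption.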
def encrypt(self, data):
return self.encryptifier.encrypt(data)
def decrypt(self, data):
return self.decryptifier.decrypt(data)
class SelectSocket(object):
def __init__(self, timer):
self.sending = False
self.timer = timer
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setblocking(False)
self.close = self.sock.close
self.shutdown = self.sock.shutdown
self.recv = self.sock.recv
self.send = self.sock.send
def poll(self):
flags = []
if self.sending:
self.sending = False
slist = [(self.sock,), (self.sock,), (self.sock,)]
else:
slist = [(self.sock,), (), (self.sock,)]
timeout = self.timer.get_timeout()
if timeout >= 0:
slist.append(timeout)
try:
rlist, wlist, xlist = select.select(*slist)
except select.error as e:
logger.error("SELECTSOCKET: Socket Error: %s", str(e))
rlist = []
wlist = []
xlist = []
if rlist:
flags.append('SOCKET_RECV')
if wlist:
flags.append('SOCKET_SEND')
if xlist:
flags.append('SOCKET_ERR')
return flags
def reset(self):
self.sock.close()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setblocking(False)
class NetCore(object):
def __init__(self, sock, event):
self.sock = sock
self.event = event
self.host = None
self.port = None
self.connected = False
self.encrypted = False
self.proto_state = mcdata.HANDSHAKE_STATE
self.comp_state = mcdata.PROTO_COMP_OFF
self.comp_threshold = -1
self.sbuff = b''
self.rbuff = utils.BoundBuffer()
def connect(self, host='localhost', port=25565):
self.host = host
self.port = port
try:
logger.info("NETCORE: Attempting to connect to host: %s port: %s",
host, port)
# Set the connect to be a blocking operation
self.sock.sock.setblocking(True)
self.sock.sock.connect((self.host, self.port))
self.sock.sock.setblocking(False)
self.connected = True
self.event.emit('connect', (self.host, self.port))
logger.info("NETCORE: Connected to host: %s port: %s", host, port)
except socket.error as error:
logger.error("NETCORE: Error on Connect: %s", str(error))
def set_proto_state(self, state):
self.proto_state = state
self.event.emit(mcdata.state_lookup[state] + '_STATE')
def set_comp_state(self, threshold):
self.comp_threshold = threshold
if threshold >= 0:
self.comp_state = mcdata.PROTO_COMP_ON
def push(self, packet):
data = packet.encode(self.comp_state, self.comp_threshold)
self.sbuff += (self.cipher.encrypt(data) if self.encrypted else data)
self.event.emit(packet.ident, packet)
self.event.emit(packet.str_ident, packet)
self.sock.sending = True
def push_packet(self, ident, data):
self.push(mcpacket.Packet(ident, data))
def read_packet(self, data=b''):
self.rbuff.append(
self.cipher.decrypt(data) if self.encrypted else data)
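        # Decode as many complete packets as the buffer currently holds; on a
        # partial packet the BufferUnderflowException handler reverts the
        # buffer to its saved position and waits for more data from the socket.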
while True:
self.rbuff.save()
try:
packet = mcpacket.Packet(ident=(
self.proto_state,
mcdata.SERVER_TO_CLIENT
)).decode(self.rbuff, self.comp_state)
except utils.BufferUnderflowException:
self.rbuff.revert()
break
except mcpacket.PacketDecodeFailure as err:
logger.warning('NETCORE: Packet decode failed')
logger.warning(
'NETCORE: Failed packet ident is probably: %s',
err.packet.str_ident
)
self.event.emit('PACKET_ERR', err)
break
self.event.emit(packet.ident, packet)
self.event.emit(packet.str_ident, packet)
def enable_crypto(self, secret_key):
self.cipher = AESCipher(secret_key)
self.encrypted = True
def disable_crypto(self):
self.cipher = None
self.encrypted = False
def reset(self):
self.connected = False
self.sock.reset()
self.__init__(self.sock, self.event)
disconnect = reset
@pl_announce('Net')
class NetPlugin(PluginBase):
requires = ('Event', 'Timers')
defaults = {
'bufsize': 4096,
'sock_quit': True,
}
events = {
'event_tick': 'tick',
'SOCKET_RECV': 'handle_recv',
'SOCKET_SEND': 'handle_send',
'SOCKET_ERR': 'handle_err',
'SOCKET_HUP': 'handle_hup',
'PLAY<Disconnect': 'handle_disconnect',
'HANDSHAKE>Handshake': 'handle_handshake',
'LOGIN<Login Success': 'handle_login_success',
'LOGIN<Set Compression': 'handle_comp',
'PLAY<Set Compression': 'handle_comp',
'kill': 'handle_kill',
}
def __init__(self, ploader, settings):
super(NetPlugin, self).__init__(ploader, settings)
self.bufsize = self.settings['bufsize']
self.sock_quit = self.settings['sock_quit']
self.sock = SelectSocket(self.timers)
self.net = NetCore(self.sock, self.event)
self.sock_dead = False
ploader.provides('Net', self.net)
def tick(self, name, data):
if self.net.connected:
for flag in self.sock.poll():
self.event.emit(flag)
else:
timeout = self.timers.get_timeout()
if timeout == -1:
time.sleep(1)
else:
time.sleep(timeout)
    # SOCKET_RECV - Socket is ready to receive data
def handle_recv(self, name, data):
if self.net.connected:
try:
data = self.sock.recv(self.bufsize)
# print('read:', len(data))
if not data:
self.event.emit('SOCKET_HUP')
return
self.net.read_packet(data)
except socket.error as error:
self.event.emit('SOCKET_ERR', error)
# SOCKET_SEND - Socket is ready to send data and Send buffer contains
# data to send
def handle_send(self, name, data):
if self.net.connected:
try:
sent = self.sock.send(self.net.sbuff)
self.net.sbuff = self.net.sbuff[sent:]
if self.net.sbuff:
self.sending = True
except socket.error as error:
logger.error(str(error))
self.event.emit('SOCKET_ERR', error)
# SOCKET_ERR - Socket Error has occured
def handle_err(self, name, data):
self.net.reset()
logger.error("NETPLUGIN: Socket Error: %s", data)
self.event.emit('disconnect', data)
if self.sock_quit and not self.event.kill_event:
self.sock_dead = True
self.event.kill()
# SOCKET_HUP - Socket has hung up
def handle_hup(self, name, data):
self.net.reset()
logger.error("NETPLUGIN: Socket has hung up")
self.event.emit('disconnect', "Socket Hung Up")
if self.sock_quit and not self.event.kill_event:
self.sock_dead = True
self.event.kill()
# Handshake - Change to whatever the next state is going to be
def handle_handshake(self, name, packet):
self.net.set_proto_state(packet.data['next_state'])
# Login Success - Change to Play state
def handle_login_success(self, name, packet):
self.net.set_proto_state(mcdata.PLAY_STATE)
# Handle Set Compression packets
def handle_comp(self, name, packet):
self.net.set_comp_state(packet.data['threshold'])
def handle_disconnect(self, name, packet):
logger.info("NETPLUGIN: Disconnected: %s", packet.data['reason'])
self.event.emit('disconnect', packet.data['reason'])
# Kill event - Try to shutdown the socket politely
def handle_kill(self, name, data):
logger.info("NETPLUGIN: Kill event recieved, shutting down socket")
if not self.sock_dead:
self.sock.shutdown(socket.SHUT_WR)
self.sock.close()
| mit | -8,122,943,707,253,785,000 | 32.828467 | 79 | 0.575467 | false |
jameshensman/VFF | experiments/setting_a_b_M/gpr_special.py | 1 | 4803 | # Copyright 2016 James Hensman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, absolute_import
import numpy as np
import gpflow
import tensorflow as tf
from vff.spectral_covariance import make_Kuu, make_Kuf
from gpflow import settings
float_type = settings.dtypes.float_type
class GPR_1d(gpflow.models.GPModel):
def __init__(self, X, Y, ms, a, b, kern,
mean_function=gpflow.mean_functions.Zero()):
"""
In this special edition of VFF-GPR, we allow the boundary to be inside the data.
        This version is not very efficient. It recomputes the Kuf matrix at
        each iteration, does not precompute any quantities, and does not
        exploit Kuu's structure.
        Designed only for a demonstration with a, b inside the data limits;
        for a practical version, use the VFF package.
"""
assert X.shape[1] == 1
assert isinstance(kern, (gpflow.kernels.Matern12,
gpflow.kernels.Matern32,
gpflow.kernels.Matern52))
kern = kern
likelihood = gpflow.likelihoods.Gaussian()
gpflow.models.GPModel.__init__(self, X, Y, kern,
likelihood, mean_function)
self.num_data = X.shape[0]
self.num_latent = Y.shape[1]
self.a = a
self.b = b
self.ms = ms
@gpflow.params_as_tensors
def _build_likelihood(self):
num_inducing = tf.size(self.ms)
num_data = tf.shape(self.Y)[0]
output_dim = tf.shape(self.Y)[1]
err = self.Y - self.mean_function(self.X)
Kdiag = self.kern.Kdiag(self.X)
Kuf = make_Kuf(self.kern, self.X, self.a, self.b, self.ms)
Kuu = make_Kuu(self.kern, self.a, self.b, self.ms)
Kuu = Kuu.get()
sigma = tf.sqrt(self.likelihood.variance)
# Compute intermediate matrices
L = tf.cholesky(Kuu)
A = tf.matrix_triangular_solve(L, Kuf) / sigma
AAT = tf.matmul(A, tf.transpose(A))
B = AAT + tf.eye(num_inducing * 2 - 1, dtype=float_type)
LB = tf.cholesky(B)
log_det_B = 2. * tf.reduce_sum(tf.log(tf.diag_part(LB)))
c = tf.matrix_triangular_solve(LB, tf.matmul(A, err)) / sigma
# compute log marginal bound
ND = tf.cast(num_data * output_dim, float_type)
D = tf.cast(output_dim, float_type)
bound = -0.5 * ND * tf.log(2 * np.pi * self.likelihood.variance)
bound += -0.5 * D * log_det_B
bound += -0.5 * tf.reduce_sum(tf.square(err))/self.likelihood.variance
bound += 0.5 * tf.reduce_sum(tf.square(c))
bound += -0.5 * tf.reduce_sum(Kdiag)/self.likelihood.variance
bound += 0.5 * tf.reduce_sum(tf.diag_part(AAT))
return bound
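    # Written out, the quantity assembled above is the collapsed variational
    # bound (terms mirror the code line by line, with sigma^2 = likelihood
    # variance, B = I + A A^T and c = LB^{-1} A err / sigma):
    #
    #   bound = -0.5 * N * D * log(2 * pi * sigma^2)
    #           - 0.5 * D * log|B|
    #           - 0.5 * ||err||^2 / sigma^2 + 0.5 * ||c||^2
    #           - 0.5 * sum(Kdiag) / sigma^2 + 0.5 * tr(A A^T)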
@gpflow.params_as_tensors
def _build_predict(self, Xnew, full_cov=False):
num_inducing = tf.size(self.ms)
err = self.Y - self.mean_function(self.X)
Kuf = make_Kuf(self.kern, self.X, self.a, self.b, self.ms)
Kuu = make_Kuu(self.kern, self.a, self.b, self.ms)
Kuu = Kuu.get()
sigma = tf.sqrt(self.likelihood.variance)
# Compute intermediate matrices
L = tf.cholesky(Kuu)
A = tf.matrix_triangular_solve(L, Kuf) / sigma
AAT = tf.matmul(A, tf.transpose(A))
B = AAT + tf.eye(num_inducing * 2 - 1, dtype=float_type)
LB = tf.cholesky(B)
c = tf.matrix_triangular_solve(LB, tf.matmul(A, err)) / sigma
Kus = make_Kuf(self.kern, Xnew, self.a, self.b, self.ms)
tmp1 = tf.matrix_triangular_solve(L, Kus, lower=True)
tmp2 = tf.matrix_triangular_solve(LB, tmp1, lower=True)
mean = tf.matmul(tf.transpose(tmp2), c)
if full_cov:
var = self.kern.K(Xnew) + \
tf.matmul(tf.transpose(tmp2), tmp2) - \
tf.matmul(tf.transpose(tmp1), tmp1)
var = var[:, :, None] * tf.ones(self.Y.shape[1], dtype=float_type)
else:
var = self.kern.Kdiag(Xnew) + \
tf.reduce_sum(tf.square(tmp2), 0) - \
tf.reduce_sum(tf.square(tmp1), 0)
            var = var[:, None]  # * tf.ones(self.Y.shape[1], dtype=float_type)
return mean + self.mean_function(Xnew), var
| apache-2.0 | -2,842,001,661,177,074,000 | 39.025 | 88 | 0.600042 | false |
angr/angr | angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py | 1 | 7361 | from typing import Dict, TYPE_CHECKING
import logging
import pyvex
import archinfo
from .... import options, BP_BEFORE
from ....blade import Blade
from ....annocfg import AnnotatedCFG
from ....exploration_techniques import Slicecutor
from .resolver import IndirectJumpResolver
if TYPE_CHECKING:
from angr.block import Block
l = logging.getLogger(name=__name__)
class OverwriteTmpValueCallback:
def __init__(self, gp_value):
self.gp_value = gp_value
def overwrite_tmp_value(self, state):
state.inspect.tmp_write_expr = state.solver.BVV(self.gp_value, state.arch.bits)
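# The callback above is attached further down (see
# MipsElfFastResolver._set_gp_load_callback) to a 'tmp_write' inspection
# breakpoint, so that whenever the matched temporary is written the expression
# is replaced with the known $gp value. Sketch of the wiring used below:
#
#   state.inspect.make_breakpoint('tmp_write', when=BP_BEFORE,
#                                 action=OverwriteTmpValueCallback(gp_value).overwrite_tmp_value)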
class MipsElfFastResolver(IndirectJumpResolver):
def __init__(self, project):
super(MipsElfFastResolver, self).__init__(project, timeless=True)
def filter(self, cfg, addr, func_addr, block, jumpkind):
if not isinstance(self.project.arch, (archinfo.ArchMIPS32, archinfo.ArchMIPS64, )):
return False
return True
def resolve(self, cfg, addr, func_addr, block, jumpkind):
"""
Resolves the indirect jump in MIPS ELF binaries where all external function calls are indexed using gp.
:param cfg: A CFG instance.
:param int addr: IRSB address.
:param int func_addr: The function address.
:param pyvex.IRSB block: The IRSB.
:param str jumpkind: The jumpkind.
:return: If it was resolved and targets alongside it
:rtype: tuple
"""
project = self.project
b = Blade(cfg.graph, addr, -1, cfg=cfg, project=project, ignore_sp=True, ignore_bp=True,
ignored_regs=('gp',), cross_insn_opt=False, stop_at_calls=True
)
sources = [n for n in b.slice.nodes() if b.slice.in_degree(n) == 0]
if not sources:
return False, []
source = sources[0]
source_addr = source[0]
annotated_cfg = AnnotatedCFG(project, None, detect_loops=False)
annotated_cfg.from_digraph(b.slice)
state = project.factory.blank_state(addr=source_addr, mode="fastpath",
remove_options=options.refs,
# suppress unconstrained stack reads for `gp`
add_options={
options.SYMBOL_FILL_UNCONSTRAINED_REGISTERS,
options.SYMBOL_FILL_UNCONSTRAINED_MEMORY,
options.NO_CROSS_INSN_OPT,
},
)
state.regs._t9 = func_addr
func = cfg.kb.functions.function(addr=func_addr)
# see if gp is used on this slice at all
gp_used = self._is_gp_used_on_slice(project, b)
gp_value = None
if gp_used:
if 'gp' not in func.info:
                # this might be a special case: gp is only used once in this function, and it can be initialized right
# before its use site.
# however, it should have been determined in CFGFast
# cannot determine the value of gp. quit
pass
else:
gp_value = func.info['gp']
if gp_value is None:
l.warning('Failed to determine value of register gp for function %#x.', func.addr)
return False, []
# Special handling for cases where `gp` is stored on the stack
gp_offset = project.arch.registers['gp'][0]
self._set_gp_load_callback(state, b, project, gp_offset, gp_value)
state.regs._gp = gp_value
simgr = self.project.factory.simulation_manager(state)
simgr.use_technique(Slicecutor(annotated_cfg, force_sat=True))
simgr.run()
if simgr.cut:
# pick the successor that is cut right after executing `addr`
try:
target_state = next(iter(cut for cut in simgr.cut if cut.history.addr == addr))
except StopIteration:
l.info("Indirect jump at %#x cannot be resolved by %s.", addr, repr(self))
return False, [ ]
target = target_state.addr
if self._is_target_valid(cfg, target):
l.debug("Indirect jump at %#x is resolved to target %#x.", addr, target)
return True, [ target ]
l.info("Indirect jump at %#x is resolved to target %#x, which seems to be invalid.", addr, target)
return False, [ ]
l.info("Indirect jump at %#x cannot be resolved by %s.", addr, repr(self))
return False, [ ]
@staticmethod
def _set_gp_load_callback(state, blade, project, gp_offset, gp_value):
got_gp_stack_store = False
tmps = {}
for block_addr_in_slice in set(slice_node[0] for slice_node in blade.slice.nodes()):
for stmt in project.factory.block(block_addr_in_slice, cross_insn_opt=False).vex.statements:
if isinstance(stmt, pyvex.IRStmt.WrTmp) and isinstance(stmt.data, pyvex.IRExpr.Load):
# Load from memory to a tmp - assuming it's loading from the stack
tmps[stmt.tmp] = 'stack'
elif isinstance(stmt, pyvex.IRStmt.Put) and stmt.offset == gp_offset:
if isinstance(stmt.data, pyvex.IRExpr.RdTmp):
tmp_offset = stmt.data.tmp # pylint:disable=cell-var-from-loop
if tmps.get(tmp_offset, None) == 'stack':
# found the load from stack
# we must make sure value of that temporary variable equals to the correct gp value
state.inspect.make_breakpoint('tmp_write', when=BP_BEFORE,
condition=lambda s, bbl_addr_=block_addr_in_slice,
tmp_offset_=tmp_offset:
s.scratch.bbl_addr == bbl_addr_ and s.inspect.tmp_write_num == tmp_offset_,
action=OverwriteTmpValueCallback(
gp_value).overwrite_tmp_value
)
got_gp_stack_store = True
break
if got_gp_stack_store:
break
@staticmethod
def _is_gp_used_on_slice(project, b: Blade) -> bool:
gp_offset = project.arch.registers['gp'][0]
blocks_on_slice: Dict[int, 'Block'] = { }
for block_addr, block_stmt_idx in b.slice.nodes():
if block_addr not in blocks_on_slice:
blocks_on_slice[block_addr] = project.factory.block(block_addr, cross_insn_opt=False)
block = blocks_on_slice[block_addr]
stmt = block.vex.statements[block_stmt_idx]
if isinstance(stmt, pyvex.IRStmt.WrTmp) \
and isinstance(stmt.data, pyvex.IRExpr.Get) \
and stmt.data.offset == gp_offset:
gp_used = True
break
else:
gp_used = False
return gp_used
| bsd-2-clause | 5,518,427,772,508,819,000 | 42.556213 | 133 | 0.537835 | false |
csomerlot/WIPTools | addin/Install/SingleBMP.py | 1 | 12719 |
# Import system modules
import sys, os
import Helper
import regression
import arcpy
from arcpy import env
from arcpy.sa import *
hp = Helper.Helper(sys.argv)
try:
# Local variables
Rural_1yrQ = Raster(os.path.join(hp.Workspace + "\\WIPoutput.mdb", "UndevQ"))
BMPpts = os.path.join(hp.SWorkspace, "BMPptsSin.shp")
Units = hp.units['size']
wtredBMPs = os.path.join(hp.SWorkspace, "wtredBMPs")
bmp_noclip = sys.argv[1]
existing_efficiencies = sys.argv[5].split(';')
proposed_efficiencies = sys.argv[6].split(';')
landuse = sys.argv[7]
if sys.argv[8] != "#":
stream_reductions = sys.argv[8].split(';')
strlngth = sys.argv[9]
#~ parameters = hp.GetAlias(existing_efficiencies)
Streams_nd = Raster(os.path.join(hp.Workspace + "\\WIPoutput.mdb", "streams"))
Stream_Raster = hp.RemoveNulls(Streams_nd)
arcpy.CopyRaster_management(hp.Workspace + "\\WIPoutput.mdb\\cumda", os.path.join(hp.SWorkspace, "cumda"))
Cum_da = Raster(os.path.join(hp.SWorkspace, "cumda"))
flowdir = ExtractByMask(os.path.join(hp.Workspace + "\\WIPoutput.mdb", "flowdir"), hp.Mask )
flowdir.save(os.path.join(hp.SWorkspace, "flowdir"))
if landuse == "Existing": LU = "E"
else: LU = "F"
vecMask = os.path.join(hp.SWorkspace, "vectMask.shp")
arcpy.RasterToPolygon_conversion(hp.Mask, vecMask, "SIMPLIFY", "Value")
existing_params = hp.GetAlias(existing_efficiencies)
proposed_params = hp.GetAlias(proposed_efficiencies)
streamreduc_params = hp.GetAlias(stream_reductions)
    # dict.keys().sort() returns None, so the original comparison could never fail;
    # compare sorted key lists instead.
    if not (sorted(existing_params.keys()) == sorted(proposed_params.keys()) == sorted(streamreduc_params.keys())):
        raise Exception("Parameters found for Existing efficiencies, Proposed efficiencies, and Stream Reductions do not match")
params = {}
exec(hp.models['ProdTrans']['input'][-1])
hp.log("Preparing input BMPs...")
hp.SetPIDs(bmp_noclip)
arcpy.Clip_analysis(bmp_noclip, vecMask, BMPpts)
    for p in existing_params: # If we switch the loops below to be param-first, point-second, then we could include this stuff in the param loop. Right now we don't want to run this calc for every point, hence this bit of code duplication outside the main loops
pn = p[:10].strip()
TSSprod = os.path.join(hp.Workspace + "\\WIPoutput.mdb", "p" + LU + pn)
pointsrc = ""
if os.path.exists(os.path.join(hp.SWorkspace, "pt" + pn)):
pointsrc = "pt" + pn
defEro = 0
if p in params:
defEro = params[p]['DefEro']
hp.log("Calculate Urban/Rural ratio...")
Cumulative_Impervious = Raster(os.path.join(hp.Workspace + "\\WIPoutput.mdb", "cumimpcovlake"))
## usgs_calcs = Helper.USGSVars(hp.Basin)
urbanQcpbas = regression.urbanQcp(hp.Basin, Cum_da, Cumulative_Impervious)
URratio = urbanQcpbas / Rural_1yrQ
hp.log("Add erosivity to existing %s production..." % p)
        TSSP_ero_ext = Helper.CalcErosivity(hp, defEro, TSSprod, pointsrc, URratio, Stream_Raster) # need this to be here so that it is not repeated many times inside CP, even if there are no CP points
arcpy.CopyRaster_management(TSSP_ero_ext, os.path.join(hp.SWorkspace, "ero") + p[:10].strip())
hp.log("Checking for input BMPs in your area...")
    # GetCount_management returns a Result object; convert it to an int before comparing
    all = int(arcpy.GetCount_management(BMPpts).getOutput(0))
if all <= 1:
raise Exception("You must have more than one point to run this tool!")
hp.log("Looping through input BMPs...")
BMProws = arcpy.SearchCursor(BMPpts)
counter = 0
count = 1
#~ while BMProw:
for BMProw in BMProws:
print "%s\n" % (75*'-')
print BMPpts
BMP_FID = BMProw.getValue("PID")
hp.log(" Processing point %s of %s..." % (count, all))
print " %s BMPID: %s\n" % (BMPpts, BMP_FID)
bmp_type = BMProw.getValue(sys.argv[2])
bmp_Ex1yr = float(BMProw.getValue(sys.argv[3]))
bmp_Prop1yr = float(BMProw.getValue(sys.argv[4]))
hp.log(" Found bmp type of %s, existing Q1 of %s, and proposed Q1 of %s for PID %s" % (bmp_type, bmp_Ex1yr, bmp_Prop1yr, BMP_FID))
SinBMPpts = os.path.join(hp.SWorkspace, "SinBMPpts.shp")
hp.GetSubset(BMPpts, SinBMPpts, " \"PID\" = %s " % BMP_FID)
SingleBMP = os.path.join(hp.SWorkspace, "SingleBMP")
hp.log("Convert this project to a raster mask...")
arcpy.FeatureToRaster_conversion(os.path.join(hp.SWorkspace,SinBMPpts), "PID", SingleBMP, flowdir)
SinBMPmask = Reclassify(SingleBMP, "VALUE", "NoData 0; 0.001 100000 1", "DATA")
SinBMPmask.save(os.path.join(hp.SWorkspace,"SinBMPmask"))
for p in existing_params:
pn = p[:10].strip()
K = os.path.join(hp.SWorkspace, "K" + pn)
TSSP_ero_ext = Raster(os.path.join(hp.SWorkspace, "ero" + pn))
sum, chanp_red, washoff_red = 0, 0, 0
bmp_eeff = float(BMProw.getValue(existing_params[p]))
bmp_peff = float(BMProw.getValue(proposed_params[p]))
stream_red_per_ft = float(BMProw.getValue(streamreduc_params[p]))
hp.log(" Found existing bmp efficiency of %s, proposed bmp efficiency of %s, and stream reduction of %s for PID %s" % (bmp_eeff, bmp_peff, stream_red_per_ft, BMP_FID))
pointsrc = ""
if os.path.exists(os.path.join(hp.SWorkspace, "pt" + pn)):
pointsrc = "pt" + pn
defEro = 0
if p in params:
defEro = params[p]['DefEro']
if bmp_type.lower() in ['bmp', 'new bmp']:
if bmp_Prop1yr < bmp_Ex1yr:
Channel_Prot = 1
else:
Channel_Prot = 0
if not defEro:
hp.log(" No Default erosivity for this BMP")
Channel_Prot = 0
if not Channel_Prot:
hp.log(" No Channel Protection from this BMP")
else:
hp.log(" Calculating Channel Protection from this BMP")
#~ arcpy.Merge_management ("ChanBMPpts.shp; SinBMPpts.shp", "merge.shp")
ModCumDa, thisBMPras, this_ds = regression.ChannelProtection(hp, SinBMPpts, sys.argv[4])
ModCumDa.save(os.path.join(hp.SWorkspace,"modcumda"))
this_ds.save(os.path.join(hp.SWorkspace,"this_ds"))
hp.log("Calculate Future Urban/Rural ratio...")
URratio = this_ds / Rural_1yrQ
URratio.save(os.path.join(hp.SWorkspace,"urratio"))
TSSP_ero = Helper.CalcErosivity(hp, defEro, TSSprod, pointsrc, URratio, Stream_Raster)
TSSP_ero.save(os.path.join(hp.SWorkspace,"tssp_ero"))
hp.log("%s reduction..." % p)
TSSred = TSSP_ero_ext - TSSP_ero
TSSred.save(os.path.join(hp.SWorkspace,"tssred"))
hp.log("Tabulating %s reduction..." % p)
chanp_red = hp.Zonal(TSSred)
print " %s Reduction component from Channel protection = %s\n" % (p, chanp_red)
if bmp_peff > bmp_eeff:
WQ_benefit = 1
else:
WQ_benefit = 0
if not WQ_benefit:
hp.log(" No Water Quality Benefit from this BMP")
else:
hp.log(" Calculating Water Quality Benefit from this BMP")
REMBMPpts = os.path.join(hp.SWorkspace,"RemBMPpts.shp")
hp.GetSubset(BMPpts, REMBMPpts, " \"PID\" <> %s AND %s > 0" % (BMP_FID, existing_params[p]))
#~ arcpy.CopyFeatures_management(BMPpts, )
#~ rows = arcpy.UpdateCursor(os.path.join(hp.SWorkspace,"RemBMPpts.shp"))
#~ row = rows.next()
#~ while row:
#~ if row.getValue("PID") == BMP_FID or float(row.getValue(existing_params[p])) <= 0:
#~ rows.deleteRow(row)
#~ row = rows.next()
#~ del row, rows
#~ hp.log("Adding erosivity to %s production..." % p)
data_ero = Helper.CalcErosivity(hp, defEro, TSSprod, pointsrc, URratio, Stream_Raster)
REMBMPs = (os.path.join(hp.SWorkspace, "REMBMPs"))
hp.log("Convert all other BMPs to Raster...")
arcpy.FeatureToRaster_conversion(REMBMPpts, existing_params[p], REMBMPs, flowdir)
BMPs = hp.RemoveNulls(REMBMPs)
wtredBMPs = ExtractByMask(BMPs / 100.0, hp.Mask)
arcpy.CopyRaster_management(data_ero, os.path.join(hp.SWorkspace,"data_ero"))
data_ero1 = Raster(os.path.join(hp.SWorkspace,"data_ero"))
counter +=1
TSSLoad = hp.BMP2DA(flowdir, pn+str(counter), data_ero1, wtredBMPs)
hp.log("%s reduction..." % p)
TSSLoadpt = TSSLoad * (bmp_peff - bmp_eeff) * SinBMPmask / 100
hp.log("Tabulating %s reduction..." % p)
washoff_red = hp.Zonal(TSSLoadpt)
print " %s Reduction component from Washoff benefit = %s\n" % (p, washoff_red)
WQ = washoff_red
sum = chanp_red + washoff_red
print TSSprod, sum
hp.log("Writing attributes")
hp.SetAtt(BMP_FID, hp.ShortName(p) + "red" + LU[0], sum, bmp_noclip)
if bmp_type.lower() in ['stream restoration']:
# Calculate in-stream reduction ################################
hp.log("Convert Stream Lengths to Raster...")
arcpy.env.extent = os.path.join(hp.SWorkspace, "flowdir")
arcpy.FeatureToRaster_conversion(os.path.join(hp.SWorkspace, "SinBMPpts.shp"), strlngth, os.path.join(hp.SWorkspace, "len"), flowdir)
slengths = Float(Raster(os.path.join(hp.SWorkspace, "len")))
thisstream = hp.AttExtract(slengths, flowdir, "thisstream", Stream_Raster, Units)
hp.log("Make mask...")
ThisBMPmask = Reclassify(thisstream, "Value", ".00001 100000 1;-100000 0 0; NoData 0", "DATA")
ThisBMPmask.save(os.path.join(hp.SWorkspace,"ThisBMPmask"))
hp.log("Calculate reduction...")
streamprod = (bmp_peff/ 100) * Raster(TSSprod) * ThisBMPmask * Power(URratio, 1.5)
streamprod.save(os.path.join(hp.SWorkspace,"streamprod"))
hp.log("Reclassify flowdirection to find straight paths...")
Flowdirs = Reclassify(flowdir, "VALUE", "1 1;2 0;4 1;8 0;16 1;32 0;64 1;128 0", "DATA")
hp.log("Reclassify flowdirection to find diagonal paths...")
Flowdird = Reclassify(flowdir, "VALUE", "1 0;2 1;4 0;8 1;16 0;32 1;64 0;128 1", "DATA")
hp.log("Calculate distance grid...")
Dist = (Flowdirs + Flowdird * 1.4142) * hp.units['size']
hp.log("Calculate length")
thislen = Dist * ThisBMPmask
dist_red = hp.Zonal(thislen) * stream_red_per_ft
print "stream_red_per_ft: %s, dist_red: %s" % (stream_red_per_ft, dist_red)
hp.log("Summarize Stream reduction from point...")
stream_red = hp.Zonal(streamprod) + dist_red
print "Stream reduction", stream_red
hp.log("Writing attributes")
hp.SetAtt(BMP_FID, hp.ShortName(p) + "red" + LU[0], stream_red, bmp_noclip)
count += 1
hp.Close()
except:
i, j, k = sys.exc_info()
hp.EH(i, j, k)
| gpl-3.0 | -7,616,929,208,638,953,000 | 46.636704 | 259 | 0.526378 | false |
baliga-lab/cmonkey2 | cmonkey/network.py | 1 | 13490 | # vi: sw=4 ts=4 et:
"""network.py - cMonkey network module
This file is part of cMonkey Python. Please see README and LICENSE for
more information and licensing details.
"""
import numpy as np
import logging
import os.path
import cmonkey.util as util
import cmonkey.datamatrix as dm
import cmonkey.scoring as scoring
# Python2/Python3 compatibility
try:
xrange
except NameError:
xrange = range
class Network:
"""class to represent a network graph.
The graph is considered undirected
For efficiency reasons, edges is a list of [source, target, weight]
"""
def __init__(self, name, edges, weight, dummy):
"""creates a network from a list of edges"""
self.name = name
self.edges = edges
self.weight = weight
self.__compute_edges_with_source()
def __compute_edges_with_source(self):
self.edges_with_source = {}
for edge in self.edges:
if edge[0] not in self.edges_with_source:
self.edges_with_source[edge[0]] = []
if edge[1] not in self.edges_with_source:
self.edges_with_source[edge[1]] = []
self.edges_with_source[edge[0]].append(edge)
self.edges_with_source[edge[1]].append(edge)
def validate(self, synonyms, genes):
"""Change the names in the network to have the standard names in the
        synonyms (elsewhere called the thesaurus). Problem: it does not
also rename the ratios matrix to the standard names
Keyword arguments:
synonyms -- The thesaurus.
genes -- The gene names from the ratios.
Usage:
self.validate(synonyms, genes)
"""
# remap first
new_edges = []
for n0, n1, score in self.edges:
n0 = synonyms[n0] if n0 in synonyms else n0
n1 = synonyms[n1] if n1 in synonyms else n1
new_edges.append((n0, n1, score))
self.edges = new_edges
self.__compute_edges_with_source()
# then validate
found = []
for g in genes:
primary = synonyms.get(g, g)
for n0, n1, score in self.edges:
if primary == n0 or primary == n1:
found.append(primary)
if len(found) < len(genes) / 2:
            print(self.edges)
            raise Exception("only %d genes found in edges" % len(found))
def num_edges(self):
"""returns the number of edges in this graph"""
return len(self.edges)
def total_score(self):
"""returns the sum of edge scores"""
return sum(edge[2] for edge in self.edges) * 2
def normalize_scores_to(self, score):
"""normalizes all edge scores so that they sum up to
the specified score"""
total = self.total_score()
if score != total:
# score_e / score_total * score == score_e * (score_total / score)
# we use this to save a division per loop iteration
scale = float(score) / float(total)
self.edges = [(edge[0], edge[1], edge[2] * scale) for edge in self.edges]
self.__compute_edges_with_source()
def edges_with_node(self, node):
"""returns the edges where node is a node of"""
if node in self.edges_with_source:
return self.edges_with_source[node]
else:
return []
def __repr__(self):
return "Network: %s\n# edges: %d\n" % (self.name,
len(self.edges))
@classmethod
def create(cls, name, edges, weight, organism=None, ratios=None,
check_size=True):
"""standard Factory method"""
logging.debug("Network.create() called with %d edges", len(edges))
if edges is None:
raise Exception("no edges specified in network '%s'" % name)
added = set([])
network_edges = []
nodes = set()
for edge in edges:
nodes.add(edge[0])
nodes.add(edge[1])
"""Shrink the number of edges to the ones that are actually usable. These
are selected by the following considerations:
# 1. check nodes that are in the thesaurus
# 2. check gene names that are in the ratios matrix, but not in the network
# 3. keep the nodes that are in the ratios and are in the thesaurus
"""
num_nodes_orig = len(nodes)
if organism:
thesaurus = organism.thesaurus()
nodes = {n for n in nodes if n in thesaurus}
if ratios:
cano_nodes = {thesaurus[n] for n in nodes}
cano_genes = {thesaurus[row] for row in ratios.row_names
if row in thesaurus}
probes_in = [gene for gene in cano_genes if gene in cano_nodes]
nodes = {n for n in nodes if thesaurus[n] in probes_in}
logging.debug("# nodes in network '%s': %d (of %d)", name, len(nodes), num_nodes_orig)
for edge in edges:
# we ignore self-edges, and edges with nodes not in the final nodes
if edge[0] != edge[1] and edge[0] in nodes and edge[1] in nodes:
key = "%s:%s" % (edge[0], edge[1])
key_rev = "%s:%s" % (edge[1], edge[0])
if key not in added and key_rev not in added:
network_edges.append((edge[0], edge[1], edge[2]))
added.add(key)
added.add(key_rev)
if check_size and len(network_edges) < 10:
raise Exception("Error: only %d edges in network '%s'" % (len(network_edges), name))
logging.debug("Created network '%s' with %d edges", name, len(network_edges))
return Network(name, network_edges, weight, 0)
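# Minimal usage sketch (hypothetical edge list; gene names and weights are made
# up for illustration, not taken from any real data set):
#
#   edges = [("geneA", "geneB", 1.0), ("geneB", "geneC", 0.5)]
#   nw = Network.create("string", edges, weight=0.5, check_size=False)
#   nw.normalize_scores_to(1000.0)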
COMPUTE_NETWORK = None
ALL_GENES = None
NETWORK_SCORE_MEMBERSHIP = None
def compute_network_scores(cluster):
"""Generic method to compute network scores"""
global COMPUTE_NETWORK, ALL_GENES, NETWORK_SCORE_MEMBERSHIP
network = COMPUTE_NETWORK
genes = sorted(NETWORK_SCORE_MEMBERSHIP.rows_for_cluster(cluster))
gene_scores = {}
for gene in genes:
# TODO: optimization: we can use numpy arrays for the scores array
# and then sum
edges = network.edges_with_node(gene)
for edge in edges:
other_gene = edge[0]
if other_gene == gene:
other_gene = edge[1]
if other_gene in ALL_GENES:
if other_gene not in gene_scores:
gene_scores[other_gene] = []
gene_scores[other_gene].append(edge[2])
final_gene_scores = {}
for gene, scores in gene_scores.items():
final_gene_scores[gene] = sum(scores) / len(genes)
final_gene_scores[gene] = -np.log(final_gene_scores[gene] + 1)
return final_gene_scores
class ScoringFunction(scoring.ScoringFunctionBase):
"""Network scoring function. Note that even though there are several
networks, scoring can't be generalized with the default ScoringCombiner,
since the scores are computed through weighted addition rather than
quantile normalization"""
def __init__(self, function_id, cmrun):
"""Create scoring function instance"""
scoring.ScoringFunctionBase.__init__(self, function_id, cmrun)
self.__networks = None
self.run_log = scoring.RunLog(function_id, cmrun.dbsession(),
self.config_params)
def initialize(self, args):
"""process additional parameters"""
self.weights = {nw['type']: nw['weight'] for nw in args['networks']}
def run_logs(self):
return [self.run_log]
def compute(self, iteration_result, ref_matrix=None):
"""overridden compute for storing additional information"""
result = scoring.ScoringFunctionBase.compute(self, iteration_result, ref_matrix)
iteration_result['networks'] = self.score_means
return result
def compute_force(self, iteration_result, ref_matrix=None):
"""overridden compute for storing additional information"""
result = scoring.ScoringFunctionBase.compute_force(self, iteration_result, ref_matrix)
iteration_result['networks'] = self.score_means
return result
def networks(self):
"""networks are cached"""
if self.__networks is None:
self.__networks = retrieve_networks(self.organism)
if self.config_params['remap_network_nodes']:
# network names are non-primary, this can happen
# when the user makes up their own data
for network in self.__networks:
network.validate(self.organism.thesaurus(),
self.gene_names())
return self.__networks
def __update_score_means(self, network_scores):
"""returns the score means, adjusted to the current cluster setup"""
# a dictionary that holds the network score means for
# each cluster, separated for each network
if network_scores:
score_means = {network.name: self.__compute_cluster_score_means(network_scores[network.name])
for network in self.networks()}
return {network: np.average(np.array(list(cluster_score_means.values())))
for network, cluster_score_means in score_means.items()}
return {}
def do_compute(self, iteration_result, ref_matrix=None):
"""compute method, iteration is the 0-based iteration number"""
matrix = dm.DataMatrix(len(self.gene_names()), self.num_clusters(),
self.gene_names())
network_scores = {}
for network in self.networks():
logging.debug("Compute scores for network '%s', WEIGHT: %f",
network.name, network.weight)
start_time = util.current_millis()
network_score = self.__compute_network_cluster_scores(network)
network_scores[network.name] = network_score
self.__update_score_matrix(matrix, network_score, network.weight)
elapsed = util.current_millis() - start_time
logging.debug("NETWORK '%s' SCORING TIME: %f s.",
network.name, (elapsed / 1000.0))
# compute and store score means
self.score_means = self.__update_score_means(network_scores)
return matrix
def __compute_network_cluster_scores(self, network):
"""computes the cluster scores for the given network"""
global COMPUTE_NETWORK, ALL_GENES, NETWORK_SCORE_MEMBERSHIP
result = {}
use_multiprocessing = self.config_params[
scoring.KEY_MULTIPROCESSING]
# Set the huge memory objects into globals
# These are readonly anyways, but using Manager.list() or something
# similar brings this down to a crawl
COMPUTE_NETWORK = network
ALL_GENES = set(self.gene_names()) # optimization: O(1) lookup
NETWORK_SCORE_MEMBERSHIP = self.membership
if use_multiprocessing:
with util.get_mp_pool(self.config_params) as pool:
map_results = pool.map(compute_network_scores, xrange(1, self.num_clusters() + 1))
for cluster in xrange(1, self.num_clusters() + 1):
result[cluster] = map_results[cluster - 1]
else:
for cluster in xrange(1, self.num_clusters() + 1):
result[cluster] = compute_network_scores(cluster)
# cleanup
COMPUTE_NETWORK = None
ALL_GENES = None
NETWORK_SCORE_MEMBERSHIP = None
return result
def __update_score_matrix(self, matrix, network_score, weight):
"""add values into the result score matrix"""
mvalues = matrix.values
gene_names = self.gene_names()
for cluster in xrange(1, self.num_clusters() + 1):
cluster_genes = set(network_score[cluster].keys())
for row_index in xrange(self.ratios.num_rows):
gene = gene_names[row_index]
if gene in cluster_genes:
weighted_score = network_score[cluster][gene] * weight
mvalues[row_index][cluster - 1] += weighted_score
def __compute_cluster_score_means(self, network_score):
"""compute the score means on the given network score"""
result = {}
for cluster in xrange(1, self.num_clusters() + 1):
cluster_scores = [network_score[cluster][gene]
if gene in network_score[cluster] else 0.0
for gene in self.rows_for_cluster(cluster)]
result[cluster] = util.trim_mean(cluster_scores, 0.05)
return result
def retrieve_networks(organism):
"""retrieves the networks provided by the organism object and
possibly other sources, doing some normalization if necessary
Note: wanted to make it private, but the scoring function
can not see it after doing so"""
networks = organism.networks()
max_score = 0
for network in networks:
#logging.debug("Network '%s' with %d edges", network.name(),
# network.num_edges())
nw_total = network.total_score()
if nw_total > max_score:
max_score = nw_total
for network in networks:
network.normalize_scores_to(max_score)
return networks
| lgpl-3.0 | 2,727,564,660,023,877,000 | 39.510511 | 105 | 0.592809 | false |
fvilca/cnn_tensorflow_cifar | cifar10_multi_gpu_train.py | 1 | 10371 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to train CIFAR-10 using multiple GPU's with synchronous updates.
Accuracy:
cifar10_multi_gpu_train.py achieves ~86% accuracy after 100K steps (256
epochs of data) as judged by cifar10_eval.py.
Speed: With batch_size 128.
System | Step Time (sec/batch) | Accuracy
--------------------------------------------------------------------
1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)
1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)
2 Tesla K20m | 0.13-0.20 | ~84% at 30K steps (2.5 hours)
3 Tesla K20m | 0.13-0.18 | ~84% at 30K steps
4 Tesla K20m | ~0.10 | ~84% at 30K steps
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import re
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import cifar10
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000,
"""Number of batches to run.""")
tf.app.flags.DEFINE_integer('num_gpus', 1,
"""How many GPUs to use.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
def tower_loss(scope):
"""Calculate the total loss on a single tower running the CIFAR model.
Args:
scope: unique prefix string identifying the CIFAR tower, e.g. 'tower_0'
Returns:
Tensor of shape [] containing the total loss for a batch of data
"""
# Get images and labels for CIFAR-10.
images, labels = cifar10.distorted_inputs()
# Build inference Graph.
logits = cifar10.inference(images)
# Build the portion of the Graph calculating the losses. Note that we will
# assemble the total_loss using a custom function below.
_ = cifar10.loss(logits, labels)
# Assemble all of the losses for the current tower only.
losses = tf.get_collection('losses', scope)
# Calculate the total loss for the current tower.
total_loss = tf.add_n(losses, name='total_loss')
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
loss_name = re.sub('%s_[0-9]*/' % cifar10.TOWER_NAME, '', l.op.name)
#tf.contrib.deprecated.scalar_summary(loss_name, l)
    tf.scalar_summary(loss_name, l)
return total_loss
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(grads, 0)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
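# Shape sketch of the `tower_grads` argument consumed above (two towers, two
# variables), matching the comment inside the loop:
#
#   tower_grads = [[(grad0_gpu0, var0), (grad1_gpu0, var1)],   # tower 0
#                  [(grad0_gpu1, var0), (grad1_gpu1, var1)]]   # tower 1
#
# zip(*tower_grads) groups the per-tower gradients of each variable together,
# and each group is reduced to a single averaged (grad, var) pair.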
def train():
"""Train CIFAR-10 for a number of steps."""
with tf.Graph().as_default(), tf.device('/cpu:0'):
# Create a variable to count the number of train() calls. This equals the
# number of batches processed * FLAGS.num_gpus.
global_step = tf.get_variable(
'global_step', [],
initializer=tf.constant_initializer(0), trainable=False)
# Calculate the learning rate schedule.
num_batches_per_epoch = (cifar10.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN /
FLAGS.batch_size)
decay_steps = int(num_batches_per_epoch * cifar10.NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(cifar10.INITIAL_LEARNING_RATE,
global_step,
decay_steps,
cifar10.LEARNING_RATE_DECAY_FACTOR,
staircase=True)
# Create an optimizer that performs gradient descent.
opt = tf.train.GradientDescentOptimizer(lr)
# Calculate the gradients for each model tower.
tower_grads = []
for i in xrange(FLAGS.num_gpus):
with tf.device('/gpu:%d' % i):
with tf.name_scope('%s_%d' % (cifar10.TOWER_NAME, i)) as scope:
# Calculate the loss for one tower of the CIFAR model. This function
# constructs the entire CIFAR model but shares the variables across
# all towers.
loss = tower_loss(scope)
# Reuse variables for the next tower.
tf.get_variable_scope().reuse_variables()
# Retain the summaries from the final tower.
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
# Calculate the gradients for the batch of data on this CIFAR tower.
grads = opt.compute_gradients(loss)
# Keep track of the gradients across all towers.
tower_grads.append(grads)
# We must calculate the mean of each gradient. Note that this is the
# synchronization point across all towers.
grads = average_gradients(tower_grads)
# Add a summary to track the learning rate.
summaries.append(tf.contrib.deprecated.scalar_summary('learning_rate', lr))
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
summaries.append(
tf.contrib.deprecated.histogram_summary(var.op.name + '/gradients',
grad))
# Apply the gradients to adjust the shared variables.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
summaries.append(
tf.contrib.deprecated.histogram_summary(var.op.name, var))
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
cifar10.MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
# Group all updates to into a single train op.
train_op = tf.group(apply_gradient_op, variables_averages_op)
# Create a saver.
saver = tf.train.Saver(tf.global_variables())
# Build the summary operation from the last tower summaries.
summary_op = tf.contrib.deprecated.merge_summary(summaries)
# Build an initialization operation to run below.
init = tf.global_variables_initializer()
# Start running operations on the Graph. allow_soft_placement must be set to
# True to build towers on GPU, as some of the ops do not have GPU
# implementations.
sess = tf.Session(config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=FLAGS.log_device_placement))
sess.run(init)
# Start the queue runners.
tf.train.start_queue_runners(sess=sess)
summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
for step in xrange(FLAGS.max_steps):
start_time = time.time()
_, loss_value = sess.run([train_op, loss])
duration = time.time() - start_time
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
if step % 10 == 0:
num_examples_per_step = FLAGS.batch_size * FLAGS.num_gpus
examples_per_sec = num_examples_per_step / duration
sec_per_batch = duration / FLAGS.num_gpus
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), step, loss_value,
examples_per_sec, sec_per_batch))
if step % 100 == 0:
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, step)
# Save the model checkpoint periodically.
if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
def main(argv=None): # pylint: disable=unused-argument
cifar10.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()
| mit | -3,657,095,802,407,810,000 | 36.576087 | 80 | 0.646514 | false |
food52/thumbor | thumbor/console.py | 1 | 2470 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/globocom/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
import optparse
from thumbor.context import ServerParameters
from thumbor import __version__
def get_server_parameters(arguments=None):
parser = optparse.OptionParser(usage="thumbor or type thumbor -h (--help) for help", description=__doc__, version=__version__)
parser.add_option("-p", "--port", type="int", dest="port", default=8888, help="The port to run this thumbor instance at [default: %default].")
parser.add_option("-i", "--ip", dest="ip", default="0.0.0.0", help="The host address to run this thumbor instance at [default: %default].")
parser.add_option("-f", "--fd", dest="file_descriptor", help="The file descriptor number or path to listen for connections on (--port and --ip will be ignored if this is set) [default: %default].")
parser.add_option("-c", "--conf", dest="conf", default="", help="The path of the configuration file to use for this thumbor instance [default: %default].")
parser.add_option("-k", "--keyfile", dest="keyfile", default="", help="The path of the security key file to use for this thumbor instance [default: %default].")
parser.add_option("-l", "--log-level", dest="log_level", default="warning", help="The log level to be used. Possible values are: debug, info, warning, error, critical or notset. [default: %default].")
parser.add_option("-o", "--log_file", dest="log_file", default="", help="Path of the file to log to.")
parser.add_option("-a", "--app", dest="app", default='thumbor.app.ThumborServiceApp', help="A custom app to use for this thumbor server in case you subclassed ThumborServiceApp [default: %default].")
(options, args) = parser.parse_args(arguments)
port = options.port
ip = options.ip
fd = options.file_descriptor
conf = options.conf or None
keyfile = options.keyfile or None
log_level = options.log_level
log_file = options.log_file
return ServerParameters(port=port,
ip=ip,
config_path=conf,
keyfile=keyfile,
log_level=log_level,
app_class=options.app,
fd=fd,
log_file=log_file)
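# Example of calling the parser with explicit arguments instead of sys.argv
# (a sketch, not part of the original module):
#
#   params = get_server_parameters(['-p', '8888', '-i', '127.0.0.1', '-l', 'debug'])
#   # `params` is a ServerParameters instance built from those option values.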
| mit | 8,738,400,710,362,373,000 | 53.888889 | 204 | 0.640891 | false |
Mariaanisimova/pythonintask | PINp/2014/Cheraneva A.S/task_10_24.py | 1 | 3072 | #Задача 10. Вариант 24.
#Напишите программу "Генератор персонажей" для игры.
#Пользователю должно быть предоставлено 30 пунктов, которые можно распределить между четырьмя характеристиками: Сила, Здоровье, Мудрость и Ловкость.
#Надо сделать так, чтобы пользователь мог не только брать эти пункты из общего "пула", но и возвращать их туда из характеристик, которым он решил присвоить другие значения.
#Cheraneva A.S.
#28.05.2016
print ("""
Добро пожаловать в "Генератор персонажей".
Вы можете распределить 30 очков между 4 характеристиками:
Сила, Здоровье, Мудрость и Ловкость. Вы можете как и брать из общего
числа пункотв, так и возвращать. Распределяйте характеристики с умом. Удачи!
""")
STR=0
HP=0
INT=0
AGL=0
point=30
chislo=0
print("Если хотите изменить Силу, то напишите 'Сила'. Если Здоровье, то 'Здоровье'. Если Мудрость, то 'Мудрость'. Если Ловкость, то 'Ловкость'.")
while True:
if STR<0 or HP<0 or INT<0 or AGL<0 or point>30:
print("Ошибка")
break
#chislo=int(input("Напишите снова"))
elif point==0:
print("Вы распределили очки. Их распределение:\nСила:",STR,"\nЗдоровье:",HP,"\nМудрость:",INT,"\nЛовкость:",AGL)
break
print("Ваши очки:\nСила:",STR,"\nЗдоровье:",HP,"\nМудрость:",INT,"\nЛовкость:",AGL,"\nНераспределённые очки:",point)
user_input=input("")
if user_input=="Сила" :
chislo=int(input("Сколько хотите прибавить (отбавить)?"))
if chislo <= point :
STR+=chislo
point-=chislo
else :
print('Слишком много')
elif user_input=="Здоровье":
chislo=int(input("Сколько хотите прибавить (отбавить)?"))
if chislo <= point :
HP+=chislo
point-=chislo
else :
print('Слишком много')
elif user_input=="Мудрость":
chislo=int(input("Сколько хотите прибавить (отбавить)?"))
if chislo <= point :
INT+=chislo
point-=chislo
else :
print('Слишком много')
elif user_input=="Ловкость":
chislo=int(input("Сколько хотите прибавить (отбавить)?"))
if chislo <= point :
AGL+=chislo
point-=chislo
else :
print('Слишком много')
input("Нажмите Enter для выхода.")
| apache-2.0 | -906,661,393,097,690,600 | 31.967742 | 172 | 0.692162 | false |
talyian/elastalert | elastalert/create_index.py | 1 | 3287 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import json
import os
import yaml
from elasticsearch.client import Elasticsearch
def main():
if os.path.isfile('../config.yaml'):
filename = '../config.yaml'
elif os.path.isfile('config.yaml'):
filename = 'config.yaml'
else:
filename = ''
username = None
password = None
use_ssl = None
http_auth = None
if filename:
with open(filename) as config_file:
data = yaml.load(config_file)
host = data.get('es_host')
port = data.get('es_port')
username = data.get('es_username')
password = data.get('es_password')
use_ssl = data.get('use_ssl')
else:
host = raw_input("Enter elasticsearch host: ")
port = int(raw_input("Enter elasticsearch port: "))
while use_ssl is None:
resp = raw_input("Use SSL? t/f: ").lower()
use_ssl = True if resp in ('t', 'true') else (False if resp in ('f', 'false') else None)
username = raw_input("Enter optional basic-auth username: ")
password = raw_input("Enter optional basic-auth password: ")
if username and password:
http_auth = username + ':' + password
es = Elasticsearch(host=host, port=port, use_ssl=use_ssl, http_auth=http_auth)
silence_mapping = {'silence': {'properties': {'rule_name': {'index': 'not_analyzed', 'type': 'string'},
'until': {'type': 'date', 'format': 'dateOptionalTime'}}}}
ess_mapping = {'elastalert_status': {'properties': {'rule_name': {'index': 'not_analyzed', 'type': 'string'},
'@timestamp': {'format': 'dateOptionalTime', 'type': 'date'}}}}
es_mapping = {'elastalert': {'properties': {'rule_name': {'index': 'not_analyzed', 'type': 'string'},
'match_body': {'enabled': False, 'type': 'object'}}}}
error_mapping = {'elastalert_error': {'properties': {'data': {'type': 'object', 'enabled': False}}}}
index = raw_input('New index name? (Default elastalert_status) ')
index = index if index else 'elastalert_status'
old_index = raw_input('Name of existing index to copy? (Default None) ')
res = None
if old_index:
print("Downloading existing data...")
res = es.search(index=old_index, body={}, size=500000)
print("Got %s documents" % (len(res['hits']['hits'])))
es.indices.create(index)
es.indices.put_mapping(index=index, doc_type='elastalert', body=es_mapping)
es.indices.put_mapping(index=index, doc_type='elastalert_status', body=ess_mapping)
es.indices.put_mapping(index=index, doc_type='silence', body=silence_mapping)
es.indices.put_mapping(index=index, doc_type='elastalert_error', body=error_mapping)
print("New index %s created" % (index))
if res:
bulk = ''.join(['%s\n%s\n' % (json.dumps({'create': {'_type': doc['_type'], '_index': index}}),
json.dumps(doc['_source'])) for doc in res['hits']['hits']])
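        # Each copied document contributes two newline-terminated JSON lines to
        # the bulk body, roughly (values here are illustrative only):
        #   {"create": {"_type": "elastalert_status", "_index": "elastalert_status"}}
        #   {"rule_name": "...", "@timestamp": "..."}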
print("Uploading data...")
es.bulk(body=bulk, index=index)
print("Done!")
if __name__ == '__main__':
main()
| apache-2.0 | -5,382,246,338,983,577,000 | 39.580247 | 119 | 0.571037 | false |
gautam1858/tensorflow | tensorflow/tools/compatibility/tf_upgrade_v2.py | 1 | 54358 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Upgrader for Python scripts from 1.* TensorFlow to 2.0 TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import pasta
import six
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import renames_v2
from tensorflow.tools.compatibility import reorders_v2
class TFAPIChangeSpec(ast_edits.APIChangeSpec):
"""List of maps that describe what changed in the API."""
def __init__(self):
# Maps from a function name to a dictionary that describes how to
# map from an old argument keyword to the new argument keyword.
# If the new argument is None, it will be removed.
# Only keyword args are handled, so make sure to also put any function in
# function_reorders to ensure that all args are made into keywords first.
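    # For example, given the "tf.argmin" entry below, a call written as
    #   tf.argmin(input=t, dimension=0)
    # is rewritten to
    #   tf.argmin(input=t, axis=0)
    # while keywords mapped to None (e.g. colocate_gradients_with_ops) are dropped.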
self.function_keyword_renames = {
"tf.gradients": {
"colocate_gradients_with_ops": None,
},
"tf.hessians": {
"colocate_gradients_with_ops": None,
},
"*.minimize": {
"colocate_gradients_with_ops": None,
},
"*.compute_gradients": {
"colocate_gradients_with_ops": None,
},
"tf.cond": {
"strict": None,
"fn1": "true_fn",
"fn2": "false_fn"
},
"tf.argmin": {
"dimension": "axis",
},
"tf.argmax": {
"dimension": "axis",
},
"tf.arg_min": {
"dimension": "axis",
},
"tf.arg_max": {
"dimension": "axis",
},
"tf.math.argmin": {
"dimension": "axis",
},
"tf.math.argmax": {
"dimension": "axis",
},
"tf.image.crop_and_resize": {
"box_ind": "box_indices",
},
"tf.image.extract_image_patches": {
"ksizes": "sizes",
},
"tf.extract_image_patches": {
"ksizes": "sizes",
},
"tf.expand_dims": {
"dim": "axis",
},
"tf.batch_to_space": {
"block_size": "block_shape",
},
"tf.space_to_batch": {
"block_size": "block_shape",
},
"tf.nn.space_to_batch": {
"block_size": "block_shape",
},
"tf.constant": {
"verify_shape": "verify_shape_is_now_always_true",
},
"tf.convert_to_tensor": {
"preferred_dtype": "dtype_hint"
},
"tf.nn.softmax_cross_entropy_with_logits": {
"dim": "axis",
"_sentinel": None,
},
"tf.nn.softmax_cross_entropy_with_logits_v2": {
"dim": "axis"
},
"tf.linalg.l2_normalize": {
"dim": "axis",
},
"tf.linalg.norm": {
"keep_dims": "keepdims",
},
"tf.norm": {
"keep_dims": "keepdims",
},
"tf.load_file_system_library": {
"library_filename": "library_location",
},
"tf.count_nonzero": {
"input_tensor": "input",
"keep_dims": "keepdims",
"reduction_indices": "axis",
},
"tf.math.count_nonzero": {
"input_tensor": "input",
"keep_dims": "keepdims",
"reduction_indices": "axis",
},
"tf.nn.erosion2d": {
"kernel": "filters",
"rates": "dilations",
},
"tf.math.l2_normalize": {
"dim": "axis",
},
"tf.math.log_softmax": {
"dim": "axis",
},
"tf.math.softmax": {
"dim": "axis"
},
"tf.nn.l2_normalize": {
"dim": "axis",
},
"tf.nn.log_softmax": {
"dim": "axis",
},
"tf.nn.moments": {
"keep_dims": "keepdims",
},
"tf.nn.pool": {
"dilation_rate": "dilations"
},
"tf.nn.separable_conv2d": {
"rate": "dilations"
},
"tf.nn.depthwise_conv2d": {
"rate": "dilations"
},
"tf.nn.softmax": {
"dim": "axis"
},
"tf.nn.sufficient_statistics": {
"keep_dims": "keepdims"
},
"tf.debugging.assert_all_finite": {
"t": "x",
"msg": "message",
},
"tf.sparse.add": {
"thresh": "threshold",
},
"tf.sparse_add": {
"thresh": "threshold",
},
"tf.sparse.concat": {
"concat_dim": "axis",
"expand_nonconcat_dim": "expand_nonconcat_dims",
},
"tf.sparse_concat": {
"concat_dim": "axis",
"expand_nonconcat_dim": "expand_nonconcat_dims",
},
"tf.sparse.split": {
"split_dim": "axis",
},
"tf.sparse_split": {
"split_dim": "axis",
},
"tf.sparse.reduce_max": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.sparse_reduce_max": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.sparse.reduce_sum": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.sparse_reduce_sum": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.nn.max_pool_with_argmax": {
"Targmax": "output_dtype",
},
"tf.multinomial": {
"output_dtype": "dtype",
},
"tf.random.multinomial": {
"output_dtype": "dtype",
},
"tf.reverse_sequence": {
"seq_dim": "seq_axis",
"batch_dim": "batch_axis",
},
"tf.nn.batch_norm_with_global_normalization": {
"t": "input",
"m": "mean",
"v": "variance",
},
"tf.nn.dilation2d": {
"filter": "filters",
"rates": "dilations",
},
"tf.nn.conv3d": {
"filter": "filters"
},
"tf.zeros_like": {
"tensor": "input",
},
"tf.ones_like": {
"tensor": "input",
},
"tf.nn.conv2d_transpose": {
"value": "input",
"filter": "filters",
},
"tf.nn.conv3d_transpose": {
"value": "input",
"filter": "filters",
},
"tf.nn.convolution": {
"filter": "filters",
"dilation_rate": "dilations",
},
"tf.gfile.Exists": {
"filename": "path",
},
"tf.gfile.Remove": {
"filename": "path",
},
"tf.gfile.Stat": {
"filename": "path",
},
"tf.gfile.Glob": {
"filename": "pattern",
},
"tf.gfile.MkDir": {
"dirname": "path",
},
"tf.gfile.MakeDirs": {
"dirname": "path",
},
"tf.gfile.DeleteRecursively": {
"dirname": "path",
},
"tf.gfile.IsDirectory": {
"dirname": "path",
},
"tf.gfile.ListDirectory": {
"dirname": "path",
},
"tf.gfile.Copy": {
"oldpath": "src",
"newpath": "dst",
},
"tf.gfile.Rename": {
"oldname": "src",
"newname": "dst",
},
"tf.gfile.Walk": {
"in_order": "topdown",
},
"tf.random.stateless_multinomial": {
"output_dtype": "dtype",
},
"tf.string_to_number": {
"string_tensor": "input",
},
"tf.strings.to_number": {
"string_tensor": "input",
},
"tf.string_to_hash_bucket": {
"string_tensor": "input",
},
"tf.strings.to_hash_bucket": {
"string_tensor": "input",
},
"tf.reduce_all": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_all": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_any": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_any": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_min": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_min": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_max": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_max": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_sum": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_sum": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_mean": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_mean": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_prod": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_prod": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_logsumexp": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_logsumexp": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_join": {
"keep_dims": "keepdims",
"reduction_indices": "axis"
},
"tf.strings.reduce_join": {
"keep_dims": "keepdims",
"reduction_indices": "axis"
},
"tf.squeeze": {
"squeeze_dims": "axis",
},
"tf.nn.weighted_moments": {
"keep_dims": "keepdims"
},
}
# pylint: disable=line-too-long
# Add additional renames not in renames_v2.py here.
# IMPORTANT: For the renames in here, if you also need to add to
# function_reorders or function_keyword_renames, use the OLD function name.
# These renames happen after the arguments have been processed.
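    # For example, "tf.batch_to_space_nd" below is rewritten to
    # "tf.batch_to_space", while symbols with no direct 2.0 equivalent are
    # redirected to their tf.compat.v1 locations.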
self.manual_symbol_renames = {
"tf.batch_to_space_nd":
"tf.batch_to_space",
"tf.batch_gather":
"tf.gather",
"tf.space_to_batch_nd":
"tf.space_to_batch",
"tf.nn.space_to_batch":
"tf.space_to_batch",
"tf.estimator.inputs":
"tf.compat.v1.estimator.inputs",
"tf.extract_image_patches":
"tf.image.extract_image_patches",
"tf.gfile.Copy":
"tf.io.gfile.copy",
"tf.gfile.DeleteRecursively":
"tf.io.gfile.rmtree",
"tf.gfile.Exists":
"tf.io.gfile.exists",
"tf.gfile.Glob":
"tf.io.gfile.glob",
"tf.gfile.IsDirectory":
"tf.io.gfile.isdir",
"tf.gfile.ListDirectory":
"tf.io.gfile.listdir",
"tf.gfile.MakeDirs":
"tf.io.gfile.makedirs",
"tf.gfile.MkDir":
"tf.io.gfile.mkdir",
"tf.gfile.Remove":
"tf.io.gfile.remove",
"tf.gfile.Rename":
"tf.io.gfile.rename",
"tf.gfile.Stat":
"tf.io.gfile.stat",
"tf.gfile.Walk":
"tf.io.gfile.walk",
"tf.contrib.data.AUTOTUNE":
"tf.data.experimental.AUTOTUNE",
"tf.contrib.data.Counter":
"tf.data.experimental.Counter",
"tf.contrib.data.CheckpointInputPipelineHook":
"tf.data.experimental.CheckpointInputPipelineHook",
"tf.contrib.data.CsvDataset":
"tf.data.experimental.CsvDataset",
"tf.contrib.data.Optional":
"tf.data.experimental.Optional",
"tf.contrib.data.RandomDataset":
"tf.data.experimental.RandomDataset",
"tf.contrib.data.Reducer":
"tf.data.experimental.Reducer",
"tf.contrib.data.SqlDataset":
"tf.data.experimental.SqlDataset",
"tf.contrib.data.StatsAggregator":
"tf.data.experimental.StatsAggregator",
"tf.contrib.data.TFRecordWriter":
"tf.data.experimental.TFRecordWriter",
"tf.contrib.data.assert_element_shape":
"tf.data.experimental.assert_element_shape",
"tf.contrib.data.batch_and_drop_remainder":
"tf.compat.v1.contrib.data.batch_and_drop_remainder",
"tf.contrib.data.bucket_by_sequence_length":
"tf.data.experimental.bucket_by_sequence_length",
"tf.contrib.data.choose_from_datasets":
"tf.data.experimental.choose_from_datasets",
"tf.contrib.data.copy_to_device":
"tf.data.experimental.copy_to_device",
"tf.contrib.data.dense_to_sparse_batch":
"tf.data.experimental.dense_to_sparse_batch",
"tf.contrib.data.enumerate_dataset":
"tf.data.experimental.enumerate_dataset",
"tf.contrib.data.get_next_as_optional":
"tf.data.experimental.get_next_as_optional",
"tf.contrib.data.get_single_element":
"tf.data.experimental.get_single_element",
"tf.contrib.data.group_by_reducer":
"tf.data.experimental.group_by_reducer",
"tf.contrib.data.group_by_window":
"tf.data.experimental.group_by_window",
"tf.contrib.data.ignore_errors":
"tf.data.experimental.ignore_errors",
"tf.contrib.data.latency_stats":
"tf.data.experimental.latency_stats",
"tf.contrib.data.make_batched_features_dataset":
"tf.data.experimental.make_batched_features_dataset",
"tf.contrib.data.make_csv_dataset":
"tf.data.experimental.make_csv_dataset",
"tf.contrib.data.make_saveable_from_iterator":
"tf.data.experimental.make_saveable_from_iterator",
"tf.contrib.data.map_and_batch":
"tf.data.experimental.map_and_batch",
"tf.contrib.data.padded_batch_and_drop_remainder":
"tf.compat.v1.contrib.data.padded_batch_and_drop_remainder",
"tf.contrib.data.parallel_interleave":
"tf.data.experimental.parallel_interleave",
"tf.contrib.data.parse_example_dataset":
"tf.data.experimental.parse_example_dataset",
"tf.contrib.data.prefetch_to_device":
"tf.data.experimental.prefetch_to_device",
"tf.contrib.data.read_batch_features":
"tf.compat.v1.contrib.data.read_batch_features",
"tf.contrib.data.reduce_dataset":
"tf.compat.v1.contrib.data.reduce_dataset",
"tf.contrib.data.rejection_resample":
"tf.data.experimental.rejection_resample",
"tf.contrib.data.sample_from_datasets":
"tf.data.experimental.sample_from_datasets",
"tf.contrib.data.scan":
"tf.data.experimental.scan",
"tf.contrib.data.set_stats_aggregator":
"tf.data.experimental.set_stats_aggregator",
"tf.contrib.data.shuffle_and_repeat":
"tf.data.experimental.shuffle_and_repeat",
"tf.contrib.data.sliding_window_batch":
"tf.compat.v1.contrib.data.sliding_window_batch",
"tf.contrib.data.sloppy_interleave":
"tf.compat.v1.contrib.data.sloppy_interleave",
"tf.contrib.data.unbatch":
"tf.data.experimental.unbatch",
"tf.contrib.data.unique":
"tf.data.experimental.unique",
"tf.contrib.rnn.RNNCell":
"tf.nn.rnn_cell.RNNCell",
"tf.contrib.rnn.LSTMStateTuple":
"tf.nn.rnn_cell.LSTMStateTuple",
"tf.contrib.framework.sort":
"tf.sort",
"tf.contrib.framework.argsort":
"tf.argsort",
"tf.count_nonzero":
"tf.math.count_nonzero",
"tf.manip.batch_to_space_nd":
"tf.batch_to_space",
"tf.quantize_v2":
"tf.quantization.quantize",
"tf.sparse_add":
"tf.sparse.add",
"tf.sparse_concat":
"tf.sparse.concat",
"tf.sparse_split":
"tf.sparse.split",
"tf.sparse_matmul":
"tf.linalg.matmul",
"tf.sparse_reduce_sum":
"tf.sparse.reduce_sum",
"tf.sparse_reduce_max":
"tf.sparse.reduce_max",
"tf.random.stateless_multinomial":
"tf.random.stateless_categorical",
"tf.substr":
"tf.strings.substr",
"tf.string_to_hash_bucket":
"tf.strings.to_hash_bucket",
"tf.string_to_number":
"tf.strings.to_number",
"tf.multinomial":
"tf.random.categorical",
"tf.random.multinomial":
"tf.random.categorical",
"tf.reduce_join":
"tf.strings.reduce_join",
"tf.load_file_system_library":
"tf.load_library",
"tf.pywrap_tensorflow":
"tf.compat.v1.pywrap_tensorflow",
"tf.bincount":
"tf.math.bincount",
"tf.confusion_matrix":
"tf.math.confusion_matrix",
"tf.train.confusion_matrix":
"tf.math.confusion_matrix",
"tf.decode_csv":
"tf.io.decode_csv",
"tf.data.Iterator":
"tf.compat.v1.data.Iterator",
"tf.parse_example":
"tf.io.parse_example",
"tf.parse_single_example":
"tf.io.parse_single_example",
"tf.nn.fused_batch_norm":
"tf.compat.v1.nn.fused_batch_norm",
"tf.nn.softmax_cross_entropy_with_logits_v2":
"tf.nn.softmax_cross_entropy_with_logits",
"tf.losses.Reduction.MEAN":
"tf.compat.v1.losses.Reduction.MEAN",
"tf.losses.Reduction.SUM_BY_NONZERO_WEIGHTS":
"tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS",
"tf.losses.Reduction.SUM_OVER_NONZERO_WEIGHTS":
"tf.compat.v1.losses.Reduction.SUM_OVER_NONZERO_WEIGHTS",
"tf.lite.constants.FLOAT":
"tf.float32",
"tf.lite.constants.INT32":
"tf.int32",
"tf.lite.constants.INT64":
"tf.int64",
"tf.lite.constants.STRING":
"tf.string",
"tf.lite.constants.QUANTIZED_UINT8":
"tf.uint8",
"tf.arg_max":
"tf.argmax",
"tf.arg_min":
"tf.argmin",
# tf.nn.ctc_loss is still available in 2.0 but behavior
# changed significantly.
"tf.nn.ctc_loss":
"tf.compat.v1.nn.ctc_loss",
"tf.zeros_initializer":
"tf.compat.v1.initializers.zeros",
"tf.ones_initializer":
"tf.compat.v1.initializers.ones",
"tf.constant_initializer":
"tf.compat.v1.initializers.constant",
"tf.random_uniform_initializer":
"tf.compat.v1.initializers.random_uniform",
"tf.random_normal_initializer":
"tf.compat.v1.initializers.random_normal",
"tf.truncated_normal_initializer":
"tf.compat.v1.initializers.truncated_normal",
"tf.image.resize_images":
"tf.image.resize",
"tf.random_poisson":
"tf.random.poisson",
"tf.debugging.assert_greater":
"tf.compat.v1.debugging.assert_greater",
"tf.debugging.assert_greater_equal":
"tf.compat.v1.debugging.assert_greater_equal",
"tf.debugging.assert_integer":
"tf.compat.v1.debugging.assert_integer",
"tf.debugging.assert_less":
"tf.compat.v1.debugging.assert_less",
"tf.debugging.assert_less_equal":
"tf.compat.v1.debugging.assert_less_equal",
"tf.debugging.assert_near":
"tf.compat.v1.debugging.assert_near",
"tf.debugging.assert_negative":
"tf.compat.v1.debugging.assert_negative",
"tf.debugging.assert_non_negative":
"tf.compat.v1.debugging.assert_non_negative",
"tf.debugging.assert_non_positive":
"tf.compat.v1.debugging.assert_non_positive",
"tf.debugging.assert_none_equal":
"tf.compat.v1.debugging.assert_none_equal",
"tf.debugging.assert_type":
"tf.compat.v1.debugging.assert_type",
"tf.debugging.assert_positive":
"tf.compat.v1.debugging.assert_positive",
"tf.debugging.assert_equal":
"tf.compat.v1.debugging.assert_equal",
"tf.debugging.assert_scalar":
"tf.compat.v1.debugging.assert_scalar",
"tf.assert_equal":
"tf.compat.v1.assert_equal",
"tf.assert_less":
"tf.compat.v1.assert_less",
"tf.assert_greater":
"tf.compat.v1.assert_greater",
"tf.debugging.assert_rank":
"tf.compat.v1.debugging.assert_rank",
"tf.debugging.assert_rank_at_least":
"tf.compat.v1.debugging.assert_rank_at_least",
"tf.debugging.assert_rank_in":
"tf.compat.v1.debugging.assert_rank_in",
"tf.assert_rank":
"tf.compat.v1.assert_rank",
}
# pylint: enable=line-too-long
# Mapping from function to the new name of the function
self.symbol_renames = renames_v2.renames
self.symbol_renames.update(self.manual_symbol_renames)
# Variables that should be changed to functions.
self.change_to_function = {}
# pylint: disable=line-too-long
# This list should just contain names of functions that had
# their arguments reordered. After adding a function name to the list
# run the following to update reorders_v2.py:
# bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
# bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
# pylint: enable=line-too-long
self.reordered_function_names = {
"tf.io.serialize_sparse",
"tf.io.serialize_many_sparse",
"tf.argmax",
"tf.argmin",
"tf.batch_gather",
"tf.batch_to_space",
"tf.cond",
"tf.nn.space_to_batch",
"tf.boolean_mask",
"tf.convert_to_tensor",
"tf.nn.moments",
"tf.nn.convolution",
"tf.nn.crelu",
"tf.nn.weighted_moments",
"tf.nn.pool",
"tf.nn.separable_conv2d",
"tf.nn.depthwise_conv2d",
"tf.multinomial",
"tf.random.multinomial",
"tf.pad",
"tf.quantize_v2",
"tf.feature_column.categorical_column_with_vocabulary_file",
"tf.shape",
"tf.size",
"tf.random.poisson",
"tf.sparse.add",
"tf.sparse_add",
"tf.sparse.concat",
"tf.sparse_concat",
"tf.sparse.segment_mean",
"tf.sparse.segment_sqrt_n",
"tf.sparse.segment_sum",
"tf.sparse_matmul",
"tf.sparse.reduce_max",
"tf.sparse_reduce_max",
"tf.io.decode_csv",
"tf.strings.length",
"tf.strings.reduce_join",
"tf.strings.substr",
"tf.substr",
"tf.transpose",
"tf.tuple",
"tf.parse_example",
"tf.parse_single_example",
"tf.io.parse_example",
"tf.io.parse_single_example",
"tf.while_loop",
"tf.reduce_all",
"tf.math.reduce_all",
"tf.reduce_any",
"tf.math.reduce_any",
"tf.reduce_min",
"tf.math.reduce_min",
"tf.reduce_max",
"tf.math.reduce_max",
"tf.reduce_sum",
"tf.math.reduce_sum",
"tf.reduce_mean",
"tf.math.reduce_mean",
"tf.reduce_prod",
"tf.math.reduce_prod",
"tf.reduce_logsumexp",
"tf.math.reduce_logsumexp",
"tf.reduce_join",
"tf.confusion_matrix",
"tf.math.confusion_matrix",
"tf.math.in_top_k",
"tf.nn.depth_to_space",
"tf.nn.embedding_lookup",
"tf.nn.embedding_lookup_sparse",
"tf.nn.in_top_k",
"tf.nn.space_to_depth",
"tf.linalg.norm",
"tf.norm",
"tf.reverse_sequence",
"tf.sparse_split",
# tf.nn.softmax_cross_entropy_with_logits *must* be called with
# keyword arguments. Add keyword arguments in rare case when they
# are not specified.
"tf.nn.softmax_cross_entropy_with_logits",
}
# Functions that were reordered should be changed to the new keyword args
# for safety, if positional arguments are used. If you have reversed the
# positional arguments yourself, this could do the wrong thing.
self.function_reorders = reorders_v2.reorders
# Specially handled functions (pasta version)
# Each transformer is a callable which will be called with the arguments
# transformer(parent, node, full_name, name, logs, errors)
# Where logs and errors are lists to which (line, col, msg) tuples can be
# appended, full_name is the FQN of the function called (or None if that is
    # unknown), name is the name of the function called (or None if that is
# unknown). node is an ast.Call node representing this function call, and
# parent is its parent in the AST.
# The function may modify node (but not parent), and must return
# - none, if nothing was modified
# - node, if node was modified in place (make sure to use
# pasta.ast_utils.replace_child to swap out children, otherwise formatting
# may get messy)
# - a replacement for node, if the whole call node was replaced. The caller
# will take care of changing parent.
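    # A minimal illustrative transformer (hypothetical; it is not registered
    # below). It only records a log entry and returns None to signal that the
    # call was left unchanged:
    #
    #   @staticmethod
    #   def _noop_transformer(parent, node, full_name, name, logs, errors):
    #     logs.append((node.lineno, node.col_offset,
    #                  "visited %s; no changes made" % full_name))
    #     return None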
self.function_transformers = {
"tf.nn.dropout": self._dropout_transformer,
"tf.batch_gather": self._batch_gather_transformer,
"tf.to_bfloat16": self._cast_transformer,
"tf.to_complex128": self._cast_transformer,
"tf.to_complex64": self._cast_transformer,
"tf.to_double": self._cast_transformer,
"tf.to_float": self._cast_transformer,
"tf.to_int32": self._cast_transformer,
"tf.to_int64": self._cast_transformer,
"tf.nn.softmax_cross_entropy_with_logits":
self._softmax_cross_entropy_with_logits_transformer,
"tf.image.resize_area": self._image_resize_transformer,
"tf.image.resize_bicubic": self._image_resize_transformer,
"tf.image.resize_bilinear": self._image_resize_transformer,
"tf.image.resize_nearest_neighbor": self._image_resize_transformer,
}
decay_function_comment = (
"WARNING: <function name> has been changed to return a callable instead"
" of a tensor when graph building, but its functionality remains "
"unchanged during eager execution (returns a callable like "
"before). The converter cannot detect and fix this reliably, so "
"this usage has been converted to compat.v1 (even though it may already"
" be correct).\n"
)
# TODO(b/118888586): add default value change to update script.
default_loss_reduction_changed = (
"WARNING: default value of loss_reduction has been changed to "
"SUM_OVER_BATCH_SIZE.\n"
)
assert_return_type_comment = (
"WARNING: assert_* functions have been changed to return None, the "
"data argument has been removed, and arguments have been reordered."
"\nThe calls have been converted to compat.v1 for safety (even though "
" they may already have been correct)."
)
assert_rank_comment = (
"WARNING: assert_rank_* functions have been changed to return None, and"
" the data and summarize arguments have been removed."
"\nThe calls have been converted to compat.v1 for safety (even though "
" they may already have been correct)."
)
tf_01s_like_no_optimize_comment = (
"WARNING: tf.zeros_like and tf.ones_like no longer have the optimize "
"argument in TF 2.0 or after (also, `tensor' argument is renamed to "
"`input')."
"\nThe calls have been converted to compat.v1 for safety (even though "
" they may already have been correct)."
)
deprecate_partition_strategy_comment = (
"WARNING: `partition_strategy` has been removed from `%s` "
" The 'div' strategy is used by default.")
initializers_no_dtype_comment = (
"WARNING: tf.initializers and tf.keras.initializers no longer have the "
"dtype argument in the constructor or partition_info argument in the "
"call method in TF 2.0 and after. The only API symbols are now "
"tf.keras.initializers.* or tf.initializers.*."
"\nThe calls have been converted to compat.v1 for safety (even though "
"they may already have been correct).")
uniform_unit_scaling_initializer_comment = (
"WARNING: uniform_unit_scaling_initializer has been removed. Please use"
" tf.initializers.variance_scaling instead with distribution=uniform "
"to get equivalent behaviour.")
metrics_comment = (
"WARNING: tf.metrics have been converted to object oriented versions in"
" TF 2.0 and after. The metric function calls have been converted to "
"compat.v1 for backward compatibility. Please update these calls to "
"the TF 2.0 versions.")
losses_comment = (
"WARNING: tf.losses have been converted to object oriented versions in"
" TF 2.0 and after. The loss function calls have been converted to "
"compat.v1 for backward compatibility. Please update these calls to "
"the TF 2.0 versions.")
export_saved_model_renamed = (
"(Manual edit required) Please rename the method export_savedmodel() "
"to export_saved_model(). Two things to note:\n\t(1) The argument "
"strip_default_attributes has been removed. The function will always "
"strip the default attributes from ops. If this breaks your code, "
"please switch to tf.compat.v1.estimator.Estimator.\n\t(2) This change "
"only effects core estimator. If you are using "
"tf.contrib.learn.Estimator, please switch to using core estimator.")
make_initializable_iterator_deprecation = (
"(Manual edit required) The "
"`tf.data.Dataset.make_initializable_iterator()` method has been "
"removed. If you are using the Estimator API, you can return a dataset "
"directly from your input functions without creating an iterator. "
"As a last resort, please replace calls to that method on `dataset` "
"with a call to "
"`tf.compat.v1.data.make_initializable_iterator(dataset)`.")
make_one_shot_iterator_deprecation = (
"(Manual edit required) The "
"`tf.data.Dataset.make_one_shot_iterator()` method has been "
"removed. If you are using eager execution, you can iterate over "
"`dataset` using a Python `for` loop. If you are using the Estimator "
"API, you can return a dataset directly from your input functions "
"without creating an iterator. As a last resort, please replace calls "
"to that method on `dataset` with a call to "
"`tf.compat.v1.data.make_one_shot_iterator(dataset)`.")
# Function warnings. <function name> placeholder inside warnings will be
# replaced by function name.
# You can use *. to add items which do not check the FQN, and apply to e.g.,
# methods.
self.function_warnings = {
"*.export_savedmodel":
export_saved_model_renamed,
"*.make_initializable_iterator":
make_initializable_iterator_deprecation,
"*.make_one_shot_iterator":
make_one_shot_iterator_deprecation,
"tf.assert_equal":
assert_return_type_comment,
"tf.assert_none_equal":
assert_return_type_comment,
"tf.assert_negative":
assert_return_type_comment,
"tf.assert_positive":
assert_return_type_comment,
"tf.assert_non_negative":
assert_return_type_comment,
"tf.assert_non_positive":
assert_return_type_comment,
"tf.assert_near":
assert_return_type_comment,
"tf.assert_less":
assert_return_type_comment,
"tf.assert_less_equal":
assert_return_type_comment,
"tf.assert_greater":
assert_return_type_comment,
"tf.assert_greater_equal":
assert_return_type_comment,
"tf.assert_integer":
assert_return_type_comment,
"tf.assert_type":
assert_return_type_comment,
"tf.assert_scalar":
assert_return_type_comment,
"tf.assert_rank":
assert_rank_comment,
"tf.assert_rank_at_least":
assert_rank_comment,
"tf.assert_rank_in":
assert_rank_comment,
"tf.debugging.assert_equal":
assert_return_type_comment,
"tf.debugging.assert_greater":
assert_return_type_comment,
"tf.debugging.assert_greater_equal":
assert_return_type_comment,
"tf.debugging.assert_integer":
assert_return_type_comment,
"tf.debugging.assert_less":
assert_return_type_comment,
"tf.debugging.assert_less_equal":
assert_return_type_comment,
"tf.debugging.assert_near":
assert_return_type_comment,
"tf.debugging.assert_negative":
assert_return_type_comment,
"tf.debugging.assert_non_negative":
assert_return_type_comment,
"tf.debugging.assert_non_positive":
assert_return_type_comment,
"tf.debugging.assert_none_equal":
assert_return_type_comment,
"tf.debugging.assert_positive":
assert_return_type_comment,
"tf.debugging.assert_type":
assert_return_type_comment,
"tf.debugging.assert_scalar":
assert_return_type_comment,
"tf.debugging.assert_rank":
assert_rank_comment,
"tf.debugging.assert_rank_at_least":
assert_rank_comment,
"tf.debugging.assert_rank_in":
assert_rank_comment,
"tf.device":
"tf.device no longer takes function as an argument. "
"'devide_name_or_function' argument has been renamed to "
"'device_name'.",
"tf.flags":
"tf.flags has been removed, please use the argparse or absl"
" module if you need command line parsing.",
"tf.train.exponential_decay":
decay_function_comment,
"tf.train.piecewise_constant_decay":
decay_function_comment,
"tf.train.polynomial_decay":
decay_function_comment,
"tf.train.natural_exp_decay":
decay_function_comment,
"tf.train.inverse_time_decay":
decay_function_comment,
"tf.train.cosine_decay":
decay_function_comment,
"tf.train.cosine_decay_restarts":
decay_function_comment,
"tf.train.linear_cosine_decay":
decay_function_comment,
"tf.train.noisy_linear_cosine_decay":
decay_function_comment,
"tf.estimator.LinearClassifier":
default_loss_reduction_changed,
"tf.estimator.LinearRegressor":
default_loss_reduction_changed,
"tf.estimator.DNNLinearCombinedClassifier":
default_loss_reduction_changed,
"tf.estimator.DNNLinearCombinedRegressor":
default_loss_reduction_changed,
"tf.estimator.DNNRegressor":
default_loss_reduction_changed,
"tf.estimator.DNNClassifier":
default_loss_reduction_changed,
"tf.estimator.BaselineClassifier":
default_loss_reduction_changed,
"tf.estimator.BaselineRegressor":
default_loss_reduction_changed,
"tf.nn.conv1d":
"WARNING: use_cudnn_on_gpu argument has been removed and \"value\""
" was renamed to \"input\"",
"tf.nn.conv2d":
"WARNING: use_cudnn_on_gpu argument has been removed and "
"\"filter\" was renamed to \"filters\"",
"tf.nn.conv2d_backprop_filter":
"WARNING: use_cudnn_on_gpu argument has been removed",
"tf.nn.conv2d_backprop_input":
"WARNING: use_cudnn_on_gpu argument has been removed and "
"\"filter\" was renamed to \"filters\"",
"tf.nn.erosion2d":
"WARNING: <function name> now requires a data_format argument",
"tf.nn.nce_loss":
deprecate_partition_strategy_comment % "tf.nn.nce_loss",
"tf.nn.safe_embedding_lookup_sparse":
deprecate_partition_strategy_comment %
"tf.nn.safe_embedding_lookup_sparse",
"tf.nn.sampled_softmax_loss":
deprecate_partition_strategy_comment % "tf.nn.sampled_softmax_loss",
"tf.zeros_like":
tf_01s_like_no_optimize_comment,
"tf.ones_like":
tf_01s_like_no_optimize_comment,
"tf.nn.embedding_lookup":
"WARNING: validate_indices argument has been removed.",
"tf.while_loop":
"tf.while_loop no longer takes 'return_same_structure' argument. "
"'return_same_structure' now defaults to True. Also, 'name'"
"argument is now the last argument.",
"tf.image.sample_distorted_bounding_box":
"tf.image.sample_distorted_bounding_box no longer takes 'seed2' "
"argument.",
"tf.nn.ctc_beam_search_decoder":
"tf.nn.ctc_beam_search_decoder no longer takes 'merge_repeated' "
"argument. 'merge_repeated' now defaults to False.",
"tf.nn.fractional_avg_pool":
"tf.nn.fractional_avg_pool no longer takes 'seed2' and "
"'deterministic' arguments. Now it takes a single 'seed' arg. If "
"'seed' is zero, the execution is random and deterministic "
"otherwise",
"tf.nn.fractional_max_pool":
"tf.nn.fractional_max_pool no longer takes 'seed2' and "
"'deterministic' arguments. Now it takes a single 'seed' arg. If "
"'seed' is zero, the execution is random and deterministic "
"otherwise",
"tf.test.assert_equal_graph_def":
"tf.assert_equal_graph_def no longer takes 'checkpoint_v2' "
"argument. 'checkpoint_v2' now defaults to True.",
"tf.keras.initializers.Zeros":
initializers_no_dtype_comment,
"tf.keras.initializers.zeros":
initializers_no_dtype_comment,
"tf.keras.initializers.Ones":
initializers_no_dtype_comment,
"tf.keras.initializers.ones":
initializers_no_dtype_comment,
"tf.keras.initializers.Constant":
initializers_no_dtype_comment,
"tf.keras.initializers.constant":
initializers_no_dtype_comment,
"tf.keras.initializers.VarianceScaling":
initializers_no_dtype_comment,
"tf.keras.initializers.Orthogonal":
initializers_no_dtype_comment,
"tf.keras.initializers.orthogonal":
initializers_no_dtype_comment,
"tf.keras.initializers.Identity":
initializers_no_dtype_comment,
"tf.keras.initializers.identity":
initializers_no_dtype_comment,
"tf.keras.initializers.glorot_uniform":
initializers_no_dtype_comment,
"tf.keras.initializers.glorot_normal":
initializers_no_dtype_comment,
"tf.initializers.zeros":
initializers_no_dtype_comment,
"tf.zeros_initializer":
initializers_no_dtype_comment,
"tf.initializers.ones":
initializers_no_dtype_comment,
"tf.ones_initializer":
initializers_no_dtype_comment,
"tf.initializers.constant":
initializers_no_dtype_comment,
"tf.constant_initializer":
initializers_no_dtype_comment,
"tf.initializers.random_uniform":
initializers_no_dtype_comment,
"tf.random_uniform_initializer":
initializers_no_dtype_comment,
"tf.initializers.random_normal":
initializers_no_dtype_comment,
"tf.random_normal_initializer":
initializers_no_dtype_comment,
"tf.initializers.truncated_normal":
initializers_no_dtype_comment,
"tf.truncated_normal_initializer":
initializers_no_dtype_comment,
"tf.initializers.variance_scaling":
initializers_no_dtype_comment,
"tf.variance_scaling_initializer":
initializers_no_dtype_comment,
"tf.initializers.orthogonal":
initializers_no_dtype_comment,
"tf.orthogonal_initializer":
initializers_no_dtype_comment,
"tf.initializers.identity":
initializers_no_dtype_comment,
"tf.glorot_uniform_initializer":
initializers_no_dtype_comment,
"tf.initializers.glorot_uniform":
initializers_no_dtype_comment,
"tf.glorot_normal_initializer":
initializers_no_dtype_comment,
"tf.initializers.glorot_normal":
initializers_no_dtype_comment,
"tf.initializers.uniform_unit_scaling":
uniform_unit_scaling_initializer_comment,
"tf.uniform_unit_scaling_initializer":
uniform_unit_scaling_initializer_comment,
"tf.losses.absolute_difference":
losses_comment,
"tf.losses.add_loss":
losses_comment,
"tf.losses.compute_weighted_loss":
losses_comment,
"tf.losses.cosine_distance":
losses_comment,
"tf.losses.get_losses":
losses_comment,
"tf.losses.get_regularization_loss":
losses_comment,
"tf.losses.get_regularization_losses":
losses_comment,
"tf.losses.get_total_loss":
losses_comment,
"tf.losses.hinge_loss":
losses_comment,
"tf.losses.huber_loss":
losses_comment,
"tf.losses.log_loss":
losses_comment,
"tf.losses.mean_pairwise_squared_error":
losses_comment,
"tf.losses.mean_squared_error":
losses_comment,
"tf.losses.sigmoid_cross_entropy":
losses_comment,
"tf.losses.softmax_cross_entropy":
losses_comment,
"tf.losses.sparse_softmax_cross_entropy":
losses_comment,
"tf.metrics.accuracy":
metrics_comment,
"tf.metrics.auc":
metrics_comment,
"tf.metrics.average_precision_at_k":
metrics_comment,
"tf.metrics.false_negatives":
metrics_comment,
"tf.metrics.false_negatives_at_thresholds":
metrics_comment,
"tf.metrics.false_positives":
metrics_comment,
"tf.metrics.false_positives_at_thresholds":
metrics_comment,
"tf.metrics.mean":
metrics_comment,
"tf.metrics.mean_absolute_error":
metrics_comment,
"tf.metrics.mean_cosine_distance":
metrics_comment,
"tf.metrics.mean_iou":
metrics_comment,
"tf.metrics.mean_per_class_accuracy":
metrics_comment,
"tf.metrics.mean_relative_error":
metrics_comment,
"tf.metrics.mean_squared_error":
metrics_comment,
"tf.metrics.mean_tensor":
metrics_comment,
"tf.metrics.percentage_below":
metrics_comment,
"tf.metrics.precision":
metrics_comment,
"tf.metrics.precision_at_k":
metrics_comment,
"tf.metrics.precision_at_thresholds":
metrics_comment,
"tf.metrics.precision_at_top_k":
metrics_comment,
"tf.metrics.recall":
metrics_comment,
"tf.metrics.recall_at_k":
metrics_comment,
"tf.metrics.recall_at_thresholds":
metrics_comment,
"tf.metrics.recall_at_top_k":
metrics_comment,
"tf.metrics.root_mean_squared_error":
metrics_comment,
"tf.metrics.sensitivity_at_specificity":
metrics_comment,
"tf.metrics.sparse_average_precision_at_k":
metrics_comment,
"tf.metrics.sparse_precision_at_k":
metrics_comment,
"tf.metrics.specificity_at_sensitivity":
metrics_comment,
"tf.metrics.true_negatives":
metrics_comment,
"tf.metrics.true_negatives_at_thresholds":
metrics_comment,
"tf.metrics.true_positives":
metrics_comment,
"tf.metrics.true_positives_at_thresholds":
metrics_comment,
}
# Warnings that are emitted only if a specific arg is found.
self.function_arg_warnings = {
"tf.gradients": {
("colocate_gradients_with_ops", 4):
"tf.gradients no longer takes "
"'colocate_gradients_with_ops' argument, it behaves as if it "
"was set to True.",
},
"*.minimize": {
("colocate_gradients_with_ops", 5):
"Optimizer.minimize no longer takes "
"'colocate_gradients_with_ops' argument, it behaves as if it "
"was set to True.",
},
"*.compute_gradients": {
("colocate_gradients_with_ops", 4):
"Optimizer.compute_gradients no "
"longer takes 'colocate_gradients_with_ops' argument, it "
"behaves as if it was set to True.",
},
"tf.cond": {
("strict", 3):
"tf.cond no longer takes 'strict' argument, it behaves as "
"if was set to True."
},
}
self.symbol_renames = {
name: new_name
for name, new_name in self.symbol_renames.items()
}
@staticmethod
def _dropout_transformer(parent, node, full_name, name, logs, errors):
def _replace_keep_prob_node(parent, old_value):
"""Replaces old_value with 1-(old_value)."""
one = ast.Num(n=1)
one.lineno = 0
one.col_offset = 0
new_value = ast.BinOp(left=one, op=ast.Sub(),
right=old_value)
# This copies the prefix and suffix on old_value to new_value.
pasta.ast_utils.replace_child(parent, old_value, new_value)
ast.copy_location(new_value, old_value)
# Put parentheses around keep_prob.value (and remove the old prefix/
# suffix, they should only be around new_value).
pasta.base.formatting.set(old_value, "prefix", "(")
pasta.base.formatting.set(old_value, "suffix", ")")
# Check if we have a keep_prob keyword arg
for keep_prob in node.keywords:
if keep_prob.arg == "keep_prob":
logs.append((node.lineno, node.col_offset,
"Changing keep_prob arg of tf.nn.dropout to rate, and "
"recomputing value. Please check this transformation.\n"))
keep_prob.arg = "rate"
_replace_keep_prob_node(keep_prob, keep_prob.value)
return node
# Maybe it was a positional arg
if len(node.args) < 2:
errors.append((node.lineno, node.col_offset,
"ERROR: tf.nn.dropout called without arguments, so "
"automatic fix was disabled. tf.nn.dropout has changed "
"the semantics of the second argument."))
else:
_replace_keep_prob_node(node, node.args[1])
logs.append((node.lineno, node.col_offset,
"Changing keep_prob arg of tf.nn.dropout to rate, and "
"recomputing value.\n"))
errors.append((node.lineno, node.col_offset,
"WARNING: tf.nn.dropout has changed the semantics of the "
"second argument. Please check the applied transformation."
))
return node
@staticmethod
def _cast_transformer(parent, node, full_name, name, logs, errors):
"""Transforms to_int and to_float to cast(..., dtype=...)."""
# Find out the dtype to cast to from the function name
dtype_str = name[3:]
# Special cases where the full dtype is not given
if dtype_str == "float":
dtype_str = "float32"
elif dtype_str == "double":
dtype_str = "float64"
new_arg = ast.keyword(arg="dtype",
value=ast.Attribute(value=ast.Name(id="tf",
ctx=ast.Load()),
attr=dtype_str, ctx=ast.Load()))
# Ensures a valid transformation when a positional name arg is given
if len(node.args) == 2:
name_arg = ast.keyword(arg="name",
value=node.args[-1])
node.args = node.args[:-1]
node.keywords.append(name_arg)
# Python3 ast requires the args for the Attribute, but codegen will mess up
# the arg order if we just set them to 0.
new_arg.value.lineno = node.lineno
new_arg.value.col_offset = node.col_offset+100
node.keywords.append(new_arg)
if isinstance(node.func, ast.Attribute):
node.func.attr = "cast"
else:
assert isinstance(node.func, ast.Name)
node.func.id = "cast"
logs.append((node.lineno, node.col_offset,
"Changed %s call to tf.cast(..., dtype=tf.%s)." % (full_name,
dtype_str)))
return node
@staticmethod
def _softmax_cross_entropy_with_logits_transformer(
parent, node, full_name, name, logs, errors):
def _wrap_label(parent, old_value):
"""Wrap labels with tf.stop_gradient."""
if six.PY3:
new_value = ast.Call(
ast.Name(id="tf.stop_gradient", ctx=ast.Load()),
[old_value], [])
else:
new_value = ast.Call(
ast.Name(id="tf.stop_gradient", ctx=ast.Load()),
[old_value], [], None, None)
# This copies the prefix and suffix on old_value to new_value.
pasta.ast_utils.replace_child(parent, old_value, new_value)
ast.copy_location(new_value, old_value)
# Check if we have a labels keyword arg
for karg in node.keywords:
if karg.arg == "labels":
logs.append((node.lineno, node.col_offset,
"Changing labels arg of "
"tf.nn.softmax_cross_entropy_with_logits to "
"tf.stop_gradient(labels). Please check this "
"transformation.\n"))
_wrap_label(karg, karg.value)
return node
return node
@staticmethod
def _batch_gather_transformer(parent, node, full_name, name, logs, errors):
# Check if the call already has a batch_dims argument
if any([kw.arg == "batch_dims" for kw in node.keywords]):
logs.append((node.lineno, node.col_offset, "tf.batch_gather already has "
"batch_dims argument. Neat."))
return None
minus_one = ast.Num(n=-1)
minus_one.lineno = 0
minus_one.col_offset = 0
new_arg = ast.keyword("batch_dims", minus_one)
node.keywords.append(new_arg)
logs.append((node.lineno, node.col_offset,
"Added keyword argument batch_dims=-1 to tf.batch_gather."))
return node
@staticmethod
def _image_resize_transformer(parent, node, full_name, name, logs, errors):
"""Transforms image.resize_* to image.resize(..., method=*, ...)."""
resize_method = name[7:].upper()
new_arg = ast.keyword(arg="method",
value=ast.Attribute(
value=ast.Attribute(
value=ast.Attribute(
value=ast.Name(id="tf", ctx=ast.Load()),
attr="image", ctx=ast.Load()),
attr="ResizeMethod", ctx=ast.Load()),
attr=resize_method, ctx=ast.Load()))
# Ensures a valid transformation when a positional name arg is given
if len(node.args) == 4:
pos_arg = ast.keyword(arg="preserve_aspect_ratio",
value=node.args[-1])
node.args = node.args[:-1]
node.keywords.append(pos_arg)
if len(node.args) == 3:
pos_arg = ast.keyword(arg="align_corners",
value=node.args[-1])
node.args = node.args[:-1]
node.keywords.append(pos_arg)
# Python3 ast requires the args for the Attribute, but codegen will mess up
# the arg order if we just set them to 0.
new_arg.value.lineno = node.lineno
new_arg.value.col_offset = node.col_offset+100
node.keywords.append(new_arg)
if isinstance(node.func, ast.Attribute):
node.func.attr = "resize"
else:
assert isinstance(node.func, ast.Name)
node.func.id = "resize"
logs.append((node.lineno, node.col_offset,
"Changed %s call to tf.image.resize(..., "
"method=tf.image.ResizeMethod.%s)." % (full_name,
resize_method)))
return node
| apache-2.0 | -5,698,780,922,763,350,000 | 36.933008 | 80 | 0.559642 | false |
deklungel/iRulez | old/modules/telegram/telegram.py | 1 | 12009 | #!/usr/bin/env python
#Version 1.9
import sys
sys.path.append('/var/www/html/modules/libraries')
import time
import pprint
import telepot
import mysql.connector
import datetime
import iRulez_logging as logger
import paho.mqtt.client as mqtt
from telepot.namedtuple import InlineKeyboardMarkup, InlineKeyboardButton
from inspect import currentframe
file = open('/var/www/html/config.php', 'r')
debug = "DEBUG"
info = "INFO"
alert = "ALERT"
logger.printLog(info,'**** Telegram Started ****', str(logger.get_linenumber()))
for line in file:
if "db_name" in line: MySQL_database = line.split('"')[3]
elif "db_user" in line: MySQL_username = line.split('"')[3]
elif "db_password" in line: MySQL_password = line.split('"')[3]
try:
cnx = mysql.connector.connect(user=MySQL_username,password=MySQL_password,database=MySQL_database)
cursor = cnx.cursor()
query = ("SELECT Setting,value FROM Settings")
logger.printLog(debug,query,str(logger.get_linenumber()))
cursor.execute(query)
for (Setting, value) in cursor:
if Setting == "MQTT_ip_address":
MQTT_ip_address = value
elif Setting == "MQTT_port_python":
MQTT_port = int(value)
elif Setting == "BotID":
BotIDTmp = value
elif Setting == "TokenBOT":
BotToken = value
elif Setting == "NotificationSnooze":
NotificationSnooze = value
elif Setting == "TimeBetweenNotification":
TimeBetweenNotification = value
elif Setting == "Notification Method":
NotificationMethod = value
if BotToken == "":
raise Exception('NO BotToken provided')
except Exception as e:
logger.printLog(alert,e,str(logger.get_linenumber()))
raise
finally:
cursor.close()
cnx.close()
BotIDS = BotIDTmp.split('|')
AllLow = []
AllLowN = []
def handle(msg):
chat_id = msg['chat']['id']
command = msg['text']
logger.printLog(debug,'Got command: %s' % command, str(logger.get_linenumber()))
logger.printLog(debug,'Got chatID from : %s' % chat_id, str(logger.get_linenumber()))
if str(chat_id) in BotIDS:
if command == '/status':
try:
cnx = mysql.connector.connect(user=MySQL_username,password=MySQL_password,database=MySQL_database)
cursor = cnx.cursor()
query = ("SELECT naam, arduino, pin FROM Core_Arduino_Outputs WHERE Status = 'ON' AND telegram = '1'")
logger.printLog(debug,str(query), str(logger.get_linenumber()))
cursor.execute(query)
NotificationList = []
for (naam,arduino,pin) in cursor:
NotificationList.append([naam,arduino,pin])
except Exception as e:
logger.printLog(alert,e,str(logger.get_linenumber()))
raise
finally:
cursor.close()
cnx.close()
KeyBoardArray = []
if len(NotificationList) > 0:
Message = "Following lights are on"
else:
Message = "No lights are on!"
global AllLow
AllLow = []
for Notication in NotificationList:
text = str(Notication[0])
callback = NotificationMethod+'|Low|;'+str(Notication[0])+';'+str(Notication[1])+';'+str(Notication[2])
KeyBoardArray.append( [InlineKeyboardButton(text=str(text), callback_data=str(callback))],)
AllLow.append([Notication[1],Notication[2]])
if len(NotificationList) > 1:
text = "* Alles uit *"
callback = NotificationMethod+'|Low|AllLow'
KeyBoardArray.append( [InlineKeyboardButton(text=str(text), callback_data=str(callback))],)
markup = InlineKeyboardMarkup(inline_keyboard=KeyBoardArray)
logger.printLog(debug,"status has been send to "+ str(chat_id), str(logger.get_linenumber()))
bot.sendMessage(chat_id, Message, reply_markup=markup)
elif command == '/enroll':
hide_keyboard = {'hide_keyboard': True}
text = 'Give this ID to you iRulez Administrator: '+str(chat_id)
logger.printLog(debug,"Enrollment has been send to "+ str(chat_id), str(logger.get_linenumber()))
bot.sendMessage(chat_id, text , reply_markup=hide_keyboard)
# elif command == '/time':
# show_keyboard = {'keyboard': [['Yes','No']]}
# hide_keyboard = {'hide_keyboard': True}
# bot.sendMessage(chat_id, 'This is a custom keyboard', reply_markup=hide_keyboard)
def on_callback_query(msg):
query_id, from_id, data = telepot.glance(msg, flavor='callback_query')
logger.printLog(debug,'Callback query:'+ str(query_id) +' '+ str(from_id) +' '+ str(data), str(logger.get_linenumber()))
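 # Callback data layout (as assembled in handle() and checkRelay()):
 #   actionsArr[0] = delivery method ('notification' or 'alert')
 #   actionsArr[1] = action ('Low', 'Ignore' or 'Snooze')
 #   actionsArr[2] = target: 'AllLow', 'AllLowN' or '<id>;<naam>;<arduino>;<pin>'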
actionsArr = data.split('|')
global AllLowN
global AllLow
relais = actionsArr[2].split(';')
if actionsArr[1] == "Low":
if relais[0] == "AllLow":
tmpText = "All Lights are out"
for relais in AllLow:
topic = "arduino"+str(relais[0])+"/relais"+str(relais[1])+"/action"
payload ="L"
logger.printLog(debug,"Publish: " + topic +":"+ payload , str(logger.get_linenumber()) )
mqttc.publish(topic,payload, 0, False)
elif relais[0] == "AllLowN":
tmpText = "All Lights are out"
global AllLowN
for relais in AllLowN:
topic = "arduino"+str(relais[1])+"/relais"+str(relais[2])+"/action"
payload ="L"
logger.printLog(debug,"Publish: " + topic +":"+ payload , str(logger.get_linenumber()) )
mqttc.publish(topic,payload, 0, False)
else:
tmpText = relais[1]+" out"
topic = "arduino"+str(relais[2])+"/relais"+str(relais[3])+"/action"
payload ="L"
logger.printLog(debug,"Publish: " + topic +":"+ payload , str(logger.get_linenumber()) )
mqttc.publish(topic,payload, 0, False)
elif actionsArr[1] == "Ignore":
try:
cnx = mysql.connector.connect(user=MySQL_username,password=MySQL_password,database=MySQL_database)
cursor = cnx.cursor()
for relais in AllLowN:
query = ("UPDATE Core_Arduino_Outputs SET notification_dismiss = 1 WHERE id="+str(relais[0]))
logger.printLog(debug,query, str(logger.get_linenumber()))
cursor.execute(query)
cnx.commit()
except Exception as e:
logger.printLog(alert,e,str(logger.get_linenumber()))
raise
finally:
cursor.close()
cnx.close()
tmpText = "Notification ignored"
elif actionsArr[1] == "Snooze":
Time = datetime.datetime.now() + datetime.timedelta(seconds=int(NotificationSnooze)*60)
try:
cnx = mysql.connector.connect(user=MySQL_username,password=MySQL_password,database=MySQL_database)
cursor = cnx.cursor()
for relais in AllLowN:
query = ("UPDATE Core_Arduino_Outputs SET notification_snooze = '"+str(Time)+"' WHERE id="+str(relais[0]))
logger.printLog(debug,query, str(logger.get_linenumber()))
cursor.execute(query)
cnx.commit()
except Exception as e:
logger.printLog(alert,e,str(logger.get_linenumber()))
raise
finally:
cursor.close()
cnx.close()
tmpText = "Notifications Snoozed for "+str(NotificationSnooze)+"min"
if actionsArr[0] == 'notification':
logger.printLog(debug,"Notification has been send to "+ str(query_id), str(logger.get_linenumber()))
bot.answerCallbackQuery(query_id, text=tmpText)
elif actionsArr[0] == 'alert':
logger.printLog(debug,"Alert has been send to "+ str(query_id), str(logger.get_linenumber()))
bot.answerCallbackQuery(query_id, text=tmpText, show_alert=True)
def on_connect(mqttc, obj, rc):
logger.printLog(debug,"rc: "+str(rc) , str(logger.get_linenumber()))
def on_message(mqttc, obj, msg):
hide_keyboard = {'hide_keyboard': True}
for BotID in BotIDS:
logger.printLog(debug,"Notification message has been send to "+BotID, str(logger.get_linenumber()))
bot.sendMessage(int(BotID), str(msg.payload) , reply_markup=hide_keyboard)
def on_publish(mqttc, obj, mid):
logger.printLog(debug,"Publish: "+str(mid), str(logger.get_linenumber()))
def on_subscribe(mqttc, obj, mid, granted_qos):
logger.printLog(debug,"Subscribed: "+str(mid)+" "+str(granted_qos) , str(logger.get_linenumber()))
def on_log(mqttc, obj, level, string):
logger.printLog(debug,string , str(logger.get_linenumber()))
def on_disconnect(client, userdata, rc):
logger.printLog(info, "on_disconnect!", str(logger.get_linenumber()))
exit()
def checkRelay():
try:
cnx = mysql.connector.connect(user=MySQL_username,password=MySQL_password,database=MySQL_database)
cursor = cnx.cursor()
query = ("SELECT id, naam, status_time, notification, arduino, pin, notification_snooze FROM Core_Arduino_Outputs WHERE notification IS NOT NULL AND notification <> '' AND notification_dismiss = 0 AND status = 'ON' ")
logger.printLog(debug,str(query), str(logger.get_linenumber()))
cursor.execute(query)
NotificationList = []
for (id, naam, Time_on, notification,arduino,pin,snooze) in cursor:
logger.printLog(debug,'Found Record: %s' % naam, str(logger.get_linenumber()))
Time = datetime.datetime.now()
time_delta = (Time - Time_on).total_seconds()
if (int(time_delta) > int(notification)):
if snooze is None:
logger.printLog(debug,'Add : %s to notification list NOT SNOOZED' % naam, str(logger.get_linenumber()))
NotificationList.append([id,naam,arduino,pin])
else:
if snooze < Time :
logger.printLog(debug,'Add : %s to notification list SNOOZED' % naam, str(logger.get_linenumber()))
NotificationList.append([id,naam,arduino,pin])
except Exception as e:
logger.printLog(alert,e,str(logger.get_linenumber()))
raise
finally:
cursor.close()
cnx.close()
if len(NotificationList) >0:
KeyBoardArray = []
Message = "Following lights are on"
global AllLowN
AllLowN = []
for Notication in NotificationList:
text = str(Notication[1])
callback = NotificationMethod+'|Low|'+str(Notication[0])+';'+str(Notication[1])+';'+str(Notication[2])+';'+str(Notication[3])
KeyBoardArray.append( [InlineKeyboardButton(text=str(text), callback_data=str(callback))],)
AllLowN.append([Notication[0],Notication[2],Notication[3]])
if len(NotificationList) > 1:
text = "* Alles uit *"
callback = NotificationMethod+'|Low|AllLowN'
KeyBoardArray.append( [InlineKeyboardButton(text=str(text), callback_data=str(callback))],)
text = "* Ignore *"
callback = NotificationMethod+'|Ignore|'
KeyBoardArray.append( [InlineKeyboardButton(text=str(text), callback_data=str(callback))],)
text = "* Snooze "+str(NotificationSnooze)+"min *"
callback = NotificationMethod+'|Snooze|'
KeyBoardArray.append( [InlineKeyboardButton(text=str(text), callback_data=str(callback))],)
markup = InlineKeyboardMarkup(inline_keyboard=KeyBoardArray)
for BotID in BotIDS:
logger.printLog(debug,"Notification message has been send to "+BotID, str(logger.get_linenumber()))
bot.sendMessage(int(BotID), Message, reply_markup=markup)
Time = datetime.datetime.now() + datetime.timedelta(seconds=int(TimeBetweenNotification)*60)
try:
cnx = mysql.connector.connect(user=MySQL_username,password=MySQL_password,database=MySQL_database)
cursor = cnx.cursor()
for relais in AllLowN:
query = ("UPDATE Core_Arduino_Outputs SET notification_snooze = '"+str(Time)+"' WHERE id="+str(relais[0]))
logger.printLog(debug,query, str(logger.get_linenumber()))
cursor.execute(query)
cnx.commit()
except Exception as e:
logger.printLog(alert,e,str(logger.get_linenumber()))
raise
finally:
cursor.close()
cnx.close()
#tmpText = "Notifications Snoozed for "+str(NotificationSnooze)+"min"
bot = telepot.Bot(BotToken)
bot.message_loop({'chat': handle,'callback_query': on_callback_query})
logger.printLog(info,"Listening ...",str(logger.get_linenumber()))
# Keep the program running.
mqttc = mqtt.Client()
mqttc.on_message = on_message
mqttc.on_connect = on_connect
mqttc.on_publish = on_publish
mqttc.on_subscribe = on_subscribe
mqttc.on_disconnect = on_disconnect
# Uncomment to enable debug messages
#mqttc.on_log = on_log
running = True
while running:
try:
mqttc.connect(MQTT_ip_address,int(MQTT_port), 60)
running = False
except:
logger.printLog(alert,"Sleep" , str(logger.get_linenumber()))
time.sleep(5)
logger.printLog(info,"Connected" , str(logger.get_linenumber()))
mqttc.subscribe("Telegram/Message", 0)
counter = int(time.time())
while True:
mqttc.loop()
if(counter + 10 <= int(time.time())):
checkRelay()
counter = int(time.time()) | mit | -7,143,340,784,685,586,000 | 35.840491 | 219 | 0.696394 | false |
efiop/dvc | dvc/repo/get.py | 1 | 1920 | import logging
import os
from dvc.exceptions import DvcException
from dvc.path_info import PathInfo
from dvc.utils import resolve_output
from dvc.utils.fs import remove
logger = logging.getLogger(__name__)
class GetDVCFileError(DvcException):
def __init__(self):
super().__init__(
"the given path is a DVC file, you must specify a data file "
"or a directory"
)
def get(url, path, out=None, rev=None, jobs=None):
import shortuuid
from dvc.dvcfile import is_valid_filename
from dvc.external_repo import external_repo
out = resolve_output(path, out)
if is_valid_filename(out):
raise GetDVCFileError()
# Creating a directory right beside the output to make sure that they
    # are on the same filesystem, so we could take advantage of
# reflink and/or hardlink. Not using tempfile.TemporaryDirectory
# because it will create a symlink to tmpfs, which defeats the purpose
# and won't work with reflink/hardlink.
dpath = os.path.dirname(os.path.abspath(out))
tmp_dir = os.path.join(dpath, "." + str(shortuuid.uuid()))
# Try any links possible to avoid data duplication.
#
# Not using symlink, because we need to remove cache after we
# are done, and to make that work we would have to copy data
# over anyway before removing the cache, so we might just copy
# it right away.
#
# Also, we can't use theoretical "move" link type here, because
# the same cache file might be used a few times in a directory.
cache_types = ["reflink", "hardlink", "copy"]
try:
with external_repo(
url=url, rev=rev, cache_dir=tmp_dir, cache_types=cache_types
) as repo:
from_info = PathInfo(repo.root_dir) / path
to_info = PathInfo(out)
repo.repo_fs.download(from_info, to_info, jobs=jobs)
finally:
remove(tmp_dir)
| apache-2.0 | 8,423,483,636,947,428,000 | 32.684211 | 74 | 0.663542 | false |
GoogleCloudPlatform/iot-core-micropython | third_party/rsa/cli.py | 1 | 9382 | # -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commandline scripts.
These scripts are called by the executables defined in setup.py.
"""
from __future__ import with_statement, print_function
import abc
import sys
from optparse import OptionParser
import third_party.rsa
import third_party.rsa.pkcs1
HASH_METHODS = sorted(third_party.rsa.pkcs1.HASH_METHODS.keys())
def keygen():
"""Key generator."""
# Parse the CLI options
parser = OptionParser(usage='usage: %prog [options] keysize',
description='Generates a new RSA keypair of "keysize" bits.')
parser.add_option('--pubout', type='string',
help='Output filename for the public key. The public key is '
'not saved if this option is not present. You can use '
'pyrsa-priv2pub to create the public key file later.')
parser.add_option('-o', '--out', type='string',
help='Output filename for the private key. The key is '
'written to stdout if this option is not present.')
parser.add_option('--form',
help='key format of the private and public keys - default PEM',
choices=('PEM', 'DER'), default='PEM')
(cli, cli_args) = parser.parse_args(sys.argv[1:])
if len(cli_args) != 1:
parser.print_help()
raise SystemExit(1)
try:
keysize = int(cli_args[0])
except ValueError:
parser.print_help()
print('Not a valid number: %s' % cli_args[0], file=sys.stderr)
raise SystemExit(1)
print('Generating %i-bit key' % keysize, file=sys.stderr)
(pub_key, priv_key) = third_party.rsa.newkeys(keysize)
# Save public key
if cli.pubout:
print('Writing public key to %s' % cli.pubout, file=sys.stderr)
data = pub_key.save_pkcs1(format=cli.form)
with open(cli.pubout, 'wb') as outfile:
outfile.write(data)
# Save private key
data = priv_key.save_pkcs1(format=cli.form)
if cli.out:
print('Writing private key to %s' % cli.out, file=sys.stderr)
with open(cli.out, 'wb') as outfile:
outfile.write(data)
else:
print('Writing private key to stdout', file=sys.stderr)
third_party.rsa._compat.write_to_stdout(data)
class CryptoOperation(object):
"""CLI callable that operates with input, output, and a key."""
__metaclass__ = abc.ABCMeta
keyname = 'public' # or 'private'
usage = 'usage: %%prog [options] %(keyname)s_key'
description = None
operation = 'decrypt'
operation_past = 'decrypted'
operation_progressive = 'decrypting'
input_help = 'Name of the file to %(operation)s. Reads from stdin if ' \
'not specified.'
output_help = 'Name of the file to write the %(operation_past)s file ' \
'to. Written to stdout if this option is not present.'
expected_cli_args = 1
has_output = True
key_class = third_party.rsa.PublicKey
def __init__(self):
self.usage = self.usage % self.__class__.__dict__
self.input_help = self.input_help % self.__class__.__dict__
self.output_help = self.output_help % self.__class__.__dict__
@abc.abstractmethod
def perform_operation(self, indata, key, cli_args):
"""Performs the program's operation.
Implement in a subclass.
:returns: the data to write to the output.
"""
def __call__(self):
"""Runs the program."""
(cli, cli_args) = self.parse_cli()
key = self.read_key(cli_args[0], cli.keyform)
indata = self.read_infile(cli.input)
print(self.operation_progressive.title(), file=sys.stderr)
outdata = self.perform_operation(indata, key, cli_args)
if self.has_output:
self.write_outfile(outdata, cli.output)
def parse_cli(self):
"""Parse the CLI options
:returns: (cli_opts, cli_args)
"""
parser = OptionParser(usage=self.usage, description=self.description)
parser.add_option('-i', '--input', type='string', help=self.input_help)
if self.has_output:
parser.add_option('-o', '--output', type='string', help=self.output_help)
parser.add_option('--keyform',
help='Key format of the %s key - default PEM' % self.keyname,
choices=('PEM', 'DER'), default='PEM')
(cli, cli_args) = parser.parse_args(sys.argv[1:])
if len(cli_args) != self.expected_cli_args:
parser.print_help()
raise SystemExit(1)
return cli, cli_args
def read_key(self, filename, keyform):
"""Reads a public or private key."""
print('Reading %s key from %s' % (self.keyname, filename), file=sys.stderr)
with open(filename, 'rb') as keyfile:
keydata = keyfile.read()
return self.key_class.load_pkcs1(keydata, keyform)
def read_infile(self, inname):
"""Read the input file"""
if inname:
print('Reading input from %s' % inname, file=sys.stderr)
with open(inname, 'rb') as infile:
return infile.read()
print('Reading input from stdin', file=sys.stderr)
return sys.stdin.read()
def write_outfile(self, outdata, outname):
"""Write the output file"""
if outname:
print('Writing output to %s' % outname, file=sys.stderr)
with open(outname, 'wb') as outfile:
outfile.write(outdata)
else:
print('Writing output to stdout', file=sys.stderr)
third_party.rsa._compat.write_to_stdout(outdata)
class EncryptOperation(CryptoOperation):
"""Encrypts a file."""
keyname = 'public'
description = ('Encrypts a file. The file must be shorter than the key '
'length in order to be encrypted.')
operation = 'encrypt'
operation_past = 'encrypted'
operation_progressive = 'encrypting'
def perform_operation(self, indata, pub_key, cli_args=None):
"""Encrypts files."""
return third_party.rsa.encrypt(indata, pub_key)
class DecryptOperation(CryptoOperation):
"""Decrypts a file."""
keyname = 'private'
description = ('Decrypts a file. The original file must be shorter than '
'the key length in order to have been encrypted.')
operation = 'decrypt'
operation_past = 'decrypted'
operation_progressive = 'decrypting'
key_class = third_party.rsa.PrivateKey
def perform_operation(self, indata, priv_key, cli_args=None):
"""Decrypts files."""
return third_party.rsa.decrypt(indata, priv_key)
class SignOperation(CryptoOperation):
"""Signs a file."""
keyname = 'private'
usage = 'usage: %%prog [options] private_key hash_method'
description = ('Signs a file, outputs the signature. Choose the hash '
'method from %s' % ', '.join(HASH_METHODS))
operation = 'sign'
operation_past = 'signature'
operation_progressive = 'Signing'
key_class = third_party.rsa.PrivateKey
expected_cli_args = 2
output_help = ('Name of the file to write the signature to. Written '
'to stdout if this option is not present.')
def perform_operation(self, indata, priv_key, cli_args):
"""Signs files."""
hash_method = cli_args[1]
if hash_method not in HASH_METHODS:
raise SystemExit('Invalid hash method, choose one of %s' %
', '.join(HASH_METHODS))
return third_party.rsa.sign(indata, priv_key, hash_method)
class VerifyOperation(CryptoOperation):
"""Verify a signature."""
keyname = 'public'
usage = 'usage: %%prog [options] public_key signature_file'
description = ('Verifies a signature, exits with status 0 upon success, '
'prints an error message and exits with status 1 upon error.')
operation = 'verify'
operation_past = 'verified'
operation_progressive = 'Verifying'
key_class = third_party.rsa.PublicKey
expected_cli_args = 2
has_output = False
def perform_operation(self, indata, pub_key, cli_args):
"""Verifies files."""
signature_file = cli_args[1]
with open(signature_file, 'rb') as sigfile:
signature = sigfile.read()
try:
third_party.rsa.verify(indata, signature, pub_key)
except third_party.rsa.VerificationError:
raise SystemExit('Verification failed.')
print('Verification OK', file=sys.stderr)
encrypt = EncryptOperation()
decrypt = DecryptOperation()
sign = SignOperation()
verify = VerifyOperation()
| apache-2.0 | -8,427,974,887,891,780,000 | 31.572917 | 87 | 0.612941 | false |
rachekalmir/pyJolt | pyjolt/util/tree_manager.py | 1 | 6011 | import itertools
from collections import defaultdict
from typing import Union, List, Dict
from pyjolt.exceptions import JoltException
def pairwise(iterable):
"""s -> (s0,s1), (s1,s2), (s2, s3), ..."""
a, b = itertools.tee(iterable)
next(b, None)
return itertools.zip_longest(a, b)
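# Illustrative behaviour (note the trailing (last, None) pair produced by
# zip_longest): list(pairwise([1, 2, 3])) == [(1, 2), (2, 3), (3, None)]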
def type_generator(item):
if isinstance(item, str):
return {}
elif isinstance(item, int):
return []
raise JoltException()
def id_generator():
"""Generator function to generate numbers from 0 onwards"""
start_value = 0
while True:
yield start_value
start_value += 1
class AutoDefaultDict(defaultdict):
"""Default dictionary that calls the specified function to get the new value."""
def __init__(self, f_of_x):
super().__init__(None) # Create the base defaultdict class with no default
self.f_of_x = f_of_x # Save the function
def __missing__(self, key): # __missing__ is called when a default value is needed
ret = next(self.f_of_x) # Calculate default value
self[key] = ret # Save the default value in the local dictionary
return ret
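# Illustrative usage (mirrors PropertyHolder.array_bind below):
#   d = AutoDefaultDict(id_generator())
#   d['x'], d['y'], d['x']  ->  (0, 1, 0)   # 'x' is cached after the first lookup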
class ResultManager(object):
def __init__(self):
self._data = {}
def assign(self, path_list: list, value):
dv = self._data
for item, next_item in pairwise(path_list):
if next_item is None:
# If next_item is None then this is where the assignment to the value will take place
if isinstance(dv, list):
if len(dv) <= item:
# If the current array is too short for the requested assignment, pad the array with Nones
dv += [None] * (item + 1 - len(dv))
dv[item] = value
elif isinstance(dv, dict) and dv.get(item) is not None:
if isinstance(dv[item], list):
dv[item] += [value]
else:
dv[item] = [dv[item], value]
else:
dv[item] = value
break
elif isinstance(dv, list) and len(dv) <= item:
# Special case for array indexing to extend the array thereby ensuring no IndexOutOfBounds exception is encountered
dv += [None] * (item + 1 - len(dv))
if isinstance(dv, dict) and dv.get(item) is not None:
dv = dv[item]
elif isinstance(dv, list) and len(dv) > item and dv[item] is not None:
dv = dv[item]
else:
dv[item] = dv = type_generator(next_item)
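# Illustrative behaviour of ResultManager.assign (derived from the code above):
#   rm = ResultManager()
#   rm.assign(['a', 0, 'b'], 1)
#   rm._data  ->  {'a': [{'b': 1}]}   # str keys create dicts, int keys create lists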
class PropertyHolder(object):
def __init__(self, matches: list = None):
self.matches = [] if matches is None else matches
self.array_bind = AutoDefaultDict(id_generator())
def __repr__(self):
return 'PropertyHolder({matches})'.format(matches=self.matches)
class PropertyManager(object):
def __init__(self):
self._properties = {}
def __getitem__(self, key: Union[tuple, list]) -> PropertyHolder:
key = tuple(key) if isinstance(key, list) else key
v = self._properties.get(key)
if not isinstance(v, PropertyHolder):
v = self._properties[key] = PropertyHolder()
return v
# def __setitem__(self, key, value):
# self._properties[tuple(key) if isinstance(key, list) else key] = value
class Tree(object):
"""
A recursive dictionary type object with tree context.
"""
def __init__(self, dictionary: dict):
self.dictionary = dictionary
def __getitem__(self, item):
return self.dictionary[item]
def __repr__(self):
return "Tree(" + repr(self.dictionary) + ")"
class TreeManager(object):
"""
Manager object to keep track of where you are in a dictionary tree object.
self._tree is the full tree
    self.path is the current path in the object
self._dict is the local cached object computed using self.path on self._tree
"""
def __init__(self, tree: Union[Tree, Dict], path: List[str]):
self._tree = tree if isinstance(tree, Tree) else Tree(tree)
self.path = path
self._dict = self._tree.dictionary
for i in path:
if isinstance(self._dict, dict):
self._dict = self._dict[i]
elif isinstance(self._dict, list):
self._dict = self._dict[int(i)]
elif self._dict == i:
self._dict = None
else:
raise KeyError()
def __getitem__(self, item: str):
# type: (...) -> TreeManager
return TreeManager(self._tree, self.path + [item])
def __iter__(self):
if isinstance(self._dict, dict):
for key in self._dict.keys():
yield key, TreeManager(self._tree, self.path + [key])
elif isinstance(self._dict, list):
for index, _ in enumerate(self._dict):
yield TreeManager(self._tree, self.path + [index])
else:
raise JoltException()
def __repr__(self):
return 'TreeManager(' + repr(self.current_key) + ', ' + repr(self._dict) + ')'
def keys(self):
if isinstance(self._dict, dict):
return self._dict.keys()
elif isinstance(self._dict, list):
return range(len(self._dict))
return [self._dict]
@property
def current_key(self):
return self.path[-1] if self.path else None
@property
def value(self):
return self._dict
def ascend(self, levels: int):
        # type: (...) -> TreeManager
if levels == 0:
return self
if levels < 0:
# TODO raise exception here
pass
return TreeManager(self._tree, self.path[:-levels])
def descend(self, key: Union[str, list]):
        # type: (...) -> TreeManager
return TreeManager(self._tree, self.path + (key if isinstance(key, list) else [key]))
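# Illustrative usage sketch for TreeManager (assumed, not part of the original
# module):
#
#     tm = TreeManager({'a': {'b': 1}}, [])
#     node = tm['a']['b']
#     node.value          # -> 1
#     node.current_key    # -> 'b'
#     node.ascend(1).path # -> ['a']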
| apache-2.0 | -7,570,709,542,070,465,000 | 31.491892 | 131 | 0.562635 | false |
BirkbeckCTP/janeway | src/review/views.py | 1 | 87770 | __copyright__ = "Copyright 2017 Birkbeck, University of London"
__author__ = "Martin Paul Eve & Andy Byers"
__license__ = "AGPL v3"
__maintainer__ = "Birkbeck Centre for Technology and Publishing"
from uuid import uuid4
from collections import Counter
from datetime import timedelta
from django.contrib import messages
from django.urls import reverse
from django.shortcuts import render, get_object_or_404, redirect
from django.db.models import Q
from django.utils import timezone
from django.http import Http404
from django.core.exceptions import PermissionDenied
from django.conf import settings
from urllib import parse
from django.views.decorators.http import require_POST
from django.http import HttpResponse, JsonResponse
from core import models as core_models, files, forms as core_forms
from events import logic as event_logic
from review import models, logic, forms, hypothesis
from security.decorators import (
editor_user_required, reviewer_user_required,
reviewer_user_for_assignment_required,
file_user_required, article_decision_not_made, article_author_required,
editor_is_not_author, senior_editor_user_required,
section_editor_draft_decisions, article_stage_review_required
)
from submission import models as submission_models, forms as submission_forms
from utils import models as util_models, ithenticate, shared, setting_handler
from utils.logger import get_logger
logger = get_logger(__name__)
@senior_editor_user_required
def home(request):
"""
Displays a list of review articles.
:param request: HttpRequest object
:return: HttpResponse
"""
articles = submission_models.Article.objects.filter(
Q(stage=submission_models.STAGE_ASSIGNED) |
Q(stage=submission_models.STAGE_UNDER_REVIEW) |
Q(stage=submission_models.STAGE_UNDER_REVISION),
journal=request.journal
)
filter = request.GET.get('filter', None)
if filter == 'me':
assignments = models.EditorAssignment.objects.filter(article__journal=request.journal,
editor=request.user)
assignment_article_pks = [assignment.article.pk for assignment in assignments]
articles = articles.filter(pk__in=assignment_article_pks)
template = 'review/home.html'
context = {
'articles': articles,
'filter': filter,
}
return render(request, template, context)
@senior_editor_user_required
def unassigned(request):
"""
Displays a list of unassigned articles.
:param request: HttpRequest object
:return: HttpResponse
"""
articles = submission_models.Article.objects.filter(stage=submission_models.STAGE_UNASSIGNED,
journal=request.journal)
template = 'review/unassigned.html'
context = {
'articles': articles,
}
return render(request, template, context)
@editor_user_required
def unassigned_article(request, article_id):
"""
    Displays metadata of an individual article and can send details to Crosscheck for reporting.
:param request: HttpRequest object
:param article_id: Article PK
:return: HttpResponse or Redirect if POST
"""
article = get_object_or_404(submission_models.Article, pk=article_id)
if article.ithenticate_id and not article.ithenticate_score:
ithenticate.fetch_percentage(request.journal, [article])
if 'crosscheck' in request.POST:
file_id = request.POST.get('crosscheck')
file = get_object_or_404(core_models.File, pk=file_id)
try:
id = ithenticate.send_to_ithenticate(article, file)
article.ithenticate_id = id
article.save()
except AssertionError:
messages.add_message(
request,
messages.ERROR,
'Error returned by iThenticate. '
'Check login details and API status.',
)
return redirect(
reverse(
'review_unassigned_article',
kwargs={'article_id': article.pk},
)
)
current_editors = [assignment.editor.pk for assignment in
models.EditorAssignment.objects.filter(article=article)]
editors = core_models.AccountRole.objects.filter(
role__slug='editor',
journal=request.journal).exclude(user__id__in=current_editors)
section_editors = core_models.AccountRole.objects.filter(
role__slug='section-editor',
journal=request.journal
).exclude(user__id__in=current_editors)
template = 'review/unassigned_article.html'
context = {
'article': article,
'editors': editors,
'section_editors': section_editors,
}
return render(request, template, context)
@editor_user_required
def add_projected_issue(request, article_id):
"""
Allows an editor to add a projected issue to an article.
"""
article = get_object_or_404(
submission_models.Article,
pk=article_id,
)
form = submission_forms.ProjectedIssueForm(instance=article)
if request.POST:
form = submission_forms.ProjectedIssueForm(
request.POST,
instance=article,
)
if form.is_valid():
form.save()
messages.add_message(
request,
messages.SUCCESS,
'Projected Issue set.',
)
if request.GET.get('return'):
return redirect(
request.GET.get('return'),
)
else:
return redirect(
reverse(
'review_projected_issue',
kwargs={'article_id': article.pk},
)
)
template = 'review/projected_issue.html'
context = {
'article': article,
'form': form,
}
return render(request, template, context)
@editor_user_required
def view_ithenticate_report(request, article_id):
"""Allows editor to view similarity report."""
article = get_object_or_404(
submission_models.Article,
pk=article_id,
ithenticate_id__isnull=False,
)
ithenticate_url = ithenticate.fetch_url(article)
if ithenticate_url:
return redirect(ithenticate_url)
template = 'review/ithenticate_failure.html'
context = {
'article': article,
}
return render(request, template, context)
@senior_editor_user_required
def assign_editor_move_to_review(request, article_id, editor_id, assignment_type):
"""Allows an editor to assign another editor to an article and moves to review."""
assign_editor(request, article_id, editor_id, assignment_type, should_redirect=False)
return move_to_review(request, article_id)
@senior_editor_user_required
def assign_editor(request, article_id, editor_id, assignment_type, should_redirect=True):
"""
Allows a Senior Editor to assign another editor to an article.
:param request: HttpRequest object
:param article_id: Article PK
:param editor_id: Account PK
:param assignment_type: string, 'section-editor' or 'editor'
:param should_redirect: if true, we redirect the user to the notification page
:return: HttpResponse or HttpRedirect
"""
article = get_object_or_404(submission_models.Article, pk=article_id)
editor = get_object_or_404(core_models.Account, pk=editor_id)
if not editor.has_an_editor_role(request):
messages.add_message(request, messages.WARNING, 'User is not an Editor or Section Editor')
return redirect(reverse('review_unassigned_article', kwargs={'article_id': article.pk}))
_, created = logic.assign_editor(article, editor, assignment_type, request)
messages.add_message(request, messages.SUCCESS, '{0} added as an Editor'.format(editor.full_name()))
if created and should_redirect:
return redirect('{0}?return={1}'.format(
reverse('review_assignment_notification', kwargs={'article_id': article_id, 'editor_id': editor.pk}),
request.GET.get('return')))
elif not created:
messages.add_message(request, messages.WARNING,
'{0} is already an Editor on this article.'.format(editor.full_name()))
if should_redirect:
return redirect(reverse('review_unassigned_article', kwargs={'article_id': article_id}))
@senior_editor_user_required
def unassign_editor(request, article_id, editor_id):
"""Unassigns an editor from an article"""
article = get_object_or_404(submission_models.Article, pk=article_id)
editor = get_object_or_404(core_models.Account, pk=editor_id)
assignment = get_object_or_404(
models.EditorAssignment, article=article, editor=editor
)
email_content = logic.get_unassignment_notification(request, assignment)
if request.method == "POST":
email_content = request.POST.get('content_email')
kwargs = {'message': email_content,
'assignment': assignment,
'request': request,
'skip': request.POST.get('skip', False)
}
event_logic.Events.raise_event(
event_logic.Events.ON_ARTICLE_UNASSIGNED, **kwargs)
assignment.delete()
util_models.LogEntry.add_entry(
types='EditorialAction',
description='Editor {0} unassigned from article {1}'
''.format(editor.full_name(), article.id),
level='Info',
request=request,
target=article,
)
return redirect(reverse(
'review_unassigned_article', kwargs={'article_id': article_id}
))
template = 'review/unassign_editor.html'
context = {
'article': article,
'assignment': assignment,
'email_content': email_content,
}
return render(request, template, context)
@senior_editor_user_required
def assignment_notification(request, article_id, editor_id):
"""
    A senior editor can send a notification to an assigned editor.
:param request: HttpRequest object
:param article_id: Article PK
:param editor_id: Account PK
:return: HttpResponse or HttpRedirect
"""
article = get_object_or_404(submission_models.Article, pk=article_id)
editor = get_object_or_404(core_models.Account, pk=editor_id)
assignment = get_object_or_404(models.EditorAssignment, article=article, editor=editor, notified=False)
email_content = logic.get_assignment_content(request, article, editor, assignment)
if request.POST:
email_content = request.POST.get('content_email')
kwargs = {'user_message_content': email_content,
'editor_assignment': assignment,
'request': request,
'skip': False,
'acknowledgement': True}
if 'skip' in request.POST:
kwargs['skip'] = True
event_logic.Events.raise_event(event_logic.Events.ON_ARTICLE_ASSIGNED_ACKNOWLEDGE, **kwargs)
if request.GET.get('return', None):
return redirect(request.GET.get('return'))
else:
return redirect(reverse('review_unassigned_article', kwargs={'article_id': article_id}))
template = 'review/assignment_notification.html'
context = {
'article': article_id,
'editor': editor,
'assignment': assignment,
'email_content': email_content,
}
return render(request, template, context)
@editor_user_required
def move_to_review(request, article_id, should_redirect=True):
"""Moves an article into the review stage"""
article = get_object_or_404(submission_models.Article, pk=article_id)
if article.editorassignment_set.all().count() > 0:
article.stage = submission_models.STAGE_ASSIGNED
article.save()
review_round, created = models.ReviewRound.objects.get_or_create(article=article, round_number=1)
if not created:
messages.add_message(request, messages.WARNING, 'A default review round already exists for this article.')
else:
        messages.add_message(request, messages.INFO, 'You must assign an editor before moving into review.')
if should_redirect:
if request.GET.get('return', None):
return redirect(request.GET.get('return'))
else:
return redirect("{0}?modal_id={1}".format(reverse('kanban_home'), article_id))
@editor_is_not_author
@editor_user_required
def in_review(request, article_id):
"""
Displays an article's review management page
:param request: HttpRequest object
:param article_id: Article PK
:return: HttpResponse
"""
article = get_object_or_404(submission_models.Article, pk=article_id)
review_rounds = models.ReviewRound.objects.filter(article=article)
revisions_requests = models.RevisionRequest.objects.filter(article=article)
if not review_rounds:
models.ReviewRound.objects.create(article=article, round_number=1)
return redirect(reverse('review_in_review', kwargs={'article_id': article.id}))
if request.POST:
if 'new_review_round' in request.POST:
# Complete all existing review assignments.
for assignment in article.current_review_round_object().reviewassignment_set.all():
if not assignment.date_complete:
assignment.date_complete = timezone.now()
assignment.decision = 'withdrawn'
assignment.is_complete = True
assignment.save()
messages.add_message(request, messages.INFO, 'Assignment {0} closed.'.format(assignment.id))
kwargs = {'review_assignment': assignment,
'request': request}
event_logic.Events.raise_event(event_logic.Events.ON_REVIEW_CLOSED,
task_object=assignment.article,
**kwargs)
# Add a new review round.
new_round_number = article.current_review_round() + 1
models.ReviewRound.objects.create(article=article, round_number=new_round_number)
article.stage = submission_models.STAGE_UNDER_REVIEW
article.save()
return redirect(reverse('review_in_review', kwargs={'article_id': article_id}))
template = 'review/in_review.html'
context = {
'article': article,
'review_rounds': review_rounds,
'revisions_requests': revisions_requests,
}
return render(request, template, context)
@editor_user_required
@article_stage_review_required
def send_review_reminder(request, article_id, review_id, reminder_type):
"""
    Allows an editor to resend a review invite or manually send a reminder.
:param request: HttpRequest object
:param article_id: PK of an Article object
:param review_id: PK of a ReviewAssignment object
    :param reminder_type: string, either 'request' or 'accepted'
:return: HttpResponse or HttpRedirect
"""
article = get_object_or_404(
submission_models.Article,
pk=article_id,
journal=request.journal,
)
review_assignment = get_object_or_404(
models.ReviewAssignment,
pk=review_id,
article=article,
is_complete=False,
)
# If this review has not been accepted, you cannot send an accepted
# reminder, add a message and redirect.
if not review_assignment.date_accepted and reminder_type == 'accepted':
messages.add_message(
request,
messages.INFO,
'You cannot send this reminder type. Review not accepted.'
)
return redirect(
reverse(
'review_in_review',
kwargs={'article_id': article.pk}
)
)
email_content = logic.get_reminder_content(
reminder_type,
article,
review_assignment,
request
)
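    # reminder_type is either 'request' (invitation sent but not yet accepted)
    # or 'accepted' (review accepted but not yet submitted); the reminder text
    # built above is presumably selected on that basis.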
form_initials = {
'body': email_content,
'subject': 'Review Request Reminder'
}
form = forms.ReviewReminderForm(
initial=form_initials
)
if request.POST:
form = forms.ReviewReminderForm(
request.POST
)
if form.is_valid():
logic.send_review_reminder(
request,
form,
review_assignment,
reminder_type
)
messages.add_message(
request,
messages.SUCCESS,
'Email sent'
)
return redirect(
reverse(
'review_in_review',
kwargs={'article_id': article.pk}
)
)
template = 'review/send_review_reminder.html'
context = {
'article': article,
'assignment': review_assignment,
'form': form,
}
return render(request, template, context)
@editor_is_not_author
@editor_user_required
def delete_review_round(request, article_id, round_id):
"""
Deletes a review round if it is not already closed.
:param request: HttpRequest object
:param article_id: Article PK
:param round_id: Round PK
:return: HttpResponse or HttpRedirect
"""
article = get_object_or_404(submission_models.Article, pk=article_id)
review_round = get_object_or_404(models.ReviewRound, pk=round_id)
if request.POST:
if 'delete' in request.POST:
review_round.delete()
if article.is_under_revision():
article.stage = submission_models.STAGE_UNDER_REVISION
article.save()
messages.add_message(request, messages.INFO, 'Round {0} deleted.'.format(review_round.round_number))
return redirect(reverse('review_in_review', kwargs={'article_id': article_id}))
elif not review_round.round_number == article.current_review_round():
messages.add_message(request, messages.INFO, 'Cannot delete a closed round.')
return redirect(reverse('review_in_review', kwargs={'article_id': article_id}))
template = 'review/delete_review_round.html'
context = {
'article': article,
'round': review_round,
}
return render(request, template, context)
@editor_is_not_author
@article_decision_not_made
@editor_user_required
def add_files(request, article_id, round_id):
"""
Interface for adding files to a review round.
:param request: HttpRequest object
:param article_id: Article PK
:param round_id: Round PK
:return: HttpResponse or HttpRedirect
"""
article = get_object_or_404(submission_models.Article.objects.prefetch_related('manuscript_files'), pk=article_id)
review_round = get_object_or_404(models.ReviewRound.objects.prefetch_related('review_files'), pk=round_id)
if request.POST:
if 'upload' in request.POST:
review_files = request.FILES.getlist('review_file')
if review_files:
for review_file in review_files:
new_file_obj = files.save_file_to_article(review_file, article, request.user, 'Review File')
article.manuscript_files.add(new_file_obj)
messages.add_message(request, messages.SUCCESS, 'File uploaded')
else:
messages.add_message(request, messages.WARNING, 'No file uploaded.')
return redirect(reverse('review_add_files', kwargs={'article_id': article.pk, 'round_id': review_round.pk}))
for file in request.POST.getlist('file'):
file = core_models.File.objects.get(id=file)
review_round.review_files.add(file)
messages.add_message(request, messages.INFO, 'File {0} added.'.format(file.label))
if not request.POST.getlist('file'):
messages.add_message(request, messages.WARNING,
'Please select at least one file, or press the Cancel button.')
return redirect(reverse('review_in_review', kwargs={'article_id': article_id}))
template = 'review/add_files.html'
context = {
'article': article,
'round': review_round,
}
return render(request, template, context)
@editor_is_not_author
@article_decision_not_made
@editor_user_required
def remove_file(request, article_id, round_id, file_id):
"""Removes a file from a review round."""
article = get_object_or_404(submission_models.Article, pk=article_id)
review_round = get_object_or_404(models.ReviewRound, pk=round_id)
file = get_object_or_404(core_models.File, pk=file_id)
if review_round.round_number == article.current_review_round():
review_round.review_files.remove(file)
messages.add_message(request, messages.INFO, 'File {0} removed.'.format(file.label))
else:
messages.add_message(request, messages.INFO,
                             'Cannot remove file {0} from a closed review round.'.format(file.label))
return redirect(reverse('review_in_review', kwargs={'article_id': article_id}))
@reviewer_user_for_assignment_required
def accept_review_request(request, assignment_id):
"""
Accept a review request
:param request: the request object
:param assignment_id: the assignment ID to handle
:return: a context for a Django template
"""
access_code = logic.get_access_code(request)
# update the ReviewAssignment object
if access_code:
assignment = models.ReviewAssignment.objects.get(Q(pk=assignment_id) &
Q(is_complete=False) &
Q(access_code=access_code) &
Q(article__stage=submission_models.STAGE_UNDER_REVIEW) &
Q(date_accepted__isnull=True))
else:
assignment = models.ReviewAssignment.objects.get(Q(pk=assignment_id) &
Q(is_complete=False) &
Q(reviewer=request.user) &
Q(article__stage=submission_models.STAGE_UNDER_REVIEW) &
Q(date_accepted__isnull=True))
assignment.date_accepted = timezone.now()
assignment.save()
kwargs = {'review_assignment': assignment,
'request': request,
'accepted': True}
event_logic.Events.raise_event(event_logic.Events.ON_REVIEWER_ACCEPTED,
task_object=assignment.article,
**kwargs)
return redirect(logic.generate_access_code_url('do_review', assignment, access_code))
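# NOTE: logic.generate_access_code_url() (used above and in the views below)
# builds a link back into the review flow carrying the assignment's
# access_code, presumably so a reviewer following an emailed link can reach
# the assignment without an authenticated session. A rough sketch of the
# resulting URL (exact pattern assumed, not taken from this file):
#
#     reverse('do_review', kwargs={'assignment_id': assignment.pk}) \
#         + '?access_code=' + str(assignment.access_code)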
@reviewer_user_for_assignment_required
def decline_review_request(request, assignment_id):
"""
Decline a review request
:param request: the request object
:param assignment_id: the assignment ID to handle
:return: a context for a Django template
"""
access_code = logic.get_access_code(request)
if access_code:
assignment = models.ReviewAssignment.objects.get(
Q(pk=assignment_id) &
Q(is_complete=False) &
Q(article__stage=submission_models.STAGE_UNDER_REVIEW) &
Q(access_code=access_code)
)
else:
assignment = models.ReviewAssignment.objects.get(
Q(pk=assignment_id) &
Q(is_complete=False) &
Q(article__stage=submission_models.STAGE_UNDER_REVIEW) &
Q(reviewer=request.user)
)
assignment.date_declined = timezone.now()
assignment.date_accepted = None
assignment.is_complete = True
assignment.save()
template = 'review/review_decline.html'
context = {
'assigned_articles_for_user_review': assignment,
'access_code': access_code if access_code else ''
}
kwargs = {'review_assignment': assignment,
'request': request,
'accepted': False}
event_logic.Events.raise_event(event_logic.Events.ON_REVIEWER_DECLINED,
task_object=assignment.article,
**kwargs)
return render(request, template, context)
@reviewer_user_for_assignment_required
def suggest_reviewers(request, assignment_id):
"""
Allows a user to suggest reviewers
:param request:
:param assignment_id:
:return:
"""
try:
access_code = logic.get_access_code(request)
if access_code:
assignment = models.ReviewAssignment.objects.get(
Q(pk=assignment_id) &
Q(is_complete=True) &
Q(article__stage=submission_models.STAGE_UNDER_REVIEW) &
Q(access_code=access_code)
)
else:
assignment = models.ReviewAssignment.objects.get(
Q(pk=assignment_id) &
Q(is_complete=True) &
Q(article__stage=submission_models.STAGE_UNDER_REVIEW) &
Q(reviewer=request.user)
)
except models.ReviewAssignment.DoesNotExist:
        raise PermissionDenied('Suggested reviewers already supplied.')
form = forms.SuggestReviewers(instance=assignment)
if request.POST:
form = forms.SuggestReviewers(request.POST, instance=assignment)
if form.is_valid():
form.save()
messages.add_message(request, messages.INFO, 'Thanks for suggesting reviewers for this article.')
return redirect(reverse('website_index'))
template = 'review/suggest_reviewers.html'
context = {
'assignment': assignment,
'form': form,
}
return render(request, template, context)
@reviewer_user_required
def review_requests(request):
"""
A list of requests for the current user
:param request: the request object
:return: a context for a Django template
"""
new_requests = models.ReviewAssignment.objects.filter(
Q(is_complete=False) &
Q(reviewer=request.user) &
Q(article__stage=submission_models.STAGE_UNDER_REVIEW) &
Q(date_accepted__isnull=True),
article__journal=request.journal
).select_related('article')
active_requests = models.ReviewAssignment.objects.filter(
Q(is_complete=False) &
Q(reviewer=request.user) &
Q(article__stage=submission_models.STAGE_UNDER_REVIEW),
Q(date_accepted__isnull=False),
article__journal=request.journal
).select_related('article')
completed_requests = models.ReviewAssignment.objects.filter(
Q(is_complete=True) &
Q(reviewer=request.user),
article__journal=request.journal
).select_related('article')
template = 'review/review_requests.html'
context = {
'new_requests': new_requests,
'active_requests': active_requests,
'completed_requests': completed_requests,
}
return render(request, template, context)
@reviewer_user_for_assignment_required
def do_review(request, assignment_id):
"""
Rendering of the review form for user to complete.
:param request: the request object
:param assignment_id: ReviewAssignment PK
:return: a context for a Django template
"""
access_code = logic.get_access_code(request)
if access_code:
assignment = models.ReviewAssignment.objects.get(
Q(pk=assignment_id) &
Q(is_complete=False) &
Q(article__stage=submission_models.STAGE_UNDER_REVIEW) &
Q(access_code=access_code)
)
else:
assignment = models.ReviewAssignment.objects.get(
Q(pk=assignment_id) &
Q(is_complete=False) &
Q(article__stage=submission_models.STAGE_UNDER_REVIEW) &
Q(reviewer=request.user)
)
allow_save_review = setting_handler.get_setting(
'general', 'enable_save_review_progress', request.journal,
).processed_value
fields_required = decision_required = True
if allow_save_review:
fields_required = decision_required = False
elif assignment.review_file:
fields_required = False
review_round = assignment.article.current_review_round_object()
form = forms.GeneratedForm(
review_assignment=assignment,
fields_required=fields_required,
)
decision_form = forms.ReviewerDecisionForm(
instance=assignment,
decision_required=decision_required,
)
if 'review_file' in request.GET:
return logic.serve_review_file(assignment)
if request.POST:
if request.FILES:
assignment = upload_review_file(
request, assignment_id=assignment_id)
if 'decline' in request.POST:
return redirect(
logic.generate_access_code_url(
'decline_review',
assignment,
access_code,
)
)
if 'accept' in request.POST:
return redirect(
logic.generate_access_code_url(
'accept_review',
assignment,
access_code,
)
)
# If the submission has a review_file, reviewer does not need
# to complete the generated part of the form. Same if this is
# a POST for saving progress but not completing the review
if "complete" in request.POST:
if assignment.review_file:
fields_required = False
else:
fields_required = True
decision_required = True
form = forms.GeneratedForm(
request.POST,
review_assignment=assignment,
fields_required=fields_required,
)
decision_form = forms.ReviewerDecisionForm(
request.POST,
instance=assignment,
decision_required=decision_required,
)
if form.is_valid() and decision_form.is_valid():
decision_form.save()
assignment.save_review_form(form, assignment)
if 'save_progress' in request.POST:
messages.add_message(
request,
messages.SUCCESS,
'Progress saved',
)
else:
assignment.date_complete = timezone.now()
assignment.is_complete = True
if not assignment.date_accepted:
assignment.date_accepted = timezone.now()
assignment.save()
kwargs = {'review_assignment': assignment,
'request': request}
event_logic.Events.raise_event(
event_logic.Events.ON_REVIEW_COMPLETE,
task_object=assignment.article,
**kwargs
)
return redirect(
logic.generate_access_code_url(
'thanks_review',
assignment,
access_code,
)
)
else:
messages.add_message(
request,
messages.ERROR,
'Found errors on the form. Please, resolve them and try again',
)
template = 'review/review_form.html'
context = {
'assignment': assignment,
'form': form,
'decision_form': decision_form,
'review_round': review_round,
'access_code': access_code,
'allow_save_review': allow_save_review,
}
return render(request, template, context)
@require_POST
@reviewer_user_for_assignment_required
def upload_review_file(request, assignment_id):
access_code = logic.get_access_code(request)
if access_code:
assignment = models.ReviewAssignment.objects.get(
Q(pk=assignment_id)
& Q(is_complete=False)
& Q(article__stage=submission_models.STAGE_UNDER_REVIEW)
& Q(access_code=access_code)
)
else:
assignment = models.ReviewAssignment.objects.get(
Q(pk=assignment_id)
& Q(is_complete=False)
& Q(article__stage=submission_models.STAGE_UNDER_REVIEW)
& Q(reviewer=request.user)
)
if 'review_file' in request.FILES:
uploaded_file = request.FILES.get('review_file', None)
old_file = assignment.review_file
if uploaded_file:
new_file = files.save_file_to_article(
uploaded_file,
assignment.article,
assignment.reviewer,
)
assignment.review_file = new_file
assignment.save()
messages.add_message(
request,
messages.SUCCESS,
'File uploaded successfully.',
)
if old_file:
old_file.unlink_file(request.journal)
old_file.delete()
else:
messages.add_message(
request,
messages.ERROR,
'Please select a file to upload.',
)
return assignment
@reviewer_user_for_assignment_required
def thanks_review(request, assignment_id):
"""
Displays thank you message for the assignment form.
:param request: HttpRequest object
:param assignment_id: ReviewAssignment PK
:return: HttpResponse
"""
access_code = logic.get_access_code(request)
if access_code:
assignment = models.ReviewAssignment.objects.get(
Q(pk=assignment_id) &
Q(is_complete=True) &
Q(article__stage=submission_models.STAGE_UNDER_REVIEW) &
Q(access_code=access_code)
)
else:
assignment = models.ReviewAssignment.objects.get(
Q(pk=assignment_id) &
Q(is_complete=True) &
Q(article__stage=submission_models.STAGE_UNDER_REVIEW) &
Q(reviewer=request.user)
)
template = 'review/thanks.html'
context = {
'assignment': assignment,
'access_code': access_code,
}
return render(request, template, context)
@editor_is_not_author
@article_decision_not_made
@editor_user_required
def add_review_assignment(request, article_id):
"""
Allow an editor to add a new review assignment
:param request: HttpRequest object
:param article_id: Article PK
:return: HttpResponse
"""
article = get_object_or_404(submission_models.Article, pk=article_id)
form = forms.ReviewAssignmentForm(journal=request.journal)
new_reviewer_form = core_forms.QuickUserForm()
reviewers = logic.get_reviewer_candidates(article, request.user)
suggested_reviewers = logic.get_suggested_reviewers(article, reviewers)
user_list = logic.get_enrollable_users(request)
modal = None
# Check if this review round has files
if not article.current_review_round_object().review_files.all():
messages.add_message(request, messages.WARNING, 'You should select files for review before adding reviewers.')
return redirect(reverse('review_in_review', kwargs={'article_id': article.pk}))
if request.POST:
if 'quick_assign' in request.POST:
logic.quick_assign(request, article)
return redirect(reverse('review_in_review', kwargs={'article_id': article_id}))
elif 'add_and_assign' in request.POST:
# first check whether the user exists
new_reviewer_form = core_forms.QuickUserForm(request.POST)
try:
user = core_models.Account.objects.get(email=new_reviewer_form.data['email'])
user.add_account_role('reviewer', request.journal)
except core_models.Account.DoesNotExist:
user = None
if user:
logic.quick_assign(request, article, reviewer_user=user)
return redirect(reverse('review_in_review', kwargs={'article_id': article_id}))
valid = new_reviewer_form.is_valid()
if valid:
acc = logic.handle_reviewer_form(request, new_reviewer_form)
logic.quick_assign(request, article, reviewer_user=acc)
return redirect(reverse('review_in_review', kwargs={'article_id': article_id}))
else:
modal = 'reviewer'
elif 'assign' in request.POST:
# first check whether the user exists
new_reviewer_form = core_forms.QuickUserForm(request.POST)
try:
user = core_models.Account.objects.get(email=new_reviewer_form.data['email'])
user.add_account_role('reviewer', request.journal)
except core_models.Account.DoesNotExist:
user = None
if user:
return redirect(reverse('review_add_review_assignment', kwargs={'article_id': article.pk}) + '?' + parse.urlencode({'user': new_reviewer_form.data['email'], 'id': str(user.pk)}))
valid = new_reviewer_form.is_valid()
if valid:
acc = logic.handle_reviewer_form(request, new_reviewer_form)
return redirect(reverse('review_add_review_assignment', kwargs={'article_id': article.pk}) + '?' + parse.urlencode({'user': new_reviewer_form.data['email'], 'id': str(acc.pk)}))
else:
modal = 'reviewer'
elif 'enrollusers' in request.POST:
user_ids = request.POST.getlist('user_id')
users = core_models.Account.objects.filter(pk__in=user_ids)
for user in users:
user.add_account_role('reviewer', request.journal)
messages.add_message(request, messages.SUCCESS, '{0} enrolled as a reviewer.'.format(user.full_name()))
return redirect(reverse('review_add_review_assignment', kwargs={'article_id': article.pk}))
else:
form = forms.ReviewAssignmentForm(request.POST, journal=request.journal)
if form.is_valid():
reviewer = logic.get_reviewer_from_post(request)
if not reviewer:
form.add_error(None, 'You must select a reviewer.')
else:
review_assignment = form.save(commit=False)
review_assignment.reviewer = reviewer
review_assignment.article = article
review_assignment.editor = request.user
review_assignment.review_round = article.current_review_round_object()
review_assignment.access_code = uuid4()
review_assignment.save()
article.stage = submission_models.STAGE_UNDER_REVIEW
article.save()
kwargs = {'user_message_content': '',
'review_assignment': review_assignment,
'request': request,
'skip': False,
'acknowledgement': False}
event_logic.Events.raise_event(event_logic.Events.ON_REVIEWER_REQUESTED, **kwargs)
return redirect(reverse('review_notify_reviewer',
kwargs={'article_id': article_id, 'review_id': review_assignment.id}))
template = 'review/add_review_assignment.html'
context = {
'article': article,
'form': form,
'reviewers': reviewers,
'new_reviewer_form': new_reviewer_form,
'modal': modal,
'user_list': user_list,
'suggested_reviewers': suggested_reviewers,
}
return render(request, template, context)
@editor_is_not_author
@article_decision_not_made
@editor_user_required
def notify_reviewer(request, article_id, review_id):
"""
    Allows the editor to send a notification to the assigned peer reviewer
:param request: HttpRequest object
    :param article_id: Article PK
:param review_id: ReviewAssignment PK
:return: HttpResponse or HttpRedirect
"""
article = get_object_or_404(submission_models.Article, pk=article_id)
review = get_object_or_404(models.ReviewAssignment, pk=review_id)
email_content = logic.get_reviewer_notification(request, article, request.user, review)
if request.POST:
email_content = request.POST.get('content_email')
kwargs = {'user_message_content': email_content,
'review_assignment': review,
'request': request,
'skip': False,
'acknowledgement': True}
if 'skip' in request.POST:
kwargs['skip'] = True
event_logic.Events.raise_event(event_logic.Events.ON_REVIEWER_REQUESTED_ACKNOWLEDGE, **kwargs)
return redirect(reverse('review_in_review', kwargs={'article_id': article_id}))
event_logic.Events.raise_event(event_logic.Events.ON_REVIEWER_REQUESTED_ACKNOWLEDGE, **kwargs)
review.date_requested = timezone.now()
review.save()
return redirect(reverse('review_in_review', kwargs={'article_id': article_id}))
template = 'review/notify_reviewer.html'
context = {
'article': article,
'review': review,
'email_content': email_content,
'assignment': review,
}
return render(request, template, context)
@editor_is_not_author
@editor_user_required
def view_review(request, article_id, review_id):
"""
A view that allows the editor to view a review.
:param request: Django's request object
:param article_id: Article PK
:param review_id: ReviewAssignment PK
:return: a rendered django template
"""
article = get_object_or_404(submission_models.Article, pk=article_id)
review = get_object_or_404(models.ReviewAssignment, pk=review_id)
if request.POST:
if 'author_consumption' in request.POST:
if review.for_author_consumption:
review.for_author_consumption = False
else:
review.for_author_consumption = True
review.save()
if 'individual_author_consumption' in request.POST:
checkboxes = request.POST.getlist('answer_viewable')
for answer in review.review_form_answers():
if str(answer.pk) in checkboxes:
answer.author_can_see = True
else:
answer.author_can_see = False
answer.save()
if 'reset' in request.POST:
answer_pk = request.POST.get('pk')
answer = models.ReviewAssignmentAnswer.objects.get(pk=answer_pk)
answer.edited_answer = None
answer.save()
if 'review_file_visible' in request.POST:
logic.handle_review_file_switch(review, request.POST.get('review_file_visible'))
messages.add_message(request, messages.SUCCESS, 'Review File visibility updated.')
return redirect(reverse('review_view_review', kwargs={'article_id': article.pk, 'review_id': review.pk}))
template = 'review/view_review.html'
context = {
'article': article,
'review': review
}
return render(request, template, context)
@editor_is_not_author
@editor_user_required
def edit_review_answer(request, article_id, review_id, answer_id):
"""
Allows an Editor to tweak an answer given for a peer review question.
:param request: HttpRequest object
:param article_id: Article PK
:param review_id: ReviewAssignment PK
:param answer_id: ReviewAssignmentAnswer PK
:return: HttpResponse or HttpRedirect
"""
article = get_object_or_404(submission_models.Article, pk=article_id)
review = get_object_or_404(models.ReviewAssignment, pk=review_id)
answer = get_object_or_404(models.ReviewAssignmentAnswer, pk=answer_id)
form = forms.GeneratedForm(answer=answer)
if request.POST:
form = forms.GeneratedForm(request.POST, answer=answer)
if form.is_valid():
# Form element keys are posted as str
element_key = str(answer.element.pk)
answer.edited_answer = form.cleaned_data[element_key]
answer.save()
return redirect(
reverse(
'review_view_review',
kwargs={'article_id': article.pk, 'review_id': review.pk},
)
)
template = 'review/edit_review_answer.html'
context = {
'article': article,
'review': review,
'answer': answer,
'form': form,
}
return render(request, template, context)
@editor_is_not_author
@article_decision_not_made
@editor_user_required
def edit_review(request, article_id, review_id):
"""
A view that allows a user to edit a review.
:param request: Django's request object
:param article_id: Article PK
:param review_id: ReviewAssignment PK
:return: a rendered django template
"""
article = get_object_or_404(submission_models.Article, pk=article_id)
review = get_object_or_404(models.ReviewAssignment, pk=review_id)
if review.date_complete:
messages.add_message(request, messages.WARNING, 'You cannot edit a review that is already complete.')
return redirect(reverse('review_in_review', kwargs={'article_id': article.pk}))
form = forms.ReviewAssignmentForm(instance=review, journal=request.journal)
if request.POST:
form = forms.ReviewAssignmentForm(request.POST, instance=review, journal=request.journal)
if form.is_valid():
form.save()
            messages.add_message(request, messages.INFO, 'Review updated.')
            util_models.LogEntry.add_entry('Review Updated', 'Review updated.', level='Info', actor=request.user,
request=request, target=review)
return redirect(reverse('review_in_review', kwargs={'article_id': article.pk}))
template = 'review/edit_review.html'
context = {
'article': article,
'review': review,
'form': form,
}
return render(request, template, context)
@editor_is_not_author
@article_decision_not_made
@editor_user_required
def delete_review(request, article_id, review_id):
"""
A view that allows a user to delete a review.
:param request: Django's request object
:param article_id: Article PK
:param review_id: ReviewAssignment PK
:return: a rendered django template
"""
article = get_object_or_404(submission_models.Article, pk=article_id)
review = get_object_or_404(models.ReviewAssignment, pk=review_id)
if review.date_complete:
messages.add_message(request, messages.WARNING, 'You cannot delete a review that is already complete.')
return redirect(reverse('review_in_review', kwargs={'article_id': article.pk}))
if request.POST and 'delete' in request.POST:
user_message = request.POST.get('delete_rationale', 'No message supplied by user.')
description = 'Review {0} for article {1} has been deleted by {2}. \n\n{3}'.format(
review.pk,
article.title,
request.user.username,
user_message,
)
util_models.LogEntry.add_entry('Review Deleted', description, level='Info', actor=request.user,
request=request, target=article)
review.delete()
messages.add_message(request, messages.SUCCESS, 'Review deleted.')
return redirect(reverse('review_in_review', kwargs={'article_id': article.pk}))
template = 'review/delete_review.html'
context = {
'article': article,
'review': review,
}
return render(request, template, context)
@editor_is_not_author
@article_decision_not_made
@editor_user_required
def withdraw_review(request, article_id, review_id):
"""
A view that allows a user to withdraw a review.
:param request: Django's request object
:param article_id: Article PK
:param review_id: ReviewAssignment PK
    :return: a rendered django template
"""
article = get_object_or_404(submission_models.Article, pk=article_id)
review = get_object_or_404(models.ReviewAssignment, pk=review_id)
if review.date_complete:
messages.add_message(request, messages.WARNING, 'You cannot withdraw a review that is already complete.')
return redirect(reverse('review_in_review', kwargs={'article_id': article.pk}))
email_content = logic.get_withdrawl_notification(request, review)
if request.POST:
email_content = request.POST.get('content_email')
kwargs = {'user_message_content': email_content,
'review_assignment': review,
'request': request,
'skip': False}
if 'skip' in request.POST:
kwargs['skip'] = True
event_logic.Events.raise_event(event_logic.Events.ON_REVIEW_WITHDRAWL, **kwargs)
review.date_complete = timezone.now()
review.decision = 'withdrawn'
review.is_complete = True
review.save()
messages.add_message(request, messages.SUCCESS, 'Review withdrawn')
return redirect(reverse('review_in_review', kwargs={'article_id': article.pk}))
template = 'review/withdraw_review.html'
context = {
'article': article,
'review': review,
'email_content': email_content,
}
return render(request, template, context)
@editor_is_not_author
@article_decision_not_made
@editor_user_required
def reset_review(request, article_id, review_id):
"""
Allows an editor to reset a review that has previously been declined or withdrawn.
    :param request: Django's request object
:param article_id: pk of an Article
:param review_id: pk of a ReviewAssignment
:return: a contextualised django template
"""
article = get_object_or_404(submission_models.Article, pk=article_id)
review = get_object_or_404(models.ReviewAssignment, pk=review_id)
if request.POST:
review.is_complete = False
review.date_complete = None
review.date_declined = None
review.decision = None
review.suggested_reviewers = ""
review.save()
messages.add_message(request, messages.INFO, 'Review reset.')
return redirect(reverse('review_in_review', kwargs={'article_id': article.pk}))
template = 'review/reset.html'
context = {
'article': article,
'review': review,
}
return render(request, template, context)
@section_editor_draft_decisions
@editor_is_not_author
@editor_user_required
def review_decision(request, article_id, decision):
"""
    Allows the editor to make a review decision; revisions are not handled here, only accept or decline.
:param request: the django request object
:param article_id: Article PK
    :param decision: string, either 'accept' or 'decline'
:return: a contextualised django template
"""
article = get_object_or_404(submission_models.Article, pk=article_id)
author_review_url = request.journal.site_url(
reverse('review_author_view', kwargs={'article_id': article.id})
)
email_content = logic.get_decision_content(request, article, decision, author_review_url)
if article.date_accepted or article.date_declined:
messages.add_message(request, messages.WARNING, 'This article has already been accepted or declined.')
return redirect(reverse('review_in_review', kwargs={'article_id': article.pk}))
if request.POST:
email_content = request.POST.get('decision_rationale')
kwargs = {
'article': article,
'request': request,
'decision': decision,
'user_message_content': email_content,
'skip': False,
}
if 'skip' in request.POST:
kwargs['skip'] = True
if decision == 'accept':
article.accept_article()
article.snapshot_authors(article, force_update=False)
event_logic.Events.raise_event(event_logic.Events.ON_ARTICLE_ACCEPTED, task_object=article, **kwargs)
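            # Raising ON_WORKFLOW_ELEMENT_COMPLETE with switch_stage=True hands
            # the article to the next element in the journal's workflow
            # (often copyediting, depending on configuration).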
workflow_kwargs = {'handshake_url': 'review_home',
'request': request,
'article': article,
'switch_stage': True}
return event_logic.Events.raise_event(event_logic.Events.ON_WORKFLOW_ELEMENT_COMPLETE, task_object=article,
**workflow_kwargs)
elif decision == 'decline':
article.decline_article()
event_logic.Events.raise_event(event_logic.Events.ON_ARTICLE_DECLINED, task_object=article, **kwargs)
return redirect(reverse('core_dashboard'))
messages.add_message(request, messages.INFO, 'Article {0} has been {1}ed'.format(article.title, decision))
return redirect(reverse('article_copyediting', kwargs={'article_id': article.pk}))
template = 'review/decision.html'
context = {
'article': article,
'decision': decision,
'email_content': email_content,
}
return render(request, template, context)
@editor_is_not_author
@editor_user_required
def rate_reviewer(request, article_id, review_id):
"""
Allows an Editor to rate a Reviewer
:param request: django's request object
:param article_id: pk of a Article
:param review_id: pk of a ReviewAssignment
:return: a contextualised django template
"""
review = get_object_or_404(models.ReviewAssignment, pk=review_id, article__pk=article_id)
if not review.is_complete:
        messages.add_message(request, messages.INFO, 'You cannot rate a reviewer until their review is complete. '
                                                     'You should withdraw this review if you want to rate the reviewer '
                                                     'before they are finished.')
return redirect(reverse('review_in_review', kwargs={'article_id': review.article.id}))
if request.POST:
rating_int = int(request.POST.get('rating_number'))
if review.review_rating:
rating = review.review_rating
rating.rating = rating_int
rating.save()
messages.add_message(request, messages.INFO,
'{0}\'s rating updated to {1}'.format(review.reviewer.full_name(), rating_int))
else:
messages.add_message(request, messages.INFO,
'{0} assigned a rating of {1}'.format(review.reviewer.full_name(), rating_int))
models.ReviewerRating.objects.create(assignment=review, rating=rating_int, rater=request.user)
return redirect(reverse('review_in_review', kwargs={'article_id': review.article.id}))
template = 'review/rate_reviewer.html'
context = {
'review': review,
}
return render(request, template, context)
@article_author_required
def author_view_reviews(request, article_id):
"""
View that allows an author to view the reviews for an article.
:param request: django request object
:param article_id: Article pk
:return: a contextualised django template
"""
article = get_object_or_404(submission_models.Article, pk=article_id)
reviews = models.ReviewAssignment.objects.filter(
article=article,
is_complete=True,
for_author_consumption=True,
).exclude(decision='withdrawn')
if not reviews.exists():
raise PermissionDenied(
'No reviews have been made available by the Editor.',
)
if request.GET.get('file_id', None):
viewable_files = logic.group_files(article, reviews)
file_id = request.GET.get('file_id')
file = get_object_or_404(core_models.File, pk=file_id)
if file in viewable_files:
return files.serve_file(request, file, article)
template = 'review/author_view_reviews.html'
context = {
'article': article,
'reviews': reviews,
}
return render(request, template, context)
@editor_is_not_author
@editor_user_required
def request_revisions(request, article_id):
"""
View allows an Editor to request revisions to an article.
:param request: django request object
:param article_id: Article PK
:return: a contextualised django template
"""
article = get_object_or_404(submission_models.Article, pk=article_id)
form = forms.RevisionRequest()
review_round = models.ReviewRound.latest_article_round(
article=article,
)
pending_approval = review_round.reviewassignment_set.filter(
is_complete=True,
for_author_consumption=False,
)
incomplete = review_round.reviewassignment_set.filter(
is_complete=False,
)
if request.POST:
form = forms.RevisionRequest(request.POST)
if form.is_valid():
revision_request = form.save(commit=False)
revision_request.editor = request.user
revision_request.article = article
revision_request.save()
article.stage = submission_models.STAGE_UNDER_REVISION
article.save()
return redirect(reverse(
'request_revisions_notification',
kwargs={
'article_id': article.pk,
'revision_id': revision_request.pk,
}
))
template = 'review/revision/request_revisions.html'
context = {
'article': article,
'form': form,
'pending_approval': pending_approval,
'incomplete': incomplete,
}
return render(request, template, context)
@editor_is_not_author
@editor_user_required
def request_revisions_notification(request, article_id, revision_id):
"""
View allows an Editor to notify an Author of a Revision request
:param request: django request object
:param article_id: PK of an Article
:param revision_id: PK of a RevisionRequest
:return: a contextualised django template
"""
article = get_object_or_404(submission_models.Article, pk=article_id)
revision = get_object_or_404(models.RevisionRequest, pk=revision_id)
email_content = logic.get_revision_request_content(request, article, revision)
if request.POST:
user_message_content = request.POST.get('email_content')
kwargs = {
'user_message_content': user_message_content,
'revision': revision,
'request': request,
'skip': False,
}
if 'skip' in request.POST:
kwargs['skip'] = True
event_logic.Events.raise_event(event_logic.Events.ON_REVISIONS_REQUESTED_NOTIFY, **kwargs)
return redirect(reverse('review_in_review', kwargs={'article_id': article.pk}))
template = 'review/revision/request_revisions_notification.html'
context = {
'article': article,
'email_content': email_content,
'revision': revision,
}
return render(request, template, context)
@editor_is_not_author
@editor_user_required
def edit_revision_request(request, article_id, revision_id):
"""
View allows an Editor to edit an existing Revision
:param request: HttpRequest object
    :param article_id: Article PK
:param revision_id: Revision PK
:return: HttpResponse
"""
revision_request = get_object_or_404(models.RevisionRequest,
article__pk=article_id,
pk=revision_id)
form = forms.EditRevisionDue(instance=revision_request)
if revision_request.date_completed:
messages.add_message(request, messages.WARNING, 'You cannot edit a revision request that is complete.')
return redirect(reverse('review_in_review', kwargs={'article_id': article_id}))
if request.POST:
if 'update_due' in request.POST:
form = forms.EditRevisionDue(request.POST, instance=revision_request)
if form.is_valid():
form.save()
messages.add_message(request, messages.INFO, 'Due date updated.')
if 'delete_revision' in request.POST:
rationale = request.POST.get('delete_rationale')
util_models.LogEntry.add_entry('deletion', '{0} deleted a revision request with reason:\n\n{1}'.format(
request.user.full_name(), rationale), level='Info', actor=request.user, target=revision_request.article
)
revision_request.delete()
messages.add_message(request, messages.INFO, 'Revision request deleted.')
if 'mark_as_complete' in request.POST:
util_models.LogEntry.add_entry('update', '{0} marked revision {1} as complete'.format(
request.user.full_name(), revision_request.id), level='Info', actor=request.user,
target=revision_request.article
)
revision_request.date_completed = timezone.now()
revision_request.save()
messages.add_message(request, messages.INFO, 'Revision request marked as complete.')
return redirect(reverse('review_in_review', kwargs={'article_id': article_id}))
template = 'review/revision/edit_revision_request.html'
context = {
'revision_request': revision_request,
'form': form,
}
return render(request, template, context)
@article_author_required
def do_revisions(request, article_id, revision_id):
"""
Allows an Author to complete a revision request of an article.
:param request: django request object
:param article_id: PK of an Article
:param revision_id: PK of a RevisionRequest
:return:
"""
revision_request = get_object_or_404(
models.RevisionRequest,
article__pk=article_id,
pk=revision_id,
date_completed__isnull=True,
)
reviews = models.ReviewAssignment.objects.filter(
article=revision_request.article,
is_complete=True,
for_author_consumption=True,
).exclude(decision='withdrawn')
form = forms.DoRevisions(instance=revision_request)
revision_files = logic.group_files(revision_request.article, reviews)
if request.POST:
if 'delete' in request.POST:
file_id = request.POST.get('delete')
file = get_object_or_404(core_models.File, pk=file_id)
files.delete_file(revision_request.article, file)
logic.log_revision_event(
'File {0} ({1}) deleted.'.format(
file.id,
file.original_filename
),
request.user,
revision_request,
)
return redirect(
reverse(
'do_revisions',
kwargs={
'article_id': article_id,
'revision_id': revision_id
}
)
)
elif 'save' in request.POST:
covering_letter = request.POST.get('author_note')
revision_request.author_note = covering_letter
revision_request.save()
messages.add_message(
request,
messages.SUCCESS,
'Thanks. Your covering letter has been saved.',
)
return redirect(
reverse(
'do_revisions',
kwargs={
'article_id': article_id,
'revision_id': revision_id
}
)
)
else:
form = forms.DoRevisions(request.POST, instance=revision_request)
if not revision_request.article.has_manuscript_file():
form.add_error(
None,
'Your article must have at least one manuscript file.',
)
if form.is_valid():
form.save()
kwargs = {
'revision': revision_request,
'request': request,
}
event_logic.Events.raise_event(
event_logic.Events.ON_REVISIONS_COMPLETE,
**kwargs
)
messages.add_message(
request,
messages.SUCCESS,
'Thank you for submitting your revisions. The Editor has been notified.',
)
revision_request.date_completed = timezone.now()
revision_request.save()
return redirect(reverse('core_dashboard'))
if request.GET.get('file_id', None):
file_id = request.GET.get('file_id')
file = get_object_or_404(core_models.File, pk=file_id)
if file in revision_files:
logic.log_revision_event(
'Downloaded file {0} ({1}).'.format(
file.label,
file.original_filename),
request.user,
revision_request,
)
return files.serve_file(request, file, revision_request.article)
template = 'admin/review/revision/do_revision.html'
context = {
'revision_request': revision_request,
'form': form,
'article': revision_request.article,
'reviews': reviews,
}
return render(request, template, context)
@article_author_required
def replace_file(request, article_id, revision_id, file_id):
revision_request = get_object_or_404(models.RevisionRequest, article__pk=article_id, pk=revision_id,
date_completed__isnull=True)
file = get_object_or_404(core_models.File, pk=file_id)
if request.GET.get('download', None):
logic.log_revision_event('Downloaded file {0} ({1})'.format(file.label, file.original_filename), request.user,
revision_request)
return files.serve_file(request, file, revision_request.article)
if request.POST and request.FILES:
if 'replacement' in request.POST:
uploaded_file = request.FILES.get('replacement-file')
label = request.POST.get('label')
new_file = files.save_file_to_article(uploaded_file, revision_request.article, request.user,
replace=file, is_galley=False, label=label)
files.replace_file(
revision_request.article,
file,
new_file,
retain_old_label=False,
)
logic.log_revision_event(
'File {0} ({1}) replaced with {2} ({3})'.format(file.label, file.original_filename, new_file.label,
new_file.original_filename),
request.user, revision_request)
return redirect(reverse('do_revisions', kwargs={'article_id': article_id, 'revision_id': revision_id}))
template = 'review/revision/replace_file.html'
context = {
'revision_request': revision_request,
'article': revision_request.article,
'file': file,
}
return render(request, template, context)
@article_author_required
def upload_new_file(request, article_id, revision_id):
"""
    View allows an author to upload a new file to their article.
:param request: HttpRequest object
:param article_id: Article PK
:param revision_id: RevisionRequest PK
    :return: HttpResponse or HttpRedirect
"""
revision_request = get_object_or_404(models.RevisionRequest, article__pk=article_id, pk=revision_id,
date_completed__isnull=True)
article = revision_request.article
if request.POST and request.FILES:
file_type = request.POST.get('file_type')
uploaded_file = request.FILES.get('file')
label = request.POST.get('label')
new_file = files.save_file_to_article(
uploaded_file,
article,
request.user,
label=label,
)
if file_type == 'manuscript':
article.manuscript_files.add(new_file)
if file_type == 'data':
article.data_figure_files.add(new_file)
logic.log_revision_event(
'New file {0} ({1}) uploaded'.format(
new_file.label, new_file.original_filename),
request.user, revision_request)
return redirect(reverse(
'do_revisions',
kwargs={'article_id': article_id, 'revision_id': revision_id})
)
template = 'review/revision/upload_file.html'
context = {
'revision_request': revision_request,
'article': revision_request.article,
}
return render(request, template, context)
@editor_is_not_author
@editor_user_required
def view_revision(request, article_id, revision_id):
"""
    Allows an Editor to view a revision request
:param request: HttpRequest object
:param article_id: Article PK
:param revision_id: RevisionRequest PK
:return: HttpResponse
"""
revision_request = get_object_or_404(models.RevisionRequest.objects.select_related('article'),
pk=revision_id,
article__pk=article_id)
template = 'review/revision/view_revision.html'
context = {
'revision_request': revision_request,
'article': revision_request.article
}
return render(request, template, context)
@editor_user_required
def review_warning(request, article_id):
"""
    Checks if an editor user is the author of an article and blocks their access temporarily.
    If overridden, all Editors are notified.
:param request: HttpRequest object
:param article_id: Article PK
:return: HttpResponse or HttpRedirect
"""
article = get_object_or_404(submission_models.Article, pk=article_id)
if request.POST and request.user.is_editor(request):
override = models.EditorOverride.objects.create(
article=article, editor=request.user)
kwargs = {'request': request, 'override': override}
event_logic.Events.raise_event(
event_logic.Events.ON_REVIEW_SECURITY_OVERRIDE,
task_object=article,
**kwargs
)
return redirect(reverse('review_in_review', kwargs={'article_id': article.pk}))
else:
messages.add_message(
request, messages.WARNING, 'This action is not allowed.')
template = 'review/review_warning.html'
context = {
'article': article
}
return render(request, template, context)
@editor_user_required
@file_user_required
def editor_article_file(request, article_id, file_id):
""" Serves an article file.
:param request: the request associated with this call
:param article_id: the id of an article
:param file_id: the file ID to serve
:return: a streaming response of the requested file or 404
"""
article_object = submission_models.Article.objects.get(pk=article_id)
file_object = get_object_or_404(core_models.File, pk=file_id)
return files.serve_file(request, file_object, article_object)
@reviewer_user_for_assignment_required
def reviewer_article_file(request, assignment_id, file_id):
""" Serves an article file.
:param request: the request associated with this call
:param assignment_id: the ReviewAssignment id.
:param file_id: the file ID to serve
:return: a streaming response of the requested file or 404
"""
review_assignment = models.ReviewAssignment.objects.get(pk=assignment_id)
article_object = review_assignment.article
file_object = review_assignment.review_round.review_files.get(pk=file_id)
if not file_object:
raise Http404()
return files.serve_file(
request,
file_object,
article_object,
hide_name=True
)
@reviewer_user_for_assignment_required
def review_download_all_files(request, assignment_id):
review_assignment = models.ReviewAssignment.objects.get(pk=assignment_id)
zip_file, file_name = files.zip_article_files(
review_assignment.review_round.review_files.all(),
)
return files.serve_temp_file(zip_file, file_name)
@editor_is_not_author
@editor_user_required
def draft_decision(request, article_id):
"""
Allows a section editor to draft a decision for an editor.
:param request: request object
:param article_id: an Article primary key
:return: a django template with context
"""
article = get_object_or_404(submission_models.Article, pk=article_id)
drafts = models.DecisionDraft.objects.filter(article=article)
message_to_editor = logic.get_draft_email_message(request, article)
editors = request.journal.editors()
form = forms.DraftDecisionForm(
message_to_editor=message_to_editor,
editors=editors,
initial={
'revision_request_due_date': timezone.now() + timedelta(days=14),
}
)
if request.POST:
if 'delete' in request.POST:
delete_id = request.POST.get('delete')
draft = get_object_or_404(models.DecisionDraft, pk=delete_id, article=article)
draft.delete()
return redirect(
reverse(
'review_draft_decision',
kwargs={'article_id': article.pk},
),
)
else:
form = forms.DraftDecisionForm(
request.POST,
editors=editors,
message_to_editor=message_to_editor,
)
if form.is_valid():
new_draft = form.save(commit=False)
new_draft.section_editor = request.user
new_draft.article = article
new_draft.save()
messages.add_message(
request,
messages.SUCCESS,
'A draft has been saved, the editor has been notified.',
)
kwargs = {'request': request, 'article': article, 'draft': new_draft}
event_logic.Events.raise_event(
event_logic.Events.ON_DRAFT_DECISION,
**kwargs,
)
return redirect(
reverse(
'review_draft_decision',
kwargs={'article_id': article.pk},
),
)
template = 'review/draft_decision.html'
context = {
'article': article,
'drafts': drafts,
'form': form,
}
return render(request, template, context)
@require_POST
@editor_user_required
def draft_decision_text(request, article_id):
"""
Takes a POST and returns decision text.
"""
article = get_object_or_404(
submission_models.Article,
pk=article_id,
journal=request.journal,
)
decision = request.POST.get('decision')
date = request.POST.get('date', None)
if isinstance(date, str) and date != '':
date = shared.make_timezone_aware(date, '%Y-%m-%d')
else:
date = timezone.now() + timedelta(days=14)
author_review_url = request.journal.site_url(
reverse(
'review_author_view',
kwargs={'article_id': article.id},
)
)
if not decision:
raise Http404
if decision in ['accept', 'reject']:
decision_text = logic.get_decision_content(
request=request,
article=article,
decision=decision,
author_review_url=author_review_url,
)
elif decision in ['minor_revisions', 'major_revisions']:
revision = models.RevisionRequest(
article=article,
editor=request.user,
type=decision,
date_requested=timezone.now,
date_due=date.strftime("%Y-%m-%d"),
editor_note="[[Add Editor Note Here]]",
)
decision_text = logic.get_revision_request_content(
request=request,
article=article,
revision=revision,
draft=True,
)
return JsonResponse({'decision_text': decision_text})
@editor_is_not_author
@editor_user_required
def manage_draft(request, article_id, draft_id):
article = get_object_or_404(submission_models.Article, pk=article_id)
draft = get_object_or_404(models.DecisionDraft, pk=draft_id)
if 'decline_draft' in request.POST:
draft.editor_decision = 'declined'
draft.save()
logic.handle_draft_declined(article, draft, request)
if 'accept_draft' in request.POST:
draft.editor_decision = 'accept'
draft.save()
decision_action = logic.handle_decision_action(article, draft, request)
if decision_action:
return decision_action
messages.add_message(
request,
messages.INFO,
'Draft {}'.format(draft.editor_decision)
)
return redirect(
reverse(
'decision_helper',
kwargs={'article_id': article.pk},
),
)
@editor_is_not_author
@editor_user_required
def edit_draft_decision(request, article_id, draft_id):
article = get_object_or_404(submission_models.Article, pk=article_id)
draft = get_object_or_404(models.DecisionDraft, pk=draft_id)
drafts = models.DecisionDraft.objects.filter(article=article)
editors = request.journal.editors()
form = forms.DraftDecisionForm(
instance=draft,
editors=editors,
)
if request.POST:
form = forms.DraftDecisionForm(
request.POST,
instance=draft,
editors=editors,
)
if form.is_valid():
form.save()
messages.add_message(request, messages.SUCCESS, 'Draft has been updated')
return redirect(
reverse(
'review_edit_draft_decision',
kwargs={'article_id': article.pk, 'draft_id': draft.pk},
),
)
template = 'review/draft_decision.html'
context = {
'article': article,
'drafts': drafts,
'draft': draft,
'form': form,
}
return render(request, template, context)
@senior_editor_user_required
def review_forms(request):
"""
Displays a list of review forms and allows new ones to be created.
:param request: HttpRequest object
:return: HttpResponse or HttpRedirect
"""
form_list = models.ReviewForm.objects.filter(
journal=request.journal,
deleted=False,
)
form = forms.NewForm()
default_form = setting_handler.get_setting(
'general', 'default_review_form', request.journal,
).processed_value
if default_form.isdigit():
default_form = int(default_form)
if request.POST:
if 'delete' in request.POST:
form_id = request.POST["delete"]
if form_id.isdigit():
form_id = int(form_id)
if default_form == form_id:
messages.add_message(
request,
                    messages.ERROR,
                    "This form is selected as the default form and thus"
                    " can't be deleted",
)
return redirect(reverse('review_review_forms'))
form_obj = get_object_or_404(
models.ReviewForm, id=form_id,
journal=request.journal,
)
form_obj.deleted = True
form_obj.save()
messages.add_message(request, messages.SUCCESS, 'Form Deleted')
return redirect(reverse('review_review_forms'))
else:
form = forms.NewForm(request.POST)
if form.is_valid():
new_form = form.save(commit=False)
new_form.journal = request.journal
new_form.save()
return redirect(reverse('review_review_forms'))
template = 'review/manager/review_forms.html'
context = {
'form_list': form_list,
'form': form,
'default_form': default_form,
}
return render(request, template, context)
@senior_editor_user_required
def edit_review_form(request, form_id, element_id=None):
"""
Allows the editing of an existing review form
:param request: HttpRequest object
:param form_id: ReviewForm PK
:param element_id: Element PK, optional
:return: HttpResponse or HttpRedirect
"""
edit_form = get_object_or_404(models.ReviewForm, pk=form_id)
form = forms.NewForm(instance=edit_form)
element_form = forms.ElementForm()
element, modal = None, None
if element_id:
element = get_object_or_404(models.ReviewFormElement, pk=element_id)
modal = 'element'
element_form = forms.ElementForm(instance=element)
if request.POST:
if 'delete' in request.POST:
delete_id = request.POST.get('delete')
element_to_delete = get_object_or_404(models.ReviewFormElement, pk=delete_id)
element_to_delete.delete()
return redirect(reverse('edit_review_form', kwargs={'form_id': edit_form.pk}))
if 'element' in request.POST:
if element_id:
element_form = forms.ElementForm(request.POST, instance=element)
else:
element_form = forms.ElementForm(request.POST)
if element_form.is_valid():
element = element_form.save()
edit_form.elements.add(element)
messages.add_message(request, messages.SUCCESS, 'New element added.')
return redirect(reverse('edit_review_form', kwargs={'form_id': edit_form.pk}))
if 'review_form' in request.POST:
form = forms.NewForm(request.POST, instance=edit_form)
if form.is_valid():
form.save()
messages.add_message(request, messages.SUCCESS, 'Form updated')
return redirect(reverse('edit_review_form', kwargs={'form_id': edit_form.pk}))
template = 'review/manager/edit_review_form.html'
context = {
'form': form,
'edit_form': edit_form,
'element_form': element_form,
'modal': modal,
}
return render(request, template, context)
@senior_editor_user_required
def preview_form(request, form_id):
"""Displays a preview of a review form."""
form = get_object_or_404(models.ReviewForm, pk=form_id)
generated_form = forms.GeneratedForm(preview=form)
decision_form = forms.FakeReviewerDecisionForm()
template = 'review/manager/preview_form.html'
context = {
'form': form,
'generated_form': generated_form,
'decision_form': decision_form,
}
return render(request, template, context)
@require_POST
@senior_editor_user_required
def order_review_elements(request, form_id):
"""
Reorders Review Form elements.
:param request: HttpRequest object
:param form_id: ReviewForm PK
"""
form = get_object_or_404(
models.ReviewForm,
pk=form_id,
journal=request.journal,
)
shared.set_order(
form.elements.all(),
'order',
request.POST.getlist('element[]'),
)
return HttpResponse('Ok')
@reviewer_user_for_assignment_required
def hypothesis_review(request, assignment_id):
"""
    Rendering of the review form for a user to complete.
:param request: the request object
:param assignment_id: ReviewAssignment PK
:return: a context for a Django template
"""
access_code = logic.get_access_code(request)
if access_code:
assignment = models.ReviewAssignment.objects.get(
Q(pk=assignment_id) &
Q(is_complete=False) &
Q(article__stage=submission_models.STAGE_UNDER_REVIEW) &
Q(access_code=access_code)
)
else:
assignment = models.ReviewAssignment.objects.get(
Q(pk=assignment_id) &
Q(is_complete=False) &
Q(article__stage=submission_models.STAGE_UNDER_REVIEW) &
Q(reviewer=request.user)
)
pdf = assignment.review_round.review_files.get(mime_type='application/pdf')
hypothesis.create_hypothesis_account(assignment.reviewer)
grant_token = hypothesis.generate_grant_token(assignment.reviewer)
template = 'review/annotation_pdf_review.html'
context = {
'assignment': assignment,
'pdf': pdf,
'grant_token': grant_token,
'authority': settings.HYPOTHESIS_CLIENT_AUTHORITY,
}
return render(request, template, context)
@editor_user_required
def decision_helper(request, article_id):
"""
Displays all of the completed reviews to help the Editor make a decision.
:param request: HttpRequest object
:param article_id: Article object pk, integer
:return: a django response
"""
article = get_object_or_404(
submission_models.Article, pk=article_id,
)
reviews = models.ReviewAssignment.objects.filter(
article=article,
)
uncomplete_reviews = reviews.filter(
article=article,
is_complete=False,
date_complete__isnull=True,
)
complete_reviews = reviews.filter(
article=article,
is_complete=True,
date_complete__isnull=False,
).exclude(
decision='withdrawn',
)
withdraw_reviews = reviews.filter(
decision='withdrawn',
)
uncomplete_reviews = uncomplete_reviews.union(withdraw_reviews)
decisions = Counter(
[review.get_decision_display() for review in reviews if
review.decision]
)
if 'reveal_review' in request.POST:
review = get_object_or_404(
models.ReviewAssignment,
article=article,
id=request.POST.get('review'),
)
review.for_author_consumption=True
review.save()
messages.add_message(
request, messages.SUCCESS,
"The author can now see review #%s" % review.pk,
)
if 'hide_review' in request.POST:
review = get_object_or_404(
models.ReviewAssignment,
article=article,
id=request.POST.get('review'),
)
review.for_author_consumption=False
review.save()
messages.add_message(
request, messages.WARNING,
"The author won't see the review #%s" % review.pk,
)
if 'review_file_visible' in request.POST:
review = get_object_or_404(
models.ReviewAssignment,
article=article,
id=request.POST.get('review'),
)
logic.handle_review_file_switch(review, request.POST.get('review_file_visible'))
messages.add_message(request, messages.SUCCESS, 'Review File visibility updated.')
template = 'admin/review/decision_helper.html'
context = {
'article': article,
'complete_reviews': complete_reviews,
'uncomplete_reviews': uncomplete_reviews,
'decisions': dict(decisions)
}
return render(request, template, context)
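# --- Illustrative URLconf sketch (not part of Janeway's source). The route names are the
# ones used in the reverse() calls above ('review_draft_decision', 'decision_helper',
# 'review_review_forms', 'edit_review_form'); the URL patterns themselves are assumptions.
#
#   from django.conf.urls import url
#   from review import views
#
#   urlpatterns = [
#       url(r'^articles/(?P<article_id>\d+)/decision/draft/$',
#           views.draft_decision, name='review_draft_decision'),
#       url(r'^articles/(?P<article_id>\d+)/decisionhelper/$',
#           views.decision_helper, name='decision_helper'),
#       url(r'^manager/reviewforms/$',
#           views.review_forms, name='review_review_forms'),
#       url(r'^manager/reviewforms/(?P<form_id>\d+)/$',
#           views.edit_review_form, name='edit_review_form'),
#   ]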
| agpl-3.0 | 7,063,019,516,852,951,000 | 33.460149 | 194 | 0.606278 | false |
mickstar/2048-ai-python | game/gameboard.py | 1 | 3987 | import random
from game.cell import Cell
from game.move import Move
class GameBoard:
'''GameBoard defines the 2048 grid that should be a 4x4 square.
    This class contains 16 cells, and provides methods for mutating the board state according to
    2048 rules. The board size is not hard-coded here, but changing it should be done with caution.'''
size = 4
def __init__(self):
self.grid = [[Cell(Cell.EMPTY) for x in range(GameBoard.size)] for x in range(GameBoard.size)]
def printBoard(self):
for y in range(GameBoard.size):
for x in range(GameBoard.size):
cell = self.getCell(x, y)
print(cell, end="\t")
print("") # new line
def hasEmptyTiles(self):
'''Returns whether there exists any empty tiles (=0) in the grid.'''
for row in self.grid:
for cell in row:
if cell.isEmpty():
return True
return False
def getCell(self, x, y):
'''return cell at the (x,y) coordinates.
grid is structured such that the top left corner is (0,0)
likewise, the bottom right is (3,3)'''
return self.grid[x][y]
def getRandomlyAvailableCell(self):
'''Returns a randomly selected empty cell from the grid.'''
emptyCells = []
for row in self.grid:
for cell in row:
if cell.isEmpty():
emptyCells.append(cell)
return random.choice(emptyCells)
def makeMove(self, move):
'''modifies the grid such in the direction of the move.
Such that Right[0,2,2,0] -> [0,0,0,4]
for each horizontal row in grid'''
x_delta = 0
y_delta = 0
n = GameBoard.size
x_range = list(range(n))
y_range = list(range(n))
if move == Move.LEFT_MOVE:
x_delta = -1
x_range = list(reversed(range(n)))
if move == Move.RIGHT_MOVE:
x_delta = +1
if move == Move.UP_MOVE:
y_delta = -1
x_range = list(reversed(range(n)))
if move == Move.DOWN_MOVE:
y_delta = +1
successfullyMoved = False
score_delta = 0
for x in x_range:
joined = []
for y in y_range:
# first we check to see we are not on an edge cell.
if (x + x_delta) in x_range and (y + y_delta) in y_range:
curCell = self.getCell(x, y)
adjCell = self.getCell(x + x_delta, y + y_delta)
# Check to see if we can merge two cells, e.g RIGHT[0,0,2,2] -> [0,0,0,4]
if (curCell not in joined
and not curCell.isEmpty()
and curCell.getValue() == adjCell.getValue()):
successfullyMoved = True
score_delta += 2*curCell.value
adjCell.doubleValue()
curCell.removeValue()
joined = [curCell, adjCell]
# Check to see if we can move a cell e.g RIGHT[2,0,0,0] -> [0,2,0,0]
elif not curCell.isEmpty() and adjCell.isEmpty():
successfullyMoved = True
adjCell.setValue(curCell.getValue())
curCell.removeValue()
for y in y_range:
for x in x_range:
if (x + x_delta) in x_range and (y + y_delta) in y_range:
curCell = self.getCell(x, y)
adjCell = self.getCell(x + x_delta, y + y_delta)
if (not curCell.isEmpty() and adjCell.isEmpty()):
adjCell.setValue(curCell.getValue())
curCell.removeValue()
return successfullyMoved, score_delta
def hasMovesAvailable(self):
'''Checks to see if any moves are available.'''
if (self.hasEmptyTiles()):
return True
n = len(self.grid)
# we iterate over all posible directions, where (0,1) corresponds to DOWN and (-1,0) to LEFT.
for (x_delta, y_delta) in [(0, 1), (0, -1), (1, 0), (-1, 0)]:
x_range = list(range(n))
y_range = list(range(n))
# we always want to start from the further most away s.t LEFT[2,2,0,0] starts at index 3.
# as such we reverse the range to [3,2,1,0]
if x_delta == -1:
x_range = reversed(x_range)
if y_delta == -1:
y_range = reversed(y_range)
for x in x_range:
for y in y_range:
if ((x+x_delta) in x_range and (y+y_delta) in y_range):
curCell = self.getCell(x, y)
adjCell = self.getCell(x+x_delta, y+y_delta)
if (curCell.value == adjCell.value):
return True # a move is available.
return False
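# --- Illustrative usage sketch (not part of the original module). It relies only on what
# the imports above provide: Cell.setValue() and the Move.*_MOVE constants.
if __name__ == "__main__":
    board = GameBoard()
    # seed two tiles, push everything left, then show the result
    board.getRandomlyAvailableCell().setValue(2)
    board.getRandomlyAvailableCell().setValue(2)
    moved, gained = board.makeMove(Move.LEFT_MOVE)
    board.printBoard()
    print("moved:", moved, "score gained:", gained)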
| gpl-3.0 | 5,492,649,167,837,388,000 | 29.204545 | 105 | 0.641084 | false |
rlindner81/pyload | module/plugins/hoster/LinksnappyCom.py | 1 | 1943 | # -*- coding: utf-8 -*-
import re
import urlparse
from module.plugins.internal.misc import json
from module.plugins.internal.MultiHoster import MultiHoster
class LinksnappyCom(MultiHoster):
__name__ = "LinksnappyCom"
__type__ = "hoster"
__version__ = "0.16"
__status__ = "testing"
__pattern__ = r'https?://(?:[^/]+\.)?linksnappy\.com'
__config__ = [("activated", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("fallback",
"bool",
"Fallback to free download if premium fails",
False),
("chk_filesize", "bool", "Check file size", True),
("max_wait", "int",
"Reconnect if waiting time is greater than minutes", 10),
("revertfailed", "bool", "Revert to standard download if fails", True)]
__description__ = """Linksnappy.com multi-hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("stickell", "[email protected]"),
("Bilal Ghouri", None)]
def handle_premium(self, pyfile):
host = self._get_host(pyfile.url)
json_params = json.dumps({'link': pyfile.url,
'type': host,
'username': self.account.user,
'password': self.account.get_login('password')})
r = self.load("https://linksnappy.com/api/linkgen",
post={'genLinks': json_params})
self.log_debug("JSON data: " + r)
j = json.loads(r)['links'][0]
if j['error']:
self.error(_("Error converting the link"))
pyfile.name = j['filename']
self.link = j['generated']
@staticmethod
def _get_host(url):
host = urlparse.urlsplit(url).netloc
return re.search(r'[\w\-]+\.\w+$', host).group(0)
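    # Illustrative behaviour of the helper above (not part of the plugin): for a link such as
    # "http://dl.example-filehost.com/some/file", urlsplit().netloc is "dl.example-filehost.com"
    # and the trailing regex keeps only "example-filehost.com"; that value is what
    # handle_premium() sends as the 'type' field to the Linksnappy API.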
| gpl-3.0 | 4,289,568,237,312,601,000 | 33.696429 | 89 | 0.513639 | false |
majkelx/astwro | astwro/coord/CoordMatch.py | 1 | 2213 | # coding=utf-8
from __future__ import absolute_import, division, print_function
from astropy.coordinates import SkyCoord
import astropy.units as u
import numpy as np
class CoordMatch(object):
"""Two catalogues crossmatch
Object interface to `astropy.coordinates.match_to_catalog_sky`"""
def __init__(self, cooA, cooB, radius_match=0.5, radius_separated=None, unit=None):
super(CoordMatch, self).__init__()
if radius_separated is None:
radius_separated = radius_match
if not isinstance(radius_match, u.Quantity):
radius_match = radius_match * u.arcsec
if not isinstance(radius_separated, u.Quantity):
radius_separated = radius_separated * u.arcsec
self.r_match = radius_match
self.r_spe = radius_separated
kwargs = {} if unit is None else {'unit': unit}
self.A = SkyCoord(cooA, **kwargs)
self.B = SkyCoord(cooB, **kwargs)
self._ABidx = None
self._ABdist = None
def _calc_diff(self):
self._ABidx, self._ABdist, _ = self.A.match_to_catalog_sky(self.B)
@property
def sepAB(self):
if self._ABdist is None:
self._calc_diff()
return self._ABdist
@property
def mapAB(self):
if self._ABidx is None:
self._calc_diff()
return self._ABidx
@property
def lenB(self):
return len(self.B)
@property
def lenA(self):
return len(self.A)
@property
def mAB(self):
return self.sepAB < self.r_match
@property
def mBA(self):
r = np.zeros_like(self.B, dtype=bool)
r[self.iBA] = True
return r
@property
def mAonly(self):
return ~self.mAB
@property
def mBonly(self):
return ~self.mBA
@property
def iAonly(self):
return np.arange(self.lenA)[self.mAonly]
@property
def iBonly(self):
return np.arange(self.lenB)[self.mBonly]
@property
def iBA(self):
return np.unique(self.mappediBA)
@property
def iAB(self):
return np.arange(self.lenA)[self.mAB]
@property
def mappediBA(self):
return self.mapAB[self.mAB]
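# --- Illustrative usage sketch (not part of the original module); the coordinates are made up.
if __name__ == '__main__':
    a = SkyCoord([10.0, 10.5] * u.deg, [20.0, 20.5] * u.deg)
    b = SkyCoord([10.00001, 30.0] * u.deg, [20.0, 40.0] * u.deg)
    match = CoordMatch(a, b, radius_match=1.0)  # radius in arcsec by default
    print(match.mAB)    # True where an A source has a B counterpart within the radius
    print(match.iAB)    # indices of the matched A sources
    print(match.sepAB)  # angular separation to the nearest B source for every A source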
| mit | -650,743,030,654,543,700 | 21.814433 | 87 | 0.597379 | false |
zhangzr1026/monitor2 | src/collector/tasks/APP_RECORD/projectXfile.py | 1 | 1566 | '''
Created on 2014-12-29
@author: Administrator
'''
from lib import db_mysql
from lib import common
def conf_file_daily_num(resource=None):
'''
    Get daily file count.
    Only yesterday is counted, because today is not yet complete.
'''
yesterday = common.lastday()
TARGET_TABLE='apprec_file_daily_num'
DBCoon = db_mysql.connect(user=resource['db']['user'],
passwd=resource['db']['passwd'],
host=resource['db']['host'],
port=resource['db']['port'],
db=resource['db']['db'])
''' Get Data '''
# daily file #
mFile = db_mysql.Model('file_conf_info',DBCoon)
strWhere = "create_time>'%s 00:00:00' and create_time<='%s 23:59:59'" % (yesterday,yesterday)
dataResult = mFile.field("count(*) AS num").where(strWhere).find()
if dataResult == False:
return False
fileNum = dataResult['num']
    # daily effective conf #
strWhere = "type=2 and create_time>'%s 00:00:00' and create_time<='%s 23:59:59'" % (yesterday,yesterday)
dataResult = mFile.field("count(*) AS num").where(strWhere).find()
if dataResult == False:
return False
fileVideoNum = dataResult['num']
''' Set Value '''
values = dict()
values['type'] = 0
values['real_time'] = "%s 23:59:59" % yesterday
values['file_num'] = fileNum
values['file_video_num'] = fileVideoNum
''' fill message body '''
msgBody = common.fillMsgData(TARGET_TABLE, values)
return msgBody
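# --- Illustrative call sketch (not part of the original task module). Only the
# resource['db'] connection settings read above are required, and the call will of course
# only succeed against a reachable MySQL instance that holds the file_conf_info table.
if __name__ == '__main__':
    demo_resource = {'db': {'user': 'monitor', 'passwd': 'secret',
                            'host': '127.0.0.1', 'port': 3306, 'db': 'app_record'}}
    print(conf_file_daily_num(demo_resource))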
| lgpl-3.0 | -2,242,246,343,074,945,800 | 28 | 108 | 0.572158 | false |
UCSC-MedBook/MedBook_ | tools/old-external-tools/shazam/htmlFG.py | 1 | 17966 | #!/usr/bin/python2.6
import sys, string, os, time, fnmatch, imgFG, markup, re
from markup import oneliner as o
from numpy import *
rootDir = ""
pngDir = ""
pngBase = 'png/'
pathwayNameDict = {}
entityDict = {}
entityFile = {}
imgFG.printPDF = True
class1 = []
class2 = []
class3 = []
def parseContrast(file_name, red_label, grey_label):
global class1
global class2
inFile = open(file_name)
#class1 = ["DTB-004", "DTB-009", "DTB-024Pro", "DTB-030", "DTB-034", "DTB-036", "DTB-046", "DTB-049", "DTB-053", "DTB-064", "DTB-073"]
#class2 = ["DTB-003", "DTB-005", "DTB-011", "DTB-018", "DTB-022", "DTB-023", "DTB-038", "DTB-040", "DTB-060", "DTB-063", "DTB-071", "DTB-080"]
lineCount = 0
for line in inFile:
lineCount+=1
data = line[:-1].split('\t')
if len(data) == 2:
sample = data[0]
if sample == 'Sample':
continue
s_class = data[1]
if s_class == red_label:
class1.append(sample)
elif grey_label == 'Null':
class2.append(grey_label)
elif s_class == grey_label:
class2.append(sample)
else:
print "invalid sample label", line
inFile.close()
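# Illustrative contrast file layout (not from the original sources, inferred from the parsing
# above): two tab-separated columns, sample id then class label; a 'Sample' header row is
# skipped and samples are routed into class1/class2 by red_label/grey_label.
#
#   Sample<TAB>group
#   DTB-004<TAB>responder
#   DTB-003<TAB>non_responder
#
# parseContrast('contrast.tsv', 'responder', 'non_responder') would then populate the
# module-level class1/class2 lists used by the plotting code below.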
def getPathwayName(pid):
pid = pid.split('_')
if len(pid) != 2:
return "N/A"
pid = pid[1]
pid = re.sub("\.","", pid)
try:
name = pathwayNameDict[pid]
except:
name = "N/A"
return name
def initEntityDict(file_name):
inFile = open(file_name)
lineCount = 0
for line in inFile:
lineCount+=1
data = line[:-1].split('\t')
if len(data) == 2:
type = data[0]
name = data[1]
if name in entityDict:
if entityDict[name] != type and file_name == entityFile[name]:
print "on line ", lineCount, name, "cannot be assigned ",type, "when it is", entityDict[name] , "in", file_name , entityFile[name]
assert(entityDict[name] == type)
elif entityDict[name] != type:
if type != 'protein' and entityFile[name] == 'protein':
print "WARNING", lineCount, name, "has multiple types ",type, "and", entityDict[name] , "in", file_name , entityFile[name]
type = 'protein'
entityDict[name] = type
entityFile[name] = file_name
inFile.close()
def initPathwayNameDict(path_file="pathway_pids.tab"):
inFile = open(path_file)
for line in inFile:
data = line[:-1].split('\t')
pid = data[0]
name = data[1]
pathwayNameDict[pid] = name
inFile.close()
def getFilesMatching(baseDir, patterns):
list = []
for root, dirs, files in os.walk(baseDir):
for file in files:
ptr = os.path.join(root, file)
for pattern in patterns:
if fnmatch.fnmatch(ptr, pattern):
list.append(ptr)
return list
def writePageToFile(page, fname):
outFile = open(fname, 'w')
outFile.write(str(page))
outFile.close()
def initializePage(t, h, sort_list = "[[9,1]]"):
currentTime = time.localtime()
dateTime = str(currentTime[1]) + '/' + str(currentTime[2]) + '/' + str(currentTime[0]) + " "
dateTime += str(currentTime[3]) + ":" + str(currentTime[4]) + ":" + str(currentTime[5])
csses = "style.css"
tsStr = '\n$(document).ready(function()\n'
tsStr += ' {\n'
tsStr += ' $("table").tablesorter({\n'
tsStr += ' // sort on the tenth column , order desc \n'
tsStr += ' sortList: '+sort_list+' \n'
tsStr += ' }); \n'
tsStr += ' }\n'
tsStr += ');\n'
scripts = [('js/jquery-latest.js',['javascript','']),
('js/jquery.tablesorter.min.js',['javascript','']),
('js/jquery.metadata.js',['javascript','']),
('',['javascript',tsStr])]
page = markup.page()
pathway_name = re.sub(" ","_",re.sub("/","_",t))
summary_tsv = open(rootDir + pathway_name+'.tsv', 'wb')
summary_tsv.write("Gene\tAvg num Alterations\tTotal alterations\tnum genes\tmin mean truth\tmax mean truth\tmin mean any\tmax mean any\tnormalized activity\n")
page.init(title = t,
header = h,
script=scripts,
css = (csses, 'print, projection, screen'),
footer = "Last modified on " + dateTime)
return page, summary_tsv
def putSummaryTable(p, b, data, id, tsv):
labels = data["sample"]["labels"]
p.table(border=b, id=id, class_='tablesorter')
p.thead()
p.tr()
p.th("Entity - Gene or Complex or Molecule")
p.th(labels, class_="{sorter:'digit'}")
p.tr.close()
p.thead.close()
p.tbody()
for d in data["sample"]:
if d == "labels":
continue
vals = data["sample"][d]
p.tr()
#name of gene
geneUrl = 'http://www.genecards.org/cgi-bin/carddisp.pl?gene='+d
tsv.write('<a href=%s target="_blank">%s</a>\t' % (geneUrl, d))
p.td(o.a(d, href=geneUrl, target="_blank"))
tmp = [round(v, 3) for v in vals]
for v in vals:
tsv.write('%s\t' % str(round(v,3)))
p.td(tmp)
p.tr.close()
tsv.write('\n')
p.tbody.close()
tsv.close()
p.table.close()
def getPathwayByFilename(f):
i = f.find("pid")
if i == -1:
print "string 'pid' not found in file name", f
sys.exit(0)
tmp = f[i:-3].split('_')
pid = tmp[0] + '_' + tmp[1]
pid = re.sub("\.","", pid)
print "pid:",pid
return pid, getPathwayName(pid)
def summarizePathway(samples, data, entitySummary):
sampleIndex = []
nwIndex = []
naIndex = []
for i in range(len(samples)):
s = samples[i]
if s.startswith("nw_"):
nwIndex.append(i)
elif s.startswith("na_"):
naIndex.append(i)
else:
sampleIndex.append(i)
totalOutliers = 0
totalActivity = 0
count = 0
geneCount = 0
for d in entitySummary["sample"]:
if d == "labels":
continue
vals = entitySummary["sample"][d]
totalOutliers += vals[6]
try:
totalActivity += vals[7]
except:
print "error: no activity for ",d
sys.exit(2)
totalActivity += 0
try:
if entityDict[d] == 'protein':
geneCount += 1
except:
pass
count += 1
if geneCount > 0:
avgOutliers = 1.0 * totalOutliers / geneCount;
else:
avgOutliers = 0.0
print "entities", count, "genes", geneCount
minMean = 1000
maxMean = -1000
#minMeanNw = 1000
#maxMeanNw = -1000
minMeanNa = 1000
maxMeanNa = -1000
for d in data:
vals = data[d]
tmp = [vals[i] for i in sampleIndex]
m = mean(tmp)
if m < minMean:
minMean = m
elif m > maxMean:
maxMean = m
#tmp = [vals[i] for i in nwIndex]
#m = mean(tmp)
#if m < minMeanNw:
# minMeanNw = m
#elif m > maxMeanNw:
# maxMeanNw = m
tmp = [vals[i] for i in naIndex]
m = mean(tmp)
if m < minMeanNa:
minMeanNa = m
elif m > maxMeanNa:
maxMeanNa = m
if geneCount < 10:
return None
summary = {}
summary["Avg Num Alterations"] = avgOutliers
summary["Total Alterations"] = totalOutliers
summary["Num Genes"] = geneCount
summary["Min Mean Truth"] = minMean
summary["Max Mean Truth"] = maxMean
summary["Min Mean Any"] = minMeanNa
summary["Max Mean Any"] = maxMeanNa
if geneCount > 0:
summary["Normalized Activity"] = 100 * totalActivity / geneCount
print "summary Normalized Activity", 100 * totalActivity / geneCount
else:
print "#warning geneCount = 0"
summary["order"] = ("Avg Num Alterations", "Total Alterations",
"Num Genes",
"Min Mean Truth", "Max Mean Truth",
"Min Mean Any", "Max Mean Any", "Normalized Activity")
return summary
def fileData(fname):
inFile = open(fname)
line = inFile.readline()
header = line[:-1].split('\t')
sample_names = header[1:]
fData = {}
for line in inFile:
data = line[:-1].split('\t')
name = data[0]
data = data[1:]
if len(name.split("__")) > 1:
continue
try:
vals = [float(d) for d in data]
fData[name] = vals
except:
continue
return sample_names, fData
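# Illustrative IPL matrix layout expected by fileData above (not from the original sources):
# a tab-separated table whose header row names the samples, with one row per entity; rows
# whose name contains '__' and rows with non-numeric values are skipped. The 'nw_'/'na_'
# sample prefixes are the null-model columns used by summarizePathway.
#
#   Entity    DTB-004   DTB-009   nw_1    na_1
#   TP53      0.42      -1.10     0.00    0.12
#   MYC       1.05       0.33     0.01   -0.40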
def createSampleListPage(path_f, parametric, uniqueName, red_label, grey_label):
samples, data = fileData(path_f)
pid, pathwayName = getPathwayByFilename(path_f)
print "pathway:", pathwayName
if parametric:
imgFilename = pngDir + uniqueName + '_' + pid + "_p_summary.png"
else:
imgFilename = pngDir + uniqueName + '_' + pid + "_np_summary.png"
#print "#image file ", imgFilename, "root", rootDir, "png", pngDir
imgSize = (12,5)
pathwayName, entitySummary, pngFile = imgFG.createPlotFromData(pathwayName, imgSize,
imgFilename, parametric,
samples, data,
red_label, grey_label,
class1, class2)
basePNG = os.path.basename(pngFile)
page, summary_tsv = initializePage(t = pathwayName + " -- " + uniqueName,
h = "", sort_list = "[[8,1]]")
#ipl plot at top of page
#summary_tsv.write('<img src="%s" alt="Summary Plot"\n' % pngDir+basePNG)
#summary_tsv.write('Result table\n')
page.img(src=pngBase+basePNG, alt="Summary Plot")
page.p("Result table")
putSummaryTable(p=page, b="1", data=entitySummary, id="result_table", tsv=summary_tsv)
fname = basePNG[:-4] + ".html"
print "createSampleListPage"
writePageToFile(page, rootDir + fname)
summary = summarizePathway(samples, data, entitySummary)
return fname, pathwayName, summary
def createGeneListPage(path_f, parametric, uniqueName, red_label, grey_label):
samples, data = fileData(path_f)
pid, pathwayName = getPathwayByFilename(path_f)
print "pathway:", pathwayName
if parametric:
imgFilename = pngDir + uniqueName + '_' + pid + "_p_summary.png"
else:
imgFilename = pngDir + uniqueName + '_' + pid + "_np_summary.png"
print "#image file ", imgFilename, "root", rootDir, "png", pngDir
imgSize = (12,5)
pathwayName, entitySummary, pngFile = imgFG.createPlotFromData(pathwayName, imgSize,
imgFilename, parametric,
samples, data,
red_label, grey_label,
class1, class2)
basePNG = os.path.basename(pngFile)
page, summary_tsv = initializePage(t = pathwayName + " -- " + uniqueName,
h = "", sort_list = "[[8,1]]")
#ipl plot at top of page
#summary_tsv.write('<img src="%s" alt="Summary Plot"\n' % pngDir+basePNG)
#summary_tsv.write('Result table\n')
page.img(src=pngBase+basePNG, alt="Summary Plot")
page.p("Result table")
putSummaryTable(p=page, b="1", data=entitySummary, id="result_table", tsv=summary_tsv)
fname = basePNG[:-4] + ".html"
print "createGeneListPage"
writePageToFile(page, rootDir + fname)
summary = summarizePathway(samples, data, entitySummary)
return fname, pathwayName, summary
def putResultsTable(p, b, data, id):
# p -> page
# data -> html_filename, pathwayName, summary dictionary (one row per pathway)
r = data[0]
summaryVals = r[2]
header = summaryVals["order"]
p.table(border=b, id=id, class_='tablesorter')
p.thead()
p.tr()
p.th("Image")
p.th("Name")
p.th(header, class_="{sorter:'digit'}")
p.tr.close()
p.thead.close()
summary_tsv = open(rootDir+'/summary.tsv','wb')
summary_tsv.write("Pathway\tAvg num Alterations\tTotal alterations\tnum genes\tmin mean truth\tmax mean truth\tmin mean any\tmax mean any\tnormalized activity\n")
p.tbody()
rowCount = 0
rowSum = [0 for h in header]
for r in data:
htmlFile = r[0]
pathwayName = r[1]
summaryVals = r[2]
p.tr()
base = os.path.basename(htmlFile)
#plot of ipls
p.td(o.a(o.img(src = pngBase + base[:-5] + ".png", width=100), href=base))
#link to pathway details page
p.td(o.a(pathwayName, href=base))
summary_tsv.write(pathwayName+'\t')
vals = [round(summaryVals[h],3) for h in header]
for v in vals:
summary_tsv.write(str(v)+'\t')
#additional columns of data
p.td(vals)
i = 0
#add data to totals for bottom of page
for h in header:
rowSum[i] += summaryVals[h]
i += 1
#end of row
summary_tsv.write('\n')
p.tr.close()
summary_tsv.close()
p.tbody.close()
p.tbody()
p.tr()
p.td('')
p.td('Total')
# last row in table is sums
p.td(rowSum)
p.tr.close()
p.tbody.close()
p.table.close()
def createIndexPage(pResults, npResults, index_html):
page, summary_tsv = initializePage(t = "Factor Graph Results",
h = "")
page.p("Per-pathway summary of activity")
putResultsTable(p=page, b="1", data=pResults, id="result_table1")
#page.p("Non-Parametric Results")
#putResultsTable(p=page, b="1", data=npResults, id="result_table2")
print "createIndexPage", index_html
writePageToFile(page, index_html)
def createTopPathwaysPage(pResults):
page, summary_tsv = initializePage(t = "Per-pathway summary of activity", h = "")
page.p("Per-pathway summary of activity")
page.p('<a href="index.html">Click here for all pathways</a>')
putResultsTable(p=page, b="1", data=pResults[0:10], id="results")
page.p('<a href="index.html">Click here for all pathways</a>')
print "createTopPathwaySummaryPage"
writePageToFile(page, rootDir + "summary.html")
def main(directory, pathway_directory, contrast_file, red_label, grey_label, index_html_path):
# create all html pages for each individual run, including images
# collect objects containing html page, whatever pathway-level summary info (in 2d dict)
# use objects to generate root level index.html
global rootDir
global pngDir
global class1
global class2
parseContrast(contrast_file, red_label, grey_label)
print len(class1), "samples for class1 ", red_label, class1
print len(class2), "samples for class2 ", grey_label, class2
fdir = os.path.dirname(index_html_path)
print "dir", fdir
if fdir == "":
fdir=os.getcwd()
rootDir = fdir+'/html/'
print "rootDir", rootDir
pathways = getFilesMatching(pathway_directory, ["*pid*tab","*pid*spf"])
for fname in pathways:
initEntityDict(fname)
print "reading ipls", directory
files = getFilesMatching(directory, ["*transpose*.out"])
pngDir = rootDir + pngBase
os.system('mkdir -p '+pngDir)
os.system('cp -p ./style.css '+rootDir)
os.system('cp -pr ./js '+rootDir)
print "mkdir -p "+pngDir
pResults = []
parametric = True
datasetName = os.path.basename(directory.strip('/'))
for f in files:
if f == "merged_transpose_pid_example.out":
continue
print "File: "+f, "dataset:", datasetName
# list of genes and complexes for a pathway
r = createGeneListPage(f, parametric, datasetName, red_label, grey_label)
# fname, pathwayName, summary
print " #createGeneListPage pathway ", r[1], r[2], r[0]
if r[2] != None:
pResults.append(r)
npResults = []
#parametric = False
#for f in files:
# if f == "merged_transpose_pid_example.out":
# continue
# r = createGeneListPage(f, parametric, directory.strip('/'))
# npResults.append(r)
#pResults.sort(key=lambda x: -x[2]["Avg Num Alterations"])
pResults.sort(key=lambda x: -x[2]["Normalized Activity"])
#main pathway summary page (all pathways)
createIndexPage(pResults, npResults, index_html_path)
#main pathway summary page (top 10)
createTopPathwaysPage(pResults)
def usage():
print "usage: python htmlFG.py ipl_directory pathway_directory pathway_pids.tab contrast_file class1_label class2_label index.html"
print " ipl_directory contains one IPL matrix per pathway"
print " pathway_directory contains one spf file per pathway"
print " pathway_pids.tab is a 3 col file with list of pathways in pathway_directory: pid, description, source"
    print "    contrast_file contains tab delimited file, first col is sample id and second is class of sample"
print " Note: pathway names must start with pid_ and end with _pathway.tab"
print
sys.exit(0)
if __name__ == "__main__":
if len(sys.argv) != 8:
usage()
directory = sys.argv[1]
pathways = sys.argv[2]
path_list = sys.argv[3]
contrast_file = sys.argv[4]
red_label = sys.argv[5]
grey_label = sys.argv[6]
index_html_path = sys.argv[7]
initPathwayNameDict(path_file=path_list)
import pdb
main(directory, pathways, contrast_file, red_label, grey_label, index_html_path)
| bsd-3-clause | 7,746,036,898,464,188,000 | 31.844607 | 166 | 0.560169 | false |
mcxiaoke/python-labs | archives/learning/security/otp.py | 1 | 4777 | # -*- coding: UTF-8 -*-
# -*- coding: utf-8 -*-
"""
otpauth
~~~~~~~
Implements two-step verification of HOTP/TOTP.
:copyright: (c) 2013 - 2014 by Hsiaoming Yang.
:license: BSD, see LICENSE for more details.
"""
import base64
import hashlib
import hmac
import struct
import sys
import time
import warnings
if sys.version_info[0] == 3:
python_version = 3
string_type = str
else:
python_version = 2
string_type = unicode
range = xrange
class OTPAuth(object):
"""One Time Password Authentication.
:param secret: A secret token for the authentication.
"""
def __init__(self, secret):
self.secret = secret
def hotp(self, counter=4):
"""Generate a HOTP code.
:param counter: HOTP is a counter based algorithm.
"""
return generate_hotp(self.secret, counter)
def totp(self, period=30):
"""Generate a TOTP code.
        A TOTP code is an extension of the HOTP algorithm.
:param period: A period that a TOTP code is valid in seconds
"""
return generate_totp(self.secret, period)
def valid_hotp(self, code, last=0, trials=100):
"""Valid a HOTP code.
:param code: A number that is less than 6 characters.
:param last: Guess HOTP code from last + 1 range.
:param trials: Guest HOTP code end at last + trials + 1.
"""
if not valid_code(code):
return False
code = int(code)
for i in range(last + 1, last + trials + 1):
if self.hotp(counter=i) == code:
return i
return False
def valid_totp(self, code, period=30):
"""Valid a TOTP code.
:param code: A number that is less than 6 characters.
:param period: A period that a TOTP code is valid in seconds
"""
return valid_code(code) and self.totp(period) == int(code)
    def to_uri(self, type, label, issuer, counter=None):
        """Generate the otpauth protocol string.
:param type: Algorithm type, hotp or totp.
:param label: Label of the identifier.
:param issuer: The company, the organization or something else.
:param counter: Counter of the HOTP algorithm.
"""
type = type.lower()
if type not in ('hotp', 'totp'):
raise ValueError('type must be hotp or totp')
if type == 'hotp' and not counter:
raise ValueError('HOTP type authentication need counter')
secret = base64.b32encode(to_bytes(self.secret))
# bytes to string
secret = secret.decode('utf-8')
# remove pad string
secret = secret.strip('=')
# https://code.google.com/p/google-authenticator/wiki/KeyUriFormat
url = ('otpauth://%(type)s/%(label)s?secret=%(secret)s'
'&issuer=%(issuer)s')
dct = dict(
type=type, label=label, issuer=issuer,
secret=secret, counter=counter
)
ret = url % dct
if type == 'hotp':
ret = '%s&counter=%s' % (ret, counter)
return ret
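    # Illustrative output of to_uri (not part of the original module): for TOTP it yields a
    # Key-Uri-Format string such as
    #   otpauth://totp/alice?secret=JBSWY3DPEHPK3PXP&issuer=Example
    # and for HOTP the same string with '&counter=<n>' appended, which authenticator apps
    # can consume, typically via a QR code.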
    def to_google(self, type, label, issuer, counter=None):
        """Generate the otpauth protocol string for Google Authenticator.
.. deprecated:: 0.2.0
Use :func:`to_uri` instead.
"""
warnings.warn('deprecated, use to_uri instead', DeprecationWarning)
return self.to_uri(type, label, issuer, counter)
def generate_hotp(secret, counter=4):
"""Generate a HOTP code.
:param secret: A secret token for the authentication.
:param counter: HOTP is a counter based algorithm.
"""
# https://tools.ietf.org/html/rfc4226
msg = struct.pack('>Q', counter)
digest = hmac.new(to_bytes(secret), msg, hashlib.sha1).digest()
ob = digest[19]
if python_version == 2:
ob = ord(ob)
pos = ob & 15
base = struct.unpack('>I', digest[pos:pos + 4])[0] & 0x7fffffff
token = base % 1000000
return token
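# Illustrative check (not part of the original module): with the RFC 4226 Appendix D test
# key b'12345678901234567890', generate_hotp(secret, counter=0) returns 755224 and the
# default counter=4 returns 338314, matching the published HOTP test vectors.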
def generate_totp(secret, period=30):
"""Generate a TOTP code.
    A TOTP code is an extension of the HOTP algorithm.
:param secret: A secret token for the authentication.
:param period: A period that a TOTP code is valid in seconds
"""
counter = int(time.time()) // period
return generate_hotp(secret, counter)
def to_bytes(text):
if isinstance(text, string_type):
# Python3 str -> bytes
# Python2 unicode -> str
text = text.encode('utf-8')
return text
def valid_code(code):
code = string_type(code)
return code.isdigit() and len(code) <= 6
if __name__ == '__main__':
gotp=OTPAuth('xjom6zpducm4mltk5stxcogv3wcvq7do')
print gotp.totp()
dotp=OTPAuth('PBFCKI5CSTEGFKDV4RHCLFZSCU')
print dotp.totp()
| apache-2.0 | 2,432,917,757,027,583,500 | 26.454023 | 75 | 0.601005 | false |
daweiwu/meta-iotqa-1 | lib/oeqa/runtime/wifi/comm_wifi_mnode.py | 1 | 5969 | """
@file comm_wifi_mnode.py
"""
##
# @addtogroup wifi
# @brief This is component
# @{
# @addtogroup comm_wifi_mnode
# @brief This is comm_wifi module
# @{
##
import time
import os
import string
import wifi
import ConfigParser
from oeqa.oetest import oeRuntimeTest
from oeqa.utils.helper import shell_cmd_timeout
from oeqa.utils.decorators import tag
ssid_config = ConfigParser.ConfigParser()
config_path = os.path.join(os.path.dirname(__file__), "files/config.ini")
ssid_config.readfp(open(config_path))
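# Illustrative files/config.ini layout implied by the lookups below (not part of the original
# test data); only the [Connect] section is read in this module:
#
#   [Connect]
#   ssid_80211n = <access point ssid>
#   passwd_80211n = <access point password>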
@tag(TestType="EFT")
class CommWiFiMNode(oeRuntimeTest):
"""
@class CommWiFiMNode
"""
@classmethod
def setUpClass(cls):
''' initialize wifi class
        @fn setUpClass
        @param cls
@return
'''
wifi1 = wifi.WiFiFunction(cls.tc.targets[0])
wifi2 = wifi.WiFiFunction(cls.tc.targets[1])
# Connect to same WiFi AP
ap_type = "hidden"
ssid = ssid_config.get("Connect","ssid_80211n")
pwd = ssid_config.get("Connect","passwd_80211n")
wifi1.execute_connection(ap_type, ssid, pwd)
wifi2.execute_connection(ap_type, ssid, pwd)
@classmethod
def tearDownClass(cls):
'''disable wifi, it will block ethernet connection when rebooting
@fn tearDownClass
@param cls
@return
'''
wifi1 = wifi.WiFiFunction(cls.tc.targets[0])
wifi2 = wifi.WiFiFunction(cls.tc.targets[1])
wifi1.disable_wifi()
wifi2.disable_wifi()
def setUp(self):
''' init wifi1 and wifi2
@fn setUp
@param self
@return
'''
# init wifi1 and wifi2
self.wifi1 = wifi.WiFiFunction(self.targets[0])
self.wifi2 = wifi.WiFiFunction(self.targets[1])
@tag(FeatureID="IOTOS-457")
def test_wifi_ssh(self):
'''One device ssh to another via WiFi
@fn test_wifi_ssh
@param self
@return
'''
# Check wifi1 to ssh to wifi2
self.wifi1.ipv4_ssh_to(self.wifi2.get_wifi_ipv4())
@tag(FeatureID="IOTOS-457")
def test_wifi_scp_file(self):
'''One device scp a file to another device via WiFi
@fn test_wifi_scp_file
@param self
@return
'''
# Check wifi1 to scp /etc/os-release to wifi2
self.wifi1.ipv4_ssh_to(self.wifi2.get_wifi_ipv4())
file_path = "/etc/os-release"
self.wifi1.scp_to(file_path, self.wifi2.get_wifi_ipv4())
# Compare md5sum
        (status, md5sum1) = self.wifi1.target.run('md5sum %s' % file_path)
        (status, md5sum2) = self.wifi2.target.run('md5sum /tmp/%s' % file_path.split('/')[-1])
if md5sum1 == md5sum2:
pass
else:
self.assertEqual(0, 1, msg="md5sum checking fail: original %s, remote is %s" % (md5sum1, md5sum2))
@tag(FeatureID="IOTOS-458")
def test_wifi_scp_multiple_files(self):
'''Stability: one device scp thousands of small files
to another
@fn test_wifi_scp_multiple_files
@param self
@return
'''
# clean files on both sides
self.wifi2.target.run('rm -f /home/root/*')
self.wifi1.ipv4_ssh_to(self.wifi2.get_wifi_ipv4())
# create 1000 files under /tmp/1000/ on target1
script = os.path.join(os.path.dirname(__file__), "files/create_1000_files.sh")
self.wifi1.target.copy_to(script, "/tmp/")
self.wifi1.target.run('sh /tmp/create_1000_files.sh')
# scp them to target2 /tmp/ folder
(status, file_number_old) = self.wifi2.target.run('ls /home/root/ | wc -l')
file_path = '/tmp/1000/*'
self.wifi1.scp_to(file_path, self.wifi2.get_wifi_ipv4())
# check if /tmp/ files number increase 1000 on target2
(status, file_number_new) = self.wifi2.target.run('ls /home/root/ | wc -l')
if int(file_number_new) - int(file_number_old) == 1000:
pass
else:
self.assertEqual(0, 1, msg="1000 file scp fail: original number %s, new number %s" % (file_number_old, file_number_new))
@tag(FeatureID="IOTOS-458")
def test_wifi_scp_big_file(self):
'''Stability: one device scp 500M size file to another
@fn test_wifi_scp_big_file
@param self
@return
'''
self.wifi1.ipv4_ssh_to(self.wifi2.get_wifi_ipv4())
file_path = '/home/root/big_file'
# create a big file, size is 500M
(status, patition) = self.wifi1.target.run('mount | grep " \/ "')
self.wifi1.target.run('dd if=%s of=%s bs=1M count=500' % (patition.split()[0], file_path))
# scp it to target2 /home/root/ folder
self.wifi2.target.run('rm -f /home/root/*')
self.wifi1.scp_to(file_path, self.wifi2.get_wifi_ipv4())
# check if md5sume is consistent
(status, md5sum1) = self.wifi1.target.run('md5sum %s' % file_path)
(status, md5sum2) = self.wifi2.target.run('md5sum /home/root/%s' % file_path.split('/')[-1])
if md5sum1.split()[0] == md5sum2.split()[0]:
pass
else:
self.assertEqual(0, 1, msg="md5sum checking fail: original %s, remote is %s" % (md5sum1.split()[0], md5sum2.split()[0]))
@tag(FeatureID="IOTOS-458")
def test_wifi_avaliable_after_longtime_idle(self):
'''Stability: check if wifi is still workable after a long time idle
@fn test_wifi_avaliable_after_longtime_idle
@param self
@return
'''
# Re-connect wifi although setUpClass already did it
ap_type = "hidden"
ssid = ssid_config.get("Connect","ssid_80211n")
pwd = ssid_config.get("Connect","passwd_80211n")
self.wifi1.execute_connection(ap_type, ssid, pwd)
self.wifi2.execute_connection(ap_type, ssid, pwd)
# idle for half hour, then check basic ssh_to function
time.sleep(1800)
self.wifi1.ipv4_ssh_to(self.wifi2.get_wifi_ipv4())
##
# @}
# @}
##
| mit | 7,321,194,331,208,406,000 | 33.108571 | 132 | 0.59943 | false |
bmd/twittrscrapr | twittrscrapr/scrapers/profilescrapr.py | 1 | 2613 | from datetime import datetime as dt
import logging
from base_scraper import TwittrScrapr
logger = logging.getLogger("TwittrScrapr.ProfileScrapr")
class ProfileScrapr(TwittrScrapr):
def __init__(self, api_keys, writer):
"""
Construct the ProfileScraper object
:param api_keys: A dict containing Twitter API parameters
:param writer: A writer that implements the CSV module's DictReader and
DictWriter interfaces
:return: None
"""
super(ProfileScrapr, self).__init__(api_keys, writer)
def _call_lookup_method(self, method, profile_type, profiles):
"""
Call the appropriate lookup method for the profile type provided and return the result
from the Twitter API.
        :param method: the API lookup callable to invoke (e.g. self.api.lookup_user)
        :param profile_type: "screenname" or "user_ids"
        :param profiles: an array of profiles to iterate over
        :return: list of profile dicts
"""
if profile_type == 'screenname':
profs = method(screen_name=','.join(profiles))
else: # type is user_ids
profs = method(user_id=','.join(profiles))
results = []
for prof in profs:
results.append({
'screen_name': prof['screen_name'],
'display_name': prof['name'],
'twitter_join_dt': prof['created_at'],
'user_id': prof['id_str'],
'followers': prof['followers_count'],
'scrape_date': dt.strftime(dt.now(), '%Y-%m-%d'),
'location': prof['location'],
'website': prof['url'],
'tweets': prof['statuses_count'],
'friends': prof['friends_count'],
'listed_count': prof['listed_count']
})
self.reset_time = self.api.get_lastfunction_header('x-rate-limit-reset')
self.calls_remaining = self.api.get_lastfunction_header('x-rate-limit-remaining')
return results
@TwittrScrapr.error_handler
def fetch_user_profiles(self):
"""
Scrape a list of user profiles using the Twitter API's batch endpoints
:return: None
"""
self.check_rate_limit()
for x in range(0, (len(self.scrape_queue) // 100 + 1)):
start = x * 100
end = start + 100
results = self._call_lookup_method(
self.api.lookup_user, self.search_type, self.scrape_queue[start:min(end, len(self.scrape_queue))]
)
self.writer.write(results)
logger.info("Completed data pull")
self.writer.cleanup()
| mit | -934,643,122,492,740,400 | 33.381579 | 113 | 0.570608 | false |
ParuninPavel/lenta4_hack | vkapp/bot/models/news.py | 1 | 1052 | from django.db import models
from .users import Blogger, Admin
class News(models.Model):
id = models.AutoField(primary_key=True)
link = models.CharField(max_length=300, blank=True, null=True)
pic = models.CharField(max_length=300, blank=True, null=True)
blogger = models.ForeignKey(Blogger, on_delete=models.CASCADE, null=True)
media = models.CharField(max_length=3000, blank=True, null=True)
date_time = models.DateTimeField(auto_now_add=True)
class AdminReview(models.Model):
id = models.AutoField(primary_key=True)
admin = models.ForeignKey(Admin, on_delete=models.CASCADE)
news = models.ForeignKey(News, on_delete=models.CASCADE, unique=True)
rating = models.IntegerField()
date_time = models.DateTimeField(auto_now_add=True)
class Publication(models.Model):
id = models.AutoField(primary_key=True)
admin = models.ForeignKey(Admin, on_delete=models.CASCADE)
news = models.ForeignKey(News, on_delete=models.CASCADE, unique=True)
date_time = models.DateTimeField(auto_now_add=True)
| mit | 6,350,823,177,180,132,000 | 36.571429 | 77 | 0.734791 | false |
vedujoshi/tempest | tempest/api/object_storage/test_object_services.py | 1 | 46780 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import random
import re
import time
import zlib
from tempest.api.object_storage import base
from tempest.common import custom_matchers
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
CONF = config.CONF
class ObjectTest(base.BaseObjectTest):
@classmethod
def resource_setup(cls):
super(ObjectTest, cls).resource_setup()
cls.container_name = cls.create_container()
@classmethod
def resource_cleanup(cls):
cls.delete_containers()
super(ObjectTest, cls).resource_cleanup()
def _upload_segments(self):
# create object
object_name = data_utils.rand_name(name='LObject')
data = data_utils.arbitrary_string()
segments = 10
data_segments = [data + str(i) for i in range(segments)]
# uploading segments
for i in range(segments):
self.object_client.create_object_segments(
self.container_name, object_name, i, data_segments[i])
return object_name, data_segments
def _copy_object_2d(self, src_object_name, metadata=None):
dst_object_name = data_utils.rand_name(name='TestObject')
resp, _ = self.object_client.copy_object_2d_way(self.container_name,
src_object_name,
dst_object_name,
metadata=metadata)
return dst_object_name, resp
def _check_copied_obj(self, dst_object_name, src_body,
in_meta=None, not_in_meta=None):
resp, dest_body = self.object_client.get_object(self.container_name,
dst_object_name)
self.assertEqual(src_body, dest_body)
if in_meta:
for meta_key in in_meta:
self.assertIn('x-object-meta-' + meta_key, resp)
if not_in_meta:
for meta_key in not_in_meta:
self.assertNotIn('x-object-meta-' + meta_key, resp)
@decorators.attr(type='smoke')
@decorators.idempotent_id('5b4ce26f-3545-46c9-a2ba-5754358a4c62')
def test_create_object(self):
# create object
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes()
resp, _ = self.object_client.create_object(self.container_name,
object_name, data)
# create another object
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes()
resp, _ = self.object_client.create_object(self.container_name,
object_name, data)
self.assertHeaders(resp, 'Object', 'PUT')
# check uploaded content
_, body = self.object_client.get_object(self.container_name,
object_name)
self.assertEqual(data, body)
@decorators.idempotent_id('5daebb1d-f0d5-4dc9-b541-69672eff00b0')
def test_create_object_with_content_disposition(self):
# create object with content_disposition
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes()
metadata = {}
metadata['content-disposition'] = 'inline'
resp, _ = self.object_client.create_object(
self.container_name,
object_name,
data,
metadata=metadata)
self.assertHeaders(resp, 'Object', 'PUT')
resp, body = self.object_client.get_object(
self.container_name,
object_name,
metadata=None)
self.assertIn('content-disposition', resp)
self.assertEqual(resp['content-disposition'], 'inline')
self.assertEqual(body, data)
@decorators.idempotent_id('605f8317-f945-4bee-ae91-013f1da8f0a0')
def test_create_object_with_content_encoding(self):
# create object with content_encoding
object_name = data_utils.rand_name(name='TestObject')
# put compressed string
data_before = b'x' * 2000
data = zlib.compress(data_before)
metadata = {}
metadata['content-encoding'] = 'deflate'
resp, _ = self.object_client.create_object(
self.container_name,
object_name,
data,
metadata=metadata)
self.assertHeaders(resp, 'Object', 'PUT')
# download compressed object
metadata = {}
metadata['accept-encoding'] = 'deflate'
resp, body = self.object_client.get_object(
self.container_name,
object_name,
metadata=metadata)
self.assertEqual(body, data_before)
@decorators.idempotent_id('73820093-0503-40b1-a478-edf0e69c7d1f')
def test_create_object_with_etag(self):
# create object with etag
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes()
md5 = hashlib.md5(data).hexdigest()
metadata = {'Etag': md5}
resp, _ = self.object_client.create_object(
self.container_name,
object_name,
data,
metadata=metadata)
self.assertHeaders(resp, 'Object', 'PUT')
# check uploaded content
_, body = self.object_client.get_object(self.container_name,
object_name)
self.assertEqual(data, body)
@decorators.idempotent_id('84dafe57-9666-4f6d-84c8-0814d37923b8')
def test_create_object_with_expect_continue(self):
# create object with expect_continue
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes()
status, _ = self.object_client.create_object_continue(
self.container_name, object_name, data)
self.assertEqual(status, 201)
# check uploaded content
_, body = self.object_client.get_object(self.container_name,
object_name)
self.assertEqual(data, body)
@decorators.idempotent_id('4f84422a-e2f2-4403-b601-726a4220b54e')
def test_create_object_with_transfer_encoding(self):
# create object with transfer_encoding
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes(1024)
_, _, resp_headers = self.object_client.put_object_with_chunk(
container=self.container_name,
name=object_name,
contents=data_utils.chunkify(data, 512)
)
self.assertHeaders(resp_headers, 'Object', 'PUT')
# check uploaded content
_, body = self.object_client.get_object(self.container_name,
object_name)
self.assertEqual(data, body)
@decorators.idempotent_id('0f3d62a6-47e3-4554-b0e5-1a5dc372d501')
def test_create_object_with_x_fresh_metadata(self):
# create object with x_fresh_metadata
object_name_base = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes()
metadata_1 = {'X-Object-Meta-test-meta': 'Meta'}
self.object_client.create_object(self.container_name,
object_name_base,
data,
metadata=metadata_1)
object_name = data_utils.rand_name(name='TestObject')
metadata_2 = {'X-Copy-From': '%s/%s' % (self.container_name,
object_name_base),
'X-Fresh-Metadata': 'true'}
resp, _ = self.object_client.create_object(
self.container_name,
object_name,
'',
metadata=metadata_2)
self.assertHeaders(resp, 'Object', 'PUT')
resp, body = self.object_client.get_object(self.container_name,
object_name)
self.assertNotIn('x-object-meta-test-meta', resp)
self.assertEqual(data, body)
@decorators.idempotent_id('1c7ed3e4-2099-406b-b843-5301d4811baf')
def test_create_object_with_x_object_meta(self):
# create object with object_meta
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes()
metadata = {'X-Object-Meta-test-meta': 'Meta'}
resp, _ = self.object_client.create_object(
self.container_name,
object_name,
data,
metadata=metadata)
self.assertHeaders(resp, 'Object', 'PUT')
resp, body = self.object_client.get_object(self.container_name,
object_name)
self.assertIn('x-object-meta-test-meta', resp)
self.assertEqual(resp['x-object-meta-test-meta'], 'Meta')
self.assertEqual(data, body)
@decorators.idempotent_id('e4183917-33db-4153-85cc-4dacbb938865')
def test_create_object_with_x_object_metakey(self):
# create object with the blank value of metadata
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes()
metadata = {'X-Object-Meta-test-meta': ''}
resp, _ = self.object_client.create_object(
self.container_name,
object_name,
data,
metadata=metadata)
self.assertHeaders(resp, 'Object', 'PUT')
resp, body = self.object_client.get_object(self.container_name,
object_name)
self.assertIn('x-object-meta-test-meta', resp)
self.assertEqual(resp['x-object-meta-test-meta'], '')
self.assertEqual(data, body)
@decorators.idempotent_id('ce798afc-b278-45de-a5ce-2ea124b98b99')
def test_create_object_with_x_remove_object_meta(self):
# create object with x_remove_object_meta
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes()
metadata_add = {'X-Object-Meta-test-meta': 'Meta'}
self.object_client.create_object(self.container_name,
object_name,
data,
metadata=metadata_add)
metadata_remove = {'X-Remove-Object-Meta-test-meta': 'Meta'}
resp, _ = self.object_client.create_object(
self.container_name,
object_name,
data,
metadata=metadata_remove)
self.assertHeaders(resp, 'Object', 'PUT')
resp, body = self.object_client.get_object(self.container_name,
object_name)
self.assertNotIn('x-object-meta-test-meta', resp)
self.assertEqual(data, body)
@decorators.idempotent_id('ad21e342-7916-4f9e-ab62-a1f885f2aaf9')
def test_create_object_with_x_remove_object_metakey(self):
        # create object with a blank value for the remove metadata header
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes()
metadata_add = {'X-Object-Meta-test-meta': 'Meta'}
self.object_client.create_object(self.container_name,
object_name,
data,
metadata=metadata_add)
metadata_remove = {'X-Remove-Object-Meta-test-meta': ''}
resp, _ = self.object_client.create_object(
self.container_name,
object_name,
data,
metadata=metadata_remove)
self.assertHeaders(resp, 'Object', 'PUT')
resp, body = self.object_client.get_object(self.container_name,
object_name)
self.assertNotIn('x-object-meta-test-meta', resp)
self.assertEqual(data, body)
@decorators.idempotent_id('17738d45-03bd-4d45-9e0b-7b2f58f98687')
def test_delete_object(self):
# create object
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes()
resp, _ = self.object_client.create_object(self.container_name,
object_name, data)
# delete object
resp, _ = self.object_client.delete_object(self.container_name,
object_name)
self.assertHeaders(resp, 'Object', 'DELETE')
@decorators.attr(type='smoke')
@decorators.idempotent_id('7a94c25d-66e6-434c-9c38-97d4e2c29945')
def test_update_object_metadata(self):
# update object metadata
object_name, _ = self.create_object(self.container_name)
metadata = {'X-Object-Meta-test-meta': 'Meta'}
resp, _ = self.object_client.update_object_metadata(
self.container_name,
object_name,
metadata,
metadata_prefix='')
self.assertHeaders(resp, 'Object', 'POST')
resp, _ = self.object_client.list_object_metadata(
self.container_name,
object_name)
self.assertIn('x-object-meta-test-meta', resp)
self.assertEqual(resp['x-object-meta-test-meta'], 'Meta')
@decorators.idempotent_id('48650ed0-c189-4e1e-ad6b-1d4770c6e134')
def test_update_object_metadata_with_remove_metadata(self):
# update object metadata with remove metadata
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes()
create_metadata = {'X-Object-Meta-test-meta1': 'Meta1'}
self.object_client.create_object(self.container_name,
object_name,
data,
metadata=create_metadata)
update_metadata = {'X-Remove-Object-Meta-test-meta1': 'Meta1'}
resp, _ = self.object_client.update_object_metadata(
self.container_name,
object_name,
update_metadata,
metadata_prefix='')
self.assertHeaders(resp, 'Object', 'POST')
resp, _ = self.object_client.list_object_metadata(
self.container_name,
object_name)
self.assertNotIn('x-object-meta-test-meta1', resp)
@decorators.idempotent_id('f726174b-2ded-4708-bff7-729d12ce1f84')
def test_update_object_metadata_with_create_and_remove_metadata(self):
# creation and deletion of metadata with one request
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes()
create_metadata = {'X-Object-Meta-test-meta1': 'Meta1'}
self.object_client.create_object(self.container_name,
object_name,
data,
metadata=create_metadata)
update_metadata = {'X-Object-Meta-test-meta2': 'Meta2',
'X-Remove-Object-Meta-test-meta1': 'Meta1'}
resp, _ = self.object_client.update_object_metadata(
self.container_name,
object_name,
update_metadata,
metadata_prefix='')
self.assertHeaders(resp, 'Object', 'POST')
resp, _ = self.object_client.list_object_metadata(
self.container_name,
object_name)
self.assertNotIn('x-object-meta-test-meta1', resp)
self.assertIn('x-object-meta-test-meta2', resp)
self.assertEqual(resp['x-object-meta-test-meta2'], 'Meta2')
@decorators.idempotent_id('08854588-6449-4bb7-8cca-f2e1040f5e6f')
def test_update_object_metadata_with_x_object_manifest(self):
# update object metadata with x_object_manifest
# uploading segments
object_name, _ = self._upload_segments()
# creating a manifest file
data_empty = ''
self.object_client.create_object(self.container_name,
object_name,
data_empty,
metadata=None)
object_prefix = '%s/%s' % (self.container_name, object_name)
update_metadata = {'X-Object-Manifest': object_prefix}
resp, _ = self.object_client.update_object_metadata(
self.container_name,
object_name,
update_metadata,
metadata_prefix='')
self.assertHeaders(resp, 'Object', 'POST')
resp, _ = self.object_client.list_object_metadata(
self.container_name,
object_name)
self.assertIn('x-object-manifest', resp)
self.assertNotEmpty(resp['x-object-manifest'])
@decorators.idempotent_id('0dbbe89c-6811-4d84-a2df-eca2bdd40c0e')
def test_update_object_metadata_with_x_object_metakey(self):
# update object metadata with a blank value of metadata
object_name, _ = self.create_object(self.container_name)
update_metadata = {'X-Object-Meta-test-meta': ''}
resp, _ = self.object_client.update_object_metadata(
self.container_name,
object_name,
update_metadata,
metadata_prefix='')
self.assertHeaders(resp, 'Object', 'POST')
resp, _ = self.object_client.list_object_metadata(
self.container_name,
object_name)
self.assertIn('x-object-meta-test-meta', resp)
self.assertEqual(resp['x-object-meta-test-meta'], '')
@decorators.idempotent_id('9a88dca4-b684-425b-806f-306cd0e57e42')
def test_update_object_metadata_with_x_remove_object_metakey(self):
# update object metadata with a blank value of remove metadata
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.arbitrary_string()
create_metadata = {'X-Object-Meta-test-meta': 'Meta'}
self.object_client.create_object(self.container_name,
object_name,
data,
metadata=create_metadata)
update_metadata = {'X-Remove-Object-Meta-test-meta': ''}
resp, _ = self.object_client.update_object_metadata(
self.container_name,
object_name,
update_metadata,
metadata_prefix='')
self.assertHeaders(resp, 'Object', 'POST')
resp, _ = self.object_client.list_object_metadata(
self.container_name,
object_name)
self.assertNotIn('x-object-meta-test-meta', resp)
@decorators.attr(type='smoke')
@decorators.idempotent_id('9a447cf6-de06-48de-8226-a8c6ed31caf2')
def test_list_object_metadata(self):
# get object metadata
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes()
metadata = {'X-Object-Meta-test-meta': 'Meta'}
self.object_client.create_object(self.container_name,
object_name,
data,
metadata=metadata)
resp, _ = self.object_client.list_object_metadata(
self.container_name,
object_name)
self.assertHeaders(resp, 'Object', 'HEAD')
self.assertIn('x-object-meta-test-meta', resp)
self.assertEqual(resp['x-object-meta-test-meta'], 'Meta')
@decorators.idempotent_id('170fb90e-f5c3-4b1f-ae1b-a18810821172')
def test_list_no_object_metadata(self):
# get empty list of object metadata
object_name, _ = self.create_object(self.container_name)
resp, _ = self.object_client.list_object_metadata(
self.container_name,
object_name)
self.assertHeaders(resp, 'Object', 'HEAD')
self.assertNotIn('x-object-meta-', str(resp))
@decorators.idempotent_id('23a3674c-d6de-46c3-86af-ff92bfc8a3da')
def test_list_object_metadata_with_x_object_manifest(self):
# get object metadata with x_object_manifest
# uploading segments
object_name, _ = self._upload_segments()
# creating a manifest file
object_prefix = '%s/%s' % (self.container_name, object_name)
metadata = {'X-Object-Manifest': object_prefix}
data_empty = ''
resp, _ = self.object_client.create_object(
self.container_name,
object_name,
data_empty,
metadata=metadata)
resp, _ = self.object_client.list_object_metadata(
self.container_name,
object_name)
# Check only the existence of common headers with custom matcher
self.assertThat(resp, custom_matchers.ExistsAllResponseHeaders(
'Object', 'HEAD'))
self.assertIn('x-object-manifest', resp)
# Etag value of a large object is enclosed in double-quotations.
# This is a special case, therefore the formats of response headers
# are checked without a custom matcher.
self.assertTrue(resp['etag'].startswith('\"'))
self.assertTrue(resp['etag'].endswith('\"'))
self.assertTrue(resp['etag'].strip('\"').isalnum())
self.assertTrue(re.match(r"^\d+\.?\d*\Z", resp['x-timestamp']))
self.assertNotEmpty(resp['content-type'])
self.assertTrue(re.match("^tx[0-9a-f]{21}-[0-9a-f]{10}.*",
resp['x-trans-id']))
self.assertNotEmpty(resp['date'])
self.assertEqual(resp['accept-ranges'], 'bytes')
self.assertEqual(resp['x-object-manifest'],
'%s/%s' % (self.container_name, object_name))
@decorators.attr(type='smoke')
@decorators.idempotent_id('02610ba7-86b7-4272-9ed8-aa8d417cb3cd')
def test_get_object(self):
# retrieve object's data (in response body)
# create object
object_name, data = self.create_object(self.container_name)
# get object
resp, body = self.object_client.get_object(self.container_name,
object_name)
self.assertHeaders(resp, 'Object', 'GET')
self.assertEqual(body, data)
@decorators.idempotent_id('005f9bf6-e06d-41ec-968e-96c78e0b1d82')
def test_get_object_with_metadata(self):
# get object with metadata
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes()
metadata = {'X-Object-Meta-test-meta': 'Meta'}
self.object_client.create_object(self.container_name,
object_name,
data,
metadata=metadata)
resp, body = self.object_client.get_object(
self.container_name,
object_name,
metadata=None)
self.assertHeaders(resp, 'Object', 'GET')
self.assertIn('x-object-meta-test-meta', resp)
self.assertEqual(resp['x-object-meta-test-meta'], 'Meta')
self.assertEqual(body, data)
@decorators.idempotent_id('05a1890e-7db9-4a6c-90a8-ce998a2bddfa')
def test_get_object_with_range(self):
# get object with range
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes(100)
self.object_client.create_object(self.container_name,
object_name,
data,
metadata=None)
rand_num = random.randint(3, len(data) - 1)
metadata = {'Range': 'bytes=%s-%s' % (rand_num - 3, rand_num - 1)}
resp, body = self.object_client.get_object(
self.container_name,
object_name,
metadata=metadata)
self.assertHeaders(resp, 'Object', 'GET')
self.assertEqual(body, data[rand_num - 3: rand_num])
@decorators.idempotent_id('11b4515b-7ba7-4ca8-8838-357ded86fc10')
def test_get_object_with_x_object_manifest(self):
# get object with x_object_manifest
# uploading segments
object_name, data_segments = self._upload_segments()
# creating a manifest file
object_prefix = '%s/%s' % (self.container_name, object_name)
metadata = {'X-Object-Manifest': object_prefix}
data_empty = ''
resp, body = self.object_client.create_object(
self.container_name,
object_name,
data_empty,
metadata=metadata)
resp, body = self.object_client.get_object(
self.container_name,
object_name,
metadata=None)
# Check only the existence of common headers with custom matcher
self.assertThat(resp, custom_matchers.ExistsAllResponseHeaders(
'Object', 'GET'))
self.assertIn('x-object-manifest', resp)
# Etag value of a large object is enclosed in double-quotations.
# This is a special case, therefore the formats of response headers
# are checked without a custom matcher.
self.assertTrue(resp['etag'].startswith('\"'))
self.assertTrue(resp['etag'].endswith('\"'))
self.assertTrue(resp['etag'].strip('\"').isalnum())
self.assertTrue(re.match(r"^\d+\.?\d*\Z", resp['x-timestamp']))
self.assertNotEmpty(resp['content-type'])
self.assertTrue(re.match("^tx[0-9a-f]{21}-[0-9a-f]{10}.*",
resp['x-trans-id']))
self.assertNotEmpty(resp['date'])
self.assertEqual(resp['accept-ranges'], 'bytes')
self.assertEqual(resp['x-object-manifest'],
'%s/%s' % (self.container_name, object_name))
self.assertEqual(''.join(data_segments), body.decode())
@decorators.idempotent_id('c05b4013-e4de-47af-be84-e598062b16fc')
def test_get_object_with_if_match(self):
# get object with if_match
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes(10)
create_md5 = hashlib.md5(data).hexdigest()
create_metadata = {'Etag': create_md5}
self.object_client.create_object(self.container_name,
object_name,
data,
metadata=create_metadata)
list_metadata = {'If-Match': create_md5}
resp, body = self.object_client.get_object(
self.container_name,
object_name,
metadata=list_metadata)
self.assertHeaders(resp, 'Object', 'GET')
self.assertEqual(body, data)
@decorators.idempotent_id('be133639-e5d2-4313-9b1f-2d59fc054a16')
def test_get_object_with_if_modified_since(self):
# get object with if_modified_since
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes()
time_now = time.time()
self.object_client.create_object(self.container_name,
object_name,
data,
metadata=None)
http_date = time.ctime(time_now - 86400)
list_metadata = {'If-Modified-Since': http_date}
resp, body = self.object_client.get_object(
self.container_name,
object_name,
metadata=list_metadata)
self.assertHeaders(resp, 'Object', 'GET')
self.assertEqual(body, data)
@decorators.idempotent_id('641500d5-1612-4042-a04d-01fc4528bc30')
def test_get_object_with_if_none_match(self):
# get object with if_none_match
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes()
create_md5 = hashlib.md5(data).hexdigest()
create_metadata = {'Etag': create_md5}
self.object_client.create_object(self.container_name,
object_name,
data,
metadata=create_metadata)
list_data = data_utils.random_bytes()
list_md5 = hashlib.md5(list_data).hexdigest()
list_metadata = {'If-None-Match': list_md5}
resp, body = self.object_client.get_object(
self.container_name,
object_name,
metadata=list_metadata)
self.assertHeaders(resp, 'Object', 'GET')
self.assertEqual(body, data)
@decorators.idempotent_id('0aa1201c-10aa-467a-bee7-63cbdd463152')
def test_get_object_with_if_unmodified_since(self):
# get object with if_unmodified_since
object_name, data = self.create_object(self.container_name)
time_now = time.time()
http_date = time.ctime(time_now + 86400)
list_metadata = {'If-Unmodified-Since': http_date}
resp, body = self.object_client.get_object(
self.container_name,
object_name,
metadata=list_metadata)
self.assertHeaders(resp, 'Object', 'GET')
self.assertEqual(body, data)
@decorators.idempotent_id('94587078-475f-48f9-a40f-389c246e31cd')
def test_get_object_with_x_newest(self):
# get object with x_newest
object_name, data = self.create_object(self.container_name)
list_metadata = {'X-Newest': 'true'}
resp, body = self.object_client.get_object(
self.container_name,
object_name,
metadata=list_metadata)
self.assertHeaders(resp, 'Object', 'GET')
self.assertEqual(body, data)
@decorators.idempotent_id('1a9ab572-1b66-4981-8c21-416e2a5e6011')
def test_copy_object_in_same_container(self):
# create source object
src_object_name = data_utils.rand_name(name='SrcObject')
src_data = data_utils.random_bytes(size=len(src_object_name) * 2)
resp, _ = self.object_client.create_object(self.container_name,
src_object_name,
src_data)
# create destination object
dst_object_name = data_utils.rand_name(name='DstObject')
dst_data = data_utils.random_bytes(size=len(dst_object_name) * 3)
resp, _ = self.object_client.create_object(self.container_name,
dst_object_name,
dst_data)
# copy source object to destination
resp, _ = self.object_client.copy_object_in_same_container(
self.container_name, src_object_name, dst_object_name)
self.assertHeaders(resp, 'Object', 'PUT')
# check data
resp, body = self.object_client.get_object(self.container_name,
dst_object_name)
self.assertEqual(body, src_data)
@decorators.idempotent_id('2248abba-415d-410b-9c30-22dff9cd6e67')
def test_copy_object_to_itself(self):
# change the content type of an existing object
# create object
object_name, _ = self.create_object(self.container_name)
# get the old content type
resp_tmp, _ = self.object_client.list_object_metadata(
self.container_name, object_name)
# change the content type of the object
metadata = {'content-type': 'text/plain; charset=UTF-8'}
self.assertNotEqual(resp_tmp['content-type'], metadata['content-type'])
resp, _ = self.object_client.copy_object_in_same_container(
self.container_name, object_name, object_name, metadata)
self.assertHeaders(resp, 'Object', 'PUT')
# check the content type
resp, _ = self.object_client.list_object_metadata(self.container_name,
object_name)
self.assertEqual(resp['content-type'], metadata['content-type'])
@decorators.idempotent_id('06f90388-2d0e-40aa-934c-e9a8833e958a')
def test_copy_object_2d_way(self):
# create source object
src_object_name = data_utils.rand_name(name='SrcObject')
src_data = data_utils.random_bytes(size=len(src_object_name) * 2)
resp, _ = self.object_client.create_object(self.container_name,
src_object_name, src_data)
# create destination object
dst_object_name = data_utils.rand_name(name='DstObject')
dst_data = data_utils.random_bytes(size=len(dst_object_name) * 3)
resp, _ = self.object_client.create_object(self.container_name,
dst_object_name, dst_data)
# copy source object to destination
resp, _ = self.object_client.copy_object_2d_way(self.container_name,
src_object_name,
dst_object_name)
self.assertHeaders(resp, 'Object', 'COPY')
self.assertEqual(
resp['x-copied-from'],
self.container_name + "/" + src_object_name)
# check data
self._check_copied_obj(dst_object_name, src_data)
@decorators.idempotent_id('aa467252-44f3-472a-b5ae-5b57c3c9c147')
def test_copy_object_across_containers(self):
# create a container to use as a source container
src_container_name = data_utils.rand_name(name='TestSourceContainer')
self.container_client.create_container(src_container_name)
self.containers.append(src_container_name)
# create a container to use as a destination container
dst_container_name = data_utils.rand_name(
name='TestDestinationContainer')
self.container_client.create_container(dst_container_name)
self.containers.append(dst_container_name)
# create object in source container
object_name = data_utils.rand_name(name='Object')
data = data_utils.random_bytes(size=len(object_name) * 2)
resp, _ = self.object_client.create_object(src_container_name,
object_name, data)
# set object metadata
meta_key = data_utils.rand_name(name='test')
meta_value = data_utils.rand_name(name='MetaValue')
orig_metadata = {meta_key: meta_value}
resp, _ = self.object_client.update_object_metadata(src_container_name,
object_name,
orig_metadata)
self.assertHeaders(resp, 'Object', 'POST')
# copy object from source container to destination container
resp, _ = self.object_client.copy_object_across_containers(
src_container_name, object_name, dst_container_name,
object_name)
self.assertHeaders(resp, 'Object', 'PUT')
# check if object is present in destination container
resp, body = self.object_client.get_object(dst_container_name,
object_name)
self.assertEqual(body, data)
actual_meta_key = 'x-object-meta-' + meta_key
self.assertIn(actual_meta_key, resp)
self.assertEqual(resp[actual_meta_key], meta_value)
@decorators.idempotent_id('5a9e2cc6-85b6-46fc-916d-0cbb7a88e5fd')
def test_copy_object_with_x_fresh_metadata(self):
# create source object
metadata = {'x-object-meta-src': 'src_value'}
src_object_name, data = self.create_object(self.container_name,
metadata=metadata)
# copy source object with x_fresh_metadata header
metadata = {'X-Fresh-Metadata': 'true'}
dst_object_name, resp = self._copy_object_2d(src_object_name,
metadata)
self.assertHeaders(resp, 'Object', 'COPY')
self.assertNotIn('x-object-meta-src', resp)
self.assertEqual(resp['x-copied-from'],
self.container_name + "/" + src_object_name)
# check that destination object does NOT have any object-meta
self._check_copied_obj(dst_object_name, data, not_in_meta=["src"])
@decorators.idempotent_id('a28a8b99-e701-4d7e-9d84-3b66f121460b')
def test_copy_object_with_x_object_metakey(self):
# create source object
metadata = {'x-object-meta-src': 'src_value'}
src_obj_name, data = self.create_object(self.container_name,
metadata=metadata)
# copy source object to destination with x-object-meta-key
metadata = {'x-object-meta-test': ''}
dst_obj_name, resp = self._copy_object_2d(src_obj_name, metadata)
self.assertHeaders(resp, 'Object', 'COPY')
expected = {'x-object-meta-test': '',
'x-object-meta-src': 'src_value',
'x-copied-from': self.container_name + "/" + src_obj_name}
for key, value in expected.items():
self.assertIn(key, resp)
self.assertEqual(value, resp[key])
# check destination object
self._check_copied_obj(dst_obj_name, data, in_meta=["test", "src"])
@decorators.idempotent_id('edabedca-24c3-4322-9b70-d6d9f942a074')
def test_copy_object_with_x_object_meta(self):
# create source object
metadata = {'x-object-meta-src': 'src_value'}
src_obj_name, data = self.create_object(self.container_name,
metadata=metadata)
# copy source object to destination with object metadata
metadata = {'x-object-meta-test': 'value'}
dst_obj_name, resp = self._copy_object_2d(src_obj_name, metadata)
self.assertHeaders(resp, 'Object', 'COPY')
expected = {'x-object-meta-test': 'value',
'x-object-meta-src': 'src_value',
'x-copied-from': self.container_name + "/" + src_obj_name}
for key, value in expected.items():
self.assertIn(key, resp)
self.assertEqual(value, resp[key])
# check destination object
self._check_copied_obj(dst_obj_name, data, in_meta=["test", "src"])
@decorators.idempotent_id('e3e6a64a-9f50-4955-b987-6ce6767c97fb')
def test_object_upload_in_segments(self):
# create object
object_name = data_utils.rand_name(name='LObject')
data = data_utils.arbitrary_string()
segments = 10
data_segments = [data + str(i) for i in range(segments)]
# uploading segments
for i in range(segments):
resp, _ = self.object_client.create_object_segments(
self.container_name, object_name, i, data_segments[i])
# creating a manifest file
metadata = {'X-Object-Manifest': '%s/%s/'
% (self.container_name, object_name)}
resp, _ = self.object_client.create_object(self.container_name,
object_name, data='')
self.assertHeaders(resp, 'Object', 'PUT')
resp, _ = self.object_client.update_object_metadata(
self.container_name, object_name, metadata, metadata_prefix='')
self.assertHeaders(resp, 'Object', 'POST')
resp, _ = self.object_client.list_object_metadata(
self.container_name, object_name)
# Etag value of a large object is enclosed in double-quotations.
# After etag quotes are checked they are removed and the response is
# checked if all common headers are present and well formatted
self.assertTrue(resp['etag'].startswith('\"'))
self.assertTrue(resp['etag'].endswith('\"'))
resp['etag'] = resp['etag'].strip('"')
self.assertHeaders(resp, 'Object', 'HEAD')
self.assertIn('x-object-manifest', resp)
self.assertEqual(resp['x-object-manifest'],
'%s/%s/' % (self.container_name, object_name))
# downloading the object
resp, body = self.object_client.get_object(
self.container_name, object_name)
self.assertEqual(''.join(data_segments), body.decode())
@decorators.idempotent_id('50d01f12-526f-4360-9ac2-75dd508d7b68')
def test_get_object_if_different(self):
# http://en.wikipedia.org/wiki/HTTP_ETag
# Make a conditional request for an object using the If-None-Match
        # header; it should only be downloaded if the local file is different,
# otherwise the response code should be 304 Not Modified
object_name, data = self.create_object(self.container_name)
# local copy is identical, no download
md5 = hashlib.md5(data).hexdigest()
headers = {'If-None-Match': md5}
url = "%s/%s" % (self.container_name, object_name)
resp, _ = self.object_client.get(url, headers=headers)
self.assertEqual(resp['status'], '304')
# When the file is not downloaded from Swift server, response does
# not contain 'X-Timestamp' header. This is the special case, therefore
# the existence of response headers is checked without custom matcher.
self.assertIn('date', resp)
# Check only the format of common headers with custom matcher
self.assertThat(resp, custom_matchers.AreAllWellFormatted())
# local copy is different, download
local_data = "something different"
md5 = hashlib.md5(local_data.encode()).hexdigest()
headers = {'If-None-Match': md5}
resp, _ = self.object_client.get(url, headers=headers)
self.assertHeaders(resp, 'Object', 'GET')
class PublicObjectTest(base.BaseObjectTest):
credentials = [['operator', CONF.object_storage.operator_role],
['operator_alt', CONF.object_storage.operator_role]]
@classmethod
def setup_credentials(cls):
super(PublicObjectTest, cls).setup_credentials()
cls.os = cls.os_roles_operator
cls.os_alt = cls.os_roles_operator_alt
@classmethod
def setup_clients(cls):
super(PublicObjectTest, cls).setup_clients()
cls.identity_client_alt = cls.os_alt.identity_client
def setUp(self):
super(PublicObjectTest, self).setUp()
self.container_name = data_utils.rand_name(name='TestContainer')
self.container_client.create_container(self.container_name)
def tearDown(self):
self.delete_containers([self.container_name])
super(PublicObjectTest, self).tearDown()
@decorators.idempotent_id('07c9cf95-c0d4-4b49-b9c8-0ef2c9b27193')
def test_access_public_container_object_without_using_creds(self):
        # make the container public-readable and access an object in it
        # anonymously, without using credentials
# update container metadata to make it publicly readable
cont_headers = {'X-Container-Read': '.r:*,.rlistings'}
resp_meta, body = self.container_client.update_container_metadata(
self.container_name, metadata=cont_headers, metadata_prefix='')
self.assertHeaders(resp_meta, 'Container', 'POST')
# create object
object_name = data_utils.rand_name(name='Object')
data = data_utils.random_bytes(size=len(object_name))
resp, _ = self.object_client.create_object(self.container_name,
object_name, data)
self.assertHeaders(resp, 'Object', 'PUT')
# list container metadata
resp_meta, _ = self.container_client.list_container_metadata(
self.container_name)
self.assertHeaders(resp_meta, 'Container', 'HEAD')
self.assertIn('x-container-read', resp_meta)
self.assertEqual(resp_meta['x-container-read'], '.r:*,.rlistings')
# trying to get object with empty headers as it is public readable
self.object_client.auth_provider.set_alt_auth_data(
request_part='headers',
auth_data=None
)
resp, body = self.object_client.get_object(
self.container_name, object_name)
self.assertHeaders(resp, 'Object', 'GET')
self.assertEqual(body, data)
@decorators.idempotent_id('54e2a2fe-42dc-491b-8270-8e4217dd4cdc')
def test_access_public_object_with_another_user_creds(self):
# make container public-readable and access an object in it using
# another user's credentials
cont_headers = {'X-Container-Read': '.r:*,.rlistings'}
resp_meta, body = self.container_client.update_container_metadata(
self.container_name, metadata=cont_headers,
metadata_prefix='')
self.assertHeaders(resp_meta, 'Container', 'POST')
# create object
object_name = data_utils.rand_name(name='Object')
data = data_utils.random_bytes(size=len(object_name))
resp, _ = self.object_client.create_object(self.container_name,
object_name, data)
self.assertHeaders(resp, 'Object', 'PUT')
# list container metadata
resp, _ = self.container_client.list_container_metadata(
self.container_name)
self.assertHeaders(resp, 'Container', 'HEAD')
self.assertIn('x-container-read', resp)
self.assertEqual(resp['x-container-read'], '.r:*,.rlistings')
# get auth token of alternative user
alt_auth_data = self.identity_client_alt.auth_provider.auth_data
self.object_client.auth_provider.set_alt_auth_data(
request_part='headers',
auth_data=alt_auth_data
)
# access object using alternate user creds
resp, body = self.object_client.get_object(
self.container_name, object_name)
self.assertHeaders(resp, 'Object', 'GET')
self.assertEqual(body, data)
| apache-2.0 | -4,028,736,555,689,061,000 | 43.132075 | 79 | 0.583903 | false |
caperren/Archives | OSU Robotics Club/Mars Rover 2017-2018/software/testing/ubiradio_testing.py | 1 | 2372 | import paramiko
import json
import time
# ath0 21 channels in total; available frequencies :
# Channel 01 : 2.412 GHz
# Channel 31 : 2.414 GHz
# Channel 02 : 2.417 GHz
# Channel 32 : 2.419 GHz
# Channel 03 : 2.422 GHz
# Channel 33 : 2.424 GHz
# Channel 04 : 2.427 GHz
# Channel 34 : 2.429 GHz
# Channel 05 : 2.432 GHz
# Channel 35 : 2.434 GHz
# Channel 06 : 2.437 GHz
# Channel 36 : 2.439 GHz
# Channel 07 : 2.442 GHz
# Channel 37 : 2.444 GHz
# Channel 08 : 2.447 GHz
# Channel 38 : 2.449 GHz
# Channel 09 : 2.452 GHz
# Channel 39 : 2.454 GHz
# Channel 10 : 2.457 GHz
# Channel 40 : 2.459 GHz
# Channel 11 : 2.462 GHz
# Current Frequency:2.417 GHz (Channel 2)
# Sets: iwconfig ath0 channel 01
# Gets: iwlist ath0 channel
# NOTE
# Only the access point has to be changed; the station (client) will automatically switch to the new frequency
channel = 3
get_general_info = "wstalist"
get_wireless_info = "iwlist ath0 channel"
set_wireless_frequency = "iwconfig ath0 channel " + "%02d" % channel # iwconfig ath0 freq 2.456G
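# Minimal sketch of how the channel change defined above could be applied and then
# verified over an already-open SSH session; assumes the radio accepts the iwconfig
# and iwlist commands listed in the comment block above. Not called below.
def apply_channel_change(ssh_client):
    ssh_client.exec_command(set_wireless_frequency)
    _, stdout, _ = ssh_client.exec_command(get_wireless_info)
    print stdout.read()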
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy)
# Before anyone complains, I'm not worried about this password being online.
# We only set one because the web interfaces HAVE to have one
ssh.connect("192.168.1.20", username="ubnt", password="rover4lyfe^", compress=True)
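# Poll the radio's station/airMAX statistics roughly four times per second and print
# selected link-quality fields.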
while True:
ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(get_general_info)
output_json = json.loads(ssh_stdout.read())[0]
successful_transmit_percent = output_json["ccq"]
quality = output_json["airmax"]["quality"]
capacity = output_json["airmax"]["capacity"]
rx_rate = output_json["rx"]
tx_rate = output_json["tx"]
ground_tx_latency = output_json["tx_latency"]
rover_tx_latency = output_json["remote"]["tx_latency"]
print successful_transmit_percent, " | ", quality, " | ", capacity, " | ", rx_rate, " | ", tx_rate, " | ", ground_tx_latency, " | ", rover_tx_latency
time.sleep(0.25)
# ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(set_wireless_frequency)
# ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(get_wireless_info)
#
# print ssh_stdout.read()
| gpl-3.0 | -8,870,408,355,310,936,000 | 34.402985 | 153 | 0.623524 | false |
leschzinerlab/FreeHand | lib/fspace_param_consolidate.py | 1 | 1070 | #!/usr/bin/env python
import linecache
import sys
#Convert parameter file format with CTF info
untilt = sys.argv[1]
ctf2 = sys.argv[2]
fout = '%s_format' %(ctf2[:-4])
o1 = open(fout,'a')
o1.write("C Frealign format parameter file created from Search_fspace parameter file\n")
o1.write("C\n")
o1.write("C PSI THETA PHI SHX SHY MAG FILM DF1 DF2 ANGAST CCMax\n")
i = 1
tmp = open(ctf2,'r')
tot = len(tmp.readlines())
while i <= tot:
t = i + 3
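	# The +3 offset presumably skips the header lines at the top of the input
	# parameter file (this script writes three such 'C' comment lines itself).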
param = linecache.getline(untilt,t)
ctf = linecache.getline(ctf2,i)
l1 = param.split()
l2 = ctf.split()
psi = float(l1[1])
theta = float(l1[2])
phi = float(l1[3])
shiftx = float(l1[4])
shifty = float(l1[5])
mag = float(l1[6])
film=float(l1[7])
df1 = float(l2[0])
df2 = float(l2[1])
astig = float(l2[2])
a = (l1[10])
test = '%s' %(a[-1:])
if test == '*':
CC = 50
else:
CC = float(l1[11])
o1.write("%7d%8.3f%8.3f%8.3f%8.3f%8.3f%8.0f%6d%9.1f%9.1f%8.2f%7.2f\n" %(i,psi,theta,phi,shiftx,shifty,mag,film,df1,df2,astig,CC))
i = i + 1
o1.write("C\n")
| mit | 2,811,378,361,588,945,000 | 17.77193 | 130 | 0.592523 | false |
jordillinares/addons | stock_lot_enh_base/models/stock.py | 1 | 10625 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 Tiny SPRL (http://tiny.be). All Rights Reserved
#
# This module,
# Copyright (C) 2015 Jordi Llinares López - [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models, fields, api
from openerp.exceptions import except_orm
from openerp.tools.translate import _
import time
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.float_utils import float_compare, float_round
class stock_picking(models.Model):
_inherit = 'stock.picking'
@api.multi
def do_transfer(self):
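        # After the standard transfer, record a reference to this picking on every
        # lot handled by the pack operations: appended to the lot's origin for
        # incoming pickings, and to its destination for internal/outgoing pickings.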
res = super(stock_picking, self).do_transfer()
self.refresh()
for picking in self:
for move in picking.move_lines:
if move.linked_move_operation_ids:
for operation_link in move.linked_move_operation_ids:
if operation_link.operation_id.lot_id:
reference_list = []
# Why this data format? see multi_m2o_text_widget
# module description.
add_reference = 'stock.picking,%s' % picking.id
# Write destination (reference to picking) on
# internal/outgoing pickings
if picking.picking_type_code != 'incoming':
if operation_link.operation_id.lot_id.destination:
reference_list += operation_link.operation_id.lot_id.destination.split(
";")
if add_reference not in reference_list:
reference_list.append(add_reference)
destination = ";".join(reference_list) or False
if destination:
operation_link.operation_id.lot_id.destination = destination
# Write origin (reference to picking) on incoming
# pickings
else:
if operation_link.operation_id.lot_id.origin:
reference_list += operation_link.operation_id.lot_id.origin.split(
";")
if add_reference not in reference_list:
reference_list.append(add_reference)
origin = ";".join(reference_list) or False
if origin:
operation_link.operation_id.lot_id.origin = origin
return res
class stock_quant(models.Model):
_inherit = 'stock.quant'
@api.model
def _quants_get_order(self, location, product, quantity, domain=[],
orderby='in_date'):
''' Implementation of removal strategies
If it can not reserve, it will return a tuple (None, qty)
'''
context = self._context
domain += location and [('location_id', 'child_of', location.id)] or []
domain += [('product_id', '=', product.id)]
if context.get('force_company'):
domain += [('company_id', '=', context.get('force_company'))]
else:
domain += [('company_id', '=', self.env.user.company_id.id)]
res = []
offset = 0
while float_compare(quantity, 0,
precision_rounding=product.uom_id.rounding) > 0:
quants = self.search(
domain, order=orderby, limit=10, offset=offset)
if not quants:
res.append((None, quantity))
break
for quant in quants:
# Here we implement a change that affects FEFO removal strategy
# (orderby = 'removal_date, in_date, id'):
# If a quant is already expired (removal_date < current date),
# skip it and send a warning message.
if orderby == 'removal_date, in_date, id':
if (quant.removal_date
and quant.removal_date <
time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
):
if ('chatter_model' in context and \
context.get('chatter_model', False) and 'chatter_id' in context
and context.get('chatter_id', False)
):
model = self.env[context['chatter_model']]
# maybe our active model class does not inherit
# from 'mail.thread'
try:
record = model.browse(context['chatter_id'])
message = _('A quant of lot %s has been '
'ignored because it seems to '
'have expired.\nPlease check it'
' and, if needed, remove the '
'whole lot from stock.'
) % (quant.lot_id.name,)
record.message_post(
message, _('An expired lot must be '
'retired!'), context=context)
finally:
# these pops throw an error:
# raise NotImplementedError("'pop' not supported
# on frozendict")
# self._context.pop('chatter_model')
# self._context.pop('chatter_id')
pass
continue
rounding = product.uom_id.rounding
if float_compare(quantity, abs(quant.qty),
precision_rounding=rounding) >= 0:
res += [(quant, abs(quant.qty))]
quantity -= abs(quant.qty)
elif float_compare(quantity, 0.0,
precision_rounding=rounding) != 0:
res += [(quant, quantity)]
quantity = 0
break
offset += 10
return res
class stock_production_lot(models.Model):
_inherit = 'stock.production.lot'
@api.model
def _get_lotname(self):
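        # Default lot name depends on the product's lot creation mode: no default in
        # 'manual' mode, the product's own sequence in 'auto' mode when one is set,
        # and the generic 'stock.lot.serial' sequence otherwise.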
context = self._context
if context.get('product_id', False):
product = self.env['product.product'].browse(context['product_id'])
if product.lot_creation_mode == 'manual':
return False
elif product.lot_creation_mode == 'auto' and product.lot_sequence:
return self.env['ir.sequence'].get_id(product.lot_sequence.id)
return self.env['ir.sequence'].get_id('stock.lot.serial',
code_or_id='code')
name = fields.Char('Lot number', required=True,
help="Unique lot/serial alphanumeric code.",
index=True, copy=False, default=_get_lotname)
origin = fields.Char(
'Origin', size=200, help="Reference of the document in which "
"this lot was created (received or manufactured).", index=True)
destination = fields.Char(
'Destination', size=200, help="Reference of the the documents "
"in which this lot was used (consumed or served).", index=True)
class stock_transfer_details_items(models.TransientModel):
_inherit = 'stock.transfer_details_items'
def _get_quick_lot_creation_allowed(self):
for detail in self:
if detail.product_id.track_incoming:
# if (detail.packop_id
# and detail.packop_id.picking_id
# and detail.packop_id.picking_id.picking_type_code == 'incoming'):
# detail.allows_quick_lot_creating = True
detail.allows_quick_lot_creating = True
else:
detail.allows_quick_lot_creating = False
allows_quick_lot_creating = fields.Boolean('Quick lot creation allowed',
compute=_get_quick_lot_creation_allowed,
help="Technical field that "
"determines if quick lot "
"creation button is shown "
"for each detail row in "
"transfer wizard.")
@api.multi
def quick_lot_create(self):
for detail in self:
if (detail.product_id
and detail.product_id.lot_creation_mode == 'auto'
and (detail.product_id.track_incoming or
detail.product_id.track_outgoing or
detail.product_id.track_all)
):
self.lot_id = self.env['stock.production.lot'].with_context(
product_id=detail.product_id.id).create({})
else:
raise except_orm(_('Warning!'),
_('Product has not lot tracking enabled, or '
'has lot creation mode set to \'manual\'. '
'A new lot number won\'t be automatically '
'created.'))
if self and self[0]:
return self[0].transfer_id.wizard_view()
| agpl-3.0 | 1,831,910,691,091,152,100 | 47.290909 | 107 | 0.483528 | false |
Antonio-Team/enigma2 | lib/python/Screens/Ci.py | 2 | 18127 | from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Tools.BoundFunction import boundFunction
from Components.Sources.StaticText import StaticText
from Components.ActionMap import ActionMap
from Components.ActionMap import NumberActionMap
from Components.Label import Label
from Components.config import config, ConfigSubsection, ConfigSelection, ConfigSubList, getConfigListEntry, KEY_LEFT, KEY_RIGHT, KEY_0, ConfigNothing, ConfigPIN, ConfigYesNo, NoSave
from Components.ConfigList import ConfigList, ConfigListScreen
from Components.SystemInfo import SystemInfo
from enigma import eTimer, eDVBCI_UI, eDVBCIInterfaces
import Screens.Standby
forceNotShowCiMessages = False
def setCIBitrate(configElement):
eDVBCI_UI.getInstance().setClockRate(configElement.slotid, eDVBCI_UI.rateNormal if configElement.value == "no" else eDVBCI_UI.rateHigh)
def setdvbCiDelay(configElement):
open(SystemInfo["CommonInterfaceCIDelay"], "w").write(configElement.value)
configElement.save()
def setRelevantPidsRouting(configElement):
open(SystemInfo["CI%dRelevantPidsRoutingSupport" % configElement.slotid], "w").write("yes" if configElement.value else "no")
def InitCiConfig():
config.ci = ConfigSubList()
config.cimisc = ConfigSubsection()
if SystemInfo["CommonInterface"]:
for slot in range(SystemInfo["CommonInterface"]):
config.ci.append(ConfigSubsection())
config.ci[slot].canDescrambleMultipleServices = ConfigSelection(choices = [("auto", _("auto")), ("no", _("no")), ("yes", _("yes"))], default = "auto")
config.ci[slot].use_static_pin = ConfigYesNo(default = True)
config.ci[slot].static_pin = ConfigPIN(default = 0)
config.ci[slot].show_ci_messages = ConfigYesNo(default = True)
if SystemInfo["CI%dSupportsHighBitrates" % slot]:
config.ci[slot].canHandleHighBitrates = ConfigYesNo(default = True)
config.ci[slot].canHandleHighBitrates.slotid = slot
config.ci[slot].canHandleHighBitrates.addNotifier(setCIBitrate)
if SystemInfo["CI%dRelevantPidsRoutingSupport" % slot]:
config.ci[slot].relevantPidsRouting = ConfigYesNo(default = False)
config.ci[slot].relevantPidsRouting.slotid = slot
config.ci[slot].relevantPidsRouting.addNotifier(setRelevantPidsRouting)
if SystemInfo["CommonInterfaceCIDelay"]:
config.cimisc.dvbCiDelay = ConfigSelection(default = "256", choices = [("16"), ("32"), ("64"), ("128"), ("256")])
config.cimisc.dvbCiDelay.addNotifier(setdvbCiDelay)
class MMIDialog(Screen):
def __init__(self, session, slotid, action, handler=eDVBCI_UI.getInstance(), wait_text="", screen_data=None):
Screen.__init__(self, session)
print "MMIDialog with action" + str(action)
self.mmiclosed = False
self.tag = None
self.slotid = slotid
self.timer = eTimer()
self.timer.callback.append(self.keyCancel)
#else the skins fails
self["title"] = Label("")
self["subtitle"] = Label("")
self["bottom"] = Label("")
self["entries"] = ConfigList([ ])
self["actions"] = NumberActionMap(["SetupActions", "MenuActions"],
{
"ok": self.okbuttonClick,
"cancel": self.keyCancel,
"menu": self.forceExit,
#for PIN
"left": self.keyLeft,
"right": self.keyRight,
"1": self.keyNumberGlobal,
"2": self.keyNumberGlobal,
"3": self.keyNumberGlobal,
"4": self.keyNumberGlobal,
"5": self.keyNumberGlobal,
"6": self.keyNumberGlobal,
"7": self.keyNumberGlobal,
"8": self.keyNumberGlobal,
"9": self.keyNumberGlobal,
"0": self.keyNumberGlobal
}, -1)
self.action = action
self.screen_data = screen_data
self.is_pin_list = -1
self.handler = handler
if wait_text == "":
self.wait_text = _("wait for ci...")
else:
self.wait_text = wait_text
if action == 2: #start MMI
handler.startMMI(self.slotid)
self.showWait()
elif action == 3: #mmi already there (called from infobar)
self.showScreen()
def addEntry(self, list, entry):
if entry[0] == "TEXT": #handle every item (text / pin only?)
list.append( (entry[1], ConfigNothing(), entry[2]) )
if entry[0] == "PIN":
pinlength = entry[1]
if entry[3] == 1:
# masked pins:
x = ConfigPIN(0, len = pinlength, censor = "*")
else:
# unmasked pins:
x = ConfigPIN(0, len = pinlength)
x.addEndNotifier(self.pinEntered)
self["subtitle"].setText(entry[2])
list.append( getConfigListEntry("", x) )
self["bottom"].setText(_("please press OK when ready"))
def pinEntered(self, value):
self.okbuttonClick()
def okbuttonClick(self):
self.timer.stop()
if not self.tag:
return
if self.tag == "WAIT":
print "do nothing - wait"
elif self.tag == "MENU":
print "answer MENU"
cur = self["entries"].getCurrent()
if cur:
self.handler.answerMenu(self.slotid, cur[2])
else:
self.handler.answerMenu(self.slotid, 0)
self.showWait()
elif self.tag == "LIST":
print "answer LIST"
self.handler.answerMenu(self.slotid, 0)
self.showWait()
elif self.tag == "ENQ":
cur = self["entries"].getCurrent()
answer = str(cur[1].value)
length = len(answer)
while length < cur[1].getLength():
answer = '0' + answer
length += 1
self.answer = answer
if config.ci[self.slotid].use_static_pin.value:
self.session.openWithCallback(self.save_PIN_CB, MessageBox, _("Would you save the entered PIN %s persistent?") % self.answer, MessageBox.TYPE_YESNO)
else:
self.save_PIN_CB(False)
def save_PIN_CB(self, ret=None):
if ret:
config.ci[self.slotid].static_pin.value = self.answer
config.ci[self.slotid].static_pin.save()
self.handler.answerEnq(self.slotid, self.answer)
self.showWait()
def closeMmi(self):
self.timer.stop()
self.close(self.slotid)
def forceExit(self):
self.timer.stop()
if self.tag == "WAIT":
self.handler.stopMMI(self.slotid)
global forceNotShowCiMessages
forceNotShowCiMessages = True
self.close(self.slotid)
def keyCancel(self):
self.timer.stop()
if not self.tag or self.mmiclosed:
self.closeMmi()
elif self.tag == "WAIT":
self.handler.stopMMI(self.slotid)
self.closeMmi()
elif self.tag in ( "MENU", "LIST" ):
print "cancel list"
self.handler.answerMenu(self.slotid, 0)
self.showWait()
elif self.tag == "ENQ":
print "cancel enq"
self.handler.cancelEnq(self.slotid)
self.showWait()
else:
print "give cancel action to ci"
def keyConfigEntry(self, key):
self.timer.stop()
try:
self["entries"].handleKey(key)
if self.is_pin_list == 4:
self.okbuttonClick()
except:
pass
def keyNumberGlobal(self, number):
self.timer.stop()
if self.is_pin_list > -1:
self.is_pin_list += 1
self.keyConfigEntry(KEY_0 + number)
def keyLeft(self):
self.timer.stop()
if self.is_pin_list > 0:
self.is_pin_list += -1
self.keyConfigEntry(KEY_LEFT)
def keyRight(self):
self.timer.stop()
if self.is_pin_list > -1 and self.is_pin_list < 4:
self.is_pin_list += 1
self.keyConfigEntry(KEY_RIGHT)
def updateList(self, list):
List = self["entries"]
try:
List.instance.moveSelectionTo(0)
except:
pass
List.l.setList(list)
def showWait(self):
self.tag = "WAIT"
self["title"].setText("")
self["subtitle"].setText("")
self["bottom"].setText("")
list = [ ]
list.append( (self.wait_text, ConfigNothing()) )
self.updateList(list)
def showScreen(self):
if self.screen_data is not None:
screen = self.screen_data
self.screen_data = None
else:
screen = self.handler.getMMIScreen(self.slotid)
list = [ ]
self.timer.stop()
if len(screen) > 0 and screen[0][0] == "CLOSE":
timeout = screen[0][1]
self.mmiclosed = True
if timeout > 0:
self.timer.start(timeout*1000, True)
else:
self.keyCancel()
else:
self.mmiclosed = False
self.tag = screen[0][0]
for entry in screen:
if entry[0] == "PIN":
if config.ci[self.slotid].use_static_pin.value and str(config.ci[self.slotid].static_pin.value) != "0":
answer = str(config.ci[self.slotid].static_pin.value)
length = len(answer)
while length < config.ci[self.slotid].static_pin.getLength():
answer = '0' + answer
length += 1
self.handler.answerEnq(self.slotid, answer)
self.showWait()
break
else:
self.is_pin_list = 0
self.addEntry(list, entry)
else:
if entry[0] == "TITLE":
self["title"].setText(entry[1])
elif entry[0] == "SUBTITLE":
self["subtitle"].setText(entry[1])
elif entry[0] == "BOTTOM":
self["bottom"].setText(entry[1])
elif entry[0] == "TEXT":
self.addEntry(list, entry)
self.updateList(list)
def ciStateChanged(self):
do_close = False
if self.action == 0: #reset
do_close = True
if self.action == 1: #init
do_close = True
#module still there ?
if self.handler.getState(self.slotid) != 2:
do_close = True
#mmi session still active ?
if self.handler.getMMIState(self.slotid) != 1:
do_close = True
if do_close:
self.closeMmi()
elif self.action > 1 and self.handler.availableMMI(self.slotid) == 1:
self.showScreen()
#FIXME: check for mmi-session closed
class CiMessageHandler:
def __init__(self):
self.session = None
self.auto_close = False
self.ci = { }
self.dlgs = { }
eDVBCI_UI.getInstance().ciStateChanged.get().append(self.ciStateChanged)
def setSession(self, session):
self.session = session
def ciStateChanged(self, slot):
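		# Slots with a registered handler are delegated to it. Otherwise, state
		# changes are forwarded to an already open MMI dialog; if a new MMI screen
		# becomes available, PIN enquiries are answered from the stored static PIN
		# where configured, or the dialog is shown to the user.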
if slot in self.ci:
self.ci[slot](slot)
else:
handler = eDVBCI_UI.getInstance()
if slot in self.dlgs:
self.dlgs[slot].ciStateChanged()
elif handler.availableMMI(slot) == 1:
if self.session:
show_ui = False
if config.ci[slot].show_ci_messages.value:
show_ui = True
screen_data = handler.getMMIScreen(slot)
if config.ci[slot].use_static_pin.value:
if screen_data is not None and len(screen_data):
ci_tag = screen_data[0][0]
if ci_tag == 'ENQ' and len(screen_data) >= 2 and screen_data[1][0] == 'PIN':
if str(config.ci[slot].static_pin.value) == "0":
show_ui = True
else:
answer = str(config.ci[slot].static_pin.value)
length = len(answer)
while length < config.ci[slot].static_pin.getLength():
answer = '0' + answer
length += 1
handler.answerEnq(slot, answer)
show_ui = False
self.auto_close = True
elif ci_tag == 'CLOSE' and self.auto_close:
show_ui = False
self.auto_close = False
if show_ui and not forceNotShowCiMessages and not Screens.Standby.inStandby:
self.dlgs[slot] = self.session.openWithCallback(self.dlgClosed, MMIDialog, slot, 3, screen_data = screen_data)
def dlgClosed(self, slot):
if slot in self.dlgs:
del self.dlgs[slot]
def registerCIMessageHandler(self, slot, func):
self.unregisterCIMessageHandler(slot)
self.ci[slot] = func
def unregisterCIMessageHandler(self, slot):
if slot in self.ci:
del self.ci[slot]
CiHandler = CiMessageHandler()
class CiSelection(Screen):
def __init__(self, session):
Screen.__init__(self, session)
self["actions"] = ActionMap(["OkCancelActions", "CiSelectionActions"],
{
"left": self.keyLeft,
"right": self.keyLeft,
"ok": self.okbuttonClick,
"cancel": self.cancel
},-1)
self.dlg = None
self.state = { }
self.list = [ ]
self.slot = 0
for slot in range(SystemInfo["CommonInterface"]):
state = eDVBCI_UI.getInstance().getState(slot)
if state != -1:
self.slot += 1
self.appendEntries(slot, state)
CiHandler.registerCIMessageHandler(slot, self.ciStateChanged)
menuList = ConfigList(self.list)
menuList.list = self.list
menuList.l.setList(self.list)
self["entries"] = menuList
self["entries"].onSelectionChanged.append(self.selectionChanged)
self["text"] = Label(_("Slot %d") % 1)
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
global forceNotShowCiMessages
forceNotShowCiMessages = False
self.setTitle(_("Common Interface"))
def selectionChanged(self):
if self.slot > 1:
cur = self["entries"].getCurrent()
if cur and len(cur) > 2:
self["text"].setText(cur[0] == "**************************" and " " or cur[0] == _("DVB CI Delay") and _("All slots") or _("Slot %d") % (cur[3] + 1))
def keyConfigEntry(self, key):
try:
self["entries"].handleKey(key)
self["entries"].getCurrent()[1].save()
except:
pass
def keyLeft(self):
self.keyConfigEntry(KEY_LEFT)
def keyRight(self):
self.keyConfigEntry(KEY_RIGHT)
def appendEntries(self, slot, state):
self.state[slot] = state
if self.slot > 1:
self.list.append(("**************************", ConfigNothing(), 3, slot))
self.list.append((_("Reset"), ConfigNothing(), 0, slot))
self.list.append((_("Init"), ConfigNothing(), 1, slot))
if self.state[slot] == 0: #no module
self.list.append((_("no module found"), ConfigNothing(), 2, slot))
elif self.state[slot] == 1: #module in init
self.list.append((_("init module"), ConfigNothing(), 2, slot))
elif self.state[slot] == 2: #module ready
appname = eDVBCI_UI.getInstance().getAppName(slot)
self.list.append((appname, ConfigNothing(), 2, slot))
self.list.append(getConfigListEntry(_("Set pin code persistent"), config.ci[slot].use_static_pin, 3, slot))
self.list.append((_("Enter persistent PIN code"), ConfigNothing(), 5, slot))
self.list.append((_("Reset persistent PIN code"), ConfigNothing(), 6, slot))
self.list.append(getConfigListEntry(_("Show CI messages"), config.ci[slot].show_ci_messages, 3, slot))
self.list.append(getConfigListEntry(_("Multiple service support"), config.ci[slot].canDescrambleMultipleServices, 3, slot))
if SystemInfo["CI%dSupportsHighBitrates" % slot]:
self.list.append(getConfigListEntry(_("High bitrate support"), config.ci[slot].canHandleHighBitrates, 3, slot))
if SystemInfo["CI%dRelevantPidsRoutingSupport" % slot]:
self.list.append(getConfigListEntry(_("Relevant PIDs routing"), config.ci[slot].relevantPidsRouting, 3, slot))
if SystemInfo["CommonInterfaceCIDelay"]:
self.list.append(getConfigListEntry(_("DVB CI Delay"), config.cimisc.dvbCiDelay, 3, slot))
def updateState(self, slot):
state = eDVBCI_UI.getInstance().getState(slot)
self.state[slot] = state
slotidx = 0
while len(self.list[slotidx]) < 3 or self.list[slotidx][3] != slot:
slotidx += 1
slotidx += 1 #do not change Reset
slotidx += 1 #do not change Init
if state == 0: #no module
self.list[slotidx] = (_("no module found"), ConfigNothing(), 2, slot)
elif state == 1: #module in init
self.list[slotidx] = (_("init module"), ConfigNothing(), 2, slot)
elif state == 2: #module ready
appname = eDVBCI_UI.getInstance().getAppName(slot)
self.list[slotidx] = (appname, ConfigNothing(), 2, slot)
lst = self["entries"]
lst.list = self.list
lst.l.setList(self.list)
def ciStateChanged(self, slot):
if self.dlg:
self.dlg.ciStateChanged()
else:
state = eDVBCI_UI.getInstance().getState(slot)
if self.state[slot] != state:
self.state[slot] = state
self.updateState(slot)
def dlgClosed(self, slot):
self.dlg = None
def okbuttonClick(self):
cur = self["entries"].getCurrent()
if cur and len(cur) > 2:
action = cur[2]
slot = cur[3]
if action == 3:
pass
elif action == 0: #reset
eDVBCI_UI.getInstance().setReset(slot)
elif action == 1: #init
eDVBCI_UI.getInstance().setInit(slot)
elif action == 5:
self.session.openWithCallback(self.cancelCB, PermanentPinEntry, config.ci[slot].static_pin, _("Smartcard PIN"))
elif action == 6:
config.ci[slot].static_pin.value = 0
config.ci[slot].static_pin.save()
self.session.openWithCallback(self.cancelCB, MessageBox, _("The saved PIN was cleared."), MessageBox.TYPE_INFO)
elif self.state[slot] == 2:
self.dlg = self.session.openWithCallback(self.dlgClosed, MMIDialog, slot, action)
def cancelCB(self, value):
pass
def cancel(self):
for slot in range(SystemInfo["CommonInterface"]):
state = eDVBCI_UI.getInstance().getState(slot)
if state != -1:
CiHandler.unregisterCIMessageHandler(slot)
self.close()
class PermanentPinEntry(Screen, ConfigListScreen):
def __init__(self, session, pin, pin_slot):
Screen.__init__(self, session)
self.skinName = ["ParentalControlChangePin", "Setup" ]
self.setup_title = _("Enter pin code")
self.onChangedEntry = [ ]
self.slot = pin_slot
self.pin = pin
self.list = []
self.pin1 = ConfigPIN(default = 0, censor = "*")
self.pin2 = ConfigPIN(default = 0, censor = "*")
self.pin1.addEndNotifier(boundFunction(self.valueChanged, 1))
self.pin2.addEndNotifier(boundFunction(self.valueChanged, 2))
self.list.append(getConfigListEntry(_("Enter PIN"), NoSave(self.pin1)))
self.list.append(getConfigListEntry(_("Reenter PIN"), NoSave(self.pin2)))
ConfigListScreen.__init__(self, self.list)
self["actions"] = NumberActionMap(["DirectionActions", "ColorActions", "OkCancelActions"],
{
"cancel": self.cancel,
"red": self.cancel,
"save": self.keyOK,
}, -1)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("OK"))
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setTitle(self.setup_title)
def valueChanged(self, pin, value):
if pin == 1:
self["config"].setCurrentIndex(1)
elif pin == 2:
self.keyOK()
def keyOK(self):
if self.pin1.value == self.pin2.value:
self.pin.value = self.pin1.value
self.pin.save()
self.session.openWithCallback(self.close, MessageBox, _("The PIN code has been saved successfully."), MessageBox.TYPE_INFO)
else:
self.session.open(MessageBox, _("The PIN codes you entered are different."), MessageBox.TYPE_ERROR)
def cancel(self):
self.close(None)
def keyNumberGlobal(self, number):
ConfigListScreen.keyNumberGlobal(self, number)
def changedEntry(self):
for x in self.onChangedEntry:
x()
def getCurrentEntry(self):
return self["config"].getCurrent()[0]
def getCurrentValue(self):
return str(self["config"].getCurrent()[1].getText())
def createSummary(self):
from Screens.Setup import SetupSummary
return SetupSummary
| gpl-2.0 | -5,231,612,498,419,194,000 | 30.857645 | 181 | 0.677332 | false |
donovanhide/BitMagic | bm/__init__.py | 1 | 4167 | ## Copyright(c) 2009 William Waites <wwaites_at_gmail.com>
##
## Permission is hereby granted, free of charge, to any person
## obtaining a copy of this software and associated documentation
## files (the "Software"), to deal in the Software without restriction,
## including without limitation the rights to use, copy, modify, merge,
## publish, distribute, sublicense, and/or sell copies of the Software,
## and to permit persons to whom the Software is furnished to do so,
## subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included
## in all copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
## EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
## OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
## IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
## DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
## ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
## OTHER DEALINGS IN THE SOFTWARE.
from bm_ext import *
__all__ = ["BitVector", "strat"]
class BitVector(object):
"""
>>> v = BitVector()
>>> v.resize(64)
>>> for i in range(32): v[i*2] = True
...
>>> print v
<BitVector 10101010101010101010101010101010...>
>>> print v[0], v[1]
True False
>>>
### the length and number of bits that are set
>>> print len(v), v.count()
64 32
>>>
### bitwise NOT
>>> u = ~v
>>> print u
<BitVector 01010101010101010101010101010101...>
>>>
### bitwise AND
>>> print v&u
<BitVector 00000000000000000000000000000000...>
>>>
### bitwise OR
>>> print v|u
<BitVector 11111111111111111111111111111111...>
>>>
### iteration
>>> v = BitVector()
>>> v.resize(10)
>>> for i in range(5): v[i*2] = True
...
>>> for k in v: print k
...
0
2
4
6
8
>>>
"""
def __init__(self, v = None):
if v is None:
v = bvector()
v.set_new_blocks_strat(strat.GAP)
self.__vector__ = v
self.count = v.count
self.size = v.size
self.resize = v.resize
self.capacity = v.capacity
self.set = v.set
self.any = v.any
self.none = v.none
self.calc_stat = v.calc_stat
self.optimize = v.optimize
self.serialize = v.serialize
self.deserialize = v.deserialize
self.set_new_blocks_strat = v.set_new_blocks_strat
def __str__(self):
def _s():
i = 0
g = iter(self)
size = len(self)
max_size = 32
while i < min(max_size, size):
try:
one = g.next()
zeros = min(max_size, one) - i
if zeros > 0:
yield "0"*zeros
i = i + zeros
if one < max_size:
yield "1"
i = i + 1
except StopIteration:
zeros = min(max_size, size) - i
if zeros > 0:
yield "0"*zeros
i = i + zeros
if i < size:
yield "..."
return "<BitVector %s>" % ("".join(_s()),)
def __len__(self):
return len(self.__vector__)
def __setitem__(self, k, v):
self.__vector__[k] = v
def __getitem__(self, k):
return self.__vector__[k]
def __and__(self, other):
if isinstance(other, BitVector):
other = other.__vector__
return BitVector(self.__vector__ & other)
def __or__(self, other):
if isinstance(other, BitVector):
other = other.__vector__
return BitVector(self.__vector__ | other)
def __invert__(self):
return BitVector(~self.__vector__)
def __eq__(self, other):
if isinstance(other, BitVector):
other = other.__vector__
return self.__vector__ == other
def __iter__(self):
e = enumerator(self.__vector__, 0)
end = enumerator(self.__vector__, 1)
while True:
if e < end:
yield e.value()
else:
break
e.next()
def clear(self, free=False):
self.__vector__.clear(free)
def print_stats(self):
st = statistics()
self.calc_stat(st)
print "Size:".rjust(25), len(self)
print "Bits count:".rjust(25), self.count()
print "Bit blocks:".rjust(25), st.bit_blocks
print "GAP blocks:".rjust(25), st.gap_blocks
print "Memory used:".rjust(25), "%.02fMB" % (float(st.memory_used) / 1024 / 1024)
if __name__ == '__main__':
import doctest
doctest.testmod()
| mit | 6,690,548,970,383,246,000 | 25.373418 | 84 | 0.62947 | false |
sjdv1982/seamless | docs/archive/0.2-cleanup/fireworks/tutorial/cell-display-numpy.py | 1 | 2519 | from PyQt5.QtWidgets import QMainWindow, QLabel, QWidget, QFrame, QSizePolicy
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtCore import Qt, QSize
import numpy as np
w = QMainWindow(size=QSize(640, 640))
ww = QWidget()
w.setCentralWidget(ww)
asp = AspectLayout(1.0)
ww.setLayout(asp)
w.setWindowFlags(Qt.WindowStaysOnTopHint)
l = QLabel()
l.setScaledContents(True)
l.setSizePolicy(QSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum))
asp.addWidget(l)
l.setParent(ww)
l.setFrameStyle(QFrame.NoFrame)
w.show()
def update():
if PINS.title.updated:
w.setWindowTitle(PINS.title.get())
global arr
arr = PINS.array.get()
assert arr.dtype in (float, np.float32, np.uint8), arr.dtype
arr = np.ascontiguousarray(arr)
if arr.ndim == 1:
arr = arr.reshape((len(arr), 1))
if arr.ndim == 3:
if arr.shape[-1] == 4:
arr = np.ascontiguousarray(arr[:,:,:3])
assert arr.shape[-1] == 3
if arr.dtype == np.uint8:
arr_norm_255 = arr
else:
amin = arr.min(axis=0).min(axis=0)
amax = arr.max(axis=0).max(axis=0)
arange = np.maximum(amax-amin, 1e-12)
arr_norm = (arr - amin) / arange
arr_norm_255 = ((arr_norm- 1e-6)*256).astype(np.uint8)
width, height = arr.shape[1], arr.shape[0]
im = QImage(arr_norm_255, width, height, width*3, QImage.Format_RGB888)
elif arr.ndim == 2:
if arr.dtype == np.uint8:
arr_norm_255 = arr
else:
amin = arr.min()
amax = arr.max()
arange = np.maximum(amax-amin, 1e-12)
arr_norm = (arr - amin) / arange
arr_norm_255 = ((arr_norm- 1e-6)*256).astype(np.uint8)
arr_color = np.zeros((arr.shape) + (3,), dtype=np.uint8)
arr_color[:,:,0] = arr_norm_255
arr_color[:,:,1] = 128 - np.abs(arr_norm_255.astype(int)-128)
arr_color[:,:,2] = 255 - arr_norm_255
width, height = arr_color.shape[1], arr_color.shape[0]
im = QImage(arr_color, width, height, width*3, QImage.Format_RGB888)
pm = QPixmap.fromImage(im)
aspect = width / height
asp.aspect = aspect
cwidth, cheight = w.size().width(), w.size().height()
l.setPixmap(pm)
l.setMinimumSize(1,1)
scalex = width/cwidth
scaley = height/cheight
scale = max(scalex, scaley)
if scale > 1:
w.resize(width/scale, height/scale)
w.updateGeometry()
def destroy():
global w, l
del l
del w
#update()
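# Hedged standalone sketch (illustration only, not part of the original cell)
# of the min/max normalisation used in update() above: scale an arbitrary
# float array into the 0..255 range that QImage expects.
def _normalise_to_uint8(arr):
    amin = arr.min()
    amax = arr.max()
    arange = np.maximum(amax - amin, 1e-12)
    arr_norm = (arr - amin) / arange
    return ((arr_norm - 1e-6) * 256).astype(np.uint8)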
| mit | 7,318,279,161,187,710,000 | 31.714286 | 79 | 0.598253 | false |
emilbjorklund/django-simplewebmentions | simplewebmentions/views.py | 1 | 4196 | """
TODO: send relevant signals when creating, deleting, unpublishing etc...
TODO: How best to connect the various bits that we can read from the URLs?
"""
from __future__ import unicode_literals
from urlparse import urlparse
from webmentiontools.urlinfo import UrlInfo
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, Http404, HttpResponseNotAllowed
from django.core.urlresolvers import resolve, reverse
from django.shortcuts import render_to_response
from django.views.generic import View, DetailView
from django.views.defaults import bad_request
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
from simplewebmentions.helpers import (
verify_params, is_valid_target, get_source_data,
mention_status_check, delete_if_existing, get_article_text)
from simplewebmentions.models import (
WebMention, MENTION_STATUS_UNMODERATED, MENTION_STATUS_DELETED)
class WebMentionDetail(View):
def dispatch(self, request, *args, **kwargs):
allowed_methods = ['GET', 'HEAD']
if request.method not in allowed_methods:
return HttpResponseNotAllowed(allowed_methods)
mention = get_object_or_404(WebMention, **kwargs)
message, status = mention_status_check(mention)
return HttpResponse(message, status=status)
class WebMentionEndpoint(View):
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
return super(WebMentionEndpoint, self).dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
"""
Doing a get request should return a nice overview HTML page.
"""
response = render_to_response('webmentions/webmention_endpoint.html')
response.Link = reverse('webmention_endpoint')
return response
def post(self, request, *args, **kwargs):
"""
Handles post requests to our endpoint. Should check parameters
and trigger WebMention creation if present and correct.
"""
if not verify_params(request.POST):
return bad_request(request)
target = request.POST['target']
source = request.POST['source']
match = is_valid_target(target, request)
# Does the target exist on the site, and is there a source to parse?
if not match:
"""
If there doesn't seem to be content representing the target,
the webmention is rejected.
"""
delete_if_existing(source, target)
return bad_request(request)
# Use webmention-tools to try and fetch/parse the source
source_data = get_source_data(source)
# Is there some source data to parse?
if source_data.error:
"""
The source data could not be parsed by webmention-tools,
webmention is rejected.
"""
delete_if_existing(source, target)
return bad_request(request)
if not source_data.linksTo(target):
"""
If the source page does not contain a link back to the target,
the mention is rejected.
"""
delete_if_existing(source, target)
return bad_request(request)
target_app = match.app_name
mention = WebMention(
source=source,
target=target,
source_title=source_data.title(),
target_app=target_app or "",
source_link_excerpt=source_data.snippetWithLink(source_data.url) or "",
source_pub_date=source_data.pubDate(),
author_img_url=source_data.image() or "",
source_text=get_article_text(source_data.soup)
)
mention.save()
return HttpResponse(mention.get_absolute_url(), status=202)
def head(self, request, *args, **kwargs):
"""
Basically, disallow HEAD requests to the endpoint.
"""
return HttpResponseNotAllowed(['POST', 'GET'])
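# --- Hedged illustration, not part of the original module ---
# A client submits a webmention by POSTing "source" and "target" form fields
# to the endpoint above; a 202 response means the mention was accepted, a 400
# response means validation failed. A sketch using Django's test client (both
# URLs are placeholders):
def _example_submit_webmention():
    from django.test import Client
    client = Client()
    return client.post(reverse('webmention_endpoint'), {
        'source': 'http://example.org/a-post/',
        'target': 'http://example.com/my-article/',
    })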
| mit | -3,596,693,950,043,964,400 | 32.677686 | 83 | 0.624881 | false |
wikimedia/ve-needcheck-reporter-bot | ircecho.py | 1 | 1213 | # Quick and ugly script to echo something to a given IRC channel
# Alex Monk, 2014-07-22
from socket import socket, AF_INET, SOCK_STREAM
def ircecho(nick, channel, message, host = "chat.freenode.net", port = 6667):
s = socket(AF_INET, SOCK_STREAM)
s.connect((host, port))
f = s.makefile()
def readLineWithoutServername(f):
l = f.readline().strip()
print(l)
return l[l.find(" ") + 1:]
def send(s, text):
s.send(text + "\r\n")
print("> " + text)
while True:
line = readLineWithoutServername(f)
if line == "NOTICE * :*** No Ident response" or line == "NOTICE * :*** Got Ident response":
send(s, "user " + nick + " 0 0 :" + nick)
send(s, "nick " + nick)
break
while True:
line = readLineWithoutServername(f)
if line == "376 " + nick + " :End of /MOTD command.":
send(s, "join " + channel)
break
elif line == "433 * " + nick + " :Nickname is already in use.":
nick += "_"
send(s, "nick " + nick)
while True:
line = readLineWithoutServername(f)
if line == "366 " + nick + " " + channel + " :End of /NAMES list.":
for messageLine in message.splitlines():
send(s, "privmsg " + channel + " :" + messageLine)
send(s, "quit :Done")
s.close()
break
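# Hedged example (not executed; nick, channel and message are placeholders):
#
#     ircecho("examplebot", "#example-channel", "Hello from ircecho")
#
# The call blocks until the message lines have been sent and the client quits.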
| mit | -5,310,313,400,393,842,000 | 27.209302 | 93 | 0.608409 | false |
ShaolongHu/Nitrate | tcms/core/contrib/linkreference/views.py | 1 | 3751 | # -*- coding: utf-8 -*-
__all__ = ('add', 'get', 'remove', )
from django.contrib.auth.decorators import user_passes_test
from django.views.decorators.http import require_GET, require_POST
from django.utils import simplejson as json
from forms import AddLinkReferenceForm, BasicValidationForm
from models import create_link, LinkReference
from tcms.core.responses import HttpJSONResponse
from tcms.core.responses import HttpJSONResponseBadRequest
from tcms.core.responses import HttpJSONResponseServerError
@user_passes_test(lambda u: u.has_perm('testruns.change_testcaserun'))
@require_POST
def add(request):
'''Add new link to a specific target
The target should be a valid model within TCMS, which are documented in
``LINKREF_TARGET``.
The incoming request must be a POST request and contain the following
arguments:
- target: the target to which the new link will point. The available target
names are documented in ``LINKREF_TARGET``.
- target_id: the ID used to construct the concrete target instance, to
which the new link will be linked.
- name: a short description of the new link; it accepts at most 64
characters.
- url: the actual URL.
'''
form = AddLinkReferenceForm(request.POST)
if form.is_valid():
name = form.cleaned_data['name']
url = form.cleaned_data['url']
target_id = form.cleaned_data['target_id']
model_class = form.cleaned_data['target']
model_instance = model_class.objects.get(pk=target_id)
create_link(name=name, url=url, link_to=model_instance)
jd = json.dumps(
{'rc': 0, 'response': 'ok',
'data': {'name': name, 'url': url}})
return HttpJSONResponse(content=jd)
else:
jd = json.dumps(
{'rc': 1, 'response': form.errors.as_text()})
return HttpJSONResponseBadRequest(content=jd)
@require_GET
def get(request):
'''Get the links attached to a specific instance of a content type
- target: The model name of the instance being searched
- target_id: The ID of the instance
Only GET requests from the client are accepted.
'''
form = BasicValidationForm(request.GET)
if form.is_valid():
model_class = form.cleaned_data['target']
target_id = form.cleaned_data['target_id']
try:
model_instance = model_class.objects.get(pk=target_id)
links = LinkReference.get_from(model_instance)
except Exception, err:
jd = json.dumps({'rc': 1, 'response': str(err)})
return HttpJSONResponseServerError(content=jd)
jd = []
for link in links:
jd.append({'name': link.name, 'url': link.url})
jd = json.dumps(jd)
return HttpJSONResponse(content=jd)
else:
jd = json.dumps(
{'rc': 1, 'response': form.errors.as_text()})
return HttpJSONResponseBadRequest(content=jd)
@user_passes_test(lambda u: u.has_perm('testruns.change_testcaserun'))
@require_GET
def remove(request, link_id):
''' Remove a specific link with ID ``link_id`` '''
from django.forms import IntegerField
from django.forms import ValidationError
field = IntegerField(min_value=1)
try:
value = field.clean(link_id)
except ValidationError, err:
jd = json.dumps({'rc': 1, 'response': '\n'.join(err.messages)})
return HttpJSONResponseBadRequest(content=jd)
try:
LinkReference.unlink(value)
except Exception, err:
jd = json.dumps({'rc': 1, 'response': str(err)})
return HttpJSONResponseBadRequest(content=jd)
return HttpJSONResponse(
content=json.dumps(
{'rc': 0,
'response': 'Link has been removed successfully.'}))
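# --- Hedged illustration, not part of the original module ---
# The three views above are meant to be wired to URLs and called via AJAX.
# A POST to add() carries form fields such as (the target names depend on the
# LINKREF_TARGET configuration defined elsewhere in the application):
#
#     target=TestCaseRun&target_id=12345&name=build log&url=https://example.com/log
#
# get() takes the same target/target_id pair as GET parameters and returns a
# JSON list of {"name": ..., "url": ...} objects; remove() takes the link's
# primary key from the URL and returns a JSON status object.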
| gpl-2.0 | -3,389,525,806,065,420,000 | 31.059829 | 77 | 0.649427 | false |
djurodrljaca/salamander-alm | server/trackermanagement/tracker_management.py | 1 | 27334 | """
Salamander ALM
Copyright (c) 2016 Djuro Drljaca
This Python module is free software; you can redistribute it and/or modify it under the terms of the
GNU General Public License as published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
This Python module is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public License along with this library. If
not, see <http://www.gnu.org/licenses/>.
"""
from database.connection import Connection
from database.database import DatabaseInterface
from database.tables.tracker_information import TrackerSelection
import datetime
from typing import List, Optional
class TrackerManagementInterface(object):
"""
Tracker management
Dependencies:
- DatabaseInterface
"""
def __init__(self):
"""
Constructor is disabled!
"""
raise RuntimeError()
@staticmethod
def read_all_tracker_ids(project_id: int,
tracker_selection=TrackerSelection.Active,
max_revision_id=None) -> List[int]:
"""
Reads all tracker IDs from the database
:param project_id: ID of the project
:param tracker_selection: Search for active, inactive or all trackers
:param max_revision_id: Maximum revision ID for the search ("None" for latest revision)
:return: List of tracker IDs
"""
connection = DatabaseInterface.create_connection()
if max_revision_id is None:
max_revision_id = DatabaseInterface.tables().revision.read_current_revision_id(
connection)
# Reads all tracker IDs from the database
trackers = None
if max_revision_id is not None:
trackers = DatabaseInterface.tables().tracker_information.read_all_tracker_ids(
connection,
project_id,
tracker_selection,
max_revision_id)
return trackers
@staticmethod
def read_tracker_by_id(tracker_id: int, max_revision_id=None) -> Optional[dict]:
"""
Reads a tracker (active or inactive) that matches the specified tracker ID
:param tracker_id: ID of the tracker
:param max_revision_id: Maximum revision ID for the search ("None" for latest revision)
:return: Tracker information object
Returned dictionary contains items:
- id
- project_id
- short_name
- full_name
- description
- active
- revision_id
"""
connection = DatabaseInterface.create_connection()
if max_revision_id is None:
max_revision_id = DatabaseInterface.tables().revision.read_current_revision_id(
connection)
# Read a tracker that matches the specified tracker ID
tracker = None
if max_revision_id is not None:
tracker = TrackerManagementInterface.__read_tracker_by_id(connection,
tracker_id,
max_revision_id)
return tracker
@staticmethod
def read_tracker_by_short_name(short_name: str, max_revision_id=None) -> Optional[dict]:
"""
Reads an active tracker that matches the specified short name
:param short_name: Tracker's short name
:param max_revision_id: Maximum revision ID for the search ("None" for latest revision)
:return: Tracker information object
Returned dictionary contains items:
- id
- project_id
- short_name
- full_name
- description
- active
- revision_id
"""
connection = DatabaseInterface.create_connection()
if max_revision_id is None:
max_revision_id = DatabaseInterface.tables().revision.read_current_revision_id(
connection)
# Read a tracker that matches the specified short name
tracker = None
if max_revision_id is not None:
tracker = TrackerManagementInterface.__read_tracker_by_short_name(connection,
short_name,
max_revision_id)
return tracker
@staticmethod
def read_trackers_by_short_name(short_name: str,
max_revision_id=None) -> List[dict]:
"""
Reads all active and inactive trackers that match the specified short name
:param short_name: Tracker's short name
:param max_revision_id: Maximum revision ID for the search ("None" for latest revision)
:return: Tracker information of all trackers that match the search attribute
Each dictionary in the returned list contains items:
- id
- project_id
- short_name
- full_name
- description
- active
- revision_id
"""
connection = DatabaseInterface.create_connection()
if max_revision_id is None:
max_revision_id = DatabaseInterface.tables().revision.read_current_revision_id(
connection)
# Read trackers that match the specified short name
trackers = list()
if max_revision_id is not None:
tracker_information_list = \
DatabaseInterface.tables().tracker_information.read_information(
connection,
"short_name",
short_name,
TrackerSelection.All,
max_revision_id)
for tracker_information in tracker_information_list:
trackers.append(TrackerManagementInterface.__parse_tracker_information(
tracker_information))
return trackers
@staticmethod
def read_tracker_by_full_name(full_name: str,
max_revision_id=None) -> Optional[dict]:
"""
Reads an active tracker that matches the specified full name
:param full_name: Tracker's full name
:param max_revision_id: Maximum revision ID for the search ("None" for latest revision)
:return: Tracker information object
Returned dictionary contains items:
- id
- project_id
- short_name
- full_name
- description
- active
- revision_id
"""
connection = DatabaseInterface.create_connection()
if max_revision_id is None:
max_revision_id = DatabaseInterface.tables().revision.read_current_revision_id(
connection)
# Read a tracker that matches the specified full name
tracker = None
if max_revision_id is not None:
tracker = TrackerManagementInterface.__read_tracker_by_full_name(connection,
full_name,
max_revision_id)
return tracker
@staticmethod
def read_trackers_by_full_name(full_name: str,
max_revision_id=None) -> List[dict]:
"""
Reads all active and inactive trackers that match the specified full name
:param full_name: Tracker's full name
:param max_revision_id: Maximum revision ID for the search ("None" for latest revision)
:return: Tracker information of all trackers that match the search attribute
Each dictionary in the returned list contains items:
- id
- project_id
- short_name
- full_name
- description
- active
- revision_id
"""
connection = DatabaseInterface.create_connection()
if max_revision_id is None:
max_revision_id = DatabaseInterface.tables().revision.read_current_revision_id(
connection)
# Read trackers that match the specified full name
trackers = list()
if max_revision_id is not None:
tracker_information_list = \
DatabaseInterface.tables().tracker_information.read_information(
connection,
"full_name",
full_name,
TrackerSelection.All,
max_revision_id)
for tracker_information in tracker_information_list:
trackers.append(TrackerManagementInterface.__parse_tracker_information(
tracker_information))
return trackers
@staticmethod
def create_tracker(requested_by_user: int,
project_id: int,
short_name: str,
full_name: str,
description: str) -> Optional[int]:
"""
Creates a new tracker
:param requested_by_user: ID of the user that requested creation of the new tracker
:param project_id: ID of the project
:param short_name: Tracker's short name
:param full_name: Tracker's full name
:param description: Tracker's description
:return: Tracker ID of the new tracker
"""
tracker_id = None
connection = DatabaseInterface.create_connection()
try:
success = connection.begin_transaction()
# Start a new revision
revision_id = None
if success:
revision_id = DatabaseInterface.tables().revision.insert_row(
connection,
datetime.datetime.utcnow(),
requested_by_user)
if revision_id is None:
success = False
# Create the tracker
if success:
tracker_id = TrackerManagementInterface.__create_tracker(connection,
project_id,
short_name,
full_name,
description,
revision_id)
if tracker_id is None:
success = False
if success:
connection.commit_transaction()
else:
connection.rollback_transaction()
except:
connection.rollback_transaction()
raise
return tracker_id
@staticmethod
def update_tracker_information(requested_by_user: int,
tracker_to_modify: int,
short_name: str,
full_name: str,
description: str,
active: bool) -> bool:
"""
Updates tracker's information
:param requested_by_user: ID of the user that requested modification of the tracker
:param tracker_to_modify: ID of the tracker that should be modified
:param short_name: Tracker's new short name
:param full_name: Tracker's new full name
:param description: Tracker's new description
:param active: Tracker's new state (active or inactive)
:return: Success or failure
"""
connection = DatabaseInterface.create_connection()
try:
success = connection.begin_transaction()
# Start a new revision
revision_id = None
if success:
revision_id = DatabaseInterface.tables().revision.insert_row(
connection,
datetime.datetime.utcnow(),
requested_by_user)
if revision_id is None:
success = False
# Check if there is already an existing tracker with the same short name
if success:
tracker = TrackerManagementInterface.__read_tracker_by_short_name(connection,
short_name,
revision_id)
if tracker is not None:
if tracker["id"] != tracker_to_modify:
success = False
# Check if there is already an existing tracker with the same full name
if success:
tracker = TrackerManagementInterface.__read_tracker_by_full_name(connection,
full_name,
revision_id)
if tracker is not None:
if tracker["id"] != tracker_to_modify:
success = False
# Update tracker's information in the new revision
if success:
row_id = DatabaseInterface.tables().tracker_information.insert_row(
connection,
tracker_to_modify,
short_name,
full_name,
description,
active,
revision_id)
if row_id is None:
success = False
if success:
connection.commit_transaction()
else:
connection.rollback_transaction()
except:
connection.rollback_transaction()
raise
return success
@staticmethod
def activate_tracker(requested_by_user: int, tracker_id: int) -> bool:
"""
Activates an inactive tracker
:param requested_by_user: ID of the user that requested activation of the tracker
:param tracker_id: ID of the tracker that should be activated
:return: Success or failure
"""
connection = DatabaseInterface.create_connection()
try:
success = connection.begin_transaction()
# Start a new revision
revision_id = None
if success:
revision_id = DatabaseInterface.tables().revision.insert_row(
connection,
datetime.datetime.utcnow(),
requested_by_user)
if revision_id is None:
success = False
# Read tracker
tracker = None
if success:
tracker = TrackerManagementInterface.__read_tracker_by_id(connection,
tracker_id,
revision_id)
if tracker is None:
success = False
elif tracker["active"]:
# Error, tracker is already active
success = False
# Activate tracker
if success:
success = DatabaseInterface.tables().tracker_information.insert_row(
connection,
tracker_id,
tracker["short_name"],
tracker["full_name"],
tracker["description"],
True,
revision_id)
if success:
connection.commit_transaction()
else:
connection.rollback_transaction()
except:
connection.rollback_transaction()
raise
return success
@staticmethod
def deactivate_tracker(requested_by_user: int, tracker_id: int) -> bool:
"""
Deactivates an active tracker
:param requested_by_user: ID of the user that requested deactivation of the tracker
:param tracker_id: ID of the tracker that should be deactivated
:return: Success or failure
"""
connection = DatabaseInterface.create_connection()
try:
success = connection.begin_transaction()
# Start a new revision
revision_id = None
if success:
revision_id = DatabaseInterface.tables().revision.insert_row(
connection,
datetime.datetime.utcnow(),
requested_by_user)
if revision_id is None:
success = False
# Read tracker
tracker = None
if success:
tracker = TrackerManagementInterface.__read_tracker_by_id(connection,
tracker_id,
revision_id)
if tracker is None:
success = False
elif not tracker["active"]:
# Error, tracker is already inactive
success = False
# Deactivate tracker
if success:
success = DatabaseInterface.tables().tracker_information.insert_row(
connection,
tracker_id,
tracker["short_name"],
tracker["full_name"],
tracker["description"],
False,
revision_id)
if success:
connection.commit_transaction()
else:
connection.rollback_transaction()
except:
connection.rollback_transaction()
raise
return success
@staticmethod
def __read_tracker_by_id(connection: Connection,
tracker_id: int,
max_revision_id: int) -> Optional[dict]:
"""
Reads a tracker (active or inactive) that matches the search parameters
:param connection: Database connection
:param tracker_id: ID of the tracker
:param max_revision_id: Maximum revision ID for the search
:return: Tracker information object
Returned dictionary contains items:
- id
- project_id
- short_name
- full_name
- description
- active
- revision_id
"""
# Read the trackers that match the search attribute
trackers = DatabaseInterface.tables().tracker_information.read_information(
connection,
"tracker_id",
tracker_id,
TrackerSelection.All,
max_revision_id)
# Return a tracker only if exactly one was found
tracker = None
if trackers is not None:
if len(trackers) == 1:
tracker = {"id": trackers[0]["tracker_id"],
"project_id": trackers[0]["project_id"],
"short_name": trackers[0]["short_name"],
"full_name": trackers[0]["full_name"],
"description": trackers[0]["description"],
"active": trackers[0]["active"],
"revision_id": trackers[0]["revision_id"]}
return tracker
@staticmethod
def __read_tracker_by_short_name(connection: Connection,
short_name: str,
max_revision_id: int) -> Optional[dict]:
"""
Reads an active tracker that matches the specified short name
:param connection: Database connection
:param short_name: Tracker's short name
:param max_revision_id: Maximum revision ID for the search
:return: Tracker information object
Returned dictionary contains items:
- id
- project_id
- short_name
- full_name
- description
- active
- revision_id
"""
# Read the trackers that match the search attribute
trackers = DatabaseInterface.tables().tracker_information.read_information(
connection,
"short_name",
short_name,
TrackerSelection.Active,
max_revision_id)
# Return a tracker only if exactly one was found
tracker = None
if trackers is not None:
if len(trackers) == 1:
tracker = {"id": trackers[0]["tracker_id"],
"project_id": trackers[0]["project_id"],
"short_name": trackers[0]["short_name"],
"full_name": trackers[0]["full_name"],
"description": trackers[0]["description"],
"active": trackers[0]["active"],
"revision_id": trackers[0]["revision_id"]}
return tracker
@staticmethod
def __read_tracker_by_full_name(connection: Connection,
full_name: str,
max_revision_id: int) -> Optional[dict]:
"""
Reads an active tracker that matches the specified full name
:param connection: Database connection
:param full_name: Tracker's full name
:param max_revision_id: Maximum revision ID for the search
:return: Tracker information object
Returned dictionary contains items:
- id
- project_id
- short_name
- full_name
- description
- active
- revision_id
"""
# Read the trackers that match the search attribute
trackers = DatabaseInterface.tables().tracker_information.read_information(
connection,
"full_name",
full_name,
TrackerSelection.Active,
max_revision_id)
# Return a tracker only if exactly one was found
tracker = None
if trackers is not None:
if len(trackers) == 1:
tracker = {"id": trackers[0]["tracker_id"],
"project_id": trackers[0]["project_id"],
"short_name": trackers[0]["short_name"],
"full_name": trackers[0]["full_name"],
"description": trackers[0]["description"],
"active": trackers[0]["active"],
"revision_id": trackers[0]["revision_id"]}
return tracker
@staticmethod
def __create_tracker(connection: Connection,
project_id: int,
short_name: str,
full_name: str,
description: str,
revision_id: int) -> Optional[int]:
"""
Creates a new tracker
:param connection: Database connection
:param project_id: ID of the project
:param short_name: Tracker's short name
:param full_name: Tracker's full name
:param description: Tracker's description
:param revision_id: Revision ID
:return: Tracker ID of the newly created tracker
"""
# Check if a tracker with the same short name already exists
tracker = TrackerManagementInterface.__read_tracker_by_short_name(connection,
short_name,
revision_id)
if tracker is not None:
return None
# Check if a tracker with the same full name already exists
tracker = TrackerManagementInterface.__read_tracker_by_full_name(connection,
full_name,
revision_id)
if tracker is not None:
return None
# Create the tracker in the new revision
tracker_id = DatabaseInterface.tables().tracker.insert_row(connection, project_id)
if tracker_id is None:
return None
# Add tracker information to the tracker
tracker_information_id = DatabaseInterface.tables().tracker_information.insert_row(
connection,
tracker_id,
short_name,
full_name,
description,
True,
revision_id)
if tracker_information_id is None:
return None
return tracker_id
@staticmethod
def __parse_tracker_information(raw_tracker_information: dict) -> dict:
"""
Parse raw tracker information object and convert it to a tracker information object
:param raw_tracker_information: Tracker information
:return: Tracker information object
Input (raw) dictionary contains items:
- project_id
- tracker_id
- short_name
- full_name
- description
- active
- revision_id
Returned dictionary contains items:
- id
- project_id
- short_name
- full_name
- description
- active
- revision_id
"""
return {"id": raw_tracker_information["tracker_id"],
"project_id": raw_tracker_information["project_id"],
"short_name": raw_tracker_information["short_name"],
"full_name": raw_tracker_information["full_name"],
"description": raw_tracker_information["description"],
"active": raw_tracker_information["active"],
"revision_id": raw_tracker_information["revision_id"]}
| gpl-2.0 | -3,854,890,622,643,095,000 | 35.013175 | 100 | 0.498207 | false |
tinloaf/home-assistant | homeassistant/components/influxdb.py | 1 | 11971 | """
A component which allows you to send data to an Influx database.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/influxdb/
"""
import logging
import re
import queue
import threading
import time
import math
import requests.exceptions
import voluptuous as vol
from homeassistant.const import (
CONF_DOMAINS, CONF_ENTITIES, CONF_EXCLUDE, CONF_HOST, CONF_INCLUDE,
CONF_PASSWORD, CONF_PORT, CONF_SSL, CONF_USERNAME, CONF_VERIFY_SSL,
EVENT_STATE_CHANGED, EVENT_HOMEASSISTANT_STOP, STATE_UNAVAILABLE,
STATE_UNKNOWN)
from homeassistant.helpers import state as state_helper
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_values import EntityValues
REQUIREMENTS = ['influxdb==5.2.0']
_LOGGER = logging.getLogger(__name__)
CONF_DB_NAME = 'database'
CONF_TAGS = 'tags'
CONF_DEFAULT_MEASUREMENT = 'default_measurement'
CONF_OVERRIDE_MEASUREMENT = 'override_measurement'
CONF_TAGS_ATTRIBUTES = 'tags_attributes'
CONF_COMPONENT_CONFIG = 'component_config'
CONF_COMPONENT_CONFIG_GLOB = 'component_config_glob'
CONF_COMPONENT_CONFIG_DOMAIN = 'component_config_domain'
CONF_RETRY_COUNT = 'max_retries'
DEFAULT_DATABASE = 'home_assistant'
DEFAULT_VERIFY_SSL = True
DOMAIN = 'influxdb'
TIMEOUT = 5
RETRY_DELAY = 20
QUEUE_BACKLOG_SECONDS = 30
BATCH_TIMEOUT = 1
BATCH_BUFFER_SIZE = 100
COMPONENT_CONFIG_SCHEMA_ENTRY = vol.Schema({
vol.Optional(CONF_OVERRIDE_MEASUREMENT): cv.string,
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.All(vol.Schema({
vol.Optional(CONF_HOST): cv.string,
vol.Inclusive(CONF_USERNAME, 'authentication'): cv.string,
vol.Inclusive(CONF_PASSWORD, 'authentication'): cv.string,
vol.Optional(CONF_EXCLUDE, default={}): vol.Schema({
vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids,
vol.Optional(CONF_DOMAINS, default=[]):
vol.All(cv.ensure_list, [cv.string])
}),
vol.Optional(CONF_INCLUDE, default={}): vol.Schema({
vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids,
vol.Optional(CONF_DOMAINS, default=[]):
vol.All(cv.ensure_list, [cv.string])
}),
vol.Optional(CONF_DB_NAME, default=DEFAULT_DATABASE): cv.string,
vol.Optional(CONF_PORT): cv.port,
vol.Optional(CONF_SSL): cv.boolean,
vol.Optional(CONF_RETRY_COUNT, default=0): cv.positive_int,
vol.Optional(CONF_DEFAULT_MEASUREMENT): cv.string,
vol.Optional(CONF_OVERRIDE_MEASUREMENT): cv.string,
vol.Optional(CONF_TAGS, default={}):
vol.Schema({cv.string: cv.string}),
vol.Optional(CONF_TAGS_ATTRIBUTES, default=[]):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
vol.Optional(CONF_COMPONENT_CONFIG, default={}):
vol.Schema({cv.entity_id: COMPONENT_CONFIG_SCHEMA_ENTRY}),
vol.Optional(CONF_COMPONENT_CONFIG_GLOB, default={}):
vol.Schema({cv.string: COMPONENT_CONFIG_SCHEMA_ENTRY}),
vol.Optional(CONF_COMPONENT_CONFIG_DOMAIN, default={}):
vol.Schema({cv.string: COMPONENT_CONFIG_SCHEMA_ENTRY}),
})),
}, extra=vol.ALLOW_EXTRA)
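# Hedged configuration sketch (YAML; the host, credentials and filters are
# placeholders) matching the schema above:
#
#   influxdb:
#     host: 192.168.1.10
#     port: 8086
#     database: home_assistant
#     username: myuser
#     password: mypassword
#     ssl: false
#     max_retries: 3
#     default_measurement: state
#     exclude:
#       domains:
#         - automation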
RE_DIGIT_TAIL = re.compile(r'^[^\.]*\d+\.?\d+[^\.]*$')
RE_DECIMAL = re.compile(r'[^\d.]+')
def setup(hass, config):
"""Set up the InfluxDB component."""
from influxdb import InfluxDBClient, exceptions
conf = config[DOMAIN]
kwargs = {
'database': conf[CONF_DB_NAME],
'verify_ssl': conf[CONF_VERIFY_SSL],
'timeout': TIMEOUT
}
if CONF_HOST in conf:
kwargs['host'] = conf[CONF_HOST]
if CONF_PORT in conf:
kwargs['port'] = conf[CONF_PORT]
if CONF_USERNAME in conf:
kwargs['username'] = conf[CONF_USERNAME]
if CONF_PASSWORD in conf:
kwargs['password'] = conf[CONF_PASSWORD]
if CONF_SSL in conf:
kwargs['ssl'] = conf[CONF_SSL]
include = conf.get(CONF_INCLUDE, {})
exclude = conf.get(CONF_EXCLUDE, {})
whitelist_e = set(include.get(CONF_ENTITIES, []))
whitelist_d = set(include.get(CONF_DOMAINS, []))
blacklist_e = set(exclude.get(CONF_ENTITIES, []))
blacklist_d = set(exclude.get(CONF_DOMAINS, []))
tags = conf.get(CONF_TAGS)
tags_attributes = conf.get(CONF_TAGS_ATTRIBUTES)
default_measurement = conf.get(CONF_DEFAULT_MEASUREMENT)
override_measurement = conf.get(CONF_OVERRIDE_MEASUREMENT)
component_config = EntityValues(
conf[CONF_COMPONENT_CONFIG],
conf[CONF_COMPONENT_CONFIG_DOMAIN],
conf[CONF_COMPONENT_CONFIG_GLOB])
max_tries = conf.get(CONF_RETRY_COUNT)
try:
influx = InfluxDBClient(**kwargs)
influx.write_points([])
except (exceptions.InfluxDBClientError,
requests.exceptions.ConnectionError) as exc:
_LOGGER.error("Database host is not accessible due to '%s', please "
"check your entries in the configuration file (host, "
"port, etc.) and verify that the database exists and is "
"READ/WRITE", exc)
return False
def event_to_json(event):
"""Add an event to the outgoing Influx list."""
state = event.data.get('new_state')
if state is None or state.state in (
STATE_UNKNOWN, '', STATE_UNAVAILABLE) or \
state.entity_id in blacklist_e or state.domain in blacklist_d:
return
try:
if (whitelist_e and state.entity_id not in whitelist_e) or \
(whitelist_d and state.domain not in whitelist_d):
return
_include_state = _include_value = False
_state_as_value = float(state.state)
_include_value = True
except ValueError:
try:
_state_as_value = float(state_helper.state_as_number(state))
_include_state = _include_value = True
except ValueError:
_include_state = True
include_uom = True
measurement = component_config.get(state.entity_id).get(
CONF_OVERRIDE_MEASUREMENT)
if measurement in (None, ''):
if override_measurement:
measurement = override_measurement
else:
measurement = state.attributes.get('unit_of_measurement')
if measurement in (None, ''):
if default_measurement:
measurement = default_measurement
else:
measurement = state.entity_id
else:
include_uom = False
json = {
'measurement': measurement,
'tags': {
'domain': state.domain,
'entity_id': state.object_id,
},
'time': event.time_fired,
'fields': {}
}
if _include_state:
json['fields']['state'] = state.state
if _include_value:
json['fields']['value'] = _state_as_value
for key, value in state.attributes.items():
if key in tags_attributes:
json['tags'][key] = value
elif key != 'unit_of_measurement' or include_uom:
# If the key is already in fields
if key in json['fields']:
key = key + "_"
# Prevent column data errors in InfluxDB.
# For each value we try to cast it as a float,
# but if that is not possible we store the value
# as a string and add a "_str" postfix to the field key.
try:
json['fields'][key] = float(value)
except (ValueError, TypeError):
new_key = "{}_str".format(key)
new_value = str(value)
json['fields'][new_key] = new_value
if RE_DIGIT_TAIL.match(new_value):
json['fields'][key] = float(
RE_DECIMAL.sub('', new_value))
# Infinity and NaN are not valid floats in InfluxDB
try:
if not math.isfinite(json['fields'][key]):
del json['fields'][key]
except (KeyError, TypeError):
pass
json['tags'].update(tags)
return json
instance = hass.data[DOMAIN] = InfluxThread(
hass, influx, event_to_json, max_tries)
instance.start()
def shutdown(event):
"""Shut down the thread."""
instance.queue.put(None)
instance.join()
influx.close()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, shutdown)
return True
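# Hedged illustration of a single point handed to influx.write_points() by
# event_to_json() above (the entity and values are made up). For a numeric
# sensor with a unit of measurement and no overrides, the measurement falls
# back to the unit and the numeric value is written; any other state
# attributes would be appended as extra fields:
#
#   {'measurement': '°C',
#    'tags': {'domain': 'sensor', 'entity_id': 'living_room_temperature'},
#    'time': <event.time_fired>,
#    'fields': {'value': 21.5}}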
class InfluxThread(threading.Thread):
"""A threaded event handler class."""
def __init__(self, hass, influx, event_to_json, max_tries):
"""Initialize the listener."""
threading.Thread.__init__(self, name='InfluxDB')
self.queue = queue.Queue()
self.influx = influx
self.event_to_json = event_to_json
self.max_tries = max_tries
self.write_errors = 0
self.shutdown = False
hass.bus.listen(EVENT_STATE_CHANGED, self._event_listener)
def _event_listener(self, event):
"""Listen for new messages on the bus and queue them for Influx."""
item = (time.monotonic(), event)
self.queue.put(item)
@staticmethod
def batch_timeout():
"""Return number of seconds to wait for more events."""
return BATCH_TIMEOUT
def get_events_json(self):
"""Return a batch of events formatted for writing."""
queue_seconds = QUEUE_BACKLOG_SECONDS + self.max_tries*RETRY_DELAY
count = 0
json = []
dropped = 0
try:
while len(json) < BATCH_BUFFER_SIZE and not self.shutdown:
timeout = None if count == 0 else self.batch_timeout()
item = self.queue.get(timeout=timeout)
count += 1
if item is None:
self.shutdown = True
else:
timestamp, event = item
age = time.monotonic() - timestamp
if age < queue_seconds:
event_json = self.event_to_json(event)
if event_json:
json.append(event_json)
else:
dropped += 1
except queue.Empty:
pass
if dropped:
_LOGGER.warning("Catching up, dropped %d old events", dropped)
return count, json
def write_to_influxdb(self, json):
"""Write preprocessed events to influxdb, with retry."""
from influxdb import exceptions
for retry in range(self.max_tries+1):
try:
self.influx.write_points(json)
if self.write_errors:
_LOGGER.error("Resumed, lost %d events", self.write_errors)
self.write_errors = 0
_LOGGER.debug("Wrote %d events", len(json))
break
except (exceptions.InfluxDBClientError, IOError):
if retry < self.max_tries:
time.sleep(RETRY_DELAY)
else:
if not self.write_errors:
_LOGGER.exception("Write error")
self.write_errors += len(json)
def run(self):
"""Process incoming events."""
while not self.shutdown:
count, json = self.get_events_json()
if json:
self.write_to_influxdb(json)
for _ in range(count):
self.queue.task_done()
def block_till_done(self):
"""Block till all events processed."""
self.queue.join()
| apache-2.0 | -1,788,784,909,250,787,800 | 33.900875 | 79 | 0.572383 | false |
TheWardoctor/Wardoctors-repo | script.stargate.guide/strings.py | 1 | 2287 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Tommy Winther
# http://tommy.winther.nu
#
# Modified for FTV Guide (09/2014 onwards)
# by Thomas Geppert [bluezed] - [email protected]
#
# Modified for Stargate Guide (2016)
# by wardoctor - [email protected]
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this Program; see the file LICENSE.txt. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# http://www.gnu.org/copyleft/gpl.html
#
import xbmcaddon
ADDON = xbmcaddon.Addon(id = 'script.stargate.guide')
NO_DESCRIPTION = 30000
CALCULATING_REMAINING_TIME = 30002
TIME_LEFT = 30003
BACKGROUND_UPDATE_IN_PROGRESS = 30004
NO_PROGRAM_AVAILABLE = 30009
NO_STREAM_AVAILABLE_TITLE = 30100
NO_STREAM_AVAILABLE_LINE1 = 30101
NO_STREAM_AVAILABLE_LINE2 = 30102
CLEAR_CACHE = 30104
CLEAR_NOTIFICATIONS = 30108
DONE = 30105
LOAD_ERROR_TITLE = 30150
LOAD_ERROR_LINE1 = 30151
LOAD_ERROR_LINE2 = 30152
CONFIGURATION_ERROR_LINE2 = 30153
SKIN_ERROR_LINE1 = 30154
SKIN_ERROR_LINE2 = 30155
SKIN_ERROR_LINE3 = 30156
NOTIFICATION_5_MINS = 30200
NOTIFICATION_NOW = 30201
WATCH_CHANNEL = 30300
REMIND_PROGRAM = 30301
DONT_REMIND_PROGRAM = 30302
CHOOSE_STRM_FILE = 30304
REMOVE_STRM_FILE = 30306
PREVIEW_STREAM = 30604
STOP_PREVIEW = 30607
WEEBTV_WEBTV_MISSING_1 = 30802
WEEBTV_WEBTV_MISSING_2 = 30803
WEEBTV_WEBTV_MISSING_3 = 30804
DATABASE_SCHEMA_ERROR_1 = 30157
DATABASE_SCHEMA_ERROR_2 = 30158
DATABASE_SCHEMA_ERROR_3 = 30159
FETCH_ERROR_TITLE = 31000
FETCH_ERROR_LINE1 = 31001
FETCH_ERROR_LINE2 = 31002
def strings(id, replacements = None):
string = ADDON.getLocalizedString(id)
if replacements is not None:
return string % replacements
else:
return string | apache-2.0 | -2,401,772,511,313,800,700 | 26.238095 | 72 | 0.738085 | false |
Belgabor/django | tests/regressiontests/test_client_regress/models.py | 1 | 40827 | # -*- coding: utf-8 -*-
"""
Regression tests for the Test Client, especially the customized assertions.
"""
import os
from django.conf import settings
from django.test import Client, TestCase
from django.test.utils import ContextList
from django.core.urlresolvers import reverse
from django.core.exceptions import SuspiciousOperation
from django.template import TemplateDoesNotExist, TemplateSyntaxError, Context
from django.template import loader
from django.test.client import encode_file
class AssertContainsTests(TestCase):
def setUp(self):
self.old_templates = settings.TEMPLATE_DIRS
settings.TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), 'templates'),)
def tearDown(self):
settings.TEMPLATE_DIRS = self.old_templates
def test_contains(self):
"Responses can be inspected for content, including counting repeated substrings"
response = self.client.get('/test_client_regress/no_template_view/')
self.assertNotContains(response, 'never')
self.assertContains(response, 'never', 0)
self.assertContains(response, 'once')
self.assertContains(response, 'once', 1)
self.assertContains(response, 'twice')
self.assertContains(response, 'twice', 2)
try:
self.assertContains(response, 'text', status_code=999)
except AssertionError, e:
self.assertEquals(str(e), "Couldn't retrieve content: Response code was 200 (expected 999)")
try:
self.assertContains(response, 'text', status_code=999, msg_prefix='abc')
except AssertionError, e:
self.assertEquals(str(e), "abc: Couldn't retrieve content: Response code was 200 (expected 999)")
try:
self.assertNotContains(response, 'text', status_code=999)
except AssertionError, e:
self.assertEquals(str(e), "Couldn't retrieve content: Response code was 200 (expected 999)")
try:
self.assertNotContains(response, 'text', status_code=999, msg_prefix='abc')
except AssertionError, e:
self.assertEquals(str(e), "abc: Couldn't retrieve content: Response code was 200 (expected 999)")
try:
self.assertNotContains(response, 'once')
except AssertionError, e:
self.assertEquals(str(e), "Response should not contain 'once'")
try:
self.assertNotContains(response, 'once', msg_prefix='abc')
except AssertionError, e:
self.assertEquals(str(e), "abc: Response should not contain 'once'")
try:
self.assertContains(response, 'never', 1)
except AssertionError, e:
self.assertEquals(str(e), "Found 0 instances of 'never' in response (expected 1)")
try:
self.assertContains(response, 'never', 1, msg_prefix='abc')
except AssertionError, e:
self.assertEquals(str(e), "abc: Found 0 instances of 'never' in response (expected 1)")
try:
self.assertContains(response, 'once', 0)
except AssertionError, e:
self.assertEquals(str(e), "Found 1 instances of 'once' in response (expected 0)")
try:
self.assertContains(response, 'once', 0, msg_prefix='abc')
except AssertionError, e:
self.assertEquals(str(e), "abc: Found 1 instances of 'once' in response (expected 0)")
try:
self.assertContains(response, 'once', 2)
except AssertionError, e:
self.assertEquals(str(e), "Found 1 instances of 'once' in response (expected 2)")
try:
self.assertContains(response, 'once', 2, msg_prefix='abc')
except AssertionError, e:
self.assertEquals(str(e), "abc: Found 1 instances of 'once' in response (expected 2)")
try:
self.assertContains(response, 'twice', 1)
except AssertionError, e:
self.assertEquals(str(e), "Found 2 instances of 'twice' in response (expected 1)")
try:
self.assertContains(response, 'twice', 1, msg_prefix='abc')
except AssertionError, e:
self.assertEquals(str(e), "abc: Found 2 instances of 'twice' in response (expected 1)")
try:
self.assertContains(response, 'thrice')
except AssertionError, e:
self.assertEquals(str(e), "Couldn't find 'thrice' in response")
try:
self.assertContains(response, 'thrice', msg_prefix='abc')
except AssertionError, e:
self.assertEquals(str(e), "abc: Couldn't find 'thrice' in response")
try:
self.assertContains(response, 'thrice', 3)
except AssertionError, e:
self.assertEquals(str(e), "Found 0 instances of 'thrice' in response (expected 3)")
try:
self.assertContains(response, 'thrice', 3, msg_prefix='abc')
except AssertionError, e:
self.assertEquals(str(e), "abc: Found 0 instances of 'thrice' in response (expected 3)")
def test_unicode_contains(self):
"Unicode characters can be found in template context"
#Regression test for #10183
r = self.client.get('/test_client_regress/check_unicode/')
self.assertContains(r, u'さかき')
self.assertContains(r, '\xe5\xb3\xa0'.decode('utf-8'))
def test_unicode_not_contains(self):
"Unicode characters can be searched for, and not found in template context"
#Regression test for #10183
r = self.client.get('/test_client_regress/check_unicode/')
self.assertNotContains(r, u'はたけ')
self.assertNotContains(r, '\xe3\x81\xaf\xe3\x81\x9f\xe3\x81\x91'.decode('utf-8'))
class AssertTemplateUsedTests(TestCase):
fixtures = ['testdata.json']
def test_no_context(self):
"Template usage assertions work then templates aren't in use"
response = self.client.get('/test_client_regress/no_template_view/')
# Check that the no template case doesn't mess with the template assertions
self.assertTemplateNotUsed(response, 'GET Template')
try:
self.assertTemplateUsed(response, 'GET Template')
except AssertionError, e:
self.assertEquals(str(e), "No templates used to render the response")
try:
self.assertTemplateUsed(response, 'GET Template', msg_prefix='abc')
except AssertionError, e:
self.assertEquals(str(e), "abc: No templates used to render the response")
def test_single_context(self):
"Template assertions work when there is a single context"
response = self.client.get('/test_client/post_view/', {})
try:
self.assertTemplateNotUsed(response, 'Empty GET Template')
except AssertionError, e:
self.assertEquals(str(e), "Template 'Empty GET Template' was used unexpectedly in rendering the response")
try:
self.assertTemplateNotUsed(response, 'Empty GET Template', msg_prefix='abc')
except AssertionError, e:
self.assertEquals(str(e), "abc: Template 'Empty GET Template' was used unexpectedly in rendering the response")
try:
self.assertTemplateUsed(response, 'Empty POST Template')
except AssertionError, e:
self.assertEquals(str(e), "Template 'Empty POST Template' was not a template used to render the response. Actual template(s) used: Empty GET Template")
try:
self.assertTemplateUsed(response, 'Empty POST Template', msg_prefix='abc')
except AssertionError, e:
self.assertEquals(str(e), "abc: Template 'Empty POST Template' was not a template used to render the response. Actual template(s) used: Empty GET Template")
def test_multiple_context(self):
"Template assertions work when there are multiple contexts"
post_data = {
'text': 'Hello World',
'email': '[email protected]',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view_with_template/', post_data)
self.assertContains(response, 'POST data OK')
try:
self.assertTemplateNotUsed(response, "form_view.html")
except AssertionError, e:
self.assertEquals(str(e), "Template 'form_view.html' was used unexpectedly in rendering the response")
try:
self.assertTemplateNotUsed(response, 'base.html')
except AssertionError, e:
self.assertEquals(str(e), "Template 'base.html' was used unexpectedly in rendering the response")
try:
self.assertTemplateUsed(response, "Valid POST Template")
except AssertionError, e:
self.assertEquals(str(e), "Template 'Valid POST Template' was not a template used to render the response. Actual template(s) used: form_view.html, base.html")
class AssertRedirectsTests(TestCase):
def test_redirect_page(self):
"An assertion is raised if the original page couldn't be retrieved as expected"
# This page will redirect with code 301, not 302
response = self.client.get('/test_client/permanent_redirect_view/')
try:
self.assertRedirects(response, '/test_client/get_view/')
except AssertionError, e:
self.assertEquals(str(e), "Response didn't redirect as expected: Response code was 301 (expected 302)")
try:
self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc')
except AssertionError, e:
self.assertEquals(str(e), "abc: Response didn't redirect as expected: Response code was 301 (expected 302)")
def test_lost_query(self):
"An assertion is raised if the redirect location doesn't preserve GET parameters"
response = self.client.get('/test_client/redirect_view/', {'var': 'value'})
try:
self.assertRedirects(response, '/test_client/get_view/')
except AssertionError, e:
self.assertEquals(str(e), "Response redirected to 'http://testserver/test_client/get_view/?var=value', expected 'http://testserver/test_client/get_view/'")
try:
self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc')
except AssertionError, e:
self.assertEquals(str(e), "abc: Response redirected to 'http://testserver/test_client/get_view/?var=value', expected 'http://testserver/test_client/get_view/'")
def test_incorrect_target(self):
"An assertion is raised if the response redirects to another target"
response = self.client.get('/test_client/permanent_redirect_view/')
try:
# Should redirect to get_view
self.assertRedirects(response, '/test_client/some_view/')
except AssertionError, e:
self.assertEquals(str(e), "Response didn't redirect as expected: Response code was 301 (expected 302)")
def test_target_page(self):
"An assertion is raised if the response redirect target cannot be retrieved as expected"
response = self.client.get('/test_client/double_redirect_view/')
try:
# The redirect target responds with a 301 code, not 200
self.assertRedirects(response, 'http://testserver/test_client/permanent_redirect_view/')
except AssertionError, e:
self.assertEquals(str(e), "Couldn't retrieve redirection page '/test_client/permanent_redirect_view/': response code was 301 (expected 200)")
try:
# The redirect target responds with a 301 code, not 200
self.assertRedirects(response, 'http://testserver/test_client/permanent_redirect_view/', msg_prefix='abc')
except AssertionError, e:
self.assertEquals(str(e), "abc: Couldn't retrieve redirection page '/test_client/permanent_redirect_view/': response code was 301 (expected 200)")
def test_redirect_chain(self):
"You can follow a redirect chain of multiple redirects"
response = self.client.get('/test_client_regress/redirects/further/more/', {}, follow=True)
self.assertRedirects(response, '/test_client_regress/no_template_view/',
status_code=301, target_status_code=200)
self.assertEquals(len(response.redirect_chain), 1)
self.assertEquals(response.redirect_chain[0], ('http://testserver/test_client_regress/no_template_view/', 301))
def test_multiple_redirect_chain(self):
"You can follow a redirect chain of multiple redirects"
response = self.client.get('/test_client_regress/redirects/', {}, follow=True)
self.assertRedirects(response, '/test_client_regress/no_template_view/',
status_code=301, target_status_code=200)
self.assertEquals(len(response.redirect_chain), 3)
self.assertEquals(response.redirect_chain[0], ('http://testserver/test_client_regress/redirects/further/', 301))
self.assertEquals(response.redirect_chain[1], ('http://testserver/test_client_regress/redirects/further/more/', 301))
self.assertEquals(response.redirect_chain[2], ('http://testserver/test_client_regress/no_template_view/', 301))
def test_redirect_chain_to_non_existent(self):
"You can follow a chain to a non-existent view"
response = self.client.get('/test_client_regress/redirect_to_non_existent_view2/', {}, follow=True)
self.assertRedirects(response, '/test_client_regress/non_existent_view/',
status_code=301, target_status_code=404)
def test_redirect_chain_to_self(self):
"Redirections to self are caught and escaped"
response = self.client.get('/test_client_regress/redirect_to_self/', {}, follow=True)
# The chain of redirects stops once the cycle is detected.
self.assertRedirects(response, '/test_client_regress/redirect_to_self/',
status_code=301, target_status_code=301)
self.assertEquals(len(response.redirect_chain), 2)
def test_circular_redirect(self):
"Circular redirect chains are caught and escaped"
response = self.client.get('/test_client_regress/circular_redirect_1/', {}, follow=True)
# The chain of redirects will get back to the starting point, but stop there.
self.assertRedirects(response, '/test_client_regress/circular_redirect_2/',
status_code=301, target_status_code=301)
self.assertEquals(len(response.redirect_chain), 4)
def test_redirect_chain_post(self):
"A redirect chain will be followed from an initial POST post"
response = self.client.post('/test_client_regress/redirects/',
{'nothing': 'to_send'}, follow=True)
self.assertRedirects(response,
'/test_client_regress/no_template_view/', 301, 200)
self.assertEquals(len(response.redirect_chain), 3)
def test_redirect_chain_head(self):
"A redirect chain will be followed from an initial HEAD request"
response = self.client.head('/test_client_regress/redirects/',
{'nothing': 'to_send'}, follow=True)
self.assertRedirects(response,
'/test_client_regress/no_template_view/', 301, 200)
self.assertEquals(len(response.redirect_chain), 3)
def test_redirect_chain_options(self):
"A redirect chain will be followed from an initial OPTIONS request"
response = self.client.options('/test_client_regress/redirects/',
{'nothing': 'to_send'}, follow=True)
self.assertRedirects(response,
'/test_client_regress/no_template_view/', 301, 200)
self.assertEquals(len(response.redirect_chain), 3)
def test_redirect_chain_put(self):
"A redirect chain will be followed from an initial PUT request"
response = self.client.put('/test_client_regress/redirects/',
{'nothing': 'to_send'}, follow=True)
self.assertRedirects(response,
'/test_client_regress/no_template_view/', 301, 200)
self.assertEquals(len(response.redirect_chain), 3)
def test_redirect_chain_delete(self):
"A redirect chain will be followed from an initial DELETE request"
response = self.client.delete('/test_client_regress/redirects/',
{'nothing': 'to_send'}, follow=True)
self.assertRedirects(response,
'/test_client_regress/no_template_view/', 301, 200)
self.assertEquals(len(response.redirect_chain), 3)
def test_redirect_chain_on_non_redirect_page(self):
"An assertion is raised if the original page couldn't be retrieved as expected"
# This page will redirect with code 301, not 302
response = self.client.get('/test_client/get_view/', follow=True)
try:
self.assertRedirects(response, '/test_client/get_view/')
except AssertionError, e:
self.assertEquals(str(e), "Response didn't redirect as expected: Response code was 200 (expected 302)")
try:
self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc')
except AssertionError, e:
self.assertEquals(str(e), "abc: Response didn't redirect as expected: Response code was 200 (expected 302)")
def test_redirect_on_non_redirect_page(self):
"An assertion is raised if the original page couldn't be retrieved as expected"
# This page will redirect with code 301, not 302
response = self.client.get('/test_client/get_view/')
try:
self.assertRedirects(response, '/test_client/get_view/')
except AssertionError, e:
self.assertEquals(str(e), "Response didn't redirect as expected: Response code was 200 (expected 302)")
try:
self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc')
except AssertionError, e:
self.assertEquals(str(e), "abc: Response didn't redirect as expected: Response code was 200 (expected 302)")
class AssertFormErrorTests(TestCase):
def test_unknown_form(self):
"An assertion is raised if the form name is unknown"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'wrong_form', 'some_field', 'Some error.')
except AssertionError, e:
self.assertEqual(str(e), "The form 'wrong_form' was not used to render the response")
try:
self.assertFormError(response, 'wrong_form', 'some_field', 'Some error.', msg_prefix='abc')
except AssertionError, e:
self.assertEqual(str(e), "abc: The form 'wrong_form' was not used to render the response")
def test_unknown_field(self):
"An assertion is raised if the field name is unknown"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', 'some_field', 'Some error.')
except AssertionError, e:
self.assertEqual(str(e), "The form 'form' in context 0 does not contain the field 'some_field'")
try:
self.assertFormError(response, 'form', 'some_field', 'Some error.', msg_prefix='abc')
except AssertionError, e:
self.assertEqual(str(e), "abc: The form 'form' in context 0 does not contain the field 'some_field'")
def test_noerror_field(self):
"An assertion is raised if the field doesn't have any errors"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', 'value', 'Some error.')
except AssertionError, e:
self.assertEqual(str(e), "The field 'value' on form 'form' in context 0 contains no errors")
try:
self.assertFormError(response, 'form', 'value', 'Some error.', msg_prefix='abc')
except AssertionError, e:
self.assertEqual(str(e), "abc: The field 'value' on form 'form' in context 0 contains no errors")
def test_unknown_error(self):
"An assertion is raised if the field doesn't contain the provided error"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', 'email', 'Some error.')
except AssertionError, e:
self.assertEqual(str(e), "The field 'email' on form 'form' in context 0 does not contain the error 'Some error.' (actual errors: [u'Enter a valid e-mail address.'])")
try:
self.assertFormError(response, 'form', 'email', 'Some error.', msg_prefix='abc')
except AssertionError, e:
self.assertEqual(str(e), "abc: The field 'email' on form 'form' in context 0 does not contain the error 'Some error.' (actual errors: [u'Enter a valid e-mail address.'])")
def test_unknown_nonfield_error(self):
"""
Checks that an assertion is raised if the form's non field errors
doesn't contain the provided error.
"""
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', None, 'Some error.')
except AssertionError, e:
self.assertEqual(str(e), "The form 'form' in context 0 does not contain the non-field error 'Some error.' (actual errors: )")
try:
self.assertFormError(response, 'form', None, 'Some error.', msg_prefix='abc')
except AssertionError, e:
self.assertEqual(str(e), "abc: The form 'form' in context 0 does not contain the non-field error 'Some error.' (actual errors: )")
class LoginTests(TestCase):
fixtures = ['testdata']
def test_login_different_client(self):
"Check that using a different test client doesn't violate authentication"
# Create a second client, and log in.
c = Client()
login = c.login(username='testclient', password='password')
self.failUnless(login, 'Could not log in')
# Get a redirection page with the second client.
response = c.get("/test_client_regress/login_protected_redirect_view/")
# At this point, self.client isn't logged in.
# Check that assertRedirects uses the original client, not the
# default client.
self.assertRedirects(response, "http://testserver/test_client_regress/get_view/")
class SessionEngineTests(TestCase):
fixtures = ['testdata']
def setUp(self):
self.old_SESSION_ENGINE = settings.SESSION_ENGINE
settings.SESSION_ENGINE = 'regressiontests.test_client_regress.session'
def tearDown(self):
settings.SESSION_ENGINE = self.old_SESSION_ENGINE
def test_login(self):
"A session engine that modifies the session key can be used to log in"
login = self.client.login(username='testclient', password='password')
self.failUnless(login, 'Could not log in')
# Try to access a login protected page.
response = self.client.get("/test_client/login_protected_view/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
class URLEscapingTests(TestCase):
def test_simple_argument_get(self):
"Get a view that has a simple string argument"
response = self.client.get(reverse('arg_view', args=['Slartibartfast']))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'Howdy, Slartibartfast')
def test_argument_with_space_get(self):
"Get a view that has a string argument that requires escaping"
response = self.client.get(reverse('arg_view', args=['Arthur Dent']))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'Hi, Arthur')
def test_simple_argument_post(self):
"Post for a view that has a simple string argument"
response = self.client.post(reverse('arg_view', args=['Slartibartfast']))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'Howdy, Slartibartfast')
def test_argument_with_space_post(self):
"Post for a view that has a string argument that requires escaping"
response = self.client.post(reverse('arg_view', args=['Arthur Dent']))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'Hi, Arthur')
class ExceptionTests(TestCase):
fixtures = ['testdata.json']
def test_exception_cleared(self):
"#5836 - A stale user exception isn't re-raised by the test client."
login = self.client.login(username='testclient',password='password')
self.failUnless(login, 'Could not log in')
try:
response = self.client.get("/test_client_regress/staff_only/")
self.fail("General users should not be able to visit this page")
except SuspiciousOperation:
pass
# At this point, an exception has been raised, and should be cleared.
# This next operation should be successful; if it isn't we have a problem.
login = self.client.login(username='staff', password='password')
self.failUnless(login, 'Could not log in')
try:
self.client.get("/test_client_regress/staff_only/")
except SuspiciousOperation:
self.fail("Staff should be able to visit this page")
class TemplateExceptionTests(TestCase):
def setUp(self):
# Reset the loaders so they don't try to render cached templates.
if loader.template_source_loaders is not None:
for template_loader in loader.template_source_loaders:
if hasattr(template_loader, 'reset'):
template_loader.reset()
self.old_templates = settings.TEMPLATE_DIRS
settings.TEMPLATE_DIRS = ()
def tearDown(self):
settings.TEMPLATE_DIRS = self.old_templates
def test_no_404_template(self):
"Missing templates are correctly reported by test client"
try:
response = self.client.get("/no_such_view/")
self.fail("Should get error about missing template")
except TemplateDoesNotExist:
pass
def test_bad_404_template(self):
"Errors found when rendering 404 error templates are re-raised"
settings.TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), 'bad_templates'),)
try:
response = self.client.get("/no_such_view/")
self.fail("Should get error about syntax error in template")
except TemplateSyntaxError:
pass
# We need two different tests to check URLconf substitution - one to check
# it was changed, and another one (without self.urls) to check it was reverted on
# teardown. This pair of tests relies upon the alphabetical ordering of test execution.
class UrlconfSubstitutionTests(TestCase):
urls = 'regressiontests.test_client_regress.urls'
def test_urlconf_was_changed(self):
"TestCase can enforce a custom URLconf on a per-test basis"
url = reverse('arg_view', args=['somename'])
self.assertEquals(url, '/arg_view/somename/')
# This test needs to run *after* UrlconfSubstitutionTests; the zz prefix in the
# name is to ensure alphabetical ordering.
class zzUrlconfSubstitutionTests(TestCase):
def test_urlconf_was_reverted(self):
"URLconf is reverted to original value after modification in a TestCase"
url = reverse('arg_view', args=['somename'])
self.assertEquals(url, '/test_client_regress/arg_view/somename/')
class ContextTests(TestCase):
fixtures = ['testdata']
def test_single_context(self):
"Context variables can be retrieved from a single context"
response = self.client.get("/test_client_regress/request_data/", data={'foo':'whiz'})
self.assertEqual(response.context.__class__, Context)
self.assertTrue('get-foo' in response.context)
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['request-foo'], 'whiz')
self.assertEqual(response.context['data'], 'sausage')
try:
response.context['does-not-exist']
self.fail('Should not be able to retrieve non-existent key')
except KeyError, e:
self.assertEquals(e.args[0], 'does-not-exist')
def test_inherited_context(self):
"Context variables can be retrieved from a list of contexts"
response = self.client.get("/test_client_regress/request_data_extended/", data={'foo':'whiz'})
self.assertEqual(response.context.__class__, ContextList)
self.assertEqual(len(response.context), 2)
self.assertTrue('get-foo' in response.context)
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['request-foo'], 'whiz')
self.assertEqual(response.context['data'], 'bacon')
try:
response.context['does-not-exist']
self.fail('Should not be able to retrieve non-existent key')
except KeyError, e:
self.assertEquals(e.args[0], 'does-not-exist')
class SessionTests(TestCase):
fixtures = ['testdata.json']
def test_session(self):
"The session isn't lost if a user logs in"
# The session doesn't exist to start.
response = self.client.get('/test_client_regress/check_session/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'NO')
# This request sets a session variable.
response = self.client.get('/test_client_regress/set_session/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'set_session')
# Check that the session has been modified
response = self.client.get('/test_client_regress/check_session/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'YES')
# Log in
login = self.client.login(username='testclient',password='password')
self.failUnless(login, 'Could not log in')
# Session should still contain the modified value
response = self.client.get('/test_client_regress/check_session/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'YES')
def test_logout(self):
"""Logout should work whether the user is logged in or not (#9978)."""
self.client.logout()
login = self.client.login(username='testclient',password='password')
self.failUnless(login, 'Could not log in')
self.client.logout()
self.client.logout()
class RequestMethodTests(TestCase):
def test_get(self):
"Request a view via request method GET"
response = self.client.get('/test_client_regress/request_methods/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'request method: GET')
def test_post(self):
"Request a view via request method POST"
response = self.client.post('/test_client_regress/request_methods/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'request method: POST')
def test_head(self):
"Request a view via request method HEAD"
response = self.client.head('/test_client_regress/request_methods/')
self.assertEqual(response.status_code, 200)
# A HEAD request doesn't return any content.
self.assertNotEqual(response.content, 'request method: HEAD')
self.assertEqual(response.content, '')
def test_options(self):
"Request a view via request method OPTIONS"
response = self.client.options('/test_client_regress/request_methods/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'request method: OPTIONS')
def test_put(self):
"Request a view via request method PUT"
response = self.client.put('/test_client_regress/request_methods/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'request method: PUT')
def test_delete(self):
"Request a view via request method DELETE"
response = self.client.delete('/test_client_regress/request_methods/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'request method: DELETE')
class RequestMethodStringDataTests(TestCase):
def test_post(self):
"Request a view with string data via request method POST"
# Regression test for #11371
data = u'{"test": "json"}'
response = self.client.post('/test_client_regress/request_methods/', data=data, content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'request method: POST')
def test_put(self):
"Request a view with string data via request method PUT"
# Regression test for #11371
data = u'{"test": "json"}'
response = self.client.put('/test_client_regress/request_methods/', data=data, content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'request method: PUT')
class QueryStringTests(TestCase):
def test_get_like_requests(self):
for method_name in ('get','head','options','put','delete'):
# A GET-like request can pass a query string as data
method = getattr(self.client, method_name)
response = method("/test_client_regress/request_data/", data={'foo':'whiz'})
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['request-foo'], 'whiz')
# A GET-like request can pass a query string as part of the URL
response = method("/test_client_regress/request_data/?foo=whiz")
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['request-foo'], 'whiz')
# Data provided in the URL to a GET-like request is overridden by actual form data
response = method("/test_client_regress/request_data/?foo=whiz", data={'foo':'bang'})
self.assertEqual(response.context['get-foo'], 'bang')
self.assertEqual(response.context['request-foo'], 'bang')
response = method("/test_client_regress/request_data/?foo=whiz", data={'bar':'bang'})
self.assertEqual(response.context['get-foo'], None)
self.assertEqual(response.context['get-bar'], 'bang')
self.assertEqual(response.context['request-foo'], None)
self.assertEqual(response.context['request-bar'], 'bang')
def test_post_like_requests(self):
# A POST-like request can pass a query string as data
response = self.client.post("/test_client_regress/request_data/", data={'foo':'whiz'})
self.assertEqual(response.context['get-foo'], None)
self.assertEqual(response.context['post-foo'], 'whiz')
# A POST-like request can pass a query string as part of the URL
response = self.client.post("/test_client_regress/request_data/?foo=whiz")
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['post-foo'], None)
self.assertEqual(response.context['request-foo'], 'whiz')
# POST data provided in the URL augments actual form data
response = self.client.post("/test_client_regress/request_data/?foo=whiz", data={'foo':'bang'})
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['post-foo'], 'bang')
self.assertEqual(response.context['request-foo'], 'bang')
response = self.client.post("/test_client_regress/request_data/?foo=whiz", data={'bar':'bang'})
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['get-bar'], None)
self.assertEqual(response.context['post-foo'], None)
self.assertEqual(response.context['post-bar'], 'bang')
self.assertEqual(response.context['request-foo'], 'whiz')
self.assertEqual(response.context['request-bar'], 'bang')
class UnicodePayloadTests(TestCase):
def test_simple_unicode_payload(self):
"A simple ASCII-only unicode JSON document can be POSTed"
# Regression test for #10571
json = u'{"english": "mountain pass"}'
response = self.client.post("/test_client_regress/parse_unicode_json/", json,
content_type="application/json")
self.assertEqual(response.content, json)
def test_unicode_payload_utf8(self):
"A non-ASCII unicode data encoded as UTF-8 can be POSTed"
# Regression test for #10571
json = u'{"dog": "собака"}'
response = self.client.post("/test_client_regress/parse_unicode_json/", json,
content_type="application/json; charset=utf-8")
self.assertEqual(response.content, json.encode('utf-8'))
def test_unicode_payload_utf16(self):
"A non-ASCII unicode data encoded as UTF-16 can be POSTed"
# Regression test for #10571
json = u'{"dog": "собака"}'
response = self.client.post("/test_client_regress/parse_unicode_json/", json,
content_type="application/json; charset=utf-16")
self.assertEqual(response.content, json.encode('utf-16'))
def test_unicode_payload_non_utf(self):
"A non-ASCII unicode data as a non-UTF based encoding can be POSTed"
#Regression test for #10571
json = u'{"dog": "собака"}'
response = self.client.post("/test_client_regress/parse_unicode_json/", json,
content_type="application/json; charset=koi8-r")
self.assertEqual(response.content, json.encode('koi8-r'))
class DummyFile(object):
def __init__(self, filename):
self.name = filename
def read(self):
return 'TEST_FILE_CONTENT'
class UploadedFileEncodingTest(TestCase):
def test_file_encoding(self):
encoded_file = encode_file('TEST_BOUNDARY', 'TEST_KEY', DummyFile('test_name.bin'))
self.assertEqual('--TEST_BOUNDARY', encoded_file[0])
self.assertEqual('Content-Disposition: form-data; name="TEST_KEY"; filename="test_name.bin"', encoded_file[1])
self.assertEqual('TEST_FILE_CONTENT', encoded_file[-1])
def test_guesses_content_type_on_file_encoding(self):
self.assertEqual('Content-Type: application/octet-stream',
encode_file('IGNORE', 'IGNORE', DummyFile("file.bin"))[2])
self.assertEqual('Content-Type: text/plain',
encode_file('IGNORE', 'IGNORE', DummyFile("file.txt"))[2])
self.assertEqual('Content-Type: application/zip',
encode_file('IGNORE', 'IGNORE', DummyFile("file.zip"))[2])
self.assertEqual('Content-Type: application/octet-stream',
encode_file('IGNORE', 'IGNORE', DummyFile("file.unknown"))[2])
| bsd-3-clause | -3,956,953,672,115,931,600 | 47.053004 | 183 | 0.64441 | false |
darvelo/chime | chime/error_functions.py | 1 | 4103 | from __future__ import absolute_import
from logging import getLogger
Logger = getLogger('chime.error_functions')
from flask import current_app, request
from urllib import quote
from urlparse import urlparse
from os.path import join, exists
from .view_functions import get_repo, strip_index_file, path_display_type, get_value_from_front_matter, FOLDER_FILE_TYPE
from .repo_functions import TASK_METADATA_FILENAME
EMAIL_SUBJECT_TEXT = u'Chime Error Report'
EMAIL_BODY_PREFIX = u'\n\n----- Please add any relevant details above this line -----\n\n'
def common_error_template_args(app_config):
''' Return dictionary of template arguments common to error pages.
'''
return {
"activities_path": u'/',
"support_email": app_config.get('SUPPORT_EMAIL_ADDRESS'),
"support_phone_number": app_config.get('SUPPORT_PHONE_NUMBER')
}
def make_email_params(message, path=None, uuid=None):
''' Construct email params to send to the template.
'''
email_subject = EMAIL_SUBJECT_TEXT
email_message = EMAIL_BODY_PREFIX + message
if path:
email_message = u'\n'.join([email_message, u'path: {}'.format(path)])
if uuid:
email_subject = u'{} ({})'.format(email_subject, uuid)
return u'?subject={}&body={}'.format(quote(email_subject), quote(email_message))
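# Illustrative example (hypothetical values, added only to document the shape of
# the returned query string; not part of the original module):
#   make_email_params(u'Could not save page', path=u'/tree/abc123/edit/', uuid=u'1234')
# returns something like
#   u'?subject=Chime%20Error%20Report%20%281234%29&body=...'
# where the body contains the prefix text, the message and the path, all URL-quoted.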
def extract_branch_name_from_path(path):
''' If the passed URL path contains the name of a branch that exists in the repo, return it
'''
repo = get_repo(flask_app=current_app)
for branch_name_candidate in path.split('/'):
if branch_name_candidate in repo.branches:
return branch_name_candidate
return None
def summarize_conflict_details(error):
''' Make an object that summarizes the files affected by a merge conflict.
The object looks like this:
[
{'edit_path': u'', 'display_type': u'Article', 'actions': u'Deleted', 'title': u'How to Find Us'},
{'edit_path': u'/tree/34246e3/edit/contact/hours-of-operation/', 'display_type': u'Article', 'actions': u'Edited', 'title': u'Hours of Operation'},
{'edit_path': u'/tree/34246e3/edit/contact/driving-directions/', 'display_type': u'Article', 'actions': u'Edited', 'title': u'Driving Directions'},
{'edit_path': u'/tree/34246e3/edit/contact/', 'display_type': u'Category', 'actions': u'Created', 'title': u'Contact'}
]
'''
repo = get_repo(flask_app=current_app)
path = urlparse(request.url).path
# get the branch name (unless it's the default branch)
branch_name = repo.active_branch.name
if branch_name == current_app.config['default_branch']:
branch_name = extract_branch_name_from_path(path)
conflict_files = error.files()
summary = []
for id_file in conflict_files:
# skip the task metadata file
if TASK_METADATA_FILENAME in id_file['path']:
continue
file_description = {'actions': id_file['actions'].title()}
edit_path = u''
display_type = u''
title = id_file['path'].split('/')[-1]
# construct location info if the file's there
file_loc = join(repo.working_dir, id_file['path'])
if exists(file_loc):
dir_path = strip_index_file(id_file['path'])
dir_loc = join(repo.working_dir, dir_path)
display_type = path_display_type(dir_loc)
# if it's not a category or article, it's just a file
if display_type == FOLDER_FILE_TYPE:
display_type = path_display_type(file_loc)
title = get_value_from_front_matter('title', file_loc) or title
edit_path = join(u'/tree/{}/edit/'.format(branch_name), dir_path)
else:
# the file's not there, so just dump the whole path into the title
title = id_file['path']
display_type = u'Unknown'
file_description['edit_path'] = edit_path
file_description['display_type'] = display_type.title()
file_description['title'] = title
summary.append(file_description)
return summary
| bsd-3-clause | -526,122,277,231,651,140 | 40.867347 | 159 | 0.63612 | false |
ThierryM/bCNC | bCNC/lib/bmath.py | 2 | 53783 | #
# Copyright European Organization for Nuclear Research (CERN)
# All rights reserved
#
# Author: [email protected]
# Date: 15-May-2004
from __future__ import generators
from __future__ import absolute_import
__author__ = "Vasilis Vlachoudis"
__email__ = "[email protected]"
import random
from math import acos, asin, atan2, copysign, cos, degrees, fmod, hypot, pi, pow, radians, sin, sqrt
import rexx
# Accuracy for comparison operators
_accuracy = 1E-15
# Formatting
_format = "%15g"
#-------------------------------------------------------------------------------
def sign(x):
"""Return sign of number"""
return int(copysign(1,x))
#-------------------------------------------------------------------------------
def Cmp0(x):
"""Compare against zero within _accuracy"""
return abs(x)<_accuracy
#-------------------------------------------------------------------------------
def frange(start,stop,step):
"""range(start,stop,step) for floating point numbers"""
x = start
if step<0.0:
while x>stop:
yield x
x += step
else:
while x<stop:
yield x
x += step
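# Usage sketch (added for illustration): frange() is handy for floating-point
# loops where range() would require integers.
#   list(frange(0.0, 1.0, 0.25))   # -> [0.0, 0.25, 0.5, 0.75]
#   list(frange(1.0, 0.0, -0.5))   # -> [1.0, 0.5]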
#-------------------------------------------------------------------------------
def limit(min_, num, max_):
"""limit a number within a specific range"""
return max(min(num,max_),min_)
#-------------------------------------------------------------------------------
def dms(d,m,s):
"""dms - degrees from degrees, minutes, seconds"""
return d + m/60.0 + s/3600.0
#-------------------------------------------------------------------------------
def cbrt(x):
"""cubic root, this cubic root routine handles negative arguments"""
if x == 0.0:
return 0
elif x > 0.0:
return pow(x, 1./3.)
else:
return -pow(-x, 1./3.)
#-------------------------------------------------------------------------------
def d2s(ang, fmt=""):
"""degrees to string
D2S(angle[,"H"|"M"|"D"|"N"])
"""
fmt = fmt.capitalize()
if ang<0.0:
neg = "-"
ang = -ang
else:
neg = ""
ang = round(ang*360000)/100
SS = "%05.2f" % (fmod(ang,60))
ang = int(ang / 60.0)
MM = "%02d" % (ang % 60)
HH = neg + str(ang / 60)
if fmt=="H":
return HH+"h"+MM+"m"+SS+"s"
if fmt=="M":
return HH+"h"+MM+"m"
if fmt=="D":
return HH+" "+MM+"'"+SS+'"'
if fmt=="N":
return HH+":"+MM
return HH+":"+MM+":"+SS
#-------------------------------------------------------------------------------
def format(number, length=10, useExp=False, useD=False):
""" Format a number to fit in the minimum space given by length"""
_MAXLEN=22
# Convert number to string
# XXX str cuts at 12 digits, repr shows everything but
# numbers like e.g 9.2 will be converted to 9.1999999999999
# What should I do
# Fields also in the CardWidget are converted with str and
# are cut at 12 digits!!!!
if isinstance(number, (float, int)):
number = repr(number).upper()
else:
number = str(number).strip().upper()
if not rexx.datatype(number, "N"): return number
if useD:
number = number.replace("E", "D")
expE = "D"
else:
number = number.replace("D", "E")
expE = "E"
if len(number) < length:
hasExp = (number.find(expE)>=0)
if useExp:
if hasExp: return number
elif number.find(".")>=0 or hasExp:
return number
if number=="0":
if useExp: return "0.%s0" % (expE)
else: return "0.0"
if length<5 or length>_MAXLEN: raise Exception("Format invalid length")
# Dissect the number. It is in the normal Rexx format.
try:
(mantissa, exponent) = number.split(expE)
if exponent == '':
exponent = 0
else:
exponent = int(exponent)
except:
mantissa = number
exponent = 0
if mantissa[0] == '-':
sgn = True
mantissa = mantissa[1:]
elif mantissa[0] == '+':
sgn = False
mantissa = mantissa[1:]
else:
sgn = False
try:
(befo, afte) = mantissa.split(".")
except:
befo = mantissa
afte = ""
# Count from the left for the decimal point.
point = len(befo)
# Make this a number without a point.
integer = befo + afte
# Remove leading zeros
for p in range(len(integer)):
if integer[p] != '0':
if p>0:
integer = integer[p:]
point -= p
break
else:
if useExp: return "0.%s0" % (expE)
else: return "0.0"
# ... and trailing
for p in range(len(integer)-1,0,-1):
if integer[p] != '0':
integer = integer[0:p+1]
break
exponent += point
# Cannot handle more than _MAXLEN digits
lint = len(integer)
if lint > _MAXLEN:
r = integer[_MAXLEN]
integer = integer[0:_MAXLEN]
if r>='5':
integer = str(int(integer)+1)
if len(integer) > lint:
exponent += 1
if len(integer) > _MAXLEN:
integer = integer[0:_MAXLEN]
# Now the number is described by:
# sgn 0.integer "E" exponent
# Make space for sign
if sgn: length -= 1
while True:
# Minimum length representation of a number
# Length = Length of integer
# + 1 for Dot if needed (no exponent)
# + (2-4) for exponent
# exponent can be in the following forms
# nothing if dot can placed inside integer
# E# 2
# E## 3
# E-# 3
# E-## 4
# integer is given as 0.integer
lint = len(integer)
if useExp:
mNum = "%s%s%d"%(rexx.insert(".", integer, 1),expE,exponent-1)
elif exponent==-2:
mNum = ".00%s"%(integer)
elif exponent==-1:
mNum = ".0%s"%(integer)
elif exponent==0:
mNum = ".%s"%(integer)
elif exponent==1:
mNum = rexx.insert(".", integer, 1)
elif exponent==length:
mNum = "%s%s"%(integer,"0"*(length-lint))
elif exponent>1 and exponent<=lint:
mNum = rexx.insert(".", integer, exponent)
elif exponent>1 and exponent<=lint+2:
if exponent>lint:
mNum = "%s%s."%(integer, "0"*(exponent-lint))
else:
mNum = "%s."%(integer.ljust(exponent))
elif exponent>lint and exponent+1<length:
mNum = "%s%s."%(integer, "0"*(exponent-lint))
else:
mNum = "%s%s%d"%(rexx.insert(".", integer, 1),expE,exponent-1)
diff = len(mNum)-length
if diff<=0:
break
elif diff<=2:
r = integer[-1]
integer = integer[0:-1]
else:
r = integer[-diff]
integer = integer[0:-diff]
if r>='5':
lint = len(integer)
if lint==0: integer = 0
integer = str(int(integer)+1)
if len(integer) > lint:
exponent += 1
# Remove trailing zeros
for p in range(len(integer)-1,-1,-1):
if integer[p] != '0':
integer = integer[0:p+1]
break
else:
if useExp: return "0.%s0"%(expE)
else: return "0.0"
if sgn: mNum = "-%s"%(mNum)
return mNum
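# Expected behaviour, shown as an informal example (values traced by hand and
# assuming the REXX-style insert() semantics of the rexx module):
#   format(3.14159265358979, 8)   # -> "3.141593"  (rounded to fit in 8 characters)
#   format(0.00000123, 10)        # -> "1.23E-06"  (already fits, returned unchanged)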
#==============================================================================
# Dangerous dictionary that unknown keys return a user default value
# Use it with care
#==============================================================================
class DefaultDict(dict):
"""Dictionary where unknown keys will return a default value"""
def __init__(self, default=None):
dict.__init__(self)
self._default = default
# ----------------------------------------------------------------------
def __getitem__(self, key):
return self.get(key,self._default)
#==============================================================================
class ZeroDict(DefaultDict):
"""Dictionary where unknown keys will return 0.0"""
def __init__(self):
DefaultDict.__init__(self, 0.0)
#==============================================================================
class ZeroIntDict(DefaultDict):
"""Dictionary where unknown keys will return 0"""
def __init__(self):
DefaultDict.__init__(self, 0)
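# Usage sketch: a ZeroDict behaves like a counter whose missing keys read as 0.0.
#   z = ZeroDict()
#   z["hits"] += 1      # the missing key reads as 0.0, so this stores 1.0
#   z["misses"]         # -> 0.0 (reading does not create the key)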
#===============================================================================
# Vector class
# Inherits from List
#===============================================================================
class Vector(list):
"""Vector class"""
# ----------------------------------------------------------------------
def __init__(self, x=3, *args):
"""Create a new vector,
Vector(size), Vector(list), Vector(x,y,z,...)"""
list.__init__(self)
if isinstance(x,int) and not args:
for i in range(x):
self.append(0.0)
elif isinstance(x,(list,tuple)):
for i in x:
self.append(float(i))
else:
self.append(float(x))
for i in args:
self.append(float(i))
# ----------------------------------------------------------------------
def set(self, x, y, z=None):
"""Set vector"""
self[0] = x
self[1] = y
if z: self[2] = z
# ----------------------------------------------------------------------
def __repr__(self):
return "[%s]"%(", ".join([repr(x) for x in self]))
# ----------------------------------------------------------------------
def __str__(self):
return "[%s]"%(", ".join([(_format%(x)).strip() for x in self]))
# ----------------------------------------------------------------------
def eq(self, v, acc=_accuracy):
"""Test for equality with vector v within accuracy"""
if len(self) != len(v): return False
s2 = 0.0
for a,b in zip(self, v):
s2 += (a-b)**2
return s2 <= acc**2
# ----------------------------------------------------------------------
def __eq__(self, v): return self.eq(v)
# ----------------------------------------------------------------------
def __neg__(self):
"""Negate vector"""
new = Vector(len(self))
for i,s in enumerate(self):
new[i] = -s
return new
# ----------------------------------------------------------------------
def __add__(self, v):
"""Add 2 vectors"""
size = min(len(self),len(v))
new = Vector(size)
for i in range(size):
new[i] = self[i] + v[i]
return new
# ----------------------------------------------------------------------
def __iadd__(self, v):
"""Add vector v to self"""
for i in range(min(len(self),len(v))):
self[i] += v[i]
return self
# ----------------------------------------------------------------------
def __sub__(self, v):
"""Subtract 2 vectors"""
size = min(len(self),len(v))
new = Vector(size)
for i in range(size):
new[i] = self[i] - v[i]
return new
# ----------------------------------------------------------------------
def __isub__(self, v):
"""Subtract vector v from self"""
for i in range(min(len(self),len(v))):
self[i] -= v[i]
return self
# ----------------------------------------------------------------------
# Scale or Dot product
# ----------------------------------------------------------------------
def __mul__(self, v):
"""scale*Vector() or Vector()*Vector() - Scale vector or dot product"""
if isinstance(v,list):
return self.dot(v)
else:
return Vector([x*v for x in self])
# ----------------------------------------------------------------------
# Scale or Dot product
# ----------------------------------------------------------------------
def __rmul__(self, v):
"""scale*Vector() or Vector()*Vector() - Scale vector or dot product"""
if isinstance(v,Vector):
return self.dot(v)
else:
return Vector([x*v for x in self])
# ----------------------------------------------------------------------
# Divide by floating point
# ----------------------------------------------------------------------
def __div__(self, b):
return Vector([x/b for x in self])
# ----------------------------------------------------------------------
def __xor__(self, v):
"""Cross product"""
return self.cross(v)
# ----------------------------------------------------------------------
def dot(self, v):
"""Dot product of 2 vectors"""
s = 0.0
for a,b in zip(self, v):
s += a*b
return s
# ----------------------------------------------------------------------
def cross(self, v):
"""Cross product of 2 vectors"""
if len(self)==3:
return Vector( self[1]*v[2]-self[2]*v[1],
self[2]*v[0]-self[0]*v[2],
self[0]*v[1]-self[1]*v[0])
elif len(self)==2:
return self[0]*v[1]-self[1]*v[0]
else:
raise Exception("Cross product needs 2d or 3d vectors")
# ----------------------------------------------------------------------
def length2(self):
"""Return length squared of vector"""
s2 = 0.0
for s in self:
s2 += s**2
return s2
# ----------------------------------------------------------------------
def length(self):
"""Return length of vector"""
s2 = 0.0
for s in self:
s2 += s**2
return sqrt(s2)
__abs__ = length
# ----------------------------------------------------------------------
def arg(self):
"""return vector angle"""
return atan2(self[1], self[0])
# ----------------------------------------------------------------------
def norm(self):
"""Normalize vector and return length"""
l = self.length()
if l>0.0:
invlen = 1.0/l
for i in range(len(self)):
self[i] *= invlen
return l
normalize = norm
# ----------------------------------------------------------------------
def unit(self):
"""return a unit vector"""
v = self.clone()
v.norm()
return v
# ----------------------------------------------------------------------
def clone(self):
"""Clone vector"""
return Vector(self)
# ----------------------------------------------------------------------
def x(self): return self[0]
def y(self): return self[1]
def z(self): return self[2]
# ----------------------------------------------------------------------
def orthogonal(self):
"""return a vector orthogonal to self"""
xx = abs(self.x())
yy = abs(self.y())
if len(self)>=3:
zz = abs(self.z())
if xx < yy:
if xx < zz:
return Vector(0.0, self.z(), -self.y())
else:
return Vector(self.y(), -self.x(), 0.0)
else:
if yy < zz:
return Vector(-self.z(), 0.0, self.x())
else:
return Vector(self.y(), -self.x(), 0.0)
else:
return Vector(-self.y(), self.x())
# ----------------------------------------------------------------------
def direction(self, zero=_accuracy):
"""return containing the direction if normalized with any of the axis"""
v = self.clone()
l = v.norm()
if abs(l) <= zero: return "O"
if abs(v[0]-1.0)<zero:
return "X"
elif abs(v[0]+1.0)<zero:
return "-X"
elif abs(v[1]-1.0)<zero:
return "Y"
elif abs(v[1]+1.0)<zero:
return "-Y"
elif abs(v[2]-1.0)<zero:
return "Z"
elif abs(v[2]+1.0)<zero:
return "-Z"
else:
#nothing special about the direction, return N
return "N"
# ----------------------------------------------------------------------
# Set the vector directly in polar coordinates
# @param ma magnitude of vector
# @param ph azimuthal angle in radians
# @param th polar angle in radians
# ----------------------------------------------------------------------
def setPolar(self, ma, ph, th):
"""Set the vector directly in polar coordinates"""
sf = sin(ph)
cf = cos(ph)
st = sin(th)
ct = cos(th)
self[0] = ma*st*cf
self[1] = ma*st*sf
self[2] = ma*ct
# ----------------------------------------------------------------------
def phi(self):
"""return the azimuth angle."""
if Cmp0(self.x()) and Cmp0(self.y()):
return 0.0
return atan2(self.y(), self.x())
# ----------------------------------------------------------------------
def theta(self):
"""return the polar angle."""
if Cmp0(self.x()) and Cmp0(self.y()) and Cmp0(self.z()):
return 0.0
return atan2(self.perp(),self.z())
# ----------------------------------------------------------------------
def cosTheta(self):
"""return cosine of the polar angle."""
ptot = self.length()
if Cmp0(ptot):
return 1.0
else:
return self.z()/ptot
# ----------------------------------------------------------------------
def perp2(self):
"""return the transverse component squared
(R^2 in cylindrical coordinate system)."""
return self.x() * self.x() + self.y() * self.y()
# ----------------------------------------------------------------------
def perp(self):
"""@return the transverse component
(R in cylindrical coordinate system)."""
return sqrt(self.perp2())
# ----------------------------------------------------------------------
# Return a random 3D vector
# ----------------------------------------------------------------------
@staticmethod
def random():
cosTheta = 2.0*random.random()-1.0
sinTheta = sqrt(1.0 - cosTheta**2)
phi = 2.0*pi*random.random()
return Vector(cos(phi)*sinTheta, sin(phi)*sinTheta, cosTheta)
#-------------------------------------------------------------------------------
# Basic 3D Vectors
#-------------------------------------------------------------------------------
Vector.O = Vector(0.0, 0.0, 0.0)
Vector.X = Vector(1.0, 0.0, 0.0)
Vector.Y = Vector(0.0, 1.0, 0.0)
Vector.Z = Vector(0.0, 0.0, 1.0)
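# Usage sketch of the Vector class (illustrative values):
#   a = Vector(1.0, 0.0, 0.0)
#   b = Vector(0.0, 1.0, 0.0)
#   a * b                          # dot product   -> 0.0
#   a ^ b                          # cross product -> [0.0, 0.0, 1.0]
#   (a + b).length()               # -> sqrt(2)
#   Vector(3.0, 4.0, 0.0).norm()   # normalizes in place and returns the length 5.0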
# ------------------------------------------------------------------------------
# Return a random color
# ------------------------------------------------------------------------------
def rndColor(x):
def rnd(zw):
w = zw & 0xffff
z = (zw >> 16) & 0xffff
z = 36969 * (z & 0xffff) + (z >> 16)
w = 18000 * (w & 0xffff) + (w >> 16)
return (z << 16) + (w & 0xffff)
x = rnd(x)
R = (x % 224) + 16
x = rnd(x)
G = (x % 224) + 16
x = rnd(x)
B = (x % 224) + 16
return R<<16 | G<<8 | B
#===============================================================================
# Matrix class
# Use 4x4 matrix for vector transformations
#===============================================================================
class Matrix(list):
"""Matrix 4x4 used for vector transformations"""
# ----------------------------------------------------------------------
def __init__(self, rows=4, cols=-1, type=0):
"""
Matrix(rows=4, cols=-1, type=0|1)
if rows is integer then
Create a matrix rows x cols either
zero(type=0) or unary(type=1)
elif rows is a list of lists
create a matrix from a double-list
"""
if isinstance(rows, list):
lst = rows
self.rows = len(lst)
self.extend([[]]*self.rows)
if isinstance(lst[0], list):
self.cols = len(lst[0])
for i in range(self.rows):
self[i] = lst[i][:]
if len(self[i]) != self.cols:
raise Exception("Not a valid double-list for a matrix")
else:
self.cols = 1
for i in range(self.rows):
self[i] = [lst[i]]
else:
if rows<2: raise Exception("Array size too small")
if cols<0: cols=rows
self.rows = rows
self.cols = cols
self += [[]]*rows
if type==1:
self.unary()
else:
self.zero()
# ----------------------------------------------------------------------
# Create a diagonal square matrix from a list
# ----------------------------------------------------------------------
@staticmethod
def diagonal(lst):
m = Matrix(len(lst), type=0)
i = 0
for item in lst:
m[i][i] = item
i += 1
return m
# ----------------------------------------------------------------------
# append row
# ----------------------------------------------------------------------
def append(self, col):
list.append(self, col)
self.rows += 1
# ----------------------------------------------------------------------
@staticmethod
def translate(x, y=0.0, z=0.0):
"""m = Matrix.translate(x,y,z|vector)
@return a translation matrix"""
m = Matrix(4, type=1)
if isinstance(x,(list,tuple)):
m[0][3] = x[0]
m[1][3] = x[1]
m[2][3] = x[2]
else:
m[0][3] = x
m[1][3] = y
m[2][3] = z
return m
# ----------------------------------------------------------------------
@staticmethod
def scale(sx, sy=None, sz=None):
"""m = Matrix.scale(scale|vector)
@return a scaling matrix"""
m = Matrix(4, type=1)
if sy is None: sy = sx
if sz is None: sz = sx
if isinstance(sx,(list,tuple)):
m[0][0] = sx[0]
m[1][1] = sx[1]
m[2][2] = sx[2]
else:
m[0][0] = sx
m[1][1] = sy
m[2][2] = sz
return m
# ----------------------------------------------------------------------
def zero(self):
"""Zero matrix"""
for i in range(self.rows):
self[i] = [0.0]*self.cols
# ----------------------------------------------------------------------
def unary(self):
"""Unary matrix"""
self.zero()
for i in range(min(self.rows, self.cols)):
self[i][i] = 1.0
# ----------------------------------------------------------------------
# Create a transformation matrix from 3 normalized vectors
# and optionally a translation
# ----------------------------------------------------------------------
def make(self,X,Y,Z=None,T=None):
"""Create a transformation matrix from 3 normalized vectors"""
self.unary()
if (self.rows==3 or self.rows==4) and self.cols==self.rows:
if Z is None:
Z = X ^ Y
Z.normalize()
for i in range(3):
self[0][i] = X[i]
self[1][i] = Y[i]
self[2][i] = Z[i]
if T is not None and self.rows==4: self[i][3] = T[i]
else:
raise Exception("Matrix.make() works only on Matrix(3x3) or Matrix(4x4)")
# ----------------------------------------------------------------------
def __repr__(self):
"""Multi line string representation of matrix"""
s = ""
for i in range(self.rows):
if i==0:
first="/"
last="\\"
elif i==self.rows-1:
first="\\"
last="/"
else:
first=last="|"
s += first
for j in range(self.cols):
s += " " + repr(self[i][j])
s += " " + last + "\n"
return s
# ----------------------------------------------------------------------
def __str__(self):
"""Multi line string representation of matrix"""
s = ""
for i in range(self.rows):
if i==0:
first="/"
last="\\"
elif i==self.rows-1:
first="\\"
last="/"
else:
first=last="|"
s += first
for j in range(self.cols):
s += " " + _format % self[i][j]
s += " " + last + "\n"
return s
# ----------------------------------------------------------------------
def writeOctave(self, filename, name):
"""Write an octave matrix file"""
f = open(filename,"w")
f.write("# bmath.Matrix\n")
f.write("# name: %s\n"%(name))
f.write("# type: matrix\n")
f.write("# rows: %d\n"%(self.rows))
f.write("# columns: %d\n"%(self.cols))
for i in range(self.rows):
for j in range(self.cols):
f.write("%s "%(repr(self[i][j])))
f.write("\n")
f.close()
# ----------------------------------------------------------------------
def T(self):
"""@return transpose matrix"""
m = Matrix(self.cols, self.rows)
for i in range(self.rows):
for j in range(self.cols):
m[j][i] = self[i][j]
return m
transpose=T
# ----------------------------------------------------------------------
def trace(self):
"""Return trace of matrix (sum of diagonal elements)"""
t = 0.0
for i in range(min(self.rows,self.cols)):
t += self[i][i]
return t
# ----------------------------------------------------------------------
def __eq__(self, m):
"""Test for equality of 2 matrices"""
if self.rows!=m.rows or self.cols!=m.cols:
return False
for i in range(self.rows):
for j in range(self.cols):
if abs(self[i][j] - m[i][j]):
return False
return True
# ----------------------------------------------------------------------
# Create a rotation matrix around one axis
# X = 0
# Y = 1
# Z = 2
# or an arbitrary vector
# ----------------------------------------------------------------------
def rotate(self, angle, axis):
"""Add rotation elements to the matrix around one axis
Axis X=0, Y=1, Z=2, or an arbitrary one given by vector axis"""
self.unary()
c = cos(angle)
s = sin(angle)
if isinstance(axis,int):
m1 = ((axis+1)%3)+1
m2 = m1%3
m1 = m1 - 1
self[m1][m1] = c
self[m2][m2] = c
self[m1][m2] = -s
self[m2][m1] = s
elif isinstance(axis,Vector):
l = axis.length()
x = axis[0] / l
y = axis[1] / l
z = axis[2] / l
c1 = 1 - c
self[0][0] = x*x + (1-x*x)*c
self[0][1] = x*y*c1 - z*s
self[0][2] = x*z*c1 + y*s
self[1][0] = x*y*c1 + z*s
self[1][1] = y*y + (1-y*y)*c
self[1][2] = y*z*c1 - x*s
self[2][0] = x*z*c1 - y*s
self[2][1] = y*z*c1 + x*s
self[2][2] = z*z + (1-z*z)*c
# ----------------------------------------------------------------------
@staticmethod
def rotX(angle):
"""m = Matrix.rotX(angle) - Return a rotation matrix around X"""
m = Matrix(4, type=1)
m.rotate(angle, 0)
return m
# ----------------------------------------------------------------------
@staticmethod
def rotY(angle):
"""m = Matrix.rotY(angle) - Return a rotation matrix arround Y"""
m = Matrix(4, type=1)
m.rotate(angle, 1)
return m
# ----------------------------------------------------------------------
@staticmethod
def rotZ(angle):
"""m = Matrix.rotZ(angle) - Return a rotation matrix arround Z"""
m = Matrix(4, type=1)
m.rotate(angle, 2)
return m
# ----------------------------------------------------------------------
def getEulerRotation(self):
"""return the Euler rotation angles
ROTX(x) * ROTY(y) * ROTZ(z)"""
# cos(z)*cos(y)
# sin(z)*cos(y)
# -sin(y)
# -sin(z)*cos(x)+cos(z)*sin(y)*sin(x)
# cos(z)*cos(x)+sin(z)*sin(y)*sin(x)
# cos(y)*sin(x)
# sin(z)*sin(x)+cos(z)*sin(y)*cos(x)
# -cos(z)*sin(x)+sin(z)*sin(y)*cos(x)
# cos(y)*cos(x)
rx = atan2(self[1][2], self[2][2])
ry = -asin( self[0][2])
rz = atan2(self[0][1], self[0][0])
return rx,ry,rz
# ----------------------------------------------------------------------
@staticmethod
def eulerRotation(rx, ry, rz):
"""return a rotation matrix based on the Euler rotation
ROTX(x) * ROTY(y) * ROTZ(z)"""
m = Matrix(4, type=1)
cx = cos(rx)
cy = cos(ry)
cz = cos(rz)
sx = sin(rx)
sy = sin(ry)
sz = sin(rz)
row = m[0]
row[0] = cz*cy
row[1] = sz*cy
row[2] = -sy
row = m[1]
row[0] = -sz*cx+cz*sy*sx
row[1] = cz*cx+sz*sy*sx
row[2] = cy*sx
row = m[2]
row[0] = sz*sx+cz*sy*cx
row[1] = -cz*sx+sz*sy*cx
row[2] = cy*cx
return m
# ----------------------------------------------------------------------
def __add__(self, B):
"""Add 2 matrices"""
if self.rows != B.rows or self.cols != B.cols:
raise Exception("Matrix.add: matrices same size")
m = Matrix(self.rows, self.cols)
for i in range(self.rows):
mrow = m[i]
arow = self[i]
brow = B[i]
for j in range(self.cols):
mrow[j] = arow[j] + brow[j]
return m
# ----------------------------------------------------------------------
def __sub__(self, B):
"""Subtract 2 matrices"""
if self.rows != B.rows or self.cols != B.cols:
raise Exception("Matrix.add: matrices same size")
m = Matrix(self.rows, self.cols)
for i in range(self.rows):
mrow = m[i]
arow = self[i]
brow = B[i]
for j in range(self.cols):
mrow[j] = arow[j] - brow[j]
return m
# ----------------------------------------------------------------------
def __neg__(self):
"""Negate matrix"""
m = Matrix(self.rows, self.cols)
for i in range(self.rows):
mrow = m[i]
mold = self[i]
for j in range(self.cols):
mrow[j] = -mold[j]
return m
# ----------------------------------------------------------------------
def __mul__(self, B):
"""Multiply two matrices or vector
A.__mul__(B|vec) <==> A*B or A*vec"""
if isinstance(B, Matrix): # has to be a matrix of same cxN * Nxr
if self.cols != B.rows:
raise Exception("arrays don't have the correct dimensions")
r = Matrix(self.rows, B.cols)
for i in range(self.rows):
for j in range(B.cols):
s = 0.0
for k in range(self.cols):
s += self[i][k]*B[k][j]
r[i][j] = s
return r
elif isinstance(B, list): # Vector or list
vecsize = len(B)
v = Vector(vecsize)
for i in range(vecsize):
for j in range(min(self.cols, vecsize)):
v[i] += self[i][j] * B[j]
for j in range(vecsize, self.cols):
v[i] += self[i][j]
return v
else:
for row in self:
for i in range(self.cols):
row[i] *= B
return self
# -----------------------------------------------------------------------
# Special function to multiply a transformation matrix with a vector
# ignoring the translation
# -----------------------------------------------------------------------
def multNoTranslation(self, B):
"""Multiply matrix with a vector ignoring the translation part"""
if not isinstance(B, list):
raise Exception("Invalid operation")
vecsize = len(B)
v = Vector(vecsize)
for i in range(vecsize):
for j in range(min(self.cols, vecsize)):
v[i] += self[i][j] * B[j]
return v
# ----------------------------------------------------------------------
def inv(self):
"""Inverse matrix in place"""
if self.rows != self.cols:
raise Exception("inverting a non square matrix")
index = [ 0 ] * self.rows
self.__ludcmp(index)
y = Matrix(self.rows)
for j in range(self.rows):
col = [ 0.0 ] * self.rows
col[j] = 1.0
self.__lubksb(index,col)
for i in range(self.rows):
y[i][j] = col[i]
for j in range(self.rows):
self[j] = y[j]
inverse = inv
# ----------------------------------------------------------------------
def clone(self):
"""Clone matrix"""
m = Matrix(self.rows, self.cols)
for i in range(self.rows):
m[i] = self[i][:]
return m
# ----------------------------------------------------------------------
# determinant with Gauss method
# ----------------------------------------------------------------------
def det(self, eps=_accuracy):
"""determinant of square matrix using Gauss method"""
if self.rows == 2:
return self[0][0]*self[1][1] - self[1][0]*self[0][1]
elif self.rows == 3:
return self[0][0]*(self[1][1]*self[2][2] - self[2][1]*self[1][2]) \
- self[0][1]*(self[1][0]*self[2][2] - self[2][0]*self[1][2]) \
+ self[0][2]*(self[1][0]*self[2][1] - self[2][0]*self[1][1])
M = self.clone()
s = 1.0
n = M.rows
for i in range(n-1):
# find the absolute maximum value
ma = abs(M[i][i])
k = i
for j in range(i+1, n):
if abs(M[j][i]) > ma:
ma = abs(M[j][i])
k = j
if ma < eps: return 0.0
# swap rows i,k
if i != k:
s = -s # Change sign of determinant
for j in range(n):
d = M[i][j]
M[i][j] = M[k][j]
M[k][j] = d
# make all the following rows with zero at the i column
for j in range(i+1, n):
if abs(M[j][i]) < _accuracy: continue
d = - M[i][i] / M[j][i]
s *= d
for k in range(i,n):
M[j][k] = M[i][k] + d * M[j][k]
d = M[0][0] / s
for i in range(1,n):
d *= M[i][i]
return d
determinant = det
# ----------------------------------------------------------------------
# LU decomposition.
# Parameters
# index[0:size] row permutation record
# ----------------------------------------------------------------------
def __ludcmp(self, index): #procedure expose indx.
size = self.rows
vv = [ 0.0 ] * size
for i in range(size):
big = 0.0
for j in range(size):
big = max(abs(self[i][j]), big)
if big==0:
raise Exception("Singular matrix found")
vv[i] = 1.0/big
for j in range(size):
for i in range(j):
s = self[i][j]
for k in range(i):
s -= self[i][k] * self[k][j]
self[i][j] = s
big = 0.0
for i in range(j,size):
s = self[i][j]
for k in range(j):
s -= self[i][k] * self[k][j]
self[i][j] = s
dum = vv[i]*abs(s)
if dum >= big:
big = dum
imax = i
if j != imax:
for k in range(size):
dum = self[imax][k]
self[imax][k] = self[j][k]
self[j][k] = dum
vv[imax] = vv[j]
index[j] = imax
if self[j][j] == 0.0:
self[j][j] = 1E-20
if j != size-1:
dum = 1.0/self[j][j]
for i in range(j+1,size):
self[i][j] *= dum
# ----------------------------------------------------------------------
# backward substitution
# index[0:size] row permutation record
# col[0:size] right hand vector (?)
# ----------------------------------------------------------------------
def __lubksb(self, index, col):
ii = -1
size = self.rows
for i in range(size):
ip = index[i]
s = col[ip]
col[ip] = col[i]
if ii >= 0:
for j in range(ii,i):
s -= self[i][j] * col[j]
elif s != 0.0:
ii = i
col[i] = s
for i in range(size-1,-1,-1):
s = col[i]
for j in range(i+1,size):
s -= self[i][j] * col[j]
col[i] = s/self[i][i]
#-------------------------------------------------------------------------------
# Basic Matrices
#-------------------------------------------------------------------------------
Matrix.O = Matrix(4, type=0)
Matrix.U = Matrix(4, type=1)
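# Usage sketch: compose a translation with a rotation and apply it to a point.
# The values are illustrative; the results are approximate due to floating point.
#   t = Matrix.translate(1.0, 2.0, 3.0)
#   r = Matrix.rotZ(radians(90.0))
#   m = t * r                      # rotate about Z first, then translate
#   m * Vector(1.0, 0.0, 0.0)      # -> approximately [1.0, 3.0, 3.0]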
#-------------------------------------------------------------------------------
# Quaternion
#
# Note: See the following for more information on quaternions:
#
# - Shoemake, K., Animating rotation with quaternion curves, Computer
# Graphics 19, No 3 (Proc. SIGGRAPH'85), 245-254, 1985.
# - Pletinckx, D., Quaternion calculus as a basic tool in computer
# graphics, The Visual Computer 5, 2-13, 1989.
#-------------------------------------------------------------------------------
class Quaternion(list):
def __init__(self, a, b=None, c=None, d=None):
list.__init__(self)
if isinstance(a, Quaternion):
self.extend(a)
elif isinstance(a, Matrix):
tr = a[0][0] + a[1][1] + a[2][2] + 1.0 # trace of matrix
if tr > 0:
S = sqrt(tr) * 2.0 # S=4*qw
qw = 0.25 * S
qx = (a[2][1] - a[1][2]) / S
qy = (a[0][2] - a[2][0]) / S
qz = (a[1][0] - a[0][1]) / S
elif a[0][0] > a[1][1] and a[0][0] > a[2][2]:
S = sqrt(1.0 + a[0][0] - a[1][1] - a[2][2]) * 2.0 # S=4*qx
qx = 0.25 * S
qy = (a[0][1] + a[1][0]) / S
qz = (a[0][2] + a[2][0]) / S
qw = (a[2][1] - a[1][2]) / S
elif a[1][1] > a[2][2]:
S = sqrt(1.0 + a[1][1] - a[0][0] - a[2][2]) * 2.0 # S=4*qy
qx = (a[0][1] + a[1][0]) / S
qy = 0.25 * S
qz = (a[1][2] + a[2][1]) / S
qw = (a[0][2] - a[2][0]) / S
else:
S = sqrt(1.0 + a[2][2] - a[0][0] - a[1][1]) * 2.0 # S=4*qz
qx = (a[0][2] + a[2][0]) / S
qy = (a[1][2] + a[2][1]) / S
qz = 0.25 * S
qw = (a[1][0] - a[0][1]) / S
self.extend([qx, qy, qz, qw])
elif isinstance(a,Vector) and isinstance(b,float):
s = sin(b/2.0) / a.length()
self.append(a[0]*s)
self.append(a[1]*s)
self.append(a[2]*s)
self.append(cos(b/2.0))
else:
self.extend([a,b,c,d])
# ----------------------------------------------------------------------
# Quaternions always obey: a^2 + b^2 + c^2 + d^2 = 1.0
# If they don't add up to 1.0, dividing by their magnitude will
# renormalize them.
# ----------------------------------------------------------------------
def norm(self):
"""normalize quaternion"""
mag = sqrt(self[0]**2 + self[1]**2 + self[2]**2 + self[3]**2)
self[0] /= mag
self[1] /= mag
self[2] /= mag
self[3] /= mag
return mag
normalize = norm
# ----------------------------------------------------------------------
def vector(self):
"""return vector of quaternion"""
return Vector(self[0], self[1], self[2])
# ----------------------------------------------------------------------
# return rotation matrix
# ----------------------------------------------------------------------
def matrix(self):
"""return rotation matrix"""
m = Matrix(4, type=1)
m[0][0] = 1.0 - 2.0 * (self[1] * self[1] + self[2] * self[2])
m[0][1] = 2.0 * (self[0] * self[1] - self[2] * self[3])
m[0][2] = 2.0 * (self[2] * self[0] + self[1] * self[3])
m[1][0] = 2.0 * (self[0] * self[1] + self[2] * self[3])
m[1][1] = 1.0 - 2.0 * (self[2] * self[2] + self[0] * self[0])
m[1][2] = 2.0 * (self[1] * self[2] - self[0] * self[3])
m[2][0] = 2.0 * (self[2] * self[0] - self[1] * self[3])
m[2][1] = 2.0 * (self[1] * self[2] + self[0] * self[3])
m[2][2] = 1.0 - 2.0 * (self[1] * self[1] + self[0] * self[0])
return m
# ----------------------------------------------------------------------
# Given two rotations, e1 and e2, expressed as quaternion rotations,
# figure out the equivalent single rotation and stuff it into dest.
# This routine also normalizes the result every RENORMCOUNT times it is
# called, to keep error from creeping in.
# ----------------------------------------------------------------------
def __add__(self, b):
v1 = self.vector()
v2 = b.vector()
t1 = v1 * b[3]
t2 = v2 * self[3]
t3 = v2.cross(v1)
tf = t1 + t2 + t3
q = Quaternion(tf, self[3]*b[3] - v1.dot(v2))
q.norm()
return q
# ----------------------------------------------------------------------
def __iadd__(self, b):
v1 = self.vector()
v2 = b.vector()
t1 = v1 * b[3]
t2 = v2 * self[3]
t3 = v2.cross(v1)
tf = t1 + t2 + t3
self[0] = tf[0]
self[1] = tf[1]
self[2] = tf[2]
self[3] = self[3]*b[3] - v1.dot(v2)
self.norm()
return self
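# Example (illustrative sketch): building a quaternion from an axis-angle pair
# and converting it back to a rotation matrix.  This assumes the Vector class
# and the math names (pi, sin, cos) used elsewhere in this module.
#
#   q = Quaternion(Vector(0.0, 0.0, 1.0), pi/2)   # 90 degrees about the z axis
#   m = q.matrix()
#   # The upper-left 3x3 block of m is approximately
#   #   [[0, -1, 0],
#   #    [1,  0, 0],
#   #    [0,  0, 1]]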
#-------------------------------------------------------------------------------
def gauss(A, B):
"""Solve A*X = B using the Gauss elimination method"""
n = len(A)
s = [0.0]*n
X = [0.0]*n
p = [i for i in range(n)]
for i in range(n):
s[i] = max([abs(x) for x in A[i]])
for k in range(n-1):
# select j>=k so that
    #  |A[p[j]][k]| / s[p[j]] >= |A[p[i]][k]| / s[p[i]] for i = k,k+1,...,n
j = k
ap = abs(A[p[j]][k]) / s[p[j]]
for i in range(k+1, n):
api = abs(A[p[i]][k]) / s[p[i]]
if api>ap:
j = i
ap = api
if j!=k: p[k],p[j] = p[j],p[k] # Swap values
for i in range(k+1, n):
z = A[p[i]][k] / A[p[k]][k]
A[p[i]][k] = z
for j in range(k+1, n):
A[p[i]][j] -= z * A[p[k]][j]
for k in range(n-1):
for i in range(k+1,n):
B[p[i]] -= A[p[i]][k] * B[p[k]]
for i in range(n-1, -1, -1):
X[i] = B[p[i]]
for j in range(i+1, n):
X[i] -= A[p[i]][j] * X[j]
X[i] /= A[p[i]][i]
return X
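# Example (illustrative sketch): solving the 2x2 system
#     2x +  y = 3
#      x + 3y = 5
#
#   gauss([[2.0, 1.0], [1.0, 3.0]], [3.0, 5.0])   # -> approximately [0.8, 1.4]
#
# Note that gauss() overwrites A (with the elimination multipliers) and B.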
#-------------------------------------------------------------------------------
def solveOverDetermined(A, B, W=None):
"""Solve the overdetermined linear system defined by the matrices A,B
such as A*X = B
Optionally a weight can be specified"""
if A.rows < A.cols:
raise Exception("solveOverDetermined: A matrix has more columns than rows")
AT = A.transpose()
if W:
Wd = Matrix.diagonal(W)
ATA = AT * Wd * A
ATB = AT * Wd * B
else:
ATA = AT * A
ATB = AT * B
ATA.inv()
RT = ATA * ATB
return [RT[i][0] for i in range(len(RT))]
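# Example (illustrative sketch, assuming the Matrix class defined earlier in
# this module accepts a list of rows, as it is used in fitPlane() below):
# least-squares fit of a single constant to the observations 1, 2, 3.
#
#   A = Matrix([[1.0], [1.0], [1.0]])
#   B = Matrix([[1.0], [2.0], [3.0]])
#   solveOverDetermined(A, B)   # -> approximately [2.0], the mean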
#-------------------------------------------------------------------------------
def linear(X, Y):
"""
Solve linear regression y = ax + b
@return a,b,r
"""
Sx = Sy = Sx2 = Sy2 = Sxy = 0.0
for x,y in zip(X,Y):
Sx += x
Sy += y
Sx2 += x*x
Sy2 += y*y
Sxy += x*y
n = float(len(X))
try:
b = (Sxy - Sx*Sy/n) / (Sx2 - Sx*Sx/n)
a = Sy/n - b * Sx/n
        r = (Sxy - Sx*Sy/n) / sqrt((Sx2 - Sx*Sx/n) * (Sy2 - Sy*Sy/n))
return a,b,r
except ZeroDivisionError:
return None
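# Example (illustrative sketch): three points lying exactly on y = 1 + 2x.
#
#   linear([0.0, 1.0, 2.0], [1.0, 3.0, 5.0])
#   # -> approximately (1.0, 2.0, 1.0), i.e. intercept 1, slope 2, r = 1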
#-------------------------------------------------------------------------------
# Eigenvalues of real symmetric matrices
#
# The algorithm used in this routine is known as the iterative
# Jacobi method.
# It exploits the property of diagonal matrices -- matrices whose
# elements are all zero except those on the main diagonal -- that
# their eigenvalues are exactly the diagonal elements.
# Using the transformations
#          T                      T
#   A1 = R1 (f) A R1(f),   A2 = R2 (f) A1 R2(f)
# we keep transforming the matrix A until the sum of all the
# off-diagonal elements reaches a user-chosen threshold or
# becomes zero.
# The steps of the procedure are:
#  1. Find the off-diagonal element with the largest absolute value.
#     This gives p and q.
#  2. Determine the rotation angle f. This gives sinf and cosf.
#  3. Apply the transformation Ai -> Ai+1
#  4. Check whether the sum of the off-diagonal elements has reached
#     the desired value. If so, the diagonal elements are the
#     approximations of the eigenvalues; if not, return to step 1.
# e.g.      |  1 -2 -1  |
#       A = | -2  1 -1  |
#           | -1 -1  2.5|
# The off-diagonal element with the largest absolute value is A(1,2) = -2.
# We compute the angle f, co=cos(f), si=sin(f) and perform the
# transformation
#       | co -si  0 |   |  1 -2 -1  |   |  co si  0 |
#   A = | si  co  0 | x | -2  1 -1  | x | -si co  0 |
#       | 0    0  1 |   | -1 -1  2.5|   |  0   0  1 |
#
#
# The parameters of the routine are:
# A     - square matrix
# eps   - accuracy (sum of squares of the off-diagonal elements)
# check - whether or not to verify that the input matrix is symmetric
#-------------------------------------------------------------------------------
def eigenvalues(M, eps=_accuracy, check=False):
"""Return eigen values and eigen vectors of a symmetric matrix"""
n = M.rows
    # check whether the matrix is symmetric
if check:
if n != M.cols: return None
for i in range(n):
for j in range(i,n):
if M[i][j] != M[j][i]:
return None
# Allocate arrays
A = M.clone()
R = Matrix(n, type=0)
RT = Matrix(n, type=0)
ZW = Matrix(n, type=0)
V = None
    # normally it should reject the matrix after a few attempts
while True:
        # Step 1. Find the off-diagonal element with the largest absolute value
p=0; q=1; el=abs(A[p][q])
for i in range(1, n):
for j in range(i):
if abs(A[i][j]) > el:
el = abs(A[i][j])
p = i; q = j
if el==0: break
        # Build R and RT
for i in range(n):
for j in range(n):
R[i][j] = RT[i][j] = (i==j)
        # Step 2. Determine the angle f, cosf and sinf
fi = (A[q][q] - A[p][p]) / (2*A[p][q])
t = 1 / (fi + sqrt(fi*fi+1))
if fi<0: t = -t
co = 1 / sqrt(1+t*t)
si = t / sqrt(1+t*t)
R[p][p] = R[q][q] = co
RT[p][p] = RT[q][q] = co
R[p][q] = si; R[q][p] = -si
RT[p][q] = -si; RT[q][p] = si
        # Step 3. Transformation Ai+1 = RT * Ai * R,
        # as well as the product Rn*...*R2*R1 that
        # gives us the eigenvectors
if V is None:
V = R.clone()
else:
V = V * R
for i in range(n):
for j in range(n):
if j!=p and j!=q:
ZW[i][j] = A[i][j]
else:
zw1 = 0
for k in range(n):
zw1 += A[i][k] * R[k][j]
ZW[i][j] = zw1
for i in range(n):
for j in range(n):
if i!=p and i!=q:
A[i][j] = ZW[i][j]
else:
zw1 = 0
for k in range(n):
zw1 += RT[i][k] * ZW[k][j]
A[i][j] = zw1
        # Step 4. Compute the sum of the off-diagonal elements and check for convergence
zw1 = 0
k = 0
for i in range(1,n):
for j in range(i):
zw1 += A[i][j] * A[i][j]
k += 1
zw1 /= n
# Exit condition
if zw1 <= eps: break
return ([A[i][i] for i in range(n)],V.T())
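# Example (illustrative sketch, assuming the Matrix class accepts a list of
# rows as it is used in fitPlane() below): the symmetric matrix [[2,1],[1,2]]
# has eigenvalues 1 and 3.
#
#   vals, vecs = eigenvalues(Matrix([[2.0, 1.0], [1.0, 2.0]]))
#   # vals is approximately [1.0, 3.0]; vecs holds the eigenvectors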
#-------------------------------------------------------------------------------
# Given a function, and given a bracketing triplet of abscissas ax,bx,cx (such
# that bx is between ax and cx, and f(bx) is less than both f(ax) and f(cx)),
# this routine performs a golden section search for the minimum, isolating it
# to a fractional precision of about eps. The abscissa of the minimum is
# returned.
#
# @param func function to be evaluated
# @param ax triplet of abscissas ax,bx,cx
# @param bx where func(x+bx*d) < min[ func(x+ax*d), func(x+cx*d) ]
# @param cx ...
# @param x starting vector/value
# @param d direction vector/value
# @param eps accuracy of search
#-------------------------------------------------------------------------------
def goldenSectionSearch(func, ax, bx, cx, x, d=1, eps=_accuracy):
R = 0.61803399 # The golden ratio
C = (1.0-R)
x0 = ax # At any given time we will keep track of four points
x3 = cx # x0, x1, x2, x3
if abs(cx-bx) > abs(bx-ax):
x1 = bx
x2 = bx + C*(cx-bx)
else:
x2 = bx
x1 = bx - C*(bx-ax)
f1 = func(x+x1*d) # The initial function evaluation
f2 = func(x+x2*d)
while abs(x3-x0) > eps*(abs(x1)+abs(x2)):
if f2 < f1:
x0 = x1
x1 = x2
x2 = R*x1 + C*x3
f1 = f2
f2 = func(x+x2*d)
else:
x3 = x2
x2 = x1
x1 = R*x2 + C*x0
f2 = f1
f1 = func(x+x1*d)
if f1 < f2:
return x1
else:
return x2
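# Example (illustrative sketch): minimizing f(t) = (t-1)^2 with the bracketing
# triplet 0 < 0.5 < 2 (f(0.5) is smaller than f(0) and f(2)), starting value
# x = 0.0 and the default direction d = 1.
#
#   goldenSectionSearch(lambda t: (t-1.0)**2, 0.0, 0.5, 2.0, 0.0)
#   # -> approximately 1.0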
#-------------------------------------------------------------------------------
# Generators for calculating a) the permutations of a sequence and
# b) the combinations and selections of a number of elements from a
# sequence. Uses Python 2.2 generators.
# Similar solutions found also in comp.lang.python
# Keywords: generator, combination, permutation, selection
#
# See also: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/105962
# See also: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66463
# See also: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66465
#-------------------------------------------------------------------------------
def xcombinations(items, n):
if n<=0: yield []
else:
for i in range(len(items)):
for cc in xcombinations(items[:i]+items[i+1:],n-1):
yield [items[i]]+cc
#-------------------------------------------------------------------------------
def xuniqueCombinations(items, n):
if n<=0: yield []
else:
for i in range(len(items)):
for cc in xuniqueCombinations(items[i+1:],n-1):
yield [items[i]]+cc
#-------------------------------------------------------------------------------
def xselections(items, n):
if n<=0: yield []
else:
for i in range(len(items)):
for ss in xselections(items, n-1):
yield [items[i]]+ss
#-------------------------------------------------------------------------------
def xpermutations(items):
return xcombinations(items, len(items))
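# Examples (illustrative sketch):
#
#   list(xuniqueCombinations([1,2,3], 2))  # -> [[1,2], [1,3], [2,3]]
#   list(xcombinations([1,2,3], 2))        # -> all 6 ordered pairs
#   list(xselections([0,1], 2))            # -> [[0,0], [0,1], [1,0], [1,1]]
#   len(list(xpermutations([1,2,3])))      # -> 6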
#-------------------------------------------------------------------------------
# Conversion between rectangular and polar coordinates
# Usage:
# real, real = rect(real, real [, deg=False])
# real, real = polar(real, real [, deg=False])
# Normally, rect() and polar() use radians for the angle; but,
# if deg=True is specified, degrees are used instead.
#-------------------------------------------------------------------------------
# radian if deg=False; degree if deg=True
def rect(r, w, deg=False):
"""
Convert from polar (r,w) to rectangular (x,y)
x = r cos(w)
y = r sin(w)
"""
if deg: w = radians(w)
return r * cos(w), r * sin(w)
#-------------------------------------------------------------------------------
# radian if deg=False; degree if deg=True
#-------------------------------------------------------------------------------
def polar(x, y, deg=False):
"""
Convert from rectangular (x,y) to polar (r,w)
r = sqrt(x^2 + y^2)
w = arctan(y/x) = [-pi,pi] = [-180,180]
"""
if deg:
return hypot(x, y), degrees(atan2(y, x))
else:
return hypot(x, y), atan2(y, x)
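# Examples (illustrative sketch, approximate values):
#
#   rect(1.0, 90.0, deg=True)    # -> (0.0, 1.0), up to rounding error
#   polar(1.0, 1.0, deg=True)    # -> (1.4142..., 45.0)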
#-------------------------------------------------------------------------------
# Quadratic equation: x^2 + ax + b = 0 (or ax^2 + bx + c = 0)
# Solve quadratic equation with real coefficients
#
# Usage
# number, number = quadratic(real, real [, real])
#
# Normally, x^2 + ax + b = 0 is assumed with the 2 coefficients as
# arguments; but, if 3 arguments are present, then ax^2 + bx + c = 0 is assumed.
#-------------------------------------------------------------------------------
#def quadratic(a, b, c=None):
# """
# x^2 + ax + b = 0 (or ax^2 + bx + c = 0)
# By substituting x = y-t and t = a/2,
# the equation reduces to y^2 + (b-t^2) = 0
# which has easy solution
# y = +/- sqrt(t^2-b)
# """
# if c: # (ax^2 + bx + c = 0)
# a, b = b / float(a), c / float(a)
# t = a / 2.0
# r = t**2 - b
# if r >= 0: # real roots
# y1 = sqrt(r)
# else: # complex roots
# y1 = cmath.sqrt(r)
# y2 = -y1
# return y1 - t, y2 - t
def quadratic(b, c, eps=_accuracy):
D = b*b - 4.0*c
if D <= 0.0:
x1 = -0.5*b # Always return this as a solution!!!
if D >= -eps*(b*b+abs(c)):
return x1,x1
else:
return None,None
else:
if b>0.0:
bD = -b - sqrt(D)
else:
bD = -b + sqrt(D)
return 0.5 * bD, 2.0 * c / bD
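# Example (illustrative sketch): x^2 - 5x + 6 = 0 has the roots 3 and 2.
#
#   quadratic(-5.0, 6.0)   # -> (3.0, 2.0)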
#-------------------------------------------------------------------------------
# Cubic equation: y^3 + a*y^2 + b*y + c = 0 (or ax^3 + bx^2 + cx + d = 0)
#
# Normally, x^3 + ax^2 + bx + c = 0 is assumed with the 3 coefficients as
# arguments; but, if 4 arguments are present, then ax^3 + bx^2 + cx + d = 0 is
# assumed.
#
# Even though both quadratic() and cubic() functions take real arguments, they
# can be modified to accept any real or complex coefficients because the method
# of solution does not make any assumptions.
#-------------------------------------------------------------------------------
def cubic(a, b, c, d=None, eps=_accuracy):
if d is not None: # (ax^3 + bx^2 + cx + d = 0)
a, b, c = b/float(a), c/float(a), d/float(a)
Q = (a*a - 3.0*b) / 9.0
R = (2.*a**3 - 9.*a*b + 27.*c)/54.
R2 = R**2
Q3 = Q**3
if R2 < Q3: # the cubic has 3 real solutions
theta = acos(R/sqrt(Q3))
sqrt_Q = sqrt(Q)
x1 = -2. * sqrt_Q * cos(theta/3.) - a/3.
x2 = -2. * sqrt_Q * cos((theta+2.*pi)/3.) - a/3.
x3 = -2. * sqrt_Q * cos((theta-2.*pi)/3.) - a/3.
return x1,x2,x3
A = -copysign(1.0,R) * (abs(R) + sqrt(R2 - Q3))**(1./3.)
if abs(A)>eps:
B = Q / A
else:
B = 0.0
return (A+B) - a/3., None, None
# imaginary roots
# x2 = -(A+B)/2 - a/3 + i*sqrt(3)*(A-B)
# x3 = -(A+B)/2 - a/3 - i*sqrt(3)*(A-B)
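# Example (illustrative sketch): y^3 - 6y^2 + 11y - 6 = (y-1)(y-2)(y-3),
# so all three roots are real.
#
#   cubic(-6.0, 11.0, -6.0)   # -> approximately (1.0, 3.0, 2.0)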
#-------------------------------------------------------------------------------
# Fit a plane to a set of points using least-squares fitting
#-------------------------------------------------------------------------------
def fitPlane(xyz):
# First do statistics with points
Sx = Sy = Sz = 0.0
Sx2 = Sy2 = Sz2 = 0.0
Sxy = Syz = Sxz = 0.0
for x,y,z in xyz:
Sx += x
Sy += y
Sz += z
Sx2 += x**2
Sy2 += y**2
Sz2 += z**2
Sxy += x*y
Syz += y*z
Sxz += x*z
n = float(len(xyz))
Sx /= n
Sy /= n
Sz /= n
Vx = Sx2/n - Sx**2
Vy = Sy2/n - Sy**2
Vz = Sz2/n - Sz**2
# Count zero variances
nv = int(abs(Vx)<=_accuracy) + int(abs(Vy)<=_accuracy) + int(abs(Vz)<=_accuracy)
if nv>1:
return None
elif nv==1:
# Planes parallel to axes
# Try the solution of x=Xo or y=Yo or z=Zo
if abs(Vx)<=_accuracy:
return 1.0, 0.0, 0.0, -Sx
elif abs(Vy)<=_accuracy:
return 0.0, 1.0, 0.0, -Sy
else:
return 0.0, 0.0, 1.0, -Sz
# Try a generic solution
# z = ax + by + d <=> ax + by -z + d = 0
# assuming c=-1
# it can only fail on ax + by + d = 0
#
# / Sx2 Sxy Sx \ / Sxz \
# | Sxy Sy2 Sy | * X = | Syz |
# \ Sx Sy n / \ Sz /
A = Matrix([[Sx2, Sxy, Sx], [Sxy, Sy2, Sy], [Sx, Sy, n]])
B = Matrix([[Sxz], [Syz], [Sz]])
try:
A.inverse()
X = A*B
return X[0][0], X[1][0], -1.0, X[2][0]
except:
pass
# Try a solution where c=0
# y = ax + d <=> ax -y +d = 0
#.
# / Sx2 Sx \ / Sxy \
# | | * X = | |
# \ Sx n / \ Sy /
A = Matrix([[Sx2, Sx], [Sx, n]])
B = Matrix([[Sxy], [Sy]])
try:
A.inverse()
X = A*B
return X[0][0], -1.0, 0.0, X[1][0]
except:
return None
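# Example (illustrative sketch): four points lying in the plane z = 0.
#
#   fitPlane([(0.0,0.0,0.0), (1.0,0.0,0.0), (0.0,1.0,0.0), (1.0,1.0,0.0)])
#   # -> approximately (0.0, 0.0, 1.0, 0.0), i.e. the plane 1*z + 0 = 0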
#-------------------------------------------------------------------------------
# Evaluating an n'th degree polynomial is a simple loop, starting with the
# highest coefficient a[n].
#-------------------------------------------------------------------------------
def polyeval(a, x):
"""
p(x) = polyeval(a, x)
= a[0] + a[1]x + a[2]x^2 +...+ a[n-1]x^{n-1} + a[n]x^n
= a[0] + x(a[1] + x(a[2] +...+ x(a[n-1] + a[n]x)...)
"""
p = 0
a.reverse()
for coef in a:
p = p*x + coef
a.reverse()
return p
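# Example (illustrative sketch): p(x) = 1 + 2x + 3x^2 evaluated at x = 2.
#
#   polyeval([1, 2, 3], 2)   # -> 17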
#-------------------------------------------------------------------------------
# Find the first derivative of a polynomial
#-------------------------------------------------------------------------------
def polyderiv(a):
"""
p'(x) = polyderiv(a)
= b[0] + b[1]x + b[2]x^2 +...+ b[n-2]x^{n-2} + b[n-1]x^{n-1}
where b[i] = (i+1)a[i+1]
"""
b = []
for i in range(1, len(a)):
b.append(i * a[i])
return b
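# Example (illustrative sketch): d/dx (1 + 2x + 3x^2) = 2 + 6x.
#
#   polyderiv([1, 2, 3])   # -> [2, 6]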
#-------------------------------------------------------------------------------
# Factor out a root from an n'th degree polynomial, and return the remaining
# (n-1)'th degree polynomial.
# list = polyreduce(list, number)
#-------------------------------------------------------------------------------
def polyreduce(a, root):
"""
Given x = r is a root of n'th degree polynomial p(x) = (x-r)q(x),
divide p(x) by linear factor (x-r) using the same algorithm as
polynomial evaluation. Then, return the (n-1)'th degree quotient
q(x) = polyreduce(a, r)
= c[0] + c[1]x + c[2]x^2 +...+ c[n-2]x^{n-2} + c[n-1]x^{n-1}
"""
c, p = [], 0
a.reverse()
for coef in a:
p = p * root + coef
c.append(p)
a.reverse()
c.reverse()
return c[1:]
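# Example (illustrative sketch): dividing x^3 - 6x^2 + 11x - 6 by (x - 3)
# leaves the quadratic x^2 - 3x + 2.
#
#   polyreduce([-6, 11, -6, 1], 3)   # -> [2, -3, 1]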
#-------------------------------------------------------------------------------
# Conversion from integer to Roman
#-------------------------------------------------------------------------------
def int2roman(num):
"""
Convert an integer to Roman numeral
"""
if not isinstance(num,int):
raise TypeError("expected integer, got %s" % type(input))
if not 0 < num < 4000:
raise ValueError("Argument must be between 1 and 3999")
ints = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)
nums = ('M', 'CM', 'D', 'CD','C', 'XC','L','XL','X','IX','V','IV','I')
result = ""
for i,n in zip(ints, nums):
count = int(num / i)
result += n * count
num -= i * count
return result
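# Example (illustrative sketch):
#
#   int2roman(1987)   # -> 'MCMLXXXVII'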
#-------------------------------------------------------------------------------
# Conversion from Roman to integer
#-------------------------------------------------------------------------------
def roman2int(roman):
"""
convert a roman string to integer
"""
if not isinstance(roman,str):
raise TypeError("expected string, got %s"%type(roman))
roman = roman.upper()
nums = ('M', 'D', 'C', 'L', 'X', 'V', 'I')
ints = (1000, 500, 100, 50, 10, 5, 1)
places = []
for c in roman:
if not c in nums:
raise ValueError("input is not a valid roman numeral: %s"%roman)
for i in range(len(roman)):
c = roman[i]
value = ints[nums.index(c)]
# If the next place holds a larger number, this value is negative.
try:
nextvalue = ints[nums.index(roman[i +1])]
if nextvalue > value:
value *= -1
except IndexError:
# there is no next place.
pass
places.append(value)
s = 0
for n in places: s += n
# Easiest test for validity...
if int2roman(s) == roman:
return s
else:
raise ValueError('input is not a valid roman numeral: %s' % roman)
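# Example (illustrative sketch):
#
#   roman2int('MCMLXXXVII')   # -> 1987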
| gpl-2.0 | 1,152,561,699,915,834,500 | 26.538658 | 100 | 0.457226 | false |
kdmurray91/khmer | sandbox/find-high-abund-kmers.py | 1 | 2443 | #! /usr/bin/env python2
#
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2015. It is licensed under
# the three-clause BSD license; see LICENSE.
# Contact: [email protected]
#
"""
@@
"""
import sys
import screed
import khmer
from khmer.khmer_args import build_counting_args, DEFAULT_MIN_TABLESIZE
DEFAULT_LOWER_CUTOFF = 2000
DEFAULT_UPPER_CUTOFF = 65535
###
def main():
    parser = build_counting_args()
parser.add_argument('-l', '--lower-cutoff', type=int, dest='lower_cutoff',
default=DEFAULT_LOWER_CUTOFF)
parser.add_argument('-u', '--upper-cutoff', type=int, dest='upper_cutoff',
default=DEFAULT_UPPER_CUTOFF)
parser.add_argument('output_filename')
parser.add_argument('input_filename')
args = parser.parse_args()
if not args.quiet:
        if args.min_hashsize == DEFAULT_MIN_TABLESIZE:
print >>sys.stderr, "** WARNING: hashsize is default! " \
"You absodefly want to increase this!\n** " \
"Please read the docs!"
print >>sys.stderr, '\nPARAMETERS:'
print >>sys.stderr, ' - kmer size = %d \t\t(-k)' % args.ksize
print >>sys.stderr, ' - n hashes = %d \t\t(-N)' % args.n_hashes
print >>sys.stderr, ' - min hashsize = %-5.2g \t(-x)' % \
args.min_hashsize
print >>sys.stderr, ''
        print >>sys.stderr, 'Estimated memory usage is %.2g bytes ' \
            '(n_hashes x min_hashsize)' % (
                args.n_hashes * args.min_hashsize)
print >>sys.stderr, '-' * 8
K = args.ksize
HT_SIZE = args.min_hashsize
N_HT = args.n_hashes
output = args.output_filename
input = args.input_filename
print 'lower cutoff:', args.lower_cutoff
print 'upper cutoff:', args.upper_cutoff
print 'Saving stoptags to %s' % output
print 'Loading sequences in %s' % input
###
print 'making hashtable'
ht = khmer.new_counting_hash(K, HT_SIZE, N_HT)
ht.set_use_bigcount(True)
print 'consuming input', input
hb = ht.collect_high_abundance_kmers(input,
args.lower_cutoff,
args.upper_cutoff)
print 'saving stoptags', output
hb.save_stop_tags(output)
if __name__ == '__main__':
main()
# vim: set ft=python ts=4 sts=4 sw=4 et tw=79:
| bsd-3-clause | 1,175,521,753,674,455,300 | 29.160494 | 78 | 0.588621 | false |
ncbray/pystream | lib/PADS/Sudoku.py | 1 | 67213 | """Sudoku.py
PADS-based command-line application for generating and solving Sudoku puzzles.
These puzzles are given as a 9x9 grid of cells, some of which are filled with
digits in the range 1-9. The task is to fill the remaining cells in such a
way that each row of the grid, each column of the grid, and each of nine 3x3
squares into which the grid is partitioned, all have one copy of each of the
nine digits.
A proper Sudoku puzzle must have a unique solution, and it should be possible
to reach that solution by a sequence of logical deductions without trial and
error. To the extent possible, we strive to keep the same ethic in our
automated solver, by mimicking human rule-based reasoning, rather than
resorting to brute force backtracking search.
D. Eppstein, July 2005.
"""
import random
import sys
from optparse import OptionParser
from BipartiteMatching import imperfections
from StrongConnectivity import StronglyConnectedComponents
from Repetitivity import NonrepetitiveGraph
from Wrap import wrap
try:
set
except NameError:
from sets import Set as set
class BadSudoku(Exception): pass
# raised when we discover that a puzzle has no solutions
# ======================================================================
# Bitmaps and patterns
# ======================================================================
digits = range(1,10)
class group:
def __init__(self, i, j, x, y, name):
mask = 0
h,k = [q for q in range(4) if q != i and q != j]
for w in range(3):
for z in range(3):
mask |= 1L << (x*3**i + y*3**j + w*3**h + z*3**k)
self.mask = mask
self.pos = [None]*9
self.name = "%s %d" % (name,x+3*y+1)
cols = [group(0,1,x,y,"column") for x in range(3) for y in range(3)]
rows = [group(2,3,x,y,"row") for x in range(3) for y in range(3)]
sqrs = [group(1,3,x,y,"square") for x in range(3) for y in range(3)]
groups = sqrs+rows+cols
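# The bitmask convention used throughout: cells are numbered 0..80 in
# row-major order, so the cell in row r and column c (both 0-based) is
# r*9+c, and bit (1L << r*9+c) stands for that cell in every mask.
# For example, rows[0].mask has bits 0..8 set and cols[0].mask has
# bits 0, 9, 18, ..., 72 set.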
neighbors = [0]*81
for i in range(81):
b = 1L<<i
for g in groups:
if g.mask & b:
neighbors[i] |= (g.mask &~ b)
unmask = {}
for i in range(81):
unmask[1L<<i] = i
alignments = {}
for s in sqrs:
for g in rows+cols:
m = s.mask&g.mask
if m:
alignments[m] = (s,g)
b1 = m &~ (m-1)
m &=~ b1
b2 = m &~ (m-1)
b3 = m &~ b2
alignments[b1|b2]=alignments[b1|b3]=alignments[b2|b3]=(s,g)
triads = []
for square in sqrs:
for group in rows+cols:
triads.append((square.mask & group.mask,square,group))
# pairs of rows and columns that cross the same squares
nearby = {}
for g in rows+cols:
nearby[g] = []
for r1 in rows:
for s in sqrs:
if r1.mask & s.mask != 0:
for r2 in rows:
if r1 != r2 and r2.mask & s.mask != 0:
nearby[r1].append(r2)
break
for c1 in cols:
for s in sqrs:
if c1.mask & s.mask != 0:
for c2 in cols:
if c1.mask < c2.mask and c2.mask & s.mask != 0:
nearby[c1].append(c2)
break
# ======================================================================
# Human-readable names for puzzle cells
# ======================================================================
cellnames = [None]*81
for row in range(9):
for col in range(9):
cellnames[row*9+col] = ''.join(['R',str(row+1),'C',str(col+1)])
def andlist(list,conjunction="and"):
"""Turn list of strings into English text."""
if len(list) == 0:
return "(empty list!)"
if len(list) == 1:
return list[0]
elif len(list) == 2:
return (' '+conjunction+' ').join(list)
else:
return ', '.join(list[:-1]+[conjunction+' '+list[-1]])
def namecells(mask,conjunction="and"):
"""English string describing a sequence of cells."""
names = []
while mask:
bit = mask &~ (mask - 1)
names.append(cellnames[unmask[bit]])
mask &=~ bit
return andlist(names,conjunction)
def pathname(cells):
return '-'.join([cellnames[c] for c in cells])
def plural(howmany,objectname):
if howmany == 1:
return objectname
else:
return "%d %ss" % (howmany,objectname)
# ======================================================================
# State for puzzle solver
# ======================================================================
class Sudoku:
"""
Data structure for storing and manipulating Sudoku puzzles.
The actual rules for solving the puzzles are implemented
separately from this class.
"""
def __init__(self,initial_placements = None):
"""
Initialize a new Sudoku grid.
If an argument is given, it should either be a sequence of 81
digits 0-9 (0 meaning a not-yet-filled cell), or a sequence
of (digit,cell) pairs.
The main state we use for the solver is an array contents[]
of 81 cells containing digits 0-9 (0 for an unfilled cell)
and an array locations[] indexed by the digits 1-9, containing
bitmasks of the cells still available to each digit.
We also store additional fields:
- progress is a boolean, set whenever one of our methods
changes the state of the puzzle, and used by step() to tell
whether one of its rules fired.
- rules_used is a set of the rule names that have made progress.
- pairs is a dictionary mapping bitmasks of pairs of cells to
lists of digits that must be located in that pair, as set up
by the pair rule and used by other later rules.
- bilocation is a NonrepetitiveGraph representing paths and
cycles among bilocated digits, as constructed by the bilocal
rule and used by the repeat and conflict rules.
- bivalues is a NonrepetitiveGraph representing paths and
cycles among bivalued cells, as constructed by the bivalue
rule and used by the repeat and conflict rules.
- otherbv maps pairs (cell,digit) in the bivalue graph to the
other digit available at the same cell
- logstream is a stream on which to log verbose descriptions
of the steps made by the solver (typically sys.stderr), or
None if verbose descriptions are not to be logged.
- steps is used to count how many solver passes we've made so far.
- original_cells is a bitmask of cells that were originally nonempty.
- assume_unique should be set true to enable solution rules
based on the assumption that there exists a unique solution
"""
self.contents = [0]*81
self.locations = [None]+[(1L<<81)-1]*9
self.rules_used = set()
self.progress = False
self.pairs = None
self.bilocation = None
self.logstream = False
self.steps = 0
self.original_cells = 0
self.assume_unique = False
if initial_placements:
cell = 0
for item in initial_placements:
try:
digit = int(item)
except TypeError:
digit,cell = item
if digit:
self.place(digit,cell)
self.original_cells |= 1L << cell
cell += 1
def __iter__(self):
"""
If we are asked to loop over the items in a grid
(for instance, if we pass one Sudoku instance as the argument
to the initialization of another one) we simply list the
known cell contents of the grid.
"""
return iter(self.contents)
def mark_progress(self):
"""Set progress True and clear fields that depended on old state."""
self.progress = True
self.pairs = None
def log(self,items,explanation=None):
"""
Send a message for verbose output.
Items should be a string or list of strings in the message.
If explanation is not None, it is called as a function and
the results appended to items.
"""
if not self.logstream:
return
if isinstance(items,str):
items = [items]
if explanation:
if isinstance(explanation,str) or isinstance(explanation,list):
x = explanation
else:
x = explanation()
if isinstance(x,str):
x = [x]
else:
x = []
text = ' '.join([str(i) for i in items+x])
for line in wrap(text):
print >>self.logstream, line
print >>self.logstream
def place(self,digit,cell,explanation=None):
"""Change the puzzle by filling the given cell with the given digit."""
if digit != int(digit) or not 1 <= digit <= 9:
raise ValueError("place(%d,%d): digit out of range" % (digit,cell))
if self.contents[cell] == digit:
return
if self.contents[cell]:
self.log(["Unable to place",digit,"in",cellnames[cell],
"as it already contains",str(self.contents[cell])+"."])
raise BadSudoku("place(%d,%d): cell already contains %d" %
(digit,cell,self.contents[cell]))
if (1L<<cell) & self.locations[digit] == 0:
self.log(["Unable to place",digit,"in",cellnames[cell],
"as that digit is not available to be placed there."])
raise BadSudoku("place(%d,%d): location not available" %
(digit,cell))
self.contents[cell] = digit
bit = 1L << cell
for d in digits:
if d != digit:
self.unplace(d,bit,explanation,False)
else:
self.unplace(d,neighbors[cell],explanation,False)
self.mark_progress()
self.log(["Placing",digit,"in",cellnames[cell]+'.'],explanation)
def unplace(self,digit,mask,explanation=None,log=True):
"""
Eliminate the masked positions as possible locations for digit.
The log argument should be true for external callers, but false
when called by Sudoku.place; it is used to disable verbose output
that would be redundant to the output from place.
"""
if digit != int(digit) or not 1 <= digit <= 9:
raise ValueError("unplace(%d): digit out of range" % digit)
if self.locations[digit] & mask:
if log and self.logstream:
items = ["Preventing",digit,"from being placed in",
namecells(self.locations[digit] & mask,"or")+'.']
self.log(items,explanation)
self.locations[digit] &=~ mask
self.mark_progress()
def choices(self,cell):
"""Which digits are still available to be placed in the cell?"""
bit = 1L<<cell
return [d for d in digits if self.locations[d] & bit]
def complete(self):
"""True if all cells have been filled in."""
return 0 not in self.contents
# ======================================================================
# Rules for puzzle solver
# ======================================================================
def locate(grid):
"""
Place digits that can only go in one cell of their group.
If a digit x has only one remaining cell that it can be placed in,
within some row, column, or square, then we place it in that cell.
Any potential positions of x incompatible with that cell (because
they lie in the same row, column, or square) are removed from
future consideration.
"""
for d in digits:
for g in groups:
dglocs = grid.locations[d] & g.mask
if dglocs & (dglocs-1) == 0:
if dglocs == 0:
grid.log(["Unable to place",d,"anywhere in",g.name+"."])
raise BadSudoku("No place for %d in %s" %(d,g.name))
grid.place(d,unmask[dglocs],
["It is the only cell in",g.name,
"in which",d,"can be placed."])
def eliminate(grid):
"""
Fill cells that can only contain one possible digit.
If a cell has only one digit x that can be placed in it, we place
x in that cell. Incompatible positions for x are removed from
future consideration.
"""
for cell in range(81):
if not grid.contents[cell]:
allowed = grid.choices(cell)
if len(allowed) == 0:
grid.log(["Unable to place any digit in",cellnames[cell]+"."])
raise BadSudoku("No digit for cell %d" % cell)
if len(allowed) == 1:
grid.place(allowed[0],cell,
"No other digit may be placed in that cell.")
def align(grid):
"""
Eliminate positions that leave no choices for another group.
If the cells of a square that can contain a digit x all lie
in a single row or column, we eliminate positions for x that
are outside the square but inside that row or column. Similarly,
if the cells that can contain x within a row or column all lie
in a single square, we eliminate positions that are inside that
square but outside the row or column.
"""
for d in digits:
for g in groups:
a = grid.locations[d] & g.mask
if a in alignments:
s = [x for x in alignments[a] if x != g][0]
def explain():
un = grid.locations[d] & s.mask &~ a
if un & (un - 1):
this = "These placements"
else:
this = "This placement"
return [this, "would conflict with", namecells(a)+",",
"which are the only cells in", g.name,
"that can contain that digit."]
grid.unplace(d, s.mask &~ a, explain)
enough_room = "To leave enough room for those digits, no other " \
"digits may be placed in those cells."
def explain_pair(grid,digs,locs):
"""Concoct explanation for application of pair rule."""
d1,d2 = digs
g1 = [g for g in groups if
grid.locations[d1] & g.mask == grid.locations[d1] & locs]
g2 = [g for g in groups if
grid.locations[d2] & g.mask == grid.locations[d2] & locs]
for g in g1:
if g in g2:
ing = ["In", g.name+",", "digits", d1, "and", d2]
break
else:
# unlikely to get here due to align rule applying before pair
ing = ["In",(g1 and g1[0].name or "no group")+",", "digit", str(d1)+",",
"and in",(g2 and g2[0].name or "no group")+",", "digit", str(d2)]
return ing+["may only be placed in",namecells(locs)+".", enough_room]
def pair(grid):
"""
Eliminate positions that leave no choices for two other digits.
If two digits x and y each share the same two cells as the only
locations they may be placed within some row, column, or square,
then all other digits must avoid those two cells.
"""
grid.pairs = pairs = {}
for d in digits:
for g in groups:
dglocs = grid.locations[d] & g.mask
fewerbits = dglocs & (dglocs - 1)
if fewerbits & (fewerbits - 1) == 0:
if d not in pairs.setdefault(dglocs,[d]):
pairs[dglocs].append(d)
for e in digits:
if e not in pairs[dglocs]:
def explain():
return explain_pair(grid,pairs[dglocs],dglocs)
grid.unplace(e, dglocs, explain)
def triad(grid):
"""
Find forced triples of digits within triples of cells.
If some three cells, formed by intersecting a row or column
with a square, have three digits whose only remaining positions
within that row, column, or square are among those three cells,
we prevent all other digits from being placed there. We also
remove positions for those three forced digits outside the
triple but within the row, column, or square containing it.
"""
for mask,sqr,grp in triads:
forces = [d for d in digits
if (grid.locations[d]&sqr.mask == grid.locations[d]&mask)
or (grid.locations[d]&grp.mask == grid.locations[d]&mask)]
if len(forces) == 3:
outside = (sqr.mask | grp.mask) &~ mask
for d in digits:
def explain():
ing = ["In", grp.name, "and", sqr.name+",",
"digits %d, %d, and %d" % tuple(forces),
"may only be placed in", namecells(mask)+"."]
if d not in forces:
return ing+[enough_room]
elif grid.locations[d]&sqr.mask == grid.locations[d]&mask:
og = grp.name
else:
og = sqr.name
return ing+["Therefore,", d, "may not be placed",
"in any other cell of", og]
grid.unplace(d, d in forces and outside or mask, explain)
def digit(grid):
"""
Remove incompatible positions of a single digit.
If the placement of digit x in cell y can not be extended to a
placement of nine copies of x covering each row and column of the
grid exactly once, we eliminate cell y from consideration as
a placement for x.
"""
for d in digits:
graph = {}
locs = grid.locations[d]
for r in range(9):
graph[r] = [c for c in range(9)
if rows[r].mask & cols[c].mask & locs]
imp = imperfections(graph)
mask = 0
forced = []
for r in imp:
for c in imp[r]:
mask |= rows[r].mask & cols[c].mask
if imp[r][c] not in forced:
forced.append(imp[r][c])
mask &= grid.locations[d]
if not mask:
continue
def explain():
expl = []
for f in forced:
fr = [rows[r].name for r in f]
fr.sort()
fc = list(set([cols[c].name for r in f for c in f[r]]))
fc.sort()
expl += ["In", andlist(fr)+", digit", d,
"can only be placed in", andlist(fc,"or")+"."]
return expl + ["Placing",d,"in",namecells(mask,"or"),
"would leave too few columns for", d,
"to be placed in all of these rows."]
grid.unplace(d,mask,explain)
def rectangles():
"""Generate pairs of rows and columns that form two-square rectangles."""
for r1 in rows:
for r2 in rows:
if r2 in nearby[r1]:
for c1 in range(9):
for c2 in range(c1):
if cols[c1] not in nearby[cols[c2]]:
yield r1,r2,cols[c2],cols[c1]
elif r1.mask < r2.mask:
for c1 in cols:
for c2 in nearby[c1]:
yield r1,r2,c1,c2
def rectangle(grid):
"""
Avoid the formation of an ambiguous rectangle.
That is, four corners of a rectangle within two squares, all four
corners initially blank, and containing only two digits. If this
situation occurred, the puzzle would necessarily have evenly many
solutions, because we could swap the two digits in the rectangle
corners in any solution to form a different solution, contradicting
the assumption that there is only one. Therefore, we make sure that
any such rectangle keeps at least three available values.
"""
if not grid.assume_unique:
return
for r1,r2,c1,c2 in rectangles():
mask = (r1.mask | r2.mask) & (c1.mask | c2.mask)
if not (mask & grid.original_cells):
# First rectangle test
# If three cells are bivalued with the same two digits x,y
# then we can eliminate x and y on the fourth
safe_corners = 0
multiply_placable = []
for d in digits:
dmask = grid.locations[d] & mask
if dmask & (dmask - 1):
multiply_placable.append(d)
else:
safe_corners |= dmask
if len(multiply_placable) == 2 and \
safe_corners & (safe_corners-1) == 0:
for d in multiply_placable:
def explain():
return ["This placement would create an ambiguous",
"rectangle for digits",
str(multiply_placable[0]),"and",
str(multiply_placable[1]),"in",
r1.name+",",r2.name+",",
c1.name+",","and",c2.name+"."]
grid.unplace(d,safe_corners,explain)
# Second rectangle test
# If only three digits can be placed in the rectangle,
# we eliminate placements that conflict with
# all positions of one of the digits.
placable = [d for d in digits if grid.locations[d] & mask]
if len(placable) == 3:
for d in placable:
a = grid.locations[d] & mask
conflicts = 0
for g in groups:
if grid.locations[d] & g.mask & a == a:
conflicts |= g.mask
def explain():
un = conflicts &~ a
if un & (un - 1):
this = "These placements"
else:
this = "This placement"
return ["The rectangle in", r1.name+",",
r2.name+",", c1.name+", and", c2.name,
"can only contain digits",
andlist([str(dd) for dd in placable])+".",
this, "would conflict with the placements",
"of", str(d)+",", "creating an ambiguous",
"rectangle on the remaining two digits."]
grid.unplace(d, conflicts &~ a, explain)
# Third rectangle test
# If two cells are bivalued with digits x and y,
# and the other two cells are bilocal with x,
# then we can eliminate y from the two bilocal cells.
for x1,x2 in ((r1,r2), (r2,r1), (c1,c2), (c2,c1)):
xd = [d for d in digits if grid.locations[d] & mask & x1.mask]
if len(xd) == 2: # found locked pair on x1's corners
for d in xd:
x2d = grid.locations[d] & x2.mask
if x2d & mask == x2d: # and bilocal on x2
dd = xd[0]+xd[1]-d # other digit
def explain():
return ["The rectangle in", r1.name+",",
r2.name+",", c1.name+", and", c2.name,
"can only contain digits",
str(xd[0]),"and",str(xd[1]),"in",
x1.name+".","In addition,"
"the only cells in",x2.name,
"that can contain",str(d),
"are in the rectangle.",
"Therefore, to avoid creating an",
"ambiguous rectangle, the",str(dd),
"in",x2.name,"must be placed",
"outside the rectangle."]
grid.unplace(dd,x2d,explain)
# Fourth rectangle test
# If two cells are bivalued with digits x and y,
# and a perpendicular side is bilocal with x,
# then we can eliminate y from the remaining cell
for x1,perp in ((r1,(c1,c2)),(r2,(c1,c2)),
(c1,(r1,r2)),(c2,(r1,r2))):
xd = [d for d in digits if grid.locations[d] & mask & x1.mask]
if len(xd) == 2: # found locked pair on x1's corners
for x2 in perp:
for d in xd:
x2d = grid.locations[d] & x2.mask
if x2d & mask == x2d: # and bilocal on x2
dd = xd[0]+xd[1]-d # other digit
def explain():
return ["For the rectangle in", r1.name+",",
r2.name+",", c1.name+", and", c2.name,
"the two corners in",
x1.name,"must contain both digits",
str(xd[0]),"and",str(xd[1]),
"and the two corners in",
x2.name,"must contain one",str(d)+".",
"Therefore, to avoid creating an",
"ambiguous rectangle, the",
"remaining corner must not contain",
str(dd)+"."]
grid.unplace(dd,mask&~(x1.mask|x2.mask),explain)
def trapezoid(grid):
"""
Force pairs of digits to form trapezoids instead of rectangles.
If two digits can only be placed in five cells of two squares,
four of which form a rectangle, then they must be placed in
four cells that form a trapezoid out of those five.
We prevent those digits from being placed in cells not part of
a trapezoid, and prevent other digits from being placed in cells
that are part of all such trapezoids.
"""
if not grid.assume_unique:
return
for r1,r2,c1,c2 in rectangles():
corners = (r1.mask | r2.mask) & (c1.mask | c2.mask)
if not (corners & grid.original_cells):
s1,s2 = [s for s in sqrs if s.mask & corners]
uncorner = (s1.mask | s2.mask) &~ corners
candidates = {}
universal = None
for d in digits:
if not grid.locations[d] & uncorner:
universal = d # can form five cells w/any other digit
for d in digits:
locs_for_d = grid.locations[d] & uncorner
if locs_for_d and not (locs_for_d & (locs_for_d - 1)):
if universal != None or locs_for_d in candidates:
# found another digit sharing same five cells w/d
if universal != None:
d1,d2 = universal,d
else:
d1,d2 = candidates[locs_for_d],d
explanation = ["Digits",str(d1),"and",str(d2),
"must be placed in a trapezoid in",
s1.name,"and",s2.name+",",
"for if they were placed in a",
"rectangle, their locations",
"could be swapped, resulting",
"in multiple solutions",
"to the puzzle."]
must = locs_for_d
mustnt = 0
if s2.mask & locs_for_d:
s1,s2 = s2,s1 # swap so s1 contains extra cell
must |= corners & s2.mask
for line in r1.mask,r2.mask,c1.mask,c2.mask:
if line & locs_for_d and line & s2.mask:
# most informative case: the extra cell
# lies on a line through both squares.
must |= corners & (s1.mask &~ line)
mustnt |= corners & (s1.mask & line)
for d3 in digits:
if d3 == d1 or d3 == d2:
grid.unplace(d3,mustnt,explanation)
else:
grid.unplace(d3,must,explanation)
else:
candidates[locs_for_d] = d
def subproblem(grid):
"""
Remove incompatible positions within a single row, column, or square.
If the placement of a digit x in cell y within a single row, column,
or square can not be extended to a complete solution of that row, column,
or square, then we eliminate that placement from consideration.
"""
for g in groups:
graph = {}
for d in digits:
graph[d] = []
locs = grid.locations[d] & g.mask
while locs:
bit = locs &~ (locs-1)
graph[d].append(unmask[bit])
locs &=~ bit
imp = imperfections(graph)
for d in imp.keys():
if not imp[d]:
del imp[d]
while imp:
# Here with imp mapping digits to unplaceable cells.
# We choose carefully the order of digits to handle,
# so that our explanations make logical sense: if an
# explanation includes the fact that a digit can only
# go in certain cells, we need to have already handled
# the unplaceable cells for that other digit.
for d in imp:
entailed = False
for cell in imp[d]:
for forced in imp[d][cell]:
if forced in imp and imp[forced]:
entailed = True
break
if not entailed:
break
# Here with imp[d] mapping d to some unplaceable cells.
# We build up a bitmap of those cells, as we do collecting
# the sets of digits and cells that must be matched to each
# other and that prevent us from placing d in those cells.
mask = 0
forces = []
for cell in imp[d]:
bit = 1L<<cell
if bit & grid.locations[d]:
mask |= bit
force = imp[d][cell]
if force not in forces:
forces.append(force)
# Now that we have both the bitmap and the subgraphs describing
# why each bit is in that bitmap, we are ready to make and
# explain our unplacement decision.
def explain():
that = "would make it impossible to place that digit."
expls = []
for force in forces:
if expls or len(force) > 1:
that = "would leave too few remaining cells" \
" to place those digits."
if expls:
expls[-1] += ','
if force == forces[-1]:
expls[-1] += ' and'
forcedigs = [str(x) for x in force]
forcedigs.sort()
forcemask = 0
for dig in force:
for cell in force[dig]:
forcemask |= 1L<<cell
expls += [len(forcedigs) == 1 and "digit" or "digits",
andlist(forcedigs), "can only be placed in",
namecells(forcemask)]
expls[-1] += '.'
return ["In", g.name+","] + expls + ["Placing", d,
"in", namecells(mask,"or"), that]
grid.unplace(d,mask,explain)
del imp[d]
if grid.progress:
return # let changes propagate before trying more groups
bilocal_explanation = \
"each two successive cells belong to a common row, column, or square," \
" and are the only two cells in that row, column, or square where one" \
" of the digits may be placed"
incyclic = "In the cyclic sequence of cells"
inpath = "In the sequence of cells"
def bilocal(grid):
"""
Look for nonrepetitive cycles among bilocated digits.
Despite the sesquipedalian summary line above, this is a form of
analysis that is easy to perform by hand: draw a graph connecting
two cells whenever some digit's location within a row, column,
or square is forced to lie only in those two cells. We then
search for cycles in the graph in which each two adjacent edges
in the cycle have different labels. In any such cycle, each cell
can only contain the digits labeling the two edges incident to it.
"""
if not grid.pairs:
return # can only run after pair rule finds edges
# Make labeled graph of pairs
graph = dict([(i,{}) for i in range(81)])
for pair in grid.pairs:
digs = grid.pairs[pair]
bit = pair &~ (pair-1)
pair &=~ bit
if pair:
v = unmask[bit]
w = unmask[pair]
graph[v][w] = graph[w][v] = digs
# Apply repetitivity analysis to collect cyclic labels at each cell
grid.bilocation = nrg = NonrepetitiveGraph(graph)
forced = [set() for i in range(81)]
for v,w,L in nrg.cyclic():
forced[v].add(L)
forced[w].add(L)
# Carry out forces indicated by our analysis
for cell in range(81):
if len(forced[cell]) == 2:
# It's also possible for len(forced[cell]) to be > 2;
# in this case multiple cycles go through the same edge
# and cell must be filled with the digit labeling that edge.
# But for simplicity's sake we ignore that possibility;
# it doesn't happen very often and when it does the repetitive
# cycle rule will find it instead.
mask = 1L<<cell
for d in digits:
if d not in forced[cell]:
def explain():
forced1,forced2 = tuple(forced[cell])
cycle = nrg.shortest(cell,forced1,cell,forced2)
return [incyclic, pathname(cycle)+",",
bilocal_explanation + ".",
"This placement would prevent",
forced1, "or", forced2,
"from being placed in", cellnames[cell]+",",
"making it impossible to place the cycle's",
len(cycle)-1, "digits into the remaining",
len(cycle)-2, "cells."]
grid.unplace(d,mask,explain)
bivalue_explanation = \
"each cell has two possible digits, each of which may also" \
" be placed at one of the cell's two neighbors in the sequence"
def bivalue(grid):
"""
Look for nonrepetitive cycles among bivalued cells.
We draw a graph connecting two cells whenever both can only
contain two digits, one of those digits is the same for both
cells, and both cells belong to the same row, column, or square.
Edges are labeled by the digit(s) the two cells share.
If any edge of this graph is contained in a cycle with no two
consecutive edges having equal labels, then the digit labeling
that edge must be placed on one of its two endpoints, and can
not be placed in any other cell of the row, column, or square
containing the edge.
"""
# Find and make bitmask per digit of bivalued cells
graph = {}
grid.otherbv = otherbv = {}
tvmask = [0]*10
for c in range(81):
ch = grid.choices(c)
if len(ch) == 2:
graph[c] = {}
tvmask[ch[0]] |= 1L<<c
tvmask[ch[1]] |= 1L<<c
otherbv[c,ch[0]] = ch[1]
otherbv[c,ch[1]] = ch[0]
edgegroup = {}
# Form edges and map back to their groups
for g in groups:
for d in digits:
mask = tvmask[d] & g.mask
dgcells = []
while mask:
bit = mask &~ (mask - 1)
dgcells.append(unmask[bit])
mask &=~ bit
for v in dgcells:
for w in dgcells:
if v != w:
edgegroup.setdefault((v,w),[]).append(g)
graph[v].setdefault(w,set()).add(d)
# Apply repetitivity analysis to collect cyclic labels at each cell
# and eliminate that label from other cells of the same group
grid.bivalues = nrg = NonrepetitiveGraph(graph)
for v,w,digit in nrg.cyclic():
mask = 0
for g in edgegroup[v,w]:
mask |= g.mask
mask &=~ (1L << v)
mask &=~ (1L << w)
def explain():
cycle = [v] + nrg.shortest(w,grid.otherbv[w,digit],
v,grid.otherbv[v,digit])
return ["In the cyclic sequence of cells", pathname(cycle)+",",
bivalue_explanation + ".",
"This placement would conflict with placing", digit,
"in", namecells((1L<<v)|(1L<<w))+",",
"making it impossible to fill the cycle's",
len(cycle)-1, "cells with the remaining",
len(cycle)-2, "digits."]
grid.unplace(digit,mask,explain)
def repeat(grid):
"""
Look for cycles of bilocated or bivalued vertices with one repetition.
We use the same graphs described for the bilocal and bivalue rules;
if there exists a cycle in which some two adjacent edges are labeled
by the same digit, and all other adjacent pairs of cycle edges have
differing digits, then the repeated digit must be placed at the cell
where the two same-labeled edges meet (in the case of the bilocal graph)
or can be eliminated from that cell (in the case of the bivalue graph).
"""
if not grid.bilocation or not grid.bivalues:
return
for cell in range(81):
if not grid.contents[cell]:
for d in grid.choices(cell):
if (cell,d) in grid.bilocation.reachable(cell,d):
cycle = grid.bilocation.shortest(cell,d,cell,d)
if cycle[1] == cycle[-2]:
# Degenerate repetitive cycle, look for a better one.
# It would be a correct decision to place d in cell:
# due to prior application of the bilocal rule, the
# part of the cycle from cycle[1] to cycle[-2] must
# itself be a repetitive cycle. But the explanation
# will be clearer if we avoid using this cycle.
break
def explain():
expl = [incyclic, pathname(cycle)+",",
bilocal_explanation + ".",
"If",d,"were not placed in",cellnames[cell]+",",
"it would have to be placed in",
cellnames[cycle[1]],"and",
cellnames[cycle[-2]],"instead,",
"making it impossible to place the"]
if len(cycle) == 4:
expl.append("remaining digit.")
else:
expl += ["cycle's remaining",len(cycle)-3,"digits",
"in the remaining"]
if len(cycle) == 5:
expl.append("cell.")
else:
expl += [len(cycle)-4,"cells."]
return expl
grid.place(d,cell,explain)
return # allow changes to propagate w/simpler rules
elif (cell,d) in grid.bivalues.reachable(cell,d):
cycle = grid.bivalues.shortest(cell,d,cell,d)
if cycle[1] == cycle[-2]:
break
def explain():
return [incyclic, pathname(cycle)+",",
bivalue_explanation + ",",
"except that", cellnames[cell],
"shares", d, "as a possible value",
"with both of its neighbors.",
"Placing", d, "in", cellnames[cell],
"would make it impossible",
"to fill the cycle's remaining",
len(cycle)-2, "cells with the remaining",
len(cycle)-3, "digits, so only",
grid.otherbv[cell,d], "can be placed in",
cellnames[cell]+"."]
grid.place(grid.otherbv[cell,d],cell,explain)
return # allow changes to propagate w/simpler rules
def path(grid):
"""
Look for paths of bilocated or bivalued cells with conflicting endpoints.
In the same graphs used by the bilocal and repeat rules, if there exists
a path that starts and ends with the same digit, with no two consecutive
edges labeled by the same digit, then the digit ending the path can be
placed in no cell that conflicts with both endpoints of the path. If
the path endpoints belong to the same row, column, or square as each other,
this eliminates other placements within that row, column, or square;
otherwise, it eliminates placements at the other two corners of a
rectangle having the two path endpoints as opposite corners.
"""
if not grid.bilocation or not grid.bivalues:
return
for cell in range(81):
if not grid.contents[cell]:
for d in grid.choices(cell):
for neighbor,nd in grid.bilocation.reachable(cell,d):
if nd == d:
def explain():
path = grid.bilocation.shortest(cell,d,neighbor,d)
return [inpath, pathname(path)+",",
bilocal_explanation+".",
"This placement conflicts with placing",
d, "in", cellnames[cell], "or",
cellnames[neighbor]+",", "making it",
"impossible to place the sequence's",
len(path)-1, "digits in the remaining",
len(path)-2, "cells."]
grid.unplace(d,neighbors[cell]&neighbors[neighbor],
explain)
if cell in grid.bivalues:
for neighbor,nd in grid.bivalues.reachable(cell,
grid.otherbv[cell,d]):
if d == grid.otherbv[neighbor,nd]:
def explain():
path = grid.bivalues.shortest(cell,
grid.otherbv[cell,d],neighbor,nd)
return [inpath, pathname(path)+",",
bivalue_explanation+".",
"This placement conflicts with placing",
d, "in", cellnames[cell], "or",
cellnames[neighbor]+",", "making it",
"impossible to fill the sequence's",
len(path), "cells using only the",
len(path)-1,
"shared digits of the sequence."]
grid.unplace(d,neighbors[cell]&neighbors[neighbor],
explain)
def explain_conflict_path(grid,cell,d,why,reached,dd):
"""Explain why either cell,d or reached,dd must be placed."""
if why[reached,dd]:
path = grid.bilocation.shortest(cell,d,reached,dd)
if len(path) == 2:
mask = (1L<<cell)|(1L<<reached)
for g in groups:
if g.mask & mask == mask:
break
return [cellnames[cell],"and",cellnames[reached],
"are the only cells in",g.name,
"in which",d,"may be placed, so if",d,
"were not placed in",cellnames[cell]+",",
"it would have to be placed in",cellnames[reached]+"."]
return [inpath, pathname(path)+",", bilocal_explanation+".",
"If",d,"were not placed in",cellnames[cell]+",",
"then",dd,"would have to be placed in",cellnames[reached]+",",
"in order to make room for the remaining",
plural(len(path)-2,"digit"),"in the remaining",
plural(len(path)-2,"cell"),"of the sequence."]
path = grid.bivalues.shortest(cell,grid.otherbv[cell,d],
reached,grid.otherbv[reached,dd])
if len(path) == 2:
mask = (1L<<cell)|(1L<<reached)
return [cellnames[cell],"and",cellnames[reached],
"each have two possible values.",
"If",d,"were not placed in",cellnames[cell],
"it would have to contain",grid.otherbv[cell,d],
"instead, forcing",cellnames[reached],"to contain",str(dd)+"."]
return [inpath, pathname(path)+",", bivalue_explanation+".",
"If",d,"were not placed in",cellnames[cell]+",",
"then",dd,"would have to be placed in",cellnames[reached]+",",
"in order to make allow the remaining",plural(len(path)-1,"cell"),
"of the sequence to be filled by the remaining",
plural(len(path)-1,"digit")+"."]
def explain_conflict(grid,cell,d,why,reached,dd):
"""Concoct explanation for pair of conflicting paths, one to reached."""
for neighbor,ddd in why:
if ddd == dd:
if (1L<<neighbor) & neighbors[reached]:
return explain_conflict_path(grid,cell,d,why,reached,dd) + \
explain_conflict_path(grid,cell,d,why,neighbor,dd) + \
[cellnames[reached],"and",cellnames[neighbor],
"cannot both contain",str(dd)+",","so",cellnames[cell],
"must contain",str(d)+"."]
return explain_conflict_path(grid,cell,d,why,reached,dd) + \
["This conflicts with another path that has become lost."]
def explain_conflict_group(grid,cell,d,why,g,dd):
"""Conflict explanation for set of conflicting paths that cover a group."""
mask = g.mask & grid.locations[dd]
conflicts = []
confmask = 0
for reached,ddd in why:
if dd == ddd and neighbors[reached] & mask:
conflicts.append(reached)
confmask |= 1L<<reached
mask &=~ neighbors[reached]
conflicts.sort()
expl = []
for c in conflicts:
expl += explain_conflict_path(grid,cell,d,why,c,dd)
expl += ["In",g.name+",",namecells(g.mask&grid.locations[dd]),
"are the only cells in which",dd,"may be placed."]
return expl + ["Placing",dd,"in",namecells(confmask),
"would prevent it from being placed anywhere in",g.name+",",
"so",d,"must be placed in",cellnames[cell]+"."]
def conflict(grid):
"""
Look for conflicting paths of bilocated or bivalued cells.
In the same graph used by the bilocal and repeat rules, if there exist
two paths that start with the same cell and digit, and that end with
equal digits in different cells of the same row, column, or square,
then the start cell must contain the starting digit for otherwise
it would cause the end cells to conflict with each other.
One or both paths can instead be in the bivalue graph, starting and
ending with the other digit than the one for the bilocal path.
We also find similar pairs of paths that end in sets of cells that
together eliminate all positions for the end digit in another row,
column, or square of the grid.
"""
if not grid.bilocation or not grid.bivalues:
return
for cell in range(81):
if not grid.contents[cell]:
for d in grid.choices(cell):
conflicts = [0]*10
why = {}
for reached,dd in grid.bilocation.reachable(cell,d):
why[reached,dd] = True
if (1L<<reached) & conflicts[dd]:
def explain():
return explain_conflict(grid,cell,d,why,reached,dd)
grid.place(d,cell,explain)
return # allow changes to propagate
else:
conflicts[dd] |= neighbors[reached]
if cell in grid.bivalues:
for reached,dd in grid.bivalues.reachable(cell,
grid.otherbv[cell,d]):
other = grid.otherbv[reached,dd]
why[reached,other] = False
if (1L<<reached) & conflicts[other]:
def explain():
return explain_conflict(grid,cell,d,
why,reached,other)
grid.place(d,cell,explain)
return # allow changes to propagate
else:
conflicts[other] |= neighbors[reached]
for g in groups:
for dd in digits:
if grid.locations[dd] & g.mask &~ conflicts[dd] == 0:
def explain():
return explain_conflict_group(grid,cell,d,
why,g,dd)
grid.place(d,cell,explain)
return # allow changes to propagate
# triples of name, rule, difficulty level
rules = [
("locate",locate,0),
("eliminate",eliminate,1),
("align",align,2),
("pair",pair,2),
("triad",triad,2),
("trapezoid",trapezoid,2),
("rectangle",rectangle,2),
("subproblem",subproblem,3),
("digit",digit,3),
("bilocal",bilocal,3),
("bivalue",bivalue,3),
("repeat",repeat,4),
("path",path,4),
("conflict",conflict,4),
]
def step(grid, quick_and_dirty = False):
"""Try the rules, return True if one succeeds."""
if grid.complete():
return False
grid.progress = False
grid.steps += 1
grid.log(["Beginning solver iteration",str(grid.steps)+'.'])
for name,rule,level in rules:
if level <= 1 or not quick_and_dirty:
rule(grid)
if grid.progress:
grid.rules_used.add(name)
grid.log(["Ending solver iteration",grid.steps,
"after successful application of the",
name,"rule."])
return True
grid.log(["Ending solver iteration",grid.steps,
"with no additional progress."])
return False
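# Example (illustrative sketch): driving the solver programmatically.  The
# 81-character string below is an arbitrary easy puzzle used only for
# illustration; harder puzzles may stop with step() returning False before
# the grid is complete.
#
#   puzzle = Sudoku([int(c) for c in
#       "530070000600195000098000060800060003400803001"
#       "700020006060000280000419005000080079"])
#   while step(puzzle):
#       pass
#   print puzzle.complete(), sorted(puzzle.rules_used)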
# ======================================================================
# Random permutation of puzzles
# ======================================================================
def block_permutation(preserve_symmetry = True):
"""Choose order to rearrange rows or columns of blocks."""
if preserve_symmetry:
return random.choice([[0,1,2],[2,1,0]])
result = [0,1,2]
random.shuffle(result)
return result
def permute1d(preserve_symmetry = True):
"""Choose order to rearrange rows or columns of puzzle."""
bp = block_permutation(preserve_symmetry)
ip = [block_permutation(False),block_permutation(preserve_symmetry)]
if preserve_symmetry:
ip.append([2-ip[0][2],2-ip[0][1],2-ip[0][0]])
else:
ip.append(block_permutation(False))
return [bp[i]*3+ip[i][j] for i in [0,1,2] for j in [0,1,2]]
def permute(grid, preserve_symmetry = True):
"""Generate a randomly permuted version of the input puzzle."""
digit_permutation = list(digits)
random.shuffle(digit_permutation)
digit_permutation = [0]+digit_permutation
row_permutation = permute1d(preserve_symmetry)
col_permutation = permute1d(preserve_symmetry)
transpose = random.choice([[1,9],[9,1]])
contents = [None]*81
for row in range(9):
for col in range(9):
contents[row_permutation[row]*transpose[0] +
col_permutation[col]*transpose[1]] = \
digit_permutation[grid.contents[9*row+col]]
return Sudoku(contents)
# ======================================================================
# Output of puzzles
# ======================================================================
# Output functions should return True if it's ok to add difficulty/level,
# false otherwise
def text_format(grid):
for row in digits:
if row % 3 != 1:
print ('|' + ' '*11)*3+'|'
elif row == 1:
print ' ' + '-'*35 + ' '
else:
print '|' + '-'*35 + '|'
for col in digits:
if col % 3 == 1:
print '|',
else:
print ' ',
print grid.contents[(row-1)*9+(col-1)] or '.',
print '|'
print ' ' + '-'*35 + ' '
return True
def numeric_format(grid):
row = []
for digit in grid:
row.append(str(digit))
if len(row) == 9:
print ''.join(row)
row = []
return True
def html_format(grid):
print "<table border=1>"
for a in range(3):
print "<tr>"
for b in range(3):
print "<td><table border=0>"
for c in range(3):
print "<tr>"
for d in range(3):
row = 3*a+c
col = 3*b+d
cell = 9*row+col
if grid.contents[cell]:
print '<td width=30 height=30 align=center valign=middle style="font-family:times,serif; font-size:16pt; text-align:center; color:black">%d</td>' % grid.contents[cell]
# sty = '; color:black'
# val = ' value="%d" readonly' % grid.contents[cell]
else:
print '<td width=30 height=30 align=center valign=middle><input style="font-family:times,serif; font-size:16pt; text-align:center; color:#555; margin:0pt; border-width:0" size=1 maxlength=1></td>'
# sty = '; color:gray'
# val = ''
# print '<td width=30 height=30 align=center valign=middle><input style="font-size:16pt; text-align:center%s" size=1 maxlength=1%s></td>' % (sty,val)
print "</tr>"
print "</table></td>"
print "</tr>"
print "</table>"
return False
def svg_format(grid):
print '''<?xml version="1.0" encoding="iso-8859-1"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg xmlns="http://www.w3.org/2000/svg" version="1.1" width="274pt" height="274pt" viewBox="0 0 273 273">'''
print ' <g fill="none" stroke="black" stroke-width="1.5">'
print ' <rect x="2" y="2" width="270" height="270" />'
for i in [3,6]:
print ' <line x1="2" y1="%d" x2="272" y2="%d" />' % (30*i+2,30*i+2)
print ' <line x1="%d" y1="2" x2="%d" y2="272" />' % (30*i+2,30*i+2)
print ' </g>'
print ' <g fill="none" stroke="black" stroke-width="0.5">'
for i in [1,2,4,5,7,8]:
print ' <line x1="2" y1="%d" x2="272" y2="%d" />' % (30*i+2,30*i+2)
print ' <line x1="%d" y1="2" x2="%d" y2="272" />' % (30*i+2,30*i+2)
print ' </g>'
print ' <g font-family="Times" font-size="24" fill="black" text-anchor="middle">'
for row in range(9):
for col in range(9):
cell = row*9+col
if grid.contents[cell]:
print ' <text x="%d" y="%d">%d</text>' % \
(30*col+17, 30*row+25, grid.contents[cell])
print ' </g>'
print '</svg>'
return False
output_formats = {
"text": text_format,
"txt": text_format,
"t": text_format,
"numeric": numeric_format,
"num": numeric_format,
"n": numeric_format,
"html": html_format,
"h": html_format,
"svg": svg_format,
"s": svg_format,
}
# ======================================================================
# Backtracking search for all solutions
# ======================================================================
def all_solutions(grid, fastrules = True):
"""Generate sequence of completed Sudoku grids from initial puzzle."""
while True:
# first try the usual non-backtracking rules
try:
while step(grid,fastrules): pass
except BadSudoku:
grid.log("A contradiction was found,"
" so this branch has no solutions.")
return # no solutions
# if they finished off the puzzle, there's only one solution
if grid.complete():
grid.log("A solution to the puzzle has been found.")
yield grid
return
# find a cell with few remaining possibilities
def choices(c):
ch = grid.choices(c)
if len(ch) < 2: return (10,0,0)
return (len(ch),c,ch[0])
L,c,d = min([choices(c) for c in range(81)])
# try it both ways
branch = Sudoku(grid)
grid.log("Failed to progress, "
"creating a new backtracking search branch.")
branch.logstream = grid.logstream
branch.steps = grid.steps
branch.original_cells = grid.original_cells
branch.place(d,c,"The backtracking search will try this placement"
" first. Then, after returning from this branch,"
" it will try preventing this placement.")
for sol in all_solutions(branch,fastrules):
yield sol
grid.log(["Returned from backtracking branch; undoing placement of",
d,"in",cellnames[c],"and all subsequent decisions."])
grid.rules_used.update(branch.rules_used)
grid.rules_used.add("backtrack")
grid.steps = branch.steps
grid.unplace(d,1L<<c,"The backtracking search has already tried this"
" placement, and now must try the opposite decision.")
def unisolvent(grid):
"""Does this puzzle have a unique solution?"""
stream = all_solutions(grid)
try:
stream.next()
except StopIteration:
return False
try:
stream.next()
except StopIteration:
return True
return False
# ======================================================================
# Command-line interface
# ======================================================================
parser = OptionParser()
parser.add_option("-r","--rules",dest="show_rules", action="store_true",
help = "show description of known solver rules and exit")
parser.add_option("-l","--levels",dest="show_levels", action="store_true",
help = "show description of difficulty levels and exit")
parser.add_option("-0", "--blank", dest="empty", action="store_true",
help = "output blank sudoku grid and exit")
parser.add_option("-t","--translate", dest="translate", action="store_true",
help = "translate format of input puzzle without solving")
parser.add_option("-p","--permute",dest="permute", action="store_true",
help = "randomly rearrange the input puzzle")
parser.add_option("-g","--generate", dest="generate", action="store_true",
help = "generate new puzzle rather than reading from stdin")
parser.add_option("-a", "--asymmetric", dest="asymmetric", action="store_true",
help = "allow asymmetry in generated puzzles")
parser.add_option("-u", "--unique", dest="assume_unique", action="store_false",
help = "disallow rules that assume a unique solution",
default = True)
parser.add_option("-b", "--backtrack", dest="backtrack", action="store_true",
help = "enable trial and error search for all solutions")
parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
help = "output description of each step in puzzle solution")
parser.add_option("-x", "--empty", dest="emptychars", action="store",
type="string", default=".0",
help="characters representing empty cells in input puzzle")
parser.add_option("-2", "--output-both", dest="output_both",
action="store_true",
help = "output both the puzzle and its solution")
parser.add_option("-f", "--format", dest="format", action="store",
type="string", default="text",
help="output format (options: text, numeric, html, svg)")
if __name__ == '__main__':
options,args = parser.parse_args()
if args:
print >>sys.stderr, "Unrecognized command line syntax, use --help for input documentation"
sys.exit(0)
if options.show_rules:
print """This solver knows the following rules. Rules occurring later
in the list are attempted only when all earlier rules have failed
to make progress.
"""
for name,rule,difficulty in rules:
print name + ":" + rule.__doc__
sys.exit(0)
if options.show_levels:
print """
Puzzles are classified by difficulty, according to a weighted combination
of the set of rules needed to solve each puzzle. There are six levels,
in order by difficulty: easy, moderate, tricky, difficult, evil, and
fiendish. In addition, a puzzle is classified as impossible if this
program cannot find a solution for it, or if backtracking is needed to
find the solution.
"""
sys.exit(0)
if options.translate:
if options.generate:
print "Can not simultaneously generate and translate puzzles."
sys.exit(0)
try:
outputter = output_formats[options.format.lower()]
except KeyError:
print "Unrecognized output format."
sys.exit(0)
if options.empty:
outputter(Sudoku())
sys.exit(0)
# ======================================================================
# Initial puzzle setup
# ======================================================================
def random_puzzle(generate_symmetric = True):
"""Generate and return a randomly constructed Sudoku puzzle instance."""
puzzle = []
grid = Sudoku()
def choices(cell):
c = grid.choices(cell)
return len(c) > 1 and c or []
while True:
try:
while not grid.complete():
d,c = random.choice([(d,c) for c in range(81)
for d in choices(c)])
grid.place(d,c)
while step(grid,True): pass
puzzle.append((d,c))
if generate_symmetric:
c = 80-c
ch = grid.choices(c)
if not ch: # avoid IndexError from random.choice
raise BadSudoku("Placement invalidated symmetric cell")
d = random.choice(ch)
grid.place(d,c)
while step(grid,True): pass
puzzle.append((d,c))
except BadSudoku:
puzzle = []
grid = Sudoku()
continue
break
# find redundant information in initial state
q = 0
while q < len(puzzle):
grid = Sudoku(puzzle[:q] + puzzle[q+1+generate_symmetric:])
if not unisolvent(grid):
q += 1+generate_symmetric
else:
del puzzle[q]
if generate_symmetric:
del puzzle[q]
return Sudoku(puzzle)
def read_puzzle(empty = ".0"):
"""Read and return a Sudoku instance from standard input."""
def digits():
for digit in sys.stdin.read():
if digit in empty:
yield 0
elif '1' <= digit <= '9':
yield int(digit)
return Sudoku(digits())
if __name__ == '__main__':
if options.generate:
puzzle = random_puzzle(not options.asymmetric)
print_puzzle = True
print_solution = options.output_both
else:
puzzle = read_puzzle(options.emptychars)
print_puzzle = options.output_both or options.translate
print_solution = options.output_both or not options.translate
if options.permute:
puzzle = permute(puzzle, not options.asymmetric)
if options.verbose:
puzzle.logstream = sys.stderr
if options.assume_unique:
puzzle.assume_unique = True
# ======================================================================
# Main program: print and solve puzzle
# ======================================================================
if __name__ == '__main__':
print_level = True
if print_puzzle:
print_level = outputter(puzzle)
if options.output_both and print_level:
print
if options.backtrack:
solns = all_solutions(puzzle,False)
else:
while step(puzzle): pass
solns = [puzzle]
nsolns = 0
for soln in solns:
if print_solution:
print_level = outputter(soln)
nsolns += 1
difficulty = 0
used_names = []
for name,rule,level in rules:
if name in puzzle.rules_used:
used_names.append(name)
difficulty += 1<<level
if "backtrack" in puzzle.rules_used:
used_names.append("backtrack")
if print_level:
print "\nRules used:", ", ".join(used_names)
if nsolns != 1:
print "Number of solutions:",nsolns
if not puzzle.complete() or "backtrack" in puzzle.rules_used:
print "Level: impossible"
elif difficulty <= 1:
print "Level: easy"
elif difficulty <= 5:
print "Level: moderate"
elif difficulty <= 9:
print "Level: tricky"
elif difficulty <= 17:
print "Level: difficult"
elif difficulty <= 33:
print "Level: evil"
else:
print "Level: fiendish"
| apache-2.0 | 2,277,679,837,284,832,000 | 41.245757 | 220 | 0.512326 | false |
CityGrid/twonicorn | twonicornweb/views/cp_user.py | 1 | 8124 | # Copyright 2015 CityGrid Media, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPFound
from pyramid.httpexceptions import HTTPConflict
from pyramid.response import Response
from datetime import datetime
import logging
from passlib.hash import sha512_crypt
from twonicornweb.views import (
site_layout,
get_user,
)
from twonicornweb.models import (
DBSession,
User,
UserGroupAssignment,
Group,
)
log = logging.getLogger(__name__)
@view_config(route_name='cp_user', permission='cp', renderer='twonicornweb:templates/cp_user.pt')
def view_cp_user(request):
page_title = 'Control Panel - Users'
user = get_user(request)
users = DBSession.query(User).all()
groups = DBSession.query(Group).all()
params = {'mode': None,
'commit': None,
'user_id': None,
}
for p in params:
try:
params[p] = request.params[p]
except:
pass
mode = params['mode']
commit = params['commit']
user_id = params['user_id']
error_msg = None
this_user = None
this_groups = None
subtitle = 'Users'
if mode == 'add':
subtitle = 'Add a new user'
if commit:
user_names = request.POST.getall('user_name')
first_names = request.POST.getall('first_name')
last_names= request.POST.getall('last_name')
email_addresses = request.POST.getall('email_address')
passwords = request.POST.getall('password')
try:
utcnow = datetime.utcnow()
for u in range(len(user_names)):
salt = sha512_crypt.genconfig()[17:33]
encrypted_password = sha512_crypt.encrypt(passwords[u], salt=salt)
create = User(user_name=user_names[u], first_name=first_names[u], last_name=last_names[u], email_address=email_addresses[u], salt=salt, password=encrypted_password, updated_by=user['login'], created=utcnow, updated=utcnow)
DBSession.add(create)
DBSession.flush()
user_id = create.user_id
group_assignments = request.POST.getall('group_assignments')
for a in group_assignments:
g = DBSession.query(Group).filter(Group.group_name==a).one()
create = UserGroupAssignment(group_id=g.group_id, user_id=user_id, updated_by=user['login'], created=utcnow, updated=utcnow)
DBSession.add(create)
DBSession.flush()
return_url = '/cp/user'
return HTTPFound(return_url)
except Exception as ex:
if type(ex).__name__ == 'IntegrityError':
log.error('User already exists in the db, please edit instead.')
# Rollback
DBSession.rollback()
# FIXME: Return a nice page
return HTTPConflict('User already exists in the db, please edit instead.')
else:
raise
# FIXME not trapping correctly
DBSession.rollback()
error_msg = ("Failed to create user (%s)" % (ex))
log.error(error_msg)
if mode == 'edit':
subtitle = 'Edit user'
if not commit:
try:
q = DBSession.query(User)
q = q.filter(User.user_id == user_id)
this_user = q.one()
q = DBSession.query(Group)
q = q.join(UserGroupAssignment, Group.group_id== UserGroupAssignment.group_id)
q = q.filter(UserGroupAssignment.user_id==this_user.user_id)
results = q.all()
this_groups = []
for r in results:
this_groups.append(r.group_name)
except Exception, e:
conn_err_msg = e
return Response(str(conn_err_msg), content_type='text/plain', status_int=500)
if commit:
if 'form.submitted' in request.POST:
user_id = request.POST.get('user_id')
user_name = request.POST.get('user_name')
first_name = request.POST.get('first_name')
last_name = request.POST.get('last_name')
email_address = request.POST.get('email_address')
password = request.POST.get('password')
group_assignments = request.POST.getall('group_assignments')
# Update the user
utcnow = datetime.utcnow()
this_user = DBSession.query(User).filter(User.user_id==user_id).one()
this_user.user_name = user_name
this_user.first_name = first_name
this_user.last_name = last_name
this_user.email_address = email_address
if password:
salt = sha512_crypt.genconfig()[17:33]
encrypted_password = sha512_crypt.encrypt(password, salt=salt)
this_user.salt = salt
this_user.password = encrypted_password
this_user.updated_by=user['login']
DBSession.flush()
for g in groups:
if str(g.group_id) in group_assignments:
# assign
log.debug("Group: %s is in group assignments" % g.group_name)
q = DBSession.query(UserGroupAssignment).filter(UserGroupAssignment.group_id==g.group_id, UserGroupAssignment.user_id==this_user.user_id)
check = DBSession.query(q.exists()).scalar()
if not check:
log.info("Assigning local user %s to group %s" % (this_user.user_name, g.group_name))
update = UserGroupAssignment(group_id=g.group_id, user_id=user_id, updated_by=user['login'], created=utcnow, updated=utcnow)
DBSession.add(update)
DBSession.flush()
else:
# delete
log.debug("Checking to see if we need to remove assignment for user: %s in group %s" % (this_user.user_name,g.group_name))
q = DBSession.query(UserGroupAssignment).filter(UserGroupAssignment.group_id==g.group_id, UserGroupAssignment.user_id==this_user.user_id)
check = DBSession.query(q.exists()).scalar()
if check:
log.info("Removing local user %s from group %s" % (this_user.user_name, g.group_name))
assignment = DBSession.query(UserGroupAssignment).filter(UserGroupAssignment.group_id==g.group_id, UserGroupAssignment.user_id==this_user.user_id).one()
DBSession.delete(assignment)
DBSession.flush()
return_url = '/cp/user'
return HTTPFound(return_url)
return {'layout': site_layout(),
'page_title': page_title,
'user': user,
'this_user': this_user,
'this_groups': this_groups,
'user_id': user_id,
'users': users,
'groups': groups,
'subtitle': subtitle,
'mode': mode,
'commit': commit,
'error_msg': error_msg,
}
| apache-2.0 | 223,268,084,307,298,000 | 40.661538 | 242 | 0.546406 | false |
mseclab/PyJFuzz | pyjfuzz/core/pjf_server.py | 1 | 8559 | """
The MIT License (MIT)
Copyright (c) 2016 Daniele Linguaglossa <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from wsgiref.simple_server import make_server, WSGIRequestHandler
from bottle import route, run, ServerAdapter, response, request, static_file
from .pjf_testcase_server import PJFTestcaseServer
from .errors import PJFBaseException
from .errors import PJFMissingArgument
from threading import Thread
from .pjf_logger import PJFLogger
from .pjf_factory import PJFFactory
from .certs import CERT_PATH
import multiprocessing
import signal
import time
import ssl
import sys
import os
import socket
class WSGIRefServer(ServerAdapter):
"""
    WSGI based server class (plain HTTP, without SSL)
"""
def run(self, handler):
class QuietHandler(WSGIRequestHandler):
def log_request(*args, **kw):
pass
def log_error(self, format, *args):
pass
self.options['handler_class'] = QuietHandler
srv = make_server(self.host, self.port, handler, **self.options)
srv.serve_forever()
class SSLWSGIRefServer(ServerAdapter):
"""
WSGI based server class using SSL
"""
def run(self, handler):
class QuietHandler(WSGIRequestHandler):
def log_request(*args, **kw):
pass
def log_error(self, format, *args):
pass
self.options['handler_class'] = QuietHandler
srv = make_server(self.host, self.port, handler, **self.options)
srv.socket = ssl.wrap_socket(srv.socket, certfile=CERT_PATH, server_side=True)
srv.serve_forever()
class PJFServer:
"""
    Class used to run both the HTTP and HTTPS servers using the bottle web server
"""
def __init__(self, configuration):
self.client_queue = multiprocessing.Queue(0)
self.apply_patch()
self.logger = self.init_logger()
if ["debug", "html", "content_type", "notify", "ports"] not in configuration:
raise PJFMissingArgument()
if configuration.debug:
print("[\033[92mINFO\033[0m] Starting HTTP ({0}) and HTTPS ({1}) built-in server...".format(
configuration.ports["servers"]["HTTP_PORT"],
configuration.ports["servers"]["HTTPS_PORT"]
))
if not configuration.content_type:
configuration.content_type = False
if not configuration.content_type:
configuration.content_type = "application/json"
self.config = configuration
self.json = PJFFactory(configuration)
self.https = SSLWSGIRefServer(host="0.0.0.0", port=self.config.ports["servers"]["HTTPS_PORT"])
self.http = WSGIRefServer(host="0.0.0.0", port=self.config.ports["servers"]["HTTP_PORT"])
self.httpsd = multiprocessing.Process(target=run, kwargs={"server": self.https, "quiet": True})
self.httpd = multiprocessing.Process(target=run, kwargs={"server": self.http, "quiet": True})
if self.config.fuzz_web:
self.request_checker = Thread(target=self.request_pool, args=())
self.logger.debug("[{0}] - PJFServer successfully initialized".format(time.strftime("%H:%M:%S")))
def run(self):
"""
Start the servers
"""
route("/")(self.serve)
if self.config.html:
route("/<filepath:path>")(self.custom_html)
if self.config.fuzz_web:
self.request_checker.start()
self.httpd.start()
self.httpsd.start()
def save_testcase(self, ip, testcases):
try:
count = 0
dir_name = "testcase_{0}".format(ip)
print("[\033[92mINFO\033[0m] Client {0} seems to not respond anymore, saving testcases".format(ip))
try:
os.mkdir(dir_name)
except OSError:
pass
for test in testcases:
with open("{0}/testcase_{1}.json".format(dir_name, count), "wb") as t:
t.write(test)
t.close()
count += 1
except Exception as e:
raise PJFBaseException(e.message if hasattr(e, "message") else str(e))
def request_pool(self):
try:
clients = {}
end = False
while not end:
try:
client = self.client_queue.get(timeout=5)
if client == (0,0):
end = True
else:
if client[0] not in clients:
clients.update({client[0]: {"timestamp": time.time(), "testcases": []}})
else:
clients[client[0]]["timestamp"] = time.time()
if len(clients[client[0]]["testcases"]) <= 10:
clients[client[0]]["testcases"].append(client[1])
else:
clients[client[0]]["testcases"].pop(0)
clients[client[0]]["testcases"].append(client[1])
except:
pass
for c in list(clients.keys()):
if time.time() - clients[c]["timestamp"] >= 30:
self.save_testcase(c, clients[c]["testcases"])
del clients[c]
except Exception as e:
raise PJFBaseException(e.message if hasattr(e, "message") else str(e))
def stop(self):
"""
Kill the servers
"""
os.kill(self.httpd.pid, signal.SIGKILL)
os.kill(self.httpsd.pid, signal.SIGKILL)
self.client_queue.put((0,0))
if self.config.fuzz_web:
self.request_checker.join()
self.logger.debug("[{0}] - PJFServer successfully completed".format(time.strftime("%H:%M:%S")))
def custom_html(self, filepath):
"""
Serve custom HTML page
"""
try:
response.headers.append("Access-Control-Allow-Origin", "*")
response.headers.append("Accept-Encoding", "identity")
response.headers.append("Content-Type", "text/html")
return static_file(filepath, root=self.config.html)
except Exception as e:
raise PJFBaseException(e.message if hasattr(e, "message") else str(e))
def serve(self):
"""
Serve fuzzed JSON object
"""
try:
fuzzed = self.json.fuzzed
if self.config.fuzz_web:
self.client_queue.put((request.environ.get('REMOTE_ADDR'), fuzzed))
response.headers.append("Access-Control-Allow-Origin", "*")
response.headers.append("Accept-Encoding", "identity")
response.headers.append("Content-Type", self.config.content_type)
if self.config.notify:
PJFTestcaseServer.send_testcase(fuzzed, '127.0.0.1', self.config.ports["servers"]["TCASE_PORT"])
yield fuzzed
except Exception as e:
raise PJFBaseException(e.message if hasattr(e, "message") else str(e))
def init_logger(self):
"""
Init the default logger
"""
return PJFLogger.init_logger()
def apply_patch(self):
"""
Fix default socket lib to handle client disconnection while receiving data (Broken pipe)
"""
if sys.version_info >= (3, 0):
# No patch for python >= 3.0
pass
else:
from .patch.socket import socket as patch
socket.socket = patch
| mit | -7,245,142,948,344,012,000 | 38.442396 | 112 | 0.592242 | false |
elebihan/yaprogen | data/templates/setuptools-python-app/skeleton/disthelpers.py | 1 | 7324 | # -*- coding: utf-8 -*-
#
# disthelpers.py - useful distutils helper commands
#
# Copyright (c) 2014 Eric Le Bihan <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from distutils import cmd
from distutils.command.build import build as _build
from distutils.errors import DistutilsOptionError
from docutils.core import publish_file
import os
import subprocess
class extract_messages(cmd.Command):
description = 'extract localizable strings from source code'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
domain = self.distribution.get_name()
potin_file = os.path.join(os.curdir, 'po', 'POTFILES.in')
pot_file = os.path.join(os.curdir, 'po', domain + '.pot')
args = [
'xgettext', '-Lpython', '-k_', '-f', potin_file, '-o', pot_file,
'--package-name', self.distribution.get_name(),
]
subprocess.check_call(args)
class init_catalog(cmd.Command):
description = 'create a new catalog based on a POT file'
user_options = [
('locale=', 'l', 'locale for the new localized catalog'),
]
def initialize_options(self):
self.locale = None
def finalize_options(self):
if not self.locale:
raise DistutilsOptionError('please provide a locale')
def run(self):
domain = self.distribution.get_name()
pot_file = os.path.join(os.curdir, 'po', domain + '.pot')
po_file = os.path.join(os.curdir, 'po', self.locale + '.po')
args = [
'msginit', '--input', pot_file, '--output', po_file,
'--locale', self.locale,
]
subprocess.check_call(args)
class update_catalog(cmd.Command):
description = 'update an existing catalog from a POT file'
user_options = [
('locale=', 'l', 'locale of the localized catalog'),
]
def initialize_options(self):
self.locale = None
def finalize_options(self):
if not self.locale:
raise DistutilsOptionError('please provide a locale')
def run(self):
domain = self.distribution.get_name()
pot_file = os.path.join(os.curdir, 'po', domain + '.pot')
po_file = os.path.join(os.curdir, 'po', self.locale + '.po')
args = ['msgmerge', '--update', po_file, pot_file]
subprocess.check_call(args)
class build_catalog(cmd.Command):
description = 'compile *.po file into *.mo file'
user_options = [
('locale=', 'l', 'locale of the localized catalog'),
]
def initialize_options(self):
self.locale = None
def finalize_options(self):
pass
def run(self):
locales = []
domain = self.distribution.get_name()
po_dir = os.path.join(os.path.dirname(os.curdir), 'po')
if self.locale:
locales.append(self.locale)
else:
for path, names, filenames in os.walk(po_dir):
for f in filenames:
if f.endswith('.po'):
locale = f[:-3]
locales.append(locale)
for locale in locales:
mo_dir = os.path.join('build', 'locale', locale, 'LC_MESSAGES')
src = os.path.join(po_dir, locale + '.po')
dst = os.path.join(mo_dir, domain + '.mo')
if not os.path.exists(mo_dir):
os.makedirs(mo_dir)
print("compiling {0}".format(src))
args = ['msgfmt', src, '--output-file', dst]
subprocess.check_call(args)
locale_dir = os.path.join('share', 'locale', locale, 'LC_MESSAGES')
self.distribution.data_files.append((locale_dir, [dst]))
class build_man(cmd.Command):
description = 'build MAN page from restructuredtext'
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
src_dir = os.path.join(os.path.dirname(os.curdir), 'man')
dst_dir = os.path.join('build', 'man')
for path, names, filenames in os.walk(src_dir):
for f in filenames:
if f.endswith('.rst'):
filename, section, ext = f.rsplit('.', 2)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
src = os.path.join(path, f)
dst = os.path.join(dst_dir, filename + '.' + section)
print("converting {0}".format(src))
publish_file(source_path=src,
destination_path=dst,
writer_name='manpage')
man_dir = os.path.join('share', 'man', 'man' + section)
self.distribution.data_files.append((man_dir, [dst]))
class build_html(cmd.Command):
description = 'build HTML version of MAN pages from restructuredtext'
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
name = self.distribution.get_name()
src_dir = os.path.join(os.path.dirname(os.curdir), 'man')
dst_dir = os.path.join('build', 'html')
for path, names, filenames in os.walk(src_dir):
for f in filenames:
if f.endswith('.rst'):
filename, section, ext = f.rsplit('.', 2)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
src = os.path.join(path, f)
dst = os.path.join(dst_dir, filename + '.' + section +
'.html')
print("converting {0}".format(src))
publish_file(source_path=src,
destination_path=dst,
writer_name='html')
html_dir = os.path.join('share', 'doc', name, 'html')
self.distribution.data_files.append((html_dir, [dst]))
class build(_build):
sub_commands = _build.sub_commands
sub_commands += [('build_catalog', None)]
sub_commands += [('build_man', None)]
sub_commands += [('build_html', None)]
def run(self):
_build.run(self)
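# A minimal wiring sketch (illustrative, not part of this module): a project's
# setup.py is assumed to register these commands via cmdclass so that
# "python setup.py build" also builds catalogs, man pages and HTML docs, e.g.:
#
#   from disthelpers import (build, extract_messages, init_catalog,
#                            update_catalog, build_catalog, build_man, build_html)
#
#   setup(
#       ...,
#       cmdclass={
#           'build': build,
#           'extract_messages': extract_messages,
#           'init_catalog': init_catalog,
#           'update_catalog': update_catalog,
#           'build_catalog': build_catalog,
#           'build_man': build_man,
#           'build_html': build_html,
#       },
#   )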
# vim: ts=4 sts=4 sw=4 sta et ai
| gpl-3.0 | 159,195,571,268,875,170 | 34.553398 | 79 | 0.578372 | false |
cctags/gumpad2 | gumpad2.py | 1 | 50733 | #!/usr/bin/env python
# coding: utf-8
import wx
import wx.richtext
import wx.lib
import wx.lib.wordwrap
import os
import sys
import uuid
import tempfile
import optparse
import StringIO
import time
import locale
import hashlib
import zshelve
import PyRTFParser
import xtea
from wx.lib.embeddedimage import PyEmbeddedImage
try:
dirName = os.path.dirname(os.path.abspath(__file__))
except:
dirName = os.path.dirname(os.path.abspath(sys.argv[0]))
sys.path.append(os.path.split(dirName)[0])
try:
from agw import aui
from agw.aui import aui_switcherdialog as ASD
except ImportError: # if it's not there locally, try the wxPython lib.
import wx.lib.agw.aui as aui
from wx.lib.agw.aui import aui_switcherdialog as ASD
import images
program_name = "Gumpad2"
program_version = "v0.1.3"
program_title = "%s %s" % (program_name, program_version)
program_dbpath = "%s.db" % (program_name.lower())
program_main_icon = os.path.join(dirName, "main.ico")
############################################################################
#
# debug tools
#
import inspect
def debug_line():
try:
raise Exception
except:
return sys.exc_info()[2].tb_frame.f_back.f_lineno
def debug_file():
return inspect.currentframe().f_code.co_filename
def fall_into(x, a, b):
assert a < b
return a <= x and x < b
############################################################################
#
# VsTempFile
#
class VsTempFile:
def __init__(self):
self.fd, self.filename = tempfile.mkstemp()
def __del__(self):
self.Close()
def AppendString(self, str):
os.write(self.fd, str)
def Close(self):
os.close(self.fd)
os.unlink(self.filename)
############################################################################
#
# data format:
# version: xx
# magic: xx
# [uuid]: {type: xx, title: xx, body: xx, xtea: sha1sum}, type = (root, dir, html)
# tree: item = {id: xx, subs: [item *]}
#
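# An illustrative example of the stored records (uuids are hypothetical):
#     db["version"] = 1
#     db["magic"]   = "gumpad_magic_jshcm"
#     db["<uuid>"]  = {"type": VsData_Type_Html, "title": "My note", "body": "<richtext xml>"}
#     db["tree"]    = {"id": "<root-uuid>", "subs": [{"id": "<uuid>", "subs": []}]}
#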
VsData_Format_Version = 1
VsData_Format_Magic = "gumpad_magic_jshcm"
VsData_Type_Root = 1
VsData_Type_Dir = 2
VsData_Type_Html = 3
class VsData:
def __init__(self, filename):
self.m_filename = filename
bFileExist = os.access(filename, os.R_OK | os.W_OK)
self.db = zshelve.btopen(filename)
if not bFileExist:
self.__CreateData__()
def __CreateData__(self):
self.SetMagic(VsData_Format_Magic)
self.SetVersion(VsData_Format_Version)
id = self.GenerateId()
self.db[id] = {"type": VsData_Type_Root, "title": "root", "body": ""}
self.db["tree"] = {"id": id, "subs": []}
self.db.sync()
def __GetTree__(self, tree, id):
if id == tree["id"]:
return None, tree
for i in tree["subs"]:
parent, t = self.__GetTree__(i, id)
if t is not None:
if parent is None:
parent = tree
return parent, t
return None, None
def GetFileName(self):
return self.m_filename
def GetVersion(self):
return self.db["version"]
def SetVersion(self, version):
self.db["version"] = version
self.db.sync()
def GetMagic(self):
return self.db["magic"]
def SetMagic(self, magic):
self.db["magic"] = magic
self.db.sync()
def GetTree(self, parent, id=None):
"""从 parent 往下查找指定 id 的结点,返回 父结点、结点,
不存在时返回 None
"""
if id is None:
return None, parent
else:
return self.__GetTree__(parent, id)
def GetRoot(self):
return self.db["tree"]
def SetRoot(self, dir_tree):
"""更新目录树"""
self.set_root_tree_root = None
self.set_root_last_node = []
for i in dir_tree:
id = i[0]
path = i[1]
new = {"id": id, "subs": []}
if path == 0:
self.set_root_tree_root = new
self.set_root_last_node.append(new)
else:
while len(self.set_root_last_node) > path:
self.set_root_last_node.pop()
assert len(self.set_root_last_node) == path
parent = self.set_root_last_node[-1]
parent["subs"].append(new)
self.set_root_last_node.append(new)
assert self.set_root_tree_root is not None
self.db["tree"] = self.set_root_tree_root
self.db.sync()
def GenerateId(self):
return str(uuid.uuid1())
def Add(self, title, body, parent_id=None, type=None):
root = self.db["tree"]
dummy, t = self.GetTree(root, parent_id)
if type is None:
type = VsData_Type_Html
elif type not in (VsData_Type_Dir, VsData_Type_Html):
type = VsData_Type_Dir
new_id = self.GenerateId()
t["subs"].append({"id": new_id, "subs": []})
self.db["tree"] = root
self.db[new_id] = {"type": type, "title": title, "body": body}
self.db.sync()
return new_id
def Delete(self, id):
"""删除指定Id的叶子结点,根结点除外
成功时返回 True,失败时返回 False
"""
if id is None:
return False
root = self.db["tree"]
if id == root["id"]:
return False
parent, t = self.GetTree(root, id)
if t is None:
return False
if len(t["subs"]) != 0:
return False
        # Remove the parent/child relationship record
for i in range(len(parent["subs"])):
if id == parent["subs"][i]["id"]:
del parent["subs"][i]
break
self.db["tree"] = root
        # Remove the node record
if id in self.db:
del self.db[id]
self.db.sync()
def GetTitle(self, id=None):
if id is None:
id = self.db["tree"]["id"]
return self.db[id]["title"]
def SetTitle(self, id, title):
if id is None:
id = self.db["tree"]["id"]
t = self.db[id]
t["title"] = title
self.db[id] = t
self.db.sync()
def GetBody(self, id=None):
if id is None:
id = self.db["tree"]["id"]
return self.db[id]["body"]
def SetBody(self, id, body):
if id is None:
id = self.db["tree"]["id"]
t = self.db[id]
t["body"] = body
self.db[id] = t
self.db.sync()
def GetType(self, id=None):
if id is None:
id = self.db["tree"]["id"]
return self.db[id]["type"]
def SetXtea(self, id, key):
assert not self.HasXtea(id)
t = self.db[id]
t["xtea"] = hashlib.sha1(key).hexdigest()
self.db[id] = t
self.db.sync()
def ClearXtea(self, id):
assert self.HasXtea(id)
t = self.db[id]
del t["xtea"]
self.db[id] = t
self.db.sync()
def HasXtea(self, id):
return self.db[id].has_key("xtea")
def CheckXtea(self, id, key):
assert self.HasXtea(id)
return self.db[id]["xtea"] == hashlib.sha1(key).hexdigest()
def IsEditable(self, id=None):
"""判断指定Id对应的内容是否允许编辑"""
if id is None:
return False
t = self.GetType(id)
return VsData_Type_Html == t
############################################################################
#
# VsConfig
#
class VsConfig:
def __init__(self):
pass
def GetDefaultFont():
return wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, u"宋体", wx.FONTENCODING_SYSTEM)
############################################################################
#
# Control item Id
#
VsGenerateMenuId_Start = wx.ID_HIGHEST + 1
def VsGenerateMenuId():
global VsGenerateMenuId_Start
VsGenerateMenuId_Start += 1
return VsGenerateMenuId_Start
ID_Menu_CreateHtml = VsGenerateMenuId()
ID_Menu_CreateDir = VsGenerateMenuId()
ID_Menu_RenameEntry = VsGenerateMenuId()
ID_Menu_DeleteEntry = VsGenerateMenuId()
ID_Menu_Save = VsGenerateMenuId()
ID_Menu_SaveAs = VsGenerateMenuId()
ID_Menu_Exit = VsGenerateMenuId()
ID_Menu_Encrypt = VsGenerateMenuId()
ID_Menu_ToogleDirectory = VsGenerateMenuId()
ID_Menu_ToogleToolBar = VsGenerateMenuId()
ID_Menu_FindItem = VsGenerateMenuId()
ID_Menu_FindNextItem = VsGenerateMenuId()
ID_Menu_About = VsGenerateMenuId()
ID_ToolBar_Bold = VsGenerateMenuId()
ID_ToolBar_Italic = VsGenerateMenuId()
ID_ToolBar_Underline = VsGenerateMenuId()
ID_ToolBar_AlignLeft = VsGenerateMenuId()
ID_ToolBar_Center = VsGenerateMenuId()
ID_ToolBar_AlignRight = VsGenerateMenuId()
ID_ToolBar_IndentLess = VsGenerateMenuId()
ID_ToolBar_IndentMore = VsGenerateMenuId()
ID_ToolBar_Font = VsGenerateMenuId()
ID_ToolBar_FontColor = VsGenerateMenuId()
ID_ToolBar_InsertPic = VsGenerateMenuId()
ID_Ctx_InsertAsSibling = VsGenerateMenuId()
ID_Ctx_InsertAsChild = VsGenerateMenuId()
############################################################################
#
# VsStatusBar
#
class VsStatusBar(wx.StatusBar):
def __init__(self, parent):
wx.StatusBar.__init__(self, parent, -1)
self.SetFieldsCount(3)
self.SetStatusStyles([wx.SB_FLAT, wx.SB_NORMAL, wx.SB_NORMAL])
        # Show the data file currently being operated on
str = "@ %s" % (self.GetParent().db.GetFileName())
self.SetStatusText(str, 1)
        # Show the time initially
self.OnTimer()
        # Adjust the control size
width, height = self.GetTextExtent(self.GetStatusText(2))
width += 48
self.SetStatusWidths([0, -1, width])
        # Timer driving the time display
self.timer = wx.PyTimer(self.OnTimer)
self.timer.Start(1000 * 20)
def OnTimer(self):
        # Show the current time
t = time.localtime()
str = time.strftime("[%Y-%m-%d %H:%M %A]", t)
self.SetStatusText(str, 2)
############################################################################
#
# VsTreeCtrl
#
class VsTreeCtrl(wx.TreeCtrl):
def __init__(self, parent, id, pos, size, style):
wx.TreeCtrl.__init__(self, parent, id, pos, size, style)
def Traverse(self, func, startNode):
"""Apply 'func' to each node in a branch, beginning with 'startNode'. """
def TraverseAux(node, depth, func):
nc = self.GetChildrenCount(node, 0)
child, cookie = self.GetFirstChild(node)
# In wxPython 2.5.4, GetFirstChild only takes 1 argument
for i in xrange(nc):
func(child, depth)
TraverseAux(child, depth + 1, func)
child, cookie = self.GetNextChild(node, cookie)
func(startNode, 0)
TraverseAux(startNode, 1, func)
def ItemIsChildOf(self, item1, item2):
''' Tests if item1 is a child of item2, using the Traverse function '''
self.result = False
def test_func(node, depth):
if node == item1:
self.result = True
self.Traverse(test_func, item2)
return self.result
############################################################################
#
# VsFrame
#
class VsFrame(wx.Frame):
def __init__(self, parent, id=wx.ID_ANY, title="", pos=wx.DefaultPosition,
size=wx.DefaultSize,
style=wx.DEFAULT_FRAME_STYLE | wx.SUNKEN_BORDER):
wx.Frame.__init__(self, parent, id, title, pos, size, style)
self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
self.db = VsData(program_dbpath)
self.tree = None
self.editor_list = [] # [id, ctrl, modified]
self.passwd_map = {} # id:passwd
self._mgr = aui.AuiManager()
# tell AuiManager to manage this frame
self._mgr.SetManagedWindow(self)
# set frame icon
icon = wx.EmptyIcon()
icon.LoadFile(program_main_icon, wx.BITMAP_TYPE_ICO)
self.SetIcon(icon)
# set up default notebook style
self._notebook_style = aui.AUI_NB_DEFAULT_STYLE | aui.AUI_NB_TAB_EXTERNAL_MOVE | wx.NO_BORDER
self._notebook_theme = 0
        # Status bar
self.SetStatusBar(VsStatusBar(self))
self.CreateMenuBar()
self.BuildPanes()
        # Find support
self.finddlg = None
self.finddata = wx.FindReplaceData()
self.finddata.SetFlags(wx.FR_DOWN)
self.Bind(wx.EVT_FIND, self.OnFind)
self.Bind(wx.EVT_FIND_NEXT, self.OnFind)
self.Bind(wx.EVT_FIND_CLOSE, self.OnFindClose)
def CreateMenuBar(self):
"""创建菜单"""
mb = wx.MenuBar()
def DoBindMenuHandler(item, handler, updateUI=None):
self.Bind(wx.EVT_MENU, handler, item)
if updateUI is not None:
self.Bind(wx.EVT_UPDATE_UI, updateUI, item)
file_menu = wx.Menu()
DoBindMenuHandler(file_menu.Append(ID_Menu_CreateHtml, u"新建笔记"), self.OnCreateHtml, self.OnMenuUpdateUI)
DoBindMenuHandler(file_menu.Append(ID_Menu_CreateDir, u"新建目录"), self.OnCreateDir, self.OnMenuUpdateUI)
file_menu.AppendSeparator()
DoBindMenuHandler(file_menu.Append(ID_Menu_Save, u"保存(&S)\tCtrl-S"), self.OnSave, self.OnMenuUpdateUI)
DoBindMenuHandler(file_menu.Append(ID_Menu_SaveAs, u"另存为(&A)"), self.OnSaveAs, self.OnMenuUpdateUI)
file_menu.AppendSeparator()
self.Bind(wx.EVT_MENU, self.OnExit, file_menu.Append(ID_Menu_Exit, u"退出(&X)"))
ope_menu = wx.Menu()
DoBindMenuHandler(ope_menu.AppendCheckItem(ID_Menu_ToogleDirectory, u"显示目录树(&D)\tCtrl-D"), self.OnToogleDirTree, self.OnMenuUpdateUI)
DoBindMenuHandler(ope_menu.AppendCheckItem(ID_Menu_ToogleToolBar, u"显示工具栏(&T)\tCtrl-T"), self.OnToogleToolBar, self.OnMenuUpdateUI)
ope_menu.AppendSeparator()
DoBindMenuHandler(ope_menu.Append(ID_Menu_FindItem, u"查找(&F)\tCtrl-F"), self.OnFindItem, self.OnMenuUpdateUI)
DoBindMenuHandler(ope_menu.Append(ID_Menu_FindNextItem, u"查找下一个(&N)\tF3"), self.OnFindNextItem, self.OnMenuUpdateUI)
help_menu = wx.Menu()
self.Bind(wx.EVT_MENU, self.OnAbout, help_menu.Append(ID_Menu_About, u"关于(&A)..."))
mb.Append(file_menu, u"文件(&F)")
mb.Append(ope_menu, u"操作(&O)")
mb.Append(help_menu, u"帮助(&H)")
self.SetMenuBar(mb)
def CreateToolBar(self):
def DoBind(item, handler, updateUI=None):
self.Bind(wx.EVT_TOOL, handler, item)
if updateUI is not None:
self.Bind(wx.EVT_UPDATE_UI, updateUI, item)
tb = aui.AuiToolBar(self, -1, wx.DefaultPosition, wx.DefaultSize, aui.AUI_TB_DEFAULT_STYLE | aui.AUI_TB_OVERFLOW)
tb.SetToolBitmapSize(wx.Size(16, 16))
DoBind(tb.AddToggleTool(wx.ID_CUT, images._rt_cut.GetBitmap(), wx.NullBitmap, False, None, "Cut"), self.ForwardEvent, self.ForwardEvent)
DoBind(tb.AddToggleTool(wx.ID_COPY, images._rt_copy.GetBitmap(), wx.NullBitmap, False, None, "Copy"), self.ForwardEvent, self.ForwardEvent)
DoBind(tb.AddToggleTool(wx.ID_PASTE, images._rt_paste.GetBitmap(), wx.NullBitmap, False, None, "Paste"), self.ForwardEvent, self.ForwardEvent)
tb.AddSeparator()
DoBind(tb.AddToggleTool(wx.ID_UNDO, images._rt_undo.GetBitmap(), wx.NullBitmap, False, None, "Undo"), self.ForwardEvent, self.ForwardEvent)
DoBind(tb.AddToggleTool(wx.ID_REDO, images._rt_redo.GetBitmap(), wx.NullBitmap, False, None, "Redo"), self.ForwardEvent, self.ForwardEvent)
tb.AddSeparator()
DoBind(tb.AddToggleTool(ID_ToolBar_Bold, images._rt_bold.GetBitmap(), wx.NullBitmap, True, None, "Bold"), self.OnBold, self.OnToolBarUpdateUI)
DoBind(tb.AddToggleTool(ID_ToolBar_Italic, images._rt_italic.GetBitmap(), wx.NullBitmap, True, None, "Italic"), self.OnItalics, self.OnToolBarUpdateUI)
DoBind(tb.AddToggleTool(ID_ToolBar_Underline, images._rt_underline.GetBitmap(), wx.NullBitmap, True, None, "Underline"), self.OnUnderline, self.OnToolBarUpdateUI)
tb.AddSeparator()
DoBind(tb.AddToggleTool(ID_ToolBar_AlignLeft, images._rt_alignleft.GetBitmap(), wx.NullBitmap, True, None, "Align left"), self.OnAlignLeft, self.OnToolBarUpdateUI)
DoBind(tb.AddToggleTool(ID_ToolBar_Center, images._rt_centre.GetBitmap(), wx.NullBitmap, True, None, "Center"), self.OnAlignCenter, self.OnToolBarUpdateUI)
DoBind(tb.AddToggleTool(ID_ToolBar_AlignRight, images._rt_alignright.GetBitmap(), wx.NullBitmap, True, None, "Align right"), self.OnAlignRight, self.OnToolBarUpdateUI)
tb.AddSeparator()
DoBind(tb.AddToggleTool(ID_ToolBar_IndentLess, images._rt_indentless.GetBitmap(), wx.NullBitmap, False, None, "Indent Less"), self.OnIndentLess, self.OnToolBarUpdateUI)
DoBind(tb.AddToggleTool(ID_ToolBar_IndentMore, images._rt_indentmore.GetBitmap(), wx.NullBitmap, False, None, "Indent More"), self.OnIndentMore, self.OnToolBarUpdateUI)
tb.AddSeparator()
DoBind(tb.AddToggleTool(ID_ToolBar_Font, images._rt_font.GetBitmap(), wx.NullBitmap, False, None, "Font"), self.OnFont, self.OnToolBarUpdateUI)
DoBind(tb.AddToggleTool(ID_ToolBar_FontColor, images._rt_colour.GetBitmap(), wx.NullBitmap, False, None, "Font Color"), self.OnColour, self.OnToolBarUpdateUI)
tb.AddSeparator()
DoBind(tb.AddToggleTool(ID_ToolBar_InsertPic, images.images.GetBitmap(), wx.NullBitmap, False, None, "Insert Picture"), self.OnInsertPicture, self.OnToolBarUpdateUI)
tb.Realize()
self.toolbar_updateui_funcs = {
ID_ToolBar_Bold: self.OnUpdateBold,
ID_ToolBar_Italic: self.OnUpdateItalic,
ID_ToolBar_Underline: self.OnUpdateUnderline,
ID_ToolBar_AlignLeft: self.OnUpdateAlignLeft,
ID_ToolBar_Center: self.OnUpdateAlignCenter,
ID_ToolBar_AlignRight: self.OnUpdateAlignRight,
ID_ToolBar_IndentLess: None,
ID_ToolBar_IndentMore: None,
ID_ToolBar_Font: None,
ID_ToolBar_FontColor: None,
ID_ToolBar_InsertPic: None,
}
return tb
def BuildPanes(self):
# min size for the frame itself isn't completely done.
# see the end up AuiManager.Update() for the test
# code. For now, just hard code a frame minimum size
self.SetMinSize(wx.Size(400, 300))
self._mgr.AddPane(self.CreateTreeCtrl(), aui.AuiPaneInfo().Name("VsFrame_Dir_Tree").Caption(u"目录树").
Left().Layer(1).Position(1).CloseButton(True).MaximizeButton(False).
MinimizeButton(False))
self._mgr.AddPane(self.CreateNotebook(), aui.AuiPaneInfo().Name("VsFrame_Notebook").
CenterPane().PaneBorder(False))
self._mgr.AddPane(self.CreateToolBar(), aui.AuiPaneInfo().Name("VsFrame_Html_Edit_Toolbar").Caption("Toobar").ToolbarPane().Top())
# make some default perspectives
#
perspective_all = self._mgr.SavePerspective()
all_panes = self._mgr.GetAllPanes()
for pane in all_panes:
if not pane.IsToolbar():
pane.Hide()
self._mgr.GetPane("VsFrame_Dir_Tree").Show().Left().Layer(0).Row(0).Position(0)
self._mgr.GetPane("VsFrame_Notebook").Show()
perspective_default = self._mgr.SavePerspective()
self._nb_perspectives = []
auibook = self._mgr.GetPane("VsFrame_Notebook").window
nb_perspective_default = auibook.SavePerspective()
self._nb_perspectives.append(nb_perspective_default)
self._mgr.LoadPerspective(perspective_default)
# "commit" all changes made to AuiManager
self._mgr.Update()
def IsModified(self, index):
"""检查指定编辑控件是否已经有修改而未保存"""
assert fall_into(index, 0, len(self.editor_list))
return self.editor_list[index][2]
def SetModified(self, index, modified=True):
"""标记为已经修改"""
self.editor_list[index][2] = modified
def GetToolBarPanelInfo(self):
return self._mgr.GetPane("VsFrame_Html_Edit_Toolbar")
def GetNotebook(self):
notebook = self._mgr.GetPane("VsFrame_Notebook").window
assert notebook is not None
return notebook
def GetDirTreePanelInfo(self):
return self._mgr.GetPane("VsFrame_Dir_Tree")
def GetDirTree(self):
tree = self.GetDirTreePanelInfo().window
assert tree is not None
return tree
def GetDirTreeImageIndexByType(self, t):
if t == VsData_Type_Root:
return 0
elif t == VsData_Type_Dir:
return 0
elif t == VsData_Type_Html:
return 1
else:
assert False
def GetView(self, index=None):
parent = self.GetNotebook()
if index is None:
index = parent.GetSelection()
if index < 0:
return parent, None, None
assert fall_into(index, 0, len(self.editor_list))
return parent, index, self.editor_list[index][1]
def GetCurrentView(self):
"""获取当前窗口视图"""
return self.GetView()
def UpdateViewTitle(self, index=None):
parent, index, ctrl = self.GetView(index)
id = self.editor_list[index][0]
str = self.db.GetTitle(id)
if self.IsModified(index):
str = "* " + str
parent.SetPageText(index, str)
def SaveDirTree(self, tree):
self.save_dir_tree = []
tree.Traverse(lambda node, path: \
self.save_dir_tree.append((tree.GetItemPyData(node), path)),
tree.GetRootItem())
self.db.SetRoot(self.save_dir_tree)
def DoSave(self, id, body, encrypt=False):
        # original content  --(encrypt)--> save
        # encrypted content --(decrypt)--> save
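        # Note: xtea.crypt is used for both directions with the MD5-derived key
        # (the same call encrypts on save and decrypts on load, see
        # OnTreeItemActivated); SetXtea only stores a SHA-1 hash of the password
        # for later verification.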
if encrypt or self.db.HasXtea(id):
assert self.passwd_map.has_key(id)
kk = hashlib.md5(self.passwd_map[id]).digest()
cc = xtea.crypt(kk, body)
body = cc
self.db.SetBody(id, body)
def OnSave(self, event):
parent, index, ctrl = self.GetCurrentView()
if index is None:
return
        # Return immediately if nothing has changed
if not self.IsModified(index):
return
        # Restore the title
self.SetModified(index, False)
id = self.editor_list[index][0]
self.UpdateViewTitle()
        # Save the content
s = StringIO.StringIO()
handler = wx.richtext.RichTextXMLHandler()
handler.SaveStream(ctrl.GetBuffer(), s)
self.DoSave(id, s.getvalue())
def OnSaveAs(self, event):
parent, index, ctrl = self.GetCurrentView()
assert ctrl is not None
        # Default file name
default_title = parent.GetPageText(index)
# Display a File Save Dialog for RTF files
dlg = wx.FileDialog(self, "Choose a filename",
wildcard=u'Rich Text Format files (*.rtf)|*.rtf',
defaultFile=default_title,
style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
if dlg.ShowModal() != wx.ID_OK:
return
# assign it to path
path = dlg.GetPath()
dlg.Destroy()
# Use the custom RTF Handler to save the file
handler = PyRTFParser.PyRichTextRTFHandler()
handler.SaveFile(ctrl.GetBuffer(), path)
def OnToogleDirTree(self, event):
panel = self.GetDirTreePanelInfo()
panel.Show(not panel.IsShown())
self._mgr.Update()
def OnToogleToolBar(self, event):
panel = self.GetToolBarPanelInfo()
panel.Show(not panel.IsShown())
self._mgr.Update()
def OnFind(self, event):
parent, index, ctrl = self.GetCurrentView()
assert ctrl is not None
end = ctrl.GetLastPosition()
textstring = ctrl.GetRange(0, end).lower()
findstring = self.finddata.GetFindString().lower()
backward = not (self.finddata.GetFlags() & wx.FR_DOWN)
if backward:
start = ctrl.GetSelection()[0]
loc = textstring.rfind(findstring, 0, start)
else:
start = ctrl.GetSelection()[1]
loc = textstring.find(findstring, start)
if loc == -1 and start != 0:
# string not found, start at beginning
if backward:
start = end
loc = textstring.rfind(findstring, 0, start)
else:
start = 0
loc = textstring.find(findstring, start)
if loc == -1:
wx.MessageBox(u"搜索字符串未找到!", program_name, wx.OK | wx.ICON_EXCLAMATION)
if self.finddlg:
if loc == -1:
self.finddlg.SetFocus()
return
else:
self.finddlg.Destroy()
self.finddlg = None
ctrl.ShowPosition(loc)
ctrl.SetSelection(loc, loc + len(findstring))
def OnFindClose(self, event):
event.GetDialog().Destroy()
self.finddlg = None
def OnFindItem(self, event):
if self.finddlg is not None:
return
parent, index, ctrl = self.GetCurrentView()
assert ctrl is not None
self.finddlg = wx.FindReplaceDialog(self, self.finddata, "Find")
self.finddlg.Show(True)
def OnFindNextItem(self, event):
if self.finddata.GetFindString():
self.OnFind(event)
else:
self.OnFindItem(event)
def OnMenuUpdateUI(self, event):
evId = event.GetId()
if evId == ID_Menu_ToogleDirectory:
event.Check(self.GetDirTreePanelInfo().IsShown())
elif evId == ID_Menu_ToogleToolBar:
event.Check(self.GetToolBarPanelInfo().IsShown())
elif evId in (ID_Menu_Save, ID_Menu_SaveAs, ID_Menu_FindItem, ID_Menu_FindNextItem):
parent, index, ctrl = self.GetCurrentView()
exist = ctrl is not None
event.Enable(exist)
if evId == ID_Menu_Save and exist:
event.Enable(self.IsModified(index))
elif evId in (ID_Menu_CreateHtml, ID_Menu_CreateDir):
            # Disable the "new" menu entries while the directory tree is hidden
event.Enable(self.GetDirTreePanelInfo().IsShown())
def OnCopy(self, event):
parent, index, ctrl = self.GetCurrentView()
if ctrl is not None:
ctrl.Copy()
wx.TheClipboard.Flush()
def OnCut(self, event):
parent, index, ctrl = self.GetCurrentView()
if ctrl is not None:
ctrl.Cut()
wx.TheClipboard.Flush()
def OnRichtextContentChanged(self, event):
parent, index, ctrl = self.GetCurrentView()
assert index is not None
assert event.GetEventObject() is ctrl
if not self.IsModified(index):
self.SetModified(index, True)
self.UpdateViewTitle()
def OnTreeItemActivated(self, event):
id = self.tree.GetItemPyData(event.GetItem())
parent = self.GetNotebook()
passwd = ""
        # Return immediately if the content is not editable
if not self.db.IsEditable(id):
return
        # If it is already open, select it and return
for i in range(len(self.editor_list)):
if id == self.editor_list[i][0]:
parent.SetSelection(i)
return
        # Ask for the password
encrypted = self.db.HasXtea(id)
if encrypted:
passwd = wx.GetPasswordFromUser(message=u"请输入密码:", caption=u"打开加密文档", default_value="", parent=None)
if not self.db.CheckXtea(id, passwd):
if len(passwd) != 0:
wx.MessageBox(u"密码不正确!", program_name, wx.OK | wx.ICON_ERROR)
return
self.passwd_map[id] = passwd
        # Create a new editor page
ctrl = wx.richtext.RichTextCtrl(parent, style=wx.VSCROLL | wx.HSCROLL | wx.NO_BORDER)
ctrl.Bind(wx.richtext.EVT_RICHTEXT_CONTENT_INSERTED, self.OnRichtextContentChanged)
ctrl.Bind(wx.richtext.EVT_RICHTEXT_CONTENT_DELETED, self.OnRichtextContentChanged)
ctrl.Bind(wx.richtext.EVT_RICHTEXT_STYLE_CHANGED, self.OnRichtextContentChanged)
ctrl.Bind(wx.EVT_MENU, self.OnCopy, id=wx.ID_COPY)
ctrl.Bind(wx.EVT_MENU, self.OnCut, id=wx.ID_CUT)
        # Set the default font
ctrl.SetFont(GetDefaultFont())
        # Parse the body content
body = self.db.GetBody(id)
if encrypted:
kk = hashlib.md5(passwd).digest()
cc = xtea.crypt(kk, body)
body = cc
if len(body) != 0:
tmpfile = VsTempFile()
tmpfile.AppendString(body)
ctrl.Freeze()
ctrl.BeginSuppressUndo()
handler = wx.richtext.RichTextXMLHandler()
# Load the XML file via the XML Handler.
# Note that for XML, the BUFFER is passed.
handler.LoadFile(ctrl.GetBuffer(), tmpfile.filename)
# Signal the end of changing the control
ctrl.EndSuppressUndo()
ctrl.Thaw()
        # Record it in the in-memory editor list
self.editor_list.append([id, ctrl, False])
parent.AddPage(ctrl, self.db.GetTitle(id), select=True)
def OnTreeEndLabelEdit_After(self, item, old_text):
"""更新 title,如果已经打开,则同步更新"""
item_text = self.tree.GetItemText(item)
s = item_text.strip()
        # Update the display in the directory tree
if s != item_text:
self.tree.SetItemText(item, s)
        # Return immediately if nothing changed
if old_text == s:
return
        # Update the stored data
id = self.tree.GetItemPyData(item)
self.db.SetTitle(id, s)
        # Update the title of the open editor page
for i in range(len(self.editor_list)):
if id == self.editor_list[i][0]:
self.UpdateViewTitle(i)
break
def OnTreeEndLabelEdit(self, event):
item = event.GetItem()
wx.CallAfter(self.OnTreeEndLabelEdit_After, item, self.tree.GetItemText(item))
def OnTreeBeginDrag(self, event):
tree = event.GetEventObject()
self.drag_source = event.GetItem()
if self.drag_source != tree.GetRootItem():
event.Allow()
else:
event.Veto()
def OnTreeEndDrag(self, event):
drop_target = event.GetItem()
if not drop_target.IsOk():
return
tree = event.GetEventObject()
source_id = tree.GetItemPyData(self.drag_source)
        # The drop target is not allowed to be a child of the source item
if tree.ItemIsChildOf(drop_target, self.drag_source):
tree.Unselect()
return
# One of the following methods of inserting will be called...
def MoveNodes(parent, target):
            # Delete the source item and its children
tree.Delete(self.drag_source)
            # Insert the source item at the target position
imgidx = self.GetDirTreeImageIndexByType(self.db.GetType(source_id))
title = self.db.GetTitle(source_id)
if target is not None:
new_item = tree.InsertItem(parent, target, title, imgidx)
else:
new_item = tree.InsertItemBefore(parent, 0, title, imgidx)
tree.SetItemPyData(new_item, source_id)
            # Add the child items
dummy, t = self.db.GetTree(self.db.GetRoot(), source_id)
self.Tree_AddNode(t, new_item)
            # Set the tree node attributes
tree.ExpandAllChildren(new_item)
tree.SelectItem(new_item)
            # Save the directory tree
self.SaveDirTree(tree)
def InsertAsSibling(event):
MoveNodes(tree.GetItemParent(drop_target), drop_target)
def InsertAsChild(event):
MoveNodes(drop_target, None)
        # If the target is not the root item, ask whether to insert as a sibling or as a child
if drop_target == tree.GetRootItem():
InsertAsChild(None)
else:
menu = wx.Menu()
menu.Append(ID_Ctx_InsertAsSibling, u"与目标项平级", "")
menu.Append(ID_Ctx_InsertAsChild, u"作为目标项的子项", "")
menu.UpdateUI()
menu.Bind(wx.EVT_MENU, InsertAsSibling, id=ID_Ctx_InsertAsSibling)
menu.Bind(wx.EVT_MENU, InsertAsChild, id=ID_Ctx_InsertAsChild)
self.PopupMenu(menu)
def OnBold(self, event):
parent, index, ctrl = self.GetCurrentView()
if ctrl is not None:
ctrl.ApplyBoldToSelection()
def OnItalics(self, event):
parent, index, ctrl = self.GetCurrentView()
if ctrl is not None:
ctrl.ApplyItalicToSelection()
def OnAlignLeft(self, event):
parent, index, ctrl = self.GetCurrentView()
if ctrl is not None:
ctrl.ApplyAlignmentToSelection(wx.richtext.TEXT_ALIGNMENT_LEFT)
def OnAlignCenter(self, event):
parent, index, ctrl = self.GetCurrentView()
if ctrl is not None:
ctrl.ApplyAlignmentToSelection(wx.richtext.TEXT_ALIGNMENT_CENTRE)
def OnAlignRight(self, event):
parent, index, ctrl = self.GetCurrentView()
if ctrl is not None:
ctrl.ApplyAlignmentToSelection(wx.richtext.TEXT_ALIGNMENT_RIGHT)
def OnIndentLess(self, event):
parent, index, ctrl = self.GetCurrentView()
if ctrl is None:
return
attr = wx.richtext.TextAttrEx()
attr.SetFlags(wx.richtext.TEXT_ATTR_LEFT_INDENT)
ip = ctrl.GetInsertionPoint()
if ctrl.GetStyle(ip, attr):
r = wx.richtext.RichTextRange(ip, ip)
if ctrl.HasSelection():
r = ctrl.GetSelectionRange()
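            # wx.richtext left indents are expressed in tenths of a millimetre,
            # so each step here is 10 mm.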
if attr.GetLeftIndent() >= 100:
attr.SetLeftIndent(attr.GetLeftIndent() - 100)
attr.SetFlags(wx.richtext.TEXT_ATTR_LEFT_INDENT)
ctrl.SetStyle(r, attr)
def OnIndentMore(self, event):
parent, index, ctrl = self.GetCurrentView()
if ctrl is None:
return
attr = wx.richtext.TextAttrEx()
attr.SetFlags(wx.richtext.TEXT_ATTR_LEFT_INDENT)
ip = ctrl.GetInsertionPoint()
if ctrl.GetStyle(ip, attr):
r = wx.richtext.RichTextRange(ip, ip)
if ctrl.HasSelection():
r = ctrl.GetSelectionRange()
attr.SetLeftIndent(attr.GetLeftIndent() + 100)
attr.SetFlags(wx.richtext.TEXT_ATTR_LEFT_INDENT)
ctrl.SetStyle(r, attr)
def OnUnderline(self, event):
parent, index, ctrl = self.GetCurrentView()
if ctrl is not None:
ctrl.ApplyUnderlineToSelection()
def OnFont(self, event):
parent, index, ctrl = self.GetCurrentView()
if ctrl is None:
return
if not ctrl.HasSelection():
return
r = ctrl.GetSelectionRange()
fontData = wx.FontData()
fontData.EnableEffects(False)
attr = wx.richtext.TextAttrEx()
attr.SetFlags(wx.richtext.TEXT_ATTR_FONT)
if ctrl.GetStyle(ctrl.GetInsertionPoint(), attr):
fontData.SetInitialFont(attr.GetFont())
dlg = wx.FontDialog(ctrl, fontData)
if dlg.ShowModal() == wx.ID_OK:
fontData = dlg.GetFontData()
font = fontData.GetChosenFont()
if font:
attr.SetFlags(wx.richtext.TEXT_ATTR_FONT)
attr.SetFont(font)
ctrl.SetStyle(r, attr)
dlg.Destroy()
def OnColour(self, event):
parent, index, ctrl = self.GetCurrentView()
if ctrl is None:
return
if not ctrl.HasSelection():
return
colourData = wx.ColourData()
attr = wx.richtext.TextAttrEx()
attr.SetFlags(wx.richtext.TEXT_ATTR_TEXT_COLOUR)
if ctrl.GetStyle(ctrl.GetInsertionPoint(), attr):
colourData.SetColour(attr.GetTextColour())
dlg = wx.ColourDialog(self, colourData)
if dlg.ShowModal() == wx.ID_OK:
colourData = dlg.GetColourData()
colour = colourData.GetColour()
if colour:
if not ctrl.HasSelection():
ctrl.BeginTextColour(colour)
else:
r = ctrl.GetSelectionRange()
attr.SetFlags(wx.richtext.TEXT_ATTR_TEXT_COLOUR)
attr.SetTextColour(colour)
ctrl.SetStyle(r, attr)
dlg.Destroy()
def OnInsertPicture(self, event):
parent, index, ctrl = self.GetCurrentView()
assert ctrl is not None
        # Let the user pick an image file
dlg = wx.FileDialog(self, "Choose a file",
defaultFile="",
wildcard="All files (*.*)|*.*",
style=wx.OPEN | wx.CHANGE_DIR)
if dlg.ShowModal() != wx.ID_OK:
return
        # Load the image; return if it is not valid
image = wx.Image(dlg.GetPath())
if not image.IsOk():
return
        # Insert the image
ctrl.WriteImage(image)
def ForwardEvent(self, event):
parent, index, ctrl = self.GetCurrentView()
if ctrl is not None:
ctrl.ProcessEvent(event)
def OnToolBarUpdateUI(self, event):
parent, index, ctrl = self.GetCurrentView()
if ctrl is not None:
event.Enable(True)
id = event.GetId()
if id in self.toolbar_updateui_funcs:
f = self.toolbar_updateui_funcs[id]
if f is not None:
f(event)
else:
event.Enable(False)
def OnUpdateBold(self, event):
parent, index, ctrl = self.GetCurrentView()
if ctrl is not None:
event.Check(ctrl.IsSelectionBold())
def OnUpdateItalic(self, event):
parent, index, ctrl = self.GetCurrentView()
if ctrl is not None:
event.Check(ctrl.IsSelectionItalics())
def OnUpdateUnderline(self, event):
parent, index, ctrl = self.GetCurrentView()
if ctrl is not None:
event.Check(ctrl.IsSelectionUnderlined())
def OnUpdateAlignLeft(self, event):
parent, index, ctrl = self.GetCurrentView()
if ctrl is not None:
event.Check(ctrl.IsSelectionAligned(wx.richtext.TEXT_ALIGNMENT_LEFT))
def OnUpdateAlignCenter(self, event):
parent, index, ctrl = self.GetCurrentView()
if ctrl is not None:
event.Check(ctrl.IsSelectionAligned(wx.richtext.TEXT_ALIGNMENT_CENTRE))
def OnUpdateAlignRight(self, event):
parent, index, ctrl = self.GetCurrentView()
if ctrl is not None:
event.Check(ctrl.IsSelectionAligned(wx.richtext.TEXT_ALIGNMENT_RIGHT))
def OnRightDown(self, event):
tree = self.GetDirTree()
pt = event.GetPosition()
item, flags = tree.HitTest(pt)
if item:
tree.SelectItem(item)
def OnRightUp(self, event):
tree = self.GetDirTree()
menu = wx.Menu()
self.Bind(wx.EVT_MENU, self.OnCreateHtml, menu.Append(ID_Menu_CreateHtml, u"新建笔记"))
self.Bind(wx.EVT_MENU, self.OnCreateDir, menu.Append(ID_Menu_CreateDir, u"新建目录"))
menu.AppendSeparator()
self.Bind(wx.EVT_MENU, self.OnRenameEntry, menu.Append(ID_Menu_RenameEntry, u"重命名"))
self.Bind(wx.EVT_MENU, self.OnDeleteEntry, menu.Append(ID_Menu_DeleteEntry, u"删除"))
        # Disable ID_Menu_DeleteEntry if the root node is currently selected,
        # or if the selected node still has children.
        #
cursel = tree.GetSelection()
if cursel == tree.GetRootItem():
menu.Enable(ID_Menu_DeleteEntry, False)
if tree.ItemHasChildren(cursel):
menu.Enable(ID_Menu_DeleteEntry, False)
        # Encrypt / decrypt
menu.AppendSeparator()
self.Bind(wx.EVT_MENU, self.OnEncrypt, menu.Append(ID_Menu_Encrypt, u"加密"))
id = tree.GetItemPyData(cursel)
if VsData_Type_Html == self.db.GetType(id):
            # Already encrypted
if self.db.HasXtea(id):
menu.SetLabel(ID_Menu_Encrypt, u"清除密码")
            # Do not allow the operation while the note has unsaved changes
for i in range(len(self.editor_list)):
if id == self.editor_list[i][0]:
if self.IsModified(i):
menu.Enable(ID_Menu_Encrypt, False)
break
else:
menu.Enable(ID_Menu_Encrypt, False)
self.PopupMenu(menu)
menu.Destroy()
event.Skip()
def OnCreateEntry(self, event, type):
tree = self.GetDirTree()
parent_item = tree.GetSelection()
parent_id = tree.GetItemPyData(parent_item)
name = "new item"
if VsData_Type_Dir == type:
image_index = 0
else:
image_index = 1
child_id = self.db.Add(name, "", parent_id, type)
child_item = tree.AppendItem(parent_item, name, image_index)
tree.SetItemPyData(child_item, child_id)
tree.SelectItem(child_item)
tree.EditLabel(child_item)
def OnCreateHtml(self, event):
self.OnCreateEntry(event, VsData_Type_Html)
def OnCreateDir(self, event):
self.OnCreateEntry(event, VsData_Type_Dir)
def OnRenameEntry(self, event):
tree = self.GetDirTree()
item = tree.GetSelection()
tree.EditLabel(item)
def OnDeleteEntry(self, event):
"""删除一个结点"""
tree = self.GetDirTree()
item = tree.GetSelection()
id = tree.GetItemPyData(item)
        # Confirm the deletion
ret = wx.MessageBox(u'确实要删除吗?', u'确认删除', wx.YES_NO | wx.ICON_QUESTION)
if wx.YES != ret:
return
        # Delete from the database
self.db.Delete(id)
        # Forget any cached password
if self.passwd_map.has_key(id):
del self.passwd_map[id]
        # Close the editor tab if the entry is currently open
for i in range(len(self.editor_list)):
if id == self.editor_list[i][0]:
del self.editor_list[i]
self.GetNotebook().DeletePage(i)
break
        # Remove from the directory tree
tree.Delete(item)
def OnEncrypt(self, event):
tree = self.GetDirTree()
cursel = tree.GetSelection()
id = tree.GetItemPyData(cursel)
assert VsData_Type_Html == self.db.GetType(id)
        if not self.db.HasXtea(id):  # encrypt
            # Ask the user for a new password
p1 = wx.GetPasswordFromUser(message=u"请输入新密码:", caption=u"加密", default_value="", parent=None)
p2 = wx.GetPasswordFromUser(message=u"请再次输入新密码:", caption=u"加密", default_value="", parent=None)
if p1 != p2:
wx.MessageBox(u"输入密码不一致!", program_name, wx.OK | wx.ICON_ERROR)
return
elif len(p1) == 0:
wx.MessageBox(u"密码不允许为空!", program_name, wx.OK | wx.ICON_ERROR)
return
            # Remember the plaintext password
assert not self.passwd_map.has_key(id)
self.passwd_map[id] = p1
            # Store the password hash and re-save the data
self.db.SetXtea(id, p1)
self.DoSave(id, self.db.GetBody(id), encrypt=True)
        else:  # decrypt
            # The old password is required
p1 = wx.GetPasswordFromUser(message=u"请输入密码:", caption=u"解密", default_value="", parent=None)
if not self.db.CheckXtea(id, p1):
wx.MessageBox(u"密码不正确!", program_name, wx.OK | wx.ICON_ERROR)
return
self.passwd_map[id] = p1
self.DoSave(id, self.db.GetBody(id), encrypt=True)
self.db.ClearXtea(id)
del self.passwd_map[id]
def UserQuitConfirm(self):
ret = wx.MessageBox(u"内容已经修改但没有保存,确认要继续吗?", u'确认关闭', wx.YES_NO | wx.ICON_QUESTION)
return ret
def OnNotebookPageClose(self, event):
index = event.GetSelection()
assert fall_into(index, 0, len(self.editor_list))
        # Warn that the current content has been modified but not saved
if self.IsModified(index):
if wx.YES != self.UserQuitConfirm():
event.Veto()
return
        # Close confirmed; drop the associated data structures
del self.editor_list[index]
def OnExit(self, event):
self.Close(False)
def OnCloseWindow(self, event):
        # Check whether any content has been modified but not yet saved
modified = False
for i in range(len(self.editor_list)):
if self.IsModified(i):
modified = True
break
        # Ask the user to confirm
if modified:
if wx.YES != self.UserQuitConfirm():
event.Veto()
return
        # Quit
self.Destroy()
def OnAbout(self, event):
info = wx.AboutDialogInfo()
info.Name = program_name
info.Version = program_version
info.Copyright = "(C) 2010-2011 [email protected]"
info.Description = wx.lib.wordwrap.wordwrap(
program_name + " is a simple richtext notepad.\n\nTHIS SOFTWARE COMES WITH ABSOLUTELY NO WARRANTY! USE AT YOUR OWN RISK!",
430, wx.ClientDC(self))
info.WebSite = ("http://code.google.com/p/gumpad2")
info.Developers = ["[email protected]"]
info.License = wx.lib.wordwrap.wordwrap("The MIT License", 500, wx.ClientDC(self))
# Then we call wx.AboutBox giving it that info object
wx.AboutBox(info)
def Tree_AddNode(self, db_node, node):
for i in range(len(db_node["subs"])):
child_id = db_node["subs"][i]["id"]
imgidx = self.GetDirTreeImageIndexByType(self.db.GetType(child_id))
n = self.tree.AppendItem(node, self.db.GetTitle(child_id), imgidx)
self.tree.SetItemPyData(n, child_id)
self.Tree_AddNode(db_node["subs"][i], n)
def CreateTreeCtrl(self):
self.tree = VsTreeCtrl(self, -1, wx.Point(0, 0), wx.Size(200, 250),
wx.TR_DEFAULT_STYLE | wx.NO_BORDER | wx.TR_EDIT_LABELS | wx.TR_NO_BUTTONS)
imglist = wx.ImageList(16, 16, True, 2)
imglist.Add(wx.ArtProvider.GetBitmap(wx.ART_FOLDER, wx.ART_OTHER, wx.Size(16, 16)))
imglist.Add(wx.ArtProvider.GetBitmap(wx.ART_NORMAL_FILE, wx.ART_OTHER, wx.Size(16, 16)))
self.tree.AssignImageList(imglist)
db_root = self.db.GetRoot()
root = self.tree.AddRoot(self.db.GetTitle(), 0)
self.tree.SetItemPyData(root, db_root["id"])
self.Tree_AddNode(db_root, root)
self.tree.ExpandAllChildren(root)
self.tree.SelectItem(root)
self.tree.Bind(wx.EVT_RIGHT_DOWN, self.OnRightDown)
self.tree.Bind(wx.EVT_RIGHT_UP, self.OnRightUp)
self.tree.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.OnTreeItemActivated)
self.tree.Bind(wx.EVT_TREE_END_LABEL_EDIT, self.OnTreeEndLabelEdit)
self.tree.Bind(wx.EVT_TREE_BEGIN_DRAG, self.OnTreeBeginDrag)
self.tree.Bind(wx.EVT_TREE_END_DRAG, self.OnTreeEndDrag)
return self.tree
def CreateNotebook(self):
client_size = self.GetClientSize()
ctrl = aui.AuiNotebook(self, -1, wx.Point(client_size.x, client_size.y),
wx.Size(430, 200), self._notebook_style)
ctrl.SetArtProvider(aui.AuiDefaultTabArt())
ctrl.Bind(aui.EVT_AUINOTEBOOK_PAGE_CLOSE, self.OnNotebookPageClose)
return ctrl
class MyApp(wx.App):
def __init__(self):
wx.App.__init__(self, 0)
def OnInit(self):
self.frame = VsFrame(None, -1, program_title, size=(800, 600))
self.frame.CenterOnScreen()
self.frame.Show()
self.Bind(wx.EVT_ACTIVATE_APP, self.OnActivate)
return True
def OnActivate(self, event):
if event.GetActive():
pass
def main():
global program_dbpath
    # Locale setup
locale.setlocale(locale.LC_ALL, '')
    # Command-line argument parsing
usage = program_name + " [-f <file>] [-h] [-v]"
program_dbpath = os.path.join(os.path.expanduser("~"), program_dbpath)
parser = optparse.OptionParser(usage)
parser.add_option("-v", "--version", action="store_true", dest="version", default=False, help="print the version number of the executable and exit")
parser.add_option("-f", "--file", action="store", type="string", dest="file", default=program_dbpath, help="specify the data file")
options, args = parser.parse_args(sys.argv[1:])
if options.version:
print program_title
return
if len(args) > 0:
parser.print_help()
return
    # Check whether the user-specified file is valid
program_dbpath = os.path.expanduser(options.file)
if not os.path.isabs(program_dbpath):
program_dbpath = os.path.realpath(os.path.join(os.curdir, program_dbpath))
    # Create intermediate directories as needed
dirname = os.path.dirname(program_dbpath)
if not os.path.exists(dirname):
try:
os.makedirs(dirname)
except:
print "Error: " + options.file + " is not a valid filename"
return
elif not os.path.isdir(dirname):
print "Error: " + options.file + " is not a valid filename"
return
if os.path.exists(program_dbpath):
        # Exit if the path exists but is not a regular file
if not os.path.isfile(program_dbpath):
print "Error: " + options.file + " is not a valid filename"
return
        # Exit if the file is not a valid database
try:
db = VsData(program_dbpath)
assert db.GetMagic() == VsData_Format_Magic
if db.GetVersion() > VsData_Format_Version:
print "Error: " + options.file + " has version (%d), higher than the executable (%d)" % (db.GetVersion(), VsData_Format_Version)
return
except:
print "Error: " + options.file + " exists but corrupted"
return
    # Start the GUI
app = MyApp()
app.MainLoop()
if __name__ == '__main__':
main()
| mit | -7,152,632,794,733,037,000 | 32.421268 | 176 | 0.58031 | false |
armenzg/build-mozharness | configs/unittests/win_unittest.py | 1 | 7095 | import os
import sys
# OS Specifics
ABS_WORK_DIR = os.path.join(os.getcwd(), "build")
BINARY_PATH = os.path.join(ABS_WORK_DIR, "firefox", "firefox.exe")
INSTALLER_PATH = os.path.join(ABS_WORK_DIR, "installer.zip")
XPCSHELL_NAME = 'xpcshell.exe'
EXE_SUFFIX = '.exe'
DISABLE_SCREEN_SAVER = False
ADJUST_MOUSE_AND_SCREEN = True
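# Screen-saver handling only applies to Linux slaves (it shells out to xset);
# Windows instead adjusts the mouse position and screen resolution before tests.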
#####
config = {
"buildbot_json_path": "buildprops.json",
"exes": {
'python': sys.executable,
'virtualenv': [sys.executable, 'c:/mozilla-build/buildbotve/virtualenv.py'],
'hg': 'c:/mozilla-build/hg/hg',
'mozinstall': ['%s/build/venv/scripts/python' % os.getcwd(),
'%s/build/venv/scripts/mozinstall-script.py' % os.getcwd()],
'tooltool.py': [sys.executable, 'C:/mozilla-build/tooltool.py'],
},
###
"installer_path": INSTALLER_PATH,
"binary_path": BINARY_PATH,
"xpcshell_name": XPCSHELL_NAME,
"virtualenv_path": 'venv',
"virtualenv_python_dll": os.path.join(os.path.dirname(sys.executable), "python27.dll"),
"find_links": [
"http://pypi.pvt.build.mozilla.org/pub",
"http://pypi.pub.build.mozilla.org/pub",
],
"pip_index": False,
"exe_suffix": EXE_SUFFIX,
"run_file_names": {
"mochitest": "runtests.py",
"webapprt": "runtests.py",
"reftest": "runreftest.py",
"xpcshell": "runxpcshelltests.py",
"cppunittest": "runcppunittests.py",
"jittest": "jit_test.py",
"mozbase": "test.py",
"mozmill": "runtestlist.py",
},
"minimum_tests_zip_dirs": ["bin/*", "certs/*", "modules/*", "mozbase/*", "config/*"],
"specific_tests_zip_dirs": {
"mochitest": ["mochitest/*"],
"webapprt": ["mochitest/*"],
"reftest": ["reftest/*", "jsreftest/*"],
"xpcshell": ["xpcshell/*"],
"cppunittest": ["cppunittests/*"],
"jittest": ["jit-test/*"],
"mozbase": ["mozbase/*"],
"mozmill": ["mozmill/*"],
},
# test harness options are located in the gecko tree
"in_tree_config": "config/mozharness/windows_config.py",
# local mochi suites
"all_mochitest_suites":
{
"plain1": ["--total-chunks=5", "--this-chunk=1", "--chunk-by-dir=4"],
"plain2": ["--total-chunks=5", "--this-chunk=2", "--chunk-by-dir=4"],
"plain3": ["--total-chunks=5", "--this-chunk=3", "--chunk-by-dir=4"],
"plain4": ["--total-chunks=5", "--this-chunk=4", "--chunk-by-dir=4"],
"plain5": ["--total-chunks=5", "--this-chunk=5", "--chunk-by-dir=4"],
"plain": [],
"plain-chunked": ["--chunk-by-dir=4"],
"mochitest-push": ["--subsuite=push"],
"chrome": ["--chrome"],
"browser-chrome": ["--browser-chrome"],
"browser-chrome-1": ["--browser-chrome", "--chunk-by-dir=5", "--total-chunks=3", "--this-chunk=1"],
"browser-chrome-2": ["--browser-chrome", "--chunk-by-dir=5", "--total-chunks=3", "--this-chunk=2"],
"browser-chrome-3": ["--browser-chrome", "--chunk-by-dir=5", "--total-chunks=3", "--this-chunk=3"],
"browser-chrome-chunked": ["--browser-chrome", "--chunk-by-dir=5"],
"mochitest-gl": ["--subsuite=webgl"],
"mochitest-devtools-chrome": ["--browser-chrome", "--subsuite=devtools"],
"mochitest-devtools-chrome-chunked": ["--browser-chrome", "--subsuite=devtools", "--chunk-by-runtime"],
"mochitest-metro-chrome": ["--browser-chrome", "--metro-immersive"],
"jetpack-package": ["--jetpack-package"],
"jetpack-addon": ["--jetpack-addon"],
"a11y": ["--a11y"],
"plugins": ['--setpref=dom.ipc.plugins.enabled=false',
'--setpref=dom.ipc.plugins.enabled.x86_64=false',
'--ipcplugins']
},
# local webapprt suites
"all_webapprt_suites": {
"chrome": ["--webapprt-chrome", "--browser-arg=-test-mode"],
"content": ["--webapprt-content"]
},
# local reftest suites
"all_reftest_suites": {
"reftest": ["tests/reftest/tests/layout/reftests/reftest.list"],
"crashtest": ["tests/reftest/tests/testing/crashtest/crashtests.list"],
"jsreftest": ["--extra-profile-file=tests/jsreftest/tests/user.js", "tests/jsreftest/tests/jstests.list"],
"reftest-ipc": ['--setpref=browser.tabs.remote=true',
'--setpref=browser.tabs.remote.autostart=true',
'--setpref=layers.async-pan-zoom.enabled=true',
'tests/reftest/tests/layout/reftests/reftest-sanity/reftest.list'],
"reftest-no-accel": ["--setpref=gfx.direct2d.disabled=true", "--setpref=layers.acceleration.disabled=true",
"tests/reftest/tests/layout/reftests/reftest.list"],
"reftest-omtc": ["--setpref=layers.offmainthreadcomposition.enabled=true",
"tests/reftest/tests/layout/reftests/reftest.list"],
"crashtest-ipc": ['--setpref=browser.tabs.remote=true',
'--setpref=browser.tabs.remote.autostart=true',
'--setpref=layers.async-pan-zoom.enabled=true',
'tests/reftest/tests/testing/crashtest/crashtests.list'],
},
"all_xpcshell_suites": {
"xpcshell": ["--manifest=tests/xpcshell/tests/all-test-dirs.list",
"%(abs_app_dir)s/" + XPCSHELL_NAME]
},
"all_cppunittest_suites": {
"cppunittest": ['tests/cppunittests']
},
"all_jittest_suites": {
"jittest": []
},
"all_mozbase_suites": {
"mozbase": []
},
"run_cmd_checks_enabled": True,
"preflight_run_cmd_suites": [
# NOTE 'enabled' is only here while we have unconsolidated configs
{
"name": "disable_screen_saver",
"cmd": ["xset", "s", "off", "s", "reset"],
"architectures": ["32bit", "64bit"],
"halt_on_failure": False,
"enabled": DISABLE_SCREEN_SAVER
},
{
"name": "run mouse & screen adjustment script",
"cmd": [
# when configs are consolidated this python path will only show
# for windows.
sys.executable,
"../scripts/external_tools/mouse_and_screen_resolution.py",
"--configuration-url",
"https://hg.mozilla.org/%(repo_path)s/raw-file/%(revision)s/" +
"testing/machine-configuration.json"],
"architectures": ["32bit"],
"halt_on_failure": True,
"enabled": ADJUST_MOUSE_AND_SCREEN
},
],
"vcs_output_timeout": 1000,
"minidump_save_path": "%(abs_work_dir)s/../minidumps",
"buildbot_max_log_size": 52428800,
"default_blob_upload_servers": [
"https://blobupload.elasticbeanstalk.com",
],
"blob_uploader_auth_file": os.path.join(os.getcwd(), "oauth.txt"),
"download_minidump_stackwalk": True,
"minidump_stackwalk_path": "win32-minidump_stackwalk.exe",
"minidump_tooltool_manifest_path": "config/tooltool-manifests/win32/releng.manifest",
}
| mpl-2.0 | -8,659,088,840,214,111,000 | 44.191083 | 115 | 0.562368 | false |
arunkgupta/gramps | gramps/gui/merge/mergefamily.py | 1 | 9809 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2010 Michiel D. Nauta
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
Provide merge capabilities for families.
"""
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.ggettext import sgettext as _
from gramps.gen.display.name import displayer as name_displayer
from gramps.gen.const import URL_MANUAL_PAGE
from ..display import display_help
from gramps.gen.errors import MergeError
from ..dialog import ErrorDialog
from ..managedwindow import ManagedWindow
from gramps.gen.merge import MergePersonQuery, MergeFamilyQuery
#-------------------------------------------------------------------------
#
# Gramps constants
#
#-------------------------------------------------------------------------
WIKI_HELP_PAGE = '%s_-_Entering_and_Editing_Data:_Detailed_-_part_3' % \
URL_MANUAL_PAGE
WIKI_HELP_SEC = _('manual|Merge_Families')
_GLADE_FILE = 'mergefamily.glade'
#-------------------------------------------------------------------------
#
# MergeFamily
#
#-------------------------------------------------------------------------
class MergeFamily(ManagedWindow):
"""
Merges two families into a single family. Displays a dialog box that allows
the families to be combined into one.
"""
def __init__(self, dbstate, uistate, handle1, handle2):
ManagedWindow.__init__(self, uistate, [], self.__class__)
self.database = dbstate.db
self.fy1 = self.database.get_family_from_handle(handle1)
self.fy2 = self.database.get_family_from_handle(handle2)
self.define_glade('mergefamily', _GLADE_FILE)
self.set_window(self._gladeobj.toplevel,
self.get_widget("family_title"),
_("Merge Families"))
# Detailed selection widgets
father1 = self.fy1.get_father_handle()
father2 = self.fy2.get_father_handle()
father1 = self.database.get_person_from_handle(father1)
father2 = self.database.get_person_from_handle(father2)
father_id1 = father1.get_gramps_id() if father1 else ""
father_id2 = father2.get_gramps_id() if father2 else ""
father1 = name_displayer.display(father1) if father1 else ""
father2 = name_displayer.display(father2) if father2 else ""
entry1 = self.get_widget("father1")
entry2 = self.get_widget("father2")
entry1.set_text("%s [%s]" % (father1, father_id1))
entry2.set_text("%s [%s]" % (father2, father_id2))
deactivate = False
if father_id1 == "" and father_id2 == "":
deactivate = True
elif father_id2 == "":
self.get_widget("father_btn1").set_active(True)
deactivate = True
elif father_id1 == "":
self.get_widget("father_btn2").set_active(True)
deactivate = True
elif entry1.get_text() == entry2.get_text():
deactivate = True
if deactivate:
for widget_name in ('father1', 'father2', 'father_btn1',
'father_btn2'):
self.get_widget(widget_name).set_sensitive(False)
mother1 = self.fy1.get_mother_handle()
mother2 = self.fy2.get_mother_handle()
mother1 = self.database.get_person_from_handle(mother1)
mother2 = self.database.get_person_from_handle(mother2)
mother_id1 = mother1.get_gramps_id() if mother1 else ""
mother_id2 = mother2.get_gramps_id() if mother2 else ""
mother1 = name_displayer.display(mother1) if mother1 else ""
mother2 = name_displayer.display(mother2) if mother2 else ""
entry1 = self.get_widget("mother1")
entry2 = self.get_widget("mother2")
entry1.set_text("%s [%s]" % (mother1, mother_id1))
entry2.set_text("%s [%s]" % (mother2, mother_id2))
deactivate = False
if mother_id1 == "" and mother_id2 == "":
deactivate = True
elif mother_id1 == "":
self.get_widget("mother_btn2").set_active(True)
deactivate = True
elif mother_id2 == "":
self.get_widget("mother_btn1").set_active(True)
deactivate = True
elif entry1.get_text() == entry2.get_text():
deactivate = True
if deactivate:
for widget_name in ('mother1', 'mother2', 'mother_btn1',
'mother_btn2'):
self.get_widget(widget_name).set_sensitive(False)
entry1 = self.get_widget("rel1")
entry2 = self.get_widget("rel2")
entry1.set_text(str(self.fy1.get_relationship()))
entry2.set_text(str(self.fy2.get_relationship()))
if entry1.get_text() == entry2.get_text():
for widget_name in ('rel1', 'rel2', 'rel_btn1', 'rel_btn2'):
self.get_widget(widget_name).set_sensitive(False)
gramps1 = self.fy1.get_gramps_id()
gramps2 = self.fy2.get_gramps_id()
entry1 = self.get_widget("gramps1")
entry2 = self.get_widget("gramps2")
entry1.set_text(gramps1)
entry2.set_text(gramps2)
if entry1.get_text() == entry2.get_text():
for widget_name in ('gramps1', 'gramps2', 'gramps_btn1',
'gramps_btn2'):
self.get_widget(widget_name).set_sensitive(False)
# Main window widgets that determine which handle survives
rbutton1 = self.get_widget("handle_btn1")
rbutton_label1 = self.get_widget("label_handle_btn1")
rbutton_label2 = self.get_widget("label_handle_btn2")
rbutton_label1.set_label("%s and %s [%s]" %(father1, mother1, gramps1))
rbutton_label2.set_label("%s and %s [%s]" %(father2, mother2, gramps2))
rbutton1.connect("toggled", self.on_handle1_toggled)
self.connect_button("family_help", self.cb_help)
self.connect_button("family_ok", self.cb_merge)
self.connect_button("family_cancel", self.close)
self.show()
def on_handle1_toggled(self, obj):
"""Preferred family changes"""
if obj.get_active():
father1_text = self.get_widget("father1").get_text()
if (father1_text != " []" or
self.get_widget("father2").get_text() == " []"):
self.get_widget("father_btn1").set_active(True)
mother1_text = self.get_widget("mother1").get_text()
if (mother1_text != " []" or
self.get_widget("mother2").get_text() == " []"):
self.get_widget("mother_btn1").set_active(True)
self.get_widget("rel_btn1").set_active(True)
self.get_widget("gramps_btn1").set_active(True)
else:
father2_text = self.get_widget("father2").get_text()
if (father2_text != " []" or
self.get_widget("father1").get_text() == " []"):
self.get_widget("father_btn2").set_active(True)
mother2_text = self.get_widget("mother2").get_text()
if (mother2_text != " []" or
self.get_widget("mother1").get_text() == " []"):
self.get_widget("mother_btn2").set_active(True)
self.get_widget("rel_btn2").set_active(True)
self.get_widget("gramps_btn2").set_active(True)
def cb_help(self, obj):
"""Display the relevant portion of the Gramps manual"""
display_help(webpage = WIKI_HELP_PAGE, section = WIKI_HELP_SEC)
def cb_merge(self, obj):
"""
Perform the merge of the families when the merge button is clicked.
"""
self.uistate.set_busy_cursor(True)
use_handle1 = self.get_widget("handle_btn1").get_active()
if use_handle1:
phoenix = self.fy1
titanic = self.fy2
else:
phoenix = self.fy2
titanic = self.fy1
# Add second handle to history so that when merge is complete,
# phoenix is the selected row.
self.uistate.viewmanager.active_page.get_history().push(
phoenix.get_handle())
phoenix_fh = phoenix.get_father_handle()
phoenix_mh = phoenix.get_mother_handle()
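        # XOR with use_handle1 flips each selection when family 2 was chosen
        # as the surviving (phoenix) family.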
if self.get_widget("father_btn1").get_active() ^ use_handle1:
phoenix_fh = titanic.get_father_handle()
if self.get_widget("mother_btn1").get_active() ^ use_handle1:
phoenix_mh = titanic.get_mother_handle()
if self.get_widget("rel_btn1").get_active() ^ use_handle1:
phoenix.set_relationship(titanic.get_relationship())
if self.get_widget("gramps_btn1").get_active() ^ use_handle1:
phoenix.set_gramps_id(titanic.get_gramps_id())
try:
query = MergeFamilyQuery(self.database, phoenix, titanic,
phoenix_fh, phoenix_mh)
query.execute()
except MergeError, err:
ErrorDialog( _("Cannot merge people"), str(err))
self.uistate.set_busy_cursor(False)
self.close()
| gpl-2.0 | -4,250,079,644,733,064,000 | 42.402655 | 79 | 0.574269 | false |
tensorflow/datasets | tensorflow_datasets/scripts/documentation/generate_visualization.py | 1 | 2247 | # coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Script which generates datasets figures.
"""
import functools
import os
import tempfile
from absl import flags
import matplotlib
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow_datasets.scripts.documentation import script_utils
FLAGS = flags.FLAGS
flags.DEFINE_string(
'datasets', None,
'Comma separated list of datasets to generates. None for all datasets.')
flags.DEFINE_string('dst_dir', None, 'Destination dir to save the images.')
flags.DEFINE_boolean('overwrite', False,
'If True, overwrite the existing visualizations.')
def _save_fig(dst_path: str, figure: matplotlib.figure.Figure) -> None:
"""Save the generated figures for the dataset in dst_dir."""
# `savefig` do not support GCS, so first save the image locally.
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = os.path.join(tmp_dir, 'tmp.png')
figure.savefig(tmp_path)
tf.io.gfile.copy(tmp_path, dst_path, overwrite=FLAGS.overwrite)
plt.close(figure)
def main(_):
"""Main script."""
datasets = FLAGS.datasets.split(',') if FLAGS.datasets else None
generate_and_save_figure_fn = functools.partial(
script_utils.generate_and_save_artifact,
dst_dir=FLAGS.dst_dir or tfds.core.gcs_path('visualization/fig'),
overwrite=FLAGS.overwrite,
file_extension='.png',
get_artifact_fn=tfds.show_examples,
save_artifact_fn=_save_fig,
)
script_utils.multi_process_map(
worker_fn=generate_and_save_figure_fn,
datasets=datasets,
)
if __name__ == '__main__':
script_utils.multi_process_run(main)
| apache-2.0 | 7,384,031,377,480,852,000 | 31.1 | 76 | 0.723632 | false |
endlessm/chromium-browser | third_party/catapult/dashboard/dashboard/graph_csv_test.py | 1 | 6662 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import csv
import StringIO
import unittest
import webapp2
import webtest
from dashboard import graph_csv
from dashboard.common import datastore_hooks
from dashboard.common import testing_common
from dashboard.common import utils
from dashboard.models import graph_data
class GraphCsvTest(testing_common.TestCase):
def setUp(self):
super(GraphCsvTest, self).setUp()
app = webapp2.WSGIApplication([('/graph_csv', graph_csv.GraphCsvHandler)])
self.testapp = webtest.TestApp(app)
self.SetCurrentUser('[email protected]', is_admin=True)
def _AddMockData(self):
master = graph_data.Master(id='ChromiumPerf').put()
bots = []
for name in ['win7', 'mac']:
bot = graph_data.Bot(id=name, parent=master).put()
bots.append(bot)
t = graph_data.TestMetadata(id='ChromiumPerf/%s/dromaeo' % name)
t.UpdateSheriff()
t.put()
dom_test = graph_data.TestMetadata(
id='ChromiumPerf/%s/dromaeo/dom' % name, has_rows=True)
dom_test.UpdateSheriff()
dom_test.put()
test_container_key = utils.GetTestContainerKey(dom_test)
for i in range(15000, 16000, 5):
graph_data.Row(parent=test_container_key, id=i, value=float(i * 2.5),
error=(i + 5)).put()
def _AddMockInternalData(self):
master = graph_data.Master(id='ChromiumPerf').put()
bots = []
for name in ['win7', 'mac']:
bot = graph_data.Bot(id=name, parent=master, internal_only=True).put()
bots.append(bot)
t = graph_data.TestMetadata(
id='ChromiumPerf/%s/dromaeo' % name, internal_only=True)
t.UpdateSheriff()
t.put()
dom_test = graph_data.TestMetadata(
id='ChromiumPerf/%s/dromaeo/dom' % name,
has_rows=True,
internal_only=True)
dom_test.UpdateSheriff()
dom_test.put()
test_container_key = utils.GetTestContainerKey(dom_test)
for i in range(1, 50):
graph_data.Row(
parent=test_container_key, id=i, value=float(i * 2), error=(i + 10),
internal_only=True).put()
def _CheckGet(
self, result_query, expected_result, whitelisted_ip='', status=200):
"""Asserts that the given query has the given CSV result.
Args:
result_query: The path and query string to request.
expected_result: The expected table of values (list of lists).
      whitelisted_ip: The IP address to set as request remote address.
      status: The expected HTTP response status code.
    """
response_rows = []
response = self.testapp.get(
result_query,
extra_environ={'REMOTE_ADDR': whitelisted_ip},
status=status)
if status != 200:
return
for row in csv.reader(StringIO.StringIO(response.body)):
response_rows.append(row)
self.assertEqual(expected_result, response_rows)
def testGetCsv(self):
self._AddMockData()
response = self.testapp.get(
'/graph_csv?test_path=ChromiumPerf/win7/dromaeo/dom')
for index, row, in enumerate(csv.reader(StringIO.StringIO(response.body))):
# Skip the headers
if index > 0:
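        # _AddMockData seeds rows at revisions 15000-15995 in steps of 5,
        # with value == revision * 2.5.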
expected_rev = str(15000 + ((index - 1) * 5))
expected_value = str(int(expected_rev) * 2.5)
self.assertEqual([expected_rev, expected_value], row)
def testPost(self):
self._AddMockData()
response = self.testapp.post(
'/graph_csv?', {'test_path': 'ChromiumPerf/win7/dromaeo/dom'})
for index, row, in enumerate(csv.reader(StringIO.StringIO(response.body))):
# Skip the headers
if index > 0:
expected_rev = str(15000 + ((index - 1) * 5))
expected_value = str(int(expected_rev) * 2.5)
self.assertEqual([expected_rev, expected_value], row)
def testRevNumRows(self):
self._AddMockData()
query = ('/graph_csv?test_path=ChromiumPerf/win7/dromaeo/dom&'
'rev=15270&num_points=5')
expected = [
['revision', 'value'],
['15250', '38125.0'],
['15255', '38137.5'],
['15260', '38150.0'],
['15265', '38162.5'],
['15270', '38175.0'],
]
self._CheckGet(query, expected)
def testAttrRows(self):
self._AddMockData()
query = ('/graph_csv?test_path=ChromiumPerf/win7/dromaeo/dom&'
'rev=15270&num_points=5&attr=revision,error,value')
expected = [
['revision', 'error', 'value'],
['15250', '15255.0', '38125.0'],
['15255', '15260.0', '38137.5'],
['15260', '15265.0', '38150.0'],
['15265', '15270.0', '38162.5'],
['15270', '15275.0', '38175.0'],
]
self._CheckGet(query, expected)
query = ('/graph_csv?test_path=ChromiumPerf/win7/dromaeo/dom&'
'rev=15270&num_points=5&attr=value')
expected = [
['value'],
['38125.0'],
['38137.5'],
['38150.0'],
['38162.5'],
['38175.0'],
]
self._CheckGet(query, expected)
query = ('/graph_csv?test_path=ChromiumPerf/win7/dromaeo/dom&'
'num_points=5&attr=revision,random,value')
expected = [
['revision', 'random', 'value'],
['15975', '', '39937.5'],
['15980', '', '39950.0'],
['15985', '', '39962.5'],
['15990', '', '39975.0'],
['15995', '', '39987.5'],
]
self._CheckGet(query, expected)
def testGet_WithNonInternalUserAndWhitelistedIP(self):
self._AddMockInternalData()
self.UnsetCurrentUser()
datastore_hooks.InstallHooks()
testing_common.SetIpWhitelist(['123.45.67.89'])
query = '/graph_csv?test_path=ChromiumPerf/win7/dromaeo/dom&num_points=3'
expected = [['revision', 'value']]
self._CheckGet(query, expected, status=500)
def testGet_WhitelistedIPOnly(self):
self.PatchDatastoreHooksRequest('123.45.67.89')
self._AddMockInternalData()
self.UnsetCurrentUser()
datastore_hooks.InstallHooks()
testing_common.SetIpWhitelist(['123.45.67.89'])
query = '/graph_csv?test_path=ChromiumPerf/win7/dromaeo/dom&num_points=3'
expected = [
['revision', 'value'],
['47', '94.0'],
['48', '96.0'],
['49', '98.0']
]
self._CheckGet(query, expected, whitelisted_ip='123.45.67.89')
def testGet_NoTestPathGiven_GivesError(self):
testing_common.SetIpWhitelist(['123.45.67.89'])
self.testapp.get(
'/graph_csv',
extra_environ={'REMOTE_ADDR': '123.45.67.89'},
status=400)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -4,719,458,784,556,416,000 | 32.31 | 80 | 0.613329 | false |
huntxu/neutron | neutron/common/constants.py | 1 | 9868 | # Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron_lib.api.definitions import l3
from neutron_lib import constants as lib_constants
from neutron_lib.plugins import constants as plugin_consts
ROUTER_PORT_OWNERS = lib_constants.ROUTER_INTERFACE_OWNERS_SNAT + \
(lib_constants.DEVICE_OWNER_ROUTER_GW,)
ROUTER_STATUS_ACTIVE = 'ACTIVE'
ROUTER_STATUS_ALLOCATING = 'ALLOCATING'
ROUTER_STATUS_ERROR = 'ERROR'
VALID_ROUTER_STATUS = (ROUTER_STATUS_ACTIVE,
ROUTER_STATUS_ALLOCATING,
ROUTER_STATUS_ERROR)
HA_ROUTER_STATE_KEY = '_ha_state'
METERING_LABEL_KEY = '_metering_labels'
FLOATINGIP_AGENT_INTF_KEY = '_floatingip_agent_interfaces'
SNAT_ROUTER_INTF_KEY = '_snat_router_interfaces'
HA_NETWORK_NAME = 'HA network tenant %s'
HA_SUBNET_NAME = 'HA subnet tenant %s'
HA_PORT_NAME = 'HA port tenant %s'
HA_ROUTER_STATE_ACTIVE = 'active'
HA_ROUTER_STATE_STANDBY = 'standby'
HA_ROUTER_STATE_UNKNOWN = 'unknown'
VALID_HA_STATES = (HA_ROUTER_STATE_ACTIVE, HA_ROUTER_STATE_STANDBY,
HA_ROUTER_STATE_UNKNOWN)
PAGINATION_INFINITE = 'infinite'
SORT_DIRECTION_ASC = 'asc'
SORT_DIRECTION_DESC = 'desc'
ETHERTYPE_NAME_ARP = 'arp'
ETHERTYPE_ARP = 0x0806
ETHERTYPE_IP = 0x0800
ETHERTYPE_IPV6 = 0x86DD
IP_PROTOCOL_NAME_ALIASES = {lib_constants.PROTO_NAME_IPV6_ICMP_LEGACY:
lib_constants.PROTO_NAME_IPV6_ICMP}
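# Reverse lookup: IP protocol number (as a string) -> canonical protocol name.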
IP_PROTOCOL_NUM_TO_NAME_MAP = {
str(v): k for k, v in lib_constants.IP_PROTOCOL_MAP.items()}
# When using iptables-save we specify '-p {proto}',
# but sometimes those values are not identical. This is a map
# of known protocol numbers that require a name to be used and
# protocol names that require a different name to be used,
# because that is how iptables-save will display them.
#
# This is how the list was created, so there is a possibility
# it will need to be updated in the future:
#
# $ for num in {0..255}; do iptables -A INPUT -p $num; done
# $ iptables-save
#
# These cases are special, and were found by inspection:
# - 'ipv6-encap' uses 'ipv6'
# - 'icmpv6' uses 'ipv6-icmp'
# - 'pgm' uses '113' instead of its name
# - protocol '0' uses no -p argument
IPTABLES_PROTOCOL_NAME_MAP = {lib_constants.PROTO_NAME_IPV6_ENCAP: 'ipv6',
lib_constants.PROTO_NAME_IPV6_ICMP_LEGACY:
'ipv6-icmp',
lib_constants.PROTO_NAME_PGM: '113',
'0': None,
'1': 'icmp',
'2': 'igmp',
'3': 'ggp',
'4': 'ipencap',
'5': 'st',
'6': 'tcp',
'8': 'egp',
'9': 'igp',
'12': 'pup',
'17': 'udp',
'20': 'hmp',
'22': 'xns-idp',
'27': 'rdp',
'29': 'iso-tp4',
'33': 'dccp',
'36': 'xtp',
'37': 'ddp',
'38': 'idpr-cmtp',
'41': 'ipv6',
'43': 'ipv6-route',
'44': 'ipv6-frag',
'45': 'idrp',
'46': 'rsvp',
'47': 'gre',
'50': 'esp',
'51': 'ah',
'57': 'skip',
'58': 'ipv6-icmp',
'59': 'ipv6-nonxt',
'60': 'ipv6-opts',
'73': 'rspf',
'81': 'vmtp',
'88': 'eigrp',
'89': 'ospf',
'93': 'ax.25',
'94': 'ipip',
'97': 'etherip',
'98': 'encap',
'103': 'pim',
'108': 'ipcomp',
'112': 'vrrp',
'115': 'l2tp',
'124': 'isis',
'132': 'sctp',
'133': 'fc',
'135': 'mobility-header',
'136': 'udplite',
'137': 'mpls-in-ip',
'138': 'manet',
'139': 'hip',
'140': 'shim6',
'141': 'wesp',
'142': 'rohc'}
# A length of a iptables chain name must be less than or equal to 11
# characters.
# <max length of iptables chain name> - (<binary_name> + '-') = 28-(16+1) = 11
MAX_IPTABLES_CHAIN_LEN_WRAP = 11
MAX_IPTABLES_CHAIN_LEN_NOWRAP = 28
# Timeout in seconds for getting an IPv6 LLA
LLA_TASK_TIMEOUT = 40
# length of all device prefixes (e.g. qvo, tap, qvb)
LINUX_DEV_PREFIX_LEN = 3
# must be shorter than linux IFNAMSIZ (which is 16)
LINUX_DEV_LEN = 14
# Possible prefixes to partial port IDs in interface names used by the OVS,
# Linux Bridge, and IVS VIF drivers in Nova and the neutron agents. See the
# 'get_ovs_interfaceid' method in Nova (nova/virt/libvirt/vif.py) for details.
INTERFACE_PREFIXES = (lib_constants.TAP_DEVICE_PREFIX,
lib_constants.VETH_DEVICE_PREFIX,
lib_constants.SNAT_INT_DEV_PREFIX)
ATTRIBUTES_TO_UPDATE = 'attributes_to_update'
# TODO(amuller): Re-define the RPC namespaces once Oslo messaging supports
# Targets with multiple namespaces. Neutron will then implement callbacks
# for its RPC clients in order to support rolling upgrades.
# RPC Interface for agents to call DHCP API implemented on the plugin side
RPC_NAMESPACE_DHCP_PLUGIN = None
# RPC interface for the metadata service to get info from the plugin side
RPC_NAMESPACE_METADATA = None
# RPC interface for agent to plugin security group API
RPC_NAMESPACE_SECGROUP = None
# RPC interface for agent to plugin DVR api
RPC_NAMESPACE_DVR = None
# RPC interface for reporting state back to the plugin
RPC_NAMESPACE_STATE = None
# RPC interface for agent to plugin resources API
RPC_NAMESPACE_RESOURCES = None
# Default network MTU value when not configured
DEFAULT_NETWORK_MTU = 1500
IPV6_MIN_MTU = 1280
ROUTER_MARK_MASK = "0xffff"
VALID_ETHERTYPES = (lib_constants.IPv4, lib_constants.IPv6)
IP_ALLOWED_VERSIONS = [lib_constants.IP_VERSION_4, lib_constants.IP_VERSION_6]
PORT_RANGE_MIN = 1
PORT_RANGE_MAX = 65535
# Configuration values for accept_ra sysctl, copied from linux kernel
# networking (netdev) tree, file Documentation/networking/ip-sysctl.txt
#
# Possible values are:
# 0 Do not accept Router Advertisements.
# 1 Accept Router Advertisements if forwarding is disabled.
# 2 Overrule forwarding behaviour. Accept Router Advertisements
# even if forwarding is enabled.
ACCEPT_RA_DISABLED = 0
ACCEPT_RA_WITHOUT_FORWARDING = 1
ACCEPT_RA_WITH_FORWARDING = 2
# Some components communicate using private address ranges, define
# them all here. These address ranges should not cause any issues
# even if they overlap since they are used in disjoint namespaces,
# but for now they are unique.
# We define the metadata cidr since it falls in the range.
PRIVATE_CIDR_RANGE = '169.254.0.0/16'
DVR_FIP_LL_CIDR = '169.254.64.0/18'
L3_HA_NET_CIDR = '169.254.192.0/18'
METADATA_CIDR = '169.254.169.254/32'
# The only defined IpamAllocation status at this stage is 'ALLOCATED'.
# More states will be available in the future - e.g.: RECYCLABLE
IPAM_ALLOCATION_STATUS_ALLOCATED = 'ALLOCATED'
VALID_IPAM_ALLOCATION_STATUSES = (IPAM_ALLOCATION_STATUS_ALLOCATED,)
# Port binding states for Live Migration
PORT_BINDING_STATUSES = (lib_constants.ACTIVE,
lib_constants.INACTIVE)
VALID_FLOATINGIP_STATUS = (lib_constants.FLOATINGIP_STATUS_ACTIVE,
lib_constants.FLOATINGIP_STATUS_DOWN,
lib_constants.FLOATINGIP_STATUS_ERROR)
# Floating IP host binding states
FLOATING_IP_HOST_UNBOUND = "FLOATING_IP_HOST_UNBOUND"
FLOATING_IP_HOST_NEEDS_BINDING = "FLOATING_IP_HOST_NEEDS_BINDING"
# Possible types of values (e.g. in QoS rule types)
VALUES_TYPE_CHOICES = "choices"
VALUES_TYPE_RANGE = "range"
# Units base
SI_BASE = 1000
IEC_BASE = 1024
# Port bindings handling
NO_ACTIVE_BINDING = 'no_active_binding'
# Registered extension parent resource check mapping
# If we want to register some service plugin resources into policy and check
# the owner when operating their subresources. We can write here to use
# existing policy engine for parent resource owner check.
# Each entry here should be PARENT_RESOURCE_NAME: SERVICE_PLUGIN_NAME,
# PARENT_RESOURCE_NAME is usually from api definition.
# SERVICE_PLUGIN_NAME is the service plugin which introduced the resource and
# registered the service plugin name in neutron-lib.
EXT_PARENT_RESOURCE_MAPPING = {
l3.FLOATINGIP: plugin_consts.L3
}
EXT_PARENT_PREFIX = 'ext_parent'
| apache-2.0 | 3,229,446,407,521,964,500 | 38.15873 | 78 | 0.583502 | false |
davzhang/helix-python-binding | org/apache/helix/messaging/handling/GroupMessageHandler.py | 1 | 3490 | # package org.apache.helix.messaging.handling
#from org.apache.helix.messaging.handling import *
#from java.util import HashMap
#from java.util import List
#from java.util import Map
#from java.util.concurrent import ConcurrentHashMap
#from java.util.concurrent import ConcurrentLinkedQueue
#from java.util.concurrent.atomic import AtomicInteger
from org.apache.helix.PropertyKey import PropertyKey
from org.apache.helix.model.CurrentState import CurrentState
from org.apache.helix.model.Message import Message
from org.apache.helix.model.Message import Attributes
class CurrentStateUpdate:
"""
Parameters:
PropertyKey key
CurrentState curStateDelta
"""
def __init__(self, key, curStateDelta):
self._key = key
self._curStateDelta = curStateDelta
def merge(self, curState):
"""
Returns void
Parameters:
curState: CurrentState
"""
self._curStateDelta.getRecord().merge(curState.getRecord())
class GroupMessageInfo:
"""
Parameters:
Message message
"""
def __init__(self, message):
self._message = message
# List<String>
partitionNames = message.getPartitionNames()
        self._countDown = len(partitionNames)
# self._countDown = AtomicInteger(partitionNames.size())
self._curStateUpdateList = []
# self._curStateUpdateList = ConcurrentLinkedQueue<CurrentStateUpdate>()
def merge(self):
"""
Returns Map<PropertyKey, CurrentState>
"""
# Map<String, CurrentStateUpdate>
curStateUpdateMap = {}
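        # Collapse updates that touch the same current-state path into a
        # single merged delta.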
        for update in self._curStateUpdateList:
            # String
            path = update._key.getPath()
            if path not in curStateUpdateMap:
                curStateUpdateMap[path] = update
            else:
                curStateUpdateMap[path].merge(update._curStateDelta)
# Map<PropertyKey, CurrentState>
ret = {}
for update in curStateUpdateMap.values():
ret[update._key] = update._curStateDelta
return ret
class GroupMessageHandler:
"""
"""
def __init__(self):
# self._groupMsgMap = ConcurrentHashMap<String, GroupMessageInfo>()
self._groupMsgMap = {}
def put(self, message):
"""
Returns void
Parameters:
message: Message
"""
        self._groupMsgMap.setdefault(message.getId(), GroupMessageInfo(message))
def onCompleteSubMessage(self, subMessage):
"""
Returns GroupMessageInfo
Parameters:
subMessage: Message
"""
# String
parentMid = subMessage.getAttribute(Attributes.PARENT_MSG_ID)
# GroupMessageInfo
info = self._groupMsgMap.get(parentMid)
        if info is not None:
            # int
            info._countDown -= 1
            val = info._countDown
            if val <= 0:
                return self._groupMsgMap.pop(parentMid)
return None
def addCurStateUpdate(self, subMessage, key, delta):
"""
Returns void
Parameters:
        subMessage: Message
        key: PropertyKey
        delta: CurrentState
"""
# String
parentMid = subMessage.getAttribute(Attributes.PARENT_MSG_ID)
# GroupMessageInfo
info = self._groupMsgMap.get(parentMid)
        if info is not None:
            info._curStateUpdateList.append(CurrentStateUpdate(key, delta))
| apache-2.0 | -5,867,024,208,946,304,000 | 23.928571 | 83 | 0.619771 | false |
Azure/azure-sdk-for-python | sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2020_03_01/aio/_iot_hub_client.py | 1 | 5911 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import IotHubClientConfiguration
from .operations import Operations
from .operations import IotHubResourceOperations
from .operations import ResourceProviderCommonOperations
from .operations import CertificatesOperations
from .operations import IotHubOperations
from .operations import PrivateLinkResourcesOperations
from .operations import PrivateEndpointConnectionsOperations
from .. import models
class IotHubClient(object):
"""Use this API to manage the IoT hubs in your Azure subscription.
:ivar operations: Operations operations
:vartype operations: azure.mgmt.iothub.v2020_03_01.aio.operations.Operations
:ivar iot_hub_resource: IotHubResourceOperations operations
:vartype iot_hub_resource: azure.mgmt.iothub.v2020_03_01.aio.operations.IotHubResourceOperations
:ivar resource_provider_common: ResourceProviderCommonOperations operations
:vartype resource_provider_common: azure.mgmt.iothub.v2020_03_01.aio.operations.ResourceProviderCommonOperations
:ivar certificates: CertificatesOperations operations
:vartype certificates: azure.mgmt.iothub.v2020_03_01.aio.operations.CertificatesOperations
:ivar iot_hub: IotHubOperations operations
:vartype iot_hub: azure.mgmt.iothub.v2020_03_01.aio.operations.IotHubOperations
:ivar private_link_resources: PrivateLinkResourcesOperations operations
:vartype private_link_resources: azure.mgmt.iothub.v2020_03_01.aio.operations.PrivateLinkResourcesOperations
:ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
:vartype private_endpoint_connections: azure.mgmt.iothub.v2020_03_01.aio.operations.PrivateEndpointConnectionsOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The subscription identifier.
:type subscription_id: str
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: Optional[str] = None,
**kwargs: Any
) -> None:
if not base_url:
base_url = 'https://management.azure.com'
self._config = IotHubClientConfiguration(credential, subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
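        # All operation groups share this pipeline client and the
        # (de)serializers built above.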
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
self.iot_hub_resource = IotHubResourceOperations(
self._client, self._config, self._serialize, self._deserialize)
self.resource_provider_common = ResourceProviderCommonOperations(
self._client, self._config, self._serialize, self._deserialize)
self.certificates = CertificatesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.iot_hub = IotHubOperations(
self._client, self._config, self._serialize, self._deserialize)
self.private_link_resources = PrivateLinkResourcesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.private_endpoint_connections = PrivateEndpointConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
"""Runs the network request through the client's chained policies.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.core.pipeline.transport.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to True.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
"""
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "IotHubClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
| mit | 8,649,690,516,236,446,000 | 51.776786 | 129 | 0.714092 | false |
qedsoftware/commcare-hq | corehq/apps/repeaters/tests/test_dbaccessors.py | 1 | 3943 | from datetime import datetime, timedelta
from django.test import TestCase
from corehq.apps.repeaters.dbaccessors import (
get_pending_repeat_record_count,
get_success_repeat_record_count,
get_failure_repeat_record_count,
get_repeat_record_count,
get_repeaters_by_domain,
get_paged_repeat_records,
iterate_repeat_records,
)
from corehq.apps.repeaters.models import RepeatRecord, CaseRepeater
from corehq.apps.repeaters.const import RECORD_PENDING_STATE
class TestRepeatRecordDBAccessors(TestCase):
repeater_id = '1234'
other_id = '5678'
domain = 'test-domain-2'
@classmethod
def setUpClass(cls):
before = datetime.utcnow() - timedelta(minutes=5)
failed = RepeatRecord(
domain=cls.domain,
failure_reason='Some python error',
repeater_id=cls.repeater_id,
next_event=before,
)
success = RepeatRecord(
domain=cls.domain,
succeeded=True,
repeater_id=cls.repeater_id,
next_event=before,
)
pending = RepeatRecord(
domain=cls.domain,
succeeded=False,
repeater_id=cls.repeater_id,
next_event=before,
)
other_id = RepeatRecord(
domain=cls.domain,
succeeded=False,
repeater_id=cls.other_id,
next_event=before,
)
cls.records = [
failed,
success,
pending,
other_id,
]
for record in cls.records:
record.save()
@classmethod
def tearDownClass(cls):
for record in cls.records:
record.delete()
def test_get_pending_repeat_record_count(self):
count = get_pending_repeat_record_count(self.domain, self.repeater_id)
self.assertEqual(count, 1)
def test_get_success_repeat_record_count(self):
count = get_success_repeat_record_count(self.domain, self.repeater_id)
self.assertEqual(count, 1)
def test_get_failure_repeat_record_count(self):
count = get_failure_repeat_record_count(self.domain, self.repeater_id)
self.assertEqual(count, 1)
def test_get_paged_repeat_records_with_state_and_no_records(self):
count = get_repeat_record_count('wrong-domain', state=RECORD_PENDING_STATE)
self.assertEqual(count, 0)
def test_get_paged_repeat_records(self):
records = get_paged_repeat_records(self.domain, 0, 2)
self.assertEqual(len(records), 2)
def test_get_paged_repeat_records_with_repeater_id(self):
records = get_paged_repeat_records(self.domain, 0, 2, repeater_id=self.other_id)
self.assertEqual(len(records), 1)
def test_get_paged_repeat_records_with_state(self):
records = get_paged_repeat_records(self.domain, 0, 10, state=RECORD_PENDING_STATE)
self.assertEqual(len(records), 2)
def test_get_paged_repeat_records_wrong_domain(self):
records = get_paged_repeat_records('wrong-domain', 0, 2)
self.assertEqual(len(records), 0)
def test_iterate_repeat_records(self):
records = list(iterate_repeat_records(datetime.utcnow(), chunk_size=2))
self.assertEqual(len(records), 3) # Should grab all but the succeeded one
class TestRepeatersDBAccessors(TestCase):
domain = 'test-domain'
@classmethod
def setUpClass(cls):
repeater = CaseRepeater(
domain=cls.domain,
)
cls.repeaters = [
repeater
]
for repeater in cls.repeaters:
repeater.save()
@classmethod
def tearDownClass(cls):
for repeater in cls.repeaters:
repeater.delete()
def test_get_repeaters_by_domain(self):
repeaters = get_repeaters_by_domain(self.domain)
self.assertEqual(len(repeaters), 1)
self.assertEqual(repeaters[0].__class__, CaseRepeater)
| bsd-3-clause | -8,391,664,742,251,161,000 | 30.293651 | 90 | 0.628963 | false |
y4smeen/friendly-spork | camera/take-picture.py | 1 | 3087 | from __future__ import print_function
import sys
import cv2
def main(argv):
#capture from camera at location 0
cap = cv2.VideoCapture(0)
# Change the camera setting using the set() function
# cap.set(cv2.cv.CV_CAP_PROP_EXPOSURE, -6.0)
# cap.set(cv2.cv.CV_CAP_PROP_GAIN, 4.0)
# cap.set(cv2.cv.CV_CAP_PROP_BRIGHTNESS, 144.0)
# cap.set(cv2.cv.CV_CAP_PROP_CONTRAST, 27.0)
# cap.set(cv2.cv.CV_CAP_PROP_HUE, 13.0) # 13.0
# cap.set(cv2.cv.CV_CAP_PROP_SATURATION, 28.0)
# Read the current setting from the camera
test = cap.get(cv2.cv.CV_CAP_PROP_POS_MSEC)
ratio = cap.get(cv2.cv.CV_CAP_PROP_POS_AVI_RATIO)
frame_rate = cap.get(cv2.cv.CV_CAP_PROP_FPS)
width = cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)
height = cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)
brightness = cap.get(cv2.cv.CV_CAP_PROP_BRIGHTNESS)
contrast = cap.get(cv2.cv.CV_CAP_PROP_CONTRAST)
saturation = cap.get(cv2.cv.CV_CAP_PROP_SATURATION)
hue = cap.get(cv2.cv.CV_CAP_PROP_HUE)
gain = cap.get(cv2.cv.CV_CAP_PROP_GAIN)
exposure = cap.get(cv2.cv.CV_CAP_PROP_EXPOSURE)
print("Test: ", test)
print("Ratio: ", ratio)
print("Frame Rate: ", frame_rate)
print("Height: ", height)
print("Width: ", width)
print("Brightness: ", brightness)
print("Contrast: ", contrast)
print("Saturation: ", saturation)
print("Hue: ", hue)
print("Gain: ", gain)
print("Exposure: ", exposure)
while True:
ret, img = cap.read()
cv2.imshow("input", img)
key = cv2.waitKey(10)
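        # 27 is the ESC key code; pressing it exits the preview loop.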
if key == 27:
break
cv2.destroyAllWindows()
    cap.release()
# 0 CV_CAP_PROP_POS_MSEC Current position of the video file in milliseconds.
# 1 CV_CAP_PROP_POS_FRAMES 0-based index of the frame to be decoded/captured next.
# 2 CV_CAP_PROP_POS_AVI_RATIO Relative position of the video file
# 3 CV_CAP_PROP_FRAME_WIDTH Width of the frames in the video stream.
# 4 CV_CAP_PROP_FRAME_HEIGHT Height of the frames in the video stream.
# 5 CV_CAP_PROP_FPS Frame rate.
# 6 CV_CAP_PROP_FOURCC 4-character code of codec.
# 7 CV_CAP_PROP_FRAME_COUNT Number of frames in the video file.
# 8 CV_CAP_PROP_FORMAT Format of the Mat objects returned by retrieve() .
# 9 CV_CAP_PROP_MODE Backend-specific value indicating the current capture mode.
# 10 CV_CAP_PROP_BRIGHTNESS Brightness of the image (only for cameras).
# 11 CV_CAP_PROP_CONTRAST Contrast of the image (only for cameras).
# 12 CV_CAP_PROP_SATURATION Saturation of the image (only for cameras).
# 13 CV_CAP_PROP_HUE Hue of the image (only for cameras).
# 14 CV_CAP_PROP_GAIN Gain of the image (only for cameras).
# 15 CV_CAP_PROP_EXPOSURE Exposure (only for cameras).
# 16 CV_CAP_PROP_CONVERT_RGB Boolean flags indicating whether images should be converted to RGB.
# 17 CV_CAP_PROP_WHITE_BALANCE Currently unsupported
# 18 CV_CAP_PROP_RECTIFICATION Rectification flag for stereo cameras (note: only supported by DC1394 v 2.x backend currently)
if __name__ == '__main__':
main(sys.argv)
| mit | 2,285,477,887,663,510,300 | 41.875 | 127 | 0.675089 | false |
SumiTomohiko/Yog | tools/update_prototype.py | 1 | 8073 | #! python
# -*- coding: utf-8 -*-
from glob import glob
import re
class DeclarationInserter(object):
start = "/* PROTOTYPE_START */"
end = "/* PROTOTYPE_END */"
files = {
"include/yog/dir.h": [ "src/dir.c", ],
"include/yog/path.h": [ "src/path.c", ],
"include/yog/process.h": [ "src/process.c", ],
"include/yog/handle.h": [ "src/handle.c", ],
"include/yog/env.h": [ "src/env.c", ],
"include/yog/ffi.h": [ "src/ffi.c", ],
"include/yog/sprintf.h": [ "src/sprintf.c", ],
"include/yog/get_args.h": [ "src/get_args.c", ],
"include/yog/coroutine.h": [ "src/coroutine.c", ],
"include/yog/misc.h": [ "src/misc.c", ],
"include/yog/comparable.h": [ "src/comparable.c", ],
"include/yog/set.h": [ "src/set.c", ],
"include/yog/module.h": [ "src/module.c", ],
"include/yog/file.h": [ "src/file.c", ],
"include/yog/dict.h": [ "src/dict.c", ],
"include/yog/classmethod.h": [ "src/classmethod.c", ],
"include/yog/property.h": [ "src/property.c", ],
"include/yog/symbol.h": [ "src/symbol.c", ],
"include/yog/repl.h": [ "src/repl.c", ],
"include/yog/yog.h": [ "src/value.c", ],
"include/yog/builtins.h": [ "src/builtins.c", ],
"include/yog/error.h": [ "src/error.c", ],
"include/yog/bool.h": [ "src/bool.c", ],
"include/yog/fixnum.h": [ "src/fixnum.c", ],
"include/yog/arg.h": [ "src/arg.c", ],
"include/yog/class.h": [ "src/class.c", ],
"include/yog/nil.h": [ "src/nil.c", ],
"include/yog/callable.h": [ "src/callable.c", ],
"include/yog/binary.h": [ "src/binary.c", ],
"include/yog/package.h": [ "src/package.c", ],
"include/yog/code.h": [ "src/code.c", "src/code.inc", ],
"include/yog/compile.h": [ "src/compile.c", ],
"include/yog/array.h": [ "src/array.c", ],
"include/yog/parser.h": [
"src/lexer.c", "src/parser.y",
],
"include/yog/regexp.h": [ "src/regexp.c", ],
"include/yog/string.h": [ "src/string.c", ],
"include/yog/encoding.h": [ "src/encoding.c", ],
"include/yog/exception.h": [
"src/exception.c", "src/stacktrace.c", ],
"include/yog/inst.h.tmpl": [ "src/inst.c", ],
"include/yog/table.h": [ "src/table.c", ],
"include/yog/gc/mark-sweep-compact.h": [
"src/gc/mark-sweep-compact.c", ],
"include/yog/gc/copying.h": [ "src/gc/copying.c", ],
"include/yog/gc/mark-sweep.h": [ "src/gc/mark-sweep.c", ],
"include/yog/gc/generational.h": [ "src/gc/generational.c", ],
"include/yog/thread.h": [ "src/thread.c", ],
"include/yog/vm.h": [ "src/vm.c", ],
"include/yog/object.h": [ "src/object.c", ],
"include/yog/frame.h": [ "src/frame.c", ],
"include/yog/float.h": [ "src/float.c", ],
"include/yog/eval.h": [ "src/eval.c", ],
"include/yog/gc.h": [ "src/gc.c", ],
"include/yog/bignum.h": [ "src/bignum.c", ],
}
def _find(self, lines, s, start):
processed = []
i = start
while True:
try:
line = lines[i]
except IndexError:
break
try:
line.index(s)
except ValueError:
processed.append(line)
else:
processed.append(line)
break
i += 1
return processed, i
def _rewrite_header(self, header_filename, declarations):
try:
fp = open(header_filename)
try:
lines = fp.readlines()
finally:
fp.close()
except IOError:
lines = []
header, i = self._find(lines, self.start, 0)
header.append("""
/**
 * DON'T EDIT THIS AREA. IT IS GENERATED BY update_prototype.py.
*/
""")
for filename in sorted(declarations):
functions = declarations[filename]
header.append("/* %(filename)s */\n" % { "filename": filename })
for function in functions:
header.append(function + "\n")
header.append("\n")
old, i = self._find(lines, self.end, i)
header.extend(lines[i:])
fp = open(header_filename, "wb")
try:
fp.write("".join(header))
finally:
fp.close()
re_function = re.compile(r"\A(?P<name>\w+)\s*\([\w\s\*\\(\).,]*\)\Z")
re_function_pointer \
= re.compile(r"\A(?P<head>[\w\*]+\s+\(\*)\w+(?P<tail>\)\(.*\))\Z")
def _split_params(self, line):
params = []
paren_depth = 0
param = []
for c in line:
if (paren_depth == 0) and (c == ","):
params.append("".join(param).strip())
param = []
else:
if (c == "("):
paren_depth += 1
elif (c == ")"):
paren_depth -= 1
param.append(c)
last_param = "".join(param).strip()
if last_param != "":
params.append(last_param)
return params
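    # Illustrative note (added; not part of the original source): commas are
    # only treated as separators at parenthesis depth 0, so function-pointer
    # parameters survive intact. A hypothetical call:
    #   self._split_params("YogEnv* env, void (*cb)(int, char), int n")
    #   -> ["YogEnv* env", "void (*cb)(int, char)", "int n"]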
def _get_functions(self, filename):
declarations = {}
lines = []
fp = open(filename)
try:
lines = fp.readlines()
finally:
fp.close()
comment = 0
line = ""
while True:
prev_line = line
try:
line = lines.pop(0).rstrip()
except IndexError:
break
if line == "#if 0":
comment += 1
elif (line == "#endif") and (0 < comment):
comment -= 1
if 0 < comment:
continue
m = self.re_function.search(line)
if m is None:
continue
if prev_line.startswith("INTERNAL ") or prev_line.startswith("static ") or prev_line.startswith("inline "):
continue
return_type = prev_line
name = m.group("name")
args = []
n = line.index("(")
m = line.rindex(")")
params = self._split_params(line[n + 1:m])
for param in params:
param = param.strip()
if param == "...":
args.append(param)
else:
m = self.re_function_pointer.search(param)
if m:
type_ = m.group("head") + m.group("tail")
else:
param = param.strip()
param = param.split(" ")
try:
n = param[-1].rindex("*")
except ValueError:
type_ = " ".join(param[:-1])
else:
type_ = " ".join(param[:-1]) \
+ param[-1][:n + 1]
args.append(type_)
declarations[name] = \
"%(return_type)s %(name)s(%(args)s);" % {
"return_type": return_type, "name": name,
"args": ", ".join(args) }
retval = []
for name in sorted(declarations):
retval.append(declarations[name])
return retval
def do(self):
for header, sources in self.files.items():
declarations = {}
for source in sources:
declarations[source] = self._get_functions(source)
self._rewrite_header(header, declarations)
if __name__ == "__main__":
inserter = DeclarationInserter()
inserter.do()
# vim: tabstop=4 shiftwidth=4 expandtab softtabstop=4
| mit | -4,793,780,574,572,863,000 | 34.563877 | 119 | 0.440852 | false |
jopcode/whoUR | whoUR.py | 1 | 1600 | import sys, os
import argparse
import urllib3, urllib
import re
# Modules
from libs.colors import *
from libs.selectChoice import select_choice
Parser = argparse.ArgumentParser(prog='whoUR.py', description='Tool for information gathering')
'''
this has been use in the future
Parser.add_argument('-d', '--dic-path', help='Dictonaries Path, Example: -d /root/', action='store', default='dicts/', dest='dicPath')
Parser.add_argument('-a', '--dic-adminspage', help='Admin Page dictonary, Example: -a adminspage.txt', action='store', default='adminspage.txt', dest='dicAdminsPage')
Args = Parser.parse_args()
# Dictonaries
dic_adminsPage = Args.dicPath +'/'+ Args.dicAdminsPage
'''
def main():
print('\n')
print(B+' _ ___ ')
print(B+' __ __ | |__ ___ _ _ _ __ |__ \ ')
print(B+' \ \ /\ / / | \_ \ / _ \ | | | | | |__| / / ')
print(B+' \ V V / | | | | | (_) | | |_| | | | |_| ')
print(B+' \_/\_/ |_| |_| \___/ \__,_| |_| (_) ')
print('\n')
print(lC+'Beta 1.6 JopCode')
print('\n')
select_choice()
print(lG+'\n---------')
print(lG+'- C Y A -')
print(lG+'---------\n')
print(lR+'[+] Script by JopCode\n')
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print(lG+'\n---------')
print(lG+'- C Y A -')
print(lG+'---------\n')
print(lR+'[+] Script by JopCode\n')
try:
sys.exit(0)
except SystemExit:
os._exit(0)
| gpl-3.0 | 5,525,027,244,532,273,000 | 31 | 166 | 0.476875 | false |
fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractWanderingmusetranslationWordpressCom.py | 1 | 1211 | def extractWanderingmusetranslationWordpressCom(item):
'''
Parser for 'wanderingmusetranslation.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
chp_prefixes = [
('SH Chapter ', 'Swain Hakushaku', 'translated'),
('AS Chapter ', 'My Status As An Assassin Is Obviously Stronger Than That Of the Hero’s', 'translated'),
('Cat ', 'Me and My Beloved Cat (Girlfriend)', 'translated'),
]
if item['tags'] == ['Uncategorized']:
for prefix, series, tl_type in chp_prefixes:
if item['title'].lower().startswith(prefix.lower()):
return buildReleaseMessageWithType(item, series, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | bsd-3-clause | 9,097,084,817,052,766,000 | 35.666667 | 122 | 0.595533 | false |
portableant/open-context-py | opencontext_py/apps/edit/inputs/rules/models.py | 1 | 1257 | import collections
from jsonfield import JSONField
from datetime import datetime
from django.utils import timezone
from django.db import models
# Stores information about fields for a data entry form
class InputRule(models.Model):
uuid = models.CharField(max_length=50, primary_key=True) # uuid for the rule itself
project_uuid = models.CharField(max_length=50, db_index=True)
profile_uuid = models.CharField(max_length=50, db_index=True) # uuid for the input profile
label = models.CharField(max_length=200) # label for data entry form
note = models.TextField() # note for instructions in data entry
rules = models.TextField() # for JSON data for check values
t_note = models.TextField() # note for condition being True
f_note = models.TextField() # note for condition being False
created = models.DateTimeField()
updated = models.DateTimeField(auto_now=True)
def save(self, *args, **kwargs):
"""
saves a record with a creation date
"""
if self.created is None:
self.created = datetime.now()
super(InputRule, self).save(*args, **kwargs)
class Meta:
db_table = 'crt_rules'
ordering = ['profile_uuid',
'label']
| gpl-3.0 | 2,131,226,074,972,182,500 | 38.28125 | 95 | 0.674622 | false |
henriquegemignani/randovania | randovania/game_description/node_search.py | 1 | 2783 | from typing import Dict, Optional
import networkx
from randovania.game_description.area import Area
from randovania.game_description.game_patches import GamePatches
from randovania.game_description.node import Node, DockNode, TeleporterNode, PickupNode, ResourceNode
from randovania.game_description.resources.pickup_index import PickupIndex
from randovania.game_description.resources.resource_info import ResourceInfo
from randovania.game_description.world_list import WorldList
def distances_to_node(world_list: WorldList, starting_node: Node,
*,
ignore_elevators: bool = True,
cutoff: Optional[int] = None,
patches: Optional[GamePatches] = None,
) -> Dict[Area, int]:
"""
Compute the shortest distance from a node to all reachable areas.
:param world_list:
:param starting_node:
:param ignore_elevators:
:param cutoff: Exclude areas with a length longer that cutoff.
:param patches:
:return: Dict keyed by area to shortest distance to starting_node.
"""
g = networkx.DiGraph()
dock_connections = patches.dock_connection if patches is not None else {}
elevator_connections = patches.elevator_connection if patches is not None else {}
for area in world_list.all_areas:
g.add_node(area)
for world in world_list.worlds:
for area in world.areas:
new_areas = set()
for node in area.nodes:
if isinstance(node, DockNode):
connection = dock_connections.get((area.area_asset_id, node.dock_index), node.default_connection)
new_areas.add(world.area_by_asset_id(connection.area_asset_id))
elif isinstance(node, TeleporterNode) and not ignore_elevators:
connection = elevator_connections.get(node.teleporter_instance_id, node.default_connection)
new_areas.add(world_list.area_by_area_location(connection))
for next_area in new_areas:
g.add_edge(area, next_area)
return networkx.single_source_shortest_path_length(g, world_list.nodes_to_area(starting_node), cutoff)
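# Illustrative usage sketch (added; the objects below are hypothetical and not
# defined in this module). The return value maps each reachable Area to its
# shortest hop count from the starting node:
#   distances = distances_to_node(world_list, starting_node, cutoff=5)
#   for area, length in distances.items():
#       print(area, length)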
def pickup_index_to_node(world_list: WorldList, index: PickupIndex) -> PickupNode:
for node in world_list.all_nodes:
if isinstance(node, PickupNode) and node.pickup_index == index:
return node
raise ValueError(f"PickupNode with {index} not found.")
def node_with_resource(world_list: WorldList, resource: ResourceInfo) -> ResourceNode:
for node in world_list.all_nodes:
if isinstance(node, ResourceNode) and node.resource() == resource:
return node
raise ValueError(f"ResourceNode with {resource} not found.")
| gpl-3.0 | -1,139,548,063,911,922,300 | 42.484375 | 117 | 0.673015 | false |
srevenant/reactor | src/reactor/core/memstate.py | 1 | 4742 | #$#HEADER-START
# vim:set expandtab ts=4 sw=4 ai ft=python:
#
# Reactor Configuration Event Engine
#
# Copyright (C) 2016 Brandon Gillespie
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#$#HEADER-END
"""
Memory Cache Management. Derived from onetimejwt
"""
import time
import threading
import timeinterval
#from reactor.core import trace
################################################################################
def mutex(func):
"""use a thread lock on current method, if self.lock is defined"""
def wrapper(*args, **kwargs):
"""Decorator Wrapper"""
lock = args[0].lock
lock.acquire(True)
try:
return func(*args, **kwargs)
except:
raise
finally:
lock.release()
return wrapper
################################################################################
class Cache(object):
"""
Threadsafe generic cache.
Meant to be configured after initialization. Supports deeper objects
(such as db master) having a cache, but letting objects configure what
is cached as they are supported.
"""
cache = None
ctypes = None
lock = threading.Lock()
############################################################################
def __init__(self, **kwargs):
self.cache = dict()
self.ctypes = dict()
for ctype in kwargs:
if ctype == 'housekeeper':
continue
self.configure(ctype, kwargs[ctype])
self._clean()
############################################################################
def start_housekeeper(self, interval):
"""startup the housekeeper interval"""
timeinterval.start(interval * 1000, self._clean)
############################################################################
def configure(self, ctype, age):
"""configure a parameter for cache"""
self.ctypes[ctype] = age
self.cache[ctype] = dict()
############################################################################
@mutex
def _clean(self):
"""
Run by housekeeper thread, cleans out stale cache items.
If this becomes a bottleneck by taking too long, then change it
so that only the call to delete is threadsafe, instead of the overall
process - BJG
"""
if not self.ctypes:
return
now = time.time()
for ctype in ['policy']: # self.ctypes:
keys = list(self.cache[ctype].keys()) # to avoid RuntimeError
for key in keys:
item = self.cache[ctype].get(key)
if item and item['expires'] < now:
del self.cache[ctype][key]
############################################################################
# pylint: disable=unused-argument
@mutex
def remove_cache(self, ctype, key, start=None):
"""remove an item from the cache"""
if self.cache[ctype].get(key):
#DEBUG# trace("CACHE REMOVE {} {}".format(ctype, key))
del self.cache[ctype][key]
def clear_type(self, ctype):
"""remove an item from the cache"""
#DEBUG# trace("CACHE CLEAR {}".format(ctype))
self.cache[ctype] = dict()
def get_cache(self, ctype, key, start=None):
"""get an item from the global mutex-safe cache"""
if not start:
start = time.time()
item = self.cache[ctype].get(key)
if item and item['expires'] > start:
#DEBUG# trace("CACHE HIT {} {}".format(ctype, key))
return item['value']
#DEBUG# trace("CACHE MISS {} {}".format(ctype, key))
return None
@mutex
def set_cache(self, ctype, key, value, base_time=None):
"""Set an item in the global mutex-safe cache"""
if not base_time:
base_time = time.time()
#DEBUG# trace("CACHE SET {} {}".format(ctype, key))
expires = base_time + self.ctypes[ctype]
self.cache[ctype][key] = dict(expires=expires, value=value)
return expires
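# Illustrative usage sketch (added; not part of the original module). Assumes a
# 'policy' cache type whose entries expire after 60 seconds:
#   cache = Cache(policy=60)
#   cache.set_cache('policy', 'user:42', {'role': 'admin'})
#   cache.get_cache('policy', 'user:42')     # -> {'role': 'admin'} until expiry
#   cache.remove_cache('policy', 'user:42')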
| agpl-3.0 | 4,588,729,354,238,118,000 | 33.867647 | 80 | 0.531843 | false |
m-ober/byceps | scripts/import_permissions_and_roles.py | 1 | 1418 | #!/usr/bin/env python
"""Import permissions, roles, and their relations from a JSON file.
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import json
import click
from byceps.services.authorization import service as authz_service
from byceps.util.system import get_config_filename_from_env_or_exit
from _util import app_context
@click.command()
@click.argument('json_file', type=click.File())
def execute(json_file):
data = json.load(json_file)
permissions = data['permissions']
roles = data['roles']
click.echo(f'Importing {len(permissions)} permissions ... ', nl=False)
create_permissions(permissions)
click.secho('done.', fg='green')
click.echo(f'Importing {len(roles)} roles ... ', nl=False)
create_roles(roles)
click.secho('done.', fg='green')
def create_permissions(permissions):
for permission in permissions:
authz_service.create_permission(permission['id'], permission['title'])
def create_roles(roles):
for role in roles:
role_id = role['id']
authz_service.create_role(role_id, role['title'])
for permission_id in role['assigned_permissions']:
authz_service.assign_permission_to_role(permission_id, role_id)
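# Expected input shape (added note, inferred from the loaders above; the
# example values are hypothetical):
#   {
#       "permissions": [{"id": "board.view", "title": "View boards"}],
#       "roles": [
#           {
#               "id": "moderator",
#               "title": "Moderator",
#               "assigned_permissions": ["board.view"]
#           }
#       ]
#   }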
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
| bsd-3-clause | -46,085,055,061,123,140 | 25.259259 | 78 | 0.687588 | false |
platformio/platformio | platformio/proc.py | 1 | 5706 | # Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
from threading import Thread
from platformio import exception
from platformio.compat import (
WINDOWS,
get_filesystem_encoding,
get_locale_encoding,
string_types,
)
class AsyncPipeBase(object):
def __init__(self):
self._fd_read, self._fd_write = os.pipe()
self._pipe_reader = os.fdopen(self._fd_read)
self._buffer = ""
self._thread = Thread(target=self.run)
self._thread.start()
def get_buffer(self):
return self._buffer
def fileno(self):
return self._fd_write
def run(self):
try:
self.do_reading()
except (KeyboardInterrupt, SystemExit, IOError):
self.close()
def do_reading(self):
raise NotImplementedError()
def close(self):
self._buffer = ""
os.close(self._fd_write)
self._thread.join()
class BuildAsyncPipe(AsyncPipeBase):
def __init__(self, line_callback, data_callback):
self.line_callback = line_callback
self.data_callback = data_callback
super(BuildAsyncPipe, self).__init__()
def do_reading(self):
line = ""
print_immediately = False
for byte in iter(lambda: self._pipe_reader.read(1), ""):
self._buffer += byte
if line and byte.strip() and line[-3:] == (byte * 3):
print_immediately = True
if print_immediately:
# leftover bytes
if line:
self.data_callback(line)
line = ""
self.data_callback(byte)
if byte == "\n":
print_immediately = False
else:
line += byte
if byte != "\n":
continue
self.line_callback(line)
line = ""
self._pipe_reader.close()
class LineBufferedAsyncPipe(AsyncPipeBase):
def __init__(self, line_callback):
self.line_callback = line_callback
super(LineBufferedAsyncPipe, self).__init__()
def do_reading(self):
for line in iter(self._pipe_reader.readline, ""):
self._buffer += line
self.line_callback(line)
self._pipe_reader.close()
def exec_command(*args, **kwargs):
result = {"out": None, "err": None, "returncode": None}
default = dict(stdout=subprocess.PIPE, stderr=subprocess.PIPE)
default.update(kwargs)
kwargs = default
p = subprocess.Popen(*args, **kwargs)
try:
result["out"], result["err"] = p.communicate()
result["returncode"] = p.returncode
except KeyboardInterrupt:
raise exception.AbortedByUser()
finally:
for s in ("stdout", "stderr"):
if isinstance(kwargs[s], AsyncPipeBase):
kwargs[s].close()
for s in ("stdout", "stderr"):
if isinstance(kwargs[s], AsyncPipeBase):
result[s[3:]] = kwargs[s].get_buffer()
for k, v in result.items():
if isinstance(result[k], bytes):
try:
result[k] = result[k].decode(
get_locale_encoding() or get_filesystem_encoding()
)
except UnicodeDecodeError:
result[k] = result[k].decode("latin-1")
if v and isinstance(v, string_types):
result[k] = result[k].strip()
return result
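# Illustrative usage sketch (added; not part of the original module):
#   result = exec_command(["python", "--version"])
#   if result["returncode"] == 0:
#       print(result["out"] or result["err"])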
def is_ci():
return os.getenv("CI", "").lower() == "true"
def is_container():
if os.path.exists("/.dockerenv"):
return True
if not os.path.isfile("/proc/1/cgroup"):
return False
with open("/proc/1/cgroup") as fp:
return ":/docker/" in fp.read()
def get_pythonexe_path():
return os.environ.get("PYTHONEXEPATH", os.path.normpath(sys.executable))
def copy_pythonpath_to_osenv():
_PYTHONPATH = []
if "PYTHONPATH" in os.environ:
_PYTHONPATH = os.environ.get("PYTHONPATH").split(os.pathsep)
for p in os.sys.path:
conditions = [p not in _PYTHONPATH]
if not WINDOWS:
conditions.append(
os.path.isdir(os.path.join(p, "click"))
or os.path.isdir(os.path.join(p, "platformio"))
)
if all(conditions):
_PYTHONPATH.append(p)
os.environ["PYTHONPATH"] = os.pathsep.join(_PYTHONPATH)
def where_is_program(program, envpath=None):
env = os.environ
if envpath:
env["PATH"] = envpath
# try OS's built-in commands
try:
result = exec_command(["where" if WINDOWS else "which", program], env=env)
if result["returncode"] == 0 and os.path.isfile(result["out"].strip()):
return result["out"].strip()
except OSError:
pass
# look up in $PATH
for bin_dir in env.get("PATH", "").split(os.pathsep):
if os.path.isfile(os.path.join(bin_dir, program)):
return os.path.join(bin_dir, program)
if os.path.isfile(os.path.join(bin_dir, "%s.exe" % program)):
return os.path.join(bin_dir, "%s.exe" % program)
return program
| apache-2.0 | -7,367,964,481,521,665,000 | 28.564767 | 82 | 0.58307 | false |
monaparty/counterparty-lib | counterpartylib/lib/messages/destroy.py | 1 | 4261 | #! /usr/bin/python3
"""Destroy a quantity of an asset."""
import struct
import json
import logging
logger = logging.getLogger(__name__)
from counterpartylib.lib import util
from counterpartylib.lib import config
from counterpartylib.lib import script
from counterpartylib.lib import message_type
from counterpartylib.lib.script import AddressError
from counterpartylib.lib.exceptions import *
FORMAT = '>QQ8s'
LENGTH = 8 + 8 + 8
ID = 110
def initialise(db):
cursor = db.cursor()
cursor.execute('''CREATE TABLE IF NOT EXISTS destructions(
tx_index INTEGER PRIMARY KEY,
tx_hash TEXT UNIQUE,
block_index INTEGER,
source TEXT,
asset INTEGER,
quantity INTEGER,
tag TEXT,
status TEXT,
FOREIGN KEY (tx_index, tx_hash, block_index) REFERENCES transactions(tx_index, tx_hash, block_index))
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
status_idx ON destructions (status)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
address_idx ON destructions (source)
''')
def pack(db, asset, quantity, tag):
data = message_type.pack(ID)
if isinstance(tag, str):
tag = bytes.fromhex(tag)
data += struct.pack(FORMAT, util.get_asset_id(db, asset, util.CURRENT_BLOCK_INDEX), quantity, tag)
return data
def unpack(db, message):
try:
asset_id, quantity, tag = struct.unpack(FORMAT, message)
asset = util.get_asset_name(db, asset_id, util.CURRENT_BLOCK_INDEX)
except struct.error:
raise UnpackError('could not unpack')
except AssetIDError:
raise UnpackError('asset id invalid')
return asset, quantity, tag
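# Added note (derived from FORMAT/LENGTH above): a destroy message body is
# LENGTH == 24 bytes, packed big-endian as
#   8-byte asset id | 8-byte quantity | 8-byte tag, e.g.
#   struct.unpack(FORMAT, struct.pack(FORMAT, 1, 1000, b'8bytetag'))
#   -> (1, 1000, b'8bytetag')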
def validate (db, source, destination, asset, quantity):
try:
util.get_asset_id(db, asset, util.CURRENT_BLOCK_INDEX)
except AssetError:
raise ValidateError('asset invalid')
try:
script.validate(source)
except AddressError:
raise ValidateError('source address invalid')
if destination:
raise ValidateError('destination exists')
if asset == config.BTC:
raise ValidateError('cannot destroy {}'.format(config.BTC))
if type(quantity) != int:
raise ValidateError('quantity not integer')
if quantity > config.MAX_INT:
raise ValidateError('integer overflow, quantity too large')
if quantity < 0:
raise ValidateError('quantity negative')
if util.get_balance(db, source, asset) < quantity:
raise BalanceError('balance insufficient')
def compose (db, source, asset, quantity, tag):
# resolve subassets
asset = util.resolve_subasset_longname(db, asset)
validate(db, source, None, asset, quantity)
data = pack(db, asset, quantity, tag)
return (source, [], data)
def parse (db, tx, message):
status = 'valid'
asset, quantity, tag = None, None, None
try:
asset, quantity, tag = unpack(db, message)
validate(db, tx['source'], tx['destination'], asset, quantity)
util.debit(db, tx['source'], asset, quantity, 'destroy', tx['tx_hash'])
except UnpackError as e:
status = 'invalid: ' + ''.join(e.args)
except (ValidateError, BalanceError) as e:
status = 'invalid: ' + ''.join(e.args)
bindings = {
'tx_index': tx['tx_index'],
'tx_hash': tx['tx_hash'],
'block_index': tx['block_index'],
'source': tx['source'],
'asset': asset,
'quantity': quantity,
'tag': tag,
'status': status,
}
if "integer overflow" not in status:
sql = 'insert into destructions values(:tx_index, :tx_hash, :block_index, :source, :asset, :quantity, :tag, :status)'
cursor = db.cursor()
cursor.execute(sql, bindings)
else:
logger.warn("Not storing [destroy] tx [%s]: %s" % (tx['tx_hash'], status))
logger.debug("Bindings: %s" % (json.dumps(bindings), ))
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| mit | -1,950,463,795,646,828,500 | 29.007042 | 125 | 0.596574 | false |
DeltaEpsilon-HackFMI2/FMICalendar-REST | venv/lib/python2.7/site-packages/rest_framework/views.py | 1 | 15361 | """
Provides an APIView class that is the base of all views in REST framework.
"""
from __future__ import unicode_literals
from django.core.exceptions import PermissionDenied
from django.http import Http404
from django.utils.datastructures import SortedDict
from django.views.decorators.csrf import csrf_exempt
from rest_framework import status, exceptions
from rest_framework.compat import smart_text, HttpResponseBase, View
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework.utils import formatting
def get_view_name(view_cls, suffix=None):
"""
Given a view class, return a textual name to represent the view.
This name is used in the browsable API, and in OPTIONS responses.
This function is the default for the `VIEW_NAME_FUNCTION` setting.
"""
name = view_cls.__name__
name = formatting.remove_trailing_string(name, 'View')
name = formatting.remove_trailing_string(name, 'ViewSet')
name = formatting.camelcase_to_spaces(name)
if suffix:
name += ' ' + suffix
return name
def get_view_description(view_cls, html=False):
"""
Given a view class, return a textual description to represent the view.
This name is used in the browsable API, and in OPTIONS responses.
This function is the default for the `VIEW_DESCRIPTION_FUNCTION` setting.
"""
description = view_cls.__doc__ or ''
description = formatting.dedent(smart_text(description))
if html:
return formatting.markup_description(description)
return description
def exception_handler(exc):
"""
Returns the response that should be used for any given exception.
By default we handle the REST framework `APIException`, and also
Django's builtin `Http404` and `PermissionDenied` exceptions.
Any unhandled exceptions may return `None`, which will cause a 500 error
to be raised.
"""
if isinstance(exc, exceptions.APIException):
headers = {}
if getattr(exc, 'auth_header', None):
headers['WWW-Authenticate'] = exc.auth_header
if getattr(exc, 'wait', None):
headers['X-Throttle-Wait-Seconds'] = '%d' % exc.wait
return Response({'detail': exc.detail},
status=exc.status_code,
headers=headers)
elif isinstance(exc, Http404):
return Response({'detail': 'Not found'},
status=status.HTTP_404_NOT_FOUND)
elif isinstance(exc, PermissionDenied):
return Response({'detail': 'Permission denied'},
status=status.HTTP_403_FORBIDDEN)
# Note: Unhandled exceptions will raise a 500 error.
return None
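# Added note (hedged): a project can swap in its own handler with the same
# signature via the EXCEPTION_HANDLER setting; the dotted path below is only
# an example, e.g. in Django settings:
#   REST_FRAMEWORK = {
#       'EXCEPTION_HANDLER': 'myproject.utils.custom_exception_handler',
#   }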
class APIView(View):
# The following policies may be set at either globally, or per-view.
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
parser_classes = api_settings.DEFAULT_PARSER_CLASSES
authentication_classes = api_settings.DEFAULT_AUTHENTICATION_CLASSES
throttle_classes = api_settings.DEFAULT_THROTTLE_CLASSES
permission_classes = api_settings.DEFAULT_PERMISSION_CLASSES
content_negotiation_class = api_settings.DEFAULT_CONTENT_NEGOTIATION_CLASS
    # Allow dependency injection of other settings to make testing easier.
settings = api_settings
@classmethod
def as_view(cls, **initkwargs):
"""
Store the original class on the view function.
This allows us to discover information about the view when we do URL
reverse lookups. Used for breadcrumb generation.
"""
view = super(APIView, cls).as_view(**initkwargs)
view.cls = cls
return view
@property
def allowed_methods(self):
"""
Wrap Django's private `_allowed_methods` interface in a public property.
"""
return self._allowed_methods()
@property
def default_response_headers(self):
# TODO: deprecate?
# TODO: Only vary by accept if multiple renderers
return {
'Allow': ', '.join(self.allowed_methods),
'Vary': 'Accept'
}
def http_method_not_allowed(self, request, *args, **kwargs):
"""
If `request.method` does not correspond to a handler method,
determine what kind of exception to raise.
"""
raise exceptions.MethodNotAllowed(request.method)
def permission_denied(self, request):
"""
If request is not permitted, determine what kind of exception to raise.
"""
if not self.request.successful_authenticator:
raise exceptions.NotAuthenticated()
raise exceptions.PermissionDenied()
def throttled(self, request, wait):
"""
If request is throttled, determine what kind of exception to raise.
"""
raise exceptions.Throttled(wait)
def get_authenticate_header(self, request):
"""
If a request is unauthenticated, determine the WWW-Authenticate
header to use for 401 responses, if any.
"""
authenticators = self.get_authenticators()
if authenticators:
return authenticators[0].authenticate_header(request)
def get_parser_context(self, http_request):
"""
Returns a dict that is passed through to Parser.parse(),
as the `parser_context` keyword argument.
"""
# Note: Additionally `request` will also be added to the context
# by the Request object.
return {
'view': self,
'args': getattr(self, 'args', ()),
'kwargs': getattr(self, 'kwargs', {})
}
def get_renderer_context(self):
"""
Returns a dict that is passed through to Renderer.render(),
as the `renderer_context` keyword argument.
"""
# Note: Additionally 'response' will also be added to the context,
# by the Response object.
return {
'view': self,
'args': getattr(self, 'args', ()),
'kwargs': getattr(self, 'kwargs', {}),
'request': getattr(self, 'request', None)
}
def get_view_name(self):
"""
Return the view name, as used in OPTIONS responses and in the
browsable API.
"""
func = self.settings.VIEW_NAME_FUNCTION
return func(self.__class__, getattr(self, 'suffix', None))
def get_view_description(self, html=False):
"""
Return some descriptive text for the view, as used in OPTIONS responses
and in the browsable API.
"""
func = self.settings.VIEW_DESCRIPTION_FUNCTION
return func(self.__class__, html)
# API policy instantiation methods
def get_format_suffix(self, **kwargs):
"""
Determine if the request includes a '.json' style format suffix
"""
if self.settings.FORMAT_SUFFIX_KWARG:
return kwargs.get(self.settings.FORMAT_SUFFIX_KWARG)
def get_renderers(self):
"""
Instantiates and returns the list of renderers that this view can use.
"""
return [renderer() for renderer in self.renderer_classes]
def get_parsers(self):
"""
Instantiates and returns the list of parsers that this view can use.
"""
return [parser() for parser in self.parser_classes]
def get_authenticators(self):
"""
Instantiates and returns the list of authenticators that this view can use.
"""
return [auth() for auth in self.authentication_classes]
def get_permissions(self):
"""
Instantiates and returns the list of permissions that this view requires.
"""
return [permission() for permission in self.permission_classes]
def get_throttles(self):
"""
Instantiates and returns the list of throttles that this view uses.
"""
return [throttle() for throttle in self.throttle_classes]
def get_content_negotiator(self):
"""
Instantiate and return the content negotiation class to use.
"""
if not getattr(self, '_negotiator', None):
self._negotiator = self.content_negotiation_class()
return self._negotiator
# API policy implementation methods
def perform_content_negotiation(self, request, force=False):
"""
Determine which renderer and media type to use render the response.
"""
renderers = self.get_renderers()
conneg = self.get_content_negotiator()
try:
return conneg.select_renderer(request, renderers, self.format_kwarg)
except Exception:
if force:
return (renderers[0], renderers[0].media_type)
raise
def perform_authentication(self, request):
"""
Perform authentication on the incoming request.
Note that if you override this and simply 'pass', then authentication
will instead be performed lazily, the first time either
`request.user` or `request.auth` is accessed.
"""
request.user
def check_permissions(self, request):
"""
Check if the request should be permitted.
Raises an appropriate exception if the request is not permitted.
"""
for permission in self.get_permissions():
if not permission.has_permission(request, self):
self.permission_denied(request)
def check_object_permissions(self, request, obj):
"""
Check if the request should be permitted for a given object.
Raises an appropriate exception if the request is not permitted.
"""
for permission in self.get_permissions():
if not permission.has_object_permission(request, self, obj):
self.permission_denied(request)
def check_throttles(self, request):
"""
Check if request should be throttled.
Raises an appropriate exception if the request is throttled.
"""
for throttle in self.get_throttles():
if not throttle.allow_request(request, self):
self.throttled(request, throttle.wait())
# Dispatch methods
    def initialize_request(self, request, *args, **kwargs):
"""
Returns the initial request object.
"""
parser_context = self.get_parser_context(request)
return Request(request,
parsers=self.get_parsers(),
authenticators=self.get_authenticators(),
negotiator=self.get_content_negotiator(),
parser_context=parser_context)
def initial(self, request, *args, **kwargs):
"""
Runs anything that needs to occur prior to calling the method handler.
"""
self.format_kwarg = self.get_format_suffix(**kwargs)
# Ensure that the incoming request is permitted
self.perform_authentication(request)
self.check_permissions(request)
self.check_throttles(request)
# Perform content negotiation and store the accepted info on the request
neg = self.perform_content_negotiation(request)
request.accepted_renderer, request.accepted_media_type = neg
def finalize_response(self, request, response, *args, **kwargs):
"""
Returns the final response object.
"""
# Make the error obvious if a proper response is not returned
assert isinstance(response, HttpResponseBase), (
'Expected a `Response`, `HttpResponse` or `HttpStreamingResponse` '
'to be returned from the view, but received a `%s`'
% type(response)
)
if isinstance(response, Response):
if not getattr(request, 'accepted_renderer', None):
neg = self.perform_content_negotiation(request, force=True)
request.accepted_renderer, request.accepted_media_type = neg
response.accepted_renderer = request.accepted_renderer
response.accepted_media_type = request.accepted_media_type
response.renderer_context = self.get_renderer_context()
for key, value in self.headers.items():
response[key] = value
return response
def handle_exception(self, exc):
"""
Handle any exception that occurs, by returning an appropriate response,
or re-raising the error.
"""
if isinstance(exc, (exceptions.NotAuthenticated,
exceptions.AuthenticationFailed)):
# WWW-Authenticate header for 401 responses, else coerce to 403
auth_header = self.get_authenticate_header(self.request)
if auth_header:
exc.auth_header = auth_header
else:
exc.status_code = status.HTTP_403_FORBIDDEN
response = self.settings.EXCEPTION_HANDLER(exc)
if response is None:
raise
response.exception = True
return response
# Note: session based authentication is explicitly CSRF validated,
# all other authentication is CSRF exempt.
@csrf_exempt
def dispatch(self, request, *args, **kwargs):
"""
`.dispatch()` is pretty much the same as Django's regular dispatch,
but with extra hooks for startup, finalize, and exception handling.
"""
self.args = args
self.kwargs = kwargs
request = self.initialize_request(request, *args, **kwargs)
self.request = request
self.headers = self.default_response_headers # deprecate?
try:
self.initial(request, *args, **kwargs)
# Get the appropriate handler method
if request.method.lower() in self.http_method_names:
handler = getattr(self, request.method.lower(),
self.http_method_not_allowed)
else:
handler = self.http_method_not_allowed
response = handler(request, *args, **kwargs)
except Exception as exc:
response = self.handle_exception(exc)
self.response = self.finalize_response(request, response, *args, **kwargs)
return self.response
def options(self, request, *args, **kwargs):
"""
Handler method for HTTP 'OPTIONS' request.
We may as well implement this as Django will otherwise provide
a less useful default implementation.
"""
return Response(self.metadata(request), status=status.HTTP_200_OK)
def metadata(self, request):
"""
Return a dictionary of metadata about the view.
Used to return responses for OPTIONS requests.
"""
# By default we can't provide any form-like information, however the
# generic views override this implementation and add additional
# information for POST and PUT methods, based on the serializer.
ret = SortedDict()
ret['name'] = self.get_view_name()
ret['description'] = self.get_view_description()
ret['renders'] = [renderer.media_type for renderer in self.renderer_classes]
ret['parses'] = [parser.media_type for parser in self.parser_classes]
return ret
| mit | -931,368,044,246,631,700 | 35.143529 | 84 | 0.627238 | false |
NNPDF/reportengine | src/reportengine/tests/test_executor.py | 1 | 2275 | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 13 22:51:32 2015
@author: zah
"""
import unittest
import time
from reportengine.dag import DAG
from reportengine.utils import ChainMap
from reportengine import namespaces
from reportengine.resourcebuilder import (ResourceExecutor, CallSpec)
def f(param):
print("Executing f")
time.sleep(0.1)
return "fresult: %s" % param
def g(fresult):
print("Executing g")
time.sleep(0.2)
return fresult*2
def h(fresult):
print("Executing h")
time.sleep(0.2)
return fresult*3
def m(gresult, hresult, param=None):
print("executing m")
return (gresult+hresult)*(param//2)
def n(mresult):
return mresult
def o(mresult):
return mresult*2
def p(mresult):
return mresult*3
class TestResourceExecutor(unittest.TestCase, ResourceExecutor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ResourceExecutor.__init__(self, None, None)
def setUp(self):
self.rootns = ChainMap({'param':4, 'inner': {}})
def nsspec(x, beginning=()):
ns = namespaces.resolve(self.rootns, beginning)
default_label = '_default' + str(x)
namespaces.push_nslevel(ns, default_label)
return beginning + (default_label,)
self.graph = DAG()
fcall = CallSpec(f, ('param',), 'fresult',
nsspec(f))
gcall = CallSpec(g, ('fresult',), 'gresult',
nsspec(g))
hcall = CallSpec(h, ('fresult',), 'hresult',
nsspec(h))
mcall = CallSpec(m, ('gresult','hresult','param'), 'mresult',
nsspec(m))
self.graph.add_node(fcall)
self.graph.add_node(gcall, inputs={fcall})
self.graph.add_node(hcall, inputs={fcall})
self.graph.add_node(mcall, inputs={gcall, hcall})
def _test_ns(self):
mresult = 'fresult: 4'*10
namespace = self.rootns
self.assertEqual(namespace['mresult'], mresult)
def test_seq_execute(self):
self.execute_sequential()
self._test_ns()
def test_parallel_execute(self):
self.execute_parallel()
self._test_ns()
if __name__ =='__main__':
unittest.main() | gpl-2.0 | -3,430,533,522,206,889,000 | 23.212766 | 69 | 0.587253 | false |
RedHatInsights/insights-core | insights/parsers/tests/test_neutron_l3_agent_log.py | 1 | 1372 | import doctest
from insights.parsers import neutron_l3_agent_log
from insights.parsers.neutron_l3_agent_log import NeutronL3AgentLog
from insights.tests import context_wrap
from datetime import datetime
AGENT_LOG = """
2017-09-17 10:05:06.241 141544 INFO neutron.agent.l3.ha [-] Router 01d51830-0e3e-4100-a891-efd7dbc000b1 transitioned to backup
2017-09-17 10:05:07.828 141544 WARNING neutron.agent.linux.iptables_manager [-] Duplicate iptables rule detected. This may indicate a bug in the the iptables rule generation code. Line: -A neutron-l3-agent-INPUT -p tcp -m tcp --dport 9697 -j DROP
2017-09-17 10:05:07.829 141544 WARNING neutron.agent.linux.iptables_manager [-] Duplicate iptables rule detected. This may indicate a bug in the the iptables rule generation code. Line: -A neutron-l3-agent-INPUT -m mark --mark 0x1/0xffff -j ACCEP
"""
def test_metrics_log():
log = NeutronL3AgentLog(context_wrap(AGENT_LOG))
assert len(log.get('INFO')) == 1
assert 'Duplicate iptables rule detected' in log
assert len(log.get('Duplicate iptables rule detected')) == 2
assert len(list(log.get_after(datetime(2017, 2, 17, 19, 36, 38)))) == 3
def test_doc():
env = {'agent_log': NeutronL3AgentLog(context_wrap(AGENT_LOG, path='/var/log/neutron/l3-agent.log'))}
failed, total = doctest.testmod(neutron_l3_agent_log, globs=env)
assert failed == 0
| apache-2.0 | 1,068,674,162,350,740,700 | 51.769231 | 246 | 0.742711 | false |
SmartInfrastructures/enos | tests/unit/utils/test_network_constraints.py | 1 | 9499 | import unittest
from enos.utils.network_constraints import *
from enos.provider.host import Host
class TestExpandDescription(unittest.TestCase):
def test_no_expansion(self):
desc = {
'src': 'grp1',
'dst': 'grp2',
'delay': 0,
'rate': 0,
'symetric': True
}
descs = expand_description(desc)
self.assertEquals(1, len(descs))
self.assertDictEqual(desc, descs[0])
def test_src_expansion(self):
desc = {
'src': 'grp[1-3]',
'dst': 'grp4',
'delay': 0,
'rate': 0,
'symetric': True
}
# checking cardinality : the cartesian product
descs = expand_description(desc)
self.assertEquals(3, len(descs))
# checking that expansion has been generated
srcs = map(lambda d: d.pop('src'), descs)
self.assertEquals(set(srcs), {'grp1', 'grp2', 'grp3'})
# checking that the remaining is untouched
desc.pop('src')
for d in descs:
self.assertDictEqual(desc, d)
def test_dst_expansion(self):
desc = {
'src': 'grp4',
'dst': 'grp[1-3]',
'delay': 0,
'rate': 0,
'symetric': True
}
# checking cardinality : the cartesian product
descs = expand_description(desc)
self.assertEquals(3, len(descs))
# checking that expansion has been generated
dsts = map(lambda d: d.pop('dst'), descs)
self.assertEquals(set(dsts), {'grp1', 'grp2', 'grp3'})
# checking that the remaining is untouched
desc.pop('dst')
for d in descs:
self.assertDictEqual(desc, d)
def test_both_expansion(self):
desc = {
'src': 'grp[1-3]',
'dst': 'grp[4-6]',
'delay': 0,
'rate': 0,
'symetric': True
}
# checking cardinality : the cartesian product
descs = expand_description(desc)
self.assertEquals(9, len(descs))
# checking that expansion has been generated
dsts = map(lambda d: d.pop('dst'), descs)
self.assertEquals(set(dsts), {'grp4', 'grp5', 'grp6'})
# checking that expansion has been generated
srcs = map(lambda d: d.pop('src'), descs)
self.assertEquals(set(srcs), {'grp1', 'grp2', 'grp3'})
# checking that the remaining is untouched
desc.pop('dst')
desc.pop('src')
for d in descs:
self.assertDictEqual(desc, d)
class TestGenerateDefaultGrpConstraitns(unittest.TestCase):
def test_no_expansion(self):
topology = {
'grp1': {},
'grp2': {}
}
network_constraints = {
'default_rate': '10mbit',
'default_delay': '10ms'
}
descs = generate_default_grp_constraints(topology, network_constraints)
# Cartesian product is applied
self.assertEquals(2, len(descs))
# defaults are applied
for d in descs:
self.assertEquals('10mbit', d['rate'])
self.assertEquals('10ms', d['delay'])
# descs are symetrics
self.assertEquals(descs[0]['src'], descs[1]['dst'])
self.assertEquals(descs[0]['dst'], descs[1]['src'])
def test_with_expansion(self):
topology = {
'grp[1-3]': {},
'grp[4-6]': {}
}
network_constraints = {
'default_rate': '10mbit',
'default_delay': '10ms'
}
descs = generate_default_grp_constraints(topology, network_constraints)
# Cartesian product is applied
self.assertEquals(6*5, len(descs))
# defaults are applied
for d in descs:
self.assertEquals('10mbit', d['rate'])
self.assertEquals('10ms', d['delay'])
def test_except_one_group(self):
topology = {
'grp[1-3]': {},
'grp[4-6]': {}
}
network_constraints = {
'default_rate': '10mbit',
'default_delay': '10ms',
'except': ['grp1']
}
descs = generate_default_grp_constraints(topology, network_constraints)
# Cartesian product is applied but grp1 isn't taken
self.assertEquals(5*4, len(descs))
for d in descs:
self.assertTrue('grp1' != d['src'])
self.assertTrue('grp1' != d['dst'])
class TestGenerateActualGrpConstraints(unittest.TestCase):
def test_no_expansion_no_symetric(self):
constraints = [{
'src': 'grp1',
'dst': 'grp2',
'rate': '20mbit',
'delay': '20ms'
}]
network_constraints = {
'default_rate': '10mbit',
'default_delay': '10ms',
'constraints' : constraints
}
descs = generate_actual_grp_constraints(network_constraints)
self.assertEquals(1, len(descs))
self.assertDictEqual(constraints[0], descs[0])
def test_no_expansion_symetric(self):
constraints = [{
'src': 'grp1',
'dst': 'grp2',
'rate': '20mbit',
'delay': '20ms',
'symetric': True
}]
network_constraints = {
'default_rate': '10mbit',
'default_delay': '10ms',
'constraints' : constraints
}
descs = generate_actual_grp_constraints(network_constraints)
self.assertEquals(2, len(descs))
# bw/rate are applied
for d in descs:
self.assertEquals('20mbit', d['rate'])
self.assertEquals('20ms', d['delay'])
# descs are symetrics
self.assertEquals(descs[0]['src'], descs[1]['dst'])
self.assertEquals(descs[0]['dst'], descs[1]['src'])
def test_expansion_symetric(self):
constraints = [{
'src': 'grp[1-3]',
'dst': 'grp[4-6]',
'rate': '20mbit',
'delay': '20ms',
'symetric': True
}]
network_constraints = {
'default_rate': '10mbit',
'default_delay': '10ms',
'constraints' : constraints
}
descs = generate_actual_grp_constraints(network_constraints)
self.assertEquals(3*3*2, len(descs))
# bw/rate are applied
for d in descs:
self.assertEquals('20mbit', d['rate'])
self.assertEquals('20ms', d['delay'])
def test_expansion_no_symetric(self):
constraints = [{
'src': 'grp[1-3]',
'dst': 'grp[4-6]',
'rate': '20mbit',
'delay': '20ms',
}]
network_constraints = {
'default_rate': '10mbit',
'default_delay': '10ms',
'constraints' : constraints
}
descs = generate_actual_grp_constraints(network_constraints)
self.assertEquals(3*3, len(descs))
# bw/rate are applied
for d in descs:
self.assertEquals('20mbit', d['rate'])
self.assertEquals('20ms', d['delay'])
class TestMergeConstraints(unittest.TestCase):
def test_merge_constraints(self):
constraint = {
'src': 'grp1',
'dst': 'grp2',
'rate': '10mbit',
'delay': '10ms'
}
constraints = [constraint]
override = {
'src': 'grp1',
'dst': 'grp2',
'rate': '20mbit',
'delay': '20ms'
}
overrides = [override]
merge_constraints(constraints, overrides)
self.assertDictEqual(override, constraints[0])
def test_merge_constraints_default(self):
constraint = {
'src': 'grp1',
'dst': 'grp2',
'rate': '10mbit',
'delay': '10ms'
}
constraints = [constraint]
override = {
'src': 'grp1',
'dst': 'grp2',
'rate': '20mbit',
}
overrides = [override]
merged = merge_constraints(constraints, overrides)
override.update({'delay': '10ms'})
self.assertDictEqual(override, constraints[0])
class TestBuildIpConstraints(unittest.TestCase):
def test_build_ip_constraints(self):
# role distribution
rsc = {
'grp1': [Host('node1')],
'grp2': [Host('node2')]
}
# ips informations
ips = {
'node1': {
'all_ipv4_addresses': ['ip11', 'ip12'],
'devices': [{'device': 'eth0', 'active': True},{'device': 'eth1', 'active': True}]
},
'node2': {
'all_ipv4_addresses': ['ip21', 'ip21'],
'devices': [{'device': 'eth0', 'active': True},{'device': 'eth1', 'active': True}]
}
}
# the constraints
constraint = {
'src': 'grp1',
'dst': 'grp2',
'rate': '10mbit',
'delay': '10ms',
'loss': '0.1%'
}
constraints = [constraint]
ips_with_tc = build_ip_constraints(rsc, ips, constraints)
# tc rules are applied on the source only
self.assertTrue('tc' in ips_with_tc['node1'])
tcs = ips_with_tc['node1']['tc']
# one rule per dest ip and source device
self.assertEquals(2*2, len(tcs))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -8,309,345,473,356,157,000 | 28.871069 | 98 | 0.504579 | false |
xunilrj/sandbox | courses/columbia-ia/sklearn/problem3_3.py | 1 | 5477 |
import sys
import pandas as pd
#import numpy as np
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn.cross_validation import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.grid_search import GridSearchCV
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
#import matplotlib.pyplot as plt
#import matplotlib as mpl
#import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D
inputfile = "input3.csv"
try:
inputfile = sys.argv[1]
except:
inputfile = "input3.csv"
outputfile = "output3.csv"
try:
outputfile = sys.argv[2]
except:
outputfile = "output3.csv"
df = pd.read_csv(inputfile)
dftrain, dftest = train_test_split(df, test_size = 0.4, stratify=None)
#def runSVM(kernel, c, gamma):
# clf = svm.SVC(kernel=kernel,C=c,gamma=gamma)
# model = clf.fit(dftrain[["A","B"]].as_matrix(), dftrain["label"].as_matrix())
# score = model.score(dftrain[["A","B"]].as_matrix(), dftrain["label"].as_matrix())
# print(score)
# amin = df["A"].min()
# amax = df["A"].max()
# bmin = df["B"].min()
# bmax = df["B"].max()
# h = .02
# xx, yy = np.meshgrid(np.arange(amin, amax, h), np.arange(bmin, bmax, h))
# zz = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# print(zz)
# zz = zz.reshape(xx.shape)
# plt.contourf(xx, yy, zz, cmap=plt.cm.coolwarm, alpha=0.8)
# plt.scatter(dftest["A"], dftest["B"], c=dftest["label"], cmap=plt.cm.coolwarm)
# plt.xlabel('Sepal length')
# plt.ylabel('Sepal width')
# plt.xlim(xx.min(), xx.max())
# plt.ylim(yy.min(), yy.max())
# plt.show()
def runLinear(file):
parameters = [
{'C': [0.1,0.5,1,5,10,50,100], 'kernel': ['linear']}
]
classifier = svm.SVC()
grid_search = GridSearchCV(classifier, parameters, cv=5, scoring='accuracy')
grid_search.fit(dftrain[["A","B"]].as_matrix(), dftrain["label"].as_matrix())
score = grid_search.score(dftest[["A","B"]].as_matrix(),dftest["label"].as_matrix())
file.write("svm_linear,%f,%f\n" % (grid_search.best_score_,score))
def runPoly(file):
parameters = [
{'C': [0.1,1,3], 'gamma':[0.1,0.5,1,3,6,10], 'kernel': ['poly']}
]
classifier = svm.SVC()
grid_search = GridSearchCV(classifier, parameters, cv=5, scoring='accuracy')
grid_search.fit(dftrain[["A","B"]].as_matrix(), dftrain["label"].as_matrix())
score = grid_search.score(dftest[["A","B"]].as_matrix(),dftest["label"].as_matrix())
file.write("svm_polynomial,%f,%f\n" % (grid_search.best_score_,score))
def runRBF(file):
parameters = [
{'C': [0.1, 0.5, 1, 5, 10, 50, 100], 'gamma':[0.1, 0.5, 1, 3, 6, 10], 'kernel': ['rbf']}
]
classifier = svm.SVC()
grid_search = GridSearchCV(classifier, parameters, cv=5, scoring='accuracy')
grid_search.fit(dftrain[["A","B"]].as_matrix(), dftrain["label"].as_matrix())
score = grid_search.score(dftest[["A","B"]].as_matrix(),dftest["label"].as_matrix())
file.write("svm_rbf,%f,%f\n" % (grid_search.best_score_,score))
def runLogisticRegression(file):
parameters = [
{'C': [0.1, 0.5, 1, 5, 10, 50, 100]}
]
classifier = LogisticRegression()
grid_search = GridSearchCV(classifier, parameters, cv=5, scoring='accuracy')
grid_search.fit(dftrain[["A","B"]].as_matrix(), dftrain["label"].as_matrix())
score = grid_search.score(dftest[["A","B"]].as_matrix(),dftest["label"].as_matrix())
file.write("logistic,%f,%f\n" % (grid_search.best_score_,score))
def runKnn(file):
parameters = [
{'n_neighbors': list(range(1,51,1)), 'leaf_size': list(range(5,61,5))}
]
classifier = KNeighborsClassifier()
grid_search = GridSearchCV(classifier, parameters, cv=5, scoring='accuracy')
grid_search.fit(dftrain[["A","B"]].as_matrix(), dftrain["label"].as_matrix())
score = grid_search.score(dftest[["A","B"]].as_matrix(),dftest["label"].as_matrix())
file.write("knn,%f,%f\n" % (grid_search.best_score_,score))
def runDecisionTree(file):
parameters = [
{'max_depth': list(range(1,51,1)), 'min_samples_split': [2, 3, 4, 5, 6, 7, 8, 9, 10]}
]
classifier = tree.DecisionTreeClassifier()
grid_search = GridSearchCV(classifier, parameters, cv=5, scoring='accuracy')
grid_search.fit(dftrain[["A","B"]].as_matrix(), dftrain["label"].as_matrix())
score = grid_search.score(dftest[["A","B"]].as_matrix(),dftest["label"].as_matrix())
file.write("decision_tree,%f,%f\n" % (grid_search.best_score_,score))
def runRandomForest(file):
parameters = [
{'max_depth': list(range(1,51,1)), 'min_samples_split': [2, 3, 4, 5, 6, 7, 8, 9, 10]}
]
classifier = RandomForestClassifier()
grid_search = GridSearchCV(classifier, parameters, cv=5, scoring='accuracy')
grid_search.fit(dftrain[["A","B"]].as_matrix(), dftrain["label"].as_matrix())
score = grid_search.score(dftest[["A","B"]].as_matrix(),dftest["label"].as_matrix())
file.write("random_forest,%f,%f\n" % (grid_search.best_score_,score))
with open(outputfile,"w") as file:
runLinear(file)
runPoly(file)
runRBF(file)
runLogisticRegression(file)
runKnn(file)
runDecisionTree(file)
runRandomForest(file)
#possiblecs =
#gammas = [0.1,0.5]
#for c in possiblecs:
# runSVM('linear',c,'auto')
#for c in possiblecs:
# for gamma in gammas:
# runSVM('poly',c,gamma)
#for c in possiblecs:
# for gamma in gammas:
# runSVM('rbf',c,gamma)
| apache-2.0 | -2,348,761,504,530,381,000 | 39.272059 | 94 | 0.631915 | false |
ifp-uiuc/do-neural-networks-learn-faus-iccvw-2015 | tfd/cnn_a/train.py | 1 | 3189 | import argparse
import os
import sys
sys.path.append('..')
import numpy
from anna import util
from anna.datasets import supervised_dataset
from anna.datasets.supervised_data_loader import SupervisedDataLoader
import data_paths
from model import SupervisedModel
parser = argparse.ArgumentParser(prog='train_cnn_with_data_augmentation',
description='Script to train convolutional\
network from random initialization with \
data augmentation.')
parser.add_argument("-s", "--split", default='0', help='Training split of TFD \
to use. (0-4)')
args = parser.parse_args()
print('Start')
train_split = int(args.split)
if train_split < 0 or train_split > 4:
raise Exception("Training Split must be in range 0-4.")
print('Using TFD training split: {}'.format(train_split))
pid = os.getpid()
print('PID: {}'.format(pid))
f = open('pid_'+str(train_split), 'wb')
f.write(str(pid)+'\n')
f.close()
# Load model
model = SupervisedModel('experiment', './', learning_rate=1e-2)
monitor = util.Monitor(model,
checkpoint_directory='checkpoints_'+str(train_split),
save_steps=1000)
# Loading TFD dataset
print('Loading Data')
supervised_data_loader = SupervisedDataLoader(
os.path.join(data_paths.tfd_data_path, 'npy_files/TFD_96/split_'+str(train_split)))
train_data_container = supervised_data_loader.load(0)
val_data_container = supervised_data_loader.load(1)
test_data_container = supervised_data_loader.load(2)
X_train = train_data_container.X
y_train = train_data_container.y
X_val = val_data_container.X
y_val = val_data_container.y
X_test = test_data_container.X
y_test = test_data_container.y
X_train = numpy.float32(X_train)
X_train /= 255.0
X_train *= 2.0
X_val = numpy.float32(X_val)
X_val /= 255.0
X_val *= 2.0
X_test = numpy.float32(X_test)
X_test /= 255.0
X_test *= 2.0
train_dataset = supervised_dataset.SupervisedDataset(X_train, y_train)
val_dataset = supervised_dataset.SupervisedDataset(X_val, y_val)
train_iterator = train_dataset.iterator(
mode='random_uniform', batch_size=64, num_batches=31000)
val_iterator = val_dataset.iterator(
mode='random_uniform', batch_size=64, num_batches=31000)
# Do data augmentation (crops, flips, rotations, scales, intensity)
data_augmenter = util.DataAugmenter2(crop_shape=(96, 96),
flip=True, gray_on=True)
normer = util.Normer3(filter_size=5, num_channels=1)
module_list_train = [data_augmenter, normer]
module_list_val = [normer]
preprocessor_train = util.Preprocessor(module_list_train)
preprocessor_val = util.Preprocessor(module_list_val)
print('Training Model')
for x_batch, y_batch in train_iterator:
x_batch = preprocessor_train.run(x_batch)
monitor.start()
log_prob, accuracy = model.train(x_batch, y_batch)
monitor.stop(1-accuracy)
if monitor.test:
monitor.start()
x_val_batch, y_val_batch = val_iterator.next()
x_val_batch = preprocessor_val.run(x_val_batch)
val_accuracy = model.eval(x_val_batch, y_val_batch)
monitor.stop_test(1-val_accuracy)
| bsd-3-clause | -1,721,680,931,794,856,200 | 31.876289 | 87 | 0.679837 | false |
monikagrabowska/osf.io | osf/models/base.py | 1 | 26122 |
import logging
import random
from datetime import datetime
import bson
import modularodm.exceptions
import pytz
from django.contrib.contenttypes.fields import (GenericForeignKey,
GenericRelation)
from django.contrib.contenttypes.models import ContentType
from django.contrib.postgres.fields import ArrayField
from django.core.exceptions import MultipleObjectsReturned
from django.core.exceptions import ValidationError as DjangoValidationError
from django.db import models
from django.db.models import F
from django.db.models import ForeignKey
from django.db.models import Q
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone
from osf.utils.caching import cached_property
from osf.exceptions import ValidationError
from osf.modm_compat import to_django_query
from osf.utils.datetime_aware_jsonfield import (DateTimeAwareJSONField,
coerce_nonnaive_datetimes)
from osf.utils.fields import LowercaseCharField, NonNaiveDateTimeField
ALPHABET = '23456789abcdefghjkmnpqrstuvwxyz'
logger = logging.getLogger(__name__)
def generate_guid(length=5):
while True:
guid_id = ''.join(random.sample(ALPHABET, length))
try:
# is the guid in the blacklist
BlackListGuid.objects.get(guid=guid_id)
except BlackListGuid.DoesNotExist:
# it's not, check and see if it's already in the database
try:
Guid.objects.get(_id=guid_id)
except Guid.DoesNotExist:
# valid and unique guid
return guid_id
def generate_object_id():
return str(bson.ObjectId())
class MODMCompatibilityQuerySet(models.QuerySet):
def __getitem__(self, k):
item = super(MODMCompatibilityQuerySet, self).__getitem__(k)
if hasattr(item, 'wrapped'):
return item.wrapped()
else:
return item
def __iter__(self):
items = super(MODMCompatibilityQuerySet, self).__iter__()
for item in items:
if hasattr(item, 'wrapped'):
yield item.wrapped()
else:
yield item
def sort(self, *fields):
# Fields are passed in as e.g. [('title', 1), ('date_created', -1)]
if isinstance(fields[0], list):
fields = fields[0]
def sort_key(item):
if isinstance(item, basestring):
return item
elif isinstance(item, tuple):
field_name, direction = item
prefix = '-' if direction == -1 else ''
return ''.join([prefix, field_name])
sort_keys = [sort_key(each) for each in fields]
return self.order_by(*sort_keys)
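    # Illustrative usage only (the model name is hypothetical): the modm-style call
    #   SomeModel.find().sort([('title', 1), ('date_created', -1)])
    # translates into the Django equivalent
    #   SomeModel.objects.order_by('title', '-date_created')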
def limit(self, n):
return self[:n]
class BaseModel(models.Model):
"""Base model that acts makes subclasses mostly compatible with the
modular-odm ``StoredObject`` interface.
"""
migration_page_size = 50000
objects = MODMCompatibilityQuerySet.as_manager()
class Meta:
abstract = True
@classmethod
def load(cls, data):
try:
if issubclass(cls, GuidMixin):
return cls.objects.get(guids___id=data)
elif issubclass(cls, ObjectIDMixin):
return cls.objects.get(_id=data)
elif isinstance(data, basestring):
# Some models (CitationStyle) have an _id that is not a bson
# Looking up things by pk will never work with a basestring
return cls.objects.get(_id=data)
return cls.objects.get(pk=data)
except cls.DoesNotExist:
return None
@classmethod
def find_one(cls, query):
try:
return cls.objects.get(to_django_query(query, model_cls=cls))
except cls.DoesNotExist:
raise modularodm.exceptions.NoResultsFound()
except cls.MultipleObjectsReturned as e:
raise modularodm.exceptions.MultipleResultsFound(*e.args)
@classmethod
def find(cls, query=None):
if not query:
return cls.objects.all()
else:
return cls.objects.filter(to_django_query(query, model_cls=cls))
@classmethod
def remove(cls, query=None):
return cls.find(query).delete()
@classmethod
def remove_one(cls, obj):
if obj.pk:
return obj.delete()
@classmethod
def migrate_from_modm(cls, modm_obj):
"""
Given a modm object, make a django object with the same local fields.
This is a base method that may work for simple objects.
It should be customized in the child class if it doesn't work.
:param modm_obj:
:return:
"""
django_obj = cls()
local_django_fields = set([x.name for x in django_obj._meta.get_fields() if not x.is_relation])
intersecting_fields = set(modm_obj.to_storage().keys()).intersection(
set(local_django_fields))
for field in intersecting_fields:
modm_value = getattr(modm_obj, field)
if modm_value is None:
continue
if isinstance(modm_value, datetime):
modm_value = pytz.utc.localize(modm_value)
# TODO Remove this after migration
if isinstance(django_obj._meta.get_field(field), DateTimeAwareJSONField):
modm_value = coerce_nonnaive_datetimes(modm_value)
setattr(django_obj, field, modm_value)
return django_obj
@property
def _primary_name(self):
return '_id'
def reload(self):
return self.refresh_from_db()
def _natural_key(self):
return self.pk
def clone(self):
"""Create a new, unsaved copy of this object."""
copy = self.__class__.objects.get(pk=self.pk)
copy.id = None
# empty all the fks
fk_field_names = [f.name for f in self._meta.model._meta.get_fields() if isinstance(f, (ForeignKey, GenericForeignKey))]
for field_name in fk_field_names:
setattr(copy, field_name, None)
try:
copy._id = bson.ObjectId()
except AttributeError:
pass
return copy
def save(self, *args, **kwargs):
# Make Django validate on save (like modm)
if not kwargs.get('force_insert') and not kwargs.get('force_update'):
try:
self.full_clean()
except DjangoValidationError as err:
raise ValidationError(*err.args)
return super(BaseModel, self).save(*args, **kwargs)
# TODO: Rename to Identifier?
class Guid(BaseModel):
"""Stores either a short guid or long object_id for any model that inherits from BaseIDMixin.
Each ID field (e.g. 'guid', 'object_id') MUST have an accompanying method, named with
'initialize_<ID type>' (e.g. 'initialize_guid') that generates and sets the field.
"""
primary_identifier_name = '_id'
# TODO DELETE ME POST MIGRATION
modm_query = None
migration_page_size = 500000
# /TODO DELETE ME POST MIGRATION
id = models.AutoField(primary_key=True)
_id = LowercaseCharField(max_length=255, null=False, blank=False, default=generate_guid, db_index=True,
unique=True)
referent = GenericForeignKey()
content_type = models.ForeignKey(ContentType, null=True, blank=True)
object_id = models.PositiveIntegerField(null=True, blank=True)
created = NonNaiveDateTimeField(db_index=True, default=timezone.now) # auto_now_add=True)
# Override load in order to load by GUID
@classmethod
def load(cls, data):
try:
return cls.objects.get(_id=data)
except cls.DoesNotExist:
return None
def reload(self):
del self._referent_cache
return super(Guid, self).reload()
@classmethod
def migrate_from_modm(cls, modm_obj, object_id=None, content_type=None):
"""
Given a modm Guid make a django Guid
:param object_id:
:param content_type:
:param modm_obj:
:return:
"""
django_obj = cls()
if modm_obj._id != modm_obj.referent._id:
# if the object has a BSON id, get the created date from that
django_obj.created = bson.ObjectId(modm_obj.referent._id).generation_time
else:
# just make it now
django_obj.created = timezone.now()
django_obj._id = modm_obj._id
if object_id and content_type:
# if the referent was passed set the GFK to point to it
django_obj.content_type = content_type
django_obj.object_id = object_id
return django_obj
class Meta:
ordering = ['-created']
get_latest_by = 'created'
index_together = (
('content_type', 'object_id', 'created'),
)
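# Illustrative sketch only (not part of this module): the Guid docstring above
# expects each ID field to have an accompanying 'initialize_<ID type>' method
# that generates and sets the field. For a hypothetical model with a 'guid'
# field, that could look like:
#
#     def initialize_guid(self):
#         self.guid = generate_guid()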
class BlackListGuid(BaseModel):
# TODO DELETE ME POST MIGRATION
modm_model_path = 'framework.guid.model.BlacklistGuid'
primary_identifier_name = 'guid'
modm_query = None
migration_page_size = 500000
# /TODO DELETE ME POST MIGRATION
id = models.AutoField(primary_key=True)
guid = LowercaseCharField(max_length=255, unique=True, db_index=True)
@property
def _id(self):
return self.guid
@classmethod
def migrate_from_modm(cls, modm_obj):
"""
Given a modm BlacklistGuid make a django BlackListGuid
:param modm_obj:
:return:
"""
django_obj = cls()
django_obj.guid = modm_obj._id
return django_obj
def generate_guid_instance():
return Guid.objects.create().id
class PKIDStr(str):
def __new__(self, _id, pk):
return str.__new__(self, _id)
def __init__(self, _id, pk):
self.__pk = pk
def __int__(self):
return self.__pk
class BaseIDMixin(models.Model):
@classmethod
def migrate_from_modm(cls, modm_obj):
"""
Given a modm object, make a django object with the same local fields.
This is a base method that may work for simple objects.
It should be customized in the child class if it doesn't work.
:param modm_obj:
:return:
"""
django_obj = cls()
local_django_fields = set([x.name for x in django_obj._meta.get_fields() if not x.is_relation])
intersecting_fields = set(modm_obj.to_storage().keys()).intersection(
set(local_django_fields))
for field in intersecting_fields:
modm_value = getattr(modm_obj, field)
if modm_value is None:
continue
if isinstance(modm_value, datetime):
modm_value = pytz.utc.localize(modm_value)
# TODO Remove this after migration
if isinstance(django_obj._meta.get_field(field), DateTimeAwareJSONField):
modm_value = coerce_nonnaive_datetimes(modm_value)
setattr(django_obj, field, modm_value)
return django_obj
class Meta:
abstract = True
class ObjectIDMixin(BaseIDMixin):
primary_identifier_name = '_id'
_id = models.CharField(max_length=24, default=generate_object_id, unique=True, db_index=True)
@classmethod
def load(cls, q):
try:
return cls.objects.get(_id=q)
except cls.DoesNotExist:
# modm doesn't throw exceptions when loading things that don't exist
return None
@classmethod
def migrate_from_modm(cls, modm_obj):
django_obj = super(ObjectIDMixin, cls).migrate_from_modm(modm_obj)
django_obj._id = str(modm_obj._id)
return django_obj
class Meta:
abstract = True
def _natural_key(self):
return self._id
class InvalidGuid(Exception):
pass
class OptionalGuidMixin(BaseIDMixin):
"""
This makes it so that things can **optionally** have guids. Think files.
Things that inherit from this must also inherit from ObjectIDMixin ... probably
"""
__guid_min_length__ = 5
guids = GenericRelation(Guid, related_name='referent', related_query_name='referents')
guid_string = ArrayField(models.CharField(max_length=255, null=True, blank=True), null=True, blank=True)
content_type_pk = models.PositiveIntegerField(null=True, blank=True)
def get_guid(self, create=False):
if create:
try:
guid, created = Guid.objects.get_or_create(
object_id=self.pk,
content_type_id=ContentType.objects.get_for_model(self).pk
)
except MultipleObjectsReturned:
# lol, hacks
pass
else:
return guid
return self.guids.order_by('-created').first()
@classmethod
def migrate_from_modm(cls, modm_obj):
instance = super(OptionalGuidMixin, cls).migrate_from_modm(modm_obj)
from website.models import Guid as MODMGuid
from modularodm import Q as MODMQ
if modm_obj.get_guid():
guids = MODMGuid.find(MODMQ('referent', 'eq', modm_obj._id))
setattr(instance, 'guid_string', [x.lower() for x in guids.get_keys()])
setattr(instance, 'content_type_pk', ContentType.objects.get_for_model(cls).pk)
return instance
class Meta:
abstract = True
class GuidMixinQuerySet(MODMCompatibilityQuerySet):
tables = ['osf_guid', 'django_content_type']
GUID_FIELDS = [
'guids__id',
'guids___id',
'guids__content_type_id',
'guids__object_id',
'guids__created'
]
def safe_table_alias(self, table_name, create=False):
"""
Returns a table alias for the given table_name and whether this is a
new alias or not.
If 'create' is true, a new alias is always created. Otherwise, the
most recently created alias for the table (if one exists) is reused.
"""
alias_list = self.query.table_map.get(table_name)
if not create and alias_list:
alias = alias_list[0]
if alias in self.query.alias_refcount:
self.query.alias_refcount[alias] += 1
else:
self.query.alias_refcount[alias] = 1
return alias, False
# Create a new alias for this table.
if alias_list:
alias = '%s%d' % (self.query.alias_prefix, len(self.query.alias_map) + 1)
alias_list.append(alias)
else:
# The first occurrence of a table uses the table name directly.
alias = table_name
self.query.table_map[alias] = [alias]
self.query.alias_refcount[alias] = 1
self.tables.append(alias)
return alias, True
def annotate_query_with_guids(self):
self._prefetch_related_lookups = ['guids']
for field in self.GUID_FIELDS:
self.query.add_annotation(
F(field), '_{}'.format(field), is_summary=False
)
for table in self.tables:
if table not in self.query.tables:
self.safe_table_alias(table)
def remove_guid_annotations(self):
for k, v in self.query.annotations.iteritems():
if k[1:] in self.GUID_FIELDS:
del self.query.annotations[k]
for table_name in ['osf_guid', 'django_content_type']:
if table_name in self.query.alias_map:
del self.query.alias_map[table_name]
if table_name in self.query.alias_refcount:
del self.query.alias_refcount[table_name]
if table_name in self.query.tables:
del self.query.tables[self.query.tables.index(table_name)]
def _clone(self, annotate=False, **kwargs):
query = self.query.clone()
if self._sticky_filter:
query.filter_is_sticky = True
if annotate:
self.annotate_query_with_guids()
clone = self.__class__(model=self.model, query=query, using=self._db, hints=self._hints)
# this method was copied from the default django queryset except for the below two lines
if annotate:
clone.annotate_query_with_guids()
clone._for_write = self._for_write
clone._prefetch_related_lookups = self._prefetch_related_lookups[:]
clone._known_related_objects = self._known_related_objects
clone._iterable_class = self._iterable_class
clone._fields = self._fields
clone.__dict__.update(kwargs)
return clone
def annotate(self, *args, **kwargs):
self.annotate_query_with_guids()
return super(GuidMixinQuerySet, self).annotate(*args, **kwargs)
def _filter_or_exclude(self, negate, *args, **kwargs):
if args or kwargs:
assert self.query.can_filter(), \
'Cannot filter a query once a slice has been taken.'
clone = self._clone(annotate=True)
if negate:
clone.query.add_q(~Q(*args, **kwargs))
else:
clone.query.add_q(Q(*args, **kwargs))
return clone
def all(self):
return self._clone(annotate=True)
# does implicit filter
def get(self, *args, **kwargs):
# add this to make sure we don't get dupes
self.query.add_distinct_fields('id')
return super(GuidMixinQuerySet, self).get(*args, **kwargs)
# TODO: Below lines are commented out to ensure that
# the annotations are used after running .count()
# e.g.
# queryset.count()
# queryset[0]
# This is more efficient when doing chained operations
# on a queryset, but less efficient when only getting a count.
# Figure out a way to get the best of both worlds
# def count(self):
# self.remove_guid_annotations()
# return super(GuidMixinQuerySet, self).count()
def update(self, **kwargs):
self.remove_guid_annotations()
return super(GuidMixinQuerySet, self).update(**kwargs)
def update_or_create(self, defaults=None, **kwargs):
self.remove_guid_annotations()
return super(GuidMixinQuerySet, self).update_or_create(defaults=defaults, **kwargs)
def values(self, *fields):
self.remove_guid_annotations()
return super(GuidMixinQuerySet, self).values(*fields)
def create(self, **kwargs):
self.remove_guid_annotations()
return super(GuidMixinQuerySet, self).create(**kwargs)
def bulk_create(self, objs, batch_size=None):
self.remove_guid_annotations()
return super(GuidMixinQuerySet, self).bulk_create(objs, batch_size)
def get_or_create(self, defaults=None, **kwargs):
self.remove_guid_annotations()
return super(GuidMixinQuerySet, self).get_or_create(defaults, **kwargs)
def values_list(self, *fields, **kwargs):
self.remove_guid_annotations()
return super(GuidMixinQuerySet, self).values_list(*fields, **kwargs)
def exists(self):
self.remove_guid_annotations()
return super(GuidMixinQuerySet, self).exists()
def _fetch_all(self):
if self._result_cache is None:
self._result_cache = list(self._iterable_class(self))
if self._prefetch_related_lookups and not self._prefetch_done:
if 'guids' in self._prefetch_related_lookups and self._result_cache and hasattr(self._result_cache[0], '_guids__id'):
# if guids is requested for prefetch and there are things in the result cache and the first one has
# the annotated guid fields then remove guids from prefetch_related_lookups
del self._prefetch_related_lookups[self._prefetch_related_lookups.index('guids')]
results = []
for result in self._result_cache:
# loop through the result cache
guid_dict = {}
for field in self.GUID_FIELDS:
# pull the fields off of the result object and put them in a dictionary without prefixed names
guid_dict[field] = getattr(result, '_{}'.format(field), None)
if None in guid_dict.values():
# if we get an invalid result field value, stop
logger.warning(
                            'Annotated guids came back with None values for {}, resorting to extra query'.format(result))
return
if not hasattr(result, '_prefetched_objects_cache'):
# initialize _prefetched_objects_cache
result._prefetched_objects_cache = {}
if 'guids' not in result._prefetched_objects_cache:
                        # initialize guids in _prefetched_objects_cache
result._prefetched_objects_cache['guids'] = []
                    # build a result dictionary keyed by the un-prefixed guid field names
result_dict = {key.replace('guids__', ''): value for key, value in guid_dict.iteritems()}
# make an unsaved guid instance
guid = Guid(**result_dict)
result._prefetched_objects_cache['guids'].append(guid)
results.append(result)
# replace the result cache with the new set of results
self._result_cache = results
self._prefetch_related_objects()
class GuidMixin(BaseIDMixin):
__guid_min_length__ = 5
primary_identifier_name = 'guid_string'
guids = GenericRelation(Guid, related_name='referent', related_query_name='referents')
guid_string = ArrayField(models.CharField(max_length=255, null=True, blank=True), null=True, blank=True)
content_type_pk = models.PositiveIntegerField(null=True, blank=True)
objects = GuidMixinQuerySet.as_manager()
# TODO: use pre-delete signal to disable delete cascade
def _natural_key(self):
return self.guid_string
@cached_property
def _id(self):
try:
guid = self.guids.all()[0]
except IndexError:
return None
if guid:
return guid._id
return None
@_id.setter
def _id(self, value):
# TODO do we really want to allow this?
guid, created = Guid.objects.get_or_create(_id=value)
if created:
guid.object_id = self.pk
guid.content_type = ContentType.objects.get_for_model(self)
guid.save()
elif guid.content_type == ContentType.objects.get_for_model(self) and guid.object_id == self.pk:
# TODO should this up the created for the guid until now so that it appears as the first guid
# for this object?
return
else:
raise InvalidGuid('Cannot indirectly repoint an existing guid, please use the Guid model')
_primary_key = _id
@classmethod
def load(cls, q):
try:
content_type = ContentType.objects.get_for_model(cls)
# if referent doesn't exist it will return None
return Guid.objects.get(_id=q, content_type=content_type).referent
except Guid.DoesNotExist:
# modm doesn't throw exceptions when loading things that don't exist
return None
@property
def deep_url(self):
return None
@classmethod
def migrate_from_modm(cls, modm_obj):
"""
Given a modm object, make a django object with the same local fields.
This is a base method that may work for simple objects.
It should be customized in the child class if it doesn't work.
:param modm_obj:
:return:
"""
django_obj = cls()
local_django_fields = set(
[x.name for x in django_obj._meta.get_fields() if not x.is_relation and x.name != '_id'])
intersecting_fields = set(modm_obj.to_storage().keys()).intersection(
set(local_django_fields))
for field in intersecting_fields:
modm_value = getattr(modm_obj, field)
if modm_value is None:
continue
if isinstance(modm_value, datetime):
modm_value = pytz.utc.localize(modm_value)
# TODO Remove this after migration
if isinstance(django_obj._meta.get_field(field), DateTimeAwareJSONField):
modm_value = coerce_nonnaive_datetimes(modm_value)
setattr(django_obj, field, modm_value)
from website.models import Guid as MODMGuid
from modularodm import Q as MODMQ
guids = MODMGuid.find(MODMQ('referent', 'eq', modm_obj._id))
setattr(django_obj, 'guid_string', list(set([x.lower() for x in guids.get_keys()])))
setattr(django_obj, 'content_type_pk', ContentType.objects.get_for_model(cls).pk)
return django_obj
class Meta:
abstract = True
@receiver(post_save)
def ensure_guid(sender, instance, created, **kwargs):
if not issubclass(sender, GuidMixin):
return False
existing_guids = Guid.objects.filter(object_id=instance.pk, content_type=ContentType.objects.get_for_model(instance))
has_cached_guids = hasattr(instance, '_prefetched_objects_cache') and 'guids' in instance._prefetched_objects_cache
if not existing_guids.exists():
# Clear query cache of instance.guids
if has_cached_guids:
del instance._prefetched_objects_cache['guids']
Guid.objects.create(object_id=instance.pk, content_type=ContentType.objects.get_for_model(instance),
_id=generate_guid(instance.__guid_min_length__))
elif not existing_guids.exists() and instance.guid_string is not None:
# Clear query cache of instance.guids
if has_cached_guids:
del instance._prefetched_objects_cache['guids']
Guid.objects.create(object_id=instance.pk, content_type_id=instance.content_type_pk,
_id=instance.guid_string)
| apache-2.0 | 2,214,120,148,595,470,600 | 34.588556 | 129 | 0.607917 | false |
rohitwaghchaure/frappe | frappe/model/meta.py | 1 | 15604 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# metadata
'''
Load metadata (DocType) class
Example:
meta = frappe.get_meta('User')
if meta.has_field('first_name'):
print "DocType" table has field "first_name"
'''
from __future__ import unicode_literals
import frappe, json, os
from frappe.utils import cstr, cint
from frappe.model import default_fields, no_value_fields, optional_fields
from frappe.model.document import Document
from frappe.model.base_document import BaseDocument
from frappe.model.db_schema import type_map
from frappe.modules import load_doctype_module
from frappe import _
def get_meta(doctype, cached=True):
if cached:
return frappe.cache().hget("meta", doctype, lambda: Meta(doctype))
else:
return Meta(doctype)
def get_table_columns(doctype):
return frappe.cache().hget("table_columns", doctype,
lambda: frappe.db.get_table_columns(doctype))
def load_doctype_from_file(doctype):
fname = frappe.scrub(doctype)
with open(frappe.get_app_path("frappe", "core", "doctype", fname, fname + ".json"), "r") as f:
txt = json.loads(f.read())
for d in txt.get("fields", []):
d["doctype"] = "DocField"
for d in txt.get("permissions", []):
d["doctype"] = "DocPerm"
txt["fields"] = [BaseDocument(d) for d in txt["fields"]]
if "permissions" in txt:
txt["permissions"] = [BaseDocument(d) for d in txt["permissions"]]
return txt
class Meta(Document):
_metaclass = True
default_fields = list(default_fields)[1:]
special_doctypes = ("DocField", "DocPerm", "Role", "DocType", "Module Def")
def __init__(self, doctype):
self._fields = {}
if isinstance(doctype, Document):
super(Meta, self).__init__(doctype.as_dict())
else:
super(Meta, self).__init__("DocType", doctype)
self.process()
def load_from_db(self):
try:
super(Meta, self).load_from_db()
except frappe.DoesNotExistError:
if self.doctype=="DocType" and self.name in self.special_doctypes:
self.__dict__.update(load_doctype_from_file(self.name))
else:
raise
def get_link_fields(self):
return self.get("fields", {"fieldtype": "Link", "options":["!=", "[Select]"]})
def get_dynamic_link_fields(self):
if not hasattr(self, '_dynamic_link_fields'):
self._dynamic_link_fields = self.get("fields", {"fieldtype": "Dynamic Link"})
return self._dynamic_link_fields
def get_select_fields(self):
return self.get("fields", {"fieldtype": "Select", "options":["not in",
["[Select]", "Loading..."]]})
def get_table_fields(self):
if not hasattr(self, "_table_fields"):
if self.name!="DocType":
self._table_fields = self.get('fields', {"fieldtype":"Table"})
else:
self._table_fields = doctype_table_fields
return self._table_fields
def get_global_search_fields(self):
'''Returns list of fields with `in_global_search` set and `name` if set'''
fields = self.get("fields", {"in_global_search": 1 })
if getattr(self, 'show_name_in_global_search', None):
fields.append(frappe._dict(fieldtype='Data', fieldname='name', label='Name'))
return fields
def get_valid_columns(self):
if not hasattr(self, "_valid_columns"):
if self.name in ("DocType", "DocField", "DocPerm", "Property Setter"):
self._valid_columns = get_table_columns(self.name)
else:
self._valid_columns = self.default_fields + \
[df.fieldname for df in self.get("fields") if df.fieldtype in type_map]
return self._valid_columns
def get_table_field_doctype(self, fieldname):
return { "fields": "DocField", "permissions": "DocPerm"}.get(fieldname)
def get_field(self, fieldname):
'''Return docfield from meta'''
if not self._fields:
for f in self.get("fields"):
self._fields[f.fieldname] = f
return self._fields.get(fieldname)
def has_field(self, fieldname):
'''Returns True if fieldname exists'''
return True if self.get_field(fieldname) else False
def get_label(self, fieldname):
'''Get label of the given fieldname'''
df = self.get_field(fieldname)
if df:
label = df.label
else:
label = {
'name': _('ID'),
'owner': _('Created By'),
'modified_by': _('Modified By'),
'creation': _('Created On'),
'modified': _('Last Modified On')
}.get(fieldname) or _('No Label')
return label
def get_options(self, fieldname):
return self.get_field(fieldname).options
def get_link_doctype(self, fieldname):
df = self.get_field(fieldname)
if df.fieldtype == "Link":
return df.options
elif df.fieldtype == "Dynamic Link":
return self.get_options(df.options)
else:
return None
def get_search_fields(self):
search_fields = self.search_fields or "name"
search_fields = [d.strip() for d in search_fields.split(",")]
if "name" not in search_fields:
search_fields.append("name")
return search_fields
def get_fields_to_fetch(self, link_fieldname=None):
'''Returns a list of docfield objects for fields whose values
are to be fetched and updated for a particular link field
These fields are of type Data, Link, Text, Readonly and their
options property is set as `link_fieldname`.`source_fieldname`'''
out = []
if not link_fieldname:
link_fields = [df.fieldname for df in self.get_link_fields()]
for df in self.fields:
if df.fieldtype in ('Data', 'Read Only', 'Text', 'Small Text',
'Text Editor', 'Code') and df.options:
if link_fieldname:
if df.options.startswith(link_fieldname + '.'):
out.append(df)
else:
if '.' in df.options:
fieldname = df.options.split('.', 1)[0]
if fieldname in link_fields:
out.append(df)
return out
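	# Illustrative example only (fieldnames are hypothetical): a Read Only field
	# whose options are set to "customer.customer_name" is returned by
	# get_fields_to_fetch('customer'), so its value can be copied over from the
	# linked Customer document whenever the 'customer' link field changes.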
def get_list_fields(self):
list_fields = ["name"] + [d.fieldname \
for d in self.fields if (d.in_list_view and d.fieldtype in type_map)]
if self.title_field and self.title_field not in list_fields:
list_fields.append(self.title_field)
return list_fields
def get_custom_fields(self):
return [d for d in self.fields if d.get('is_custom_field')]
def get_title_field(self):
'''Return the title field of this doctype,
		explicit via `title_field`, or `title` or `name`'''
title_field = getattr(self, 'title_field', None)
if not title_field and self.has_field('title'):
title_field = 'title'
		if not title_field:
			title_field = 'name'
return title_field
def process(self):
# don't process for special doctypes
		# prevents circular dependency
if self.name in self.special_doctypes:
return
self.add_custom_fields()
self.apply_property_setters()
self.sort_fields()
self.get_valid_columns()
self.set_custom_permissions()
def add_custom_fields(self):
try:
self.extend("fields", frappe.db.sql("""SELECT * FROM `tabCustom Field`
WHERE dt = %s AND docstatus < 2""", (self.name,), as_dict=1,
update={"is_custom_field": 1}))
except Exception, e:
if e.args[0]==1146:
return
else:
raise
def apply_property_setters(self):
property_setters = frappe.db.sql("""select * from `tabProperty Setter` where
doc_type=%s""", (self.name,), as_dict=1)
if not property_setters: return
integer_docfield_properties = [d.fieldname for d in frappe.get_meta('DocField').fields
if d.fieldtype in ('Int', 'Check')]
for ps in property_setters:
if ps.doctype_or_field=='DocType':
if ps.property_type in ('Int', 'Check'):
ps.value = cint(ps.value)
self.set(ps.property, ps.value)
else:
docfield = self.get("fields", {"fieldname":ps.field_name}, limit=1)
if docfield:
docfield = docfield[0]
else:
continue
if ps.property in integer_docfield_properties:
ps.value = cint(ps.value)
docfield.set(ps.property, ps.value)
def sort_fields(self):
"""sort on basis of insert_after"""
custom_fields = sorted(self.get_custom_fields(), key=lambda df: df.idx)
if custom_fields:
newlist = []
# if custom field is at top
# insert_after is false
for c in list(custom_fields):
if not c.insert_after:
newlist.append(c)
custom_fields.pop(custom_fields.index(c))
# standard fields
newlist += [df for df in self.get('fields') if not df.get('is_custom_field')]
newlist_fieldnames = [df.fieldname for df in newlist]
for i in xrange(2):
for df in list(custom_fields):
if df.insert_after in newlist_fieldnames:
cf = custom_fields.pop(custom_fields.index(df))
idx = newlist_fieldnames.index(df.insert_after)
newlist.insert(idx + 1, cf)
newlist_fieldnames.insert(idx + 1, cf.fieldname)
if not custom_fields:
break
# worst case, add remaining custom fields to last
if custom_fields:
newlist += custom_fields
# renum idx
for i, f in enumerate(newlist):
f.idx = i + 1
self.fields = newlist
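	# Illustrative example only (fieldnames are hypothetical): with standard
	# fields [a, b, c] and a custom field x whose insert_after is 'b', the
	# sorted order becomes [a, b, x, c]; custom fields whose insert_after does
	# not match any fieldname are appended at the end.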
def set_custom_permissions(self):
'''Reset `permissions` with Custom DocPerm if exists'''
if frappe.flags.in_patch or frappe.flags.in_import:
return
if not self.istable and self.name not in ('DocType', 'DocField', 'DocPerm',
'Custom DocPerm'):
custom_perms = frappe.get_all('Custom DocPerm', fields='*',
filters=dict(parent=self.name), update=dict(doctype='Custom DocPerm'))
if custom_perms:
self.permissions = [Document(d) for d in custom_perms]
def get_fieldnames_with_value(self):
return [df.fieldname for df in self.fields if df.fieldtype not in no_value_fields]
def get_fields_to_check_permissions(self, user_permission_doctypes):
fields = self.get("fields", {
"fieldtype":"Link",
"parent": self.name,
"ignore_user_permissions":("!=", 1),
"options":("in", user_permission_doctypes)
})
if self.name in user_permission_doctypes:
fields.append(frappe._dict({
"label":"Name",
"fieldname":"name",
"options": self.name
}))
return fields
def get_high_permlevel_fields(self):
"""Build list of fields with high perm level and all the higher perm levels defined."""
if not hasattr(self, "high_permlevel_fields"):
self.high_permlevel_fields = []
for df in self.fields:
if df.permlevel > 0:
self.high_permlevel_fields.append(df)
return self.high_permlevel_fields
def get_dashboard_data(self):
'''Returns dashboard setup related to this doctype.
		This method will return the data returned by the `get_data()` function
		defined in the `[doctype]_dashboard.py` file in the doctype folder'''
data = frappe._dict()
try:
module = load_doctype_module(self.name, suffix='_dashboard')
if hasattr(module, 'get_data'):
data = frappe._dict(module.get_data())
except ImportError:
pass
return data
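	# A minimal sketch, assuming a hypothetical "ToDo" doctype: a module named
	# todo_dashboard.py in the doctype folder would define a get_data() function
	# returning a dict, for example:
	#
	#     def get_data():
	#         return {'fieldname': 'todo', 'transactions': []}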
def get_row_template(self):
return self.get_web_template(suffix='_row')
def get_web_template(self, suffix=''):
'''Returns the relative path of the row template for this doctype'''
module_name = frappe.scrub(self.module)
doctype = frappe.scrub(self.name)
template_path = frappe.get_module_path(module_name, 'doctype',
doctype, 'templates', doctype + suffix + '.html')
if os.path.exists(template_path):
return '{module_name}/doctype/{doctype_name}/templates/{doctype_name}{suffix}.html'.format(
module_name = module_name, doctype_name = doctype, suffix=suffix)
return None
doctype_table_fields = [
frappe._dict({"fieldname": "fields", "options": "DocField"}),
frappe._dict({"fieldname": "permissions", "options": "DocPerm"})
]
#######
def is_single(doctype):
try:
return frappe.db.get_value("DocType", doctype, "issingle")
except IndexError:
raise Exception, 'Cannot determine whether %s is single' % doctype
def get_parent_dt(dt):
parent_dt = frappe.db.sql("""select parent from tabDocField
where fieldtype="Table" and options=%s and (parent not like "old_parent:%%")
limit 1""", dt)
return parent_dt and parent_dt[0][0] or ''
def set_fieldname(field_id, fieldname):
frappe.db.set_value('DocField', field_id, 'fieldname', fieldname)
def get_field_currency(df, doc=None):
"""get currency based on DocField options and fieldvalue in doc"""
currency = None
if not df.get("options"):
return None
if not doc:
return None
if not getattr(frappe.local, "field_currency", None):
frappe.local.field_currency = frappe._dict()
if not (frappe.local.field_currency.get((doc.doctype, doc.name), {}).get(df.fieldname) or
(doc.parent and frappe.local.field_currency.get((doc.doctype, doc.parent), {}).get(df.fieldname))):
ref_docname = doc.parent or doc.name
if ":" in cstr(df.get("options")):
split_opts = df.get("options").split(":")
if len(split_opts)==3:
currency = frappe.db.get_value(split_opts[0], doc.get(split_opts[1]), split_opts[2])
else:
currency = doc.get(df.get("options"))
if doc.parent:
if currency:
ref_docname = doc.name
else:
currency = frappe.db.get_value(doc.parenttype, doc.parent, df.get("options"))
if currency:
frappe.local.field_currency.setdefault((doc.doctype, ref_docname), frappe._dict())\
.setdefault(df.fieldname, currency)
return frappe.local.field_currency.get((doc.doctype, doc.name), {}).get(df.fieldname) or \
(doc.parent and frappe.local.field_currency.get((doc.doctype, doc.parent), {}).get(df.fieldname))
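# Illustrative examples only (fieldnames are hypothetical): for a Currency
# DocField, df.options may either name a currency field on the same document
# (e.g. "currency") or use the three-part form
# "DocType:link_fieldname:currency_fieldname" (e.g. "Company:company:default_currency"),
# which is the case handled by the split(":") branch above.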
def get_field_precision(df, doc=None, currency=None):
"""get precision based on DocField options and fieldvalue in doc"""
from frappe.utils import get_number_format_info
if cint(df.precision):
precision = cint(df.precision)
elif df.fieldtype == "Currency":
number_format = None
if not currency and doc:
currency = get_field_currency(df, doc)
if not currency:
# use default currency
currency = frappe.db.get_default("currency")
if currency:
number_format = frappe.db.get_value("Currency", currency, "number_format", cache=True)
if not number_format:
number_format = frappe.db.get_default("number_format") or "#,###.##"
decimal_str, comma_str, precision = get_number_format_info(number_format)
else:
precision = cint(frappe.db.get_default("float_precision")) or 3
return precision
def get_default_df(fieldname):
if fieldname in default_fields:
if fieldname in ("creation", "modified"):
return frappe._dict(
fieldname = fieldname,
fieldtype = "Datetime"
)
else:
return frappe._dict(
fieldname = fieldname,
fieldtype = "Data"
)
def trim_tables():
"""Use this to remove columns that don't exist in meta"""
ignore_fields = default_fields + optional_fields
for doctype in frappe.db.get_all("DocType", filters={"issingle": 0}):
doctype = doctype.name
columns = frappe.db.get_table_columns(doctype)
fields = frappe.get_meta(doctype).get_fieldnames_with_value()
columns_to_remove = [f for f in list(set(columns) - set(fields)) if f not in ignore_fields
and not f.startswith("_")]
if columns_to_remove:
print doctype, "columns removed:", columns_to_remove
columns_to_remove = ", ".join(["drop `{0}`".format(c) for c in columns_to_remove])
query = """alter table `tab{doctype}` {columns}""".format(
doctype=doctype, columns=columns_to_remove)
frappe.db.sql_ddl(query)
def clear_cache(doctype=None):
cache = frappe.cache()
for key in ('is_table', 'doctype_modules'):
cache.delete_value(key)
groups = ["meta", "form_meta", "table_columns", "last_modified",
"linked_doctypes", 'email_alerts']
def clear_single(dt):
for name in groups:
cache.hdel(name, dt)
if doctype:
clear_single(doctype)
# clear all parent doctypes
for dt in frappe.db.sql("""select parent from tabDocField
where fieldtype="Table" and options=%s""", (doctype,)):
clear_single(dt[0])
# clear all notifications
from frappe.desk.notifications import delete_notification_count_for
delete_notification_count_for(doctype)
else:
# clear all
for name in groups:
cache.delete_value(name)
| mit | -7,037,875,001,456,010,000 | 28.721905 | 101 | 0.680338 | false |
mkhutornenko/incubator-aurora | src/test/python/apache/aurora/client/cli/test_help.py | 1 | 3763 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import contextlib
import unittest
from mock import patch
from apache.aurora.client.cli import EXIT_INVALID_PARAMETER, EXIT_OK
from apache.aurora.client.cli.client import AuroraCommandLine
class TestHelp(unittest.TestCase):
"""Tests of the help command for the Aurora v2 client framework"""
def setUp(self):
self.cmd = AuroraCommandLine()
self.transcript = []
self.err_transcript = []
def mock_print(self, str):
for str in str.split('\n'):
self.transcript.append(str)
def mock_print_err(self, str):
for str in str.split('\n'):
self.err_transcript.append(str)
def test_help(self):
with patch('apache.aurora.client.cli.client.AuroraCommandLine.print_out',
side_effect=self.mock_print):
self.cmd.execute(['help'])
assert len(self.transcript) > 10
assert self.transcript[1] == 'Usage:'
assert '==Commands for jobs' in self.transcript
assert '==Commands for quotas' in self.transcript
def test_help_noun(self):
with patch('apache.aurora.client.cli.client.AuroraCommandLine.print_out',
side_effect=self.mock_print):
self.cmd.execute(['help', 'job'])
assert len(self.transcript) > 10
      assert self.transcript[0] == 'Usage for noun "job":'
assert not any('quota' in t for t in self.transcript)
assert any('job status' in t for t in self.transcript)
assert any('job list' in t for t in self.transcript)
def test_help_verb(self):
with patch('apache.aurora.client.cli.client.AuroraCommandLine.print_out',
side_effect=self.mock_print):
assert self.cmd.execute(['help', 'job', 'status']) == EXIT_OK
assert len(self.transcript) > 5
      assert self.transcript[0] == 'Usage for verb "job status":'
assert not any('quota' in t for t in self.transcript)
assert not any('list' in t for t in self.transcript)
assert "Options:" in self.transcript
assert any('status' for t in self.transcript)
def test_help_unknown_noun(self):
with contextlib.nested(
patch('apache.aurora.client.cli.client.AuroraCommandLine.print_out',
side_effect=self.mock_print),
patch('apache.aurora.client.cli.client.AuroraCommandLine.print_err',
side_effect=self.mock_print_err)):
assert self.cmd.execute(['help', 'nothing']) == EXIT_INVALID_PARAMETER
assert len(self.transcript) == 0
assert len(self.err_transcript) == 2
assert 'Unknown noun "nothing"' == self.err_transcript[0]
assert "Valid nouns" in self.err_transcript[1]
def test_help_unknown_verb(self):
with contextlib.nested(
patch('apache.aurora.client.cli.client.AuroraCommandLine.print_out',
side_effect=self.mock_print),
patch('apache.aurora.client.cli.client.AuroraCommandLine.print_err',
side_effect=self.mock_print_err)):
assert self.cmd.execute(['help', 'job', 'nothing']) == EXIT_INVALID_PARAMETER
assert len(self.transcript) == 0
assert len(self.err_transcript) == 2
assert 'Noun "job" does not support a verb "nothing"' == self.err_transcript[0]
assert 'Valid verbs for "job" are' in self.err_transcript[1]
| apache-2.0 | -4,082,335,328,770,860,000 | 39.902174 | 85 | 0.68642 | false |
MindPass/Code | Interface_graphique/mindmap/svgwrite-1.1.6/tests/test_elementfactory.py | 1 | 2340 | #!/usr/bin/env python
#coding:utf-8
# Author: mozman --<[email protected]>
# Purpose: test elementfactory
# Created: 15.10.2010
# Copyright (C) 2010, Manfred Moitzi
# License: MIT License
import sys
import unittest
from svgwrite.elementfactory import ElementFactory
from svgwrite.params import Parameter
class MockFactory(ElementFactory):
_parameter = Parameter(debug=True, profile='full')
debug = True
profile = 'full'
class TestElementFactory(unittest.TestCase):
def setUp(self):
self.factory = MockFactory()
def test_g(self):
group = self.factory.g(id='test')
self.assertEqual(group.elementname, 'g')
self.assertEqual(group['id'], 'test')
def test_svg(self):
svg = self.factory.svg()
self.assertEqual(svg.elementname, 'svg')
def test_defs(self):
defs = self.factory.defs()
self.assertEqual(defs.elementname, 'defs')
def test_symbol(self):
element = self.factory.symbol()
self.assertEqual(element.elementname, 'symbol')
def test_use(self):
element = self.factory.use('link')
self.assertEqual(element.elementname, 'use')
def test_a(self):
element = self.factory.a('link')
self.assertEqual(element.elementname, 'a')
def test_line(self):
element = self.factory.line((0,0), (1,1))
self.assertEqual(element.elementname, 'line')
def test_rect(self):
element = self.factory.rect((0,0), (1,1))
self.assertEqual(element.elementname, 'rect')
def test_circle(self):
element = self.factory.circle((0,0), 5)
self.assertEqual(element.elementname, 'circle')
def test_ellipse(self):
element = self.factory.ellipse((0,0), (5, 5))
self.assertEqual(element.elementname, 'ellipse')
def test_polygon(self):
element = self.factory.polygon([(0, 0), (5, 5)])
self.assertEqual(element.elementname, 'polygon')
def test_polyline(self):
element = self.factory.polyline([(0, 0), (5, 5)])
self.assertEqual(element.elementname, 'polyline')
def test_AttributeError(self):
try:
self.factory.test()
self.fail('AttributeError not raised.')
except AttributeError:
self.assertTrue(True)
if __name__=='__main__':
unittest.main()
| gpl-3.0 | -9,142,067,781,533,273,000 | 27.888889 | 57 | 0.632479 | false |
CloudNiner/fadds-parser | fadds/twr.py | 1 | 2207 | # -*- coding: utf-8 -*-
"""
Author: @sposs
Date: 19.08.16
"""
from fadds.base_file import BaseFile, BaseData
import re
value_re = re.compile(r"(?P<value>[0-9]+\.*[0-9]*)(?P<use>[A-Z ()0-9-/&]*)")
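# For illustration (a made-up sample field): a raw frequency entry such as
# "121.9 GND" is split by value_re into a numeric value "121.9" and a trailing
# use string "GND" (after stripping).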
class TWRParser(BaseFile):
def __init__(self, twr_file):
super(TWRParser, self).__init__(twr_file)
self.object = TWR
class TWR(BaseData):
key_length = 4
NEW = "TWR1"
DATA = 'TWR1'
HOURS = 'TWR2'
COMFREQ = 'TWR3'
SERVICES = 'TWR4'
RADAR = 'TWR5'
TERMCOM = 'TWR6'
SATELLITE = 'TWR7'
AIRSPACECLASS = 'TWR8'
ATISDATA = 'TWR9'
def __init__(self):
super(TWR, self).__init__()
self.infodate = ""
self.site_num = ""
self.term_facility_type = ""
self.freqs = {"freqs": [], "freqs_untrunc": []}
def special_data(self, record_type, line):
"""
        We only look at general info and communication frequencies
:param str record_type:
:param str line:
:return: None
"""
if record_type == self.DATA:
self.infodate = self.get_value(line, 9, 10)
self.site_num = self.get_value(line, 19, 11).strip()
self.term_facility_type = self.get_value(line, 239, 12).strip()
elif record_type == self.COMFREQ:
d = {"freqs": [], "freqs_untrunc": []}
freqs = []
freqs_untrunc = []
period = 94
for i in range(9):
val = self.get_value(line, 9+period*i, 44).strip()
info = ""
match = value_re.match(val)
if match:
val = match.group("value")
if len(match.groups()) > 1:
info = match.group("use").strip()
use = self.get_value(line, 44+period*i, 50).strip()
if val:
freqs.append({"val": float(val), "type": use, "use": info})
for i in range(9):
val = self.get_value(line, 855+i*60, 60)
if val:
freqs_untrunc.append(val)
self.freqs['freqs'].extend(freqs)
self.freqs['freqs_untrunc'].extend(freqs_untrunc)
| mit | 4,184,547,079,323,127,000 | 28.426667 | 79 | 0.492524 | false |
apbard/scipy | scipy/spatial/tests/test_kdtree.py | 1 | 41366 | # Copyright Anne M. Archibald 2008
# Released under the scipy license
from __future__ import division, print_function, absolute_import
from numpy.testing import (assert_equal, assert_array_equal,
assert_almost_equal, assert_array_almost_equal, assert_,
assert_raises)
import numpy as np
from scipy.spatial import KDTree, Rectangle, distance_matrix, cKDTree
from scipy.spatial.ckdtree import cKDTreeNode
from scipy.spatial import minkowski_distance
import itertools
def distance_box(a, b, p, boxsize):
diff = a - b
diff[diff > 0.5 * boxsize] -= boxsize
diff[diff < -0.5 * boxsize] += boxsize
d = minkowski_distance(diff, 0, p)
return d
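# Worked example (for illustration): with boxsize=1.0 and p=2, coordinates 0.1
# and 0.9 differ by -0.8, which wraps to +0.2 under the minimum-image
# convention, so distance_box(np.array([0.1]), np.array([0.9]), 2, 1.0) gives
# 0.2 rather than the naive 0.8.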
class ConsistencyTests:
def distance(self, a, b, p):
return minkowski_distance(a, b, p)
def test_nearest(self):
x = self.x
d, i = self.kdtree.query(x, 1)
assert_almost_equal(d**2,np.sum((x-self.data[i])**2))
eps = 1e-8
assert_(np.all(np.sum((self.data-x[np.newaxis,:])**2,axis=1) > d**2-eps))
def test_m_nearest(self):
x = self.x
m = self.m
dd, ii = self.kdtree.query(x, m)
d = np.amax(dd)
i = ii[np.argmax(dd)]
assert_almost_equal(d**2,np.sum((x-self.data[i])**2))
eps = 1e-8
assert_equal(np.sum(np.sum((self.data-x[np.newaxis,:])**2,axis=1) < d**2+eps),m)
def test_points_near(self):
x = self.x
d = self.d
dd, ii = self.kdtree.query(x, k=self.kdtree.n, distance_upper_bound=d)
eps = 1e-8
hits = 0
for near_d, near_i in zip(dd,ii):
if near_d == np.inf:
continue
hits += 1
assert_almost_equal(near_d**2,np.sum((x-self.data[near_i])**2))
assert_(near_d < d+eps, "near_d=%g should be less than %g" % (near_d,d))
        assert_equal(np.sum(self.distance(self.data,x,2) < d+eps),hits)
def test_points_near_l1(self):
x = self.x
d = self.d
dd, ii = self.kdtree.query(x, k=self.kdtree.n, p=1, distance_upper_bound=d)
eps = 1e-8
hits = 0
for near_d, near_i in zip(dd,ii):
if near_d == np.inf:
continue
hits += 1
assert_almost_equal(near_d,self.distance(x,self.data[near_i],1))
assert_(near_d < d+eps, "near_d=%g should be less than %g" % (near_d,d))
assert_equal(np.sum(self.distance(self.data,x,1) < d+eps),hits)
def test_points_near_linf(self):
x = self.x
d = self.d
dd, ii = self.kdtree.query(x, k=self.kdtree.n, p=np.inf, distance_upper_bound=d)
eps = 1e-8
hits = 0
for near_d, near_i in zip(dd,ii):
if near_d == np.inf:
continue
hits += 1
assert_almost_equal(near_d,self.distance(x,self.data[near_i],np.inf))
assert_(near_d < d+eps, "near_d=%g should be less than %g" % (near_d,d))
assert_equal(np.sum(self.distance(self.data,x,np.inf) < d+eps),hits)
def test_approx(self):
x = self.x
k = self.k
eps = 0.1
d_real, i_real = self.kdtree.query(x, k)
d, i = self.kdtree.query(x, k, eps=eps)
assert_(np.all(d <= d_real*(1+eps)))
class test_random(ConsistencyTests):
def setup_method(self):
self.n = 100
self.m = 4
np.random.seed(1234)
self.data = np.random.randn(self.n, self.m)
self.kdtree = KDTree(self.data,leafsize=2)
self.x = np.random.randn(self.m)
self.d = 0.2
self.k = 10
class test_random_far(test_random):
def setup_method(self):
        test_random.setup_method(self)
self.x = np.random.randn(self.m)+10
class test_small(ConsistencyTests):
def setup_method(self):
self.data = np.array([[0,0,0],
[0,0,1],
[0,1,0],
[0,1,1],
[1,0,0],
[1,0,1],
[1,1,0],
[1,1,1]])
self.kdtree = KDTree(self.data)
self.n = self.kdtree.n
self.m = self.kdtree.m
np.random.seed(1234)
self.x = np.random.randn(3)
self.d = 0.5
self.k = 4
def test_nearest(self):
assert_array_equal(
self.kdtree.query((0,0,0.1), 1),
(0.1,0))
def test_nearest_two(self):
assert_array_equal(
self.kdtree.query((0,0,0.1), 2),
([0.1,0.9],[0,1]))
class test_small_nonleaf(test_small):
def setup_method(self):
        test_small.setup_method(self)
self.kdtree = KDTree(self.data,leafsize=1)
class test_small_compiled(test_small):
def setup_method(self):
        test_small.setup_method(self)
self.kdtree = cKDTree(self.data)
class test_small_nonleaf_compiled(test_small):
def setup_method(self):
        test_small.setup_method(self)
self.kdtree = cKDTree(self.data,leafsize=1)
class test_random_compiled(test_random):
def setup_method(self):
        test_random.setup_method(self)
self.kdtree = cKDTree(self.data)
class test_random_far_compiled(test_random_far):
def setup_method(self):
        test_random_far.setup_method(self)
self.kdtree = cKDTree(self.data)
class test_vectorization:
def setup_method(self):
self.data = np.array([[0,0,0],
[0,0,1],
[0,1,0],
[0,1,1],
[1,0,0],
[1,0,1],
[1,1,0],
[1,1,1]])
self.kdtree = KDTree(self.data)
def test_single_query(self):
d, i = self.kdtree.query(np.array([0,0,0]))
assert_(isinstance(d,float))
assert_(np.issubdtype(i, int))
def test_vectorized_query(self):
d, i = self.kdtree.query(np.zeros((2,4,3)))
assert_equal(np.shape(d),(2,4))
assert_equal(np.shape(i),(2,4))
def test_single_query_multiple_neighbors(self):
s = 23
kk = self.kdtree.n+s
d, i = self.kdtree.query(np.array([0,0,0]),k=kk)
assert_equal(np.shape(d),(kk,))
assert_equal(np.shape(i),(kk,))
assert_(np.all(~np.isfinite(d[-s:])))
assert_(np.all(i[-s:] == self.kdtree.n))
def test_vectorized_query_multiple_neighbors(self):
s = 23
kk = self.kdtree.n+s
d, i = self.kdtree.query(np.zeros((2,4,3)),k=kk)
assert_equal(np.shape(d),(2,4,kk))
assert_equal(np.shape(i),(2,4,kk))
assert_(np.all(~np.isfinite(d[:,:,-s:])))
assert_(np.all(i[:,:,-s:] == self.kdtree.n))
def test_single_query_all_neighbors(self):
d, i = self.kdtree.query([0,0,0],k=None,distance_upper_bound=1.1)
assert_(isinstance(d,list))
assert_(isinstance(i,list))
def test_vectorized_query_all_neighbors(self):
d, i = self.kdtree.query(np.zeros((2,4,3)),k=None,distance_upper_bound=1.1)
assert_equal(np.shape(d),(2,4))
assert_equal(np.shape(i),(2,4))
assert_(isinstance(d[0,0],list))
assert_(isinstance(i[0,0],list))
class test_vectorization_compiled:
def setup_method(self):
self.data = np.array([[0,0,0],
[0,0,1],
[0,1,0],
[0,1,1],
[1,0,0],
[1,0,1],
[1,1,0],
[1,1,1]])
self.kdtree = cKDTree(self.data)
def test_single_query(self):
d, i = self.kdtree.query([0,0,0])
assert_(isinstance(d,float))
assert_(isinstance(i,int))
def test_vectorized_query(self):
d, i = self.kdtree.query(np.zeros((2,4,3)))
assert_equal(np.shape(d),(2,4))
assert_equal(np.shape(i),(2,4))
def test_vectorized_query_noncontiguous_values(self):
np.random.seed(1234)
qs = np.random.randn(3,1000).T
ds, i_s = self.kdtree.query(qs)
for q, d, i in zip(qs,ds,i_s):
assert_equal(self.kdtree.query(q),(d,i))
def test_single_query_multiple_neighbors(self):
s = 23
kk = self.kdtree.n+s
d, i = self.kdtree.query([0,0,0],k=kk)
assert_equal(np.shape(d),(kk,))
assert_equal(np.shape(i),(kk,))
assert_(np.all(~np.isfinite(d[-s:])))
assert_(np.all(i[-s:] == self.kdtree.n))
def test_vectorized_query_multiple_neighbors(self):
s = 23
kk = self.kdtree.n+s
d, i = self.kdtree.query(np.zeros((2,4,3)),k=kk)
assert_equal(np.shape(d),(2,4,kk))
assert_equal(np.shape(i),(2,4,kk))
assert_(np.all(~np.isfinite(d[:,:,-s:])))
assert_(np.all(i[:,:,-s:] == self.kdtree.n))
class ball_consistency:
def distance(self, a, b, p):
return minkowski_distance(a, b, p)
def test_in_ball(self):
l = self.T.query_ball_point(self.x, self.d, p=self.p, eps=self.eps)
for i in l:
assert_(self.distance(self.data[i],self.x,self.p) <= self.d*(1.+self.eps))
def test_found_all(self):
c = np.ones(self.T.n,dtype=bool)
l = self.T.query_ball_point(self.x, self.d, p=self.p, eps=self.eps)
c[l] = False
assert_(np.all(self.distance(self.data[c],self.x,self.p) >= self.d/(1.+self.eps)))
class test_random_ball(ball_consistency):
def setup_method(self):
n = 100
m = 4
np.random.seed(1234)
self.data = np.random.randn(n,m)
self.T = KDTree(self.data,leafsize=2)
self.x = np.random.randn(m)
self.p = 2.
self.eps = 0
self.d = 0.2
class test_random_ball_compiled(ball_consistency):
def setup_method(self):
n = 100
m = 4
np.random.seed(1234)
self.data = np.random.randn(n,m)
self.T = cKDTree(self.data,leafsize=2)
self.x = np.random.randn(m)
self.p = 2.
self.eps = 0
self.d = 0.2
class test_random_ball_compiled_periodic(ball_consistency):
def distance(self, a, b, p):
return distance_box(a, b, p, 1.0)
def setup_method(self):
n = 10000
m = 4
np.random.seed(1234)
self.data = np.random.uniform(size=(n,m))
self.T = cKDTree(self.data,leafsize=2, boxsize=1)
self.x = np.ones(m) * 0.1
self.p = 2.
self.eps = 0
self.d = 0.2
def test_in_ball_outside(self):
l = self.T.query_ball_point(self.x + 1.0, self.d, p=self.p, eps=self.eps)
for i in l:
assert_(self.distance(self.data[i],self.x,self.p) <= self.d*(1.+self.eps))
l = self.T.query_ball_point(self.x - 1.0, self.d, p=self.p, eps=self.eps)
for i in l:
assert_(self.distance(self.data[i],self.x,self.p) <= self.d*(1.+self.eps))
def test_found_all_outside(self):
c = np.ones(self.T.n,dtype=bool)
l = self.T.query_ball_point(self.x + 1.0, self.d, p=self.p, eps=self.eps)
c[l] = False
assert_(np.all(self.distance(self.data[c],self.x,self.p) >= self.d/(1.+self.eps)))
l = self.T.query_ball_point(self.x - 1.0, self.d, p=self.p, eps=self.eps)
c[l] = False
assert_(np.all(self.distance(self.data[c],self.x,self.p) >= self.d/(1.+self.eps)))
class test_random_ball_approx(test_random_ball):
def setup_method(self):
        test_random_ball.setup_method(self)
self.eps = 0.1
class test_random_ball_approx_compiled(test_random_ball_compiled):
def setup_method(self):
        test_random_ball_compiled.setup_method(self)
self.eps = 0.1
class test_random_ball_approx_compiled_periodic(test_random_ball_compiled_periodic):
def setup_method(self):
        test_random_ball_compiled_periodic.setup_method(self)
self.eps = 0.1
class test_random_ball_far(test_random_ball):
def setup_method(self):
        test_random_ball.setup_method(self)
self.d = 2.
class test_random_ball_far_compiled(test_random_ball_compiled):
def setup_method(self):
        test_random_ball_compiled.setup_method(self)
self.d = 2.
class test_random_ball_far_compiled_periodic(test_random_ball_compiled_periodic):
def setup_method(self):
        test_random_ball_compiled_periodic.setup_method(self)
self.d = 2.
class test_random_ball_l1(test_random_ball):
def setup_method(self):
        test_random_ball.setup_method(self)
self.p = 1
class test_random_ball_l1_compiled(test_random_ball_compiled):
def setup_method(self):
        test_random_ball_compiled.setup_method(self)
self.p = 1
class test_random_ball_l1_compiled_periodic(test_random_ball_compiled_periodic):
def setup_method(self):
        test_random_ball_compiled_periodic.setup_method(self)
self.p = 1
class test_random_ball_linf(test_random_ball):
def setup_method(self):
        test_random_ball.setup_method(self)
self.p = np.inf
class test_random_ball_linf_compiled_periodic(test_random_ball_compiled_periodic):
def setup_method(self):
        test_random_ball_compiled_periodic.setup_method(self)
self.p = np.inf
def test_random_ball_vectorized():
n = 20
m = 5
T = KDTree(np.random.randn(n,m))
r = T.query_ball_point(np.random.randn(2,3,m),1)
assert_equal(r.shape,(2,3))
assert_(isinstance(r[0,0],list))
def test_random_ball_vectorized_compiled():
n = 20
m = 5
np.random.seed(1234)
T = cKDTree(np.random.randn(n,m))
r = T.query_ball_point(np.random.randn(2,3,m),1)
assert_equal(r.shape,(2,3))
assert_(isinstance(r[0,0],list))
def test_query_ball_point_multithreading():
np.random.seed(0)
n = 5000
k = 2
points = np.random.randn(n,k)
T = cKDTree(points)
l1 = T.query_ball_point(points,0.003,n_jobs=1)
l2 = T.query_ball_point(points,0.003,n_jobs=64)
l3 = T.query_ball_point(points,0.003,n_jobs=-1)
for i in range(n):
if l1[i] or l2[i]:
assert_array_equal(l1[i],l2[i])
for i in range(n):
if l1[i] or l3[i]:
assert_array_equal(l1[i],l3[i])
class two_trees_consistency:
def distance(self, a, b, p):
return minkowski_distance(a, b, p)
def test_all_in_ball(self):
r = self.T1.query_ball_tree(self.T2, self.d, p=self.p, eps=self.eps)
for i, l in enumerate(r):
for j in l:
assert_(self.distance(self.data1[i],self.data2[j],self.p) <= self.d*(1.+self.eps))
def test_found_all(self):
r = self.T1.query_ball_tree(self.T2, self.d, p=self.p, eps=self.eps)
for i, l in enumerate(r):
c = np.ones(self.T2.n,dtype=bool)
c[l] = False
assert_(np.all(self.distance(self.data2[c],self.data1[i],self.p) >= self.d/(1.+self.eps)))
class test_two_random_trees(two_trees_consistency):
def setup_method(self):
n = 50
m = 4
np.random.seed(1234)
self.data1 = np.random.randn(n,m)
self.T1 = KDTree(self.data1,leafsize=2)
self.data2 = np.random.randn(n,m)
self.T2 = KDTree(self.data2,leafsize=2)
self.p = 2.
self.eps = 0
self.d = 0.2
class test_two_random_trees_compiled(two_trees_consistency):
def setup_method(self):
n = 50
m = 4
np.random.seed(1234)
self.data1 = np.random.randn(n,m)
self.T1 = cKDTree(self.data1,leafsize=2)
self.data2 = np.random.randn(n,m)
self.T2 = cKDTree(self.data2,leafsize=2)
self.p = 2.
self.eps = 0
self.d = 0.2
class test_two_random_trees_compiled_periodic(two_trees_consistency):
def distance(self, a, b, p):
return distance_box(a, b, p, 1.0)
def setup_method(self):
n = 50
m = 4
np.random.seed(1234)
self.data1 = np.random.uniform(size=(n,m))
self.T1 = cKDTree(self.data1,leafsize=2, boxsize=1.0)
self.data2 = np.random.uniform(size=(n,m))
self.T2 = cKDTree(self.data2,leafsize=2, boxsize=1.0)
self.p = 2.
self.eps = 0
self.d = 0.2
class test_two_random_trees_far(test_two_random_trees):
def setup_method(self):
        test_two_random_trees.setup_method(self)
self.d = 2
class test_two_random_trees_far_compiled(test_two_random_trees_compiled):
def setup_method(self):
        test_two_random_trees_compiled.setup_method(self)
self.d = 2
class test_two_random_trees_far_compiled_periodic(test_two_random_trees_compiled_periodic):
def setup_method(self):
        test_two_random_trees_compiled_periodic.setup_method(self)
self.d = 2
class test_two_random_trees_linf(test_two_random_trees):
def setup_method(self):
        test_two_random_trees.setup_method(self)
self.p = np.inf
class test_two_random_trees_linf_compiled(test_two_random_trees_compiled):
def setup_method(self):
        test_two_random_trees_compiled.setup_method(self)
self.p = np.inf
class test_two_random_trees_linf_compiled_periodic(test_two_random_trees_compiled_periodic):
def setup_method(self):
        test_two_random_trees_compiled_periodic.setup_method(self)
self.p = np.inf
class test_rectangle:
def setup_method(self):
self.rect = Rectangle([0,0],[1,1])
def test_min_inside(self):
assert_almost_equal(self.rect.min_distance_point([0.5,0.5]),0)
def test_min_one_side(self):
assert_almost_equal(self.rect.min_distance_point([0.5,1.5]),0.5)
def test_min_two_sides(self):
assert_almost_equal(self.rect.min_distance_point([2,2]),np.sqrt(2))
def test_max_inside(self):
assert_almost_equal(self.rect.max_distance_point([0.5,0.5]),1/np.sqrt(2))
def test_max_one_side(self):
assert_almost_equal(self.rect.max_distance_point([0.5,1.5]),np.hypot(0.5,1.5))
def test_max_two_sides(self):
assert_almost_equal(self.rect.max_distance_point([2,2]),2*np.sqrt(2))
def test_split(self):
less, greater = self.rect.split(0,0.1)
assert_array_equal(less.maxes,[0.1,1])
assert_array_equal(less.mins,[0,0])
assert_array_equal(greater.maxes,[1,1])
assert_array_equal(greater.mins,[0.1,0])
def test_distance_l2():
assert_almost_equal(minkowski_distance([0,0],[1,1],2),np.sqrt(2))
def test_distance_l1():
assert_almost_equal(minkowski_distance([0,0],[1,1],1),2)
def test_distance_linf():
assert_almost_equal(minkowski_distance([0,0],[1,1],np.inf),1)
def test_distance_vectorization():
np.random.seed(1234)
x = np.random.randn(10,1,3)
y = np.random.randn(1,7,3)
assert_equal(minkowski_distance(x,y).shape,(10,7))
class count_neighbors_consistency:
def test_one_radius(self):
r = 0.2
assert_equal(self.T1.count_neighbors(self.T2, r),
np.sum([len(l) for l in self.T1.query_ball_tree(self.T2,r)]))
def test_large_radius(self):
r = 1000
assert_equal(self.T1.count_neighbors(self.T2, r),
np.sum([len(l) for l in self.T1.query_ball_tree(self.T2,r)]))
def test_multiple_radius(self):
rs = np.exp(np.linspace(np.log(0.01),np.log(10),3))
results = self.T1.count_neighbors(self.T2, rs)
assert_(np.all(np.diff(results) >= 0))
for r,result in zip(rs, results):
assert_equal(self.T1.count_neighbors(self.T2, r), result)
class test_count_neighbors(count_neighbors_consistency):
def setup_method(self):
n = 50
m = 2
np.random.seed(1234)
self.T1 = KDTree(np.random.randn(n,m),leafsize=2)
self.T2 = KDTree(np.random.randn(n,m),leafsize=2)
class test_count_neighbors_compiled(count_neighbors_consistency):
def setup_method(self):
n = 50
m = 2
np.random.seed(1234)
self.T1 = cKDTree(np.random.randn(n,m),leafsize=2)
self.T2 = cKDTree(np.random.randn(n,m),leafsize=2)
class sparse_distance_matrix_consistency:
def distance(self, a, b, p):
return minkowski_distance(a, b, p)
def test_consistency_with_neighbors(self):
M = self.T1.sparse_distance_matrix(self.T2, self.r)
r = self.T1.query_ball_tree(self.T2, self.r)
for i,l in enumerate(r):
for j in l:
assert_almost_equal(M[i,j],
self.distance(self.T1.data[i], self.T2.data[j], self.p),
decimal=14)
for ((i,j),d) in M.items():
assert_(j in r[i])
def test_zero_distance(self):
# raises an exception for bug 870 (FIXME: Does it?)
self.T1.sparse_distance_matrix(self.T1, self.r)
class test_sparse_distance_matrix(sparse_distance_matrix_consistency):
def setup_method(self):
n = 50
m = 4
np.random.seed(1234)
data1 = np.random.randn(n,m)
data2 = np.random.randn(n,m)
self.T1 = cKDTree(data1,leafsize=2)
self.T2 = cKDTree(data2,leafsize=2)
self.r = 0.5
self.p = 2
self.data1 = data1
self.data2 = data2
self.n = n
self.m = m
class test_sparse_distance_matrix_compiled(sparse_distance_matrix_consistency):
def setup_method(self):
n = 50
m = 4
np.random.seed(0)
data1 = np.random.randn(n,m)
data2 = np.random.randn(n,m)
self.T1 = cKDTree(data1,leafsize=2)
self.T2 = cKDTree(data2,leafsize=2)
self.ref_T1 = KDTree(data1, leafsize=2)
self.ref_T2 = KDTree(data2, leafsize=2)
self.r = 0.5
self.n = n
self.m = m
self.data1 = data1
self.data2 = data2
self.p = 2
def test_consistency_with_python(self):
M1 = self.T1.sparse_distance_matrix(self.T2, self.r)
M2 = self.ref_T1.sparse_distance_matrix(self.ref_T2, self.r)
assert_array_almost_equal(M1.todense(), M2.todense(), decimal=14)
def test_against_logic_error_regression(self):
# regression test for gh-5077 logic error
np.random.seed(0)
too_many = np.array(np.random.randn(18, 2), dtype=int)
tree = cKDTree(too_many, balanced_tree=False, compact_nodes=False)
d = tree.sparse_distance_matrix(tree, 3).todense()
assert_array_almost_equal(d, d.T, decimal=14)
def test_ckdtree_return_types(self):
# brute-force reference
ref = np.zeros((self.n,self.n))
for i in range(self.n):
for j in range(self.n):
v = self.data1[i,:] - self.data2[j,:]
ref[i,j] = np.dot(v,v)
ref = np.sqrt(ref)
ref[ref > self.r] = 0.
# test return type 'dict'
dist = np.zeros((self.n,self.n))
r = self.T1.sparse_distance_matrix(self.T2, self.r, output_type='dict')
for i,j in r.keys():
dist[i,j] = r[(i,j)]
assert_array_almost_equal(ref, dist, decimal=14)
# test return type 'ndarray'
dist = np.zeros((self.n,self.n))
r = self.T1.sparse_distance_matrix(self.T2, self.r,
output_type='ndarray')
for k in range(r.shape[0]):
i = r['i'][k]
j = r['j'][k]
v = r['v'][k]
dist[i,j] = v
assert_array_almost_equal(ref, dist, decimal=14)
# test return type 'dok_matrix'
r = self.T1.sparse_distance_matrix(self.T2, self.r,
output_type='dok_matrix')
assert_array_almost_equal(ref, r.todense(), decimal=14)
# test return type 'coo_matrix'
r = self.T1.sparse_distance_matrix(self.T2, self.r,
output_type='coo_matrix')
assert_array_almost_equal(ref, r.todense(), decimal=14)
def test_distance_matrix():
m = 10
n = 11
k = 4
np.random.seed(1234)
xs = np.random.randn(m,k)
ys = np.random.randn(n,k)
ds = distance_matrix(xs,ys)
assert_equal(ds.shape, (m,n))
for i in range(m):
for j in range(n):
assert_almost_equal(minkowski_distance(xs[i],ys[j]),ds[i,j])
def test_distance_matrix_looping():
m = 10
n = 11
k = 4
np.random.seed(1234)
xs = np.random.randn(m,k)
ys = np.random.randn(n,k)
ds = distance_matrix(xs,ys)
dsl = distance_matrix(xs,ys,threshold=1)
assert_equal(ds,dsl)
def check_onetree_query(T,d):
r = T.query_ball_tree(T, d)
s = set()
for i, l in enumerate(r):
for j in l:
if i < j:
s.add((i,j))
assert_(s == T.query_pairs(d))
def test_onetree_query():
np.random.seed(0)
n = 50
k = 4
points = np.random.randn(n,k)
T = KDTree(points)
yield check_onetree_query, T, 0.1
points = np.random.randn(3*n,k)
points[:n] *= 0.001
points[n:2*n] += 2
T = KDTree(points)
yield check_onetree_query, T, 0.1
yield check_onetree_query, T, 0.001
yield check_onetree_query, T, 0.00001
yield check_onetree_query, T, 1e-6
def test_onetree_query_compiled():
np.random.seed(0)
n = 100
k = 4
points = np.random.randn(n,k)
T = cKDTree(points)
yield check_onetree_query, T, 0.1
points = np.random.randn(3*n,k)
points[:n] *= 0.001
points[n:2*n] += 2
T = cKDTree(points)
yield check_onetree_query, T, 0.1
yield check_onetree_query, T, 0.001
yield check_onetree_query, T, 0.00001
yield check_onetree_query, T, 1e-6
def test_query_pairs_single_node():
tree = KDTree([[0, 1]])
assert_equal(tree.query_pairs(0.5), set())
def test_query_pairs_single_node_compiled():
tree = cKDTree([[0, 1]])
assert_equal(tree.query_pairs(0.5), set())
def test_ckdtree_query_pairs():
np.random.seed(0)
n = 50
k = 2
r = 0.1
r2 = r**2
points = np.random.randn(n,k)
T = cKDTree(points)
# brute force reference
brute = set()
for i in range(n):
for j in range(i+1,n):
v = points[i,:] - points[j,:]
if np.dot(v,v) <= r2:
brute.add((i,j))
l0 = sorted(brute)
# test default return type
s = T.query_pairs(r)
l1 = sorted(s)
assert_array_equal(l0,l1)
# test return type 'set'
s = T.query_pairs(r, output_type='set')
l1 = sorted(s)
assert_array_equal(l0,l1)
# test return type 'ndarray'
s = set()
arr = T.query_pairs(r, output_type='ndarray')
for i in range(arr.shape[0]):
s.add((int(arr[i,0]),int(arr[i,1])))
l2 = sorted(s)
assert_array_equal(l0,l2)
def test_ball_point_ints():
# Regression test for #1373.
x, y = np.mgrid[0:4, 0:4]
points = list(zip(x.ravel(), y.ravel()))
tree = KDTree(points)
assert_equal(sorted([4, 8, 9, 12]),
sorted(tree.query_ball_point((2, 0), 1)))
points = np.asarray(points, dtype=float)
tree = KDTree(points)
assert_equal(sorted([4, 8, 9, 12]),
sorted(tree.query_ball_point((2, 0), 1)))
def test_kdtree_comparisons():
# Regression test: node comparisons were done wrong in 0.12 w/Py3.
nodes = [KDTree.node() for _ in range(3)]
assert_equal(sorted(nodes), sorted(nodes[::-1]))
def test_ckdtree_build_modes():
# check if different build modes for cKDTree give
# similar query results
np.random.seed(0)
n = 5000
k = 4
points = np.random.randn(n, k)
T1 = cKDTree(points).query(points, k=5)[-1]
T2 = cKDTree(points, compact_nodes=False).query(points, k=5)[-1]
T3 = cKDTree(points, balanced_tree=False).query(points, k=5)[-1]
T4 = cKDTree(points, compact_nodes=False, balanced_tree=False).query(points, k=5)[-1]
assert_array_equal(T1, T2)
assert_array_equal(T1, T3)
assert_array_equal(T1, T4)
def test_ckdtree_pickle():
# test if it is possible to pickle
# a cKDTree
try:
import cPickle as pickle
except ImportError:
import pickle
np.random.seed(0)
n = 50
k = 4
points = np.random.randn(n, k)
T1 = cKDTree(points)
tmp = pickle.dumps(T1)
T2 = pickle.loads(tmp)
T1 = T1.query(points, k=5)[-1]
T2 = T2.query(points, k=5)[-1]
assert_array_equal(T1, T2)
def test_ckdtree_pickle_boxsize():
# test if it is possible to pickle a periodic
# cKDTree
try:
import cPickle as pickle
except ImportError:
import pickle
np.random.seed(0)
n = 50
k = 4
points = np.random.uniform(size=(n, k))
T1 = cKDTree(points, boxsize=1.0)
tmp = pickle.dumps(T1)
T2 = pickle.loads(tmp)
T1 = T1.query(points, k=5)[-1]
T2 = T2.query(points, k=5)[-1]
assert_array_equal(T1, T2)
def test_ckdtree_copy_data():
# check if copy_data=True makes the kd-tree
# impervious to data corruption by modification of
    # the data array
np.random.seed(0)
n = 5000
k = 4
points = np.random.randn(n, k)
T = cKDTree(points, copy_data=True)
q = points.copy()
T1 = T.query(q, k=5)[-1]
points[...] = np.random.randn(n, k)
T2 = T.query(q, k=5)[-1]
assert_array_equal(T1, T2)
def test_ckdtree_parallel():
# check if parallel=True also generates correct
# query results
np.random.seed(0)
n = 5000
k = 4
points = np.random.randn(n, k)
T = cKDTree(points)
T1 = T.query(points, k=5, n_jobs=64)[-1]
T2 = T.query(points, k=5, n_jobs=-1)[-1]
T3 = T.query(points, k=5)[-1]
assert_array_equal(T1, T2)
assert_array_equal(T1, T3)
def test_ckdtree_view():
# Check that the nodes can be correctly viewed from Python.
# This test also sanity checks each node in the cKDTree, and
# thus verifies the internal structure of the kd-tree.
np.random.seed(0)
n = 100
k = 4
points = np.random.randn(n, k)
kdtree = cKDTree(points)
# walk the whole kd-tree and sanity check each node
def recurse_tree(n):
assert_(isinstance(n, cKDTreeNode))
if n.split_dim == -1:
assert_(n.lesser is None)
assert_(n.greater is None)
assert_(n.indices.shape[0] <= kdtree.leafsize)
else:
recurse_tree(n.lesser)
recurse_tree(n.greater)
x = n.lesser.data_points[:, n.split_dim]
y = n.greater.data_points[:, n.split_dim]
assert_(x.max() < y.min())
recurse_tree(kdtree.tree)
    # check that indices are correctly retrieved
n = kdtree.tree
assert_array_equal(np.sort(n.indices), range(100))
    # check that data_points are correctly retrieved
assert_array_equal(kdtree.data[n.indices, :], n.data_points)
# cKDTree is specialized to type double points, so no need to make
# a unit test corresponding to test_ball_point_ints()
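# Illustration (not part of the original suite, shown for context): integer
# input is simply upcast to float64, e.g.
#     x, y = np.mgrid[0:4, 0:4]
#     tree = cKDTree(list(zip(x.ravel(), y.ravel())))
#     sorted(tree.query_ball_point((2, 0), 1))   # -> [4, 8, 9, 12]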
def test_ckdtree_list_k():
    # check cKDTree queries where k is given as a list of values
n = 200
m = 2
klist = [1, 2, 3]
kint = 3
np.random.seed(1234)
data = np.random.uniform(size=(n, m))
kdtree = cKDTree(data, leafsize=1)
# check agreement between arange(1,k+1) and k
dd, ii = kdtree.query(data, klist)
dd1, ii1 = kdtree.query(data, kint)
assert_equal(dd, dd1)
assert_equal(ii, ii1)
# now check skipping one element
klist = np.array([1, 3])
kint = 3
dd, ii = kdtree.query(data, kint)
dd1, ii1 = kdtree.query(data, klist)
assert_equal(dd1, dd[..., klist - 1])
assert_equal(ii1, ii[..., klist - 1])
# check k == 1 special case
# and k == [1] non-special case
dd, ii = kdtree.query(data, 1)
dd1, ii1 = kdtree.query(data, [1])
assert_equal(len(dd.shape), 1)
assert_equal(len(dd1.shape), 2)
assert_equal(dd, np.ravel(dd1))
assert_equal(ii, np.ravel(ii1))
def test_ckdtree_box():
# check ckdtree periodic boundary
n = 2000
m = 3
k = 3
np.random.seed(1234)
data = np.random.uniform(size=(n, m))
kdtree = cKDTree(data, leafsize=1, boxsize=1.0)
    # build a plain (non-periodic) cKDTree to drive the simulated periodic box
kdtree2 = cKDTree(data, leafsize=1)
for p in [1, 2, 3.0, np.inf]:
dd, ii = kdtree.query(data, k, p=p)
dd1, ii1 = kdtree.query(data + 1.0, k, p=p)
assert_almost_equal(dd, dd1)
assert_equal(ii, ii1)
dd1, ii1 = kdtree.query(data - 1.0, k, p=p)
assert_almost_equal(dd, dd1)
assert_equal(ii, ii1)
dd2, ii2 = simulate_periodic_box(kdtree2, data, k, boxsize=1.0, p=p)
assert_almost_equal(dd, dd2)
assert_equal(ii, ii2)
def test_ckdtree_box_0boxsize():
# check ckdtree periodic boundary that mimics non-periodic
n = 2000
m = 2
k = 3
np.random.seed(1234)
data = np.random.uniform(size=(n, m))
kdtree = cKDTree(data, leafsize=1, boxsize=0.0)
    # compare against a plain (non-periodic) cKDTree, which boxsize=0.0 should match
kdtree2 = cKDTree(data, leafsize=1)
for p in [1, 2, np.inf]:
dd, ii = kdtree.query(data, k, p=p)
dd1, ii1 = kdtree2.query(data, k, p=p)
assert_almost_equal(dd, dd1)
assert_equal(ii, ii1)
def test_ckdtree_box_upper_bounds():
data = np.linspace(0, 2, 10).reshape(-1, 2)
data[:, 1] += 10
assert_raises(ValueError, cKDTree, data, leafsize=1, boxsize=1.0)
assert_raises(ValueError, cKDTree, data, leafsize=1, boxsize=(0.0, 2.0))
# skip a dimension.
cKDTree(data, leafsize=1, boxsize=(2.0, 0.0))
def test_ckdtree_box_lower_bounds():
data = np.linspace(-1, 1, 10)
assert_raises(ValueError, cKDTree, data, leafsize=1, boxsize=1.0)
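# Reference helper used by the periodic-boundary tests above: it shifts the
# query points by every offset in {-1, 0, +1}**m (scaled by boxsize), queries
# each shifted image against a plain non-periodic tree, then merges the
# candidates and keeps the k smallest distances (and their indices) per point.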
def simulate_periodic_box(kdtree, data, k, boxsize, p):
dd = []
ii = []
x = np.arange(3 ** data.shape[1])
nn = np.array(np.unravel_index(x, [3] * data.shape[1])).T
nn = nn - 1.0
for n in nn:
image = data + n * 1.0 * boxsize
dd2, ii2 = kdtree.query(image, k, p=p)
dd2 = dd2.reshape(-1, k)
ii2 = ii2.reshape(-1, k)
dd.append(dd2)
ii.append(ii2)
dd = np.concatenate(dd, axis=-1)
ii = np.concatenate(ii, axis=-1)
result = np.empty([len(data), len(nn) * k], dtype=[
('ii', 'i8'),
('dd', 'f8')])
result['ii'][:] = ii
result['dd'][:] = dd
result.sort(order='dd')
return result['dd'][:, :k], result['ii'][:,:k]
def test_ckdtree_memuse():
# unit test adaptation of gh-5630
# NOTE: this will fail when run via valgrind,
# because rss is no longer a reliable memory usage indicator.
try:
import resource
except ImportError:
# resource is not available on Windows with Python 2.6
return
# Make some data
dx, dy = 0.05, 0.05
y, x = np.mgrid[slice(1, 5 + dy, dy),
slice(1, 5 + dx, dx)]
z = np.sin(x)**10 + np.cos(10 + y*x) * np.cos(x)
z_copy = np.empty_like(z)
z_copy[:] = z
# Place FILLVAL in z_copy at random number of random locations
FILLVAL = 99.
mask = np.random.randint(0, z.size, np.random.randint(50) + 5)
z_copy.flat[mask] = FILLVAL
igood = np.vstack(np.where(x != FILLVAL)).T
ibad = np.vstack(np.where(x == FILLVAL)).T
mem_use = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
# burn-in
for i in range(10):
tree = cKDTree(igood)
# count memleaks while constructing and querying cKDTree
num_leaks = 0
for i in range(100):
mem_use = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
tree = cKDTree(igood)
dist, iquery = tree.query(ibad, k=4, p=2)
new_mem_use = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
if new_mem_use > mem_use:
num_leaks += 1
# ideally zero leaks, but errors might accidentally happen
# outside cKDTree
assert_(num_leaks < 10)
def test_ckdtree_weights():
data = np.linspace(0, 1, 4).reshape(-1, 1)
tree1 = cKDTree(data, leafsize=1)
weights = np.ones(len(data), dtype='f4')
nw = tree1._build_weights(weights)
assert_array_equal(nw, [4, 2, 1, 1, 2, 1, 1])
assert_raises(ValueError, tree1._build_weights, weights[:-1])
for i in range(10):
# since weights are uniform, these shall agree:
c1 = tree1.count_neighbors(tree1, np.linspace(0, 10, i))
c2 = tree1.count_neighbors(tree1, np.linspace(0, 10, i),
weights=(weights, weights))
c3 = tree1.count_neighbors(tree1, np.linspace(0, 10, i),
weights=(weights, None))
c4 = tree1.count_neighbors(tree1, np.linspace(0, 10, i),
weights=(None, weights))
c5 = tree1.count_neighbors(tree1, np.linspace(0, 10, i),
weights=weights)
assert_array_equal(c1, c2)
assert_array_equal(c1, c3)
assert_array_equal(c1, c4)
for i in range(len(data)):
# this tests removal of one data point by setting weight to 0
w1 = weights.copy()
w1[i] = 0
data2 = data[w1 != 0]
w2 = weights[w1 != 0]
tree2 = cKDTree(data2)
c1 = tree1.count_neighbors(tree1, np.linspace(0, 10, 100),
weights=(w1, w1))
# "c2 is correct"
c2 = tree2.count_neighbors(tree2, np.linspace(0, 10, 100))
assert_array_equal(c1, c2)
        # passing a single weights array for two different trees is
        # ambiguous and shall raise ValueError
assert_raises(ValueError, tree1.count_neighbors,
tree2, np.linspace(0, 10, 100), weights=w1)
def test_ckdtree_count_neighbors_multiple_r():
n = 2000
m = 2
np.random.seed(1234)
data = np.random.normal(size=(n, m))
kdtree = cKDTree(data, leafsize=1)
r0 = [0, 0.01, 0.01, 0.02, 0.05]
i0 = np.arange(len(r0))
n0 = kdtree.count_neighbors(kdtree, r0)
nnc = kdtree.count_neighbors(kdtree, r0, cumulative=False)
assert_equal(n0, nnc.cumsum())
for i, r in zip(itertools.permutations(i0),
itertools.permutations(r0)):
# permute n0 by i and it shall agree
n = kdtree.count_neighbors(kdtree, r)
assert_array_equal(n, n0[list(i)])
def test_len0_arrays():
# make sure len-0 arrays are handled correctly
# in range queries (gh-5639)
np.random.seed(1234)
X = np.random.rand(10,2)
Y = np.random.rand(10,2)
tree = cKDTree(X)
# query_ball_point (single)
d,i = tree.query([.5, .5], k=1)
z = tree.query_ball_point([.5, .5], 0.1*d)
assert_array_equal(z, [])
# query_ball_point (multiple)
d,i = tree.query(Y, k=1)
mind = d.min()
z = tree.query_ball_point(Y, 0.1*mind)
y = np.empty(shape=(10,), dtype=object)
y.fill([])
assert_array_equal(y, z)
# query_ball_tree
other = cKDTree(Y)
y = tree.query_ball_tree(other, 0.1*mind)
assert_array_equal(10*[[]], y)
# count_neighbors
y = tree.count_neighbors(other, 0.1*mind)
assert_(y == 0)
# sparse_distance_matrix
y = tree.sparse_distance_matrix(other, 0.1*mind, output_type='dok_matrix')
assert_array_equal(y == np.zeros((10,10)), True)
y = tree.sparse_distance_matrix(other, 0.1*mind, output_type='coo_matrix')
assert_array_equal(y == np.zeros((10,10)), True)
y = tree.sparse_distance_matrix(other, 0.1*mind, output_type='dict')
assert_equal(y, {})
y = tree.sparse_distance_matrix(other,0.1*mind, output_type='ndarray')
_dtype = [('i',np.intp), ('j',np.intp), ('v',np.float64)]
res_dtype = np.dtype(_dtype, align=True)
z = np.empty(shape=(0,), dtype=res_dtype)
assert_array_equal(y, z)
# query_pairs
d,i = tree.query(X, k=2)
mind = d[:,-1].min()
y = tree.query_pairs(0.1*mind, output_type='set')
assert_equal(y, set())
y = tree.query_pairs(0.1*mind, output_type='ndarray')
z = np.empty(shape=(0,2), dtype=np.intp)
assert_array_equal(y, z)
def test_ckdtree_duplicated_inputs():
# check ckdtree with duplicated inputs
n = 1024
for m in range(1, 8):
data = np.concatenate([
np.ones((n // 2, m)) * 1,
np.ones((n // 2, m)) * 2], axis=0)
        # the tree shall not contain more than 3 nodes:
        # the root, a left leaf (the 1s) and a right leaf (the 2s)
kdtree = cKDTree(data, leafsize=1)
assert_equal(kdtree.size, 3)
kdtree = cKDTree(data)
assert_equal(kdtree.size, 3)
# if compact_nodes are disabled, the number
# of nodes is n (per leaf) + (m - 1)* 2 (splits per dimension) + 1
# and the root
kdtree = cKDTree(data, compact_nodes=False, leafsize=1)
assert_equal(kdtree.size, n + m * 2 - 1)
def test_ckdtree_noncumulative_nondecreasing():
    # count_neighbors with cumulative=False requires a non-decreasing r,
    # so passing a decreasing sequence shall raise ValueError
kdtree = cKDTree([[0]], leafsize=1)
assert_raises(ValueError, kdtree.count_neighbors,
kdtree, [0.1, 0], cumulative=False)
def test_short_knn():
# The test case is based on github: #6425 by @SteveDoyle2
xyz = np.array([
[0., 0., 0.],
[1.01, 0., 0.],
[0., 1., 0.],
[0., 1.01, 0.],
[1., 0., 0.],
[1., 1., 0.],],
dtype='float64')
ckdt = cKDTree(xyz)
deq, ieq = ckdt.query(xyz, k=4, distance_upper_bound=0.2)
assert_array_almost_equal(deq,
[[0., np.inf, np.inf, np.inf],
[0., 0.01, np.inf, np.inf],
[0., 0.01, np.inf, np.inf],
[0., 0.01, np.inf, np.inf],
[0., 0.01, np.inf, np.inf],
[0., np.inf, np.inf, np.inf]])
| bsd-3-clause | 5,765,639,288,397,933,000 | 30.055556 | 102 | 0.572282 | false |
dtamayo/reboundx | reboundx/test/test_params.py | 2 | 6991 | import rebound
import reboundx
from reboundx import data
import unittest
import math
import numpy as np
from ctypes import c_uint, c_uint8, c_uint32, c_uint64
def mycomp(obj1, obj2):
if type(obj1) != type(obj2):
return False
for attr in [attr for attr in dir(obj1) if not attr.startswith('_')]:
if getattr(obj1, attr) != getattr(obj2, attr):
return False
return True
from ctypes import Structure, c_double, cast, POINTER
class Mystruct(Structure):
_fields_ = [('dt', c_double),
('c', c_double)]
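# Small ctypes structure used below to exercise custom REBX_TYPE_POINTER
# parameters: an instance is attached to a force and read back by casting the
# returned pointer to POINTER(Mystruct).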
class TestParams(unittest.TestCase):
def setUp(self):
self.sim = rebound.Simulation()
self.sim.add(m=1.)
self.sim.add(a=1.)
self.rebx = reboundx.Extras(self.sim)
self.gr = self.rebx.load_force("gr")
self.mm = self.rebx.load_operator("modify_mass")
self.p = self.sim.particles[1]
def tearDown(self):
self.sim = None
def test_adddouble(self):
self.gr.params['c'] = 1.2
self.mm.params['c'] = 1.4
self.p.params['c'] = 1.7
self.assertAlmostEqual(self.gr.params["c"], 1.2, delta=1.e-15)
self.assertAlmostEqual(self.mm.params["c"], 1.4, delta=1.e-15)
self.assertAlmostEqual(self.p.params["c"], 1.7, delta=1.e-15)
def test_updatedouble(self):
self.gr.params['c'] = 1.2
self.mm.params['c'] = 1.4
self.p.params['c'] = 1.7
self.gr.params['c'] = 2.2
self.mm.params['c'] = 2.4
self.p.params['c'] = 2.7
self.assertAlmostEqual(self.gr.params["c"], 2.2, delta=1.e-15)
self.assertAlmostEqual(self.mm.params["c"], 2.4, delta=1.e-15)
self.assertAlmostEqual(self.p.params["c"], 2.7, delta=1.e-15)
def test_updatedoublecopy(self):
a = 1.2
self.gr.params['c'] = a
a = 3.7
self.assertAlmostEqual(self.gr.params["c"], 1.2, delta=1.e-15) # shouldn't reflect update because copied
def test_addint(self):
self.gr.params['gr_source'] = 1
self.mm.params['gr_source'] = 2
self.p.params['gr_source'] = 3
self.assertAlmostEqual(self.gr.params["gr_source"], 1, delta=1.e-15)
self.assertAlmostEqual(self.mm.params["gr_source"], 2, delta=1.e-15)
self.assertAlmostEqual(self.p.params["gr_source"], 3, delta=1.e-15)
def test_updateint(self):
self.gr.params['gr_source'] = 1
self.mm.params['gr_source'] = 2
self.p.params['gr_source'] = 3
self.gr.params['gr_source'] = 11
self.mm.params['gr_source'] = 22
self.p.params['gr_source'] = 33
self.assertEqual(self.gr.params["gr_source"], 11)
self.assertEqual(self.mm.params["gr_source"], 22)
self.assertEqual(self.p.params["gr_source"], 33)
def test_updateintcopy(self):
a = 1
self.gr.params['gr_source'] = a
a = 3
self.assertEqual(self.gr.params["gr_source"], 1) # shouldn't reflect update because copied
def test_addforce(self):
self.gr.params['c'] = 3.5
self.p.params['force'] = self.gr
newgr = self.p.params['force']
self.assertAlmostEqual(newgr.params['c'], 3.5, delta=1.e-15)
def test_addnonforce(self):
with self.assertRaises(AttributeError):
self.p.params['force'] = 3.
def test_updateforceptr(self):
self.gr.params['c'] = 3.5
self.p.params['force'] = self.gr
self.gr.params['c'] = 4.5
newgr = self.p.params['force']
self.assertAlmostEqual(newgr.params['c'], 4.5, delta=1.e-15)
def test_updateforce(self):
self.p.params['force'] = self.gr
self.gr.params['c'] = 3.5
newforce = self.rebx.load_force('gr')
newforce.params['c'] = -3.5
self.p.params['force'] = newforce
newgr = self.p.params['force']
self.assertAlmostEqual(newgr.params['c'], -3.5, delta=1.e-15)
def test_addcustomstruct(self):
self.rebx.register_param('my_new_struct', 'REBX_TYPE_POINTER')
s = Mystruct()
s.dt = 0.1
s.c = 3.5
self.gr.params['my_new_struct'] = s
new_s = self.gr.params['my_new_struct']
new_s = cast(new_s, POINTER(Mystruct)).contents
self.assertAlmostEqual(new_s.dt, 0.1, delta=1.e-15)
self.assertAlmostEqual(new_s.c, 3.5, delta=1.e-15)
def test_updatecustomstructptr(self):
self.rebx.register_param('my_new_struct', 'REBX_TYPE_POINTER')
s = Mystruct()
s.dt = 0.1
s.c = 3.5
self.gr.params['my_new_struct'] = s
s.dt = 1.1
new_s = self.gr.params['my_new_struct']
new_s = cast(new_s, POINTER(Mystruct)).contents
self.assertAlmostEqual(new_s.dt, 1.1, delta=1.e-15)
def test_updatecustomstruct(self):
self.rebx.register_param('my_new_struct', 'REBX_TYPE_POINTER')
s = Mystruct()
s.dt = 0.1
s.c = 3.5
self.gr.params['my_new_struct'] = s
s2 = Mystruct()
s2.dt = -0.1
s2.c = -3.5
self.gr.params['my_new_struct'] = s2
new_s = self.gr.params['my_new_struct']
new_s = cast(new_s, POINTER(Mystruct)).contents
self.assertAlmostEqual(new_s.dt, -0.1, delta=1.e-15)
self.assertAlmostEqual(new_s.c, -3.5, delta=1.e-15)
def test_getnotregistered(self):
with self.assertRaises(AttributeError):
b = self.gr.params['asd;flkj']
def test_notregistered(self):
with self.assertRaises(AttributeError):
self.gr.params['asldkjf'] = 1.2
def test_registerexisting(self):
with self.assertRaises(RuntimeError):
self.rebx.register_param('c', 'REBX_TYPE_INT')
def test_not_attached(self):
with self.assertRaises(AttributeError):
b = self.gr.params['beta']
def test_custom_not_attached(self):
self.rebx.register_param('my_custom', 'REBX_TYPE_POINTER')
with self.assertRaises(AttributeError):
b = self.gr.params['my_custom']
def test_newdouble(self):
self.rebx.register_param('my_new_double', 'REBX_TYPE_DOUBLE')
self.gr.params['my_new_double'] = 1.2
self.assertAlmostEqual(self.gr.params["my_new_double"], 1.2, delta=1.e-15)
def test_newint(self):
self.rebx.register_param('my_new_int', 'REBX_TYPE_INT')
self.gr.params['my_new_int'] = 2
self.assertEqual(self.gr.params["my_new_int"], 2)
def test_length(self):
self.gr.params['c'] = 1.3
self.gr.params['gr_source'] = 7
self.gr.params['tau_mass'] = 3.2
self.assertEqual(len(self.gr.params), 3)
def test_iter(self):
with self.assertRaises(AttributeError):
for p in self.gr.params:
pass
def test_del(self):
with self.assertRaises(AttributeError):
del self.gr.params["b"]
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 639,382,447,125,849,000 | 33.438424 | 112 | 0.579316 | false |
nke001/attention-lvcsr | libs/Theano/theano/configdefaults.py | 1 | 25288 | import os
import sys
import logging
import theano
from theano.configparser import (AddConfigVar, BoolParam, ConfigParam, EnumStr,
IntParam, StrParam, TheanoConfigParser)
from theano.misc.cpucount import cpuCount
from theano.misc.windows import call_subprocess_Popen
_logger = logging.getLogger('theano.configdefaults')
config = TheanoConfigParser()
def floatX_convert(s):
if s == "32":
return "float32"
elif s == "64":
return "float64"
elif s == "16":
return "float16"
else:
return s
AddConfigVar('floatX',
"Default floating-point precision for python casts.\n"
"\n"
"Note: float16 support is experimental, use at your own risk.",
EnumStr('float64', 'float32', 'float16',
convert=floatX_convert,),
)
AddConfigVar('warn_float64',
"Do an action when a tensor variable with float64 dtype is"
" created. They can't be run on the GPU with the current(old)"
" gpu back-end and are slow with gamer GPUs.",
EnumStr('ignore', 'warn', 'raise', 'pdb'),
in_c_key=False,
)
AddConfigVar('cast_policy',
'Rules for implicit type casting',
EnumStr('custom', 'numpy+floatX',
# The 'numpy' policy was originally planned to provide a
# smooth transition from numpy. It was meant to behave the
# same as numpy+floatX, but keeping float64 when numpy
# would. However the current implementation of some cast
# mechanisms makes it a bit more complex to add than what
# was expected, so it is currently not available.
# numpy,
),
)
# python 2.* define int / int to return int and int // int to return int.
# python 3* define int / int to return float and int // int to return int.
# numpy 1.6.1 behaves as python 2.*. I think we should not change it faster
# than numpy. When we will do the transition, we should create an int_warn
# and floatX_warn option.
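# Illustrative sketch (not part of the original file): with two int32 scalars
#     x, y = theano.tensor.iscalars('x', 'y')
# the default 'int' policy keeps Python 2 semantics for x / y (7 / 2 == 3),
# 'floatX' casts the result to config.floatX (7 / 2 == 3.5), and 'raise'
# refuses the ambiguous division outright.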
AddConfigVar('int_division',
"What to do when one computes x / y, where both x and y are of "
"integer types",
EnumStr('int', 'raise', 'floatX'),
in_c_key=False)
# gpu means let the driver select the gpu. Needed in case of gpu in
# exclusive mode.
# gpuX mean use the gpu number X.
class DeviceParam(ConfigParam):
def __init__(self, default, *options, **kwargs):
self.default = default
def filter(val):
if val.startswith('cpu') or val.startswith('gpu') \
or val.startswith('opencl') or val.startswith('cuda'):
return val
else:
raise ValueError(('Invalid value ("%s") for configuration '
'variable "%s". Valid options start with '
'one of "cpu", "gpu", "opencl", "cuda"'
% (val, self.fullname)))
over = kwargs.get("allow_override", True)
super(DeviceParam, self).__init__(default, filter, over)
def __str__(self):
return '%s (cpu, gpu*, opencl*, cuda*) ' % (self.fullname,)
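# For illustration (not part of the original file): accepted values include
# 'cpu', 'gpu', 'gpu0' (old CUDA back-end) and 'cuda0' or 'opencl0:0'
# (libgpuarray back-end); the exact device names depend on the back-end used.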
AddConfigVar(
'device',
("Default device for computations. If gpu*, change the default to try "
"to move computation to it and to put shared variable of float32 "
"on it. Do not use upper case letters, only lower case even if "
"NVIDIA use capital letters."),
DeviceParam('cpu', allow_override=False),
in_c_key=False,)
AddConfigVar('gpuarray.init_device',
"""
Device to initialize for gpuarray use without moving
computations automatically.
""",
StrParam(''),
in_c_key=False)
AddConfigVar(
'init_gpu_device',
("Initialize the gpu device to use, works only if device=cpu. "
"Unlike 'device', setting this option will NOT move computations, "
"nor shared variables, to the specified GPU. "
"It can be used to run GPU-specific tests on a particular GPU."),
EnumStr('', 'gpu',
'gpu0', 'gpu1', 'gpu2', 'gpu3',
'gpu4', 'gpu5', 'gpu6', 'gpu7',
'gpu8', 'gpu9', 'gpu10', 'gpu11',
'gpu12', 'gpu13', 'gpu14', 'gpu15',
allow_override=False),
in_c_key=False)
AddConfigVar(
'force_device',
"Raise an error if we can't use the specified device",
BoolParam(False, allow_override=False),
in_c_key=False)
AddConfigVar(
'print_active_device',
"Print active device at when the GPU device is initialized.",
BoolParam(True, allow_override=False),
in_c_key=False)
# This flag determines whether or not to raise error/warning message if
# there is a CPU Op in the computational graph.
AddConfigVar(
'assert_no_cpu_op',
"Raise an error/warning if there is a CPU op in the computational graph.",
EnumStr('ignore', 'warn', 'raise', 'pdb', allow_override=True),
in_c_key=False)
# Do not add FAST_RUN_NOGC to this list (nor any other ALL CAPS shortcut).
# The way to get FAST_RUN_NOGC is with the flag 'linker=c|py_nogc'.
# The old all capital letter way of working is deprecated as it is not
# scalable.
# Also, please be careful not to modify the first item in the enum when adding
# new modes, since it is the default mode.
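# Illustration (not part of the original file): any of these defaults can be
# overridden per process, e.g.
#     THEANO_FLAGS='mode=FAST_RUN,linker=c|py_nogc,floatX=float32' python script.py
# or through the [global] section of ~/.theanorc.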
AddConfigVar(
'mode',
"Default compilation mode",
EnumStr('Mode', 'ProfileMode', 'DebugMode', 'FAST_RUN',
'FAST_COMPILE', 'PROFILE_MODE', 'DEBUG_MODE'),
in_c_key=False)
param = "g++"
# Test whether or not g++ is present: disable C code if it is not.
try:
rc = call_subprocess_Popen(['g++', '-v'])
except OSError:
rc = 1
if rc != 0:
param = ""
# On Mac we test for 'clang++' and use it by default
if sys.platform == 'darwin':
try:
rc = call_subprocess_Popen(['clang++', '-v'])
if rc == 0:
param = "clang++"
except OSError:
pass
# Try to find the full compiler path from the name
if param != "":
import distutils.spawn
newp = distutils.spawn.find_executable(param)
if newp is not None:
param = newp
del newp
del distutils
AddConfigVar('cxx',
"The C++ compiler to use. Currently only g++ is"
" supported, but supporting additional compilers should not be "
"too difficult. "
"If it is empty, no C++ code is compiled.",
StrParam(param),
in_c_key=False)
del param
if rc == 0 and config.cxx != "":
# Keep the default linker the same as the one for the mode FAST_RUN
AddConfigVar('linker',
("Default linker used if the theano flags mode is Mode "
"or ProfileMode(deprecated)"),
EnumStr('cvm', 'c|py', 'py', 'c', 'c|py_nogc',
'vm', 'vm_nogc', 'cvm_nogc'),
in_c_key=False)
else:
# g++ is not present or the user disabled it,
# linker should default to python only.
AddConfigVar('linker',
("Default linker used if the theano flags mode is Mode "
"or ProfileMode(deprecated)"),
EnumStr('vm', 'py', 'vm_nogc'),
in_c_key=False)
    try:
        # If the user provided an empty value for cxx, do not warn.
        theano.configparser.fetch_val_for_key('cxx')
    except KeyError:
        _logger.warning(
            'g++ not detected ! Theano will be unable to execute '
            'optimized C-implementations (for both CPU and GPU) and will '
            'default to Python implementations. Performance will be severely '
            'degraded. To remove this warning, set Theano flags cxx to an '
            'empty string.')
# Keep the default value the same as the one for the mode FAST_RUN
AddConfigVar('allow_gc',
"Do we default to delete intermediate results during Theano"
" function calls? Doing so lowers the memory requirement, but"
" asks that we reallocate memory at the next function call."
" This is implemented for the default linker, but may not work"
" for all linkers.",
BoolParam(True),
in_c_key=False)
# Keep the default optimizer the same as the one for the mode FAST_RUN
AddConfigVar(
'optimizer',
("Default optimizer. If not None, will use this linker with the Mode "
"object (not ProfileMode(deprecated) or DebugMode)"),
EnumStr('fast_run', 'merge', 'fast_compile', 'None'),
in_c_key=False)
AddConfigVar('optimizer_verbose',
"If True, we print all optimization being applied",
BoolParam(False),
in_c_key=False)
AddConfigVar(
'on_opt_error',
("What to do when an optimization crashes: warn and skip it, raise "
"the exception, or fall into the pdb debugger."),
EnumStr('warn', 'raise', 'pdb', 'ignore'),
in_c_key=False)
def safe_no_home(home):
"""
Make sure the user is not attempting to use `config.home`.
This config option was removed in Thenao 0.5 since it was redundant with
`config.base_compiledir`. This filter function ensures people who were
setting the location of their compilation directory through `config.home`
switch to `config.basecompiledir` instead, by raising an error when
`config.home` is used.
"""
if home:
raise RuntimeError(
'The `config.home` option has been removed and should not be '
'used anymore. Please set the `config.base_compiledir` option '
'instead (for instance to: %s)' %
os.path.join(home, '.theano'))
return True
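# Illustrative sketch (not part of the original file): the compilation cache
# location is configured through base_compiledir instead, e.g. in ~/.theanorc:
#     [global]
#     base_compiledir = /home/me/.theano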
AddConfigVar(
'home',
"This config option was removed in 0.5: do not use it!",
ConfigParam('', allow_override=False, filter=safe_no_home),
in_c_key=False)
AddConfigVar(
'nocleanup',
"Suppress the deletion of code files that did not compile cleanly",
BoolParam(False),
in_c_key=False)
AddConfigVar('on_unused_input',
"What to do if a variable in the 'inputs' list of "
" theano.function() is not used in the graph.",
EnumStr('raise', 'warn', 'ignore'),
in_c_key=False)
# This flag is used when we import Theano to initialize global variables.
# So changing it after import will not modify these global variables.
# This could be done differently... but for now we simply prevent it from being
# changed at runtime.
AddConfigVar(
'tensor.cmp_sloppy',
"Relax tensor._allclose (0) not at all, (1) a bit, (2) more",
IntParam(0, lambda i: i in (0, 1, 2), allow_override=False),
in_c_key=False)
AddConfigVar(
'tensor.local_elemwise_fusion',
("Enable or not in fast_run mode(fast_run optimization) the elemwise "
"fusion optimization"),
BoolParam(True),
in_c_key=False)
AddConfigVar(
'gpu.local_elemwise_fusion',
("Enable or not in fast_run mode(fast_run optimization) the gpu "
"elemwise fusion optimization"),
BoolParam(True),
in_c_key=False)
# http://developer.amd.com/CPU/LIBRARIES/LIBM/Pages/default.aspx
AddConfigVar(
'lib.amdlibm',
"Use amd's amdlibm numerical library",
BoolParam(False))
AddConfigVar(
'gpuelemwise.sync',
"when true, wait that the gpu fct finished and check it error code.",
BoolParam(True),
in_c_key=False)
AddConfigVar(
'traceback.limit',
"The number of stack to trace. -1 mean all.",
# We default to 6 to be able to know where v1 + v2 is created in the
# user script. The bigger this number is, the more run time it takes.
# We need to default to 7 to support theano.tensor.tensor(...).
IntParam(7),
in_c_key=False)
AddConfigVar('experimental.mrg',
"Another random number generator that work on the gpu",
BoolParam(False))
AddConfigVar('experimental.unpickle_gpu_on_cpu',
"Allow unpickling of pickled CudaNdarrays as numpy.ndarrays. "
"This is useful, if you want to open a CudaNdarray without "
"having cuda installed. "
"If you have cuda installed, this will force unpickling to "
"be done on the cpu to numpy.ndarray. "
"Please be aware that this may get you access to the data, "
"however, trying to unpicke gpu functions will not succeed. "
"This flag is experimental and may be removed any time, when "
"gpu<>cpu transparency is solved. "
"The flag 'device' MUST be set to 'cpu'.",
BoolParam(default=False),
in_c_key=False)
AddConfigVar('experimental.unpickle_shared_gpu_on_cpu',
"When True, allow unpickling of pickled CudaNdarraySharedVariable"
" as TensorSharedVariable. "
"This is useful, if you want to load a model saved on GPU "
"when no GPU is available. "
"This do not solve all problems! It only works if you pickled "
"only Theano shared variables. If you pickle a graph or function "
"based on those shared variable, they won't work correctly. "
"Please be aware that this a work around that only works in some "
"condition. This flag is experimental and may be removed any "
"time, when gpu<>cpu transparency is solved. "
"The flag 'device' MUST be set to 'cpu'.",
BoolParam(default=False),
in_c_key=False)
AddConfigVar('numpy.seterr_all',
("Sets numpy's behaviour for floating-point errors, ",
"see numpy.seterr. "
"'None' means not to change numpy's default, which can be "
"different for different numpy releases. "
"This flag sets the default behaviour for all kinds of floating-"
"point errors, its effect can be overriden for specific errors "
"by the following flags: seterr_divide, seterr_over, "
"seterr_under and seterr_invalid."),
EnumStr('ignore', 'warn', 'raise', 'call', 'print', 'log', 'None',
allow_override=False),
in_c_key=False)
AddConfigVar('numpy.seterr_divide',
("Sets numpy's behavior for division by zero, see numpy.seterr. "
"'None' means using the default, defined by numpy.seterr_all."),
EnumStr('None', 'ignore', 'warn', 'raise', 'call', 'print', 'log',
allow_override=False),
in_c_key=False)
AddConfigVar('numpy.seterr_over',
("Sets numpy's behavior for floating-point overflow, "
"see numpy.seterr. "
"'None' means using the default, defined by numpy.seterr_all."),
EnumStr('None', 'ignore', 'warn', 'raise', 'call', 'print', 'log',
allow_override=False),
in_c_key=False)
AddConfigVar('numpy.seterr_under',
("Sets numpy's behavior for floating-point underflow, "
"see numpy.seterr. "
"'None' means using the default, defined by numpy.seterr_all."),
EnumStr('None', 'ignore', 'warn', 'raise', 'call', 'print', 'log',
allow_override=False),
in_c_key=False)
AddConfigVar('numpy.seterr_invalid',
("Sets numpy's behavior for invalid floating-point operation, "
"see numpy.seterr. "
"'None' means using the default, defined by numpy.seterr_all."),
EnumStr('None', 'ignore', 'warn', 'raise', 'call', 'print', 'log',
allow_override=False),
in_c_key=False)
###
# To disable some warning about old bug that are fixed now.
###
AddConfigVar('warn.ignore_bug_before',
("If 'None', we warn about all Theano bugs found by default. "
"If 'all', we don't warn about Theano bugs found by default. "
"If a version, we print only the warnings relative to Theano "
"bugs found after that version. "
"Warning for specific bugs can be configured with specific "
"[warn] flags."),
EnumStr('0.6', 'None', 'all', '0.3', '0.4', '0.4.1', '0.5', '0.7',
allow_override=False),
in_c_key=False)
def warn_default(version):
"""
Return True iff we should warn about bugs fixed after a given version.
"""
if config.warn.ignore_bug_before == 'None':
return True
if config.warn.ignore_bug_before == 'all':
return False
if config.warn.ignore_bug_before >= version:
return False
return True
AddConfigVar('warn.argmax_pushdown_bug',
("Warn if in past version of Theano we generated a bug with the "
"theano.tensor.nnet.nnet.local_argmax_pushdown optimization. "
"Was fixed 27 may 2010"),
BoolParam(warn_default('0.3')),
in_c_key=False)
AddConfigVar('warn.gpusum_01_011_0111_bug',
("Warn if we are in a case where old version of Theano had a "
"silent bug with GpuSum pattern 01,011 and 0111 when the first "
"dimensions was bigger then 4096. Was fixed 31 may 2010"),
BoolParam(warn_default('0.3')),
in_c_key=False)
AddConfigVar('warn.sum_sum_bug',
("Warn if we are in a case where Theano version between version "
"9923a40c7b7a and the 2 august 2010 (fixed date), generated an "
"error in that case. This happens when there are 2 consecutive "
"sums in the graph, bad code was generated. "
"Was fixed 2 August 2010"),
BoolParam(warn_default('0.3')),
in_c_key=False)
AddConfigVar('warn.sum_div_dimshuffle_bug',
("Warn if previous versions of Theano (between rev. "
"3bd9b789f5e8, 2010-06-16, and cfc6322e5ad4, 2010-08-03) "
"would have given incorrect result. This bug was triggered by "
"sum of division of dimshuffled tensors."),
BoolParam(warn_default('0.3')),
in_c_key=False)
AddConfigVar(
'warn.subtensor_merge_bug',
"Warn if previous versions of Theano (before 0.5rc2) could have given "
"incorrect results when indexing into a subtensor with negative "
"stride (for instance, for instance, x[a:b:-1][c]).",
BoolParam(warn_default('0.5')),
in_c_key=False)
AddConfigVar(
'warn.gpu_set_subtensor1',
"Warn if previous versions of Theano (before 0.6) could have given "
"incorrect results when moving to the gpu "
"set_subtensor(x[int vector], new_value)",
BoolParam(warn_default('0.6')),
in_c_key=False)
AddConfigVar(
'warn.vm_gc_bug',
"There was a bug that existed in the default Theano configuration,"
" only in the development version between July 5th 2012"
" and July 30th 2012. This was not in a released version."
" If your code was affected by this bug, a warning"
" will be printed during the code execution if you use the"
" `linker=vm,vm.lazy=True,warn.vm_gc_bug=True` Theano flags."
" This warning is disabled by default as the bug was not released.",
BoolParam(False),
in_c_key=False)
AddConfigVar('warn.signal_conv2d_interface',
("Warn we use the new signal.conv2d() when its interface"
" changed mid June 2014"),
BoolParam(warn_default('0.7')),
in_c_key=False)
AddConfigVar('warn.reduce_join',
('Your current code is fine, but Theano versions '
'prior to 0.7 (or this development version) '
'might have given an incorrect result. '
'To disable this warning, set the Theano flag '
'warn.reduce_join to False. The problem was an '
'optimization, that modified the pattern '
'"Reduce{scalar.op}(Join(axis=0, a, b), axis=0)", '
'did not check the reduction axis. So if the '
'reduction axis was not 0, you got a wrong answer.'),
BoolParam(warn_default('0.7')),
in_c_key=False)
AddConfigVar('warn.inc_set_subtensor1',
('Warn if previous versions of Theano (before 0.7) could have '
'given incorrect results for inc_subtensor and set_subtensor '
'when using some patterns of advanced indexing (indexing with '
'one vector or matrix of ints).'),
BoolParam(warn_default('0.7')),
in_c_key=False)
AddConfigVar(
'compute_test_value',
("If 'True', Theano will run each op at graph build time, using "
"Constants, SharedVariables and the tag 'test_value' as inputs "
"to the function. This helps the user track down problems in the "
"graph before it gets optimized."),
EnumStr('off', 'ignore', 'warn', 'raise', 'pdb'),
in_c_key=False)
AddConfigVar('compute_test_value_opt',
("For debugging Theano optimization only."
" Same as compute_test_value, but is used"
" during Theano optimization"),
EnumStr('off', 'ignore', 'warn', 'raise', 'pdb'),
in_c_key=False)
AddConfigVar('unpickle_function',
("Replace unpickled Theano functions with None. "
"This is useful to unpickle old graphs that pickled"
" them when it shouldn't"),
BoolParam(True),
in_c_key=False)
AddConfigVar(
'reoptimize_unpickled_function',
"Re-optimize the graph when a theano function is unpickled from the disk.",
BoolParam(True, allow_override=True),
in_c_key=False)
"""Note to developers:
Generally your exceptions should use an apply node's __str__
method when exception_verbosity == 'low'. When exception_verbosity
== 'high', you should include a call to printing.min_informative_str
on all important apply nodes.
"""
AddConfigVar(
'exception_verbosity',
"If 'low', the text of exceptions will generally refer "
"to apply nodes with short names such as "
"Elemwise{add_no_inplace}. If 'high', some exceptions "
"will also refer to apply nodes with long descriptions "
""" like:
A. Elemwise{add_no_inplace}
B. log_likelihood_v_given_h
C. log_likelihood_h""",
EnumStr('low', 'high'),
in_c_key=False)
# Test if the env variable is set
var = os.getenv('OMP_NUM_THREADS', None)
if var:
try:
int(var)
except ValueError:
raise TypeError("The environment variable OMP_NUM_THREADS"
" should be a number, got '%s'." % var)
else:
default_openmp = not int(var) == 1
else:
# Check the number of cores availables.
count = cpuCount()
if count == -1:
_logger.warning("We are not able to detect the number of CPU cores."
" We disable openmp by default. To remove this"
" warning, set the environment variable"
" OMP_NUM_THREADS to the number of threads you"
" want theano to use.")
default_openmp = count > 1
# Disable it by default for now as currently only the ConvOp supports
# it, and this causes slowdown by default as we do not disable it for
# too small convolution.
default_openmp = False
AddConfigVar('openmp',
"Allow (or not) parallel computation on the CPU with OpenMP. "
"This is the default value used when creating an Op that "
"supports OpenMP parallelization. It is preferable to define it "
"via the Theano configuration file ~/.theanorc or with the "
"environment variable THEANO_FLAGS. Parallelization is only "
"done for some operations that implement it, and even for "
"operations that implement parallelism, each operation is free "
"to respect this flag or not. You can control the number of "
"threads used with the environment variable OMP_NUM_THREADS."
" If it is set to 1, we disable openmp in Theano by default.",
BoolParam(default_openmp),
in_c_key=False,
)
AddConfigVar('openmp_elemwise_minsize',
"If OpenMP is enabled, this is the minimum size of vectors "
"for which the openmp parallelization is enabled "
"in element wise ops.",
IntParam(200000),
in_c_key=False,
)
AddConfigVar(
'check_input',
"Specify if types should check their input in their C code. "
"It can be used to speed up compilation, reduce overhead "
"(particularly for scalars) and reduce the number of generated C "
"files.",
BoolParam(True))
AddConfigVar(
'cache_optimizations',
"WARNING: work in progress, does not work yet. "
"Specify if the optimization cache should be used. This cache will "
"any optimized graph and its optimization. Actually slow downs a lot "
"the first optimization, and could possibly still contains some bugs. "
"Use at your own risks.",
BoolParam(False))
| mit | -677,660,655,487,862,100 | 38.5125 | 79 | 0.604951 | false |
cfc603/django-twilio-sms-models | django_twilio_sms/models.py | 1 | 12882 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
from django.conf import settings
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django_twilio.client import twilio_client
from django_twilio.models import Caller
from twilio.rest.exceptions import TwilioRestException
from .signals import response_message, unsubscribe_signal
from .utils import AbsoluteURI
# Abstract Models
class CreatedUpdated(models.Model):
date_created = models.DateTimeField(auto_now_add=True)
date_updated = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
@python_2_unicode_compatible
class Sid(CreatedUpdated):
sid = models.CharField(max_length=34, primary_key=True)
def __str__(self):
return '{}'.format(self.sid)
class Meta:
abstract = True
# Message Model ForeignKeys
class Account(Sid):
# account type choices
TRIAL = 0
FULL = 1
ACCOUNT_TYPE_CHOICES = (
(TRIAL, 'Trial'),
(FULL, 'Full'),
)
# status choices
ACTIVE = 0
SUSPENDED = 1
CLOSED = 2
STATUS_CHOICES = (
(ACTIVE, 'active'),
(SUSPENDED, 'suspended'),
(CLOSED, 'closed'),
)
friendly_name = models.CharField(max_length=64)
account_type = models.PositiveSmallIntegerField(
choices=ACCOUNT_TYPE_CHOICES
)
status = models.PositiveSmallIntegerField(choices=STATUS_CHOICES)
owner_account_sid = models.ForeignKey('self', null=True)
@classmethod
def get_account_type_choice(cls, account_type_display):
for choice in cls.ACCOUNT_TYPE_CHOICES:
if account_type_display == choice[1]:
return choice[0]
@classmethod
def get_status_choice(cls, status_display):
for choice in cls.STATUS_CHOICES:
if status_display == choice[1]:
return choice[0]
@classmethod
def get_or_create(cls, account_sid=None, account=None):
if not account_sid:
account_sid = account.sid
try:
return cls.objects.get(sid=account_sid)
except cls.DoesNotExist:
account_obj = cls(sid=account_sid)
account_obj.sync_twilio_account(account)
return account_obj
@property
def twilio_account(self):
return twilio_client.accounts.get(self.sid)
def sync_twilio_account(self, account=None):
if not account:
account = self.twilio_account
self.friendly_name = account.friendly_name
self.account_type = self.get_account_type_choice(account.type)
self.status = self.get_status_choice(account.status)
if account.sid != account.owner_account_sid:
self.owner_account_sid = Account.get_or_create(
account.owner_account_sid
)
self.save()
@python_2_unicode_compatible
class ApiVersion(models.Model):
date = models.DateField(unique=True)
def __str__(self):
return '{}'.format(self.date)
@classmethod
def get_or_create(cls, message_date):
api_version, created = cls.objects.get_or_create(
date=message_date
)
return api_version
@python_2_unicode_compatible
class Currency(models.Model):
code = models.CharField(max_length=3, primary_key=True)
def __str__(self):
return '{}'.format(self.code)
@classmethod
def get_or_create(cls, message_price_unit):
currency, created = cls.objects.get_or_create(code=message_price_unit)
return currency
@python_2_unicode_compatible
class Error(models.Model):
code = models.CharField(max_length=5, primary_key=True)
message = models.CharField(max_length=255)
def __str__(self):
return '{}'.format(self.code)
@classmethod
def get_or_create(cls, message_error_code, message_error_message):
error, created = cls.objects.get_or_create(
code=message_error_code,
defaults={'message': message_error_message}
)
return error
class MessagingService(Sid):
pass
@classmethod
def get_or_create(cls, messaging_service_sid):
messaging_service, created = cls.objects.get_or_create(
sid=messaging_service_sid
)
return messaging_service
@python_2_unicode_compatible
class PhoneNumber(CreatedUpdated):
caller = models.OneToOneField(Caller)
unsubscribed = models.BooleanField(default=False)
def __str__(self):
return '{}'.format(self.caller.phone_number)
@classmethod
def get_or_create(cls, phone_number, unsubscribed=False):
if isinstance(phone_number, cls):
return phone_number
caller, created = Caller.objects.get_or_create(
phone_number=phone_number
)
phone_number_obj, create = cls.objects.get_or_create(
caller=caller, defaults={'unsubscribed': unsubscribed}
)
return phone_number_obj
@property
def as_e164(self):
return self.caller.phone_number.as_e164
def subscribe(self):
self.unsubscribed = False
self.save()
def unsubscribe(self):
self.unsubscribed = True
self.save()
class Message(Sid):
# status choices
ACCEPTED = 0
QUEUED = 1
SENDING = 2
SENT = 3
RECEIVING = 4
RECEIVED = 5
DELIVERED = 6
UNDELIVERED = 7
FAILED = 8
UNKNOWN = 9
STATUS_CHOICES = (
(ACCEPTED, 'accepted'),
(QUEUED, 'queued'),
(SENDING, 'sending'),
(SENT, 'sent'),
(RECEIVING, 'receiving'),
(RECEIVED, 'received'),
(DELIVERED, 'delivered'),
(UNDELIVERED, 'undelivered'),
(FAILED, 'failed'),
)
# direction choices
INBOUND = 0
OUTBOUND_API = 1
OUTBOUND_CALL = 2
OUTBOUND_REPLY = 3
DIRECTION_CHOICES = (
(INBOUND, 'inbound'),
(OUTBOUND_API, 'outbound-api'),
(OUTBOUND_CALL, 'outbound-call'),
(OUTBOUND_REPLY, 'outbound-reply'),
)
UNSUBSCRIBE_MESSAGES = [
'STOP', 'STOPALL', 'UNSUBSCRIBE', 'CANCEL', 'END', 'QUIT'
]
SUBSCRIBE_MESSAGES = ['START', 'YES']
date_sent = models.DateTimeField(null=True)
account = models.ForeignKey(Account)
messaging_service = models.ForeignKey(MessagingService, null=True)
from_phone_number = models.ForeignKey(PhoneNumber, related_name='to_phone')
to_phone_number = models.ForeignKey(PhoneNumber, related_name='from_phone')
body = models.CharField(max_length=160)
num_media = models.PositiveSmallIntegerField()
num_segments = models.PositiveSmallIntegerField()
status = models.PositiveSmallIntegerField(
choices=STATUS_CHOICES, default=QUEUED
)
error = models.ForeignKey(Error, null=True, related_name='error')
direction = models.PositiveSmallIntegerField(choices=DIRECTION_CHOICES)
price = models.DecimalField(max_digits=6, decimal_places=5)
currency = models.ForeignKey(Currency)
api_version = models.ForeignKey(ApiVersion)
@classmethod
def get_direction_choice(cls, direction_display):
for choice in cls.DIRECTION_CHOICES:
if direction_display == choice[1]:
return choice[0]
@classmethod
def get_status_choice(cls, status_display):
for choice in cls.STATUS_CHOICES:
if status_display == choice[1]:
return choice[0]
@classmethod
def get_or_create(cls, message_sid=None, message=None):
if not message_sid:
message_sid = message.sid
try:
return (cls.objects.get(sid=message_sid), False)
except cls.DoesNotExist:
message_obj = cls(sid=message_sid)
message_obj.sync_twilio_message(message)
return (message_obj, True)
@classmethod
def send_message(cls, body, to, from_=settings.TWILIO_DEFAULT_CALLERID):
to_phone_number = PhoneNumber.get_or_create(to)
from_phone_number = PhoneNumber.get_or_create(from_)
twilio_message = twilio_client.messages.create(
body=body,
to=to_phone_number.as_e164,
from_=from_phone_number.as_e164,
status_callback=cls.get_status_callback()
)
return cls.get_or_create(message=twilio_message)
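    # Illustrative usage (not part of the original module); assumes the Twilio
    # credentials and TWILIO_DEFAULT_CALLERID are configured in settings:
    #     message, created = Message.send_message(
    #         body='Your appointment is tomorrow', to='+15005550006')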
@property
def twilio_message(self):
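        # Fetch this message from the Twilio REST API, retrying on
        # TwilioRestException up to DJANGO_TWILIO_SMS_MAX_RETRIES times
        # (default 5), sleeping DJANGO_TWILIO_SMS_RETRY_SLEEP seconds
        # (default 0.5) between attempts.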
max_retries = getattr(settings, 'DJANGO_TWILIO_SMS_MAX_RETRIES', 5)
retry_sleep = getattr(settings, 'DJANGO_TWILIO_SMS_RETRY_SLEEP', .5)
retries = 0
while True:
try:
return twilio_client.messages.get(self.sid)
except TwilioRestException:
if retries < max_retries:
time.sleep(retry_sleep)
retries = retries + 1
else:
raise
@staticmethod
def get_status_callback():
absolute_uri = AbsoluteURI('django_twilio_sms', 'callback_view')
return absolute_uri.get_absolute_uri()
def check_for_subscription_message(self):
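        # Standard opt-out handling for inbound messages: STOP-style keywords
        # mark the sender as unsubscribed, START/YES re-subscribe them, and
        # both paths emit unsubscribe_signal so listeners can react.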
if self.direction is self.INBOUND:
body = self.body.upper().strip()
if body in self.UNSUBSCRIBE_MESSAGES:
self.from_phone_number.unsubscribe()
unsubscribe_signal.send_robust(
sender=self.__class__, message=self, unsubscribed=True
)
elif body in self.SUBSCRIBE_MESSAGES:
self.from_phone_number.subscribe()
unsubscribe_signal.send_robust(
sender=self.__class__, message=self, unsubscribed=False
)
def send_response_message(self):
        if self.direction == self.INBOUND:
if not self.from_phone_number.unsubscribed:
action = Action.get_action(self.body)
Message.send_message(
body=action.get_active_response().body,
to=self.from_phone_number,
from_=self.to_phone_number
)
response_message.send_robust(
sender=self.__class__, action=action, message=self
)
def sync_twilio_message(self, message=None):
if not message:
message = self.twilio_message
self.date_sent = message.date_sent
self.account = Account.get_or_create(message.account_sid)
if message.messaging_service_sid:
self.messaging_service = MessagingService.get_or_create(
message.messaging_service_sid
)
self.num_media = message.num_media
self.num_segments = message.num_segments
if message.status:
self.status = self.get_status_choice(message.status)
else:
self.status = self.UNKNOWN
if message.error_code:
self.error = Error.get_or_create(
message.error_code, message.error_message
)
self.direction = self.get_direction_choice(message.direction)
self.price = message.price or '0.0'
self.currency = Currency.get_or_create(message.price_unit)
self.api_version = ApiVersion.get_or_create(message.api_version)
self.from_phone_number = PhoneNumber.get_or_create(message.from_)
self.to_phone_number = PhoneNumber.get_or_create(message.to)
self.body = message.body
self.check_for_subscription_message()
self.save()
@python_2_unicode_compatible
class Action(CreatedUpdated):
name = models.CharField(max_length=50, unique=True)
active = models.BooleanField(default=True)
def __str__(self):
return '{}'.format(self.name)
@classmethod
def get_action(cls, message_body):
try:
return cls.objects.get(
name=message_body.strip().upper(), active=True
)
except cls.DoesNotExist:
return cls.objects.get(name='UNKNOWN', active=True)
def get_active_response(self):
return self.response_set.filter(active=True)[0]
def save(self, *args, **kwargs):
self.name = self.name.upper()
super(Action, self).save(*args, **kwargs)
@python_2_unicode_compatible
class Response(CreatedUpdated):
body = models.CharField(max_length=160)
action = models.ForeignKey(Action)
active = models.BooleanField(default=True)
def __str__(self):
return 'Response for {}'.format(self.action)
def save(self, *args, **kwargs):
if self.active:
try:
current = Response.objects.get(action=self.action, active=True)
if self != current:
current.active = False
current.save()
except Response.DoesNotExist:
pass
super(Response, self).save(*args, **kwargs)
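# Illustrative usage sketch (added commentary, not part of the original
# module; the destination number below is only a placeholder):
#
#     outgoing, created = Message.send_message(
#         body='Thanks for subscribing!', to='+15005550006')
#     incoming, created = Message.get_or_create(message_sid='SMxxxxxxxx')
#
# send_message() resolves both phone numbers via PhoneNumber.get_or_create,
# sends through the Twilio REST client, and mirrors the result locally.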
| bsd-3-clause | 5,583,914,624,737,663,000 | 28.682028 | 79 | 0.613957 | false |
totalgood/twote | twote/models_calendar.py | 1 | 1157 | from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
class Event(models.Model):
'''
    This model represents a one-time event
'''
title = models.CharField(max_length=255)
description = models.TextField()
start = models.DateTimeField()
end = models.DateTimeField(
blank=True,
# validators=[validate_after]
)
    # TODO: validate in the form/view that the end time is later than the start time.
location = models.CharField(max_length=100)
creator = models.ForeignKey(User, null=True)
created = models.DateTimeField(auto_now_add=True)
last_updated = models.DateTimeField(auto_now=True)
    def save(self, *args, **kwargs):
        if not self.end:
            self.end = self.start + timezone.timedelta(hours=1)
        if self.end - self.start < timezone.timedelta(0):
            raise ValidationError(
                'end time must occur after start time, but it currently '
                'occurs {} earlier'.format(self.start - self.end))
        super(Event, self).save(*args, **kwargs)
def __str__(self):
return self.title
| mit | -7,334,137,260,139,386,000 | 34.060606 | 130 | 0.675886 | false |
compmem/ptsa | ptsa/data/hdf5wrapper.py | 1 | 10253 | #emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
#ex: set sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See the COPYING file distributed along with the PTSA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
# global imports
import numpy as np
import h5py
# local imports
from basewrapper import BaseWrapper
from timeseries import TimeSeries
class HDF5Wrapper(BaseWrapper):
"""
Interface to data stored in an HDF5 file.
"""
def __init__(self, filepath, dataset_name='data',
annotations_name='annotations',
channel_info_name='channel_info',
data=None, file_dtype=None, apply_gain=True, gain_buffer=.005,
samplerate=None, nchannels=None, nsamples=None,
annotations=None, channel_info=None, **hdf5opts):
"""
Initialize the interface to the data.
Much documentation is needed here.
For example, here is one way to create an HDF5 dataset from a
TimeSeries instance:
HDF5Wrapper('data.hdf5', data=data, compression='gzip')
Now let's say the TimeSeries is float64, but you want to save
space (and lose significant digits), you can specify a
file_dtype, which will apply a gain factor to ensure you
retain as much data accuracy as possible. Here's how you can
save the data in int16:
HDF5Wrapper('data.hdf5', data=data, file_dtype=np.int16, compression='gzip')
"""
# set up the basic params of the data
self.filepath = filepath
self.dataset_name = dataset_name
self.annotations_name = annotations_name
self.channel_info_name = channel_info_name
self.apply_gain = apply_gain
self.gain_buffer = gain_buffer
self.gain = None
self.hdf5opts = hdf5opts
self.file_dtype = file_dtype
self.data_dtype = None
        # create the dataset if initial data was provided
        if data is not None:
# must provide samplerate and data
# connect to the file and get the dataset
f = h5py.File(self.filepath,'a')
# use the data to create a dataset
self.data_dtype = data.dtype
d = f.create_dataset(self.dataset_name,
data=self._data_to_file(data),
**hdf5opts)
d.attrs['data_dtype'] = data.dtype.char
d.attrs['gain'] = self.gain
            if 'samplerate' not in d.attrs:
# must have provided samplerate
if isinstance(data, TimeSeries):
# get the samplerate from the TimeSeries
samplerate = data.samplerate
if samplerate is None:
raise ValueError("You must specify a samplerate " +
"if the dataset does not already exist.")
# set the samplerate
d.attrs['samplerate'] = samplerate
# create annotations if necessary
            if annotations is not None:
if self.annotations_name in f:
raise ValueError("Told to create dataset annotations, " +
"but %s already exists." %
self.annotations_name)
a = f.create_dataset(self.annotations_name,
data=annotations, **hdf5opts)
# create channel_info if necessary
            if channel_info is not None:
if self.channel_info_name in f:
raise ValueError("Told to create dataset channel_info, " +
"but %s already exists." %
self.channel_info_name)
c = f.create_dataset(self.channel_info_name,
data=channel_info, **hdf5opts)
# close the hdf5 file
f.close()
else:
# connect to the file and get info
f = h5py.File(self.filepath,'r')
d = f[self.dataset_name]
self.data_dtype = np.dtype(d.attrs['data_dtype'])
self.file_dtype = d.dtype
            self.gain = d.attrs['gain']
            # close the hdf5 file
            f.close()
def _data_to_file(self, data):
# process the datatypes
if self.file_dtype is None:
# load from data
self.file_dtype = data.dtype
else:
# make sure it's a dtype
if not isinstance(self.file_dtype, np.dtype):
try:
self.file_dtype = np.dtype(self.file_dtype)
                except Exception:
                    raise ValueError("file_dtype should be a numpy dtype.")
# process the gain
if self.gain is None:
# default to 1.0
self.gain = 1.0
# calc it if we are going from float to int
if (self.file_dtype.kind == 'i') and (self.data_dtype.kind == 'f'):
fr = np.iinfo(self.file_dtype).max*2
dr = np.abs(data).max()*2 * (1.+self.gain_buffer)
self.gain = dr/fr
# calc and apply gain if necessary
if self.apply_gain and self.gain != 1.0:
return np.asarray(data/self.gain,dtype=self.file_dtype)
else:
return np.asarray(data,dtype=self.file_dtype)
def _data_from_file(self, data):
        # apply the gain we've already calculated, if necessary
if self.apply_gain and self.gain != 1.0:
return np.asarray(data*self.gain, dtype=self.data_dtype)
else:
return np.asarray(data, dtype=self.data_dtype)
def _get_samplerate(self, channel=None):
# Same samplerate for all channels.
# get the samplerate property of the dataset
f = h5py.File(self.filepath,'r')
data = f[self.dataset_name]
samplerate = data.attrs['samplerate']
f.close()
return samplerate
def _get_nsamples(self,channel=None):
# get the dimensions of the data
f = h5py.File(self.filepath,'r')
data = f[self.dataset_name]
nsamples = data.shape[1]
f.close()
return nsamples
def _get_nchannels(self):
# get the dimensions of the data
f = h5py.File(self.filepath,'r')
data = f[self.dataset_name]
nchannels = data.shape[0]
f.close()
return nchannels
def _get_annotations(self):
        # open the file and read the annotations
f = h5py.File(self.filepath,'r')
if self.annotations_name in f:
annot = f[self.annotations_name][:]
else:
annot = None
f.close()
return annot
def _set_annotations(self, annotations):
        # open the file and replace the stored annotations
f = h5py.File(self.filepath,'a')
if self.annotations_name in f:
del f[self.annotations_name]
a = f.create_dataset(self.annotations_name,
data=annotations, **self.hdf5opts)
f.close()
def _get_channel_info(self):
        # open the file and read the channel info
f = h5py.File(self.filepath,'r')
if self.channel_info_name in f:
chan_info = f[self.channel_info_name][:]
else:
chan_info = None
f.close()
return chan_info
def _set_channel_info(self, channel_info):
        # open the file and replace the stored channel info
f = h5py.File(self.filepath,'a')
if self.channel_info_name in f:
del f[self.channel_info_name]
a = f.create_dataset(self.channel_info_name,
data=channel_info, **self.hdf5opts)
f.close()
def _load_data(self,channels,event_offsets,dur_samp,offset_samp):
"""
"""
# connect to the file and get the dataset
f = h5py.File(self.filepath,'r')
data = f[self.dataset_name]
# allocate for data
eventdata = np.empty((len(channels),len(event_offsets),dur_samp),
dtype=self.data_dtype)*np.nan
# loop over events
for e,evOffset in enumerate(event_offsets):
# set the range
ssamp = offset_samp+evOffset
esamp = ssamp + dur_samp
# check the ranges
if ssamp < 0 or esamp > data.shape[1]:
raise IOError('Event with offset '+str(evOffset)+
' is outside the bounds of the data.')
eventdata[:,e,:] = self._data_from_file(data[channels,ssamp:esamp])
# close the file
f.close()
return eventdata
def append_data(self, data):
"""
Must be all channels.
"""
# connect to the file and get the dataset
f = h5py.File(self.filepath,'a')
# get the dataset (must already exist)
d = f[self.dataset_name]
# check data size
if data.shape[0] != d.shape[0]:
raise ValueError("New data must have the same number of channels: %d." %
d.shape[0])
# reshape to hold new data
cursamp = d.shape[1]
newsamp = data.shape[1]
d.shape = (d.shape[0], cursamp+newsamp)
# append the data
d[:,cursamp:cursamp+newsamp] = self._data_to_file(data)
# close the file
f.close()
def set_channel_data(self, channel, data):
"""
Set the data for an entire channel. Will reshape the nsamples
of the entire dataset to match, throwing out data if smaller.
"""
# connect to the file and get the dataset
f = h5py.File(self.filepath,'a')
# get the dataset (must already exist)
d = f[self.dataset_name]
# reshape if necessary
cursamp = d.shape[1]
newsamp = len(data)
if cursamp != newsamp:
d.shape = (d.shape[0], newsamp)
# set the data
d[channel,:] = self._data_to_file(data)
# close the file
f.close()
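# Minimal usage sketch (added for illustration; the file name and array below
# are arbitrary examples, not part of the original module).
if __name__ == "__main__":
    # create a wrapper backed by a new HDF5 file from a plain float array;
    # samplerate is required because the dataset does not exist yet, and
    # file_dtype=int16 triggers the automatic gain computed in _data_to_file
    fake_data = np.random.randn(4, 1000)
    hw = HDF5Wrapper('example_data.hdf5', data=fake_data, samplerate=200.,
                     file_dtype=np.int16, compression='gzip')
    # reopening the same file recovers dtype, gain and samplerate from the
    # attributes written above
    hr = HDF5Wrapper('example_data.hdf5')
    assert hr.file_dtype == np.dtype(np.int16)
    assert hr._get_samplerate() == 200.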
| gpl-3.0 | 8,240,189,674,893,211,000 | 34.477509 | 84 | 0.531649 | false |
shootstar/novatest | nova/tests/cells/test_cells_rpcapi.py | 1 | 21250 | # Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Cells RPCAPI
"""
from oslo.config import cfg
from nova.cells import rpcapi as cells_rpcapi
from nova import exception
from nova.openstack.common import rpc
from nova import test
CONF = cfg.CONF
CONF.import_opt('topic', 'nova.cells.opts', group='cells')
class CellsAPITestCase(test.TestCase):
"""Test case for cells.api interfaces."""
def setUp(self):
super(CellsAPITestCase, self).setUp()
self.fake_topic = 'fake_topic'
self.fake_context = 'fake_context'
self.flags(topic=self.fake_topic, enable=True, group='cells')
self.cells_rpcapi = cells_rpcapi.CellsAPI()
def _stub_rpc_method(self, rpc_method, result):
call_info = {}
def fake_rpc_method(ctxt, topic, msg, *args, **kwargs):
call_info['context'] = ctxt
call_info['topic'] = topic
call_info['msg'] = msg
return result
self.stubs.Set(rpc, rpc_method, fake_rpc_method)
return call_info
def _check_result(self, call_info, method, args, version=None):
if version is None:
version = self.cells_rpcapi.BASE_RPC_API_VERSION
self.assertEqual(self.fake_context, call_info['context'])
self.assertEqual(self.fake_topic, call_info['topic'])
self.assertEqual(method, call_info['msg']['method'])
msg_version = call_info['msg']['version']
self.assertTrue(isinstance(msg_version, basestring),
"Message version %s is not a string" % msg_version)
self.assertEqual(version, call_info['msg']['version'])
self.assertEqual(args, call_info['msg']['args'])
def test_cast_compute_api_method(self):
fake_cell_name = 'fake_cell_name'
fake_method = 'fake_method'
fake_method_args = (1, 2)
fake_method_kwargs = {'kwarg1': 10, 'kwarg2': 20}
expected_method_info = {'method': fake_method,
'method_args': fake_method_args,
'method_kwargs': fake_method_kwargs}
expected_args = {'method_info': expected_method_info,
'cell_name': fake_cell_name,
'call': False}
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.cast_compute_api_method(self.fake_context,
fake_cell_name, fake_method,
*fake_method_args, **fake_method_kwargs)
self._check_result(call_info, 'run_compute_api_method',
expected_args)
def test_call_compute_api_method(self):
fake_cell_name = 'fake_cell_name'
fake_method = 'fake_method'
fake_method_args = (1, 2)
fake_method_kwargs = {'kwarg1': 10, 'kwarg2': 20}
fake_response = 'fake_response'
expected_method_info = {'method': fake_method,
'method_args': fake_method_args,
'method_kwargs': fake_method_kwargs}
expected_args = {'method_info': expected_method_info,
'cell_name': fake_cell_name,
'call': True}
call_info = self._stub_rpc_method('call', fake_response)
result = self.cells_rpcapi.call_compute_api_method(self.fake_context,
fake_cell_name, fake_method,
*fake_method_args, **fake_method_kwargs)
self._check_result(call_info, 'run_compute_api_method',
expected_args)
self.assertEqual(fake_response, result)
def test_schedule_run_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.schedule_run_instance(
self.fake_context, arg1=1, arg2=2, arg3=3)
expected_args = {'host_sched_kwargs': {'arg1': 1,
'arg2': 2,
'arg3': 3}}
self._check_result(call_info, 'schedule_run_instance',
expected_args)
def test_build_instances(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.build_instances(
self.fake_context, instances=['1', '2'],
image={'fake': 'image'}, arg1=1, arg2=2, arg3=3)
expected_args = {'build_inst_kwargs': {'instances': ['1', '2'],
'image': {'fake': 'image'},
'arg1': 1,
'arg2': 2,
'arg3': 3}}
self._check_result(call_info, 'build_instances',
expected_args, version='1.8')
def test_get_capacities(self):
capacity_info = {"capacity": "info"}
call_info = self._stub_rpc_method('call',
result=capacity_info)
result = self.cells_rpcapi.get_capacities(self.fake_context,
cell_name="name")
self._check_result(call_info, 'get_capacities',
{'cell_name': 'name'}, version='1.9')
self.assertEqual(capacity_info, result)
def test_instance_update_at_top(self):
fake_info_cache = {'id': 1,
'instance': 'fake_instance',
'other': 'moo'}
fake_sys_metadata = [{'id': 1,
'key': 'key1',
'value': 'value1'},
{'id': 2,
'key': 'key2',
'value': 'value2'}]
fake_instance = {'id': 2,
'security_groups': 'fake',
'instance_type': 'fake',
'volumes': 'fake',
'cell_name': 'fake',
'name': 'fake',
'metadata': 'fake',
'info_cache': fake_info_cache,
'system_metadata': fake_sys_metadata,
'other': 'meow'}
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.instance_update_at_top(
self.fake_context, fake_instance)
expected_args = {'instance': fake_instance}
self._check_result(call_info, 'instance_update_at_top',
expected_args)
def test_instance_destroy_at_top(self):
fake_instance = {'uuid': 'fake-uuid'}
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.instance_destroy_at_top(
self.fake_context, fake_instance)
expected_args = {'instance': fake_instance}
self._check_result(call_info, 'instance_destroy_at_top',
expected_args)
def test_instance_delete_everywhere(self):
fake_instance = {'uuid': 'fake-uuid'}
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.instance_delete_everywhere(
self.fake_context, fake_instance,
'fake-type')
expected_args = {'instance': fake_instance,
'delete_type': 'fake-type'}
self._check_result(call_info, 'instance_delete_everywhere',
expected_args)
def test_instance_fault_create_at_top(self):
fake_instance_fault = {'id': 2,
'other': 'meow'}
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.instance_fault_create_at_top(
self.fake_context, fake_instance_fault)
expected_args = {'instance_fault': fake_instance_fault}
self._check_result(call_info, 'instance_fault_create_at_top',
expected_args)
def test_bw_usage_update_at_top(self):
update_args = ('fake_uuid', 'fake_mac', 'fake_start_period',
'fake_bw_in', 'fake_bw_out', 'fake_ctr_in',
'fake_ctr_out')
update_kwargs = {'last_refreshed': 'fake_refreshed'}
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.bw_usage_update_at_top(
self.fake_context, *update_args, **update_kwargs)
bw_update_info = {'uuid': 'fake_uuid',
'mac': 'fake_mac',
'start_period': 'fake_start_period',
'bw_in': 'fake_bw_in',
'bw_out': 'fake_bw_out',
'last_ctr_in': 'fake_ctr_in',
'last_ctr_out': 'fake_ctr_out',
'last_refreshed': 'fake_refreshed'}
expected_args = {'bw_update_info': bw_update_info}
self._check_result(call_info, 'bw_usage_update_at_top',
expected_args)
def test_get_cell_info_for_neighbors(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.get_cell_info_for_neighbors(
self.fake_context)
self._check_result(call_info, 'get_cell_info_for_neighbors', {},
version='1.1')
self.assertEqual(result, 'fake_response')
def test_sync_instances(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.sync_instances(self.fake_context,
project_id='fake_project', updated_since='fake_time',
deleted=True)
expected_args = {'project_id': 'fake_project',
'updated_since': 'fake_time',
'deleted': True}
self._check_result(call_info, 'sync_instances', expected_args,
version='1.1')
def test_service_get_all(self):
call_info = self._stub_rpc_method('call', 'fake_response')
fake_filters = {'key1': 'val1', 'key2': 'val2'}
result = self.cells_rpcapi.service_get_all(self.fake_context,
filters=fake_filters)
expected_args = {'filters': fake_filters}
self._check_result(call_info, 'service_get_all', expected_args,
version='1.2')
self.assertEqual(result, 'fake_response')
def test_service_get_by_compute_host(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.service_get_by_compute_host(
self.fake_context, host_name='fake-host-name')
expected_args = {'host_name': 'fake-host-name'}
self._check_result(call_info, 'service_get_by_compute_host',
expected_args,
version='1.2')
self.assertEqual(result, 'fake_response')
def test_service_update(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.service_update(
self.fake_context, host_name='fake-host-name',
binary='nova-api', params_to_update={'disabled': True})
expected_args = {
'host_name': 'fake-host-name',
'binary': 'nova-api',
'params_to_update': {'disabled': True}}
self._check_result(call_info, 'service_update',
expected_args,
version='1.7')
self.assertEqual(result, 'fake_response')
def test_proxy_rpc_to_manager(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.proxy_rpc_to_manager(
self.fake_context, rpc_message='fake-msg',
topic='fake-topic', call=True, timeout=-1)
expected_args = {'rpc_message': 'fake-msg',
'topic': 'fake-topic',
'call': True,
'timeout': -1}
self._check_result(call_info, 'proxy_rpc_to_manager',
expected_args,
version='1.2')
self.assertEqual(result, 'fake_response')
def test_task_log_get_all(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.task_log_get_all(self.fake_context,
task_name='fake_name',
period_beginning='fake_begin',
period_ending='fake_end',
host='fake_host',
state='fake_state')
expected_args = {'task_name': 'fake_name',
'period_beginning': 'fake_begin',
'period_ending': 'fake_end',
'host': 'fake_host',
'state': 'fake_state'}
self._check_result(call_info, 'task_log_get_all', expected_args,
version='1.3')
self.assertEqual(result, 'fake_response')
def test_compute_node_get_all(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.compute_node_get_all(self.fake_context,
hypervisor_match='fake-match')
expected_args = {'hypervisor_match': 'fake-match'}
self._check_result(call_info, 'compute_node_get_all', expected_args,
version='1.4')
self.assertEqual(result, 'fake_response')
def test_compute_node_stats(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.compute_node_stats(self.fake_context)
expected_args = {}
self._check_result(call_info, 'compute_node_stats',
expected_args, version='1.4')
self.assertEqual(result, 'fake_response')
def test_compute_node_get(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.compute_node_get(self.fake_context,
'fake_compute_id')
expected_args = {'compute_id': 'fake_compute_id'}
self._check_result(call_info, 'compute_node_get',
expected_args, version='1.4')
self.assertEqual(result, 'fake_response')
def test_actions_get(self):
fake_instance = {'uuid': 'fake-uuid', 'cell_name': 'region!child'}
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.actions_get(self.fake_context,
fake_instance)
expected_args = {'cell_name': 'region!child',
'instance_uuid': fake_instance['uuid']}
self._check_result(call_info, 'actions_get', expected_args,
version='1.5')
self.assertEqual(result, 'fake_response')
def test_actions_get_no_cell(self):
fake_instance = {'uuid': 'fake-uuid', 'cell_name': None}
self.assertRaises(exception.InstanceUnknownCell,
self.cells_rpcapi.actions_get, self.fake_context,
fake_instance)
def test_action_get_by_request_id(self):
fake_instance = {'uuid': 'fake-uuid', 'cell_name': 'region!child'}
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.action_get_by_request_id(self.fake_context,
fake_instance,
'req-fake')
expected_args = {'cell_name': 'region!child',
'instance_uuid': fake_instance['uuid'],
'request_id': 'req-fake'}
self._check_result(call_info, 'action_get_by_request_id',
expected_args, version='1.5')
self.assertEqual(result, 'fake_response')
def test_action_get_by_request_id_no_cell(self):
fake_instance = {'uuid': 'fake-uuid', 'cell_name': None}
self.assertRaises(exception.InstanceUnknownCell,
self.cells_rpcapi.action_get_by_request_id,
self.fake_context, fake_instance, 'req-fake')
def test_action_events_get(self):
fake_instance = {'uuid': 'fake-uuid', 'cell_name': 'region!child'}
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.action_events_get(self.fake_context,
fake_instance,
'fake-action')
expected_args = {'cell_name': 'region!child',
'action_id': 'fake-action'}
self._check_result(call_info, 'action_events_get', expected_args,
version='1.5')
self.assertEqual(result, 'fake_response')
def test_action_events_get_no_cell(self):
fake_instance = {'uuid': 'fake-uuid', 'cell_name': None}
self.assertRaises(exception.InstanceUnknownCell,
self.cells_rpcapi.action_events_get,
self.fake_context, fake_instance, 'fake-action')
def test_consoleauth_delete_tokens(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.consoleauth_delete_tokens(self.fake_context,
'fake-uuid')
expected_args = {'instance_uuid': 'fake-uuid'}
self._check_result(call_info, 'consoleauth_delete_tokens',
expected_args, version='1.6')
def test_validate_console_port(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.validate_console_port(self.fake_context,
'fake-uuid', 'fake-port', 'fake-type')
expected_args = {'instance_uuid': 'fake-uuid',
'console_port': 'fake-port',
'console_type': 'fake-type'}
self._check_result(call_info, 'validate_console_port',
expected_args, version='1.6')
self.assertEqual(result, 'fake_response')
def test_bdm_update_or_create_at_top(self):
fake_bdm = {'id': 2, 'other': 'meow'}
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.bdm_update_or_create_at_top(
self.fake_context, fake_bdm, create='fake-create')
expected_args = {'bdm': fake_bdm, 'create': 'fake-create'}
self._check_result(call_info, 'bdm_update_or_create_at_top',
expected_args, version='1.10')
def test_bdm_destroy_at_top(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.bdm_destroy_at_top(self.fake_context,
'fake-uuid',
device_name='fake-device',
volume_id='fake-vol')
expected_args = {'instance_uuid': 'fake-uuid',
'device_name': 'fake-device',
'volume_id': 'fake-vol'}
self._check_result(call_info, 'bdm_destroy_at_top',
expected_args, version='1.10')
def test_get_migrations(self):
call_info = self._stub_rpc_method('call', None)
filters = {'cell_name': 'ChildCell', 'status': 'confirmed'}
self.cells_rpcapi.get_migrations(self.fake_context, filters)
expected_args = {'filters': filters}
self._check_result(call_info, 'get_migrations', expected_args,
version="1.11")
def test_start_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.start_instance(
self.fake_context, 'fake-instance')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'start_instance',
expected_args, version='1.12')
def test_stop_instance_cast(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.stop_instance(
self.fake_context, 'fake-instance', do_cast=True)
expected_args = {'instance': 'fake-instance',
'do_cast': True}
self._check_result(call_info, 'stop_instance',
expected_args, version='1.12')
def test_stop_instance_call(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.stop_instance(
self.fake_context, 'fake-instance', do_cast=False)
expected_args = {'instance': 'fake-instance',
'do_cast': False}
self._check_result(call_info, 'stop_instance',
expected_args, version='1.12')
self.assertEqual(result, 'fake_response')
| apache-2.0 | -8,717,394,188,039,251,000 | 41.842742 | 78 | 0.5344 | false |
gdraynz/nyuki | nyuki/bus/persistence/memory_backend.py | 1 | 1899 | import logging
from nyuki.bus.persistence.events import EventStatus
from nyuki.bus.persistence.backend import PersistenceBackend
log = logging.getLogger(__name__)
class FIFOSizedQueue(object):
def __init__(self, size):
self._list = list()
self._size = size
def __len__(self):
return len(self._list)
@property
def size(self):
return self._size
@property
def list(self):
return self._list
@property
def is_full(self):
return len(self._list) >= self._size
def put(self, item):
while self.is_full:
            log.debug('queue full (%d), popping first item', len(self._list))
self._list.pop(0)
self._list.append(item)
def empty(self):
while self._list:
yield self._list.pop(0)
class MemoryBackend(PersistenceBackend):
def __init__(self, max_size=10000, **kwargs):
self._last_events = FIFOSizedQueue(max_size)
def __repr__(self):
return '<MemoryBackend max_size={}>'.format(self._last_events.size)
async def store(self, event):
self._last_events.put(event)
async def update(self, uid, status):
for event in self._last_events.list:
if event['id'] == uid:
event['status'] = status.value
return
async def retrieve(self, since, status):
def check_params(item):
since_check = True
status_check = True
if since:
since_check = item['created_at'] >= since
if status:
if isinstance(status, list):
status_check = EventStatus[item['status']] in status
else:
status_check = item['status'] == status.value
return since_check and status_check
return list(filter(check_params, self._last_events.list))
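# Minimal usage sketch of the sized FIFO above (added for illustration): once
# the queue reaches its maximum size, the oldest items are dropped to make
# room for new ones.
if __name__ == '__main__':
    queue = FIFOSizedQueue(3)
    for n in range(5):
        queue.put(n)
    assert list(queue.empty()) == [2, 3, 4]  # 0 and 1 were dropped when full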
| apache-2.0 | -7,991,617,701,439,254,000 | 24.662162 | 76 | 0.567667 | false |
aristanetworks/arista-ovs-quantum | quantum/plugins/ryu/db/models_v2.py | 1 | 2004 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Isaku Yamahata <yamahata at private email ne jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from quantum.db import model_base
class OFPServer(model_base.BASEV2):
"""Openflow Server/API address."""
__tablename__ = 'ofp_server'
id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
address = sa.Column(sa.String(64)) # netloc <host ip address>:<port>
host_type = sa.Column(sa.String(255)) # server type
# Controller, REST_API
def __repr__(self):
return "<OFPServer(%s,%s,%s)>" % (self.id, self.address,
self.host_type)
class TunnelKeyLast(model_base.BASEV2):
"""Lastly allocated Tunnel key. The next key allocation will be started
from this value + 1
"""
last_key = sa.Column(sa.Integer, primary_key=True)
def __repr__(self):
return "<TunnelKeyLast(%x)>" % self.last_key
class TunnelKey(model_base.BASEV2):
"""Netowrk ID <-> tunnel key mapping."""
network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id"),
nullable=False)
tunnel_key = sa.Column(sa.Integer, primary_key=True,
nullable=False, autoincrement=False)
def __repr__(self):
return "<TunnelKey(%s,%x)>" % (self.network_id, self.tunnel_key)
| apache-2.0 | 5,778,167,961,306,299,000 | 36.111111 | 79 | 0.63523 | false |
dNG-git/pas_gapi_core | setup.py | 1 | 2785 | # -*- coding: utf-8 -*-
"""
direct PAS
Python Application Services
----------------------------------------------------------------------------
(C) direct Netware Group - All rights reserved
https://www.direct-netware.de/redirect?pas;gapi;core
The following license agreement remains valid unless any additions or
changes are being made by direct Netware Group in a written form.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
----------------------------------------------------------------------------
https://www.direct-netware.de/redirect?licenses;gpl
----------------------------------------------------------------------------
setup.py
"""
def get_version():
"""
Returns the version currently in development.
:return: (str) Version string
:since: v0.1.02
"""
return "v0.2.00"
#
from dNG.distutils.command.build_py import BuildPy
from dNG.distutils.command.install_data import InstallData
from dNG.distutils.temporary_directory import TemporaryDirectory
from distutils.core import setup
from os import path
with TemporaryDirectory(dir = ".") as build_directory:
parameters = { "pasGapiCoreVersion": get_version() }
InstallData.set_build_target_path(build_directory)
InstallData.set_build_target_parameters(parameters)
_build_path = path.join(build_directory, "src")
setup(name = "pas_gapi_core",
version = get_version(),
description = "Python Application Services",
long_description = """"pas_gapi_core" is an adapter and abstraction layer for the C-Level GObject Introspection API.""",
author = "direct Netware Group et al.",
author_email = "[email protected]",
license = "GPLv2+",
url = "https://www.direct-netware.de/redirect?pas;gapi;core",
platforms = [ "any" ],
package_dir = { "": _build_path },
packages = [ "dNG" ],
data_files = [ ( "docs", [ "LICENSE", "README" ]) ],
# Override build_py to first run builder.py over all PAS modules
cmdclass = { "build_py": BuildPy,
"install_data": InstallData
}
)
#
| gpl-2.0 | -6,165,827,496,580,687,000 | 34.253165 | 130 | 0.631957 | false |
gford1000/awssl | examples/wait_state_example.py | 1 | 1277 | import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import awssl
def wait_state_example():
# Construct states
final_state = awssl.Task(
Name="FinalState",
EndState=True,
ResourceArn="arn:aws:lambda:REGION:ACCOUNT_ID:function:FUNCTION_NAME")
wait_using_seconds_path = awssl.Wait(
Name="wait_using_seconds_path",
NextState=final_state,
WaitForSecondsPath="$.expiryseconds")
wait_using_timestamp_path = awssl.Wait(
Name="wait_using_timestamp_path",
NextState=wait_using_seconds_path,
WaitUntilISO8601TimestampPath="$.expirydate")
wait_using_timestamp = awssl.Wait(
Name="wait_using_timestamp",
NextState=wait_using_timestamp_path,
WaitUntilISO8601Timestamp="2015-09-04T01:59:00Z")
wait_using_seconds = awssl.Wait(
Name="wait_using_second",
NextState=wait_using_timestamp,
WaitForSeconds=10)
first_state = awssl.Task(
Name="FirstState",
ResourceArn="arn:aws:lambda:REGION:ACCOUNT_ID:function:FUNCTION_NAME",
EndState=False,
NextState=wait_using_seconds)
# Construct state machine
return awssl.StateMachine(
Comment="An example of the Amazon States Language using wait states",
StartState=first_state)
if __name__ == "__main__":
sm = wait_state_example()
print sm
| mit | -2,577,020,994,533,087,700 | 25.061224 | 82 | 0.735317 | false |
KarnUllrich/HDToolsPython | classification.py | 1 | 1403 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Provides a kNN classifier.
"""
import random as r
from collections import Counter
import numpy as np
__author__ = ["Karen Ullrich"]
__email__ = "[email protected]"
__version__ = "Dec 2014"
def kNN(data, k):
'''
Performs kNN. Computes LOOCV accuracy.
    k ...list of neighborhood sizes to evaluate
D ...distance matrix
n ...number of data instances
t ...ground truth/ labels
acc ...classification accuracy for LOOCV
'''
n = len(data.D)
num_evaluations = len(k) # How many neigbohood sizes are given
acc = np.zeros(num_evaluations)
corr = np.zeros((n, num_evaluations))
for i in xrange(n):
ground_truth = data.t[i]
row = data.D[i, :]
row[i] = float('inf')
idx = np.argsort(row)
for j in xrange(num_evaluations):
nn_class = findMostCommonElementOfSet(data.t[idx[:k[j]]])
if ground_truth == nn_class:
acc[j] += 1./n
corr[i,j] = 1
return acc
def findMostCommonElementOfSet(elements):
'''
    Returns the most common element in a set. For ties it decides randomly.
    Input:
    elements ... list or np.array
'''
elementCounter = Counter(elements).most_common()
highest_count = max([i[1] for i in elementCounter])
element = [i[0] for i in elementCounter if i[1] == highest_count]
r.shuffle(element)
return element[0] | gpl-2.0 | 8,126,564,849,569,907,000 | 22.79661 | 75 | 0.577334 | false |
cullophid/Scienceman | spritesheettest.py | 1 | 1419 | import sys, os
import pygame
from pygame.locals import *
from pygame.color import *
from gamelib import data
class Spritesheet:
def __init__(self, filename):
self.sheet = pygame.image.load(os.path.join('data',filename)).convert()
def imgat(self,rect,colorkey=None):
rect = Rect(rect)
image = pygame.Surface(rect.size).convert()
image.blit(self.sheet, (0,0),rect)
if colorkey is not None:
if colorkey is -1:
colorkey = image.get_at((0,0))
image.set_colorkey(colorkey, RLEACCEL)
return image
def imgsat(self,rects,colorkey=None):
imgs = []
for rect in rects:
imgs.append(self.imgat(rect, colorkey))
return imgs
os.environ["SDL_VIDEO_CENTERED"] = "1"
#pygame.mixer.pre_init(44100, -16, 2, 4096)
pygame.init()
pygame.mouse.set_visible(1)
pygame.display.set_caption("In the name of Science")
screen = pygame.display.set_mode((1000, 480))
font = pygame.font.Font((os.path.join('data','font.ttf')), 16)
ren = font.render("YOU DIED!", 1, (255, 255, 255))
screen.blit(ren, (320-ren.get_width()/2, 235))
sheet = Spritesheet("CaptainCommando.gif")
image = sheet.imgat((14,6,64,86),-1)
while True:
for e in pygame.event.get():
if e.type == QUIT:
sys.exit()
if e.type == KEYDOWN:
if e.key == K_ESCAPE:
                sys.exit()
ren = font.render("YOU DIED!", 1, (255, 255, 255))
screen.blit(ren, (320-ren.get_width()/2, 235))
screen.blit(image,(100,100))
pygame.display.flip()
| lgpl-2.1 | -1,517,194,131,688,636,400 | 27.38 | 73 | 0.674419 | false |