repo_name (string, 5–92 chars) | path (string, 4–232 chars) | copies (string, 19 classes) | size (string, 4–7 chars) | content (string, 721–1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 15–997) | alpha_frac (float64, 0.25–0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
overfl0/Bulletproof-Arma-Launcher | src/utils/filecache.py | 1 | 1887 | # Bulletproof Arma Launcher
# Copyright (C) 2017 Lukasz Taczuk
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from __future__ import unicode_literals
import errno
import hashlib
import os
from utils import paths
from utils import context
def get_cache_directory():
return paths.get_launcher_directory('filecache')
def map_file(url):
"""Get the path where the file should be stored in the cache."""
file_name = hashlib.sha256(url).hexdigest()
return os.path.join(get_cache_directory(), file_name)
def get_file(url):
"""Get the file contents from the cache or None if the file is not present
in the cache.
"""
path = map_file(url)
f = None
try:
f = open(path, 'rb')
return f.read()
except IOError as ex:
if ex.errno == errno.ENOENT: # No such file
return None
raise
finally:
if f:
f.close()
def save_file(url, data):
"""Save the file contents to the cache.
The contents of the file are saved to a temporary file and then moved to
ensure that no truncated file is present in the cache.
"""
# Ensure the directory exists
paths.mkdir_p(get_cache_directory())
path = map_file(url)
tmp_path = path + '_tmp'
f = open(tmp_path, 'wb')
f.write(data)
f.close()
    # Ensure the destination file does not exist (os.rename would raise an exception on Windows otherwise)
with context.ignore_nosuchfile_exception():
os.unlink(path)
os.rename(tmp_path, path)
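# The block below is an illustrative usage sketch added for documentation; it is
# not part of the original module. The URL and payload are placeholders.
def _cache_roundtrip_example():
    url = 'http://example.com/some/file.bin'
    save_file(url, b'payload-bytes')  # written atomically via the _tmp file
    return get_file(url)              # -> b'payload-bytes' on a cache hit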
| gpl-3.0 | -3,351,975,119,354,723,300 | 23.506494 | 78 | 0.670906 | false |
hkff/AccLab | pyAAL/shell.py | 1 | 5489 | """
Shell
Copyright (C) 2014 Walid Benghabrit
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = 'walid'
import AALCompiler
from AALChecker import *
from importlib import reload
import os
import re
import curses
from tools.hottie import hot
from AALCompiler import AALCompilerListener
# TODO : make it more user friendly
self = None
help_str = "Shell Help" +\
"\n - call(macro, args) " + "\t call a macro where /\n" +\
"\t\t\t *macro : is the name of the macro\n" +\
"\t\t\t *args : a list of string; << ex : [\"'args1'\", \"'args2'\", ...\"'argsN'\"] >>" +\
"\n - clauses() " + "\t show all declared clauses in the loaded aal program" +\
"\n - macros() " + "\t show all declared macros in the loaded aal program" +\
"\n - load(lib) " + "\t load the library lib" +\
"\n - quit / q " + "\t exit the shell" +\
"\n - help / h / man() " + "\t show this help" +\
"\n - self " + "\t the current compiler instance of the loaded aal program" +\
"\n - aalprog " + "\t the current loaded aal program " +\
"\n - man(arg) " + "\t print the help for the given arg" +\
"\n - hs(module) " + "\t hotswaping : reload the module" +\
"\n - r() " + "\t hot-swaping the shell"
help_str = Color(help_str)
COMMANDS = ['clauses()', 'macros()', 'quit', 'q', 'h', 'help', 'self', 'aalprog', 'man()', 'call', 'extra']
RE_SPACE = re.compile('.*\s+$', re.M)
# Completer class
class Completer(object):
def complete(self, text, state):
"""Generic readline completion entry point."""
try:
import readline
except:
print(Color("{autored}[ERROR] You need to install readline module to use the shell.{/red}\n"
"Please visit {autogreen}https://pypi.python.org/pypi/readline{/green}\n"))
sys.exit(-1)
buffer = readline.get_line_buffer()
line = readline.get_line_buffer().split()
# show all commands
if not line:
return [c + ' ' for c in COMMANDS][state]
# account for last argument ending in a space
if RE_SPACE.match(buffer):
line.append('')
# resolve command to the implementation function
cmd = line[0].strip()
if cmd in COMMANDS:
impl = getattr(self, 'complete_%s' % cmd)
args = line[1:]
if args:
return (impl(args) + [None])[state]
return [cmd + ' '][state]
results = [c + ' ' for c in COMMANDS if c.startswith(cmd)] + [None]
return results[state]
# Man method
def man(args=None):
if args is None:
print(help_str)
else:
print("printing manual for " + str(args.__class__))
arg_type = type(args)
if isinstance(args, aalmmnode):
print(args.man())
else:
AALCompilerListener.man()
# Interactive mode
@hot
def shell(listener):
try:
import readline
except:
print(Color("{autored}[ERROR] You need to install readline module to use the shell.{/red}\n"
"Please visit {autogreen}https://pypi.python.org/pypi/readline{/green}\n"))
sys.exit(-1)
import shell, AALMetaModel, inspect # For hotswaping
stop = False
self = listener
aalprog = self.aalprog
comp = Completer()
# we want to treat '/' as part of a word, so override the delimiters
readline.set_completer_delims(' \t\n;')
readline.parse_and_bind("tab: complete")
readline.set_completer(comp.complete)
# Load a lib on the current AAL file
def load(lib):
return self.load_lib(lib)
# Call a macro on the loaded file
def call(macro, args=None):
if args is None:
args = []
return self.macro_call(macro, args)
# Get clauses
def clauses():
return self.get_clauses()
# Get macros
def macros():
return self.get_macros()
# Reload shell
def r():
return reload(shell)
# Reload a module
def hs(module):
res = reload(module)
    # Use the hot-swapping decorator on all AALMetaModel classes
# NOTE : stop abusing introspection...
for name, obj in inspect.getmembers(module):
if inspect.isclass(obj):
if "AALMetaModel" in str(obj):
obj = hot(obj)
return res
# return
#exec("from AALCompiler import AALCompilerListener");
while not stop:
cmd = input("shell >")
# cmd = sys.stdin.read()
if cmd == "quit" or cmd == "q":
stop = True
elif cmd == "help" or cmd == "h":
man()
else:
try:
res = eval(cmd)
if res is not None:
print(res)
except:
print("Eval error !", sys.exc_info()[:2])
| gpl-3.0 | -8,510,460,452,724,908,000 | 31.473373 | 107 | 0.574891 | false |
kenyansongithub/django-rog | setup.py | 1 | 1360 | __author__ = 'ndieks'
import os
from setuptools import setup
import setuptools
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-rog',
version='0.1',
packages=setuptools.find_packages(),
include_package_data=True,
zip_safe=False,
license='BSD License',
description='A simple Django app to track activities of people from some location.',
long_description=README,
url='https://www.example.com/',
author='danleyb2',
author_email='[email protected]',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
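# Hedged usage note added for illustration (not part of the original file): a
# source distribution of this package would typically be built and installed with
#   python setup.py sdist
#   pip install dist/django-rog-0.1.tar.gz
# The exact archive name depends on the name/version declared in setup() above.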
| bsd-2-clause | -8,937,582,572,829,403,000 | 34.789474 | 92 | 0.580882 | false |
osuripple/lets | helpers/leaderboardHelper.py | 1 | 2869 | from common.log import logUtils as log
from common.ripple import scoreUtils
from objects import glob
from common.ripple import userUtils
def getRankInfo(userID, gameMode):
"""
Get userID's current rank, user above us and pp/score difference
:param userID: user
:param gameMode: gameMode number
:return: {"nextUsername": "", "difference": 0, "currentRank": 0}
"""
data = {"nextUsername": "", "difference": 0, "currentRank": 0}
k = "ripple:leaderboard:{}".format(scoreUtils.readableGameMode(gameMode))
position = userUtils.getGameRank(userID, gameMode) - 1
log.debug("Our position is {}".format(position))
if position is not None and position > 0:
aboveUs = glob.redis.zrevrange(k, position - 1, position)
log.debug("{} is above us".format(aboveUs))
if aboveUs is not None and len(aboveUs) > 0 and aboveUs[0].isdigit():
# Get our rank, next rank username and pp/score difference
myScore = glob.redis.zscore(k, userID)
otherScore = glob.redis.zscore(k, aboveUs[0])
nextUsername = userUtils.getUsername(aboveUs[0])
if nextUsername is not None and myScore is not None and otherScore is not None:
data["nextUsername"] = nextUsername
data["difference"] = int(myScore) - int(otherScore)
else:
position = 0
data["currentRank"] = position + 1
return data
def update(userID, newScore, gameMode, *, relax=False):
"""
Update gamemode's leaderboard.
Doesn't do anything if userID is banned/restricted.
:param userID: user
:param newScore: new score or pp
:param gameMode: gameMode number
:param relax: if True, update relax global leaderboard, otherwise update classic global leaderboard
"""
if userUtils.isAllowed(userID):
log.debug("Updating leaderboard...")
glob.redis.zadd(
"ripple:leaderboard:{}{}".format(scoreUtils.readableGameMode(gameMode), ":relax" if relax else ""),
str(userID),
str(newScore)
)
else:
log.debug("Leaderboard update for user {} skipped (not allowed)".format(userID))
def updateCountry(userID, newScore, gameMode, *, relax=False):
"""
Update gamemode's country leaderboard.
Doesn't do anything if userID is banned/restricted.
:param userID: user, country is determined by the user
:param newScore: new score or pp
:param gameMode: gameMode number
:param relax: if True, update relax country leaderboard, otherwise update classic country leaderboard
:return:
"""
if userUtils.isAllowed(userID):
country = userUtils.getCountry(userID)
if country is not None and len(country) > 0 and country.lower() != "xx":
log.debug("Updating {} country leaderboard...".format(country))
k = "ripple:leaderboard:{}:{}{}".format(
scoreUtils.readableGameMode(gameMode),
country.lower(),
":relax" if relax else ""
)
glob.redis.zadd(k, str(userID), str(newScore))
else:
log.debug("Country leaderboard update for user {} skipped (not allowed)".format(userID))
| agpl-3.0 | 9,100,213,351,443,686,000 | 36.25974 | 102 | 0.721854 | false |
wolcomm/djangolg | djangolg/dialects/base.py | 1 | 2044 | # Copyright 2017 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Base dialect class for djangolg."""
from __future__ import print_function
from __future__ import unicode_literals
import inspect
import napalm
from napalm_base import NetworkDriver
class BaseDialect(object):
"""Device base dialect class."""
driver_class = None
name = None
description = None
commands = {}
def __init__(self):
"""Initialise new instance."""
if not isinstance(self.driver_class, NetworkDriver):
if type(self).name:
self.driver_class = napalm.get_network_driver(type(self).name)
else:
raise ValueError
def get_command_syntax(self, method=None, option=None):
"""Get the dialect specific syntax for a given method as a lambda."""
from djangolg.methods.base import BaseMethod
        if not isinstance(method, BaseMethod):
            raise ValueError
syntax = None
if method.name in self.commands:
if option is not None:
if option in self.commands[method.name]:
syntax = self.commands[method.name][option]
else:
syntax = self.commands[method.name]
if syntax:
if inspect.isfunction(syntax):
return syntax
else:
raise TypeError # pragma: no cover
raise NotImplementedError # pragma: no cover
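# Minimal sketch added for illustration; not part of djangolg. The napalm driver
# name "ios" and the method name "bgp_prefix" are assumptions for the example.
class _ExampleDialect(BaseDialect):
    name = "ios"
    description = "Example IOS-style dialect"
    commands = {
        # get_command_syntax() returns this lambda for the matching method
        "bgp_prefix": lambda target: "show ip bgp {0}".format(target),
    }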
| apache-2.0 | 8,075,817,260,728,690,000 | 33.644068 | 79 | 0.65362 | false |
juancarlosqr/datascience | python/playground/distributed_systems/classes.py | 1 | 3241 | #!/usr/bin/env python
import sqlalchemy
from sqlalchemy import Column, ForeignKey, Integer, String, Float
from sqlalchemy.orm import relationship, backref
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
""" ****** New Class ****** """
class Servidor(Base):
"""docstring for Servidor"""
__tablename__ = 'servidor'
ser_id = Column(Integer, primary_key=True)
ser_ip = Column(String)
ser_nombre = Column(String)
grupos = relationship("Grupo", back_populates="servidor", cascade="all, delete, delete-orphan")
discos = relationship("DiscoDuro", back_populates="servidor_dis", cascade="all, delete, delete-orphan")
def __init__(self, ser_ip, ser_nombre):
self.ser_ip = ser_ip
self.ser_nombre = ser_nombre
def __repr__(self):
return "<Servidor ('%s','%s')>" % (self.ser_ip, self.ser_nombre)
""" ****** New Class ****** """
class Grupo(Base):
"""docstring for Servidor"""
__tablename__ = 'grupo'
gru_id = Column(Integer, primary_key=True)
gru_grupo = Column(String)
gru_groupid = Column(Integer)
servidor_ser_id = Column(Integer, ForeignKey('servidor.ser_id'))
servidor = relationship("Servidor", back_populates="grupos")
usuarios = relationship("Usuario", back_populates="grupo", cascade="all, delete, delete-orphan")
def __init__(self, gru_grupo, gru_groupid):
self.gru_grupo = gru_grupo
self.gru_groupid = gru_groupid
def __repr__(self):
return "<Grupo ('%s','%s')>" % (self.gru_grupo, self.gru_groupid)
""" ****** New Class ****** """
class Usuario(Base):
"""docstring for Usuario"""
__tablename__ = 'usuario'
usu_id = Column(Integer, primary_key=True)
usu_usuario = Column(String)
usu_descripcion = Column(String)
usu_directorio = Column(String)
usu_shell = Column(String)
grupo_gru_id = Column(Integer, ForeignKey('grupo.gru_id'))
grupo = relationship("Grupo", back_populates="usuarios")
def __init__(self, usu_usuario, usu_descripcion, usu_directorio, usu_shell):
self.usu_usuario = usu_usuario
self.usu_descripcion = usu_descripcion
self.usu_directorio = usu_directorio
self.usu_shell = usu_shell
def __repr__(self):
return "<Usuario ('%s','%s','%s','%s')>" % (self.usu_usuario, self.usu_descripcion, self.usu_directorio, usu_shell)
""" ****** New Class ****** """
class DiscoDuro(Base):
"""docstring for DiscoDuro"""
__tablename__ = 'disco_duro'
dis_id = Column(Integer, primary_key=True)
dis_nombre = Column(String)
dis_tamano = Column(String)
dis_usado = Column(String)
dis_disponible = Column(String)
dis_usado_porcen = Column(String)
dis_montado = Column(String)
servidor_ser_id = Column(Integer, ForeignKey('servidor.ser_id'))
servidor_dis = relationship("Servidor", back_populates="discos")
def __init__(self, dis_nombre, dis_tamano, dis_usado, dis_disponible, dis_usado_porcen, dis_montado ):
self.dis_nombre = dis_nombre
self.dis_tamano = dis_tamano
self.dis_usado = dis_usado
self.dis_disponible = dis_disponible
self.dis_usado_porcen = dis_usado_porcen
self.dis_montado = dis_montado
def __repr__(self):
return "<DiscoDuro ('%s','%s','%s','%s')>" % (self.dis_nombre, self.dis_tamano, self.dis_usado, self.dis_disponible)
| mit | -4,709,690,366,457,514,000 | 33.849462 | 120 | 0.668929 | false |
rgayon/plaso | tests/parsers/vsftpd.py | 1 | 1664 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the vsftpd parser."""
from __future__ import unicode_literals
import unittest
from plaso.parsers import vsftpd
from tests.parsers import test_lib
class VsftpdLogParserTest(test_lib.ParserTestCase):
"""Tests for the vsftpd parser."""
def testParse(self):
"""Tests the Parse function."""
parser = vsftpd.VsftpdLogParser()
storage_writer = self._ParseFile(['vsftpd.log'], parser)
self.assertEqual(storage_writer.number_of_warnings, 0)
self.assertEqual(storage_writer.number_of_events, 25)
events = list(storage_writer.GetEvents())
event = events[12]
self.CheckTimestamp(event.timestamp, '2016-06-10 14:24:19.000000')
expected_message = (
'[pid 3] [jean] OK DOWNLOAD: Client "192.168.1.7", '
'"/home/jean/trains/how-thomas-the-tank-engine-works-1.jpg", '
'49283 bytes, 931.38Kbyte/sec')
expected_short_message = '{0:s}...'.format(expected_message[:77])
event_data = self._GetEventDataOfEvent(storage_writer, event)
self._TestGetMessageStrings(
event_data, expected_message, expected_short_message)
def testParseWithTimeZone(self):
"""Tests the Parse function with a time zone."""
parser = vsftpd.VsftpdLogParser()
storage_writer = self._ParseFile(['vsftpd.log'], parser, timezone='CET')
self.assertEqual(storage_writer.number_of_warnings, 0)
self.assertEqual(storage_writer.number_of_events, 25)
events = list(storage_writer.GetEvents())
event = events[12]
self.CheckTimestamp(event.timestamp, '2016-06-10 12:24:19.000000')
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -7,911,020,386,839,673,000 | 27.689655 | 76 | 0.683894 | false |
ChristopherHogan/cython | Cython/Compiler/UtilityCode.py | 2 | 9261 | from __future__ import absolute_import
from .TreeFragment import parse_from_strings, StringParseContext
from . import Symtab
from . import Naming
from . import Code
class NonManglingModuleScope(Symtab.ModuleScope):
cpp = False
def __init__(self, prefix, *args, **kw):
self.prefix = prefix
self.cython_scope = None
Symtab.ModuleScope.__init__(self, *args, **kw)
def add_imported_entry(self, name, entry, pos):
entry.used = True
return super(NonManglingModuleScope, self).add_imported_entry(name, entry, pos)
def mangle(self, prefix, name=None):
if name:
if prefix in (Naming.typeobj_prefix, Naming.func_prefix, Naming.var_prefix, Naming.pyfunc_prefix):
                # Functions, classes, etc. get a manually defined prefix so they are
                # easily callable by that name (the prefix passed to CythonUtilityCode)
prefix = self.prefix
return "%s%s" % (prefix, name)
else:
return Symtab.ModuleScope.mangle(self, prefix)
class CythonUtilityCodeContext(StringParseContext):
scope = None
def find_module(self, module_name, relative_to=None, pos=None, need_pxd=True, absolute_fallback=True):
if relative_to:
raise AssertionError("Relative imports not supported in utility code.")
if module_name != self.module_name:
if module_name not in self.modules:
raise AssertionError("Only the cython cimport is supported.")
else:
return self.modules[module_name]
if self.scope is None:
self.scope = NonManglingModuleScope(
self.prefix, module_name, parent_module=None, context=self)
return self.scope
class CythonUtilityCode(Code.UtilityCodeBase):
"""
Utility code written in the Cython language itself.
The @cname decorator can set the cname for a function, method of cdef class.
Functions decorated with @cname('c_func_name') get the given cname.
For cdef classes the rules are as follows:
obj struct -> <cname>_obj
obj type ptr -> <cname>_type
methods -> <class_cname>_<method_cname>
For methods the cname decorator is optional, but without the decorator the
methods will not be prototyped. See Cython.Compiler.CythonScope and
tests/run/cythonscope.pyx for examples.
"""
is_cython_utility = True
def __init__(self, impl, name="__pyxutil", prefix="", requires=None,
file=None, from_scope=None, context=None, compiler_directives=None,
outer_module_scope=None):
# 1) We need to delay the parsing/processing, so that all modules can be
# imported without import loops
# 2) The same utility code object can be used for multiple source files;
# while the generated node trees can be altered in the compilation of a
# single file.
# Hence, delay any processing until later.
context_types = {}
if context is not None:
from .PyrexTypes import BaseType
for key, value in context.items():
if isinstance(value, BaseType):
context[key] = key
context_types[key] = value
impl = Code.sub_tempita(impl, context, file, name)
self.impl = impl
self.name = name
self.file = file
self.prefix = prefix
self.requires = requires or []
self.from_scope = from_scope
self.outer_module_scope = outer_module_scope
self.compiler_directives = compiler_directives
self.context_types = context_types
def __eq__(self, other):
if isinstance(other, CythonUtilityCode):
return self._equality_params() == other._equality_params()
else:
return False
def _equality_params(self):
outer_scope = self.outer_module_scope
while isinstance(outer_scope, NonManglingModuleScope):
outer_scope = outer_scope.outer_scope
return self.impl, outer_scope, self.compiler_directives
def __hash__(self):
return hash(self.impl)
def get_tree(self, entries_only=False, cython_scope=None):
from .AnalysedTreeTransforms import AutoTestDictTransform
# The AutoTestDictTransform creates the statement "__test__ = {}",
# which when copied into the main ModuleNode overwrites
# any __test__ in user code; not desired
excludes = [AutoTestDictTransform]
from . import Pipeline, ParseTreeTransforms
context = CythonUtilityCodeContext(
self.name, compiler_directives=self.compiler_directives)
context.prefix = self.prefix
context.cython_scope = cython_scope
#context = StringParseContext(self.name)
tree = parse_from_strings(
self.name, self.impl, context=context, allow_struct_enum_decorator=True)
pipeline = Pipeline.create_pipeline(context, 'pyx', exclude_classes=excludes)
if entries_only:
p = []
for t in pipeline:
p.append(t)
                if isinstance(t, ParseTreeTransforms.AnalyseDeclarationsTransform):
break
pipeline = p
transform = ParseTreeTransforms.CnameDirectivesTransform(context)
# InterpretCompilerDirectives already does a cdef declarator check
#before = ParseTreeTransforms.DecoratorTransform
before = ParseTreeTransforms.InterpretCompilerDirectives
pipeline = Pipeline.insert_into_pipeline(pipeline, transform,
before=before)
def merge_scope(scope):
def merge_scope_transform(module_node):
module_node.scope.merge_in(scope)
return module_node
return merge_scope_transform
if self.from_scope:
pipeline = Pipeline.insert_into_pipeline(
pipeline, merge_scope(self.from_scope),
before=ParseTreeTransforms.AnalyseDeclarationsTransform)
for dep in self.requires:
if isinstance(dep, CythonUtilityCode) and hasattr(dep, 'tree') and not cython_scope:
pipeline = Pipeline.insert_into_pipeline(
pipeline, merge_scope(dep.tree.scope),
before=ParseTreeTransforms.AnalyseDeclarationsTransform)
if self.outer_module_scope:
# inject outer module between utility code module and builtin module
def scope_transform(module_node):
module_node.scope.outer_scope = self.outer_module_scope
return module_node
pipeline = Pipeline.insert_into_pipeline(
pipeline, scope_transform,
before=ParseTreeTransforms.AnalyseDeclarationsTransform)
if self.context_types:
# inject types into module scope
def scope_transform(module_node):
for name, type in self.context_types.items():
entry = module_node.scope.declare_type(name, type, None, visibility='extern')
entry.in_cinclude = True
return module_node
pipeline = Pipeline.insert_into_pipeline(
pipeline, scope_transform,
before=ParseTreeTransforms.AnalyseDeclarationsTransform)
(err, tree) = Pipeline.run_pipeline(pipeline, tree, printtree=False)
assert not err, err
self.tree = tree
return tree
def put_code(self, output):
pass
@classmethod
def load_as_string(cls, util_code_name, from_file=None, **kwargs):
"""
Load a utility code as a string. Returns (proto, implementation)
"""
util = cls.load(util_code_name, from_file, **kwargs)
return util.proto, util.impl # keep line numbers => no lstrip()
def declare_in_scope(self, dest_scope, used=False, cython_scope=None,
whitelist=None):
"""
Declare all entries from the utility code in dest_scope. Code will only
be included for used entries. If module_name is given, declare the
type entries with that name.
"""
tree = self.get_tree(entries_only=True, cython_scope=cython_scope)
entries = tree.scope.entries
entries.pop('__name__')
entries.pop('__file__')
entries.pop('__builtins__')
entries.pop('__doc__')
for entry in entries.values():
entry.utility_code_definition = self
entry.used = used
original_scope = tree.scope
dest_scope.merge_in(original_scope, merge_unused=True, whitelist=whitelist)
tree.scope = dest_scope
for dep in self.requires:
if dep.is_cython_utility:
dep.declare_in_scope(dest_scope)
return original_scope
def declare_declarations_in_scope(declaration_string, env, private_type=True,
*args, **kwargs):
"""
Declare some declarations given as Cython code in declaration_string
in scope env.
"""
CythonUtilityCode(declaration_string, *args, **kwargs).declare_in_scope(env)
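def _example_utility():
    # Hedged sketch added for illustration; not part of Cython itself. The
    # inline source below is a placeholder chosen for the example; constructing
    # the object only stores it, nothing is compiled until it is used.
    impl = u"""
cdef inline int _twice(int x):
    return 2 * x
"""
    return CythonUtilityCode(impl, name="example_util", prefix="__example_")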
| apache-2.0 | 8,189,437,940,946,829,000 | 38.075949 | 110 | 0.620451 | false |
cschenck/blender_sim | cutil/video_creator.py | 1 | 18026 | #!/usr/bin/env python
import os
import cv2
import numpy as np
import subprocess
import tempfile
import connor_util as cutil
def draw_arrow(image, p, q, color, arrow_magnitude=9, thickness=1, line_type=8, shift=0):
# adapted from http://mlikihazar.blogspot.com.au/2013/02/draw-arrow-opencv.html
# draw arrow tail
cv2.line(image, p, q, color, thickness, line_type, shift)
# calc angle of the arrow
angle = np.arctan2(p[1]-q[1], p[0]-q[0])
# starting point of first line of arrow head
p = (int(q[0] + arrow_magnitude * np.cos(angle + np.pi/4)),
int(q[1] + arrow_magnitude * np.sin(angle + np.pi/4)))
# draw first half of arrow head
cv2.line(image, p, q, color, thickness, line_type, shift)
# starting point of second line of arrow head
p = (int(q[0] + arrow_magnitude * np.cos(angle - np.pi/4)),
int(q[1] + arrow_magnitude * np.sin(angle - np.pi/4)))
# draw second half of arrow head
cv2.line(image, p, q, color, thickness, line_type, shift)
class VideoCreator:
def __init__(self, width, height):
self.width_ = width
self.height_ = height
self.frames = np.zeros((1,height,width,3), dtype=np.uint8)
self.shift = 0
def length(self):
return self.frames.shape[0]+self.shift
def width(self):
return self.frames.shape[2]
def height(self):
return self.frames.shape[1]
def save(self, out_fp, codec='XVID', fps=30):
writer = cv2.VideoWriter(out_fp, cv2.cv.CV_FOURCC(*codec), fps, (self.width(), self.height()))
for t in range(self.length()):
writer.write(self.frames[t,...])
writer.release()
def saveMP4(self, out_fp, fps=30):
tmp = tempfile.NamedTemporaryFile()
self.save(tmp.name, fps=fps)
command = "avconv -i %s -c:v libx264 -c:a copy %s" % (tmp.name, out_fp)
subprocess.call(command.split())
def saveGif(self, out_fp, fps=30):
import matplotlib.pyplot as plt
import matplotlib.animation as animation
fig = plt.figure()
ax = fig.add_subplot(111)
#ax = fig.add_axes([0,0,1.0,1.0])
ax.set_axis_off()
fig.tight_layout()
fig.set_size_inches(self.width()/100.0, self.height()/100.0, forward=True)
fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)
ims = map(lambda i: (ax.imshow(self.frames[i,...,::-1]),ax.set_title('')), range(0, self.frames.shape[0]))
im_ani = animation.ArtistAnimation(fig, ims, interval=1000.0/fps, repeat_delay=0, blit=False)
#plt.show()
im_ani.save(out_fp, writer='imagemagick', savefig_kwargs={'bbox_inches':'tight'})
def savePartial(self, end, out_fp=None, codec='XVID', fps=30, finish=False):
if out_fp is not None:
self.writer = cv2.VideoWriter(out_fp, cv2.cv.CV_FOURCC(*codec), fps, (self.width(), self.height()))
for i in range(self.shift, end):
self.writer.write(self.frames[i-self.shift,...])
self.frames = self.frames[(end-self.shift):,...]
self.shift = end
if finish:
self.writer.release()
def load(self, fp):
cap = cv2.VideoCapture(fp)
length = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
width = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
self.frames = np.zeros((length, height, width, 3), dtype=np.uint8)
self.width_ = width
self.height_ = height
self.shift = 0
i = 0
pm = cutil.ProgressMonitor(lambda : 1.0*i/length, update_interval=None)
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
self.frames[i,...] = frame
i += 1
pm.print_progress()
pm.stop()
def crop(self, start=0, end=None, x=0, y=0, w=None, h=None):
if end is None:
end = self.length()
if w is None:
w = self.width()
if h is None:
h = self.height()
other = VideoCreator(w, h)
other.setFrames(self.frames[start-self.shift:end-self.shift,y:y+h,x:x+w,...], 0)
return other
def playVideo(self, fps=30):
spin = True
while spin:
for i in range(self.frames.shape[0]):
cv2.imshow("Video", self.frames[i,...])
k = cv2.waitKey(1000/fps)
if k in [27, 1048603]:
spin = False
break
if spin:
print("Restarting from the beginning.")
def __expand_frames(self, start, end):
if end-self.shift >= self.frames.shape[0]:
self.frames = np.concatenate((self.frames,
np.zeros((end-self.shift - self.frames.shape[0],self.height_,self.width_,3), dtype=self.frames.dtype)),
axis=0)
def solidColor(self, start, end, color):
self.__expand_frames(start, end)
for c in range(self.frames.shape[-1]):
self.frames[(start-self.shift):(end-self.shift),:,:,c] = color[c]
def __listify(self, x):
if type(x) in [list, tuple]:
return x
else:
return [x]
def placeText(self, lines, start, end, location='center', font=cv2.FONT_HERSHEY_COMPLEX, scale=2,
color=(255,255,255), thickness=2, fade_in=None, fade_out=None, x_shift=0, y_shift=0):
self.__expand_frames(start, end)
lines = self.__listify(lines)
font = self.__listify(font)
scale = self.__listify(scale)
thickness = self.__listify(thickness)
fade_in = self.__listify(fade_in)
fade_out = self.__listify(fade_out)
x_shift = self.__listify(x_shift)
y_shift = self.__listify(y_shift)
if type(color[0]) not in [list, tuple]:
color = [color]
sizes = []
for i,line in enumerate(lines):
f = font[min(i, len(font)-1)]
s = scale[min(i, len(scale)-1)]
t = thickness[min(i, len(thickness)-1)]
(w,h),b = cv2.getTextSize(line, f, s, t)
w = int(round(w))
h = int(round(h))
b = int(round(b))
sizes.append((w, h, b))
if location in ['northwest', 'southwest', 'west']:
x_coeff = 0
start_x = 0
elif location in ['north', 'center', 'south']:
x_coeff = 0.5
start_x = self.width_/2
else:
x_coeff = 1.0
start_x = self.width_
if location in ['northwest', 'northeast', 'north']:
y = 0
elif location in ['west', 'center', 'east']:
y = self.height_/2 - sum([x[1]+x[2] for x in sizes])/2
else:
y = self.height_ - sum([x[1]+x[2] for x in sizes])
y = int(round(y))
for i,line in enumerate(lines):
f = font[min(i, len(font)-1)]
s = scale[min(i, len(scale)-1)]
t = thickness[min(i, len(thickness)-1)]
c = color[min(i, len(color)-1)]
fi = fade_in[min(i, len(fade_in)-1)]
fi = fi if fi is not None else 0
fo = fade_out[min(i, len(fade_out)-1)]
fo = fo if fo is not None else 0
xs = x_shift[min(i, len(x_shift)-1)]
ys = y_shift[min(i, len(y_shift)-1)]
w,h,b = sizes[i]
y += h
yy = y + ys
x = int(round(start_x - x_coeff*w)) + xs
bjs = x
bje = x+w
bis = yy - h
bie = yy + b
for j in range(start, start+fi):
r = 1.0*(j-start)/fi
orig = self.frames[j-self.shift,bis:bie,bjs:bje,...].copy()
cv2.putText(self.frames[j-self.shift,...], line, (x,yy), f, s, c, t)
self.frames[j-self.shift,bis:bie,bjs:bje,...] = r*self.frames[j-self.shift,bis:bie,bjs:bje,...] + (1-r)*orig
for j in range(start+fi, end-fo):
cv2.putText(self.frames[j-self.shift,...], line, (x,yy), f, s, c, t)
for j in range(end-fo, end):
r = 1.0*(j - (end-fo))/fo
orig = self.frames[j-self.shift,bis:bie,bjs:bje,...].copy()
cv2.putText(self.frames[j-self.shift,...], line, (x,yy), f, s, c, t)
self.frames[j-self.shift,bis:bie,bjs:bje,...] = (1-r)*self.frames[j-self.shift,bis:bie,bjs:bje,...] + r*orig
y += b
def drawArrow(self, start, end, p, q, color, arrow_magnitude=9, thickness=1, fade_in=0, fade_out=0):
self.__expand_frames(start, end)
for t in range(start, start+fade_in):
r = 1.0*(t-start)/fade_in
orig = self.frames[t-self.shift,...].copy()
draw_arrow(self.frames[t-self.shift,...], p, q, color, arrow_magnitude=arrow_magnitude, thickness=thickness)
self.frames[t-self.shift,...] = r*self.frames[t-self.shift,...] + (1-r)*orig
for t in range(start+fade_in, end-fade_out):
draw_arrow(self.frames[t-self.shift,...], p, q, color, arrow_magnitude=arrow_magnitude, thickness=thickness)
for t in range(end-fade_out, end):
r = 1.0*(t - (end-fade_out))/fade_out
orig = self.frames[t-self.shift,...].copy()
draw_arrow(self.frames[t-self.shift,...], p, q, color, arrow_magnitude=arrow_magnitude, thickness=thickness)
self.frames[t-self.shift,...] = (1-r)*self.frames[t-self.shift,...] + r*orig
def append(self, other, crossfade=0):
self.combine(other, 0, other.length(), self.length() - crossfade, self.length())
def append_load(self, fp, crossfade=0):
cap = cv2.VideoCapture(fp)
length = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
width = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
start = self.length() - crossfade
end = start + length
self.__expand_frames(start, end)
i = 0
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
frame = self.resize(frame, self.width(), self.height())
if i < crossfade:
r = 1.0*i/crossfade
self.frames[start+i-self.shift,...] = r*frame + (1-r)*self.frames[start+i-self.shift,...]
else:
self.frames[start+i-self.shift,...] = frame
i += 1
def combine(self, other, other_start, other_end, self_start, self_end_trans):
ntrans = (self_end_trans - self_start)
length = other_end - other_start
self_end = self_start + length
self.__expand_frames(self_start, self_end)
for i in range(ntrans):
r = 1.0*i/ntrans
self.frames[self_start+i-self.shift,...] = (r*other.frames[other_start+i-other.shift,...]
+ (1-r)*self.frames[self_start+i-self.shift,...])
for i in range(ntrans,length):
self.frames[self_start+i-self.shift,...] = other.frames[other_start+i-other.shift,...]
def __resize_params(self, img_width, img_height, width, height):
rw = 1.0*width/img_width
rh = 1.0*height/img_height
x_offset = 0
y_offset = 0
# Black bars on the side.
if rh < rw:
ratio = rh
x_offset = int((width - ratio*img_width)/2)
else:
ratio = rw
y_offset = int((height - ratio*img_height)/2)
return ratio, x_offset, y_offset
def resize(self, img, width, height):
if img.shape[1] == width and img.shape[0] == height:
return img
ratio, x_offset, y_offset = self.__resize_params(img.shape[1], img.shape[0], width, height)
img = cv2.resize(img, (int(ratio*img.shape[1]), int(ratio*img.shape[0])))
ret = np.zeros((height, width, 3), dtype=img.dtype)
ret[y_offset:(y_offset+img.shape[0]),x_offset:(x_offset+img.shape[1]),:] = img
return ret
def loadFrames(self, fps, start):
end = len(fps) + start
self.__expand_frames(start, end)
for i,fp in enumerate(fps):
img = cv2.imread(fp)
self.frames[i+start-self.shift,...] = self.resize(img, self.width_, self.height_)
def setFrames(self, frs, start, heatmap=None):
self.__expand_frames(start, start+frs.shape[0])
for t in range(frs.shape[0]):
img = frs[t,...]
while len(img.shape) < 3:
img = np.expand_dims(img, len(img.shape))
# Only a single color channel.
if img.shape[2] == 1:
if heatmap is not None:
#img = cv2.applyColorMap(img, heatmap)
img = cutil.grayscaleToHeatmap(img, maxVal=255, rgb_max=255)
else:
img = np.tile(img, (1,1,3))
self.frames[start+t-self.shift,...] = self.resize(img, self.width(), self.height())
def grid(self, vcs, vcs_ranges, start):
maxh = max([x.height() for x in vcs.flatten()])
maxw = max([x.width() for x in vcs.flatten()])
length = np.max(vcs_ranges[...,1] - vcs_ranges[...,0])
nrows = vcs.shape[0]
ncols = vcs.shape[1]
img = np.zeros((nrows*maxh, ncols*maxw, 3), dtype=np.uint8)
self.__expand_frames(start, start+length)
for t in range(length):
for i in range(nrows):
for j in range(ncols):
if vcs[i,j] is None:
continue
r1 = vcs_ranges[i,j,0]
try:
img[(i*maxh):((i+1)*maxh),(j*maxw):((j+1)*maxw),...] = self.resize(vcs[i,j].frames[r1+t,...], maxw, maxh)
except:
cutil.keyboard("ERROR: video_creator.py:210")
self.frames[start+t-self.shift,...] = self.resize(img, self.width(), self.height())
def grid_shift(self, vcs_start, vcs_end, vcs_ranges, start):
# First let's setup all the variables.
smaxh = max([x.height() for x in vcs_start.flatten()])
smaxw = max([x.width() for x in vcs_start.flatten()])
snrows = vcs_start.shape[0]
sncols = vcs_start.shape[1]
emaxh = max([x.height() for x in vcs_end.flatten()])
emaxw = max([x.width() for x in vcs_end.flatten()])
enrows = vcs_end.shape[0]
encols = vcs_end.shape[1]
length = np.max(vcs_ranges[...,1] - vcs_ranges[...,0])
height = self.height()
width = self.width()
sratio, sx_off, sy_off = self.__resize_params(smaxw*sncols, smaxh*snrows, width, height)
eratio, ex_off, ey_off = self.__resize_params(emaxw*encols, emaxh*enrows, width, height)
self.__expand_frames(start, start+length)
# Next get the parentage.
parents = vcs_end.copy()
for i in range(enrows):
for j in range(encols):
parents[i,j] = None
for pi in range(snrows):
for pj in range(sncols):
if vcs_start[pi, pj] == vcs_end[i, j]:
parents[i,j] = (pi, pj)
img = np.zeros((height, width, 3), dtype=np.uint8)
for t in range(length):
img[...] = 0
for i in range(enrows):
for j in range(encols):
pi, pj = parents[i,j]
r1 = vcs_ranges[i,j,0]
si1 = pi*smaxh*sratio + sy_off
sj1 = pj*smaxw*sratio + sx_off
si2 = (pi+1)*smaxh*sratio + sy_off
sj2 = (pj+1)*smaxw*sratio + sx_off
ei1 = i*smaxh*eratio + ey_off
ej1 = j*smaxw*eratio + ex_off
ei2 = (i+1)*smaxh*eratio + ey_off
ej2 = (j+1)*smaxw*eratio + ex_off
r = 1.0*t/length
i1 = int(round((1-r)*si1 + r*ei1))
i2 = int(round((1-r)*si2 + r*ei2))
j1 = int(round((1-r)*sj1 + r*ej1))
j2 = int(round((1-r)*sj2 + r*ej2))
try:
img[i1:i2,j1:j2,...] = self.resize(vcs_end[i,j].frames[t+r1,...], j2-j1, i2-i1)
except:
cutil.keyboard('err')
self.frames[start+t-self.shift,...] = self.resize(img, self.width(), self.height())
def overlay(self, other, start, other_start, other_end, threshold=0):
length = other_end - other_start
self.__expand_frames(start, start+length)
for t in range(length):
img = other.frames[other_start+t-other.shift,...]
idxs = np.where((img[...,0] > threshold) | (img[...,1] > threshold) | (img[...,2] > threshold))
if idxs[0].shape[0] == 0:
continue
for c in range(3):
ii = idxs + (np.array([c]*idxs[0].shape[0]),)
jj = (np.array([start + t-self.shift]*idxs[0].shape[0]),) + ii
self.frames[jj] = img[ii]
def blend(self, other, start, other_start, other_end, other_alpha=None):
length = other_end - other_start
self.__expand_frames(start, start+length)
for t in range(length):
img = other.frames[other_start+t-other.shift,...]
under = self.frames[start+t-self.shift,...]
if other_alpha is not None:
bf = other_alpha.frames[t,...].max(axis=-1)/255.0
else:
bf = img.max(axis=-1)/255.0
for c in range(3):
self.frames[start+t-self.shift,...,c] = img[...,c]*bf + under[...,c]*(1 - bf)
def repeatLastFrame(self, n):
start = self.length()
end = self.length() + n
self.__expand_frames(start, end)
for t in range(start, end):
self.frames[t-self.shift,...] = self.frames[start-1-self.shift,...]
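def _example_clip(out_fp='example.avi'):
    # Illustrative usage sketch added for documentation; not part of the
    # original module. Output settings assume the OpenCV 2.x API used above.
    vc = VideoCreator(320, 240)
    vc.solidColor(0, 90, (40, 40, 40))              # 3 s of dark grey at 30 fps
    vc.placeText(['Hello'], 0, 90, location='center',
                 scale=1, fade_in=15, fade_out=15)  # fading title
    vc.save(out_fp, fps=30)
    return vc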
| gpl-3.0 | 5,922,609,657,792,200,000 | 41.514151 | 129 | 0.524576 | false |
diogenesfilho/Estadio | Estádio/ArquibancadaMetalicaTeste.py | 1 | 5668 | # -*- coding: utf-8 -*-
from math import cos
from math import pi
from math import sin
import timeit
#import numpy
import ctypes
import random
from sys import argv
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
global esqdir,cimabaixo
global mouseX, mouseY,mouseX_ant, mouseY_ant
global distancia
global obj
esqdir,cimabaixo = 0,0
mouseY,mouseX,mouseX_ant,mouseY_ant = .0,.0,.0,.0
distancia = 20
obj = GLuint()
def grade(qtd):
glRotate(-90,1,0,0)
glPushMatrix()
glColor(0,0,0)
for i in range(qtd):
glutSolidCylinder(0.08,(i+1),10,10)
glTranslate(1,0,0)
glPopMatrix()
glRotate(90,1,0,0)
def bancos(qtd):
glPushMatrix()
glScale(.5,.4,2)
for i in range(qtd):
glutSolidCube(0.5)
glTranslate(0.5,0,0)
glPopMatrix()
def corrimao():
    # HANDRAIL
glPushMatrix()
glColor3f(0.3,0.3,0.3)
glTranslate(-0.6,3.5,17)
glutSolidCylinder(0.02, 3.0, 40, 10)
glPopMatrix()
glPushMatrix()
glColor3f(0.8,0.8,0.8)
glTranslate(-0.6,3.4,17)
glutSolidCylinder(0.02, 3.0, 40, 10)
glPopMatrix()
glPushMatrix()
glColor3f(0.8,0.8,0.8)
glTranslate(-0.6,3.3,17)
glutSolidCylinder(0.02, 3.0, 40, 10)
glPopMatrix()
glPushMatrix()
glColor3f(0.3,0.3,0.3)
glRotate(90, 1.0, 0.0, 0.0)
glTranslate(-0.6,18,-3.5)
glutSolidCylinder(0.02, 0.5, 40, 10)
glPopMatrix()
def desenho():
global obj
obj = glGenLists(1)
glNewList(obj, GL_COMPILE)
# PISO PASSAGEM
glPushMatrix()
glTranslate(0,1,99.9)
glRotate(90,0,1,0)
for i in range(1):
glScale(1,1,2)
bancos(400)
glTranslate(0,1,1)
        glColor3f(3,0,0) # <- Erase the hat here.
glRotate(90,1,0,0)
glTranslate(0,3.5,-8)
bancos(400)
glPopMatrix()
glPushMatrix()
glTranslate(2,-15,-85)
glScale(5,5,5)
for i in range(15):
corrimao()
glTranslate(0,0,1)
glPopMatrix()
glPushMatrix()
glTranslate(0.4,1,100)
glRotate(90,0,1,0)
for i in range(9):
if i % 2 == 0:
glColor3f(0.2,0.2,0.2)
else:
glColor3f(0.8,0.8,0.8)
bancos(400)
glTranslate(0,1,1)
glPopMatrix()
for i in range(50):
glPushMatrix()
grade(10)
glRotate(-180,0,1,0)
glRotate(-90,0,0,1)
glTranslate(-9,-9,0)
grade(10)
glPopMatrix()
glTranslate(0,0,2)
glEndList()
def executar():
global obj
glCallList(obj)
def iluminacao_da_cena():
luzAmbiente=[0.2,0.2,0.2,1.0]
    luzDifusa=[0.7,0.7,0.7,1.0]  # "color"
    luzEspecular = [1.0, 1.0, 1.0, 1.0]  # "brightness" (specular highlight)
    posicaoLuz=[25, 50.0, 50.0, 1.0]
    # Shininess capability of the material
especularidade=[1.0,1.0,1.0,1.0]
especMaterial = 60;
    # Specify that the window background color is white
    glClearColor(1.0, 1.0, 1.0, 1.0)
    # Enable the Gouraud shading model
    glShadeModel(GL_SMOOTH)
    # Set the material reflectance
    glMaterialfv(GL_FRONT,GL_SPECULAR, especularidade)
    # Set the concentration of the specular highlight
    glMateriali(GL_FRONT,GL_SHININESS,especMaterial)
    # Enable the use of ambient light
    glLightModelfv(GL_LIGHT_MODEL_AMBIENT, luzAmbiente)
    # Set the parameters of light number 0
    glLightfv(GL_LIGHT0, GL_AMBIENT, luzAmbiente)
    glLightfv(GL_LIGHT0, GL_DIFFUSE, luzDifusa )
    glLightfv(GL_LIGHT0, GL_SPECULAR, luzEspecular )
    glLightfv(GL_LIGHT0, GL_POSITION, posicaoLuz )
    # Enable setting the material color from the current color
    glEnable(GL_COLOR_MATERIAL)
    # Enable lighting
    glEnable(GL_LIGHTING)
    # Enable light number 0
    glEnable(GL_LIGHT0)
    # Enable depth-buffering
    glEnable(GL_DEPTH_TEST)
def tela():
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) # Clear the screen
    glClearColor(1.0, 1.0, 1.0, 1.0) # Clear the window with the specified color
    glMatrixMode(GL_PROJECTION) # Switch to the projection matrix
    glLoadIdentity() # load the identity matrix
    gluPerspective(distancia,1,0.1,500) # Specify the perspective projection
    glMatrixMode(GL_MODELVIEW) # Switch to the model coordinate system
    glLoadIdentity() # Initialize the model coordinate system
    gluLookAt(sin(esqdir) * 10, cimabaixo ,cos(esqdir) * 10, mouseX,mouseY,0, 0,1,0) # Specify the positions of the observer and the target
    #iluminacao_da_cena()
    glEnable(GL_DEPTH_TEST) # determine which pixels should be drawn in the 3D scene
    executar()
    glFlush() # Apply the drawing
def teclado(tecla,x,y):
global esqdir
global cimabaixo
if tecla == b'a':
esqdir = esqdir - 0.1
elif tecla == b'd':
esqdir = esqdir + 0.1
elif tecla == b'w':
cimabaixo = cimabaixo + 0.1
elif tecla == b's':
cimabaixo = cimabaixo - 0.1
glutPostRedisplay()
def mouse(x,y):
global mouseX, mouseY, mouseY_ant, mouseX_ant
mouseX = (mouseX - mouseX_ant) * 0.005
mouseY = (mouseY_ant - mouseY) * 0.005
mouseY_ant,mouseX_ant = y,x
glutPostRedisplay()
def scroll(button,state,x,y):
global distancia
if(button == 3):
distancia += 2
elif(button == 4):
distancia -= 4
glutPostRedisplay()
glutInit(argv)
glutInitDisplayMode(GLUT_RGBA | GLUT_DEPTH)
glutInitWindowSize(600,600)
glutCreateWindow("Arquibancada")
distancia = 20
desenho()
glutDisplayFunc(tela)
glutMotionFunc(mouse)
glutMouseFunc(scroll)
glutKeyboardFunc (teclado)
glutMainLoop() # Start the GLUT event loop
| gpl-2.0 | -4,241,487,094,422,690,000 | 23.876652 | 129 | 0.630777 | false |
radio-ho0/rpi_temp_humi | dht11_py/xtemp.py | 1 | 3048 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
"""
"""
import sys
import time
import datetime
import dht11
import RPi.GPIO as GPIO
from PySide import QtGui, QtCore
# initialize GPIO
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.cleanup()
# read data using pin 14
#instance = dht11.DHT11(pin = 14)
instance = dht11.DHT11(pin = 18)
class TempWidget(QtGui.QWidget):
def __init__(self):
super(TempWidget, self).__init__()
self.initUi()
def initUi(self):
lb_title = QtGui.QLabel('temperature viewer', self)
lb_title.move(180, 15)
self.initBtExit()
lb_dht11_hum = QtGui.QLabel('Dht11 hum')
self.le_dht11_hum =QtGui.QLineEdit()
lb_dht11_temp = QtGui.QLabel('dht11 temp')
self.le_dht11_temp = QtGui.QLineEdit()
self.lb_18b20_temp = QtGui.QLabel('Ds18b20')
self.le_18b20_temp = QtGui.QLineEdit()
grid = QtGui.QGridLayout()
grid.setSpacing(10)
grid.addWidget( lb_dht11_hum, 1, 0, 1, 1)
grid.addWidget( self.le_dht11_hum, 1, 1, 1, 1)
grid.addWidget( lb_dht11_temp, 2, 0, 1, 1)
grid.addWidget( self.le_dht11_temp, 2, 1, 1, 1)
grid.addWidget( self.lb_18b20_temp, 3, 0, 1, 1)
grid.addWidget( self.le_18b20_temp, 3, 1, 1, 1)
self.setLayout(grid)
self.le_18b20_temp.setText('18')
update_timer = QtCore.QTimer(self)
update_timer.timeout.connect(self.get_all_temp)
update_timer.start(2000)
self.show()
def initBtExit(self):
btn1 = QtGui.QPushButton('aHa!', self)
btn1.setToolTip('Just for play!')
btn1.resize( btn1.sizeHint())
btn1.move( 10, 10)
btnExit = QtGui.QPushButton('&Exit', self)
btnExit.setToolTip('88')
btnExit.clicked.connect(QtCore.QCoreApplication.instance().quit)
btnExit.move( 380, 320 )
def get_all_temp(self):
self.get_dht11()
self.get_db18b20()
def get_db18b20(self):
tempfile = open("/sys/bus/w1/devices/28-031571bf56ff/w1_slave")
thetext = tempfile.read()
tempfile.close
tempdata = thetext.split("\n")[1].split(" ")[9]
temperature = float(tempdata[2:])
temperature = temperature / 1000
self.le_18b20_temp.setText( str(temperature) )
print("db18b20: " , temperature)
def get_dht11(self):
result = instance.read()
if result.is_valid():
print("Last valid input: " + str(datetime.datetime.now()))
print("Temperature: %d C" % result.temperature)
print("Humidity: %d %%" % result.humidity)
self.le_dht11_hum.setText(str(result.humidity))
self.le_dht11_temp.setText(str(result.temperature))
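def _example_parse_w1(sample_text=None):
    # Hedged sketch added for illustration; not part of the original script.
    # Parses the temperature out of a w1_slave dump the same way
    # TempWidget.get_db18b20() does; the sample text is made up but
    # representative of the sensor output.
    if sample_text is None:
        sample_text = ("4b 01 4b 46 7f ff 05 10 e1 : crc=e1 YES\n"
                       "4b 01 4b 46 7f ff 05 10 e1 t=20687\n")
    tempdata = sample_text.split("\n")[1].split(" ")[9]
    return float(tempdata[2:]) / 1000  # -> 20.687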
def main():
app = QtGui.QApplication(sys.argv)
m_widget = TempWidget()
m_widget.resize( 480, 360 )
m_widget.setWindowTitle('Temperature viewer!')
sys.exit( app.exec_())
if __name__ == '__main__' :
main()
| gpl-2.0 | -5,555,138,411,479,741,000 | 24.830508 | 74 | 0.593504 | false |
KevinOConnor/klipper | klippy/extras/delayed_gcode.py | 1 | 2248 | # A simple timer for executing gcode templates
#
# Copyright (C) 2019 Eric Callahan <[email protected]>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import logging
class DelayedGcode:
def __init__(self, config):
self.printer = config.get_printer()
self.reactor = self.printer.get_reactor()
self.name = config.get_name().split()[1]
self.gcode = self.printer.lookup_object('gcode')
gcode_macro = self.printer.load_object(config, 'gcode_macro')
self.timer_gcode = gcode_macro.load_template(config, 'gcode')
self.duration = config.getfloat('initial_duration', 0., minval=0.)
self.timer_handler = None
self.inside_timer = self.repeat = False
self.printer.register_event_handler("klippy:ready", self._handle_ready)
self.gcode.register_mux_command(
"UPDATE_DELAYED_GCODE", "ID", self.name,
self.cmd_UPDATE_DELAYED_GCODE,
desc=self.cmd_UPDATE_DELAYED_GCODE_help)
def _handle_ready(self):
waketime = self.reactor.NEVER
if self.duration:
waketime = self.reactor.monotonic() + self.duration
self.timer_handler = self.reactor.register_timer(
self._gcode_timer_event, waketime)
def _gcode_timer_event(self, eventtime):
self.inside_timer = True
try:
self.gcode.run_script(self.timer_gcode.render())
except Exception:
logging.exception("Script running error")
nextwake = self.reactor.NEVER
if self.repeat:
nextwake = eventtime + self.duration
self.inside_timer = self.repeat = False
return nextwake
cmd_UPDATE_DELAYED_GCODE_help = "Update the duration of a delayed_gcode"
def cmd_UPDATE_DELAYED_GCODE(self, gcmd):
self.duration = gcmd.get_float('DURATION', minval=0.)
if self.inside_timer:
self.repeat = (self.duration != 0.)
else:
waketime = self.reactor.NEVER
if self.duration:
waketime = self.reactor.monotonic() + self.duration
self.reactor.update_timer(self.timer_handler, waketime)
def load_config_prefix(config):
return DelayedGcode(config)
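# Hedged configuration sketch added for illustration (not part of the original
# file). A printer.cfg section like the following creates and arms one timer:
#
#   [delayed_gcode report_temp]
#   initial_duration: 2.
#   gcode:
#     M105
#
# and "UPDATE_DELAYED_GCODE ID=report_temp DURATION=5" reschedules it at runtime.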
| gpl-3.0 | 6,386,100,682,036,468,000 | 40.62963 | 79 | 0.636121 | false |
n3storm/django-dynamic-preferences | dynamic_preferences/urls.py | 1 | 1081 | from django.conf.urls import patterns, include, url
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import login_required
from . import views
from . import global_preferences_registry
from .forms import GlobalPreferenceForm
urlpatterns = patterns('',
url(r'^global/$',
staff_member_required(views.PreferenceFormView.as_view(
registry=global_preferences_registry,
form_class=GlobalPreferenceForm)),
name="dynamic_preferences.global"),
url(r'^global/(?P<section>[\w\ ]+)$',
staff_member_required(views.PreferenceFormView.as_view(
registry=global_preferences_registry,
form_class=GlobalPreferenceForm)),
name="dynamic_preferences.global.section"),
url(r'^user/$',
login_required(views.UserPreferenceFormView.as_view()),
name="dynamic_preferences.user"),
url(r'^user/(?P<section>[\w\ ]+)$',
login_required(views.UserPreferenceFormView.as_view()),
name="dynamic_preferences.user.section"),
)
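# Hedged usage note added for illustration (not part of the original file): a
# project-level urls.py would typically mount these patterns under a prefix, e.g.
#
#   urlpatterns = patterns('',
#       url(r'^preferences/', include('dynamic_preferences.urls')),
#   )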
| bsd-3-clause | 8,162,394,057,924,089,000 | 37.607143 | 71 | 0.690102 | false |
aidin36/beneath-a-binary-sky | tests/test_utils/test_id_generator.py | 1 | 1205 | # This file is part of Beneath a Binary Sky.
# Copyright (C) 2016, Aidin Gharibnavaz <[email protected]>
#
# Beneath a Binary Sky is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Beneath a Binary Sky is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Beneath a Binary Sky. If not, see
# <http://www.gnu.org/licenses/>.
import unittest
from utils.id_generator import IDGenerator
class TestIDGenerator(unittest.TestCase):
def test_id_serial(self):
'''Tests if the serial of the ID increases.'''
id_generator = IDGenerator()
first_id = id_generator.get_robot_id()
second_id = id_generator.get_robot_id()
self.assertEqual(int(first_id.split('.')[2]) + 1,
int(second_id.split('.')[2]))
| gpl-3.0 | -8,666,536,903,539,326,000 | 35.515152 | 73 | 0.704564 | false |
jjdmol/LOFAR | LCU/StationTest/prbs_dir_test.py | 1 | 7843 |
""" script for testing PRBS data in dir.
This script can be used for testing data from the TBB, it will be used by TBB test scripts.
Started by Gijs, 16 dec 07
Modified by Gijs on March 17 2009:
-PRBS test bug fixed, when data is all 0 error did't count.
-CRC test on files with RRBS errors. When a PRBS error and no CRC, error in RCU-to-RSP communications, when both has errors, error between RSP-to-TBB communication
Modified by Menno on Sept 21 2009:
-Removed Samples Checked because sometime 10238 or 10239
"""
# INIT
import array
import operator
import os
import time
import commands
# Look for files to test
def open_dir() :
files = os.listdir('./prbs/.')
files.sort()
#print files
return files
# Open de file for testing
def open_file(files, file_nr) :
file_name = './prbs/' + files[file_nr][:]
if files[file_nr][-3:] == 'dat':
fileinfo = os.stat(file_name)
size = int(fileinfo.st_size)
f=open(file_name,'rb')
max_frames = size/(88 + 1024*2 + 4)
frames_to_proces=max_frames
else :
frames_to_proces=0
f=open(file_name,'rb')
return f, frames_to_proces
# Read single frame from file
def read_frame(f, info_plot, frame_nr,f_log):
station_info = array.array('B')
station_info.fromfile(f,4) # Bytes 0..3
time_info = array.array('L')
time_info.fromfile(f,3) # Bytes 4..15
if (info_plot) :
time_string = time.ctime(time_info[1])
# string_info = 'Frame nr %(FR)d Station %(ST)d RSP %(RSP)d RCU %(RCU)d Sample rate %(S)d MHz time of data %(ti_D)s and %(SN)00.6f seconds'%\
# {"FR": frame_nr, "ST": station_info[0] ,"RSP": station_info[1], "RCU": station_info[2], "S": station_info[3], "ti_D": time_string,"SN": float(time_info[2])/float(200000000)}
string_info = 'Frame nr %(FR)d RSP %(RSP)d RCU %(RCU)d Sample rate %(S)d MHz'%\
{"FR": frame_nr,"RSP": station_info[1], "RCU": station_info[2], "S": station_info[3]}
# print string_info
f_log.write(string_info + '\n')
div_info = array.array('H')
div_info.fromfile(f,36) # Bytes 16..87
# READ DATA SAMPLES
data_in = array.array('H')
samples = int(div_info[0])
data_in.fromfile(f,samples)
data_list = data_in.tolist()
data_crc = array.array('l')
data_crc.fromfile(f,1)
return data_list, time_info[1], time_info[2]
# Function for testing PRBS data
def PRBS_CHECK(data_list, prev):
samples_chk=0
prbs_err=0
for i in range(0,len(data_list)) :
if prev == 0x0FFF :
prev = data_list[i] & 0x07FF
elif data_list[i] == 0xFFFF :
prbs_err = prbs_err + 1
elif data_list[i] == data_list[i-1]:
cur = data_list[i]
samples_chk = samples_chk + 1
prbs_err = prbs_err + 1
prev = data_list[i] & 0x07FF
else :
cur = data_list[i] & 0x0FFE
samples_chk = samples_chk + 1
if cur != 2*prev :
prbs_err = prbs_err + 1
# print(str(i) + ' ' + hex(2*prev) + ' ' + hex(cur))
prev = data_list[i] & 0x07FF
return samples_chk, prbs_err, prev
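def _example_prbs_check():
    # Hedged sketch added for illustration; not part of the original script.
    # Feeds a tiny hand-made doubling sequence through PRBS_CHECK; with
    # prev=0x0FFF the first sample only seeds the checker, so this returns
    # (3, 0, 8): three samples checked, no PRBS errors, last masked value 8.
    samples = [0x0001, 0x0002, 0x0004, 0x0008]
    return PRBS_CHECK(samples, 0x0FFF)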
# Function for testing CRC of header
def CRC16_check(buf) :
CRC=0
CRC_poly=0x18005
bits=16
data=0
CRCDIV = (CRC_poly & 0x7fffffff) * 32768 # << 15
data = (buf[0] & 0x7fffffff) << 16
len_buf = len(buf)
for cnt in range(1,len_buf) :
data = data + buf[cnt]
for cnt in range(bits) :
if data & 0x80000000 :
data = data ^ CRCDIV
data = data & 0x7fffffff
data = data * 2 # << 1
CRC = data >> 16
return CRC
# Function for testing CRC of data
def CRC32_check(buf) :
CRC=0
CRC_poly=0x104C11DB7 # 1 0000 0100 1100 0001 0001 1101 1011 0111
bits=16
data=0
CRCDIV = (CRC_poly & 0x7fffffffffff) * 32768 #<< 15
data = buf[0]
data = data & 0x7fffffffffff
data = data << 16
data = data + buf[1]
data = data & 0x7fffffffffff
data = data << 16
len_buf = len(buf)
for cnt in range(2,len_buf) :
data = data + buf[cnt]
for cnt in range(bits) :
if data & 0x800000000000 :
data = data ^ CRCDIV
data = data & 0x7fffffffffff
data = data * 2 # << 1
CRC = int(data >> 16)
return CRC
#Function for testing CRC of complete frame (header and data)
def crc_frame(f, info_plot, frame_nr,f_log):
CRC_ERROR=0
header = array.array('H')
data_in = array.array('H')
data_crc = array.array('H')
# READING HEADER INFORMATION
header.fromfile(f,44) # Bytes 0..88
# remove SEQNR from header, this data is added after CRC calculations
header[2]=0
header[3]=0
if CRC16_check(header) :
str_info = 'CRC ERROR IN HEADER '
# f_log.write(str_info )
CRC_ERROR=1
Station_id = header[0] & 0xFF
RSP_id = header[0] >> 8
RCU_id = header[1] &0xFF
Sample_rate = header[1] >> 8
Time = float((header[5] * 65536) + header[4])
Sample_nr = (header[7] * 65536) + header[6]
Samples = header[8]
if (info_plot) :
time_string = time.ctime(Time)
# str_info = 'Frame nr %(FR)d Station %(ST)d RSP %(RSP)d RCU %(RCU)d Sample rate %(S)d MHz time of data %(ti_D)s and %(SN)00.6f seconds'%\
# {"FR": frame_nr, "ST": Station_id ,"RSP": RSP_id, "RCU": RCU_id, "S": Sample_rate, "ti_D": time_string,"SN": float(Sample_nr)/float(200000000)}
# print string_info
# f_log.write(str_info + '\n')
del(header)
# READ DATA SAMPLES
data_in.fromfile(f,1024)
data_crc.fromfile(f,2)
data_list = data_in.tolist()
for cnt in range(len(data_in)):
data_in[cnt] = (data_in[cnt] & 0x0FFF)
data_in.append(data_crc[1])
data_in.append(data_crc[0])
if CRC32_check(data_in):
str_info = 'CRC ERROR IN DATA, '
# f_log.write(str_info )
CRC_ERROR=1
return CRC_ERROR
# Main loop
def main() :
files = open_dir()
f_log = file('prbs_dir_test.log', 'w')
    f_log.write('\n \n PRBS test \n \n')
for file_cnt in range(len(files)) :
prev = 0x0FFF;
samples_chk=0
prbs_err=0
o_ta=0
o_tb=0
(f, frames_to_proces) = open_file(files, file_cnt)
if frames_to_proces >0 :
for frame_cnt in range(frames_to_proces):
data_list, ta, tb = read_frame(f, (frame_cnt==0), frame_cnt, f_log)
if (((ta==o_ta) and tb==(o_tb+1024)) or (ta == (o_ta+1))) :
# if (tb==(o_tb+1)) :
prev = prev
else:
prev=0x0FFF
r_samples_chk, r_prbs_err, prev = PRBS_CHECK(data_list, prev)
samples_chk = samples_chk + r_samples_chk
prbs_err = prbs_err + r_prbs_err
o_ta = ta
o_tb = tb
# plot results
# print 'PRBS errors: ' + str(prbs_err)
f_log.write('PRBS errors: ' + str(prbs_err) + '\n')
            f.close()
if prbs_err > 0:
(f, frames_to_proces) = open_file(files, file_cnt)
if frames_to_proces >0 :
crc_err=0
for frame_cnt in range(frames_to_proces):
crc_err = crc_err + crc_frame(f, (frame_cnt==0), frame_cnt, f_log)
# print 'PRBS errors: ' + str(prbs_err)
f_log.write('Number of frames with CRC errors: ' + str(crc_err) + '\n')
                f.close()
    f_log.close()
if __name__ == "__main__":
main()
| gpl-3.0 | 6,226,011,526,394,658,000 | 32.806034 | 178 | 0.536529 | false |
uwosh/uwosh.itpeoplesoftdocs | uwosh/itdocs/content/queryinstructions.py | 1 | 2467 | """Definition of the QueryInstructions content type
"""
from zope.interface import implements
from Products.Archetypes import atapi
from Products.ATContentTypes.content import folder
from Products.ATContentTypes.content import schemata
from uwosh.itdocs import itdocsMessageFactory as _
from uwosh.itdocs.interfaces import IQueryInstructions
from uwosh.itdocs.config import PROJECTNAME
from Products.ATContentTypes.configuration import zconf
QueryInstructionsSchema = folder.ATFolderSchema.copy() + atapi.Schema((
# -*- Your Archetypes field definitions here ... -*-
atapi.TextField('operation',
storage=atapi.AnnotationStorage(),
default_output_type = 'text/x-html-safe',
widget=atapi.LabelWidget(label=_(u'Operation'), rows = 25,
description=_(u'How to run this query'),),
default='',
),
atapi.TextField('parameters',
storage=atapi.AnnotationStorage(),
default_output_type = 'text/x-html-safe',
widget=atapi.RichWidget(label=_(u'Parameters'), rows = 25,
description=_(u'Parameters to enter for this query'),),
default='',
),
atapi.TextField('additionalinfo',
storage=atapi.AnnotationStorage(),
default_output_type = 'text/x-html-safe',
widget=atapi.RichWidget(label=_(u'Additional Information'), rows = 25,
description=_(u''),),
default='Information for the system is in the document: _SystemInfo.doc',
),
))
# Set storage on fields copied from ATFolderSchema, making sure
# they work well with the python bridge properties.
QueryInstructionsSchema['title'].widget.label = _(u'Query Name')
#QueryInstructionsSchema['description'].widget.label = _(u'Purpose')
QueryInstructionsSchema['description'].widget.description=_(u'Query Function')
QueryInstructionsSchema['description'].default=''
QueryInstructionsSchema['description'].visible = {'view':'visible', 'edit':'visible'}
schemata.finalizeATCTSchema(
QueryInstructionsSchema,
folderish=True,
moveDiscussion=False
)
class QueryInstructions(folder.ATFolder):
"""Instructions for running a PeopleSoft query (UW Oshkosh Administrative Computing)"""
implements(IQueryInstructions)
meta_type = "QueryInstructions"
schema = QueryInstructionsSchema
atapi.registerType(QueryInstructions, PROJECTNAME)
| gpl-2.0 | 6,203,758,016,776,983,000 | 33.746479 | 95 | 0.68788 | false |
Jc2k/libcloudcore | libcloudcore/importer.py | 1 | 3816 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import imp
import sys
from .loader import Loader
from .models import Model
from .driver import Driver
from .utils import force_str
from . import backend, client
class Importer(object):
def __init__(self, module_prefix, backend=backend.Driver):
self.loader = Loader()
self.module_prefix = "{}.".format(module_prefix)
self.backend = backend
def find_module(self, fullname, path):
if fullname.startswith(self.module_prefix):
service = fullname[len(self.module_prefix):].replace(".", "/")
if self.loader.find(service):
return self
return self
return None
def load_module(self, fullname):
if fullname in sys.modules:
return sys.modules[fullname]
service = fullname[len(self.module_prefix):].replace(".", "/")
if not self.loader.find(service):
raise ImportError("No such module {}".format(fullname))
module = sys.modules[fullname] = imp.new_module(fullname)
module.__name__ = fullname
module.__loader__ = self
module.__path__ = [fullname]
if self.loader.is_service(service):
module.Client = self.get_client(service)
module.Client.__module__ = module
module.Driver = module.Client.Driver
module.Driver.__module__ = module
module.__all__ = ['Client']
module.__package__ = fullname.rpartition('.')[0]
elif self.loader.is_namespace(service):
module.__package__ = fullname
return module
def get_driver_method(self, operation):
def method(self, *args, **kwargs):
return self.driver.call(operation, *args, **kwargs)
setattr(method, "__doc__", operation.documentation)
setattr(method, "__name__", force_str(operation.name))
return method
def get_waiter_method(self, waiter):
def method(self, *args, **kwargs):
return self.driver.wait(waiter, *args, **kwargs)
setattr(method, "__doc__", waiter.documentation)
setattr(method, "__name__", force_str(waiter.name))
return method
def get_driver(self, service):
model = Model(self.loader.load_service(service))
if not model.name:
model.name = service
bases = (Driver, self.backend) + model.request_pipeline
attrs = {
'name': service,
'model': model,
}
return type("Driver", bases, attrs)
def get_client(self, service):
driver = self.get_driver(service)
model = driver.model
attrs = {
'name': service,
'__doc__': model.documentation,
'Driver': driver,
}
for operation in model.get_operations():
attrs[operation.name] = self.get_driver_method(operation)
for waiter in model.get_waiters():
attrs[waiter.name] = self.get_waiter_method(waiter)
return type("Client", (client.Client, ), attrs)
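# Illustrative usage sketch (assumption, not part of this module): an Importer is
# normally installed on sys.meta_path so that importing a module under the prefix
# builds a Client/Driver pair from the corresponding service model, e.g.:
#
#   import sys
#   sys.meta_path.append(Importer("libcloudcore.drivers"))
#
# The prefix shown here is hypothetical; the real one is whatever the package
# passes to Importer().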
| apache-2.0 | -4,060,576,626,925,605,400 | 33.378378 | 74 | 0.622904 | false |
Florianboux/zds-site | zds/utils/templatetags/tests/tests_date.py | 3 | 4180 | # -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from django.test import TestCase
from django.template import Context, Template
class DateFormatterTest(TestCase):
# todo: Add test with localization parameters
def setUp(self):
now = datetime.now()
date_previous_in_day = now - timedelta(hours=1)
date_previous_abs = datetime(2013, 9, 12, hour=11, minute=10, second=42, microsecond=10)
date_future_in_day = now + timedelta(hours=1)
yearlapse = now - timedelta(days=366)
self.context = Context({"date_previous_in_day": date_previous_in_day,
"date_previous_abs": date_previous_abs,
"date_future_in_day": date_future_in_day,
"yearlapse": yearlapse,
"date_epoch": 42,
"NoneVal": None})
def test_format_date(self):
# Default behaviour
tr = Template("{% load date %}"
"{{ date_previous_in_day | format_date }}"
).render(self.context)
self.assertEqual(u"il y a une heure", tr)
tr = Template("{% load date %}"
"{{ date_future_in_day | format_date }}"
).render(self.context)
self.assertEqual(u"Dans le futur", tr)
tr = Template("{% load date %}"
"{{ date_previous_abs | format_date }}"
).render(self.context)
self.assertEqual(u"jeudi 12 septembre 2013 à 11h10", tr)
# small == False :=> Same behaviour
tr = Template("{% load date %}"
"{{ date_previous_in_day | format_date:False }}"
).render(self.context)
self.assertEqual(u"il y a une heure", tr)
tr = Template("{% load date %}"
"{{ date_future_in_day | format_date:False }}"
).render(self.context)
self.assertEqual(u"Dans le futur", tr)
tr = Template("{% load date %}"
"{{ date_previous_abs | format_date:False }}"
).render(self.context)
self.assertEqual(u"jeudi 12 septembre 2013 à 11h10", tr)
# small == True :=> absolute date change
tr = Template("{% load date %}"
"{{ date_previous_in_day | format_date:True }}"
).render(self.context)
self.assertEqual(u"il y a une heure", tr)
tr = Template("{% load date %}"
"{{ date_future_in_day | format_date:True }}"
).render(self.context)
self.assertEqual(u"Dans le futur", tr)
tr = Template("{% load date %}"
"{{ date_previous_abs | format_date:True }}"
).render(self.context)
self.assertEqual(u"12/09/13 à 11h10", tr)
# Bad format
tr = Template("{% load date %}"
"{{ NoneVal | format_date }}"
).render(self.context)
self.assertEqual(u"None", tr)
def test_tooltip_date(self):
# Default behaviour
# Todo: Add test to step time less than one day with tooltip
# Todo: I don't know how to test this without hugly hack on datetime.now()
tr = Template("{% load date %}"
"{{ date_future_in_day | tooltip_date }}"
).render(self.context)
self.assertEqual(u"Dans le futur", tr)
tr = Template("{% load date %}"
"{{ yearlapse | tooltip_date }}"
).render(self.context)
self.assertEqual(u"il y a 1\xa0année", tr)
# Bad format
tr = Template("{% load date %}"
"{{ NoneVal | tooltip_date }}"
).render(self.context)
self.assertEqual(u"None", tr)
def test_humane_time(self):
# Default behaviour
tr = Template("{% load date %}"
"{{ date_epoch | humane_time }}"
).render(self.context)
self.assertEqual(u"01 Jan 1970, 01:00:42", tr)
| gpl-3.0 | -2,024,235,395,036,158,200 | 36.963636 | 96 | 0.497845 | false |
ToAruShiroiNeko/revscoring | revscoring/datasources/parent_revision.py | 1 | 1575 | import mwparserfromhell as mwp
from deltas.tokenizers import wikitext_split
from . import revision
from .datasource import Datasource
metadata = Datasource("parent_revision.metadata")
"""
Returns a :class:`~revscoring.datasources.types.RevisionMetadata` for the
parent revision.
"""
text = Datasource("parent_revision.text")
"""
Returns the text content of the parent revision.
"""
# ############################### Tokenized ###################################
def process_tokens(revision_text):
return [t for t in wikitext_split.tokenize(revision_text or '')]
tokens = Datasource("parent_revision.tokens",
process_tokens, depends_on=[text])
"""
Returns a list of tokens.
"""
# ############################## Parse tree ###################################
def process_parse_tree(revision_text):
return mwp.parse(revision_text or "")
parse_tree = Datasource("parent_revision.parse_tree",
process_parse_tree, depends_on=[text])
"""
Returns a :class:`mwparserfromhell.wikicode.Wikicode` abstract syntax tree
representing the content of the revision.
"""
content = Datasource("parent_revision.content", revision.process_content,
depends_on=[parse_tree])
"""
Returns the raw content (no markup or templates) of the revision.
"""
content_tokens = Datasource("parent_revision.content_tokens",
revision.process_content_tokens,
depends_on=[content])
"""
Returns tokens from the raw content (no markup or templates) of the current
revision
"""
| mit | 6,807,011,217,652,050,000 | 28.716981 | 79 | 0.633651 | false |
ses4j/THEbot | database_generator.py | 1 | 5097 | """
THEbot, a Texas Hold'em poker software library.
Copyright (C) 2011 Scott Stafford
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
database_generator.py
Execute this script to regenerate the precomputed databases of poker
hands and their respective values: pokervals?.shelf for 5, 6, and 7 hands.
"""
import poker,pickle,sys,shelve,anydbm,time,logging
from poker_globals import *
global pokerval_cache,pokerval_cachehits,pokerval_cachemisses,weightedcomparehands_cache,weightedcomparehands_cachehits
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
def _clear_pokerval_cache():
global pokerval_cache,pokerval_cachehits,pokerval_cachemisses,weightedcomparehands_cache,weightedcomparehands_cachehits
pokerval_cache={}
weightedcomparehands_cache={}
pokerval_cachehits=0
pokerval_cachemisses=0
weightedcomparehands_cachehits=0
def calculate_pokerval(_cards):
""" Calculate/retrieve a pokerval from a set of 5 or more cards. Also return
the 'index' used for db storage. """
global pokerval_cache,pokerval_cachehits,pokerval_cachemisses
cards = poker.normalize_cards(_cards)
try:
index = poker.make_stringindex(cards)
try:
pokerval = pokerval_cache[index]
pokerval_cachehits+=1
return index, pokerval
except KeyError:
pokerval_cachemisses+=1
pass
pokerval = 0
if len(cards) == 5:
pokerval = poker.CalculatingHand(cards).getpokerval()
elif len(cards) > 5:
for fivecards in xuniqueCombinations(cards,5):
hand = poker.Hand(fivecards)
pokerval = max(pokerval, hand.getpokerval())
else:
raise ValueError("Not enough cards!")
pokerval_cache[index] = pokerval
except KeyError:
errstr = "Hand not in database: %s %s, <%s>, %s"%(format_cards(_cards),format_cards(cards),index,reverse_stringindex(index))
raise KeyError(errstr)
except:
raise
return index,pokerval
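# Illustrative sketch (not part of the original module): cards are (value, suit)
# tuples with value 2..14 and suit 1..4, matching the deck built in
# regenerate_database() below. A hypothetical call for a 5-card hand:
#
#   hand = [(14, 1), (13, 1), (12, 1), (11, 1), (10, 1)]
#   idx, pokerval = calculate_pokerval(hand)
#   # idx is the normalized string index used as the shelf key; pokerval is the
#   # best 5-card value as computed by the poker module.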
def regenerate_database():
""" Go thru each possible hand and make a new db with the data items. """
deck = []
for val in range(2,15):
for suit in range(1,5):
deck.append((val,suit))
possiblehands = {
5: (2598960, 160537),
6: (20358520, 1250964),
7: (133784560, 210080),
}
allCombinations = sum([y[0] for (x,y) in possiblehands.iteritems()])
print """Generating all 5, 6, and 7 card hands. It takes a while
(there are %d possible combinations) so find something else to do for a bit.
If you kill the process at any time, no problem, you can resume it where it left off
just by rerunning this method.
Let's begin...
""" % allCombinations
start_time_all = time.clock()
for numcards in range(5, 8):
i = 0
_clear_pokerval_cache()
start_time = time.clock()
db = shelve.open("pokervals"+str(numcards)+".shelf",protocol=2)
try:
num_computed = db["num_computed"]
except KeyError:
num_computed = 0
(total, uniqueindices) = possiblehands[numcards]
if len(db) != uniqueindices + 1: # +1 cause we store the counter in the database too, for restarting.
print "Generating all "+str(total)+" possible "+str(numcards)+" card hands... "
for cards in xuniqueCombinations(deck, numcards):
i=i+1
# enable skipping ahead if we ran halfway and terminated this process.
if i<num_computed:
continue
(idx,pokerval) = calculate_pokerval(cards)
db[idx] = pokerval
if i%100000 == 0:
now = time.clock()
print "%d%% of %d-card hands complete. %d processed, %d unique, %.2fm elapsed (%.2fm total)." % (i*100.0/total, numcards, i, len(db), (now - start_time)/60.0, (now - start_time_all)/60.0)
s = format_cards(cards) + ' val: '
print "\tLast Hand: ", s + format_pokerval(pokerval)
db["num_computed"] = i
print len(db)
print "Your %d-card database is complete! It has %d complete hands." % (numcards, len(db))
if __name__ == '__main__':
regenerate_database() | gpl-3.0 | 202,160,966,135,602,700 | 37.330827 | 208 | 0.613694 | false |
jeffFranklin/iam-resttools | resttools/models/irws.py | 1 | 5374 | from base64 import b64encode, b64decode
from datetime import datetime
# IRWS Name
class Name():
validid = ''
formal_cname = ''
formal_fname = ''
formal_lname = ''
formal_privacy = ''
display_cname = ''
display_fname = ''
display_mname = ''
display_lname = ''
display_privacy = ''
def json_data(self):
return {"formal_cname": self.formal_cname,
"formal_fname": self.formal_fname,
"formal_lname": self.formal_lname,
"formal_privacy": self.formal_privacy,
"display_cname": self.display_cname,
"display_fname": self.display_fname,
"display_mname": self.display_mname,
"display_lname": self.display_lname,
"display_privacy": self.display_privacy,
}
    def __eq__(self, other):
        # Name has no uwregid attribute; compare on validid instead.
        return self.validid == other.validid
# IRWS Profile (only recover part for now)
class Profile():
validid = ''
recover_email = None
recover_email_date = None
recover_sms = None
recover_sms_date = None
recover_block_code = None
def json_data(self):
prof = {}
if self.recover_email is not None:
prof['recover_email'] = self.recover_email
if self.recover_email_date is not None:
prof['recover_email_date'] = self.recover_email_date
if self.recover_sms is not None:
prof['recover_sms'] = self.recover_sms
if self.recover_sms_date is not None:
prof['recover_sms_date'] = self.recover_sms_date
if self.recover_block_code is not None:
prof['recover_block_code'] = self.recover_block_code
return {'profile': [prof]}
    def __eq__(self, other):
        # Profile has no uwregid attribute; compare on validid instead.
        return self.validid == other.validid
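# Illustrative sketch (example values are hypothetical): json_data() only emits
# the recovery fields that are set, wrapped in a one-element 'profile' list:
#
#   p = Profile()
#   p.recover_email = 'user@example.com'
#   p.json_data()   # -> {'profile': [{'recover_email': 'user@example.com'}]}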
# IRWS Person
class Person():
regid = ''
lname = ''
fname = ''
identifiers = {}
# def __init__(self, *args, **kwargs):
# self.identifiers = {}
# IRWS UWhr Person
class UWhrPerson():
validid = ''
regid = ''
studentid = ''
birthdate = ''
fname = ''
lname = ''
category_code = ''
category_name = ''
contact_email = ''
workday_home_email = ''
org_supervisor = ''
wp_name = ''
wp_department = ''
wp_email = []
wp_phone = ''
wp_title = ''
wp_address = ''
wp_publish = False
college = ''
department = ''
home_department = ''
mailstop = ''
unit = ''
emp_ecs_code = ''
emp_status_code = ''
budget = ''
faccode = ''
source_code = ''
source_name = ''
status_code = ''
status_name = ''
pac = ''
in_feed = ''
created = ''
updated = ''
def __eq__(self, other):
if other is None:
return False
return self.regid == other.regid
# IRWS Sdb Person
class SdbPerson():
validid = ''
regid = ''
studentid = ''
birthdate = ''
fname = ''
lname = ''
category_code = ''
category_name = ''
college = ''
department = ''
source_code = ''
source_name = ''
# status: 1=active, 3=former
status_code = ''
status_name = ''
pac = ''
wp_publish = 'Y'
in_feed = ''
created = ''
updated = ''
def __eq__(self, other):
if other is None:
return False
return self.regid == other.regid
# IRWS Supplemental Person
class SupplementalPerson():
validid = ''
regid = ''
lname = ''
category_code = ''
category_name = ''
comment_code = ''
comment_name = ''
sponsor_id = ''
college = ''
source_code = ''
source_name = ''
status_code = ''
status_name = ''
in_feed = ''
created = ''
updated = ''
def __eq__(self, other):
if other is None:
return False
return self.regid == other.regid
# IRWS GenericPerson
class GenericPerson():
validid = ''
regid = ''
lname = ''
fname = ''
contact_email = ''
category_code = ''
source_code = ''
# IRWS UWNetId
class UWNetId():
uwnetid = ''
accid = ''
validid = ''
uid = ''
luid = ''
disenfran = ''
netid_code = ''
netid_name = ''
status_code = ''
status_name = ''
logname = ''
created = ''
updated = ''
    def json_data(self):
        # Placeholder: no serializable fields for this model yet.
        return {}
def __eq__(self, other):
if other is None:
return False
return self.uwnetid == other.uwnetid
# IRWS Regid
class Regid():
regid = ''
entity_code = ''
entity_name = ''
status_code = ''
status_name = ''
created = ''
updated = ''
def __eq__(self, other):
if other is None:
return False
return self.regid == other.regid
# IRWS Subscription
class Subscription():
uwnetid = ''
subscription_code = ''
subscription_name = ''
notify_code = ''
status_code = ''
status_name = ''
logname = ''
created = ''
updated = ''
    def json_data(self):
        # Placeholder: no serializable fields for this model yet.
        return {}
def __eq__(self, other):
return self.uwnetid == other.uwnetid
# IRWS PAC
class Pac():
pac = ''
expiration = ''
    def json_data(self):
        # Placeholder: no serializable fields for this model yet.
        return {}
# IRWS QnA
class QnA():
uwnetid = ''
ordinal = ''
question = ''
answer = ''
| apache-2.0 | 7,709,327,650,782,617,000 | 18.613139 | 64 | 0.51284 | false |
yushroom/FishEngine | script/gen_enum_to_string.py | 1 | 2907 | from mako.template import Template
t_str = '''
// enum count
template<>
constexpr int EnumCount<${T}>() { return ${length}; }
// string array
static const char* ${T}Strings[] =
{
${CStrings}
};
// cstring array
template<>
inline constexpr const char** EnumToCStringArray<${T}>()
{
return ${T}Strings;
}
// index to enum
template<>
inline ${T} ToEnum<${T}>(const int index)
{
switch (index) {
${IndexToEnumCases}
default: abort(); break;
}
}
// enum to index
template<>
inline int EnumToIndex<${T}>(${T} e)
{
switch (e) {
${EnumToIndexCases}
default: abort(); break;
}
}
// string to enum
template<>
inline ${T} ToEnum<${T}>(const std::string& s)
{
${StringToEnumCases}
abort();
}
'''
t = Template(t_str)
cpp_enum_code = '''
enum class TextureImporterType
{
Default, // This is the most common setting used for all the textures in general.
NormalMap, // Select this to turn the color channels into a format suitable for real - time normal mapping.
GUI, // Use this if your texture is going to be used on any HUD / GUI Controls.
Sprite, // Select this if you will be using your texture for Sprite graphics.
Cursor, // Use this if your texture is going to be used as a cursor.
Cookie, // This sets up your texture with the basic parameters used for the Cookies of your lights.
Lightmap, // This sets up your texture with the parameters used by the lightmap.
SingleChannel, //Use this for texture containing a single channel.
};
'''
lines = cpp_enum_code.strip().split('\n')
line1 = lines[0].strip()
if line1.endswith('{'):
line1 = line1[:-1]
enum_name = line1.strip().split()[-1]
print(enum_name)
enum_elements = []
for line in lines[1:-1]:
#print line
if line.startswith('{'):
continue
if "=" in line:
var = line.split('=')[0]
else:
var = line.split(',')[0]
enum_elements.append(var.strip())
print(enum_elements)
print('')
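# For the cpp_enum_code above, the loop yields (derived from the parsing code,
# not an additional assumption):
#   enum_name     == 'TextureImporterType'
#   enum_elements == ['Default', 'NormalMap', 'GUI', 'Sprite', 'Cursor',
#                     'Cookie', 'Lightmap', 'SingleChannel']
# These values feed the mako template at the top of the script, which emits the
# EnumCount / ToEnum / EnumToIndex specializations for that enum.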
#enum_name = "ShadowCastingMode"
#enum_elements = ['Off', 'On', 'TwoSided', 'ShdowsOnly']
index_to_enum_case = "case {0}: return {1}::{2}; break;"
enum_to_index_case = "case {1}::{2}: return {0}; break;"
string_to_enum_case = 'if (s == "{1}") return {0}::{1};'
index_to_enum_cases = ''
enum_to_index_cases = ''
string_to_enum_cases = ''
for i in range(len(enum_elements)):
index_to_enum_cases += index_to_enum_case.format(i, enum_name, enum_elements[i]) + '\n\t'
enum_to_index_cases += enum_to_index_case.format(i, enum_name, enum_elements[i]) + '\n\t'
string_to_enum_cases += string_to_enum_case.format(enum_name, enum_elements[i]) + '\n\t'
CStrings = ',\n\t'.join(['"{}"'.format(e) for e in enum_elements])
print(t.render(T=enum_name, length=len(enum_elements), CStrings=CStrings,
               IndexToEnumCases=index_to_enum_cases, EnumToIndexCases=enum_to_index_cases,
               StringToEnumCases=string_to_enum_cases))
| mit | 687,920,250,908,670,300 | 26.424528 | 116 | 0.642931 | false |
McGill-DMaS/Kam1n0-Plugin-IDA-Pro | ida-plugin/Kam1n0/Plugin.py | 1 | 1702 | # *******************************************************************************
# * Copyright 2017 McGill University All rights reserved.
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# *******************************************************************************/
import idaapi
import Manager
from idaapi import plugin_t
class kam1n0_t(plugin_t):
flags = idaapi.PLUGIN_UNL
comment = "Kam1n0."
help = "Kam1n0."
wanted_name = "Kam1n0"
wanted_hotkey = ""
def init(self):
global kam1n0_manager
# Check if already initialized
        if 'kam1n0_manager' not in globals():
            print("Kam1n0: initializing Kam1n0 IDA-pro plugin ...")
            kam1n0_manager = Manager.Kam1n0PluginManager()
            if kam1n0_manager.register_all_actions():
                print("Failed to initialize Kam1n0.")
# kam1n0_manager.removeAllAction()
del kam1n0_manager
return idaapi.PLUGIN_SKIP
else:
print("Kam1n0: Completed initialization.")
return idaapi.PLUGIN_KEEP
def run(self, arg):
pass
def term(self):
pass | apache-2.0 | 9,062,801,436,616,464,000 | 31.75 | 83 | 0.576968 | false |
ghchinoy/tensorflow | tensorflow/python/framework/ops.py | 1 | 245803 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions used to construct graphs."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import re
import sys
import threading
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.framework import versions_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python import tf2
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import monitoring
from tensorflow.python.eager import tape
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import registry
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import traceable_stack
from tensorflow.python.framework import versions
from tensorflow.python.ops import control_flow_util
from tensorflow.python.platform import app
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import decorator_utils
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
from tensorflow.python.util import lock_util
from tensorflow.python.util import memory
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_stack
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import tf_export
# This is to avoid a circular dependency: ops -> tensor_spec -> ops
tensor_spec = LazyLoader(
"tensor_spec", globals(),
"tensorflow.python.framework.tensor_spec")
# Temporary global switches determining if we should enable the work-in-progress
# calls to the C API. These will be removed once all functionality is supported.
_USE_C_API = True
_USE_C_SHAPES = True
_api_usage_gauge = monitoring.BoolGauge(
"/tensorflow/api/ops_eager_execution",
"Whether ops.enable_eager_execution() is called.")
def tensor_id(tensor):
"""Returns a unique identifier for this Tensor."""
return tensor._id # pylint: disable=protected-access
class _UserDeviceSpec(object):
"""Store user-specified device and provide computation of merged device."""
def __init__(self, device_name_or_function):
self._device_name_or_function = device_name_or_function
self.display_name = str(self._device_name_or_function)
self.function = device_name_or_function
self.raw_string = None
if isinstance(device_name_or_function, pydev.MergeDevice):
self.is_null_merge = device_name_or_function.is_null_merge
elif callable(device_name_or_function):
self.is_null_merge = False
dev_func = self._device_name_or_function
func_name = function_utils.get_func_name(dev_func)
func_code = function_utils.get_func_code(dev_func)
if func_code:
fname = func_code.co_filename
lineno = func_code.co_firstlineno
else:
fname = "unknown"
lineno = -1
self.display_name = "%s<%s, %d>" % (func_name, fname, lineno)
elif device_name_or_function is None:
# NOTE(taylorrobie): This MUST be False. None signals a break in the
# device stack, so `is_null_merge` must be False for such a case to
# allow callers to safely skip over null merges without missing a None.
self.is_null_merge = False
else:
self.raw_string = device_name_or_function
self.function = pydev.merge_device(device_name_or_function)
self.is_null_merge = self.function.is_null_merge
# We perform this check in __init__ because it is of non-trivial cost,
# and self.string_merge is typically called many times.
self.fast_string_merge = isinstance(self.function, pydev.MergeDevice)
def string_merge(self, node_def):
if self.fast_string_merge:
return self.function.shortcut_string_merge(node_def)
return compat.as_str(_device_string(self.function(node_def)))
class NullContextmanager(object):
def __init__(self, *args, **kwargs):
pass
def __enter__(self):
pass
def __exit__(self, type_arg, value_arg, traceback_arg):
return False # False values do not suppress exceptions
def _override_helper(clazz_object, operator, func):
"""Overrides (string) operator on Tensors to call func.
Args:
clazz_object: the class to override for; either Tensor or SparseTensor.
operator: the string name of the operator to override.
func: the function that replaces the overridden operator.
Raises:
ValueError: If operator has already been overwritten,
or if operator is not allowed to be overwritten.
"""
existing = getattr(clazz_object, operator, None)
if existing is not None:
# Check to see if this is a default method-wrapper or slot wrapper which
# will be true for the comparison operators.
if not isinstance(existing, type(object.__lt__)):
raise ValueError("operator %s cannot be overwritten again on class %s." %
(operator, clazz_object))
if operator not in Tensor.OVERLOADABLE_OPERATORS:
raise ValueError("Overriding %s is disallowed" % operator)
setattr(clazz_object, operator, func)
def _as_graph_element(obj):
"""Convert `obj` to a graph element if possible, otherwise return `None`.
Args:
obj: Object to convert.
Returns:
The result of `obj._as_graph_element()` if that method is available;
otherwise `None`.
"""
conv_fn = getattr(obj, "_as_graph_element", None)
if conv_fn and callable(conv_fn):
return conv_fn()
return None
_TENSOR_LIKE_TYPES = tuple()
def is_dense_tensor_like(t):
"""EXPERIMENTAL: Returns true if `t` implements the tensor interface.
See `register_dense_tensor_like_type()` for the current definition of a
"tensor-like type".
Args:
t: An object.
Returns:
True iff `t` is an instance of one of the registered "tensor-like" types.
"""
return isinstance(t, _TENSOR_LIKE_TYPES)
def register_dense_tensor_like_type(tensor_type):
"""EXPERIMENTAL: Registers `tensor_type` as implementing the tensor interface.
A "tensor-like type" can represent a single dense tensor, and implements
the `name` and `dtype` properties.
Args:
tensor_type: A type implementing the tensor interface.
Raises:
TypeError: If `tensor_type` does not implement the tensor interface.
"""
try:
if not isinstance(tensor_type.name, property):
raise TypeError("Type %s does not define a `name` property" %
tensor_type.__name__)
except AttributeError:
raise TypeError("Type %s does not define a `name` property" %
tensor_type.__name__)
try:
if not isinstance(tensor_type.dtype, property):
raise TypeError("Type %s does not define a `dtype` property" %
tensor_type.__name__)
except AttributeError:
raise TypeError("Type %s does not define a `dtype` property" %
tensor_type.__name__)
# We expect this list to be small, so choose quadratic complexity
# for registration, so that we have a tuple that can be used for
# more efficient `isinstance` checks later.
global _TENSOR_LIKE_TYPES
_TENSOR_LIKE_TYPES = tuple(list(_TENSOR_LIKE_TYPES) + [tensor_type])
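# Illustrative sketch (comment only, hypothetical class name): a minimal
# "tensor-like" type just needs `name` and `dtype` defined as properties before
# it can be registered:
#
#   class MyDenseWrapper(object):
#
#     @property
#     def name(self):
#       return "MyDenseWrapper"
#
#     @property
#     def dtype(self):
#       return dtypes.float32
#
#   register_dense_tensor_like_type(MyDenseWrapper)
#   # is_dense_tensor_like(MyDenseWrapper()) now returns True.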
def uid():
"""A unique (within this program execution) integer."""
return c_api.TFE_Py_UID()
def numpy_text(tensor, is_repr=False):
"""Human readable representation of a tensor's numpy value."""
if tensor.dtype.is_numpy_compatible:
text = repr(tensor.numpy()) if is_repr else str(tensor.numpy())
else:
text = "<unprintable>"
if "\n" in text:
text = "\n" + text
return text
# NOTE(ebrevdo): Do not subclass this. If you do, I will break you on purpose.
class _TensorLike(object):
"""Internal cls for grouping Tensor, SparseTensor, ..., for is_instance."""
pass
@tf_export("Tensor")
class Tensor(_TensorLike):
"""Represents one of the outputs of an `Operation`.
A `Tensor` is a symbolic handle to one of the outputs of an
`Operation`. It does not hold the values of that operation's output,
but instead provides a means of computing those values in a
TensorFlow `tf.compat.v1.Session`.
This class has two primary purposes:
1. A `Tensor` can be passed as an input to another `Operation`.
This builds a dataflow connection between operations, which
enables TensorFlow to execute an entire `Graph` that represents a
large, multi-step computation.
2. After the graph has been launched in a session, the value of the
`Tensor` can be computed by passing it to
`tf.Session.run`.
`t.eval()` is a shortcut for calling
`tf.compat.v1.get_default_session().run(t)`.
In the following example, `c`, `d`, and `e` are symbolic `Tensor`
objects, whereas `result` is a numpy array that stores a concrete
value:
```python
# Build a dataflow graph.
c = tf.constant([[1.0, 2.0], [3.0, 4.0]])
d = tf.constant([[1.0, 1.0], [0.0, 1.0]])
e = tf.matmul(c, d)
# Construct a `Session` to execute the graph.
sess = tf.compat.v1.Session()
# Execute the graph and store the value that `e` represents in `result`.
result = sess.run(e)
```
"""
# List of Python operators that we allow to override.
OVERLOADABLE_OPERATORS = {
# Binary.
"__add__",
"__radd__",
"__sub__",
"__rsub__",
"__mul__",
"__rmul__",
"__div__",
"__rdiv__",
"__truediv__",
"__rtruediv__",
"__floordiv__",
"__rfloordiv__",
"__mod__",
"__rmod__",
"__lt__",
"__le__",
"__gt__",
"__ge__",
"__and__",
"__rand__",
"__or__",
"__ror__",
"__xor__",
"__rxor__",
"__getitem__",
"__pow__",
"__rpow__",
# Unary.
"__invert__",
"__neg__",
"__abs__",
"__matmul__",
"__rmatmul__"
}
def __init__(self, op, value_index, dtype):
"""Creates a new `Tensor`.
Args:
op: An `Operation`. `Operation` that computes this tensor.
value_index: An `int`. Index of the operation's endpoint that produces
this tensor.
dtype: A `DType`. Type of elements stored in this tensor.
Raises:
TypeError: If the op is not an `Operation`.
"""
if not isinstance(op, Operation):
raise TypeError("op needs to be an Operation: %s" % op)
self._op = op
self._value_index = value_index
self._dtype = dtypes.as_dtype(dtype)
# This will be set by self._as_tf_output().
self._tf_output = None
# This will be set by self.shape().
self._shape_val = None
# List of operations that use this Tensor as input. We maintain this list
# to easily navigate a computation graph.
self._consumers = []
self._id = uid()
self._name = None
@property
def op(self):
"""The `Operation` that produces this tensor as an output."""
return self._op
@property
def dtype(self):
"""The `DType` of elements in this tensor."""
return self._dtype
@property
def graph(self):
"""The `Graph` that contains this tensor."""
return self._op.graph
@property
def name(self):
"""The string name of this tensor."""
if self._name is None:
if not self._op.name:
raise ValueError("Operation was not named: %s" % self._op)
self._name = "%s:%d" % (self._op.name, self._value_index)
return self._name
@property
def device(self):
"""The name of the device on which this tensor will be produced, or None."""
return self._op.device
@property
def shape(self):
"""Returns the `TensorShape` that represents the shape of this tensor.
The shape is computed using shape inference functions that are
registered in the Op for each `Operation`. See
`tf.TensorShape`
for more details of what a shape represents.
The inferred shape of a tensor is used to provide shape
information without having to launch the graph in a session. This
can be used for debugging, and providing early error messages. For
example:
```python
c = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
print(c.shape)
==> TensorShape([Dimension(2), Dimension(3)])
d = tf.constant([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])
print(d.shape)
==> TensorShape([Dimension(4), Dimension(2)])
# Raises a ValueError, because `c` and `d` do not have compatible
# inner dimensions.
e = tf.matmul(c, d)
f = tf.matmul(c, d, transpose_a=True, transpose_b=True)
print(f.shape)
==> TensorShape([Dimension(3), Dimension(4)])
```
In some cases, the inferred shape may have unknown dimensions. If
the caller has additional information about the values of these
dimensions, `Tensor.set_shape()` can be used to augment the
inferred shape.
Returns:
A `TensorShape` representing the shape of this tensor.
"""
if self._shape_val is None:
self._shape_val = self._c_api_shape()
return self._shape_val
def _get_input_ops_without_shapes(self, target_op):
"""Returns ops needing shape inference to compute target_op's shape."""
result = []
stack = [self._op]
visited = set()
while stack:
op = stack.pop()
if op in visited:
continue
result.append(op)
stack.extend(t.op for t in op.inputs if t._shape_val is None)
visited.add(op)
return result
def _c_api_shape(self):
"""Returns the TensorShape of this tensor according to the C API."""
c_graph = self._op._graph._c_graph # pylint: disable=protected-access
shape_vector, unknown_shape = c_api.TF_GraphGetTensorShapeHelper(
c_graph, self._as_tf_output())
if unknown_shape:
return tensor_shape.unknown_shape()
else:
shape_vector = [None if d == -1 else d for d in shape_vector]
return tensor_shape.TensorShape(shape_vector)
@property
def _shape(self):
logging.warning("Tensor._shape is private, use Tensor.shape "
"instead. Tensor._shape will eventually be removed.")
return self.shape
@_shape.setter
def _shape(self, value):
raise ValueError(
"Tensor._shape cannot be assigned, use Tensor.set_shape instead.")
def __iter__(self):
if not context.executing_eagerly():
raise TypeError(
"Tensor objects are only iterable when eager execution is "
"enabled. To iterate over this tensor use tf.map_fn.")
shape = self._shape_tuple()
if shape is None:
raise TypeError("Cannot iterate over a tensor with unknown shape.")
if not shape:
raise TypeError("Cannot iterate over a scalar tensor.")
if shape[0] is None:
raise TypeError(
"Cannot iterate over a tensor with unknown first dimension.")
for i in xrange(shape[0]):
yield self[i]
def _shape_as_list(self):
if self.shape.ndims is not None:
return [dim.value for dim in self.shape.dims]
else:
return None
def _shape_tuple(self):
shape = self._shape_as_list()
if shape is None:
return None
return tuple(shape)
def _rank(self):
"""Integer rank of this Tensor, if known, else None.
Returns:
Integer rank or None
"""
return self.shape.ndims
def get_shape(self):
"""Alias of Tensor.shape."""
return self.shape
def set_shape(self, shape):
"""Updates the shape of this tensor.
This method can be called multiple times, and will merge the given
`shape` with the current shape of this tensor. It can be used to
provide additional information about the shape of this tensor that
cannot be inferred from the graph alone. For example, this can be used
to provide additional information about the shapes of images:
```python
_, image_data = tf.compat.v1.TFRecordReader(...).read(...)
image = tf.image.decode_png(image_data, channels=3)
# The height and width dimensions of `image` are data dependent, and
# cannot be computed without executing the op.
print(image.shape)
==> TensorShape([Dimension(None), Dimension(None), Dimension(3)])
# We know that each image in this dataset is 28 x 28 pixels.
image.set_shape([28, 28, 3])
print(image.shape)
==> TensorShape([Dimension(28), Dimension(28), Dimension(3)])
```
NOTE: This shape is not enforced at runtime. Setting incorrect shapes can
result in inconsistencies between the statically-known graph and the runtime
value of tensors. For runtime validation of the shape, use `tf.ensure_shape`
instead.
Args:
shape: A `TensorShape` representing the shape of this tensor, a
`TensorShapeProto`, a list, a tuple, or None.
Raises:
ValueError: If `shape` is not compatible with the current shape of
this tensor.
"""
# Reset cached shape.
self._shape_val = None
# We want set_shape to be reflected in the C API graph for when we run it.
if not isinstance(shape, tensor_shape.TensorShape):
shape = tensor_shape.TensorShape(shape)
dim_list = []
if shape.dims is None:
unknown_shape = True
else:
unknown_shape = False
for dim in shape.dims:
if dim.value is None:
dim_list.append(-1)
else:
dim_list.append(dim.value)
try:
c_api.TF_GraphSetTensorShape_wrapper(
self._op._graph._c_graph, # pylint: disable=protected-access
self._as_tf_output(),
dim_list,
unknown_shape)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
@property
def value_index(self):
"""The index of this tensor in the outputs of its `Operation`."""
return self._value_index
def consumers(self):
"""Returns a list of `Operation`s that consume this tensor.
Returns:
A list of `Operation`s.
"""
consumer_names = c_api.TF_OperationOutputConsumers_wrapper(
self._as_tf_output())
# pylint: disable=protected-access
return [
self.graph._get_operation_by_name_unsafe(name)
for name in consumer_names
]
# pylint: enable=protected-access
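  # Illustrative sketch (comment only, variable names are hypothetical): in graph
  # mode, consumers() follows the graph edges, e.g.
  #
  #   a = tf.constant(1.0)
  #   b = a + 2.0            # creates an op that takes `a` as an input
  #   a.consumers()          # -> [b.op]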
def _as_node_def_input(self):
"""Return a value to use for the NodeDef "input" attribute.
The returned string can be used in a NodeDef "input" attribute
to indicate that the NodeDef uses this Tensor as input.
Raises:
ValueError: if this Tensor's Operation does not have a name.
Returns:
a string.
"""
if not self._op.name:
raise ValueError("Operation was not named: %s" % self._op)
if self._value_index == 0:
return self._op.name
else:
return "%s:%d" % (self._op.name, self._value_index)
def _as_tf_output(self):
# pylint: disable=protected-access
# NOTE: Beyond preventing unnecessary (re-)allocation, the cached object
# also guarantees that a dictionary of tf_output objects will retain a
# deterministic (yet unsorted) order which prevents memory blowup in the
# cache of executor(s) stored for every session.
if self._tf_output is None:
self._tf_output = c_api_util.tf_output(self.op._c_op, self.value_index)
return self._tf_output
# pylint: enable=protected-access
def __str__(self):
return "Tensor(\"%s\"%s%s%s)" % (
self.name,
(", shape=%s" %
self.get_shape()) if self.get_shape().ndims is not None else "",
(", dtype=%s" % self._dtype.name) if self._dtype else "",
(", device=%s" % self.device) if self.device else "")
def __repr__(self):
return "<tf.Tensor '%s' shape=%s dtype=%s>" % (self.name, self.get_shape(),
self._dtype.name)
def __hash__(self):
# Necessary to support Python's collection membership operators
return id(self)
def __eq__(self, other):
# Necessary to support Python's collection membership operators
# NOTE(taylorrobie): equivalent to: id(self) == id(other)
return self is other
def __copy__(self):
# TODO(b/77597810): get rid of Tensor copies.
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
return result
# NOTE(mrry): This enables the Tensor's overloaded "right" binary
# operators to run when the left operand is an ndarray, because it
# accords the Tensor class higher priority than an ndarray, or a
# numpy matrix.
# TODO(mrry): Convert this to using numpy's __numpy_ufunc__
# mechanism, which allows more control over how Tensors interact
# with ndarrays.
__array_priority__ = 100
@staticmethod
def _override_operator(operator, func):
_override_helper(Tensor, operator, func)
def __bool__(self):
"""Dummy method to prevent a tensor from being used as a Python `bool`.
This overload raises a `TypeError` when the user inadvertently
treats a `Tensor` as a boolean (e.g. in an `if` statement). For
example:
```python
if tf.constant(True): # Will raise.
# ...
if tf.constant(5) < tf.constant(7): # Will raise.
# ...
```
This disallows ambiguities between testing the Python value vs testing the
dynamic condition of the `Tensor`.
Raises:
`TypeError`.
"""
raise TypeError("Using a `tf.Tensor` as a Python `bool` is not allowed. "
"Use `if t is not None:` instead of `if t:` to test if a "
"tensor is defined, and use TensorFlow ops such as "
"tf.cond to execute subgraphs conditioned on the value of "
"a tensor.")
def __nonzero__(self):
"""Dummy method to prevent a tensor from being used as a Python `bool`.
This is the Python 2.x counterpart to `__bool__()` above.
Raises:
`TypeError`.
"""
raise TypeError("Using a `tf.Tensor` as a Python `bool` is not allowed. "
"Use `if t is not None:` instead of `if t:` to test if a "
"tensor is defined, and use TensorFlow ops such as "
"tf.cond to execute subgraphs conditioned on the value of "
"a tensor.")
def eval(self, feed_dict=None, session=None):
"""Evaluates this tensor in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for the operation that produces this
tensor.
*N.B.* Before invoking `Tensor.eval()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values. See
`tf.Session.run` for a description of the valid feed values.
session: (Optional.) The `Session` to be used to evaluate this tensor. If
none, the default session will be used.
Returns:
A numpy array corresponding to the value of this tensor.
"""
return _eval_using_default_session(self, feed_dict, self.graph, session)
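  # Illustrative sketch (comment only): eval() requires a default or explicit
  # session, e.g.
  #
  #   c = tf.constant(4.0)
  #   with tf.compat.v1.Session():
  #     print(c.eval())      # -> 4.0
  #
  # This mirrors the tf.compat.v1.get_default_session().run(t) shortcut described
  # in the docstring above.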
# TODO(agarwal): consider getting rid of this.
class _EagerTensorBase(Tensor):
"""Base class for EagerTensor."""
@property
def dtype(self):
# Note: using the intern table directly here as this is
# performance-sensitive in some models.
return dtypes._INTERN_TABLE[self._datatype_enum()] # pylint: disable=protected-access
def numpy(self):
"""Returns a numpy array or a scalar with the same contents as the Tensor.
TODO(ashankar,agarwal): Perhaps this should NOT reference the underlying
buffer but instead always explicitly copy? Note that currently it may or may
not copy based on whether the numpy data is properly aligned or not.
Returns:
A numpy array or a scalar. Numpy array may share memory with the
Tensor object. Any changes to one may be reflected in the other. A scalar
value is returned when self has rank 0.
Raises:
ValueError: if the type of this Tensor is not representable in numpy.
"""
if self.dtype == dtypes.resource:
raise ValueError("Resource handles are not convertible to numpy.")
maybe_arr = self._cpu_nograd()._numpy() # pylint: disable=protected-access
return maybe_arr.copy() if isinstance(maybe_arr, np.ndarray) else maybe_arr
# __int__, __float__ and __index__ may copy the tensor to CPU and
# only work for scalars; values are cast as per numpy.
# TODO(slebedev): avoid redundant copy in all of the following methods.
def __int__(self):
return int(self.numpy())
def __long__(self):
return long(self.numpy())
def __float__(self):
return float(self.numpy())
def __index__(self):
maybe_arr = self.numpy()
if isinstance(maybe_arr, np.ndarray):
return maybe_arr.__index__()
return int(maybe_arr) # Must be a NumPy scalar.
def __array__(self, dtype=None):
# This is only called if the buffer interface conversion failed.
# Remove once numpy/numpy#13507 is merged and released or py_function
# creates EagerTensors with a non-nullptr context.
return np.asarray(self.numpy(), dtype=dtype)
def __format__(self, format_spec):
return self.numpy().__format__(format_spec)
def __reduce__(self):
return (convert_to_tensor, (self.numpy(),))
def _numpy(self):
raise NotImplementedError()
@property
def backing_device(self):
"""Returns the name of the device holding this tensor's memory.
`.backing_device` is usually the same as `.device`, which returns
the device on which the kernel of the operation that produced this tensor
ran. However, some operations can produce tensors on a different device
(e.g., an operation that executes on the GPU but produces output tensors
in host memory).
"""
raise NotImplementedError()
def __copy__(self):
# Eager Tensors are immutable so it's safe to return themselves as a copy.
return self
def __deepcopy__(self, memo):
# Eager Tensors are immutable so it's safe to return themselves as a copy.
del memo
return self
def _datatype_enum(self):
raise NotImplementedError()
def _shape_tuple(self):
"""The shape of this Tensor, as a tuple.
This is more performant than tuple(shape().as_list()) as it avoids
two list and one object creation. Marked private for now as from an API
perspective, it would be better to have a single performant way of
getting a shape rather than exposing shape() and shape_tuple()
(and heaven forbid, shape_list() etc. as well!). Punting on that for now,
but ideally one would work things out and remove the need for this method.
Returns:
tuple with the shape.
"""
raise NotImplementedError()
def _rank(self):
"""Integer rank of this Tensor.
Unlike regular Tensors, the rank is always known for EagerTensors.
This is more performant than len(self._shape_tuple())
Returns:
Integer rank
"""
raise NotImplementedError()
def _num_elements(self):
"""Number of elements of this Tensor.
Unlike regular Tensors, the number of elements is always known for
EagerTensors.
This is more performant than tensor.shape.num_elements
Returns:
Long - num elements in the tensor
"""
raise NotImplementedError()
def _copy_to_device(self, context, device): # pylint: disable=redefined-outer-name
raise NotImplementedError()
def __str__(self):
return "tf.Tensor(%s, shape=%s, dtype=%s)" % (numpy_text(self), self.shape,
self.dtype.name)
def __repr__(self):
return "<tf.Tensor: id=%s, shape=%s, dtype=%s, numpy=%s>" % (
self._id, self.shape, self.dtype.name, numpy_text(self, is_repr=True))
@staticmethod
def _override_operator(name, func):
setattr(_EagerTensorBase, name, func)
def _copy_nograd(self, ctx=None, device_name=None):
"""Copies tensor to dest device, but doesn't record the operation."""
# pylint: disable=protected-access
# Creates a new tensor on the dest device.
if ctx is None:
ctx = context.context()
if device_name is None:
device_name = ctx.device_name
# pylint: disable=protected-access
try:
ctx.ensure_initialized()
new_tensor = self._copy_to_device(context=ctx._handle, device=device_name)
except core._NotOkStatusException as e:
six.raise_from(core._status_to_exception(e.code, e.message), None)
return new_tensor
def _copy(self, ctx=None, device_name=None):
"""Copies tensor to dest device."""
new_tensor = self._copy_nograd(ctx, device_name)
# Record the copy on tape and define backprop copy as well.
if context.executing_eagerly():
self_device = self.device
def grad_fun(dresult):
return [
dresult._copy(device_name=self_device)
if hasattr(dresult, "_copy") else dresult
]
tape.record_operation("_copy", [new_tensor], [self], grad_fun)
return new_tensor
# pylint: enable=protected-access
@property
def shape(self):
if self._tensor_shape is None: # pylint: disable=access-member-before-definition
# `_tensor_shape` is declared and defined in the definition of
# `EagerTensor`, in C.
self._tensor_shape = tensor_shape.TensorShape(self._shape_tuple())
return self._tensor_shape
def get_shape(self):
"""Alias of Tensor.shape."""
return self.shape
def _shape_as_list(self):
"""The shape of the tensor as a list."""
return list(self._shape_tuple())
@property
def ndim(self):
"""Returns the number of Tensor dimensions."""
return self.shape.ndims
def __len__(self):
"""Returns the length of the first dimension in the Tensor."""
if not self.shape.ndims:
raise TypeError("Scalar tensor has no `len()`")
return self._shape_tuple()[0]
def _cpu_nograd(self):
"""A copy of this Tensor with contents backed by host memory.
The copy cannot be differentiated through.
Returns:
A CPU-memory backed Tensor object with the same contents as this Tensor.
"""
return self._copy_nograd(context.context(), "CPU:0")
def cpu(self):
"""A copy of this Tensor with contents backed by host memory."""
return self._copy(context.context(), "CPU:0")
def gpu(self, gpu_index=0):
"""A copy of this Tensor with contents backed by memory on the GPU.
Arguments:
gpu_index: Identifies which GPU to place the contents on the returned
Tensor in.
Returns:
A GPU-memory backed Tensor object initialized with the same contents
as this Tensor.
"""
return self._copy(context.context(), "GPU:" + str(gpu_index))
def __bool__(self):
return bool(self.numpy())
def __nonzero__(self):
return self.__bool__()
def set_shape(self, shape):
if not self.shape.is_compatible_with(shape):
raise ValueError(
"Tensor's shape %s is not compatible with supplied shape %s" %
(self.shape, shape))
# Methods not supported / implemented for Eager Tensors.
@property
def op(self):
raise AttributeError(
"Tensor.op is meaningless when eager execution is enabled.")
@property
def graph(self):
raise AttributeError(
"Tensor.graph is meaningless when eager execution is enabled.")
@property
def name(self):
raise AttributeError(
"Tensor.name is meaningless when eager execution is enabled.")
@property
def value_index(self):
raise AttributeError(
"Tensor.value_index is meaningless when eager execution is enabled.")
def consumers(self):
raise NotImplementedError(
"Tensor.consumers is meaningless when eager execution is enabled.")
def _add_consumer(self, consumer):
raise NotImplementedError(
"_add_consumer not supported when eager execution is enabled.")
def _as_node_def_input(self):
raise NotImplementedError(
"_as_node_def_input not supported when eager execution is enabled.")
def _as_tf_output(self):
raise NotImplementedError(
"_as_tf_output not supported when eager execution is enabled.")
def eval(self, feed_dict=None, session=None):
raise NotImplementedError(
"eval is not supported when eager execution is enabled, "
"is .numpy() what you're looking for?")
# This call creates an EagerTensor class, as a subclass of _EagerTensorBase, and
# registers it with the current module.
EagerTensor = c_api.TFE_Py_InitEagerTensor(_EagerTensorBase)
def _TensorTensorConversionFunction(t, dtype=None, name=None, as_ref=False):
_ = name, as_ref
if dtype and not dtype.is_compatible_with(t.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
(dtype.name, t.dtype.name, str(t)))
return t
_tensor_conversion_func_registry = {
0: [(Tensor, _TensorTensorConversionFunction)]
}
_tensor_conversion_func_cache = {}
_tensor_conversion_func_lock = threading.Lock()
register_dense_tensor_like_type(Tensor)
@tf_export(v1=["convert_to_tensor"])
def convert_to_tensor(value,
dtype=None,
name=None,
preferred_dtype=None,
dtype_hint=None):
"""Converts the given `value` to a `Tensor`.
This function converts Python objects of various types to `Tensor`
objects. It accepts `Tensor` objects, numpy arrays, Python lists,
and Python scalars. For example:
```python
import numpy as np
def my_func(arg):
arg = tf.convert_to_tensor(arg, dtype=tf.float32)
return tf.matmul(arg, arg) + arg
# The following calls are equivalent.
value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])
value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
```
This function can be useful when composing a new operation in Python
(such as `my_func` in the example above). All standard Python op
constructors apply this function to each of their Tensor-valued
inputs, which allows those ops to accept numpy arrays, Python lists,
and scalars in addition to `Tensor` objects.
Note: This function diverges from default Numpy behavior for `float` and
`string` types when `None` is present in a Python list or scalar. Rather
than silently converting `None` values, an error will be thrown.
Args:
value: An object whose type has a registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the type
is inferred from the type of `value`.
name: Optional name to use if a new `Tensor` is created.
preferred_dtype: Optional element type for the returned tensor, used when
dtype is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so preferred_dtype can be used as a soft
preference. If the conversion to `preferred_dtype` is not possible, this
argument has no effect.
dtype_hint: Same meaning as `preferred_dtype`, and overrides it.
Returns:
A `Tensor` based on `value`.
Raises:
TypeError: If no conversion function is registered for `value` to `dtype`.
RuntimeError: If a registered conversion function returns an invalid value.
ValueError: If the `value` is a tensor not of given `dtype` in graph mode.
"""
preferred_dtype = deprecation.deprecated_argument_lookup(
"dtype_hint", dtype_hint, "preferred_dtype", preferred_dtype)
return convert_to_tensor_v2(value, dtype, preferred_dtype, name)
@tf_export("convert_to_tensor", v1=[])
def convert_to_tensor_v2(value, dtype=None, dtype_hint=None, name=None):
"""Converts the given `value` to a `Tensor`.
This function converts Python objects of various types to `Tensor`
objects. It accepts `Tensor` objects, numpy arrays, Python lists,
and Python scalars. For example:
```python
import numpy as np
def my_func(arg):
arg = tf.convert_to_tensor(arg, dtype=tf.float32)
return tf.matmul(arg, arg) + arg
# The following calls are equivalent.
value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])
value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
```
This function can be useful when composing a new operation in Python
(such as `my_func` in the example above). All standard Python op
constructors apply this function to each of their Tensor-valued
inputs, which allows those ops to accept numpy arrays, Python lists,
and scalars in addition to `Tensor` objects.
Note: This function diverges from default Numpy behavior for `float` and
`string` types when `None` is present in a Python list or scalar. Rather
than silently converting `None` values, an error will be thrown.
Args:
value: An object whose type has a registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the type
is inferred from the type of `value`.
dtype_hint: Optional element type for the returned tensor, used when dtype
is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so dtype_hint can be used as a soft preference.
If the conversion to `dtype_hint` is not possible, this argument has no
effect.
name: Optional name to use if a new `Tensor` is created.
Returns:
A `Tensor` based on `value`.
Raises:
TypeError: If no conversion function is registered for `value` to `dtype`.
RuntimeError: If a registered conversion function returns an invalid value.
ValueError: If the `value` is a tensor not of given `dtype` in graph mode.
"""
return internal_convert_to_tensor(
value=value,
dtype=dtype,
name=name,
preferred_dtype=dtype_hint,
as_ref=False)
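# Illustrative sketch (not part of the original module): how the `dtype_hint`
# soft preference behaves. The hint is honoured when converting plain Python
# data, silently ignored when it cannot be applied to an existing tensor, and
# unlike an explicit `dtype` it never raises on a mismatch.
def _example_convert_to_tensor_dtype_hint():
  t1 = convert_to_tensor_v2([1, 2, 3], dtype_hint=dtypes.float32)
  # t1.dtype == float32: the constant conversion honoured the hint.
  t2 = convert_to_tensor_v2(t1, dtype_hint=dtypes.int32)
  # The hint cannot retype an existing float32 tensor, so it is ignored and
  # t2 is t1 unchanged; passing dtype=dtypes.int32 instead would raise.
  return t1, t2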
def _error_prefix(name):
return "" if name is None else "%s: " % name
def internal_convert_to_tensor(value,
dtype=None,
name=None,
as_ref=False,
preferred_dtype=None,
ctx=None,
accept_symbolic_tensors=True,
accept_composite_tensors=False):
"""Implementation of the public convert_to_tensor."""
if ctx is None:
ctx = context.context()
if isinstance(value, EagerTensor):
if ctx.executing_eagerly():
if dtype is not None:
dtype = dtypes.as_dtype(dtype)
value = _TensorTensorConversionFunction(value, dtype=dtype)
return value
else:
graph = get_default_graph()
if not graph.building_function:
raise RuntimeError("Attempting to capture an EagerTensor without "
"building a function.")
return graph.capture(value, name=name)
elif ((not accept_symbolic_tensors) and isinstance(value, Tensor) and
ctx.executing_eagerly()):
# Found a symbolic tensor in an eager context.
# This happens when we use the Keras functional API (i.e. calling layers
# on the output of `keras.Input()`, which is symbolic) while eager
# execution is enabled.
if _is_keras_symbolic_tensor(value):
# If the graph of the tensor isn't the Keras graph, we should still
# fail, for the time being. TODO(fchollet): consider allowing
# all symbolic tensors to raise this exception in this case.
raise core._SymbolicException( # pylint: disable=protected-access
"Using the symbolic output of a Keras layer during eager execution.")
if dtype is not None:
dtype = dtypes.as_dtype(dtype)
unwrapped_type = type(value)
conversion_func_list = _tensor_conversion_func_cache.get(unwrapped_type, None)
if conversion_func_list is None:
with _tensor_conversion_func_lock:
conversion_func_list = []
for _, funcs_at_priority in sorted(
_tensor_conversion_func_registry.items()):
for base_type, conversion_func in funcs_at_priority:
if isinstance(value, base_type):
conversion_func_list.append((base_type, conversion_func))
_tensor_conversion_func_cache[unwrapped_type] = conversion_func_list
for base_type, conversion_func in conversion_func_list:
# If dtype is None but preferred_dtype is not None, we try to
# cast to preferred_dtype first.
ret = None
if dtype is None and preferred_dtype is not None:
try:
ret = conversion_func(
value, dtype=preferred_dtype, name=name, as_ref=as_ref)
except (TypeError, ValueError, errors.UnimplementedError,
errors.InvalidArgumentError):
# Could not coerce the conversion to use the preferred dtype.
ret = None
if ret is not None and ret is not NotImplemented:
if (ret.dtype.base_dtype !=
dtypes.as_dtype(preferred_dtype).base_dtype):
raise TypeError("convert_to_tensor did not convert to "
"the preferred dtype: %s vs %s " %
(ret.dtype.base_dtype,
dtypes.as_dtype(preferred_dtype).base_dtype))
if ret is None:
ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
if ret is NotImplemented:
continue
is_acceptable_type = (
isinstance(ret, Tensor) or
(accept_composite_tensors and
isinstance(ret, composite_tensor.CompositeTensor)))
if not is_acceptable_type:
raise RuntimeError(
"%sConversion function %r for type %s returned non-Tensor: %r" %
(_error_prefix(name), conversion_func, base_type, ret))
if dtype and not dtype.is_compatible_with(ret.dtype):
raise RuntimeError(
"%sConversion function %r for type %s returned incompatible "
"dtype: requested = %s, actual = %s" %
(_error_prefix(name), conversion_func, base_type, dtype.name,
ret.dtype.name))
return ret
raise TypeError("%sCannot convert %r with type %s to Tensor: "
"no conversion function registered." %
(_error_prefix(name), value, unwrapped_type))
def internal_convert_n_to_tensor(values,
dtype=None,
name=None,
as_ref=False,
preferred_dtype=None,
ctx=None):
"""Converts `values` to a list of `Tensor` objects.
Args:
values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
name: (Optional.) A name prefix to use when a new `Tensor` is created, in
which case element `i` will be given the name `name + '_' + i`.
as_ref: True if the caller wants the results as ref tensors.
preferred_dtype: Optional element type for the returned tensors, used when
dtype is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so preferred_dtype can be used as a soft
preference. If the conversion to `preferred_dtype` is not possible, this
argument has no effect.
ctx: The value of context.context().
Returns:
A list of `Tensor` and/or `IndexedSlices` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
if not isinstance(values, collections.Sequence):
raise TypeError("values must be a sequence.")
ret = []
if ctx is None:
ctx = context.context()
for i, value in enumerate(values):
n = None if name is None else "%s_%d" % (name, i)
ret.append(
internal_convert_to_tensor(
value,
dtype=dtype,
name=n,
as_ref=as_ref,
preferred_dtype=preferred_dtype,
ctx=ctx))
return ret
def convert_n_to_tensor(values, dtype=None, name=None, preferred_dtype=None):
"""Converts `values` to a list of `Tensor` objects.
Args:
values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
name: (Optional.) A name prefix to use when a new `Tensor` is created, in
which case element `i` will be given the name `name + '_' + i`.
preferred_dtype: Optional element type for the returned tensors, used when
dtype is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so preferred_dtype can be used as a soft
preference. If the conversion to `preferred_dtype` is not possible, this
argument has no effect.
Returns:
A list of `Tensor` and/or `IndexedSlices` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
return internal_convert_n_to_tensor(
values=values,
dtype=dtype,
name=name,
preferred_dtype=preferred_dtype,
as_ref=False)
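# Illustrative sketch (not part of the original module): converting several
# heterogeneous Python values at once with a shared dtype and name prefix
# (in graph mode, element `i` is named "batch_<i>").
def _example_convert_n_to_tensor():
  values = [[1.0, 2.0], np.array([3.0, 4.0], dtype=np.float32), 5.0]
  tensors = convert_n_to_tensor(values, dtype=dtypes.float32, name="batch")
  # All three results are float32 Tensors; the numpy array and the scalar
  # are wrapped exactly like the Python list.
  return tensors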
@tf_export(v1=["convert_to_tensor_or_indexed_slices"])
def convert_to_tensor_or_indexed_slices(value, dtype=None, name=None):
"""Converts the given object to a `Tensor` or an `IndexedSlices`.
If `value` is an `IndexedSlices` or `SparseTensor` it is returned
unmodified. Otherwise, it is converted to a `Tensor` using
`convert_to_tensor()`.
Args:
value: An `IndexedSlices`, `SparseTensor`, or an object that can be consumed
by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`IndexedSlices`.
name: (Optional.) A name to use if a new `Tensor` is created.
Returns:
A `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`.
Raises:
ValueError: If `dtype` does not match the element type of `value`.
"""
return internal_convert_to_tensor_or_indexed_slices(
value=value, dtype=dtype, name=name, as_ref=False)
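# Illustrative sketch (not part of the original module, graph mode assumed):
# IndexedSlices values pass through unchanged, while plain Python values are
# converted to dense tensors.
def _example_convert_to_tensor_or_indexed_slices():
  vals = convert_to_tensor([[1.0, 2.0]])
  idx = convert_to_tensor([0])
  slices = IndexedSlices(values=vals, indices=idx)
  same = convert_to_tensor_or_indexed_slices(slices)       # returned as-is
  dense = convert_to_tensor_or_indexed_slices([3.0, 4.0])  # becomes a Tensor
  return same is slices, dense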
def internal_convert_to_tensor_or_indexed_slices(value,
dtype=None,
name=None,
as_ref=False):
"""Converts the given object to a `Tensor` or an `IndexedSlices`.
If `value` is an `IndexedSlices` or `SparseTensor` it is returned
unmodified. Otherwise, it is converted to a `Tensor` using
`convert_to_tensor()`.
Args:
value: An `IndexedSlices`, `SparseTensor`, or an object that can be consumed
by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`IndexedSlices`.
name: (Optional.) A name to use if a new `Tensor` is created.
as_ref: True if the caller wants the results as ref tensors.
Returns:
A `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`.
Raises:
ValueError: If `dtype` does not match the element type of `value`.
"""
if isinstance(value, EagerTensor) and not context.executing_eagerly():
return internal_convert_to_tensor(
value, dtype=dtype, name=name, as_ref=as_ref)
elif isinstance(value, _TensorLike):
if dtype and not dtypes.as_dtype(dtype).is_compatible_with(value.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
(dtypes.as_dtype(dtype).name, value.dtype.name, str(value)))
return value
else:
return internal_convert_to_tensor(
value, dtype=dtype, name=name, as_ref=as_ref)
def internal_convert_n_to_tensor_or_indexed_slices(values,
dtype=None,
name=None,
as_ref=False):
"""Converts `values` to a list of `Tensor` or `IndexedSlices` objects.
Any `IndexedSlices` or `SparseTensor` objects in `values` are returned
unmodified.
Args:
values: A list of `None`, `IndexedSlices`, `SparseTensor`, or objects that
can be consumed by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`IndexedSlices`.
name: (Optional.) A name prefix to use when a new `Tensor` is created, in
which case element `i` will be given the name `name + '_' + i`.
as_ref: True if the caller wants the results as ref tensors.
Returns:
A list of `Tensor`, `IndexedSlices`, `SparseTensor` and/or `None` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
if not isinstance(values, collections.Sequence):
raise TypeError("values must be a sequence.")
ret = []
for i, value in enumerate(values):
if value is None:
ret.append(value)
else:
n = None if name is None else "%s_%d" % (name, i)
ret.append(
internal_convert_to_tensor_or_indexed_slices(
value, dtype=dtype, name=n, as_ref=as_ref))
return ret
def convert_n_to_tensor_or_indexed_slices(values, dtype=None, name=None):
"""Converts `values` to a list of `Output` or `IndexedSlices` objects.
Any `IndexedSlices` or `SparseTensor` objects in `values` are returned
unmodified.
Args:
values: A list of `None`, `IndexedSlices`, `SparseTensor`, or objects that
can be consumed by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`IndexedSlices`.
name: (Optional.) A name prefix to use when a new `Tensor` is created, in
which case element `i` will be given the name `name + '_' + i`.
Returns:
A list of `Tensor`, `IndexedSlices`, and/or `SparseTensor` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
return internal_convert_n_to_tensor_or_indexed_slices(
values=values, dtype=dtype, name=name, as_ref=False)
def convert_to_tensor_or_composite(value, dtype=None, name=None):
"""Converts the given object to a `Tensor` or `CompositeTensor`.
If `value` is a `CompositeTensor` it is returned unmodified. Otherwise, it
is converted to a `Tensor` using `convert_to_tensor()`.
Args:
value: A `CompositeTensor` or an object that can be consumed by
`convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`CompositeTensor`.
name: (Optional.) A name to use if a new `Tensor` is created.
Returns:
A `Tensor` or `CompositeTensor`, based on `value`.
Raises:
ValueError: If `dtype` does not match the element type of `value`.
"""
return internal_convert_to_tensor_or_composite(
value=value, dtype=dtype, name=name, as_ref=False)
def internal_convert_to_tensor_or_composite(value,
dtype=None,
name=None,
as_ref=False):
"""Converts the given object to a `Tensor` or `CompositeTensor`.
If `value` is a `CompositeTensor` it is returned unmodified. Otherwise, it
is converted to a `Tensor` using `convert_to_tensor()`.
Args:
value: A `CompositeTensor`, or an object that can be consumed by
`convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`CompositeTensor`.
name: (Optional.) A name to use if a new `Tensor` is created.
as_ref: True if the caller wants the results as ref tensors.
Returns:
A `Tensor` or `CompositeTensor`, based on `value`.
Raises:
ValueError: If `dtype` does not match the element type of `value`.
"""
if isinstance(value, composite_tensor.CompositeTensor):
value_dtype = getattr(value, "dtype", None)
if dtype and not dtypes.as_dtype(dtype).is_compatible_with(value_dtype):
raise ValueError(
"Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
(dtypes.as_dtype(dtype).name, value.dtype.name, str(value)))
return value
else:
return internal_convert_to_tensor(
value,
dtype=dtype,
name=name,
as_ref=as_ref,
accept_composite_tensors=True)
def internal_convert_n_to_tensor_or_composite(values,
dtype=None,
name=None,
as_ref=False):
"""Converts `values` to a list of `Tensor` or `CompositeTensor` objects.
Any `CompositeTensor` objects in `values` are returned unmodified.
Args:
values: A list of `None`, `CompositeTensor`, or objects that can be consumed
by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor`s or
`CompositeTensor`s.
name: (Optional.) A name prefix to use when a new `Tensor` is created, in
which case element `i` will be given the name `name + '_' + i`.
as_ref: True if the caller wants the results as ref tensors.
Returns:
A list of `Tensor`, `CompositeTensor`, and/or `None` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
if not isinstance(values, collections.Sequence):
raise TypeError("values must be a sequence.")
ret = []
for i, value in enumerate(values):
if value is None:
ret.append(value)
else:
n = None if name is None else "%s_%d" % (name, i)
ret.append(
internal_convert_to_tensor_or_composite(
value, dtype=dtype, name=n, as_ref=as_ref))
return ret
def convert_n_to_tensor_or_composite(values, dtype=None, name=None):
"""Converts `values` to a list of `Output` or `CompositeTensor` objects.
Any `CompositeTensor` objects in `values` are returned unmodified.
Args:
values: A list of `None`, `CompositeTensor`, or objects that can be
consumed by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor`s or
`CompositeTensor`s.
name: (Optional.) A name prefix to use when a new `Tensor` is created, in
which case element `i` will be given the name `name + '_' + i`.
Returns:
A list of `Tensor` and/or `CompositeTensor` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
return internal_convert_n_to_tensor_or_composite(
values=values, dtype=dtype, name=name, as_ref=False)
# TODO(josh11b): Add ctx argument to conversion_func() signature.
@tf_export("register_tensor_conversion_function")
def register_tensor_conversion_function(base_type,
conversion_func,
priority=100):
"""Registers a function for converting objects of `base_type` to `Tensor`.
The conversion function must have the following signature:
```python
def conversion_func(value, dtype=None, name=None, as_ref=False):
# ...
```
It must return a `Tensor` with the given `dtype` if specified. If the
conversion function creates a new `Tensor`, it should use the given
`name` if specified. All exceptions will be propagated to the caller.
The conversion function may return `NotImplemented` for some
inputs. In this case, the conversion process will continue to try
subsequent conversion functions.
If `as_ref` is true, the function must return a `Tensor` reference,
such as a `Variable`.
NOTE: The conversion functions will execute in order of priority,
followed by order of registration. To ensure that a conversion function
`F` runs before another conversion function `G`, ensure that `F` is
registered with a smaller priority than `G`.
Args:
base_type: The base type or tuple of base types for all objects that
`conversion_func` accepts.
conversion_func: A function that converts instances of `base_type` to
`Tensor`.
priority: Optional integer that indicates the priority for applying this
conversion function. Conversion functions with smaller priority values run
earlier than conversion functions with larger priority values. Defaults to
100.
Raises:
TypeError: If the arguments do not have the appropriate type.
"""
global _tensor_conversion_func_cache
with _tensor_conversion_func_lock:
if not (isinstance(base_type, type) or
(isinstance(base_type, tuple) and
all(isinstance(x, type) for x in base_type))):
raise TypeError("base_type must be a type or a tuple of types.")
if not callable(conversion_func):
raise TypeError("conversion_func must be callable.")
# context._context is checked so that we don't inadvertently create it.
# This is because enable_eager_execution will fail when called from the main
# function if the context._context is already created, and the
# register_tensor_conversion_function calls happen when the module is
# imported.
if context._context is not None and context.executing_eagerly(
) and isinstance(base_type, six.integer_types + (
float,
np.ndarray,
)):
# TODO(nareshmodi): consider setting a context variable which disables the
# fastpath instead.
raise TypeError(
"Cannot register conversions for numpy arrays, python number types "
"when executing eagerly.")
try:
funcs_at_priority = _tensor_conversion_func_registry[priority]
except KeyError:
funcs_at_priority = []
_tensor_conversion_func_registry[priority] = funcs_at_priority
funcs_at_priority.append((base_type, conversion_func))
_tensor_conversion_func_cache = {}
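# Illustrative sketch (not part of the original module): registering a
# conversion function for a hypothetical user-defined type, following the
# signature documented above and delegating to convert_to_tensor.
def _example_register_tensor_conversion():
  class Point(object):
    def __init__(self, x, y):
      self.x, self.y = x, y

  def point_to_tensor(value, dtype=None, name=None, as_ref=False):
    del as_ref  # References are not meaningful for this type.
    return convert_to_tensor([value.x, value.y], dtype=dtype, name=name)

  register_tensor_conversion_function(Point, point_to_tensor)
  # convert_to_tensor(Point(1.0, 2.0)) now yields the rank-1 tensor [1.0, 2.0].
  return convert_to_tensor(Point(1.0, 2.0))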
@tf_export("IndexedSlices")
class IndexedSlices(_TensorLike, composite_tensor.CompositeTensor):
"""A sparse representation of a set of tensor slices at given indices.
This class is a simple wrapper for a pair of `Tensor` objects:
* `values`: A `Tensor` of any dtype with shape `[D0, D1, ..., Dn]`.
* `indices`: A 1-D integer `Tensor` with shape `[D0]`.
An `IndexedSlices` is typically used to represent a subset of a larger
tensor `dense` of shape `[LARGE0, D1, .. , DN]` where `LARGE0 >> D0`.
The values in `indices` are the indices in the first dimension of
the slices that have been extracted from the larger tensor.
The dense tensor `dense` represented by an `IndexedSlices` `slices` has
```python
dense[slices.indices[i], :, :, :, ...] = slices.values[i, :, :, :, ...]
```
The `IndexedSlices` class is used principally in the definition of
gradients for operations that have sparse gradients
(e.g. `tf.gather`).
Contrast this representation with
`tf.SparseTensor`,
which uses multi-dimensional indices and scalar values.
"""
def __init__(self, values, indices, dense_shape=None):
"""Creates an `IndexedSlices`."""
if not isinstance(values, tensor_spec.TensorSpec):
_get_graph_from_inputs([values, indices, dense_shape])
self._values = values
self._indices = indices
self._dense_shape = dense_shape
@property
def values(self):
"""A `Tensor` containing the values of the slices."""
return self._values
@property
def indices(self):
"""A 1-D `Tensor` containing the indices of the slices."""
return self._indices
@property
def dense_shape(self):
"""A 1-D `Tensor` containing the shape of the corresponding dense tensor."""
return self._dense_shape
@property
def name(self):
"""The name of this `IndexedSlices`."""
return self.values.name
@property
def device(self):
"""The name of the device on which `values` will be produced, or `None`."""
return self.values.device
@property
def op(self):
"""The `Operation` that produces `values` as an output."""
return self.values.op
@property
def dtype(self):
"""The `DType` of elements in this tensor."""
return self.values.dtype
@property
def graph(self):
"""The `Graph` that contains the values, indices, and shape tensors."""
return self._values.graph
def __str__(self):
return "IndexedSlices(indices=%s, values=%s%s)" % (
self._indices, self._values,
(", dense_shape=%s" %
self._dense_shape) if self._dense_shape is not None else "")
def __neg__(self):
return IndexedSlices(-self.values, self.indices, self.dense_shape)
def _to_components(self):
if self._dense_shape is None:
return (self._values, self._indices)
else:
return (self._values, self._indices, self._dense_shape)
@classmethod
def _from_components(cls, components, metadata):
return cls(*components)
def _shape_invariant_to_components(self, shape=None):
if shape is None:
shape = self._values.shape
if self._dense_shape is None:
return (shape, shape[:1]) # values, indices
else:
# values, indices, dense_shape
return (shape, shape[:1], tensor_shape.TensorShape([shape.ndims]))
@property
def _is_graph_tensor(self):
return hasattr(self._values, "graph")
def consumers(self):
return self._consumers()
IndexedSlicesValue = collections.namedtuple(
"IndexedSlicesValue", ["values", "indices", "dense_shape"])
def _device_string(dev_spec):
if pydev.is_device_spec(dev_spec):
return dev_spec.to_string()
else:
return dev_spec
def _NodeDef(op_type, name, device=None, attrs=None): # pylint: disable=redefined-outer-name
"""Create a NodeDef proto.
Args:
op_type: Value for the "op" attribute of the NodeDef proto.
name: Value for the "name" attribute of the NodeDef proto.
device: string, device, or function from NodeDef to string. Value for the
"device" attribute of the NodeDef proto.
attrs: Optional dictionary where the key is the attribute name (a string)
and the value is the respective "attr" attribute of the NodeDef proto (an
AttrValue).
Returns:
A node_def_pb2.NodeDef protocol buffer.
"""
node_def = node_def_pb2.NodeDef()
node_def.op = compat.as_bytes(op_type)
node_def.name = compat.as_bytes(name)
if attrs is not None:
for k, v in six.iteritems(attrs):
node_def.attr[k].CopyFrom(v)
if device is not None:
if callable(device):
node_def.device = device(node_def)
else:
node_def.device = _device_string(device)
return node_def
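# Illustrative sketch (not part of the original module): building a NodeDef
# proto for a Const-like node with a dtype attr using the helper above.
def _example_node_def():
  attrs = {
      "dtype": attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)
  }
  node = _NodeDef("Const", "example_const", device="/cpu:0", attrs=attrs)
  # node.op == "Const", node.name == "example_const",
  # node.device == "/cpu:0", node.attr["dtype"].type == DT_FLOAT.
  return node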
# Copied from core/framework/node_def_util.cc
# TODO(mrry,josh11b): Consolidate this validation in C++ code.
_VALID_OP_NAME_REGEX = re.compile("^[A-Za-z0-9.][A-Za-z0-9_.\\-/]*$")
_VALID_SCOPE_NAME_REGEX = re.compile("^[A-Za-z0-9_.\\-/]*$")
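# Illustrative examples (not part of the original module) of what the op-name
# regex above accepts: "layer_1/MatMul" matches, while "_private" (leading
# underscore) and "bad name" (space) do not. Scope names are more permissive
# and may additionally be empty or start with any allowed character.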
def _create_c_op(graph, node_def, inputs, control_inputs):
"""Creates a TF_Operation.
Args:
graph: a `Graph`.
node_def: `node_def_pb2.NodeDef` for the operation to create.
inputs: A list of `Tensor`s (corresponding to scalar inputs) and lists of
`Tensor`s (corresponding to sequence inputs, e.g. "int64 * N",
"list(int64)"). The length of the list should be equal to the number of
inputs specified by this operation's op def.
control_inputs: A list of `Operation`s to set as control dependencies.
Returns:
A wrapped TF_Operation*.
"""
# pylint: disable=protected-access
op_desc = c_api.TF_NewOperation(graph._c_graph, compat.as_str(node_def.op),
compat.as_str(node_def.name))
if node_def.device:
c_api.TF_SetDevice(op_desc, compat.as_str(node_def.device))
# Add inputs
for op_input in inputs:
if isinstance(op_input, (list, tuple)):
c_api.TF_AddInputList(op_desc, [t._as_tf_output() for t in op_input])
else:
c_api.TF_AddInput(op_desc, op_input._as_tf_output())
# Add control inputs
for control_input in control_inputs:
c_api.TF_AddControlInput(op_desc, control_input._c_op)
# pylint: enable=protected-access
# Add attrs
for name, attr_value in node_def.attr.items():
serialized = attr_value.SerializeToString()
# TODO(skyewm): this creates and deletes a new TF_Status for every attr.
# It might be worth creating a convenient way to re-use the same status.
c_api.TF_SetAttrValueProto(op_desc, compat.as_str(name), serialized)
try:
c_op = c_api.TF_FinishOperation(op_desc)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
return c_op
@tf_export("Operation")
class Operation(object):
"""Represents a graph node that performs computation on tensors.
An `Operation` is a node in a TensorFlow `Graph` that takes zero or
more `Tensor` objects as input, and produces zero or more `Tensor`
objects as output. Objects of type `Operation` are created by
calling a Python op constructor (such as
`tf.matmul`)
or `tf.Graph.create_op`.
For example `c = tf.matmul(a, b)` creates an `Operation` of type
"MatMul" that takes tensors `a` and `b` as input, and produces `c`
as output.
After the graph has been launched in a session, an `Operation` can
be executed by passing it to
`tf.Session.run`.
`op.run()` is a shortcut for calling
`tf.compat.v1.get_default_session().run(op)`.
"""
def __init__(self,
node_def,
g,
inputs=None,
output_types=None,
control_inputs=None,
input_types=None,
original_op=None,
op_def=None):
r"""Creates an `Operation`.
NOTE: This constructor validates the name of the `Operation` (passed
as `node_def.name`). Valid `Operation` names match the following
regular expression:
[A-Za-z0-9.][A-Za-z0-9_.\\-/]*
Args:
node_def: `node_def_pb2.NodeDef`. `NodeDef` for the `Operation`. Used for
attributes of `node_def_pb2.NodeDef`, typically `name`, `op`, and
`device`. The `input` attribute is irrelevant here as it will be
computed when generating the model.
g: `Graph`. The parent graph.
inputs: list of `Tensor` objects. The inputs to this `Operation`.
output_types: list of `DType` objects. List of the types of the `Tensors`
computed by this operation. The length of this list indicates the
number of output endpoints of the `Operation`.
control_inputs: list of operations or tensors from which to have a control
dependency.
input_types: List of `DType` objects representing the types of the tensors
accepted by the `Operation`. By default uses `[x.dtype.base_dtype for x
in inputs]`. Operations that expect reference-typed inputs must specify
these explicitly.
original_op: Optional. Used to associate the new `Operation` with an
existing `Operation` (for example, a replica with the op that was
replicated).
op_def: Optional. The `op_def_pb2.OpDef` proto that describes the op type
that this `Operation` represents.
Raises:
TypeError: if control inputs are not Operations or Tensors,
or if `node_def` is not a `NodeDef`,
or if `g` is not a `Graph`,
or if `inputs` are not tensors,
or if `inputs` and `input_types` are incompatible.
ValueError: if the `node_def` name is not valid.
"""
# For internal use only: `node_def` can be set to a TF_Operation to create
# an Operation for that op. This is useful for creating Operations for ops
# indirectly created by C API methods, e.g. the ops created by
# TF_ImportGraphDef. When `node_def` is a TF_Operation, all optional fields
# should be None.
if isinstance(node_def, node_def_pb2.NodeDef):
if node_def.ByteSize() >= (1 << 31) or node_def.ByteSize() < 0:
raise ValueError(
"Cannot create a tensor proto whose content is larger than 2GB.")
if not _VALID_OP_NAME_REGEX.match(node_def.name):
raise ValueError("'%s' is not a valid node name" % node_def.name)
c_op = None
elif type(node_def).__name__ == "SwigPyObject":
assert inputs is None
assert output_types is None
assert control_inputs is None
assert input_types is None
assert original_op is None
assert op_def is None
c_op = node_def
else:
raise TypeError("node_def needs to be a NodeDef: %s" % node_def)
if not isinstance(g, Graph):
raise TypeError("g needs to be a Graph: %s" % g)
self._graph = g
if inputs is None:
inputs = []
elif not isinstance(inputs, list):
raise TypeError("inputs needs to be a list of Tensors: %s" % inputs)
for a in inputs:
if not isinstance(a, Tensor):
raise TypeError("input needs to be a Tensor: %s" % a)
if input_types is None:
input_types = [i.dtype.base_dtype for i in inputs]
else:
if not all(
x.is_compatible_with(i.dtype) for i, x in zip(inputs, input_types)):
raise TypeError("In op '%s', input types (%s) are not compatible "
"with expected types (%s)" %
(node_def.name, [i.dtype for i in inputs], input_types))
# Build the list of control inputs.
control_input_ops = []
if control_inputs:
for c in control_inputs:
control_op = None
if isinstance(c, Operation):
control_op = c
elif isinstance(c, (Tensor, IndexedSlices)):
control_op = c.op
else:
raise TypeError("Control input must be an Operation, "
"a Tensor, or IndexedSlices: %s" % c)
control_input_ops.append(control_op)
# This will be set by self.inputs.
self._inputs_val = None
# pylint: disable=protected-access
self._id_value = self._graph._next_id()
self._original_op = original_op
self._traceback = tf_stack.extract_stack()
# List of _UserDevSpecs holding code location of device context manager
# invocations and the users original argument to them.
self._device_code_locations = None
# Dict mapping op name to file and line information for op colocation
# context managers.
self._colocation_code_locations = None
self._control_flow_context = self.graph._get_control_flow_context()
# pylint: enable=protected-access
# Initialize self._c_op.
if c_op:
self._c_op = c_op
else:
if op_def is None:
op_def = self._graph._get_op_def(node_def.op)
# TODO(skyewm): op_def_library.apply_op() flattens the incoming inputs.
# Refactor so we don't have to do this here.
grouped_inputs = self._reconstruct_sequence_inputs(
op_def, inputs, node_def.attr)
self._c_op = _create_c_op(self._graph, node_def, grouped_inputs,
control_input_ops)
# Initialize self._outputs.
num_outputs = c_api.TF_OperationNumOutputs(self._c_op)
output_types = [
c_api.TF_OperationOutputType(c_api_util.tf_output(self._c_op, i))
for i in range(num_outputs)
]
self._outputs = [
Tensor(self, i, output_type)
for i, output_type in enumerate(output_types)
]
self._graph._add_op(self) # pylint: disable=protected-access
if not c_op:
self._control_flow_post_processing()
def _control_flow_post_processing(self):
"""Add this op to its control flow context.
This may add new ops and change this op's inputs. self.inputs must be
available before calling this method.
"""
for input_tensor in self.inputs:
control_flow_util.CheckInputFromValidContext(self, input_tensor.op)
if self._control_flow_context is not None:
self._control_flow_context.AddOp(self)
def _reconstruct_sequence_inputs(self, op_def, inputs, attrs):
"""Regroups a flat list of input tensors into scalar and sequence inputs.
Args:
op_def: The `op_def_pb2.OpDef` (for knowing the input types)
inputs: a list of input `Tensor`s to the op.
attrs: mapping from attr name to `attr_value_pb2.AttrValue` (these define
how long each sequence is)
Returns:
A list of `Tensor`s (corresponding to scalar inputs) and lists of
`Tensor`s (corresponding to sequence inputs).
"""
grouped_inputs = []
i = 0
for input_arg in op_def.input_arg:
if input_arg.number_attr:
input_len = attrs[input_arg.number_attr].i
is_sequence = True
elif input_arg.type_list_attr:
input_len = len(attrs[input_arg.type_list_attr].list.type)
is_sequence = True
else:
input_len = 1
is_sequence = False
if is_sequence:
grouped_inputs.append(inputs[i:i + input_len])
else:
grouped_inputs.append(inputs[i])
i += input_len
assert i == len(inputs)
return grouped_inputs
def colocation_groups(self):
"""Returns the list of colocation groups of the op."""
default_colocation_group = [compat.as_bytes("loc:@%s" % self.name)]
try:
class_attr = self.get_attr("_class")
except ValueError:
# This op has no explicit colocation group, so it is itself its
# own root of a colocation group.
return default_colocation_group
attr_groups = [
class_name for class_name in class_attr
if class_name.startswith(b"loc:@")
]
# If there are no colocation groups in the explicit _class field,
# return the default colocation group.
return attr_groups if attr_groups else default_colocation_group
def values(self):
"""DEPRECATED: Use outputs."""
return tuple(self.outputs)
def _get_control_flow_context(self):
"""Returns the control flow context of this op.
Returns:
A context object.
"""
return self._control_flow_context
def _set_control_flow_context(self, ctx):
"""Sets the current control flow context of this op.
Args:
ctx: a context object.
"""
self._control_flow_context = ctx
@property
def name(self):
"""The full name of this operation."""
return c_api.TF_OperationName(self._c_op)
@property
def _id(self):
"""The unique integer id of this operation."""
return self._id_value
@property
def device(self):
"""The name of the device to which this op has been assigned, if any.
Returns:
The string name of the device to which this op has been
assigned, or an empty string if it has not been assigned to a
device.
"""
return c_api.TF_OperationDevice(self._c_op)
@property
def _device_assignments(self):
"""Code locations for device context managers active at op creation.
This property will return a list of traceable_stack.TraceableObject
instances where .obj is a string representing the assigned device
(or information about the function that would be applied to this op
to compute the desired device) and the filename and lineno members
record the location of the relevant device context manager.
For example, suppose file_a contained these lines:
file_a.py:
15: with tf.device('/gpu:0'):
16: node_b = tf.constant(4, name='NODE_B')
Then a TraceableObject t_obj representing the device context manager
would have these member values:
t_obj.obj -> '/gpu:0'
t_obj.filename = 'file_a.py'
t_obj.lineno = 15
and node_b.op._device_assignments would return the list [t_obj].
Returns:
[str: traceable_stack.TraceableObject, ...] as per this method's
description, above.
"""
return self._device_code_locations or []
@property
def _colocation_dict(self):
"""Code locations for colocation context managers active at op creation.
This property will return a dictionary for which the keys are nodes with
which this Operation is colocated, and for which the values are
traceable_stack.TraceableObject instances. The TraceableObject instances
record the location of the relevant colocation context manager but have the
"obj" field set to None to prevent leaking private data.
For example, suppose file_a contained these lines:
file_a.py:
14: node_a = tf.constant(3, name='NODE_A')
15: with tf.compat.v1.colocate_with(node_a):
16: node_b = tf.constant(4, name='NODE_B')
Then a TraceableObject t_obj representing the colocation context manager
would have these member values:
t_obj.obj -> None
t_obj.filename = 'file_a.py'
t_obj.lineno = 15
and node_b.op._colocation_dict would return the dictionary
{ 'NODE_A': t_obj }
Returns:
{str: traceable_stack.TraceableObject} as per this method's description,
above.
"""
locations_dict = self._colocation_code_locations or {}
return locations_dict.copy()
@property
def _output_types(self):
"""List this operation's output types.
Returns:
List of the types of the Tensors computed by this operation.
Each element in the list is an integer whose value is one of
the TF_DataType enums defined in c_api.h
The length of this list indicates the number of output endpoints
of the operation.
"""
num_outputs = c_api.TF_OperationNumOutputs(self._c_op)
output_types = [
c_api.TF_OperationOutputType(self._tf_output(i))
for i in xrange(num_outputs)
]
# In all the tests, the output_types passed into Operation.__init__ are
# lists of ints (which is illegal according to the docstring), but
# input_types are instances of DType.
# This extra assert is to catch if we ever use DType for output_types.
if output_types:
assert isinstance(output_types[0], int)
return output_types
def _tf_output(self, output_idx):
"""Create and return a new TF_Output for output_idx'th output of this op."""
tf_output = c_api.TF_Output()
tf_output.oper = self._c_op
tf_output.index = output_idx
return tf_output
def _tf_input(self, input_idx):
"""Create and return a new TF_Input for input_idx'th input of this op."""
tf_input = c_api.TF_Input()
tf_input.oper = self._c_op
tf_input.index = input_idx
return tf_input
def _set_device(self, device): # pylint: disable=redefined-outer-name
"""Set the device of this operation.
Args:
device: string or device. The device to set.
"""
self._set_device_from_string(compat.as_str(_device_string(device)))
def _set_device_from_string(self, device_str):
"""Fast path to set device if the type is known to be a string.
This function is called frequently enough during graph construction that
there are non-trivial performance gains if the caller can guarantee that
the specified device is already a string.
Args:
device_str: A string specifying where to place this op.
"""
c_api.SetRequestedDevice(
self._graph._c_graph, # pylint: disable=protected-access
self._c_op, # pylint: disable=protected-access
device_str)
def _update_input(self, index, tensor):
"""Update the input to this operation at the given index.
NOTE: This is for TF internal use only. Please don't use it.
Args:
index: the index of the input to update.
tensor: the Tensor to be used as the input at the given index.
Raises:
TypeError: if tensor is not a Tensor,
or if input tensor type is not convertible to dtype.
ValueError: if the Tensor is from a different graph.
"""
if not isinstance(tensor, Tensor):
raise TypeError("tensor must be a Tensor: %s" % tensor)
_assert_same_graph(self, tensor)
# Reset cached inputs.
self._inputs_val = None
c_api.UpdateEdge(
self._graph._c_graph, # pylint: disable=protected-access
tensor._as_tf_output(), # pylint: disable=protected-access
self._tf_input(index))
def _add_while_inputs(self, tensors):
"""See AddWhileInputHack in python_api.h.
NOTE: This is for TF internal use only. Please don't use it.
Args:
tensors: list of Tensors
Raises:
TypeError: if tensor is not a Tensor,
or if input tensor type is not convertible to dtype.
ValueError: if the Tensor is from a different graph.
"""
for tensor in tensors:
if not isinstance(tensor, Tensor):
raise TypeError("tensor must be a Tensor: %s" % tensor)
_assert_same_graph(self, tensor)
# Reset cached inputs.
self._inputs_val = None
c_api.AddWhileInputHack(
self._graph._c_graph, # pylint: disable=protected-access
tensor._as_tf_output(), # pylint: disable=protected-access
self._c_op)
def _add_control_inputs(self, ops):
"""Add a list of new control inputs to this operation.
Args:
ops: the list of Operations to add as control input.
Raises:
TypeError: if ops is not a list of Operations.
ValueError: if any op in ops is from a different graph.
"""
for op in ops:
if not isinstance(op, Operation):
raise TypeError("op must be an Operation: %s" % op)
c_api.AddControlInput(self._graph._c_graph, self._c_op, op._c_op) # pylint: disable=protected-access
def _add_control_input(self, op):
"""Add a new control input to this operation.
Args:
op: the Operation to add as control input.
Raises:
TypeError: if op is not an Operation.
ValueError: if op is from a different graph.
"""
if not isinstance(op, Operation):
raise TypeError("op must be an Operation: %s" % op)
c_api.AddControlInput(self._graph._c_graph, self._c_op, op._c_op) # pylint: disable=protected-access
def _remove_all_control_inputs(self):
"""Removes any control inputs to this operation."""
c_api.RemoveAllControlInputs(self._graph._c_graph, self._c_op) # pylint: disable=protected-access
def _add_outputs(self, types, shapes):
"""Adds new Tensors to self.outputs.
Note: this is generally unsafe to use. This is used in certain situations in
conjunction with _set_type_list_attr.
Args:
types: list of DTypes
shapes: list of TensorShapes
"""
assert len(types) == len(shapes)
orig_num_outputs = len(self.outputs)
for i in range(len(types)):
t = Tensor(self, orig_num_outputs + i, types[i])
self._outputs.append(t)
t.set_shape(shapes[i])
def __str__(self):
return str(self.node_def)
def __repr__(self):
return "<tf.Operation '%s' type=%s>" % (self.name, self.type)
@property
def outputs(self):
"""The list of `Tensor` objects representing the outputs of this op."""
return self._outputs
# pylint: disable=protected-access
class _InputList(object):
"""Immutable input list wrapper."""
def __init__(self, inputs):
self._inputs = inputs
def __iter__(self):
return iter(self._inputs)
def __len__(self):
return len(self._inputs)
def __bool__(self):
return bool(self._inputs)
# Python 3 wants __bool__, Python 2.7 wants __nonzero__
__nonzero__ = __bool__
def __getitem__(self, i):
return self._inputs[i]
# pylint: enable=protected-access
@property
def inputs(self):
"""The list of `Tensor` objects representing the data inputs of this op."""
if self._inputs_val is None:
tf_outputs = c_api.GetOperationInputs(self._c_op)
# pylint: disable=protected-access
retval = [
self.graph._get_tensor_by_tf_output(tf_output)
for tf_output in tf_outputs
]
# pylint: enable=protected-access
self._inputs_val = Operation._InputList(retval)
return self._inputs_val
@property
def _inputs(self):
logging.warning("Operation._inputs is private, use Operation.inputs "
"instead. Operation._inputs will eventually be removed.")
return self.inputs
@_inputs.setter
def _inputs(self, value):
raise ValueError("Cannot assign _inputs")
@property
def _input_types(self):
num_inputs = c_api.TF_OperationNumInputs(self._c_op)
input_types = [
dtypes.as_dtype(c_api.TF_OperationInputType(self._tf_input(i)))
for i in xrange(num_inputs)
]
return input_types
@_input_types.setter
def _input_types(self, value):
raise ValueError("Cannot assign _input_types")
@property
def control_inputs(self):
"""The `Operation` objects on which this op has a control dependency.
Before this op is executed, TensorFlow will ensure that the
operations in `self.control_inputs` have finished executing. This
mechanism can be used to run ops sequentially for performance
reasons, or to ensure that the side effects of an op are observed
in the correct order.
Returns:
A list of `Operation` objects.
"""
control_c_ops = c_api.TF_OperationGetControlInputs_wrapper(self._c_op)
# pylint: disable=protected-access
return [
self.graph._get_operation_by_name_unsafe(c_api.TF_OperationName(c_op))
for c_op in control_c_ops
]
# pylint: enable=protected-access
@property
def _control_outputs(self):
"""The `Operation` objects which have a control dependency on this op.
Before any of the ops in self._control_outputs can execute tensorflow will
ensure self has finished executing.
Returns:
A list of `Operation` objects.
"""
control_c_ops = c_api.TF_OperationGetControlOutputs_wrapper(self._c_op)
# pylint: disable=protected-access
return [
self.graph._get_operation_by_name_unsafe(c_api.TF_OperationName(c_op))
for c_op in control_c_ops
]
# pylint: enable=protected-access
@property
def _control_inputs(self):
logging.warning("Operation._control_inputs is private, use "
"Operation.control_inputs instead. "
"Operation._control_inputs will eventually be removed.")
return self.control_inputs
@_control_inputs.setter
def _control_inputs(self, value):
logging.warning("Operation._control_inputs is private, use "
"Operation.control_inputs instead. "
"Operation._control_inputs will eventually be removed.")
# Copy value because it may be self._control_inputs_val (in particular if
# this is called from self._control_inputs += ...), and we don't want to
# clear value below.
value = copy.copy(value)
self._remove_all_control_inputs()
self._add_control_inputs(value)
@property
def type(self):
"""The type of the op (e.g. `"MatMul"`)."""
return c_api.TF_OperationOpType(self._c_op)
@property
def graph(self):
"""The `Graph` that contains this operation."""
return self._graph
@property
def node_def(self):
# pylint: disable=line-too-long
"""Returns the `NodeDef` representation of this operation.
Returns:
A
[`NodeDef`](https://www.tensorflow.org/code/tensorflow/core/framework/node_def.proto)
protocol buffer.
"""
# pylint: enable=line-too-long
with c_api_util.tf_buffer() as buf:
c_api.TF_OperationToNodeDef(self._c_op, buf)
data = c_api.TF_GetBuffer(buf)
node_def = node_def_pb2.NodeDef()
node_def.ParseFromString(compat.as_bytes(data))
return node_def
@property
def _node_def(self):
logging.warning("Operation._node_def is private, use Operation.node_def "
"instead. Operation._node_def will eventually be removed.")
return self.node_def
@property
def op_def(self):
# pylint: disable=line-too-long
"""Returns the `OpDef` proto that represents the type of this op.
Returns:
An
[`OpDef`](https://www.tensorflow.org/code/tensorflow/core/framework/op_def.proto)
protocol buffer.
"""
# pylint: enable=line-too-long
return self._graph._get_op_def(self.type)
@property
def _op_def(self):
logging.warning("Operation._op_def is private, use Operation.op_def "
"instead. Operation._op_def will eventually be removed.")
return self.op_def
@property
def traceback(self):
"""Returns the call stack from when this operation was constructed."""
return tf_stack.convert_stack(self._traceback)
@property
def traceback_with_start_lines(self):
"""Same as traceback but includes start line of function definition.
Returns:
A list of 5-tuples (filename, lineno, name, code, func_start_lineno).
"""
return tf_stack.convert_stack(
self._traceback, include_func_start_lineno=True)
def _set_attr(self, attr_name, attr_value):
"""Private method used to set an attribute in the node_def."""
buf = c_api.TF_NewBufferFromString(
compat.as_bytes(attr_value.SerializeToString()))
try:
# pylint: disable=protected-access
c_api.SetAttr(self._graph._c_graph, self._c_op, attr_name, buf)
# pylint: enable=protected-access
finally:
c_api.TF_DeleteBuffer(buf)
def _set_func_attr(self, attr_name, func_name):
"""Private method used to set a function attribute in the node_def."""
func = attr_value_pb2.NameAttrList(name=func_name)
self._set_attr(attr_name, attr_value_pb2.AttrValue(func=func))
def _set_func_list_attr(self, attr_name, func_names):
"""Private method used to set a list(function) attribute in the node_def."""
funcs = [attr_value_pb2.NameAttrList(name=func_name)
for func_name in func_names]
funcs_list = attr_value_pb2.AttrValue.ListValue(func=funcs)
self._set_attr(attr_name, attr_value_pb2.AttrValue(list=funcs_list))
def _set_type_list_attr(self, attr_name, types):
"""Private method used to set a list(type) attribute in the node_def."""
if not types:
return
if isinstance(types[0], dtypes.DType):
types = [dt.as_datatype_enum for dt in types]
types_list = attr_value_pb2.AttrValue.ListValue(type=types)
self._set_attr(attr_name, attr_value_pb2.AttrValue(list=types_list))
def _set_shape_list_attr(self, attr_name, shapes):
"""Private method used to set a list(shape) attribute in the node_def."""
shapes = [s.as_proto() for s in shapes]
shapes_list = attr_value_pb2.AttrValue.ListValue(shape=shapes)
self._set_attr(attr_name, attr_value_pb2.AttrValue(list=shapes_list))
def _clear_attr(self, attr_name):
"""Private method used to clear an attribute in the node_def."""
# pylint: disable=protected-access
c_api.ClearAttr(self._graph._c_graph, self._c_op, attr_name)
# pylint: enable=protected-access
def get_attr(self, name):
"""Returns the value of the attr of this op with the given `name`.
Args:
name: The name of the attr to fetch.
Returns:
The value of the attr, as a Python object.
Raises:
ValueError: If this op does not have an attr with the given `name`.
"""
fields = ("s", "i", "f", "b", "type", "shape", "tensor", "func")
try:
with c_api_util.tf_buffer() as buf:
c_api.TF_OperationGetAttrValueProto(self._c_op, name, buf)
data = c_api.TF_GetBuffer(buf)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
x = attr_value_pb2.AttrValue()
x.ParseFromString(data)
oneof_value = x.WhichOneof("value")
if oneof_value is None:
return []
if oneof_value == "list":
for f in fields:
if getattr(x.list, f):
if f == "type":
return [dtypes.as_dtype(t) for t in x.list.type]
else:
return list(getattr(x.list, f))
return []
if oneof_value == "type":
return dtypes.as_dtype(x.type)
assert oneof_value in fields, "Unsupported field type in " + str(x)
return getattr(x, oneof_value)
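  # Illustrative sketch (not part of the original module): for a Const op
  # produced by e.g. tf.constant(1.0), get_attr("dtype") returns the DType
  # float32 and get_attr("value") returns the TensorProto holding 1.0;
  # requesting an attr name the op does not have raises ValueError.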
def run(self, feed_dict=None, session=None):
"""Runs this operation in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for this operation.
*N.B.* Before invoking `Operation.run()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values. See
`tf.Session.run` for a description of the valid feed values.
session: (Optional.) The `Session` to be used to run to this operation. If
none, the default session will be used.
"""
_run_using_default_session(self, feed_dict, self.graph, session)
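# Illustrative sketch (not part of the original module): inspecting the
# Operations created by the graph-construction APIs above (graph mode).
def _example_inspect_operation():
  g = Graph()
  with g.as_default():
    a = convert_to_tensor([[1.0, 2.0]], name="a")
    op = a.op
    # op.type == "Const", op.name == "a", op.inputs is empty and
    # op.outputs == [a]; op.node_def is the NodeDef protocol buffer.
    return op.name, op.type, list(op.inputs), op.outputs, op.node_def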
_gradient_registry = registry.Registry("gradient")
@tf_export("RegisterGradient")
class RegisterGradient(object):
"""A decorator for registering the gradient function for an op type.
This decorator is only used when defining a new op type. For an op
with `m` inputs and `n` outputs, the gradient function is a function
that takes the original `Operation` and `n` `Tensor` objects
(representing the gradients with respect to each output of the op),
and returns `m` `Tensor` objects (representing the partial gradients
with respect to each input of the op).
For example, assuming that operations of type `"Sub"` take two
inputs `x` and `y`, and return a single output `x - y`, the
following gradient function would be registered:
```python
@tf.RegisterGradient("Sub")
def _sub_grad(unused_op, grad):
return grad, tf.negative(grad)
```
The decorator argument `op_type` is the string type of an
operation. This corresponds to the `OpDef.name` field for the proto
that defines the operation.
"""
def __init__(self, op_type):
"""Creates a new decorator with `op_type` as the Operation type.
Args:
op_type: The string type of an operation. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
"""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
self._op_type = op_type
def __call__(self, f):
"""Registers the function `f` as gradient function for `op_type`."""
_gradient_registry.register(f, self._op_type)
return f
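# Illustrative sketch (not part of the original module): registering a
# gradient for a hypothetical op type and noting how it is looked up.
def _example_register_gradient():
  @RegisterGradient("ExampleSquare")  # hypothetical op type
  def _example_square_grad(op, grad):  # pylint: disable=unused-variable
    x = op.inputs[0]
    return 2.0 * x * grad
  # get_gradient_function(op) returns _example_square_grad for any op whose
  # type (or "_gradient_op_type" attr) is "ExampleSquare".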
@deprecation.deprecated_endpoints("NotDifferentiable", "NoGradient")
@tf_export("no_gradient", v1=["no_gradient", "NotDifferentiable", "NoGradient"])
def no_gradient(op_type):
"""Specifies that ops of type `op_type` is not differentiable.
This function should *not* be used for operations that have a
well-defined gradient that is not yet implemented.
This function is only used when defining a new op type. It may be
used for ops such as `tf.size()` that are not differentiable. For
example:
```python
tf.no_gradient("Size")
```
The gradient computed for 'op_type' will then propagate zeros.
For ops that have a well-defined gradient but are not yet implemented,
no declaration should be made, and an error *must* be thrown if
an attempt to request its gradient is made.
Args:
op_type: The string type of an operation. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
Raises:
TypeError: If `op_type` is not a string.
"""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
_gradient_registry.register(None, op_type)
# Aliases for the old names, will be eventually removed.
NoGradient = no_gradient
NotDifferentiable = no_gradient
def get_gradient_function(op):
"""Returns the function that computes gradients for "op"."""
if not op.inputs:
return None
try:
op_type = op.get_attr("_gradient_op_type")
except ValueError:
op_type = op.type
return _gradient_registry.lookup(op_type)
_shape_registry = registry.Registry("shape functions")
_default_shape_function_registry = registry.Registry("default shape functions")
# These are set to common_shapes.call_cpp_shape_fn by op generated code
# (generated by python_op_gen.cc).
# It is set outside ops.py to avoid a circular dependency.
_call_cpp_shape_fn = None
_call_cpp_shape_fn_and_require_op = None
def _set_call_cpp_shape_fn(call_cpp_shape_fn):
"""Sets default shape fns from passed common_shapes.call_cpp_shape_fn."""
global _call_cpp_shape_fn, _call_cpp_shape_fn_and_require_op
if _call_cpp_shape_fn:
return # already registered
def call_without_requiring(op):
return call_cpp_shape_fn(op, require_shape_fn=False)
_call_cpp_shape_fn = call_without_requiring
def call_with_requiring(op):
return call_cpp_shape_fn(op, require_shape_fn=True)
_call_cpp_shape_fn_and_require_op = call_with_requiring
class RegisterShape(object):
"""No longer used.
Was: A decorator for registering a shape function.
Shape functions must now be registered via the SetShapeFn on the
original Op specification in C++.
"""
def __init__(self, op_type):
"""Saves the `op_type` as the `Operation` type."""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
self._op_type = op_type
def __call__(self, f):
"""Registers "f" as the shape function for "op_type"."""
if f is None:
assert _call_cpp_shape_fn
# None is a special "weak" value that provides a default shape function,
# and can be overridden by a non-None registration.
try:
_default_shape_function_registry.register(_call_cpp_shape_fn,
self._op_type)
except KeyError:
# Ignore duplicate registrations of the weak value. This can
# occur if the op library input to wrapper generation
# inadvertently links in one or more of the standard op
# libraries.
pass
else:
_shape_registry.register(f, self._op_type)
return f
def set_shape_and_handle_data_for_outputs(_):
"""No op. TODO(b/74620627): Remove this."""
pass
class OpStats(object):
"""A holder for statistics about an operator.
This class holds information about the resource requirements for an op,
including the size of its weight parameters on-disk and how many FLOPS it
requires to execute forward inference.
If you define a new operation, you can create a function that will return a
set of information about its usage of the CPU and disk space when serialized.
The function itself takes a Graph object that's been set up so you can call
methods like get_tensor_by_name to help calculate the results, and a NodeDef
argument.
"""
def __init__(self, statistic_type, value=None):
"""Sets up the initial placeholders for the statistics."""
self.statistic_type = statistic_type
self.value = value
@property
def statistic_type(self):
return self._statistic_type
@statistic_type.setter
def statistic_type(self, statistic_type):
self._statistic_type = statistic_type
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
def __iadd__(self, other):
if other.statistic_type != self.statistic_type:
raise ValueError("Can't add an OpStat of type %s to one of %s." %
(self.statistic_type, other.statistic_type))
if self.value is None:
self.value = other.value
elif other.value is not None:
self._value += other.value
return self
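# Illustrative sketch (not part of the original module): OpStats objects of
# the same statistic type accumulate with +=, treating None as "no data yet".
def _example_accumulate_op_stats():
  total = OpStats("flops", None)
  total += OpStats("flops", 1200)
  total += OpStats("flops", 800)
  # total.value == 2000; adding an OpStats of a different statistic_type
  # would raise ValueError.
  return total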
_stats_registry = registry.Registry("statistical functions")
class RegisterStatistics(object):
"""A decorator for registering the statistics function for an op type.
This decorator can be defined for an op type so that it gives a
report on the resources used by an instance of an operator, in the
form of an OpStats object.
Well-known types of statistics include these so far:
- flops: When running a graph, the bulk of the computation happens doing
numerical calculations like matrix multiplications. This type allows a node
to return how many floating-point operations it takes to complete. The
total number of FLOPs for a graph is a good guide to its expected latency.
You can add your own statistics just by picking a new type string, registering
functions for the ops you care about, and then calling get_stats_for_node_def.
If a statistic for an op is registered multiple times, a KeyError will be
raised.
Since statistics are counted on a per-op basis, they are not suitable for
model parameters (capacity), which are expected to be counted only once even
if they are shared by multiple ops (e.g. RNNs).
For example, you can define a new metric called doohickey for a Foo operation
by placing this in your code:
```python
@ops.RegisterStatistics("Foo", "doohickey")
def _calc_foo_bojangles(unused_graph, unused_node_def):
return ops.OpStats("doohickey", 20)
```
Then in client code you can retrieve the value by making this call:
```python
doohickey = ops.get_stats_for_node_def(graph, node_def, "doohickey")
```
If the NodeDef is for an op with a registered doohickey function, you'll get
back the calculated amount in doohickey.value, or None if it's not defined.
"""
def __init__(self, op_type, statistic_type):
"""Saves the `op_type` as the `Operation` type."""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string.")
if "," in op_type:
raise TypeError("op_type must not contain a comma.")
self._op_type = op_type
if not isinstance(statistic_type, six.string_types):
raise TypeError("statistic_type must be a string.")
if "," in statistic_type:
raise TypeError("statistic_type must not contain a comma.")
self._statistic_type = statistic_type
def __call__(self, f):
"""Registers "f" as the statistics function for "op_type"."""
_stats_registry.register(f, self._op_type + "," + self._statistic_type)
return f
def get_stats_for_node_def(graph, node, statistic_type):
"""Looks up the node's statistics function in the registry and calls it.
This function takes a Graph object and a NodeDef from a GraphDef, and if
there's an associated statistics method, calls it and returns a result. If no
function has been registered for the particular node type, it returns an empty
statistics object.
Args:
graph: A Graph object that's been set up with the node's graph.
node: A NodeDef describing the operator.
statistic_type: A string identifying the statistic we're interested in.
Returns:
An OpStats object containing information about resource usage.
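For example (a minimal sketch; "my_op" is a hypothetical op name, and a
"flops" function is assumed to be registered for its op type):
```python
node_def = graph.get_operation_by_name("my_op").node_def
flops = ops.get_stats_for_node_def(graph, node_def, "flops")
# flops.value holds the estimate, or None if no function is registered.
```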
"""
try:
stats_func = _stats_registry.lookup(node.op + "," + statistic_type)
result = stats_func(graph, node)
except LookupError:
result = OpStats(statistic_type)
return result
def name_from_scope_name(name):
"""Returns the name of an op given the name of its scope.
Args:
name: the name of the scope.
Returns:
the name of the op (equal to scope name minus any trailing slash).
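For example, `name_from_scope_name("foo/bar/")` returns `"foo/bar"`, while a
name with no trailing slash is returned unchanged.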
"""
return name[:-1] if (name and name[-1] == "/") else name
_MUTATION_LOCK_GROUP = 0
_SESSION_RUN_LOCK_GROUP = 1
@tf_export("Graph")
class Graph(object):
"""A TensorFlow computation, represented as a dataflow graph.
A `Graph` contains a set of
`tf.Operation` objects,
which represent units of computation; and
`tf.Tensor` objects, which represent
the units of data that flow between operations.
A default `Graph` is always registered, and accessible by calling
`tf.compat.v1.get_default_graph`.
To add an operation to the default graph, simply call one of the functions
that defines a new `Operation`:
```python
c = tf.constant(4.0)
assert c.graph is tf.compat.v1.get_default_graph()
```
Another typical usage involves the
`tf.Graph.as_default`
context manager, which overrides the current default graph for the
lifetime of the context:
```python
g = tf.Graph()
with g.as_default():
# Define operations and tensors in `g`.
c = tf.constant(30.0)
assert c.graph is g
```
Important note: This class *is not* thread-safe for graph construction. All
operations should be created from a single thread, or external
synchronization must be provided. Unless otherwise specified, all methods
are not thread-safe.
A `Graph` instance supports an arbitrary number of "collections"
that are identified by name. For convenience when building a large
graph, collections can store groups of related objects: for
example, the `tf.Variable` uses a collection (named
`tf.GraphKeys.GLOBAL_VARIABLES`) for
all variables that are created during the construction of a graph. The caller
may define additional collections by specifying a new name.
"""
def __init__(self):
"""Creates a new, empty Graph."""
# Protects core state that can be returned via public accessors.
# Thread-safety is provided on a best-effort basis to support buggy
# programs, and is not guaranteed by the public `tf.Graph` API.
#
# NOTE(mrry): This does not protect the various stacks. A warning will
# be reported if these are used from multiple threads.
self._lock = threading.RLock()
# The group lock synchronizes Session.run calls with methods that create
# and mutate ops (e.g. Graph.create_op()). This synchronization is
# necessary because it's illegal to modify an operation after it's been run.
# The group lock allows any number of threads to mutate ops at the same time
# but if any modification is going on, all Session.run calls have to wait.
# Similarly, if one or more Session.run calls are going on, all mutate ops
# have to wait until all Session.run calls have finished.
self._group_lock = lock_util.GroupLock(num_groups=2)
self._nodes_by_id = {} # GUARDED_BY(self._lock)
self._next_id_counter = 0 # GUARDED_BY(self._lock)
self._nodes_by_name = {} # GUARDED_BY(self._lock)
self._version = 0 # GUARDED_BY(self._lock)
# Maps a name used in the graph to the next id to use for that name.
self._names_in_use = {}
self._stack_state_is_thread_local = False
self._thread_local = threading.local()
# Functions that will be applied to choose a device if none is specified.
# In TF2.x or after switch_to_thread_local(),
# self._thread_local._device_function_stack is used instead.
self._graph_device_function_stack = traceable_stack.TraceableStack()
# Default original_op applied to new ops.
self._default_original_op = None
# Current control flow context. It could be either CondContext or
# WhileContext defined in ops/control_flow_ops.py
self._control_flow_context = None
# A new node will depend on the union of all of the nodes in the stack.
# In TF2.x or after switch_to_thread_local(),
# self._thread_local._control_dependencies_stack is used instead.
self._graph_control_dependencies_stack = []
# Arbitrary collections of objects.
self._collections = {}
# The graph-level random seed
self._seed = None
# A dictionary of attributes that should be applied to all ops.
self._attr_scope_map = {}
# A map from op type to the kernel label that should be used.
self._op_to_kernel_label_map = {}
# A map from op type to an alternative op type that should be used when
# computing gradients.
self._gradient_override_map = {}
# True if the graph is considered "finalized". In that case no
# new operations can be added.
self._finalized = False
# Functions defined in the graph
self._functions = collections.OrderedDict()
# Default GraphDef versions
self._graph_def_versions = versions_pb2.VersionDef(
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER)
self._building_function = False
# Stack of colocate_with ops. In TF2.x or after switch_to_thread_local(),
# self._thread_local._colocation_stack is used instead.
self._graph_colocation_stack = traceable_stack.TraceableStack()
# Set of tensors that are dangerous to feed!
self._unfeedable_tensors = set()
# Set of operations that are dangerous to fetch!
self._unfetchable_ops = set()
# A map of tensor handle placeholder to tensor dtype.
self._handle_feeders = {}
# A map from tensor handle to its read op.
self._handle_readers = {}
# A map from tensor handle to its move op.
self._handle_movers = {}
# A map from tensor handle to its delete op.
self._handle_deleters = {}
# Allow optimizers and other objects to pseudo-uniquely key graphs (this key
# will be shared when defining function graphs, for example, so optimizers
# being called inside function definitions behave as if they were seeing the
# actual outside graph).
self._graph_key = "grap-key-%d/" % (uid(),)
# A string with the last reduction method passed to
# losses.compute_weighted_loss(), or None. This is required only for
# backward compatibility with Estimator and optimizer V1 use cases.
self._last_loss_reduction = None
# Flag that is used to indicate whether loss has been scaled by optimizer.
# If this flag has been set, then estimator uses it to scale loss back
# before reporting. This is required only for backward compatibility with
# Estimator and optimizer V1 use cases.
self._is_loss_scaled_by_optimizer = False
self._container = ""
self._registered_ops = op_def_registry.get_registered_ops()
# Set to True if this graph is being built in an
# AutomaticControlDependencies context.
self._add_control_dependencies = False
# TODO(skyewm): fold as much of the above as possible into the C
# implementation
self._scoped_c_graph = c_api_util.ScopedTFGraph()
# The C API requires all ops to have shape functions. Disable this
# requirement (many custom ops do not have shape functions, and we don't
# want to break these existing cases).
c_api.SetRequireShapeInferenceFns(self._c_graph, False)
if tf2.enabled():
self.switch_to_thread_local()
# Note: this method is private because the API of tf.Graph() is public and
# frozen, and this functionality is still not ready for public visibility.
@tf_contextlib.contextmanager
def _variable_creator_scope(self, creator, priority=100):
"""Scope which defines a variable creation function.
Args:
creator: A callable taking `next_creator` and `kwargs`. See the
`tf.variable_creator_scope` docstring.
priority: Creators with a higher `priority` are called first. Within the
same priority, creators are called inner-to-outer.
Yields:
`_variable_creator_scope` is a context manager with a side effect, but
doesn't return a value.
Raises:
RuntimeError: If variable creator scopes are not properly nested.
"""
# This step keeps a reference to the existing stack, and it also initializes
# self._thread_local._variable_creator_stack if it doesn't exist yet.
old = self._variable_creator_stack
new = list(old)
new.append((priority, creator))
# Sorting is stable, so we'll put higher-priority creators later in the list
# but otherwise maintain registration order.
new.sort(key=lambda item: item[0])
self._thread_local._variable_creator_stack = new # pylint: disable=protected-access
try:
yield
finally:
if self._thread_local._variable_creator_stack is not new: # pylint: disable=protected-access
raise RuntimeError(
"Exiting variable_creator_scope without proper nesting.")
self._thread_local._variable_creator_stack = old # pylint: disable=protected-access
# Note: this method is private because the API of tf.Graph() is public and
# frozen, and this functionality is still not ready for public visibility.
@property
def _variable_creator_stack(self):
if not hasattr(self._thread_local, "_variable_creator_stack"):
self._thread_local._variable_creator_stack = [] # pylint: disable=protected-access
# This previously returned a copy of the stack instead of the stack itself,
# to guard against accidental mutation. Consider, however, code that wants
# to save and restore the variable creator stack:
# def f():
# original_stack = graph._variable_creator_stack
# graph._variable_creator_stack = new_stack
# ... # Some code
# graph._variable_creator_stack = original_stack
#
# And lets say you have some code that calls this function with some
# variable_creator:
# def g():
# with variable_scope.variable_creator_scope(creator):
# f()
# When exiting the variable creator scope, it would see a different stack
# object than it expected, leading to an "Exiting variable_creator_scope
# without proper nesting" error.
return self._thread_local._variable_creator_stack # pylint: disable=protected-access
@_variable_creator_stack.setter
def _variable_creator_stack(self, variable_creator_stack):
self._thread_local._variable_creator_stack = variable_creator_stack # pylint: disable=protected-access
def _check_not_finalized(self):
"""Check if the graph is finalized.
Raises:
RuntimeError: If the graph finalized.
"""
if self._finalized:
raise RuntimeError("Graph is finalized and cannot be modified.")
def _add_op(self, op):
"""Adds 'op' to the graph.
Args:
op: the Operator or Tensor to add.
Raises:
TypeError: if op is not an Operation or Tensor.
ValueError: if the op.name or op._id are already used.
"""
self._check_not_finalized()
if not isinstance(op, (Tensor, Operation)):
raise TypeError("op must be a Tensor or Operation: %s" % op)
with self._lock:
# pylint: disable=protected-access
if op._id in self._nodes_by_id:
raise ValueError("cannot add an op with id %d as it already "
"exists in the graph" % op._id)
if op.name in self._nodes_by_name:
raise ValueError("cannot add op with name %s as that name "
"is already used" % op.name)
self._nodes_by_id[op._id] = op
self._nodes_by_name[op.name] = op
self._version = max(self._version, op._id)
# pylint: enable=protected-access
@property
def _c_graph(self):
if self._scoped_c_graph:
return self._scoped_c_graph.graph
return None
@property
def version(self):
"""Returns a version number that increases as ops are added to the graph.
Note that this is unrelated to the
`tf.Graph.graph_def_versions`.
Returns:
An integer version that increases as ops are added to the graph.
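For example (a minimal sketch):
```python
g = tf.Graph()
with g.as_default():
  before = g.version
  tf.constant(1.0)
  assert g.version > before
```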
"""
if self._finalized:
return self._version
with self._lock:
return self._version
@property
def graph_def_versions(self):
# pylint: disable=line-too-long
"""The GraphDef version information of this graph.
For details on the meaning of each version, see
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto).
Returns:
A `VersionDef`.
"""
# pylint: enable=line-too-long
with c_api_util.tf_buffer() as buf:
c_api.TF_GraphVersions(self._c_graph, buf)
data = c_api.TF_GetBuffer(buf)
version_def = versions_pb2.VersionDef()
version_def.ParseFromString(compat.as_bytes(data))
return version_def
@property
def seed(self):
"""The graph-level random seed of this graph."""
return self._seed
@seed.setter
def seed(self, seed):
self._seed = seed
@property
def finalized(self):
"""True if this graph has been finalized."""
return self._finalized
def finalize(self):
"""Finalizes this graph, making it read-only.
After calling `g.finalize()`, no new operations can be added to
`g`. This method is used to ensure that no operations are added
to a graph when it is shared between multiple threads, for example
when using a `tf.compat.v1.train.QueueRunner`.
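For example (a minimal sketch):
```python
g = tf.Graph()
g.finalize()
with g.as_default():
  tf.constant(1.0)  # Raises RuntimeError: the graph is finalized.
```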
"""
self._finalized = True
def _unsafe_unfinalize(self):
"""Opposite of `finalize`.
Internal interface.
NOTE: Unfinalizing a graph could have negative impact on performance,
especially in a multi-threaded environment. Unfinalizing a graph
when it is in use by a Session may lead to undefined behavior. Ensure
that all sessions using a graph are closed before calling this method.
"""
self._finalized = False
def _get_control_flow_context(self):
"""Returns the current control flow context.
Returns:
A context object.
"""
return self._control_flow_context
def _set_control_flow_context(self, ctx):
"""Sets the current control flow context.
Args:
ctx: a context object.
"""
self._control_flow_context = ctx
def _copy_functions_to_graph_def(self, graph_def, starting_bytesize):
"""If this graph contains functions, copy them to `graph_def`."""
bytesize = starting_bytesize
for f in self._functions.values():
bytesize += f.definition.ByteSize()
if bytesize >= (1 << 31) or bytesize < 0:
raise ValueError("GraphDef cannot be larger than 2GB.")
graph_def.library.function.extend([f.definition])
if f.grad_func_name:
grad_def = function_pb2.GradientDef()
grad_def.function_name = f.name
grad_def.gradient_func = f.grad_func_name
graph_def.library.gradient.extend([grad_def])
def _as_graph_def(self, from_version=None, add_shapes=False):
# pylint: disable=line-too-long
"""Returns a serialized `GraphDef` representation of this graph.
The serialized `GraphDef` can be imported into another `Graph`
(using `tf.import_graph_def`) or used with the
[C++ Session API](../../../../api_docs/cc/index.md).
This method is thread-safe.
Args:
from_version: Optional. If this is set, returns a `GraphDef` containing
only the nodes that were added to this graph since its `version`
property had the given value.
add_shapes: If true, adds an "_output_shapes" list attr to each node with
the inferred shapes of each of its outputs.
Returns:
A tuple containing a
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
protocol buffer, and the version of the graph to which that
`GraphDef` corresponds.
Raises:
ValueError: If the `graph_def` would be too large.
"""
# pylint: enable=line-too-long
with self._lock:
with c_api_util.tf_buffer() as buf:
c_api.TF_GraphToGraphDef(self._c_graph, buf)
data = c_api.TF_GetBuffer(buf)
graph = graph_pb2.GraphDef()
graph.ParseFromString(compat.as_bytes(data))
# Strip the experimental library field iff it's empty.
if not graph.library.function:
graph.ClearField("library")
if add_shapes:
for node in graph.node:
op = self._nodes_by_name[node.name]
if op.outputs:
node.attr["_output_shapes"].list.shape.extend(
[output.get_shape().as_proto() for output in op.outputs])
for function_def in graph.library.function:
defined_function = self._functions[function_def.signature.name]
try:
func_graph = defined_function.graph
except AttributeError:
# _DefinedFunction doesn't have a graph, _EagerDefinedFunction
# does. Both rely on ops.py, so we can't really isinstance check
# them.
continue
input_shapes = function_def.attr["_input_shapes"]
try:
func_graph_inputs = func_graph.inputs
except AttributeError:
continue
for input_tensor in func_graph_inputs:
if input_tensor.dtype == dtypes.resource:
# TODO(allenl): Save and restore handle data, then save the
# resource placeholder's shape. Right now some shape functions get
# confused if we set the shape of the resource placeholder (to a
# scalar of course) and there isn't any handle data.
input_shapes.list.shape.add().CopyFrom(
tensor_shape.TensorShape(None).as_proto())
else:
input_shapes.list.shape.add().CopyFrom(
input_tensor.get_shape().as_proto())
for node in function_def.node_def:
try:
op = func_graph.get_operation_by_name(node.name)
except KeyError:
continue
node.attr["_output_shapes"].list.shape.extend(
[output.get_shape().as_proto() for output in op.outputs])
return graph, self._version
def as_graph_def(self, from_version=None, add_shapes=False):
# pylint: disable=line-too-long
"""Returns a serialized `GraphDef` representation of this graph.
The serialized `GraphDef` can be imported into another `Graph`
(using `tf.import_graph_def`) or used with the
[C++ Session API](../../api_docs/cc/index.md).
This method is thread-safe.
Args:
from_version: Optional. If this is set, returns a `GraphDef` containing
only the nodes that were added to this graph since its `version`
property had the given value.
add_shapes: If true, adds an "_output_shapes" list attr to each node with
the inferred shapes of each of its outputs.
Returns:
A
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
protocol buffer.
Raises:
ValueError: If the `graph_def` would be too large.
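For example (a minimal sketch), a graph can be serialized and re-imported
into a fresh graph:
```python
with tf.Graph().as_default() as g:
  tf.constant(1.0, name="c")
graph_def = g.as_graph_def()
with tf.Graph().as_default():
  tf.import_graph_def(graph_def, name="")
```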
"""
# pylint: enable=line-too-long
result, _ = self._as_graph_def(from_version, add_shapes)
return result
def _is_function(self, name):
"""Tests whether 'name' is registered in this graph's function library.
Args:
name: string op name.
Returns:
bool indicating whether or not 'name' is registered in function library.
"""
return compat.as_str(name) in self._functions
def _get_function(self, name):
"""Returns the function definition for 'name'.
Args:
name: string function name.
Returns:
The function def proto.
"""
return self._functions.get(compat.as_str(name), None)
def _add_function(self, function):
"""Adds a function to the graph.
After the function has been added, you can call to the function by
passing the function name in place of an op name to
`Graph.create_op()`.
Args:
function: A `_DefinedFunction` object.
Raises:
ValueError: if another function is defined with the same name.
"""
name = function.name
# Sanity checks on gradient definition.
if (function.grad_func_name is not None) and (function.python_grad_func is
not None):
raise ValueError("Gradient defined twice for function %s" % name)
# Add function to graph
# pylint: disable=protected-access
gradient = (
function._grad_func._c_func.func if function._grad_func else None)
c_api.TF_GraphCopyFunction(self._c_graph, function._c_func.func, gradient)
# pylint: enable=protected-access
self._functions[compat.as_str(name)] = function
# Need a new-enough consumer to support the functions we add to the graph.
if self._graph_def_versions.min_consumer < 12:
self._graph_def_versions.min_consumer = 12
@property
def building_function(self):
"""Returns True iff this graph represents a function."""
return self._building_function
# Helper functions to create operations.
@deprecated_args(None,
"Shapes are always computed; don't use the compute_shapes "
"as it has no effect.", "compute_shapes")
def create_op(
self,
op_type,
inputs,
dtypes=None, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_shapes=True,
compute_device=True):
"""Creates an `Operation` in this graph.
This is a low-level interface for creating an `Operation`. Most
programs will not call this method directly, and instead use the
Python op constructors, such as `tf.constant()`, which add ops to
the default graph.
Args:
op_type: The `Operation` type to create. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
dtypes: (Optional) A list of `DType` objects that will be the types of the
tensors that the operation produces.
input_types: (Optional.) A list of `DType`s that will be the types of the
tensors that the operation consumes. By default, uses the base `DType`
of each input in `inputs`. Operations that expect reference-typed inputs
must specify `input_types` explicitly.
name: (Optional.) A string name for the operation. If not specified, a
name is generated based on `op_type`.
attrs: (Optional.) A dictionary where the key is the attribute name (a
string) and the value is the respective `attr` attribute of the
`NodeDef` proto that will represent the operation (an `AttrValue`
proto).
op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
the operation will have.
compute_shapes: (Optional.) Deprecated. Has no effect (shapes are always
computed).
compute_device: (Optional.) If True, device functions will be executed to
compute the device property of the Operation.
Raises:
TypeError: if any of the inputs is not a `Tensor`.
ValueError: if colocation conflicts with existing device assignment.
Returns:
An `Operation` object.
"""
del compute_shapes
self._check_not_finalized()
for idx, a in enumerate(inputs):
if not isinstance(a, Tensor):
raise TypeError("Input #%d is not a tensor: %s" % (idx, a))
if name is None:
name = op_type
# If a name ends with a '/' it is a "name scope" and we use it as-is,
# after removing the trailing '/'.
if name and name[-1] == "/":
name = name_from_scope_name(name)
else:
name = self.unique_name(name)
node_def = _NodeDef(op_type, name, device=None, attrs=attrs)
input_ops = set([t.op for t in inputs])
control_inputs = self._control_dependencies_for_inputs(input_ops)
# _create_op_helper mutates the new Operation. `_mutation_lock` ensures a
# Session.run call cannot occur between creating and mutating the op.
with self._mutation_lock():
ret = Operation(
node_def,
self,
inputs=inputs,
output_types=dtypes,
control_inputs=control_inputs,
input_types=input_types,
original_op=self._default_original_op,
op_def=op_def)
self._create_op_helper(ret, compute_device=compute_device)
return ret
def _create_op_from_tf_operation(self, c_op, compute_device=True):
"""Creates an `Operation` in this graph from the supplied TF_Operation.
This method is like create_op() except the new Operation is constructed
using `c_op`. The returned Operation will have `c_op` as its _c_op
field. This is used to create Operation objects around TF_Operations created
indirectly by the C API (e.g. by TF_ImportGraphDef, TF_FinishWhile).
This function does not call Operation._control_flow_post_processing or
Graph._control_dependencies_for_inputs (since the inputs may not be
available yet). The caller is responsible for calling these methods.
Args:
c_op: a wrapped TF_Operation
compute_device: (Optional.) If True, device functions will be executed to
compute the device property of the Operation.
Returns:
An `Operation` object.
"""
self._check_not_finalized()
ret = Operation(c_op, self)
# If a name_scope was created with ret.name but no nodes were created in it,
# the name will still appear in _names_in_use even though the name hasn't
# been used. This is ok, just leave _names_in_use as-is in this case.
# TODO(skyewm): make the C API guarantee no name conflicts.
name_key = ret.name.lower()
if name_key not in self._names_in_use:
self._names_in_use[name_key] = 1
self._create_op_helper(ret, compute_device=compute_device)
return ret
def _create_op_helper(self, op, compute_device=True):
"""Common logic for creating an op in this graph."""
# Apply any additional attributes requested. Do not overwrite any existing
# attributes.
for key, value in self._attr_scope_map.items():
try:
op.get_attr(key)
except ValueError:
if callable(value):
value = value(op.node_def)
if not isinstance(value, (type(None), attr_value_pb2.AttrValue)):
raise TypeError(
"Callable for scope map key '%s' must return either None or "
"an AttrValue protocol buffer; but it returned: %s" %
(key, value))
if value:
op._set_attr(key, value) # pylint: disable=protected-access
# Apply a kernel label if one has been specified for this op type.
try:
kernel_label = self._op_to_kernel_label_map[op.type]
op._set_attr("_kernel", # pylint: disable=protected-access
attr_value_pb2.AttrValue(s=compat.as_bytes(kernel_label)))
except KeyError:
pass
# Apply the overriding op type for gradients if one has been specified for
# this op type.
try:
mapped_op_type = self._gradient_override_map[op.type]
op._set_attr("_gradient_op_type", # pylint: disable=protected-access
attr_value_pb2.AttrValue(s=compat.as_bytes(mapped_op_type)))
except KeyError:
pass
self._record_op_seen_by_control_dependencies(op)
if compute_device:
self._apply_device_functions(op)
# Snapshot the colocation stack metadata before we might generate error
# messages using it. Note that this snapshot depends on the actual stack
# and is independent of the op's _class attribute.
# pylint: disable=protected-access
op._colocation_code_locations = self._snapshot_colocation_stack_metadata()
# pylint: enable=protected-access
if self._colocation_stack:
all_colocation_groups = []
for colocation_op in self._colocation_stack.peek_objs():
all_colocation_groups.extend(colocation_op.colocation_groups())
if colocation_op.device:
# pylint: disable=protected-access
op._set_device(colocation_op.device)
# pylint: enable=protected-access
all_colocation_groups = sorted(set(all_colocation_groups))
# pylint: disable=protected-access
op._set_attr(
"_class",
attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(s=all_colocation_groups)))
# pylint: enable=protected-access
# Sets "container" attribute if
# (1) self._container is not None
# (2) "is_stateful" is set in OpDef
# (3) "container" attribute is in OpDef
# (4) "container" attribute is None
if self._container and op.op_def.is_stateful:
try:
container_attr = op.get_attr("container")
except ValueError:
# "container" attribute is not in OpDef
pass
else:
if not container_attr:
op._set_attr("container", attr_value_pb2.AttrValue( # pylint: disable=protected-access
s=compat.as_bytes(self._container)))
def _add_new_tf_operations(self, compute_devices=True):
"""Creates `Operations` in this graph for any new TF_Operations.
This is useful for when TF_Operations are indirectly created by the C API
outside of the Operation constructor (e.g. by TF_ImportGraphDef,
TF_FinishWhile). This ensures there are corresponding Operations for all
TF_Operations in the underlying TF_Graph.
Args:
compute_devices: (Optional.) If True, device functions will be executed to
compute the device properties of each new Operation.
Returns:
A list of the new `Operation` objects.
"""
# Create all Operation objects before accessing their inputs since an op may
# be created before its inputs.
new_ops = [
self._create_op_from_tf_operation(c_op, compute_device=compute_devices)
for c_op in c_api_util.new_tf_operations(self)
]
# pylint: disable=protected-access
for op in new_ops:
new_control_inputs = self._control_dependencies_for_inputs(op.inputs)
op._add_control_inputs(new_control_inputs)
op._control_flow_post_processing()
# pylint: enable=protected-access
return new_ops
def as_graph_element(self, obj, allow_tensor=True, allow_operation=True):
"""Returns the object referred to by `obj`, as an `Operation` or `Tensor`.
This function validates that `obj` represents an element of this
graph, and gives an informative error message if it is not.
This function is the canonical way to get/validate an object of
one of the allowed types from an external argument reference in the
Session API.
This method may be called concurrently from multiple threads.
Args:
obj: A `Tensor`, an `Operation`, or the name of a tensor or operation. Can
also be any object with an `_as_graph_element()` method that returns a
value of one of these types.
allow_tensor: If true, `obj` may refer to a `Tensor`.
allow_operation: If true, `obj` may refer to an `Operation`.
Returns:
The `Tensor` or `Operation` in the Graph corresponding to `obj`.
Raises:
TypeError: If `obj` is not a type we support attempting to convert
to types.
ValueError: If `obj` is of an appropriate type but invalid. For
example, an invalid string.
KeyError: If `obj` is not an object in the graph.
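For example (a minimal sketch), a tensor name and the objects themselves
resolve to the same graph elements:
```python
with tf.Graph().as_default() as g:
  c = tf.constant(5.0, name="c")
assert g.as_graph_element("c:0") is c
assert g.as_graph_element(c.op) is c.op
```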
"""
if self._finalized:
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
with self._lock:
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
def _as_graph_element_locked(self, obj, allow_tensor, allow_operation):
"""See `Graph.as_graph_element()` for details."""
# The vast majority of this function is figuring
# out what an API user might be doing wrong, so
# that we can give helpful error messages.
#
# Ideally, it would be nice to split it up, but we
# need context to generate nice error messages.
if allow_tensor and allow_operation:
types_str = "Tensor or Operation"
elif allow_tensor:
types_str = "Tensor"
elif allow_operation:
types_str = "Operation"
else:
raise ValueError("allow_tensor and allow_operation can't both be False.")
temp_obj = _as_graph_element(obj)
if temp_obj is not None:
obj = temp_obj
# If obj appears to be a name...
if isinstance(obj, compat.bytes_or_text_types):
name = compat.as_str(obj)
if ":" in name and allow_tensor:
# Looks like a Tensor name and can be a Tensor.
try:
op_name, out_n = name.split(":")
out_n = int(out_n)
except:
raise ValueError("The name %s looks a like a Tensor name, but is "
"not a valid one. Tensor names must be of the "
"form \"<op_name>:<output_index>\"." % repr(name))
if op_name in self._nodes_by_name:
op = self._nodes_by_name[op_name]
else:
raise KeyError("The name %s refers to a Tensor which does not "
"exist. The operation, %s, does not exist in the "
"graph." % (repr(name), repr(op_name)))
try:
return op.outputs[out_n]
except:
raise KeyError("The name %s refers to a Tensor which does not "
"exist. The operation, %s, exists but only has "
"%s outputs." %
(repr(name), repr(op_name), len(op.outputs)))
elif ":" in name and not allow_tensor:
# Looks like a Tensor name but can't be a Tensor.
raise ValueError("Name %s appears to refer to a Tensor, not a %s." %
(repr(name), types_str))
elif ":" not in name and allow_operation:
# Looks like an Operation name and can be an Operation.
if name not in self._nodes_by_name:
raise KeyError("The name %s refers to an Operation not in the "
"graph." % repr(name))
return self._nodes_by_name[name]
elif ":" not in name and not allow_operation:
# Looks like an Operation name but can't be an Operation.
if name in self._nodes_by_name:
# Yep, it's an Operation name
err_msg = ("The name %s refers to an Operation, not a %s." %
(repr(name), types_str))
else:
err_msg = ("The name %s looks like an (invalid) Operation name, "
"not a %s." % (repr(name), types_str))
err_msg += (" Tensor names must be of the form "
"\"<op_name>:<output_index>\".")
raise ValueError(err_msg)
elif isinstance(obj, Tensor) and allow_tensor:
# Actually obj is just the object it's referring to.
if obj.graph is not self:
raise ValueError("Tensor %s is not an element of this graph." % obj)
return obj
elif isinstance(obj, Operation) and allow_operation:
# Actually obj is just the object it's referring to.
if obj.graph is not self:
raise ValueError("Operation %s is not an element of this graph." % obj)
return obj
else:
# We give up!
raise TypeError("Can not convert a %s into a %s." %
(type(obj).__name__, types_str))
def get_operations(self):
"""Return the list of operations in the graph.
You can modify the operations in place, but modifications
to the list such as inserts/delete have no effect on the
list of operations known to the graph.
This method may be called concurrently from multiple threads.
Returns:
A list of Operations.
"""
if self._finalized:
return list(self._nodes_by_id.values())
with self._lock:
return list(self._nodes_by_id.values())
def get_operation_by_name(self, name):
"""Returns the `Operation` with the given `name`.
This method may be called concurrently from multiple threads.
Args:
name: The name of the `Operation` to return.
Returns:
The `Operation` with the given `name`.
Raises:
TypeError: If `name` is not a string.
KeyError: If `name` does not correspond to an operation in this graph.
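For example, if the graph contains a constant created with
`tf.constant(1.0, name="c")`, then `g.get_operation_by_name("c")` returns
the corresponding `Const` operation.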
"""
if not isinstance(name, six.string_types):
raise TypeError("Operation names are strings (or similar), not %s." %
type(name).__name__)
return self.as_graph_element(name, allow_tensor=False, allow_operation=True)
def _get_operation_by_name_unsafe(self, name):
"""Returns the `Operation` with the given `name`.
This is an internal, unsafe version of get_operation_by_name. It skips many
checks and does not have user-friendly error messages, but runs considerably
faster. This method may be called concurrently from multiple threads.
Args:
name: The name of the `Operation` to return.
Returns:
The `Operation` with the given `name`.
Raises:
KeyError: If `name` does not correspond to an operation in this graph.
"""
if self._finalized:
return self._nodes_by_name[name]
with self._lock:
return self._nodes_by_name[name]
def _get_operation_by_tf_operation(self, tf_oper):
op_name = c_api.TF_OperationName(tf_oper)
return self._get_operation_by_name_unsafe(op_name)
def get_tensor_by_name(self, name):
"""Returns the `Tensor` with the given `name`.
This method may be called concurrently from multiple threads.
Args:
name: The name of the `Tensor` to return.
Returns:
The `Tensor` with the given `name`.
Raises:
TypeError: If `name` is not a string.
KeyError: If `name` does not correspond to a tensor in this graph.
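For example (a minimal sketch):
```python
with tf.Graph().as_default() as g:
  c = tf.constant(1.0, name="c")
assert g.get_tensor_by_name("c:0") is c
```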
"""
# Names should be strings.
if not isinstance(name, six.string_types):
raise TypeError("Tensor names are strings (or similar), not %s." %
type(name).__name__)
return self.as_graph_element(name, allow_tensor=True, allow_operation=False)
def _get_tensor_by_tf_output(self, tf_output):
"""Returns the `Tensor` representing `tf_output`.
Note that there is only one such `Tensor`, i.e. multiple calls to this
function with the same TF_Output value will always return the same `Tensor`
object.
Args:
tf_output: A wrapped `TF_Output` (the C API equivalent of `Tensor`).
Returns:
The `Tensor` that represents `tf_output`.
"""
op = self._get_operation_by_tf_operation(tf_output.oper)
return op.outputs[tf_output.index]
def _next_id(self):
"""Id for next Operation instance. Also increments the internal id."""
self._check_not_finalized()
with self._lock:
self._next_id_counter += 1
return self._next_id_counter
@property
def _last_id(self):
return self._next_id_counter
def _get_op_def(self, type): # pylint: disable=redefined-builtin
"""Returns the `OpDef` proto for `type`. `type` is a string."""
with c_api_util.tf_buffer() as buf:
# pylint: disable=protected-access
c_api.TF_GraphGetOpDef(self._c_graph, compat.as_bytes(type), buf)
# pylint: enable=protected-access
data = c_api.TF_GetBuffer(buf)
op_def = op_def_pb2.OpDef()
op_def.ParseFromString(compat.as_bytes(data))
return op_def
def as_default(self):
"""Returns a context manager that makes this `Graph` the default graph.
This method should be used if you want to create multiple graphs
in the same process. For convenience, a global default graph is
provided, and all ops will be added to this graph if you do not
create a new graph explicitly.
Use this method with the `with` keyword to specify that ops created within
the scope of a block should be added to this graph. In this case, once
the scope of the `with` is exited, the previous default graph is set again
as default. There is a stack, so it's ok to have multiple nested levels
of `as_default` calls.
The default graph is a property of the current thread. If you
create a new thread, and wish to use the default graph in that
thread, you must explicitly add a `with g.as_default():` in that
thread's function.
The following code examples are equivalent:
```python
# 1. Using Graph.as_default():
g = tf.Graph()
with g.as_default():
c = tf.constant(5.0)
assert c.graph is g
# 2. Constructing and making default:
with tf.Graph().as_default() as g:
c = tf.constant(5.0)
assert c.graph is g
```
If eager execution is enabled ops created under this context manager will be
added to the graph instead of executed eagerly.
Returns:
A context manager for using this graph as the default graph.
"""
return _default_graph_stack.get_controller(self)
@property
def collections(self):
"""Returns the names of the collections known to this graph."""
return list(self._collections)
def add_to_collection(self, name, value):
"""Stores `value` in the collection with the given `name`.
Note that collections are not sets, so it is possible to add a value to
a collection several times.
Args:
name: The key for the collection. The `GraphKeys` class contains many
standard names for collections.
value: The value to add to the collection.
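For example (a minimal sketch; "my_collection" is an arbitrary key):
```python
g = tf.Graph()
g.add_to_collection("my_collection", 42)
g.add_to_collection("my_collection", 42)
assert g.get_collection("my_collection") == [42, 42]  # Not a set.
```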
""" # pylint: disable=g-doc-exception
self._check_not_finalized()
with self._lock:
if name not in self._collections:
self._collections[name] = [value]
else:
self._collections[name].append(value)
def add_to_collections(self, names, value):
"""Stores `value` in the collections given by `names`.
Note that collections are not sets, so it is possible to add a value to
a collection several times. This function makes sure that duplicates in
`names` are ignored, but it will not check for pre-existing membership of
`value` in any of the collections in `names`.
`names` can be any iterable, but if `names` is a string, it is treated as a
single collection name.
Args:
names: The keys for the collections to add to. The `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collections.
"""
# Make sure names are unique, but treat strings as a single collection name
names = (names,) if isinstance(names, six.string_types) else set(names)
for name in names:
self.add_to_collection(name, value)
def get_collection_ref(self, name):
"""Returns a list of values in the collection with the given `name`.
If the collection exists, this returns the list itself, which can
be modified in place to change the collection. If the collection does
not exist, it is created as an empty list and the list is returned.
This is different from `get_collection()` which always returns a copy of
the collection list if it exists and never creates an empty collection.
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
Returns:
The list of values in the collection with the given `name`, or an empty
list if no value has been added to that collection.
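For example (a minimal sketch), mutating the returned list changes the
collection itself:
```python
g = tf.Graph()
ref = g.get_collection_ref("my_collection")
ref.append("x")
assert g.get_collection("my_collection") == ["x"]
```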
""" # pylint: disable=g-doc-exception
with self._lock:
coll_list = self._collections.get(name, None)
if coll_list is None:
coll_list = []
self._collections[name] = coll_list
return coll_list
def get_collection(self, name, scope=None):
"""Returns a list of values in the collection with the given `name`.
This is different from `get_collection_ref()`, which always returns the
actual collection list if it exists: this method returns a new list each
time it is called.
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
scope: (Optional.) A string. If supplied, the resulting list is filtered
to include only items whose `name` attribute matches `scope` using
`re.match`. Items without a `name` attribute are never returned if a
scope is supplied. The choice of `re.match` means that a `scope` without
special tokens filters by prefix.
Returns:
The list of values in the collection with the given `name`, or
an empty list if no value has been added to that collection. The
list contains the values in the order under which they were
collected.
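For example (a minimal sketch; `Named` is a stand-in for any object with a
`name` attribute), `scope` filters by name prefix:
```python
class Named(object):
  def __init__(self, name):
    self.name = name
g = tf.Graph()
g.add_to_collection("items", Named("layer1/w"))
g.add_to_collection("items", Named("layer2/w"))
items = g.get_collection("items", scope="layer1")
assert [x.name for x in items] == ["layer1/w"]
```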
""" # pylint: disable=g-doc-exception
with self._lock:
collection = self._collections.get(name, None)
if collection is None:
return []
if scope is None:
return list(collection)
else:
c = []
regex = re.compile(scope)
for item in collection:
if hasattr(item, "name") and regex.match(item.name):
c.append(item)
return c
def get_all_collection_keys(self):
"""Returns a list of collections used in this graph."""
with self._lock:
return [x for x in self._collections if isinstance(x, six.string_types)]
def clear_collection(self, name):
"""Clears all values in a collection.
Args:
name: The key for the collection. The `GraphKeys` class contains many
standard names for collections.
"""
self._check_not_finalized()
with self._lock:
if name in self._collections:
del self._collections[name]
@tf_contextlib.contextmanager
def _original_op(self, op):
"""Python 'with' handler to help annotate ops with their originator.
An op may have an 'original_op' property that indicates the op on which
it was based. For example a replica op is based on the op that was
replicated and a gradient op is based on the op that was differentiated.
All ops created in the scope of this 'with' handler will have
the given 'op' as their original op.
Args:
op: The Operation that all ops created in this scope will have as their
original op.
Yields:
Nothing.
"""
old_original_op = self._default_original_op
self._default_original_op = op
try:
yield
finally:
self._default_original_op = old_original_op
@property
def _name_stack(self):
# This may be called from a thread where name_stack doesn't yet exist.
if not hasattr(self._thread_local, "_name_stack"):
self._thread_local._name_stack = ""
return self._thread_local._name_stack
@_name_stack.setter
def _name_stack(self, name_stack):
self._thread_local._name_stack = name_stack
# pylint: disable=g-doc-return-or-yield,line-too-long
@tf_contextlib.contextmanager
def name_scope(self, name):
"""Returns a context manager that creates hierarchical names for operations.
A graph maintains a stack of name scopes. A `with name_scope(...):`
statement pushes a new name onto the stack for the lifetime of the context.
The `name` argument will be interpreted as follows:
* A string (not ending with '/') will create a new name scope, in which
`name` is appended to the prefix of all operations created in the
context. If `name` has been used before, it will be made unique by
calling `self.unique_name(name)`.
* A scope previously captured from a `with g.name_scope(...) as
scope:` statement will be treated as an "absolute" name scope, which
makes it possible to re-enter existing scopes.
* A value of `None` or the empty string will reset the current name scope
to the top-level (empty) name scope.
For example:
```python
with tf.Graph().as_default() as g:
c = tf.constant(5.0, name="c")
assert c.op.name == "c"
c_1 = tf.constant(6.0, name="c")
assert c_1.op.name == "c_1"
# Creates a scope called "nested"
with g.name_scope("nested") as scope:
nested_c = tf.constant(10.0, name="c")
assert nested_c.op.name == "nested/c"
# Creates a nested scope called "inner".
with g.name_scope("inner"):
nested_inner_c = tf.constant(20.0, name="c")
assert nested_inner_c.op.name == "nested/inner/c"
# Create a nested scope called "inner_1".
with g.name_scope("inner"):
nested_inner_1_c = tf.constant(30.0, name="c")
assert nested_inner_1_c.op.name == "nested/inner_1/c"
# Treats `scope` as an absolute name scope, and
# switches to the "nested/" scope.
with g.name_scope(scope):
nested_d = tf.constant(40.0, name="d")
assert nested_d.op.name == "nested/d"
with g.name_scope(""):
e = tf.constant(50.0, name="e")
assert e.op.name == "e"
```
The name of the scope itself can be captured by `with
g.name_scope(...) as scope:`, which stores the name of the scope
in the variable `scope`. This value can be used to name an
operation that represents the overall result of executing the ops
in a scope. For example:
```python
inputs = tf.constant(...)
with g.name_scope('my_layer') as scope:
weights = tf.Variable(..., name="weights")
biases = tf.Variable(..., name="biases")
affine = tf.matmul(inputs, weights) + biases
output = tf.nn.relu(affine, name=scope)
```
NOTE: This constructor validates the given `name`. Valid scope
names match one of the following regular expressions:
[A-Za-z0-9.][A-Za-z0-9_.\\-/]* (for scopes at the root)
[A-Za-z0-9_.\\-/]* (for other scopes)
Args:
name: A name for the scope.
Returns:
A context manager that installs `name` as a new name scope.
Raises:
ValueError: If `name` is not a valid scope name, according to the rules
above.
"""
if name:
if isinstance(name, compat.bytes_or_text_types):
name = compat.as_str(name)
if self._name_stack:
# Scopes created in a nested scope may have initial characters
# that are illegal as the initial character of an op name
# (viz. '-', '\', '/', and '_').
if not _VALID_SCOPE_NAME_REGEX.match(name):
raise ValueError("'%s' is not a valid scope name" % name)
else:
# Scopes created in the root must match the more restrictive
# op name regex, which constrains the initial character.
if not _VALID_OP_NAME_REGEX.match(name):
raise ValueError("'%s' is not a valid scope name" % name)
old_stack = self._name_stack
if not name: # Both for name=None and name="" we re-set to empty scope.
new_stack = None
elif name[-1] == "/":
new_stack = name_from_scope_name(name)
else:
new_stack = self.unique_name(name)
self._name_stack = new_stack
try:
yield "" if new_stack is None else new_stack + "/"
finally:
self._name_stack = old_stack
# pylint: enable=g-doc-return-or-yield,line-too-long
def unique_name(self, name, mark_as_used=True):
"""Return a unique operation name for `name`.
Note: You rarely need to call `unique_name()` directly. Most of
the time you just need to create `with g.name_scope()` blocks to
generate structured names.
`unique_name` is used to generate structured names, separated by
`"/"`, to help identify operations when debugging a graph.
Operation names are displayed in error messages reported by the
TensorFlow runtime, and in various visualization tools such as
TensorBoard.
If `mark_as_used` is set to `True`, which is the default, a new
unique name is created and marked as in use. If it's set to `False`,
the unique name is returned without actually being marked as used.
This is useful when the caller simply wants to know what name would be
created.
Args:
name: The name for an operation.
mark_as_used: Whether to mark this name as being used.
Returns:
A string to be passed to `create_op()` that will be used
to name the operation being created.
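For example (a minimal sketch):
```python
g = tf.Graph()
assert g.unique_name("foo") == "foo"
assert g.unique_name("foo") == "foo_1"
assert g.unique_name("foo", mark_as_used=False) == "foo_2"
assert g.unique_name("foo") == "foo_2"
```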
"""
if self._name_stack:
name = self._name_stack + "/" + name
# For the sake of checking for names in use, we treat names as case
# insensitive (e.g. foo = Foo).
name_key = name.lower()
i = self._names_in_use.get(name_key, 0)
# Increment the number for "name_key".
if mark_as_used:
self._names_in_use[name_key] = i + 1
if i > 0:
base_name_key = name_key
# Make sure the composed name key is not already used.
while name_key in self._names_in_use:
name_key = "%s_%d" % (base_name_key, i)
i += 1
# Mark the composed name_key as used in case someone wants
# to call unique_name("name_1").
if mark_as_used:
self._names_in_use[name_key] = 1
# Return the new name with the original capitalization of the given name.
name = "%s_%d" % (name, i - 1)
return name
def get_name_scope(self):
"""Returns the current name scope.
For example:
```python
with tf.name_scope('scope1'):
with tf.name_scope('scope2'):
print(tf.compat.v1.get_default_graph().get_name_scope())
```
would print the string `scope1/scope2`.
Returns:
A string representing the current name scope.
"""
return self._name_stack
@tf_contextlib.contextmanager
def _colocate_with_for_gradient(self, op, gradient_uid,
ignore_existing=False):
with self.colocate_with(op, ignore_existing):
if gradient_uid is not None and self._control_flow_context is not None:
self._control_flow_context.EnterGradientColocation(op, gradient_uid)
try:
yield
finally:
self._control_flow_context.ExitGradientColocation(op, gradient_uid)
else:
yield
@tf_contextlib.contextmanager
def colocate_with(self, op, ignore_existing=False):
"""Returns a context manager that specifies an op to colocate with.
Note: this function is not for public use, only for internal libraries.
For example:
```python
a = tf.Variable([1.0])
with g.colocate_with(a):
b = tf.constant(1.0)
c = tf.add(a, b)
```
`b` and `c` will always be colocated with `a`, no matter where `a`
is eventually placed.
**NOTE** Using a colocation scope resets any existing device constraints.
If `op` is `None` then `ignore_existing` must be `True` and the new
scope resets all colocation and device constraints.
Args:
op: The op to colocate all created ops with, or `None`.
ignore_existing: If true, only applies colocation of this op within the
context, rather than applying all colocation properties on the stack.
If `op` is `None`, this value must be `True`.
Raises:
ValueError: if op is None but ignore_existing is False.
Yields:
A context manager that specifies the op with which to colocate
newly created ops.
"""
if op is None and not ignore_existing:
raise ValueError("Trying to reset colocation (op is None) but "
"ignore_existing is not True")
op = _op_to_colocate_with(op)
# By default, colocate_with resets the device function stack,
# since colocate_with is typically used in specific internal
# library functions where colocation is intended to be "stronger"
# than device functions.
#
# In the future, a caller may specify that device_functions win
# over colocation, in which case we can add support.
device_fn_tmp = self._device_function_stack
self._device_function_stack = traceable_stack.TraceableStack()
if ignore_existing:
current_stack = self._colocation_stack
self._colocation_stack = traceable_stack.TraceableStack()
if op is not None:
# offset refers to the stack frame used for storing code location.
# We use 4, the sum of 1 to use our caller's stack frame and 3
# to jump over layers of context managers above us.
self._colocation_stack.push_obj(op, offset=4)
try:
yield
finally:
# Restore device function stack
self._device_function_stack = device_fn_tmp
if op is not None:
self._colocation_stack.pop_obj()
# Reset the colocation stack if requested.
if ignore_existing:
self._colocation_stack = current_stack
def _add_device_to_stack(self, device_name_or_function, offset=0):
"""Add device to stack manually, separate from a context manager."""
total_offset = 1 + offset
spec = _UserDeviceSpec(device_name_or_function)
self._device_function_stack.push_obj(spec, offset=total_offset)
return spec
@tf_contextlib.contextmanager
def device(self, device_name_or_function):
# pylint: disable=line-too-long
"""Returns a context manager that specifies the default device to use.
The `device_name_or_function` argument may either be a device name
string, a device function, or None:
* If it is a device name string, all operations constructed in
this context will be assigned to the device with that name, unless
overridden by a nested `device()` context.
* If it is a function, it will be treated as a function from
Operation objects to device name strings, and invoked each time
a new Operation is created. The Operation will be assigned to
the device with the returned name.
* If it is None, all `device()` invocations from the enclosing context
will be ignored.
For information about the valid syntax of device name strings, see
the documentation in
[`DeviceNameUtils`](https://www.tensorflow.org/code/tensorflow/core/util/device_name_utils.h).
For example:
```python
with g.device('/device:GPU:0'):
# All operations constructed in this context will be placed
# on GPU 0.
with g.device(None):
# All operations constructed in this context will have no
# assigned device.
# Defines a function from `Operation` to device string.
def matmul_on_gpu(n):
if n.type == "MatMul":
return "/device:GPU:0"
else:
return "/cpu:0"
with g.device(matmul_on_gpu):
# All operations of type "MatMul" constructed in this context
# will be placed on GPU 0; all other operations will be placed
# on CPU 0.
```
**N.B.** The device scope may be overridden by op wrappers or
other library code. For example, a variable assignment op
`v.assign()` must be colocated with the `tf.Variable` `v`, and
incompatible device scopes will be ignored.
Args:
device_name_or_function: The device name or function to use in the
context.
Yields:
A context manager that specifies the default device to use for newly
created ops.
Raises:
RuntimeError: If device scopes are not properly nested.
"""
self._add_device_to_stack(device_name_or_function, offset=2)
old_top_of_stack = self._device_function_stack.peek_top_obj()
try:
yield
finally:
new_top_of_stack = self._device_function_stack.peek_top_obj()
if old_top_of_stack is not new_top_of_stack:
raise RuntimeError("Exiting device scope without proper scope nesting.")
self._device_function_stack.pop_obj()
def _apply_device_functions(self, op):
"""Applies the current device function stack to the given operation."""
# Apply any device functions in LIFO order, so that the most recently
# pushed function has the first chance to apply a device to the op.
# We apply here because the result can depend on the Operation's
# signature, which is computed in the Operation constructor.
# pylint: disable=protected-access
prior_device_string = None
for device_spec in self._device_function_stack.peek_objs():
if device_spec.is_null_merge:
continue
if device_spec.function is None:
break
device_string = device_spec.string_merge(op)
# Take advantage of the fact that None is a singleton and Python interns
# strings, since identity checks are faster than equality checks.
if device_string is not prior_device_string:
op._set_device_from_string(device_string)
prior_device_string = device_string
op._device_code_locations = self._snapshot_device_function_stack_metadata()
# pylint: enable=protected-access
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def container(self, container_name):
"""Returns a context manager that specifies the resource container to use.
Stateful operations, such as variables and queues, can maintain their
states on devices so that they can be shared by multiple processes.
A resource container is a string name under which these stateful
operations are tracked. These resources can be released or cleared
with `tf.Session.reset()`.
For example:
```python
with g.container('experiment0'):
# All stateful Operations constructed in this context will be placed
# in resource container "experiment0".
v1 = tf.Variable([1.0])
v2 = tf.Variable([2.0])
with g.container("experiment1"):
# All stateful Operations constructed in this context will be
# placed in resource container "experiment1".
v3 = tf.Variable([3.0])
q1 = tf.queue.FIFOQueue(10, tf.float32)
# All stateful Operations constructed in this context will be
# created in resource container "experiment0".
v4 = tf.Variable([4.0])
q1 = tf.queue.FIFOQueue(20, tf.float32)
with g.container(""):
# All stateful Operations constructed in this context will be
# placed in the default resource container.
v5 = tf.Variable([5.0])
q3 = tf.queue.FIFOQueue(30, tf.float32)
# Resets container "experiment0", after which the state of v1, v2, v4, q1
# will become undefined (such as uninitialized).
tf.Session.reset(target, ["experiment0"])
```
Args:
container_name: container name string.
Returns:
A context manager for defining resource containers for stateful ops,
yields the container name.
"""
original_container = self._container
self._container = container_name
try:
yield self._container
finally:
self._container = original_container
# pylint: enable=g-doc-return-or-yield
class _ControlDependenciesController(object):
"""Context manager for `control_dependencies()`."""
def __init__(self, graph, control_inputs):
"""Create a new `_ControlDependenciesController`.
A `_ControlDependenciesController` is the context manager for
`with tf.control_dependencies()` blocks. These normally nest,
as described in the documentation for `control_dependencies()`.
The `control_inputs` argument lists control dependencies that must be
added to the current set of control dependencies. Because of
uniquification the set can be empty even if the caller passed a list of
ops. The special value `None` indicates that we want to start a new
empty set of control dependencies instead of extending the current set.
In that case we also clear the current control flow context, which is an
additional mechanism to add control dependencies.
Args:
graph: The graph that this controller is managing.
control_inputs: List of ops to use as control inputs in addition to the
current control dependencies. None to indicate that the dependencies
should be cleared.
"""
self._graph = graph
if control_inputs is None:
self._control_inputs_val = []
self._new_stack = True
else:
self._control_inputs_val = control_inputs
self._new_stack = False
self._seen_nodes = set()
self._old_stack = None
self._old_control_flow_context = None
# pylint: disable=protected-access
def __enter__(self):
if self._new_stack:
# Clear the control_dependencies graph.
self._old_stack = self._graph._control_dependencies_stack
self._graph._control_dependencies_stack = []
# Clear the control_flow_context too.
self._old_control_flow_context = self._graph._get_control_flow_context()
self._graph._set_control_flow_context(None)
self._graph._push_control_dependencies_controller(self)
def __exit__(self, unused_type, unused_value, unused_traceback):
self._graph._pop_control_dependencies_controller(self)
if self._new_stack:
self._graph._control_dependencies_stack = self._old_stack
self._graph._set_control_flow_context(self._old_control_flow_context)
# pylint: enable=protected-access
@property
def control_inputs(self):
return self._control_inputs_val
def add_op(self, op):
self._seen_nodes.add(op)
def op_in_group(self, op):
return op in self._seen_nodes
def _push_control_dependencies_controller(self, controller):
self._control_dependencies_stack.append(controller)
def _pop_control_dependencies_controller(self, controller):
assert self._control_dependencies_stack[-1] is controller
self._control_dependencies_stack.pop()
def _current_control_dependencies(self):
ret = set()
for controller in self._control_dependencies_stack:
for op in controller.control_inputs:
ret.add(op)
return ret
def _control_dependencies_for_inputs(self, input_ops):
"""For an op that takes `input_ops` as inputs, compute control inputs.
The returned control dependencies should yield an execution that
is equivalent to adding all control inputs in
self._control_dependencies_stack to a newly created op. However,
this function attempts to prune the returned control dependencies
by observing that nodes created within the same `with
control_dependencies(...):` block may have data dependencies that make
the explicit approach redundant.
Args:
input_ops: The data input ops for an op to be created.
Returns:
A list of control inputs for the op to be created.
"""
ret = []
for controller in self._control_dependencies_stack:
# If any of the input_ops already depends on the inputs from controller,
# we say that the new op is dominated (by that input), and we therefore
# do not need to add control dependencies for this controller's inputs.
dominated = False
for op in input_ops:
if controller.op_in_group(op):
dominated = True
break
if not dominated:
# Don't add a control input if we already have a data dependency on it.
# NOTE(mrry): We do not currently track transitive data dependencies,
# so we may add redundant control inputs.
ret.extend([c for c in controller.control_inputs if c not in input_ops])
return ret
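# Usage sketch (illustrative only; assumes TensorFlow is importable as `tf`
# and graph mode): the pruning above means an op whose data inputs were
# created inside the same `control_dependencies` block does not receive a
# redundant explicit control input.
#
#   g = tf.Graph()
#   with g.as_default():
#     a = tf.constant(1.0)
#     x = tf.constant(2.0)
#     with g.control_dependencies([a.op]):
#       b = tf.identity(x)   # no data dependency on `a`, so `b` gets the
#                            # explicit control input on `a.op`
#       c = tf.identity(b)   # `c` depends on `b`, created inside this block,
#                            # so the control input is pruned
#   assert a.op in b.op.control_inputs
#   assert a.op not in c.op.control_inputs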
def _record_op_seen_by_control_dependencies(self, op):
"""Record that the given op depends on all registered control dependencies.
Args:
op: An Operation.
"""
for controller in self._control_dependencies_stack:
controller.add_op(op)
def control_dependencies(self, control_inputs):
"""Returns a context manager that specifies control dependencies.
Use with the `with` keyword to specify that all operations constructed
within the context should have control dependencies on
`control_inputs`. For example:
```python
with g.control_dependencies([a, b, c]):
# `d` and `e` will only run after `a`, `b`, and `c` have executed.
d = ...
e = ...
```
Multiple calls to `control_dependencies()` can be nested, and in
that case a new `Operation` will have control dependencies on the union
of `control_inputs` from all active contexts.
```python
with g.control_dependencies([a, b]):
# Ops constructed here run after `a` and `b`.
with g.control_dependencies([c, d]):
# Ops constructed here run after `a`, `b`, `c`, and `d`.
```
You can pass None to clear the control dependencies:
```python
with g.control_dependencies([a, b]):
# Ops constructed here run after `a` and `b`.
with g.control_dependencies(None):
# Ops constructed here run normally, not waiting for either `a` or `b`.
with g.control_dependencies([c, d]):
# Ops constructed here run after `c` and `d`, also not waiting
# for either `a` or `b`.
```
*N.B.* The control dependencies context applies *only* to ops that
are constructed within the context. Merely using an op or tensor
in the context does not add a control dependency. The following
example illustrates this point:
```python
# WRONG
def my_func(pred, tensor):
t = tf.matmul(tensor, tensor)
with tf.control_dependencies([pred]):
# The matmul op is created outside the context, so no control
# dependency will be added.
return t
# RIGHT
def my_func(pred, tensor):
with tf.control_dependencies([pred]):
# The matmul op is created in the context, so a control dependency
# will be added.
return tf.matmul(tensor, tensor)
```
Also note that though execution of ops created under this scope will trigger
execution of the dependencies, the ops created under this scope might still
be pruned from a normal TensorFlow graph. For example, in the following
snippet of code the dependencies are never executed:
```python
loss = model.loss()
with tf.control_dependencies(dependencies):
loss = loss + tf.constant(1) # note: dependencies ignored in the
# backward pass
return tf.gradients(loss, model.variables)
```
This is because evaluating the gradient graph does not require evaluating
the constant(1) op created in the forward pass.
Args:
control_inputs: A list of `Operation` or `Tensor` objects which must be
executed or computed before running the operations defined in the
context. Can also be `None` to clear the control dependencies.
Returns:
A context manager that specifies control dependencies for all
operations constructed within the context.
Raises:
TypeError: If `control_inputs` is not a list of `Operation` or
`Tensor` objects.
"""
if control_inputs is None:
return self._ControlDependenciesController(self, None)
# First convert the inputs to ops, and deduplicate them.
# NOTE(mrry): Other than deduplication, we do not currently track direct
# or indirect dependencies between control_inputs, which may result in
# redundant control inputs.
control_ops = []
current = self._current_control_dependencies()
for c in control_inputs:
# The hasattr(c, "_handle") check is designed to match ResourceVariables. This is so
# control dependencies on a variable or on an unread variable don't
# trigger reads.
if (isinstance(c, IndexedSlices) or
(hasattr(c, "_handle") and hasattr(c, "op"))):
c = c.op
c = self.as_graph_element(c)
if isinstance(c, Tensor):
c = c.op
elif not isinstance(c, Operation):
raise TypeError("Control input must be Operation or Tensor: %s" % c)
if c not in current:
control_ops.append(c)
current.add(c)
return self._ControlDependenciesController(self, control_ops)
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _attr_scope(self, attr_map):
"""EXPERIMENTAL: A context manager for setting attributes on operators.
This context manager can be used to add additional
attributes to operators within the scope of the context.
For example:
with ops.Graph().as_default() as g:
f_1 = Foo() # No extra attributes
with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=False)}):
f_2 = Foo() # Additional attribute _a=False
with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=True)}):
f_3 = Foo() # Additional attribute _a=True
with g._attr_scope({"_a": None}):
f_4 = Foo() # No additional attributes.
Args:
attr_map: A dictionary mapping attr name strings to AttrValue protocol
buffers or None.
Returns:
A context manager that sets the kernel label to be used for one or more
ops created in that context.
Raises:
TypeError: If attr_map is not a dictionary mapping
strings to AttrValue protobufs.
"""
if not isinstance(attr_map, dict):
raise TypeError("attr_map must be a dictionary mapping "
"strings to AttrValue protocol buffers")
# The saved_attrs dictionary stores any currently-set labels that
# will be overridden by this context manager.
saved_attrs = {}
# Install the given attribute
for name, attr in attr_map.items():
if not (isinstance(name, six.string_types) and
(isinstance(attr, (type(None), attr_value_pb2.AttrValue)) or
callable(attr))):
raise TypeError("attr_map must be a dictionary mapping "
"strings to AttrValue protocol buffers or "
"callables that emit AttrValue protocol buffers")
try:
saved_attrs[name] = self._attr_scope_map[name]
except KeyError:
pass
if attr is None:
del self._attr_scope_map[name]
else:
self._attr_scope_map[name] = attr
try:
yield # The code within the context runs here.
finally:
# Remove the attributes set for this context, and restore any saved
# attributes.
for name, attr in attr_map.items():
try:
self._attr_scope_map[name] = saved_attrs[name]
except KeyError:
del self._attr_scope_map[name]
# pylint: enable=g-doc-return-or-yield
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _kernel_label_map(self, op_to_kernel_label_map):
"""EXPERIMENTAL: A context manager for setting kernel labels.
This context manager can be used to select particular
implementations of kernels within the scope of the context.
For example:
with ops.Graph().as_default() as g:
f_1 = Foo() # Uses the default registered kernel for the Foo op.
with g.kernel_label_map({"Foo": "v_2"}):
f_2 = Foo() # Uses the registered kernel with label "v_2"
# for the Foo op.
with g.kernel_label_map({"Foo": "v_3"}):
f_3 = Foo() # Uses the registered kernel with label "v_3"
# for the Foo op.
with g.kernel_label_map({"Foo": ""}):
f_4 = Foo() # Uses the default registered kernel
# for the Foo op.
Args:
op_to_kernel_label_map: A dictionary mapping op type strings to kernel
label strings.
Returns:
A context manager that sets the kernel label to be used for one or more
ops created in that context.
Raises:
TypeError: If op_to_kernel_label_map is not a dictionary mapping
strings to strings.
"""
if not isinstance(op_to_kernel_label_map, dict):
raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
"strings to strings")
# The saved_labels dictionary stores any currently-set labels that
# will be overridden by this context manager.
saved_labels = {}
# Install the given label
for op_type, label in op_to_kernel_label_map.items():
if not (isinstance(op_type, six.string_types) and
isinstance(label, six.string_types)):
raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
"strings to strings")
try:
saved_labels[op_type] = self._op_to_kernel_label_map[op_type]
except KeyError:
pass
self._op_to_kernel_label_map[op_type] = label
try:
yield # The code within the context runs here.
finally:
# Remove the labels set for this context, and restore any saved labels.
for op_type, label in op_to_kernel_label_map.items():
try:
self._op_to_kernel_label_map[op_type] = saved_labels[op_type]
except KeyError:
del self._op_to_kernel_label_map[op_type]
# pylint: enable=g-doc-return-or-yield
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def gradient_override_map(self, op_type_map):
"""EXPERIMENTAL: A context manager for overriding gradient functions.
This context manager can be used to override the gradient function
that will be used for ops within the scope of the context.
For example:
```python
@tf.RegisterGradient("CustomSquare")
def _custom_square_grad(op, grad):
# ...
with tf.Graph().as_default() as g:
c = tf.constant(5.0)
s_1 = tf.square(c) # Uses the default gradient for tf.square.
with g.gradient_override_map({"Square": "CustomSquare"}):
s_2 = tf.square(c) # Uses _custom_square_grad to compute the
# gradient of s_2.
```
Args:
op_type_map: A dictionary mapping op type strings to alternative op type
strings.
Returns:
A context manager that sets the alternative op type to be used for one
or more ops created in that context.
Raises:
TypeError: If `op_type_map` is not a dictionary mapping strings to
strings.
"""
if not isinstance(op_type_map, dict):
raise TypeError("op_type_map must be a dictionary mapping "
"strings to strings")
# The saved_mappings dictionary stores any currently-set mappings that
# will be overridden by this context manager.
saved_mappings = {}
# Install the given label
for op_type, mapped_op_type in op_type_map.items():
if not (isinstance(op_type, six.string_types) and
isinstance(mapped_op_type, six.string_types)):
raise TypeError("op_type_map must be a dictionary mapping "
"strings to strings")
try:
saved_mappings[op_type] = self._gradient_override_map[op_type]
except KeyError:
pass
self._gradient_override_map[op_type] = mapped_op_type
try:
yield # The code within the context runs here.
finally:
# Remove the labels set for this context, and restore any saved labels.
for op_type, mapped_op_type in op_type_map.items():
try:
self._gradient_override_map[op_type] = saved_mappings[op_type]
except KeyError:
del self._gradient_override_map[op_type]
# pylint: enable=g-doc-return-or-yield
def prevent_feeding(self, tensor):
"""Marks the given `tensor` as unfeedable in this graph."""
self._unfeedable_tensors.add(tensor)
def is_feedable(self, tensor):
"""Returns `True` if and only if `tensor` is feedable."""
return tensor not in self._unfeedable_tensors
def prevent_fetching(self, op):
"""Marks the given `op` as unfetchable in this graph."""
self._unfetchable_ops.add(op)
def is_fetchable(self, tensor_or_op):
"""Returns `True` if and only if `tensor_or_op` is fetchable."""
if isinstance(tensor_or_op, Tensor):
return tensor_or_op.op not in self._unfetchable_ops
else:
return tensor_or_op not in self._unfetchable_ops
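# Usage sketch (illustrative only; assumes TensorFlow is importable as `tf`
# and graph mode): marking graph elements as unfeedable or unfetchable and
# querying them afterwards.
#
#   g = tf.Graph()
#   with g.as_default():
#     x = tf.compat.v1.placeholder(tf.float32, name="x")
#     y = tf.identity(x)
#   g.prevent_feeding(x)
#   assert not g.is_feedable(x)
#   g.prevent_fetching(y.op)
#   assert not g.is_fetchable(y)
#   assert g.is_fetchable(x.op)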
def switch_to_thread_local(self):
"""Make device, colocation and dependencies stacks thread-local.
Device, colocation and dependencies stacks are not thread-local by default.
If multiple threads access them, then the state is shared. This means that
one thread may affect the behavior of another thread.
After this method is called, the stacks become thread-local. If multiple
threads access them, then the state is not shared. Each thread uses its own
value; a thread doesn't affect other threads by mutating such a stack.
The initial value for every thread's stack is set to the current value
of the stack when `switch_to_thread_local()` was first called.
"""
if not self._stack_state_is_thread_local:
self._stack_state_is_thread_local = True
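# Usage sketch (illustrative only; assumes TensorFlow is importable as `tf`):
# after switch_to_thread_local(), a device scope entered on one thread does
# not leak into ops built on another thread.
#
#   import threading
#
#   g = tf.Graph()
#   g.switch_to_thread_local()
#
#   def build_on_cpu():
#     with g.as_default(), g.device("/device:CPU:0"):
#       tf.constant(1.0, name="on_cpu")   # device scope local to this thread
#
#   t = threading.Thread(target=build_on_cpu)
#   t.start()
#   t.join()
#   with g.as_default():
#     c = tf.constant(2.0, name="no_device")  # unaffected by the other thread
#   assert c.op.device == ""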
@property
def _device_function_stack(self):
if self._stack_state_is_thread_local:
# This may be called from a thread where device_function_stack doesn't yet
# exist.
# pylint: disable=protected-access
if not hasattr(self._thread_local, "_device_function_stack"):
stack_copy_for_this_thread = self._graph_device_function_stack.copy()
self._thread_local._device_function_stack = stack_copy_for_this_thread
return self._thread_local._device_function_stack
# pylint: enable=protected-access
else:
return self._graph_device_function_stack
@property
def _device_functions_outer_to_inner(self):
user_device_specs = self._device_function_stack.peek_objs()
device_functions = [spec.function for spec in user_device_specs]
device_functions_outer_to_inner = list(reversed(device_functions))
return device_functions_outer_to_inner
def _snapshot_device_function_stack_metadata(self):
"""Return device function stack as a list of TraceableObjects.
Returns:
[traceable_stack.TraceableObject, ...] where each TraceableObject's .obj
member is a displayable name for the user's argument to Graph.device, and
the filename and lineno members point to the code location where
Graph.device was called directly or indirectly by the user.
"""
snapshot = []
for obj in self._device_function_stack.peek_traceable_objs():
obj_copy = obj.copy_metadata()
obj_copy.obj = obj.obj.display_name
snapshot.append(obj_copy)
return snapshot
@_device_function_stack.setter
def _device_function_stack(self, device_function_stack):
if self._stack_state_is_thread_local:
# pylint: disable=protected-access
self._thread_local._device_function_stack = device_function_stack
# pylint: enable=protected-access
else:
self._graph_device_function_stack = device_function_stack
@property
def _colocation_stack(self):
"""Return thread-local copy of colocation stack."""
if self._stack_state_is_thread_local:
# This may be called from a thread where colocation_stack doesn't yet
# exist.
# pylint: disable=protected-access
if not hasattr(self._thread_local, "_colocation_stack"):
stack_copy_for_this_thread = self._graph_colocation_stack.copy()
self._thread_local._colocation_stack = stack_copy_for_this_thread
return self._thread_local._colocation_stack
# pylint: enable=protected-access
else:
return self._graph_colocation_stack
def _snapshot_colocation_stack_metadata(self):
"""Return colocation stack metadata as a dictionary."""
return {
traceable_obj.obj.name: traceable_obj.copy_metadata()
for traceable_obj in self._colocation_stack.peek_traceable_objs()
}
@_colocation_stack.setter
def _colocation_stack(self, colocation_stack):
if self._stack_state_is_thread_local:
# pylint: disable=protected-access
self._thread_local._colocation_stack = colocation_stack
# pylint: enable=protected-access
else:
self._graph_colocation_stack = colocation_stack
@property
def _control_dependencies_stack(self):
if self._stack_state_is_thread_local:
# This may be called from a thread where control_dependencies_stack
# doesn't yet exist.
if not hasattr(self._thread_local, "_control_dependencies_stack"):
self._thread_local._control_dependencies_stack = (
self._graph_control_dependencies_stack[:])
return self._thread_local._control_dependencies_stack
else:
return self._graph_control_dependencies_stack
@_control_dependencies_stack.setter
def _control_dependencies_stack(self, control_dependencies):
if self._stack_state_is_thread_local:
self._thread_local._control_dependencies_stack = control_dependencies
else:
self._graph_control_dependencies_stack = control_dependencies
@property
def _distribution_strategy_stack(self):
"""A stack to maintain distribution strategy context for each thread."""
if not hasattr(self._thread_local, "_distribution_strategy_stack"):
self._thread_local._distribution_strategy_stack = [] # pylint: disable=protected-access
return self._thread_local._distribution_strategy_stack # pylint: disable=protected-access
@_distribution_strategy_stack.setter
def _distribution_strategy_stack(self, _distribution_strategy_stack):
self._thread_local._distribution_strategy_stack = ( # pylint: disable=protected-access
_distribution_strategy_stack)
@property
def _global_distribute_strategy_scope(self):
"""For implementing `tf.distribute.set_strategy()`."""
if not hasattr(self._thread_local, "distribute_strategy_scope"):
self._thread_local.distribute_strategy_scope = None
return self._thread_local.distribute_strategy_scope
@_global_distribute_strategy_scope.setter
def _global_distribute_strategy_scope(self, distribute_strategy_scope):
self._thread_local.distribute_strategy_scope = (distribute_strategy_scope)
@property
def _auto_cast_variable_read_dtype(self):
"""The dtype that instances of `AutoCastVariable` will be casted to.
This is None if `AutoCastVariables` should not be casted.
See `AutoCastVariable` for more information.
Returns:
The dtype that instances of `AutoCastVariable` will be casted to.
"""
if not hasattr(self._thread_local, "_auto_cast_variable_read_dtype"):
self._thread_local._auto_cast_variable_read_dtype = None # pylint: disable=protected-access
return self._thread_local._auto_cast_variable_read_dtype # pylint: disable=protected-access
@_auto_cast_variable_read_dtype.setter
def _auto_cast_variable_read_dtype(self, _auto_cast_variable_read_dtype):
self._thread_local._auto_cast_variable_read_dtype = ( # pylint: disable=protected-access
_auto_cast_variable_read_dtype)
@tf_contextlib.contextmanager
def _enable_auto_casting_variables(self, dtype):
"""Context manager to automatically cast AutoCastVariables.
If an AutoCastVariable `var` is used under this context manager, it will be
casted to `dtype` before being used.
See `AutoCastVariable` for more information.
Args:
dtype: The dtype that AutoCastVariables should be casted to.
Yields:
Nothing.
"""
prev_read_dtype = self._auto_cast_variable_read_dtype
try:
self._auto_cast_variable_read_dtype = dtype
yield
finally:
self._auto_cast_variable_read_dtype = prev_read_dtype
def _mutation_lock(self):
"""Returns a lock to guard code that creates & mutates ops.
See the comment for self._group_lock for more info.
"""
return self._group_lock.group(_MUTATION_LOCK_GROUP)
def _session_run_lock(self):
"""Returns a lock to guard code for Session.run.
See the comment for self._group_lock for more info.
"""
return self._group_lock.group(_SESSION_RUN_LOCK_GROUP)
# TODO(agarwal): currently device directives in an outer eager scope will not
# apply to inner graph mode code. Fix that.
@tf_export(v1=["device"])
def device(device_name_or_function):
"""Wrapper for `Graph.device()` using the default graph.
See `tf.Graph.device` for more details.
Args:
device_name_or_function: The device name or function to use in the context.
Returns:
A context manager that specifies the default device to use for newly
created ops.
Raises:
RuntimeError: If eager execution is enabled and a function is passed in.
"""
if context.executing_eagerly():
# TODO(agarwal): support device functions in EAGER mode.
if callable(device_name_or_function):
raise RuntimeError(
"tf.device does not support functions when eager execution "
"is enabled.")
return context.device(device_name_or_function)
else:
return get_default_graph().device(device_name_or_function)
@tf_export("device", v1=[])
def device_v2(device_name):
"""Specifies the device for ops created/executed in this context.
`device_name` can be fully specified, as in "/job:worker/task:1/device:cpu:0",
or partially specified, containing only a subset of the "/"-separated
fields. Any fields which are specified override device annotations from outer
scopes. For example:
```python
with tf.device('/job:foo'):
# ops created here have devices with /job:foo
with tf.device('/job:bar/task:0/device:gpu:2'):
# ops created here have the fully specified device above
with tf.device('/device:gpu:1'):
# ops created here have the device '/job:foo/device:gpu:1'
```
Args:
device_name: The device name to use in the context.
Returns:
A context manager that specifies the default device to use for newly
created ops.
Raises:
RuntimeError: If a function is passed in.
"""
if callable(device_name):
raise RuntimeError("tf.device does not support functions.")
if context.executing_eagerly():
return context.device(device_name)
else:
return get_default_graph().device(device_name)
@tf_export(v1=["container"])
def container(container_name):
"""Wrapper for `Graph.container()` using the default graph.
Args:
container_name: The container string to use in the context.
Returns:
A context manager that specifies the default container to use for newly
created stateful ops.
"""
return get_default_graph().container(container_name)
def _colocate_with_for_gradient(op, gradient_uid, ignore_existing=False):
if context.executing_eagerly():
if op is not None:
if not hasattr(op, "device"):
op = internal_convert_to_tensor_or_indexed_slices(op)
return device(op.device)
else:
return NullContextmanager()
else:
default_graph = get_default_graph()
if isinstance(op, EagerTensor):
if default_graph.building_function:
return default_graph.device(op.device)
else:
raise ValueError("Encountered an Eager-defined Tensor during graph "
"construction, but a function was not being built.")
return default_graph._colocate_with_for_gradient(
op, gradient_uid=gradient_uid, ignore_existing=ignore_existing)
# Internal interface to colocate_with. colocate_with has been deprecated from
# public API. There are still a few internal uses of colocate_with. Add internal
# only API for those uses to avoid deprecation warning.
def colocate_with(op, ignore_existing=False):
return _colocate_with_for_gradient(op, None, ignore_existing=ignore_existing)
@deprecation.deprecated(
date=None, instructions="Colocations handled automatically by placer.")
@tf_export(v1=["colocate_with"])
def _colocate_with(op, ignore_existing=False):
return colocate_with(op, ignore_existing)
@tf_export("control_dependencies")
def control_dependencies(control_inputs):
"""Wrapper for `Graph.control_dependencies()` using the default graph.
See `tf.Graph.control_dependencies`
for more details.
When eager execution is enabled, any callable object in the `control_inputs`
list will be called.
Args:
control_inputs: A list of `Operation` or `Tensor` objects which must be
executed or computed before running the operations defined in the context.
Can also be `None` to clear the control dependencies. If eager execution
is enabled, any callable object in the `control_inputs` list will be
called.
Returns:
A context manager that specifies control dependencies for all
operations constructed within the context.
"""
if context.executing_eagerly():
if control_inputs:
# Execute any pending callables.
for control in control_inputs:
if callable(control):
control()
return NullContextmanager()
else:
return get_default_graph().control_dependencies(control_inputs)
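# Usage sketch (illustrative only; assumes TF 2.x-style eager execution and
# that TensorFlow is importable as `tf`): under eager execution the wrapper
# above simply invokes any callables passed as control inputs and returns a
# no-op context manager.
#
#   calls = []
#   with tf.control_dependencies([lambda: calls.append("ran")]):
#     pass
#   assert calls == ["ran"]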
class _DefaultStack(threading.local):
"""A thread-local stack of objects for providing implicit defaults."""
def __init__(self):
super(_DefaultStack, self).__init__()
self._enforce_nesting = True
self.stack = []
def get_default(self):
return self.stack[-1] if len(self.stack) >= 1 else None
def reset(self):
self.stack = []
def is_cleared(self):
return not self.stack
@property
def enforce_nesting(self):
return self._enforce_nesting
@enforce_nesting.setter
def enforce_nesting(self, value):
self._enforce_nesting = value
@tf_contextlib.contextmanager
def get_controller(self, default):
"""A context manager for manipulating a default stack."""
self.stack.append(default)
try:
yield default
finally:
# stack may be empty if reset() was called
if self.stack:
if self._enforce_nesting:
if self.stack[-1] is not default:
raise AssertionError(
"Nesting violated for default stack of %s objects" %
type(default))
self.stack.pop()
else:
self.stack.remove(default)
_default_session_stack = _DefaultStack() # pylint: disable=protected-access
def default_session(session):
"""Python "with" handler for defining a default session.
This function provides a means of registering a session for handling
Tensor.eval() and Operation.run() calls. It is primarily intended for use
by session.Session, but can be used with any object that implements
the Session.run() interface.
Use with the "with" keyword to specify that Tensor.eval() and Operation.run()
invocations within the scope of a block should be executed by a particular
session.
The default session applies to the current thread only, so it is always
possible to inspect the call stack and determine the scope of a default
session. If you create a new thread, and wish to use the default session
in that thread, you must explicitly add a "with ops.default_session(sess):"
block in that thread's function.
Example:
The following code examples are equivalent:
# 1. Using the Session object directly:
sess = ...
c = tf.constant(5.0)
sess.run(c)
# 2. Using default_session():
sess = ...
with ops.default_session(sess):
c = tf.constant(5.0)
result = c.eval()
# 3. Overriding default_session():
sess = ...
with ops.default_session(sess):
c = tf.constant(5.0)
with ops.default_session(...):
c.eval(session=sess)
Args:
session: The session to be installed as the default session.
Returns:
A context manager for the default session.
"""
return _default_session_stack.get_controller(session)
@tf_export(v1=["get_default_session"])
def get_default_session():
"""Returns the default session for the current thread.
The returned `Session` will be the innermost session on which a
`Session` or `Session.as_default()` context has been entered.
NOTE: The default session is a property of the current thread. If you
create a new thread, and wish to use the default session in that
thread, you must explicitly add a `with sess.as_default():` in that
thread's function.
Returns:
The default `Session` being used in the current thread.
"""
return _default_session_stack.get_default()
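# Usage sketch (illustrative only; assumes TensorFlow is importable as `tf`
# and graph mode, e.g. after tf.compat.v1.disable_eager_execution()): the
# default session is whichever `Session.as_default()` context is innermost on
# the current thread.
#
#   tf.compat.v1.disable_eager_execution()
#   sess = tf.compat.v1.Session()
#   with sess.as_default():
#     assert tf.compat.v1.get_default_session() is sess
#   assert tf.compat.v1.get_default_session() is not sess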
def _eval_using_default_session(tensors, feed_dict, graph, session=None):
"""Uses the default session to evaluate one or more tensors.
Args:
tensors: A single Tensor, or a list of Tensor objects.
feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
numpy ndarrays, TensorProtos, or strings.
graph: The graph in which the tensors are defined.
session: (Optional) A different session to use to evaluate "tensors".
Returns:
Either a single numpy ndarray if "tensors" is a single tensor; or a list
of numpy ndarrays that each correspond to the respective element in
"tensors".
Raises:
ValueError: If no default session is available; the default session
does not have "graph" as its graph; or if "session" is specified,
and it does not have "graph" as its graph.
"""
if session is None:
session = get_default_session()
if session is None:
raise ValueError("Cannot evaluate tensor using `eval()`: No default "
"session is registered. Use `with "
"sess.as_default()` or pass an explicit session to "
"`eval(session=sess)`")
if session.graph is not graph:
raise ValueError("Cannot use the default session to evaluate tensor: "
"the tensor's graph is different from the session's "
"graph. Pass an explicit session to "
"`eval(session=sess)`.")
else:
if session.graph is not graph:
raise ValueError("Cannot use the given session to evaluate tensor: "
"the tensor's graph is different from the session's "
"graph.")
return session.run(tensors, feed_dict)
def _run_using_default_session(operation, feed_dict, graph, session=None):
"""Uses the default session to run "operation".
Args:
operation: The Operation to be run.
feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
numpy ndarrays, TensorProtos, or strings.
graph: The graph in which "operation" is defined.
session: (Optional) A different session to use to run "operation".
Raises:
ValueError: If no default session is available; the default session
does not have "graph" as its graph; or if "session" is specified,
and it does not have "graph" as its graph.
"""
if session is None:
session = get_default_session()
if session is None:
raise ValueError("Cannot execute operation using `run()`: No default "
"session is registered. Use `with "
"sess.as_default():` or pass an explicit session to "
"`run(session=sess)`")
if session.graph is not graph:
raise ValueError("Cannot use the default session to execute operation: "
"the operation's graph is different from the "
"session's graph. Pass an explicit session to "
"run(session=sess).")
else:
if session.graph is not graph:
raise ValueError("Cannot use the given session to execute operation: "
"the operation's graph is different from the session's "
"graph.")
session.run(operation, feed_dict)
class _DefaultGraphStack(_DefaultStack): # pylint: disable=protected-access
"""A thread-local stack of objects for providing an implicit default graph."""
def __init__(self):
super(_DefaultGraphStack, self).__init__()
self._global_default_graph = None
def get_default(self):
"""Override that returns a global default if the stack is empty."""
ret = super(_DefaultGraphStack, self).get_default()
if ret is None:
ret = self._GetGlobalDefaultGraph()
return ret
def _GetGlobalDefaultGraph(self):
if self._global_default_graph is None:
# TODO(mrry): Perhaps log that the default graph is being used, or
# provide some other feedback to prevent confusion when a mixture of
# the global default graph and an explicit graph are combined in the
# same process.
self._global_default_graph = Graph()
return self._global_default_graph
def reset(self):
super(_DefaultGraphStack, self).reset()
self._global_default_graph = None
@tf_contextlib.contextmanager
def get_controller(self, default):
context.context().context_switches.push(default.building_function,
default.as_default,
default._device_function_stack)
try:
with super(_DefaultGraphStack,
self).get_controller(default) as g, context.graph_mode():
yield g
finally:
# If an exception is raised here it may be hiding a related exception in
# the try-block (just above).
context.context().context_switches.pop()
_default_graph_stack = _DefaultGraphStack()
# pylint: disable=g-doc-return-or-yield,line-too-long
@tf_export("init_scope")
@tf_contextlib.contextmanager
def init_scope():
"""A context manager that lifts ops out of control-flow scopes and function-building graphs.
There is often a need to lift variable initialization ops out of control-flow
scopes, function-building graphs, and gradient tapes. Entering an
`init_scope` is a mechanism for satisfying these desiderata. In particular,
entering an `init_scope` has three effects:
(1) All control dependencies are cleared the moment the scope is entered;
this is equivalent to entering the context manager returned from
`control_dependencies(None)`, which has the side-effect of exiting
control-flow scopes like `tf.cond` and `tf.while_loop`.
(2) All operations that are created while the scope is active are lifted
into the lowest context on the `context_stack` that is not building a
graph function. Here, a context is defined as either a graph or an eager
context. Every context switch, i.e., every installation of a graph as
the default graph and every switch into eager mode, is logged in a
thread-local stack called `context_switches`; the log entry for a
context switch is popped from the stack when the context is exited.
Entering an `init_scope` is equivalent to crawling up
`context_switches`, finding the first context that is not building a
graph function, and entering it. A caveat is that if graph mode is
enabled but the default graph stack is empty, then entering an
`init_scope` will simply install a fresh graph as the default one.
(3) The gradient tape is paused while the scope is active.
When eager execution is enabled, code inside an init_scope block runs with
eager execution enabled even when defining graph functions via
tf.contrib.eager.defun. For example:
```python
tf.compat.v1.enable_eager_execution()
@tf.contrib.eager.defun
def func():
# A defun-decorated function constructs TensorFlow graphs,
# it does not execute eagerly.
assert not tf.executing_eagerly()
with tf.init_scope():
# Initialization runs with eager execution enabled
assert tf.executing_eagerly()
```
Raises:
RuntimeError: if graph state is incompatible with this initialization.
"""
# pylint: enable=g-doc-return-or-yield,line-too-long
if context.executing_eagerly():
# Fastpath.
with tape.stop_recording():
yield
else:
# Retrieve the active name scope: entering an `init_scope` preserves
# the name scope of the current context.
default_graph = get_default_graph()
scope = default_graph.get_name_scope()
if scope and scope[-1] != "/":
# Names that end with trailing slashes are treated by `name_scope` as
# absolute.
scope = scope + "/"
innermost_nonempty_device_stack = default_graph._device_function_stack # pylint: disable=protected-access
outer_context = None
if not _default_graph_stack.stack:
# If the default graph stack is empty, then we cannot be building a
# function. Install the global graph (which, in this case, is also the
# default graph) as the outer context.
if default_graph.building_function:
raise RuntimeError("The global graph is building a function.")
outer_context = default_graph.as_default
else:
# Find a context that is not building a function.
for stack_entry in reversed(context.context().context_switches.stack):
if not innermost_nonempty_device_stack:
innermost_nonempty_device_stack = stack_entry.device_stack
if not stack_entry.is_building_function:
outer_context = stack_entry.enter_context_fn
break
if outer_context is None:
# As a last resort, obtain the global default graph; this graph doesn't
# necessarily live on the graph stack (and hence it doesn't necessarily
# live on the context stack), but it is stored in the graph stack's
# encapsulating object.
outer_context = _default_graph_stack._GetGlobalDefaultGraph().as_default # pylint: disable=protected-access
if outer_context is None:
# Sanity check; this shouldn't be triggered.
raise RuntimeError("All graphs are building functions, and no "
"eager context was previously active.")
outer_graph = None
outer_device_stack = None
try:
with outer_context(), name_scope(scope), control_dependencies(
None), tape.stop_recording():
context_manager = NullContextmanager
context_manager_input = None
if not context.executing_eagerly():
# The device stack is preserved when lifting into a graph. Eager
# execution doesn't implement device stacks and in particular it
# doesn't support device functions, so in general it's not possible
# to do the same when lifting into the eager context.
outer_graph = get_default_graph()
outer_device_stack = outer_graph._device_function_stack # pylint: disable=protected-access
outer_graph._device_function_stack = innermost_nonempty_device_stack # pylint: disable=protected-access
elif innermost_nonempty_device_stack is not None:
for device_spec in innermost_nonempty_device_stack.peek_objs():
if device_spec.function is None:
break
if device_spec.raw_string:
context_manager = context.device
context_manager_input = device_spec.raw_string
break
# It is currently not possible to have a device function in V2,
# but in V1 we are unable to apply device functions in eager mode.
# This means that we will silently skip some of the entries on the
# device stack in V1 + eager mode.
with context_manager(context_manager_input):
yield
finally:
# If an exception is raised here it may be hiding a related exception in
# try-block (just above).
if outer_graph is not None:
outer_graph._device_function_stack = outer_device_stack # pylint: disable=protected-access
def executing_eagerly_outside_functions():
"""Returns True if executing eagerly, even if inside a graph function."""
# Fast path for when this is called eagerly (it's not necessary to enter an init_scope).
if context.executing_eagerly():
return True
with init_scope():
return context.executing_eagerly()
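# Usage sketch (illustrative only; assumes TF 2.x eager execution,
# `tf.function`, and that TensorFlow is importable as `tf`): inside a
# graph-building function the helper above still reports that the surrounding
# context is eager.
#
#   from tensorflow.python.framework import ops
#
#   @tf.function
#   def f():
#     assert not tf.executing_eagerly()                   # building a graph
#     assert ops.executing_eagerly_outside_functions()    # outer context is eager
#     return tf.constant(1.0)
#
#   f()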
def inside_function():
return get_default_graph().building_function
@tf_export(v1=["enable_eager_execution"])
def enable_eager_execution(config=None, device_policy=None,
execution_mode=None):
"""Enables eager execution for the lifetime of this program.
Eager execution provides an imperative interface to TensorFlow. With eager
execution enabled, TensorFlow functions execute operations immediately (as
opposed to adding to a graph to be executed later in a `tf.compat.v1.Session`)
and
return concrete values (as opposed to symbolic references to a node in a
computational graph).
For example:
```python
tf.compat.v1.enable_eager_execution()
# After eager execution is enabled, operations are executed as they are
# defined and Tensor objects hold concrete values, which can be accessed as
# numpy.ndarray`s through the numpy() method.
assert tf.multiply(6, 7).numpy() == 42
```
Eager execution cannot be enabled after TensorFlow APIs have been used to
create or execute graphs. It is typically recommended to invoke this function
at program startup and not in a library (as most libraries should be usable
both with and without eager execution).
Args:
config: (Optional.) A `tf.compat.v1.ConfigProto` to use to configure the
environment in which operations are executed. Note that
`tf.compat.v1.ConfigProto` is also used to configure graph execution (via
`tf.compat.v1.Session`) and many options within `tf.compat.v1.ConfigProto`
are not implemented (or are irrelevant) when eager execution is enabled.
device_policy: (Optional.) Policy controlling how operations requiring
inputs on a specific device (e.g., GPU 0) handle inputs on a different
device (e.g. GPU 1 or CPU). When set to None, an appropriate value will
be picked automatically. The value picked may change between TensorFlow
releases.
Valid values:
- tf.contrib.eager.DEVICE_PLACEMENT_EXPLICIT: raises an error if the
placement is not correct.
- tf.contrib.eager.DEVICE_PLACEMENT_WARN: copies the tensors which are not
on the right device but logs a warning.
- tf.contrib.eager.DEVICE_PLACEMENT_SILENT: silently copies the tensors.
Note that this may hide performance problems as there is no notification
provided when operations are blocked on the tensor being copied between
devices.
- tf.contrib.eager.DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies
int32 tensors, raising errors on the other ones.
execution_mode: (Optional.) Policy controlling how operations dispatched are
actually executed. When set to None, an appropriate value will be picked
automatically. The value picked may change between TensorFlow releases.
Valid values:
- tf.contrib.eager.SYNC: executes each operation synchronously.
- tf.contrib.eager.ASYNC: executes each operation asynchronously. These
operations may return "non-ready" handles.
Raises:
ValueError: If eager execution is enabled after creating/executing a
TensorFlow graph, or if options provided conflict with a previous call
to this function.
"""
_api_usage_gauge.get_cell().set(True)
if context.default_execution_mode != context.EAGER_MODE:
return enable_eager_execution_internal(
config=config,
device_policy=device_policy,
execution_mode=execution_mode,
server_def=None)
@tf_export(v1=["disable_eager_execution"])
def disable_eager_execution():
"""Disables eager execution.
This function can only be called before any Graphs, Ops, or Tensors have been
created. It can be used at the beginning of the program for complex migration
projects from TensorFlow 1.x to 2.x.
"""
_api_usage_gauge.get_cell().set(False)
context.default_execution_mode = context.GRAPH_MODE
c = context.context_safe()
if c is not None:
c._thread_local_data.is_eager = False # pylint: disable=protected-access
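# Usage sketch (illustrative only; assumes TensorFlow is importable as `tf`):
# disabling eager execution must happen before any graphs, ops, or tensors
# are created, typically at program startup.
#
#   import tensorflow as tf
#   tf.compat.v1.disable_eager_execution()
#   assert not tf.executing_eagerly()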
def enable_eager_execution_internal(config=None,
device_policy=None,
execution_mode=None,
server_def=None):
"""Enables eager execution for the lifetime of this program.
Most of the doc string for enable_eager_execution is relevant here as well.
Args:
config: See enable_eager_execution doc string
device_policy: See enable_eager_execution doc string
execution_mode: See enable_eager_execution doc string
server_def: (Optional.) A tensorflow::ServerDef proto. Enables execution on
remote devices. GrpcServers need to be started by creating an identical
server_def to this, and setting the appropriate task_indexes, so that the
servers can communicate. It will then be possible to execute operations on
remote devices.
Raises:
ValueError
"""
if config is not None and not isinstance(config, config_pb2.ConfigProto):
raise TypeError("config must be a tf.ConfigProto, but got %s" %
type(config))
if device_policy not in (None, context.DEVICE_PLACEMENT_EXPLICIT,
context.DEVICE_PLACEMENT_WARN,
context.DEVICE_PLACEMENT_SILENT,
context.DEVICE_PLACEMENT_SILENT_FOR_INT32):
raise ValueError(
"device_policy must be one of None, tf.contrib.eager.DEVICE_PLACEMENT_*"
)
if execution_mode not in (None, context.SYNC, context.ASYNC):
raise ValueError(
"execution_mode must be one of None, tf.contrib.eager.SYNC, "
"tf.contrib.eager.ASYNC")
if context.default_execution_mode == context.GRAPH_MODE:
graph_mode_has_been_used = (
_default_graph_stack._global_default_graph is not None) # pylint: disable=protected-access
if graph_mode_has_been_used:
raise ValueError(
"tf.enable_eager_execution must be called at program startup.")
context.default_execution_mode = context.EAGER_MODE
# pylint: disable=protected-access
if context._context is None:
context._context = context.Context(
config=config,
device_policy=device_policy,
execution_mode=execution_mode,
server_def=server_def)
elif ((config is not None and config is not context._context._config) or
(device_policy is not None and
device_policy is not context._context._device_policy) or
(execution_mode is not None and
execution_mode is not context._context._execution_mode)):
raise ValueError(
"Trying to change the options of an active eager"
" execution. Context config: %s, specified config:"
" %s. Context device policy: %s, specified device"
" policy: %s. Context execution mode: %s, "
" specified execution mode %s." %
(context._context._config, config, context._context._device_policy,
device_policy, context._context._execution_mode, execution_mode))
else:
# We already created everything, so update the thread local data.
context._context._thread_local_data.is_eager = True
# Monkey patch to get rid of an unnecessary conditional since the context is
# now initialized.
context.context = context.context_safe
def eager_run(main=None, argv=None):
"""Runs the program with an optional main function and argv list.
The program will run with eager execution enabled.
Example:
```python
import tensorflow as tf
# Import subject to future changes:
from tensorflow.contrib.eager.python import tfe
def main(_):
u = tf.constant(6.0)
v = tf.constant(7.0)
print(u * v)
if __name__ == "__main__":
tfe.run()
```
Args:
main: the main function to run.
argv: the arguments to pass to it.
"""
enable_eager_execution()
app.run(main, argv)
@tf_export(v1=["reset_default_graph"])
def reset_default_graph():
"""Clears the default graph stack and resets the global default graph.
NOTE: The default graph is a property of the current thread. This
function applies only to the current thread. Calling this function while
a `tf.compat.v1.Session` or `tf.compat.v1.InteractiveSession` is active will
result in undefined
behavior. Using any previously created `tf.Operation` or `tf.Tensor` objects
after calling this function will result in undefined behavior.
Raises:
AssertionError: If this function is called within a nested graph.
"""
if not _default_graph_stack.is_cleared():
raise AssertionError("Do not use tf.reset_default_graph() to clear "
"nested graphs. If you need a cleared graph, "
"exit the nesting and create a new graph.")
_default_graph_stack.reset()
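# Usage sketch (illustrative only; assumes graph mode and that no graph or
# session context is currently active): resetting the global default graph
# makes subsequently created ops land in a fresh graph.
#
#   tf.compat.v1.disable_eager_execution()
#   a = tf.constant(1.0)
#   tf.compat.v1.reset_default_graph()
#   b = tf.constant(2.0)
#   assert a.graph is not b.graph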
@tf_export(v1=["get_default_graph"])
def get_default_graph():
"""Returns the default graph for the current thread.
The returned graph will be the innermost graph on which a
`Graph.as_default()` context has been entered, or a global default
graph if none has been explicitly created.
NOTE: The default graph is a property of the current thread. If you
create a new thread, and wish to use the default graph in that
thread, you must explicitly add a `with g.as_default():` in that
thread's function.
Returns:
The default `Graph` being used in the current thread.
"""
return _default_graph_stack.get_default()
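# Usage sketch (illustrative only; assumes TensorFlow is importable as `tf`):
# `Graph.as_default()` controls what the function above returns on the
# current thread.
#
#   g = tf.Graph()
#   with g.as_default():
#     assert tf.compat.v1.get_default_graph() is g
#   assert tf.compat.v1.get_default_graph() is not g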
def has_default_graph():
"""Returns True if there is a default graph."""
return len(_default_graph_stack.stack) >= 1
def get_name_scope():
"""Returns the current name scope in the default_graph.
For example:
```python
with tf.name_scope('scope1'):
with tf.name_scope('scope2'):
print(tf.get_name_scope())
```
would print the string `scope1/scope2`.
Returns:
A string representing the current name scope.
"""
if context.executing_eagerly():
return context.context().scope_name.rstrip("/")
return get_default_graph().get_name_scope()
def _assert_same_graph(original_item, item):
"""Fail if the 2 items are from different graphs.
Args:
original_item: Original item to check against.
item: Item to check.
Raises:
ValueError: if graphs do not match.
"""
if original_item.graph is not item.graph:
raise ValueError("%s must be from the same graph as %s." %
(item, original_item))
def _get_graph_from_inputs(op_input_list, graph=None):
"""Returns the appropriate graph to use for the given inputs.
This library method provides a consistent algorithm for choosing the graph
in which an Operation should be constructed:
1. If the default graph is being used to construct a function, we
use the default graph.
2. If the "graph" is specified explicitly, we validate that all of the inputs
in "op_input_list" are compatible with that graph.
3. Otherwise, we attempt to select a graph from the first Operation-
or Tensor-valued input in "op_input_list", and validate that all other
such inputs are in the same graph.
4. If the graph was not specified and it could not be inferred from
"op_input_list", we attempt to use the default graph.
Args:
op_input_list: A list of inputs to an operation, which may include `Tensor`,
`Operation`, and other objects that may be converted to a graph element.
graph: (Optional) The explicit graph to use.
Raises:
TypeError: If op_input_list is not a list or tuple, or if graph is not a
Graph.
ValueError: If a graph is explicitly passed and not all inputs are from it,
or if the inputs are from multiple graphs, or we could not find a graph
and there was no default graph.
Returns:
The appropriate graph to use for the given inputs.
"""
if get_default_graph().building_function:
return get_default_graph()
op_input_list = tuple(op_input_list) # Handle generators correctly
if graph and not isinstance(graph, Graph):
raise TypeError("Input graph needs to be a Graph: %s" % graph)
# 1. We validate that all of the inputs are from the same graph. This is
# either the supplied graph parameter, or the first one selected from one of
# the graph-element-valued inputs. In the latter case, we hold onto
# that input in original_graph_element so we can provide a more
# informative error if a mismatch is found.
original_graph_element = None
for op_input in op_input_list:
# Determine if this is a valid graph_element.
# TODO(josh11b): Note that we exclude subclasses of Tensor. Need to clean this
# up.
graph_element = None
if (isinstance(op_input, (Operation, _TensorLike)) and
((not isinstance(op_input, Tensor)) or type(op_input) == Tensor)): # pylint: disable=unidiomatic-typecheck
graph_element = op_input
else:
graph_element = _as_graph_element(op_input)
if graph_element is not None:
if not graph:
original_graph_element = graph_element
graph = graph_element.graph
elif original_graph_element is not None:
_assert_same_graph(original_graph_element, graph_element)
elif graph_element.graph is not graph:
raise ValueError("%s is not from the passed-in graph." % graph_element)
# 2. If all else fails, we use the default graph, which is always there.
return graph or get_default_graph()
@tf_export(v1=["GraphKeys"])
class GraphKeys(object):
"""Standard names to use for graph collections.
The standard library uses various well-known names to collect and
retrieve values associated with a graph. For example, the
`tf.Optimizer` subclasses default to optimizing the variables
collected under `tf.GraphKeys.TRAINABLE_VARIABLES` if none is
specified, but it is also possible to pass an explicit list of
variables.
The following standard keys are defined:
* `GLOBAL_VARIABLES`: the default collection of `Variable` objects, shared
across distributed environment (model variables are subset of these). See
`tf.compat.v1.global_variables`
for more details.
Commonly, all `TRAINABLE_VARIABLES` variables will be in `MODEL_VARIABLES`,
and all `MODEL_VARIABLES` variables will be in `GLOBAL_VARIABLES`.
* `LOCAL_VARIABLES`: the subset of `Variable` objects that are local to each
machine. Usually used for temporary variables, like counters.
Note: use `tf.contrib.framework.local_variable` to add to this collection.
* `MODEL_VARIABLES`: the subset of `Variable` objects that are used in the
model for inference (feed forward). Note: use
`tf.contrib.framework.model_variable` to add to this collection.
* `TRAINABLE_VARIABLES`: the subset of `Variable` objects that will
be trained by an optimizer. See
`tf.compat.v1.trainable_variables`
for more details.
* `SUMMARIES`: the summary `Tensor` objects that have been created in the
graph. See
`tf.compat.v1.summary.merge_all`
for more details.
* `QUEUE_RUNNERS`: the `QueueRunner` objects that are used to
produce input for a computation. See
`tf.compat.v1.train.start_queue_runners`
for more details.
* `MOVING_AVERAGE_VARIABLES`: the subset of `Variable` objects that will also
keep moving averages. See
`tf.compat.v1.moving_average_variables`
for more details.
* `REGULARIZATION_LOSSES`: regularization losses collected during graph
construction.
The following standard keys are _defined_, but their collections are **not**
automatically populated as many of the others are:
* `WEIGHTS`
* `BIASES`
* `ACTIVATIONS`
"""
# Key to collect Variable objects that are global (shared across machines).
# Default collection for all variables, except local ones.
GLOBAL_VARIABLES = "variables"
# Key to collect local variables that are local to the machine and are not
# saved/restored.
LOCAL_VARIABLES = "local_variables"
# Key to collect local variables which are used to accumulate internal state
# to be used in tf.metrics.*.
METRIC_VARIABLES = "metric_variables"
# Key to collect model variables defined by layers.
MODEL_VARIABLES = "model_variables"
# Key to collect Variable objects that will be trained by the
# optimizers.
TRAINABLE_VARIABLES = "trainable_variables"
# Key to collect summaries.
SUMMARIES = "summaries"
# Key to collect QueueRunners.
QUEUE_RUNNERS = "queue_runners"
# Key to collect table initializers.
TABLE_INITIALIZERS = "table_initializer"
# Key to collect asset filepaths. An asset represents an external resource
# like a vocabulary file.
ASSET_FILEPATHS = "asset_filepaths"
# Key to collect Variable objects that keep moving averages.
MOVING_AVERAGE_VARIABLES = "moving_average_variables"
# Key to collect regularization losses at graph construction.
REGULARIZATION_LOSSES = "regularization_losses"
# Key to collect concatenated sharded variables.
CONCATENATED_VARIABLES = "concatenated_variables"
# Key to collect savers.
SAVERS = "savers"
# Key to collect weights
WEIGHTS = "weights"
# Key to collect biases
BIASES = "biases"
# Key to collect activations
ACTIVATIONS = "activations"
# Key to collect update_ops
UPDATE_OPS = "update_ops"
# Key to collect losses
LOSSES = "losses"
# Key to collect BaseSaverBuilder.SaveableObject instances for checkpointing.
SAVEABLE_OBJECTS = "saveable_objects"
# Key to collect all shared resources used by the graph which need to be
# initialized once per cluster.
RESOURCES = "resources"
# Key to collect all shared resources used in this graph which need to be
# initialized once per session.
LOCAL_RESOURCES = "local_resources"
# Trainable resource-style variables.
TRAINABLE_RESOURCE_VARIABLES = "trainable_resource_variables"
# Key to indicate various ops.
INIT_OP = "init_op"
LOCAL_INIT_OP = "local_init_op"
READY_OP = "ready_op"
READY_FOR_LOCAL_INIT_OP = "ready_for_local_init_op"
SUMMARY_OP = "summary_op"
GLOBAL_STEP = "global_step"
# Used to count the number of evaluations performed during a single evaluation
# run.
EVAL_STEP = "eval_step"
TRAIN_OP = "train_op"
# Key for control flow context.
COND_CONTEXT = "cond_context"
WHILE_CONTEXT = "while_context"
# Used to store v2 summary names.
_SUMMARY_COLLECTION = "_SUMMARY_V2"
# List of all collections that keep track of variables.
_VARIABLE_COLLECTIONS = [
GLOBAL_VARIABLES,
LOCAL_VARIABLES,
METRIC_VARIABLES,
MODEL_VARIABLES,
TRAINABLE_VARIABLES,
MOVING_AVERAGE_VARIABLES,
CONCATENATED_VARIABLES,
TRAINABLE_RESOURCE_VARIABLES,
]
# Key for streaming model ports.
# NOTE(yuanbyu): internal and experimental.
_STREAMING_MODEL_PORTS = "streaming_model_ports"
@decorator_utils.classproperty
@deprecation.deprecated(None, "Use `tf.GraphKeys.GLOBAL_VARIABLES` instead.")
def VARIABLES(cls): # pylint: disable=no-self-argument
return cls.GLOBAL_VARIABLES
def dismantle_graph(graph):
"""Cleans up reference cycles from a `Graph`.
Helpful for making sure the garbage collector doesn't need to run after a
temporary `Graph` is no longer needed.
Args:
graph: A `Graph` object to destroy. Neither it nor any of its ops are usable
after this function runs.
"""
memory.dismantle_ordered_dict(graph._functions) # pylint: disable=protected-access
# Now clean up Operation<->Graph reference cycles by clearing all of the
# attributes for the Graph and its ops.
graph_operations = graph.get_operations()
for op in graph_operations:
op.__dict__ = {}
graph.__dict__ = {}
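# --- Illustrative sketch (added; not part of the original module) ------------
# Typical use of dismantle_graph: break reference cycles once a short-lived
# graph is no longer needed. The temporary graph here is an arbitrary example.
def _dismantle_graph_example():
  temp_graph = Graph()
  with temp_graph.as_default():
    pass  # ... build temporary ops here ...
  dismantle_graph(temp_graph)  # temp_graph and its ops are unusable afterwards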
@tf_export(v1=["add_to_collection"])
def add_to_collection(name, value):
"""Wrapper for `Graph.add_to_collection()` using the default graph.
See `tf.Graph.add_to_collection`
for more details.
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collection. @compatibility(eager)
Collections are only supported in eager when variables are created inside
an EagerVariableStore (e.g. as part of a layer or template).
@end_compatibility
"""
get_default_graph().add_to_collection(name, value)
@tf_export(v1=["add_to_collections"])
def add_to_collections(names, value):
"""Wrapper for `Graph.add_to_collections()` using the default graph.
See `tf.Graph.add_to_collections`
for more details.
Args:
names: The key for the collections. The `GraphKeys` class contains many
standard names for collections.
value: The value to add to the collections. @compatibility(eager)
Collections are only supported in eager when variables are created inside
an EagerVariableStore (e.g. as part of a layer or template).
@end_compatibility
"""
get_default_graph().add_to_collections(names, value)
@tf_export(v1=["get_collection_ref"])
def get_collection_ref(key):
"""Wrapper for `Graph.get_collection_ref()` using the default graph.
See `tf.Graph.get_collection_ref`
for more details.
Args:
key: The key for the collection. For example, the `GraphKeys` class contains
many standard names for collections.
Returns:
The list of values in the collection with the given `name`, or an empty
list if no value has been added to that collection. Note that this returns
the collection list itself, which can be modified in place to change the
collection.
@compatibility(eager)
Collections are not supported when eager execution is enabled.
@end_compatibility
"""
return get_default_graph().get_collection_ref(key)
@tf_export(v1=["get_collection"])
def get_collection(key, scope=None):
"""Wrapper for `Graph.get_collection()` using the default graph.
See `tf.Graph.get_collection`
for more details.
Args:
key: The key for the collection. For example, the `GraphKeys` class contains
many standard names for collections.
scope: (Optional.) If supplied, the resulting list is filtered to include
only items whose `name` attribute matches using `re.match`. Items without
a `name` attribute are never returned if a scope is supplied and the
      choice of `re.match` means that a `scope` without special tokens filters
by prefix.
Returns:
The list of values in the collection with the given `name`, or
an empty list if no value has been added to that collection. The
list contains the values in the order under which they were
collected.
@compatibility(eager)
Collections are not supported when eager execution is enabled.
@end_compatibility
"""
return get_default_graph().get_collection(key, scope)
def get_all_collection_keys():
"""Returns a list of collections used in the default graph."""
return get_default_graph().get_all_collection_keys()
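# --- Illustrative sketch (added; not part of the original module) ------------
# How values accumulate in named collections on a graph. The collection names
# and values below are arbitrary examples, not standard GraphKeys.
def _collections_usage_example():
  with Graph().as_default():
    add_to_collection("my_losses", 1.0)
    add_to_collections(["my_losses", "to_log"], 2.0)
    # get_collection returns a copy of the list; get_collection_ref returns
    # the live, mutable list itself.
    return get_collection("my_losses")  # -> [1.0, 2.0]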
name_scope_cache = {}
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
@tf_export(v1=["name_scope"])
class name_scope(object): # pylint: disable=invalid-name
"""A context manager for use when defining a Python op.
This context manager validates that the given `values` are from the
same graph, makes that graph the default graph, and pushes a
name scope in that graph (see
`tf.Graph.name_scope`
for more details on that).
For example, to define a new Python op called `my_op`:
```python
def my_op(a, b, c, name=None):
with tf.name_scope(name, "MyOp", [a, b, c]) as scope:
a = tf.convert_to_tensor(a, name="a")
b = tf.convert_to_tensor(b, name="b")
c = tf.convert_to_tensor(c, name="c")
# Define some computation that uses `a`, `b`, and `c`.
return foo_op(..., name=scope)
```
"""
@property
def name(self):
return self._name
def __init__(self, name, default_name=None, values=None):
"""Initialize the context manager.
Args:
name: The name argument that is passed to the op function.
default_name: The default name to use if the `name` argument is `None`.
values: The list of `Tensor` arguments that are passed to the op function.
Raises:
TypeError: if `default_name` is passed in but not a string.
"""
if not (default_name is None or isinstance(default_name, six.string_types)):
raise TypeError(
"`default_name` type (%s) is not a string type. You likely meant to "
"pass this into the `values` kwarg." % type(default_name))
self._name = default_name if name is None else name
self._default_name = default_name
self._values = values
self._ctx = context.context()
self._in_eager_mode = self._ctx.executing_eagerly()
self._has_symbolic_input_in_eager = False
if self._values and self._in_eager_mode:
# The presence of a graph tensor in `self._values` overrides the context.
for value in self._values:
if hasattr(value, "graph"):
self._has_symbolic_input_in_eager = True
self._name_scope = value.graph.name_scope(self._name)
def __enter__(self):
"""Start the scope block.
Returns:
The scope name.
Raises:
ValueError: if neither `name` nor `default_name` is provided
but `values` are.
"""
if self._has_symbolic_input_in_eager:
return self._name_scope.__enter__()
if self._in_eager_mode:
self._old_name = self._ctx.scope_name
if not self._name:
scope_name = ""
else:
cache_key = self._name, self._old_name, self._default_name
if cache_key in name_scope_cache:
self._ctx.scope_name = name_scope_cache[cache_key]
return self._ctx.scope_name
elif self._name[-1] == "/":
# A trailing slash breaks out of nested name scopes, indicating a
# fully specified scope name, for compatibility with Graph.name_scope.
scope_name = self._name
else:
name_with_trailing_slash = self._name + "/"
scope_name = (
self._old_name + name_with_trailing_slash
if self._old_name else name_with_trailing_slash)
name_scope_cache[cache_key] = scope_name
self._ctx.scope_name = scope_name
return scope_name
else:
if self._name is None and self._values is not None:
# We only raise an error if values is not None (provided) because
# currently tf.name_scope(None) (values=None then) is sometimes used as
# an idiom to reset to top scope.
raise ValueError(
"At least one of name (%s) and default_name (%s) must be provided."
% (self._name, self._default_name))
if self._values is None:
self._values = []
g = _get_graph_from_inputs(self._values)
self._g_manager = g.as_default()
self._g_manager.__enter__()
try:
self._name_scope = g.name_scope(self._name)
return self._name_scope.__enter__()
except:
self._g_manager.__exit__(*sys.exc_info())
raise
def __exit__(self, type_arg, value_arg, traceback_arg):
if self._has_symbolic_input_in_eager:
self._name_scope.__exit__(type_arg, value_arg, traceback_arg)
elif self._in_eager_mode:
self._ctx.scope_name = self._old_name
else:
self._name_scope.__exit__(type_arg, value_arg, traceback_arg)
self._g_manager.__exit__(type_arg, value_arg, traceback_arg)
return False # False values do not suppress exceptions
@tf_export("name_scope", v1=[])
class name_scope_v2(name_scope):
"""A context manager for use when defining a Python op.
This context manager pushes a name scope, which will make the name of all
operations added within it have a prefix.
For example, to define a new Python op called `my_op`:
```python
def my_op(a, b, c, name=None):
with tf.name_scope("MyOp") as scope:
a = tf.convert_to_tensor(a, name="a")
b = tf.convert_to_tensor(b, name="b")
c = tf.convert_to_tensor(c, name="c")
# Define some computation that uses `a`, `b`, and `c`.
return foo_op(..., name=scope)
```
When executed, the Tensors `a`, `b`, `c`, will have names `MyOp/a`, `MyOp/b`,
and `MyOp/c`.
If the scope name already exists, the name will be made unique by appending
`_n`. For example, calling `my_op` the second time will generate `MyOp_1/a`,
etc.
"""
def __init__(self, name):
"""Initialize the context manager.
Args:
name: The prefix to use on all names created within the name scope.
Raises:
ValueError: If name is None, or not a string.
"""
if name is None or not isinstance(name, six.string_types):
raise ValueError("name for name_scope must be a string.")
super(name_scope_v2, self).__init__(name=None, default_name=name)
def strip_name_scope(name, export_scope):
"""Removes name scope from a name.
Args:
name: A `string` name.
export_scope: Optional `string`. Name scope to remove.
Returns:
Name with name scope removed, or the original name if export_scope
is None.
"""
if export_scope:
if export_scope[-1] == "/":
export_scope = export_scope[:-1]
try:
# Strips export_scope/, export_scope///,
# ^export_scope/, loc:@export_scope/.
str_to_replace = r"([\^]|loc:@|^)" + export_scope + r"[\/]+(.*)"
return re.sub(str_to_replace, r"\1\2", compat.as_str(name), count=1)
except TypeError as e:
# If the name is not of a type we can process, simply return it.
logging.warning(e)
return name
else:
return name
def prepend_name_scope(name, import_scope):
"""Prepends name scope to a name.
Args:
name: A `string` name.
import_scope: Optional `string`. Name scope to add.
Returns:
Name with name scope added, or the original name if import_scope
is None.
"""
if import_scope:
if import_scope[-1] == "/":
import_scope = import_scope[:-1]
try:
str_to_replace = r"([\^]|loc:@|^)(.*)"
return re.sub(str_to_replace, r"\1" + import_scope + r"/\2",
compat.as_str(name))
except TypeError as e:
# If the name is not of a type we can process, simply return it.
logging.warning(e)
return name
else:
return name
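# --- Illustrative sketch (added; not part of the original module) ------------
# How the two helpers above rewrite names; the scope strings are made up.
def _name_scope_rewrite_example():
  assert strip_name_scope("export/weights:0", "export") == "weights:0"
  assert prepend_name_scope("weights:0", "import") == "import/weights:0"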
# pylint: disable=g-doc-return-or-yield
# pylint: disable=not-context-manager
@tf_export(v1=["op_scope"])
@tf_contextlib.contextmanager
def op_scope(values, name, default_name=None):
"""DEPRECATED. Same as name_scope above, just different argument order."""
logging.warn("tf.op_scope(values, name, default_name) is deprecated,"
" use tf.name_scope(name, default_name, values)")
with name_scope(name, default_name=default_name, values=values) as scope:
yield scope
_proto_function_registry = registry.Registry("proto functions")
def register_proto_function(collection_name,
proto_type=None,
to_proto=None,
from_proto=None):
"""Registers `to_proto` and `from_proto` functions for collection_name.
`to_proto` function converts a Python object to the corresponding protocol
buffer, and returns the protocol buffer.
  `from_proto` function converts a protocol buffer into a Python object, and
  returns the object.
Args:
collection_name: Name of the collection.
proto_type: Protobuf type, such as `saver_pb2.SaverDef`,
      `variable_pb2.VariableDef`, `queue_runner_pb2.QueueRunnerDef`.
to_proto: Function that implements Python object to protobuf conversion.
from_proto: Function that implements protobuf to Python object conversion.
"""
if to_proto and not callable(to_proto):
raise TypeError("to_proto must be callable.")
if from_proto and not callable(from_proto):
raise TypeError("from_proto must be callable.")
_proto_function_registry.register((proto_type, to_proto, from_proto),
collection_name)
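# --- Illustrative sketch (added; not part of the original module) ------------
# Registering (de)serialization hooks for a hypothetical user-defined
# collection. The identity lambdas and the collection name are made up; real
# code would pass a protobuf class plus matching converter functions.
def _register_proto_example():
  register_proto_function(
      "my_custom_collection",
      proto_type=None,
      to_proto=lambda value, export_scope=None: value,
      from_proto=lambda proto, import_scope=None: proto)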
def get_collection_proto_type(collection_name):
"""Returns the proto_type for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[0]
except LookupError:
return None
def get_to_proto_function(collection_name):
"""Returns the to_proto function for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[1]
except LookupError:
return None
def get_from_proto_function(collection_name):
"""Returns the from_proto function for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[2]
except LookupError:
return None
def _operation_conversion_error(op, dtype=None, name=None, as_ref=False):
"""Produce a nice error if someone converts an Operation to a Tensor."""
raise TypeError(("Can't convert Operation '%s' to Tensor "
"(target dtype=%r, name=%r, as_ref=%r)") %
(op.name, dtype, name, as_ref))
def _op_to_colocate_with(v):
"""Operation object corresponding to v to use for colocation constraints."""
if v is None:
return None
if isinstance(v, Operation):
return v
# We always want to colocate with the reference op.
# When 'v' is a ResourceVariable, the reference op is the handle creating op.
#
# What this should be is:
# if isinstance(v, ResourceVariable):
# return v.handle.op
# However, that would require a circular import dependency.
# As of October 2018, there were attempts underway to remove
# colocation constraints altogether. Assuming that will
# happen soon, perhaps this hack to work around the circular
# import dependency is acceptable.
if hasattr(v, "handle") and hasattr(v.handle, "op") and isinstance(
v.handle.op, Operation):
return v.handle.op
return internal_convert_to_tensor_or_indexed_slices(v, as_ref=True).op
def _is_keras_symbolic_tensor(x):
return hasattr(x, "graph") and getattr(x.graph, "name", None) == "keras_graph"
register_tensor_conversion_function(Operation, _operation_conversion_error)
| apache-2.0 | 4,550,424,314,189,283,000 | 35.490944 | 116 | 0.666058 | false |
andrew-blais/VirtualMachine | model.py | 1 | 23863 | #!/usr/bin/python2.7
# Copyright 2014 by Andrew L. Blais.
# This program is distributed under the terms of the
# GNU General Public License version 3.
from constants import Range8, Egnar8, Egnar16, listToList, \
listToLists, listsToList, RangeMEM
from random import randint
from thread import start_new_thread
#from _thread import start_new_thread
#import threading
class model:
# ===== Load ===================================================================
def loadA(self):
self.setMessage("load A")
listToList(self.DATABUS, self.A)
self.paintView()
def loadB(self):
self.setMessage("load B")
listToList(self.DATABUS, self.B)
self.updateALU()
self.paintView()
def loadC(self):
self.setMessage("load C")
listToList(self.DATABUS, self.C)
self.updateALU()
self.paintView()
def loadD(self):
self.setMessage("load D")
listToList(self.DATABUS, self.D)
self.paintView()
def loadM1(self):
self.setMessage("load M1")
listToList(self.DATABUS, self.M1)
self.paintView()
def loadM2(self):
self.setMessage("load M2")
listToList(self.DATABUS, self.M2)
self.paintView()
def loadX(self):
self.setMessage("load X")
listToList(self.DATABUS, self.X)
self.paintView()
def loadY(self):
self.setMessage("load Y")
listToList(self.DATABUS, self.Y)
self.paintView()
def loadJ1(self):
self.setMessage("load J1")
listToList(self.DATABUS, self.J1)
self.paintView()
def loadJ2(self):
self.setMessage("load J2")
listToList(self.DATABUS, self.J2)
self.paintView()
def loadInst(self):
self.setMessage("load Inst")
listToList(self.DATABUS, self.Inst)
self.paintView()
def loadXY(self):
self.setMessage("load XY")
listToLists(self.X, self.Y, self.ADDRESSBUS)
self.paintView()
def loadPC(self):
self.setMessage("load PC")
listToLists(self.PC1, self.PC2, self.ADDRESSBUS)
self.paintView()
def loadINC(self):
self.setMessage("load INC")
listToList(self.IncUnit1, self.Inc1)
listToList(self.IncUnit2, self.Inc2)
self.paintView()
# ===== Select =================================================================
def selectA(self):
self.setMessage("select A")
listToList(self.A, self.DATABUS)
self.paintView()
def selectB(self):
self.setMessage("select B")
listToList(self.B, self.DATABUS)
self.paintView()
def selectC(self):
self.setMessage("select C")
listToList(self.C, self.DATABUS)
self.paintView()
def selectD(self):
self.setMessage("select D")
listToList(self.D, self.DATABUS)
self.paintView()
def selectM1(self):
self.setMessage("select M1")
listToList(self.M1, self.DATABUS)
self.paintView()
def selectM2(self):
self.setMessage("select M2")
listToList(self.M2, self.DATABUS)
self.paintView()
def selectX(self):
self.setMessage("select X")
listToList(self.X, self.DATABUS)
self.paintView()
def selectY(self):
self.setMessage("select Y")
listToList(self.Y, self.DATABUS)
self.paintView()
def selectM(self):
self.setMessage("select M")
listsToList(self.M1, self.M2, self.ADDRESSBUS)
self.updateIncrUnit()
self.paintView()
def selectXY(self):
self.setMessage("select XY")
listsToList(self.X, self.Y, self.ADDRESSBUS)
self.updateIncrUnit()
self.paintView()
def selectJ(self):
self.setMessage("select J")
listsToList(self.J1, self.J2, self.ADDRESSBUS)
self.updateIncrUnit()
self.paintView()
def selectPC(self):
self.setMessage("select PC")
listsToList(self.PC1, self.PC2, self.ADDRESSBUS)
self.updateIncrUnit()
self.paintView()
def selectINC(self):
self.setMessage("select INC")
listsToList(self.Inc1, self.Inc2, self.ADDRESSBUS)
self.updateIncrUnit()
self.paintView()
# ===== ALU ====================================================================
def setFUNCTION(self, f):
self.oldFUNCTION = self.FUNCTION[:]
listToList(f, self.FUNCTION)
self.updateALU()
def updateF0(self):
listToList(self.FUNCTION, self.oldFUNCTION)
self.FUNCTION[0] = (0 if self.FUNCTION[0] == 1 else 1)
self.updateALU()
self.paintView()
def updateF1(self):
listToList(self.FUNCTION, self.oldFUNCTION)
self.FUNCTION[1] = (0 if self.FUNCTION[1] == 1 else 1)
self.updateALU()
self.paintView()
def updateF2(self):
listToList(self.FUNCTION, self.oldFUNCTION)
self.FUNCTION[2] = (0 if self.FUNCTION[2] == 1 else 1)
self.updateALU()
self.paintView()
# ===== Mathematics ============================================================
def getSum(self, k, b, c):
return int(((not k) and (not b) and c) or \
((not k) and b and (not c)) or \
(k and (not b) and (not c)) or \
(k and b and c))
def getCarry(self, k, b, c):
return int(((not k) and b and c ) or \
(k and (not b) and c) or \
(k and b and (not c)) or \
(k and b and c))
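    # Added note: getSum is the three-input XOR (the sum bit of a full adder)
    # and getCarry is the majority function (the carry-out), each written as a
    # sum of products over the carry-in k and the operand bits b and c.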
def addBandC(self):
self.ADDcarry = 0
for i in Egnar8:
b = self.B[i]
c = self.C[i]
self.ADD[i] = self.getSum(self.ADDcarry, b, c)
self.ADDcarry = self.getCarry(self.ADDcarry, b, c)
def incB(self):
self.INCcarry = 1
for i in Egnar8:
b = self.B[i]
self.INC[i] = self.getSum(self.INCcarry, b, 0)
self.INCcarry = self.getCarry(self.INCcarry, b, 0)
def shlB(self):
x = self.B[:]
x = x[1:] + [x[0]]
listToList(x, self.SHL)
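    # Illustrative worked example (added comment): registers hold big-endian
    # bit lists (index 0 is the MSB) and addBandC ripple-carries from the last
    # element; e.g. B = [0,0,0,0,0,0,1,1] (3) and C = [0,0,0,0,0,0,0,1] (1)
    # give ADD = [0,0,0,0,0,1,0,0] (4) with ADDcarry 0.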
# ===== Update =================================================================
def updateALU(self):
self.updateFunctions()
self.updateDatabus()
self.updateStates()
def updateFunctions(self):
self.addBandC()
self.incB()
self.shlB()
for i in Range8:
b = self.B[i]
c = self.C[i]
self.AND[i] = int(b and c)
self.OR[i] = int(b or c)
self.NOT[i] = (0 if b == 1 else 1)
self.XOR[i] = int(b ^ c)
def updateDatabus(self):
f = tuple(self.FUNCTION)
F = self.functionLabelsDictionary[f]
listToList(F, self.DATABUS)
# Sets DATABUS relative to current function
# as linked in functionLabelsDictionary.
def updateStates(self):
self.setCarryState()
self.setZeroState()
self.setSignState()
def setCarryState(self):
self.CARRY = int(self.ADDcarry == 1 or self.INCcarry == 1)
def setZeroState(self):
self.ZERO = int(self.DATABUS == [0,0,0,0,0,0,0,0])
def setSignState(self):
self.SIGN = int(self.DATABUS[0] == 1)
# ===== BUSES ==================================================================
def setADDRESSBUSpart(self, i):
self.ADDRESSBUS[i] = (1 if self.ADDRESSBUS[i] == 0 else 0)
self.updateIncrUnit()
self.paintView()
def setDATABUSwhole(self, x):
listToList(x, self.DATABUS)
def setDATABUSpart(self, i):
self.DATABUS[i] = (1 if self.DATABUS[i] == 0 else 0)
self.paintView()
# ===== Increment Unit =========================================================
def updateIncrUnit(self):
Cy = 1
x = [0]*16
for i in Egnar16:
A = self.ADDRESSBUS[i]
x[i] = self.getSum(Cy, A, 0)
Cy = self.getCarry(Cy, A, 0)
listToList(x[0:8], self.IncUnit1)
listToList(x[8:16], self.IncUnit2)
# ===== Memory =================================================================
def increment(self, A):
Cy = 1
R = [0] * len(A)
        # The least-significant bit is the last element of the list, so the
        # indices are reversed to run the ripple carry from the LSB upward.
L = list( range( len(A) ) )
L.reverse()
for i in L:
R[i] = self.getSum(Cy, A[i], 0)
Cy = self.getCarry(Cy, A[i], 0)
return R
def mkMemory(self):
A = [0]*15
R = {}
for unused_i in RangeMEM:
R.update({tuple(A) : [0,0,0,0,0,0,0,0]})
A = self.increment(A)
return R
def getMEMORY(self):
return self.MEMORY[tuple(self.MEMORYADDRESS)]
def addressbusToMemoryAddress(self):
listToList(self.ADDRESSBUS[1:], self.MEMORYADDRESS)
self.paintView()
self.setMessage("Address bus to memory address: BusToMem")
def readMemoryToDatabus(self):
listToList(self.MEMORY[tuple(self.MEMORYADDRESS)], self.DATABUS)
self.paintView()
self.setMessage("Write memory to databus: WRITE MEM")
def writeDatabusToMemory(self):
listToList(self.DATABUS, self.MEMORY[tuple(self.MEMORYADDRESS)])
self.paintView()
self.setMessage("Write databus to memory: READ MEM")
def CLEARMEM(self):
self.setMessage("Clear Memory: start")
A = [0]*15
for unused_i in RangeMEM:
listToList([0,0,0,0,0,0,0,0], self.MEMORY[tuple(A)])
A = self.increment(A)
self.paintView()
self.setMessage("Clear Memory: end")
def clearMemory(self):
start_new_thread( self.CLEARMEM, () )
def RANDMEM(self):
self.setMessage("Random Memory: start")
A = [0]*15
for unused_i in RangeMEM:
r = [ randint(0,1) for unused_i in range(8) ]
listToList(r, self.MEMORY[tuple(A)])
A = self.increment(A)
self.paintView()
self.setMessage("Random Memory: end")
def randomMemory(self):
start_new_thread( self.RANDMEM, () )
def loadPGMtoMEM(self, filename):
try:
pgmFile = open(filename, 'r')
for LINE in pgmFile:
LINE = LINE.split()
Address = [ int(i) for i in LINE[0]]
Code = [ int(i) for i in LINE[1]]
listToList(Code, self.MEMORY[tuple(Address[1:])])
pgmFile.close()
fn = filename.split('/')
self.setMessage("Loaded " + fn[len(fn) - 1] + " to MEMORY")
self.paintView()
except IOError:
self.setMessage("File IO Error")
# ===== CALLBACKS ==============================================================
def setPaintCallback(self, cb):
self.paintView = cb
def setMessageCallback(self, tcb):
self.setMessage = tcb
# ===== Fetch, Increment & Execute =============================================
def FETCH(self):
self.setMessage("<<< FETCH >>>")
self.selectPC()
self.addressbusToMemoryAddress()
self.readMemoryToDatabus()
self.loadInst()
def INCREMENT(self):
self.setMessage("<<< INCREMENT >>>")
self.loadINC()
self.selectINC()
self.loadPC()
def MOVfunction(self):
self.setMessage("MOVE")
if self.Inst[2:5] == self.Inst[5:8]:
self.setDATABUSwhole([0,0,0,0,0,0,0,0])
self.setMessage("D = S: set to [0,0,0,0,0,0,0,0]")
else:
self.setMessage("Moving stuff: ")
self.regSelectMap[tuple(self.Inst[5:8])]()
self.regLoadMap[tuple(self.Inst[2:5])]()
def SETABfunction(self):
self.setMessage("SETABfunction")
p = [1,1,1] if self.Inst[3] == 1 else [0,0,0]
        # Negative numbers are stored in two's complement, so the upper three
        # bits must replicate the sign bit (Inst[3]): all 1s for a negative
        # value, all 0s for zero or a positive value.
self.setDATABUSwhole(p + self.Inst[3:8])
if self.Inst[2] == 0:
self.loadA()
else:
self.loadB()
self.setMessage(str(p + self.Inst[3:8]))
def ALUfunction(self):
self.setMessage("ALU function: " + str(self.Inst[5:8]))
self.setFUNCTION(self.Inst[5:8])
if self.Inst[4] == 0:
self.loadA()
else:
self.loadD()
def LOADfunction(self):
self.setMessage("LOADfunction")
self.selectM()
self.addressbusToMemoryAddress()
self.readMemoryToDatabus()
if self.Inst[6:8] == [0,0]:
self.loadA()
else:
if self.Inst[6:8] == [0,1]:
self.loadB()
else:
if self.Inst[6:8] == [1,0]:
self.loadC()
else:
self.loadD()
def STOREfunction(self):
self.setMessage("STOREfunction")
self.selectM()
if self.Inst[6:8] == [0,0]:
self.selectA()
else:
if self.Inst[6:8] == [0,1]:
self.selectB()
else:
if self.Inst[6:8] == [1,0]:
self.selectC()
else:
self.selectD()
self.addressbusToMemoryAddress()
self.writeDatabusToMemory()
def RET_MOV16function(self):
self.setMessage("RETURN / MOVE 16 bits: " + str(self.Inst))
RUN = True
if self.Inst[5:7] == [1,1]:
self.setMessage("HALT ")
# Set PC to zero................................
listToList([0,0,0,0,0,0,0,0], self.PC1)
listToList([0,0,0,0,0,0,0,0], self.PC2)
RUN = False
else:
self.setMessage("MOV16")
if self.Inst[4] == 0: # d is XY
if self.Inst[5:7] == [0,0]:
self.selectM()
if self.Inst[5:7] == [0,1]: # What would Harry's machine do?
self.selectXY()
if self.Inst[5:7] == [1,0]:
self.selectJ()
self.loadXY()
else: # d is PC
if self.Inst[5:7] == [0,0]:
self.selectM()
if self.Inst[5:7] == [0,1]:
self.selectXY()
if self.Inst[5:7] == [1,0]:
self.selectJ()
self.loadPC()
return RUN
def INCfunction(self):
self.setMessage("INC: XY > XY + 1")
self.selectXY()
self.loadINC()
self.selectINC()
self.loadXY()
def SETMfunction(self):
self.setMessage("SETMfunction: Move next 16 bits to M")
self.addressbusToMemoryAddress()
self.readMemoryToDatabus()
self.loadM1()
self.loadINC()
self.selectINC()
self.addressbusToMemoryAddress()
self.readMemoryToDatabus()
self.loadM2()
self.loadINC()
self.selectINC()
self.loadPC()
def GOTOfunction(self):
self.setMessage("GOTOfunction: set address bus, PC, to next 16 bits")
self.addressbusToMemoryAddress()
self.readMemoryToDatabus()
self.loadJ1()
self.loadINC()
self.selectINC()
self.addressbusToMemoryAddress()
self.readMemoryToDatabus()
self.loadJ2()
self.selectJ()
self.loadPC()
def CALLfunction(self):
self.setMessage("CALLfunction: set address bus to next 16 bits & PC => XY")
# CALLfunction is like GOTOfunction except that the address of the next instruction
# after CALLfunction is saved in XY.
self.addressbusToMemoryAddress()
self.readMemoryToDatabus()
self.loadJ1()
self.loadINC()
self.selectINC()
self.addressbusToMemoryAddress()
self.readMemoryToDatabus()
self.loadJ2()
self.loadINC()
self.selectINC()
self.loadXY()
self.selectJ()
self.loadPC()
def BCfunction(self):
self.setMessage("Branch Conditionally")
C0 = (self.Inst[3] == 1) and (self.SIGN == 1)
C1 = (self.Inst[4] == 1) and (self.CARRY == 0)
C2 = (self.Inst[5] == 1) and (self.ZERO == 1)
C3 = (self.Inst[6] == 1) and (self.ZERO == 0)
c0 = " S1+ " if self.Inst[3] == 1 else " S1- "
c1 = "Cy0+ " if self.Inst[4] == 1 else "Cy0- "
c2 = " Z1+ " if self.Inst[5] == 1 else " Z1- "
c3 = " Z0+ " if self.Inst[6] == 1 else " Z0- "
a0 = "S=1" if self.SIGN == 1 else "S=0"
a1 = "Cy=0" if self.CARRY == 0 else "Cy=1"
a2 = "Z=1" if self.ZERO == 1 else "Z=0"
a3 = "Z=0" if self.ZERO == 0 else "Z=1"
m0 = c0 + " " + a0 + "\n"
m1 = c1 + " " + a1 + "\n"
m2 = c2 + " " + a2 + "\n"
m3 = c3 + " " + a3
M = m0 + m1 + m2 + m3
self.setMessage(M)
if C0 or C1 or C2 or C3:
self.setMessage("Branch")
self.addressbusToMemoryAddress()
self.readMemoryToDatabus()
self.loadJ1()
self.loadINC()
self.selectINC()
self.addressbusToMemoryAddress()
self.readMemoryToDatabus()
self.loadJ2()
self.selectJ()
self.loadPC()
else:
self.setMessage("No Branch")
self.loadINC()
self.selectINC()
self.loadINC()
self.selectINC()
self.loadPC()
def EXECUTE(self):
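        # Opcode summary (added comment, mirroring the branch structure below):
        #   00dddsss  MOV source sss -> dest ddd (d == s clears the data bus)
        #   01rnnnnn  SETAB: sign-extend nnnnn into A (r=0) or B (r=1)
        #   1000dfff  ALU function fff -> A (d=0) or D (d=1)
        #   1001x.rr  LOAD (x=0) / STORE (x=1) register rr via address M
        #   1010dss.  MOV16/RET: source ss (00 M, 01 XY, 10 J, 11 HALT) -> XY/PC
        #   1011....  INC: XY <- XY + 1
        #   110.....  SETM: next 16 bits -> M
        #   111..11c  GOTO (c=0) / CALL (c=1); other 111 codes branch on flags.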
self.setMessage("<<< EXECUTE >>>")
RUN = True
if self.Inst[0] == 0:
if self.Inst[1] == 0:
self.MOVfunction()
else:
self.SETABfunction()
else:
if self.Inst[1] == 0:
if self.Inst[2] == 0:
if self.Inst[3] == 0:
self.ALUfunction()
else:
if self.Inst[4] == 0:
self.LOADfunction()
else:
self.STOREfunction()
else:
if self.Inst[3] == 0:
RUN = self.RET_MOV16function()
else:
self.INCfunction()
else:
if self.Inst[2] == 0:
self.SETMfunction()
else:
if self.Inst[5:7] == [1,1]:
if self.Inst[7] == 0:
self.GOTOfunction()
else:
self.CALLfunction()
else:
self.BCfunction()
self.setMessage("*"*50)
return RUN
# ===== RUN & CONTROLS =========================================================
def step(self):
self.PAUSE = False
def pause(self):
while self.PAUSE == True:
pass
self.PAUSE = True
def noStep(self):
self.NOSTEP = True if self.NOSTEP == False else False
self.paintView()
def FetchIncrementExecute(self):
self.PAUSE = True
self.RUN = True
while self.RUN == True:
self.FETCH()
self.INCREMENT()
self.RUN = self.EXECUTE()
if self.RUN == True and self.NOSTEP == False:
self.pause() # Make time to inspect the machine....
self.setMessage("="*50)
def run(self):
start_new_thread( self.FetchIncrementExecute, () )
# self.T = threading.Thread(target = self.FetchIncrementExecute )
# TL = threading.Lock()
# TL.acquire()
# self.T.start()
# TL.release()
# ===== Initialization =========================================================
def __init__(self):
self.DATABUS = [0,0,0,0,0,0,0,0]
self.ADDRESSBUS = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
self.MEMORYADDRESS = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 ]
# Memory addresses are 15 bits, but the address
# bus is 16 bits. So, the highest bit is dropped.
self.MEMORY = self.mkMemory()
self.Inst = [0,0,0,0,0,0,0,0]
self.PGM = []
self.FUNCTION = [0,0,0]
self.oldFUNCTION = [0,0,0]
self.CARRY = 0
self.ADDcarry = 0
self.INCcarry = 0
self.SIGN = 0
self.ZERO = 0
self.PAUSE = True
self.NOSTEP = False
self.RUN = True
self.A = [0,0,0,0,0,0,0,0]
self.B = [0,0,0,0,0,0,0,0]
self.C = [0,0,0,0,0,0,0,0]
self.D = [0,0,0,0,0,0,0,0]
self.M1 = [0,0,0,0,0,0,0,0]
self.M2 = [0,0,0,0,0,0,0,0]
self.X = [0,0,0,0,0,0,0,0]
self.Y = [0,0,0,0,0,0,0,0]
self.J1 = [0,0,0,0,0,0,0,0]
self.J2 = [0,0,0,0,0,0,0,0]
# Program Counter
self.PC1 = [0,0,0,0,0,0,0,0]
self.PC2 = [0,0,0,0,0,0,0,0]
# Increment Unit
# This is always the address bus plus one.
self.IncUnit1 = [0,0,0,0,0,0,0,0]
self.IncUnit2 = [0,0,0,0,0,0,0,0]
# IncUnit is loaded into Inc
# Inc is selected onto the address bus
self.Inc1 = [0,0,0,0,0,0,0,0]
self.Inc2 = [0,0,0,0,0,0,0,0]
self.ADD = [0,0,0,0,0,0,0,0]
self.INC = [0,0,0,0,0,0,0,0]
self.AND = [0,0,0,0,0,0,0,0]
self.OR = [0,0,0,0,0,0,0,0]
self.XOR = [0,0,0,0,0,0,0,0]
self.NOT = [0,0,0,0,0,0,0,0]
self.SHL = [0,0,0,0,0,0,0,0]
self.CLR = [0,0,0,0,0,0,0,0]
# ===== Dictionaries ===========================================================
self.functionLabelsDictionary = { (0,0,0) : self.ADD, \
(0,0,1) : self.INC, \
(0,1,0) : self.AND, \
(0,1,1) : self.OR, \
(1,0,0) : self.XOR, \
(1,0,1) : self.NOT, \
(1,1,0) : self.SHL, \
(1,1,1) : self.CLR \
}
self.regLoadMap = { (0,0,0) : self.loadA, \
(0,0,1) : self.loadB, \
(0,1,0) : self.loadC, \
(0,1,1) : self.loadD, \
(1,0,0) : self.loadM1, \
(1,0,1) : self.loadM2, \
(1,1,0) : self.loadX, \
(1,1,1) : self.loadY
}
self.regSelectMap = { (0,0,0) : self.selectA, \
(0,0,1) : self.selectB, \
(0,1,0) : self.selectC, \
(0,1,1) : self.selectD, \
(1,0,0) : self.selectM1, \
(1,0,1) : self.selectM2, \
(1,1,0) : self.selectX, \
(1,1,1) : self.selectY
}
# ==============================================================================
self.updateALU()
self.updateIncrUnit()
# ===== END Initialization =====================================================
| gpl-3.0 | 2,728,928,531,485,198,300 | 30.112125 | 92 | 0.471315 | false |
windskyer/k_nova | nova_extension/compute/ibm/configuration_strategy_common.py | 1 | 3091 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Common utilities for configuration strategy."""
import httplib
import json
import re
def property_mapping_to_dict(cs_data):
"""Converts the property mapping in config strategy data from an
array like [ {'source': 'string'}, {'target': 'string'} ] to
a dict with { 'source-string': 'target-string', ... }.
"""
property_mapping_arr = cs_data['properties']['mapping']
property_mapping = {}
for mapping_obj in property_mapping_arr:
source = mapping_obj['source']
target = mapping_obj['target']
if source in property_mapping:
property_mapping[source].append(target)
else:
property_mapping[source] = [target]
return property_mapping
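# Illustrative sketch (added; not part of the original file): the conversion
# described above, using made-up source/target strings.
def _property_mapping_example():
    cs_data = {'properties': {'mapping': [
        {'source': 'hostname', 'target': '/meta/host'},
        {'source': 'hostname', 'target': '/meta/alias'},
    ]}}
    return property_mapping_to_dict(cs_data)
    # -> {'hostname': ['/meta/host', '/meta/alias']}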
class InvalidMapping(Exception):
pass
def parse_mapping(mapping_str):
mapping_re = re.compile('^(.*)=(.*)$')
m = mapping_re.match(mapping_str)
if not m:
raise InvalidMapping(mapping_str)
return (m.group(1), m.group(2))
def parse_mappings(mappings_strings):
if not mappings_strings:
return []
mappings = []
for mapping_str in mappings_strings:
(target, source) = parse_mapping(mapping_str)
mappings.append({'source': source, 'target': target})
return mappings
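# Illustrative sketch (added; not part of the original file): mapping strings
# are "target=source" pairs, target first.
def _parse_mappings_example():
    return parse_mappings(['/meta/host=hostname'])
    # -> [{'source': 'hostname', 'target': '/meta/host'}]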
def calc_common_metadata(type_str, metadata_template, mappings):
cs_properties = {'metadata_template': metadata_template,
'mapping': mappings}
cs_metadata = {'type': type_str, 'properties': cs_properties}
return cs_metadata
def set_image_metadata(config_strategy_data, image_id, auth_token_filename,
do_replace, hostname='localhost'):
config_strategy_json = json.dumps(config_strategy_data)
port = 9292
conn = httplib.HTTPConnection(hostname, port)
resource_path = '/v2/images/%s' % image_id
with open(auth_token_filename, 'r') as f:
auth_token = f.read().strip()
headers = {
'X-Auth-Token': auth_token,
'Content-Type': 'application/openstack-images-v2.0-json-patch'
}
operation = 'add' if not do_replace else 'replace'
set_config_strategy_op_data = {}
set_config_strategy_op_data[operation] = '/configuration_strategy'
set_config_strategy_op_data['value'] = config_strategy_json
data = [set_config_strategy_op_data]
data_str = json.dumps(data)
conn.request('PATCH', resource_path, body=data_str, headers=headers)
r1 = conn.getresponse()
return r1
| apache-2.0 | -7,789,197,377,235,513,000 | 28.160377 | 78 | 0.65254 | false |
fernandog/Medusa | medusa/providers/torrent/html/morethantv.py | 1 | 9151 | # coding=utf-8
"""Provider code for MoreThanTV."""
from __future__ import unicode_literals
import logging
import re
import time
from medusa import tv
from medusa.bs4_parser import BS4Parser
from medusa.helper.common import (
convert_size,
try_int,
)
from medusa.helper.exceptions import AuthException
from medusa.logger.adapters.style import BraceAdapter
from medusa.providers.torrent.torrent_provider import TorrentProvider
from requests.compat import urljoin
from requests.utils import dict_from_cookiejar
from six.moves.urllib_parse import parse_qs
log = BraceAdapter(logging.getLogger(__name__))
log.logger.addHandler(logging.NullHandler())
class MoreThanTVProvider(TorrentProvider):
"""MoreThanTV Torrent provider."""
def __init__(self):
"""Initialize the class."""
super(MoreThanTVProvider, self).__init__('MoreThanTV')
# Credentials
self.username = None
self.password = None
self._uid = None
self._hash = None
# URLs
self.url = 'https://www.morethan.tv/'
self.urls = {
'login': urljoin(self.url, 'login.php'),
'search': urljoin(self.url, 'torrents.php'),
}
# Proper Strings
self.proper_strings = ['PROPER', 'REPACK']
# Miscellaneous Options
# Torrent Stats
self.minseed = None
self.minleech = None
# Cache
self.cache = tv.Cache(self)
def search(self, search_strings, age=0, ep_obj=None, **kwargs):
"""Search a provider and parse the results.
:param search_strings: A dict with mode (key) and the search value (value)
:param age: Not used
:param ep_obj: Not used
:returns: A list of search results (structure)
"""
results = []
if not self.login():
return results
# Search Params
search_params = {
'tags_type': 1,
'order_by': 'time',
'order_way': 'desc',
'action': 'basic',
'group_results': 0,
'searchsubmit': 1,
'searchstr': '',
}
for mode in search_strings:
log.debug('Search mode: {0}', mode)
if mode == 'Season':
additional_strings = []
for search_string in search_strings[mode]:
additional_strings.append(re.sub(r'(.*)S0?', r'\1Season ', search_string))
search_strings[mode].extend(additional_strings)
for search_string in search_strings[mode]:
if mode != 'RSS':
log.debug('Search string: {search}',
{'search': search_string})
search_params['searchstr'] = search_string
response = self.session.get(self.urls['search'], params=search_params)
if not response or not response.text:
log.debug('No data returned from provider')
continue
results += self.parse(response.text, mode)
return results
def parse(self, data, mode):
"""
Parse search results for items.
:param data: The raw response from a search
:param mode: The current mode used to search, e.g. RSS
:return: A list of items found
"""
def process_column_header(td):
result = ''
if td.a and td.a.img:
result = td.a.img.get('title', td.a.get_text(strip=True))
if not result:
result = td.get_text(strip=True)
return result
items = []
with BS4Parser(data, 'html5lib') as html:
torrent_table = html.find('table', class_='torrent_table')
torrent_rows = torrent_table('tr') if torrent_table else []
# Continue only if at least one release is found
if len(torrent_rows) < 2:
log.debug('Data returned from provider does not contain any torrents')
return items
labels = [process_column_header(label) for label in torrent_rows[0]('td')]
# Skip column headers
for row in torrent_rows[1:]:
cells = row('td')
if len(cells) < len(labels):
continue
try:
# Skip if torrent has been nuked due to poor quality
if row.find('img', alt='Nuked'):
continue
title = row.find('a', title='View torrent').get_text(strip=True)
download_url = urljoin(self.url, row.find('span', title='Download').parent['href'])
if not all([title, download_url]):
continue
seeders = try_int(cells[labels.index('Seeders')].get_text(strip=True).replace(',', ''), 1)
leechers = try_int(cells[labels.index('Leechers')].get_text(strip=True).replace(',', ''))
# Filter unseeded torrent
if seeders < min(self.minseed, 1):
if mode != 'RSS':
log.debug("Discarding torrent because it doesn't meet the"
" minimum seeders: {0}. Seeders: {1}",
title, seeders)
continue
# If it's a season search, query the torrent's detail page.
if mode == 'Season':
title = self._parse_season(row, download_url, title)
torrent_size = cells[labels.index('Size')].get_text(strip=True)
size = convert_size(torrent_size) or -1
pubdate_raw = cells[labels.index('Time')].find('span')['title']
pubdate = self.parse_pubdate(pubdate_raw)
item = {
'title': title,
'link': download_url,
'size': size,
'seeders': seeders,
'leechers': leechers,
'pubdate': pubdate,
}
if mode != 'RSS':
log.debug('Found result: {0} with {1} seeders and {2} leechers',
title, seeders, leechers)
items.append(item)
except (AttributeError, TypeError, KeyError, ValueError, IndexError):
log.exception('Failed parsing provider.')
return items
def login(self):
"""Login method used for logging in before doing search and torrent downloads."""
if any(dict_from_cookiejar(self.session.cookies).values()):
return True
login_params = {
'username': self.username,
'password': self.password,
'keeplogged': '1',
'login': 'Log in',
}
response = self.session.post(self.urls['login'], data=login_params)
if not response or not response.text:
log.warning('Unable to connect to provider')
return False
if re.search('Your username or password was incorrect.', response.text):
log.warning('Invalid username or password. Check your settings')
return False
return True
def _check_auth(self):
if not self.username or not self.password:
raise AuthException('Your authentication credentials for {0} are missing,'
' check your config.'.format(self.name))
return True
def _parse_season(self, row, download_url, title):
"""Parse the torrent's detail page and return the season pack title."""
details_url = row.find('span').find_next(title='View torrent').get('href')
torrent_id = parse_qs(download_url).get('id')
if not all([details_url, torrent_id]):
log.debug("Couldn't parse season pack details page for title: {0}", title)
return title
# Take a break before querying the provider again
time.sleep(0.5)
response = self.session.get(urljoin(self.url, details_url))
if not response or not response.text:
log.debug("Couldn't open season pack details page for title: {0}", title)
return title
with BS4Parser(response.text, 'html5lib') as html:
torrent_table = html.find('table', class_='torrent_table')
torrent_row = torrent_table.find('tr', id='torrent_{0}'.format(torrent_id[0]))
if not torrent_row:
log.debug("Couldn't find season pack details for title: {0}", title)
return title
# Strip leading and trailing slash
season_title = torrent_row.find('div', class_='filelist_path')
if not season_title or not season_title.get_text():
log.debug("Couldn't parse season pack title for: {0}", title)
return title
return season_title.get_text(strip=True).strip('/')
provider = MoreThanTVProvider()
| gpl-3.0 | 2,425,226,955,275,046,400 | 34.607004 | 110 | 0.538083 | false |
igrowing/Orchids | orchid_app/utils/__init__.py | 1 | 2936 | import re
import time
import pushb
import cPickle
import hashlib
import sendmail
from functools import wraps
class Dict(dict):
"""Represent dictionary items as object attributes."""
def filter(self, *args):
return Dict((k, v) for k, v in self.items() if k in args)
def __getattr__(self, name):
if name in self.keys():
return self[name]
for key in self.keys():
if name == _namify(key):
return self[key]
return dict.__getattribute__(self, name)
def __setattr__(self, name, value):
self[name] = value
def _getAttributeNames(self, *args, **kwargs):
"""Support auto completion of keys in iPython."""
return map(_namify, self.keys())
def _namify(key):
return re.sub(r'[^\w]+', '_', key.lower()).strip('_')
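# Illustrative sketch (added; not part of the original file): keys are exposed
# as attributes, with non-identifier characters normalised by _namify.
def _dict_example():
    d = Dict({'Max Temp': 31, 'state': 'ok'})
    return d.max_temp, d.state, d.filter('state')
    # -> (31, 'ok', {'state': 'ok'})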
def dictify(obj, _dict=Dict, _list=list):
if hasattr(obj, '_dictify'):
obj = obj._dictify()
if isinstance(obj, dict):
return _dict((k, dictify(v, _dict, _list)) for k, v in obj.items())
elif hasattr(obj, '__iter__'):
return _list(dictify(v, _dict, _list) for v in obj)
return obj
def as_key(obj):
try:
hash(obj)
return obj
except:
return hashlib.md5(cPickle.dumps(obj)).hexdigest()
def memoize(keep=True, cache=None):
    '''Decorator: cache function results in memory, optionally for a limited time.
    :param keep: Boolean or number. A boolean keeps or discards cached results;
        a number is the time in seconds to keep a cached result before it is discarded.
    :param cache: dict used as the cache. Pass separate dict objects to keep caches
        for similar functions from different places apart.
'''
if cache is None:
cache = {}
INF = -1
def _memoize0(func):
@wraps(func)
def _memoize1(*args, **kwargs):
refresh = dict.pop(kwargs, '_refresh', False)
timeout = dict.pop(kwargs, '_memoize', keep)
timeout = INF if timeout is True else int(timeout)
# Get the key name
key = as_key((func, args, tuple(kwargs.items())))
if refresh:
cache.pop(key, None)
if timeout and key in cache:
t0, v = cache.get(key)
if t0 == INF or t0 >= time.time():
return v
value = func(*args, **kwargs)
if not timeout:
cache.pop(key, None)
return value
t0 = INF if timeout == INF else time.time() + timeout
cache[key] = (t0, value)
return value
return _memoize1
return _memoize0
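# Illustrative sketch (added; not part of the original file): cache results of
# an expensive call for 30 seconds; _refresh=True at a call site forces
# recomputation and _memoize=0 bypasses the cache for that single call.
@memoize(keep=30)
def _expensive_lookup(x):
    return x * x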
def flatten_dict(init_dict):
res_dict = {}
if type(init_dict) is not dict:
return res_dict
for k, v in init_dict.iteritems():
if type(v) == dict:
res_dict.update(flatten_dict(v))
else:
res_dict[k] = v
return res_dict
| mit | 6,515,311,847,145,798,000 | 27.504854 | 122 | 0.560627 | false |
LamCiuLoeng/fd | rpac/util/layout_pdf.py | 1 | 4038 | # -*- coding: utf-8 -*-
import traceback
import os
import random
# import json
import subprocess
import zipfile
import zlib
from datetime import datetime as dt
from tg import request, config
from rpac.model import *
__all__ = [
'gen_pdf',
'null_string_sizes',
'format_fibers',
'format_cares',
'format_coo',
'format_list']
CARES = [
"WASH",
"BLEACH",
"IRON",
"DRY",
"DRYCLEAN",
"SPECIALCARE"
]
def gen_pdf(header_no, details):
try:
public_dir = config.get( 'public_dir' )
download_dir = os.path.join( public_dir, 'layout_pdf' )
if not os.path.exists( download_dir ):
os.makedirs( download_dir )
phantomjs = os.path.join( public_dir, 'phantomjs', 'phantomjs.exe' )
labeljs = os.path.join( public_dir, 'phantomjs', 'pdf.js' )
pdfs = []
for detail_id, item_code in details:
http_url = 'http://%s/pdflayout/index?id=%s' % (request.headers.get( 'Host' ), detail_id)
_name = '%s_%s%d' % (trim(item_code), dt.now().strftime( "%Y%m%d%H%M%S" ), random.randint( 1, 1000 ) )
pdf_file = os.path.join( download_dir, '%s.pdf' % _name )
cmd = '%s %s %s %s' % (phantomjs, labeljs, http_url, pdf_file)
# print cmd
sp = subprocess.Popen(cmd, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
while 1:
if sp.poll() is not None:
#print 'exec command completed.'
break
# else:
# line = sp.stdout.readline().strip()
pdfs.append(pdf_file)
pd_zip_file = os.path.join( download_dir, "%s_pdf_%s%d.zip" % (trim(header_no), dt.now().strftime( "%Y%m%d%H%M%S" ), random.randint( 1, 1000 ) ) )
create_zip(pd_zip_file, pdfs)
remove_files(pdfs)
return pd_zip_file
except:
traceback.print_exc()
return None
def create_zip(zipf, files):
_zip = zipfile.ZipFile(zipf, 'w', zlib.DEFLATED)
for f in files:
if os.path.exists(f):
_zip.write(os.path.abspath(f), os.path.basename(f))
_zip.close()
return zipf
def remove_files(files):
for f in files:
remove_file(f)
def remove_file(file):
try:
os.remove(file)
except:
pass
def trim(s):
return ''.join(s.split())
def null_string_sizes(data):
null_list = data.get('SIZE', {'values': []})['values']
if not null_list:
return ['']
return null_list
def format_fibers(data, capitalize=False):
fibers = {
'en': [],
'sp': []
}
for ff in data['FIBERS']['values']:
if ff:
if capitalize:
fibers['en'].append('%s%% %s' % (ff['percent'], ff['english'].lower().capitalize()))
fibers['sp'].append('%s%% %s' % (ff['percent'], ff['spanish'].lower().capitalize()))
else:
fibers['en'].append('%s%% %s' % (ff['percent'], ff['english']))
fibers['sp'].append('%s%% %s' % (ff['percent'], ff['spanish']))
# print fibers
return fibers
def format_cares(data):
cares = {
'en': [],
'sp': []
}
for cs in CARES:
cc = data.get(cs, {'values': []})
for c in cc['values']:
# print '****', c
cares['en'].append(c['english'])
cares['sp'].append(c['spanish'])
return cares
def format_coo(data):
coos = {
'en': [],
'sp': []
}
for coo in data['CO']['values']:
coos['en'].append(coo['english'])
coos['sp'].append(coo['spanish'])
return coos
def format_list(ll, method=None, s=''):
if method:
return s.join([getattr(l, method)() for l in ll if l])
return s.join(ll)
def format_list2(ll):
return [l.lower().capitalize() for l in ll if l]
def format_price(data):
try:
price = '$%.2f' % float(data['PRICE']['values'][0])
return price
except:
return '$0.00'
| mit | 5,333,141,877,359,527,000 | 22.614035 | 154 | 0.521545 | false |
isstiaung/Adimal | adimal/twitter_feed/get_metadata.py | 1 | 2472 | from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
import requests
import urllib
from newspaper import Article
import string
from . import config
from sumy.parsers.html import HtmlParser
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lsa import LsaSummarizer as Summarizer
from sumy.nlp.stemmers import Stemmer
from sumy.utils import get_stop_words
from newspaper import Article
high_level_mapper = {"Computers":"computer","Arts" : "art", "Business" : "business" , "Games" : "game" , "Health": "health","Home":"home","Recreation" : "recreation","Science" : "science" , "Society":"society", "Sports" : "sport"}
low_level_mapper = {}
LANGUAGE = "english"
SENTENCES_COUNT = 3
def get_topics_and_category(link):
print("link is" , link)
text =get_article_text(link)
print("got article text")
topics=get_both_topics(text)
summary=get_article_summary_using_link(link)
result={}
result['topics']=topics
result['summary']=summary
return result
def get_article_text(link):
article = Article(link)
print("set article")
article.download()
print("downloaded article")
article.parse()
print("parsed article")
text=article.text.encode("ascii","ignore")
text=string.replace(text,"\n","")
text=string.replace(text,"*","")
print("replaced text")
return text
def get_both_topics(text):
result = {}
key=config.uclassify_token
dict = {"text":text,"readKey":key}
data=urllib.urlencode(dict)
high_level_topic=requests.get("https://api.uclassify.com/v1/uclassify/topics/classify",params=data)
response = eval(high_level_topic.text)
high_level = max(response.iterkeys(),key=(lambda key : response[key]))
result['high_level'] = high_level
url_to_call = high_level_mapper.get(high_level)
low_level_topic=requests.get("https://api.uclassify.com/v1/uclassify/" + url_to_call + "-topics/classify",params=data)
response=eval(low_level_topic.text)
low_level = max(response.iterkeys(),key=(lambda key : response[key]))
result['low_level']=low_level
return result
def get_article_summary_using_link(link):
stemmer = Stemmer(LANGUAGE)
parser = HtmlParser.from_url(link, Tokenizer(LANGUAGE))
summarizer = Summarizer(stemmer)
summarizer.stop_words = get_stop_words(LANGUAGE)
summary = ""
for sentence in summarizer(parser.document, SENTENCES_COUNT):
summary = summary + ": " + str(sentence).decode('ascii','ignore') + "\n"
return summary | mit | -8,905,665,559,059,703,000 | 33.830986 | 230 | 0.737864 | false |
maheshp/novatest | nova/api/openstack/compute/contrib/volumes.py | 1 | 22325 | # Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volumes extension."""
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova import exception
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
from nova import utils
from nova import volume
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'volumes')
authorize_attach = extensions.extension_authorizer('compute',
'volume_attachments')
def _translate_volume_detail_view(context, vol):
"""Maps keys for volumes details view."""
d = _translate_volume_summary_view(context, vol)
# No additional data / lookups at the moment
return d
def _translate_volume_summary_view(context, vol):
"""Maps keys for volumes summary view."""
d = {}
d['id'] = vol['id']
d['status'] = vol['status']
d['size'] = vol['size']
d['availabilityZone'] = vol['availability_zone']
d['createdAt'] = vol['created_at']
if vol['attach_status'] == 'attached':
d['attachments'] = [_translate_attachment_detail_view(vol['id'],
vol['instance_uuid'],
vol['mountpoint'])]
else:
d['attachments'] = [{}]
d['displayName'] = vol['display_name']
d['displayDescription'] = vol['display_description']
if vol['volume_type_id'] and vol.get('volume_type'):
d['volumeType'] = vol['volume_type']['name']
else:
d['volumeType'] = vol['volume_type_id']
d['snapshotId'] = vol['snapshot_id']
LOG.audit(_("vol=%s"), vol, context=context)
if vol.get('volume_metadata'):
metadata = vol.get('volume_metadata')
d['metadata'] = dict((item['key'], item['value']) for item in metadata)
else:
d['metadata'] = {}
return d
def make_volume(elem):
elem.set('id')
elem.set('status')
elem.set('size')
elem.set('availabilityZone')
elem.set('createdAt')
elem.set('displayName')
elem.set('displayDescription')
elem.set('volumeType')
elem.set('snapshotId')
attachments = xmlutil.SubTemplateElement(elem, 'attachments')
attachment = xmlutil.SubTemplateElement(attachments, 'attachment',
selector='attachments')
make_attachment(attachment)
# Attach metadata node
elem.append(common.MetadataTemplate())
class VolumeTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('volume', selector='volume')
make_volume(root)
return xmlutil.MasterTemplate(root, 1)
class VolumesTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('volumes')
elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes')
make_volume(elem)
return xmlutil.MasterTemplate(root, 1)
class CommonDeserializer(wsgi.MetadataXMLDeserializer):
"""Common deserializer to handle xml-formatted volume requests.
Handles standard volume attributes as well as the optional metadata
attribute
"""
metadata_deserializer = common.MetadataXMLDeserializer()
def _extract_volume(self, node):
"""Marshal the volume attribute of a parsed request."""
vol = {}
volume_node = self.find_first_child_named(node, 'volume')
attributes = ['display_name', 'display_description', 'size',
'volume_type', 'availability_zone']
for attr in attributes:
if volume_node.getAttribute(attr):
vol[attr] = volume_node.getAttribute(attr)
metadata_node = self.find_first_child_named(volume_node, 'metadata')
if metadata_node is not None:
vol['metadata'] = self.extract_metadata(metadata_node)
return vol
class CreateDeserializer(CommonDeserializer):
"""Deserializer to handle xml-formatted create volume requests.
Handles standard volume attributes as well as the optional metadata
attribute
"""
def default(self, string):
"""Deserialize an xml-formatted volume create request."""
dom = xmlutil.safe_minidom_parse_string(string)
vol = self._extract_volume(dom)
return {'body': {'volume': vol}}
class VolumeController(wsgi.Controller):
"""The Volumes API controller for the OpenStack API."""
def __init__(self):
self.volume_api = volume.API()
super(VolumeController, self).__init__()
@wsgi.serializers(xml=VolumeTemplate)
def show(self, req, id):
"""Return data about the given volume."""
context = req.environ['nova.context']
authorize(context)
try:
vol = self.volume_api.get(context, id)
except exception.NotFound:
raise exc.HTTPNotFound()
return {'volume': _translate_volume_detail_view(context, vol)}
def delete(self, req, id):
"""Delete a volume."""
context = req.environ['nova.context']
authorize(context)
LOG.audit(_("Delete volume with id: %s"), id, context=context)
try:
vol = self.volume_api.get(context, id)
self.volume_api.delete(context, vol)
except exception.NotFound:
raise exc.HTTPNotFound()
return webob.Response(status_int=202)
@wsgi.serializers(xml=VolumesTemplate)
def index(self, req):
"""Returns a summary list of volumes."""
return self._items(req, entity_maker=_translate_volume_summary_view)
@wsgi.serializers(xml=VolumesTemplate)
def detail(self, req):
"""Returns a detailed list of volumes."""
return self._items(req, entity_maker=_translate_volume_detail_view)
def _items(self, req, entity_maker):
"""Returns a list of volumes, transformed through entity_maker."""
context = req.environ['nova.context']
authorize(context)
volumes = self.volume_api.get_all(context)
limited_list = common.limited(volumes, req)
res = [entity_maker(context, vol) for vol in limited_list]
return {'volumes': res}
@wsgi.serializers(xml=VolumeTemplate)
@wsgi.deserializers(xml=CreateDeserializer)
def create(self, req, body):
"""Creates a new volume."""
context = req.environ['nova.context']
authorize(context)
if not self.is_valid_body(body, 'volume'):
raise exc.HTTPUnprocessableEntity()
vol = body['volume']
vol_type = vol.get('volume_type', None)
metadata = vol.get('metadata', None)
snapshot_id = vol.get('snapshot_id')
if snapshot_id is not None:
snapshot = self.volume_api.get_snapshot(context, snapshot_id)
else:
snapshot = None
size = vol.get('size', None)
if size is None and snapshot is not None:
size = snapshot['volume_size']
LOG.audit(_("Create volume of %s GB"), size, context=context)
availability_zone = vol.get('availability_zone', None)
new_volume = self.volume_api.create(context,
size,
vol.get('display_name'),
vol.get('display_description'),
snapshot=snapshot,
volume_type=vol_type,
metadata=metadata,
availability_zone=availability_zone
)
# TODO(vish): Instance should be None at db layer instead of
# trying to lazy load, but for now we turn it into
# a dict to avoid an error.
retval = _translate_volume_detail_view(context, dict(new_volume))
result = {'volume': retval}
location = '%s/%s' % (req.url, new_volume['id'])
return wsgi.ResponseObject(result, headers=dict(location=location))
def _translate_attachment_detail_view(volume_id, instance_uuid, mountpoint):
"""Maps keys for attachment details view."""
d = _translate_attachment_summary_view(volume_id,
instance_uuid,
mountpoint)
# No additional data / lookups at the moment
return d
def _translate_attachment_summary_view(volume_id, instance_uuid, mountpoint):
"""Maps keys for attachment summary view."""
d = {}
# NOTE(justinsb): We use the volume id as the id of the attachment object
d['id'] = volume_id
d['volumeId'] = volume_id
d['serverId'] = instance_uuid
if mountpoint:
d['device'] = mountpoint
return d
def make_attachment(elem):
elem.set('id')
elem.set('serverId')
elem.set('volumeId')
elem.set('device')
class VolumeAttachmentTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('volumeAttachment',
selector='volumeAttachment')
make_attachment(root)
return xmlutil.MasterTemplate(root, 1)
class VolumeAttachmentsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('volumeAttachments')
elem = xmlutil.SubTemplateElement(root, 'volumeAttachment',
selector='volumeAttachments')
make_attachment(elem)
return xmlutil.MasterTemplate(root, 1)
class VolumeAttachmentController(wsgi.Controller):
"""The volume attachment API controller for the OpenStack API.
A child resource of the server. Note that we use the volume id
as the ID of the attachment (though this is not guaranteed externally)
"""
def __init__(self):
self.compute_api = compute.API()
super(VolumeAttachmentController, self).__init__()
@wsgi.serializers(xml=VolumeAttachmentsTemplate)
def index(self, req, server_id):
"""Returns the list of volume attachments for a given instance."""
context = req.environ['nova.context']
authorize_attach(context, action='index')
return self._items(req, server_id,
entity_maker=_translate_attachment_summary_view)
@wsgi.serializers(xml=VolumeAttachmentTemplate)
def show(self, req, server_id, id):
"""Return data about the given volume attachment."""
context = req.environ['nova.context']
authorize(context)
authorize_attach(context, action='show')
volume_id = id
try:
instance = self.compute_api.get(context, server_id)
except exception.NotFound:
raise exc.HTTPNotFound()
bdms = self.compute_api.get_instance_bdms(context, instance)
if not bdms:
LOG.debug(_("Instance %s is not attached."), server_id)
raise exc.HTTPNotFound()
assigned_mountpoint = None
for bdm in bdms:
if bdm['volume_id'] == volume_id:
assigned_mountpoint = bdm['device_name']
break
if assigned_mountpoint is None:
LOG.debug("volume_id not found")
raise exc.HTTPNotFound()
return {'volumeAttachment': _translate_attachment_detail_view(
volume_id,
instance['uuid'],
assigned_mountpoint)}
def _validate_volume_id(self, volume_id):
if not uuidutils.is_uuid_like(volume_id):
msg = _("Bad volumeId format: volumeId is "
"not in proper format (%s)") % volume_id
raise exc.HTTPBadRequest(explanation=msg)
@wsgi.serializers(xml=VolumeAttachmentTemplate)
def create(self, req, server_id, body):
"""Attach a volume to an instance."""
context = req.environ['nova.context']
authorize(context)
authorize_attach(context, action='create')
if not self.is_valid_body(body, 'volumeAttachment'):
raise exc.HTTPUnprocessableEntity()
volume_id = body['volumeAttachment']['volumeId']
device = body['volumeAttachment'].get('device')
self._validate_volume_id(volume_id)
msg = _("Attach volume %(volume_id)s to instance %(server_id)s"
" at %(device)s") % locals()
LOG.audit(msg, context=context)
try:
instance = self.compute_api.get(context, server_id)
device = self.compute_api.attach_volume(context, instance,
volume_id, device)
except exception.NotFound:
raise exc.HTTPNotFound()
# The attach is async
attachment = {}
attachment['id'] = volume_id
attachment['serverId'] = server_id
attachment['volumeId'] = volume_id
attachment['device'] = device
# NOTE(justinsb): And now, we have a problem...
# The attach is async, so there's a window in which we don't see
# the attachment (until the attachment completes). We could also
# get problems with concurrent requests. I think we need an
# attachment state, and to write to the DB here, but that's a bigger
# change.
# For now, we'll probably have to rely on libraries being smart
# TODO(justinsb): How do I return "accepted" here?
return {'volumeAttachment': attachment}
def update(self, req, server_id, id, body):
"""Update a volume attachment. We don't currently support this."""
raise exc.HTTPBadRequest()
def delete(self, req, server_id, id):
"""Detach a volume from an instance."""
context = req.environ['nova.context']
authorize(context)
authorize_attach(context, action='delete')
volume_id = id
LOG.audit(_("Detach volume %s"), volume_id, context=context)
try:
instance = self.compute_api.get(context, server_id)
except exception.NotFound:
raise exc.HTTPNotFound()
bdms = self.compute_api.get_instance_bdms(context, instance)
if not bdms:
LOG.debug(_("Instance %s is not attached."), server_id)
raise exc.HTTPNotFound()
found = False
for bdm in bdms:
if bdm['volume_id'] == volume_id:
self.compute_api.detach_volume(context,
volume_id=volume_id)
found = True
break
if not found:
raise exc.HTTPNotFound()
else:
return webob.Response(status_int=202)
def _items(self, req, server_id, entity_maker):
"""Returns a list of attachments, transformed through entity_maker."""
context = req.environ['nova.context']
authorize(context)
try:
instance = self.compute_api.get(context, server_id)
except exception.NotFound:
raise exc.HTTPNotFound()
bdms = self.compute_api.get_instance_bdms(context, instance)
limited_list = common.limited(bdms, req)
results = []
for bdm in limited_list:
if bdm['volume_id']:
results.append(entity_maker(bdm['volume_id'],
bdm['instance_uuid'],
bdm['device_name']))
return {'volumeAttachments': results}
def _translate_snapshot_detail_view(context, vol):
"""Maps keys for snapshots details view."""
d = _translate_snapshot_summary_view(context, vol)
# NOTE(gagupta): No additional data / lookups at the moment
return d
def _translate_snapshot_summary_view(context, vol):
"""Maps keys for snapshots summary view."""
d = {}
d['id'] = vol['id']
d['volumeId'] = vol['volume_id']
d['status'] = vol['status']
# NOTE(gagupta): We map volume_size as the snapshot size
d['size'] = vol['volume_size']
d['createdAt'] = vol['created_at']
d['displayName'] = vol['display_name']
d['displayDescription'] = vol['display_description']
return d
def make_snapshot(elem):
elem.set('id')
elem.set('status')
elem.set('size')
elem.set('createdAt')
elem.set('displayName')
elem.set('displayDescription')
elem.set('volumeId')
class SnapshotTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('snapshot', selector='snapshot')
make_snapshot(root)
return xmlutil.MasterTemplate(root, 1)
class SnapshotsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('snapshots')
elem = xmlutil.SubTemplateElement(root, 'snapshot',
selector='snapshots')
make_snapshot(elem)
return xmlutil.MasterTemplate(root, 1)
class SnapshotController(wsgi.Controller):
"""The Volumes API controller for the OpenStack API."""
def __init__(self):
self.volume_api = volume.API()
super(SnapshotController, self).__init__()
@wsgi.serializers(xml=SnapshotTemplate)
def show(self, req, id):
"""Return data about the given snapshot."""
context = req.environ['nova.context']
authorize(context)
try:
vol = self.volume_api.get_snapshot(context, id)
except exception.NotFound:
return exc.HTTPNotFound()
return {'snapshot': _translate_snapshot_detail_view(context, vol)}
def delete(self, req, id):
"""Delete a snapshot."""
context = req.environ['nova.context']
authorize(context)
LOG.audit(_("Delete snapshot with id: %s"), id, context=context)
try:
snapshot = self.volume_api.get_snapshot(context, id)
self.volume_api.delete_snapshot(context, snapshot)
except exception.NotFound:
return exc.HTTPNotFound()
return webob.Response(status_int=202)
@wsgi.serializers(xml=SnapshotsTemplate)
def index(self, req):
"""Returns a summary list of snapshots."""
return self._items(req, entity_maker=_translate_snapshot_summary_view)
@wsgi.serializers(xml=SnapshotsTemplate)
def detail(self, req):
"""Returns a detailed list of snapshots."""
return self._items(req, entity_maker=_translate_snapshot_detail_view)
def _items(self, req, entity_maker):
"""Returns a list of snapshots, transformed through entity_maker."""
context = req.environ['nova.context']
authorize(context)
snapshots = self.volume_api.get_all_snapshots(context)
limited_list = common.limited(snapshots, req)
res = [entity_maker(context, snapshot) for snapshot in limited_list]
return {'snapshots': res}
@wsgi.serializers(xml=SnapshotTemplate)
def create(self, req, body):
"""Creates a new snapshot."""
context = req.environ['nova.context']
authorize(context)
if not self.is_valid_body(body, 'snapshot'):
raise exc.HTTPUnprocessableEntity()
snapshot = body['snapshot']
volume_id = snapshot['volume_id']
vol = self.volume_api.get(context, volume_id)
force = snapshot.get('force', False)
LOG.audit(_("Create snapshot from volume %s"), volume_id,
context=context)
if not utils.is_valid_boolstr(force):
msg = _("Invalid value '%s' for force. ") % force
raise exception.InvalidParameterValue(err=msg)
if utils.bool_from_str(force):
new_snapshot = self.volume_api.create_snapshot_force(context,
vol,
snapshot.get('display_name'),
snapshot.get('display_description'))
else:
new_snapshot = self.volume_api.create_snapshot(context,
vol,
snapshot.get('display_name'),
snapshot.get('display_description'))
retval = _translate_snapshot_detail_view(context, new_snapshot)
return {'snapshot': retval}
class Volumes(extensions.ExtensionDescriptor):
"""Volumes support."""
name = "Volumes"
alias = "os-volumes"
namespace = "http://docs.openstack.org/compute/ext/volumes/api/v1.1"
updated = "2011-03-25T00:00:00+00:00"
def get_resources(self):
resources = []
# NOTE(justinsb): No way to provide singular name ('volume')
# Does this matter?
res = extensions.ResourceExtension('os-volumes',
VolumeController(),
collection_actions={'detail': 'GET'})
resources.append(res)
res = extensions.ResourceExtension('os-volume_attachments',
VolumeAttachmentController(),
parent=dict(
member_name='server',
collection_name='servers'))
resources.append(res)
res = extensions.ResourceExtension('os-volumes_boot',
inherits='servers')
resources.append(res)
res = extensions.ResourceExtension('os-snapshots',
SnapshotController(),
collection_actions={'detail': 'GET'})
resources.append(res)
return resources
| apache-2.0 | 5,517,570,708,372,171,000 | 32.877086 | 79 | 0.598253 | false |
PowerDNS/exabgp | lib/exabgp/reactor/__init__.py | 1 | 13895 | # encoding: utf-8
"""
reactor.py
Created by Thomas Mangin on 2012-06-10.
Copyright (c) 2009-2013 Exa Networks. All rights reserved.
"""
import os
import re
import sys
import time
import signal
import select
from collections import deque
from exabgp.reactor.daemon import Daemon
from exabgp.reactor.listener import Listener
from exabgp.reactor.listener import NetworkError
from exabgp.reactor.api.processes import Processes
from exabgp.reactor.api.processes import ProcessError
from exabgp.reactor.peer import Peer
from exabgp.reactor.peer import ACTION
from exabgp.reactor.network.error import error
from exabgp.reactor.api.decoding import Decoder
from exabgp.configuration.ancient import Configuration
from exabgp.configuration.environment import environment
from exabgp.version import version
from exabgp.logger import Logger
class Reactor (object):
# [hex(ord(c)) for c in os.popen('clear').read()]
clear = ''.join([chr(int(c,16)) for c in ['0x1b', '0x5b', '0x48', '0x1b', '0x5b', '0x32', '0x4a']])
def __init__ (self,configurations):
self.ip = environment.settings().tcp.bind
self.port = environment.settings().tcp.port
self.respawn = environment.settings().api.respawn
self.max_loop_time = environment.settings().reactor.speed
self.logger = Logger()
self.daemon = Daemon(self)
self.processes = None
self.listener = None
self.configuration = Configuration(configurations)
self.decoder = Decoder()
self.peers = {}
self.route_update = False
self._shutdown = False
self._reload = False
self._reload_processes = False
self._restart = False
self._saved_pid = False
self._pending = deque()
self._running = None
signal.signal(signal.SIGTERM, self.sigterm)
signal.signal(signal.SIGHUP, self.sighup)
signal.signal(signal.SIGALRM, self.sigalrm)
signal.signal(signal.SIGUSR1, self.sigusr1)
signal.signal(signal.SIGUSR2, self.sigusr2)
def sigterm (self,signum, frame):
self.logger.reactor("SIG TERM received - shutdown")
self._shutdown = True
def sighup (self,signum, frame):
self.logger.reactor("SIG HUP received - shutdown")
self._shutdown = True
def sigalrm (self,signum, frame):
self.logger.reactor("SIG ALRM received - restart")
self._restart = True
def sigusr1 (self,signum, frame):
self.logger.reactor("SIG USR1 received - reload configuration")
self._reload = True
def sigusr2 (self,signum, frame):
self.logger.reactor("SIG USR2 received - reload configuration and processes")
self._reload = True
self._reload_processes = True
def ready (self,ios,sleeptime=0):
		# never sleep a negative number of seconds (if the rounding is negative somewhere)
# never sleep more than one second (should the clock time change during two time.time calls)
sleeptime = min(max(0.0,sleeptime),1.0)
if not ios:
time.sleep(sleeptime)
return []
try:
read,_,_ = select.select(ios,[],[],sleeptime)
return read
except select.error,e:
errno,message = e.args
if not errno in error.block:
raise e
return []
def run (self):
if self.ip:
try:
self.listener = Listener([self.ip,],self.port)
self.listener.start()
except NetworkError,e:
self.listener = None
if os.geteuid() != 0 and self.port <= 1024:
self.logger.reactor("Can not bind to %s:%d, you may need to run ExaBGP as root" % (self.ip,self.port),'critical')
else:
self.logger.reactor("Can not bind to %s:%d (%s)" % (self.ip,self.port,str(e)),'critical')
self.logger.reactor("unset exabgp.tcp.bind if you do not want listen for incoming connections",'critical')
self.logger.reactor("and check that no other daemon is already binding to port %d" % self.port,'critical')
sys.exit(1)
self.logger.reactor("Listening for BGP session(s) on %s:%d" % (self.ip,self.port))
if not self.daemon.drop_privileges():
self.logger.reactor("Could not drop privileges to '%s' refusing to run as root" % self.daemon.user,'critical')
self.logger.reactor("Set the environmemnt value exabgp.daemon.user to change the unprivileged user",'critical')
return
# This is required to make sure we can write in the log location as we now have dropped root privileges
if not self.logger.restart():
self.logger.reactor("Could not setup the logger, aborting",'critical')
return
self.daemon.daemonise()
if not self.daemon.savepid():
self.logger.reactor('could not update PID, not starting','error')
		# Make sure we create processes once we have dropped privileges and closed file descriptors
self.processes = Processes(self)
self.reload()
# did we complete the run of updates caused by the last SIGUSR1/SIGUSR2 ?
reload_completed = True
wait = environment.settings().tcp.delay
if wait:
sleeptime = (wait * 60) - int(time.time()) % (wait * 60)
self.logger.reactor("waiting for %d seconds before connecting" % sleeptime)
time.sleep(float(sleeptime))
while True:
try:
while self.peers:
start = time.time()
end = start+self.max_loop_time
if self._shutdown:
self._shutdown = False
self.shutdown()
elif self._reload and reload_completed:
self._reload = False
self.reload(self._reload_processes)
self._reload_processes = False
elif self._restart:
self._restart = False
self.restart()
elif self.route_update:
self.route_update = False
self.route_send()
ios = {}
keys = set(self.peers.keys())
while start < time.time() < end:
for key in list(keys):
peer = self.peers[key]
action = peer.run()
# .run() returns an ACTION enum:
# * immediate if it wants to be called again
# * later if it should be called again but has no work atm
# * close if it is finished and is closing down, or restarting
if action == ACTION.close:
self.unschedule(peer)
keys.discard(key)
								# we are losing this peer, no point scheduling more process work
elif action == ACTION.later:
for io in peer.sockets():
ios[io] = key
								# no need to come back to it before a full cycle
keys.discard(key)
if not self.schedule() and not keys:
ready = self.ready(ios.keys() + self.processes.fds(),end-time.time())
for io in ready:
if io in ios:
keys.add(ios[io])
del ios[io]
if not keys:
reload_completed = True
# RFC state that we MUST not send more than one KEEPALIVE / sec
# And doing less could cause the session to drop
if self.listener:
for connection in self.listener.connected():
# found
# * False, not peer found for this TCP connection
# * True, peer found
# * None, conflict found for this TCP connections
found = False
for key in self.peers:
peer = self.peers[key]
neighbor = peer.neighbor
# XXX: FIXME: Inet can only be compared to Inet
if connection.local == str(neighbor.peer_address) and connection.peer == str(neighbor.local_address):
if peer.incoming(connection):
found = True
break
found = None
break
if found:
self.logger.reactor("accepted connection from %s - %s" % (connection.local,connection.peer))
elif found is False:
self.logger.reactor("no session configured for %s - %s" % (connection.local,connection.peer))
connection.notification(6,3,'no session configured for the peer')
connection.close()
elif found is None:
self.logger.reactor("connection refused (already connected to the peer) %s - %s" % (connection.local,connection.peer))
connection.notification(6,5,'could not accept the connection')
connection.close()
self.processes.terminate()
self.daemon.removepid()
break
except KeyboardInterrupt:
while True:
try:
self._shutdown = True
self.logger.reactor("^C received")
break
except KeyboardInterrupt:
pass
except SystemExit:
while True:
try:
self._shutdown = True
self.logger.reactor("exiting")
break
except KeyboardInterrupt:
pass
except IOError:
while True:
try:
self._shutdown = True
self.logger.reactor("I/O Error received, most likely ^C during IO",'warning')
break
except KeyboardInterrupt:
pass
except ProcessError:
while True:
try:
self._shutdown = True
self.logger.reactor("Problem when sending message(s) to helper program, stopping",'error')
break
except KeyboardInterrupt:
pass
except select.error,e:
while True:
try:
self._shutdown = True
self.logger.reactor("problem using select, stopping",'error')
break
except KeyboardInterrupt:
pass
# from exabgp.leak import objgraph
# print objgraph.show_most_common_types(limit=20)
# import random
# obj = objgraph.by_type('Route')[random.randint(0,2000)]
# objgraph.show_backrefs([obj], max_depth=10)
def shutdown (self):
"""terminate all the current BGP connections"""
self.logger.reactor("Performing shutdown")
if self.listener:
self.listener.stop()
for key in self.peers.keys():
self.peers[key].stop()
def reload (self,restart=False):
"""reload the configuration and send to the peer the route which changed"""
self.logger.reactor("Performing reload of exabgp %s" % version)
reloaded = self.configuration.reload()
if not reloaded:
#
			# Careful: the string below is used by the QA code to check for success or failure
self.logger.configuration("Problem with the configuration file, no change done",'error')
			# Careful: the string above is used by the QA code to check for success or failure
#
self.logger.configuration(self.configuration.error,'error')
return
for key, peer in self.peers.items():
if key not in self.configuration.neighbor:
self.logger.reactor("Removing peer: %s" % peer.neighbor.name())
peer.stop()
for key, neighbor in self.configuration.neighbor.items():
# new peer
if key not in self.peers:
self.logger.reactor("New peer setup: %s" % neighbor.name())
peer = Peer(neighbor,self)
self.peers[key] = peer
# modified peer
elif self.peers[key].neighbor != neighbor:
self.logger.reactor("Peer definition change, establishing a new connection for %s" % str(key))
self.peers[key].reestablish(neighbor)
# same peer but perhaps not the routes
else:
# finding what route changed and sending the delta is not obvious
self.logger.reactor("Peer definition identical, updating peer routes if required for %s" % str(key))
self.peers[key].reconfigure(neighbor)
self.logger.configuration("Loaded new configuration successfully",'warning')
# This only starts once ...
self.processes.start(restart)
def schedule (self):
try:
# read at least on message per process if there is some and parse it
for service,command in self.processes.received():
self.decoder.parse_command(self,service,command)
# if we have nothing to do, return or save the work
if not self._running:
if not self._pending:
return False
self._running = self._pending.popleft()
# run it
try:
self._running.next() # run
# should raise StopIteration in most case
# and prevent us to have to run twice to run one command
self._running.next() # run
except StopIteration:
self._running = None
return True
except StopIteration:
pass
except KeyboardInterrupt:
self._shutdown = True
self.logger.reactor("^C received",'error')
def route_send (self):
"""the process ran and we need to figure what routes to changes"""
self.logger.reactor("Performing dynamic route update")
for key in self.configuration.neighbor.keys():
self.peers[key].send_new()
self.logger.reactor("Updated peers dynamic routes successfully")
def route_flush (self):
"""we just want to flush any unflushed routes"""
self.logger.reactor("Performing route flush")
for key in self.configuration.neighbor.keys():
self.peers[key].send_new(update=True)
def restart (self):
"""kill the BGP session and restart it"""
self.logger.reactor("Performing restart of exabgp %s" % version)
self.configuration.reload()
for key in self.peers.keys():
if key not in self.configuration.neighbor.keys():
				# the peer is gone from the new configuration, so use its old neighbor for logging
				self.logger.reactor("Removing Peer %s" % self.peers[key].neighbor.name())
self.peers[key].stop()
else:
self.peers[key].reestablish()
self.processes.terminate()
self.processes.start()
def unschedule (self,peer):
key = peer.neighbor.name()
if key in self.peers:
del self.peers[key]
def answer (self,service,string):
self.processes.write(service,string)
self.logger.reactor('Responding to %s : %s' % (service,string))
def api_shutdown (self):
self._shutdown = True
self._pending = deque()
self._running = None
def api_reload (self):
self._reload = True
self._pending = deque()
self._running = None
def api_restart (self):
self._restart = True
self._pending = deque()
self._running = None
@staticmethod
def match_neighbor (description,name):
for string in description:
if re.search('(^|[\s])%s($|[\s,])' % re.escape(string), name) is None:
return False
return True
def match_neighbors (self,descriptions):
"""returns the sublist of peers matching the description passed, or None if no description is given"""
if not descriptions:
return self.peers.keys()
returned = []
for key in self.peers:
for description in descriptions:
if Reactor.match_neighbor(description,key):
if key not in returned:
returned.append(key)
return returned
def nexthops (self,peers):
return dict((peer,self.peers[peer].neighbor.local_address) for peer in peers)
def plan (self,callback):
self._pending.append(callback)
| bsd-3-clause | 1,335,862,084,631,700,000 | 30.942529 | 126 | 0.684131 | false |
eirmag/weboob | modules/bp/browser.py | 1 | 5496 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Nicolas Duhamel
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from urlparse import urlsplit, parse_qsl
from datetime import datetime
from weboob.tools.browser import BaseBrowser, BrowserIncorrectPassword, BrowserBanned
from .pages import LoginPage, Initident, CheckPassword, repositionnerCheminCourant, BadLoginPage, AccountDesactivate, \
AccountList, AccountHistory, \
TransferChooseAccounts, CompleteTransfer, TransferConfirm, TransferSummary
from weboob.capabilities.bank import Transfer
__all__ = ['BPBrowser']
class BPBrowser(BaseBrowser):
DOMAIN = 'voscomptesenligne.labanquepostale.fr'
PROTOCOL = 'https'
CERTHASH = '868646b852c989638d4e5bbfab830e2cfbb82f4d2524e28d0251686a44e49163'
ENCODING = None # refer to the HTML encoding
PAGES = {r'.*wsost/OstBrokerWeb/loginform.*' : LoginPage,
r'.*authentification/repositionnerCheminCourant-identif.ea' : repositionnerCheminCourant,
r'.*authentification/initialiser-identif.ea' : Initident,
r'.*authentification/verifierMotDePasse-identif.ea' : CheckPassword,
r'.*synthese_assurancesEtComptes/afficheSynthese-synthese\.ea' : AccountList,
r'.*synthese_assurancesEtComptes/rechercheContratAssurance-synthese.ea' : AccountList,
r'.*CCP/releves_ccp/releveCPP-releve_ccp\.ea' : AccountHistory,
r'.*CNE/releveCNE/releveCNE-releve_cne\.ea' : AccountHistory,
r'.*/virementSafran_aiguillage/init-saisieComptes\.ea' : TransferChooseAccounts,
r'.*/virementSafran_aiguillage/formAiguillage-saisieComptes\.ea' : CompleteTransfer,
r'.*/virementSafran_national/validerVirementNational-virementNational.ea' : TransferConfirm,
r'.*/virementSafran_national/confirmerVirementNational-virementNational.ea' : TransferSummary,
r'.*ost/messages\.CVS\.html\?param=0x132120c8.*' : BadLoginPage,
r'.*ost/messages\.CVS\.html\?param=0x132120cb.*' : AccountDesactivate,
}
def __init__(self, *args, **kwargs):
kwargs['parser'] = ('lxml',)
BaseBrowser.__init__(self, *args, **kwargs)
def home(self):
self.location('https://voscomptesenligne.labanquepostale.fr/wsost/OstBrokerWeb/loginform?TAM_OP=login&'
'ERROR_CODE=0x00000000&URL=%2Fvoscomptes%2FcanalXHTML%2Fidentif.ea%3Forigin%3Dparticuliers')
def is_logged(self):
return not self.is_on_page(LoginPage)
def login(self):
if not self.is_on_page(LoginPage):
self.location('https://voscomptesenligne.labanquepostale.fr/wsost/OstBrokerWeb/loginform?TAM_OP=login&'
'ERROR_CODE=0x00000000&URL=%2Fvoscomptes%2FcanalXHTML%2Fidentif.ea%3Forigin%3Dparticuliers',
no_login=True)
self.page.login(self.username, self.password)
if self.is_on_page(BadLoginPage):
raise BrowserIncorrectPassword()
if self.is_on_page(AccountDesactivate):
raise BrowserBanned()
def get_accounts_list(self):
self.location("https://voscomptesenligne.labanquepostale.fr/voscomptes/canalXHTML/comptesCommun/synthese_assurancesEtComptes/rechercheContratAssurance-synthese.ea")
return self.page.get_accounts_list()
def get_account(self, id):
if not self.is_on_page(AccountList):
self.location("https://voscomptesenligne.labanquepostale.fr/voscomptes/canalXHTML/comptesCommun/synthese_assurancesEtComptes/rechercheContratAssurance-synthese.ea")
return self.page.get_account(id)
def get_history(self, account):
v = urlsplit(account._link_id)
args = dict(parse_qsl(v.query))
args['typeRecherche'] = 10
self.location(self.buildurl(v.path, **args))
if not self.is_on_page(AccountHistory):
return iter([])
return self.page.get_history()
def make_transfer(self, from_account, to_account, amount):
self.location('https://voscomptesenligne.labanquepostale.fr/voscomptes/canalXHTML/virement/virementSafran_aiguillage/init-saisieComptes.ea')
self.page.set_accouts(from_account, to_account)
#TODO: Check
self.page.complete_transfer(amount)
self.page.confirm()
id_transfer = self.page.get_transfer_id()
transfer = Transfer(id_transfer)
transfer.amount = amount
transfer.origin = from_account.label
transfer.recipient = to_account.label
transfer.date = datetime.now()
return transfer
| agpl-3.0 | -1,063,427,746,391,495,000 | 45.184874 | 176 | 0.663937 | false |
UrLab/DocHub | catalog/tests/load_tree_test.py | 1 | 2117 | import os
from django.conf import settings
from django.core.management import call_command
import pytest
from catalog.models import Category, Course
pytestmark = [pytest.mark.django_db]
fixtures = os.path.join(settings.BASE_DIR, 'catalog', 'tests', 'fixtures')
SIMPLE_TREE = os.path.join(fixtures, 'simple_tree.yaml')
MULTIPLE_TREE = os.path.join(fixtures, 'multiple_tree.yaml')
REAL_TREE = os.path.join(fixtures, 'real_tree.yaml')
def test_load_tree():
call_command('loadtree', SIMPLE_TREE)
ulb = Category.objects.get(level=0)
assert ulb.name == "ULB"
opti = Course.objects.get(slug="opti-f-1001")
assert opti.categories.count() == 1
options = opti.categories.last()
assert options.name == "Options"
assert options.level == 3
def test_load_multiple_tree():
call_command('loadtree', MULTIPLE_TREE)
info = Category.objects.get(name="Informatique")
assert info.level == 1
phys = Category.objects.get(name="Physique")
assert phys.level == 1
master = phys.children.first()
assert master.name == "Master"
assert master.course_set.count() == 1
assert master.course_set.last().slug == "phys-h-200"
def test_empty_tree():
category = Category.objects.create(name="Caca", slug="prout")
course = Course.objects.create(name="Testing", slug="test-h-100")
course.categories.add(category)
call_command('loadtree', SIMPLE_TREE)
assert Category.objects.filter(slug="prout").count() == 0
course = Course.objects.get(slug="test-h-100")
assert course.categories.count() == 0
def test_fill_twice():
call_command('loadtree', SIMPLE_TREE)
course = Course.objects.last()
course.name = "Autre chose"
course.save()
call_command('loadtree', SIMPLE_TREE)
new_course = Course.objects.get(slug=course.slug)
assert new_course.id == course.id
assert course.name == new_course.name
@pytest.mark.slow
@pytest.mark.network
def test_load_tree_hit_ulb():
call_command('loadtree', REAL_TREE, hitulb=True)
info = Course.objects.get(slug="info-f-101")
assert info.name == "Programmation"
| agpl-3.0 | -1,783,864,578,022,005,200 | 25.135802 | 74 | 0.685876 | false |
kosior/taktyk | taktyk/utils.py | 1 | 5722 | import configparser
import logging
import os
import shutil
import traceback
from . import settings
class CustomFormatter(logging.Formatter):
FORMATS = {logging.DEBUG: 'DEBUG: %(module)s: %(lineno)d: %(message)s',
logging.INFO: '%(message)s',
logging.WARNING: 'UWAGA! %(message)s',
# logging.ERROR: 'ERROR: %(message)s',
# logging.CRITICAL: 'CRITICAL: %(message)s'
}
def __init__(self):
super().__init__(fmt="%(levelname)s: %(message)s", datefmt=None, style='%')
def format(self, record):
fmt = self._style._fmt
self._style._fmt = self.FORMATS.get(record.levelno, fmt)
result = logging.Formatter.format(self, record)
self._style._fmt = fmt
return result
def configure_logging():
log_file_path = os.path.join(settings.BASE_DIR, 'taktyk.log')
def get_save_mode(path):
if os.path.isfile(path) and os.path.getsize(path) > 1048576:
return 'w'
return 'a'
logger = logging.getLogger()
logger.handlers = []
logger.setLevel(logging.DEBUG)
custom_fmt = CustomFormatter()
formatter = logging.Formatter('%(levelname)s: %(module)s: %(lineno)d: %(message)s')
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(custom_fmt)
fh = logging.FileHandler(log_file_path, mode=get_save_mode(log_file_path))
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(ch)
logger.addHandler(fh)
requests_logger = logging.getLogger('requests')
requests_logger.propagate = False
logging.debug('\n-------------------------------NEW EXECUTION-------------------------------\n')
def ex_hook(ex_cls, ex, tb):
logging.debug(' '.join(traceback.format_tb(tb)))
logging.debug('%s %s', ex_cls, ex)
logging.critical('Nieznany błąd: %s: %s', ex_cls.__name__, ex)
class Decision:
"""
Class for custom prompts
options - dictionary options {'Y': {func: *args}, 'n': exit}
if input == 'Y' --> execute func with args - func(*args)
if input == 'n' --> exit()
validator (optional) - pass function to validate input and if validation passes,
input will be returned
(to get any input set 'validator=lambda x: x' in __init__)
"""
def __init__(self, msg, options=None, validator=None):
self.msg = msg
self._options = options
self.validator = validator
@property
def options(self):
return {k.lower(): v for k, v in self._options.items()} if self._options else {}
@options.setter
def options(self, value):
self._options = value
@staticmethod
def _run_func(func):
if isinstance(func, dict):
for k, v in func.items():
return k(*v)
elif func:
return func()
return True
def run(self):
while True:
answer = input(self.msg)
answer_low = answer.lower()
if answer_low in self.options.keys():
if callable(self.options[answer_low]) or isinstance(self.options[answer_low], dict):
return self._run_func(self.options[answer_low])
return self.options[answer_low]
elif self.validator:
try:
result = self.validator(answer)
except ValueError as err:
logging.error(err.__str__())
else:
return result
else:
msg = 'Zły wybór! Wybierz jeszcze raz.'
logging.error(msg)
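# Illustrative example (not part of the original module) of the options
# protocol documented in the Decision docstring: the {func: args} form runs
# func(*args), while a bare callable is simply called.
#
#   decision = Decision('[T/n]: ', options={'T': {print: ('ok',)}, 'n': exit})
#   decision.run()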
def unpack_archive(file, extract_dir, format_, msg):
try:
shutil.unpack_archive(file, extract_dir=extract_dir, format=format_)
except (ValueError, OSError) as err:
logging.debug(traceback.format_exc())
logging.critical(err)
logging.critical(msg)
raise SystemExit
class ConfigFile:
template = [
['DANE LOGOWANIA', [
['username', ''],
['password', '']
]],
['WykopAPI', [
['appkey', ''],
['secretkey', ''],
['accountkey', ''],
['# opcjonalnie:'],
['userkey', '']
]],
['ARGUMENTY', [
['# przykład: static_args = -s chrome --skip -n'],
['static_args', '']
]],
['POBIERANE ROZSZERZENIA', [
['exts', '.gif .jpg .jpeg .png .webm']
]]
]
def __init__(self):
self.file_path = os.path.join(settings.BASE_DIR, settings.CONFIG_FILE)
self.config = configparser.ConfigParser(allow_no_value=True)
def prepare(self):
for section, options_and_values in self.template:
self.config.add_section(section)
for opt_val in options_and_values:
self.config.set(section, *opt_val)
def create_configfile(self):
with open(self.file_path, 'w') as configfile:
self.config.write(configfile)
def read_and_apply_config(self):
self.config.read(self.file_path)
for section in self.config.sections():
for option in self.config[section]:
value = self.config.get(section, option)
if value:
if option in ('static_args', 'exts'):
value = value.split(' ')
setattr(settings, option.upper(), value)
def set_up(self):
if os.path.isfile(self.file_path):
self.read_and_apply_config()
else:
self.prepare()
self.create_configfile()
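# For reference, the file written by create_configfile() follows the template
# above; with the default (empty) values it looks roughly like this:
#
#   [DANE LOGOWANIA]
#   username =
#   password =
#
#   [WykopAPI]
#   appkey =
#   secretkey =
#   accountkey =
#   # opcjonalnie:
#   userkey =
#
#   [ARGUMENTY]
#   # przykład: static_args = -s chrome --skip -n
#   static_args =
#
#   [POBIERANE ROZSZERZENIA]
#   exts = .gif .jpg .jpeg .png .webm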
| mit | 4,620,440,564,190,430,000 | 30.585635 | 100 | 0.544866 | false |
bitdancer/pynvm | nvm/pmemobj/list.py | 1 | 7019 | import collections
import sys
from .compat import recursive_repr, abc
from _pmem import ffi # XXX refactor to make this import unneeded?
# XXX: refactor to allocate this instead of hardcoding it.
LIST_POBJPTR_ARRAY_TYPE_NUM = 30
class PersistentList(abc.MutableSequence):
"""Persistent version of the 'list' type."""
# XXX locking!
# XXX All bookkeeping attrs should be _v_xxxx so that all other attrs
# (other than __manager__) can be made persistent.
def __init__(self, *args, **kw):
if '__manager__' not in kw:
raise ValueError("__manager__ is required")
mm = self.__manager__ = kw.pop('__manager__')
if '_oid' not in kw:
with mm.transaction():
# XXX Will want to implement a freelist here, like CPython
self._oid = mm.malloc(ffi.sizeof('PListObject'))
ob = ffi.cast('PObject *', mm.direct(self._oid))
ob.ob_type = mm._get_type_code(PersistentList)
else:
self._oid = kw.pop('_oid')
if kw:
raise TypeError("Unrecognized keyword argument(s) {}".format(kw))
self._body = ffi.cast('PListObject *', mm.direct(self._oid))
if args:
if len(args) != 1:
raise TypeError("PersistentList takes at most 1"
" argument, {} given".format(len(args)))
self.extend(args[0])
# Methods and properties needed to implement the ABC required methods.
@property
def _size(self):
return ffi.cast('PVarObject *', self._body).ob_size
@property
def _allocated(self):
return self._body.allocated
@property
def _items(self):
mm = self.__manager__
ob_items = mm.otuple(self._body.ob_items)
if ob_items == mm.OID_NULL:
return None
return ffi.cast('PObjPtr *', mm.direct(ob_items))
def _resize(self, newsize):
mm = self.__manager__
allocated = self._allocated
# Only realloc if we don't have enough space already.
if (allocated >= newsize and newsize >= allocated >> 1):
assert self._items != None or newsize == 0
with mm.transaction():
ob = ffi.cast('PVarObject *', self._body)
mm.snapshot_range(ffi.addressof(ob, 'ob_size'),
ffi.sizeof('size_t'))
ob.ob_size = newsize
return
# We use CPython's overallocation algorithm.
new_allocated = (newsize >> 3) + (3 if newsize < 9 else 6) + newsize
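        # e.g. newsize 1 -> new_allocated 4, 8 -> 12, 9 -> 16, 16 -> 24,
        # the same growth pattern CPython's list_resize() uses.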
if newsize == 0:
new_allocated = 0
items = self._items
with mm.transaction():
if items is None:
items = mm.malloc(new_allocated * ffi.sizeof('PObjPtr'),
type_num=LIST_POBJPTR_ARRAY_TYPE_NUM)
else:
items = mm.realloc(self._body.ob_items,
new_allocated * ffi.sizeof('PObjPtr'),
LIST_POBJPTR_ARRAY_TYPE_NUM)
mm.snapshot_range(self._body, ffi.sizeof('PListObject'))
self._body.ob_items = items
self._body.allocated = new_allocated
ffi.cast('PVarObject *', self._body).ob_size = newsize
def insert(self, index, value):
mm = self.__manager__
size = self._size
newsize = size + 1
with mm.transaction():
self._resize(newsize)
if index < 0:
index += size
if index < 0:
index = 0
if index > size:
index = size
items = self._items
mm.snapshot_range(items + index,
ffi.offsetof('PObjPtr *', newsize))
for i in range(size, index, -1):
items[i] = items[i-1]
v_oid = mm.persist(value)
mm.incref(v_oid)
items[index] = v_oid
def _normalize_index(self, index):
try:
index = int(index)
except TypeError:
# Assume it is a slice
# XXX fixme
raise NotImplementedError("Slicing not yet implemented")
if index < 0:
index += self._size
if index < 0 or index >= self._size:
raise IndexError(index)
return index
def __setitem__(self, index, value):
mm = self.__manager__
index = self._normalize_index(index)
items = self._items
with mm.transaction():
v_oid = mm.persist(value)
mm.snapshot_range(ffi.addressof(items, index),
ffi.sizeof('PObjPtr *'))
mm.xdecref(items[index])
items[index] = v_oid
mm.incref(v_oid)
def __delitem__(self, index):
mm = self.__manager__
index = self._normalize_index(index)
size = self._size
newsize = size - 1
items = self._items
with mm.transaction():
mm.snapshot_range(ffi.addressof(items, index),
ffi.offsetof('PObjPtr *', size))
mm.decref(items[index])
for i in range(index, newsize):
items[i] = items[i+1]
self._resize(newsize)
def __getitem__(self, index):
index = self._normalize_index(index)
items = self._items
return self.__manager__.resurrect(items[index])
def __len__(self):
return self._size
# Additional list methods not provided by the ABC.
@recursive_repr()
def __repr__(self):
return "{}([{}])".format(self.__class__.__name__,
', '.join("{!r}".format(x) for x in self))
def __eq__(self, other):
if not (isinstance(other, PersistentList) or
isinstance(other, list)):
return NotImplemented
if len(self) != len(other):
return False
for i in range(len(self)):
if self[i] != other[i]:
return False
return True
if sys.version_info[0] < 3:
def __ne__(self, other):
return not self == other
def clear(self):
mm = self.__manager__
if self._size == 0:
return
items = self._items
with mm.transaction():
for i in range(self._size):
# Grab oid in tuple form so the assignment can't change it
oid = mm.otuple(items[i])
if oid == mm.OID_NULL:
continue
items[i] = mm.OID_NULL
mm.decref(oid)
self._resize(0)
# Additional methods required by the pmemobj API.
def _traverse(self):
items = self._items
for i in range(len(self)):
yield items[i]
def _substructures(self):
return ((self._body.ob_items, LIST_POBJPTR_ARRAY_TYPE_NUM),)
def _deallocate(self):
self.clear()
| bsd-3-clause | -1,384,105,293,564,038,400 | 33.406863 | 77 | 0.515601 | false |
pombredanne/django-avocado | avocado/columns/tests/cache.py | 1 | 1498 | from django.test import TestCase
from django.core.cache import cache as djcache
from avocado.columns.cache import cache
from avocado.models import Column
__all__ = ('ColumnCacheTestCase',)
class ColumnCacheTestCase(TestCase):
fixtures = ['test_data.yaml']
def setUp(self):
djcache.clear()
def test_get(self):
concept_id = 1
key = cache.id_key % concept_id
self.assertFalse(djcache.has_key(key))
concept = cache.get(concept_id)
self.assertNotEqual(concept, None)
self.assertEqual(djcache.get(key), concept)
djcache.delete(key)
queryset = Column.objects.none()
concept = cache.get(concept_id, queryset=queryset)
self.assertEqual(concept, None)
self.assertFalse(djcache.has_key(key))
def test_get_many(self):
concept_ids = [1, 2]
concepts = list(cache.get_many(concept_ids))
self.assertEqual([x.id for x in concepts], concept_ids)
for i, x in enumerate(concept_ids):
key = cache.id_key % x
self.assertEqual(djcache.get(key), concepts[i])
def test_get_fields(self):
concept_id = 1
key = cache.id_key % concept_id
fkey = cache.field_id_key % concept_id
self.assertFalse(djcache.has_key(key))
self.assertFalse(djcache.has_key(fkey))
fields = cache.get_fields(concept_id)
self.assertTrue(djcache.has_key(key))
self.assertEqual(djcache.get(fkey), fields)
| bsd-3-clause | -7,975,161,356,698,268,000 | 27.807692 | 63 | 0.634846 | false |
thirdkey-solutions/granary | granary/seed.py | 1 | 1984 | import seedlib
import string
import json
from mnemonic import Mnemonic
from binascii import hexlify, unhexlify
import bitcoin
# class Granary():
# pass
class Seed():
def __init__(self):
self._bin_seed = None
self._fingerprint = None
self._bip32_xpriv = None
def __nonzero__(self):
return bool(self._bin_seed)
def __repr__(self):
return "< Seed: %s >" % self.fingerprint() if self else "< Seed: empty >"
def bin_seed(self):
return self._bin_seed
def fingerprint(self):
if not self._bin_seed:
return None
self._fingerprint = seedlib.fingerprint(self._bin_seed)
return self._fingerprint
def from_random(self):
self._bin_seed = seedlib.random_key()
def from_bin(self, bin_seed):
assert(len(bin_seed) == 32)
self._bin_seed = bin_seed
def from_hex(self, hex_seed):
assert(set(hex_seed) <= set(string.hexdigits))
assert(len(hex_seed) == 64)
self._bin_seed = unhexlify(hex_seed)
def as_hex(self):
return hexlify(self._bin_seed) if self._bin_seed else None
def from_mnemonic(self, mnemonic):
self._bin_seed = seedlib.mnemonic_decode(mnemonic)
def as_mnemonic(self):
return seedlib.mnemonic_encode(self._bin_seed) if self._bin_seed else None
def stretched(self, passphrase):
# stretch key
newseed = Seed()
newseed.from_bin(seedlib.stretched_key(self._bin_seed, passphrase))
return newseed
# mnemonic seed -> BIP39 -> BIP32 xpriv
def as_HD_root(self):
# BIP39 compatible derivation from seed mnemonic without passphrase
master_seed = seedlib.mnemonic.to_seed(self.as_mnemonic())
# Master key pair for BIP32
master_xpriv = bitcoin.bip32_master_key(master_seed)
return master_xpriv
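# Illustrative usage sketch (not part of the original module):
#
#   seed = Seed()
#   seed.from_random()
#   mnemonic = seed.as_mnemonic()   # human-readable backup of the 32-byte seed
#   xpriv = seed.as_HD_root()       # BIP32 master private key via the BIP39 seed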
| mit | 700,877,953,537,606,800 | 28.176471 | 82 | 0.588206 | false |
peraktong/Cannon-Experiment | DR13/0330_read_table_rc.py | 1 | 24195 | import numpy as np
from astropy.table import Table
from astropy.io import fits
import matplotlib.pyplot as plt
import matplotlib
import pickle
from matplotlib import cm
from numpy.random import randn
# table path
path = "/Users/caojunzhi/Downloads/upload_20170330/red_clump_dr13.fits"
star = fits.open(path)
table = Table.read(path)
"""
There are 15 columns in the table:
1. 'APOGEEID' -- The name of the star
2. 'VISIT' -- The name of the visit file
3. BJD -- Barycentric JD
Inferred labels are from the Cannon. The spectra we use are from the first combined spectra
(There are two combined spectra for each star, which are obtained by two different methods)
: (1) global weighting, where each visit spectrum is weighted by its (S/N)2, and
(2) pixel-by-pixel weighting, where each pixel is weighted by its (S/N)2.
4. TEFF
5. LOGG
6. FEH
The abc parameters for each visit:
7. A -- parameter a
8. B -- parameter b
9. C -- parameter c
10. CHIINF -- chi-squared for the inferred flux from the cannon (a=0,b=1,c=0)
11. CHIMIX -- chi-squared for the mixed flux from the abc fit.
12. VBARY -- The barycentric Velocity(km/s) from the APOGEE team.
13. VSHIFT -- The velocity shift from the abc fit(km/s)
14. FIBER -- Fiber ID
15. SNR -- SNR of the visit
####
The covariance matrix of the abc fit is in HDU0 data, which is
a 3*3*N 3-d matrix. N is the number of visits.
###
"""
# read the 3x3 covariance matrix of the abc fit for the first visit (HDU0 data is 3x3xN):
un_cov = star[0].data[:,:,0]
#print(un_cov)
# read the velocity shift from the abc fit
v_shift = table["VSHIFT"]
#print(v_shift.shape)
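# Illustrative only (names below are not from the original script): select all
# visits of one star and apply the abc velocity correction to the pipeline
# barycentric velocity, following the column description in the docstring above.
example_id = table["APOGEEID"][0]
example_visits = table[table["APOGEEID"] == example_id]
corrected_rv = example_visits["VBARY"] + example_visits["VSHIFT"]  # km/s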
########################
#Read table and plot to check.
class plot():
def read_table(self):
path = "/Users/caojunzhi/Downloads/upload_20170330/red_clump_dr13.fits"
star = fits.open(path)
table = Table.read(path)
# read it:
un_cov = star[0].data
self.un_cov = un_cov
a = table["A"]
b = table["B"]
c = table["C"]
self.a = a
self.b = b
self.c = c
mask = 2*b>a+c
self.mask = mask
name = table["APOGEEID"]
self.name = name
SHIFT = table["VSHIFT"]
self.shift = SHIFT
VBARY = table["VBARY"]
self.VBARY = VBARY
teff = table["TEFF"]
self.teff = teff
logg = table["LOGG"]
self.logg = logg
feh = table["FEH"]
self.feh = feh
self.chi_inf = table["CHIINF"]
self.chi_mix = table["CHIMIX"]
self.BJD = table["BJD"]
self.fiber = table["FIBER"]
self.SNR =table["SNR"]
def plot_teff_logg(self):
# only show visits with 2b>a+c
mask = self.mask
teff = self.teff[mask]
logg = self.logg[mask]
feh = self.feh[mask]
        # shift is stored in km/s; convert to m/s for plotting
shift = self.shift[mask]*1000
a = self.a
b = self.b
c = self.c
bac = (2*b-a-c)[mask]
font = {'family': 'normal',
'weight': 'bold',
'size': 14}
matplotlib.rc('font', **font)
f, ax1 = plt.subplots(1,1)
alpha = 0.3
#ax1
ax1.scatter(logg,teff, marker='x', c=shift,
vmin=np.min(shift), vmax=np.max(shift), alpha=alpha, cmap=cm.coolwarm)
ax1.set_ylabel('Teff $K$', fontsize=20)
ax1.set_xlabel('Logg ', fontsize=20)
f.subplots_adjust(right=0.8)
pl = ax1.scatter(logg,teff, marker='x', c=shift,
vmin=np.min(shift), vmax=np.max(shift), alpha=alpha, cmap=cm.coolwarm)
cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
cb = f.colorbar(pl, cax=cbar_ax)
cb.set_label("RV shifts $m/s$", fontsize=20)
f.suptitle("Teff vs Logg for red clumps in DR13", fontsize=30)
# save them:
fig = matplotlib.pyplot.gcf()
# adjust the size based on the number of visit
fig.set_size_inches(14.5, 8.5)
save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "Teff_logg_rc" +".png"
fig.savefig(save_path, dpi=500)
plt.close()
def plot_teff_feh(self):
# only show visits with 2b>a+c
mask = self.mask
teff = self.teff[mask]
logg = self.logg[mask]
feh = self.feh[mask]
shift = self.shift[mask] * 1000
a = self.a
b = self.b
c = self.c
bac = (2*b-a-c)[mask]
font = {'family': 'normal',
'weight': 'bold',
'size': 14}
matplotlib.rc('font', **font)
f, ax1 = plt.subplots(1,1)
alpha = 0.3
#ax1
ax1.scatter(feh,teff, marker='x', c=shift,
vmin=np.min(shift), vmax=np.max(shift), alpha=alpha, cmap=cm.coolwarm)
ax1.set_ylabel('Teff $K$', fontsize=20)
ax1.set_xlabel('FeH ', fontsize=20)
f.subplots_adjust(right=0.8)
pl = ax1.scatter(feh,teff, marker='x', c=shift,
vmin=np.min(shift), vmax=np.max(shift), alpha=alpha, cmap=cm.coolwarm)
cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
cb = f.colorbar(pl, cax=cbar_ax)
cb.set_label("RV shifts $m/s$", fontsize=20)
f.suptitle("Teff vs FeH for red clumps in DR13", fontsize=30)
# save them:
fig = matplotlib.pyplot.gcf()
# adjust the size based on the number of visit
fig.set_size_inches(14.5, 8.5)
save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "Teff_feh_rc" +".png"
fig.savefig(save_path, dpi=500)
plt.close()
def plot_teff_logg_bac(self):
# only show visits with 2b>a+c
mask = self.mask
teff = self.teff[mask]
logg = self.logg[mask]
feh = self.feh[mask]
shift = self.shift[mask]
a = self.a
b = self.b
c = self.c
bac = (2*b-a-c)[mask]
font = {'family': 'normal',
'weight': 'bold',
'size': 14}
matplotlib.rc('font', **font)
f, ax1 = plt.subplots(1,1)
alpha = 0.3
#ax1
low = 0
up = 3
ax1.scatter(logg,teff, marker='x', c=bac,
vmin=low, vmax=up, alpha=alpha, cmap=cm.coolwarm)
ax1.set_ylabel('Teff $K$', fontsize=20)
ax1.set_xlabel('Logg ', fontsize=20)
f.subplots_adjust(right=0.8)
pl = ax1.scatter(logg,teff, marker='x', c=bac,
vmin=low, vmax=up, alpha=alpha, cmap=cm.coolwarm)
cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
cb = f.colorbar(pl, cax=cbar_ax)
cb.set_label("2b-a-c", fontsize=20)
f.suptitle("Teff vs Logg for red clumps in DR13", fontsize=30)
# save them:
fig = matplotlib.pyplot.gcf()
# adjust the size based on the number of visit
fig.set_size_inches(14.5, 8.5)
save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "Teff_logg_rc_2bac" +".png"
fig.savefig(save_path, dpi=500)
plt.close()
def plot_teff_feh_bac(self):
# only show visits with 2b>a+c
mask = self.mask
teff = self.teff[mask]
logg = self.logg[mask]
feh = self.feh[mask]
shift = self.shift[mask]
a = self.a
b = self.b
c = self.c
bac = (2*b-a-c)[mask]
low = 0
up = 3
font = {'family': 'normal',
'weight': 'bold',
'size': 14}
matplotlib.rc('font', **font)
f, ax1 = plt.subplots(1,1)
alpha = 0.3
#ax1
ax1.scatter(feh,teff, marker='x', c=bac,
vmin=low, vmax=up, alpha=alpha, cmap=cm.coolwarm)
ax1.set_ylabel('Teff $K$', fontsize=20)
ax1.set_xlabel('FeH ', fontsize=20)
f.subplots_adjust(right=0.8)
pl = ax1.scatter(feh,teff, marker='x', c=bac,
vmin=low, vmax=up, alpha=alpha, cmap=cm.coolwarm)
cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
cb = f.colorbar(pl, cax=cbar_ax)
cb.set_label("2b-a-c", fontsize=20)
f.suptitle("Teff vs FeH for red clumps in DR13", fontsize=30)
# save them:
fig = matplotlib.pyplot.gcf()
# adjust the size based on the number of visit
fig.set_size_inches(14.5, 8.5)
save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "Teff_feh_rc_2bac" +".png"
fig.savefig(save_path, dpi=500)
plt.close()
def plot_shift_bjd(self):
mask = self.mask
shift =self.shift[mask]
BJD = self.BJD[mask]
feh = self.feh[mask]
font = {'family': 'normal',
'weight': 'bold',
'size': 14}
matplotlib.rc('font', **font)
f, ax1 = plt.subplots(1,1)
alpha = 0.3
#ax1
ax1.scatter(BJD,shift, marker='x', c=feh,
vmin=np.min(feh), vmax=np.max(feh), alpha=alpha, cmap=cm.coolwarm)
ax1.set_xlabel('BJD', fontsize=20)
ax1.set_ylabel('RV shift $km/s$ ', fontsize=20)
f.subplots_adjust(right=0.8)
pl = ax1.scatter(BJD,shift, marker='x', c=feh,
vmin=np.min(feh), vmax=np.max(feh), alpha=alpha, cmap=cm.coolwarm)
cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
cb = f.colorbar(pl, cax=cbar_ax)
cb.set_label("Fe/H", fontsize=20)
f.suptitle("RV shift vs BJD for red clumps in DR13", fontsize=30)
# save them:
fig = matplotlib.pyplot.gcf()
# adjust the size based on the number of visit
fig.set_size_inches(14.5, 8.5)
save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "RV_shift_vs_BJD_rc" +".png"
fig.savefig(save_path, dpi=500)
plt.close()
def plot_rv_fiber(self):
mask = self.mask
a = self.a[mask]
b = self.b[mask]
c = self.c[mask]
fiber = self.fiber[mask]
SNR = self.SNR[mask]
portion = (c+a)/(a+b+c)
RV = (c - a) / (a + b + c) * 4144.68
font = {'family': 'normal',
'weight': 'bold',
'size': 14}
matplotlib.rc('font', **font)
f, ax1 = plt.subplots(1,1)
alpha = 0.3
#ax1
ax1.scatter(fiber,RV, marker='x', c=SNR,
vmin=np.min(SNR), vmax=np.max(SNR), alpha=alpha, cmap=cm.coolwarm)
ax1.set_xlabel('FiberID', fontsize=20)
ax1.set_ylabel('RV shift $m/s$', fontsize=20)
f.subplots_adjust(right=0.8)
pl = ax1.scatter(fiber,RV, marker='x', c=SNR,
vmin=np.min(SNR), vmax=np.max(SNR), alpha=alpha, cmap=cm.coolwarm)
cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
cb = f.colorbar(pl, cax=cbar_ax)
cb.set_label("SNR", fontsize=20)
f.suptitle("RV shifts vs FiberID for the red clumps in DR13", fontsize=30)
# save them:
fig = matplotlib.pyplot.gcf()
# adjust the size based on the number of visit
fig.set_size_inches(14.5, 8.5)
save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "RV_shift_vs_Fiber_rc" +".png"
fig.savefig(save_path, dpi=500)
plt.close()
def plot_ac_fiber(self):
mask = self.mask
a = self.a[mask]
b = self.b[mask]
c = self.c[mask]
fiber = self.fiber[mask]
portion = (c+a)/(a+b+c)
RV = (c - a) / (a + b + c) * 4144.68
font = {'family': 'normal',
'weight': 'bold',
'size': 14}
matplotlib.rc('font', **font)
f, ax1 = plt.subplots(1,1)
alpha = 0.3
#ax1
ax1.scatter(fiber,portion, marker='x', c=RV,
vmin=np.min(RV), vmax=np.max(RV), alpha=alpha, cmap=cm.coolwarm)
ax1.set_xlabel('FiberID', fontsize=20)
ax1.set_ylabel('$(c+a)/(a+b+c)$ ', fontsize=20)
axes = plt.gca()
axes.set_ylim([-1,1])
f.subplots_adjust(right=0.8)
pl = ax1.scatter(fiber,portion, marker='x', c=RV,
vmin=np.min(RV), vmax=np.max(RV), alpha=alpha, cmap=cm.coolwarm)
cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
cb = f.colorbar(pl, cax=cbar_ax)
cb.set_label("RV shifts $m/s$", fontsize=20)
f.suptitle("$(c+a)/(a+b+c)$ vs FiberID for the red clumps in DR13", fontsize=30)
# save them:
fig = matplotlib.pyplot.gcf()
# adjust the size based on the number of visit
fig.set_size_inches(14.5, 8.5)
save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "ac_vs_Fiber_rc" +".png"
fig.savefig(save_path, dpi=500)
plt.close()
def plot_delta_chi_SNR(self):
mask = self.mask
delta_chi = (self.chi_inf-self.chi_mix)[mask]
SNR = self.SNR[mask]
RV = self.shift[mask]
font = {'family': 'normal',
'weight': 'bold',
'size': 14}
matplotlib.rc('font', **font)
f, ax1 = plt.subplots(1,1)
alpha = 0.3
#ax1
ax1.scatter(SNR,delta_chi, marker='x', c=RV,
vmin=np.min(RV), vmax=np.max(RV), alpha=alpha, cmap=cm.coolwarm)
ax1.set_xlabel('SNR', fontsize=20)
ax1.set_ylabel('Delta chi squared ', fontsize=20)
f.subplots_adjust(right=0.8)
pl = ax1.scatter(SNR,delta_chi, marker='x', c=RV,
vmin=np.min(RV), vmax=np.max(RV), alpha=alpha, cmap=cm.coolwarm)
cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
cb = f.colorbar(pl, cax=cbar_ax)
cb.set_label("RV shifts $m/s$", fontsize=20)
f.suptitle("Delta chi squared vs SNR for the red clumps in DR13", fontsize=30)
# save them:
fig = matplotlib.pyplot.gcf()
# adjust the size based on the number of visit
fig.set_size_inches(14.5, 8.5)
save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "dchi_vs_SNR_rc" +".png"
fig.savefig(save_path, dpi=500)
plt.close()
def histogram_shift_abc(self):
a = self.a
b = self.b
c = self.c
RV = (c-a)/(a+b+c)*4144.68
# add a mask: only show results with 2b>a+c
mask = 2*b>a+c
a = a[mask]
b = b[mask]
c = c[mask]
RV = RV[mask]
font = {'weight': 'bold', 'size': 15}
matplotlib.rc('font', **font)
f, ((ax1, ax2), (ax3, ax4)) = \
plt.subplots(2, 2)
colors = ["cyan",'b', 'g', 'r']
name = ["RV","a", "b", "c"]
# histogram of rv
#ax1
rms_RV = (np.nansum(RV*RV)/len(RV))**0.5
rms_a = (np.nansum(a * a) / len(a)) ** 0.5
rms_b = (np.nansum(b*b) / len(b)) ** 0.5
rms_c = (np.nansum(c * c) / len(c)) ** 0.5
ax1.hist(RV, bins=40, color=colors[0], label="%s RMS = %.2f $m/s$"%(name[0],rms_RV))
#ax1.set_title('Histogram of Radial velocity shifts', fontsize=30)
ax1.set_xlabel('values of radial velocity shifts $m/s$', fontsize=15)
ax1.set_ylabel('Number', fontsize=15)
ax1.legend(prop={'size': 15})
# add vertical grey line
# ax1.plot((wl[index], wl[index]), (0.5, 1 + 0.5 * N), 'k-', linewidth=1.5)
# histogram of a
#ax2
ax2.hist(a, bins=40, color=colors[1], label="%s RMS = %.2f"%(name[1],rms_a))
#ax2.set_title('Histogram of parameter a', fontsize=30)
ax2.set_xlabel('values of parameter a', fontsize=15)
ax2.set_ylabel('Number', fontsize=15)
ax2.legend(prop={'size': 15})
# add vertical grey line
# ax1.plot((wl[index], wl[index]), (0.5, 1 + 0.5 * N), 'k-', linewidth=1.5)
# histogram of b
#ax3
ax3.hist(b, bins=40, color=colors[2], label="%s RMS = %.2f"%(name[2],rms_b))
ax3.legend(prop={'size': 15})
        #ax3.set_title('Histogram of parameter b', fontsize=30)
ax3.set_xlabel("values of parameter b", fontsize=15)
ax3.set_ylabel('Number', fontsize=15)
# add vertical grey line
# ax1.plot((wl[index], wl[index]), (0.5, 1 + 0.5 * N), 'k-', linewidth=1.5)
# histogram of c
#ax4
ax4.hist(c, bins=40, color=colors[3], label="%s RMS = %.2f"%(name[3],rms_c))
ax4.legend(prop={'size': 15})
#ax4.set_title('Histogram of parameter c', fontsize=30)
ax4.set_xlabel("values of parameter c", fontsize=15)
ax4.set_ylabel('Number', fontsize=15)
# add vertical grey line
# ax1.plot((wl[index], wl[index]), (0.5, 1 + 0.5 * N), 'k-', linewidth=1.5)
f.suptitle("Histogram of RV shifts, a, b and c for the red clumps in DR13",fontsize=25)
f.legends
#f.suptitle("Histogram of RV shifts, a, b and c by using the absorption lines")
# save them:
fig = matplotlib.pyplot.gcf()
# adjust the size based on the number of visit
fig.set_size_inches(14.5, 8.5)
save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "histogram_rv_shift_rc" + ".png"
fig.savefig(save_path, dpi=500)
plt.close()
# RV before after
def plot_RV_std_before_after_teff(self):
mask = self.mask
shift =self.shift[mask]
VBARY = self.VBARY[mask]
teff = self.teff[mask]
logg = self.logg[mask]
feh = self.feh[mask]
# From the average (c+a)/(a+b+c)
# Do put a mask here
mask = self.mask
# add points with the same fiberid together
name = self.name[mask]
target = list(set(name))
VBARY = self.VBARY[mask]
shift =self.shift[mask]
#SNR = self.SNR[mask]
fusion_new = []
# name+std_old and std_new + Teff logg feh
for i in range(0,len(target)):
print("Doing %.2f %%"%(i/len(target)*100))
index = np.where(name == target[i])
index = np.array(index)
index = index.ravel()
std_old_i = np.std(VBARY[index])
std_new_i = np.std(VBARY[index]+shift[index])
teff_i = np.nanmedian(teff[index])
logg_i = np.nanmedian(logg[index])
feh_i = np.nanmedian(feh[index])
fusion_new.append([target[i],std_old_i,std_new_i,teff_i,logg_i,feh_i])
fusion_new = np.array(fusion_new)
self.fusion_new = fusion_new
# portion+fiber+rv
# name = fusion_new[:, 0]
std_old = np.array(fusion_new[:,1],dtype=np.float32).ravel()
std_new = np.array(fusion_new[:,2],dtype=np.float32).ravel()
# use int
teff = np.array(fusion_new[:,3],dtype=np.float16).ravel()
font = {'family': 'normal',
'weight': 'bold',
'size': 14}
matplotlib.rc('font', **font)
f, ax1 = plt.subplots(1,1)
alpha = 0.3
#ax1
ax1.scatter(std_old,std_new, marker='x', c=teff,
vmin=np.min(teff), vmax=np.max(teff), alpha=alpha, cmap=cm.coolwarm)
ax1.plot(std_old,std_old,"k",alpha=alpha,linewidth=0.3)
ax1.set_xlabel('Std of RVs before the correction $km/s$', fontsize=20)
ax1.set_ylabel('Std of RVs after the correction $km/s$', fontsize=20)
f.subplots_adjust(right=0.8)
pl = ax1.scatter(std_old,std_new, marker='x', c=teff,
vmin=np.min(teff), vmax=np.max(teff), alpha=alpha, cmap=cm.coolwarm)
cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
cb = f.colorbar(pl, cax=cbar_ax)
cb.set_label("Teff $K$", fontsize=20)
f.suptitle("Std of RVs before vs after the correction for red clumps in DR13", fontsize=30)
# save them:
fig = matplotlib.pyplot.gcf()
# adjust the size based on the number of visit
fig.set_size_inches(14.5, 8.5)
save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "RV_std_before_after_teff" +".png"
fig.savefig(save_path, dpi=500)
plt.close()
def plot_RV_std_before_after_logg(self):
mask = self.mask
shift =self.shift[mask]
VBARY = self.VBARY[mask]
teff = self.teff[mask]
logg = self.logg[mask]
feh = self.feh[mask]
fusion_new =self.fusion_new
# name = fusion_new[:, 0]
std_old = np.array(fusion_new[:,1],dtype=np.float32).ravel()
std_new = np.array(fusion_new[:,2],dtype=np.float32).ravel()
logg = np.array(fusion_new[:,4],dtype=np.float16).ravel()
font = {'family': 'normal',
'weight': 'bold',
'size': 14}
matplotlib.rc('font', **font)
f, ax1 = plt.subplots(1,1)
alpha = 0.3
#ax1
ax1.scatter(std_old,std_new, marker='x', c=logg,
vmin=np.min(logg), vmax=np.max(logg), alpha=alpha, cmap=cm.coolwarm)
ax1.plot(std_old,std_old, "k", alpha=alpha, linewidth=0.3)
ax1.set_xlabel('Std of RVs before the correction $km/s$', fontsize=20)
        ax1.set_ylabel('Std of RVs after the correction $km/s$', fontsize=20)
f.subplots_adjust(right=0.8)
pl = ax1.scatter(std_old,std_new, marker='x', c=logg,
vmin=np.min(logg), vmax=np.max(logg), alpha=alpha, cmap=cm.coolwarm)
cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
cb = f.colorbar(pl, cax=cbar_ax)
cb.set_label("logg", fontsize=20)
f.suptitle("Std of RVs before vs after the correction for red clumps in DR13", fontsize=30)
# save them:
fig = matplotlib.pyplot.gcf()
# adjust the size based on the number of visit
fig.set_size_inches(14.5, 8.5)
save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "RV_std_before_after_logg" +".png"
fig.savefig(save_path, dpi=500)
plt.close()
def plot_RV_std_before_after_feh(self):
mask = self.mask
shift =self.shift[mask]
VBARY = self.VBARY[mask]
teff = self.teff[mask]
logg = self.logg[mask]
feh = self.feh[mask]
fusion_new =self.fusion_new
# name = fusion_new[:, 0]
std_old = np.array(fusion_new[:,1],dtype=np.float32).ravel()
std_new = np.array(fusion_new[:,2],dtype=np.float32).ravel()
feh = np.array(fusion_new[:,5],dtype=np.float16).ravel()
font = {'family': 'normal',
'weight': 'bold',
'size': 14}
matplotlib.rc('font', **font)
f, ax1 = plt.subplots(1,1)
alpha = 0.3
#ax1
ax1.scatter(std_old,std_new, marker='x', c=feh,
vmin=np.min(feh), vmax=np.max(feh), alpha=alpha, cmap=cm.coolwarm)
ax1.plot(std_old,std_old, "k", alpha=alpha, linewidth=0.3)
ax1.set_xlabel('Std of RVs before the correction $km/s$', fontsize=20)
ax1.set_ylabel('Std of RVs after the correction $km/s$', fontsize=20)
f.subplots_adjust(right=0.8)
pl = ax1.scatter(std_old,std_new, marker='x', c=feh,
vmin=np.min(feh), vmax=np.max(feh), alpha=alpha, cmap=cm.coolwarm)
cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
cb = f.colorbar(pl, cax=cbar_ax)
cb.set_label("FeH", fontsize=20)
f.suptitle("Std of RVs before vs after the correction for red clumps in DR13", fontsize=30)
# save them:
fig = matplotlib.pyplot.gcf()
# adjust the size based on the number of visit
fig.set_size_inches(14.5, 8.5)
save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "RV_std_before_after_feh" +".png"
fig.savefig(save_path, dpi=500)
plt.close()
model = plot()
model.read_table()
"""
model.plot_teff_logg()
model.plot_teff_feh()
model.plot_teff_logg_bac()
model.plot_teff_feh_bac()
model.plot_rv_fiber()
model.plot_ac_fiber()
"""
#VBARY vs
model.plot_RV_std_before_after_teff()
model.plot_RV_std_before_after_logg()
model.plot_RV_std_before_after_feh()
| mit | -1,033,193,668,488,516,000 | 21.633302 | 102 | 0.538128 | false |
motion-planning/rrt-algorithms | examples/rrt_connect/rrt_connect_2d_with_random_obstacles.py | 1 | 1254 | # This file is subject to the terms and conditions defined in
# file 'LICENSE', which is part of this source code package.
import numpy as np
from src.rrt.rrt_connect import RRTConnect
from src.search_space.search_space import SearchSpace
from src.utilities.obstacle_generation import generate_random_obstacles
from src.utilities.plotting import Plot
X_dimensions = np.array([(0, 100), (0, 100)]) # dimensions of Search Space
x_init = (0, 0) # starting location
x_goal = (100, 100) # goal location
Q = np.array([2]) # length of tree edges
r = 0.5 # length of smallest edge to check for intersection with obstacles
max_samples = 2048 # max number of samples to take before timing out
prc = 0.1 # probability of checking for a connection to goal
# create search space
X = SearchSpace(X_dimensions)
n = 50
Obstacles = generate_random_obstacles(X, x_init, x_goal, n)
# create rrt_search
rrt_connect = RRTConnect(X, Q, x_init, x_goal, max_samples, r, prc)
path = rrt_connect.rrt_connect()
# plot
plot = Plot("rrt_connect_2d_with_random_obstacles")
plot.plot_tree(X, rrt_connect.trees)
if path is not None:
plot.plot_path(X, path)
plot.plot_obstacles(X, Obstacles)
plot.plot_start(X, x_init)
plot.plot_goal(X, x_goal)
plot.draw(auto_open=True)
| mit | 4,732,484,829,832,730,000 | 34.828571 | 75 | 0.73764 | false |
Midnighter/foggy-march | foggy/plots.py | 1 | 3454 | # -*- coding: utf-8 -*-
"""
======================
Variance Scaling Plots
======================
:Author:
Moritz Emanuel Beber
:Date:
2013-05-03
:Copyright:
Copyright |c| 2013 Jacobs University Bremen gGmbH, all rights reserved.
:File:
plots.py
.. |c| unicode:: U+A9
"""
__all__ = ["BREWER_SET1", "fluctuation_scaling", "fluctuation_scaling_fit",
"correlation", "histogram"]
import numpy
import scipy.stats
import matplotlib.pyplot as plt
from itertools import izip
from scipy.optimize import curve_fit
BREWER_SET1 = ["#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#FFFF33",
"#A65628", "#F781BF", "#8DD3C7"]
def fluctuation_scaling(data, labels):
"""
Plot many curves with labels.
data: list
Contains tuples of x-locations and y-locations.
labels: list
For each pair in ``data`` one string.
"""
for ((x_loc, y_loc), label, colour) in izip(data, labels, BREWER_SET1):
mask = numpy.isfinite(x_loc) & (x_loc > 0.0) & numpy.isfinite(y_loc) & (y_loc > 0.0)
x_loc = x_loc[mask]
y_loc = y_loc[mask]
if len(x_loc) == 0 or len(y_loc) == 0:
continue
plt.scatter(x_loc, y_loc, label=label, color=colour)
plt.xlabel("$<f_{i}>$")
plt.ylabel("$\\sigma_{i}$")
plt.xscale("log")
plt.yscale("log")
plt.legend(loc="upper left")
plt.show()
def _continuous_power_law(x, k, alpha, c):
return k * numpy.power(x, alpha) + c
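# Fluctuation-scaling fit: estimate sigma_i ~ k * <f_i>^alpha for each data set
# and annotate the scatter plot with the fitted exponent alpha.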
def fluctuation_scaling_fit(data, labels):
for ((x_loc, y_loc), label, colour) in izip(data, labels, BREWER_SET1):
mask = numpy.isfinite(x_loc) & (x_loc > 0.0) & numpy.isfinite(y_loc) & (y_loc > 0.0)
x_loc = x_loc[mask]
y_loc = y_loc[mask]
if len(x_loc) == 0 or len(y_loc) == 0:
continue
try:
(popt, pcov) = curve_fit(_continuous_power_law, x_loc, y_loc)
fit_y = numpy.power(x_loc, popt[1])
# fit_y *= popt[0] # can cause OverflowError
# (slope, intercept, r, p, err) = stats.linregress(x_log, y_log)
# fit_y = numpy.power(x_loc, slope) * numpy.power(10.0, intercept)
plt.plot(x_loc, fit_y, color=colour)
except RuntimeError:
plt.scatter(x_loc, y_loc, label=label, color=colour)
else:
plt.scatter(x_loc, y_loc, label="%s $\\alpha = %.3G \\pm %.3G$" % (label,
popt[1], numpy.sqrt(pcov[1, 1])), color=colour)
# plt.text(lab_xloc, lab_yloc, "$\\alpha = %.3G$\n$R^{2} = %.3G$\n$p = %.3G$\ns.e.$= %.3G$" % (slope, numpy.power(r, 2.0), p, err))
plt.xlabel("$<f_{i}>$")
plt.ylabel("$\\sigma_{i}$")
plt.xscale("log")
plt.yscale("log")
plt.legend(loc="upper left")
plt.show()
def correlation(x, y, x_lbl="Degree $k$", y_lbl="$\\eta$"):
mask = numpy.isfinite(x) & numpy.isfinite(y)
x = x[mask]
y = y[mask]
pearson = scipy.stats.pearsonr(x, y)
spearman = scipy.stats.spearmanr(x, y)
fig = plt.figure()
plt.plot(x, y, "x", label="$r=%.3g$\n$p=%.3g$\n$\\rho=%.3g$\n$p=%.3g$" % (pearson[0], pearson[1], spearman[0], spearman[1]))
plt.xlabel(x_lbl)
plt.ylabel(y_lbl)
plt.legend(loc="best")
return fig
def histogram(x, x_lbl="Speed $v$", y_lbl="$f(v)$", num_bins=100):
mask = numpy.isfinite(x)
if not mask.any():
return
x = x[mask]
plt.hist(x, bins=num_bins)
plt.xlabel(x_lbl)
plt.ylabel(y_lbl)
plt.show()
| bsd-3-clause | -80,002,290,279,166,700 | 29.034783 | 137 | 0.556456 | false |
salv-orlando/MyRepo | nova/image/fake.py | 1 | 7157 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of an fake image service"""
import copy
import datetime
import random
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
LOG = logging.getLogger('nova.image.fake')
FLAGS = flags.FLAGS
class _FakeImageService(object):
"""Mock (fake) image service for unit testing."""
def __init__(self):
self.images = {}
# NOTE(justinsb): The OpenStack API can't upload an image?
# So, make sure we've got one..
timestamp = datetime.datetime(2011, 01, 01, 01, 02, 03)
# NOTE(bcwaldon): was image '123456'
image1 = {'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'raw',
'disk_format': 'raw',
'properties': {'kernel_id': FLAGS.null_kernel,
'ramdisk_id': FLAGS.null_kernel,
'architecture': 'x86_64'}}
# NOTE(bcwaldon): was image 'fake'
image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {'kernel_id': FLAGS.null_kernel,
'ramdisk_id': FLAGS.null_kernel}}
# NOTE(bcwaldon): was image '2'
image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': None,
'disk_format': None,
'properties': {'kernel_id': FLAGS.null_kernel,
'ramdisk_id': FLAGS.null_kernel}}
# NOTE(bcwaldon): was image '1'
image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {'kernel_id': FLAGS.null_kernel,
'ramdisk_id': FLAGS.null_kernel}}
# NOTE(bcwaldon): was image '3'
image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {'kernel_id': FLAGS.null_kernel,
'ramdisk_id': FLAGS.null_kernel}}
self.create(None, image1)
self.create(None, image2)
self.create(None, image3)
self.create(None, image4)
self.create(None, image5)
super(_FakeImageService, self).__init__()
def index(self, context, filters=None, marker=None, limit=None):
"""Returns list of images."""
retval = []
for img in self.images.values():
retval += [dict([(k, v) for k, v in img.iteritems()
if k in ['id', 'name']])]
return retval
def detail(self, context, filters=None, marker=None, limit=None):
"""Return list of detailed image information."""
return copy.deepcopy(self.images.values())
def show(self, context, image_id):
"""Get data about specified image.
Returns a dict containing image data for the given opaque image id.
"""
image = self.images.get(str(image_id))
if image:
return copy.deepcopy(image)
LOG.warn('Unable to find image id %s. Have images: %s',
image_id, self.images)
raise exception.ImageNotFound(image_id=image_id)
def show_by_name(self, context, name):
"""Returns a dict containing image data for the given name."""
images = copy.deepcopy(self.images.values())
for image in images:
if name == image.get('name'):
return image
raise exception.ImageNotFound(image_id=name)
def create(self, context, metadata, data=None):
"""Store the image data and return the new image id.
:raises: Duplicate if the image already exist.
"""
image_id = str(metadata.get('id', utils.gen_uuid()))
metadata['id'] = image_id
if image_id in self.images:
raise exception.Duplicate()
self.images[image_id] = copy.deepcopy(metadata)
return self.images[image_id]
def update(self, context, image_id, metadata, data=None):
"""Replace the contents of the given image with the new data.
:raises: ImageNotFound if the image does not exist.
"""
if not self.images.get(image_id):
raise exception.ImageNotFound(image_id=image_id)
self.images[image_id] = copy.deepcopy(metadata)
def delete(self, context, image_id):
"""Delete the given image.
:raises: ImageNotFound if the image does not exist.
"""
removed = self.images.pop(image_id, None)
if not removed:
raise exception.ImageNotFound(image_id=image_id)
def delete_all(self):
"""Clears out all images."""
self.images.clear()
_fakeImageService = _FakeImageService()
def FakeImageService():
return _fakeImageService
def FakeImageService_reset():
global _fakeImageService
_fakeImageService = _FakeImageService()
| apache-2.0 | -7,230,544,118,180,133,000 | 34.430693 | 78 | 0.54562 | false |
MowenPan/star_wars | ship.py | 1 | 1577 | import pygame
from pygame.sprite import Sprite
class Ship(Sprite):
def __init__(self, ai_settings, screen):
"""Initialize the ship, and set its starting position."""
super(Ship, self).__init__()
self.screen = screen
self.ai_settings = ai_settings
# Load the ship image, and get its rect.
self.image = pygame.image.load('images/ship.bmp')
self.rect = self.image.get_rect()
self.screen_rect = screen.get_rect()
# Start each new ship at the bottom center of the screen.
self.rect.centerx = self.screen_rect.centerx
self.rect.bottom = self.screen_rect.bottom
# Store a decimal value for the ship's center.
self.center = float(self.rect.centerx)
# Movement flags.
self.moving_right = False
self.moving_left = False
def center_ship(self):
"""Center the ship on the screen."""
self.center = self.screen_rect.centerx
def update(self):
"""Update the ship's position, based on movement flags."""
# Update the ship's center value, not the rect.
if self.moving_right and self.rect.right < self.screen_rect.right:
self.center += self.ai_settings.ship_speed_factor
if self.moving_left and self.rect.left > 0:
self.center -= self.ai_settings.ship_speed_factor
# Update rect object from self.center.
self.rect.centerx = self.center
def blitme(self):
"""Draw the ship at its current location."""
self.screen.blit(self.image, self.rect)
| apache-2.0 | -7,004,505,927,314,254,000 | 34.044444 | 74 | 0.623335 | false |
joeyoung658/A-Level_2016-18 | Other/numbers.py | 1 | 1301 | # 09/09/2016
# Joe Young
import random
from time import sleep
count = 0
alist = []
randomc = 0
while randomc != 4:
ran = random.randint(1, 100)
alist.append(ran)
randomc = randomc + 1
alist.sort()
def question():
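    # Ask for a number that extends the sorted sequence (i.e. larger than its
    # current maximum); after four consecutive correct answers, offer to exit.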
global alist
global count
number = 0
clist = len(alist) - 1
print(alist)
try:
number = int(input("Input a number which fits the sequence \n -"))
except ValueError:
print("Please input a whole number!\n")
        return question()
if number > alist[clist]:
if count == 4:
exit = str(input("Would you like to exit? (Yes/No)")).lower()
if exit == "yes":
print("You have exited the program!")
sleep(5)
elif exit == "no":
print("You have chosen not to exit the program!")
sleep(3)
count = 0
return question()
else:
print("Please enter a valid option!")
else:
count = count + 1
print("Yes,", number, " does fit the sequence \n")
return question()
else:
print("No,", number, " does not fit the sequence \n")
return question()
question()
| gpl-3.0 | 7,266,662,100,259,031,000 | 21.654545 | 74 | 0.496541 | false |
nive-cms/nive | nive/utils/dataPool2/tests/slow_queries.py | 1 | 1915 |
import copy
import test_MySql
try:
from nive.utils.dataPool2.mySqlPool import *
except:
pass
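# Each sqlquery* function below times `n` SearchFiles() calls against the MySQL
# pool with a different sort column / operator combination and prints the
# average time per statement.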
def sqlquery4(n):
pool = MySql(test_MySql.conf)
pool.SetStdMeta(copy.copy(test_MySql.stdMeta))
pool.GetPoolStructureObj().SetStructure(test_MySql.struct)
pool.CreateConnection(test_MySql.conn)
print "SQL Query filename (text index) result=all, sort=filename, operator=like: ",
t = time.time()
for i in range(0,n):
        files = pool.SearchFiles({u"filename": u"file1.txt"}, sort=u"filename", operators={u"filename":u"like"})
t2 = time.time()
pool.Close()
print n, " queries in ", t2-t, "secs. ", (t2-t)/n, " per statement"
print
def sqlquery5(n):
pool = MySql(test_MySql.conf)
pool.SetStdMeta(copy.copy(test_MySql.stdMeta))
pool.GetPoolStructureObj().SetStructure(test_MySql.struct)
pool.CreateConnection(test_MySql.conn)
print "SQL Query filename (text index), result=all, sort=id, operator==: ",
t = time.time()
for i in range(0,n):
        files = pool.SearchFiles({u"filename": u"file1.txt"}, sort=u"id", operators={u"filename":u"="})
t2 = time.time()
pool.Close()
print n, " queries in ", t2-t, "secs. ", (t2-t)/n, " per statement"
print
def sqlquery6(n):
pool = MySql(test_MySql.conf)
pool.SetStdMeta(copy.copy(test_MySql.stdMeta))
pool.GetPoolStructureObj().SetStructure(test_MySql.struct)
pool.CreateConnection(test_MySql.conn)
print "SQL Query filename (text index) no result: ",
t = time.time()
for i in range(0,n):
        files = pool.SearchFiles({u"filename": u"filexxx.txt"}, sort=u"filename", operators={u"filename":u"like"})
t2 = time.time()
pool.Close()
print n, " queries in ", t2-t, "secs. ", (t2-t)/n, " per statement"
print
def run():
n = 100
sqlquery4(n)
sqlquery5(n)
sqlquery6(n)
if __name__ == '__main__':
run()
| gpl-3.0 | -4,846,017,067,129,257,000 | 24.197368 | 114 | 0.634987 | false |
OAButton/tricorder | plugins/python/sciencedirect.py | 1 | 7168 | #!/usr/bin/env python2.7
# NOTE THIS NEEDS 2.6 as parser breaks with 2.5 :-)
import warnings
warnings.simplefilter("ignore",DeprecationWarning)
import os, sys, re, urllib2, string, socket
import htmlentitydefs
import mechanize
import html5lib
from html5lib import treebuilders
import lxml.html, lxml.etree
from lxml.cssselect import CSSSelector
socket.setdefaulttimeout(15)
class ParseException(Exception):
pass
##
# Removes HTML or XML character references and entities from a text string.
#
# @param text The HTML (or XML) source text.
# @return The plain text, as a Unicode string, if necessary.
def unescape(text):
def fixup(m):
text = m.group(0)
if text[:2] == "&#":
# character reference
try:
if text[:3] == "&#x":
return unichr(int(text[3:-1], 16))
else:
return unichr(int(text[2:-1]))
except ValueError:
pass
else:
# named entity
try:
text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
except KeyError:
pass
return text # leave as is
return re.sub("&#?\w+;", fixup, text).encode('utf-8')
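# Illustrative example: unescape("&copy; 2011 &#169;") resolves both the named
# and the numeric entity reference and returns the UTF-8 encoded text.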
#
# Strip off any institutional proxies we find
#
def canon_url(url):
# print "xxxxx url = %s" % url
m = re.match(r'http://[^/]*sciencedirect.com[^/]*/(science(\?_ob|/article).*$)', url)
if not m:
raise ParseException, "bad source url"
return "http://www.sciencedirect.com/" + m.group(1)
#
# Make up crossref metadata URL (just need the DOI)
#
def crossref_xml_url(doi):
url = "http://www.crossref.org/openurl/?id=doi:" + doi
url += "&noredirect=true"
# see http://www.crossref.org/help/Content/05_Interfacing_with_the_CrossRef_system/Using_the_Open_URL_Resolver.htm
# key is either "username:password" or "<email>"
key_file = os.environ.get("HOME") + "/.crossref-key"
if os.path.exists(key_file):
f = open(key_file)
key = f.read().strip()
f.close()
url += "&pid=" + key
url += "&format=unixref"
return url
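# The resulting URL has the form (placeholder DOI shown):
#   http://www.crossref.org/openurl/?id=doi:10.1234/example&noredirect=true[&pid=<key>]&format=unixref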
#
# Try, by foul trickery, to get an abstract
# We're looking for HTML like this:
# <div class="articleText" style="display: inline;">
# <h3 class="h3">Abstract</h3>
# <p>An instrumented indentation technique...
#
def scrape_abstract(page):
root = lxml.html.fromstring(page)
#root = lxml.html.fromstring(html_data)
#links_lxml_res = root.cssselect("a.detailsViewLink")
#links_lxml = [link.get("href") for link in links_lxml_res]
#links_lxml = list(set(links_lxml))
abs = []
for div in root.cssselect("div.articleText"):
for h3 in div.cssselect("h3.h3"):
if h3.text and string.lower(h3.text) in ('abstract','summary'):
for p in div.cssselect("p"):
abs.append(p.xpath("string()"))
if len(abs) == 0:
for div in root.cssselect('div.svAbstract'):
for p in div.cssselect("p"):
abs.append(p.xpath("string()"))
if len(abs) == 0:
for div in root.cssselect('#articleContent'):
for p in div.cssselect("div.articleText_indent"):
abs.append(p.xpath("string()"))
abstract = ' '.join(abs)
abstract = re.sub('\n+',' ',abstract)
abstract = re.sub('\s+',' ',abstract)
# print "1================================================================="
# print abstract
# print "2================================================================="
return unescape(abstract)
#
# Just try to fetch the metadata from crossref
#
def handle(url):
cUrl = canon_url(url)
#print "%s => %s" % (url, cUrl)
cookies = mechanize.CookieJar()
browser = mechanize.Browser()
browser.addheaders = [("User-Agent", "Mozilla/5.0 (compatible; citeulike/1.0)"),
("From", "[email protected]")]
#browser.add_handler(PrettifyHandler())
browser.set_handle_robots(False)
browser.set_debug_http(False)
browser.set_debug_redirects(False)
browser.open(cUrl)
response = browser.response()
page = response.get_data()
# print page
#
# Elsevier insist on user selecting a "preferred source" when the article is
# available. This is normally stored in a cookie.
# If we get directed to the Elsevier "linking hub", find the 1st SD link on the
# and follow that.
# Yeah, I know - rubbish.
#
huburl = browser.geturl()
doi = None
m = re.search(r'linkinghub.elsevier.com/', huburl)
if m:
root = lxml.html.fromstring(page)
inputs = root.cssselect("input")
hrefs = [link.get("value") for link in inputs]
for href in hrefs:
n = re.search('sciencedirect.com',href)
if n:
browser.open(href)
response = browser.response()
page = response.get_data()
break
m = re.search(r'<a(?: id="[^"]+")? href="http://dx.doi.org/([^"]+)"', page)
# this page might requires a login. Luckily there seems to be a
# link "View Abstract" which can take us to a page we can read
if not m and not doi:
root = lxml.html.fromstring(page)
links = root.cssselect("a")
for href in [e.get("href") for e in links]:
if href:
m = re.search(r'http://dx.doi.org/([^"]+)', href)
if m:
break
if False:
parser = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder("beautifulsoup"))
# print page
soup = parser.parse(page)
link = soup.find(text=re.compile(r"view abstract", re.I))
if link:
href = link.parent['href']
browser.open(href)
response = browser.response()
page = response.get_data()
m = re.search(r'<a(?: id="[^"]+")? href="http://dx.doi.org/([^"]+)"', page)
if m:
doi = m.group(1)
else:
root = lxml.html.fromstring(page)
doi_nodes = root.cssselect("#doi")
for n in [e.text for e in doi_nodes]:
doi = re.sub(r'doi:','',n)
break
if not doi:
m = re.search(r'/doi/(10\.\d\d\d\d)_([^/]+)/', page)
if m:
doi = "%s/%s" % (m.group(1), m.group(2))
if not doi:
raise ParseException, "Cannot find DOI in page"
# if not re.search(r'^10[.](1016|1006|1053)/',doi):
# raise ParseException, "Cannot find an Elsevier DOI (10.1006, 10.1016, 10.1053) DOI"
xml_url = crossref_xml_url(doi)
browser.open(xml_url)
response = browser.response()
xml_page = response.get_data()
xml_page = xml_page.decode('utf-8')
# Get rid of extraneous "stars" \u2606. Sometimes at end of title (hopefully
# they're never meant to be "real" elsewhere...)
xml_page = xml_page.replace(u'\u2606',' ')
m = re.search("not found in CrossRef", xml_page)
if m:
raise ParseException, "Unable to locate that DOI (%s) in crossref" % doi
yield "begin_tsv"
yield "use_crossref\t1"
yield "linkout\tDOI\t\t%s\t\t" % doi
abstract = scrape_abstract(page)
# try:
# abstract = scrape_abstract(page)
# except:
# abstract = ''
if abstract:
print "abstract\t%s" % (abstract)
yield "end_tsv"
yield "status\tok"
if __name__ == "__main__":
url = sys.stdin.readline().strip()
for line in handle(url):
print line.encode("utf-8")
sys.exit(0)
try:
for line in handle(url):
print line.encode("utf-8")
except Exception, e:
import traceback
line = traceback.tb_lineno(sys.exc_info()[2])
print "\t".join(["status", "error", "There was an internal error processing this request. Please report this to [email protected] quoting error code %d." % line])
raise
| bsd-3-clause | -3,845,690,253,872,181,000 | 25.947368 | 165 | 0.628906 | false |
forkbong/qutebrowser | tests/end2end/features/test_completion_bdd.py | 1 | 1124 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2021 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
import pytest_bdd as bdd
bdd.scenarios('completion.feature')
@bdd.then(bdd.parsers.parse("the completion model should be {model}"))
def check_model(quteproc, model):
"""Make sure the completion model was set to something."""
pattern = "Starting {} completion *".format(model)
quteproc.wait_for(message=pattern)
| gpl-3.0 | -5,833,988,742,366,090,000 | 39.142857 | 74 | 0.75089 | false |
infobloxopen/infoblox-netmri | infoblox_netmri/api/remote/models/device_group_defn_remote.py | 1 | 6241 | from ..remote import RemoteModel
class DeviceGroupDefnRemote(RemoteModel):
"""
The device group criteria definitions. This is distinct from the evaluated device groups captured in the DeviceGroup API. One Device Group Definition may result in several Device Groups, one within each defined Network.
| ``GroupID:`` The internal NetMRI identifier for this device group definition.
| ``attribute type:`` number
| ``GroupName:`` The device group name, as specified by the user.
| ``attribute type:`` string
| ``Criteria:`` The criteria used to place members within the group.
| ``attribute type:`` string
| ``Rank:`` The rank is used to determine which group settings to apply to a device that is a member of multiple groups. The highest ranked group's settings will be used.
| ``attribute type:`` string
| ``SNMPPolling:`` A flag indicating whether this group should be polled via SNMP.
| ``attribute type:`` number
| ``SNMPAnalysis:`` A flag indicating whether issue analysis should be performed on this group.
| ``attribute type:`` number
| ``FingerPrint:`` A flag indicating whether network fingerprinting should be performed on this group.
| ``attribute type:`` number
| ``CCSCollection:`` A flag indicating whether job execution is enabled against this group.
| ``attribute type:`` number
| ``VendorDefaultCollection:`` A flag indicating whether vendor default credential collection is enabled for this group.
| ``attribute type:`` number
| ``ConfigPolling:`` A flag indicating whether configuration file collection is enabled for this group.
| ``attribute type:`` number
| ``PortScanning:`` A flag indicating whether port scanning is enabled for this group.
| ``attribute type:`` number
| ``StandardsCompliance:`` A flag indicating whether this group is subject to standard's compliance reporting.
| ``attribute type:`` number
| ``MemberCount:`` Not used.
| ``attribute type:`` number
| ``ConfigLocked:`` Indicates whether configuration changes within this group are considered authorized or unauthorized.
| ``attribute type:`` number
| ``PolicyScheduleMode:`` Not used.
| ``attribute type:`` string
| ``SPMCollectionInd:`` A flag indicating whether Switch Port Management collection applies to this group.
| ``attribute type:`` bool
    | ``NetBIOSScanningInd:`` A flag indicating whether to scan this group for NetBIOS names.
| ``attribute type:`` bool
| ``ARPCacheRefreshInd:`` A flag indicating whether to refresh the device ARP and forwarding table caches for devices in this group prior to data collection.
| ``attribute type:`` bool
| ``SAMLicensedInd:`` A flag indicating whether or not access diff viewer is available for this entry.
| ``attribute type:`` bool
| ``UpdatedAt:`` The date and time this record was last modified.
| ``attribute type:`` datetime
| ``StartBlackoutSchedule:`` The blackout start time in cron format.
| ``attribute type:`` string
| ``BlackoutDuration:`` The blackout duration in minutes.
| ``attribute type:`` number
| ``CLIPolling:`` A flag indicating whether this group should be polled via the command line interface.
| ``attribute type:`` bool
| ``StartPortControlBlackoutSchedule:`` Port Control Blackout in cron format.
| ``attribute type:`` string
| ``PortControlBlackoutDuration:`` Port Control Blackout in minutes.
| ``attribute type:`` number
| ``AdvancedGroupInd:`` A flag indicating whether this group is an advanced group.
| ``attribute type:`` bool
| ``IncludeEndHostsInd:`` A flag indicating whether this group should include end host devices.
| ``attribute type:`` bool
| ``ParentDeviceGroupID:`` Internal identifier for the parent device group. A value of 0 is used for root level groups.
| ``attribute type:`` number
| ``PerfEnvPollingInd:`` A flag that indicates if Performance and Environment polling is enabled for the device group members.
| ``attribute type:`` bool
    | ``PrivilegedPollingInd:`` A flag indicating that NetMRI should send the enable command when interacting with the device.
| ``attribute type:`` bool
| ``PolFreqModifier:`` Polling frequency modifier for devices belonging to this device group. Actual polling frequency intervals for the device are calculated by multiplying the default intervals by this value.
| ``attribute type:`` number
| ``UseGlobalPolFreq:`` A flag indicating if Global Polling Frequency should be used instead Device Group Polling Frequency.
| ``attribute type:`` bool
| ``CredentialGroupID:`` The unique identifier of the credential group.
| ``attribute type:`` number
| ``SystemGroupInd:`` A flag indicating if this device group is system-created
| ``attribute type:`` bool
"""
properties = ("GroupID",
"GroupName",
"Criteria",
"Rank",
"SNMPPolling",
"SNMPAnalysis",
"FingerPrint",
"CCSCollection",
"VendorDefaultCollection",
"ConfigPolling",
"PortScanning",
"StandardsCompliance",
"MemberCount",
"ConfigLocked",
"PolicyScheduleMode",
"SPMCollectionInd",
"NetBIOSScanningInd",
"ARPCacheRefreshInd",
"SAMLicensedInd",
"UpdatedAt",
"StartBlackoutSchedule",
"BlackoutDuration",
"CLIPolling",
"StartPortControlBlackoutSchedule",
"PortControlBlackoutDuration",
"AdvancedGroupInd",
"IncludeEndHostsInd",
"ParentDeviceGroupID",
"PerfEnvPollingInd",
"PrivilegedPollingInd",
"PolFreqModifier",
"UseGlobalPolFreq",
"CredentialGroupID",
"SystemGroupInd",
)
| apache-2.0 | 4,289,286,610,266,419,700 | 41.455782 | 223 | 0.63916 | false |
surban/slurm | testsuite/expect/driveregress.py | 1 | 17884 | #!/usr/bin/python
############################################################################
# Copyright (C) 2011-2013 SchedMD LLC
# Written by David Bigagli <[email protected]>
#
# This file is part of SLURM, a resource management program.
# For details, see <http://slurm.schedmd.com/>.
# Please also read the included file: DISCLAIMER.
#
# SLURM is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along
# with SLURM; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
############################################################################
import sys
import os
import errno
import time
import argparse
import ConfigParser
import pdb
import subprocess
import datetime
import smtplib
from email.mime.text import MIMEText
import logging
import glob
import shutil
"""This program is a driver for the Slurm regression program."""
logger = logging.getLogger('driveregress.py')
logger.setLevel(logging.DEBUG)
def init_console():
ch = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s',
datefmt='%b %d %H:%M:%S')
ch.setFormatter(formatter)
logger.addHandler(ch)
def init_log(htab):
testlogdir = '%s/log/%s' % (htab['root'], htab['section'])
if not os.path.isdir(testlogdir):
os.mkdir(testlogdir)
htab['testlogdir'] = testlogdir
testlogfile = '%s/log/%s/Log' % (htab['root'], htab['section'])
fh = logging.FileHandler(testlogfile)
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s',
datefmt='%b %d %H:%M:%S')
fh.setFormatter(formatter)
logger.addHandler(fh)
return fh
# Read the driver configuration which is in ini format
#
# NOTA BENE:
# ConfigParser craps out if there are leading spaces
# in the configuration file.
#
#[test_1]
#version = 26
#arch = linux
#multiple_slurmd = 4
#
def read_config(confile):
conf = ConfigParser.ConfigParser()
try:
conf.read(confile)
logger.info( 'configuration read')
except Exception as e:
logger.info( 'Error reading configuration file')
print e
return -1
for section in conf.sections():
logger.info('section -> %s', section)
for option in conf.options(section):
logger.info('%s = %s' % (option, conf.get(section, option)))
return conf
# clean up the daemon logs from previous run
def cleanup_logs(htab):
# pdb.set_trace()
# hose slurmctld and slurmdbd logs
logger.info('cd logdir -> %s' % (htab['logdir']))
os.chdir(htab['logdir'])
for f in glob.iglob('*'):
try:
os.unlink(f)
except:
pass
# hose slurmd logs
slogdir = '%s/log' % (htab['logdir'])
logger.info('cd logdir -> %s' % (slogdir))
os.chdir(slogdir)
for f in glob.iglob('*'):
try:
os.unlink(f)
except:
pass
# hose the spool
shutil.rmtree(htab['spooldir'])
os.mkdir(htab['spooldir'])
# section is the test name
def configure_and_build(htab, conf, section):
multi = 0
multiname = None
try:
mailto = conf.get(section, 'mailto')
htab['mailto'] = mailto
logger.info( 'mailto -> %s', mailto)
except ConfigParser.NoOptionError :
pass
try:
version = conf.get(section, 'version')
arch = conf.get(section, 'arch')
multi = conf.get(section, 'multiple_slurmd')
multiname = conf.get(section, 'multi_name')
except:
pass
buildpath = '%s/clusters/%s/%s/build' % (htab['root'], version, arch)
sbindir = '%s/clusters/%s/%s/sbin' % (htab['root'], version, arch)
spooldir = '%s/clusters/%s/%s/spool' % (htab['root'], version, arch)
bindir = '%s/clusters/%s/%s/bin' % (htab['root'], version, arch)
prefix = '%s/clusters/%s/%s' % (htab['root'], version, arch)
srcdir = '%s/distrib/%s/slurm' % (htab['root'], version)
logdir = '%s/clusters/%s/%s/log' % (htab['root'], version, arch)
slurmdbd = '%s/slurmdbd' % (sbindir)
slurmctld = '%s/slurmctld' % (sbindir)
slurmd = '%s/slurmd' % (sbindir)
# Use hash table to communicate across
# functions.
htab['buildpath'] = buildpath
htab['prefix'] = prefix
htab['srcdir'] = srcdir
htab['logdir'] = logdir
htab['sbindir'] = sbindir
htab['bindir'] = bindir
htab['spooldir'] = spooldir
htab['slurmdbd'] = slurmdbd
htab['slurmctld']= slurmctld
htab['slurmd'] = slurmd
if multi != 0:
htab['multi'] = multi
htab['multiname'] = multiname
htab['version'] = version
htab['arch'] = arch
htab['section'] = section
logger.info('test: %s version: %s arch: %s multi: %s multiname: %s'
% (section, htab['version'], htab['arch'],
htab['multi'], htab['multiname']))
logger.info('buildpath -> %s', buildpath)
logger.info('prefix -> %s', prefix)
logger.info('srcdir -> %s', srcdir)
logger.info('logdir -> %s', logdir)
logger.info('spooldir -> %s', spooldir)
logger.info('sbindir -> %s', sbindir)
logger.info('bindir -> %s', bindir)
logger.info('slurmdbd -> %s', slurmdbd)
logger.info('slurmctld -> %s', slurmctld)
logger.info('slurmd -> %s', slurmd)
# clean up logdir
cleanup_logs(htab)
# before configuring let's make sure to pull
# the github repository
git_update(srcdir)
# configure and build
os.chdir(buildpath)
logger.info('cd -> %s', os.getcwd())
# this is the build file log
buildlog = '%s/Build' % (htab['testlogdir'])
lfile = open(buildlog, 'w')
logger.info('build log file -> %s' % (lfile.name))
logger.info('running -> make uninstall')
make = 'make uninstall'
try:
proc = subprocess.Popen(make,
shell=True,
stdout = lfile,
stderr = lfile)
except Exception :
logger.error('Error make uninstall failed, make for the very first time?')
rc = proc.wait()
if rc != 0:
        logger.error('make uninstall exited with status %s,\
make for the very first time?' % (rc))
logger.info('running -> make clean')
make = 'make distclean'
try:
proc = subprocess.Popen(make,
shell=True,
stdout = lfile,
stderr = lfile)
except Exception :
logger.error('Error make distclean failed, make for the very first time?')
rc = proc.wait()
if rc != 0:
        logger.error('make distclean exited with status %s,\
make for the very first time?' % (rc))
if 'multi' in htab:
configure = ('%s/configure --prefix=%s --enable-debug\
--enable-multiple-slurmd' %
(srcdir, prefix))
else:
configure = '%s/configure --prefix=%s --enable-debug' % (srcdir, prefix)
logger.info('running -> %s', configure)
try:
proc = subprocess.Popen(configure,
shell=True,
stdout=lfile,
stderr=lfile)
except OSError as e:
logger.error('Error execution failed:' % (e))
rc = proc.wait()
if rc != 0:
logger.critical('configure failed with status %s' % (rc))
make = '/usr/bin/make -j 4 install'
logger.info( 'cd -> %s', os.getcwd())
logger.info('running -> %s', make)
try:
proc = subprocess.Popen(make,
shell=True,
stdout=lfile,
stderr=lfile)
except OSError as e:
logger.error('Error execution failed:' % (e))
rc = proc.wait()
if rc != 0:
logger.critical('make -j 4 failed with status %s' % (rc))
lfile.close()
return True
def git_update(srcdir):
logger.info('running git pull on -> %s', srcdir)
gitpull = 'git pull'
# dont forget to chdir back
os.chdir(srcdir)
try:
proc = subprocess.check_call([gitpull], shell=True)
except Exception as e:
logger.error('Failed to run git pull on %s %s' % (srcdir, e))
def start_daemons(htab):
logger.info('starting daemons...')
try:
proc = subprocess.Popen(htab['slurmdbd'], stdout = None,
stderr = subprocess.PIPE)
rc = proc.wait()
if rc != 0:
            logger.critical('Problems starting %s' % (htab['slurmdbd']))
            for line in proc.stderr:
                logger.critical('stderr: %s' % (line.strip()))
return False
except Exception as e:
logger.error('Failed starting slurmdbd %s ' % (e))
return -1
logger.info('slurmdbd started')
try:
proc = subprocess.Popen(htab['slurmctld'], stdout = None,
stderr = subprocess.PIPE)
rc = proc.wait()
if rc != 0:
            logger.critical('Problems starting %s' % (htab['slurmctld']))
            for line in proc.stderr:
                logger.critical('stderr: %s' % (line.strip()))
return False
except Exception as e:
logger.error('Failed starting slurmctld %s' % (e))
return -1
logger.info('slurmctld started')
#pdb.set_trace()
n = 1
try:
if 'multi' in htab:
for n in range(1, int(htab['multi']) + 1):
slurmd = '%s -N %s%d' % (htab['slurmd'], htab['multiname'], n)
proc = subprocess.Popen(slurmd, shell = True, stdout = None,
stderr = subprocess.PIPE)
rc = proc.wait()
if rc != 0:
                    logger.critical('Problems starting %s' % (htab['slurmd']))
                    for line in proc.stderr:
                        logger.critical('stderr: %s' % (line.strip()))
return False
logger.info('%s started' % (slurmd))
else:
proc = subprocess.Popen(htab['slurmd'], stdout = None,
stderr = subprocess.PIPE)
rc = proc.wait()
if rc != 0:
                logger.critical('Problems starting %s' % (htab['slurmd']))
                for line in proc.stderr:
                    logger.critical('stderr: %s' % (line.strip()))
return False
logger.info('slurmd started')
except Exception as e:
logger.error('Failed starting slurmd %s' % (e))
return -1
logger.info('Wait 5 secs for all slurmd to come up...')
time.sleep(5)
# run sinfo to check if all sweet
sinfo = '%s/sinfo --noheader --format=%%T' % (htab['bindir'])
logger.info('sinfo -> %s', sinfo)
proc = subprocess.Popen(sinfo,
shell=True,
stdout=subprocess.PIPE,
stderr=None)
rc = proc.wait()
if rc != 0:
logger.error('sinfo failed to check cluster state')
for line in proc.stdout:
if line.strip() == 'idle':
logger.info( 'Cluster state is ok -> %s' % line.strip())
else:
logger.error('Failed to get correct cluster status %s'
% line.strip())
def kill_daemons(htab):
piddir = '%s/pid' % (htab['logdir'])
ld = [htab['logdir'], piddir]
for l in ld:
os.chdir(l)
for pf in glob.iglob('*.pid'):
try:
f = open(pf, 'r')
except IOError as e:
logger.error('No pidfile? -> %s %s' % (pf, e))
else :
pid = f.readline().strip()
logger.info('Got %s pid -> %s' % (f.name, pid))
try:
os.kill(int(pid), 15)
except OSError as e:
logger.error('Cannot kill %s? %s' % (pid, e))
f.close()
def run_regression(htab):
testdir = '%s/testsuite/expect' % (htab['srcdir'])
regress = '%s/regression.py' % (testdir)
os.chdir(testdir)
logger.info('cd to -> %s', testdir)
# instal globals.local
try:
f = open('globals.local', 'w')
except IOError as e:
logger.error('Error failed opening globals.local %s' % (e))
return -1
z = 'set slurm_dir %s' % (htab['prefix'])
w = 'set mpicc /usr/local/openmpi/bin/mpicc'
print >> f, z
print >> f, w
f.close()
# Write regression output into logfile
regfile = '%s/Regression' % (htab['testlogdir'])
htab['regfile'] = regfile
try:
rf = open(regfile, 'w')
except IOError as e:
logger.error('Error failed to open %s %s' % (regfile, e))
return -1
# pdb.set_trace()
logger.info('running regression %s' % (regress))
try:
proc = subprocess.Popen(regress,
shell=True,
stdout=rf,
stderr=rf)
except OSError as e:
logger.error('Error execution failed %s' % (e))
proc.wait()
rf.close()
def send_result(htab):
if not htab['mailto']:
logger.info('No mail will be sent..')
os.rename(htab['regfile'], 'regression-')
return
os.chdir(htab['testlogdir'])
logger.info('Sending result from %s' % (htab['testlogdir']))
mailmsg = '%s/mailmsg' % (htab['testlogdir'])
try:
f = open(htab['regfile'])
except IOError as e:
logger.error('Error failed to open regression output file %s'
% (e))
try:
fp = open(mailmsg, 'w')
except IOError as e:
logger.error('Error failed open mailmsg file %s' % (e))
print >> fp, 'Finished test', htab['section'], htab['version'], htab['arch']
# open the regression file and send the tail
# of it starting at 'Ending'
ended = False
for line in f:
lstr = line.strip('\n')
if not ended and lstr.find('Ended') != -1:
ended = True
if ended:
print >> fp, lstr
try:
f.close()
except IOError as e:
logger.error('Failed closing %s did the regression ran all right ?'
% (e))
try:
fp.close()
except IOError as e:
logger.error('Failed closing %s did the regression terminated all right ?'
% (e))
# Open a plain text file for reading. For this example, assume that
# the text file contains only ASCII characters.
fp = open(mailmsg, 'rb')
# Create a text/plain message
msg = MIMEText(fp.read())
fp.close()
me = '[email protected]'
to = htab['mailto']
# me == the sender's email address
# to == the recipient's email address
msg['Subject'] = 'Regression results %s@%s' % (htab['section'], htab['cas'])
msg['From'] = me
msg['To'] = to
# msg['CC'] = cc
# Send the message via our own SMTP server, but don't include the
# envelope header.
s = smtplib.SMTP('localhost')
# s.sendmail(me, [to] + [cc], msg.as_string())
s.sendmail(me, [to], msg.as_string())
s.quit()
# ciao ... save latest copies...
logger.info('email sent to %s' % (to))
    os.rename(mailmsg, 'mailmsg-')
os.rename(htab['regfile'], 'regression-')
def set_environ(htab):
os.environ['PATH'] = '/bin:/usr/bin:%s' % (htab['bindir'])
logger.info('PATH-> %s' % (os.environ['PATH']))
os.environ['LD_LIBRARY_PATH'] = '/usr/local/openmpi/lib'
logger.info('LD_LIBRARY_PATH-> %s' % (os.environ['LD_LIBRARY_PATH']))
# Da main of da driver
def main():
init_console()
# Define program argument list, create and invoke
# the parser
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--config_file',
help = 'specify the location of the config file')
args = parser.parse_args()
if not args.config_file:
if os.path.isfile('driver.conf'):
cfile = 'driver.conf'
else :
logger.critical('path to configuration file not specified')
logger.critical('default driver.conf not found')
return -1
else:
cfile = args.config_file
# pdb.set_trace()
dt = datetime.datetime.now()
cas = '%s-%s:%s:%s' % (dt.month, dt.day, dt.hour, dt.minute)
logger.info('Starting %s cas -> %s', os.getpid(), cas)
# process the configuration file
conf = read_config(cfile)
for section in conf.sections():
htab = {}
htab['cas'] = cas
try:
root = conf.get(section, 'root')
htab['root'] = root
logger.info( 'root -> %s', root)
except ConfigParser.NoOptionError as e:
logger.fatal('Error root option missing from configuration %s' % (e))
htab['section'] = section
fh = init_log(htab)
logger.info('Root %s of section %s' % (htab['root'], htab['section']))
configure_and_build(htab, conf, section)
set_environ(htab)
start_daemons(htab)
run_regression(htab)
send_result(htab)
kill_daemons(htab)
logger.info('test %s done daemons killed' % (section))
logger.removeHandler(fh)
logger.info('all tests done...')
return 0
if __name__ == '__main__':
sys.exit(main())
| gpl-2.0 | 2,381,391,416,520,161,300 | 30.102609 | 83 | 0.550883 | false |
MasterOdin/gitvier | setup.py | 1 | 1816 | #!/usr/bin/env python
"""Setup script for the package."""
import os
import sys
import setuptools
from gitvier import __project__, __version__, __author__, DESCRIPTION
PACKAGE_NAME = "gitvier"
MINIMUM_PYTHON_VERSION = (3, 5)
def check_python_version():
"""Exit when the Python version is too low."""
if sys.version_info < MINIMUM_PYTHON_VERSION:
sys.exit("Python {0}.{1}+ is required.".format(*MINIMUM_PYTHON_VERSION))
def read_descriptions():
"""Build a description for the project from documentation files."""
try:
readme = open("README.rst").read()
except IOError:
return "<placeholder>"
else:
return readme
check_python_version()
setuptools.setup(
name=__project__,
version=__version__,
author=__author__,
author_email='[email protected]',
description=DESCRIPTION,
long_description=read_descriptions(),
url='https://github.com/MasterOdin/gitvier',
packages=setuptools.find_packages(),
entry_points={'console_scripts': [
'gitvier = gitvier.cli:main'
]},
license='MIT',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Version Control',
'Topic :: System :: Software Distribution'
],
install_requires=[
'colorama',
'GitPython',
'PyYAML'
]
)
| mit | 4,129,258,678,640,155,000 | 26.515152 | 80 | 0.616189 | false |
thecookieraider/PyStar | Maze Generation/Pathfinder.py | 1 | 14841 | from math import sqrt
from random import shuffle, randint, seed
from Mazers import Depth_First
from pygame.locals import *
from time import time
import sys
import pygame
import Main
class Pathfinder:
START_COLOR = (0, 0, 255)
END_COLOR = (255, 20, 147)
SEARCHED_COLOR = (255, 0, 0)
PATH_COLOR = (0, 255, 0)
FPS = 60
DRAWING = 0x01
AUTO = 0x02
def __init__(self, main_args, displaysurf, width, height):
self.surf = displaysurf
self.fps = pygame.time.Clock()
self.w = width
self.h = height
self.main_args = main_args
self.maze = Depth_First.Maze(width, height, False)
self.points = []
self.keys = {}
self.mode = self.AUTO
self.seed = randint(0, 1000)
if main_args['type'] == Main.REG_MAZE:
self.maze.generate(self.seed)
self.blitMethod = Depth_First.Maze.gen_surf_s
elif main_args['type'] == Main.BOX_MAZE:
self.maze.generate_box(self.seed, main_args['box_dims'], main_args['diagonal'])
self.blitMethod = Depth_First.Maze.gen_surf_box_s
self.cells = self.maze.cells
self.highlighted_cell = [0, 0]
self.update()
pygame.display.update()
self.handle_events()
def a_star(self, start, goal):
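        # A* over the maze grid: repeatedly expand the open node with the lowest
        # fCost, colour searched cells as it goes, and once the goal is reached
        # walk the parent links back to paint and return the path (None if no
        # path exists).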
openlist = set()
closedlist = set()
current = Node(start, None, 0, self.get_distance(start, goal))
openlist.add(current)
while openlist:
            # A set has no ordering, so select the open node with the lowest
            # fCost explicitly instead of relying on pop().
            current = min(openlist, key=lambda _node: _node.fCost)
            openlist.remove(current)
if current.cell.x == goal.x and current.cell.y == goal.y:
path = []
while current.parent is not None:
path.append(current)
if current.cell.color != self.END_COLOR:
current.cell.color = self.PATH_COLOR
current = current.parent
self.surf.blit(self.blitMethod(self.cells, self.w, self.h), (0, 0))
pygame.display.update()
return path
closedlist.add(current)
if current.cell.color != self.START_COLOR:
current.cell.color = self.SEARCHED_COLOR
self.special_events()
self.surf.blit(self.blitMethod(self.cells, self.w, self.h), (0, 0))
pygame.display.update()
n = [x for x in current.cell.get_neighbors(self.cells) if x.visited]
for cell in n:
if self.cell_in_list(cell, closedlist):
continue
gcost = current.gCost + self.get_distance(current.cell, cell)
hcost = self.get_distance(cell, goal)
node = Node(cell, current, gcost, hcost)
if not self.cell_in_list(cell, openlist):
openlist.add(node)
return None
@staticmethod
def cell_in_list(cell, nodelist):
for i in nodelist:
if i.cell.x == cell.x and i.cell.y == cell.y:
return True
return False
@staticmethod
def better_sibling(node, openlist):
for i in openlist:
if i.cell == node.cell and i.fCost <= node.fCost:
return True
return False
def get_random_point(self):
l = [i for x in self.cells for i in x if i.visited]
shuffle(l)
return l[randint(0, len(l)-1)]
@staticmethod
def node_sorter(a, b):
if b.fCost < a.fCost:
return 1
if b.fCost > a.fCost:
return -1
return 0
@staticmethod
def get_distance(start, goal):
dx = float(start.x - goal.x)
dy = float(start.y - goal.y)
dist = float(sqrt(dx * dx + dy * dy))
return dist
@staticmethod
def clamp(x, y, maxx, maxy, minx, miny):
pair = []
if x > maxx:
pair.append(maxx)
elif x < minx:
pair.append(minx)
else:
pair.append(x)
if y > maxy:
pair.append(maxy)
elif y < miny:
pair.append(miny)
else:
pair.append(y)
return pair
def generate_random_start_end(self):
self.reset_cell_colors()
seed()
self.points = [self.get_random_point(), self.get_random_point()]
self.points[0].color = self.START_COLOR
self.points[1].color = self.END_COLOR
print("New points generated: Start: {}, {} | End: {}, {}".format(self.points[0].x, self.points[0].y, self.points[1].x, self.points[1].y))
self.update()
def set_all_cells_to_color(self, col):
for array in self.cells:
for cell in array:
cell.color = col
def reset_cell_colors(self, leave_start_end=False):
for array in self.cells:
for cell in array:
if cell.visited:
if leave_start_end and cell.color in (self.START_COLOR, self.END_COLOR):
continue
else:
cell.color = (255, 255, 255)
else:
cell.color = (0, 0, 0)
self.update()
def reset_maze(self, new_seed):
if new_seed:
self.seed = randint(0, 1000)
if self.main_args['type'] == Main.BOX_MAZE:
self.maze.generate_box(self.seed, self.main_args['box_dims'], self.main_args['diagonal'])
elif self.main_args['type'] == Main.REG_MAZE:
self.maze.generate(self.seed, self.main_args['diagonal'])
self.cells = self.maze.cells
self.update()
def get_cell(self, x, y):
for array in self.cells:
for cell in array:
if self.main_args['type'] == Main.BOX_MAZE:
if cell.x == int((x / self.main_args['box_dims'][0])) \
and cell.y == int((y / self.main_args['box_dims'][1])):
return cell
else:
if cell.x == x and cell.y == y:
return cell
def handle_events(self):
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit(0)
if event.type == MOUSEBUTTONDOWN:
if event.button == 1:
if len(self.points) == 2:
self.points = []
self.reset_cell_colors()
elif len(self.points) == 1:
cell = self.get_cell(event.pos[0], event.pos[1])
if cell.visited:
self.points.append(cell)
cell.color = self.END_COLOR
elif not self.points:
cell = self.get_cell(event.pos[0], event.pos[1])
if cell.visited:
self.points.append(cell)
cell.color = self.START_COLOR
if event.button == 3:
self.keys['button3'] = event.pos
if event.type == MOUSEBUTTONUP:
if 'button'+str(event.button) in self.keys:
del self.keys['button'+str(event.button)]
if event.type == KEYDOWN:
self.keys[event.key] = True
if event.key == K_d:
self.highlighted_cell[0] += self.main_args['box_dims'][0]
elif event.key == K_s:
self.highlighted_cell[1] += self.main_args['box_dims'][1]
elif event.key == K_a:
self.highlighted_cell[0] -= self.main_args['box_dims'][0]
elif event.key == K_w:
self.highlighted_cell[1] -= self.main_args['box_dims'][1]
if event.type == KEYUP:
if event.key in self.keys:
del self.keys[event.key]
if event.key == K_z:
if self.mode == self.AUTO:
self.mode = self.DRAWING
self.set_all_cells_to_color((255, 255, 255))
for r in self.cells:
for c in r:
c.visited = True
else:
self.reset_maze(False)
self.cells = self.maze.cells
self.mode = self.AUTO
self.points = []
if event.key == K_r:
Main.main()
elif event.key == K_f:
self.reset_cell_colors(True)
if not self.points or len(self.points) < 2:
self.generate_random_start_end()
print("Finding path . . .")
print("Start: ({}, {})\nEnd: ({}, {})".format(self.points[0].x, self.points[0].y, self.points[1].x, self.points[1].y))
b = time()
self.a_star(self.points[0], self.points[1])
e = time()
print("Done in {} seconds".format(e - b))
elif event.key == K_p:
self.generate_random_start_end()
elif event.key == K_m:
self.reset_maze(True)
elif event.key == K_c:
if self.mode == self.AUTO:
self.reset_cell_colors()
self.points = []
else:
self.set_all_cells_to_color((255, 255, 255))
for r in self.cells:
for c in r:
c.visited = True
self.points = []
elif event.key == K_x:
if self.mode == self.DRAWING:
for r in self.cells:
for c in r:
if not c.visited:
c.color = (255, 255, 255)
c.visited = True
self.reset_cell_colors(True)
else:
self.reset_cell_colors(True)
elif event.key == K_SPACE:
if len(self.points) == 2:
self.points = []
self.reset_cell_colors()
elif len(self.points) == 1:
hcell = self.get_cell(self.highlighted_cell[0], self.highlighted_cell[1])
if hcell:
self.points.append(hcell)
hcell.color = self.END_COLOR
elif not self.points:
hcell = self.get_cell(self.highlighted_cell[0], self.highlighted_cell[1])
if hcell:
self.points.append(hcell)
hcell.color = self.START_COLOR
self.update()
pygame.event.pump()
if K_RIGHT in self.keys:
self.highlighted_cell[0] += self.main_args['box_dims'][0]
elif K_DOWN in self.keys:
self.highlighted_cell[1] += self.main_args['box_dims'][1]
elif K_LEFT in self.keys:
self.highlighted_cell[0] -= self.main_args['box_dims'][0]
elif K_UP in self.keys:
self.highlighted_cell[1] -= self.main_args['box_dims'][1]
self.highlighted_cell = self.clamp(self.highlighted_cell[0],
self.highlighted_cell[1], self.w - self.main_args['box_dims'][0],
self.h - self.main_args['box_dims'][1], 0, 0)
if K_v in self.keys and self.mode == self.DRAWING:
hcell = self.get_cell(self.highlighted_cell[0], self.highlighted_cell[1])
hcell.visited = False
hcell.color = (0, 0, 0)
if 'button3' in self.keys and self.mode == self.DRAWING:
hcell = self.get_cell(pygame.mouse.get_pos()[0], pygame.mouse.get_pos()[1])
hcell.visited = False
hcell.color = (0, 0, 0)
if K_b in self.keys and self.mode == self.DRAWING:
hcell = self.get_cell(self.highlighted_cell[0], self.highlighted_cell[1])
hcell.visited = True
hcell.color = (255, 255, 255)
hcell = self.get_cell(self.highlighted_cell[0], self.highlighted_cell[1])
pygame.draw.rect(self.surf, (0, 255, 0),
(self.highlighted_cell[0], self.highlighted_cell[1], hcell.box[0], hcell.box[1]))
pygame.display.update()
self.fps.tick(self.FPS)
def special_events(self):
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit(0)
if event.type == KEYUP:
if event.key == K_r:
Main.main()
elif event.key == K_k:
print("A-Star Halted")
self.handle_events()
elif event.key == K_f:
self.reset_cell_colors(True)
self.points = []
self.generate_random_start_end()
print("Finding path . . .")
print("START: ({}, {})\nEND: ({}, {})".format(self.points[0].x, self.points[0].y, self.points[1].x, self.points[1].y))
b = time()
self.a_star(self.points[0], self.points[1])
e = time()
print("FOUND PATH IN {} SECONDS".format(e - b))
self.handle_events()
def update(self):
self.surf.blit(self.blitMethod(self.cells, self.w, self.h), (0, 0))
class Node:
def __init__(self, cell, parent, gcost, hcost):
self.cell = cell
self.parent = parent
self.gCost = gcost
self.hCost = hcost
self.fCost = gcost + hcost
| mit | -1,412,383,707,930,343,000 | 37.952756 | 145 | 0.457112 | false |
citrtech/DJLand-Tools | audio_splitter/test/test_slice.py | 1 | 1077 | from pydub import AudioSegment
# pydub does things in milliseconds
ten_seconds = 10 * 1000
one_second = 1000
#Examples
#first_10_seconds = song[:ten_seconds]
#last_5_seconds = song[-5000:]
song = AudioSegment.from_mp3("2016.01.04-09.00.00-S.mp3")
#print("Test")
#last_second = song[-ten_seconds:]
#last_second.export("out/testing.mp3", format="mp3")
#Cool, that worked; now let's try looping
#find the duration of the input clip in milliseconds
duration_in_milliseconds = len(song)
#grab each one second slice and save it from the first second to the last whole second in the file
for i in range(0,duration_in_milliseconds,1*one_second):
print ("Second number %s \n" % (int(i/1000)) )
offset = i + one_second
current_second = song[i:offset];
filename = "out/" + str(int(i/1000)) + ".mp3"
current_second.export(filename, format="mp3")
#it works! now we just have to combine it with the other stuff to start from the
#right unix timestamp and check behaviour of last second (where there might not
#be a complete second of audio left)
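#A rough sketch of that follow-up work (added for illustration only -- the
#start timestamp below is made up and none of this is wired into the rest of
#the project yet):
#
#start_unix_time = 1451898000  # assumed unix timestamp of the recording start
#for i in range(0, duration_in_milliseconds, one_second):
#    #pydub slices like a python list, so the final slice simply comes back
#    #shorter than one second instead of raising an error
#    current_second = song[i:i + one_second]
#    filename = "out/" + str(start_unix_time + int(i / 1000)) + ".mp3"
#    current_second.export(filename, format="mp3")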
| gpl-3.0 | -2,184,063,138,072,133,000 | 30.676471 | 98 | 0.707521 | false |
endlessm/chromium-browser | third_party/chromite/lib/failures_lib_unittest.py | 1 | 12792 | # -*- coding: utf-8 -*-
# Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test the failures_lib module."""
from __future__ import print_function
import json
from chromite.lib import failures_lib
from chromite.lib import failure_message_lib
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
class StepFailureTests(cros_test_lib.TestCase):
"""Tests for StepFailure."""
def testConvertToStageFailureMessage(self):
"""Test ConvertToStageFailureMessage."""
failure = failures_lib.StepFailure('step failure message')
stage_failure_msg = failure.ConvertToStageFailureMessage(
1, 'HWTest [sanity]')
self.assertEqual(stage_failure_msg.stage_name, 'HWTest [sanity]')
self.assertEqual(stage_failure_msg.stage_prefix_name, 'HWTest')
self.assertEqual(stage_failure_msg.exception_type, 'StepFailure')
self.assertEqual(stage_failure_msg.exception_category, 'unknown')
class CompoundFailureTest(cros_test_lib.TestCase):
"""Test the CompoundFailure class."""
def _CreateExceptInfos(self, cls, message='', traceback='', num=1):
"""A helper function to create a list of ExceptInfo objects."""
exc_infos = []
for _ in range(num):
exc_infos.extend(failures_lib.CreateExceptInfo(cls(message), traceback))
return exc_infos
def testHasEmptyList(self):
"""Tests the HasEmptyList method."""
self.assertTrue(failures_lib.CompoundFailure().HasEmptyList())
exc_infos = self._CreateExceptInfos(KeyError)
self.assertFalse(
failures_lib.CompoundFailure(exc_infos=exc_infos).HasEmptyList())
def testHasAndMatchesFailureType(self):
"""Tests the HasFailureType and the MatchesFailureType methods."""
# Create a CompoundFailure instance with mixed types of exceptions.
exc_infos = self._CreateExceptInfos(KeyError)
exc_infos.extend(self._CreateExceptInfos(ValueError))
exc = failures_lib.CompoundFailure(exc_infos=exc_infos)
self.assertTrue(exc.HasFailureType(KeyError))
self.assertTrue(exc.HasFailureType(ValueError))
self.assertFalse(exc.MatchesFailureType(KeyError))
self.assertFalse(exc.MatchesFailureType(ValueError))
# Create a CompoundFailure instance with a single type of exceptions.
exc_infos = self._CreateExceptInfos(KeyError, num=5)
exc = failures_lib.CompoundFailure(exc_infos=exc_infos)
self.assertTrue(exc.HasFailureType(KeyError))
self.assertFalse(exc.HasFailureType(ValueError))
self.assertTrue(exc.MatchesFailureType(KeyError))
self.assertFalse(exc.MatchesFailureType(ValueError))
def testHasFatalFailure(self):
"""Tests the HasFatalFailure method."""
exc_infos = self._CreateExceptInfos(KeyError)
exc_infos.extend(self._CreateExceptInfos(ValueError))
exc = failures_lib.CompoundFailure(exc_infos=exc_infos)
self.assertTrue(exc.HasFatalFailure())
self.assertTrue(exc.HasFatalFailure(whitelist=[KeyError]))
self.assertFalse(exc.HasFatalFailure(whitelist=[KeyError, ValueError]))
exc = failures_lib.CompoundFailure()
self.assertFalse(exc.HasFatalFailure())
def testMessageContainsAllInfo(self):
"""Tests that by default, all information is included in the message."""
exc_infos = self._CreateExceptInfos(KeyError, message='bar1',
traceback='foo1')
exc_infos.extend(self._CreateExceptInfos(ValueError, message='bar2',
traceback='foo2'))
exc = failures_lib.CompoundFailure(exc_infos=exc_infos)
self.assertIn('bar1', str(exc))
self.assertIn('bar2', str(exc))
self.assertIn('KeyError', str(exc))
self.assertIn('ValueError', str(exc))
self.assertIn('foo1', str(exc))
self.assertIn('foo2', str(exc))
def testConvertToStageFailureMessage(self):
"""Test ConvertToStageFailureMessage."""
exc_infos = self._CreateExceptInfos(KeyError, message='bar1',
traceback='foo1')
exc_infos.extend(self._CreateExceptInfos(failures_lib.StepFailure,
message='bar2',
traceback='foo2'))
exc = failures_lib.CompoundFailure(message='compound failure',
exc_infos=exc_infos)
stage_failure_msg = exc.ConvertToStageFailureMessage(1, 'HWTest [sanity]')
self.assertEqual(len(stage_failure_msg.inner_failures), 2)
self.assertEqual(stage_failure_msg.stage_name, 'HWTest [sanity]')
self.assertEqual(stage_failure_msg.stage_prefix_name, 'HWTest')
self.assertEqual(stage_failure_msg.exception_type, 'CompoundFailure')
self.assertEqual(stage_failure_msg.exception_category, 'unknown')
class ReportStageFailureTest(cros_test_lib.MockTestCase):
"""Tests for ReportStageFailure."""
def testReportStageFailure(self):
"""Test ReportStageFailure."""
class FakeStepFailure(failures_lib.StepFailure):
"""A fake StepFailure subclass for unittest."""
EXCEPTION_CATEGORY = 'unittest'
fake_failure = FakeStepFailure('Toot! Toot!')
insert_failure_fn = self.PatchObject(failures_lib,
'_InsertFailureToMonarch')
failures_lib.ReportStageFailure(
fake_failure, {})
insert_failure_fn.assert_called_once_with(exception_category='unittest',
metrics_fields={})
class SetFailureTypeTest(cros_test_lib.TestCase):
"""Test that the SetFailureType decorator works."""
ERROR_MESSAGE = 'You failed!'
class TacoNotTasty(failures_lib.CompoundFailure):
"""Raised when the taco is not tasty."""
class NoGuacamole(TacoNotTasty):
"""Raised when no guacamole in the taco."""
class SubparLunch(failures_lib.CompoundFailure):
"""Raised when the lunch is subpar."""
class FooException(Exception):
"""A foo exception."""
def _GetFunction(self, set_type, raise_type, *args, **kwargs):
"""Returns a function to test.
Args:
set_type: The exception type that the function is decorated with.
raise_type: The exception type that the function raises.
*args: args to pass to the instance of |raise_type|.
Returns:
The function to test.
"""
@failures_lib.SetFailureType(set_type)
def f():
raise raise_type(*args, **kwargs)
return f
def testAssertionFailOnIllegalExceptionType(self):
"""Assertion should fail if the pre-set type is not allowed ."""
self.assertRaises(AssertionError, self._GetFunction, ValueError,
self.FooException)
def testReraiseAsNewException(self):
"""Tests that the pre-set exception type is raised correctly."""
try:
self._GetFunction(self.TacoNotTasty, self.FooException,
self.ERROR_MESSAGE)()
except Exception as e:
self.assertTrue(isinstance(e, self.TacoNotTasty))
self.assertTrue(e.msg, self.ERROR_MESSAGE)
self.assertEqual(len(e.exc_infos), 1)
self.assertEqual(e.exc_infos[0].str, self.ERROR_MESSAGE)
self.assertEqual(e.exc_infos[0].type, self.FooException)
self.assertTrue(isinstance(e.exc_infos[0].traceback, str))
def testReraiseACompoundFailure(self):
"""Tests that the list of ExceptInfo objects are copied over."""
tb1 = 'Dummy traceback1'
tb2 = 'Dummy traceback2'
org_infos = failures_lib.CreateExceptInfo(ValueError('No taco.'), tb1) + \
failures_lib.CreateExceptInfo(OSError('No salsa'), tb2)
try:
self._GetFunction(self.SubparLunch, self.TacoNotTasty,
exc_infos=org_infos)()
except Exception as e:
self.assertTrue(isinstance(e, self.SubparLunch))
      # The original exceptions stored in exc_infos are preserved.
self.assertEqual(e.exc_infos, org_infos)
      # All essential information should be included in the message of
      # the new exception.
self.assertIn(tb1, str(e))
self.assertIn(tb2, str(e))
self.assertIn(str(ValueError), str(e))
self.assertIn(str(OSError), str(e))
self.assertIn(str('No taco'), str(e))
self.assertIn(str('No salsa'), str(e))
# Assert that summary does not contain the textual tracebacks.
self.assertFalse(tb1 in e.ToSummaryString())
self.assertFalse(tb2 in e.ToSummaryString())
def testReraiseACompoundFailureWithEmptyList(self):
"""Tests that a CompoundFailure with empty list is handled correctly."""
try:
self._GetFunction(self.SubparLunch, self.TacoNotTasty,
message='empty list')()
except Exception as e:
self.assertTrue(isinstance(e, self.SubparLunch))
self.assertEqual(e.exc_infos[0].type, self.TacoNotTasty)
def testReraiseOriginalException(self):
"""Tests that the original exception is re-raised."""
# NoGuacamole is a subclass of TacoNotTasty, so the wrapper has no
# effect on it.
f = self._GetFunction(self.TacoNotTasty, self.NoGuacamole)
self.assertRaises(self.NoGuacamole, f)
def testPassArgsToWrappedFunctor(self):
"""Tests that we can pass arguments to the functor."""
@failures_lib.SetFailureType(self.TacoNotTasty)
def f(arg):
return arg
@failures_lib.SetFailureType(self.TacoNotTasty)
def g(kwarg=''):
return kwarg
# Test passing arguments.
self.assertEqual(f('foo'), 'foo')
# Test passing keyword arguments.
self.assertEqual(g(kwarg='bar'), 'bar')
class ExceptInfoTest(cros_test_lib.TestCase):
"""Tests the namedtuple class ExceptInfo."""
def testConvertToExceptInfo(self):
"""Tests converting an exception to an ExceptInfo object."""
traceback = 'Dummy traceback'
message = 'Taco is not a valid option!'
except_infos = failures_lib.CreateExceptInfo(
ValueError(message), traceback)
self.assertEqual(except_infos[0].type, ValueError)
self.assertEqual(except_infos[0].str, message)
self.assertEqual(except_infos[0].traceback, traceback)
class FailureTypeListTests(cros_test_lib.TestCase):
"""Tests for failure type lists."""
def testFailureTypeList(self):
"""Test the current failure names are already added to the type lists."""
self.assertTrue(failures_lib.BuildScriptFailure.__name__ in
failure_message_lib.BUILD_SCRIPT_FAILURE_TYPES)
self.assertTrue(failures_lib.PackageBuildFailure.__name__ in
failure_message_lib.PACKAGE_BUILD_FAILURE_TYPES)
class GetStageFailureMessageFromExceptionTests(cros_test_lib.TestCase):
"""Tests for GetStageFailureMessageFromException"""
def testGetStageFailureMessageFromExceptionOnStepFailure(self):
"""Test GetStageFailureMessageFromException on StepFailure."""
exc = failures_lib.StepFailure('step failure message')
msg = failures_lib.GetStageFailureMessageFromException(
'CommitQueueSync', 1, exc)
self.assertEqual(msg.build_stage_id, 1)
self.assertEqual(msg.stage_name, 'CommitQueueSync')
self.assertEqual(msg.stage_prefix_name, 'CommitQueueSync')
self.assertEqual(msg.exception_type, 'StepFailure')
self.assertEqual(msg.exception_category, 'unknown')
def testGetStageFailureMessageFromExceptionOnException(self):
"""Test GetStageFailureMessageFromException on regular exception."""
    exc = ValueError('Invalid value.')
msg = failures_lib.GetStageFailureMessageFromException(
'CommitQueueSync', 1, exc)
self.assertEqual(msg.build_stage_id, 1)
self.assertEqual(msg.stage_name, 'CommitQueueSync')
self.assertEqual(msg.stage_prefix_name, 'CommitQueueSync')
self.assertEqual(msg.exception_type, 'ValueError')
self.assertEqual(msg.exception_category, 'unknown')
class BuildFailuresForFindit(cros_test_lib.TestCase):
"""Test cases for exporting build failures for Findit integration."""
def testBuildFailuresJson(self):
error = cros_build_lib.RunCommandError('run cmd error')
failed_packages = ['sys-apps/mosys', 'chromeos-base/cryptohome']
build_failure = failures_lib.PackageBuildFailure(
error, './build_packages', failed_packages)
self.assertSetEqual(set(failed_packages), build_failure.failed_packages)
failure_json = build_failure.BuildCompileFailureOutputJson()
values = json.loads(failure_json)
failures = values['failures']
self.assertEqual(len(failures), 2)
# Verify both output targets are not equal, this makes sure the loop
# below is correct.
self.assertNotEqual(failures[0]['output_targets'],
failures[1]['output_targets'])
for value in failures:
self.assertEqual(value['rule'], 'emerge')
self.assertIn(value['output_targets'], failed_packages)
| bsd-3-clause | 2,424,521,835,697,124,400 | 40.131833 | 78 | 0.697702 | false |
farseerfc/pacvis | pacvis/pacvis.py | 1 | 7040 | #!/usr/bin/env python
import sys
import json
from types import SimpleNamespace
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from webbrowser import open_new_tab
import tornado.ioloop
import tornado.web
from .console import start_message, append_message, print_message
from .infos import DbInfo, PkgInfo, GroupInfo, VDepInfo
# Tornado entry
class MainHandler(tornado.web.RequestHandler):
def parse_args(self, **kargs):
result = {}
for key in kargs:
defvalue = str(kargs[key])
if type(kargs[key]) is int:
result[key] = int(self.get_argument(key, defvalue))
elif type(kargs[key]) is bool:
result[key] = self.get_argument(key, defvalue) != "False"
else:
result[key] = self.get_argument(key, defvalue)
print_message("get arg %r: %r" % (key, result[key]))
return result
def get(self):
print_message("\n" + str(self.request))
args = SimpleNamespace(**self.parse_args(
maxlevel=1000,
maxreqs=1000,
maxdeps=1000,
drawsize="isize",
usemagic=False,
straightline=False,
enablephysics=False,
aligntop=False,
disableallphysics=False,
debugperformance=False,
byrepos=False,
showallvdeps=False))
dbinfo = DbInfo()
start_message("Loading local database ...")
dbinfo.find_all(args.showallvdeps)
append_message("done")
start_message("Finding all dependency circles ... ")
dbinfo.find_circles()
append_message("done")
dbinfo.topology_sort(args.usemagic, args.aligntop, args.byrepos)
dbinfo.calcSizes()
start_message("Rendering ... ")
nodes = []
links = []
nodes.append({"id": 0,
"label": "level 1 group",
"level": 0,
"shape": "triangleDown",
"isize": 0,
"csize": 0,
"cssize": 0,
"deps": "",
"reqs": "",
"optdeps": "",
"desc": "",
"version": "",
"group": "group",
"groups": "",
"provides": "",
})
ids = 1
for pkg in sorted(dbinfo.all_pkgs.values(), key=lambda x: x.level):
append_message("%s" % pkg.name)
pkg.id = ids
ids += 1
if pkg.level < args.maxlevel:
group = "normal"
if pkg.level == 0:
group = "standalone"
elif type(pkg) is GroupInfo:
group = "group"
elif type(pkg) is VDepInfo:
group = "vdep"
# if not args.showallvdeps and len(pkg.requiredby) == 0:
# continue
elif pkg.explicit:
group = "explicit"
nodes.append({"id": pkg.id,
"label": pkg.name,
"level": pkg.level,
"group": group,
"isize": pkg.isize,
"csize": pkg.csize,
"cssize": pkg.cssize,
"deps": ", ".join(pkg.deps),
"reqs": ", ".join(pkg.requiredby),
"optdeps": ", ".join(pkg.optdeps),
"groups": ", ".join(pkg.groups),
"provides": ", ".join(pkg.provides),
"desc": pkg.desc,
"version": pkg.version,
"repo": pkg.repo,
})
ids = 0
for pkg in sorted(dbinfo.all_pkgs.values(), key=lambda x: x.level):
if pkg.level < args.maxlevel:
if len(pkg.deps) == 0 and len(pkg.requiredby) == 0:
links.append({"id": ids,
"from": pkg.id,
"to": 0})
ids += 1
if len(pkg.deps) < args.maxdeps:
for dep in pkg.deps:
if dep not in pkg.circledeps:
if len(dbinfo.get(dep).requiredby) < args.maxreqs:
links.append({"id": ids,
"from": pkg.id,
"to": dbinfo.get(dep).id})
ids += 1
for dep in pkg.circledeps:
if (pkg.id != dbinfo.get(dep).id):
links.append({"id": ids,
"to": pkg.id,
"from": dbinfo.get(dep).id,
"color": "rgb(244,67,54,0.8)"})
ids += 1
for dep in pkg.optdeps:
if dep in dbinfo.all_pkgs:
links.append({"id": ids,
"from": pkg.id,
"to": dbinfo.get(dep).id,
"dashes": True,
"color": "rgb(255,235,59)"})
ids += 1
print_message("Writing HTML")
self.render("templates/index.template.html",
nodes=json.dumps(nodes),
links=json.dumps(links),
options=args,
optionsjson=json.dumps(args.__dict__))
def make_app():
import os
return tornado.web.Application([
(r"/", MainHandler),
], debug=True,
static_path=os.path.join(os.path.dirname(__file__), "static"))
def main():
argp = ArgumentParser(description='start PacVis server', formatter_class=ArgumentDefaultsHelpFormatter)
argp.add_argument('-p', '--port', type=int, default=8888, help='listen at given port')
argp.add_argument('-s', '--host', type=str, default='localhost', help='listen at given hostname')
argp.add_argument('-b', '--browser', action='store_true', help='start a browser')
args = argp.parse_args()
app = make_app()
app.listen(args.port, address=args.host)
print_message(f"Start PacVis at http://{args.host}:{args.port}/")
if args.browser:
url = f'http://{args.host}:{args.port}/'
print_message(f'open in browser: {url}')
open_new_tab(url)
else:
print_message('use --browser to open a browser automatically.')
try:
tornado.ioloop.IOLoop.current().start()
except KeyboardInterrupt:
print_message("Received interrupt from keyboard, shutting down ...")
sys.exit(0)
if __name__ == "__main__":
main()
| mit | 1,197,412,450,875,610,600 | 37.681319 | 107 | 0.446023 | false |
abingham/ackward | src/ackward/logging/LoggerBase.py | 1 | 3176 | from ackward import (Class,
method,
Namespace,
Property,
TranslationUnit)
def tunit():
return TranslationUnit(
forward_declarations=[('ackward', 'logging', 'class Filter'),
('ackward', 'logging', 'class Handler'),
('ackward', 'logging', 'class LogRecord')],
header_includes=[('ackward', 'logging', 'Types.hpp')],
impl_includes=[('ackward', 'logging', 'LoggerBase.hpp'),
('ackward', 'logging', 'Filter.hpp'),
('ackward', 'logging', 'Handler.hpp'),
('ackward', 'logging', 'LogRecord.hpp')])
def methods(parent):
methods = [
('void setLevel(Level l)',
'Sets the threshold for this logger.'),
('bool isEnabledFor(Level l) const',
'Indicates if a message of severity ``lvl`` would be processed by this logger.'),
('Level getEffectiveLevel() const',
'Indicates the effective level for this logger.'),
('void log(Level lvl, std::wstring msg) const',
'Logs a message with level ``lvl`` on this logger.'),
('void addFilter(Filter f)',
'Adds the specified filter ``filt`` to this logger.'),
('void removeFilter(Filter f)',
'Removes the specified filter ``filt`` from this logger.'),
('bool filter(LogRecord r) const',
'Applies this logger\'s filters to the record and returns a true value if the record is to be processed.'),
('void addHandler(Handler h)',
'Adds the specified handler ``hdlr`` to this logger.'),
('void removeHandler(Handler h)',
'Removes the specified handler hdlr from this logger.'),
('void handle(LogRecord r) const',
'Handles a record by passing it to all handlers associated with this logger and its ancestors (until a false value of propagate is found).'),
('void exception(std::wstring msg) const',
'''Logs a message with level ``ERROR`` on this logger.
Exception info is added to the logging message. This method
should only be called from an exception handler.''')
]
for lvl in ['debug', 'info', 'warning', 'error', 'critical']:
methods.append(
('void {0}(std::wstring msg) const'.format(lvl),
'Logs a message with level ``{0}`` on this logger.'.format(lvl.upper())))
for m in methods:
docstring='''\\rst
{0}
\\endrst'''.format(m[1])
method(m[0], parent=parent, doc=docstring)
def definition(env):
t = tunit()
ns = Namespace('ackward', 'logging', parent=t)
cls = Class(name='LoggerBase',
wrapped_class='logging.Logger',
parent=ns)
# TODO: docstring for propagate
Property(name='propagate',
type='bool',
parent=cls).doc='If this evaluates to false, logging messages are not passed by this logger or by its child loggers to the handlers of higher level (ancestor) loggers.'
methods(parent=cls)
return t
| mit | 8,258,628,040,254,271,000 | 43.111111 | 181 | 0.567695 | false |
Azure/azure-sdk-for-python | sdk/servicebus/azure-servicebus/azure/servicebus/management/_generated/operations/_entity_operations.py | 1 | 10884 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class EntityOperations(object):
"""EntityOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.servicebus.management._generated.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
entity_name, # type: str
enrich=False, # type: Optional[bool]
api_version="2017_04", # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> object
"""Get the details about the Queue or Topic with the given entityName.
Get Queue or Topic.
:param entity_name: The name of the queue or topic relative to the Service Bus namespace.
:type entity_name: str
:param enrich: A query parameter that sets enrich to true or false.
:type enrich: bool
:param api_version: Api Version.
:type api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: object, or the result of cls(response)
:rtype: object
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[object]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'entityName': self._serialize.url("entity_name", entity_name, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if enrich is not None:
query_parameters['enrich'] = self._serialize.query("enrich", enrich, 'bool')
if api_version is not None:
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/xml'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ServiceBusManagementError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('object', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/{entityName}'} # type: ignore
def put(
self,
entity_name, # type: str
request_body, # type: object
api_version="2017_04", # type: Optional[str]
if_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> object
"""Create or update a queue or topic at the provided entityName.
:param entity_name: The name of the queue or topic relative to the Service Bus namespace.
:type entity_name: str
:param request_body: Parameters required to make or edit a queue or topic.
:type request_body: object
:param api_version: Api Version.
:type api_version: str
:param if_match: Match condition for an entity to be updated. If specified and a matching
entity is not found, an error will be raised. To force an unconditional update, set to the
wildcard character (*). If not specified, an insert will be performed when no existing entity
is found to update and a replace will be performed if an existing entity is found.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: object, or the result of cls(response)
:rtype: object
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[object]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/atom+xml")
# Construct URL
url = self.put.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'entityName': self._serialize.url("entity_name", entity_name, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if api_version is not None:
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/xml'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(request_body, 'object', is_xml=True)
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ServiceBusManagementError, response)
raise HttpResponseError(response=response, model=error)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('object', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('object', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
put.metadata = {'url': '/{entityName}'} # type: ignore
def delete(
self,
entity_name, # type: str
api_version="2017_04", # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> object
"""Delete the Queue or Topic with the given entityName.
Delete Queue or Topic.
:param entity_name: The name of the queue or topic relative to the Service Bus namespace.
:type entity_name: str
:param api_version: Api Version.
:type api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: object, or the result of cls(response)
:rtype: object
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[object]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'entityName': self._serialize.url("entity_name", entity_name, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if api_version is not None:
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/xml'
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ServiceBusManagementError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('object', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete.metadata = {'url': '/{entityName}'} # type: ignore
| mit | -4,868,659,356,502,138,000 | 43.243902 | 116 | 0.63552 | false |
faisal-oead/My-Twitter-Bot | main-bot.py | 1 | 1851 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Copyright 2014 faisal oead <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
import tweepy
import time
# == Authentication credentials ==
consumer_key=""
consumer_secret=""
access_token=""
access_token_secret=""
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
x = 0
y = 0
try:
api = tweepy.API(auth)
user = api.me()
followers_list = api.followers_ids(user)
friends_list = api.friends_ids(user)
    # Unfollow accounts that are not following back
try:
for friend in friends_list:
if friend not in followers_list:
api.destroy_friendship(friend)
time.sleep(60)
x = x + 1
if x == 200:
break
except tweepy.error.TweepError as ee:
print ee
pass
    # Follow new people
try:
for follower in api.followers_ids("MohamadAlarefe"):
if follower not in followers_list:
api.create_friendship(follower)
time.sleep(60)
y = y + 1
if y == 100:
break
except tweepy.error.TweepError as eee:
print eee
pass
except tweepy.error.TweepError as e:
print e
pass
| gpl-2.0 | -3,045,754,460,581,140,500 | 25.072464 | 71 | 0.707615 | false |
MatthewShao/mitmproxy | test/pathod/test_pathod.py | 1 | 7341 | import io
import pytest
from pathod import pathod
from mitmproxy.net import tcp
from mitmproxy import exceptions
from mitmproxy.test import tutils
from . import tservers
class TestPathod:
def test_logging(self):
s = io.StringIO()
p = pathod.Pathod(("127.0.0.1", 0), logfp=s)
assert len(p.get_log()) == 0
id = p.add_log(dict(s="foo"))
assert p.log_by_id(id)
assert len(p.get_log()) == 1
p.clear_log()
assert len(p.get_log()) == 0
for _ in range(p.LOGBUF + 1):
p.add_log(dict(s="foo"))
assert len(p.get_log()) <= p.LOGBUF
class TestTimeout(tservers.DaemonTests):
timeout = 0.01
def test_timeout(self):
# FIXME: Add float values to spec language, reduce test timeout to
# increase test performance
# This is a bodge - we have some platform difference that causes
# different exceptions to be raised here.
with pytest.raises(Exception):
self.pathoc(["get:/:p1,1"])
assert self.d.last_log()["type"] == "timeout"
class TestNotAfterConnect(tservers.DaemonTests):
ssl = False
ssloptions = dict(
not_after_connect=True
)
def test_connect(self):
r, _ = self.pathoc(
[r"get:'http://foo.com/p/202':da"],
connect_to=("localhost", self.d.port)
)
assert r[0].status_code == 202
class TestCustomCert(tservers.DaemonTests):
ssl = True
ssloptions = dict(
certs=[("*", tutils.test_data.path("pathod/data/testkey.pem"))],
)
def test_connect(self):
r, _ = self.pathoc([r"get:/p/202"])
r = r[0]
assert r.status_code == 202
assert r.sslinfo
assert "test.com" in str(r.sslinfo.certchain[0].get_subject())
class TestSSLCN(tservers.DaemonTests):
ssl = True
ssloptions = dict(
cn=b"foo.com"
)
def test_connect(self):
r, _ = self.pathoc([r"get:/p/202"])
r = r[0]
assert r.status_code == 202
assert r.sslinfo
assert r.sslinfo.certchain[0].get_subject().CN == "foo.com"
class TestNohang(tservers.DaemonTests):
nohang = True
def test_nohang(self):
r = self.get("200:p0,0")
assert r.status_code == 800
l = self.d.last_log()
assert "Pauses have been disabled" in l["response"]["msg"]
class TestHexdump(tservers.DaemonTests):
hexdump = True
def test_hexdump(self):
assert self.get(r"200:b'\xf0'")
class TestNocraft(tservers.DaemonTests):
nocraft = True
def test_nocraft(self):
r = self.get(r"200:b'\xf0'")
assert r.status_code == 800
assert b"Crafting disabled" in r.content
class CommonTests(tservers.DaemonTests):
def test_binarydata(self):
assert self.get(r"200:b'\xf0'")
assert self.d.last_log()
# FIXME: Other binary data elements
def test_sizelimit(self):
r = self.get("200:b@1g")
assert r.status_code == 800
l = self.d.last_log()
assert "too large" in l["response"]["msg"]
def test_preline(self):
r, _ = self.pathoc([r"get:'/p/200':i0,'\r\n'"])
assert r[0].status_code == 200
def test_logs(self):
self.d.clear_log()
assert self.get("202:da")
assert self.d.expect_log(1)
self.d.clear_log()
assert len(self.d.log()) == 0
def test_disconnect(self):
with pytest.raises(Exception, match="Unexpected EOF"):
self.get("202:b@100k:d200")
def test_parserr(self):
rsp = self.get("400:msg,b:")
assert rsp.status_code == 800
def test_static(self):
rsp = self.get("200:b<file")
assert rsp.status_code == 200
assert rsp.content.strip() == b"testfile"
def test_anchor(self):
rsp = self.getpath("/anchor/foo")
assert rsp.status_code == 202
def test_invalid_first_line(self):
c = tcp.TCPClient(("localhost", self.d.port))
with c.connect():
if self.ssl:
c.convert_to_tls()
c.wfile.write(b"foo\n\n\n")
c.wfile.flush()
l = self.d.last_log()
assert l["type"] == "error"
assert "foo" in l["msg"]
def test_invalid_content_length(self):
with pytest.raises(exceptions.HttpException):
self.pathoc(["get:/:h'content-length'='foo'"])
l = self.d.last_log()
assert l["type"] == "error"
assert "Unparseable Content Length" in l["msg"]
def test_invalid_headers(self):
with pytest.raises(exceptions.HttpException):
self.pathoc(["get:/:h'\t'='foo'"])
l = self.d.last_log()
assert l["type"] == "error"
assert "Invalid headers" in l["msg"]
def test_access_denied(self):
rsp = self.get("=nonexistent")
assert rsp.status_code == 800
def test_source_access_denied(self):
rsp = self.get("200:b</foo")
assert rsp.status_code == 800
assert b"File access denied" in rsp.content
def test_proxy(self):
r, _ = self.pathoc([r"get:'http://foo.com/p/202':da"])
assert r[0].status_code == 202
def test_websocket(self):
r, _ = self.pathoc(["ws:/p/"], ws_read_limit=0)
assert r[0].status_code == 101
r, _ = self.pathoc(["ws:/p/ws"], ws_read_limit=0)
assert r[0].status_code == 101
def test_websocket_frame(self):
r, _ = self.pathoc(
["ws:/p/", "wf:f'wf:b\"test\"':pa,1"],
ws_read_limit=1
)
assert r[1].payload == b"test"
def test_websocket_frame_reflect_error(self):
r, _ = self.pathoc(
["ws:/p/", "wf:-mask:knone:f'wf:b@10':i13,'a'"],
ws_read_limit=1,
timeout=1
)
# FIXME: Race Condition?
assert "Parse error" in self.d.text_log()
def test_websocket_frame_disconnect_error(self):
self.pathoc(["ws:/p/", "wf:b@10:d3"], ws_read_limit=0)
assert self.d.last_log()
class TestDaemon(CommonTests):
ssl = False
def test_connect(self):
r, _ = self.pathoc(
[r"get:'http://foo.com/p/202':da"],
connect_to=("localhost", self.d.port),
ssl=True
)
assert r[0].status_code == 202
def test_connect_err(self):
with pytest.raises(exceptions.HttpException):
self.pathoc([r"get:'http://foo.com/p/202':da"], connect_to=("localhost", self.d.port))
class TestDaemonSSL(CommonTests):
ssl = True
def test_ssl_conn_failure(self):
c = tcp.TCPClient(("localhost", self.d.port))
c.rbufsize = 0
c.wbufsize = 0
with c.connect():
c.wfile.write(b"\0\0\0\0")
with pytest.raises(exceptions.TlsException):
c.convert_to_tls()
l = self.d.last_log()
assert l["type"] == "error"
assert "SSL" in l["msg"]
def test_ssl_cipher(self):
r, _ = self.pathoc([r"get:/p/202"])
assert r[0].status_code == 202
assert self.d.last_log()["cipher"][1] > 0
class TestHTTP2(tservers.DaemonTests):
ssl = True
nohang = True
def test_http2(self):
r, _ = self.pathoc(["GET:/"], ssl=True, use_http2=True)
assert r[0].status_code == 800
| mit | 7,214,238,150,954,249,000 | 27.126437 | 98 | 0.558507 | false |
pi19404/robosub-1 | src/microcontroller_interface/microcontroller_debugging_interface.py | 1 | 11005 | #!/usr/bin/python
#import statements
import serial
import os
import time
#Global Constants#############################################################################
#These values are temporary, for testing. They WILL change in the final product
#It was recommended that these values should be placed in a dictionary
control_byte = '\n'
ACL_1_X_addr = 0x10
ACL_1_Y_addr = 0x11
ACL_1_Z_addr = 0x12
GYRO_1_X_addr = 0x20
GYRO_1_Y_addr = 0x21
GYRO_1_Z_addr = 0x22
ADC_DEPTH = 0x30
ADC_BATT = 0x31
THRUSTER_BOW_SB = 0x10
THRUSTER_BOW_PORT = 0x11
THRUSTER_DEPTH_SB = 0x12
THRUSTER_DEPTH_PORT = 0x13
THRUSTER_STERN_SB = 0x14
THRUSTER_STERN_PORT = 0x15
mag = 127
#Function Definitions#########################################################################
"""
Here we are trying to make sure we have actually found
a control byte, so we receive several packets, then look
at where we expect the control bytes to be. If they are not in the expected
locations, we wait for a new control byte and try again.
X000X000X
012345678
"""
def get_lock() :
#variables for the sync loop
current_byte = '\0'
packet_array = ""
in_sync = False
#reset the serial port
s.close()
s.open()
print "Aquiring stream sync"
while in_sync == False:
#read a packet from the serial port
current_byte = s.read()
#if the byte is the control_byte, then receive several packets
#otherwise, we will jump back to the top of the loop and get another byte
if current_byte == control_byte :
packet_array = "" # clear out the array
packet_array += current_byte # add the byte to the array
#receive several packets
while len(packet_array) != 9 :
packet_array += s.read()
#check to see if the control byte is in the proper location in the received packets
if (packet_array[0] == control_byte and \
packet_array[4] == control_byte and \
packet_array[8] == control_byte) :
#throw away rest of last packet
s.read(3)
#say we are in sync so we can break out of the loop
in_sync = True
print "sync locked"
#end get_lock()
"""
This function reads a 4-byte packet from the serial port.
It also checks that we are still in sync, and pauses the
program if we lose sync. It will then attempt to get back
into sync with the serial stream.
"""
def get_packet() :
success = False
while success == False :
#read 4 bytes from the serial port
packet = s.read(4)
#ensure we are in sync by checking that the control byte is in the correct place
if packet[0] != control_byte : #if we are not in sync
print "Error: lost sync. Press the [Enter] key to attempt to re-sync"
raw_input() #waits for the user to press the enter key
s.flushInput() #flushes the serial rx buffer
get_lock() #get back into sync
else : #if we are in sync, break out of loop
success = True
return packet
#end get_packet()
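"""
Illustrative helper added for clarity -- it is NOT called anywhere in this
file, which decodes packets inline in the main loop instead. It shows the
packet layout assumed throughout: byte 0 is the control byte, byte 1 the
device address, and bytes 2-3 a little-endian 16-bit value stored in
2's complement form.
"""
def decode_packet(packet) :
    device = ord(packet[1])
    value = ( ord(packet[2]) ) | ( ord(packet[3]) << 8 )
    #convert from 2's complement to a signed integer
    if value > 32767 :
        value = value - 65536
    return device, value
#end decode_packet()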
"""
cmd_thruster() sends a thruster control command to the microcontroller.
It takes an id, and a value between +127 and -127 (negative is reverse)
"""
def cmd_thruster(thruster_id, magnitude) :
raw_thruster_id = '\0'
direction_mag = 0;
raw_direction_mag = '\0'
raw_cmd = ""
#the chr() command converts the integer to the ascii character representation, which is a raw byte
#convert the thruster id to a raw binary value
raw_thruster_id = chr(thruster_id)
#make sure magnitude is within bounds
if (magnitude > 127) :
magnitude = 127
elif (magnitude < -127) :
magnitude = -127
#convert direction and magnitude variable into a raw byte
raw_magnitude = chr(magnitude & 0xFF)
CONTROL_BYTE = '\n'
#combine the raw bytes
raw_cmd = CONTROL_BYTE + raw_thruster_id + raw_magnitude
    #send the command to the microcontroller
s.write(raw_cmd)
#end cmd_thruster()
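#Worked example (added note, not original code): cmd_thruster(THRUSTER_BOW_SB, -5)
#sends the 3 raw bytes '\n' + chr(0x10) + chr(0xFB) -- the control byte, the
#thruster id 0x10, and the magnitude -5 encoded as 0xFB by the
#(magnitude & 0xFF) two's-complement mask above.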
#here are some example functions controlling the thrusters for movement
#causes the sub to move forward
def cmd_move_forward() :
cmd_thruster(THRUSTER_BOW_SB, -mag)
cmd_thruster(THRUSTER_BOW_PORT, mag)
cmd_thruster(THRUSTER_STERN_SB, mag)
cmd_thruster(THRUSTER_STERN_PORT, -mag)
#end cmd_move_forward()
#causes the sub to move backwards
def cmd_move_backward() :
cmd_thruster(THRUSTER_BOW_SB, mag)
cmd_thruster(THRUSTER_BOW_PORT, -mag)
cmd_thruster(THRUSTER_STERN_SB, -mag)
cmd_thruster(THRUSTER_STERN_PORT, mag)
#end cmd_move_backward()
#causes the sub to dive
def cmd_dive() :
cmd_thruster(THRUSTER_DEPTH_SB, mag)
cmd_thruster(THRUSTER_DEPTH_PORT, mag)
#end cmd_dive()
#causes the sub to surface
def cmd_surface() :
cmd_thruster(THRUSTER_DEPTH_SB, -mag)
cmd_thruster(THRUSTER_DEPTH_PORT, -mag)
#end cmd_surface()
#causes the sub to rotate clockwise
def cmd_rotate_cw() :
cmd_thruster(THRUSTER_BOW_SB, mag)
cmd_thruster(THRUSTER_BOW_PORT, mag)
cmd_thruster(THRUSTER_STERN_SB, -mag)
cmd_thruster(THRUSTER_STERN_PORT, -mag)
#end cmd_rotate_cw()
#causes the sub to rotate counter-clockwise
def cmd_rotate_ccw() :
cmd_thruster(THRUSTER_BOW_SB, -mag)
cmd_thruster(THRUSTER_BOW_PORT, -mag)
cmd_thruster(THRUSTER_STERN_SB, mag)
cmd_thruster(THRUSTER_STERN_PORT, mag)
#end cmd_rotate_ccw()
#stops the depth control thrusters
def cmd_stop_depth() :
cmd_thruster(THRUSTER_DEPTH_SB, 0)
cmd_thruster(THRUSTER_DEPTH_PORT, 0)
#end cmd_stop_depth()
#stops all thrusters
def cmd_stop_all() :
cmd_thruster(THRUSTER_BOW_SB, 0)
cmd_thruster(THRUSTER_BOW_PORT, 0)
cmd_thruster(THRUSTER_STERN_SB, 0)
cmd_thruster(THRUSTER_STERN_PORT, 0)
cmd_thruster(THRUSTER_DEPTH_SB, 0)
cmd_thruster(THRUSTER_DEPTH_PORT, 0)
#end cmd_stop_all()
#Main code####################################################################################
#initialize the serial port
s = serial.Serial() #get instance of serial class
s.port = "/dev/ttyUSB0" #this may change, depending on what port the OS gives the microcontroller
s.baudrate = 56818 #the baudrate may change in the future
s.open() #attempt to open the serial port (there is no guard code, I'm assuming this does not fail)
f = open("slog", "w")
#clear the screen
os.system('clear')
get_lock() #get in sync with the stream
#Initialize some variables
ACL_1_X_val = -1
ACL_1_Y_val = -1
ACL_1_Z_val = -1
GYRO_1_X_val = -1
GYRO_1_Y_val = -1
GYRO_1_Z_val = -1
ADC_DEPTH_val = -1
ADC_BATT_val = -1
buffer_size_max = 0
buffer_tick = 1
buffer_total = 1
sent_time = 0
received_time = 0
min_ping_time = 500
ping_tick = 1
ping_total = 1
wait_time = time.time() + 1
x_update = time.time()
x_period = 500
x_total = 1
x_tick = 1
UART_queue_len = 500
UART_queue_len_max = 0
start_time = time.time()
cmd_stop_all()
cmd_stop_all()
cmd_stop_all()
cmd_stop_all()
"""
time.sleep(5)
cmd_stop_all()
time.sleep(.1)
cmd_move_forward()
time.sleep(5)
cmd_stop_all()
time.sleep(.1)
cmd_move_backward()
time.sleep(5)
cmd_stop_all()
time.sleep(.1)
cmd_dive()
time.sleep(5)
cmd_stop_all()
time.sleep(.1)
cmd_surface()
time.sleep(5)
cmd_stop_all()
time.sleep(.1)
cmd_rotate_cw()
time.sleep(5)
cmd_stop_all()
time.sleep(.1)
cmd_rotate_ccw()
time.sleep(5)
cmd_stop_all()
"""
ACL_1_X_val_old = 0
samecount = 0 #initialize here so the frozen-sensor check in the main loop never hits an undefined name
flipflop = 0
#Main reading loop
while 1 :
#cmd_dive()
#cmd_move_forward()
if (time.time() > wait_time) :
#cmd_dive()
if flipflop == 0 :
cmd_move_forward()
cmd_dive()
else :
cmd_move_backward()
cmd_surface()
flipflop = ~flipflop
#cmd_move_forward()
"""
cmd_move_forward()
cmd_thruster(THRUSTER_BOW_SB, mag, 1)
cmd_thruster(THRUSTER_STERN_SB, mag, 0)
cmd_thruster(THRUSTER_BOW_PORT, mag, 0)
cmd_thruster(THRUSTER_STERN_PORT, mag, 1)
cmd_dive()
cmd_stop_all()
cmd_move_forward()
cmd_dive()
cmd_move_backward()
cmd_rotate_cw()
cmd_stop_depth()
cmd_surface()
cmd_thruster(THRUSTER_BOW_SB, 100, 0)
cmd_thruster(THRUSTER_STERN_SB, 25, 0)
cmd_thruster(THRUSTER_BOW_SB, 0, 0)
cmd_thruster(THRUSTER_STERN_SB, 0, 0)
cmd_stop_all()
"""
sent_time = time.time()
wait_time = sent_time + .5
#receive a packet
received_packet = get_packet()
#pull the device information out of the packet
device = ord(received_packet[1]) #second byte of packet is device information (first byte is always control byte)
os.system('clear')
print "Sensor Test"
#if-elif statement looks for what device the packet is concerning, and handles the data appropriately
if device == ACL_1_X_addr :
#pulls the data out the last two bytes of the packet
ACL_1_X_val = int(( ord(received_packet[2]) ) | \
( ord(received_packet[3]) << 8 ))
#data is stored in 2's complement form, this does the appropriate conversion
if ACL_1_X_val > 32767 :
ACL_1_X_val = (ACL_1_X_val-65536)
f.write("X," + str(ACL_1_X_val) + '\n')
elif device == ACL_1_Y_addr :
ACL_1_Y_val = ( ord(received_packet[2]) ) | \
( ord(received_packet[3]) << 8 )
if ACL_1_Y_val > 32767 :
ACL_1_Y_val = (ACL_1_Y_val-65536)
f.write("Y," + str(ACL_1_Y_val) + '\n')
elif device == ACL_1_Z_addr :
ACL_1_Z_val = ( ord(received_packet[2]) ) | \
( ord(received_packet[3]) << 8 )
if ACL_1_Z_val > 32767 :
ACL_1_Z_val = (ACL_1_Z_val-65536)
f.write("Z," + str(ACL_1_Z_val) + '\n')
elif device == GYRO_1_X_addr :
GYRO_1_X_val = ( ord(received_packet[2]) ) | \
( ord(received_packet[3]) << 8 )
if GYRO_1_X_val > 32767 :
GYRO_1_X_val = (GYRO_1_X_val-65536)
elif device == GYRO_1_Y_addr :
GYRO_1_Y_val = ( ord(received_packet[2]) ) | \
( ord(received_packet[3]) << 8 )
if GYRO_1_Y_val > 32767 :
GYRO_1_Y_val = (GYRO_1_Y_val-65536)
elif device == GYRO_1_Z_addr :
GYRO_1_Z_val = ( ord(received_packet[2]) ) | \
( ord(received_packet[3]) << 8 )
if GYRO_1_Z_val > 32767 :
GYRO_1_Z_val = (GYRO_1_Z_val-65536)
elif device == ADC_DEPTH :
ADC_DEPTH_val = ( ord(received_packet[2]) ) | \
( ord(received_packet[3]) << 8 )
elif device == ADC_BATT :
ADC_BATT_val = ( ord(received_packet[2]) ) | \
( ord(received_packet[3]) << 8 )
elif device == ord('P') :
received_time = time.time()
if ping_tick > 20 :
ping_total /= 2
ping_tick /= 2
ping_total += received_time - sent_time
ping_tick += 1
print "ACL X: %d" % (ACL_1_X_val)
print "ACL Y: %d" % (ACL_1_Y_val)
print "ACL Z: %d" % (ACL_1_Z_val)
print "GYRO X: %d" % (GYRO_1_X_val)
print "GYRO Y: %d" % (GYRO_1_Y_val)
print "GYRO Z: %d" % (GYRO_1_Z_val)
print "ADC Depth: %d" % ((ADC_DEPTH_val) )
print "ADC Battery: %lf" % ((ADC_BATT_val) * 3.3/1024 * 7.5)
print "Average Ping Time: %lf" % (ping_total/ping_tick)
print "buffer size: %d" % (s.inWaiting())
print "Run Time (minutes): %lf" % ((time.time() - start_time)/60)
if ACL_1_X_val_old == ACL_1_X_val :
samecount = samecount + 1
else :
samecount = 0
print "samecount: %d" % (samecount)
if samecount >= 500 :
print "FROZEN"
samecount = 0
raw_input()
#s.close()
#time.sleep(.5)
#s.open()
ACL_1_X_val_old = ACL_1_X_val
#time.sleep(0.001) #time.sleep(seconds)
#end of reading while loop
#close the serial port
s.close()
| gpl-3.0 | -8,604,657,200,176,266,000 | 22.2173 | 114 | 0.649159 | false |
yeasy/hyperledger-py | tests/function_test.py | 1 | 4553 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from hyperledger.client import Client
# import base64
import json
import sys
import time
API_URL = 'http://127.0.0.1:7050'
def query_value(chaincode_name, arg_list):
"""
Query a list of values.
:param chaincode_name: The name of the chaincode.
:param arg_list: List of arguments.
:return: A list of values.
"""
result, resp = [], {}
print("Query value will try at most 20 times.")
for arg in arg_list:
for i in range(20):
try:
resp = c.chaincode_query(chaincode_name=chaincode_name,
function="query",
args=[arg])
if resp['result']['status'] == 'OK':
result.append(resp['result']['message'])
break
except KeyError:
print("Wait 1 seconds for the {0} query".format(i))
time.sleep(1)
return result
# Usage:
# * python function_test.py [API_URL=http://127.0.0.1:7050] will deploy first
# * python function_test.py [API_URL=http://127.0.0.1:7050] [chaincode_name]
# E.g.,
# "f389486d91f54d1f8775940f24b1d3bd9f8a8e75d364e158ac92328ddacad629607a3c42be156fc4a7da7173adca2ac7d7eef29afc59c6f07f3ad14abee34f68"
if __name__ == '__main__':
if len(sys.argv) not in [2, 3]:
print("Usage: python function_test.py ["
"API_URL=http://127.0.0.1:7050] [chaincode_name]")
exit()
API_URL = sys.argv[1]
chaincode_name = ""
# chaincode_name = "7be1529ee16969baf9f3156247a0ee8e7eee99a6a0a816776acff65e6e1def71249f4cb1cad5e0f0b60b25dd2a6975efb282741c0e1ecc53fa8c10a9aaa31137" # noqa
if len(sys.argv) == 3:
chaincode_name = sys.argv[2]
c = Client(base_url=API_URL)
print("Checking cluster at {}".format(API_URL))
if not chaincode_name:
print(">>>Test: deploy the default chaincode")
res = c.chaincode_deploy(args=["a", "10000", "b", "20000"])
chaincode_name = res['result']['message']
assert res['result']['status'] == 'OK'
print("Successfully deploy chaincode with returned name = " +
chaincode_name)
print("Wait 15 seconds to make sure deployment is done.")
time.sleep(25)
print(">>>Check the initial value: a, b")
values = query_value(chaincode_name, ["a", "b"])
print(values)
# assert values == ['10000', '20000']
print(">>>Test: invoke a chaincode: a-->b 1")
res = c.chaincode_invoke(chaincode_name=chaincode_name, function="invoke",
args=["a", "b", "1"])
assert res["result"]["status"] == "OK"
transaction_uuid = res["result"]["message"]
print("Transaction id = {0}".format(transaction_uuid))
# TODO: sleep 3 seconds till invoke done.
print("Wait 5 seconds to make sure invoke is done.")
time.sleep(5)
print(">>>Check the after value: a, b")
values = query_value(chaincode_name, ["a", "b"])
print(values)
exit(0)
# assert values == ['9999', '20001']
time.sleep(1)
print(">>>Test: Check the transaction content")
res = c.transaction_get(transaction_uuid)
# res["chaincodeID"] = base64.b64decode(res["chaincodeID"])
print(json.dumps(res, sort_keys=True, indent=4))
assert res["uuid"] == transaction_uuid
print(">>>Test: list the peers")
res = c.peer_list()
print(json.dumps(res, sort_keys=True, indent=4))
assert len(res['peers']) > 0
print(">>>Test: list the chain")
res = c.chain_list()
print(json.dumps(res, sort_keys=True, indent=4))
assert res['height'] > 0
print("Existing block number = {0}".format(res["height"]))
print(">>>Test: get the content of block 1")
res = c.block_get(block='1')
print(json.dumps(res, sort_keys=True, indent=4))
print(">>>Test: get the content of block 2")
res = c.block_get(block='2')
print(json.dumps(res, sort_keys=True, indent=4))
| apache-2.0 | 7,446,954,543,845,993,000 | 34.850394 | 161 | 0.618054 | false |
andreabrambilla/libres | python/res/enkf/queue_config.py | 1 | 5865 | # Copyright (C) 2017 Equinor ASA, Norway.
#
# The file 'site_config.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
from cwrap import BaseCClass
from ecl.util.util import StringList, Hash
from res import ResPrototype
from res.enkf import ConfigKeys
from res.job_queue import JobQueue, ExtJoblist, Driver
class QueueConfig(BaseCClass):
TYPE_NAME = "queue_config"
_free = ResPrototype("void queue_config_free( queue_config )")
_alloc_job_queue = ResPrototype("job_queue_obj queue_config_alloc_job_queue( queue_config )")
_alloc = ResPrototype("void* queue_config_alloc_load(char*)", bind=False)
_alloc_full = ResPrototype("void* queue_config_alloc_full(char*, bool, int, int, int)", bind=False)
_alloc_content = ResPrototype("void* queue_config_alloc(config_content)", bind=False)
_alloc_local_copy = ResPrototype("queue_config_obj queue_config_alloc_local_copy( queue_config )")
_has_job_script = ResPrototype("bool queue_config_has_job_script( queue_config )")
_get_job_script = ResPrototype("char* queue_config_get_job_script(queue_config)")
_max_submit = ResPrototype("int queue_config_get_max_submit(queue_config)")
_queue_system = ResPrototype("char* queue_config_get_queue_system(queue_config)")
_queue_driver = ResPrototype("driver_ref queue_config_get_queue_driver(queue_config, char*)")
_get_num_cpu = ResPrototype("int queue_config_get_num_cpu(queue_config)")
def __init__(self, user_config_file=None, config_content=None, config_dict=None):
configs = sum([1 for x in [user_config_file, config_content, config_dict] if x is not None])
if configs > 1:
raise ValueError("Attempting to create QueueConfig object with multiple config objects")
if configs == 0:
raise ValueError("Attempting to create QueueConfig object with no config objects")
c_ptr = None
if user_config_file is not None:
c_ptr = self._alloc(user_config_file)
if config_content is not None:
c_ptr = self._alloc_content(config_content)
if config_dict is not None:
c_ptr = self._alloc_full(
config_dict[ConfigKeys.JOB_SCRIPT],
config_dict[ConfigKeys.USER_MODE],
config_dict[ConfigKeys.MAX_SUBMIT],
config_dict[ConfigKeys.NUM_CPU],
config_dict[ConfigKeys.QUEUE_SYSTEM]
)
if not c_ptr:
raise ValueError("Unable to create QueueConfig instance")
super(QueueConfig, self).__init__(c_ptr)
        #when built from a config dict, push any QUEUE_OPTION entries onto the queue driver
if config_dict is not None:
queue_options = config_dict[ConfigKeys.QUEUE_OPTION]
for option in queue_options:
self.driver.set_option(option['NAME'], option['VALUE'])
def create_job_queue(self):
return self._alloc_job_queue()
def create_local_copy(self):
return self._alloc_local_copy()
def has_job_script(self):
return self._has_job_script()
def free(self):
self._free()
@property
def max_submit(self):
return self._max_submit()
@property
def queue_name(self):
return self.driver.get_option(ConfigKeys.LSF_QUEUE_NAME_KEY)
@property
def queue_system(self):
"""The queue system in use, e.g. LSF or LOCAL"""
return self._queue_system()
@property
def job_script(self):
return self._get_job_script()
@property
def driver(self):
return self._queue_driver(self.queue_system).setParent(self)
def _assert_lsf(self, key='driver'):
sys = self.queue_system
if sys != ConfigKeys.LSF_KEY:
fmt = 'Cannot fetch LSF {key}, current queue is {system}'
raise ValueError(fmt.format(key=key,
system=self.queue_system))
@property
def _lsf_driver(self):
self._assert_lsf()
driver = self._queue_driver(ConfigKeys.LSF_KEY)
return driver.setParent(self)
@property
def lsf_resource(self):
self._assert_lsf(key=ConfigKeys.LSF_RESOURCE_KEY)
return self._lsf_driver.get_option(ConfigKeys.LSF_RESOURCE_KEY)
@property
def lsf_server(self):
self._assert_lsf(key=ConfigKeys.LSF_SERVER_KEY)
return self._lsf_driver.get_option(ConfigKeys.LSF_SERVER_KEY)
@property
def num_cpu(self):
return self._get_num_cpu()
def __eq__(self, other):
if self.max_submit != other.max_submit:
return False
if self.queue_system != other.queue_system:
return False
if self.num_cpu != other.num_cpu:
return False
if self.job_script != other.job_script:
return False
if self.queue_system != 'LOCAL':
if self.queue_name != other.queue_name:
return False
if self.lsf_resource != other.lsf_resource:
return False
if self.lsf_server != other.lsf_server:
return False
return True
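# Illustrative usage sketch (not part of the original module; the option
# values below are assumptions): building a QueueConfig from a plain dict
# and creating a job queue from it.
#
#   config = {
#       ConfigKeys.JOB_SCRIPT: "job_dispatch.py",
#       ConfigKeys.USER_MODE: True,
#       ConfigKeys.MAX_SUBMIT: 2,
#       ConfigKeys.NUM_CPU: 1,
#       ConfigKeys.QUEUE_SYSTEM: "LOCAL",
#       ConfigKeys.QUEUE_OPTION: [],
#   }
#   queue_config = QueueConfig(config_dict=config)
#   job_queue = queue_config.create_job_queue()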
| gpl-3.0 | -8,782,080,870,309,868,000 | 36.356688 | 114 | 0.608014 | false |
hivetech/dna | python/dna/cli.py | 1 | 2375 | # -*- coding: utf-8 -*-
# vim:fenc=utf-8
'''
:copyright (c) 2014 Xavier Bruhiere.
:license: MIT, see LICENSE for more details.
'''
import os
import abc
import click
class Cli(object):
'''
Convenient wrapper around UI interface boilerplate.
    Makes the app prettier and more robust.
'''
__metaclass__ = abc.ABCMeta
# TODO Allow more styling
_default_style = {
'primary-color': 'blue',
'success-color': 'green',
'heading-color': 'white',
'error-color': 'red'
}
def __init__(self, title, style=None):
self._mode = None
self.style = style or self._default_style
click.clear()
self.heading('{} [{} mode]'.format(title, self.mode))
def _print(self, text, **kwargs):
click.secho(text, **kwargs)
@property
def mode(self):
return self._mode or os.environ.get('APP_ENV', 'development')
@mode.setter
def mode(self, value):
self._mode = value
def heading(self, text):
self._print('\n{}\n'.format(text),
bold=True, fg=self.style['heading-color'], underline=True)
def msg(self, text, **kwargs):
self._print(text, fg=self.style['primary-color'], **kwargs)
def success(self, text):
self._print(text, fg=self.style['success-color'], bold=True)
def error(self, text):
self._print('\n{}\n'.format(text),
fg=self.style['error-color'], bold=True)
@abc.abstractmethod
def run(self):
pass
def __call__(self, *args, **kwargs):
''' Golang style function that safely calls main routine '''
exit_result = None
exit_error = None
try:
exit_result = self.run(*args, **kwargs)
self.success('Done without error.')
except KeyboardInterrupt:
self.error('Received SIGINT signal, aborting.')
except Exception as error:
self.error('!!!!!! CRASH !!!!!!')
if self.mode == 'development':
raise
exit_error = ['{}: {}'.format(type(error).__name__, str(error))]
if hasattr(error, 'kwargs'):
for k, v in error.kwargs.iteritems():
exit_error.append('{}: {}'.format(k, v))
exit_error = '\n'.join(exit_error)
return exit_result, exit_error
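# Illustrative sketch (not part of the original module): a minimal concrete
# subclass of Cli. The class name and message are made-up examples.
class _DemoCli(Cli):
    def run(self, *args, **kwargs):
        # run() is the only hook a subclass must provide.
        self.msg('hello from the demo CLI')
if __name__ == '__main__':
    # Calling the instance prints the heading, runs run(), and returns
    # a (result, error) tuple.
    _DemoCli('Demo app')()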
| apache-2.0 | 2,027,917,352,932,338,700 | 27.27381 | 78 | 0.548632 | false |
boegel/easybuild-easyblocks | easybuild/easyblocks/p/psi.py | 1 | 11600 | ##
# Copyright 2013-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing PSI, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
@author: Ward Poelmans (Ghent University)
"""
from distutils.version import LooseVersion
import glob
import os
import shutil
import tempfile
import easybuild.tools.environment as env
from easybuild.easyblocks.generic.cmakemake import CMakeMake
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.framework.easyconfig import BUILD
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_root
from easybuild.tools.run import run_cmd
class EB_PSI(CMakeMake):
"""
Support for building and installing PSI
"""
def __init__(self, *args, **kwargs):
"""Initialize class variables custom to PSI."""
super(EB_PSI, self).__init__(*args, **kwargs)
self.psi_srcdir = None
self.install_psi_objdir = None
self.install_psi_srcdir = None
@staticmethod
def extra_options():
"""Extra easyconfig parameters specific to PSI."""
extra_vars = CMakeMake.extra_options()
extra_vars.update({
# always include running PSI unit tests (takes about 2h or less)
'runtest': ["tests TESTFLAGS='-u -q'", "Run tests included with PSI, without interruption.", BUILD],
})
# Doesn't work with out-of-source build
extra_vars['separate_build_dir'][0] = False
return extra_vars
def configure_step(self):
"""
Configure build outside of source directory.
"""
try:
objdir = os.path.join(self.builddir, 'obj')
os.makedirs(objdir)
os.chdir(objdir)
except OSError as err:
raise EasyBuildError("Failed to prepare for configuration of PSI build: %s", err)
env.setvar('F77FLAGS', os.getenv('F90FLAGS'))
# In order to create new plugins with PSI, it needs to know the location of the source
# and the obj dir after install. These env vars give that information to the configure script.
self.psi_srcdir = os.path.basename(self.cfg['start_dir'].rstrip(os.sep))
self.install_psi_objdir = os.path.join(self.installdir, 'obj')
self.install_psi_srcdir = os.path.join(self.installdir, self.psi_srcdir)
env.setvar('PSI_OBJ_INSTALL_DIR', self.install_psi_objdir)
env.setvar('PSI_SRC_INSTALL_DIR', self.install_psi_srcdir)
        # explicitly specify Python binary to use
pythonroot = get_software_root('Python')
if not pythonroot:
raise EasyBuildError("Python module not loaded.")
# pre 4.0b5, they were using autotools, on newer it's CMake
if LooseVersion(self.version) <= LooseVersion("4.0b5") and self.name == "PSI":
# Use EB Boost
boostroot = get_software_root('Boost')
if not boostroot:
raise EasyBuildError("Boost module not loaded.")
self.log.info("Using configure based build")
env.setvar('PYTHON', os.path.join(pythonroot, 'bin', 'python'))
env.setvar('USE_SYSTEM_BOOST', 'TRUE')
if self.toolchain.options.get('usempi', None):
# PSI doesn't require a Fortran compiler itself, but may require it to link to BLAS/LAPACK correctly
# we should always specify the sequential Fortran compiler,
# to avoid problems with -lmpi vs -lmpi_mt during linking
fcompvar = 'F77_SEQ'
else:
fcompvar = 'F77'
# update configure options
# using multi-threaded BLAS/LAPACK is important for performance,
# cfr. http://sirius.chem.vt.edu/psi4manual/latest/installfile.html#sec-install-iii
opt_vars = [
('cc', 'CC'),
('cxx', 'CXX'),
('fc', fcompvar),
('libdirs', 'LDFLAGS'),
('blas', 'LIBBLAS_MT'),
('lapack', 'LIBLAPACK_MT'),
]
for (opt, var) in opt_vars:
self.cfg.update('configopts', "--with-%s='%s'" % (opt, os.getenv(var)))
# -DMPICH_IGNORE_CXX_SEEK dances around problem with order of stdio.h and mpi.h headers
# both define SEEK_SET, this makes the one for MPI be ignored
self.cfg.update('configopts', "--with-opt='%s -DMPICH_IGNORE_CXX_SEEK'" % os.getenv('CFLAGS'))
# specify location of Boost
self.cfg.update('configopts', "--with-boost=%s" % boostroot)
# enable support for plugins
self.cfg.update('configopts', "--with-plugins")
ConfigureMake.configure_step(self, cmd_prefix=self.cfg['start_dir'])
else:
self.log.info("Using CMake based build")
self.cfg.update('configopts', ' -DPYTHON_EXECUTABLE=%s' % os.path.join(pythonroot, 'bin', 'python'))
if self.name == 'PSI4' and LooseVersion(self.version) >= LooseVersion("1.2"):
self.log.info("Remove the CMAKE_BUILD_TYPE test in PSI4 source and the downloaded dependencies!")
self.log.info("Use PATCH_COMMAND in the corresponding CMakeLists.txt")
self.cfg['build_type'] = 'EasyBuildRelease'
if self.toolchain.options.get('usempi', None):
self.cfg.update('configopts', " -DENABLE_MPI=ON")
if get_software_root('imkl'):
self.cfg.update('configopts', " -DENABLE_CSR=ON -DBLAS_TYPE=MKL")
if self.name == 'PSI4':
pcmsolverroot = get_software_root('PCMSolver')
if pcmsolverroot:
if LooseVersion(self.version) >= LooseVersion("1.1"):
pcmsolver = 'PCMSolver'
else:
pcmsolver = 'PCMSOLVER'
self.cfg.update('configopts', " -DENABLE_%s=ON" % pcmsolver)
if LooseVersion(self.version) < LooseVersion("1.2"):
self.cfg.update('configopts', " -DPCMSOLVER_ROOT=%s" % pcmsolverroot)
else:
self.cfg.update('configopts', " -DCMAKE_INSIST_FIND_PACKAGE_PCMSolver=ON "
"-DPCMSolver_DIR=%s/share/cmake/PCMSolver" % pcmsolverroot)
chempsroot = get_software_root('CheMPS2')
if chempsroot:
if LooseVersion(self.version) >= LooseVersion("1.1"):
chemps2 = 'CheMPS2'
else:
chemps2 = 'CHEMPS2'
self.cfg.update('configopts', " -DENABLE_%s=ON" % chemps2)
if LooseVersion(self.version) < LooseVersion("1.2"):
self.cfg.update('configopts', " -DCHEMPS2_ROOT=%s" % chempsroot)
else:
self.cfg.update('configopts', " -DCMAKE_INSIST_FIND_PACKAGE_CheMPS2=ON "
"-DCheMPS2_DIR=%s/share/cmake/CheMPS2" % chempsroot)
# Be aware, PSI4 wants exact versions of the following deps! built with CMake!!
                # If you want to use non-CMake build versions, then you have to provide the
# corresponding Find<library-name>.cmake scripts
# In PSI4 version 1.2.1, you can check the corresponding CMakeLists.txt file
# in external/upstream/<library-name>/
if LooseVersion(self.version) >= LooseVersion("1.2"):
for dep in ['libxc', 'Libint', 'pybind11', 'gau2grid']:
deproot = get_software_root(dep)
if deproot:
self.cfg.update('configopts', " -DCMAKE_INSIST_FIND_PACKAGE_%s=ON" % dep)
dep_dir = os.path.join(deproot, 'share', 'cmake', dep)
self.cfg.update('configopts', " -D%s_DIR=%s " % (dep, dep_dir))
CMakeMake.configure_step(self, srcdir=self.cfg['start_dir'])
def install_step(self):
"""Custom install procedure for PSI."""
super(EB_PSI, self).install_step()
# the obj and unpacked sources must remain available for working with plugins
try:
for subdir in ['obj', self.psi_srcdir]:
# copy symlinks as symlinks to work around broken symlinks
shutil.copytree(os.path.join(self.builddir, subdir), os.path.join(self.installdir, subdir),
symlinks=True)
except OSError as err:
raise EasyBuildError("Failed to copy obj and unpacked sources to install dir: %s", err)
def test_step(self):
"""
Run the testsuite of PSI4
"""
testdir = tempfile.mkdtemp()
env.setvar('PSI_SCRATCH', testdir)
if self.name == 'PSI4' and LooseVersion(self.version) >= LooseVersion("1.2"):
if self.cfg['runtest']:
paracmd = ''
# Run ctest parallel, but limit to maximum 4 jobs (in case of slow disks)
if self.cfg['parallel']:
if self.cfg['parallel'] > 4:
paracmd = '-j 4'
else:
paracmd = "-j %s" % self.cfg['parallel']
cmd = "ctest %s %s" % (paracmd, self.cfg['runtest'])
run_cmd(cmd, log_all=True, simple=False)
else:
super(EB_PSI, self).test_step()
try:
shutil.rmtree(testdir)
except OSError as err:
raise EasyBuildError("Failed to remove test directory %s: %s", testdir, err)
def sanity_check_step(self):
"""Custom sanity check for PSI."""
custom_paths = {
'files': ['bin/psi4'],
'dirs': ['include', ('share/psi', 'share/psi4')],
}
super(EB_PSI, self).sanity_check_step(custom_paths=custom_paths)
def make_module_extra(self):
"""Custom variables for PSI module."""
txt = super(EB_PSI, self).make_module_extra()
share_dir = os.path.join(self.installdir, 'share')
if os.path.exists(share_dir):
psi4datadir = glob.glob(os.path.join(share_dir, 'psi*'))
if len(psi4datadir) == 1:
txt += self.module_generator.set_environment('PSI4DATADIR', psi4datadir[0])
else:
raise EasyBuildError("Failed to find exactly one PSI4 data dir: %s", psi4datadir)
return txt
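# Illustrative sketch (an assumption, not a real easyconfig): this easyblock
# is driven by an easyconfig file along these lines; versions are made up.
#
#   name = 'PSI4'
#   version = '1.2.1'
#   toolchain = {'name': 'foss', 'version': '2018a'}
#   dependencies = [('Python', '2.7.14'), ('Libint', '1.1.6')]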
| gpl-2.0 | -9,018,316,180,705,056,000 | 44.669291 | 116 | 0.585259 | false |
diplomacy/research | diplomacy_research/models/policy/order_based/dataset/base.py | 1 | 19541 | # ==============================================================================
# Copyright 2019 - Philip Paquette
#
# NOTICE: Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# ==============================================================================
""" Order Based Base Dataset Builder
- Base class responsible for generating the protocol buffers to be used by the model
"""
import logging
import numpy as np
from diplomacy import Map
from diplomacy_research.models.datasets.base_builder import FixedProtoField, VarProtoField
from diplomacy_research.models.policy.base_policy_builder import BasePolicyBuilder
from diplomacy_research.models.self_play.reward_functions import DefaultRewardFunction, DEFAULT_GAMMA
from diplomacy_research.models.state_space import get_order_tokens, get_order_based_mask, \
get_possible_orders_for_powers, get_issued_orders_for_powers, proto_to_board_state, GO_ID, NB_NODES, \
NB_SUPPLY_CENTERS, POWER_VOCABULARY_KEY_TO_IX, order_to_ix, MAX_CANDIDATES, NB_FEATURES, NB_ORDERS_FEATURES, \
NB_PREV_ORDERS, NB_PREV_ORDERS_HISTORY, get_board_alignments, get_orderable_locs_for_powers, get_current_season, \
proto_to_prev_orders_state
# Constants
LOGGER = logging.getLogger(__name__)
class BaseDatasetBuilder(BasePolicyBuilder):
""" This object is responsible for maintaining the data and feeding it into the model """
@staticmethod
def get_proto_fields():
""" Returns the proto fields used by this dataset builder """
# Creating proto fields
proto_fields = {
'request_id': FixedProtoField([], None),
'player_seed': FixedProtoField([], np.int32),
'board_state': FixedProtoField([NB_NODES, NB_FEATURES], np.uint8),
'board_alignments': VarProtoField([NB_NODES * NB_SUPPLY_CENTERS], np.uint8),
'prev_orders_state': FixedProtoField([NB_PREV_ORDERS, NB_NODES, NB_ORDERS_FEATURES], np.uint8),
'decoder_inputs': VarProtoField([1 + NB_SUPPLY_CENTERS], np.int32),
'decoder_lengths': FixedProtoField([], np.int32),
'candidates': VarProtoField([None, MAX_CANDIDATES], np.int32),
'noise': FixedProtoField([], np.float32),
'temperature': FixedProtoField([], np.float32),
'dropout_rate': FixedProtoField([], np.float32),
'current_power': FixedProtoField([], np.int32),
'current_season': FixedProtoField([], np.int32),
'draw_target': FixedProtoField([], np.float32),
'value_target': FixedProtoField([], np.float32)
}
return proto_fields
@staticmethod
def get_feedable_item(locs, state_proto, power_name, phase_history_proto, possible_orders_proto, **kwargs):
""" Computes and return a feedable item (to be fed into the feedable queue)
:param locs: A list of locations for which we want orders
:param state_proto: A `.proto.game.State` representation of the state of the game.
:param power_name: The power name for which we want the orders and the state values
:param phase_history_proto: A list of `.proto.game.PhaseHistory`. This represents prev phases.
:param possible_orders_proto: A `proto.game.PossibleOrders` object representing possible order for each loc.
:param kwargs: Additional optional kwargs:
- player_seed: The seed to apply to the player to compute a deterministic mask.
- noise: The sigma of the additional noise to apply to the intermediate layers (i.e. sigma * epsilon)
- temperature: The temperature to apply to the logits. (Default to 0. for deterministic/greedy)
- dropout_rate: The amount of dropout to apply to the inputs/outputs of the decoder.
:return: A feedable item, with feature names as key and numpy arrays as values
"""
# pylint: disable=too-many-branches
# Converting to state space
map_object = Map(state_proto.map)
board_state = proto_to_board_state(state_proto, map_object)
# Building the decoder length
# For adjustment phase, we restrict the number of builds/disbands to what is allowed by the game engine
in_adjustment_phase = state_proto.name[-1] == 'A'
nb_builds = state_proto.builds[power_name].count
nb_homes = len(state_proto.builds[power_name].homes)
# If we are in adjustment phase, making sure the locs are the orderable locs (and not the policy locs)
if in_adjustment_phase:
orderable_locs, _ = get_orderable_locs_for_powers(state_proto, [power_name])
if sorted(locs) != sorted(orderable_locs):
if locs:
LOGGER.warning('Adj. phase requires orderable locs. Got %s. Expected %s.', locs, orderable_locs)
locs = orderable_locs
# WxxxA - We can build units
# WxxxA - We can disband units
# Other phase
if in_adjustment_phase and nb_builds >= 0:
decoder_length = min(nb_builds, nb_homes)
elif in_adjustment_phase and nb_builds < 0:
decoder_length = abs(nb_builds)
else:
decoder_length = len(locs)
# Computing the candidates for the policy
if possible_orders_proto:
# Adjustment Phase - Use all possible orders for each location.
if in_adjustment_phase:
# Building a list of all orders for all locations
adj_orders = []
for loc in locs:
adj_orders += possible_orders_proto[loc].value
# Computing the candidates
candidates = [get_order_based_mask(adj_orders)] * decoder_length
# Regular phase - Compute candidates for each location
else:
candidates = []
for loc in locs:
candidates += [get_order_based_mask(possible_orders_proto[loc].value)]
# We don't have possible orders, so we cannot compute candidates
# This might be normal if we are only getting the state value or the next message to send
else:
candidates = []
for _ in range(decoder_length):
candidates.append([])
# Prev orders state
prev_orders_state = []
for phase_proto in reversed(phase_history_proto):
if len(prev_orders_state) == NB_PREV_ORDERS:
break
if phase_proto.name[-1] == 'M':
prev_orders_state = [proto_to_prev_orders_state(phase_proto, map_object)] + prev_orders_state
for _ in range(NB_PREV_ORDERS - len(prev_orders_state)):
prev_orders_state = [np.zeros((NB_NODES, NB_ORDERS_FEATURES), dtype=np.uint8)] + prev_orders_state
prev_orders_state = np.array(prev_orders_state)
# Building (order) decoder inputs [GO_ID]
decoder_inputs = [GO_ID]
# kwargs
player_seed = kwargs.get('player_seed', 0)
noise = kwargs.get('noise', 0.)
temperature = kwargs.get('temperature', 0.)
dropout_rate = kwargs.get('dropout_rate', 0.)
# Building feedable data
item = {
'player_seed': player_seed,
'board_state': board_state,
'board_alignments': get_board_alignments(locs,
in_adjustment_phase=in_adjustment_phase,
tokens_per_loc=1,
decoder_length=decoder_length),
'prev_orders_state': prev_orders_state,
'decoder_inputs': decoder_inputs,
'decoder_lengths': decoder_length,
'candidates': candidates,
'noise': noise,
'temperature': temperature,
'dropout_rate': dropout_rate,
'current_power': POWER_VOCABULARY_KEY_TO_IX[power_name],
'current_season': get_current_season(state_proto)
}
# Return
return item
@property
def proto_generation_callable(self):
""" Returns a callable required for proto files generation.
e.g. return generate_proto(saved_game_bytes, is_validation_set)
Note: Callable args are - saved_game_bytes: A `.proto.game.SavedGame` object from the dataset
- phase_ix: The index of the phase we want to process
- is_validation_set: Boolean that indicates if we are generating the validation set
Note: Used bytes_to_proto from diplomacy_research.utils.proto to convert bytes to proto
The callable must return a list of tf.train.Example to put in the protocol buffer file
"""
raise NotImplementedError()
# ---------- Multiprocessing methods to generate proto buffer ----------------
def get_policy_data(saved_game_proto, power_names, top_victors):
""" Computes the proto to save in tf.train.Example as a training example for the policy network
:param saved_game_proto: A `.proto.game.SavedGame` object from the dataset.
:param power_names: The list of powers for which we want the policy data
:param top_victors: The list of powers that ended with more than 25% of the supply centers
:return: A dictionary with key: the phase_ix
with value: A dict with the power_name as key and a dict with the example fields as value
"""
nb_phases = len(saved_game_proto.phases)
policy_data = {phase_ix: {} for phase_ix in range(nb_phases - 1)}
game_id = saved_game_proto.id
map_object = Map(saved_game_proto.map)
# Determining if we have a draw
nb_sc_to_win = len(map_object.scs) // 2 + 1
has_solo_winner = max([len(saved_game_proto.phases[-1].state.centers[power_name].value)
for power_name in saved_game_proto.phases[-1].state.centers]) >= nb_sc_to_win
survivors = [power_name for power_name in saved_game_proto.phases[-1].state.centers
if saved_game_proto.phases[-1].state.centers[power_name].value]
has_draw = not has_solo_winner and len(survivors) >= 2
# Processing all phases (except the last one)
current_year = 0
for phase_ix in range(nb_phases - 1):
# Building a list of orders of previous phases
previous_orders_states = [np.zeros((NB_NODES, NB_ORDERS_FEATURES), dtype=np.uint8)] * NB_PREV_ORDERS
for phase_proto in saved_game_proto.phases[max(0, phase_ix - NB_PREV_ORDERS_HISTORY):phase_ix]:
if phase_proto.name[-1] == 'M':
previous_orders_states += [proto_to_prev_orders_state(phase_proto, map_object)]
previous_orders_states = previous_orders_states[-NB_PREV_ORDERS:]
prev_orders_state = np.array(previous_orders_states)
# Parsing each requested power in the specified phase
phase_proto = saved_game_proto.phases[phase_ix]
phase_name = phase_proto.name
state_proto = phase_proto.state
phase_board_state = proto_to_board_state(state_proto, map_object)
# Increasing year for every spring or when the game is completed
if phase_proto.name == 'COMPLETED' or (phase_proto.name[0] == 'S' and phase_proto.name[-1] == 'M'):
current_year += 1
for power_name in power_names:
phase_issued_orders = get_issued_orders_for_powers(phase_proto, [power_name])
phase_possible_orders = get_possible_orders_for_powers(phase_proto, [power_name])
phase_draw_target = 1. if has_draw and phase_ix == (nb_phases - 2) and power_name in survivors else 0.
# Data to use when not learning a policy
blank_policy_data = {'board_state': phase_board_state,
'prev_orders_state': prev_orders_state,
'draw_target': phase_draw_target}
# Power is not a top victor - We don't want to learn a policy from him
if power_name not in top_victors:
policy_data[phase_ix][power_name] = blank_policy_data
continue
# Finding the orderable locs
orderable_locations = list(phase_issued_orders[power_name].keys())
# Skipping power for this phase if we are only issuing Hold
for order_loc, order in phase_issued_orders[power_name].items():
order_tokens = get_order_tokens(order)
if len(order_tokens) >= 2 and order_tokens[1] != 'H':
break
else:
policy_data[phase_ix][power_name] = blank_policy_data
continue
# Removing orderable locs where orders are not possible (i.e. NO_CHECK games)
for order_loc, order in phase_issued_orders[power_name].items():
if order not in phase_possible_orders[order_loc] and order_loc in orderable_locations:
if 'NO_CHECK' not in saved_game_proto.rules:
LOGGER.warning('%s not in all possible orders. Phase %s - Game %s.', order, phase_name, game_id)
orderable_locations.remove(order_loc)
# Remove orderable locs where the order is either invalid or not frequent
if order_to_ix(order) is None and order_loc in orderable_locations:
orderable_locations.remove(order_loc)
# Determining if we are in an adjustment phase
in_adjustment_phase = state_proto.name[-1] == 'A'
nb_builds = state_proto.builds[power_name].count
nb_homes = len(state_proto.builds[power_name].homes)
# WxxxA - We can build units
# WxxxA - We can disband units
# Other phase
if in_adjustment_phase and nb_builds >= 0:
decoder_length = min(nb_builds, nb_homes)
elif in_adjustment_phase and nb_builds < 0:
decoder_length = abs(nb_builds)
else:
decoder_length = len(orderable_locations)
# Not all units were disbanded - Skipping this power as we can't learn the orders properly
if in_adjustment_phase and nb_builds < 0 and len(orderable_locations) < abs(nb_builds):
policy_data[phase_ix][power_name] = blank_policy_data
continue
# Not enough orderable locations for this power, skipping
if not orderable_locations or not decoder_length:
policy_data[phase_ix][power_name] = blank_policy_data
continue
# decoder_inputs [GO, order1, order2, order3]
decoder_inputs = [GO_ID]
decoder_inputs += [order_to_ix(phase_issued_orders[power_name][loc]) for loc in orderable_locations]
if in_adjustment_phase and nb_builds > 0:
decoder_inputs += [order_to_ix('WAIVE')] * (min(nb_builds, nb_homes) - len(orderable_locations))
decoder_length = min(decoder_length, NB_SUPPLY_CENTERS)
# Adjustment Phase - Use all possible orders for each location.
if in_adjustment_phase:
build_disband_locs = list(get_possible_orders_for_powers(phase_proto, [power_name]).keys())
phase_board_alignments = get_board_alignments(build_disband_locs,
in_adjustment_phase=in_adjustment_phase,
tokens_per_loc=1,
decoder_length=decoder_length)
# Building a list of all orders for all locations
adj_orders = []
for loc in build_disband_locs:
adj_orders += phase_possible_orders[loc]
# Not learning builds for BUILD_ANY
if nb_builds > 0 and 'BUILD_ANY' in state_proto.rules:
adj_orders = []
# No orders found - Skipping
if not adj_orders:
policy_data[phase_ix][power_name] = blank_policy_data
continue
# Computing the candidates
candidates = [get_order_based_mask(adj_orders)] * decoder_length
# Regular phase - Compute candidates for each location
else:
phase_board_alignments = get_board_alignments(orderable_locations,
in_adjustment_phase=in_adjustment_phase,
tokens_per_loc=1,
decoder_length=decoder_length)
candidates = []
for loc in orderable_locations:
candidates += [get_order_based_mask(phase_possible_orders[loc])]
# Saving results
# No need to return temperature, current_power, current_season
policy_data[phase_ix][power_name] = {'board_state': phase_board_state,
'board_alignments': phase_board_alignments,
'prev_orders_state': prev_orders_state,
'decoder_inputs': decoder_inputs,
'decoder_lengths': decoder_length,
'candidates': candidates,
'draw_target': phase_draw_target}
# Returning
return policy_data
def get_value_data(saved_game_proto, power_names):
""" Computes the proto to save in tf.train.Example as a training example for the value network
:param saved_game_proto: A `.proto.game.SavedGame` object from the dataset.
:param power_names: The list of powers for which we want the policy data
:return: A dictionary with key: the phase_ix
with value: A dict with the power_name as key and a dict with the example fields as value
"""
nb_phases = len(saved_game_proto.phases)
value_data = {phase_ix: {} for phase_ix in range(nb_phases - 1)}
# Computing the value of each phase
for power_name in power_names:
value_targets = []
current_value = 0.
rewards = DefaultRewardFunction().get_episode_rewards(saved_game_proto, power_name)
for reward in reversed(rewards):
current_value = reward + DEFAULT_GAMMA * current_value
value_targets += [current_value]
value_targets += [0]
# Computing the value data
for phase_ix in range(nb_phases - 1):
value_data[phase_ix][power_name] = {'value_target': value_targets[phase_ix]}
# Returning the value of the specified phase for each power
return value_data
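# Illustrative sketch (not part of the original module): combining the two
# generators above into per-phase training fields for one power. The power
# name 'FRANCE' is just an example.
#
#   policy = get_policy_data(saved_game_proto, ['FRANCE'], top_victors=['FRANCE'])
#   value = get_value_data(saved_game_proto, ['FRANCE'])
#   fields_for_phase_0 = dict(policy[0]['FRANCE'], **value[0]['FRANCE'])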
| mit | -6,682,590,270,983,825,000 | 51.52957 | 120 | 0.592703 | false |
gencer/sentry | src/sentry/api/permissions.py | 1 | 1494 | from __future__ import absolute_import
from rest_framework import permissions
from sentry.auth.superuser import is_active_superuser
class NoPermission(permissions.BasePermission):
def has_permission(self, request, view):
return False
class ScopedPermission(permissions.BasePermission):
"""
Permissions work depending on the type of authentication:
- A user inherits permissions based on their membership role. These are
still dictated as common scopes, but they can't be checked until the
has_object_permission hook is called.
    - ProjectKeys (legacy) are granted only project based scopes.
- APIKeys specify their scope, and work as expected.
"""
scope_map = {
'HEAD': (),
'GET': (),
'POST': (),
'PUT': (),
'PATCH': (),
'DELETE': (),
}
def has_permission(self, request, view):
# session-based auth has all scopes for a logged in user
if not request.auth:
return request.user.is_authenticated()
allowed_scopes = set(self.scope_map.get(request.method, []))
current_scopes = request.auth.get_scopes()
return any(s in allowed_scopes for s in current_scopes)
def has_object_permission(self, request, view, obj):
return False
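# Illustrative sketch (an assumption, not part of this module): a concrete
# permission maps each HTTP method to the scopes allowed to call it. The
# class and scope names below are examples only.
class ExampleOrganizationPermission(ScopedPermission):
    scope_map = {
        'GET': ['org:read', 'org:write', 'org:admin'],
        'POST': ['org:write', 'org:admin'],
        'PUT': ['org:write', 'org:admin'],
        'DELETE': ['org:admin'],
    }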
class SuperuserPermission(permissions.BasePermission):
def has_permission(self, request, view):
if is_active_superuser(request):
return True
return False
| bsd-3-clause | -241,834,615,578,970,980 | 29.489796 | 75 | 0.657296 | false |
MFry/pyAlgoDataStructures | Interview_Cake/p1_stock_price.py | 1 | 1516 | """
Problem 1: maximum profit from a single buy/sell of a stock, given
yesterday's prices (Interview Cake). Includes a brute-force O(n^2)
solution and a greedy O(n) solution.
"""
import unittest
def brute_get_max_profits(yesterday_prices):
"""
    Brute force O(n^2) method: try every buy/sell pair.
    :param yesterday_prices: stock prices in chronological order
    :return: best achievable profit (may be negative if prices only fall)
"""
max_price = float('-inf')
for i, buy_price in enumerate(yesterday_prices):
best_price = float('-inf')
for sell_price in yesterday_prices[i + 1:]:
if best_price < sell_price - buy_price:
best_price = sell_price - buy_price
if best_price > max_price:
max_price = best_price
return max_price
def get_max_profits(yesterday_prices):
"""
    Greedy O(n) algorithm: track the lowest buy price seen so far and the
    best profit from selling at the current price.
    :param yesterday_prices: stock prices in chronological order
    :return: best achievable profit (may be negative if prices only fall)
"""
if len(yesterday_prices) < 2:
raise IndexError('Calculating profit requires at least two values')
min_buy = float('inf')
max_price = float('-inf')
for sell_price in yesterday_prices:
if sell_price - min_buy > max_price:
max_price = sell_price - min_buy
if min_buy > sell_price:
min_buy = sell_price
return max_price
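# Worked example for the greedy pass above (illustrative):
#   prices = [10, 7, 5, 8, 11, 9]
#   min_buy shrinks 10 -> 7 -> 5, and the best profit seen is 11 - 5 = 6,
#   so get_max_profits(prices) returns 6 after a single O(n) scan.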
class MyTestCase(unittest.TestCase):
def test_get_max_profits(self):
check_price_yesterday = [10, 7, 5, 8, 11, 9]
self.assertEqual(brute_get_max_profits(check_price_yesterday), 6)
self.assertEqual(get_max_profits(check_price_yesterday), 6)
check_price_yesterday = [10, 11, 12, 50, 60, 100]
ans = brute_get_max_profits(check_price_yesterday)
self.assertEqual(get_max_profits(check_price_yesterday), ans)
| mit | -4,202,641,034,049,540,000 | 29.32 | 75 | 0.60752 | false |
nishant-jain-94/Autofill | src/lstm-3-1024-1024-batchsize-512-epochs-30-Sequence.py | 1 | 4123 | from __future__ import print_function
import json
import os
import numpy as np
import sys
import h5py
from gensim.models import Word2Vec
from gensim.utils import simple_preprocess
from keras.layers import Embedding
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.preprocessing import sequence
from intersect_embeddings import Embeddings
from keras.callbacks import ModelCheckpoint
from nltk.tokenize import word_tokenize
import random
from itertools import groupby
# ## Instantiate Embeddings
embeddings = Embeddings(300, 4, 1, 4)
# ### Getting data from preprocessing
word2vec_model = embeddings.get_intersected_model()
word2index, index2word = embeddings.get_vocabulary()
word2vec_weights = word2vec_model.wv.syn0
tokenized_indexed_sentences = embeddings.get_indexed_sentences()
word2index = {word:index+1 for word, index in word2index.items()}
index2word = {index:word for word, index in word2index.items()}
word2index
tokenized_indexed_sentences[0]
tokenized_indexed_sentences = [np.array(sentence) + 1 for sentence in tokenized_indexed_sentences if len(sentence) > 0]
new_weights = np.zeros((1, word2vec_weights.shape[1]))
new_weights = np.append(new_weights, word2vec_weights, axis = 0)
# ### generating training data
window_size = 5
vocab_size = len(word2index)
print(vocab_size)
maxlen = max([len(sentence) for sentence in tokenized_indexed_sentences])
tokenized_indexed_sentences = sequence.pad_sequences(tokenized_indexed_sentences)
seq_in = []
seq_out = []
# generating dataset
tokenized_indexed_sentences = [sentence for sentence in tokenized_indexed_sentences if len(sentence) > 0]
for sentence in tokenized_indexed_sentences:
x = sentence
y = np.append(sentence[1:], np.array(sentence[len(sentence)-1]))
seq_in.append(x)
seq_out.append([new_weights[index] for index in y])
# converting seq_in and seq_out into numpy array
seq_in = np.array(seq_in)
seq_out = np.array(seq_out)
n_samples = len(seq_in)
print ("Number of samples : ", n_samples)
# ## Defining model
# Changes to the model to be done here
model = Sequential()
model.add(Embedding(input_dim = new_weights.shape[0], output_dim = new_weights.shape[1], weights = [new_weights], mask_zero = True))
model.add(LSTM(1024, return_sequences = True))
model.add(LSTM(1024, return_sequences = True))
model.add(LSTM(300, return_sequences = True))
model.compile(loss='cosine_proximity', optimizer='adam',metrics=['accuracy'])
model.summary()
model_weights_path = "../weights/lstm-3-1024-1024-batchsize-512-epochs-30-Sequence"
if not os.path.exists(model_weights_path):
os.makedirs(model_weights_path)
checkpoint_path = model_weights_path + '/weights.{epoch:02d}.hdf5'
checkpoint = ModelCheckpoint(filepath=checkpoint_path, verbose=1, save_best_only=False, mode='max')
# ## Train Model
model.fit(seq_in, seq_out, epochs=30, verbose=1, batch_size=512, callbacks=[checkpoint])
# ### model predict
start = 0
sentence_test = "In which regions in particular did"
indexed_sentences = embeddings.get_indexed_query(sentence_test)
print("indexed_sentences ",indexed_sentences)
sent = np.array(indexed_sentences)
pattern = list(sent)
print("\"",' '.join(index2word[index] for index in pattern))
for i in range(10):
prediction = model.predict(np.array([pattern]))
pred_word = word2vec_model.similar_by_vector(prediction[0][prediction.shape[1] - 1])[0][0]
sys.stdout.write(pred_word+" ")
pattern.append(word2index[pred_word])
pattern = pattern[:len(pattern)]
# ## Accuracy
def accuracy():
count = 0
correct = 0
for sub_sample_in, sub_sample_out in zip(seq_in, seq_out):
ypred = model.predict_on_batch(np.expand_dims(sub_sample_in, axis = 0))[0]
ytrue = sub_sample_out
pred_word = word2vec_model.similar_by_vector(ypred)[0][0]
true_word = word2vec_model.similar_by_vector(ytrue)[0][0]
similarity = word2vec_model.similarity(pred_word, true_word)
if similarity == 1:
correct += 1
count += 1
print("Accuracy {0}".format(correct/count)) | gpl-3.0 | 1,490,675,374,157,375,700 | 33.949153 | 132 | 0.732961 | false |
ARMmbed/greentea | test/gtea/gt_cli.py | 1 | 4917 | #
# Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
import six
import sys
import unittest
from greentea import greentea_cli
from greentea.gtea.tests_spec import TestSpec
test_spec_def = {
"builds": {
"K64F-ARM": {
"platform": "K64F",
"toolchain": "ARM",
"base_path": "./.build/K64F/ARM",
"baud_rate": 115200,
"tests": {
"mbed-drivers-test-generic_tests": {
"binaries": [
{
"binary_type": "bootable",
"path": "./.build/K64F/ARM/mbed-drivers-test-generic_tests.bin",
}
]
},
"mbed-drivers-test-c_strings": {
"binaries": [
{
"binary_type": "bootable",
"path": "./.build/K64F/ARM/mbed-drivers-test-c_strings.bin",
}
]
},
},
}
}
}
class GreenteaCliFunctionality(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_get_greentea_version(self):
version = greentea_cli.get_greentea_version()
self.assertIs(type(version), str)
version_list = version.split(".")
self.assertEqual(version_list[0].isdigit(), True)
self.assertEqual(version_list[1].isdigit(), True)
self.assertEqual(version_list[2].isdigit(), True)
def test_print_version(self):
version = greentea_cli.get_greentea_version()
old_stdout = sys.stdout
sys.stdout = stdout_capture = six.StringIO()
greentea_cli.print_version()
sys.stdout = old_stdout
printed_version = stdout_capture.getvalue().splitlines()[0]
self.assertEqual(printed_version, version)
def test_get_hello_string(self):
version = greentea_cli.get_greentea_version()
hello_string = greentea_cli.get_hello_string()
self.assertIs(type(version), str)
self.assertIs(type(hello_string), str)
self.assertIn(version, hello_string)
def test_get_local_host_tests_dir_invalid_path(self):
test_path = greentea_cli.get_local_host_tests_dir("invalid-path")
self.assertEqual(test_path, None)
def test_get_local_host_tests_dir_valid_path(self):
path = "."
test_path = greentea_cli.get_local_host_tests_dir(path)
self.assertEqual(test_path, path)
def test_get_local_host_tests_dir_default_path(self):
import os
import shutil
import tempfile
curr_dir = os.getcwd()
test1_dir = tempfile.mkdtemp()
test2_dir = os.mkdir(os.path.join(test1_dir, "test"))
test3_dir = os.mkdir(os.path.join(test1_dir, "test", "host_tests"))
os.chdir(test1_dir)
test_path = greentea_cli.get_local_host_tests_dir("")
self.assertEqual(test_path, "./test/host_tests")
os.chdir(curr_dir)
shutil.rmtree(test1_dir)
def test_create_filtered_test_list(self):
test_spec = TestSpec()
test_spec.parse(test_spec_def)
test_build = test_spec.get_test_builds()[0]
test_list = greentea_cli.create_filtered_test_list(
test_build.get_tests(),
"mbed-drivers-test-generic_*",
None,
test_spec=test_spec,
)
self.assertEqual(
set(test_list.keys()), set(["mbed-drivers-test-generic_tests"])
)
test_list = greentea_cli.create_filtered_test_list(
test_build.get_tests(), "*_strings", None, test_spec=test_spec
)
self.assertEqual(set(test_list.keys()), set(["mbed-drivers-test-c_strings"]))
test_list = greentea_cli.create_filtered_test_list(
test_build.get_tests(), "mbed*s", None, test_spec=test_spec
)
expected = set(
["mbed-drivers-test-c_strings", "mbed-drivers-test-generic_tests"]
)
self.assertEqual(set(test_list.keys()), expected)
test_list = greentea_cli.create_filtered_test_list(
test_build.get_tests(), "*-drivers-*", None, test_spec=test_spec
)
expected = set(
["mbed-drivers-test-c_strings", "mbed-drivers-test-generic_tests"]
)
self.assertEqual(set(test_list.keys()), expected)
# Should be case insensitive
test_list = greentea_cli.create_filtered_test_list(
test_build.get_tests(), "*-DRIVERS-*", None, test_spec=test_spec
)
expected = set(
["mbed-drivers-test-c_strings", "mbed-drivers-test-generic_tests"]
)
self.assertEqual(set(test_list.keys()), expected)
if __name__ == "__main__":
unittest.main()
| apache-2.0 | -5,142,583,259,424,977,000 | 30.928571 | 92 | 0.55725 | false |
b-cube/thredds_catalog_crawler | thredds_catalog_crawler/crawl.py | 1 | 11950 | from thredds_crawler.etree import etree
import urllib
import urlparse
import requests
import os
import sys
import re
from thredds_crawler.utils import construct_url
INV_NS = "http://www.unidata.ucar.edu/namespaces/thredds/InvCatalog/v1.0"
XLINK_NS = "http://www.w3.org/1999/xlink"
import logging
try:
# Python >= 2.7
from logging import NullHandler
except ImportError:
# Python < 2.7
class NullHandler(logging.Handler):
def emit(self, record):
pass
logger = logging.getLogger("thredds_crawler")
logger.addHandler(NullHandler())
class Crawl(object):
# TODO: this is super specific
SKIPS = [
# ".*files.*",
# ".*Individual Files.*",
# ".*File_Access.*",
# ".*Forecast Model Run.*",
# ".*Constant Forecast Offset.*",
# ".*Constant Forecast Date.*"
]
def __init__(self, catalog_url, select=None, skip=None, debug=None):
"""
select: a list of dataset IDs. Python regex supported.
skip: list of dataset names and/or a catalogRef titles. Python regex supported.
"""
if debug is True:
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - [%(levelname)s] %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
self.catalog_url = catalog_url
# Only process these dataset IDs
if select is not None:
select = map(lambda x: re.compile(x), select)
self.select = select
# Skip these dataset links, such as a list of files
# ie. "files/"
if skip is None:
skip = Crawl.SKIPS
self.skip = map(lambda x: re.compile(x), skip)
self.visited = []
# datasets = [LeafDataset(url) for url in self._run(url=catalog_url) if url is not None]
# self.datasets = filter(lambda x: x.id is not None, datasets)
def _find_root_url(self):
'''
before parsing the larger tree, check that the catalog_url
is the root node - return the shortest url that's good
'''
parts = urlparse.urlparse(self.catalog_url)
route_parts = parts.path.split('/')
route_parts = [r for r in route_parts if r and r != 'catalog.xml']
founds = []
for i in xrange(len(route_parts) + 1):
route = urlparse.urlunparse(
(parts.scheme,
parts.netloc,
'/'.join(route_parts[:len(route_parts) - i] + ['catalog.xml']),
parts.params,
parts.query,
parts.fragment)
)
req = requests.head(route)
status_code = req.status_code
if status_code in [200, 304]:
founds.append(route)
return self.catalog_url if not founds else min(founds)
def _run(self, url):
if url in self.visited:
logger.debug("Skipping %s (already crawled)" % url)
return
self.visited.append(url)
logger.info("Crawling: %s" % url)
u = urlparse.urlsplit(url)
name, ext = os.path.splitext(u.path)
if ext == ".html":
u = urlparse.urlsplit(url.replace(".html", ".xml"))
url = u.geturl()
# Get an etree object
try:
r = requests.get(url)
tree = etree.XML(str(r.text))
except BaseException:
logger.error("Skipping %s (error parsing getting XML)" % url)
return
# Crawl the catalogRefs:
for ref in tree.findall('.//{%s}catalogRef' % INV_NS):
# Check skips
title = ref.get("{%s}title" % XLINK_NS)
if not any([x.match(title) for x in self.skip]):
for ds in self._run(url=construct_url(url, ref.get("{%s}href" % XLINK_NS))):
yield ds
else:
logger.info("Skipping catalogRef based on 'skips'. Title: %s" % title)
continue
# Get the leaf datasets
ds = []
for leaf in tree.findall('.//{%s}dataset[@urlPath]' % INV_NS):
# Subset by the skips
name = leaf.get("name")
if any([x.match(name) for x in self.skip]):
logger.info("Skipping dataset based on 'skips'. Name: %s" % name)
continue
# Subset by the Selects defined
gid = leaf.get('ID')
if self.select is not None:
if gid is not None and any([x.match(gid) for x in self.select]):
logger.debug("Processing %s" % gid)
yield "%s?dataset=%s" % (url, gid)
else:
logger.info("Ignoring dataset based on 'selects'. ID: %s" % gid)
continue
else:
logger.debug("Processing %s" % gid)
yield "%s?dataset=%s" % (url, gid)
class CatalogRef(object):
def __init__(self, source_url, element):
self.id = None
self.name = None
self.parent_url = source_url
self.elem = element
@property
def href(self):
return
def _parse(self):
# extract everything from the node
        name = self.elem.attrib.get('name', '')
        cat_id = self.elem.attrib.get('ID', '')
        title = self.elem.attrib.get('title', '')
        href = self.elem.attrib.get('{http://www.w3.org/1999/xlink}href', '')
        # NOTE: extract_element_tag is assumed to be a helper that strips the XML namespace from a tag name.
        tag = extract_element_tag(self.elem.tag)
        # get the parent
        parent = self.elem.getparent()
parent_tag = extract_element_tag(parent.tag)
parent_id = parent.attrib.get('ID', '') if parent_tag != 'catalog' else ''
class Dataset(object):
    def __init__(self, parent_url, elem):
self.id = None
self.name = None
self.parent_url = parent_url
self.elem = elem
def __repr__(self):
return "<Dataset id: %s, name: %s>" % (self.id, self.name)
def _parse_element(self):
'''
first, is it a bucket or a leaf?
if bucket, get children and carry on
if leaf, get endpoint and handle supported service list
get children and carry on (related catalogs, etc)
'''
self.is_leaf = self.elem.xpath('*[local-name()="access"]/@urlPath or @urlPath')
# if it has children, get them and add to follows
# do not add the access url(s) to follows. this is the terminus
class CatalogRef(object):
    def __init__(self, parent_url, elem):
self.id = None
self.name = None
self.parent_url = parent_url
self.elem = elem
# self.href_path = href_path
def __repr__(self):
return "<CatalogRef id: %s, name: %s>" % (self.id, self.name)
# TODO: url generation = parent path urljoin with href
@property
def href(self):
parts = urlparse.urlparse(self.href_path)
if parts.scheme and parts.netloc:
# it's a valid url, do nothing
return self.href_path
parts = urlparse.urlparse(self.parent_url)
# just a basic urljoin
if self.parent_type == 'dataset':
return urlparse.urljoin(self.parent_url.replace('catalog.xml', ''), self.href_path)
else:
pass
def follow(self):
req = requests.get(self.href)
# TODO: parse the xml and generate catalogRefs, Datasets x 2
class ParentDataset(object):
'''
a collection object, tagged as dataset, that can
contain catalogRefs, children datasets (likely terminal nodes)
or a metadata blob
this object won't have its own url (should be tied to the catalogRef URL parent)
'''
def __init__(self, parent_url):
self.id = None
self.name = None
self.parent_url = parent_url
self.children = []
def __repr__(self):
return "<ParentDataset id: %s, name: %s>" % (self.id, self.name)
class LeafDataset(object):
def __init__(self, dataset_url, estimate_size=False):
self.services = []
self.id = None
self.name = None
self.metadata = None
self.catalog_url = None
self.data_size = None
self.estimate_size = estimate_size
# Get an etree object
r = requests.get(dataset_url)
try:
tree = etree.XML(str(r.text))
except etree.XMLSyntaxError:
logger.error("Error procesing %s, invalid XML" % dataset_url)
else:
dataset = tree.find("{%s}dataset" % INV_NS)
self.id = dataset.get("ID")
self.name = dataset.get("name")
self.metadata = dataset.find("{%s}metadata" % INV_NS)
self.catalog_url = dataset_url.split("?")[0]
# Data Size - http://www.unidata.ucar.edu/software/thredds/current/tds/
# catalog/InvCatalogSpec.html#dataSize
data_size = dataset.find("{%s}dataSize" % INV_NS)
if data_size is not None:
self.data_size = float(data_size.text)
data_units = data_size.get('units')
# Convert to MB
if data_units == "bytes":
self.data_size *= 1e-6
elif data_units == "Kbytes":
self.data_size *= 0.001
elif data_units == "Gbytes":
self.data_size /= 0.001
elif data_units == "Tbytes":
self.data_size /= 1e-6
# Services
service_tag = dataset.find("{%s}serviceName" % INV_NS)
if service_tag is None:
service_tag = self.metadata.find("{%s}serviceName" % INV_NS)
service_name = service_tag.text
for service in tree.findall(".//{%s}service[@name='%s']" % (INV_NS, service_name)):
if service.get("serviceType") == "Compound":
for s in service.findall("{%s}service" % INV_NS):
url = ''
else:
url = ''
def follow(self):
# TODO: run the head requests for the service + urlPath
# hrefs to make sure they are valid requests
pass
@property
def href(self):
return urlparse.urljoin(
urlparse.urlunparse(
(
parts.scheme,
parts.netloc,
'/'.join(url_paths[0:match_index + 1]),
parts.params,
parts.query,
parts.fragment
)
),
path
)
@property
def size(self):
if self.data_size is not None:
return self.data_size
if self.estimate_size:
try:
dap_endpoint = next(s.get("url") for s in self.services
if s.get("service").lower() in ["opendap", "dap"])
# Get sizes from DDS
try:
import netCDF4
nc = netCDF4.Dataset(dap_endpoint)
bites = 0
for vname in nc.variables:
var = nc.variables.get(vname)
bites += var.dtype.itemsize * var.size
return bites * 1e-6 # Megabytes
except ImportError:
logger.error("The python-netcdf4 library is required for computing the size of this dataset.")
return None
except StopIteration:
return None # We can't calculate
return None
def __repr__(self):
return "<LeafDataset id: %s, name: %s, services: %s>" % (
self.id,
self.name,
str([s.get("service") for s in self.services])
)
| gpl-3.0 | -8,937,101,400,914,662,000 | 32.194444 | 114 | 0.529456 | false |
gsnbng/erpnext | erpnext/healthcare/doctype/fee_validity/test_fee_validity.py | 1 | 2275 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, ESS LLP and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from frappe.utils import nowdate, add_days
from erpnext.healthcare.doctype.patient_appointment.test_patient_appointment import create_healthcare_docs, create_appointment, create_healthcare_service_items
test_dependencies = ["Company"]
class TestFeeValidity(unittest.TestCase):
def setUp(self):
frappe.db.sql("""delete from `tabPatient Appointment`""")
frappe.db.sql("""delete from `tabFee Validity`""")
frappe.db.sql("""delete from `tabPatient`""")
def test_fee_validity(self):
item = create_healthcare_service_items()
healthcare_settings = frappe.get_single("Healthcare Settings")
healthcare_settings.enable_free_follow_ups = 1
healthcare_settings.max_visits = 2
healthcare_settings.valid_days = 7
healthcare_settings.automate_appointment_invoicing = 1
healthcare_settings.op_consulting_charge_item = item
healthcare_settings.save(ignore_permissions=True)
patient, medical_department, practitioner = create_healthcare_docs()
# appointment should not be invoiced. Check Fee Validity created for new patient
appointment = create_appointment(patient, practitioner, nowdate())
invoiced = frappe.db.get_value("Patient Appointment", appointment.name, "invoiced")
self.assertEqual(invoiced, 0)
# appointment should not be invoiced as it is within fee validity
appointment = create_appointment(patient, practitioner, add_days(nowdate(), 4))
invoiced = frappe.db.get_value("Patient Appointment", appointment.name, "invoiced")
self.assertEqual(invoiced, 0)
# appointment should be invoiced as it is within fee validity but the max_visits are exceeded
appointment = create_appointment(patient, practitioner, add_days(nowdate(), 5), invoice=1)
invoiced = frappe.db.get_value("Patient Appointment", appointment.name, "invoiced")
self.assertEqual(invoiced, 1)
# appointment should be invoiced as it is not within fee validity and the max_visits are exceeded
appointment = create_appointment(patient, practitioner, add_days(nowdate(), 10), invoice=1)
invoiced = frappe.db.get_value("Patient Appointment", appointment.name, "invoiced")
self.assertEqual(invoiced, 1) | agpl-3.0 | -5,500,447,730,551,173,000 | 46.416667 | 159 | 0.766593 | false |
amorwilliams/gsoops | server/gsoops/gsoops/urls.py | 1 | 2045 | from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.core.urlresolvers import reverse_lazy
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic.base import RedirectView
from rest_framework.routers import DefaultRouter
from rest_framework_nested import routers
from users.views import UserViewSet
from gamelog.views import GameLogView, LogStatView, UserInfoView, UserStateView
from gift.views import GiftViewSet, GiftCDKeyViewSet
from server.views import GameServerViewSet, GameServerInfoView
from gamemail.views import GameMailView
from gamenotice.views import GameNoticeViewSet, GameNoticeSendView
router = DefaultRouter()
router.register(r'users', UserViewSet)
router.register(r'gifts', GiftViewSet)
router.register(r'cdkeys', GiftCDKeyViewSet)
router.register(r'gameserver', GameServerViewSet)
router.register(r'notice', GameNoticeViewSet)
cdkey_router = routers.NestedSimpleRouter(router, r'gifts', lookup='gift')
cdkey_router.register(r'cdkeys', GiftCDKeyViewSet, base_name='cdkeys')
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^api/v1/', include('gsoops.authentication.urls')),
url(r'^api/v1/notice/send/', GameNoticeSendView.as_view()),
url(r'^api/v1/gameglobal/(?P<pk>[0-9]+)/$', GameServerInfoView.as_view()),
url(r'^api/v1/logstat/', LogStatView.as_view()),
url(r'^api/v1/userinfo/', UserInfoView.as_view()),
url(r'^api/v1/userstate/', UserStateView.as_view()),
url(r'^api/v1/', include(router.urls)),
url(r'^api/v1/', include(cdkey_router.urls)),
url(r'^api/v1/gamelog/', GameLogView.as_view()),
url(r'^api/v1/gamemail/', GameMailView.as_view()),
# the 'api-root' from django rest-frameworks default router
# http://www.django-rest-framework.org/api-guide/routers/#defaultrouter
# url(r'^$', RedirectView.as_view(url=reverse_lazy('api-root'), permanent=False)),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| mit | -5,263,702,692,553,083,000 | 44.444444 | 86 | 0.755501 | false |
lichuan261/wuand | XX-Net/python27/1.0/lib/noarch/dnslib/ranges.py | 1 | 2427 | # -*- coding: utf-8 -*-
"""
Wrapper around property builtin to restrict attribute to defined
integer value range (throws ValueError).
Intended to ensure that values packed with struct are in the
correct range
>>> class T(object):
... a = range_property('a',-100,100)
... b = B('b')
... c = H('c')
... d = I('d')
>>> t = T()
>>> for i in [0,100,-100]:
... t.a = i
... assert t.a == i
>>> t.a = 101
Traceback (most recent call last):
...
ValueError: Attribute 'a' must be between -100-100 [101]
>>> t.a = -101
Traceback (most recent call last):
...
ValueError: Attribute 'a' must be between -100-100 [-101]
>>> t.a = 'blah'
Traceback (most recent call last):
...
ValueError: Attribute 'a' must be between -100-100 [blah]
"""
import sys
if sys.version < '3':
int_types = (int, long,)
else:
int_types = (int,)
def range_property(attr,min,max):
def getter(obj):
return getattr(obj,"_%s" % attr)
def setter(obj,val):
if isinstance(val,int_types) and min <= val <= max:
setattr(obj,"_%s" % attr,val)
else:
raise ValueError("Attribute '%s' must be between %d-%d [%s]" %
(attr,min,max,val))
return property(getter,setter)
def B(attr):
"""
Unsigned Byte
"""
return range_property(attr,0,255)
def H(attr):
"""
Unsigned Short
"""
return range_property(attr,0,65535)
def I(attr):
"""
Unsigned Long
"""
return range_property(attr,0,4294967295)
def ntuple_range(attr,n,min,max):
f = lambda x : isinstance(x,int_types) and min <= x <= max
def getter(obj):
return getattr(obj,"_%s" % attr)
def setter(obj,val):
if len(val) != n:
raise ValueError("Attribute '%s' must be tuple with %d elements [%s]" %
(attr,n,val))
if all(map(f,val)):
setattr(obj,"_%s" % attr,val)
else:
raise ValueError("Attribute '%s' elements must be between %d-%d [%s]" %
(attr,min,max,val))
return property(getter,setter)
def IP4(attr):
return ntuple_range(attr,4,0,255)
def IP6(attr):
return ntuple_range(attr,16,0,255)
if __name__ == '__main__':
import doctest
doctest.testmod()
| gpl-2.0 | 5,592,855,449,354,411,000 | 25.096774 | 84 | 0.526164 | false |
arefenayat/pysend | asli.py | 1 | 3605 | import sys
import urllib.request
from urllib.parse import urlparse
class check(object):
def __init__(self):pass
def address(self):
add=input("Enter URL Address With HTTP to Send Data: \n")
if add:
o = urlparse(add)
if not o.scheme:
self.address()
else:
self.address=add
else:
self.address()
def method(self):
method=input("Enter Method Name (GET OR POST) \n")
if method:
if method=="POST" or method=="post" or method=="get" or method=="GET":
self.method=method
else:
self.method()
else:
self.method()
def getkey(self):
keys=input("Enter Key's To Send exam: name,family,number \n")
if not len(keys):
self.getkey()
else:
keys=keys.split(',')
self.keys=keys
def getval(self):
values=input("Enter values's To Send exam saeid,ahmadi,2 \n")
if not len(values):
self.getval()
else:
values=values.split(',')
self.values=values
def post_(self,address,**datas):
data = urllib.parse.urlencode(datas)
data = data.encode('utf-8')
request = urllib.request.Request(address)
request.add_header("Content-Type","application/x-www-form-urlencoded;charset=utf-8")
request.add_header("User-Agent","Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11")
try :
f = urllib.request.urlopen(request, data)
print("Response Recived From "+address+" : \n")
print(f.read().decode('utf-8'))
again=input("Do you want to test again ? yes or no")
if again=='yes':
main()
else:
sys.exit(0)
        except urllib.error.HTTPError as err1:
            print(err1)
        except urllib.error.URLError as err0:
            print(err0)
def get_(self,address):
request = urllib.request.Request(address)
request.add_header("Content-Type","application/x-www-form-urlencoded;charset=utf-8")
request.add_header("User-Agent","Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11")
request.add_header('Referer', 'http://www.python.org/')
try :
f = urllib.request.urlopen(request)
print("Response Recived From "+address+" : \n")
print(f.read().decode('utf-8'))
again=input("Do you want to test again ? yes or no")
if again=='yes':
main()
else:
sys.exit(0)
except urllib.error.URLError as err0:
print(err0)
except urllib.error.HTTPError as err1:
print(err1)
def main():
barname=check()
barname.address()
barname.method()
barname.getkey()
barname.getval()
address=barname.address
method=barname.method
key=barname.keys
val=barname.values
if method=="GET" or method=="get" :
c=0
datas={}
for i in key:
datas[i]=val[c]
c=c+1
datass=str(datas)
a=datass.replace('}','')
a=a.replace('{','')
a=a.replace("'",'')
a=a.replace(":",'=')
a=a.replace(",",'&')
a=a.replace(" ",'')
j=address+'?'+a
barname.get_(j)
else:
c=0
datas={}
for i in key:
datas[i]=val[c]
c=c+1
barname.post_(address,**datas)
if __name__=="__main__":main()
| cc0-1.0 | 1,671,651,297,372,823,600 | 31.772727 | 107 | 0.521221 | false |
pelmers/dxr | dxr/indexers.py | 3 | 23208 | """Base classes and convenience functions for writing indexers and skimmers"""
from collections import namedtuple
from operator import itemgetter
from os.path import join, islink
from warnings import warn
from funcy import group_by, decorator, imapcat
from dxr.utils import build_offset_map, split_content_lines
STRING_PROPERTY = {
'type': 'string',
'index': 'not_analyzed',
'fields': {
'lower': { # for qualified_type direct searcher
'type': 'string',
'analyzer': 'lowercase'
}
}
}
# An unanalyzed string property that points to a value that can be exact- or
# prefix-matched against and carries start/end bounds for highlighting. Has
# both a name and a qualname.
QUALIFIED_FILE_NEEDLE = {
'type': 'object',
'properties': {
'name': STRING_PROPERTY,
'qualname': STRING_PROPERTY,
}
}
QUALIFIED_LINE_NEEDLE = {
'type': 'object',
'properties': {
'name': STRING_PROPERTY,
# The clang plugin stores both type-distinguished and merely scoped
# names here: both "Thing::foo(int num)" and "Thing::foo". Thus, the
# value may be either a string or a list:
'qualname': STRING_PROPERTY,
'start': {
'type': 'integer',
'index': 'no' # just for highlighting
},
'end': {
'type': 'integer',
'index': 'no'
}
}
}
class PluginConfig(object):
"""Mixin providing access to the plugin-specific configuration of a tree
Expects ``plugin_name`` and ``tree`` instance attrs.
"""
@property
def plugin_config(self):
"""Return a mapping of plugin-specific config options."""
return getattr(self.tree, self.plugin_name)
class FolderToIndex(PluginConfig):
"""The FolderToIndex generates needles for folders and provides an
optional list of headers to display in browse view as `browse_headers`."""
browse_headers = []
def __init__(self, plugin_name, tree, path):
self.plugin_name = plugin_name
self.tree = tree
self.path = path
def needles(self):
return []
class TreeToIndex(PluginConfig):
"""A TreeToIndex performs build environment setup and teardown and serves
as a repository for scratch data that should persist across an entire
indexing run.
Instances must be pickleable so as to make the journey to worker processes.
You might also want to keep the size down. It takes on the order of 2s for
a 150MB pickle to make its way across process boundaries, including
pickling and unpickling time. For this reason, we send the TreeToIndex once
and then have it index several files before sending it again.
"""
def __init__(self, plugin_name, tree, vcs_cache):
"""
:arg tree: The configuration of the tree to index: a TreeConfig
:arg vcs_cache: A :class:`~dxr.vcs.VcsCache` that describes any VCSes
used by this tree. May be None if tree does not contain any VCS
repositories.
"""
# We need source_folder, object_folder, temp_folder, and maybe
# ignore_filenames out of the tree.
self.plugin_name = plugin_name
self.tree = tree
self.vcs_cache = vcs_cache
def environment(self, vars):
"""Return environment variables to add to the build environment.
This is where the environment is commonly twiddled to activate and
parametrize compiler plugins which dump analysis data.
:arg vars: A dict of the already-set variables. You can make decisions
based on these.
You may return a new dict or scribble on ``vars`` and return it. In
either case, the returned dict is merged into those from other plugins,
with later plugins taking precedence in case of conflicting keys.
"""
return vars
def pre_build(self):
"""Hook called before the tree's build command is run
This is a good place to make a temp folder to dump said data in. You
can stash away a reference to it on me so later methods can find it.
"""
def post_build(self):
"""Hook called after the tree's build command completes
This is a good place to do any whole-program analysis, storing it on
me or on disk.
"""
def file_to_index(self, path, contents):
"""Return an object that provides data about a given file.
Return an object conforming to the interface of :class:`FileToIndex`,
generally a subclass of it.
:arg path: A path to the file to index, relative to the tree's source
folder
:arg contents: What's in the file: unicode if we managed to guess an
encoding and decode it, None otherwise
Return None if there is no indexing to do on the file.
Being a method on TreeToIndex, this can easily pass along the location
of a temp directory or other shared setup artifacts. However, beware
of passing mutable things; while the FileToIndex can mutate them,
visibility of those changes will be limited to objects in the same
worker process. Thus, a TreeToIndex-dwelling dict might be a suitable
place for a cache but unsuitable for data that can't evaporate.
If a plugin omits a TreeToIndex class,
:meth:`~dxr.plugins.Plugin.from_namespace()` constructs one
dynamically. The method implementations of that class are inherited
from this class, with one exception: a ``file_to_index()`` method is
dynamically constructed which returns a new instance of the
``FileToIndex`` class the plugin defines, if any.
"""
# This is probably the place to add whatever_indexer()s for other kinds of
# things, like modules, if we ever wanted to support some other view of
# search results than files or lines, like a D3 diagram of an inheritance
# hierarchy or call graph. We'd need to come up with some way of looping
# around those modules to let various plugins contribute. Perhaps we'd
# introduce another kind of plugin: an enumerator.
class FileToSkim(PluginConfig):
"""A source of rendering data about a file, generated at request time
This is appropriate for unindexed files (such as old revisions pulled out
of a VCS) or for data so large or cheap to produce that it's a bad tradeoff
to store it in the index. An instance of me is mostly an opportunity for a
shared cache among my methods.
"""
def __init__(self, path, contents, plugin_name, tree, file_properties=None,
line_properties=None):
"""
:arg path: The (bytestring) conceptual path to the file, relative to
the tree's source folder. Such a file might not exist on disk. This
is useful mostly as a hint for syntax coloring.
:arg contents: What's in the file: unicode if we knew or successfully
guessed an encoding, None otherwise. Don't return any by-line data
for None; the framework won't have succeeded in breaking up the
file by line for display, so there will be no useful UI for those
data to support. In fact, most skimmers won't be be able to do
anything useful with None at all. For unicode, split the file into
lines using universal newlines
(``dxr.utils.split_content_lines()``); that's what the rest of the
framework expects.
:arg tree: The :class:`~dxr.config.TreeConfig` of the tree to which
the file belongs
If the file is indexed, there will also be...
:arg file_properties: Dict of file-wide needles emitted by the indexer
:arg line_properties: List of per-line needle dicts emitted by the
indexer
"""
self.path = path
self.contents = contents
self.plugin_name = plugin_name
self.tree = tree
self.file_properties = file_properties or {}
self.line_properties = line_properties # TODO: not clear what the default here should be. repeat([])?
def is_interesting(self):
"""Return whether it's worthwhile to examine this file.
For example, if this class knows about how to analyze JS files, return
True only if ``self.path.endswith('.js')``. If something falsy is
returned, the framework won't call data-producing methods like
:meth:`~dxr.indexers.FileToSkim.links()`,
:meth:`~dxr.indexers.FileToSkim.refs()`, etc.
The default implementation selects only text files that are not symlinks.
Note: even if a plugin decides that symlinks are interesting, it should
remember that links, refs, regions and by-line annotations will not be
called because views of symlinks redirect to the original file.
"""
return self.contains_text() and not self.is_link()
def links(self):
"""Return an iterable of links for the navigation pane::
(sort order, heading, [(icon, title, href), ...])
File views will replace any {{line}} within the href with the
last-selected line number.
"""
return []
def refs(self):
"""Provide cross references for various spans of text, accessed
through a context menu.
Yield an ordered list of extents and menu items::
(start, end, ref)
``start`` and ``end`` are the bounds of a slice of a Unicode string
holding the contents of the file. (``refs()`` will not be called for
binary files.)
``ref`` is a :class:`~dxr.lines.Ref`.
"""
return []
def regions(self):
"""Yield instructions for syntax coloring and other inline formatting
of code.
Yield an ordered list of extents and CSS classes (encapsulated in
:class:`~dxr.lines.Region` instances)::
(start, end, Region)
``start`` and ``end`` are the bounds of a slice of a Unicode string
holding the contents of the file. (``regions()`` will not be called
for binary files.)
"""
return []
def annotations_by_line(self):
"""Yield extra user-readable information about each line, hidden by
default: compiler warnings that occurred there, for example.
Yield a list of annotation maps for each line::
{'title': ..., 'class': ..., 'style': ...}
"""
# TODO: Why are these just per line? Shouldn't they return extents like
# everybody else? We can still show them per line if we want.
return []
# Convenience methods:
def contains_text(self):
"""Return whether this file can be decoded and divided into lines as
text. Empty files contain text.
This may come in handy as a component of your own
:meth:`~dxr.indexers.FileToSkim.is_interesting()` methods.
"""
return isinstance(self.contents, unicode)
def char_offset(self, row, col):
"""Return the from-BOF unicode char offset for the char at the given
row and column of the file we're indexing.
This is handy for translating row- and column-oriented input to the
format :meth:`~dxr.indexers.FileToSkim.refs()` and
:meth:`~dxr.indexers.FileToSkim.regions()` want.
:arg row: The 1-based line number, according to splitting in universal
newline mode
:arg col: The 0-based column number
"""
return self._line_offsets()[row - 1] + col
# Convenience methods:
def absolute_path(self):
"""Return the (bytestring) absolute path of the file to skim.
Note: in skimmers, the returned path may not exist if the source folder
moved between index and serve time.
"""
return join(self.tree.source_folder, self.path)
def is_link(self):
"""Return whether the file is a symlink.
Note: symlinks are never displayed in file browsing; a request for a symlink redirects
to its target.
"""
return islink(self.absolute_path())
# Private methods:
def _line_offsets(self):
"""Return (and cache) a list mapping 1-based line numbers to from-BOF
Unicode offsets."""
if not hasattr(self, '_line_offset_list'):
if not self.contains_text():
raise ValueError("Can't get line offsets for a file that isn't"
" text.")
lines = split_content_lines(self.contents) if self.contents is not None else []
self._line_offset_list = build_offset_map(lines)
return self._line_offset_list
class FileToIndex(FileToSkim):
"""A source of search and rendering data about one source file"""
def __init__(self, path, contents, plugin_name, tree):
"""Analyze a file or digest an analysis that happened at compile time.
:arg path: The (bytestring) path to the file to index, relative to the
tree's source folder
:arg contents: What's in the file: unicode if we managed to guess at an
encoding and decode it, None otherwise. Don't return any by-line
data for None; the framework won't have succeeded in breaking up
the file by line for display, so there will be no useful UI for
those data to support. Think more along the lines of returning
EXIF data to search by for a JPEG. For unicode, split the file into
lines using universal newlines
(``dxr.utils.split_content_lines()``); that's what the rest of the
framework expects.
:arg tree: The :class:`~dxr.config.TreeConfig` of the tree to which
the file belongs
Initialization-time analysis results may be socked away on an instance
var. You can think of this constructor as a per-file post-build step.
You could do this in a different method, using memoization, but doing
it here makes for less code and less opportunity for error.
FileToIndex classes of plugins may take whatever constructor args they
like; it is the responsibility of their TreeToIndex objects'
:meth:`~dxr.indexers.TreeToIndex.file_to_index()` methods to supply
them. However, the ``path`` and ``contents`` instance vars should be
initialized and have the above semantics, or a lot of the provided
convenience methods and default implementations will break.
"""
# We receive the file contents from the outside for two reasons: (1) so
# we don't repeatedly redo the encoding guessing (which involves
# iterating over potentially the whole file looking for nulls) and (2)
# for symmetry with FileToSkim, so we can share many method
# implementations.
super(FileToIndex, self).__init__(path, contents, plugin_name, tree)
def needles(self):
"""Return an iterable of key-value pairs of search data about the file
as a whole: for example, modification date or file size.
Each pair becomes an elasticsearch property and its value. If the
framework encounters multiple needles of the same key (whether coming
from the same plugin or different ones), all unique values will be
retained using an elasticsearch array.
"""
# We go with pairs rather than a map so we can just chain all these
# together and pass them to a dict constructor: fewer temp vars.
return []
def needles_by_line(self):
"""Return per-line search data for one file: for example, markers that
indicate a function called "foo" is defined on a certain line.
Yield an iterable of key-value pairs for each of a file's lines, one
iterable per line, in order. The data might be data to search on or
data stowed away for a later realtime thing to generate refs or
regions from. In any case, each pair becomes an elasticsearch property
and its value.
If the framework encounters multiple needles of the same key on the
same line (whether coming from the same plugin or different ones), all
unique values will be retained using an elasticsearch array. Values
may be dicts, in which case common keys get merged by
:func:`~dxr.utils.append_update()`.
This method is not called on symlink files, to maintain the illusion
that they do not have contents, seeing as they cannot be viewed in
file browsing.
"""
return []
# Conveniences:
Extent = namedtuple('Extent', ['start', 'end']) # 0-based
Position = namedtuple('Position', ['row', 'col']) # col 0-based, row 1-based
class FuncSig(namedtuple('FuncSig', ['inputs', 'output'])):
def __str__(self):
return '{0} -> {1}'.format(
tuple(self.inputs), self.output).replace("'", '').replace('"', '')
@decorator
def unsparsify(call):
"""Transform a sparse needle list [(key, val, span:Extent)] into the
line-by-line format needles_by_line expects: [[(key, val)]].
"""
return group_needles(by_line(call()))
# Deprecated in favor of iterable_per_line()
def group_needles(line_needles):
"""Group line needles by line, and return a list of needles for each line,
up to the last line with any needles::
[(a, 1), (b, 4), (c, 4)] -> [[a], [], [], [b, c]]
"""
# Jam all the needles of a file into a hash by line number:
line_map = group_by(itemgetter(1), line_needles) # {line: needles}
last_line = max(line_map.iterkeys()) + 1 if line_map else 1
# Pull out the needles for each line, stripping off the line number
# elements of the tuples and producing a blank list for missing lines.
# (The defaultdict returned from group_by takes care of the latter.)
return [[pair for (pair, _) in line_map[line_num]]
for line_num in xrange(1, last_line)]
# Deprecated
def by_line(span_needles):
"""Transform [(_, span:Extent)] into [(_, line:int)].
Converts spans to lines. The resulting iter will have len' >= len.
"""
return ((key_object_pair(*kv_start_end), line_number) for
kv_start_end, line_number in imapcat(span_to_lines, span_needles))
# Deprecated in favor of with_start_and_end()
def key_object_pair((k, v), start, end):
"""Transform a key/value pair, along with start and end columns, to a
key/multi-propertied-object pair that can be stored in elasticsearch and
then used to support searching and highlighting.
"""
return k, {'value': v, 'start': start, 'end': end}
# Deprecated in favor of split_into_lines()
def span_to_lines((kv, span)):
"""Expand ((k, v), span:Extent) into [(((k, v), line_span), line:int)].
line_span has shape: (col1, col2)
"""
if span.end.row == span.start.row:
yield (kv, span.start.col, span.end.col), span.start.row
elif span.end.row < span.start.row:
warn('Bad Extent: end.row < start.row: %s < %s' %
(span.end.row, span.start.row))
else:
# TODO: There are a lot of Nones used as slice bounds below. Do we
# ever translate them back into char offsets? If not, does the
# highlighter or anything else choke on them?
yield (kv, span.start.col, None), span.start.row
# Really wish we could use yield from
for row in xrange(span.start.row + 1, span.end.row):
yield (kv, 0, None), row
yield (kv, 0, span.end.col), span.end.row
def split_into_lines(triples):
"""Split a bunch of (key, mapping, extent) triples into more triples
than those, with each one contained in a line.
"""
def _split_one((key, mapping, extent)):
"""Split a single triple into one or more, each spanning at most one
line.
"""
if extent.end.row == extent.start.row:
yield key, mapping, extent
elif extent.end.row < extent.start.row:
# This indicates a bug in an indexer plugin.
warn('Bad extent: end.row < start.row: %s < %s' %
(extent.end.row, extent.start.row))
else:
# TODO: There are a lot of Nones used as slice bounds below. Do we
# ever translate them back into char offsets? If not, does the
# highlighter or anything else choke on them?
yield key, mapping, Extent(Position(row=extent.start.row,
col=extent.start.col),
Position(row=extent.start.row,
col=None))
# Really wish we could use yield from
for row in xrange(extent.start.row + 1, extent.end.row):
yield key, mapping, Extent(Position(row=row,
col=0),
Position(row=row,
col=None))
yield key, mapping, Extent(Position(row=extent.end.row,
col=0),
Position(row=extent.end.row,
col=extent.end.col))
return imapcat(_split_one, triples)
def with_start_and_end(triples):
"""Add 'start' and 'end' column keys to the value mappings of one-line
triples, and yield them back.
"""
for key, mapping, extent in triples:
mapping['start'] = extent.start.col
mapping['end'] = extent.end.col
yield key, mapping, extent
def iterable_per_line(triples):
"""Yield iterables of (key, value mapping), one for each line."""
# Jam all the triples of a file into a hash by line number:
line_map = group_by(lambda (k, v, extent): extent.start.row, triples) # {line: triples}
last_line = max(line_map.iterkeys()) + 1 if line_map else 1
# Pull out the needles for each line, stripping off the extents and
# producing a blank list for missing lines. (The defaultdict returned from
# group_by takes care of the latter.)
return [[(k, v) for (k, v, e) in line_map[line_num]]
for line_num in xrange(1, last_line)]
# If this has to be generic so we can use it on annotations_by_line as well, pass in a key function that extracts the line number and maybe another that constructs the return value.
def iterable_per_line_sorted(triples):
"""Yield iterables of (key, value mapping), one for each line, where triples are sorted already."""
last_row = 1
last_row_kvs = []
for k, v, extent in triples:
if extent.start.row == last_row:
last_row_kvs.append((k, v))
else:
yield last_row_kvs
# Yield empty lists for any skipped lines.
for _ in xrange(last_row + 1, extent.start.row):
yield []
last_row_kvs = [(k, v)]
last_row = extent.start.row
# Emit anything on the last line.
yield last_row_kvs
| mit | -9,087,692,361,082,704,000 | 37.809365 | 185 | 0.629137 | false |
idegtiarov/ceilometer | ceilometer/tests/unit/dispatcher/test_http.py | 1 | 4503 | #
# Copyright 2013 IBM Corp
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
import mock
from oslo_config import fixture as fixture_config
from oslotest import base
import requests
from ceilometer.dispatcher import http
from ceilometer.event.storage import models as event_models
from ceilometer.publisher import utils
class TestDispatcherHttp(base.BaseTestCase):
def setUp(self):
super(TestDispatcherHttp, self).setUp()
self.CONF = self.useFixture(fixture_config.Config()).conf
self.msg = {'counter_name': 'test',
'resource_id': self.id(),
'counter_volume': 1,
}
self.msg['message_signature'] = utils.compute_signature(
self.msg, self.CONF.publisher.telemetry_secret,
)
def test_http_dispatcher_config_options(self):
self.CONF.dispatcher_http.target = 'fake'
self.CONF.dispatcher_http.timeout = 2
dispatcher = http.HttpDispatcher(self.CONF)
self.assertEqual('fake', dispatcher.target)
self.assertEqual(2, dispatcher.timeout)
def test_http_dispatcher_with_no_target(self):
self.CONF.dispatcher_http.target = ''
dispatcher = http.HttpDispatcher(self.CONF)
# The target should be None
self.assertEqual('', dispatcher.target)
with mock.patch.object(requests, 'post') as post:
dispatcher.record_metering_data(self.msg)
# Since the target is not set, no http post should occur, thus the
# call_count should be zero.
self.assertEqual(0, post.call_count)
def test_http_dispatcher_with_no_metadata(self):
self.CONF.dispatcher_http.target = 'fake'
dispatcher = http.HttpDispatcher(self.CONF)
with mock.patch.object(requests, 'post') as post:
dispatcher.record_metering_data(self.msg)
self.assertEqual(1, post.call_count)
class TestEventDispatcherHttp(base.BaseTestCase):
def setUp(self):
super(TestEventDispatcherHttp, self).setUp()
self.CONF = self.useFixture(fixture_config.Config()).conf
def test_http_dispatcher(self):
self.CONF.dispatcher_http.event_target = 'fake'
dispatcher = http.HttpDispatcher(self.CONF)
event = event_models.Event(uuid.uuid4(), 'test',
datetime.datetime(2012, 7, 2, 13, 53, 40),
[], {})
event = utils.message_from_event(event,
self.CONF.publisher.telemetry_secret)
with mock.patch.object(requests, 'post') as post:
dispatcher.record_events(event)
self.assertEqual(1, post.call_count)
def test_http_dispatcher_bad(self):
self.CONF.dispatcher_http.event_target = ''
dispatcher = http.HttpDispatcher(self.CONF)
event = event_models.Event(uuid.uuid4(), 'test',
datetime.datetime(2012, 7, 2, 13, 53, 40),
[], {})
event = utils.message_from_event(event,
self.CONF.publisher.telemetry_secret)
with mock.patch('ceilometer.dispatcher.http.LOG',
mock.MagicMock()) as LOG:
dispatcher.record_events(event)
self.assertTrue(LOG.exception.called)
def test_http_dispatcher_share_target(self):
self.CONF.dispatcher_http.target = 'fake'
dispatcher = http.HttpDispatcher(self.CONF)
event = event_models.Event(uuid.uuid4(), 'test',
datetime.datetime(2012, 7, 2, 13, 53, 40),
[], {})
event = utils.message_from_event(event,
self.CONF.publisher.telemetry_secret)
with mock.patch.object(requests, 'post') as post:
dispatcher.record_events(event)
self.assertEqual('fake', post.call_args[0][0])
| apache-2.0 | -2,222,199,186,189,764,900 | 36.214876 | 78 | 0.618699 | false |
gratipay/gratipay.com | tests/py/test_project_review_process.py | 1 | 3272 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from cStringIO import StringIO
import mock
from gratipay.testing import T
from gratipay.testing.email import QueuedEmailHarness
from pytest import raises
from gratipay.project_review_process import ConsolePoster, ProjectReviewProcess
from gratipay.exceptions import NoTeams
class ENV_GH(object):
project_review_repo = 'some/repo'
project_review_username = 'cheeseburger'
project_review_token = 'di3tc0ke'
class ENV(object):
project_review_repo = ''
project_review_username = ''
project_review_token = ''
class Tests(QueuedEmailHarness):
def setUp(self):
QueuedEmailHarness.setUp(self)
self.project_review_process = ProjectReviewProcess(ENV, self.db, self.app.email_queue)
def test_console_poster_posts_to_fp(self):
fp = StringIO()
poster = ConsolePoster(fp)
poster.post('{"blah": "blah blah"}')
fp.seek(0)
assert fp.read() == '''\
------------------------------------------------------------------------------
{u'blah': u'blah blah'}
------------------------------------------------------------------------------
'''
@mock.patch('gratipay.project_review_process.requests.post')
def test_github_poster_attempts_to_post_to_github(self, post):
foo = self.make_team(name='foo')
bar = self.make_team(name='bar')
baz = self.make_team(name='baz')
post.return_value = ''
ProjectReviewProcess(ENV_GH, self.db, self.app.email_queue).start(foo, bar, baz)
assert post.call_count == 1
args, kwargs = post.mock_calls[0][1:]
assert args[0] == 'https://api.github.com/repos/some/repo/issues'
assert kwargs['data'] == (
'{"body": "*This application will remain open for at least a week.*\\n\\n'
'## Projects\\n\\nhttps://gratipay.com/foo/\\nhttps://gratipay.com/bar/\\n'
'https://gratipay.com/baz/\\n\\n'
'## Badge\\n\\n'
'Add a [badge](http://shields.io/) to your README?\\n\\n'
'[](https://gratipay.com/foo/)\\n\\n'
'```markdown\\n'
'[](https://gratipay.com/foo/)\\n'
'```", "title": "foo and 2 other projects"}')
assert kwargs['auth'] == ('cheeseburger', 'di3tc0ke')
def test_team_objects_get_review_url(self):
foo = self.make_team(name='foo')
assert foo.review_url is None
self.project_review_process.start(foo)
assert foo.review_url == T('foo').review_url == 'some-github-issue'
def test_owner_gets_an_email_notification(self):
foo = self.make_team(name='foo')
self.project_review_process.start(foo)
assert self.get_last_email()['subject'] == 'We have your application!'
def test_no_teams_raises(self):
raises(NoTeams, self.project_review_process.start)
def test_multiple_owners_raises(self):
foo = self.make_team(name='foo')
bar = self.make_team(name='bar', owner='crusher')
raises(AssertionError, self.project_review_process.start, foo, bar)
| mit | -8,832,342,293,843,735,000 | 34.565217 | 109 | 0.600856 | false |
iniverno/RnR-LLC | simics-3.0-install/simics-3.0.31/amd64-linux/lib/leon2_components.py | 1 | 10174 | # MODULE: leon2-components
# CLASS: leon2-simple
from sim_core import *
from components import *
def _make_leon_cfg(dsu, sdram, wpts, mac, nwin, icsz, ilsz, dcsz, dlsz, div,
mul, wdog, mst, fpu, pci, wp):
leoncfg = 0
leoncfg = leoncfg | dsu << 30 # Debug Support Unit
leoncfg = leoncfg | sdram << 29 # SDRAM Controller
leoncfg = leoncfg | wpts << 26 # N Watchpoints
leoncfg = leoncfg | mac << 25 # MAC Instr Available
leoncfg = leoncfg | (nwin-1) << 20 # Number of Register Windows
leoncfg = leoncfg | icsz << 17 # I-Cache Size
leoncfg = leoncfg | ilsz << 15 # I-Cache Line Size
leoncfg = leoncfg | dcsz << 12 # D-Cache Size
leoncfg = leoncfg | dlsz << 10 # D-Cache Line Size
leoncfg = leoncfg | div << 9 # Integer Divide Instructions Enabled
leoncfg = leoncfg | mul << 8 # Integer Multiply Instructions Enabled
leoncfg = leoncfg | wdog << 7 # Watchdog Present
leoncfg = leoncfg | mst << 6 # Mem Stat and Fail Addr Regs Available
leoncfg = leoncfg | fpu << 4 # FPU Type (00 = None, 01 = Meiko)
leoncfg = leoncfg | pci << 2 # PCI Core (00 = None, 01 = InSilicon,
# 10 = ESA, 11 = Other)
leoncfg = leoncfg | wp << 0 # Write Protection Type (00 = None,
# 01 = Standard)
return leoncfg
class leon2_simple(component_object):
classname = 'leon2-simple'
basename = 'system'
description = ('A simple LEON2 based component including a CPU and some memory.')
connectors = {
'uart1' : {'type' : 'serial', 'direction' : 'down',
'empty_ok' : True, 'hotplug' : True, 'multi' : False},
'uart2' : {'type' : 'serial', 'direction' : 'down',
'empty_ok' : True, 'hotplug' : True, 'multi' : False},
}
def __init__(self, parse_obj):
component_object.__init__(self, parse_obj)
def get_cpu_frequency(self, idx):
return self.freq_mhz
def set_cpu_frequency(self, val, idx):
if self.obj.configured:
return Sim_Set_Illegal_Value
self.freq_mhz = val
return Sim_Set_Ok
def get_prom_size(self, idx):
return self.prom_size
def set_prom_size(self, val, idx):
if self.obj.configured:
return Sim_Set_Illegal_Value
self.prom_size = val
return Sim_Set_Ok
def get_sram_size(self, idx):
return self.sram_size
def set_sram_size(self, val, idx):
if self.obj.configured:
return Sim_Set_Illegal_Value
self.sram_size = val
return Sim_Set_Ok
def set_has_sram(self, val, idx):
if self.obj.configured:
return Sim_Set_Illegal_Value
self.has_sram = val
return Sim_Set_Ok
def get_has_sram(self, idx):
return self.has_sram
def get_sdram_size(self, idx):
return self.sdram_size
def set_sdram_size(self, val, idx):
if self.obj.configured:
return Sim_Set_Illegal_Value
self.sdram_size = val
return Sim_Set_Ok
def set_num_windows(self, val, idx):
if self.obj.configured:
return Sim_Set_Illegal_Value
# must be a power of two and in the range [2, 32]
if ((val & (val - 1)) != 0) and (val < 2) and (val > 32):
return Sim_Set_Illegal_Value
self.num_windows = val
return Sim_Set_Ok
def set_has_v8e_mac(self, val, idx):
if self.obj.configured:
return Sim_Set_Illegal_Value
self.has_v8e_mac = val
return Sim_Set_Ok
def set_has_v8_mul(self, val, idx):
if self.obj.configured:
return Sim_Set_Illegal_Value
self.has_v8_mul = val
return Sim_Set_Ok
def set_has_v8_div(self, val, idx):
if self.obj.configured:
return Sim_Set_Illegal_Value
self.has_v8_div = val
return Sim_Set_Ok
def get_num_windows(self, idx):
return self.num_windows
def get_has_v8e_mac(self, idx):
return self.has_v8e_mac
def get_has_v8_mul(self, idx):
return self.has_v8_mul
def get_has_v8_div(self, idx):
return self.has_v8_div
def add_objects(self):
self.o.amba = pre_obj('amba$', 'memory-space')
self.o.cpu = pre_obj('cpu$', 'leon2')
self.o.cpu.processor_number = get_next_cpu_number()
self.o.cpu.freq_mhz = self.freq_mhz
self.o.cpu.physical_memory = self.o.amba
self.o.cpu.num_windows = self.num_windows
self.o.cpu.has_v8e_mac = self.has_v8e_mac
self.o.cpu.has_v8_mul = self.has_v8_mul
self.o.cpu.has_v8_div = self.has_v8_div
# Interrupt controller
self.o.irq1 = pre_obj("irq$", "leon2_irq")
self.o.irq1.cpu = self.o.cpu
self.o.irq1.queue = self.o.cpu
self.o.cpu.interrupt_controller = self.o.irq1
# Onchip prom
self.o.prom_image = pre_obj('prom$_image', 'image')
self.o.prom_image.size = self.prom_size
self.o.prom_memory = pre_obj('prom$_memory', 'rom')
self.o.prom_memory.image = self.o.prom_image
# Onchip sram / sdram
self.o.sram_image = pre_obj('sram$_image', 'image')
self.o.sram_image.size = self.sram_size
self.o.sram_memory = pre_obj('sram$_memory', 'ram')
self.o.sram_memory.image = self.o.sram_image
self.o.sdram_image = pre_obj('sdram$_image', 'image')
self.o.sdram_image.size = self.sdram_size
self.o.sdram_memory = pre_obj('sdram$_memory', 'ram')
self.o.sdram_memory.image = self.o.sdram_image
# UARTS
self.o.uart1 = pre_obj('uart$', 'leon2_uart')
self.o.uart1.irq = self.o.irq1
self.o.uart1.interrupt = 3
self.o.uart1.queue = self.o.cpu
self.o.uart2 = pre_obj('uart$', 'leon2_uart')
self.o.uart2.irq = self.o.irq1
self.o.uart2.interrupt = 2
self.o.uart2.queue = self.o.cpu
# Timer
self.o.timer = pre_obj("timer$", "leon2_timer")
self.o.timer.irq = self.o.irq1
self.o.timer.queue = self.o.cpu
# Configuration registers (power down reg, memcfg, ccr, etc)
self.o.cfg = pre_obj("cfg$", "leon2_cfg")
self.o.cfg.cpu = self.o.cpu
# Set the LEON2 configuration register
sdram = 1
if self.sdram_size == 0:
sdram = 0
self.o.cfg.b_leonconfig = _make_leon_cfg(0, sdram, 0, self.has_v8e_mac,
self.num_windows, 0, 0, 0, 0,
self.has_v8_div, self.has_v8_mul,
0, 0, 1, 0, 0)
# Parallel IO
self.o.ioport = pre_obj("ioport$", "leon2_ioport")
# Ethernet
self.o.eth = pre_obj("eth$", "opencores_eth")
self.o.eth.irq_ctrl = self.o.irq1
self.o.amba.map = [
[0x00000000, self.o.prom_memory, 0, 0, self.prom_size],
[0x80000000, self.o.cfg, 0, 0, 0x28],
[0x80000040, self.o.timer, 0, 0, 0x28],
[0x80000070, self.o.uart1, 0, 0, 16],
[0x80000080, self.o.uart2, 0, 0, 16],
[0x80000090, self.o.irq1, 0, 0, 16],
[0x800000a0, self.o.ioport, 0, 0, 12],
[0xb0000000, self.o.eth, 0, 0, 0x0001ffff]]
# physical memory map
if self.has_sram == 1:
self.o.amba.map = self.o.amba.map + [
[0x00000000, self.o.prom_memory, 0, 0, self.prom_size],
[0x40000000, self.o.sram_memory, 0, 0, self.sram_size],
[0x60000000, self.o.sdram_memory, 0, 0, self.sdram_size]]
else:
self.o.amba.map = self.o.amba.map + [
[0x00000000, self.o.prom_memory, 0, 0, self.prom_size],
[0x40000000, self.o.sdram_memory, 0, 0, self.sdram_size]]
def add_connector_info(self):
self.connector_info['uart1'] = [None, self.o.uart1, self.o.uart1.name]
self.connector_info['uart2'] = [None, self.o.uart2, self.o.uart2.name]
def connect_serial(self, connector, link, console):
if connector == 'uart1':
if link:
self.o.uart1.link = link
else:
self.o.uart1.console = console
elif connector == 'uart2':
if link:
self.o.uart2.link = link
else:
self.o.uart2.console = console
def disconnect_serial(self, connector):
if connector == 'uart1':
self.o.uart1.console = None
elif connector == 'uart2':
self.o.uart2.console = None
def instantiation_done(self):
component_object.instantiation_done(self)
conf.sim.handle_outside_memory = 1
def get_clock(self):
return self.o.cpu
def get_processors(self):
return [self.o.cpu]
leon2_simple_attributes = [
['cpu_frequency', Sim_Attr_Required, 'f',
'Processor frequency in MHz.'],
['prom_size', Sim_Attr_Required, 'i',
'Size of PROM in bytes'],
['has_sram', Sim_Attr_Required, 'i',
'True if SRAM is available (if so, SDRAM starts at 0x60000000)'],
['sram_size', Sim_Attr_Required, 'i',
'Size of SRAM in bytes'],
['sdram_size', Sim_Attr_Required, 'i',
'Size of SDRAM in bytes'],
['num_windows', Sim_Attr_Required, 'i',
'Number of register windows, (must be a power of 2)'],
['has_v8e_mac', Sim_Attr_Required, 'b',
'TRUE if the V8E UMAC / SMAC instructions are to be allowed'],
['has_v8_mul', Sim_Attr_Required, 'b',
'TRUE if the V8 IMUL instructions are to be allowed'],
['has_v8_div', Sim_Attr_Required, 'b',
'TRUE if the V8 IDIV instructions are to be allowed']]
register_component_class(
leon2_simple,
leon2_simple_attributes,
top_level = True)
| gpl-2.0 | 7,219,553,931,094,871,000 | 35.597122 | 85 | 0.546098 | false |
leoc/home-assistant | homeassistant/components/climate/nest.py | 1 | 6636 | """
Support for Nest thermostats.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.nest/
"""
import logging
import voluptuous as vol
import homeassistant.components.nest as nest
from homeassistant.components.climate import (
STATE_AUTO, STATE_COOL, STATE_HEAT, ClimateDevice,
PLATFORM_SCHEMA, ATTR_TARGET_TEMP_HIGH, ATTR_TARGET_TEMP_LOW,
ATTR_TEMPERATURE)
from homeassistant.const import (
TEMP_CELSIUS, CONF_SCAN_INTERVAL, STATE_ON, STATE_OFF, STATE_UNKNOWN)
from homeassistant.util.temperature import convert as convert_temperature
DEPENDENCIES = ['nest']
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_SCAN_INTERVAL):
vol.All(vol.Coerce(int), vol.Range(min=1)),
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Nest thermostat."""
temp_unit = hass.config.units.temperature_unit
add_devices([NestThermostat(structure, device, temp_unit)
for structure, device in nest.devices()])
# pylint: disable=abstract-method,too-many-public-methods
class NestThermostat(ClimateDevice):
"""Representation of a Nest thermostat."""
def __init__(self, structure, device, temp_unit):
"""Initialize the thermostat."""
self._unit = temp_unit
self.structure = structure
self.device = device
self._fan_list = [STATE_ON, STATE_AUTO]
self._operation_list = [STATE_HEAT, STATE_COOL, STATE_AUTO,
STATE_OFF]
@property
def name(self):
"""Return the name of the nest, if any."""
location = self.device.where
name = self.device.name
if location is None:
return name
else:
if name == '':
return location.capitalize()
else:
return location.capitalize() + '(' + name + ')'
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
# Move these to Thermostat Device and make them global
return {
"humidity": self.device.humidity,
"target_humidity": self.device.target_humidity,
}
@property
def current_temperature(self):
"""Return the current temperature."""
return self.device.temperature
@property
def current_operation(self):
"""Return current operation ie. heat, cool, idle."""
if self.device.mode == 'cool':
return STATE_COOL
elif self.device.mode == 'heat':
return STATE_HEAT
elif self.device.mode == 'range':
return STATE_AUTO
elif self.device.mode == 'off':
return STATE_OFF
else:
return STATE_UNKNOWN
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
if self.device.mode != 'range' and not self.is_away_mode_on:
return self.device.target
else:
return None
@property
def target_temperature_low(self):
"""Return the lower bound temperature we try to reach."""
if self.is_away_mode_on and self.device.away_temperature[0]:
# away_temperature is always a low, high tuple
return self.device.away_temperature[0]
if self.device.mode == 'range':
return self.device.target[0]
else:
return None
@property
def target_temperature_high(self):
"""Return the upper bound temperature we try to reach."""
if self.is_away_mode_on and self.device.away_temperature[1]:
# away_temperature is always a low, high tuple
return self.device.away_temperature[1]
if self.device.mode == 'range':
return self.device.target[1]
else:
return None
@property
def is_away_mode_on(self):
"""Return if away mode is on."""
return self.structure.away
def set_temperature(self, **kwargs):
"""Set new target temperature."""
if kwargs.get(ATTR_TARGET_TEMP_LOW) is not None and \
kwargs.get(ATTR_TARGET_TEMP_HIGH) is not None:
target_temp_high = convert_temperature(kwargs.get(
ATTR_TARGET_TEMP_HIGH), self._unit, TEMP_CELSIUS)
target_temp_low = convert_temperature(kwargs.get(
ATTR_TARGET_TEMP_LOW), self._unit, TEMP_CELSIUS)
if self.device.mode == 'range':
temp = (target_temp_low, target_temp_high)
else:
temp = kwargs.get(ATTR_TEMPERATURE)
_LOGGER.debug("Nest set_temperature-output-value=%s", temp)
self.device.target = temp
def set_operation_mode(self, operation_mode):
"""Set operation mode."""
if operation_mode == STATE_HEAT:
self.device.mode = 'heat'
elif operation_mode == STATE_COOL:
self.device.mode = 'cool'
elif operation_mode == STATE_AUTO:
self.device.mode = 'range'
elif operation_mode == STATE_OFF:
self.device.mode = 'off'
@property
def operation_list(self):
"""List of available operation modes."""
return self._operation_list
def turn_away_mode_on(self):
"""Turn away on."""
self.structure.away = True
def turn_away_mode_off(self):
"""Turn away off."""
self.structure.away = False
@property
def current_fan_mode(self):
"""Return whether the fan is on."""
return STATE_ON if self.device.fan else STATE_AUTO
@property
def fan_list(self):
"""List of available fan modes."""
return self._fan_list
def set_fan_mode(self, fan):
"""Turn fan on/off."""
self.device.fan = fan.lower()
@property
def min_temp(self):
"""Identify min_temp in Nest API or defaults if not available."""
temp = self.device.away_temperature.low
if temp is None:
return super().min_temp
else:
return temp
@property
def max_temp(self):
"""Identify max_temp in Nest API or defaults if not available."""
temp = self.device.away_temperature.high
if temp is None:
return super().max_temp
else:
return temp
def update(self):
"""Python-nest has its own mechanism for staying up to date."""
pass
| mit | 3,391,957,943,504,084,000 | 31.851485 | 74 | 0.602471 | false |
bazwilliams/openhomedevice | tests/DidlLiteTest.py | 1 | 4675 | import unittest
from openhomedevice.didl_lite import generate_string, parse, parse_duration, parse_int
class DidlLiteTests(unittest.TestCase):
def test_int_parsing(self):
self.assertEqual(parse_duration("42"), 42)
self.assertEqual(parse_duration("42.5"), 42)
self.assertIsNone(parse_int("forty"))
self.assertIsNone(parse_int(None))
def test_duration_parsing(self):
self.assertEqual(parse_duration("0:07:40.000"), 460)
self.assertEqual(parse_duration("1:00.000"), 60)
self.assertEqual(parse_duration("42.000"), 42)
self.assertEqual(parse_duration("2:0.5"), 120)
self.assertIsNone(parse_duration("forty"))
self.assertIsNone(parse_duration(None))
def test_parse_empty_didlite(self):
result = parse(None)
self.assertEqual(result, {})
def test_parse_corrupt_didlite(self):
result = parse(
'<DIDL-Lite xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/" xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"><item id="" parentID="" restricted="True"><dc:title></dc:title><res protocolInfo="*:*:*:*"></res><upnp:albumArtURI></upnp:albumArtURI><upnp:class>object.item.audioItem</upnp:class></itemX></DIDL-Lite>'
)
self.assertEqual(result, {})
def test_parse_didlite_missing_item(self):
result = parse(
'<DIDL-Lite xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/" xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"></DIDL-Lite>'
)
self.assertEqual(result, {})
def test_empty_track_details(self):
track_details = {}
result = generate_string(track_details)
self.assertEqual(
result,
'<DIDL-Lite xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/" xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"><item id="" parentID="" restricted="True"><dc:title></dc:title><res protocolInfo="*:*:*:*"></res><upnp:albumArtURI></upnp:albumArtURI><upnp:class>object.item.audioItem</upnp:class></item></DIDL-Lite>',
)
def test_track_details_title_is_none(self):
track_details = {}
track_details["title"] = None
result = generate_string(track_details)
self.assertEqual(
result,
'<DIDL-Lite xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/" xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"><item id="" parentID="" restricted="True"><dc:title></dc:title><res protocolInfo="*:*:*:*"></res><upnp:albumArtURI></upnp:albumArtURI><upnp:class>object.item.audioItem</upnp:class></item></DIDL-Lite>',
)
def test_track_details_uri_is_none(self):
track_details = {}
track_details["uri"] = None
result = generate_string(track_details)
self.assertEqual(
result,
'<DIDL-Lite xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/" xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"><item id="" parentID="" restricted="True"><dc:title></dc:title><res protocolInfo="*:*:*:*"></res><upnp:albumArtURI></upnp:albumArtURI><upnp:class>object.item.audioItem</upnp:class></item></DIDL-Lite>',
)
def test_track_details_albumArtwork_is_none(self):
track_details = {}
track_details["albumArtwork"] = None
result = generate_string(track_details)
self.assertEqual(
result,
'<DIDL-Lite xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/" xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"><item id="" parentID="" restricted="True"><dc:title></dc:title><res protocolInfo="*:*:*:*"></res><upnp:albumArtURI></upnp:albumArtURI><upnp:class>object.item.audioItem</upnp:class></item></DIDL-Lite>',
)
def test_track_details(self):
track_details = {}
track_details["albumArtwork"] = "ALBUMARTWORK"
track_details["title"] = "TITLE"
track_details["uri"] = "URI"
result = generate_string(track_details)
self.assertEqual(
result,
'<DIDL-Lite xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/" xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"><item id="" parentID="" restricted="True"><dc:title>TITLE</dc:title><res protocolInfo="*:*:*:*">URI</res><upnp:albumArtURI>ALBUMARTWORK</upnp:albumArtURI><upnp:class>object.item.audioItem</upnp:class></item></DIDL-Lite>',
)
| mit | -3,629,042,253,428,072,000 | 56.716049 | 395 | 0.643422 | false |
Barrog/C4-Datapack | data/jscript/quests/8_AnAdventureBegins/__init__.py | 1 | 2997 | # Created by CubicVirtuoso
# Any problems feel free to drop by #l2j-datapack on irc.freenode.net
import sys
from net.sf.l2j.gameserver.model.quest import State
from net.sf.l2j.gameserver.model.quest import QuestState
from net.sf.l2j.gameserver.model.quest.jython import QuestJython as JQuest
#NPCs
JASMINE = 7134
ROSELYN = 7355
HARNE = 7144
#ITEM
ROSELYNS_NOTE = 7573
#REWARDS
ADENA = 57
SCROLL_OF_ESCAPE_GIRAN = 7559
MARK_OF_TRAVELER = 7570
class Quest (JQuest) :
def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)
def onEvent (self,event,st) :
htmltext = event
if event == "7134-03.htm" :
st.set("cond","1")
st.setState(STARTED)
st.playSound("ItemSound.quest_accept")
elif event == "7355-02.htm" :
st.giveItems(ROSELYNS_NOTE,1)
st.set("cond","2")
st.set("id","2")
st.playSound("ItemSound.quest_middle")
elif event == "7144-02.htm" :
st.takeItems(ROSELYNS_NOTE,-1)
st.set("cond","3")
st.set("id","3")
st.playSound("ItemSound.quest_middle")
elif event == "7134-06.htm" :
st.giveItems(SCROLL_OF_ESCAPE_GIRAN,1)
st.giveItems(MARK_OF_TRAVELER, 1)
st.set("cond","0")
st.setState(COMPLETED)
st.playSound("ItemSound.quest_finish")
return htmltext
def onTalk (Self,npc,st):
htmltext = "<html><head><body>I have nothing to say you</body></html>"
npcId = npc.getNpcId()
cond = st.getInt("cond")
id = st.getState()
if id == CREATED :
st.set("cond","0")
if st.getPlayer().getRace().ordinal() == 2 :
if st.getPlayer().getLevel() >= 3 :
htmltext = "7134-02.htm"
else :
htmltext = "<html><head><body>Quest for characters level 3 and above.</body></html>"
st.exitQuest(1)
else :
htmltext = "7134-01.htm"
st.exitQuest(1)
elif npcId == JASMINE and id == COMPLETED :
htmltext = "<html><head><body>I can't supply you with another Giran Scroll of Escape. Sorry traveller.</body></html>"
elif npcId == JASMINE and cond == 1 :
htmltext = "7134-04.htm"
elif npcId == ROSELYN and cond :
if st.getQuestItemsCount(ROSELYNS_NOTE) == 0 :
htmltext = "7355-01.htm"
else :
htmltext = "7355-03.htm"
elif npcId == HARNE and cond == 2 and st.getQuestItemsCount(ROSELYNS_NOTE) > 0 :
htmltext = "7144-01.htm"
elif npcId == JASMINE and cond == 3 :
htmltext = "7134-05.htm"
return htmltext
QUEST = Quest(8,"8_AnAdventureBegins","An Adventure Begins")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(JASMINE)
CREATED.addTalkId(JASMINE)
COMPLETED.addTalkId(JASMINE)
STARTED.addTalkId(JASMINE)
STARTED.addTalkId(ROSELYN)
STARTED.addTalkId(HARNE)
STARTED.addQuestDrop(JASMINE,ROSELYNS_NOTE,1)
print "importing quests: 8: An Adventure Begins"
| gpl-2.0 | -565,340,082,806,344,960 | 29.272727 | 123 | 0.634968 | false |
USGSDenverPychron/pychron | pychron/processing/analyses/view/error_components_view.py | 1 | 4244 | # ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from enable.component_editor import ComponentEditor
from traits.api import HasTraits, List, Str, Float, Bool
from traitsui.api import View, UItem, VGroup, VSplit
from traitsui.editors import TableEditor
from traitsui.table_column import ObjectColumn
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.core.helpers.formatting import floatfmt
from pychron.processing.analyses.view.magnitude_editor import MagnitudeColumn
from pychron.pychron_constants import INTERFERENCE_KEYS
# class ErrorComponentAdapter(TabularAdapter):
# columns=[('Component', 'name'), ('Value', 'value')]
# value_text = Property
#
# def _get_value_text(self):
# return floatfmt(self.item.value, n=2)
class ErrorComponent(HasTraits):
name = Str
value = Float
class ErrorComponentsView(HasTraits):
name = 'Error Components'
error_components = List
# pie_canvas = Instance(PieChartCanvas, ())
pie_enabled = Bool(False)
def __init__(self, an, *args, **kw):
super(ErrorComponentsView, self).__init__(*args, **kw)
self._load(an)
def _load(self, an):
es = []
for k in an.isotope_keys:
iso = an.isotopes[k]
es.append(ErrorComponent(name=k,
value=iso.age_error_component))
for k in an.isotope_keys:
d = '{} D'.format(k)
es.append(ErrorComponent(name=d,
value=an.get_error_component(d)))
for k in an.isotope_keys:
d = '{} bk'.format(k)
es.append(ErrorComponent(name=d,
value=an.get_error_component(d)))
for k in INTERFERENCE_KEYS + ('J',):
v = an.get_error_component(k)
es.append(ErrorComponent(name=k, value=v))
# for var, error in an.uage.error_components().items():
# print var.tag
# print sum([e.value for e in es])
self.error_components = es
# self.pie_canvas.load_scene(es)
def traits_view(self):
cols = [ObjectColumn(name='name', label='Component'),
MagnitudeColumn(name='value',
label='',
width=200),
ObjectColumn(name='value', label='Value',
format_func=lambda x: floatfmt(x, n=2))]
editor = TableEditor(columns=cols,
sortable=False,
editable=False)
v = View(VGroup(
# Item('pie_enabled', label='Show Pie Chart',
# visible_when='pie_enabled'),
# HGroup(Item('pie_enabled', label='Show Pie Chart')),
VGroup(
UItem('error_components', editor=editor),
visible_when='not pie_enabled'),
VSplit(
UItem('error_components', editor=editor),
UItem('pie_canvas', editor=ComponentEditor()),
visible_when='pie_enabled')))
return v
# def traits_view(self):
# v = View(UItem('error_components',
# editor=TabularEditor(adapter=ErrorComponentAdapter(),
# editable=False)))
# return v
# ============= EOF =============================================
| apache-2.0 | 4,124,247,835,074,852,400 | 37.234234 | 82 | 0.536522 | false |
windflyer/apport | test/test_backend_apt_dpkg.py | 1 | 41266 | import unittest, gzip, imp, subprocess, tempfile, shutil, os, os.path, time
import glob, urllib.request, urllib.error
from apt import apt_pkg
if os.environ.get('APPORT_TEST_LOCAL'):
impl = imp.load_source('', 'backends/packaging-apt-dpkg.py').impl
else:
from apport.packaging_impl import impl
def _has_internet():
    '''Return whether there is a sufficient network connection for the tests.
    This checks whether http://ddebs.ubuntu.com/ can be downloaded from, to
    determine if we can run the online tests.
'''
if os.environ.get('SKIP_ONLINE_TESTS'):
return False
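    # probe the network only once per test run and memoize the result in a
    # function attribute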
if _has_internet.cache is None:
_has_internet.cache = False
try:
f = urllib.request.urlopen('http://ddebs.ubuntu.com/dbgsym-release-key.asc', timeout=30)
if f.readline().startswith(b'-----BEGIN PGP'):
_has_internet.cache = True
except (IOError, urllib.error.URLError):
pass
return _has_internet.cache
_has_internet.cache = None
class T(unittest.TestCase):
def setUp(self):
# save and restore configuration file
self.orig_conf = impl.configuration
self.workdir = tempfile.mkdtemp()
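        # check whether -dbgsym packages are available; some tests depend on them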
try:
impl.get_available_version('coreutils-dbgsym')
self.has_dbgsym = True
except ValueError:
self.has_dbgsym = False
def tearDown(self):
impl.configuration = self.orig_conf
shutil.rmtree(self.workdir)
def test_check_files_md5(self):
'''_check_files_md5().'''
td = tempfile.mkdtemp()
try:
f1 = os.path.join(td, 'test 1.txt')
f2 = os.path.join(td, 'test:2.txt')
sumfile = os.path.join(td, 'sums.txt')
with open(f1, 'w') as fd:
fd.write('Some stuff')
with open(f2, 'w') as fd:
fd.write('More stuff')
# use one relative and one absolute path in checksums file
with open(sumfile, 'wb') as fd:
                fd.write(b'2e41290da2fa3f68bd3313174467e3b5  ' + f1[1:].encode() + b'\n')
                fd.write(b'f6423dfbc4faf022e58b4d3f5ff71a70  ' + f2.encode() + b'\n')
                # non-ASCII path entry; the file does not exist and must not be
                # reported as modified
                fd.write(b'deadbeef000001111110000011110000  /bin/\xc3\xa4')
self.assertEqual(impl._check_files_md5(sumfile), [], 'correct md5sums')
with open(f1, 'w') as fd:
fd.write('Some stuff!')
self.assertEqual(impl._check_files_md5(sumfile), [f1[1:]], 'file 1 wrong')
with open(f2, 'w') as fd:
fd.write('More stuff!')
self.assertEqual(impl._check_files_md5(sumfile), [f1[1:], f2], 'files 1 and 2 wrong')
with open(f1, 'w') as fd:
fd.write('Some stuff')
self.assertEqual(impl._check_files_md5(sumfile), [f2], 'file 2 wrong')
# check using a direct md5 list as argument
with open(sumfile, 'rb') as fd:
self.assertEqual(impl._check_files_md5(fd.read()),
[f2], 'file 2 wrong')
finally:
shutil.rmtree(td)
def test_get_version(self):
'''get_version().'''
self.assertTrue(impl.get_version('libc6').startswith('2'))
self.assertRaises(ValueError, impl.get_version, 'nonexisting')
self.assertRaises(ValueError, impl.get_version, 'wukrainian')
def test_get_available_version(self):
'''get_available_version().'''
self.assertTrue(impl.get_available_version('libc6').startswith('2'))
self.assertRaises(ValueError, impl.get_available_version, 'nonexisting')
def test_get_dependencies(self):
'''get_dependencies().'''
# package with both Depends: and Pre-Depends:
d = impl.get_dependencies('bash')
self.assertTrue(len(d) > 2)
self.assertTrue('libc6' in d)
for dep in d:
self.assertTrue(impl.get_version(dep))
# Pre-Depends: only
d = impl.get_dependencies('coreutils')
self.assertTrue(len(d) >= 1)
self.assertTrue('libc6' in d)
for dep in d:
self.assertTrue(impl.get_version(dep))
# Depends: only
d = impl.get_dependencies('libc6')
self.assertTrue(len(d) >= 1)
for dep in d:
self.assertTrue(impl.get_version(dep))
def test_get_source(self):
'''get_source().'''
self.assertRaises(ValueError, impl.get_source, 'nonexisting')
self.assertEqual(impl.get_source('bash'), 'bash')
self.assertTrue('glibc' in impl.get_source('libc6'))
def test_get_package_origin(self):
'''get_package_origin().'''
# determine distro name
distro = impl.get_os_version()[0]
self.assertRaises(ValueError, impl.get_package_origin, 'nonexisting')
# this assumes that this package is not installed
self.assertRaises(ValueError, impl.get_package_origin, 'robocode-doc')
# this assumes that bash is native
self.assertEqual(impl.get_package_origin('bash'), distro)
# no non-native test here, hard to come up with a generic one
def test_is_distro_package(self):
'''is_distro_package().'''
self.assertRaises(ValueError, impl.is_distro_package, 'nonexisting')
self.assertTrue(impl.is_distro_package('bash'))
# no False test here, hard to come up with a generic one
def test_get_architecture(self):
'''get_architecture().'''
self.assertRaises(ValueError, impl.get_architecture, 'nonexisting')
# just assume that bash uses the native architecture
d = subprocess.Popen(['dpkg', '--print-architecture'],
stdout=subprocess.PIPE)
system_arch = d.communicate()[0].decode().strip()
assert d.returncode == 0
self.assertEqual(impl.get_architecture('bash'), system_arch)
def test_get_files(self):
'''get_files().'''
self.assertRaises(ValueError, impl.get_files, 'nonexisting')
self.assertTrue('/bin/bash' in impl.get_files('bash'))
def test_get_file_package(self):
'''get_file_package() on installed files.'''
self.assertEqual(impl.get_file_package('/bin/bash'), 'bash')
self.assertEqual(impl.get_file_package('/bin/cat'), 'coreutils')
self.assertEqual(impl.get_file_package('/etc/pam.conf'), 'libpam-runtime')
self.assertEqual(impl.get_file_package('/nonexisting'), None)
def test_get_file_package_uninstalled(self):
'''get_file_package() on uninstalled packages.'''
        # generate test Contents.gz files for the release and -updates pockets
basedir = tempfile.mkdtemp()
try:
# test Contents.gz for release pocket
mapdir = os.path.join(basedir, 'dists', impl.get_distro_codename())
os.makedirs(mapdir)
with gzip.open(os.path.join(mapdir, 'Contents-%s.gz' %
impl.get_system_architecture()), 'w') as f:
f.write(b'''
foo header
FILE LOCATION
usr/bin/frobnicate foo/frob
usr/bin/frob foo/frob-utils
bo/gu/s na/mypackage
bin/true admin/superutils
''')
# test Contents.gz for -updates pocket
mapdir = os.path.join(basedir, 'dists', impl.get_distro_codename() + '-updates')
os.makedirs(mapdir)
with gzip.open(os.path.join(mapdir, 'Contents-%s.gz' %
impl.get_system_architecture()), 'w') as f:
f.write(b'''
foo header
FILE LOCATION
lib/libnew.so.5 universe/libs/libnew5
''')
# use this as a mirror
impl.set_mirror('file://' + basedir)
self.assertEqual(impl.get_file_package('usr/bin/frob', False), None)
# must not match frob (same file name prefix)
self.assertEqual(impl.get_file_package('usr/bin/frob', True), 'frob-utils')
self.assertEqual(impl.get_file_package('/usr/bin/frob', True), 'frob-utils')
# find files from -updates pocket
self.assertEqual(impl.get_file_package('/lib/libnew.so.5', False), None)
self.assertEqual(impl.get_file_package('/lib/libnew.so.5', True), 'libnew5')
# invalid mirror
impl.set_mirror('file:///foo/nonexisting')
self.assertRaises(IOError, impl.get_file_package, 'usr/bin/frob', True)
# valid mirror, test cache directory
impl.set_mirror('file://' + basedir)
cache_dir = os.path.join(basedir, 'cache')
os.mkdir(cache_dir)
self.assertEqual(impl.get_file_package('usr/bin/frob', True, cache_dir), 'frob-utils')
cache_dir_files = os.listdir(cache_dir)
self.assertEqual(len(cache_dir_files), 2)
self.assertEqual(impl.get_file_package('/bo/gu/s', True, cache_dir), 'mypackage')
# valid cache, should not need to access the mirror
impl.set_mirror('file:///foo/nonexisting')
self.assertEqual(impl.get_file_package('/bin/true', True, cache_dir), 'superutils')
self.assertEqual(impl.get_file_package('/bo/gu/s', True, cache_dir), 'mypackage')
self.assertEqual(impl.get_file_package('/lib/libnew.so.5', True, cache_dir), 'libnew5')
# outdated cache, must refresh the cache and hit the invalid
# mirror
if 'updates' in cache_dir_files[0]:
cache_file = cache_dir_files[1]
else:
cache_file = cache_dir_files[0]
now = int(time.time())
os.utime(os.path.join(cache_dir, cache_file), (now, now - 90000))
self.assertRaises(IOError, impl.get_file_package, '/bo/gu/s', True, cache_dir)
finally:
shutil.rmtree(basedir)
def test_get_file_package_uninstalled_multiarch(self):
'''get_file_package() on foreign arches and releases'''
# map "Foonux 3.14" to "mocky"
orig_distro_release_to_codename = impl._distro_release_to_codename
impl._distro_release_to_codename = lambda r: (r == 'Foonux 3.14') and 'mocky' or None
# generate test Contents.gz for two fantasy architectures
basedir = tempfile.mkdtemp()
try:
mapdir = os.path.join(basedir, 'dists', impl.get_distro_codename())
os.makedirs(mapdir)
with gzip.open(os.path.join(mapdir, 'Contents-even.gz'), 'w') as f:
f.write(b'''
foo header
FILE LOCATION
usr/lib/even/libfrob.so.1 foo/libfrob1
usr/bin/frob foo/frob-utils
''')
with gzip.open(os.path.join(mapdir, 'Contents-odd.gz'), 'w') as f:
f.write(b'''
foo header
FILE LOCATION
usr/lib/odd/libfrob.so.1 foo/libfrob1
usr/bin/frob foo/frob-utils
''')
# and another one for fantasy release
os.mkdir(os.path.join(basedir, 'dists', 'mocky'))
with gzip.open(os.path.join(basedir, 'dists', 'mocky', 'Contents-even.gz'), 'w') as f:
f.write(b'''
foo header
FILE LOCATION
usr/lib/even/libfrob.so.0 foo/libfrob0
usr/bin/frob foo/frob
''')
# use this as a mirror
impl.set_mirror('file://' + basedir)
# must not match system architecture
self.assertEqual(impl.get_file_package('usr/bin/frob', False), None)
# must match correct architecture
self.assertEqual(impl.get_file_package('usr/bin/frob', True, arch='even'),
'frob-utils')
self.assertEqual(impl.get_file_package('usr/bin/frob', True, arch='odd'),
'frob-utils')
self.assertEqual(impl.get_file_package('/usr/lib/even/libfrob.so.1', True, arch='even'),
'libfrob1')
self.assertEqual(impl.get_file_package('/usr/lib/even/libfrob.so.1', True, arch='odd'),
None)
self.assertEqual(impl.get_file_package('/usr/lib/odd/libfrob.so.1', True, arch='odd'),
'libfrob1')
# for mocky release ("Foonux 3.14")
self.assertEqual(impl.get_file_package('/usr/lib/even/libfrob.so.1',
True, release='Foonux 3.14', arch='even'),
None)
self.assertEqual(impl.get_file_package('/usr/lib/even/libfrob.so.0',
True, release='Foonux 3.14', arch='even'),
'libfrob0')
self.assertEqual(impl.get_file_package('/usr/bin/frob',
True, release='Foonux 3.14', arch='even'),
'frob')
# invalid mirror
impl.set_mirror('file:///foo/nonexisting')
self.assertRaises(IOError, impl.get_file_package,
'/usr/lib/even/libfrob.so.1', True, arch='even')
self.assertRaises(IOError, impl.get_file_package,
'/usr/lib/even/libfrob.so.0', True, release='Foonux 3.14', arch='even')
# valid mirror, test caching
impl.set_mirror('file://' + basedir)
cache_dir = os.path.join(basedir, 'cache')
os.mkdir(cache_dir)
self.assertEqual(impl.get_file_package('/usr/lib/even/libfrob.so.1',
True, cache_dir, arch='even'),
'libfrob1')
self.assertEqual(len(os.listdir(cache_dir)), 1)
cache_file = os.listdir(cache_dir)[0]
self.assertEqual(impl.get_file_package('/usr/lib/even/libfrob.so.0',
True, cache_dir, release='Foonux 3.14', arch='even'),
'libfrob0')
self.assertEqual(len(os.listdir(cache_dir)), 2)
# valid cache, should not need to access the mirror
impl.set_mirror('file:///foo/nonexisting')
self.assertEqual(impl.get_file_package('usr/bin/frob', True, cache_dir, arch='even'),
'frob-utils')
self.assertEqual(impl.get_file_package('usr/bin/frob', True, cache_dir,
release='Foonux 3.14', arch='even'),
'frob')
# but no cached file for the other arch
self.assertRaises(IOError, impl.get_file_package, 'usr/bin/frob',
True, cache_dir, arch='odd')
# outdated cache, must refresh the cache and hit the invalid
# mirror
now = int(time.time())
os.utime(os.path.join(cache_dir, cache_file), (now, now - 90000))
self.assertRaises(IOError, impl.get_file_package, 'usr/bin/frob',
True, cache_dir, arch='even')
finally:
shutil.rmtree(basedir)
impl._distro_release_to_codename = orig_distro_release_to_codename
def test_get_file_package_diversion(self):
'''get_file_package() for a diverted file.'''
# pick first diversion we have
p = subprocess.Popen('LC_ALL=C dpkg-divert --list | head -n 1',
shell=True, stdout=subprocess.PIPE)
out = p.communicate()[0].decode('UTF-8')
assert p.returncode == 0
assert out
fields = out.split()
file = fields[2]
pkg = fields[-1]
self.assertEqual(impl.get_file_package(file), pkg)
def test_mirror_from_apt_sources(self):
s = os.path.join(self.workdir, 'sources.list')
# valid file, should grab the first mirror
with open(s, 'w') as f:
f.write('''# some comment
deb-src http://source.mirror/foo tuxy main
deb http://binary.mirror/tuxy tuxy main
deb http://secondary.mirror tuxy extra
''')
f.flush()
self.assertEqual(impl._get_primary_mirror_from_apt_sources(s),
'http://binary.mirror/tuxy')
# valid file with options
with open(s, 'w') as f:
f.write('''# some comment
deb-src http://source.mirror/foo tuxy main
deb [arch=flowerpc,leghf] http://binary.mirror/tuxy tuxy main
deb http://secondary.mirror tuxy extra
''')
f.flush()
self.assertEqual(impl._get_primary_mirror_from_apt_sources(s),
'http://binary.mirror/tuxy')
# empty file
with open(s, 'w') as f:
f.flush()
self.assertRaises(SystemError, impl._get_primary_mirror_from_apt_sources, s)
def test_get_modified_conffiles(self):
'''get_modified_conffiles()'''
# very shallow
self.assertEqual(type(impl.get_modified_conffiles('bash')), type({}))
self.assertEqual(type(impl.get_modified_conffiles('apport')), type({}))
self.assertEqual(type(impl.get_modified_conffiles('nonexisting')), type({}))
def test_get_system_architecture(self):
'''get_system_architecture().'''
arch = impl.get_system_architecture()
# must be nonempty without line breaks
self.assertNotEqual(arch, '')
self.assertTrue('\n' not in arch)
def test_get_library_paths(self):
'''get_library_paths().'''
paths = impl.get_library_paths()
# must be nonempty without line breaks
self.assertNotEqual(paths, '')
self.assertTrue(':' in paths)
self.assertTrue('/lib' in paths)
self.assertTrue('\n' not in paths)
def test_compare_versions(self):
'''compare_versions.'''
self.assertEqual(impl.compare_versions('1', '2'), -1)
self.assertEqual(impl.compare_versions('1.0-1ubuntu1', '1.0-1ubuntu2'), -1)
self.assertEqual(impl.compare_versions('1.0-1ubuntu1', '1.0-1ubuntu1'), 0)
self.assertEqual(impl.compare_versions('1.0-1ubuntu2', '1.0-1ubuntu1'), 1)
self.assertEqual(impl.compare_versions('1:1.0-1', '2007-2'), 1)
self.assertEqual(impl.compare_versions('1:1.0-1~1', '1:1.0-1'), -1)
def test_enabled(self):
'''enabled.'''
impl.configuration = '/nonexisting'
self.assertEqual(impl.enabled(), True)
f = tempfile.NamedTemporaryFile()
impl.configuration = f.name
f.write('# configuration file\nenabled = 1'.encode())
f.flush()
self.assertEqual(impl.enabled(), True)
f.close()
f = tempfile.NamedTemporaryFile()
impl.configuration = f.name
f.write('# configuration file\n enabled =0 '.encode())
f.flush()
self.assertEqual(impl.enabled(), False)
f.close()
f = tempfile.NamedTemporaryFile()
impl.configuration = f.name
f.write('# configuration file\nnothing here'.encode())
f.flush()
self.assertEqual(impl.enabled(), True)
f.close()
def test_get_kernel_package(self):
'''get_kernel_package().'''
self.assertTrue('linux' in impl.get_kernel_package())
def test_package_name_glob(self):
'''package_name_glob().'''
self.assertTrue(len(impl.package_name_glob('a*')) > 5)
self.assertTrue('bash' in impl.package_name_glob('ba*h'))
self.assertEqual(impl.package_name_glob('bash'), ['bash'])
self.assertEqual(impl.package_name_glob('xzywef*'), [])
@unittest.skipUnless(_has_internet(), 'online test')
def test_install_packages_versioned(self):
'''install_packages() with versions and with cache'''
self._setup_foonux_config(updates=True)
obsolete = impl.install_packages(self.rootdir, self.configdir,
'Foonux 1.2',
[('coreutils', '8.21-1ubuntu5'), # should not come from updates
('libc6', '2.19-0ubuntu6'),
('tzdata', None), # should come from -updates, > 2014b-1
], False, self.cachedir)
def sandbox_ver(pkg):
with gzip.open(os.path.join(self.rootdir, 'usr/share/doc', pkg,
'changelog.Debian.gz')) as f:
return f.readline().decode().split()[1][1:-1]
self.assertEqual(obsolete, '')
# packages get installed
self.assertTrue(os.path.exists(os.path.join(self.rootdir,
'usr/bin/stat')))
self.assert_elf_arch(os.path.join(self.rootdir, 'usr/bin/stat'),
impl.get_system_architecture())
self.assertTrue(os.path.exists(os.path.join(self.rootdir,
'usr/lib/debug/usr/bin/stat')))
self.assertTrue(os.path.exists(os.path.join(self.rootdir,
'usr/share/zoneinfo/zone.tab')))
self.assertTrue(os.path.exists(os.path.join(self.rootdir,
'usr/share/doc/libc6/copyright')))
# their versions are as expected
self.assertEqual(sandbox_ver('coreutils'), '8.21-1ubuntu5')
self.assertEqual(sandbox_ver('libc6'), '2.19-0ubuntu6')
self.assertEqual(sandbox_ver('libc6-dbg'), '2.19-0ubuntu6')
self.assertGreater(sandbox_ver('tzdata'), '2015')
with open(os.path.join(self.rootdir, 'packages.txt')) as f:
pkglist = f.read().splitlines()
self.assertIn('coreutils 8.21-1ubuntu5', pkglist)
self.assertIn('coreutils-dbgsym 8.21-1ubuntu5', pkglist)
self.assertIn('libc6 2.19-0ubuntu6', pkglist)
self.assertIn('libc6-dbg 2.19-0ubuntu6', pkglist)
self.assertIn('tzdata ' + sandbox_ver('tzdata'), pkglist)
self.assertEqual(len(pkglist), 5, str(pkglist))
# does not clobber config dir
self.assertEqual(os.listdir(self.configdir), ['Foonux 1.2'])
self.assertEqual(sorted(os.listdir(os.path.join(self.configdir, 'Foonux 1.2'))),
['armhf', 'codename', 'sources.list'])
self.assertEqual(os.listdir(os.path.join(self.configdir, 'Foonux 1.2', 'armhf')),
['sources.list'])
# caches packages, and their versions are as expected
cache = os.listdir(os.path.join(self.cachedir, 'Foonux 1.2', 'apt',
'var', 'cache', 'apt', 'archives'))
cache_versions = {}
for p in cache:
try:
(name, ver) = p.split('_')[:2]
cache_versions[name] = ver
except ValueError:
pass # not a .deb, ignore
self.assertEqual(cache_versions['coreutils'], '8.21-1ubuntu5')
self.assertEqual(cache_versions['coreutils-dbgsym'], '8.21-1ubuntu5')
self.assertIn('tzdata', cache_versions)
self.assertEqual(cache_versions['libc6'], '2.19-0ubuntu6')
self.assertEqual(cache_versions['libc6-dbg'], '2.19-0ubuntu6')
# installs cached packages
os.unlink(os.path.join(self.rootdir, 'usr/bin/stat'))
os.unlink(os.path.join(self.rootdir, 'packages.txt'))
obsolete = impl.install_packages(self.rootdir, self.configdir,
'Foonux 1.2',
[('coreutils', '8.21-1ubuntu5'),
], False, self.cachedir)
self.assertEqual(obsolete, '')
self.assertTrue(os.path.exists(
os.path.join(self.rootdir, 'usr/bin/stat')))
# complains about obsolete packages
result = impl.install_packages(self.rootdir, self.configdir,
'Foonux 1.2', [('gnome-common', '1.1')])
self.assertEqual(len(result.splitlines()), 1)
self.assertTrue('gnome-common' in result)
self.assertTrue('1.1' in result)
# ... but installs the current version anyway
self.assertTrue(os.path.exists(
os.path.join(self.rootdir, 'usr/bin/gnome-autogen.sh')))
self.assertGreaterEqual(sandbox_ver('gnome-common'), '3.1.0-0ubuntu1')
# does not crash on nonexisting packages
result = impl.install_packages(self.rootdir, self.configdir,
'Foonux 1.2', [('buggerbogger', None)])
self.assertEqual(len(result.splitlines()), 1)
self.assertTrue('buggerbogger' in result)
self.assertTrue('not exist' in result)
# can interleave with other operations
dpkg = subprocess.Popen(['dpkg-query', '-Wf${Version}', 'dash'],
stdout=subprocess.PIPE)
dash_version = dpkg.communicate()[0].decode()
self.assertEqual(dpkg.returncode, 0)
self.assertEqual(impl.get_version('dash'), dash_version)
self.assertRaises(ValueError, impl.get_available_version, 'buggerbogger')
# still installs packages after above operations
os.unlink(os.path.join(self.rootdir, 'usr/bin/stat'))
os.unlink(os.path.join(self.rootdir, 'packages.txt'))
impl.install_packages(self.rootdir, self.configdir, 'Foonux 1.2',
[('coreutils', '8.21-1ubuntu5'),
('dpkg', None),
], False, self.cachedir)
self.assertTrue(os.path.exists(os.path.join(self.rootdir,
'usr/bin/stat')))
self.assertTrue(os.path.exists(os.path.join(self.rootdir,
'usr/bin/dpkg')))
@unittest.skipUnless(_has_internet(), 'online test')
def test_install_packages_unversioned(self):
'''install_packages() without versions and no cache'''
self._setup_foonux_config()
obsolete = impl.install_packages(self.rootdir, self.configdir,
'Foonux 1.2',
[('coreutils', None),
('tzdata', None),
], False, None)
self.assertEqual(obsolete, '')
self.assertTrue(os.path.exists(os.path.join(self.rootdir,
'usr/bin/stat')))
self.assert_elf_arch(os.path.join(self.rootdir, 'usr/bin/stat'),
impl.get_system_architecture())
self.assertTrue(os.path.exists(os.path.join(self.rootdir,
'usr/lib/debug/usr/bin/stat')))
self.assertTrue(os.path.exists(os.path.join(self.rootdir,
'usr/share/zoneinfo/zone.tab')))
# does not clobber config dir
self.assertEqual(os.listdir(self.configdir), ['Foonux 1.2'])
self.assertEqual(sorted(os.listdir(os.path.join(self.configdir, 'Foonux 1.2'))),
['armhf', 'codename', 'sources.list'])
self.assertEqual(os.listdir(os.path.join(self.configdir, 'Foonux 1.2', 'armhf')),
['sources.list'])
# no cache
self.assertEqual(os.listdir(self.cachedir), [])
# keeps track of package versions
with open(os.path.join(self.rootdir, 'packages.txt')) as f:
pkglist = f.read().splitlines()
self.assertIn('coreutils 8.21-1ubuntu5', pkglist)
self.assertIn('coreutils-dbgsym 8.21-1ubuntu5', pkglist)
self.assertIn('tzdata 2014b-1', pkglist)
self.assertEqual(len(pkglist), 3, str(pkglist))
@unittest.skipUnless(_has_internet(), 'online test')
def test_install_packages_system(self):
'''install_packages() with system configuration'''
# trigger an unrelated package query here to get the cache set up,
# reproducing an install failure when the internal caches are not
# reset properly
impl.get_version('dash')
self._setup_foonux_config()
result = impl.install_packages(self.rootdir, None, None,
[('coreutils', impl.get_version('coreutils')),
('tzdata', '1.1'),
], False, self.cachedir)
self.assertTrue(os.path.exists(os.path.join(self.rootdir,
'usr/bin/stat')))
self.assertTrue(os.path.exists(os.path.join(self.rootdir,
'usr/share/zoneinfo/zone.tab')))
# complains about obsolete packages
self.assertGreaterEqual(len(result.splitlines()), 1)
self.assertTrue('tzdata' in result)
self.assertTrue('1.1' in result)
# caches packages
cache = os.listdir(os.path.join(self.cachedir, 'system', 'apt',
'var', 'cache', 'apt', 'archives'))
cache_names = [p.split('_')[0] for p in cache]
self.assertTrue('coreutils' in cache_names)
self.assertEqual('coreutils-dbgsym' in cache_names, self.has_dbgsym)
self.assertTrue('tzdata' in cache_names)
# works with relative paths and existing cache
os.unlink(os.path.join(self.rootdir, 'usr/bin/stat'))
os.unlink(os.path.join(self.rootdir, 'packages.txt'))
orig_cwd = os.getcwd()
try:
os.chdir(self.workdir)
impl.install_packages('root', None, None,
[('coreutils', None)], False, 'cache')
finally:
os.chdir(orig_cwd)
self.assertTrue(os.path.exists(os.path.join(self.rootdir,
'usr/bin/stat')))
@unittest.skipUnless(_has_internet(), 'online test')
def test_install_packages_error(self):
'''install_packages() with errors'''
# sources.list with invalid format
self._setup_foonux_config()
with open(os.path.join(self.configdir, 'Foonux 1.2', 'sources.list'), 'w') as f:
f.write('bogus format')
try:
impl.install_packages(self.rootdir, self.configdir, 'Foonux 1.2',
[('tzdata', None)], False, self.cachedir)
self.fail('install_packages() unexpectedly succeeded with broken sources.list')
except SystemError as e:
self.assertTrue('bogus' in str(e))
self.assertFalse('Exception' in str(e))
# sources.list with wrong server
with open(os.path.join(self.configdir, 'Foonux 1.2', 'sources.list'), 'w') as f:
f.write('deb http://archive.ubuntu.com/nosuchdistro/ trusty main\n')
try:
impl.install_packages(self.rootdir, self.configdir, 'Foonux 1.2',
[('tzdata', None)], False, self.cachedir)
self.fail('install_packages() unexpectedly succeeded with broken server URL')
except SystemError as e:
self.assertTrue('nosuchdistro' in str(e), str(e))
self.assertTrue('index files failed to download' in str(e))
@unittest.skipUnless(_has_internet(), 'online test')
def test_install_packages_permanent_sandbox(self):
'''install_packages() with a permanent sandbox'''
self._setup_foonux_config()
zonetab = os.path.join(self.rootdir, 'usr/share/zoneinfo/zone.tab')
impl.install_packages(self.rootdir, self.configdir, 'Foonux 1.2',
[('tzdata', None)], False, self.cachedir, permanent_rootdir=True)
# This will now be using a Cache with our rootdir.
archives = apt_pkg.config.find_dir('Dir::Cache::archives')
tzdata = glob.glob(os.path.join(archives, 'tzdata*.deb'))
if not tzdata:
self.fail('tzdata was not downloaded')
tzdata_written = os.path.getctime(tzdata[0])
zonetab_written = os.path.getctime(zonetab)
impl.install_packages(self.rootdir, self.configdir, 'Foonux 1.2',
[('coreutils', None), ('tzdata', None)], False, self.cachedir,
permanent_rootdir=True)
if not glob.glob(os.path.join(archives, 'coreutils*.deb')):
self.fail('coreutils was not downloaded.')
self.assertEqual(os.path.getctime(tzdata[0]), tzdata_written,
'tzdata downloaded twice.')
self.assertEqual(zonetab_written, os.path.getctime(zonetab),
'zonetab written twice.')
self.assertTrue(os.path.exists(
os.path.join(self.rootdir, 'usr/bin/stat')))
# Prevent packages from downloading.
apt_pkg.config.set('Acquire::http::Proxy', 'http://nonexistent')
orig_env = os.environ.copy()
os.environ['http_proxy'] = 'http://nonexistent'
try:
del os.environ['no_proxy']
except KeyError:
pass
self.assertRaises(SystemExit, impl.install_packages, self.rootdir,
self.configdir, 'Foonux 1.2', [('libc6', None)], False,
self.cachedir, permanent_rootdir=True)
os.environ = orig_env
# These packages exist, so attempting to install them should not fail.
impl.install_packages(self.rootdir, self.configdir, 'Foonux 1.2',
[('coreutils', None), ('tzdata', None)], False, self.cachedir,
permanent_rootdir=True)
# even without cached debs, trying to install the same versions should
# be a no-op and succeed
for f in glob.glob('%s/Foonux 1.2/apt/var/cache/apt/archives/coreutils*' % self.cachedir):
os.unlink(f)
impl.install_packages(self.rootdir, self.configdir, 'Foonux 1.2',
[('coreutils', None)], False, self.cachedir,
permanent_rootdir=True)
# trying to install another package should fail, though
self.assertRaises(SystemExit, impl.install_packages, self.rootdir,
self.configdir, 'Foonux 1.2', [('aspell-doc', None)], False,
self.cachedir, permanent_rootdir=True)
apt_pkg.config.set('Acquire::http::Proxy', '')
@unittest.skipUnless(_has_internet(), 'online test')
def test_install_packages_permanent_sandbox_repack(self):
self._setup_foonux_config()
include_path = os.path.join(self.rootdir, 'usr/include/krb5.h')
impl.install_packages(self.rootdir, self.configdir, 'Foonux 1.2',
[('libkrb5-dev', None)], False, self.cachedir,
permanent_rootdir=True)
self.assertIn('mit-krb5/', os.readlink(include_path))
impl.install_packages(self.rootdir, self.configdir, 'Foonux 1.2',
[('heimdal-dev', None)], False, self.cachedir,
permanent_rootdir=True)
self.assertIn('heimdal/', os.readlink(include_path))
impl.install_packages(self.rootdir, self.configdir, 'Foonux 1.2',
[('libkrb5-dev', None)], False, self.cachedir,
permanent_rootdir=True)
self.assertIn('mit-krb5/', os.readlink(include_path))
@unittest.skipUnless(_has_internet(), 'online test')
@unittest.skipIf(impl.get_system_architecture() == 'armhf', 'native armhf architecture')
def test_install_packages_armhf(self):
'''install_packages() for foreign architecture armhf'''
self._setup_foonux_config()
obsolete = impl.install_packages(self.rootdir, self.configdir, 'Foonux 1.2',
[('coreutils', '8.21-1ubuntu5'),
('libc6', '2.19-0ubuntu5'),
], False, self.cachedir,
architecture='armhf')
self.assertEqual(obsolete, 'libc6 version 2.19-0ubuntu5 required, but 2.19-0ubuntu6 is available\n')
self.assertTrue(os.path.exists(os.path.join(self.rootdir,
'usr/bin/stat')))
self.assert_elf_arch(os.path.join(self.rootdir, 'usr/bin/stat'), 'armhf')
self.assertTrue(os.path.exists(os.path.join(self.rootdir,
'usr/share/doc/libc6/copyright')))
# caches packages
cache = os.listdir(os.path.join(self.cachedir, 'Foonux 1.2', 'apt',
'var', 'cache', 'apt', 'archives'))
self.assertTrue('coreutils_8.21-1ubuntu5_armhf.deb' in cache, cache)
self.assertTrue('libc6_2.19-0ubuntu6_armhf.deb' in cache, cache)
@unittest.skipUnless(_has_internet(), 'online test')
def test_get_source_tree_sandbox(self):
self._setup_foonux_config()
out_dir = os.path.join(self.workdir, 'out')
os.mkdir(out_dir)
impl._build_apt_sandbox(self.rootdir, os.path.join(self.configdir, 'Foonux 1.2', 'sources.list'))
res = impl.get_source_tree('base-files', out_dir, sandbox=self.rootdir,
apt_update=True)
self.assertTrue(os.path.isdir(os.path.join(res, 'debian')))
# this needs to be updated when the release in _setup_foonux_config
# changes
self.assertTrue(res.endswith('/base-files-7.2ubuntu5'),
'unexpected version: ' + res.split('/')[-1])
def _setup_foonux_config(self, updates=False):
'''Set up directories and configuration for install_packages()'''
self.cachedir = os.path.join(self.workdir, 'cache')
self.rootdir = os.path.join(self.workdir, 'root')
self.configdir = os.path.join(self.workdir, 'config')
os.mkdir(self.cachedir)
os.mkdir(self.rootdir)
os.mkdir(self.configdir)
os.mkdir(os.path.join(self.configdir, 'Foonux 1.2'))
with open(os.path.join(self.configdir, 'Foonux 1.2', 'sources.list'), 'w') as f:
f.write('deb http://archive.ubuntu.com/ubuntu/ trusty main\n')
f.write('deb-src http://archive.ubuntu.com/ubuntu/ trusty main\n')
f.write('deb http://ddebs.ubuntu.com/ trusty main\n')
if updates:
f.write('deb http://archive.ubuntu.com/ubuntu/ trusty-updates main\n')
f.write('deb-src http://archive.ubuntu.com/ubuntu/ trusty-updates main\n')
f.write('deb http://ddebs.ubuntu.com/ trusty-updates main\n')
os.mkdir(os.path.join(self.configdir, 'Foonux 1.2', 'armhf'))
with open(os.path.join(self.configdir, 'Foonux 1.2', 'armhf', 'sources.list'), 'w') as f:
f.write('deb http://ports.ubuntu.com/ trusty main\n')
f.write('deb-src http://ports.ubuntu.com/ trusty main\n')
f.write('deb http://ddebs.ubuntu.com/ trusty main\n')
if updates:
f.write('deb http://ports.ubuntu.com/ trusty-updates main\n')
f.write('deb-src http://ports.ubuntu.com/ trusty-updates main\n')
f.write('deb http://ddebs.ubuntu.com/ trusty-updates main\n')
with open(os.path.join(self.configdir, 'Foonux 1.2', 'codename'), 'w') as f:
f.write('trusty')
def assert_elf_arch(self, path, expected):
'''Assert that an ELF file is for an expected machine type.
Expected is a Debian-style architecture (i386, amd64, armhf)
'''
archmap = {
'i386': '80386',
'amd64': 'X86-64',
'armhf': 'ARM',
}
# get ELF machine type
readelf = subprocess.Popen(['readelf', '-e', path], env={},
stdout=subprocess.PIPE,
universal_newlines=True)
out = readelf.communicate()[0]
assert readelf.returncode == 0
for line in out.splitlines():
if line.startswith(' Machine:'):
machine = line.split(maxsplit=1)[1]
break
else:
            self.fail('could not find Machine: in readelf output')
self.assertTrue(archmap[expected] in machine,
'%s has unexpected machine type "%s" for architecture %s' % (
path, machine, expected))
# only execute if dpkg is available
try:
if subprocess.call(['dpkg', '--help'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE) == 0:
unittest.main()
except OSError:
pass
| gpl-2.0 | 9,061,224,673,467,465,000 | 45.004459 | 108 | 0.551471 | false |
sony/nnabla | build-tools/code_generator/update_function_types.py | 1 | 1554 | # Copyright 2018,2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os.path import exists
import code_generator_utils as utils
from collections import OrderedDict
def get_args():
import argparse
p = argparse.ArgumentParser()
p.add_argument('path_types', type=str)
p.add_argument('--default-type', type=str, default=None)
args = p.parse_args()
return args
def main():
args = get_args()
func_info = utils.load_function_info(flatten=True)
if exists(args.path_types):
func_types = utils.load_yaml_ordered(open(args.path_types, 'r'))
else:
func_types = OrderedDict()
for name, func in func_info.items():
if name in func_types:
continue
print("Processing %s..." % name)
types = OrderedDict()
if args.default_type is not None:
types[args.default_type] = [args.default_type]
func_types[name] = types
utils.dump_yaml(func_types, open(args.path_types, 'w'))
if __name__ == '__main__':
main()
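# Example invocation (illustrative; the yaml filename and default type are
# hypothetical, not taken from the nnabla build scripts):
#
#   python update_function_types.py function_types.yaml --default-type float
#
# Entries already present in the yaml file are left untouched; only functions
# missing from it are appended, each seeded with the --default-type mapping
# when one is given.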
| apache-2.0 | 1,218,484,060,768,999,700 | 31.375 | 74 | 0.676319 | false |
david2307/backend_159 | activities/migrations/0001_initial.py | 1 | 1103 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('persons', '0007_auto_20150711_2332'),
]
operations = [
migrations.CreateModel(
name='Activity',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('description', models.CharField(max_length=200)),
('creation_date', models.DateTimeField(auto_now_add=True)),
('begin_date', models.DateTimeField()),
('end_date', models.DateTimeField()),
('latitude', models.DecimalField(null=True, max_digits=23, decimal_places=20)),
('longitude', models.DecimalField(null=True, max_digits=23, decimal_places=20)),
('minimum_assitant', models.IntegerField()),
('town', models.ForeignKey(to='persons.Town')),
],
),
]
| gpl-3.0 | -1,431,205,959,632,305,700 | 37.034483 | 114 | 0.56573 | false |
xiang12835/python_web | py2_web2py/web2py/gluon/restricted.py | 1 | 10754 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
| This file is part of the web2py Web Framework
| Copyrighted by Massimo Di Pierro <[email protected]>
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Restricted environment to execute application's code
-----------------------------------------------------
"""
import sys
from gluon._compat import pickle, ClassType, unicodeT, to_bytes
import traceback
import types
import os
import logging
from gluon.storage import Storage
from gluon.http import HTTP
from gluon.html import BEAUTIFY, XML
from gluon.settings import global_settings
logger = logging.getLogger("web2py")
__all__ = ['RestrictedError', 'restricted', 'TicketStorage', 'compile2']
class TicketStorage(Storage):
"""
Defines the ticket object and the default values of its members (None)
"""
def __init__(
self,
db=None,
tablename='web2py_ticket'
):
Storage.__init__(self)
self.db = db
self.tablename = tablename
def store(self, request, ticket_id, ticket_data):
"""
Stores the ticket. It will figure out if this must be on disk or in db
"""
if self.db:
self._store_in_db(request, ticket_id, ticket_data)
else:
self._store_on_disk(request, ticket_id, ticket_data)
def _store_in_db(self, request, ticket_id, ticket_data):
self.db._adapter.reconnect()
try:
table = self._get_table(self.db, self.tablename, request.application)
table.insert(ticket_id=ticket_id,
ticket_data=pickle.dumps(ticket_data, pickle.HIGHEST_PROTOCOL),
created_datetime=request.now)
self.db.commit()
message = 'In FILE: %(layer)s\n\n%(traceback)s\n'
except Exception:
self.db.rollback()
            message = 'Unable to store in FILE: %(layer)s\n\n%(traceback)s\n'
self.db.close()
logger.error(message % ticket_data)
def _store_on_disk(self, request, ticket_id, ticket_data):
ef = self._error_file(request, ticket_id, 'wb')
try:
pickle.dump(ticket_data, ef)
finally:
ef.close()
def _error_file(self, request, ticket_id, mode, app=None):
root = request.folder
if app:
root = os.path.join(os.path.join(root, '..'), app)
errors_folder = os.path.abspath(
os.path.join(root, 'errors')) # .replace('\\', '/')
return open(os.path.join(errors_folder, ticket_id), mode)
def _get_table(self, db, tablename, app):
tablename = tablename + '_' + app
table = db.get(tablename)
if not table:
table = db.define_table(
tablename,
db.Field('ticket_id', length=100),
db.Field('ticket_data', 'text'),
db.Field('created_datetime', 'datetime'))
return table
def load(
self,
request,
app,
ticket_id,
):
if not self.db:
try:
ef = self._error_file(request, ticket_id, 'rb', app)
except IOError:
return {}
try:
return pickle.load(ef)
finally:
ef.close()
else:
table = self._get_table(self.db, self.tablename, app)
rows = self.db(table.ticket_id == ticket_id).select()
return pickle.loads(rows[0].ticket_data) if rows else {}
class RestrictedError(Exception):
"""
Class used to wrap an exception that occurs in the restricted environment
below. The traceback is used to log the exception and generate a ticket.
"""
def __init__(
self,
layer='',
code='',
output='',
environment=None,
):
"""
Layer here is some description of where in the system the exception
occurred.
"""
if environment is None:
environment = {}
self.layer = layer
self.code = code
self.output = output
self.environment = environment
if layer:
try:
try:
self.traceback = traceback.format_exc()
except:
self.traceback = traceback.format_exc(limit=1)
except:
self.traceback = 'no traceback because template parsing error'
try:
self.snapshot = snapshot(context=10, code=code,
environment=self.environment)
except:
self.snapshot = {}
else:
self.traceback = '(no error)'
self.snapshot = {}
def log(self, request):
"""
Logs the exception.
"""
try:
d = {
'layer': str(self.layer),
'code': str(self.code),
'output': str(self.output),
'traceback': str(self.traceback),
'snapshot': self.snapshot,
}
ticket_storage = TicketStorage(db=request.tickets_db)
ticket_storage.store(request, request.uuid.split('/', 1)[1], d)
cmd_opts = global_settings.cmd_options
if cmd_opts and cmd_opts.print_errors:
logger.error(self.traceback)
return request.uuid
except:
logger.error(self.traceback)
return None
def load(self, request, app, ticket_id):
"""
Loads a logged exception.
"""
ticket_storage = TicketStorage(db=request.tickets_db)
d = ticket_storage.load(request, app, ticket_id)
self.layer = d.get('layer')
self.code = d.get('code')
self.output = d.get('output')
self.traceback = d.get('traceback')
self.snapshot = d.get('snapshot')
def __str__(self):
        # safely show a useful message to the user
try:
output = self.output
            if not isinstance(output, (str, bytes, bytearray)):
output = str(output)
if isinstance(output, unicodeT):
output = to_bytes(output)
except:
output = ""
return output
def compile2(code, layer):
return compile(code, layer, 'exec')
def restricted(ccode, environment=None, layer='Unknown', scode=None):
"""
Runs code in environment and returns the output. If an exception occurs
in code it raises a RestrictedError containing the traceback. Layer is
passed to RestrictedError to identify where the error occurred.
"""
if environment is None:
environment = {}
environment['__file__'] = layer
environment['__name__'] = '__restricted__'
try:
exec(ccode, environment)
except HTTP:
raise
except RestrictedError:
# do not encapsulate (obfuscate) the original RestrictedError
raise
except Exception as error:
# extract the exception type and value (used as output message)
etype, evalue, tb = sys.exc_info()
# XXX Show exception in Wing IDE if running in debugger
if __debug__ and 'WINGDB_ACTIVE' in os.environ:
sys.excepthook(etype, evalue, tb)
del tb
output = "%s %s" % (etype, evalue)
# Save source code in ticket when available
scode = scode if scode else ccode
raise RestrictedError(layer, scode, output, environment)
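# Illustrative usage sketch (not part of web2py itself): compile a snippet with
# compile2() and run it through restricted(); a failure surfaces as a
# RestrictedError carrying the traceback and snapshot used for ticketing.
# The layer name and source string below are hypothetical.
def _example_restricted_run():  # pragma: no cover
    source = "x = 1 / 0"
    ccode = compile2(source, 'applications/example/controllers/default.py')
    try:
        restricted(ccode, environment={}, layer='example layer', scode=source)
    except RestrictedError as error:
        # error.traceback and error.snapshot describe the failure;
        # error.log(request) would store a ticket for a real request object.
        return error
    return None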
def snapshot(info=None, context=5, code=None, environment=None):
"""Return a dict describing a given traceback (based on cgitb.text)."""
import time
import linecache
import inspect
import pydoc
import cgitb
# if no exception info given, get current:
etype, evalue, etb = info or sys.exc_info()
if isinstance(etype, ClassType):
etype = etype.__name__
# create a snapshot dict with some basic information
s = {}
s['pyver'] = 'Python ' + sys.version.split()[0] + ': ' + sys.executable + ' (prefix: %s)' % sys.prefix
s['date'] = time.ctime(time.time())
# start to process frames
records = inspect.getinnerframes(etb, context)
del etb # Prevent circular references that would cause memory leaks
s['frames'] = []
for frame, file, lnum, func, lines, index in records:
file = file and os.path.abspath(file) or '?'
args, varargs, varkw, locals = inspect.getargvalues(frame)
call = ''
if func != '?':
call = inspect.formatargvalues(args, varargs, varkw, locals,
formatvalue=lambda value: '=' + pydoc.text.repr(value))
# basic frame information
f = {'file': file, 'func': func, 'call': call, 'lines': {},
'lnum': lnum}
highlight = {}
def reader(lnum=[lnum]):
highlight[lnum[0]] = 1
try:
return linecache.getline(file, lnum[0])
finally:
lnum[0] += 1
vars = cgitb.scanvars(reader, frame, locals)
# if it is a view, replace with generated code
if file.endswith('html'):
lmin = lnum > context and (lnum - context) or 0
lmax = lnum + context
lines = code.split("\n")[lmin:lmax]
index = min(context, lnum) - 1
if index is not None:
i = lnum - index
for line in lines:
f['lines'][i] = line.rstrip()
i += 1
# dump local variables (referenced in current line only)
f['dump'] = {}
for name, where, value in vars:
if name in f['dump']:
continue
if value is not cgitb.__UNDEF__:
if where == 'global':
name = 'global ' + name
elif where != 'local':
name = where + name.split('.')[-1]
f['dump'][name] = pydoc.text.repr(value)
else:
f['dump'][name] = 'undefined'
s['frames'].append(f)
# add exception type, value and attributes
s['etype'] = str(etype)
s['evalue'] = str(evalue)
s['exception'] = {}
if isinstance(evalue, BaseException):
for name in dir(evalue):
value = pydoc.text.repr(getattr(evalue, name))
s['exception'][name] = value
# add all local values (of last frame) to the snapshot
s['locals'] = {}
for name, value in locals.items():
s['locals'][name] = pydoc.text.repr(value)
# add web2py environment variables
for k, v in environment.items():
if k in ('request', 'response', 'session'):
s[k] = XML(str(BEAUTIFY(v)))
return s
| apache-2.0 | -6,934,575,283,434,433,000 | 31.489426 | 106 | 0.548075 | false |
ikreymer/pywb | tests/memento_fixture.py | 1 | 1343 | import re
MEMENTO_DATETIME = 'Memento-Datetime'
ACCEPT_DATETIME = 'Accept-Datetime'
LINK = 'Link'
VARY = 'Vary'
LINK_FORMAT = 'application/link-format'
class MementoMixin(object):
def _timemap_get(self, url, fmod=True, **kwargs):
app = self.testapp if fmod else self.testapp_non_frame
return app.get(url, extra_environ={'REQUEST_URI': url}, **kwargs)
def get_links(self, resp):
return list(map(lambda x: x.strip(), re.split(', (?![0-9])', resp.headers[LINK])))
def make_timemap_link(self, url, coll='pywb'):
format_ = '<http://localhost:80/{2}/timemap/link/{0}>; rel="timemap"; type="{1}"'
return format_.format(url, LINK_FORMAT, coll)
def make_original_link(self, url):
format_ = '<{0}>; rel="original"'
return format_.format(url)
def make_timegate_link(self, url, fmod='', coll='pywb'):
fmod_slash = fmod + '/' if fmod else ''
format_ = '<http://localhost:80/{2}/{1}{0}>; rel="timegate"'
return format_.format(url, fmod_slash, coll)
def make_memento_link(self, url, ts, dt, fmod='', coll='pywb', include_coll=True):
format_ = '<http://localhost:80/{4}/{1}{3}/{0}>; rel="memento"; datetime="{2}"'
if include_coll:
format_ += '; collection="{4}"'
return format_.format(url, ts, dt, fmod, coll)
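# Illustrative sketch (not part of the original fixture): the helpers above
# assemble RFC 7089 style Link header entries; the URL and timestamp below are
# hypothetical placeholder values.
def _example_memento_link():  # pragma: no cover
    mixin = MementoMixin()
    # -> '<http://localhost:80/pywb/20140101000000/http://example.com/>; '
    #    'rel="memento"; datetime="Wed, 01 Jan 2014 00:00:00 GMT"; collection="pywb"'
    return mixin.make_memento_link('http://example.com/', '20140101000000',
                                   'Wed, 01 Jan 2014 00:00:00 GMT')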
| gpl-3.0 | 9,091,554,098,200,876,000 | 36.305556 | 90 | 0.597915 | false |
jbfavre/exabgp | lib/exabgp/bgp/message/update/attribute/mpurnlri.py | 1 | 2146 | # encoding: utf-8
"""
mpurnlri.py
Created by Thomas Mangin on 2009-11-05.
Copyright (c) 2009-2013 Exa Networks. All rights reserved.
"""
from struct import unpack
from exabgp.protocol.family import AFI
from exabgp.protocol.family import SAFI
from exabgp.protocol.ip.address import Address
from exabgp.bgp.message import IN
from exabgp.bgp.message.update.attribute.attribute import Attribute
from exabgp.bgp.message.update.nlri.nlri import NLRI
from exabgp.bgp.message.notification import Notify
# ================================================================= MP UNREACH NLRI (15)
class MPURNLRI (Attribute,Address):
FLAG = Attribute.Flag.OPTIONAL
ID = Attribute.ID.MP_UNREACH_NLRI
MULTIPLE = True
__slots__ = ['nlris']
def __init__ (self,afi,safi,nlris):
Address.__init__(self,afi,safi)
self.nlris = nlris
def packed_attributes (self,addpath):
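	# group the withdrawn NLRI by (AFI,SAFI): one MP_UNREACH_NLRI attribute is
	# generated per address family, carrying the packed NLRI of that family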
if not self.nlris:
return
mpurnlri = {}
for nlri in self.nlris:
mpurnlri.setdefault((nlri.afi.pack(),nlri.safi.pack()),[]).append(nlri.pack(addpath))
for (pafi,psafi),nlris in mpurnlri.iteritems():
yield self._attribute(pafi + psafi + ''.join(nlris))
def pack (self,addpath):
return ''.join(self.packed_attributes(addpath))
def __len__ (self):
return len(self.pack())
def __str__ (self):
return "MP_UNREACH_NLRI for %s %s with %d NLRI(s)" % (self.afi,self.safi,len(self.nlris))
@classmethod
def unpack (cls,data,negotiated):
nlris = []
# -- Reading AFI/SAFI
afi,safi = unpack('!HB',data[:3])
offset = 3
data = data[offset:]
if (afi,safi) not in negotiated.families:
raise Notify(3,0,'presented a non-negotiated family %s %s' % (AFI(afi),SAFI(safi)))
# Is the peer going to send us some Path Information with the route (AddPath)
addpath = negotiated.addpath.receive(afi,safi)
while data:
length,nlri = NLRI.unpack(afi,safi,data,addpath,None,IN.withdrawn)
nlris.append(nlri)
data = data[length:]
#logger.parser(LazyFormat("parsed withdraw mp nlri %s payload " % nlri,od,data[:length]))
return cls(afi,safi,nlris)
MPURNLRI.register_attribute()
EMPTY_MPURNLRI = MPURNLRI(AFI(AFI.undefined),SAFI(SAFI.undefined),[])
| bsd-3-clause | 2,751,306,918,672,450,000 | 26.164557 | 92 | 0.687325 | false |
aakashsinha19/Aspectus | Image Classification/models/swivel/swivel.py | 1 | 15962 | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Submatrix-wise Vector Embedding Learner.
Implementation of SwiVel algorithm described at:
http://arxiv.org/abs/1602.02215
This program expects an input directory that contains the following files.
row_vocab.txt, col_vocab.txt
  The row and column vocabulary files. Each file should contain one token per
  line; these will be used to generate a tab-separated file containing the
trained embeddings.
  row_sums.txt, col_sums.txt
The matrix row and column marginal sums. Each file should contain one
decimal floating point number per line which corresponds to the marginal
count of the matrix for that row or column.
shards.recs
A file containing the sub-matrix shards, stored as TFRecords. Each shard is
  expected to be a serialized tf.Example protocol buffer with the following
properties:
    global_row: the global row indices contained in the shard
    global_col: the global column indices contained in the shard
sparse_local_row, sparse_local_col, sparse_value: three parallel arrays
that are a sparse representation of the submatrix counts.
It will generate embeddings, training from the input directory for the specified
number of epochs. When complete, it will output the trained vectors to a
tab-separated file that contains one line per embedding. Row and column
embeddings are stored in separate files.
"""
from __future__ import print_function
import glob
import math
import os
import sys
import time
import threading
import numpy as np
import tensorflow as tf
from tensorflow.python.client import device_lib
flags = tf.app.flags
flags.DEFINE_string('input_base_path', '/tmp/swivel_data',
'Directory containing input shards, vocabularies, '
'and marginals.')
flags.DEFINE_string('output_base_path', '/tmp/swivel_data',
'Path where to write the trained embeddings.')
flags.DEFINE_integer('embedding_size', 300, 'Size of the embeddings')
flags.DEFINE_boolean('trainable_bias', False, 'Biases are trainable')
flags.DEFINE_integer('submatrix_rows', 4096, 'Rows in each training submatrix. '
'This must match the training data.')
flags.DEFINE_integer('submatrix_cols', 4096, 'Cols in each training submatrix. '
'This must match the training data.')
flags.DEFINE_float('loss_multiplier', 1.0 / 4096,
'constant multiplier on loss.')
flags.DEFINE_float('confidence_exponent', 0.5,
'Exponent for l2 confidence function')
flags.DEFINE_float('confidence_scale', 0.25, 'Scale for l2 confidence function')
flags.DEFINE_float('confidence_base', 0.1, 'Base for l2 confidence function')
flags.DEFINE_float('learning_rate', 1.0, 'Initial learning rate')
flags.DEFINE_integer('num_concurrent_steps', 2,
'Number of threads to train with')
flags.DEFINE_integer('num_readers', 4,
'Number of threads to read the input data and feed it')
flags.DEFINE_float('num_epochs', 40, 'Number epochs to train for')
flags.DEFINE_float('per_process_gpu_memory_fraction', 0,
'Fraction of GPU memory to use, 0 means allow_growth')
flags.DEFINE_integer('num_gpus', 0,
'Number of GPUs to use, 0 means all available')
FLAGS = flags.FLAGS
def log(message, *args, **kwargs):
tf.logging.info(message, *args, **kwargs)
def get_available_gpus():
return [d.name for d in device_lib.list_local_devices()
if d.device_type == 'GPU']
def embeddings_with_init(vocab_size, embedding_dim, name):
"""Creates and initializes the embedding tensors."""
return tf.get_variable(name=name,
shape=[vocab_size, embedding_dim],
initializer=tf.random_normal_initializer(
stddev=math.sqrt(1.0 / embedding_dim)))
def count_matrix_input(filenames, submatrix_rows, submatrix_cols):
"""Reads submatrix shards from disk."""
filename_queue = tf.train.string_input_producer(filenames)
reader = tf.WholeFileReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example,
features={
'global_row': tf.FixedLenFeature([submatrix_rows], dtype=tf.int64),
'global_col': tf.FixedLenFeature([submatrix_cols], dtype=tf.int64),
'sparse_local_row': tf.VarLenFeature(dtype=tf.int64),
'sparse_local_col': tf.VarLenFeature(dtype=tf.int64),
'sparse_value': tf.VarLenFeature(dtype=tf.float32)
})
global_row = features['global_row']
global_col = features['global_col']
sparse_local_row = features['sparse_local_row'].values
sparse_local_col = features['sparse_local_col'].values
sparse_count = features['sparse_value'].values
sparse_indices = tf.concat([tf.expand_dims(sparse_local_row, 1),
tf.expand_dims(sparse_local_col, 1)], 1)
count = tf.sparse_to_dense(sparse_indices, [submatrix_rows, submatrix_cols],
sparse_count)
queued_global_row, queued_global_col, queued_count = tf.train.batch(
[global_row, global_col, count],
batch_size=1,
num_threads=FLAGS.num_readers,
capacity=32)
queued_global_row = tf.reshape(queued_global_row, [submatrix_rows])
queued_global_col = tf.reshape(queued_global_col, [submatrix_cols])
queued_count = tf.reshape(queued_count, [submatrix_rows, submatrix_cols])
return queued_global_row, queued_global_col, queued_count
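# Illustrative sketch (not part of the original Swivel code): how one shard
# with the fields parsed by count_matrix_input() above could be serialized.
# The index and count values a caller passes in are hypothetical; real shards
# are expected to come from a separate preprocessing step.
def serialize_example_shard(global_rows, global_cols,
                            local_rows, local_cols, counts):
  """Returns one submatrix shard encoded as a serialized tf.Example."""
  def _int64(values):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
  def _float(values):
    return tf.train.Feature(float_list=tf.train.FloatList(value=values))
  example = tf.train.Example(features=tf.train.Features(feature={
      'global_row': _int64(global_rows),        # len == submatrix_rows
      'global_col': _int64(global_cols),        # len == submatrix_cols
      'sparse_local_row': _int64(local_rows),   # parallel sparse arrays
      'sparse_local_col': _int64(local_cols),
      'sparse_value': _float(counts),
  }))
  return example.SerializeToString()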
def read_marginals_file(filename):
"""Reads text file with one number per line to an array."""
with open(filename) as lines:
return [float(line) for line in lines]
def write_embedding_tensor_to_disk(vocab_path, output_path, sess, embedding):
"""Writes tensor to output_path as tsv"""
# Fetch the embedding values from the model
embeddings = sess.run(embedding)
with open(output_path, 'w') as out_f:
with open(vocab_path) as vocab_f:
for index, word in enumerate(vocab_f):
word = word.strip()
embedding = embeddings[index]
out_f.write(word + '\t' + '\t'.join([str(x) for x in embedding]) + '\n')
def write_embeddings_to_disk(config, model, sess):
"""Writes row and column embeddings disk"""
# Row Embedding
row_vocab_path = config.input_base_path + '/row_vocab.txt'
row_embedding_output_path = config.output_base_path + '/row_embedding.tsv'
log('Writing row embeddings to: %s', row_embedding_output_path)
write_embedding_tensor_to_disk(row_vocab_path, row_embedding_output_path,
sess, model.row_embedding)
# Column Embedding
col_vocab_path = config.input_base_path + '/col_vocab.txt'
col_embedding_output_path = config.output_base_path + '/col_embedding.tsv'
log('Writing column embeddings to: %s', col_embedding_output_path)
write_embedding_tensor_to_disk(col_vocab_path, col_embedding_output_path,
sess, model.col_embedding)
class SwivelModel(object):
"""Small class to gather needed pieces from a Graph being built."""
def __init__(self, config):
"""Construct graph for dmc."""
self._config = config
# Create paths to input data files
log('Reading model from: %s', config.input_base_path)
count_matrix_files = glob.glob(config.input_base_path + '/shard-*.pb')
row_sums_path = config.input_base_path + '/row_sums.txt'
col_sums_path = config.input_base_path + '/col_sums.txt'
# Read marginals
row_sums = read_marginals_file(row_sums_path)
col_sums = read_marginals_file(col_sums_path)
self.n_rows = len(row_sums)
self.n_cols = len(col_sums)
log('Matrix dim: (%d,%d) SubMatrix dim: (%d,%d)',
self.n_rows, self.n_cols, config.submatrix_rows, config.submatrix_cols)
self.n_submatrices = (self.n_rows * self.n_cols /
(config.submatrix_rows * config.submatrix_cols))
log('n_submatrices: %d', self.n_submatrices)
with tf.device('/cpu:0'):
# ===== CREATE VARIABLES ======
# Get input
global_row, global_col, count = count_matrix_input(
count_matrix_files, config.submatrix_rows, config.submatrix_cols)
# Embeddings
self.row_embedding = embeddings_with_init(
embedding_dim=config.embedding_size,
vocab_size=self.n_rows,
name='row_embedding')
self.col_embedding = embeddings_with_init(
embedding_dim=config.embedding_size,
vocab_size=self.n_cols,
name='col_embedding')
tf.summary.histogram('row_emb', self.row_embedding)
tf.summary.histogram('col_emb', self.col_embedding)
matrix_log_sum = math.log(np.sum(row_sums) + 1)
row_bias_init = [math.log(x + 1) for x in row_sums]
col_bias_init = [math.log(x + 1) for x in col_sums]
self.row_bias = tf.Variable(
row_bias_init, trainable=config.trainable_bias)
self.col_bias = tf.Variable(
col_bias_init, trainable=config.trainable_bias)
tf.summary.histogram('row_bias', self.row_bias)
tf.summary.histogram('col_bias', self.col_bias)
# Add optimizer
l2_losses = []
sigmoid_losses = []
self.global_step = tf.Variable(0, name='global_step')
opt = tf.train.AdagradOptimizer(config.learning_rate)
all_grads = []
devices = ['/gpu:%d' % i for i in range(FLAGS.num_gpus)] \
if FLAGS.num_gpus > 0 else get_available_gpus()
self.devices_number = len(devices)
with tf.variable_scope(tf.get_variable_scope()):
for dev in devices:
with tf.device(dev):
with tf.name_scope(dev[1:].replace(':', '_')):
# ===== CREATE GRAPH =====
# Fetch embeddings.
selected_row_embedding = tf.nn.embedding_lookup(
self.row_embedding, global_row)
selected_col_embedding = tf.nn.embedding_lookup(
self.col_embedding, global_col)
# Fetch biases.
selected_row_bias = tf.nn.embedding_lookup(
[self.row_bias], global_row)
selected_col_bias = tf.nn.embedding_lookup(
[self.col_bias], global_col)
# Multiply the row and column embeddings to generate predictions.
predictions = tf.matmul(
selected_row_embedding, selected_col_embedding,
transpose_b=True)
# These binary masks separate zero from non-zero values.
count_is_nonzero = tf.to_float(tf.cast(count, tf.bool))
count_is_zero = 1 - count_is_nonzero
objectives = count_is_nonzero * tf.log(count + 1e-30)
objectives -= tf.reshape(
selected_row_bias, [config.submatrix_rows, 1])
objectives -= selected_col_bias
objectives += matrix_log_sum
err = predictions - objectives
# The confidence function scales the L2 loss based on the raw
# co-occurrence count.
l2_confidence = (config.confidence_base +
config.confidence_scale * tf.pow(
count, config.confidence_exponent))
l2_loss = config.loss_multiplier * tf.reduce_sum(
0.5 * l2_confidence * err * err * count_is_nonzero)
l2_losses.append(tf.expand_dims(l2_loss, 0))
sigmoid_loss = config.loss_multiplier * tf.reduce_sum(
tf.nn.softplus(err) * count_is_zero)
sigmoid_losses.append(tf.expand_dims(sigmoid_loss, 0))
loss = l2_loss + sigmoid_loss
grads = opt.compute_gradients(loss)
all_grads.append(grads)
with tf.device('/cpu:0'):
# ===== MERGE LOSSES =====
l2_loss = tf.reduce_mean(tf.concat(l2_losses, 0), 0, name="l2_loss")
sigmoid_loss = tf.reduce_mean(tf.concat(sigmoid_losses, 0), 0,
name="sigmoid_loss")
self.loss = l2_loss + sigmoid_loss
average = tf.train.ExponentialMovingAverage(0.8, self.global_step)
loss_average_op = average.apply((self.loss,))
tf.summary.scalar("l2_loss", l2_loss)
tf.summary.scalar("sigmoid_loss", sigmoid_loss)
tf.summary.scalar("loss", self.loss)
# Apply the gradients to adjust the shared variables.
apply_gradient_ops = []
for grads in all_grads:
apply_gradient_ops.append(opt.apply_gradients(
grads, global_step=self.global_step))
self.train_op = tf.group(loss_average_op, *apply_gradient_ops)
self.saver = tf.train.Saver(sharded=True)
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
start_time = time.time()
# Create the output path. If this fails, it really ought to fail
# now. :)
if not os.path.isdir(FLAGS.output_base_path):
os.makedirs(FLAGS.output_base_path)
# Create and run model
with tf.Graph().as_default():
model = SwivelModel(FLAGS)
# Create a session for running Ops on the Graph.
gpu_opts = {}
if FLAGS.per_process_gpu_memory_fraction > 0:
gpu_opts["per_process_gpu_memory_fraction"] = \
FLAGS.per_process_gpu_memory_fraction
else:
gpu_opts["allow_growth"] = True
gpu_options = tf.GPUOptions(**gpu_opts)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
# Run the Op to initialize the variables.
sess.run(tf.global_variables_initializer())
# Start feeding input
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# Calculate how many steps each thread should run
n_total_steps = int(FLAGS.num_epochs * model.n_rows * model.n_cols) / (
FLAGS.submatrix_rows * FLAGS.submatrix_cols)
n_steps_per_thread = n_total_steps / (
FLAGS.num_concurrent_steps * model.devices_number)
n_submatrices_to_train = model.n_submatrices * FLAGS.num_epochs
t0 = [time.time()]
n_steps_between_status_updates = 100
status_i = [0]
status_lock = threading.Lock()
msg = ('%%%dd/%%d submatrices trained (%%.1f%%%%), %%5.1f submatrices/sec |'
' loss %%f') % len(str(n_submatrices_to_train))
def TrainingFn():
for _ in range(int(n_steps_per_thread)):
_, global_step, loss = sess.run((
model.train_op, model.global_step, model.loss))
show_status = False
with status_lock:
new_i = global_step // n_steps_between_status_updates
if new_i > status_i[0]:
status_i[0] = new_i
show_status = True
if show_status:
elapsed = float(time.time() - t0[0])
log(msg, global_step, n_submatrices_to_train,
100.0 * global_step / n_submatrices_to_train,
n_steps_between_status_updates / elapsed, loss)
t0[0] = time.time()
# Start training threads
train_threads = []
for _ in range(FLAGS.num_concurrent_steps):
t = threading.Thread(target=TrainingFn)
train_threads.append(t)
t.start()
# Wait for threads to finish.
for t in train_threads:
t.join()
coord.request_stop()
coord.join(threads)
# Write out vectors
write_embeddings_to_disk(FLAGS, model, sess)
# Shutdown
sess.close()
log("Elapsed: %s", time.time() - start_time)
if __name__ == '__main__':
tf.app.run()
| apache-2.0 | 7,985,290,216,479,188,000 | 37.555556 | 80 | 0.643466 | false |
kreatorkodi/repository.torrentbr | plugin.video.youtube/resources/lib/youtube_plugin/youtube/helper/tv.py | 1 | 5888 | __author__ = 'bromix'
from six import PY2
from ... import kodion
from ...youtube.helper import utils
from ...kodion.items.video_item import VideoItem
def my_subscriptions_to_items(provider, context, json_data, do_filter=False):
result = []
video_id_dict = {}
incognito = str(context.get_param('incognito', False)).lower() == 'true'
filter_list = []
black_list = False
if do_filter:
black_list = context.get_settings().get_bool('youtube.filter.my_subscriptions_filtered.blacklist', False)
filter_list = context.get_settings().get_string('youtube.filter.my_subscriptions_filtered.list', '')
filter_list = filter_list.replace(', ', ',')
filter_list = filter_list.split(',')
filter_list = [x.lower() for x in filter_list]
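        # Note (assumption based on the parsing above, not on add-on docs): the
        # 'youtube.filter.my_subscriptions_filtered.list' setting is treated as
        # a comma-separated list of channel names; matching is case-insensitive
        # and black_list decides whether matches are dropped or kept.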
items = json_data.get('items', [])
for item in items:
channel = item['channel'].lower()
channel = channel.replace(',', '')
if PY2:
channel = channel.encode('utf-8', 'ignore')
if not do_filter or (do_filter and (not black_list) and (channel in filter_list)) or \
(do_filter and black_list and (channel not in filter_list)):
video_id = item['id']
item_params = {'video_id': video_id}
if incognito:
item_params.update({'incognito': incognito})
item_uri = context.create_uri(['play'], item_params)
video_item = VideoItem(item['title'], uri=item_uri)
if incognito:
video_item.set_play_count(0)
result.append(video_item)
video_id_dict[video_id] = video_item
use_play_data = not incognito and context.get_settings().use_playback_history()
channel_item_dict = {}
utils.update_video_infos(provider, context, video_id_dict, channel_items_dict=channel_item_dict, use_play_data=use_play_data)
utils.update_fanarts(provider, context, channel_item_dict)
# next page
next_page_token = json_data.get('next_page_token', '')
if next_page_token or json_data.get('continue', False):
new_params = {}
new_params.update(context.get_params())
new_params['next_page_token'] = next_page_token
new_params['offset'] = int(json_data.get('offset', 0))
new_context = context.clone(new_params=new_params)
current_page = int(new_context.get_param('page', 1))
next_page_item = kodion.items.NextPageItem(new_context, current_page, fanart=provider.get_fanart(new_context))
result.append(next_page_item)
return result
def tv_videos_to_items(provider, context, json_data):
result = []
video_id_dict = {}
incognito = str(context.get_param('incognito', False)).lower() == 'true'
items = json_data.get('items', [])
for item in items:
video_id = item['id']
item_params = {'video_id': video_id}
if incognito:
item_params.update({'incognito': incognito})
item_uri = context.create_uri(['play'], item_params)
video_item = VideoItem(item['title'], uri=item_uri)
if incognito:
video_item.set_play_count(0)
result.append(video_item)
video_id_dict[video_id] = video_item
use_play_data = not incognito and context.get_settings().use_playback_history()
channel_item_dict = {}
utils.update_video_infos(provider, context, video_id_dict, channel_items_dict=channel_item_dict, use_play_data=use_play_data)
utils.update_fanarts(provider, context, channel_item_dict)
# next page
next_page_token = json_data.get('next_page_token', '')
if next_page_token or json_data.get('continue', False):
new_params = {}
new_params.update(context.get_params())
new_params['next_page_token'] = next_page_token
new_params['offset'] = int(json_data.get('offset', 0))
new_context = context.clone(new_params=new_params)
current_page = int(new_context.get_param('page', 1))
next_page_item = kodion.items.NextPageItem(new_context, current_page, fanart=provider.get_fanart(new_context))
result.append(next_page_item)
return result
def saved_playlists_to_items(provider, context, json_data):
result = []
playlist_id_dict = {}
incognito = str(context.get_param('incognito', False)).lower() == 'true'
thumb_size = context.get_settings().use_thumbnail_size()
items = json_data.get('items', [])
for item in items:
title = item['title']
channel_id = item['channel_id']
playlist_id = item['id']
image = utils.get_thumbnail(thumb_size, item.get('thumbnails', {}))
item_params = {}
if incognito:
item_params.update({'incognito': incognito})
item_uri = context.create_uri(['channel', channel_id, 'playlist', playlist_id], item_params)
playlist_item = kodion.items.DirectoryItem(title, item_uri, image=image)
playlist_item.set_fanart(provider.get_fanart(context))
result.append(playlist_item)
playlist_id_dict[playlist_id] = playlist_item
channel_items_dict = {}
utils.update_playlist_infos(provider, context, playlist_id_dict, channel_items_dict)
utils.update_fanarts(provider, context, channel_items_dict)
# next page
next_page_token = json_data.get('next_page_token', '')
if next_page_token or json_data.get('continue', False):
new_params = {}
new_params.update(context.get_params())
new_params['next_page_token'] = next_page_token
new_params['offset'] = int(json_data.get('offset', 0))
new_context = context.clone(new_params=new_params)
current_page = int(new_context.get_param('page', 1))
next_page_item = kodion.items.NextPageItem(new_context, current_page, fanart=provider.get_fanart(new_context))
result.append(next_page_item)
return result
| gpl-2.0 | 8,029,044,386,622,593,000 | 37.48366 | 129 | 0.631284 | false |
lwerdna/chess | PgnParser.py | 1 | 9869 | #!/usr/bin/python
# Copyright 2012-2016 Andrew Lamoureux
#
# This file is a part of FunChess
#
# FunChess is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import re
import sys
import copy
import Common
import ChessMove
import PgnTokenizer
from ChessState import ChessState
###############################################################################
# Match
# - contains tags, comments, moves, and states of a bughouse chess match
# - is able to load itself from bpgn match text
###############################################################################
class PgnChessMatch:
def __init__(self):
self.initState = ChessState(Common.initChessFEN)
self.moves = []
self.tags = {}
self.comments = []
self.states = [self.initState]
self.result = None
def copy(self):
return copy.deepcopy(self)
# - parses, populates the tags member
# - parses, populates the moves member
# - parses, populates the comments member
# - calculates the states member
#
def parsePgn(self, text):
tokens = PgnTokenizer.tokenize(text)
currMoveNum = 0
player = 'W'
while tokens:
token = tokens.pop(0)
#print "on token: -%s-" % token
# tag tokens eg: [Event "May 2013 Tourney"]
m = re.match(r'\[(.*?) "(.*?)"\]', token)
if m:
self.tags[m.group(1)] = m.group(2)
continue
# comment tokens eg: { good move! also consider Rxe8 }
m = re.match('^{(.*)}$', token)
if m:
# if we're in the moves section, comment applies to a move
if self.moves:
self.moves[-1].addComment(m.group(1))
# else it applies to the match comments
else:
self.comments.append(m.group(1))
continue
# result tokens eg: 0-1
m = re.match(Common.regexResults, token)
if m:
self.result = token
if tokens:
raise Exception("result token was not the final token! next is: " + tokens[0])
continue
# move number token eg: 34.
m = re.match(r'(\d+)\.', token)
if m:
if currMoveNum + 1 != int(m.group(1)):
raise Exception("out of order move number: " + token)
player = 'w'
currMoveNum += 1
# normal move (SAN)
m = re.match(Common.regexSanChess, token)
if m:
move = ChessMove.ChessMove()
move.moveNum = currMoveNum
move.player = player
move.san = token
self.moves.append(move)
player = {'w':'b', 'b':'w'}[player]
# calculate all board states
#
# initial state? or special state? (Fischer960, etc.)
if 'SetUp' in self.tags and self.tags['SetUp'] == '1':
if 'FEN' in self.tags:
self.initState = ChessState(self.tags['FEN'])
self.states = [self.initState]
# loop over all moves...
for move in self.moves:
# exceptions (repeated moves due to time forfeiture, etc.) just carry state along...
if 'TIME_FORFEIT' in move.flags:
self.states.append(self.states[-1])
continue
currState = self.states[-1]
nextState = currState.transition(move)
self.states.append(nextState)
def __str__(self):
answer = ''
#answer = '%s[%s],%s[%s] vs %s[%s],%s[%s]\n' % ( \
# self.tags['WhiteA'], self.tags['WhiteAElo'], self.tags['BlackA'], self.tags['BlackAElo'], \
# self.tags['BlackB'], self.tags['BlackBElo'], self.tags['WhiteA'], self.tags['WhiteAElo'] \
#)
for tag,value in self.tags.iteritems():
answer += "[%s \"%s\"]\n" % (tag, value)
#answer += "COMMENTS:\n"
#for c in self.comments:
# answer += c + "\n"
#answer += "MOVES (%d total):\n" % len(self.moves)
for m in self.moves:
answer += str(m) + ' '
# blah
answer += self.result
# done
return answer
###############################################################################
# PgnChessMatchIteratorFile
# - return matches from file containing multiple matches
# - basically, split the text around '[Event "..."]' tags
###############################################################################
class PgnChessMatchIteratorFile:
def __init__(self, path):
self.path = path
self.fp = open(path, 'r')
self.lineNum = -1
def __iter__(self):
self.fp.seek(0, 0)
self.lineNum = -1
return self
def peekLine(self, doStrip=1):
line = self.fp.readline()
self.fp.seek(-1*len(line), 1)
if doStrip:
line = line.rstrip()
return line
def readLine(self):
self.lineNum += 1
temp = self.fp.readline()
#print "read: %s" % temp
return temp
def consumeNewLines(self):
while 1:
line = self.peekLine(False)
if not line:
return False
if not re.match(r'^\s+$', line):
break
self.readLine()
return True
# strategy here is simple: consume lines until an Event tag is found
# in other words, Event tags delimit the matches
def next(self):
if not self.consumeNewLines():
raise StopIteration
matchText = self.readLine()
if not re.match(r'^\[Event', matchText):
raise Exception(("expected Event tag at %s:%d\n" + \
"(instead got: %s)") % (self.path, self.lineNum, matchText))
# so long as the next line is not an Event tag, add to current match
while 1:
line = self.peekLine()
if re.match(r'^\[Event ', line):
break
matchText += '\n' + line
# consume the peek'd line, breaking if error
if not self.readLine():
break
# return a match
match = PgnChessMatch()
match.path = self.path
match.parsePgn(matchText)
return match
def __del__(self):
if self.fp:
self.fp.close()
self.fp = None
###############################################################################
# MatchIteratorDir
# - return matches from a directory containing files
# - basically, loop over MatchIteratorFile for every file in a directory
###############################################################################
class PgnChessMatchIteratorDir:
def __init__(self, path):
self.walkObj = os.walk(path)
self.matchIterFileObj = None
self.filesList = []
def __iter__(self):
return self
def next(self):
while 1:
# first level: does the file iterator still have something left?
if self.matchIterFileObj:
try:
return self.matchIterFileObj.next()
except StopIteration:
self.matchIterFileObj = None
# second level, is current list of files exhausted? can we create a new
# file iterator?
if self.filesList:
self.matchIterFileObj = PgnChessMatchIteratorFile(self.filesList.pop())
continue
# third level: no file iterator, no files list, descend!
# purposely don't trap exception: StopIterations should bubble up and tell
# caller that we're done
(root, subFolder, files) = self.walkObj.next()
for f in files:
(dummy, ext) = os.path.splitext(f)
if ext == '.bpgn':
self.filesList.append(os.path.join(root, f))
###############################################################################
# main()
###############################################################################
if __name__ == '__main__':
gamesCount = 0
goodGamesCount = 0
path = sys.argv[1]
it = None
if os.path.isfile(path):
it = PgnChessMatchIteratorFile(path)
elif os.path.isdir(path):
it = PgnChessMatchIteratorDir(path)
else:
raise Exception("WTF?")
for m in it:
gamesCount += 1
try:
m.sanityCheck()
#except MatchMovesOOOException as e:
# print "%s: skipping match due to out of order (or missing) moves\n%s\n%s" % (m.path, '\n'.join(m.comments), str(e))
# continue
#except MatchZeroMovesException as e:
# print "%s: skipping match due to it being empty (no moves whatsoever)\n%s\n%s" % (m.path, '\n'.join(m.comments), str(e))
# continue
except Exception as e:
print e
for s in m.states:
print s
goodGamesCount += 1
#raw_input("hit enter for next game")
print "%d/%d games are good (%02.2f%%)" % (goodGamesCount, gamesCount, 100.0*goodGamesCount/gamesCount)
| gpl-3.0 | 1,263,938,477,565,290,800 | 31.251634 | 133 | 0.510791 | false |
poppogbr/genropy | gnrpy/gnr/pdf/test/testsuite.py | 1 | 1391 | from gnr.pdf.gnrpdf import GnrPdf
from reportlab.lib.units import inch
def testPage(root):
for x in range(2):
page = root.page(x=1 * inch, y=1 * inch)
page.setFont("Helvetica", 9)
pane1 = page.pane(x_=1, y_=15)
pane2 = page.pane(x_=9, y_=9)
pane2.setFont("Helvetica", 12)
pane1.rect(x=0, y=0, width_='10', height_='5')
pane3 = pane1.pane(x_=2, y_=2)
pane3.rect(x=0, y=0, width_='7', height_='2')
pane1.setFillGray(gray=0.4)
pane1.drawString(x_=1, y_=4, text="Hello World")
pane2.drawString(x_=1, y_=4, text="Hello World")
#
#textobject = pane2.textObject(x_=1, y_=3)
##textobject = canvas.beginText()
##textobject.setTextOrigin(inch, 2.5*inch)
#textobject.setFont("Helvetica-Oblique", 14)
#for line in lyrics:
# textobject.textLine(line)
#textobject.setFillGray(0.4)
#textobject.textLines('''
#With many apologies to the Beach Boys
#and anyone else who finds this objectionable
#''')
##canvas.drawText(textobject)
#
if __name__ == '__main__':
pdf = GnrPdf('/testsuite.pdf', unit='cm')
root = pdf.root
testPage(root)
pdf.draw()
pdf.save()
pdf.toXml('/testsuite.xml')
f = open('/testsuite.txt', 'w')
f.write('\n'.join(pdf._log))
f.close() | lgpl-2.1 | -5,206,272,216,680,700,000 | 26.294118 | 56 | 0.558591 | false |
ecbtln/1411wrightfisher | wright_fisher.py | 1 | 24312 | __author__ = '[email protected], [email protected]'
import math
import numpy as np
from random import choice
from inspect import isfunction
from matplotlib import pyplot as plt
from UserDict import IterableUserDict
import random
import operator
import heapq
from progressbar import AnimatedProgressBar
# This is used to help debug the code in case of unexpected output. This will start the simulation at a particular
# state (a tuple of the signals_sent, and the receiver strategies), where each is a list of dictionaries of the
# appropriate length.
DEBUG_STATE = None
# The precision of the decimal comparison operations this should not need any changing
DECIMAL_PRECISION = 5
# Colors used to plot the senders and receivers
GRAPH_COLORS = 'mcrgbyk'
class SparseDictionary(IterableUserDict):
"""
A helper dictionary that helps minimize the overhead of storing continuous actions. Instead of storing keys
for every possible strategy, we make use of the fact that keys will be queried in order and that this dictionary
will only be used to store cumulative frequencies.
"""
def __init__(self, asc=True, default=0.0, *args, **kwargs):
"""
Initialize the sparse SparseDictionary
:param asc: whether the dictionary will be queried in ascending or descending order. Ascending corresponds
to sender payoffs where we accumulate upwards, and descending corresponds to receiver payoffs where we are
accumulating downwards
:param default: The default value to return if the key does not have a value associated with it
"""
IterableUserDict.__init__(self, *args, **kwargs)
self.default = default
if asc:
self.cmp = operator.lt
else:
self.cmp = operator.gt
self.history = []
self.last_key, self.last_value = None, None
def __getitem__(self, item):
try:
out = IterableUserDict.__getitem__(self, item)
self.last_key = item
self.last_value = out
return out
except KeyError as e:
if self.last_key is None or self.cmp(item, self.last_key):
return self.default
else:
return self.last_value
class WrightFisher(object):
"""
A robust Wright-Fisher simulator of the costly signaling model, that allows for a variety of sender/receiver
modifications and combinations and options for parameters.
"""
def __init__(self, wages=(5,), sender_dist=(2.0/3.0, 1.0/3.0), w=0.15, u=0.02, receiver_prop=1.0/2.0, cost_fns = (lambda x: x * 3, lambda x: x), signals=(0, 1, 2, 3), receiver_dist = (1.0,), receiver_payoffs=((0, 10),), pop_size=100, fitness_func = lambda p, w: math.e**(p*w), animated_progress=True):
"""
Construct a WrightFisher simulator with the desired parameters to be simulated one or more times.
:param wages: a list of wages that receiver i needs to pay any sender whom it accepts.
:param sender_dist: a probability distribution identifying how the senders will be divided by sender type.
The sum of this must be 1, and this will also specify the number of types of senders there are
:param w: the selection strength associated with the simulation
:param u: the mutation rate, the probability that a given individual does not keep the same strategy but instead
randomly chooses a new strategy
:param receiver_prop: the proportion of the pop_size that wll be devoted to receivers, (1 - receiver_prop) will
be devoted to senders.
:param cost_fns: The cost functions for each type of sender, which can be passed in as callables or dictionaries
mapping a signal to its cost
:param signals: a list of all possible signals that can be sent
:param receiver_dist: the distribute of proportion of receivers to each possible receiver type.
:param receiver_payoffs: a list of payoffs that the receiver of type i receives for accepting a sender of type j
:param pop_size: the population size used for the simulations, note this this should be sufficiently large
relative to the number of possible signals
:param fitness_func: a function that takes as arguments a payoff and selection strength and outputs fitness
:param animated_progress: whether or not to display an animated progress bar while performing the simulation
"""
# Verify the correctness and compatibility of the parameters
assert math.fsum(sender_dist) == 1.0, "the sender distribution must be a valid probability distribution"
assert math.fsum(receiver_dist) == 1.0, "the receiver distribution must be a valid probability distribution"
assert len(sender_dist) == len(cost_fns), "the number of elements in the sender distribution must be equal to the number of elements in the cost functions"
for x in receiver_payoffs:
assert len(x) == len(sender_dist), "the number of elements in each of the receiver payoffs must be equal to the number of senders"
assert len(receiver_dist) == len(receiver_payoffs) == len(wages), "the number of of elements in the receiver distribution, the receiver's payoffs, and the number of wages must all equal the number of total receiver types"
assert len(sender_dist) > 1, "this model only makes sense with more than one type of sender"
assert len(receiver_dist) > 0, "this model only makes sense with a nonzero number of senders"
assert isinstance(pop_size, int), "the population size must be an integer, not something else"
assert len(signals) == len(set(signals)), "the list of signals should not have any repeated elements"
self.animated_progress = animated_progress
self.wages = wages # benefit for being accepted by a given receiver
self.sender_dist = sender_dist
self.receiver_dist = receiver_dist
self.n_types_of_senders = len(sender_dist)
self.n_types_of_receivers = len(receiver_dist)
self.w = w
self.u = u
self.num_signals = len(signals)
self.signals = signals
cost_functions_by_index = []
# cost_fns can be inputted as either arrays (corresponding to the signals), or functions (mapping signal to cost)
# we want to map them to arrays before we begin
for f in cost_fns:
if isinstance(f, (tuple, list)):
assert len(f) == self.num_signals, "the list of payoffs for a given sender must be equal to the number of signals"
cost_functions_by_index.append(f)
else:
assert isfunction(f)
x = [f(s) for s in self.signals]
cost_functions_by_index.append(x)
self.cost_fns_by_signal_index = cost_functions_by_index # for each sender, a lookup table mapping the signal's index (in the signals array) to its cost
# for convenience, we also want to make a direct mapping of all signals to their costs
self.cost_fns = [{signals[i]:x[i] for i, s in enumerate(signals)} for x in cost_functions_by_index]
self.signals = signals
self.receiver_payoffs = receiver_payoffs
self.n_types_of_receivers = len(receiver_dist)
self.fitness_func = lambda p: fitness_func(p, w)
assert pop_size is not None
self.num_senders = [pop_size * x * (1 - receiver_prop) for x in sender_dist]
total_receivers = pop_size * receiver_prop
self.num_receivers = [total_receivers * x for x in receiver_dist]
self.pop_size = pop_size
self.num_senders = self._round_individuals(self.num_senders)
self.num_receivers = self._round_individuals(self.num_receivers)
self.index_of_signal = {s:i for i, s in enumerate(self._possible_receiver_strategies())}
def _round_given_type(self, unrounded_dict, desired_total):
"""
Converts a given sender or receiver's distribution, given as a dictionary, and scales it proportionally to add
to the desired_total
:param unrounded_dict: a weighted distribution of the number of senders and receivers sending each signal
:param desired_total: the total to which the aggregate sum should be scaled
"""
unrounded_total = sum(unrounded_dict[k] for k in unrounded_dict)
total = int(round(unrounded_total, DECIMAL_PRECISION))
assert total == desired_total
int_nums = {k:int(unrounded_dict[k]) for k in unrounded_dict}
diff = total - sum(int_nums[k] for k in int_nums)
if diff > 0:
thresh = [((int_nums[k] - unrounded_dict[k]), k) for k in int_nums]
heapq.heapify(thresh)
while diff > 0:
v, i = heapq.heappop(thresh)
int_nums[i] += 1
diff -= 1
assert sum(int_nums[k] for k in int_nums) == total
return int_nums
def _round_individuals(self, unrounded_frequencies):
"""
Due to integer cutoffs, the number of senders and receivers might not be consistent. This take the integer part of each
of the inputs and then assign the remaining few leftovers (so that the sum is the sum of the original floats)
in a way such that the numbers with higher decimal parts will get the extra int before those with lower.
"""
unrounded_total = math.fsum(unrounded_frequencies)
total = int(round(unrounded_total, DECIMAL_PRECISION))
int_num_senders = [int(x) for x in unrounded_frequencies]
diff = total - sum(int_num_senders)
if diff > 0:
# note the difference needs to be negative, because heapq's only implement a minimum priority queue but we want max priority queue
thresh = [((x - y), i) for i, (x, y) in enumerate(zip(int_num_senders, unrounded_frequencies))]
heapq.heapify(thresh)
while diff > 0:
v, i = heapq.heappop(thresh)
int_num_senders[i] += 1
diff -= 1
assert sum(int_num_senders) == total, "the total number of individuals after rounding must be the same as before rounding"
return int_num_senders
def _normalize_to_pop_size(self, senders, receivers):
""" Takes in a list of distributions of senders and receivers and rounds each distribution of each type such that
each type is scaled back to the appropriate total (since each type's population remains constant
:param senders: the list of sender proportions
:param receivers: the list of receiver proportions
:return sender, receivers: a tuple of the scaled versions of the inputs
"""
# to normalize, the sum at index i of senders should correspond to self.sender_dist at index i
total_senders = [sum(d[k] for k in d) for d in senders]
total_receivers = [sum(d[k] for k in d) for d in receivers]
signals_sent = [{k:y[k] * N / total for k in y} for y, N, total in zip(senders, self.num_senders, total_senders)]
receiver_strats = [{k:y[k] * N / total for k in y} for y, N, total in zip(receivers, self.num_receivers, total_receivers)]
for i in xrange(self.n_types_of_senders):
signals = signals_sent[i]
signals_sent[i] = self._round_given_type(signals, self.num_senders[i])
assert sum(sum(x[k] for k in x) for x in signals_sent) == sum(self.num_senders)
for i in xrange(self.n_types_of_receivers):
signals = receiver_strats[i]
receiver_strats[i] = self._round_given_type(signals, self.num_receivers[i])
assert sum(sum(x[k] for k in x) for x in receiver_strats) == sum(self.num_receivers)
return signals_sent, receiver_strats
def _compute_avg_cost(self, signals_by_sender_type):
"""
:param signals_by_sender_type: an array of senders, and each sender has a dictionary mapping a signal sent and the proportion of the population sending that signal.
:Returns: the average signal sent by each sender type, as an array
"""
out = []
for f, signals in zip(self.cost_fns, signals_by_sender_type):
sum_n = 0
sum_v = 0
for k in signals:
sum_n += signals[k]
sum_v += signals[k] * f[k]
out.append(float(sum_v) / sum_n)
return out
def _compute_acceptance_frequencies(self, receiver_strategies):
"""
:returns: an array of dictionaries mapping a key (the signal sent) to a value (the proportion of receivers accepting
that signal) for every type of receiver
"""
overall_out = []
for z in receiver_strategies:
out = {}
def increment(k, v):
out[k] = out.get(k, 0) + v
for k in z:
increment(k, z[k])
signals = sorted(list(out.keys()))
# make the frequency distribution into cumulative sums
for i in xrange(len(signals) - 1):
out[signals[i+1]] += out[signals[i]]
frequency_accepted = SparseDictionary()
for x in signals:
frequency_accepted[x] = float(out[x])/out[signals[-1]]
overall_out.append(frequency_accepted)
return overall_out
def _compute_type_frequencies(self, signals_sent_by_sender):
"""
:returns: a dictionary mapping a key (the signal accepted), to an array, where each value at index i is the
likelihood of having accepted a sender with that type
"""
out = {}
sums = {}
def increment(x, s_index, val):
sums[x] = sums.get(x, 0) + val
likelihood = out.get(x, None)
if likelihood is None:
out[x] = np.zeros(self.n_types_of_senders)
likelihood = out[x]
likelihood[s_index] += val
for s_index, sender in enumerate(signals_sent_by_sender):
for x in sender:
increment(x, s_index, sender[x])
signals = sorted(list(out.keys()))
# we go in opposite order as above because we are now change the receiver signal chosen, so lower means more, not
# less, will be accepted
for i in reversed(xrange(1, len(signals))):
out[signals[i-1]] += out[signals[i]] # numpy element-wise addition
total = sum(out[signals[0]])
retvalue = SparseDictionary(asc=False, default=[0]*self.n_types_of_senders)
for s in signals:
retvalue[s] = out[s]
return retvalue
def _mean_of_frequency_table(self, freq_table):
""" Compute the mean of a frequency table, which is a dictionary mapping values to their frequencies """
s = 0
tv = 0
for k in freq_table:
num = freq_table[k]
s += num
tv += k * num
return float(tv)/s
def _possible_receiver_strategies(self):
return self.signals
def simulate(self, num_gens=1000, show_signals_graph=True):
"""
Performs a simulation on the given WrightFisher simulation object to a desired number of generations and
defaulting to showing both the average cost of each sender type as well as the average signals of each sender
and receiver type
:param num_gens: the number of iterations to run the simulation for
:param show_signals_graph: whether or not to show the supplemental graph
"""
# if the DEBUG flag is turned on
if DEBUG_STATE is not None:
signals_sent, receiver_strats = DEBUG_STATE
else:
# initialize the state of the world to same random state, given the restrictions on the counts for the number of each player population
# for each type of sender, randomly initialize a signal for each sender and store them as a frequency table
signals_sent = []
for x in self.num_senders:
sender_freqs = {}
for i in xrange(x):
c = choice(self.signals)
sender_freqs[c] = sender_freqs.get(c, 0) + 1
signals_sent.append(sender_freqs)
# for each receiver, randomly initialize a strategy based on the existing signals (plus a reject all)
possible_receiver_strats = self._possible_receiver_strategies()
receiver_strats = []
for x in self.num_receivers:
receiver_freqs = {}
for i in xrange(x):
c = choice(possible_receiver_strats)
receiver_freqs[c] = receiver_freqs.get(c, 0) + 1
receiver_strats.append(receiver_freqs)
avg_cost_signals_sent = np.zeros((num_gens, self.n_types_of_senders))
avg_signals_sent = np.zeros((num_gens, self.n_types_of_senders))
avg_signals_sent[0, :] = [self._mean_of_frequency_table(x) for x in signals_sent]
avg_cost_signals_sent[0, :] = self._compute_avg_cost(signals_sent)
avg_signals_accepted = np.zeros((num_gens, self.n_types_of_receivers))
avg_signals_accepted[0, :] = [self._mean_of_frequency_table(x) for x in receiver_strats]
if self.animated_progress:
# start the animated progress bar, if the bool is enabled
progress_bar = AnimatedProgressBar(end=num_gens, width=80)
print progress_bar,
# Iterate through all the generations
for t in xrange(num_gens - 1):
# determine payoffs of each player
# 1. for each type of receiver and for each strategy, determine proportion of receivers
# accepting that strategy
acceptance_ratios = self._compute_acceptance_frequencies(receiver_strats)
# 2. for each type of sender, compute payoff for each possible signal
sender_payoffs = [[sum(acceptance_ratios[r_i][s]*w - f[s] for r_i, w in enumerate(self.wages)) for s in self.signals] for f in self.cost_fns]
# 3. compute payoffs for each possible receiver strategy for each possible receiver
sender_likelihoods = self._compute_type_frequencies(signals_sent)
receiver_payoffs = [[sum(sender_likelihoods[x][i]* (r_payoffs[i] - w) for i in reversed(xrange(self.n_types_of_senders))) / (self.pop_size / 5) for x in self._possible_receiver_strategies()] for w, r_payoffs in zip(self.wages, self.receiver_payoffs)]
# compute fitnesses
# this is a lookup table, where for each type of sender, we have the function for each possible strategy
f_senders = [[self.fitness_func(p) for p in x] for x in sender_payoffs]
f_receivers = [[self.fitness_func(p) for p in x] for x in receiver_payoffs]
# generate frequencies for next generation, with some mutation rate ,u
# we use a slightly different strategy than that included in the problem set. Instead of using a random
# number generate to index into the cumulative distribution of fitnesses of individuals, we instead allocate
# the exact fitness (as a decimal) for number of people in the ensuing population, and then normalize over
# the sum of these fitnesses. This strategy seems to be more effective, as it reduces the random noise that
# was present in the simulations for the problem set.
new_signals_sent = []
for i, signals_sent_by_sender in enumerate(signals_sent):
new_freqs = {}
fitnesses = f_senders[i]
for signal in signals_sent_by_sender:
num = signals_sent_by_sender[signal]
for j in xrange(num):
if random.random() < self.u:
cur_signal = choice(self.signals)
else:
cur_signal = signal
idx = self.index_of_signal[cur_signal]
assert cur_signal == self.signals[idx] # make sure the lookup table is correct
f = fitnesses[idx]
old = new_freqs.get(cur_signal, 0)
new_freqs[cur_signal] = old + f
assert new_freqs[cur_signal] > old # make sure no overflow
new_signals_sent.append(new_freqs)
signals_sent = new_signals_sent
#needs to repeat for all types of senders
new_signals_received = []
for i, signals_sent_by_receiver in enumerate(receiver_strats):
new_freqs = {}
fitnesses = f_receivers[i]
for signal in signals_sent_by_receiver:
num = signals_sent_by_receiver[signal]
for j in xrange(num):
if random.random() < self.u:
cur_signal = choice(self.signals)
else:
cur_signal = signal
idx = self.index_of_signal[cur_signal]
assert cur_signal == self.signals[idx]
f = fitnesses[idx]
old = new_freqs.get(cur_signal, 0)
new_freqs[cur_signal] = old + f
assert new_freqs[cur_signal] > old # make sure no overflow
new_signals_received.append(new_freqs)
receiver_strats = new_signals_received
# now we need to normalize new_signals and receiver_strats back down to their original population sizes
signals_sent, receiver_strats = self._normalize_to_pop_size(signals_sent, receiver_strats)
# We now want to update our running totals
avg_signals_sent[t + 1, :] = [self._mean_of_frequency_table(x) for x in signals_sent]
avg_cost_signals_sent[t + 1, :] = self._compute_avg_cost(signals_sent)
avg_signals_accepted[t + 1, :] = [self._mean_of_frequency_table(x) for x in receiver_strats]
if self.animated_progress:
# print the progress bar, if it is enabled
print '\r',
print progress_bar + 1,
# plot the results
self._plot_results(avg_signals_sent, avg_cost_signals_sent, avg_signals_accepted, num_gens, show_signals_graph=show_signals_graph)
def _plot_results(self, avg_signals_sent, avg_costs, avg_accepted, t, show_signals_graph=False):
colors = GRAPH_COLORS
x_axis = range(t)
plt.figure(1, figsize=(12, 8), dpi=80, facecolor='w', edgecolor='k')
if show_signals_graph:
plt.subplot(211)
for sender_type_idx in xrange(self.n_types_of_senders):
plt.plot(x_axis, avg_costs[: t, sender_type_idx], colors[sender_type_idx], label='S_%d' % sender_type_idx)
if not show_signals_graph:
plt.legend(borderaxespad=0, bbox_to_anchor=(1.01, 1), loc=2)
plt.xlabel('Generation')
plt.title('Costly Signaling in Wright Fisher')
plt.ylabel('Average cost of signal')
plt.ylim(self.signals[0], np.max(avg_costs))
# show supplemental graph to help interpret results, this one will just show the signal sent and received by
# all parties over time
if show_signals_graph:
plt.subplot(212)
for sender_type_idx in xrange(self.n_types_of_senders):
plt.plot(x_axis, avg_signals_sent[: t, sender_type_idx], colors[sender_type_idx], label='S_%d' % sender_type_idx)
for receiver_type_idx in xrange(self.n_types_of_receivers):
plt.plot(x_axis, avg_accepted[: t, receiver_type_idx], colors[self.n_types_of_senders + receiver_type_idx], label='R_%d' % receiver_type_idx)
plt.legend(loc=3, borderaxespad=0, ncol=self.n_types_of_senders + self.n_types_of_receivers, mode="expand", bbox_to_anchor=(0., -.22, 1., .102))
plt.ylabel('Average signal')
plt.ylim(self.signals[0], self.signals[-1])
plt.show()
if __name__ == '__main__':
w = WrightFisher(pop_size=100, signals=(0, 1, 2, 4))
w.simulate(num_gens=10000)
| mit | 6,413,463,132,501,530,000 | 46.116279 | 305 | 0.622861 | false |
Jigsaw-Code/censoredplanet-analysis | pipeline/metadata/ip_metadata.py | 1 | 10754 | # Copyright 2020 Jigsaw Operations LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IP Metadata is a class to add network metadata to IPs."""
import csv
import datetime
import logging
import re
from typing import Dict, Optional, Tuple, Iterator
import apache_beam.io.filesystem as apache_filesystem
import apache_beam.io.filesystems as apache_filesystems
import pyasn
from pipeline.metadata.ip_metadata_interface import IpMetadataInterface
# These are the latest CAIDA files stored in CLOUD_DATA_LOCATION
# TODO: Add a feature to update.py that updates these files automatically
# and get the latest file here instead.
LATEST_AS2ORG_FILEPATH = "as-organizations/20200701.as-org2info.txt.gz"
LATEST_AS2CLASS_FILEPATH = "as-classifications/20200801.as2types.txt.gz"
# The as-org2info.txt file contains two tables
# Comment lines with these headers divide the tables.
ORG_TO_COUNTRY_HEADER = "# format:org_id|changed|org_name|country|source"
AS_TO_ORG_HEADER = "# format:aut|changed|aut_name|org_id|opaque_id|source"
def _read_compressed_file(filepath: str) -> Iterator[str]:
"""Read in a compressed file as a decompressed string iterator.
Args:
filepath: a path to a compressed file. Could be either local like
'/tmp/text.txt.gz' or a gcs file like
'gs://censoredplanet_geolocation/caida/as-classifications/as2types.txt.gz'
Returns:
An generator per-line reader for the file
"""
f: apache_filesystem.CompressedFile = apache_filesystems.FileSystems.open(
filepath)
while True:
line = f.readline()
if not line:
f.close()
return
# Remove the newline char
yield str(line, "utf-8")[:-1]
def _parse_asn_db(f: Iterator[str]) -> pyasn.pyasn:
"""Returns a pyasn db from a routeview file.
Args:
f: an routeview file Iterator
Returns:
pyasn database object
"""
# CAIDA file lines are stored in the format
# 1.0.0.0\t24\t13335
# but pyasn wants lines in the format
# 1.0.0.0/24\t13335
formatted_lines = map(
lambda line: re.sub(r"(.*)\t(.*)\t(.*)", r"\1/\2\t\3", line), f)
as_str = "\n".join(formatted_lines)
del formatted_lines
asn_db = pyasn.pyasn(None, ipasn_string=as_str)
return asn_db
def _parse_as_to_org_map(
f: Iterator[str]) -> Dict[int, Tuple[str, Optional[str], Optional[str]]]:
org2country_map = _parse_org_name_to_country_map(f)
as2org_map = _parse_as_to_org_map_remainder(f, org2country_map)
return as2org_map
def _parse_org_name_to_country_map(
f: Iterator[str]) -> Dict[str, Tuple[str, str]]:
# pyformat: disable
"""Returns a mapping of AS org short names to country info from a file.
This function only reads up to the AS_TO_ORG_HEADER and leaves the rest of the
iterator still readable.
Args:
f: an iterator containing the content of a as-org2info.txt file
File is of the format
# some comment lines
ORG_TO_COUNTRY_HEADER
1|20120224|LVLT-1|LVLT-ARIN|e5e3b9c13678dfc483fb1f819d70883c_ARIN|ARIN
...
AS_TO_ORG_HEADER
LVLT-ARIN|20120130|Level 3 Communications, Inc.|US|ARIN
...
Returns:
Dict {as_name -> ("readable name", country_code)}
ex: {"8X8INC-ARIN": ("8x8, Inc.","US")}
"""
# pyformat: enable
line = next(f)
while line != ORG_TO_COUNTRY_HEADER:
# Throw away starter comment lines
line = next(f)
org_name_to_country_map: Dict[str, Tuple[str, str]] = {}
while line != AS_TO_ORG_HEADER:
org_id, changed_date, org_name, country, source = line.split("|")
org_name_to_country_map[org_id] = (org_name, country)
line = next(f)
return org_name_to_country_map
def _parse_as_to_org_map_remainder(
f: Iterator[str], org_id_to_country_map: Dict[str, Tuple[str, str]]
) -> Dict[int, Tuple[str, Optional[str], Optional[str]]]:
# pyformat: disable
"""Returns a mapping of ASNs to organization info from a file.
Args:
f: an iterator containing the content of an as-org2info.txt file which
has already been iterated over by _parse_org_name_to_country_map so the
only remaining line are of the format
LVLT-ARIN|20120130|Level 3 Communications, Inc.|US|ARIN
org_id_to_country_map: Dict {as_name -> ("readable name", country_code)}
Returns:
Dict {asn -> (asn_name, readable_name, country)}
ex {204867 : ("LIGHTNING-WIRE-LABS", "Lightning Wire Labs GmbH", "DE")}
The final 2 fields may be None
"""
# pyformat: enable
asn_to_org_info_map: Dict[int, Tuple[str, Optional[str], Optional[str]]] = {}
for line in f:
asn, changed_date, asn_name, org_id, opaque_id, source = line.split("|")
try:
readable_name, country = org_id_to_country_map[org_id]
asn_to_org_info_map[int(asn)] = (asn_name, readable_name, country)
except KeyError as e:
logging.warning("Missing org country info for asn %s, %s", asn, e)
asn_to_org_info_map[int(asn)] = (asn_name, None, None)
return asn_to_org_info_map
def _parse_as_to_type_map(f: Iterator[str]) -> Dict[int, str]:
"""Returns a mapping of ASNs to org type info from a file.
Args:
f: as2type file object
Returns:
Dict {asn -> network_type}
ex {398243 : "Enterprise", 13335: "Content", 4: "Transit/Access"}
"""
# filter comments
data_lines = [line for line in f if line[0] != "#"]
type_data = list(csv.reader(data_lines, delimiter="|"))
as_to_type_map: Dict[int, str] = {}
for line in type_data:
asn, source, org_type = line
as_to_type_map[int(asn)] = org_type
return as_to_type_map
class IpMetadata(IpMetadataInterface):
"""A lookup table which contains network metadata about IPs."""
def __init__(
self,
date: datetime.date,
cloud_data_location: str,
allow_previous_day: bool,
) -> None:
"""Create an IP Metadata object by reading/parsing all needed data.
Args:
date: a date to initialize the asn database to
cloud_data_location: GCS bucket folder name like "gs://bucket/folder/"
allow_previous_day: If the given date's routeview file doesn't exist,
allow the one from the previous day instead. This is useful when
processing very recent data where the newest file may not yet exist.
"""
super().__init__(date, cloud_data_location, allow_previous_day)
self.cloud_data_location = cloud_data_location
self.as_to_org_map = self._get_asn2org_map()
self.as_to_type_map = self._get_asn2type_map()
self.asn_db = self._get_asn_db(date, allow_previous_day)
def lookup(
self, ip: str
) -> Tuple[str, int, Optional[str], Optional[str], Optional[str],
Optional[str]]:
"""Lookup metadata infomation about an IP.
Args:
ip: string of the format 1.1.1.1 (ipv4 only)
Returns:
Tuple(netblock, asn, as_name, as_full_name, as_type, country)
("1.0.0.1/24", 13335, "CLOUDFLARENET", "Cloudflare Inc.", "Content", "US")
The final 4 fields may be None
Raises:
KeyError: when the IP's ASN can't be found
"""
asn, netblock = self.asn_db.lookup(ip)
if not asn:
raise KeyError("Missing IP {} at {}".format(ip, self.date.isoformat()))
if asn not in self.as_to_org_map:
logging.warning("Missing asn %s in org name map", asn)
as_name, as_full_name, country = self.as_to_org_map.get(
asn, (None, None, None))
if asn not in self.as_to_type_map:
logging.warning("Missing asn %s in type map", asn)
as_type = self.as_to_type_map.get(asn, None)
return (netblock, asn, as_name, as_full_name, as_type, country)
def _get_asn2org_map(
self) -> Dict[int, Tuple[str, Optional[str], Optional[str]]]:
as_to_org_filename = self.cloud_data_location + LATEST_AS2ORG_FILEPATH
as_to_org_file = _read_compressed_file(as_to_org_filename)
return _parse_as_to_org_map(as_to_org_file)
def _get_asn2type_map(self) -> Dict[int, str]:
as_to_type_filename = self.cloud_data_location + LATEST_AS2CLASS_FILEPATH
as_to_type_file = _read_compressed_file(as_to_type_filename)
return _parse_as_to_type_map(as_to_type_file)
def _get_asn_db(self, date: datetime.date,
allow_previous_day: bool) -> pyasn.pyasn:
"""Return an ASN database object.
Args:
date: a date to initialize the asn database to
allow_previous_day: allow using previous routeview file
Returns:
pyasn database
Raises:
FileNotFoundError: when no allowable routeview file is found
"""
try:
self.date = date
return self._get_dated_asn_db(self.date)
except FileNotFoundError as ex:
if allow_previous_day:
self.date = date - datetime.timedelta(days=1)
return self._get_dated_asn_db(self.date)
raise ex
def _get_dated_asn_db(self, date: datetime.date) -> pyasn.pyasn:
"""Finds the right routeview file for a given date and returns an ASN DB.
Args:
date: date object to initialize the database to
Returns:
A pyasn DB for the dated routeview file.
Raises:
FileNotFoundError: when no exactly matching routeview file is found
"""
file_pattern = f"routeviews-rv2-{date:%Y%m%d}*.pfx2as.gz"
filepath_pattern = self.cloud_data_location + "routeviews/" + file_pattern
match = apache_filesystems.FileSystems.match([filepath_pattern], limits=[1])
try:
filepath = match[0].metadata_list[0].path
return _parse_asn_db(_read_compressed_file(filepath))
except IndexError as ex:
raise FileNotFoundError(filepath_pattern) from ex
def get_firehook_ip_metadata_db(
date: datetime.date,
allow_previous_day: bool = False,
) -> IpMetadata:
"""Factory to return an IPMetadata object which reads in firehook files.
Args:
date: a date to initialize the asn database to
allow_previous_day: If the given date's routeview file doesn't exist, allow
the one from the previous day instead. This is useful when processing very
recent data where the newest file may not yet exist.
Returns:
an IpMetadata for the given date.
"""
# import here to avoid beam pickling issues
import firehook_resources # pylint: disable=import-outside-toplevel
return IpMetadata(date, firehook_resources.CAIDA_FILE_LOCATION,
allow_previous_day)
| apache-2.0 | 3,357,140,666,437,993,000 | 32.294118 | 80 | 0.675749 | false |
swirkert/ipcai2016 | mc/test/test_tissue_parser.py | 1 | 1783 |
"""
Copyright (c) German Cancer Research Center,
Computer Assisted Interventions.
All rights reserved.
This software is distributed WITHOUT ANY WARRANTY; without
even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE.
See LICENSE for details
"""
'''
Created on Aug 19, 2016
@author: avemuri
'''
import unittest
import os
from mc import tissueparser
this_dir, this_filename = os.path.split(__file__)
DATA_PATH = os.path.join(this_dir, "..", "data", "tissues")
PARAMS = ['sao2', 'a_mie', 'b_mie', 'a_ray', 'g', 'n', 'd']
class TestTissueParser(unittest.TestCase):
def setUp(self):
self.tissue_instance = tissueparser.read_tissue_config(
os.path.join(DATA_PATH, 'tissue_config_test.ini'))
def test_tissue_parser(self):
self.assertEquals(len(self.tissue_instance), 4,
"Number of layers read is incorrect.")
for i, layer in enumerate(self.tissue_instance):
self.assertEquals(len(layer.parameter_list), 7,
"Number of parameters read is incorrect for Layer-" +
str(i))
for desired_parameter, read_parameter in zip(PARAMS, layer.parameter_list):
self.assertEquals(read_parameter.name,
desired_parameter,
"Parameter: " +
read_parameter.name +
" in Layer-" + str(i) + " is incorrect.")
def test_tissue_parser_wrong_filename(self):
with self.assertRaises(IOError):
tissueparser.read_tissue_config("fakefiledoesnotexists.ini")
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| bsd-3-clause | -6,729,565,260,189,982,000 | 28.229508 | 87 | 0.593943 | false |
google-research/tf-slim | tf_slim/losses/__init__.py | 1 | 1814 | # coding=utf-8
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for building neural network losses.
See [Contrib Losses](https://tensorflow.org/api_guides/python/contrib.losses).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tf_slim.losses import metric_learning
# pylint: disable=wildcard-import
from tf_slim.losses.loss_ops import *
from tf_slim.losses.metric_learning import *
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'absolute_difference',
'add_loss',
'cluster_loss',
'compute_weighted_loss',
'contrastive_loss',
'cosine_distance',
'get_losses',
'get_regularization_losses',
'get_total_loss',
'hinge_loss',
'lifted_struct_loss',
'log_loss',
'mean_pairwise_squared_error',
'mean_squared_error',
'metric_learning',
'npairs_loss',
'npairs_loss_multilabel',
'sigmoid_cross_entropy',
'softmax_cross_entropy',
'sparse_softmax_cross_entropy',
'triplet_semihard_loss',
]
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 | 8,135,164,806,222,323,000 | 30.275862 | 80 | 0.688534 | false |
us-ignite/us_ignite | us_ignite/apps/importer.py | 1 | 4966 | import logging
import pytz
import requests
from StringIO import StringIO
from django.contrib.auth.models import User
from django.core import files
from django.core.files.storage import default_storage
from django.core.files.base import ContentFile
from django.utils import timezone
from django.utils.dateparse import parse_datetime
from us_ignite.apps.models import Application, ApplicationURL, Domain
from us_ignite.profiles.models import Profile
logger = logging.getLogger('us_ignite.apps.importer')
TIMEZONE = 'America/New_York'
def parse_date(date_str):
naive = parse_datetime(date_str)
return pytz.timezone(TIMEZONE).localize(naive, is_dst=None)
def import_author(data):
email = data['email']
try:
user = User.objects.get(email=email)
except User.DoesNotExist:
user = User.objects.create_user(
data['username'], email, first_name=data['name'][:30])
profile, is_new = Profile.objects.get_or_create(user=user)
if is_new:
profile.website = data['website']
profile.save()
return user
def get_domain(data):
categories = {
'education': 'education-workforce',
'advanced-manufacturing': 'advanced-manufacturing',
'health-it': 'healthcare',
'public-safety': 'public-safety',
'clean-energy': 'energy',
}
old_slug = data['slug']
if old_slug in categories:
return Domain.objects.get(slug=categories[old_slug])
assert False, data
def get_stage(data):
name = data['name'].lower()
stages = {
'development': Application.ALPHA,
'ideation': Application.IDEA,
}
if name in stages:
return stages[name]
assert False, name
def import_urls(application, blog, repo):
if blog:
blog_url, is_new = (ApplicationURL.objects
.get_or_create(application=application, url=blog))
blog_url.name = 'Blog'
blog_url.save()
else:
blog_url = None
if repo:
repo_url, is_new = (ApplicationURL.objects
.get_or_create(application=application, url=repo))
repo_url.name = 'Repository'
repo_url.save()
else:
repo_url = None
return (blog_url, repo_url)
def import_image(path, key):
url = 'https://mozillaignite.org%s' % path
if default_storage.exists(key):
logger.debug('Ignoring existing file: %s', key)
return key
logger.debug('Downloading: %s', url)
response = requests.get(url, verify=False)
if response.status_code == 200:
image_file = files.File(StringIO(response.content))
return default_storage.save(key, ContentFile(image_file.read()))
return u''
def _get_key_from_url(url, prefix='apps'):
suffix = url.split('/')[-1]
return u'%s/%s' % (prefix, suffix)
_title = lambda t: u'\n###%s\n' % t
def import_app(data):
author_data = data.get('created_by')
author = import_author(author_data) if author_data else None
slug = 'MI-%s' % data['slug']
application, is_new = Application.objects.get_or_create(slug=slug)
application.name = data['name']
application.summary = data['brief_description']
application.team_description = data['collaborators']
application.impact_statement = data['life_improvements']
application.domain = get_domain(data['category'])
application.owner = author
application.stage = get_stage(data['phase'])
application.website = data['blog_url'] or data['repository_url']
application.created = parse_date(data['created_on'])
application.modified = parse_date(data['updated_on'])
if data['is_draft']:
application.status = Application.DRAFT
else:
application.status = Application.PUBLISHED
description_list = [
data['description'],
]
if data['take_advantage']:
description_list += [
_title('How does your idea take advantage of '
'next-generation networks?'),
data['take_advantage']]
if data['required_effort']:
description_list += [
_title('How much effort do you expect this work to take?'),
data['required_effort']]
if data['interest_making']:
description_list += [_title('Interest making'), data['interest_making']]
application.description = '\n'.join(description_list)
application.notes = ('Imported from the Mozilla Ignite site '
'(%s).' % timezone.now())
image_url = data.get('sketh_note')
if image_url:
application.image = import_image(
image_url, _get_key_from_url(image_url))
application.save()
application.tags.add('mozillaignite')
import_urls(application, data['blog_url'], data['repository_url'])
return application
def digest_payload(payload):
imported_apps = []
for app in payload:
imported_apps.append(import_app(app))
return [a for a in imported_apps if a]
| bsd-3-clause | 1,271,934,513,240,245,200 | 30.833333 | 80 | 0.639952 | false |
i3visio/osrframework | osrframework/wrappers/goodreads.py | 1 | 3899 | ################################################################################
#
# Copyright 2015-2020 Félix Brezo and Yaiza Rubio
#
# This program is part of OSRFramework. You can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
__author__ = "Felix Brezo, Yaiza Rubio <[email protected]>"
__version__ = "2.0"
from osrframework.utils.platforms import Platform
class Goodreads(Platform):
"""A <Platform> object for Goodreads"""
def __init__(self):
self.platformName = "Goodreads"
self.tags = ["social", "opinions"]
########################
# Defining valid modes #
########################
self.isValidMode = {}
self.isValidMode["phonefy"] = False
self.isValidMode["usufy"] = True
self.isValidMode["searchfy"] = False
######################################
# Search URL for the different modes #
######################################
# Strings with the URL for each and every mode
self.url = {}
#self.url["phonefy"] = "http://anyurl.com//phone/" + "<phonefy>"
self.url["usufy"] = "http://www.goodreads.com/" + "<usufy>"
#self.url["searchfy"] = "http://anyurl.com/search/" + "<searchfy>"
######################################
# Whether the user needs credentials #
######################################
self.needsCredentials = {}
#self.needsCredentials["phonefy"] = False
self.needsCredentials["usufy"] = False
#self.needsCredentials["searchfy"] = False
#################
# Valid queries #
#################
# Strings that will imply that the query number is not appearing
self.validQuery = {}
# The regular expression '.+' will match any query.
#self.validQuery["phonefy"] = ".*"
self.validQuery["usufy"] = ".+"
#self.validQuery["searchfy"] = ".*"
###################
# Not_found clues #
###################
# Strings that will imply that the query number is not appearing
self.notFoundText = {}
#self.notFoundText["phonefy"] = []
self.notFoundText["usufy"] = ["<title>Page not found</title>"]
#self.notFoundText["searchfy"] = []
#########################
# Fields to be searched #
#########################
self.fieldsRegExp = {}
# Definition of regular expressions to be searched in phonefy mode
#self.fieldsRegExp["phonefy"] = {}
# Example of fields:
#self.fieldsRegExp["phonefy"]["i3visio.location"] = ""
# Definition of regular expressions to be searched in usufy mode
self.fieldsRegExp["usufy"] = {}
# Example of fields:
#self.fieldsRegExp["usufy"]["i3visio.location"] = ""
# Definition of regular expressions to be searched in searchfy mode
#self.fieldsRegExp["searchfy"] = {}
# Example of fields:
#self.fieldsRegExp["searchfy"]["i3visio.location"] = ""
################
# Fields found #
################
# This attribute will be feeded when running the program.
self.foundFields = {}
| agpl-3.0 | 6,122,434,766,374,014,000 | 37.98 | 80 | 0.524371 | false |
maxharp3r/archive-rotator | setup.py | 1 | 1784 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read().replace('.. :changelog:', '')
requirements = [
# TODO: put package requirements here
]
test_requirements = [
# TODO: put package test requirements here
]
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
setup(
name='archive-rotator',
version='0.2.1',
description="Flexible utility for rotating backup files.",
long_description=readme + '\n\n' + history,
author="Max Harper",
author_email='[email protected]',
url='https://github.com/maxharp3r/archive-rotator',
packages=[
'archive_rotator',
],
package_dir={'archive_rotator': 'archive_rotator'},
entry_points={
'console_scripts': [
'archive-rotator = archive_rotator.cli:main',
]
},
include_package_data=True,
install_requires=requirements,
license="MIT License",
zip_safe=False,
keywords='backup rotation',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: System :: Archiving :: Backup',
],
test_suite='tests',
tests_require=test_requirements
)
| mit | -6,119,830,027,694,422,000 | 27.31746 | 63 | 0.623879 | false |
Tesora-Release/tesora-trove | trove/db/sqlalchemy/migrate_repo/versions/038_instance_faults.py | 1 | 2042 | # Copyright 2016 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from sqlalchemy import ForeignKey
from sqlalchemy.schema import Column
from sqlalchemy.schema import MetaData
from trove.db.sqlalchemy.migrate_repo.schema import Boolean
from trove.db.sqlalchemy.migrate_repo.schema import create_tables
from trove.db.sqlalchemy.migrate_repo.schema import DateTime
from trove.db.sqlalchemy.migrate_repo.schema import drop_tables
from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table
from trove.db.sqlalchemy.migrate_repo.schema import Text
meta = MetaData()
instance_faults = Table(
'instance_faults',
meta,
Column('id', String(length=64), primary_key=True, nullable=False),
Column('instance_id', String(length=64),
ForeignKey('instances.id', ondelete="CASCADE",
onupdate="CASCADE"), nullable=False),
Column('message', String(length=255), nullable=False),
Column('details', Text(length=65535), nullable=False),
Column('created', DateTime(), nullable=False),
Column('updated', DateTime(), nullable=False),
Column('deleted', Boolean(), default=0, nullable=False),
Column('deleted_at', DateTime()),
)
def upgrade(migrate_engine):
meta.bind = migrate_engine
Table('instances', meta, autoload=True)
create_tables([instance_faults])
def downgrade(migrate_engine):
meta.bind = migrate_engine
drop_tables([instance_faults])
| apache-2.0 | -6,658,379,695,200,043,000 | 35.464286 | 78 | 0.728208 | false |
neozhangthe1/coverage_model | groundhog/trainer/SGD.py | 1 | 6790 | """
Stochastic Gradient Descent (plain SGD, with optional gradient-norm clipping).
TODO: write more documentation
"""
__docformat__ = 'restructuredtext en'
__authors__ = ("KyungHyun Cho "
"Razvan Pascanu "
"Caglar Gulcehre ")
__contact__ = "Razvan Pascanu <r.pascanu@gmail>"
import numpy
import time
import theano
import theano.tensor as TT
# from theano.sandbox.scan import scan
from theano.scan_module import scan
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from groundhog.utils import print_time, print_mem, const
class SGD(object):
"""
Stochastic gradient descent class
"""
def __init__(self,
model,
state,
data):
"""
:type model: groundhog model class
:param model: class depicting the model to be optimized
:type state: dictionary or jobman DD object
:param state: dictionary containing various hyper-parameters. The
class will write into this dictionary updates like the current
training error and so on
:type data: groundhog dataset object
:param data: data iterator over which training is done
"""
#####################################
# Step 0. Constructs shared variables
#####################################
bs = state['bs']
self.model = model
self.rng = numpy.random.RandomState(state['seed'])
srng = RandomStreams(self.rng.randint(213))
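        # Shared buffers that hold the most recently computed gradient for each model parameter.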
self.gs = [theano.shared(numpy.zeros(p.get_value(borrow=True).shape,
dtype=theano.config.floatX),
name=p.name)
for p in model.params]
self.step = 0
self.bs = bs
self.state = state
self.data = data
self.step_timer = time.time()
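        # Shared variables that hold the current minibatch on the device (one per model input).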
self.gdata = [theano.shared(numpy.zeros( (2,)*x.ndim,
dtype=x.dtype),
name=x.name) for x in model.inputs]
if 'profile' not in self.state:
self.state['profile'] = 0
###################################
# Step 1. Compile training function
###################################
print('Constructing grad function')
loc_data = self.gdata
lr = TT.scalar('lr')
self.prop_exprs = [x[1] for x in model.properties]
self.prop_names = [x[0] for x in model.properties]
self.update_rules = [x[1] for x in model.updates]
rval = theano.clone(model.param_grads + self.update_rules + \
self.prop_exprs + [model.train_cost],
replace=list(zip(model.inputs, loc_data)))
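        # rval is laid out as: parameter gradients, update-rule expressions,
        # monitored properties, and finally the training cost.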
nparams = len(model.params)
nouts = len(self.prop_exprs)
nrules = len(self.update_rules)
gs = rval[:nparams]
rules = rval[nparams:nparams + nrules]
outs = rval[nparams + nrules:]
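        # Sum of squared gradient entries (squared norm), used for gradient clipping below.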
norm_gs = sum(TT.sum(x**2)
for x,p in zip(gs,
self.model.params)
if p not in self.model.exclude_params_for_norm)
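        # Gradient clipping: rescale the gradients when their norm exceeds the cutoff, and
        # fall back to a weight-shrinking step (the gradient becomes 0.1 * p) when the norm
        # is NaN or Inf.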
if 'cutoff' in state and state['cutoff'] > 0:
c = numpy.float32(state['cutoff'])
if state['cutoff_rescale_length']:
c = c * TT.cast(loc_data[0].shape[0], 'float32')
notfinite = TT.or_(TT.isnan(norm_gs), TT.isinf(norm_gs))
_gs = []
for g,p in zip(gs,self.model.params):
if p not in self.model.exclude_params_for_norm:
tmpg = TT.switch(TT.ge(norm_gs, c), g*c/norm_gs, g)
_gs.append(
TT.switch(notfinite, numpy.float32(.1)*p,
tmpg))
else:
_gs.append(g)
gs = _gs
store_gs = [(s,g) for s,g in zip(self.gs, gs)]
updates = store_gs + [(s[0], r) for s,r in zip(model.updates, rules)]
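        # train_fn evaluates the graph on the data currently stored in self.gdata: it writes
        # the gradients into self.gs, applies the model's update rules, and returns the
        # monitored properties plus the training cost.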
print('Compiling grad function')
st = time.time()
self.train_fn = theano.function(
[], outs, name='train_function',
updates = updates,
givens = list(zip(model.inputs, loc_data)),
profile=self.state['profile'])
print('took', time.time() - st)
self.lr = numpy.float32(state['lr'])
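        # Plain SGD update: p <- p - scale * lr * g, with a per-parameter gradient scale.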
new_params = [p - s*lr*g for s, p, g in zip(model.params_grad_scale, model.params, self.gs)]
self.update_fn = theano.function(
[lr], [], name='update_function',
allow_input_downcast=True,
updates = list(zip(model.params, new_params)),
profile=self.state['profile'])
self.old_cost = 1e20
self.schedules = model.get_schedules()
self.return_names = self.prop_names + \
['cost',
'time_step',
'whole_time',
'lr']
def __call__(self):
batch = next(self.data)
# Perturb the data (! and the model)
if isinstance(batch, dict):
batch = self.model.perturb(**batch)
else:
batch = self.model.perturb(*batch)
# Load the dataset into GPU
        # Note: not the most efficient approach in general, as each batch
        # is copied to the GPU individually.
if isinstance(batch, dict):
for gdata in self.gdata:
gdata.set_value(batch[gdata.name], borrow=True)
else:
for gdata, data in zip(self.gdata, batch):
gdata.set_value(data, borrow=True)
        # Run the training function
g_st = time.time()
rvals = self.train_fn()
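        # Let the model-defined schedules react to the cost of this step (rvals[-1]).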
for schedule in self.schedules:
schedule(self, rvals[-1])
self.update_fn(self.lr)
g_ed = time.time()
self.state['lr'] = float(self.lr)
cost = rvals[-1]
self.old_cost = cost
whole_time = time.time() - self.step_timer
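        # Periodically log the cost, the monitored properties, and timing information.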
if self.step % self.state['trainFreq'] == 0:
msg = '.. iter %4d cost %.3f'
vals = [self.step, cost]
for dx, prop in enumerate(self.prop_names):
msg += ' '+prop+' %.2e'
vals += [float(numpy.array(rvals[dx]))]
msg += ' step time %s whole time %s lr %.2e'
vals += [print_time(g_ed - g_st),
print_time(time.time() - self.step_timer),
float(self.lr)]
print(msg % tuple(vals))
self.step += 1
ret = dict([('cost', float(cost)),
('lr', float(self.lr)),
('time_step', float(g_ed - g_st)),
('whole_time', float(whole_time))]+list(zip(self.prop_names, rvals)))
return ret
| bsd-3-clause | -7,551,729,599,220,333,000 | 36.103825 | 100 | 0.508837 | false |