| Column | Type | Range / values |
|---|---|---|
| repo_name | string | lengths 6-61 |
| path | string | lengths 4-230 |
| copies | string | lengths 1-3 |
| size | string | lengths 4-6 |
| text | string | lengths 1.01k-850k |
| license | string | 15 classes |
| hash | int64 | -9,220,477,234,079,998,000 to 9,219,060,020B |
| line_mean | float64 | 11.6-96.6 |
| line_max | int64 | 32-939 |
| alpha_frac | float64 | 0.26-0.9 |
| autogenerated | bool | 1 class |
| ratio | float64 | 1.62-6.1 |
| config_test | bool | 2 classes |
| has_no_keywords | bool | 2 classes |
| few_assignments | bool | 1 class |
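Each record below pairs a source file's full text with the per-file statistics described in the schema above. As an illustration only, the sketch below shows one way such a dataset could be loaded and filtered on those columns with the Hugging Face `datasets` library; the dataset identifier, split name, and threshold values are assumptions and are not part of this dump.

```python
# Hypothetical usage sketch: the dataset id "user/python-code-corpus" and the
# filter thresholds are assumptions; only the column names come from the schema above.
from datasets import load_dataset

ds = load_dataset("user/python-code-corpus", split="train")  # hypothetical identifier

def keep(row):
    # Drop generated files and keep permissively licensed, code-like records.
    return (
        not row["autogenerated"]
        and row["license"] in {"mit", "apache-2.0", "bsd-3-clause"}
        and row["alpha_frac"] >= 0.3   # mostly alphabetic content
        and row["line_max"] <= 200     # no extremely long lines
    )

filtered = ds.filter(keep)
print(f"{len(filtered)} of {len(ds)} records kept")
```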
abcamus/gadgets | note-editor/parser.py | 1 | 3780 | import platform
'''
This file defines Token class and Lexer class
'''
class Token():
def __init__(self):
self.type = None
self.content = ''
self.s_line = 1
self.s_col = 0
self.e_line = 1
self.e_col = 0
operation_list = ['+', '-', '*', '/']
def isoperator(char):
if char in operation_list:
return True
else:
return False
class Lexer():
'''
state = 'PROCESS', 'DONE'
'''
def __init__(self):
self.token_list = []
self.token_num = 0
self.cur_line = 1
self.cur_col = 0
self.systype = platform.system()
self.NewLine = False
def update_pos(self):
self.cur_col += 1
def get_next_char(self):
if self.cur_idx+1 <= len(self.string)-1:
self.cur_idx += 1
self.cur_ch = self.string[self.cur_idx]
self.update_pos()
else:
self.cur_ch = None
def update_token(self, Found_token):
if Found_token and self.cur_ch != None:
(self.token.e_line, self.token.e_col) = self.step_back()
else:
(self.token.e_line, self.token.e_col) = (self.cur_line, self.cur_col)
self.token.end_pos = str(self.token.e_line)+'.'+str(self.token.e_col)
def step_back(self):
return (self.cur_line, self.cur_col-1)
def skip_whitespace(self):
while self.cur_ch == ' ':
self.token.content += self.cur_ch
self.get_next_char()
self.token.type = 'WhiteSpace'
# move back the cur_pos
self.update_token(True)
self.token_list.append(self.token)
self.new_token()
def eatID(self):
self.token.type = 'Identifier'
while self.cur_ch != None and (self.cur_ch.isalpha() or self.cur_ch.isdigit()):
self.token.content += self.cur_ch
self.get_next_char()
self.update_token(True)
self.token_list.append(self.token)
self.new_token()
def eatChar(self):
self.token.type = 'Charactor'
self.token.content += self.cur_ch
if self.cur_ch == '\n':
self.NewLine = True
self.get_next_char()
self.update_token(True)
self.token_list.append(self.token)
self.new_token()
def new_token(self):
self.token = Token()
self.token.type = None
self.token.content = ''
if self.NewLine:
self.cur_line += 1
self.cur_col = 0
self.NewLine = False
self.token.s_line = self.cur_line
self.token.s_col = self.cur_col
self.token.start_pos = str(self.token.s_line)+'.'+str(self.token.s_col)
#print "New token start at: %s" %(self.token.start_pos)
def update(self, string):
# prepare for the first token
self.cur_line = 1
self.cur_col = 0
self.string = string
self.token_list = []
self.cur_idx = 0
self.cur_ch = self.string[0]
self.NewLine = False
# alloc the first token
self.new_token()
while self.cur_ch != None:
if self.cur_ch == ' ':
self.skip_whitespace()
elif self.cur_ch.isalpha():
self.eatID()
#elif cur_cur == '\n':
else:
#print "Unknown type"
self.eatChar()
print "Updated"
lexer = Lexer()
from Tkinter import *
def parse(main, string):
text = main.text
#print string
if len(string) > 0:
lexer.update(string)
#for token in lexer.token_list:
#text.tag_add(token.type, token.start_pos, token.end_pos)
#print "Token: %s(%s-%s)" %(token.content, token.start_pos, token.end_pos)
| mit | 7,771,372,619,228,204,000 | 27.208955 | 87 | 0.530688 | false | 3.445761 | false | false | false |
offlinehacker/flumotion | flumotion/common/bundleclient.py | 1 | 6795 | # -*- Mode: Python; test-case-name: flumotion.test.test_bundleclient -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
"""bundle interface for fetching, caching and importing
"""
import os
import sys
from flumotion.common import bundle, errors, log, package
from flumotion.configure import configure
__all__ = ['BundleLoader']
__version__ = "$Rev$"
class BundleLoader(log.Loggable):
"""
I am an object that can get and set up bundles from a PB server.
@cvar remote: a remote reference to an avatar on the PB server.
"""
remote = None
_unbundler = None
def __init__(self, callRemote):
"""
@type callRemote: callable
"""
self.callRemote = callRemote
self._unbundler = bundle.Unbundler(configure.cachedir)
def getBundles(self, **kwargs):
# FIXME: later on, split out this method into getBundles which does
# not call registerPackagePath, and setupBundles which calls getBundles
# and register. Then change getBundles calls to setupBundles.
"""
Get, extract and register all bundles needed.
Either one of bundleName, fileName or moduleName should be specified
in **kwargs, which should be strings or lists of strings.
@returns: a deferred firing a a list of (bundleName, bundlePath)
tuples, with lowest dependency first.
bundlePath is the directory to register
for this package.
"""
def annotated(d, *extraVals):
def annotatedReturn(ret):
return (ret, ) + extraVals
d.addCallback(annotatedReturn)
return d
def getZips(sums):
# sums is a list of name, sum tuples, highest to lowest
# figure out which bundles we're missing
toFetch = []
for name, md5 in sums:
path = os.path.join(configure.cachedir, name, md5)
if os.path.exists(path):
self.log('%s is up to date', name)
else:
self.log('%s needs fetching', name)
# FIXME: We cannot be completelly sure the bundle has the
# correct content only by checking that the directory exists.
# The worker/manager could have died during a download leaving
# the package incomplete.
toFetch.append(name)
if toFetch:
return annotated(self.callRemote('getBundleZips', toFetch),
toFetch, sums)
else:
return {}, [], sums
def unpackAndRegister((zips, toFetch, sums)):
for name in toFetch:
if name not in zips:
msg = "Missing bundle %s was not received"
self.warning(msg, name)
raise errors.NoBundleError(msg % name)
b = bundle.Bundle(name)
b.setZip(zips[name])
path = self._unbundler.unbundle(b)
# register all package paths; to do so we need to reverse sums
sums.reverse()
ret = []
for name, md5 in sums:
self.log('registerPackagePath for %s' % name)
path = os.path.join(configure.cachedir, name, md5)
if not os.path.exists(path):
self.warning("path %s for bundle %s does not exist",
path, name)
else:
package.getPackager().registerPackagePath(path, name)
ret.append((name, path))
return ret
# get sums for all bundles we need
d = self.callRemote('getBundleSums', **kwargs)
d.addCallback(getZips)
d.addCallback(unpackAndRegister)
return d
def loadModule(self, moduleName):
"""
Load the module given by name.
Sets up all necessary bundles to be able to load the module.
@rtype: L{twisted.internet.defer.Deferred}
@returns: a deferred that will fire when the given module is loaded,
giving the loaded module.
"""
def gotBundles(bundles):
self.debug('Got bundles %r', bundles)
# load up the module and return it
__import__(moduleName, globals(), locals(), [])
self.log('loaded module %s', moduleName)
return sys.modules[moduleName]
self.debug('Loading module %s', moduleName)
# get sums for all bundles we need
d = self.getBundles(moduleName=moduleName)
d.addCallback(gotBundles)
return d
def getBundleByName(self, bundleName):
"""
Get the given bundle locally.
@rtype: L{twisted.internet.defer.Deferred}
@returns: a deferred returning the absolute path under which the
bundle is extracted.
"""
def gotBundles(bundles):
name, path = bundles[-1]
assert name == bundleName
self.debug('Got bundle %s in %s', bundleName, path)
return path
self.debug('Getting bundle %s', bundleName)
d = self.getBundles(bundleName=bundleName)
d.addCallback(gotBundles)
return d
def getFile(self, fileName):
"""
Do everything needed to get the given bundled file.
@returns: a deferred returning the absolute path to a local copy
of the given file.
"""
def gotBundles(bundles):
name, bundlePath = bundles[-1]
path = os.path.join(bundlePath, fileName)
if not os.path.exists(path):
self.warning("path %s for file %s does not exist",
path, fileName)
return path
self.debug('Getting file %s', fileName)
d = self.getBundles(fileName=fileName)
d.addCallback(gotBundles)
return d
| gpl-2.0 | 8,944,481,462,258,507,000 | 34.575916 | 79 | 0.590728 | false | 4.403759 | false | false | false |
numansiddique/contrail-controller | src/nodemgr/database_nodemgr/database_event_manager.py | 2 | 11196 | #
# Copyright (c) 2015 Juniper Networks, Inc. All rights reserved.
#
from gevent import monkey
monkey.patch_all()
import os
import sys
import socket
import subprocess
import json
import time
import datetime
import platform
import select
import gevent
import ConfigParser
from nodemgr.common.event_manager import EventManager
from ConfigParser import NoOptionError
from supervisor import childutils
from pysandesh.sandesh_base import *
from pysandesh.sandesh_session import SandeshWriter
from pysandesh.gen_py.sandesh_trace.ttypes import SandeshTraceRequest
from sandesh_common.vns.ttypes import Module, NodeType
from sandesh_common.vns.constants import ModuleNames, NodeTypeNames,\
Module2NodeType, INSTANCE_ID_DEFAULT, SERVICE_CONTRAIL_DATABASE, \
RepairNeededKeyspaces
from subprocess import Popen, PIPE
from StringIO import StringIO
from database.sandesh.database.ttypes import \
NodeStatusUVE, NodeStatus, DatabaseUsageStats,\
DatabaseUsageInfo, DatabaseUsage
from database.sandesh.database.process_info.ttypes import \
ProcessStatus, ProcessState, ProcessInfo, DiskPartitionUsageStats
from database.sandesh.database.process_info.constants import \
ProcessStateNames
class DatabaseEventManager(EventManager):
def __init__(self, rule_file, discovery_server,
discovery_port, collector_addr,
hostip, minimum_diskgb, cassandra_repair_interval):
EventManager.__init__(
self, rule_file, discovery_server,
discovery_port, collector_addr)
self.node_type = "contrail-database"
self.module = Module.DATABASE_NODE_MGR
self.module_id = ModuleNames[self.module]
self.hostip = hostip
self.minimum_diskgb = minimum_diskgb
self.cassandra_repair_interval = cassandra_repair_interval
self.supervisor_serverurl = "unix:///tmp/supervisord_database.sock"
self.add_current_process()
# end __init__
def process(self):
if self.rule_file is '':
self.rule_file = "/etc/contrail/" + \
"supervisord_database_files/contrail-database.rules"
json_file = open(self.rule_file)
self.rules_data = json.load(json_file)
node_type = Module2NodeType[self.module]
node_type_name = NodeTypeNames[node_type]
_disc = self.get_discovery_client()
sandesh_global.init_generator(
self.module_id, socket.gethostname(), node_type_name,
self.instance_id, self.collector_addr, self.module_id, 8103,
['database.sandesh'], _disc)
# sandesh_global.set_logging_params(enable_local_log=True)
self.sandesh_global = sandesh_global
try:
(linux_dist, x, y) = platform.linux_distribution()
if (linux_dist == 'Ubuntu'):
popen_cmd = "grep -A 1 'data_file_directories:'" + \
" /etc/cassandra/cassandra.yaml | grep '-' | cut -d'-' -f2"
else:
popen_cmd = "grep -A 1 'data_file_directories:'" + \
" /etc/cassandra/conf/cassandra.yaml | grep '-' | cut -d'-' -f2"
(cassandra_data_dir, error_value) = \
Popen(popen_cmd, shell=True, stdout=PIPE).communicate()
cassandra_data_dir = cassandra_data_dir.strip()
analytics_dir = cassandra_data_dir + '/ContrailAnalytics'
if os.path.exists(analytics_dir):
self.stderr.write("analytics_dir is " + analytics_dir + "\n")
popen_cmd = "set `df -Pk " + analytics_dir + " | grep % | awk '{s+=$3}END{print s}'` && echo $1"
self.stderr.write("popen_cmd is " + popen_cmd + "\n")
(disk_space_used, error_value) = \
Popen(popen_cmd, shell=True, stdout=PIPE).communicate()
popen_cmd = "set `df -Pk " + analytics_dir + " | grep % | awk '{s+=$4}END{print s}'` && echo $1"
self.stderr.write("popen_cmd is " + popen_cmd + "\n")
(disk_space_available, error_value) = \
Popen(popen_cmd, shell=True, stdout=PIPE).communicate()
popen_cmd = "set `du -skL " + analytics_dir + " | awk '{s+=$1}END{print s}'` && echo $1"
self.stderr.write("popen_cmd is " + popen_cmd + "\n")
(analytics_db_size, error_value) = \
Popen(popen_cmd, shell=True, stdout=PIPE).communicate()
disk_space_total = int(disk_space_used) + int(disk_space_available)
if (disk_space_total / (1024 * 1024) < self.minimum_diskgb):
cmd_str = "service " + SERVICE_CONTRAIL_DATABASE + " stop"
(ret_value, error_value) = Popen(
cmd_str, shell=True, stdout=PIPE).communicate()
self.fail_status_bits |= self.FAIL_STATUS_DISK_SPACE
self.fail_status_bits &= ~self.FAIL_STATUS_DISK_SPACE_NA
else:
self.fail_status_bits |= self.FAIL_STATUS_DISK_SPACE_NA
except:
sys.stderr.write("Failed to get database usage" + "\n")
self.fail_status_bits |= self.FAIL_STATUS_DISK_SPACE_NA
def send_process_state_db(self, group_names):
self.send_process_state_db_base(
group_names, ProcessInfo, NodeStatus, NodeStatusUVE)
def send_nodemgr_process_status(self):
self.send_nodemgr_process_status_base(
ProcessStateNames, ProcessState, ProcessStatus,
NodeStatus, NodeStatusUVE)
def get_process_state(self, fail_status_bits):
return self.get_process_state_base(
fail_status_bits, ProcessStateNames, ProcessState)
def get_failbits_nodespecific_desc(self, fail_status_bits):
description = ""
if fail_status_bits & self.FAIL_STATUS_DISK_SPACE:
description += "Disk for analytics db is too low," + \
" cassandra stopped."
if fail_status_bits & self.FAIL_STATUS_SERVER_PORT:
if description != "":
description += " "
description += "Cassandra state detected DOWN."
if fail_status_bits & self.FAIL_STATUS_DISK_SPACE_NA:
description += "Disk space for analytics db not retrievable."
return description
def database_periodic(self):
try:
(linux_dist, x, y) = platform.linux_distribution()
if (linux_dist == 'Ubuntu'):
popen_cmd = "grep -A 1 'data_file_directories:'" + \
" /etc/cassandra/cassandra.yaml | grep '-' | cut -d'-' -f2"
else:
popen_cmd = "grep -A 1 'data_file_directories:'" + \
" /etc/cassandra/conf/cassandra.yaml | grep '-' | cut -d'-' -f2"
(cassandra_data_dir, error_value) = \
Popen(popen_cmd, shell=True, stdout=PIPE).communicate()
cassandra_data_dir = cassandra_data_dir.strip()
analytics_dir = cassandra_data_dir + '/ContrailAnalytics'
if os.path.exists(analytics_dir):
popen_cmd = "set `df -Pk " + analytics_dir + " | grep % | awk '{s+=$3}END{print s}'` && echo $1"
(disk_space_used, error_value) = \
Popen(popen_cmd, shell=True, stdout=PIPE).communicate()
popen_cmd = "set `df -Pk " + analytics_dir + " | grep % | awk '{s+=$4}END{print s}'` && echo $1"
(disk_space_available, error_value) = \
Popen(popen_cmd, shell=True, stdout=PIPE).communicate()
popen_cmd = "set `du -skL " + analytics_dir + " | awk '{s+=$1}END{print s}'` && echo $1"
(analytics_db_size, error_value) = \
Popen(popen_cmd, shell=True, stdout=PIPE).communicate()
self.fail_status_bits &= ~self.FAIL_STATUS_DISK_SPACE_NA
db_stat = DatabaseUsageStats()
db_info = DatabaseUsageInfo()
db_stat.disk_space_used_1k = int(disk_space_used)
db_stat.disk_space_available_1k = int(disk_space_available)
db_stat.analytics_db_size_1k = int(analytics_db_size)
db_info.name = socket.gethostname()
db_info.database_usage = [db_stat]
usage_stat = DatabaseUsage(data=db_info)
usage_stat.send()
else:
self.fail_status_bits |= self.FAIL_STATUS_DISK_SPACE_NA
except:
sys.stderr.write("Failed to get database usage" + "\n")
self.fail_status_bits |= self.FAIL_STATUS_DISK_SPACE_NA
cassandra_cli_cmd = "cassandra-cli --host " + self.hostip + \
" --batch < /dev/null | grep 'Connected to:'"
proc = Popen(cassandra_cli_cmd, shell=True, stdout=PIPE, stderr=PIPE)
(output, errout) = proc.communicate()
if proc.returncode != 0:
self.fail_status_bits |= self.FAIL_STATUS_SERVER_PORT
else:
self.fail_status_bits &= ~self.FAIL_STATUS_SERVER_PORT
self.send_nodemgr_process_status()
# Record cluster status and shut down cassandra if needed
subprocess.Popen(["contrail-cassandra-status",
"--log-file", "/var/log/cassandra/status.log",
"--debug"])
# end database_periodic
def cassandra_repair(self):
subprocess.Popen(["contrail-cassandra-repair",
"--log-file", "/var/log/cassandra/repair.log",
"--debug"])
#end cassandra_repair
def send_disk_usage_info(self):
self.send_disk_usage_info_base(
NodeStatusUVE, NodeStatus, DiskPartitionUsageStats)
def runforever(self, test=False):
prev_current_time = int(time.time())
while 1:
# we explicitly use self.stdin, self.stdout, and self.stderr
# instead of sys.* so we can unit test this code
headers, payload = self.listener_nodemgr.wait(
self.stdin, self.stdout)
# self.stderr.write("headers:\n" + str(headers) + '\n')
# self.stderr.write("payload:\n" + str(payload) + '\n')
pheaders, pdata = childutils.eventdata(payload + '\n')
# self.stderr.write("pheaders:\n" + str(pheaders)+'\n')
# self.stderr.write("pdata:\n" + str(pdata))
# check for process state change events
if headers['eventname'].startswith("PROCESS_STATE"):
self.event_process_state(pheaders, headers)
# check for flag value change events
if headers['eventname'].startswith("PROCESS_COMMUNICATION"):
self.event_process_communication(pdata)
# do periodic events
if headers['eventname'].startswith("TICK_60"):
self.database_periodic()
prev_current_time = self.event_tick_60(prev_current_time)
# Perform nodetool repair every cassandra_repair_interval hours
if self.tick_count % (60 * self.cassandra_repair_interval) == 0:
self.cassandra_repair()
self.listener_nodemgr.ok(self.stdout)
| apache-2.0 | -355,458,862,940,138,200 | 45.845188 | 112 | 0.588335 | false | 3.774781 | false | false | false |
evandempsey/porter2-stemmer | setup.py | 1 | 1536 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

with open('README.rst') as readme_file:
    readme = readme_file.read()

with open('HISTORY.rst') as history_file:
    history = history_file.read().replace('.. :changelog:', '')

requirements = [
    # TODO: put package requirements here
]

test_requirements = [
]

setup(
    name='porter2stemmer',
    version='1.0',
    description="An implementation of the Porter2 English stemming algorithm.",
    long_description=readme + '\n\n' + history,
    author="Evan Dempsey",
    author_email='[email protected]',
    url='https://github.com/evandempsey/porter2-stemmer',
    packages=[
        'porter2stemmer',
    ],
    package_dir={'porter2stemmer':
                 'porter2stemmer'},
    include_package_data=True,
    install_requires=requirements,
    license="BSD",
    zip_safe=False,
    keywords='porter2stemmer',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
    test_suite='tests',
    tests_require=test_requirements
)
| bsd-3-clause | 601,649,328,121,916,300 | 25.947368 | 79 | 0.625 | false | 3.783251 | false | true | false |
spulec/pep8ify | pep8ify/fixes/fix_maximum_line_length.py | 2 | 14923 | from __future__ import unicode_literals
from lib2to3.fixer_base import BaseFix
from lib2to3.fixer_util import LParen, RParen
from lib2to3.pgen2 import token
from lib2to3.pygram import python_symbols as symbols
from lib2to3.pytree import Leaf, Node
from textwrap import TextWrapper
from .utils import (tuplize_comments, get_quotes, wrap_leaves,
first_child_leaf, find_indentation, IS_26, add_leaves_method)
MAX_CHARS = 79
OPENING_TOKENS = [token.LPAR, token.LSQB, token.LBRACE]
CLOSING_TOKENS = [token.RPAR, token.RSQB, token.RBRACE]
SYMBOLS_WITH_NEWLINES_IN_COLONS = [symbols.funcdef, symbols.classdef,
symbols.if_stmt, symbols.for_stmt, symbols.while_stmt, symbols.lambdef,
symbols.try_stmt, symbols.with_stmt]
class FixMaximumLineLength(BaseFix):
'''
Limit all lines to a maximum of 79 characters.
There are still many devices around that are limited to 80 character
lines; plus, limiting windows to 80 characters makes it possible to have
several windows side-by-side. The default wrapping on such devices looks
ugly. Therefore, please limit all lines to a maximum of 79 characters.
For flowing long blocks of text (docstrings or comments), limiting the
length to 72 characters is recommended.
'''
explicit = True # The user must ask for this fixer
def match(self, node):
if (node.type in [token.NEWLINE] or node.type == token.COLON and node.
parent.type in SYMBOLS_WITH_NEWLINES_IN_COLONS):
# Sometimes the newline is wrapped into the next node, so we need
# to check the colons also.
if self.need_to_check_node(node):
# For colon nodes, we need to add the len of the colon also
return True
if any(len(line) > MAX_CHARS for line in node.prefix.split('\n')):
# There is a line in the prefix greater than MAX_CHARS
return True
return False
def transform(self, node, results):
if self.node_needs_splitting(node):
node_to_split = node.prev_sibling
if node_to_split.type == token.STRING:
self.fix_docstring(node_to_split)
else:
if isinstance(node_to_split, Leaf):
node_to_split = node_to_split.parent
combined_prefix = self.fix_leaves(node_to_split)
if combined_prefix:
node.prefix = "%s\n%s" % (node.prefix, combined_prefix.
rstrip())
if (any(len(line) > MAX_CHARS for line in node.prefix.split('\n')) or
node.prefix.count("#") and node.column + len(node.prefix) >
MAX_CHARS):
# Need to fix the prefix
self.fix_prefix(node)
@staticmethod
def need_to_check_node(node):
# Returns if the node or it's docstring might need to be split
if IS_26:
node = add_leaves_method(node)
if node.column > MAX_CHARS:
return True
if (node.type == token.COLON
and node.column + len(node.value) > MAX_CHARS):
return True
if node.prev_sibling and any(child.column + len(child.value)
> MAX_CHARS for child in node.prev_sibling.leaves()):
return True
@staticmethod
def node_needs_splitting(node):
if not node.prev_sibling:
return False
if IS_26:
node = add_leaves_method(node)
if node.type == token.NEWLINE:
node_length = len(node.prefix)
elif node.type == token.COLON:
node_length = len(node.prefix) - len(node.value)
if node.type in [token.NEWLINE, token.COLON]:
if node.column - node_length > MAX_CHARS:
return True
for child in node.prev_sibling.leaves():
if child.type == token.STRING:
lines = node.value.split('\n')
if child.column + len(lines.pop(0)) > MAX_CHARS:
return True
elif any(len(line) > MAX_CHARS for line in lines):
return True
elif child.column + len(child.value) > MAX_CHARS:
return True
def fix_prefix(self, node):
before_comments, comments, after_comments = tuplize_comments(node.
prefix)
# Combine all comment lines together
all_comments = ' '.join([line.replace('#', '', 1).lstrip() for line
in comments.split('\n')])
# It's an inline comment if it has not newlines
is_inline_comment = not node.prefix.count('\n')
initial_indent_level = comments.find('#')
if initial_indent_level == -1:
split_lines = ['']
else:
if is_inline_comment and node.prev_sibling:
# If inline comment, find where the prev sibling started to
# know how to indent lines
initial_indent_level = (first_child_leaf(node.prev_sibling).
column)
indent = '%s# ' % (' ' * initial_indent_level)
wrapper = TextWrapper(width=MAX_CHARS, initial_indent=indent,
subsequent_indent=indent)
split_lines = wrapper.wrap(all_comments)
if is_inline_comment:
# If inline comment is too long, we'll move it to the next line
split_lines[0] = "\n%s" % split_lines[0]
else:
# We need to add back a newline that was lost above
after_comments = "\n%s" % after_comments
new_prefix = '%s%s%s' % (before_comments, '\n'.join(split_lines),
after_comments.lstrip(' '))
# Append the trailing spaces back
if node.prefix != new_prefix:
node.prefix = new_prefix
node.changed()
def fix_docstring(self, node_to_split):
# docstrings
quote_start, quote_end = get_quotes(node_to_split.value)
max_length = MAX_CHARS - node_to_split.column
triple_quoted = quote_start.count('"""') or quote_start.count("'''")
comment_indent = ' ' * (4 + node_to_split.column)
if not triple_quoted:
# If it's not tripled-quoted, we need to start and end each line
# with quotes
comment_indent = '%s%s' % (comment_indent, quote_start)
# Since we will be appending the end_quote after each line after
# the splitting
max_length -= len(quote_end)
# If it's not triple quoted, we need to paren it
node_to_split.value = "(%s)" % node_to_split.value
wrapper = TextWrapper(width=max_length,
subsequent_indent=comment_indent)
split_lines = wrapper.wrap(node_to_split.value)
if not triple_quoted:
# If it's not triple quoted, we need to close each line except for
# the last one
new_split_lines = []
for index, line in enumerate(split_lines):
if index != len(split_lines) - 1:
new_split_lines.append("%s%s" % (line, quote_end))
else:
new_split_lines.append(line)
split_lines = new_split_lines
new_nodes = [Leaf(token.STRING, split_lines.pop(0))]
for line in split_lines:
new_nodes.extend([Leaf(token.NEWLINE, '\n'), Leaf(token.STRING,
line)])
node_to_split.replace(new_nodes)
node_to_split.changed()
def fix_leaves(self, node_to_split):
if IS_26:
node_to_split = add_leaves_method(node_to_split)
parent_depth = find_indentation(node_to_split)
new_indent = "%s%s" % (' ' * 4, parent_depth)
# For now, just indent additional lines by 4 more spaces
child_leaves = []
combined_prefix = ""
prev_leaf = None
for index, leaf in enumerate(node_to_split.leaves()):
if index and leaf.prefix.count('#'):
if not combined_prefix:
combined_prefix = "%s#" % new_indent
combined_prefix += leaf.prefix.split('#')[-1]
# We want to strip all newlines so we can properly insert newlines
# where they should be
if leaf.type != token.NEWLINE:
if leaf.prefix.count('\n') and index:
# If the line contains a newline, we need to strip all
# whitespace since there were leading indent spaces
if (prev_leaf and prev_leaf.type in [token.DOT, token.LPAR]
or leaf.type in [token.RPAR]):
leaf.prefix = ""
else:
leaf.prefix = " "
# Append any trailing inline comments to the combined
# prefix
child_leaves.append(leaf)
prev_leaf = leaf
# Like TextWrapper, but for nodes. We split on MAX_CHARS - 1 since we
# may need to insert a leading parenth. It's not great, but it would be
# hard to do properly.
split_leaves = wrap_leaves(child_leaves, width=MAX_CHARS - 1,
subsequent_indent=new_indent)
new_node = Node(node_to_split.type, [])
# We want to keep track of if we are breaking inside a parenth
open_count = 0
need_parens = False
for line_index, curr_line_nodes in enumerate(split_leaves):
for node_index, curr_line_node in enumerate(curr_line_nodes):
if line_index and not node_index:
# If first node in non-first line, reset prefix since there
# may have been spaces previously
curr_line_node.prefix = new_indent
new_node.append_child(curr_line_node)
if curr_line_node.type in OPENING_TOKENS:
open_count += 1
if curr_line_node.type in CLOSING_TOKENS:
open_count -= 1
if line_index != len(split_leaves) - 1:
# Don't add newline at the end since it it part of the next
# sibling
new_node.append_child(Leaf(token.NEWLINE, '\n'))
# Checks if we ended a line without being surrounded by parens
if open_count <= 0:
need_parens = True
if need_parens:
# Parenthesize the parent if we're not inside parenths, braces,
# brackets, since we inserted newlines between leaves
parenth_before_equals = Leaf(token.EQUAL, "=") in split_leaves[0]
self.parenthesize_parent(new_node, parenth_before_equals)
node_to_split.replace(new_node)
return combined_prefix
def parenthesize_parent(self, node_to_split, parenth_before_equals):
if node_to_split.type == symbols.print_stmt:
self.parenthesize_print_stmt(node_to_split)
elif node_to_split.type == symbols.return_stmt:
self.parenthesize_after_arg(node_to_split, "return")
elif node_to_split.type == symbols.expr_stmt:
if parenth_before_equals:
self.parenthesize_after_arg(node_to_split, "=")
else:
self.parenthesize_expr_stmt(node_to_split)
elif node_to_split.type == symbols.import_from:
self.parenthesize_after_arg(node_to_split, "import")
elif node_to_split.type in [symbols.power, symbols.atom]:
self.parenthesize_call_stmt(node_to_split)
elif node_to_split.type in [symbols.or_test, symbols.and_test, symbols
.not_test, symbols.test, symbols.arith_expr, symbols.comparison]:
self.parenthesize_test(node_to_split)
elif node_to_split.type == symbols.parameters:
# Paramteres are always parenthesized already
pass
def parenthesize_test(self, node_to_split):
first_child = node_to_split.children[0]
if first_child != LParen():
# node_to_split.children[0] is the "print" literal strip the
# current 1st child, since we will be prepending an LParen
if first_child.prefix != first_child.prefix.strip():
first_child.prefix = first_child.prefix.strip()
first_child.changed()
left_paren = LParen()
left_paren.prefix = " "
node_to_split.insert_child(0, left_paren)
node_to_split.append_child(RParen())
node_to_split.changed()
def parenthesize_print_stmt(self, node_to_split):
# print "hello there"
# return a, b
second_child = node_to_split.children[1]
if second_child != LParen():
# node_to_split.children[0] is the "print" literal strip the
# current 1st child, since we will be prepending an LParen
if second_child.prefix != second_child.prefix.strip():
second_child.prefix = second_child.prefix.strip()
second_child.changed()
node_to_split.insert_child(1, LParen())
node_to_split.append_child(RParen())
node_to_split.changed()
def parenthesize_after_arg(self, node_to_split, value):
# parenthesize the leaves after the first node with the value
value_index = 0
for index, child in enumerate(node_to_split.children):
if child.value == value:
value_index = index + 1
break
value_child = node_to_split.children[value_index]
if value_child != LParen():
# strip the current 1st child, since we will be prepending an
# LParen
if value_child.prefix != value_child.prefix.strip():
value_child.prefix = value_child.prefix.strip()
value_child.changed()
# We set a space prefix since this is after the '='
left_paren = LParen()
left_paren.prefix = " "
node_to_split.insert_child(value_index, left_paren)
node_to_split.append_child(RParen())
node_to_split.changed()
def parenthesize_expr_stmt(self, node_to_split):
# x = "foo" + bar
if node_to_split.children[0] != LParen():
node_to_split.insert_child(0, LParen())
node_to_split.append_child(RParen())
node_to_split.changed()
def parenthesize_call_stmt(self, node_to_split):
# a.b().c()
first_child = node_to_split.children[0]
if first_child != LParen():
# Since this can be at the beginning of a line, we can't just
# strip the prefix, we need to keep leading whitespace
first_child.prefix = "%s(" % first_child.prefix
first_child.changed()
node_to_split.append_child(RParen())
node_to_split.changed()
| apache-2.0 | 1,985,185,718,630,253,600 | 42.634503 | 79 | 0.574683 | false | 3.985844 | true | false | false |
mbkulik/ubuntu-make | umake/frameworks/dart.py | 5 | 3196 | # -*- coding: utf-8 -*-
# Copyright (C) 2014 Canonical
#
# Authors:
# Didier Roche
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Dartlang module"""
from contextlib import suppress
from gettext import gettext as _
import logging
import os
import platform
import re
import umake.frameworks.baseinstaller
from umake.interactions import DisplayMessage
from umake.tools import add_env_to_user
from umake.ui import UI
logger = logging.getLogger(__name__)
_supported_archs = ['i386', 'amd64']
class DartCategory(umake.frameworks.BaseCategory):

    def __init__(self):
        super().__init__(name="Dart", description=_("Dartlang Development Environment"), logo_path=None)


class DartLangEditorRemoval(umake.frameworks.baseinstaller.BaseInstaller):

    def __init__(self, category):
        super().__init__(name="Dart Editor", description=_("Dart SDK with editor (not supported upstream anyymore)"),
                         download_page=None, category=category, only_on_archs=_supported_archs, only_for_removal=True)


class DartLang(umake.frameworks.baseinstaller.BaseInstaller):

    def __init__(self, category):
        super().__init__(name="Dart SDK", description=_("Dart SDK (default)"), is_category_default=True,
                         category=category, only_on_archs=_supported_archs,
                         download_page="https://www.dartlang.org/downloads/linux.html",
                         dir_to_decompress_in_tarball="dart-sdk")

    def parse_download_link(self, line, in_download):
        """Parse Dart Lang download link, expect to find a url"""
        tag_machine = '64'
        if platform.machine() == 'i686':
            tag_machine = '32'
        download_re = r'<a data-bits="{}" data-os="linux" data-tool="sdk".*href="(.*)">'.format(tag_machine)
        p = re.search(download_re, line)
        with suppress(AttributeError):
            url = p.group(1)
            return ((url, None), True)
        return ((None, None), False)

    def post_install(self):
        """Add go necessary env variables"""
        add_env_to_user(self.name, {"PATH": {"value": os.path.join(self.install_path, "bin")}})
        UI.delayed_display(DisplayMessage(_("You need to restart a shell session for your installation to work")))

    @property
    def is_installed(self):
        # check path and requirements
        if not super().is_installed:
            return False
        if not os.path.isfile(os.path.join(self.install_path, "bin", "dart")):
            logger.debug("{} binary isn't installed".format(self.name))
            return False
        return True
| gpl-3.0 | 1,782,776,611,232,072,000 | 36.162791 | 118 | 0.663329 | false | 3.940814 | false | false | false |
rodrigocava/HANAcalcViewHierarchy | Project_Calc_Views_Hierarchy/calcViewHierarchy.py | 1 | 2792 | # Software licensed by the MIT License of Open Source (https://opensource.org/licenses/MIT)
import pyhdb
import urllib
from http.server import BaseHTTPRequestHandler, HTTPServer
# Change here the connection parameters for your HANA System
host = "0.0.0.0"
port = 30041
user = "USER"
pwsd = "PASSWORD"
# Recursive function to get all the dependencies from a view. It returns a JSON ready so the D3.js UI can render a hierarchy graph
def getDependent(view,parent,type,cursor,cache):
if view not in cache:
sql = 'SELECT BASE_OBJECT_NAME, BASE_OBJECT_TYPE FROM "PUBLIC"."OBJECT_DEPENDENCIES" WHERE DEPENDENT_OBJECT_NAME = \'' + view + '\' AND BASE_OBJECT_TYPE IN (\'VIEW\',\'TABLE\') AND DEPENDENCY_TYPE = 1';
cursor.execute(sql)
cache[view] = cursor.fetchall()
result = cache[view]
node = {}
node['name'] = view
node['parent'] = parent
node['value'] = 10 # Standard size choosen
node['type'] = 'black' # Standard color choosen
if type == 'VIEW':
node['level'] = 'red' # Meaning views
else:
node['level'] = 'green' # Meaning tables
if len(result) > 0:
node['children'] = []
for i in range(len(result)):
node['children'].append(getDependent(result[i][0],view,result[i][1],cursor,cache))
print('Hierarchy processed: ',node['name'])
return node
# Open the connection to HANA DB and saves the result in a file at the same folder
def viewHierarchy(view):
connection = pyhdb.connect(host = host, port = port, user = user, password = pwsd )
cursor = connection.cursor()
f = open('resultCalcViewHierarchy.json', 'w')
f.write(str(getDependent(view,'null','VIEW',cursor,{})).replace("'",'"'))
f.close()
connection.close()
# If you want just wanna call the function withou the UI comment everything below and run this:
# viewHierarchy('<path>/<view>')
# Just a simple handler and HTTP Server set up
class MyHandler(BaseHTTPRequestHandler):
def do_GET(self):
if '/calcViewHierarchy' in self.path:
p = self.path.split("?")
path = p[0][1:].split("/")
params = {}
if len(p) > 1:
params = urllib.parse.parse_qs(p[1], True, True)
print('Starting Hierarchy JSON with ',params['object'][0])
viewHierarchy(params['object'][0])
print('Finished Hierarchy JSON')
if '/viewHierarchy' in self.path:
f = open('viewHierarchy.html','rb')
self.send_response(200)
self.send_header('Content-type','text-html')
self.end_headers()
self.wfile.write(f.read())
f.close()
if self.path == '/resultCalcViewHierarchy':
f = open('resultCalcViewHierarchy.json','rb')
self.send_response(200)
self.wfile.write(f.read())
f.close()
def run():
print('http server is starting...')
httpd = HTTPServer(("", 5000), MyHandler)
print('http server is running...')
httpd.serve_forever()
if __name__ == '__main__':
run() | mit | 6,841,338,584,560,991,000 | 32.650602 | 204 | 0.679799 | false | 3.158371 | false | false | false |
Cynerva/pyipmi | pyipmi/commands/info.py | 2 | 4185 | # Copyright (c) 2012, Calxeda Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Calxeda Inc. nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
from .. import Command
from pyipmi.info import *
from pyipmi.tools.responseparser import ResponseParserMixIn
from pyipmi import IpmiError
class InfoBasicCommand(Command, ResponseParserMixIn):
""" Describes the cxoem info basic IPMI command
"""
name = "Retrieve basic SoC info"
ipmitool_args = ["cxoem", "info", "basic"]
def parse_results(self, out, err):
""" Parse ipmitool output
"""
result = InfoBasicResult()
if out.startswith("Calxeda SoC"):
for line in out.splitlines():
line = line.lstrip()
if line.startswith("Calxeda SoC"):
result.iana = int(line.split()[2].strip("()"), 16)
elif line.startswith("Firmware Version"):
result.firmware_version = line.partition(":")[2].strip()
elif line.startswith("SoC Version"):
result.ecme_version = line.partition(":")[2].strip()
elif line.startswith("Build Number"):
result.ecme_build_number = line.partition(":")[2].strip()
elif line.startswith("Timestamp"):
result.ecme_timestamp = int(line.split()[1].strip(":()"))
elif line.startswith("Node EEPROM Image Version"):
result.node_eeprom_version = line.partition(":")[2].strip()
elif line.startswith("Node EEPROM CFG id"):
result.node_eeprom_config = line.partition(":")[2].strip()
elif line.startswith("Slot EEPROM Image Version"):
result.slot_eeprom_version = line.partition(":")[2].strip()
elif line.startswith("Slot EEPROM CFG id"):
result.slot_eeprom_config = line.partition(":")[2].strip()
elif err.startswith("Error: "):
raise IpmiError(err.splitlines()[0][7:])
else:
raise IpmiError("Unknown Error")
return result
class InfoCardCommand(Command, ResponseParserMixIn):
""" Describes the cxoem info card IPMI command
"""
name = "Retrieve card info"
ipmitool_args = ["cxoem", "info", "card"]
result_type = InfoCardResult
response_fields = {
'Board Type' : {'attr' : 'type'},
'Board Revision' : {'attr' : 'revision'}
}
def parse_results(self, out, err):
result = ResponseParserMixIn.parse_results(self, out, err)
if not (hasattr(result, 'type') and hasattr(result, 'revision')):
raise IpmiError(out.strip())
return result
info_commands = {
"info_basic" : InfoBasicCommand,
"info_card" : InfoCardCommand
}
| bsd-3-clause | -6,216,887,270,168,991,000 | 40.435644 | 79 | 0.651613 | false | 4.301131 | false | false | false |
xuhaibahmad/PELL | pell/price_notifier/scrapers/MegaComputerScraper.py | 1 | 3847 | import math
import urllib.parse
import requests
from bs4 import BeautifulSoup as bSoup
from utils import utils
class MegaComputerScraper:
# Declare URL and class names to picked
BASE_URL = 'http://www.megacomputer.pk/catalogsearch/result/index/?is_ajax=1&limit=36&q={}'
PRODUCT_PRICE_CLASS_NAME = "old-price"
PRODUCT_PRICE_CLASS_NAME_SECONDARY = "price-box"
PRODUCT_SPECIAL_PRICE_CLASS_NAME = "special-price"
PRODUCT_TITLE_CLASS_NAME = "product-name"
@staticmethod
def search_item(product):
# Read the page contents and get structured data using beautiful soup
url = MegaComputerScraper.BASE_URL.format(urllib.parse.quote(product.name))
data = bSoup(requests.get(url).text, "html.parser")
# Find main container
main = data.find("div", {"class", "main"})
# Find all the item containers
containers = main.findAll("li", {"class", "item"})
# Get item information for each item in container
if len(containers) > 0:
for item in containers:
title_div = item.find(
"h2", {"class", MegaComputerScraper.PRODUCT_TITLE_CLASS_NAME}
)
price_div = item.find(
"p", {"class", MegaComputerScraper.PRODUCT_PRICE_CLASS_NAME}
)
special_price_div = item.find(
"p", {"class", MegaComputerScraper.PRODUCT_SPECIAL_PRICE_CLASS_NAME}
)
secondary_price_div = item.find(
"div", {"class", MegaComputerScraper.PRODUCT_PRICE_CLASS_NAME_SECONDARY}
)
has_price_div = price_div is not None
has_special_price_div = special_price_div is not None
has_secondary_price_div = secondary_price_div is not None
price_div = price_div.findAll("span", {})[1] \
if has_price_div else None
special_price_div = special_price_div.findAll("span", {})[1] \
if has_special_price_div else None
secondary_price_div = secondary_price_div.findAll("span", {})[1] \
if has_secondary_price_div else None
title = title_div.a["title"]
brand = str(title).split(" ", 1)
brand = brand[0] if len(brand) > 0 else "-"
link = title_div.a["href"]
if has_special_price_div:
price = special_price_div.text
elif has_price_div:
price = price_div.text
elif has_secondary_price_div:
price = secondary_price_div.text
else:
price = 0
price = MegaComputerScraper.extract_price(price)
is_valid_price = price is not None and price > 0
if is_valid_price and int(price) <= int(product.baseline_price):
prompt = "\"" + title.replace(",", "|") + "\" is now available in: " + str(
price) + " at Mega Computer (Baseline: " + product.baseline_price + ")"
details = utils.get_details(brand, price, title, link)
if utils.is_similar(title, product.description):
utils.print_similarity(title, product.description)
utils.display_windows_notification(brand, prompt)
utils.write_to_csv(details)
@staticmethod
def extract_price(price):
if price is None:
return 0
price = str(price).lower().replace(" ", "").replace("pkr", "").replace(",", "")
value = [int(s) for s in price.split() if s.isdigit()]
price = price if len(value) == 0 else value[0]
return math.floor(float(price))
| gpl-3.0 | -746,995,803,090,889,300 | 41.744444 | 95 | 0.54744 | false | 4.058017 | false | false | false |
lewisc/spark-tk | regression-tests/sparktkregtests/testcases/frames/ecdf_test.py | 13 | 3032 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Tests the ECDF functionality """
import unittest
import random
from sparktkregtests.lib import sparktk_test
class ecdfTest(sparktk_test.SparkTKTestCase):
    def setUp(self):
        """Build test frame"""
        super(ecdfTest, self).setUp()
        # generate a dataset to test ecdf on
        # it will just be a single column of ints
        column = [[random.randint(0, 5)] for index in xrange(0, 20)]
        schema = [("C0", int)]
        self.frame = self.context.frame.create(column,
                                               schema=schema)

    def validate_ecdf(self):
        # call sparktk ecdf function on the data and get as pandas df
        ecdf_sparktk_result = self.frame.ecdf("C0")
        pd_ecdf = ecdf_sparktk_result.to_pandas(ecdf_sparktk_result.row_count)
        # get the original frame as pandas df so we can calculate our own result
        pd_original_frame = self.frame.to_pandas(self.frame.row_count)
        # the formula for calculating ecdf is
        # F(x) = 1/n * sum from 1 to n of I(x_i)
        # where I = { 1 if x_i <= x, 0 if x_i > x }
        # i.e., for each element in our data column count
        # the number of items in that row which are less than
        # or equal to that item, divide by the number
        # of total items in the column
        grouped = pd_original_frame.groupby("C0").size()
        our_result = grouped.sort_index().cumsum()*1.0/len(pd_original_frame)
        # finaly we iterate through the sparktk result and compare it with our result
        for index, row in pd_ecdf.iterrows():
            self.assertAlmostEqual(row["C0"+'_ecdf'],
                                   our_result[int(row["C0"])])

    def test_ecdf_bad_name(self):
        """Test ecdf with an invalid column name."""
        with self.assertRaisesRegexp(Exception, "No column named bad_name"):
            self.frame.ecdf("bad_name")

    def test_ecdf_bad_type(self):
        """Test ecdf with an invalid column type."""
        with self.assertRaisesRegexp(Exception, "does not exist"):
            self.frame.ecdf(5)

    def test_ecdf_none(self):
        """Test ecdf with a None for the column name."""
        with self.assertRaisesRegexp(Exception, "column is required"):
            self.frame.ecdf(None)


if __name__ == '__main__':
    unittest.main()
| apache-2.0 | 8,987,978,931,252,181,000 | 37.363636 | 85 | 0.63304 | false | 3.418981 | true | false | false |
mpurzynski/MozDef | cron/update_geolite_db.py | 2 | 3321 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
# Copyright (c) 2017 Mozilla Corporation
import sys
import requests
import tempfile
import gzip
from configlib import getConfig, OptionParser
from tempfile import mkstemp
from os import close, fsync, path, rename
from mozdef_util.geo_ip import GeoIP
from mozdef_util.utilities.logger import logger, initLogger
def fetch_db_data(db_file):
    db_download_location = 'https://updates.maxmind.com/geoip/databases/' + db_file[:-5] + '/update'
    logger.debug('Fetching db data from ' + db_download_location)
    auth_creds = (options.account_id, options.license_key)
    response = requests.get(db_download_location, auth=auth_creds)
    if not response.ok:
        raise Exception("Received bad response from maxmind server: {0}".format(response.text))
    db_raw_data = response.content
    with tempfile.NamedTemporaryFile(mode='wb', prefix=db_file + '.zip.', suffix='.tmp', dir=options.db_store_location) as temp:
        logger.debug('Writing compressed gzip to temp file: ' + temp.name)
        temp.write(db_raw_data)
        temp.flush()
        logger.debug('Extracting gzip data from ' + temp.name)
        gfile = gzip.GzipFile(temp.name, "rb")
        data = gfile.read()
    return data


def save_db_data(db_file, db_data):
    save_path = path.join(options.db_store_location, db_file)
    fd, temp_path = mkstemp(suffix='.tmp', prefix=db_file, dir=options.db_store_location)
    with open(temp_path, 'wb') as temp:
        logger.debug("Saving db data to " + temp_path)
        temp.write(db_data)
        fsync(temp.fileno())
        temp.flush()
    logger.debug("Testing temp geolite db file")
    geo_ip = GeoIP(temp_path)
    # Do a generic lookup to verify we don't get any errors (malformed data)
    geo_ip.lookup_ip('8.8.8.8')
    logger.debug("Moving temp file to " + save_path)
    close(fd)
    rename(temp_path, save_path)


def main():
    logger.debug('Starting')
    db_data = fetch_db_data(options.db_file)
    asn_db_data = fetch_db_data(options.asn_db_file)
    save_db_data(options.db_file, db_data)
    save_db_data(options.asn_db_file, asn_db_data)


def initConfig():
    # output our log to stdout or syslog
    options.output = getConfig('output', 'stdout', options.configfile)
    options.sysloghostname = getConfig('sysloghostname', 'localhost', options.configfile)
    options.syslogport = getConfig('syslogport', 514, options.configfile)
    options.db_store_location = getConfig('db_store_location', '', options.configfile)
    options.db_file = getConfig('db_file', '', options.configfile)
    options.asn_db_file = getConfig('asn_db_file', '', options.configfile)
    options.account_id = getConfig('account_id', '', options.configfile)
    options.license_key = getConfig('license_key', '', options.configfile)


if __name__ == '__main__':
    parser = OptionParser()
    parser.add_option(
        "-c",
        dest='configfile',
        default=sys.argv[0].replace('.py', '.conf'),
        help="configuration file to use")
    (options, args) = parser.parse_args()
    initConfig()
    initLogger(options)
    main()
| mpl-2.0 | -4,587,281,079,884,816,400 | 36.314607 | 128 | 0.672689 | false | 3.503165 | true | false | false |
nathanielvarona/airflow | airflow/providers/amazon/aws/sensors/glue_crawler.py | 3 | 2635 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Optional
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.glue_crawler import AwsGlueCrawlerHook
from airflow.sensors.base import BaseSensorOperator
class AwsGlueCrawlerSensor(BaseSensorOperator):
    """
    Waits for an AWS Glue crawler to reach any of the statuses below
    'FAILED', 'CANCELLED', 'SUCCEEDED'

    :param crawler_name: The AWS Glue crawler unique name
    :type crawler_name: str
    :param aws_conn_id: aws connection to use, defaults to 'aws_default'
    :type aws_conn_id: str
    """

    def __init__(self, *, crawler_name: str, aws_conn_id: str = 'aws_default', **kwargs) -> None:
        super().__init__(**kwargs)
        self.crawler_name = crawler_name
        self.aws_conn_id = aws_conn_id
        self.success_statuses = 'SUCCEEDED'
        self.errored_statuses = ('FAILED', 'CANCELLED')
        self.hook: Optional[AwsGlueCrawlerHook] = None

    def poke(self, context):
        hook = self.get_hook()
        self.log.info("Poking for AWS Glue crawler: %s", self.crawler_name)
        crawler_state = hook.get_crawler(self.crawler_name)['State']
        if crawler_state == 'READY':
            self.log.info("State: %s", crawler_state)
            crawler_status = hook.get_crawler(self.crawler_name)['LastCrawl']['Status']
            if crawler_status == self.success_statuses:
                self.log.info("Status: %s", crawler_status)
                return True
            else:
                raise AirflowException(f"Status: {crawler_status}")
        else:
            return False

    def get_hook(self) -> AwsGlueCrawlerHook:
        """Returns a new or pre-existing AwsGlueCrawlerHook"""
        if self.hook:
            return self.hook

        self.hook = AwsGlueCrawlerHook(aws_conn_id=self.aws_conn_id)
        return self.hook
| apache-2.0 | -321,722,669,545,936,100 | 39.538462 | 97 | 0.677799 | false | 3.863636 | false | false | false |
juditacs/wikt2dict | wikt2dict/article.py | 1 | 5004 | import re
import logging
from collections import defaultdict
template_re = re.compile(r"\{\{[^\}]*\}\}", re.UNICODE)
default_translation_re = re.compile(
ur"\{\{(t[\u00d8|\-\+])\|([^}]+)\}\}", re.UNICODE)
global_features = ["sourcewc", "article", "has_article"]
# tester method
def uprint(str_):
print str_.encode('utf8')
class ArticleParser(object):
""" Base class for all article parsers.
This class should not be instantiated.
"""
def __init__(self, wikt_cfg, parser_cfg, filter_langs=None):
self.cfg = parser_cfg
self.wikt_cfg = wikt_cfg
self.pairs = list()
self.titles = set()
self.stats = defaultdict(list)
self.build_skip_re()
self.build_trim_re()
if self.cfg['lower'] and self.cfg['lower'] == 1:
self.lower_all = True
else:
self.lower_all = False
def build_trim_re(self):
if self.cfg['trim_re']:
self.trim_re = re.compile(ur'' + self.cfg['trim_re'].decode('utf8'),
re.UNICODE)
def build_skip_re(self):
if not self.cfg['skip_translation']:
self.skip_translation_re = None
else:
self.skip_translation_re = re.compile(ur'' + self.cfg['skip_translation'].decode('utf8'), re.UNICODE)
if not self.cfg['skip_translation_line']:
self.skip_translation_line_re = None
else:
self.skip_translation_line_re = re.compile(self.cfg['skip_translation_line'], re.UNICODE)
def skip_translation_line(self, line):
if 'PAGENAME' in line:
return True
if self.skip_translation_line_re and self.skip_translation_line_re.search(line):
return True
return False
def parse_article(self, article, source_wc=None):
if self.skip_article(article) == True:
self.stats["skip_article"].append(article[0])
return None
title, text = article
if self.lower_all:
text = text.lower()
self.titles.add(title)
self.stats["ok"].append(title)
t = self.get_pairs(text)
if t:
self.store_translations(title, t, source_wc)
def get_pairs(self, text):
return dict()
def skip_article(self, article):
if not article[0] or not article[1]:
return True
if not article[1].strip() or not article[0].strip():
return True
# ASSUMPTION: articles with a namespace contain no useful data
if ':' in article[0]:
return True
return False
def store_translations(self, this_word, translations, source_wc=None):
for wc in translations.keys():
if len(translations[wc]) > 0:
self.pairs.extend(
[[source_wc, this_word, wc, i, "sourcewc=" + self.wc, \
"article=" + this_word]
for i in translations[wc]])
def write_word_pairs_to_file(self, append=True):
""" Write output to file
One pair and its features are written to tab separated file
"""
fn = self.cfg['dumpdir'] + '/' + self.cfg['fullname'] + '/' + self.cfg[\
'word_pairs_outfile']
if append:
outf = open(fn, 'a+')
else:
outf = open(fn, 'w')
for p in self.pairs:
out_str = self.generate_out_str(self.add_features_to_word_pair(p))
if out_str:
outf.write(out_str.encode('utf8'))
outf.close()
def generate_out_str(self, pair):
if not pair:
return None
if len(pair) < 4:
return None
# alphabetic order
if pair[0] < pair[2]:
outstr = "\t".join(pair[0:4])
else:
outstr = "\t".join(pair[2:4] + pair[0:2])
feat_d = dict()
for feat in pair[4:]:
fields = feat.split('=')
if not fields[0] in global_features:
self.log_handler.error('Feature not found {0}'.format(feat))
continue
if len(fields) > 1:
feat_d[fields[0]] = fields[1]
else:
feat_d[fields[0]] = '1'
for feat in global_features:
if feat in feat_d:
outstr += "\t" + feat_d[feat]
else:
outstr += "\t0"
outstr += "\n"
return outstr
def add_features_to_word_pair(self, pair):
""" Adding features to translation pairs
"""
# article of the word exists
if pair[3] in self.titles:
pair.append("has_article")
return pair
def trim_translation(self, text):
if self.cfg['trim_re']:
text = self.trim_re.sub(r'\1\2', text)
text = text.replace('[', '')
text = text.replace(']', '')
text = text.replace('{', '')
text = text.replace('}', '')
return text.strip()
| lgpl-3.0 | -2,061,081,859,329,934,600 | 32.583893 | 113 | 0.526179 | false | 3.819847 | false | false | false |
seadsystem/website | seadssite/models.py | 2 | 3066 | from __future__ import print_function
import sys
from functools import partial
from django.contrib.auth.models import User
from django.db import models
from django.core.exceptions import FieldError
'''
error function for adding errors to stdrr
'''
error = partial(print, sys.stderr)
'''
Model Manager for doing table actions on devices. Extends base model manager class to include method
for registering (creating) devices
'''
class DeviceManager(models.Manager):
def register_device(self, device_id, device_name, current_user):
if Device.objects.all().filter(user=current_user, device_id=device_id):
raise FieldError('This device has already been registered to this user.', device_id, current_user)
elif Device.objects.all().filter(device_id=device_id, is_active=True):
raise FieldError('This device has already been registered to a different user.', device_id)
try:
newDevice = Device(device_id=device_id, name=device_name, user=current_user)
newDevice.save()
except FieldError as fieldError:
errString = str(type(fieldError)) + ": " + str(fieldError.message)
error(errString)
except (ValueError, TypeError):
error("Invalid Device ID")
'''
model for SEADS devices like the SEADS plug, eGauge etc
# Required fields
# - device_id (primary_key) corresponds to a device id in the data_raw table
# - name (string) name of devices, defaults to 'Seads Device'
# Foreign keys
# - user_id (ForeignKey) corresponds to the user who 'owns' this device, allows null (device has not been registered)
'''
class Device(models.Model):
device_id = models.IntegerField(primary_key=True, unique=True)
name = models.CharField(max_length=200, default='Seads Device')
connection = models.BooleanField(default=True)
is_active = models.BooleanField(default=True)
user = models.ForeignKey(User, null=True)
objects = DeviceManager()
'''
    # deactivate_device()
    # Summary: This will deactivate the device, which removes it from view;
    # it doesn't do a full delete, as that would cause problems with foreign keys
'''
def deactivate_device(self):
if Device.objects.filter(device_id = self.device_id, is_active=False):
            raise FieldError('This device has already been deactivated.', self)
self.user = None
self.is_active = False
self.save()
'''
# reactivate_device()
    # Summary: This will reactivate a device that has already been deactivated
'''
def reactivate_device(self, user):
if Device.objects.filter(device_id = self.device_id, is_active=True):
raise FieldError('This device is currently active.', self)
self.user = user
self.is_active = True
self.save()
'''
model for Images displayed on the site
'''
# Required Fields
# - docfile (file) corresponds to a file upload path
class Document(models.Model):
docfile = models.FileField(upload_to='documents/%Y/%m/%d')
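# Hedged usage sketch (illustrative only: the username, device id and name are
# made-up values, and a configured Django environment is assumed):
#   user = User.objects.get(username='alice')
#   Device.objects.register_device(123, 'Kitchen SEAD plug', user)
#   plug = Device.objects.get(device_id=123)
#   plug.deactivate_device()       # hides the device without deleting the row
#   plug.reactivate_device(user)   # attaches it to a user again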
| mit | -1,256,448,640,234,698,800 | 35.939759 | 121 | 0.683953 | false | 3.966365 | false | false | false |
USGSDenverPychron/pychron | pychron/hardware/gauges/granville_phillips/pychron_micro_ion_controller.py | 1 | 2366 | # ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.hardware.gauges.granville_phillips.micro_ion_controller import MicroIonController
class PychronMicroIonController(MicroIonController):
def get_pressure(self, name, **kw):
return self.ask('Get Pressure {} {}'.format(self.name, name), **kw)
def get_ion_pressure(self, **kw):
return self.ask('Get Pressure {} IG'.format(self.name))
def get_convectron_a_pressure(self, **kw):
return self.ask('GetPressure {} CG1'.format(self.name))
def get_convectron_b_pressure(self, **kw):
return self.ask('GetPressure {} CG2'.format(self.name))
class QtegraMicroIonController(MicroIonController):
def get_pressures(self, verbose=False):
kw = {'verbose': verbose, 'force': True}
for d in self.gauges:
ig = self.ask('GetParameter {}'.format(d.name), **kw)
self._set_gauge_pressure(d.name, ig)
# def get_pressure(self, name, **kw):
# k=''
# return self.ask('GetParameter {}'.format(k))
#
# def get_ion_pressure(self, **kw):
# k=''
# return self.ask('GetParameter {}'.format(k))
#
# def get_convectron_a_pressure(self, **kw):
# k=''
# return self.ask('GetParameter {}'.format(k))
#
# def get_convectron_b_pressure(self, **kw):
# k=''
# return self.ask('GetParameter {}'.format(k))
# ============= EOF =============================================
| apache-2.0 | -1,608,491,260,895,939,300 | 36.555556 | 94 | 0.562975 | false | 3.923715 | false | false | false |
deo1/deo1 | KaggleTitanic/models/model_2017_09_17_18_25_35-0.9877.py | 1 | 1033 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline, make_union
from tpot.builtins import StackingEstimator
from sklearn.preprocessing import FunctionTransformer
from copy import copy
# NOTE: Make sure that the class is labeled 'class' in the data file
tpot_data = np.recfromcsv('PATH/TO/DATA/FILE', delimiter='COLUMN_SEPARATOR', dtype=np.float64)
features = np.delete(tpot_data.view(np.float64).reshape(tpot_data.size, -1), tpot_data.dtype.names.index('class'), axis=1)
training_features, testing_features, training_target, testing_target = \
train_test_split(features, tpot_data['class'], random_state=42)
exported_pipeline = make_pipeline(
make_union(
FunctionTransformer(copy),
StackingEstimator(estimator=LogisticRegression(C=10.0))
),
LogisticRegression(C=10.0)
)
exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
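# Hedged addition (not part of the original TPOT export): a quick hold-out
# score of the fitted pipeline; accuracy_score comes from scikit-learn, which
# the export already depends on.
from sklearn.metrics import accuracy_score
print(accuracy_score(testing_target, results))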
| mit | -7,004,613,287,593,911,000 | 40.32 | 122 | 0.775411 | false | 3.650177 | false | false | false |
yfu/tools | fasta_splitter.py | 1 | 1492 | #!/usr/bin/env python
# Read in a fasta file and split it into multiple files
# Author: Yu Fu (yfu at yfu dot me)
import argparse
parser = argparse.ArgumentParser(description='Split a fasta file into multiple files')
parser.add_argument('-p', '--parts', help='The number of (almost) equal parts', required=True)
parser.add_argument('-f', '--file', help='The fasta file', required=True)
parser.add_argument('-v', '--verbose', help='Print lots of useless information', action="store_true")
args = parser.parse_args()
n = int(args.parts)
fn = args.file
verbose = args.verbose
total = 0
fh = open(fn, 'r')
for line in fh.readlines():
line = line.strip()
if line[0] == '>':
total += 1
fh.close()
# Do notice that total might not be a multiple of parts, e.g. 151 total sequences and 3 parts.
each = int(total / float(n))
fh = open(fn, 'r')
output = []
# Notice that inside the program, the index of files starts from 0
# and the filenames start from 1
for i in range(n):
output.append( open(fn + '.' + str(i+1), "w") )
counter = -1
for line in fh.readlines():
if(line[0] == '>'):
counter += 1
line = line.strip()
file_number = int(counter / each)
# In order to put the last bit of the file into the last file...
if( counter / each > n-1 ):
file_number = n-1
# print file_number, line
    if verbose:
print str(file_number) +"\t" + line
print >>output[file_number], line
for i in range(n):
output[i].close()
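# Hedged usage sketch (reads.fa is a made-up file name): splitting a FASTA file
# into three roughly equal parts would be invoked from the shell as
#     python fasta_splitter.py -f reads.fa -p 3
# which writes reads.fa.1, reads.fa.2 and reads.fa.3, each holding about
# total/3 of the sequences (the remainder goes into the last file).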
| gpl-3.0 | 8,845,160,813,846,692,000 | 26.127273 | 101 | 0.63874 | false | 3.243478 | false | false | false |
Ervii/garage-time | pajamas/src/python/twitter/common/java/perfdata/bin/jammystat.py | 14 | 2823 | # ==================================================================================================
# Copyright 2013 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import os
import glob
from twitter.common import app
from twitter.common.java.perfdata import PerfData
app.add_option(
'-f',
dest='filename',
default=None,
help='Filename to load hsperfdata from.')
app.add_option(
'--hsperfdata_root',
dest='hsperfdata_root',
default='/tmp',
help='Root directory to search for hsperfdata files.')
app.add_option(
'-l',
dest='list',
default=False,
action='store_true',
help='List pids.')
app.add_option(
'-p',
dest='pid',
default=None,
type=int,
help='PID to load hsperfdata from.')
def file_provider():
options = app.get_options()
def provider():
with open(options.filename, 'rb') as fp:
return fp.read()
return provider
def list_pids():
options = app.get_options()
pattern = os.path.join(options.hsperfdata_root, 'hsperfdata_*', '*')
for path in glob.glob(pattern):
root, pid = os.path.split(path)
dirname = os.path.basename(root)
role = dirname[len('hsperfdata_'):]
yield path, role, int(pid)
def print_pids():
for path, role, pid in list_pids():
print('role %s pid %d path %s' % (role, pid, path))
def pid_provider():
options = app.get_options()
for path, _, pid in list_pids():
if pid == options.pid:
break
else:
app.error('Could not find pid %s' % options.pid)
def loader():
with open(path, 'rb') as fp:
return fp.read()
return loader
def main(args, options):
if len(args) > 0:
app.error('Must provide hsperfdata via -f/-p')
if options.list:
print_pids()
return
perfdata = None
if options.filename:
perfdata = PerfData.get(file_provider())
elif options.pid:
perfdata = PerfData.get(pid_provider())
if perfdata is None:
app.error('No hsperfdata provider specified!')
perfdata.sample()
for key in sorted(perfdata):
print('%s: %s' % (key, perfdata[key]))
app.main()
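# Hedged usage sketch (the pid and path are made up): list candidate JVMs with
#     jammystat.py -l
# then dump one process's counters with
#     jammystat.py -p 12345
# or read a copied hsperfdata file directly with
#     jammystat.py -f /tmp/hsperfdata_user/12345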
| apache-2.0 | 8,797,133,664,789,124,000 | 23.763158 | 100 | 0.590861 | false | 3.784182 | false | false | false |
Delosari/dazer | bin/user_conf/Delete_IntermediateFiles.py | 1 | 2897 | '''
Created on Dec 16, 2014
@author: delosari
'''
from os import remove
from os.path import isfile
import CodeTools.PlottingManager as plotMan
from PipeLineMethods.ManageFlow import DataToTreat
Pv = plotMan.myPickle()
LogFiles_Extension = ".plot"
CombinedFits = "WHT.fits"
NebularFits = "_Neb.fits"
StellarRemovedFits = "_WHT_Neb_Star.fits"
StellarContinuum = "_StellarContinuum.fits"
MaskFile = "_mask.txt"
TexFiles = ['.tex']
OldFiles = ['_LinesLog_v2.txt', '_LinesLog_v3.txt', '.LinesLog_v3']
RootFolder = DataToTreat()
Pattern = [LogFiles_Extension, NebularFits, StellarContinuum, StellarRemovedFits, CombinedFits] + OldFiles + TexFiles
LogImages_Extension = ".png"
ForceDelete = True
#Find and organize files from terminal command or .py file
FilesList = Pv.FindAndOrganize(Pattern, RootFolder, CheckComputer=True)
#Loop through files
for m in range(len(FilesList)):
for j in range(len(FilesList[m])):
CodeName, FileName, FileFolder = Pv.FileAnalyzer(FilesList[m][j], Texting=False)
#Case of logs
if LogFiles_Extension in FileName:
LogName = FileName
ImageName = LogName.replace(LogFiles_Extension, LogImages_Extension)
#Deleting log
if isfile(FileFolder + LogName):
print '--', LogName
if ForceDelete == True:
remove(FileFolder + LogName)
#Deleting images
if isfile(FileFolder + ImageName):
print '--', ImageName
if ForceDelete == True:
remove(FileFolder + ImageName)
#Case of fits file
if 'fit' in FileName:
print '\n-Fits Found'
FitsFile = FileName
print '--',FitsFile,'\n'
#Deleting Fits file
if ForceDelete == True:
remove(FileFolder + FitsFile)
#Case of line logs file
if ('LinesLog' in FileName) or ('.LinesLog_v3' in FileName):
print '\n-Lines Log Found2'
LinesLog = FileName
print '--',LinesLog,'\n'
#Deleting Fits file
if ForceDelete == True:
remove(FileFolder + LinesLog)
#Case of many tex files:
# for tex_extension in TexFiles:
# if tex_extension in FileName:
# print 'Tex file found3:'
# Tex_File = FileName
# print '--', Tex_File, '\n'
#
# #Deleting Fits file
# if ForceDelete == True:
# remove(FileFolder + Tex_File)
| mit | -6,235,822,879,463,987,000 | 29.1875 | 129 | 0.525026 | false | 4.063114 | false | false | false |
hisen630/my_stock | download_daily_data.py | 1 | 4108 | #!/usr/bin/env python2.7
#coding=utf-8
import logging
import sys
import time
import argparse
import pandas as pd
from pandas import DataFrame,Series
import tushare as ts
import lib.mylog
import lib.utils as utils
import conf.conf as conf
class DailyDataDownloader(object):
def __init__(self,date, interval=10, retryTimes=5):
self.date = date
self.interval = interval if not conf.DEBUG else 1
self.retryTimes = retryTimes
self.stockBasics = utils.downloadStockBasics()
def download(self):
codes = self.stockBasics.index.values
fqFactorDF = DataFrame()
codeDF = DataFrame()
for code in codes:
descStr = " (%s, %s) "%(code, self.date)
_intervalFactor = 2
_interval = self.interval
_retryCount = 0
while _retryCount < self.retryTimes:
_retryCount += 1
logging.info("Downloading daily %s trying %d times."%(descStr, _retryCount))
_interval *= _intervalFactor
try:
                    # a brand-new code just listed on the market may cause '_parase_fq_factor' to raise exceptions
_df = ts.get_realtime_quotes(code)
if _df is None: # if the code is off the market, this could happen
break
_df = _df[['code','open','high','pre_close','price','low','volume','amount','date']].set_index('date')
_df.rename(columns={'price':'close'},inplace=True)
                    # a brand-new code just listed on the market can also look like this: get_realtime_quotes may still return rows
if ((float(_df['high']) == 0) & (float(_df['low'])==0)):
break # no need to store
_fqDF = ts.stock.trading._parase_fq_factor(code,'','')
_fqDF.insert(0,"code",code,True)
_fqDF = _fqDF.drop_duplicates('date').set_index('date').sort_index(ascending=False)
#_fqDF = _fqDF.ix[self.date]
_fqDF = _fqDF.head(1)
# stock may exit the market or just pause
if ((float(_df['high']) == 0) & (float(_df['low'])==0)):
break # no need to store
#_rate = float(_fqDF['factor'])/float(_df['pre_close'])
else:
_rate = float(_fqDF['factor'])/float(_df['close'])
_df = _df.drop('pre_close',axis=1)
for label in ['open', 'high', 'close', 'low']:
_df[label] = float(_df[label]) * _rate
#_df[label] = _df[label].map(lambda x:'%.2f'%x)
_df[label] = _df[label].astype(float)
except Exception, e:
if _retryCount + 1 == self.retryTimes or conf.DEBUG:
raise e
logging.info("Download error, waiting for %d secs."%_interval)
time.sleep(_interval)
continue
                fqFactorDF = pd.concat([fqFactorDF,_fqDF])
                codeDF = pd.concat([codeDF, _df])
                break
            if conf.DEBUG:
                break
self._save(fqFactorDF, codeDF)
def _save(self, fqFactorDF, codeDF):
logging.info("Saving daily fq factor.")
fqFactorDF.to_sql(name='t_daily_fqFactor', con=utils.getEngine(), if_exists='append', chunksize=20000)
logging.info("Saved daily fq factor.")
logging.info("Saving daily hfq data.")
codeDF.to_sql(name='t_daily_hfq_stock', con=utils.getEngine(), if_exists='append')
logging.info("Saved daily hfq data.")
if '__main__' == __name__:
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--production", action="store_true", help='''defalt is in debug mode, which only plays a little''')
args = parser.parse_args()
if args.production:
conf.DEBUG = False
downloader = DailyDataDownloader('')
downloader.download()
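# Hedged usage note: running `python download_daily_data.py` keeps conf.DEBUG
# on and stops after the first code, while `python download_daily_data.py -p`
# switches to production and walks every code in the stock basics table before
# writing to the t_daily_fqFactor and t_daily_hfq_stock tables.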
| gpl-2.0 | 6,877,951,573,446,795,000 | 37.754717 | 129 | 0.525803 | false | 4.063304 | false | false | false |
uhef/fs-uae-gles | launcher/fs_uae_launcher/ui/Book.py | 2 | 1326 | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import fs_uae_launcher.fsui as fsui
class Book(fsui.Panel):
def __init__(self, parent):
fsui.Panel.__init__(self, parent)
self.layout = fsui.VerticalLayout()
self.page_titles = []
self.pages = []
self.current_page = None
def add_page(self, function, title=""):
self.page_titles.append(title)
self.pages.append(function)
def set_page(self, page):
try:
index = page + 0
except TypeError:
for i, p in enumerate(self.pages):
if page == p:
index = i
break
else:
raise Exception("page not found")
if self.current_page:
self.current_page.hide()
self.layout.remove(self.current_page)
if callable(self.pages[index]):
page = self.pages[index](self)
self.pages[index] = page
else:
page = self.pages[index]
self.layout.add(page, fill=True, expand=True)
self.current_page = page
page.show()
if hasattr(page, "on_show"):
page.on_show()
self.layout.update()
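# Hedged usage sketch (SettingsPage and AboutPage are placeholder page classes):
#   book = Book(parent)
#   book.add_page(SettingsPage, "Settings")  # constructed lazily on first show
#   book.add_page(AboutPage, "About")
#   book.set_page(0)             # select by index...
#   book.set_page(SettingsPage)  # ...or by the callable/page that was added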
| gpl-2.0 | -5,525,024,875,432,784,000 | 28.466667 | 53 | 0.546757 | false | 4.030395 | false | false | false |
BhallaLab/moose-full | moose-core/tests/python/test_synchan.py | 2 | 3130 | # test_synchan.py ---
#
# Filename: test_synchan.py
# Description:
# Author: Subhasis Ray
# Maintainer:
# Created: Wed Apr 23 12:00:01 2014 (+0530)
# Version:
# Last-Updated:
# By:
# Update #: 0
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
import sys
sys.path.append('../../python')
import moose
def make_synapse(path):
"""Create a synapse with two time constants. Connect a spikegen to the
synapse. Create a pulsegen to drive the spikegen."""
syn = moose.SynChan(path)
syn.tau1 = 5.0 # ms
syn.tau2 = 1.0 # ms
syn.Gk = 1.0 # mS
syn.Ek = 0.0
## NOTE: This is old implementation.
#syn.synapse.num = 1
## syn.bufferTime = 1.0 # ms
#syn.synapse.delay = 1.0
#syn.synapse.weight = 1.0
#print 'Synapses:', len(syn.synapse), 'w=', syn.synapse[0].weight
# IN new implementation, there is SimpleSynHandler class which takes cares
# of multiple synapses. Class SynChan does not have any .synapse field.
synH = moose.SimpleSynHandler( '%s/SynHandler' % path)
synH.synapse.num = 1
## syn.bufferTime = 1.0 # ms
synH.synapse.delay = 1.0
synH.synapse.weight = 1.0
synH.connect('activationOut', syn, 'activation')
print(('Synapses:', len(synH.synapse), 'w=', synH.synapse[0].weight ))
spikegen = moose.SpikeGen('%s/spike' % (syn.parent.path))
spikegen.edgeTriggered = False # Make it fire continuously when input is high
spikegen.refractT = 10.0 # With this setting it will fire at 1 s / 10 ms = 100 Hz
spikegen.threshold = 0.5
    # This will send alternating -1 and +1 to SpikeGen to make it fire
spike_stim = moose.PulseGen('%s/spike_stim' % (syn.parent.path))
spike_stim.delay[0] = 1.0
spike_stim.level[0] = 1.0
spike_stim.width[0] = 100.0
moose.connect(spike_stim, 'output', spikegen, 'Vm')
m = moose.connect(spikegen, 'spikeOut', synH.synapse.vec, 'addSpike', 'Sparse')
m.setRandomConnectivity(1.0, 1)
m = moose.connect(spikegen, 'spikeOut', synH.synapse[0], 'addSpike') # this causes segfault
return syn, spikegen
if __name__ == '__main__':
model = moose.Neutral('/model')
syn, spikegen = make_synapse('/model/synchan')
moose.setClock(0, 0.01)
moose.useClock(0, '/model/##', 'process')
moose.reinit()
moose.start(100)
#
# test_synchan.py ends here
| gpl-2.0 | 5,338,567,939,501,106,000 | 29.096154 | 95 | 0.659105 | false | 3.000959 | false | false | false |
andrewyoung1991/supriya | supriya/tools/synthdeftools/DoneAction.py | 1 | 1041 | # -*- encoding: utf-8 -*-
from supriya.tools.systemtools.Enumeration import Enumeration
class DoneAction(Enumeration):
r'''An enumeration of scsynth UGen "done" actions.
::
>>> from supriya.tools import synthdeftools
>>> synthdeftools.DoneAction(2)
DoneAction.FREE_SYNTH
::
>>> synthdeftools.DoneAction.from_expr('pause synth')
DoneAction.PAUSE_SYNTH
'''
### CLASS VARIABLES ###
NOTHING = 0
PAUSE_SYNTH = 1
FREE_SYNTH = 2
FREE_SYNTH_AND_PRECEDING_NODE = 3
FREE_SYNTH_AND_FOLLOWING_NODE = 4
FREE_SYNTH_AND_FREEALL_PRECEDING_NODE = 5
FREE_SYNTH_AND_FREEALL_FOLLOWING_NODE = 6
FREE_SYNTH_AND_ALL_PRECEDING_NODES_IN_GROUP = 7
FREE_SYNTH_AND_ALL_FOLLOWING_NODES_IN_GROUP = 8
FREE_SYNTH_AND_PAUSE_PRECEDING_NODE = 9
FREE_SYNTH_AND_PAUSE_FOLLOWING_NODE = 10
FREE_SYNTH_AND_DEEPFREE_PRECEDING_NODE = 11
FREE_SYNTH_AND_DEEPFREE_FOLLOWING_NODE = 12
FREE_SYNTH_AND_ALL_SIBLING_NODES = 13
FREE_SYNTH_AND_ENCLOSING_GROUP = 14 | mit | 1,936,751,285,137,782,800 | 27.162162 | 61 | 0.667627 | false | 2.790885 | false | false | false |
The-Compiler/qutebrowser | qutebrowser/browser/inspector.py | 1 | 7851 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2020 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Base class for a QtWebKit/QtWebEngine web inspector."""
import base64
import binascii
import enum
from typing import cast, Optional
from PyQt5.QtWidgets import QWidget
from PyQt5.QtCore import pyqtSignal, pyqtSlot, QObject, QEvent
from PyQt5.QtGui import QCloseEvent
from qutebrowser.browser import eventfilter
from qutebrowser.config import configfiles
from qutebrowser.utils import log, usertypes, utils
from qutebrowser.keyinput import modeman
from qutebrowser.misc import miscwidgets, objects
def create(*, splitter: 'miscwidgets.InspectorSplitter',
win_id: int,
parent: QWidget = None) -> 'AbstractWebInspector':
"""Get a WebKitInspector/WebEngineInspector.
Args:
splitter: InspectorSplitter where the inspector can be placed.
win_id: The window ID this inspector is associated with.
parent: The Qt parent to set.
"""
# Importing modules here so we don't depend on QtWebEngine without the
# argument and to avoid circular imports.
if objects.backend == usertypes.Backend.QtWebEngine:
from qutebrowser.browser.webengine import webengineinspector
return webengineinspector.WebEngineInspector(splitter, win_id, parent)
elif objects.backend == usertypes.Backend.QtWebKit:
from qutebrowser.browser.webkit import webkitinspector
return webkitinspector.WebKitInspector(splitter, win_id, parent)
raise utils.Unreachable(objects.backend)
class Position(enum.Enum):
"""Where the inspector is shown."""
right = enum.auto()
left = enum.auto()
top = enum.auto()
bottom = enum.auto()
window = enum.auto()
class Error(Exception):
"""Raised when the inspector could not be initialized."""
class _EventFilter(QObject):
"""Event filter to enter insert mode when inspector was clicked.
We need to use this with a ChildEventFilter (rather than just overriding
mousePressEvent) for two reasons:
- For QtWebEngine, we need to listen for mouse events on its focusProxy(),
which can change when another page loads (which might be possible with an
inspector as well?)
- For QtWebKit, we need to listen for mouse events on the QWebView used by
the QWebInspector.
"""
clicked = pyqtSignal()
def eventFilter(self, _obj: QObject, event: QEvent) -> bool:
"""Translate mouse presses to a clicked signal."""
if event.type() == QEvent.MouseButtonPress:
self.clicked.emit()
return False
class AbstractWebInspector(QWidget):
"""Base class for QtWebKit/QtWebEngine inspectors.
Attributes:
_position: position of the inspector (right/left/top/bottom/window)
_splitter: InspectorSplitter where the inspector can be placed.
Signals:
recreate: Emitted when the inspector should be recreated.
"""
recreate = pyqtSignal()
def __init__(self, splitter: 'miscwidgets.InspectorSplitter',
win_id: int,
parent: QWidget = None) -> None:
super().__init__(parent)
self._widget = cast(QWidget, None)
self._layout = miscwidgets.WrapperLayout(self)
self._splitter = splitter
self._position: Optional[Position] = None
self._win_id = win_id
self._event_filter = _EventFilter(parent=self)
self._event_filter.clicked.connect(self._on_clicked)
self._child_event_filter = eventfilter.ChildEventFilter(
eventfilter=self._event_filter,
parent=self)
def _set_widget(self, widget: QWidget) -> None:
self._widget = widget
self._widget.setWindowTitle("Web Inspector")
self._widget.installEventFilter(self._child_event_filter)
self._layout.wrap(self, self._widget)
def _load_position(self) -> Position:
"""Get the last position the inspector was in."""
pos = configfiles.state['inspector'].get('position', 'right')
return Position[pos]
def _save_position(self, position: Position) -> None:
"""Save the last position the inspector was in."""
configfiles.state['inspector']['position'] = position.name
def _needs_recreate(self) -> bool:
"""Whether the inspector needs recreation when detaching to a window.
This is done due to an unknown QtWebEngine bug which sometimes prevents
inspector windows from showing up.
Needs to be overridden by subclasses.
"""
return False
@pyqtSlot()
def _on_clicked(self) -> None:
"""Enter insert mode if a docked inspector was clicked."""
if self._position != Position.window:
modeman.enter(self._win_id, usertypes.KeyMode.insert,
reason='Inspector clicked', only_if_normal=True)
def set_position(self, position: Optional[Position]) -> None:
"""Set the position of the inspector.
If the position is None, the last known position is used.
"""
if position is None:
position = self._load_position()
else:
self._save_position(position)
if position == self._position:
self.toggle()
return
if (position == Position.window and
self._position is not None and
self._needs_recreate()):
# Detaching to window
self.recreate.emit()
self.shutdown()
return
elif position == Position.window:
self.setParent(None) # type: ignore[call-overload]
self._load_state_geometry()
else:
self._splitter.set_inspector(self, position)
self._position = position
self._widget.show()
self.show()
def toggle(self) -> None:
"""Toggle visibility of the inspector."""
if self.isVisible():
self.hide()
else:
self.show()
def _load_state_geometry(self) -> None:
"""Load the geometry from the state file."""
try:
data = configfiles.state['inspector']['window']
geom = base64.b64decode(data, validate=True)
except KeyError:
# First start
pass
except binascii.Error:
log.misc.exception("Error while reading geometry")
else:
log.init.debug("Loading geometry from {!r}".format(geom))
ok = self._widget.restoreGeometry(geom)
if not ok:
log.init.warning("Error while loading geometry.")
def closeEvent(self, _e: QCloseEvent) -> None:
"""Save the geometry when closed."""
data = self._widget.saveGeometry().data()
geom = base64.b64encode(data).decode('ASCII')
configfiles.state['inspector']['window'] = geom
def inspect(self, page: QWidget) -> None:
"""Inspect the given QWeb(Engine)Page."""
raise NotImplementedError
@pyqtSlot()
def shutdown(self) -> None:
"""Clean up the inspector."""
self.close()
self.deleteLater()
| gpl-3.0 | -2,090,504,158,504,276,200 | 33.134783 | 79 | 0.648325 | false | 4.236913 | true | false | false |
glyph/cryptography | setup.py | 1 | 3258 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from distutils.command.build import build
from setuptools import setup, find_packages
base_dir = os.path.dirname(__file__)
about = {}
with open(os.path.join(base_dir, "cryptography", "__about__.py")) as f:
exec(f.read(), about)
CFFI_DEPENDENCY = "cffi>=0.6"
SIX_DEPENDENCY = "six>=1.4.1"
requirements = [
CFFI_DEPENDENCY,
SIX_DEPENDENCY
]
class cffi_build(build):
"""
This class exists, instead of just providing ``ext_modules=[...]`` directly
in ``setup()`` because importing cryptography requires we have several
packages installed first.
By doing the imports here we ensure that packages listed in
``setup_requires`` are already installed.
"""
def finalize_options(self):
from cryptography.hazmat.bindings.openssl.binding import Binding
from cryptography.hazmat.primitives import constant_time, padding
self.distribution.ext_modules = [
Binding().ffi.verifier.get_extension(),
constant_time._ffi.verifier.get_extension(),
padding._ffi.verifier.get_extension()
]
build.finalize_options(self)
with open(os.path.join(base_dir, "README.rst")) as f:
long_description = f.read()
setup(
name=about["__title__"],
version=about["__version__"],
description=about["__summary__"],
long_description=long_description,
license=about["__license__"],
url=about["__uri__"],
author=about["__author__"],
author_email=about["__email__"],
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX",
"Operating System :: POSIX :: BSD",
"Operating System :: POSIX :: Linux",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Security :: Cryptography",
],
packages=find_packages(exclude=["tests", "tests.*"]),
install_requires=requirements,
setup_requires=requirements,
# for cffi
zip_safe=False,
ext_package="cryptography",
cmdclass={
"build": cffi_build,
}
)
| apache-2.0 | 5,299,818,349,371,945,000 | 29.448598 | 79 | 0.643033 | false | 4.171575 | false | false | false |
alivecor/tensorflow | tensorflow/contrib/rnn/python/ops/rnn_cell.py | 14 | 89649 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for constructing RNN Cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
from tensorflow.contrib.compiler import jit
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
def _get_concat_variable(name, shape, dtype, num_shards):
"""Get a sharded variable concatenated into one tensor."""
sharded_variable = _get_sharded_variable(name, shape, dtype, num_shards)
if len(sharded_variable) == 1:
return sharded_variable[0]
concat_name = name + "/concat"
concat_full_name = vs.get_variable_scope().name + "/" + concat_name + ":0"
for value in ops.get_collection(ops.GraphKeys.CONCATENATED_VARIABLES):
if value.name == concat_full_name:
return value
concat_variable = array_ops.concat(sharded_variable, 0, name=concat_name)
ops.add_to_collection(ops.GraphKeys.CONCATENATED_VARIABLES,
concat_variable)
return concat_variable
def _get_sharded_variable(name, shape, dtype, num_shards):
"""Get a list of sharded variables with the given dtype."""
if num_shards > shape[0]:
raise ValueError("Too many shards: shape=%s, num_shards=%d" %
(shape, num_shards))
unit_shard_size = int(math.floor(shape[0] / num_shards))
remaining_rows = shape[0] - unit_shard_size * num_shards
shards = []
for i in range(num_shards):
current_size = unit_shard_size
if i < remaining_rows:
current_size += 1
shards.append(vs.get_variable(name + "_%d" % i, [current_size] + shape[1:],
dtype=dtype))
return shards
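# Hedged worked example for the sharding above (shape and shard count are made
# up): with shape=[10, 4] and num_shards=3, unit_shard_size is 3 and
# remaining_rows is 1, so the shards get shapes [4, 4], [3, 4] and [3, 4];
# _get_concat_variable then stitches them back together along axis 0 and caches
# the concatenated tensor in the CONCATENATED_VARIABLES collection.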
class CoupledInputForgetGateLSTMCell(rnn_cell_impl.RNNCell):
"""Long short-term memory unit (LSTM) recurrent network cell.
The default non-peephole implementation is based on:
http://www.bioinf.jku.at/publications/older/2604.pdf
S. Hochreiter and J. Schmidhuber.
"Long Short-Term Memory". Neural Computation, 9(8):1735-1780, 1997.
The peephole implementation is based on:
https://research.google.com/pubs/archive/43905.pdf
Hasim Sak, Andrew Senior, and Francoise Beaufays.
"Long short-term memory recurrent neural network architectures for
large scale acoustic modeling." INTERSPEECH, 2014.
The coupling of input and forget gate is based on:
http://arxiv.org/pdf/1503.04069.pdf
Greff et al. "LSTM: A Search Space Odyssey"
The class uses optional peep-hole connections, and an optional projection
layer.
"""
def __init__(self, num_units, use_peepholes=False,
initializer=None, num_proj=None, proj_clip=None,
num_unit_shards=1, num_proj_shards=1,
forget_bias=1.0, state_is_tuple=True,
activation=math_ops.tanh, reuse=None):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
use_peepholes: bool, set True to enable diagonal/peephole connections.
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is
provided, then the projected values are clipped elementwise to within
`[-proj_clip, proj_clip]`.
num_unit_shards: How to split the weight matrix. If >1, the weight
matrix is stored across num_unit_shards.
num_proj_shards: How to split the projection matrix. If >1, the
projection matrix is stored across num_proj_shards.
forget_bias: Biases of the forget gate are initialized by default to 1
in order to reduce the scale of forgetting at the beginning of
the training.
state_is_tuple: If True, accepted and returned states are 2-tuples of
the `c_state` and `m_state`. By default (False), they are concatenated
along the column axis. This default behavior will soon be deprecated.
activation: Activation function of the inner states.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(CoupledInputForgetGateLSTMCell, self).__init__(_reuse=reuse)
if not state_is_tuple:
logging.warn(
"%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._initializer = initializer
self._num_proj = num_proj
self._proj_clip = proj_clip
self._num_unit_shards = num_unit_shards
self._num_proj_shards = num_proj_shards
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
self._activation = activation
self._reuse = reuse
if num_proj:
self._state_size = (rnn_cell_impl.LSTMStateTuple(num_units, num_proj)
if state_is_tuple else num_units + num_proj)
self._output_size = num_proj
else:
self._state_size = (rnn_cell_impl.LSTMStateTuple(num_units, num_units)
if state_is_tuple else 2 * num_units)
self._output_size = num_units
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
inputs: input Tensor, 2D, batch x num_units.
state: if `state_is_tuple` is False, this must be a state Tensor,
`2-D, batch x state_size`. If `state_is_tuple` is True, this must be a
tuple of state Tensors, both `2-D`, with column sizes `c_state` and
`m_state`.
Returns:
A tuple containing:
- A `2-D, [batch x output_dim]`, Tensor representing the output of the
LSTM after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- Tensor(s) representing the new state of LSTM after reading `inputs` when
the previous state was `state`. Same type and shape(s) as `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
sigmoid = math_ops.sigmoid
num_proj = self._num_units if self._num_proj is None else self._num_proj
if self._state_is_tuple:
(c_prev, m_prev) = state
else:
c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])
m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])
dtype = inputs.dtype
input_size = inputs.get_shape().with_rank(2)[1]
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
concat_w = _get_concat_variable(
"W", [input_size.value + num_proj, 3 * self._num_units],
dtype, self._num_unit_shards)
b = vs.get_variable(
"B",
shape=[3 * self._num_units],
initializer=init_ops.zeros_initializer(),
dtype=dtype)
# j = new_input, f = forget_gate, o = output_gate
cell_inputs = array_ops.concat([inputs, m_prev], 1)
lstm_matrix = nn_ops.bias_add(math_ops.matmul(cell_inputs, concat_w), b)
j, f, o = array_ops.split(value=lstm_matrix, num_or_size_splits=3, axis=1)
# Diagonal connections
if self._use_peepholes:
w_f_diag = vs.get_variable(
"W_F_diag", shape=[self._num_units], dtype=dtype)
w_o_diag = vs.get_variable(
"W_O_diag", shape=[self._num_units], dtype=dtype)
if self._use_peepholes:
f_act = sigmoid(f + self._forget_bias + w_f_diag * c_prev)
else:
f_act = sigmoid(f + self._forget_bias)
c = (f_act * c_prev + (1 - f_act) * self._activation(j))
if self._use_peepholes:
m = sigmoid(o + w_o_diag * c) * self._activation(c)
else:
m = sigmoid(o) * self._activation(c)
if self._num_proj is not None:
concat_w_proj = _get_concat_variable(
"W_P", [self._num_units, self._num_proj],
dtype, self._num_proj_shards)
m = math_ops.matmul(m, concat_w_proj)
if self._proj_clip is not None:
# pylint: disable=invalid-unary-operand-type
m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
# pylint: enable=invalid-unary-operand-type
new_state = (rnn_cell_impl.LSTMStateTuple(c, m)
if self._state_is_tuple else array_ops.concat([c, m], 1))
return m, new_state
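# Hedged usage sketch for the cell above (shapes and hyper-parameters are made
# up): it plugs into the usual TF 1.x recurrent machinery, e.g.
#   cell = CoupledInputForgetGateLSTMCell(num_units=128, use_peepholes=True,
#                                         num_proj=64)
#   outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
# where inputs is a [batch, time, feature] float Tensor; coupling the input and
# forget gates (f = 1 - i) saves one gate's worth of parameters compared to a
# standard LSTM.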
class TimeFreqLSTMCell(rnn_cell_impl.RNNCell):
"""Time-Frequency Long short-term memory unit (LSTM) recurrent network cell.
This implementation is based on:
Tara N. Sainath and Bo Li
"Modeling Time-Frequency Patterns with LSTM vs. Convolutional Architectures
for LVCSR Tasks." submitted to INTERSPEECH, 2016.
It uses peep-hole connections and optional cell clipping.
"""
def __init__(self, num_units, use_peepholes=False,
cell_clip=None, initializer=None,
num_unit_shards=1, forget_bias=1.0,
feature_size=None, frequency_skip=None,
reuse=None):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
use_peepholes: bool, set True to enable diagonal/peephole connections.
cell_clip: (optional) A float value, if provided the cell state is clipped
by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_unit_shards: int, How to split the weight matrix. If >1, the weight
matrix is stored across num_unit_shards.
forget_bias: float, Biases of the forget gate are initialized by default
to 1 in order to reduce the scale of forgetting at the beginning
of the training.
feature_size: int, The size of the input feature the LSTM spans over.
frequency_skip: int, The amount the LSTM filter is shifted by in
frequency.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(TimeFreqLSTMCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._cell_clip = cell_clip
self._initializer = initializer
self._num_unit_shards = num_unit_shards
self._forget_bias = forget_bias
self._feature_size = feature_size
self._frequency_skip = frequency_skip
self._state_size = 2 * num_units
self._output_size = num_units
self._reuse = reuse
@property
def output_size(self):
return self._output_size
@property
def state_size(self):
return self._state_size
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
inputs: input Tensor, 2D, batch x num_units.
state: state Tensor, 2D, batch x state_size.
Returns:
A tuple containing:
- A 2D, batch x output_dim, Tensor representing the output of the LSTM
after reading "inputs" when previous state was "state".
Here output_dim is num_units.
- A 2D, batch x state_size, Tensor representing the new state of LSTM
after reading "inputs" when previous state was "state".
Raises:
ValueError: if an input_size was specified and the provided inputs have
a different dimension.
"""
sigmoid = math_ops.sigmoid
tanh = math_ops.tanh
freq_inputs = self._make_tf_features(inputs)
dtype = inputs.dtype
actual_input_size = freq_inputs[0].get_shape().as_list()[1]
concat_w = _get_concat_variable(
"W", [actual_input_size + 2*self._num_units, 4 * self._num_units],
dtype, self._num_unit_shards)
b = vs.get_variable(
"B",
shape=[4 * self._num_units],
initializer=init_ops.zeros_initializer(),
dtype=dtype)
# Diagonal connections
if self._use_peepholes:
w_f_diag = vs.get_variable(
"W_F_diag", shape=[self._num_units], dtype=dtype)
w_i_diag = vs.get_variable(
"W_I_diag", shape=[self._num_units], dtype=dtype)
w_o_diag = vs.get_variable(
"W_O_diag", shape=[self._num_units], dtype=dtype)
# initialize the first freq state to be zero
m_prev_freq = array_ops.zeros([int(inputs.get_shape()[0]),
self._num_units], dtype)
for fq in range(len(freq_inputs)):
c_prev = array_ops.slice(state, [0, 2*fq*self._num_units],
[-1, self._num_units])
m_prev = array_ops.slice(state, [0, (2*fq+1)*self._num_units],
[-1, self._num_units])
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
cell_inputs = array_ops.concat([freq_inputs[fq], m_prev, m_prev_freq],
1)
lstm_matrix = nn_ops.bias_add(math_ops.matmul(cell_inputs, concat_w), b)
i, j, f, o = array_ops.split(
value=lstm_matrix, num_or_size_splits=4, axis=1)
if self._use_peepholes:
c = (sigmoid(f + self._forget_bias + w_f_diag * c_prev) * c_prev +
sigmoid(i + w_i_diag * c_prev) * tanh(j))
else:
c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) * tanh(j))
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
# pylint: enable=invalid-unary-operand-type
if self._use_peepholes:
m = sigmoid(o + w_o_diag * c) * tanh(c)
else:
m = sigmoid(o) * tanh(c)
m_prev_freq = m
if fq == 0:
state_out = array_ops.concat([c, m], 1)
m_out = m
else:
state_out = array_ops.concat([state_out, c, m], 1)
m_out = array_ops.concat([m_out, m], 1)
return m_out, state_out
def _make_tf_features(self, input_feat):
"""Make the frequency features.
Args:
input_feat: input Tensor, 2D, batch x num_units.
Returns:
A list of frequency features, with each element containing:
- A 2D, batch x output_dim, Tensor representing the time-frequency feature
for that frequency index. Here output_dim is feature_size.
Raises:
ValueError: if input_size cannot be inferred from static shape inference.
"""
input_size = input_feat.get_shape().with_rank(2)[-1].value
if input_size is None:
raise ValueError("Cannot infer input_size from static shape inference.")
num_feats = int((input_size - self._feature_size) / (
self._frequency_skip)) + 1
freq_inputs = []
for f in range(num_feats):
cur_input = array_ops.slice(input_feat, [0, f*self._frequency_skip],
[-1, self._feature_size])
freq_inputs.append(cur_input)
return freq_inputs
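# Hedged worked example for the frequency slicing above (sizes are made up):
# with an input feature vector of size 40, feature_size=8 and frequency_skip=4,
# _make_tf_features yields (40 - 8) / 4 + 1 = 9 overlapping slices covering
# columns [0:8], [4:12], ..., [32:40]; the cell then runs one LSTM step per
# slice, carrying the frequency-direction state m_prev_freq from one slice to
# the next.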
class GridLSTMCell(rnn_cell_impl.RNNCell):
"""Grid Long short-term memory unit (LSTM) recurrent network cell.
The default is based on:
Nal Kalchbrenner, Ivo Danihelka and Alex Graves
"Grid Long Short-Term Memory," Proc. ICLR 2016.
http://arxiv.org/abs/1507.01526
When peephole connections are used, the implementation is based on:
Tara N. Sainath and Bo Li
"Modeling Time-Frequency Patterns with LSTM vs. Convolutional Architectures
for LVCSR Tasks." submitted to INTERSPEECH, 2016.
The code uses optional peephole connections, shared_weights and cell clipping.
"""
def __init__(self, num_units, use_peepholes=False,
share_time_frequency_weights=False,
cell_clip=None, initializer=None,
num_unit_shards=1, forget_bias=1.0,
feature_size=None, frequency_skip=None,
num_frequency_blocks=None,
start_freqindex_list=None,
end_freqindex_list=None,
couple_input_forget_gates=False,
state_is_tuple=True,
reuse=None):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
use_peepholes: (optional) bool, default False. Set True to enable
diagonal/peephole connections.
share_time_frequency_weights: (optional) bool, default False. Set True to
enable shared cell weights between time and frequency LSTMs.
cell_clip: (optional) A float value, default None, if provided the cell
state is clipped by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices, default None.
num_unit_shards: (optional) int, default 1, How to split the weight
matrix. If > 1,the weight matrix is stored across num_unit_shards.
forget_bias: (optional) float, default 1.0, The initial bias of the
forget gates, used to reduce the scale of forgetting at the beginning
of the training.
feature_size: (optional) int, default None, The size of the input feature
the LSTM spans over.
frequency_skip: (optional) int, default None, The amount the LSTM filter
is shifted by in frequency.
num_frequency_blocks: [required] A list of frequency blocks needed to
cover the whole input feature splitting defined by start_freqindex_list
and end_freqindex_list.
start_freqindex_list: [optional], list of ints, default None, The
starting frequency index for each frequency block.
end_freqindex_list: [optional], list of ints, default None. The ending
frequency index for each frequency block.
couple_input_forget_gates: (optional) bool, default False, Whether to
couple the input and forget gates, i.e. f_gate = 1.0 - i_gate, to reduce
model parameters and computation cost.
state_is_tuple: If True, accepted and returned states are 2-tuples of
the `c_state` and `m_state`. By default (False), they are concatenated
along the column axis. This default behavior will soon be deprecated.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
Raises:
ValueError: if the num_frequency_blocks list is not specified
"""
super(GridLSTMCell, self).__init__(_reuse=reuse)
if not state_is_tuple:
logging.warn("%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._share_time_frequency_weights = share_time_frequency_weights
self._couple_input_forget_gates = couple_input_forget_gates
self._state_is_tuple = state_is_tuple
self._cell_clip = cell_clip
self._initializer = initializer
self._num_unit_shards = num_unit_shards
self._forget_bias = forget_bias
self._feature_size = feature_size
self._frequency_skip = frequency_skip
self._start_freqindex_list = start_freqindex_list
self._end_freqindex_list = end_freqindex_list
self._num_frequency_blocks = num_frequency_blocks
self._total_blocks = 0
self._reuse = reuse
if self._num_frequency_blocks is None:
raise ValueError("Must specify num_frequency_blocks")
for block_index in range(len(self._num_frequency_blocks)):
self._total_blocks += int(self._num_frequency_blocks[block_index])
if state_is_tuple:
state_names = ""
for block_index in range(len(self._num_frequency_blocks)):
for freq_index in range(self._num_frequency_blocks[block_index]):
name_prefix = "state_f%02d_b%02d" % (freq_index, block_index)
state_names += ("%s_c, %s_m," % (name_prefix, name_prefix))
self._state_tuple_type = collections.namedtuple(
"GridLSTMStateTuple", state_names.strip(","))
self._state_size = self._state_tuple_type(
*([num_units, num_units] * self._total_blocks))
else:
self._state_tuple_type = None
self._state_size = num_units * self._total_blocks * 2
self._output_size = num_units * self._total_blocks * 2
@property
def output_size(self):
return self._output_size
@property
def state_size(self):
return self._state_size
@property
def state_tuple_type(self):
return self._state_tuple_type
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
inputs: input Tensor, 2D, [batch, feature_size].
state: Tensor or tuple of Tensors, 2D, [batch, state_size], depends on the
flag self._state_is_tuple.
Returns:
A tuple containing:
- A 2D, [batch, output_dim], Tensor representing the output of the LSTM
after reading "inputs" when previous state was "state".
Here output_dim is num_units.
- A 2D, [batch, state_size], Tensor representing the new state of LSTM
after reading "inputs" when previous state was "state".
Raises:
ValueError: if an input_size was specified and the provided inputs have
a different dimension.
"""
batch_size = inputs.shape[0].value or array_ops.shape(inputs)[0]
freq_inputs = self._make_tf_features(inputs)
m_out_lst = []
state_out_lst = []
for block in range(len(freq_inputs)):
m_out_lst_current, state_out_lst_current = self._compute(
freq_inputs[block], block, state, batch_size,
state_is_tuple=self._state_is_tuple)
m_out_lst.extend(m_out_lst_current)
state_out_lst.extend(state_out_lst_current)
if self._state_is_tuple:
state_out = self._state_tuple_type(*state_out_lst)
else:
state_out = array_ops.concat(state_out_lst, 1)
m_out = array_ops.concat(m_out_lst, 1)
return m_out, state_out
def _compute(self, freq_inputs, block, state, batch_size,
state_prefix="state",
state_is_tuple=True):
"""Run the actual computation of one step LSTM.
Args:
freq_inputs: list of Tensors, 2D, [batch, feature_size].
block: int, current frequency block index to process.
state: Tensor or tuple of Tensors, 2D, [batch, state_size], it depends on
the flag state_is_tuple.
batch_size: int32, batch size.
state_prefix: (optional) string, name prefix for states, defaults to
"state".
state_is_tuple: boolean, indicates whether the state is a tuple or Tensor.
Returns:
A tuple, containing:
- A list of [batch, output_dim] Tensors, representing the output of the
LSTM given the inputs and state.
- A list of [batch, state_size] Tensors, representing the LSTM state
values given the inputs and previous state.
"""
sigmoid = math_ops.sigmoid
tanh = math_ops.tanh
num_gates = 3 if self._couple_input_forget_gates else 4
dtype = freq_inputs[0].dtype
actual_input_size = freq_inputs[0].get_shape().as_list()[1]
concat_w_f = _get_concat_variable(
"W_f_%d" % block, [actual_input_size + 2 * self._num_units,
num_gates * self._num_units],
dtype, self._num_unit_shards)
b_f = vs.get_variable(
"B_f_%d" % block,
shape=[num_gates * self._num_units],
initializer=init_ops.zeros_initializer(),
dtype=dtype)
if not self._share_time_frequency_weights:
concat_w_t = _get_concat_variable(
"W_t_%d" % block, [actual_input_size + 2 * self._num_units,
num_gates * self._num_units],
dtype, self._num_unit_shards)
b_t = vs.get_variable(
"B_t_%d" % block,
shape=[num_gates * self._num_units],
initializer=init_ops.zeros_initializer(),
dtype=dtype)
if self._use_peepholes:
# Diagonal connections
if not self._couple_input_forget_gates:
w_f_diag_freqf = vs.get_variable(
"W_F_diag_freqf_%d" % block, shape=[self._num_units], dtype=dtype)
w_f_diag_freqt = vs.get_variable(
"W_F_diag_freqt_%d"% block, shape=[self._num_units], dtype=dtype)
w_i_diag_freqf = vs.get_variable(
"W_I_diag_freqf_%d" % block, shape=[self._num_units], dtype=dtype)
w_i_diag_freqt = vs.get_variable(
"W_I_diag_freqt_%d" % block, shape=[self._num_units], dtype=dtype)
w_o_diag_freqf = vs.get_variable(
"W_O_diag_freqf_%d" % block, shape=[self._num_units], dtype=dtype)
w_o_diag_freqt = vs.get_variable(
"W_O_diag_freqt_%d" % block, shape=[self._num_units], dtype=dtype)
if not self._share_time_frequency_weights:
if not self._couple_input_forget_gates:
w_f_diag_timef = vs.get_variable(
"W_F_diag_timef_%d" % block, shape=[self._num_units], dtype=dtype)
w_f_diag_timet = vs.get_variable(
"W_F_diag_timet_%d" % block, shape=[self._num_units], dtype=dtype)
w_i_diag_timef = vs.get_variable(
"W_I_diag_timef_%d" % block, shape=[self._num_units], dtype=dtype)
w_i_diag_timet = vs.get_variable(
"W_I_diag_timet_%d" % block, shape=[self._num_units], dtype=dtype)
w_o_diag_timef = vs.get_variable(
"W_O_diag_timef_%d" % block, shape=[self._num_units], dtype=dtype)
w_o_diag_timet = vs.get_variable(
"W_O_diag_timet_%d" % block, shape=[self._num_units], dtype=dtype)
# initialize the first freq state to be zero
m_prev_freq = array_ops.zeros([batch_size, self._num_units], dtype)
c_prev_freq = array_ops.zeros([batch_size, self._num_units], dtype)
for freq_index in range(len(freq_inputs)):
if state_is_tuple:
name_prefix = "%s_f%02d_b%02d" % (state_prefix, freq_index, block)
c_prev_time = getattr(state, name_prefix + "_c")
m_prev_time = getattr(state, name_prefix + "_m")
else:
c_prev_time = array_ops.slice(
state, [0, 2 * freq_index * self._num_units],
[-1, self._num_units])
m_prev_time = array_ops.slice(
state, [0, (2 * freq_index + 1) * self._num_units],
[-1, self._num_units])
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
cell_inputs = array_ops.concat(
[freq_inputs[freq_index], m_prev_time, m_prev_freq], 1)
# F-LSTM
lstm_matrix_freq = nn_ops.bias_add(math_ops.matmul(cell_inputs,
concat_w_f), b_f)
if self._couple_input_forget_gates:
i_freq, j_freq, o_freq = array_ops.split(
value=lstm_matrix_freq, num_or_size_splits=num_gates, axis=1)
f_freq = None
else:
i_freq, j_freq, f_freq, o_freq = array_ops.split(
value=lstm_matrix_freq, num_or_size_splits=num_gates, axis=1)
# T-LSTM
if self._share_time_frequency_weights:
i_time = i_freq
j_time = j_freq
f_time = f_freq
o_time = o_freq
else:
lstm_matrix_time = nn_ops.bias_add(math_ops.matmul(cell_inputs,
concat_w_t), b_t)
if self._couple_input_forget_gates:
i_time, j_time, o_time = array_ops.split(
value=lstm_matrix_time, num_or_size_splits=num_gates, axis=1)
f_time = None
else:
i_time, j_time, f_time, o_time = array_ops.split(
value=lstm_matrix_time, num_or_size_splits=num_gates, axis=1)
# F-LSTM c_freq
# input gate activations
if self._use_peepholes:
i_freq_g = sigmoid(i_freq +
w_i_diag_freqf * c_prev_freq +
w_i_diag_freqt * c_prev_time)
else:
i_freq_g = sigmoid(i_freq)
# forget gate activations
if self._couple_input_forget_gates:
f_freq_g = 1.0 - i_freq_g
else:
if self._use_peepholes:
f_freq_g = sigmoid(f_freq + self._forget_bias +
w_f_diag_freqf * c_prev_freq +
w_f_diag_freqt * c_prev_time)
else:
f_freq_g = sigmoid(f_freq + self._forget_bias)
# cell state
c_freq = f_freq_g * c_prev_freq + i_freq_g * tanh(j_freq)
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
c_freq = clip_ops.clip_by_value(c_freq, -self._cell_clip,
self._cell_clip)
# pylint: enable=invalid-unary-operand-type
# T-LSTM c_freq
# input gate activations
if self._use_peepholes:
if self._share_time_frequency_weights:
i_time_g = sigmoid(i_time +
w_i_diag_freqf * c_prev_freq +
w_i_diag_freqt * c_prev_time)
else:
i_time_g = sigmoid(i_time +
w_i_diag_timef * c_prev_freq +
w_i_diag_timet * c_prev_time)
else:
i_time_g = sigmoid(i_time)
# forget gate activations
if self._couple_input_forget_gates:
f_time_g = 1.0 - i_time_g
else:
if self._use_peepholes:
if self._share_time_frequency_weights:
f_time_g = sigmoid(f_time + self._forget_bias +
w_f_diag_freqf * c_prev_freq +
w_f_diag_freqt * c_prev_time)
else:
f_time_g = sigmoid(f_time + self._forget_bias +
w_f_diag_timef * c_prev_freq +
w_f_diag_timet * c_prev_time)
else:
f_time_g = sigmoid(f_time + self._forget_bias)
# cell state
c_time = f_time_g * c_prev_time + i_time_g * tanh(j_time)
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
c_time = clip_ops.clip_by_value(c_time, -self._cell_clip,
self._cell_clip)
# pylint: enable=invalid-unary-operand-type
# F-LSTM m_freq
if self._use_peepholes:
m_freq = sigmoid(o_freq +
w_o_diag_freqf * c_freq +
w_o_diag_freqt * c_time) * tanh(c_freq)
else:
m_freq = sigmoid(o_freq) * tanh(c_freq)
# T-LSTM m_time
if self._use_peepholes:
if self._share_time_frequency_weights:
m_time = sigmoid(o_time +
w_o_diag_freqf * c_freq +
w_o_diag_freqt * c_time) * tanh(c_time)
else:
m_time = sigmoid(o_time +
w_o_diag_timef * c_freq +
w_o_diag_timet * c_time) * tanh(c_time)
else:
m_time = sigmoid(o_time) * tanh(c_time)
m_prev_freq = m_freq
c_prev_freq = c_freq
# Concatenate the outputs for T-LSTM and F-LSTM for each shift
if freq_index == 0:
state_out_lst = [c_time, m_time]
m_out_lst = [m_time, m_freq]
else:
state_out_lst.extend([c_time, m_time])
m_out_lst.extend([m_time, m_freq])
return m_out_lst, state_out_lst
def _make_tf_features(self, input_feat, slice_offset=0):
"""Make the frequency features.
Args:
input_feat: input Tensor, 2D, [batch, num_units].
slice_offset: (optional) Python int, default 0, the slicing offset is only
used for the backward processing in the BidirectionalGridLSTMCell. It
specifies a different starting point instead of always 0 to enable the
        forward and backward processing to look at different frequency blocks.
Returns:
A list of frequency features, with each element containing:
- A 2D, [batch, output_dim], Tensor representing the time-frequency
feature for that frequency index. Here output_dim is feature_size.
Raises:
ValueError: if input_size cannot be inferred from static shape inference.
"""
input_size = input_feat.get_shape().with_rank(2)[-1].value
if input_size is None:
raise ValueError("Cannot infer input_size from static shape inference.")
if slice_offset > 0:
# Padding to the end
inputs = array_ops.pad(
input_feat, array_ops.constant([0, 0, 0, slice_offset], shape=[2, 2],
dtype=dtypes.int32),
"CONSTANT")
elif slice_offset < 0:
# Padding to the front
inputs = array_ops.pad(
input_feat, array_ops.constant([0, 0, -slice_offset, 0], shape=[2, 2],
dtype=dtypes.int32),
"CONSTANT")
slice_offset = 0
else:
inputs = input_feat
freq_inputs = []
if not self._start_freqindex_list:
if len(self._num_frequency_blocks) != 1:
raise ValueError("Length of num_frequency_blocks"
" is not 1, but instead is %d",
len(self._num_frequency_blocks))
num_feats = int((input_size - self._feature_size) / (
self._frequency_skip)) + 1
if num_feats != self._num_frequency_blocks[0]:
raise ValueError(
"Invalid num_frequency_blocks, requires %d but gets %d, please"
" check the input size and filter config are correct." % (
self._num_frequency_blocks[0], num_feats))
block_inputs = []
for f in range(num_feats):
cur_input = array_ops.slice(
inputs, [0, slice_offset + f * self._frequency_skip],
[-1, self._feature_size])
block_inputs.append(cur_input)
freq_inputs.append(block_inputs)
else:
if len(self._start_freqindex_list) != len(self._end_freqindex_list):
raise ValueError("Length of start and end freqindex_list"
" does not match %d %d",
len(self._start_freqindex_list),
len(self._end_freqindex_list))
if len(self._num_frequency_blocks) != len(self._start_freqindex_list):
raise ValueError("Length of num_frequency_blocks"
" is not equal to start_freqindex_list %d %d",
len(self._num_frequency_blocks),
len(self._start_freqindex_list))
for b in range(len(self._start_freqindex_list)):
start_index = self._start_freqindex_list[b]
end_index = self._end_freqindex_list[b]
cur_size = end_index - start_index
block_feats = int((cur_size - self._feature_size) / (
self._frequency_skip)) + 1
if block_feats != self._num_frequency_blocks[b]:
raise ValueError(
"Invalid num_frequency_blocks, requires %d but gets %d, please"
" check the input size and filter config are correct." % (
self._num_frequency_blocks[b], block_feats))
block_inputs = []
for f in range(block_feats):
cur_input = array_ops.slice(
inputs, [0, start_index + slice_offset + f *
self._frequency_skip],
[-1, self._feature_size])
block_inputs.append(cur_input)
freq_inputs.append(block_inputs)
return freq_inputs
class BidirectionalGridLSTMCell(GridLSTMCell):
"""Bidirectional GridLstm cell.
The bidirection connection is only used in the frequency direction, which
hence doesn't affect the time direction's real-time processing that is
required for online recognition systems.
The current implementation uses different weights for the two directions.
"""
def __init__(self, num_units, use_peepholes=False,
share_time_frequency_weights=False,
cell_clip=None, initializer=None,
num_unit_shards=1, forget_bias=1.0,
feature_size=None, frequency_skip=None,
num_frequency_blocks=None,
start_freqindex_list=None,
end_freqindex_list=None,
couple_input_forget_gates=False,
backward_slice_offset=0,
reuse=None):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
use_peepholes: (optional) bool, default False. Set True to enable
diagonal/peephole connections.
share_time_frequency_weights: (optional) bool, default False. Set True to
enable shared cell weights between time and frequency LSTMs.
cell_clip: (optional) A float value, default None, if provided the cell
state is clipped by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices, default None.
num_unit_shards: (optional) int, default 1, How to split the weight
      matrix. If > 1, the weight matrix is stored across num_unit_shards.
forget_bias: (optional) float, default 1.0, The initial bias of the
forget gates, used to reduce the scale of forgetting at the beginning
of the training.
feature_size: (optional) int, default None, The size of the input feature
the LSTM spans over.
frequency_skip: (optional) int, default None, The amount the LSTM filter
is shifted by in frequency.
num_frequency_blocks: [required] A list of frequency blocks needed to
cover the whole input feature splitting defined by start_freqindex_list
and end_freqindex_list.
start_freqindex_list: [optional], list of ints, default None, The
starting frequency index for each frequency block.
end_freqindex_list: [optional], list of ints, default None. The ending
frequency index for each frequency block.
couple_input_forget_gates: (optional) bool, default False, Whether to
couple the input and forget gates, i.e. f_gate = 1.0 - i_gate, to reduce
model parameters and computation cost.
backward_slice_offset: (optional) int32, default 0, the starting offset to
slice the feature for backward processing.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(BidirectionalGridLSTMCell, self).__init__(
num_units, use_peepholes, share_time_frequency_weights, cell_clip,
initializer, num_unit_shards, forget_bias, feature_size, frequency_skip,
num_frequency_blocks, start_freqindex_list, end_freqindex_list,
couple_input_forget_gates, True, reuse)
self._backward_slice_offset = int(backward_slice_offset)
state_names = ""
for direction in ["fwd", "bwd"]:
for block_index in range(len(self._num_frequency_blocks)):
for freq_index in range(self._num_frequency_blocks[block_index]):
name_prefix = "%s_state_f%02d_b%02d" % (direction, freq_index,
block_index)
state_names += ("%s_c, %s_m," % (name_prefix, name_prefix))
self._state_tuple_type = collections.namedtuple(
"BidirectionalGridLSTMStateTuple", state_names.strip(","))
self._state_size = self._state_tuple_type(
*([num_units, num_units] * self._total_blocks * 2))
self._output_size = 2 * num_units * self._total_blocks * 2
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
inputs: input Tensor, 2D, [batch, num_units].
state: tuple of Tensors, 2D, [batch, state_size].
Returns:
A tuple containing:
- A 2D, [batch, output_dim], Tensor representing the output of the LSTM
after reading "inputs" when previous state was "state".
Here output_dim is num_units.
- A 2D, [batch, state_size], Tensor representing the new state of LSTM
after reading "inputs" when previous state was "state".
Raises:
ValueError: if an input_size was specified and the provided inputs have
a different dimension.
"""
batch_size = inputs.shape[0].value or array_ops.shape(inputs)[0]
fwd_inputs = self._make_tf_features(inputs)
if self._backward_slice_offset:
bwd_inputs = self._make_tf_features(inputs, self._backward_slice_offset)
else:
bwd_inputs = fwd_inputs
# Forward processing
with vs.variable_scope("fwd"):
fwd_m_out_lst = []
fwd_state_out_lst = []
for block in range(len(fwd_inputs)):
fwd_m_out_lst_current, fwd_state_out_lst_current = self._compute(
fwd_inputs[block], block, state, batch_size,
state_prefix="fwd_state", state_is_tuple=True)
fwd_m_out_lst.extend(fwd_m_out_lst_current)
fwd_state_out_lst.extend(fwd_state_out_lst_current)
# Backward processing
bwd_m_out_lst = []
bwd_state_out_lst = []
with vs.variable_scope("bwd"):
for block in range(len(bwd_inputs)):
# Reverse the blocks
bwd_inputs_reverse = bwd_inputs[block][::-1]
bwd_m_out_lst_current, bwd_state_out_lst_current = self._compute(
bwd_inputs_reverse, block, state, batch_size,
state_prefix="bwd_state", state_is_tuple=True)
bwd_m_out_lst.extend(bwd_m_out_lst_current)
bwd_state_out_lst.extend(bwd_state_out_lst_current)
state_out = self._state_tuple_type(*(fwd_state_out_lst + bwd_state_out_lst))
    # Outputs are always concatenated as they are never used separately.
m_out = array_ops.concat(fwd_m_out_lst + bwd_m_out_lst, 1)
return m_out, state_out
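# A minimal usage sketch for BidirectionalGridLSTMCell. The batch size, input
# width, feature_size, frequency_skip and block count below are illustrative
# assumptions (chosen so that the frequency blocks tile the input exactly);
# they are not canonical values from this module.
def _example_bidirectional_grid_lstm_usage():
  batch_size = 2
  input_dim = 40  # (40 - 8) / 4 + 1 = 9 frequency blocks
  cell = BidirectionalGridLSTMCell(
      num_units=16,
      feature_size=8,
      frequency_skip=4,
      num_frequency_blocks=[9])
  inputs = array_ops.zeros([batch_size, input_dim], dtype=dtypes.float32)
  state = cell.zero_state(batch_size, dtypes.float32)
  # One step; the output concatenates the forward and backward F-/T-LSTM
  # outputs for every frequency block.
  outputs, new_state = cell(inputs, state)
  return outputs, new_state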
# pylint: disable=protected-access
_linear = rnn_cell_impl._linear
# pylint: enable=protected-access
class AttentionCellWrapper(rnn_cell_impl.RNNCell):
"""Basic attention cell wrapper.
Implementation based on https://arxiv.org/abs/1409.0473.
"""
def __init__(self, cell, attn_length, attn_size=None, attn_vec_size=None,
input_size=None, state_is_tuple=True, reuse=None):
"""Create a cell with attention.
Args:
cell: an RNNCell, an attention is added to it.
attn_length: integer, the size of an attention window.
attn_size: integer, the size of an attention vector. Equal to
cell.output_size by default.
attn_vec_size: integer, the number of convolutional features calculated
on attention state and a size of the hidden layer built from
        base cell state. Equal to attn_size by default.
input_size: integer, the size of a hidden linear layer,
built from inputs and attention. Derived from the input tensor
by default.
      state_is_tuple: If True (default), accepted and returned states are
        (cell_state, attns, attn_states) tuples. If False, the states are all
        concatenated along the column axis.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
Raises:
TypeError: if cell is not an RNNCell.
ValueError: if cell returns a state tuple but the flag
`state_is_tuple` is `False` or if attn_length is zero or less.
"""
super(AttentionCellWrapper, self).__init__(_reuse=reuse)
if not rnn_cell_impl._like_rnncell(cell): # pylint: disable=protected-access
raise TypeError("The parameter cell is not RNNCell.")
if nest.is_sequence(cell.state_size) and not state_is_tuple:
raise ValueError("Cell returns tuple of states, but the flag "
"state_is_tuple is not set. State size is: %s"
% str(cell.state_size))
if attn_length <= 0:
raise ValueError("attn_length should be greater than zero, got %s"
% str(attn_length))
if not state_is_tuple:
logging.warn(
"%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
if attn_size is None:
attn_size = cell.output_size
if attn_vec_size is None:
attn_vec_size = attn_size
self._state_is_tuple = state_is_tuple
self._cell = cell
self._attn_vec_size = attn_vec_size
self._input_size = input_size
self._attn_size = attn_size
self._attn_length = attn_length
self._reuse = reuse
@property
def state_size(self):
size = (self._cell.state_size, self._attn_size,
self._attn_size * self._attn_length)
if self._state_is_tuple:
return size
else:
return sum(list(size))
@property
def output_size(self):
return self._attn_size
def call(self, inputs, state):
"""Long short-term memory cell with attention (LSTMA)."""
if self._state_is_tuple:
state, attns, attn_states = state
else:
states = state
state = array_ops.slice(states, [0, 0], [-1, self._cell.state_size])
attns = array_ops.slice(
states, [0, self._cell.state_size], [-1, self._attn_size])
attn_states = array_ops.slice(
states, [0, self._cell.state_size + self._attn_size],
[-1, self._attn_size * self._attn_length])
attn_states = array_ops.reshape(attn_states,
[-1, self._attn_length, self._attn_size])
input_size = self._input_size
if input_size is None:
input_size = inputs.get_shape().as_list()[1]
inputs = _linear([inputs, attns], input_size, True)
cell_output, new_state = self._cell(inputs, state)
if self._state_is_tuple:
new_state_cat = array_ops.concat(nest.flatten(new_state), 1)
else:
new_state_cat = new_state
new_attns, new_attn_states = self._attention(new_state_cat, attn_states)
with vs.variable_scope("attn_output_projection"):
output = _linear([cell_output, new_attns], self._attn_size, True)
new_attn_states = array_ops.concat(
[new_attn_states, array_ops.expand_dims(output, 1)], 1)
new_attn_states = array_ops.reshape(
new_attn_states, [-1, self._attn_length * self._attn_size])
new_state = (new_state, new_attns, new_attn_states)
if not self._state_is_tuple:
new_state = array_ops.concat(list(new_state), 1)
return output, new_state
def _attention(self, query, attn_states):
conv2d = nn_ops.conv2d
reduce_sum = math_ops.reduce_sum
softmax = nn_ops.softmax
tanh = math_ops.tanh
with vs.variable_scope("attention"):
k = vs.get_variable(
"attn_w", [1, 1, self._attn_size, self._attn_vec_size])
v = vs.get_variable("attn_v", [self._attn_vec_size])
hidden = array_ops.reshape(attn_states,
[-1, self._attn_length, 1, self._attn_size])
hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME")
y = _linear(query, self._attn_vec_size, True)
y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])
s = reduce_sum(v * tanh(hidden_features + y), [2, 3])
a = softmax(s)
d = reduce_sum(
array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])
new_attns = array_ops.reshape(d, [-1, self._attn_size])
new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])
return new_attns, new_attn_states
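# A minimal usage sketch for AttentionCellWrapper. The wrapped base cell and
# all sizes are illustrative assumptions; LayerNormBasicLSTMCell (defined
# later in this module) is used only to keep the sketch self-contained.
def _example_attention_cell_wrapper_usage():
  batch_size = 2
  num_units = 8
  base_cell = LayerNormBasicLSTMCell(num_units)
  attn_cell = AttentionCellWrapper(base_cell, attn_length=5,
                                   state_is_tuple=True)
  inputs = array_ops.zeros([batch_size, num_units], dtype=dtypes.float32)
  state = attn_cell.zero_state(batch_size, dtypes.float32)
  # The wrapper projects [inputs, attns], runs the base cell, and mixes the
  # attention read into the final output projection.
  output, new_state = attn_cell(inputs, state)
  return output, new_state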
class HighwayWrapper(rnn_cell_impl.RNNCell):
"""RNNCell wrapper that adds highway connection on cell input and output.
Based on:
R. K. Srivastava, K. Greff, and J. Schmidhuber, "Highway networks",
arXiv preprint arXiv:1505.00387, 2015.
https://arxiv.org/abs/1505.00387
"""
def __init__(self, cell,
couple_carry_transform_gates=True,
carry_bias_init=1.0):
"""Constructs a `HighwayWrapper` for `cell`.
Args:
cell: An instance of `RNNCell`.
couple_carry_transform_gates: boolean, should the Carry and Transform gate
be coupled.
carry_bias_init: float, carry gates bias initialization.
"""
self._cell = cell
self._couple_carry_transform_gates = couple_carry_transform_gates
self._carry_bias_init = carry_bias_init
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
return self._cell.zero_state(batch_size, dtype)
def _highway(self, inp, out):
input_size = inp.get_shape().with_rank(2)[1].value
carry_weight = vs.get_variable("carry_w", [input_size, input_size])
carry_bias = vs.get_variable(
"carry_b", [input_size],
initializer=init_ops.constant_initializer(
self._carry_bias_init))
carry = math_ops.sigmoid(nn_ops.xw_plus_b(inp, carry_weight, carry_bias))
if self._couple_carry_transform_gates:
transform = 1 - carry
else:
transform_weight = vs.get_variable("transform_w",
[input_size, input_size])
transform_bias = vs.get_variable(
"transform_b", [input_size],
initializer=init_ops.constant_initializer(
-self._carry_bias_init))
transform = math_ops.sigmoid(nn_ops.xw_plus_b(inp,
transform_weight,
transform_bias))
return inp * carry + out * transform
def __call__(self, inputs, state, scope=None):
"""Run the cell and add its inputs to its outputs.
Args:
inputs: cell inputs.
state: cell state.
scope: optional cell scope.
Returns:
Tuple of cell outputs and new state.
Raises:
TypeError: If cell inputs and outputs have different structure (type).
ValueError: If cell inputs and outputs have different structure (value).
"""
outputs, new_state = self._cell(inputs, state, scope=scope)
nest.assert_same_structure(inputs, outputs)
# Ensure shapes match
def assert_shape_match(inp, out):
inp.get_shape().assert_is_compatible_with(out.get_shape())
nest.map_structure(assert_shape_match, inputs, outputs)
res_outputs = nest.map_structure(self._highway, inputs, outputs)
return (res_outputs, new_state)
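# A minimal usage sketch for HighwayWrapper. Because the highway mix adds the
# cell input to its output, the input width must equal the cell output width;
# the UGRNNCell base cell and the sizes here are illustrative assumptions.
def _example_highway_wrapper_usage():
  batch_size = 2
  num_units = 8
  hw_cell = HighwayWrapper(UGRNNCell(num_units))
  inputs = array_ops.zeros([batch_size, num_units], dtype=dtypes.float32)
  state = hw_cell.zero_state(batch_size, dtypes.float32)
  output, new_state = hw_cell(inputs, state)
  return output, new_state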
class LayerNormBasicLSTMCell(rnn_cell_impl.RNNCell):
"""LSTM unit with layer normalization and recurrent dropout.
This class adds layer normalization and recurrent dropout to a
basic LSTM unit. Layer normalization implementation is based on:
https://arxiv.org/abs/1607.06450.
"Layer Normalization"
Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton
and is applied before the internal nonlinearities.
  Recurrent dropout is based on:
https://arxiv.org/abs/1603.05118
"Recurrent Dropout without Memory Loss"
Stanislau Semeniuta, Aliaksei Severyn, Erhardt Barth.
"""
def __init__(self, num_units, forget_bias=1.0,
input_size=None, activation=math_ops.tanh,
layer_norm=True, norm_gain=1.0, norm_shift=0.0,
dropout_keep_prob=1.0, dropout_prob_seed=None,
reuse=None):
"""Initializes the basic LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
input_size: Deprecated and unused.
activation: Activation function of the inner states.
layer_norm: If `True`, layer normalization will be applied.
norm_gain: float, The layer normalization gain initial value. If
`layer_norm` has been set to `False`, this argument will be ignored.
norm_shift: float, The layer normalization shift initial value. If
`layer_norm` has been set to `False`, this argument will be ignored.
dropout_keep_prob: unit Tensor or float between 0 and 1 representing the
recurrent dropout probability value. If float and 1.0, no dropout will
be applied.
dropout_prob_seed: (optional) integer, the randomness seed.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(LayerNormBasicLSTMCell, self).__init__(_reuse=reuse)
if input_size is not None:
logging.warn("%s: The input_size parameter is deprecated.", self)
self._num_units = num_units
self._activation = activation
self._forget_bias = forget_bias
self._keep_prob = dropout_keep_prob
self._seed = dropout_prob_seed
self._layer_norm = layer_norm
self._g = norm_gain
self._b = norm_shift
self._reuse = reuse
@property
def state_size(self):
return rnn_cell_impl.LSTMStateTuple(self._num_units, self._num_units)
@property
def output_size(self):
return self._num_units
def _norm(self, inp, scope):
shape = inp.get_shape()[-1:]
gamma_init = init_ops.constant_initializer(self._g)
beta_init = init_ops.constant_initializer(self._b)
with vs.variable_scope(scope):
# Initialize beta and gamma for use by layer_norm.
vs.get_variable("gamma", shape=shape, initializer=gamma_init)
vs.get_variable("beta", shape=shape, initializer=beta_init)
normalized = layers.layer_norm(inp, reuse=True, scope=scope)
return normalized
def _linear(self, args):
out_size = 4 * self._num_units
proj_size = args.get_shape()[-1]
weights = vs.get_variable("kernel", [proj_size, out_size])
out = math_ops.matmul(args, weights)
if not self._layer_norm:
bias = vs.get_variable("bias", [out_size])
out = nn_ops.bias_add(out, bias)
return out
def call(self, inputs, state):
"""LSTM cell with layer normalization and recurrent dropout."""
c, h = state
args = array_ops.concat([inputs, h], 1)
concat = self._linear(args)
i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=1)
if self._layer_norm:
i = self._norm(i, "input")
j = self._norm(j, "transform")
f = self._norm(f, "forget")
o = self._norm(o, "output")
g = self._activation(j)
if (not isinstance(self._keep_prob, float)) or self._keep_prob < 1:
g = nn_ops.dropout(g, self._keep_prob, seed=self._seed)
new_c = (c * math_ops.sigmoid(f + self._forget_bias)
+ math_ops.sigmoid(i) * g)
if self._layer_norm:
new_c = self._norm(new_c, "state")
new_h = self._activation(new_c) * math_ops.sigmoid(o)
new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_h)
return new_h, new_state
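# A minimal usage sketch for LayerNormBasicLSTMCell with recurrent dropout
# enabled. The sizes and keep probability are illustrative assumptions.
def _example_layer_norm_lstm_usage():
  batch_size = 2
  cell = LayerNormBasicLSTMCell(8, layer_norm=True, dropout_keep_prob=0.9)
  inputs = array_ops.zeros([batch_size, 4], dtype=dtypes.float32)
  state = cell.zero_state(batch_size, dtypes.float32)
  new_h, new_state = cell(inputs, state)
  return new_h, new_state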
class NASCell(rnn_cell_impl.RNNCell):
"""Neural Architecture Search (NAS) recurrent network cell.
This implements the recurrent cell from the paper:
https://arxiv.org/abs/1611.01578
Barret Zoph and Quoc V. Le.
"Neural Architecture Search with Reinforcement Learning" Proc. ICLR 2017.
The class uses an optional projection layer.
"""
def __init__(self, num_units, num_proj=None,
use_biases=False, reuse=None):
"""Initialize the parameters for a NAS cell.
Args:
num_units: int, The number of units in the NAS cell
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
use_biases: (optional) bool, If True then use biases within the cell. This
is False by default.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(NASCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._num_proj = num_proj
self._use_biases = use_biases
self._reuse = reuse
if num_proj is not None:
self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_proj)
self._output_size = num_proj
else:
self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_units)
self._output_size = num_units
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def call(self, inputs, state):
"""Run one step of NAS Cell.
Args:
      inputs: input Tensor, 2D, batch x input size.
state: This must be a tuple of state Tensors, both `2-D`, with column
sizes `c_state` and `m_state`.
Returns:
A tuple containing:
- A `2-D, [batch x output_dim]`, Tensor representing the output of the
NAS Cell after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- Tensor(s) representing the new state of NAS Cell after reading `inputs`
when the previous state was `state`. Same type and shape(s) as `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
sigmoid = math_ops.sigmoid
tanh = math_ops.tanh
relu = nn_ops.relu
num_proj = self._num_units if self._num_proj is None else self._num_proj
(c_prev, m_prev) = state
dtype = inputs.dtype
input_size = inputs.get_shape().with_rank(2)[1]
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
# Variables for the NAS cell. W_m is all matrices multiplying the
    # hidden state and W_inputs is all matrices multiplying the inputs.
concat_w_m = vs.get_variable(
"recurrent_kernel", [num_proj, 8 * self._num_units],
dtype)
concat_w_inputs = vs.get_variable(
"kernel", [input_size.value, 8 * self._num_units],
dtype)
m_matrix = math_ops.matmul(m_prev, concat_w_m)
inputs_matrix = math_ops.matmul(inputs, concat_w_inputs)
if self._use_biases:
b = vs.get_variable(
"bias",
shape=[8 * self._num_units],
initializer=init_ops.zeros_initializer(),
dtype=dtype)
m_matrix = nn_ops.bias_add(m_matrix, b)
    # The NAS cell branches into 8 different splits for both the hidden state
    # and the input
m_matrix_splits = array_ops.split(axis=1, num_or_size_splits=8,
value=m_matrix)
inputs_matrix_splits = array_ops.split(axis=1, num_or_size_splits=8,
value=inputs_matrix)
# First layer
layer1_0 = sigmoid(inputs_matrix_splits[0] + m_matrix_splits[0])
layer1_1 = relu(inputs_matrix_splits[1] + m_matrix_splits[1])
layer1_2 = sigmoid(inputs_matrix_splits[2] + m_matrix_splits[2])
layer1_3 = relu(inputs_matrix_splits[3] * m_matrix_splits[3])
layer1_4 = tanh(inputs_matrix_splits[4] + m_matrix_splits[4])
layer1_5 = sigmoid(inputs_matrix_splits[5] + m_matrix_splits[5])
layer1_6 = tanh(inputs_matrix_splits[6] + m_matrix_splits[6])
layer1_7 = sigmoid(inputs_matrix_splits[7] + m_matrix_splits[7])
# Second layer
l2_0 = tanh(layer1_0 * layer1_1)
l2_1 = tanh(layer1_2 + layer1_3)
l2_2 = tanh(layer1_4 * layer1_5)
l2_3 = sigmoid(layer1_6 + layer1_7)
# Inject the cell
l2_0 = tanh(l2_0 + c_prev)
# Third layer
l3_0_pre = l2_0 * l2_1
new_c = l3_0_pre # create new cell
l3_0 = l3_0_pre
l3_1 = tanh(l2_2 + l2_3)
# Final layer
new_m = tanh(l3_0 * l3_1)
# Projection layer if specified
if self._num_proj is not None:
concat_w_proj = vs.get_variable(
"projection_weights", [self._num_units, self._num_proj],
dtype)
new_m = math_ops.matmul(new_m, concat_w_proj)
new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_m)
return new_m, new_state
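# A minimal usage sketch for NASCell with an output projection: as the call()
# docstring notes, the output width is num_proj when it is set. The sizes are
# illustrative assumptions.
def _example_nas_cell_usage():
  batch_size = 2
  cell = NASCell(num_units=16, num_proj=8, use_biases=True)
  inputs = array_ops.zeros([batch_size, 10], dtype=dtypes.float32)
  state = cell.zero_state(batch_size, dtypes.float32)  # (c: 16, m: 8)
  new_m, new_state = cell(inputs, state)  # new_m has shape [batch, 8]
  return new_m, new_state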
class UGRNNCell(rnn_cell_impl.RNNCell):
"""Update Gate Recurrent Neural Network (UGRNN) cell.
Compromise between a LSTM/GRU and a vanilla RNN. There is only one
gate, and that is to determine whether the unit should be
  integrating or computing instantaneously. This is the recurrent analogue of
  the gating idea in the feedforward Highway Network.
This implements the recurrent cell from the paper:
https://arxiv.org/abs/1611.09913
Jasmine Collins, Jascha Sohl-Dickstein, and David Sussillo.
"Capacity and Trainability in Recurrent Neural Networks" Proc. ICLR 2017.
"""
def __init__(self, num_units, initializer=None, forget_bias=1.0,
activation=math_ops.tanh, reuse=None):
"""Initialize the parameters for an UGRNN cell.
Args:
num_units: int, The number of units in the UGRNN cell
initializer: (optional) The initializer to use for the weight matrices.
forget_bias: (optional) float, default 1.0, The initial bias of the
forget gate, used to reduce the scale of forgetting at the beginning
of the training.
activation: (optional) Activation function of the inner states.
Default is `tf.tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(UGRNNCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._initializer = initializer
self._forget_bias = forget_bias
self._activation = activation
self._reuse = reuse
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def call(self, inputs, state):
"""Run one step of UGRNN.
Args:
inputs: input Tensor, 2D, batch x input size.
state: state Tensor, 2D, batch x num units.
Returns:
new_output: batch x num units, Tensor representing the output of the UGRNN
after reading `inputs` when previous state was `state`. Identical to
`new_state`.
new_state: batch x num units, Tensor representing the state of the UGRNN
after reading `inputs` when previous state was `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
sigmoid = math_ops.sigmoid
input_size = inputs.get_shape().with_rank(2)[1]
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
with vs.variable_scope(vs.get_variable_scope(),
initializer=self._initializer):
cell_inputs = array_ops.concat([inputs, state], 1)
rnn_matrix = _linear(cell_inputs, 2 * self._num_units, True)
[g_act, c_act] = array_ops.split(
axis=1, num_or_size_splits=2, value=rnn_matrix)
c = self._activation(c_act)
g = sigmoid(g_act + self._forget_bias)
new_state = g * state + (1.0 - g) * c
new_output = new_state
return new_output, new_state
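# The UGRNN step above reduces to
#   g = sigmoid(W_g [x, h] + b_g + forget_bias)
#   c = activation(W_c [x, h] + b_c)
#   h_new = g * h + (1 - g) * c
# so the single gate g chooses between integrating (keeping h) and computing
# instantaneously (taking c). A minimal usage sketch with assumed sizes:
def _example_ugrnn_cell_usage():
  batch_size = 2
  cell = UGRNNCell(num_units=8)
  inputs = array_ops.zeros([batch_size, 4], dtype=dtypes.float32)
  state = cell.zero_state(batch_size, dtypes.float32)
  new_output, new_state = cell(inputs, state)  # identical tensors
  return new_output, new_state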
class IntersectionRNNCell(rnn_cell_impl.RNNCell):
"""Intersection Recurrent Neural Network (+RNN) cell.
Architecture with coupled recurrent gate as well as coupled depth
gate, designed to improve information flow through stacked RNNs. As the
architecture uses depth gating, the dimensionality of the depth
output (y) also should not change through depth (input size == output size).
To achieve this, the first layer of a stacked Intersection RNN projects
the inputs to N (num units) dimensions. Therefore when initializing an
IntersectionRNNCell, one should set `num_in_proj = N` for the first layer
and use default settings for subsequent layers.
This implements the recurrent cell from the paper:
https://arxiv.org/abs/1611.09913
Jasmine Collins, Jascha Sohl-Dickstein, and David Sussillo.
"Capacity and Trainability in Recurrent Neural Networks" Proc. ICLR 2017.
The Intersection RNN is built for use in deeply stacked
RNNs so it may not achieve best performance with depth 1.
"""
def __init__(self, num_units, num_in_proj=None,
initializer=None, forget_bias=1.0,
y_activation=nn_ops.relu, reuse=None):
"""Initialize the parameters for an +RNN cell.
Args:
num_units: int, The number of units in the +RNN cell
num_in_proj: (optional) int, The input dimensionality for the RNN.
If creating the first layer of an +RNN, this should be set to
`num_units`. Otherwise, this should be set to `None` (default).
If `None`, dimensionality of `inputs` should be equal to `num_units`,
otherwise ValueError is thrown.
initializer: (optional) The initializer to use for the weight matrices.
forget_bias: (optional) float, default 1.0, The initial bias of the
forget gates, used to reduce the scale of forgetting at the beginning
of the training.
y_activation: (optional) Activation function of the states passed
        through depth. Default is `tf.nn.relu`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(IntersectionRNNCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._initializer = initializer
self._forget_bias = forget_bias
self._num_input_proj = num_in_proj
self._y_activation = y_activation
self._reuse = reuse
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def call(self, inputs, state):
"""Run one step of the Intersection RNN.
Args:
inputs: input Tensor, 2D, batch x input size.
state: state Tensor, 2D, batch x num units.
Returns:
new_y: batch x num units, Tensor representing the output of the +RNN
after reading `inputs` when previous state was `state`.
new_state: batch x num units, Tensor representing the state of the +RNN
after reading `inputs` when previous state was `state`.
Raises:
ValueError: If input size cannot be inferred from `inputs` via
static shape inference.
ValueError: If input size != output size (these must be equal when
using the Intersection RNN).
"""
sigmoid = math_ops.sigmoid
tanh = math_ops.tanh
input_size = inputs.get_shape().with_rank(2)[1]
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
with vs.variable_scope(vs.get_variable_scope(),
initializer=self._initializer):
# read-in projections (should be used for first layer in deep +RNN
# to transform size of inputs from I --> N)
if input_size.value != self._num_units:
if self._num_input_proj:
with vs.variable_scope("in_projection"):
inputs = _linear(inputs, self._num_units, True)
else:
raise ValueError("Must have input size == output size for "
"Intersection RNN. To fix, num_in_proj should "
"be set to num_units at cell init.")
n_dim = i_dim = self._num_units
cell_inputs = array_ops.concat([inputs, state], 1)
rnn_matrix = _linear(cell_inputs, 2*n_dim + 2*i_dim, True)
gh_act = rnn_matrix[:, :n_dim] # b x n
h_act = rnn_matrix[:, n_dim:2*n_dim] # b x n
gy_act = rnn_matrix[:, 2*n_dim:2*n_dim+i_dim] # b x i
y_act = rnn_matrix[:, 2*n_dim+i_dim:2*n_dim+2*i_dim] # b x i
h = tanh(h_act)
y = self._y_activation(y_act)
gh = sigmoid(gh_act + self._forget_bias)
gy = sigmoid(gy_act + self._forget_bias)
new_state = gh * state + (1.0 - gh) * h # passed thru time
new_y = gy * inputs + (1.0 - gy) * y # passed thru depth
return new_y, new_state
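# A minimal two-layer usage sketch for IntersectionRNNCell. As the class
# docstring explains, the first layer sets num_in_proj so inputs are projected
# to num_units; deeper layers keep the default. The sizes and scope names are
# illustrative assumptions.
def _example_intersection_rnn_stack_usage():
  batch_size = 2
  num_units = 8
  first = IntersectionRNNCell(num_units, num_in_proj=num_units)
  second = IntersectionRNNCell(num_units)
  inputs = array_ops.zeros([batch_size, 5], dtype=dtypes.float32)
  with vs.variable_scope("plus_rnn_layer1"):
    y1, state1 = first(inputs, first.zero_state(batch_size, dtypes.float32))
  with vs.variable_scope("plus_rnn_layer2"):
    y2, state2 = second(y1, second.zero_state(batch_size, dtypes.float32))
  return y2, (state1, state2)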
_REGISTERED_OPS = None
class CompiledWrapper(rnn_cell_impl.RNNCell):
"""Wraps step execution in an XLA JIT scope."""
def __init__(self, cell, compile_stateful=False):
"""Create CompiledWrapper cell.
Args:
cell: Instance of `RNNCell`.
compile_stateful: Whether to compile stateful ops like initializers
and random number generators (default: False).
"""
self._cell = cell
self._compile_stateful = compile_stateful
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
return self._cell.zero_state(batch_size, dtype)
def __call__(self, inputs, state, scope=None):
if self._compile_stateful:
compile_ops = True
else:
def compile_ops(node_def):
global _REGISTERED_OPS
if _REGISTERED_OPS is None:
_REGISTERED_OPS = op_def_registry.get_registered_ops()
return not _REGISTERED_OPS[node_def.op].is_stateful
with jit.experimental_jit_scope(compile_ops=compile_ops):
return self._cell(inputs, state, scope)
def _random_exp_initializer(minval,
maxval,
seed=None,
dtype=dtypes.float32):
"""Returns an exponential distribution initializer.
Args:
minval: float or a scalar float Tensor. With value > 0. Lower bound of the
range of random values to generate.
maxval: float or a scalar float Tensor. With value > minval. Upper bound of
the range of random values to generate.
seed: An integer. Used to create random seeds.
dtype: The data type.
Returns:
An initializer that generates tensors with an exponential distribution.
"""
def _initializer(shape, dtype=dtype, partition_info=None):
del partition_info # Unused.
return math_ops.exp(
random_ops.random_uniform(
shape,
math_ops.log(minval),
math_ops.log(maxval),
dtype,
seed=seed))
return _initializer
class PhasedLSTMCell(rnn_cell_impl.RNNCell):
"""Phased LSTM recurrent network cell.
https://arxiv.org/pdf/1610.09513v1.pdf
"""
def __init__(self,
num_units,
use_peepholes=False,
leak=0.001,
ratio_on=0.1,
trainable_ratio_on=True,
period_init_min=1.0,
period_init_max=1000.0,
reuse=None):
"""Initialize the Phased LSTM cell.
Args:
num_units: int, The number of units in the Phased LSTM cell.
use_peepholes: bool, set True to enable peephole connections.
leak: float or scalar float Tensor with value in [0, 1]. Leak applied
during training.
ratio_on: float or scalar float Tensor with value in [0, 1]. Ratio of the
period during which the gates are open.
      trainable_ratio_on: bool, whether ratio_on is trainable.
period_init_min: float or scalar float Tensor. With value > 0.
Minimum value of the initialized period.
The period values are initialized by drawing from the distribution:
e^U(log(period_init_min), log(period_init_max))
Where U(.,.) is the uniform distribution.
period_init_max: float or scalar float Tensor.
With value > period_init_min. Maximum value of the initialized period.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(PhasedLSTMCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._leak = leak
self._ratio_on = ratio_on
self._trainable_ratio_on = trainable_ratio_on
self._period_init_min = period_init_min
self._period_init_max = period_init_max
self._reuse = reuse
@property
def state_size(self):
return rnn_cell_impl.LSTMStateTuple(self._num_units, self._num_units)
@property
def output_size(self):
return self._num_units
def _mod(self, x, y):
"""Modulo function that propagates x gradients."""
return array_ops.stop_gradient(math_ops.mod(x, y) - x) + x
def _get_cycle_ratio(self, time, phase, period):
"""Compute the cycle ratio in the dtype of the time."""
phase_casted = math_ops.cast(phase, dtype=time.dtype)
period_casted = math_ops.cast(period, dtype=time.dtype)
shifted_time = time - phase_casted
cycle_ratio = self._mod(shifted_time, period_casted) / period_casted
return math_ops.cast(cycle_ratio, dtype=dtypes.float32)
def call(self, inputs, state):
"""Phased LSTM Cell.
Args:
inputs: A tuple of 2 Tensor.
The first Tensor has shape [batch, 1], and type float32 or float64.
It stores the time.
The second Tensor has shape [batch, features_size], and type float32.
It stores the features.
state: rnn_cell_impl.LSTMStateTuple, state from previous timestep.
Returns:
A tuple containing:
- A Tensor of float32, and shape [batch_size, num_units], representing the
output of the cell.
- A rnn_cell_impl.LSTMStateTuple, containing 2 Tensors of float32, shape
[batch_size, num_units], representing the new state and the output.
"""
(c_prev, h_prev) = state
(time, x) = inputs
in_mask_gates = [x, h_prev]
if self._use_peepholes:
in_mask_gates.append(c_prev)
with vs.variable_scope("mask_gates"):
mask_gates = math_ops.sigmoid(
_linear(in_mask_gates, 2 * self._num_units, True))
[input_gate, forget_gate] = array_ops.split(
axis=1, num_or_size_splits=2, value=mask_gates)
with vs.variable_scope("new_input"):
new_input = math_ops.tanh(
_linear([x, h_prev], self._num_units, True))
new_c = (c_prev * forget_gate + input_gate * new_input)
in_out_gate = [x, h_prev]
if self._use_peepholes:
in_out_gate.append(new_c)
with vs.variable_scope("output_gate"):
output_gate = math_ops.sigmoid(
_linear(in_out_gate, self._num_units, True))
new_h = math_ops.tanh(new_c) * output_gate
period = vs.get_variable(
"period", [self._num_units],
initializer=_random_exp_initializer(
self._period_init_min, self._period_init_max))
phase = vs.get_variable(
"phase", [self._num_units],
initializer=init_ops.random_uniform_initializer(
0., period.initial_value))
ratio_on = vs.get_variable(
"ratio_on", [self._num_units],
initializer=init_ops.constant_initializer(self._ratio_on),
trainable=self._trainable_ratio_on)
cycle_ratio = self._get_cycle_ratio(time, phase, period)
k_up = 2 * cycle_ratio / ratio_on
k_down = 2 - k_up
k_closed = self._leak * cycle_ratio
k = array_ops.where(cycle_ratio < ratio_on, k_down, k_closed)
k = array_ops.where(cycle_ratio < 0.5 * ratio_on, k_up, k)
new_c = k * new_c + (1 - k) * c_prev
new_h = k * new_h + (1 - k) * h_prev
new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_h)
return new_h, new_state
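# A minimal usage sketch for PhasedLSTMCell: the cell input is a (time, x)
# tuple, where time has shape [batch, 1] and drives the per-unit open/closed
# gate k. The sizes below are illustrative assumptions.
def _example_phased_lstm_usage():
  batch_size = 2
  cell = PhasedLSTMCell(num_units=8, use_peepholes=True)
  time = array_ops.zeros([batch_size, 1], dtype=dtypes.float32)
  x = array_ops.zeros([batch_size, 4], dtype=dtypes.float32)
  state = cell.zero_state(batch_size, dtypes.float32)
  new_h, new_state = cell((time, x), state)
  return new_h, new_state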
class ConvLSTMCell(rnn_cell_impl.RNNCell):
"""Convolutional LSTM recurrent network cell.
https://arxiv.org/pdf/1506.04214v1.pdf
"""
def __init__(self,
conv_ndims,
input_shape,
output_channels,
kernel_shape,
use_bias=True,
skip_connection=False,
forget_bias=1.0,
initializers=None,
name="conv_lstm_cell"):
"""Construct ConvLSTMCell.
Args:
conv_ndims: Convolution dimensionality (1, 2 or 3).
input_shape: Shape of the input as int tuple, excluding the batch size.
output_channels: int, number of output channels of the conv LSTM.
      kernel_shape: Shape of the convolution kernel, as an int tuple of
        length 1, 2 or 3.
use_bias: Use bias in convolutions.
skip_connection: If set to `True`, concatenate the input to the
output of the conv LSTM. Default: `False`.
forget_bias: Forget bias.
name: Name of the module.
Raises:
ValueError: If `skip_connection` is `True` and stride is different from 1
or if `input_shape` is incompatible with `conv_ndims`.
"""
super(ConvLSTMCell, self).__init__(name=name)
if conv_ndims != len(input_shape)-1:
raise ValueError("Invalid input_shape {} for conv_ndims={}.".format(
input_shape, conv_ndims))
self._conv_ndims = conv_ndims
self._input_shape = input_shape
self._output_channels = output_channels
self._kernel_shape = kernel_shape
self._use_bias = use_bias
self._forget_bias = forget_bias
self._skip_connection = skip_connection
self._total_output_channels = output_channels
if self._skip_connection:
self._total_output_channels += self._input_shape[-1]
state_size = tensor_shape.TensorShape(self._input_shape[:-1]
+ [self._output_channels])
self._state_size = rnn_cell_impl.LSTMStateTuple(state_size, state_size)
self._output_size = tensor_shape.TensorShape(self._input_shape[:-1]
+ [self._total_output_channels])
@property
def output_size(self):
return self._output_size
@property
def state_size(self):
return self._state_size
def call(self, inputs, state, scope=None):
cell, hidden = state
new_hidden = _conv([inputs, hidden],
self._kernel_shape,
4*self._output_channels,
self._use_bias)
gates = array_ops.split(value=new_hidden,
num_or_size_splits=4,
axis=self._conv_ndims+1)
input_gate, new_input, forget_gate, output_gate = gates
new_cell = math_ops.sigmoid(forget_gate + self._forget_bias) * cell
new_cell += math_ops.sigmoid(input_gate) * math_ops.tanh(new_input)
output = math_ops.tanh(new_cell) * math_ops.sigmoid(output_gate)
if self._skip_connection:
output = array_ops.concat([output, inputs], axis=-1)
new_state = rnn_cell_impl.LSTMStateTuple(new_cell, output)
return output, new_state
class Conv1DLSTMCell(ConvLSTMCell):
"""1D Convolutional LSTM recurrent network cell.
https://arxiv.org/pdf/1506.04214v1.pdf
"""
def __init__(self, name="conv_1d_lstm_cell", **kwargs):
"""Construct Conv1DLSTM. See `ConvLSTMCell` for more details."""
super(Conv1DLSTMCell, self).__init__(conv_ndims=1, **kwargs)
class Conv2DLSTMCell(ConvLSTMCell):
"""2D Convolutional LSTM recurrent network cell.
https://arxiv.org/pdf/1506.04214v1.pdf
"""
def __init__(self, name="conv_2d_lstm_cell", **kwargs):
"""Construct Conv2DLSTM. See `ConvLSTMCell` for more details."""
super(Conv2DLSTMCell, self).__init__(conv_ndims=2, **kwargs)
class Conv3DLSTMCell(ConvLSTMCell):
"""3D Convolutional LSTM recurrent network cell.
https://arxiv.org/pdf/1506.04214v1.pdf
"""
def __init__(self, name="conv_3d_lstm_cell", **kwargs):
"""Construct Conv3DLSTM. See `ConvLSTMCell` for more details."""
super(Conv3DLSTMCell, self).__init__(conv_ndims=3, **kwargs)
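# A minimal usage sketch for Conv2DLSTMCell: input_shape excludes the batch
# dimension and ends with the channel count. All concrete sizes below are
# illustrative assumptions.
def _example_conv_2d_lstm_usage():
  batch_size = 2
  height, width, channels = 8, 8, 3
  cell = Conv2DLSTMCell(input_shape=[height, width, channels],
                        output_channels=4,
                        kernel_shape=[3, 3])
  inputs = array_ops.zeros([batch_size, height, width, channels],
                           dtype=dtypes.float32)
  state = cell.zero_state(batch_size, dtypes.float32)
  output, new_state = cell(inputs, state)  # output: [batch, 8, 8, 4]
  return output, new_state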
def _conv(args,
filter_size,
num_features,
bias,
bias_start=0.0):
"""convolution:
Args:
args: a Tensor or a list of Tensors of dimension 3D, 4D or 5D,
batch x n, Tensors.
filter_size: int tuple of filter height and width.
num_features: int, number of features.
bias_start: starting value to initialize the bias; 0 by default.
Returns:
A 3D, 4D, or 5D Tensor with shape [batch ... num_features]
Raises:
ValueError: if some of the arguments has unspecified or wrong shape.
"""
# Calculate the total size of arguments on dimension 1.
total_arg_size_depth = 0
shapes = [a.get_shape().as_list() for a in args]
shape_length = len(shapes[0])
for shape in shapes:
    if len(shape) not in [3, 4, 5]:
      raise ValueError("Conv Linear expects 3D, 4D or 5D arguments: %s" % str(shapes))
    if len(shape) != len(shapes[0]):
      raise ValueError("Conv Linear expects all args to be of same dimension: %s" % str(shapes))
else:
total_arg_size_depth += shape[-1]
dtype = [a.dtype for a in args][0]
# determine correct conv operation
if shape_length == 3:
conv_op = nn_ops.conv1d
strides = 1
elif shape_length == 4:
conv_op = nn_ops.conv2d
strides = shape_length*[1]
elif shape_length == 5:
conv_op = nn_ops.conv3d
strides = shape_length*[1]
# Now the computation.
kernel = vs.get_variable(
"kernel",
filter_size + [total_arg_size_depth, num_features],
dtype=dtype)
if len(args) == 1:
res = conv_op(args[0],
kernel,
strides,
padding='SAME')
else:
res = conv_op(array_ops.concat(axis=shape_length-1, values=args),
kernel,
strides,
padding='SAME')
if not bias:
return res
bias_term = vs.get_variable(
"biases", [num_features],
dtype=dtype,
initializer=init_ops.constant_initializer(
bias_start, dtype=dtype))
return res + bias_term
class GLSTMCell(rnn_cell_impl.RNNCell):
"""Group LSTM cell (G-LSTM).
The implementation is based on:
https://arxiv.org/abs/1703.10722
O. Kuchaiev and B. Ginsburg
"Factorization Tricks for LSTM Networks", ICLR 2017 workshop.
"""
def __init__(self, num_units, initializer=None, num_proj=None,
number_of_groups=1, forget_bias=1.0, activation=math_ops.tanh,
reuse=None):
"""Initialize the parameters of G-LSTM cell.
Args:
num_units: int, The number of units in the G-LSTM cell
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
number_of_groups: (optional) int, number of groups to use.
        If `number_of_groups` is 1, then it should be equivalent to an LSTM cell.
forget_bias: Biases of the forget gate are initialized by default to 1
in order to reduce the scale of forgetting at the beginning of
the training.
activation: Activation function of the inner states.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already
has the given variables, an error is raised.
Raises:
ValueError: If `num_units` or `num_proj` is not divisible by
`number_of_groups`.
"""
super(GLSTMCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._initializer = initializer
self._num_proj = num_proj
self._forget_bias = forget_bias
self._activation = activation
self._number_of_groups = number_of_groups
if self._num_units % self._number_of_groups != 0:
raise ValueError("num_units must be divisible by number_of_groups")
if self._num_proj:
if self._num_proj % self._number_of_groups != 0:
raise ValueError("num_proj must be divisible by number_of_groups")
self._group_shape = [int(self._num_proj / self._number_of_groups),
int(self._num_units / self._number_of_groups)]
else:
self._group_shape = [int(self._num_units / self._number_of_groups),
int(self._num_units / self._number_of_groups)]
if num_proj:
self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_proj)
self._output_size = num_proj
else:
self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_units)
self._output_size = num_units
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def _get_input_for_group(self, inputs, group_id, group_size):
"""Slices inputs into groups to prepare for processing by cell's groups
Args:
inputs: cell input or it's previous state,
a Tensor, 2D, [batch x num_units]
group_id: group id, a Scalar, for which to prepare input
group_size: size of the group
Returns:
subset of inputs corresponding to group "group_id",
a Tensor, 2D, [batch x num_units/number_of_groups]
"""
return array_ops.slice(input_=inputs,
begin=[0, group_id * group_size],
size=[self._batch_size, group_size],
name=("GLSTM_group%d_input_generation" % group_id))
def call(self, inputs, state):
"""Run one step of G-LSTM.
Args:
inputs: input Tensor, 2D, [batch x num_units].
state: this must be a tuple of state Tensors, both `2-D`,
with column sizes `c_state` and `m_state`.
Returns:
A tuple containing:
- A `2-D, [batch x output_dim]`, Tensor representing the output of the
G-LSTM after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- LSTMStateTuple representing the new state of G-LSTM cell
after reading `inputs` when the previous state was `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
(c_prev, m_prev) = state
self._batch_size = inputs.shape[0].value or array_ops.shape(inputs)[0]
dtype = inputs.dtype
scope = vs.get_variable_scope()
with vs.variable_scope(scope, initializer=self._initializer):
i_parts = []
j_parts = []
f_parts = []
o_parts = []
for group_id in range(self._number_of_groups):
with vs.variable_scope("group%d" % group_id):
x_g_id = array_ops.concat(
[self._get_input_for_group(inputs, group_id,
self._group_shape[0]),
self._get_input_for_group(m_prev, group_id,
self._group_shape[0])], axis=1)
R_k = _linear(x_g_id, 4 * self._group_shape[1], bias=False)
i_k, j_k, f_k, o_k = array_ops.split(R_k, 4, 1)
i_parts.append(i_k)
j_parts.append(j_k)
f_parts.append(f_k)
o_parts.append(o_k)
bi = vs.get_variable(name="bias_i",
shape=[self._num_units],
dtype=dtype,
initializer=
init_ops.constant_initializer(0.0, dtype=dtype))
bj = vs.get_variable(name="bias_j",
shape=[self._num_units],
dtype=dtype,
initializer=
init_ops.constant_initializer(0.0, dtype=dtype))
bf = vs.get_variable(name="bias_f",
shape=[self._num_units],
dtype=dtype,
initializer=
init_ops.constant_initializer(0.0, dtype=dtype))
bo = vs.get_variable(name="bias_o",
shape=[self._num_units],
dtype=dtype,
initializer=
init_ops.constant_initializer(0.0, dtype=dtype))
i = nn_ops.bias_add(array_ops.concat(i_parts, axis=1), bi)
j = nn_ops.bias_add(array_ops.concat(j_parts, axis=1), bj)
f = nn_ops.bias_add(array_ops.concat(f_parts, axis=1), bf)
o = nn_ops.bias_add(array_ops.concat(o_parts, axis=1), bo)
c = (math_ops.sigmoid(f + self._forget_bias) * c_prev +
math_ops.sigmoid(i) * math_ops.tanh(j))
m = math_ops.sigmoid(o) * self._activation(c)
if self._num_proj is not None:
with vs.variable_scope("projection"):
m = _linear(m, self._num_proj, bias=False)
new_state = rnn_cell_impl.LSTMStateTuple(c, m)
return m, new_state
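# A minimal usage sketch for GLSTMCell with two groups. Each group sees only
# its slice of the input and of the previous projected state, so the input
# width is chosen to match num_proj here; all sizes are illustrative
# assumptions.
def _example_glstm_cell_usage():
  batch_size = 2
  cell = GLSTMCell(num_units=8, num_proj=4, number_of_groups=2)
  inputs = array_ops.zeros([batch_size, 4], dtype=dtypes.float32)
  state = cell.zero_state(batch_size, dtypes.float32)
  m, new_state = cell(inputs, state)  # m has shape [batch, num_proj]
  return m, new_state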
| apache-2.0 | -2,546,763,614,361,656,000 | 38.440827 | 97 | 0.623654 | false | 3.58381 | false | false | false |
jriehl/numba | numba/typed/typedlist.py | 1 | 9005 |
"""
Python wrapper that connects CPython interpreter to the Numba typed-list.
This is the code that is used when creating typed lists outside of a `@jit`
context and when returning a typed-list from a `@jit` decorated function. It
basically a Python class that has a Numba allocated typed-list under the hood
and uses `@jit` functions to access it. Since it inherits from MutableSequence
it should really quack like the CPython `list`.
"""
from numba.six import MutableSequence
from numba.types import ListType, TypeRef
from numba.targets.imputils import numba_typeref_ctor
from numba import listobject
from numba import njit, types, cgutils, errors, typeof
from numba.extending import (
overload_method,
overload,
box,
unbox,
NativeValue,
type_callable,
)
@njit
def _make_list(itemty):
return listobject._as_meminfo(listobject.new_list(itemty))
@njit
def _length(l):
return len(l)
@njit
def _append(l, item):
l.append(item)
@njit
def _setitem(l, i, item):
l[i] = item
@njit
def _getitem(l, i):
return l[i]
@njit
def _contains(l, item):
return item in l
@njit
def _count(l, item):
return l.count(item)
@njit
def _pop(l, i):
return l.pop(i)
@njit
def _delitem(l, i):
del l[i]
@njit
def _extend(l, iterable):
return l.extend(iterable)
@njit
def _insert(l, i, item):
l.insert(i, item)
@njit
def _remove(l, item):
l.remove(item)
@njit
def _clear(l):
l.clear()
@njit
def _reverse(l):
l.reverse()
@njit
def _copy(l):
return l.copy()
@njit
def _eq(t, o):
return t == o
@njit
def _ne(t, o):
return t != o
@njit
def _lt(t, o):
return t < o
@njit
def _le(t, o):
return t <= o
@njit
def _gt(t, o):
return t > o
@njit
def _ge(t, o):
return t >= o
@njit
def _index(l, item, start, end):
return l.index(item, start, end)
def _from_meminfo_ptr(ptr, listtype):
return List(meminfo=ptr, lsttype=listtype)
class List(MutableSequence):
"""A typed-list usable in Numba compiled functions.
Implements the MutableSequence interface.
"""
@classmethod
def empty_list(cls, item_type):
"""Create a new empty List with *item_type* as the type for the items
        of the list.
"""
return cls(lsttype=ListType(item_type))
def __init__(self, **kwargs):
"""
For users, the constructor does not take any parameters.
The keyword arguments are for internal use only.
Parameters
----------
lsttype : numba.types.ListType; keyword-only
Used internally for the list type.
meminfo : MemInfo; keyword-only
Used internally to pass the MemInfo object when boxing.
"""
if kwargs:
self._list_type, self._opaque = self._parse_arg(**kwargs)
else:
self._list_type = None
def _parse_arg(self, lsttype, meminfo=None):
if not isinstance(lsttype, ListType):
raise TypeError('*lsttype* must be a ListType')
if meminfo is not None:
opaque = meminfo
else:
opaque = _make_list(lsttype.item_type)
return lsttype, opaque
@property
def _numba_type_(self):
if self._list_type is None:
raise TypeError("invalid operation on untyped list")
return self._list_type
@property
def _typed(self):
"""Returns True if the list is typed.
"""
return self._list_type is not None
def _initialise_list(self, item):
lsttype = types.ListType(typeof(item))
self._list_type, self._opaque = self._parse_arg(lsttype)
def __len__(self):
if not self._typed:
return 0
else:
return _length(self)
def __eq__(self, other):
return _eq(self, other)
def __ne__(self, other):
return _ne(self, other)
def __lt__(self, other):
return _lt(self, other)
def __le__(self, other):
return _le(self, other)
def __gt__(self, other):
return _gt(self, other)
def __ge__(self, other):
return _ge(self, other)
def append(self, item):
if not self._typed:
self._initialise_list(item)
_append(self, item)
def __setitem__(self, i, item):
if not self._typed:
self._initialise_list(item)
_setitem(self, i, item)
def __getitem__(self, i):
if not self._typed:
raise IndexError
else:
return _getitem(self, i)
def __iter__(self):
for i in range(len(self)):
yield self[i]
def __contains__(self, item):
return _contains(self, item)
def __delitem__(self, i):
_delitem(self, i)
def insert(self, i, item):
if not self._typed:
self._initialise_list(item)
_insert(self, i, item)
def count(self, item):
return _count(self, item)
def pop(self, i=-1):
return _pop(self, i)
def extend(self, iterable):
if not self._typed:
# Need to get the first element of the iterable to initialise the
# type of the list. FIXME: this may be a problem if the iterable
# can not be sliced.
self._initialise_list(iterable[0])
self.append(iterable[0])
return _extend(self, iterable[1:])
return _extend(self, iterable)
def remove(self, item):
return _remove(self, item)
def clear(self):
return _clear(self)
def reverse(self):
return _reverse(self)
def copy(self):
return _copy(self)
def index(self, item, start=None, stop=None):
return _index(self, item, start, stop)
def __str__(self):
buf = []
for x in self:
buf.append("{}".format(x))
return '[{0}]'.format(', '.join(buf))
def __repr__(self):
body = str(self)
prefix = str(self._list_type)
return "{prefix}({body})".format(prefix=prefix, body=body)
# XXX: should we have a better way to classmethod
@overload_method(TypeRef, 'empty_list')
def typedlist_empty(cls, item_type):
if cls.instance_type is not ListType:
return
def impl(cls, item_type):
return listobject.new_list(item_type)
return impl
@box(types.ListType)
def box_lsttype(typ, val, c):
context = c.context
builder = c.builder
# XXX deduplicate
ctor = cgutils.create_struct_proxy(typ)
lstruct = ctor(context, builder, value=val)
# Returns the plain MemInfo
boxed_meminfo = c.box(
types.MemInfoPointer(types.voidptr),
lstruct.meminfo,
)
modname = c.context.insert_const_string(
c.builder.module, 'numba.typed.typedlist',
)
typedlist_mod = c.pyapi.import_module_noblock(modname)
fmp_fn = c.pyapi.object_getattr_string(typedlist_mod, '_from_meminfo_ptr')
lsttype_obj = c.pyapi.unserialize(c.pyapi.serialize_object(typ))
res = c.pyapi.call_function_objargs(fmp_fn, (boxed_meminfo, lsttype_obj))
c.pyapi.decref(fmp_fn)
c.pyapi.decref(typedlist_mod)
c.pyapi.decref(boxed_meminfo)
return res
@unbox(types.ListType)
def unbox_listtype(typ, val, c):
context = c.context
builder = c.builder
miptr = c.pyapi.object_getattr_string(val, '_opaque')
native = c.unbox(types.MemInfoPointer(types.voidptr), miptr)
mi = native.value
ctor = cgutils.create_struct_proxy(typ)
lstruct = ctor(context, builder)
data_pointer = context.nrt.meminfo_data(builder, mi)
data_pointer = builder.bitcast(
data_pointer,
listobject.ll_list_type.as_pointer(),
)
lstruct.data = builder.load(data_pointer)
lstruct.meminfo = mi
lstobj = lstruct._getvalue()
c.pyapi.decref(miptr)
return NativeValue(lstobj)
#
# The following contains the logic for the type-inferred constructor
#
@type_callable(ListType)
def typedlist_call(context):
"""
Defines typing logic for ``List()``.
Produces List[undefined]
"""
def typer():
return types.ListType(types.undefined)
return typer
@overload(numba_typeref_ctor)
def impl_numba_typeref_ctor(cls):
"""
Defines ``List()``, the type-inferred version of the list ctor.
Parameters
----------
cls : TypeRef
Expecting a TypeRef of a precise ListType.
    See also: `redirect_type_ctor` in numba/target/builtins.py
"""
list_ty = cls.instance_type
if not isinstance(list_ty, types.ListType):
msg = "expecting a ListType but got {}".format(list_ty)
return # reject
# Ensure the list is precisely typed.
if not list_ty.is_precise():
msg = "expecting a precise ListType but got {}".format(list_ty)
raise errors.LoweringError(msg)
item_type = types.TypeRef(list_ty.item_type)
def impl(cls):
# Simply call .empty_list with the item types from *cls*
return List.empty_list(item_type)
return impl
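# Illustrative usage sketch (not part of the original module). It assumes the
# public ``numba.typed.List`` proxy assembled from the pieces above and a
# reasonably recent numba install; exact import paths can differ by version.
#
#     from numba import njit, types
#     from numba.typed import List
#
#     @njit
#     def build():
#         lst = List.empty_list(types.int64)   # typed, initially empty
#         lst.append(1)
#         lst.append(2)
#         return lst
#
#     result = build()        # usable from pure Python through the proxy above
#     print(len(result), result[0])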
| bsd-2-clause | 2,869,093,582,424,535,000 | 21.179803 | 78 | 0.608329 | false | 3.407113 | false | false | false |
tadek-project/tadek-daemon | src/accessibility/constants.py | 1 | 5948 | ################################################################################
## ##
## This file is a part of TADEK. ##
## ##
## TADEK - Test Automation in a Distributed Environment ##
## (http://tadek.comarch.com) ##
## ##
## Copyright (C) 2011 Comarch S.A. ##
## All rights reserved. ##
## ##
## TADEK is free software for non-commercial purposes. For commercial ones ##
## we offer a commercial license. Please check http://tadek.comarch.com for ##
## details or write to [email protected] ##
## ##
## You can redistribute it and/or modify it under the terms of the ##
## GNU General Public License as published by the Free Software Foundation, ##
## either version 3 of the License, or (at your option) any later version. ##
## ##
## TADEK is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with TADEK bundled with this file in the file LICENSE. ##
## If not, see http://www.gnu.org/licenses/. ##
## ##
## Please notice that Contributor Agreement applies to any contribution ##
## you make to TADEK. The Agreement must be completed, signed and sent ##
## to Comarch before any contribution is made. You should have received ##
## a copy of Contribution Agreement along with TADEK bundled with this file ##
## in the file CONTRIBUTION_AGREEMENT.pdf or see http://tadek.comarch.com ##
## or write to [email protected] ##
## ##
################################################################################
__all__ = ["ActionSet", "ButtonSet", "KeySet", "RelationSet",
"RoleSet", "StateSet", "keyset"]
from tadek.core.constants import *
class ConstantSet(object):
'''
    Defines a read-only set of related constants, which can be initialized only
once - by current implementation of the accessibility interface.
'''
__slots__ = ("_name", "_items")
def __init__(self, name, *items):
self._name = name
self._items = {}
        # Initializes items of the constant set with None
for i in items:
self._items[i] = None
def __getattr__(self, name):
'''
Gets a item value of the constant set given by the name.
'''
if name in self.__slots__:
return object.__getattribute__(self, name)
elif name in self._items and self._items[name] is not None:
return self._items[name]
else:
raise AttributeError("'%s' set has no item '%s'"
% (self._name, name))
def __setattr__(self, name, value):
'''
Sets new item value of the constant set given by the name.
'''
if name in self.__slots__:
object.__setattr__(self, name, value)
elif name not in self._items:
raise AttributeError("'%s' set has no item '%s'"
% (self._name, name))
elif self._items[name] is not None:
raise ValueError("'%s' item of '%s' set already initialized"
% (name, self._name))
else:
self._items[name] = value
def __iter__(self):
'''
Iterator that yields one item name of the constant set per iteration.
'''
for i in self._items:
if self._items[i] is not None:
yield self._items[i]
def name(self, value):
'''
Returns a item name of the constant set given by its value.
'''
if value is not None:
for n, v in self._items.iteritems():
if v == value:
return n
return None
class ActionSet(ConstantSet):
'''
An action set.
'''
def __init__(self):
ConstantSet.__init__(self, "Action", *ACTIONS)
class RelationSet(ConstantSet):
'''
A relation set.
'''
def __init__(self):
ConstantSet.__init__(self, "Relation", *RELATIONS)
class RoleSet(ConstantSet):
'''
A role set.
'''
def __init__(self):
ConstantSet.__init__(self, "Role", *ROLES)
class StateSet(ConstantSet):
'''
A state set.
'''
def __init__(self):
ConstantSet.__init__(self, "State", *STATES)
class ButtonSet(ConstantSet):
'''
A button set.
'''
def __init__(self):
ConstantSet.__init__(self, "Button", *BUTTONS)
class KeySet(ConstantSet):
'''
A key set.
'''
def __init__(self):
ConstantSet.__init__(self, "Key", *KEY_SYMS.keys())
keyset = KeySet()
# Set default values:
for key, code in KEY_SYMS.iteritems():
setattr(keyset, key, code)
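# Illustrative sketch (not from the original file): how an accessibility
# backend is expected to initialize a ConstantSet exactly once and how other
# code reads it afterwards. The item name "PUSH_BUTTON" and its value are
# hypothetical and only valid if "PUSH_BUTTON" is listed in ROLES.
#
#     roles = RoleSet()
#     roles.PUSH_BUTTON = "push button"   # first assignment succeeds
#     roles.PUSH_BUTTON = "other"         # second assignment raises ValueError
#     roles.name("push button")           # -> "PUSH_BUTTON"
#     for value in roles:                 # iterates only initialized items
#         print value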
| gpl-3.0 | 8,102,291,572,339,792,000 | 37.623377 | 80 | 0.457297 | false | 4.792909 | false | false | false |
baku89/Subete-ga-F-ni-naru-ED | golly/export_large.py | 1 | 3439 | from __future__ import division
import golly as g
from PIL import Image
from math import floor, ceil, log
import os
import json
#---------------------------------------------
# settings
exportDir = "/Volumes/MugiRAID1/Works/2015/13_0xff/ca/golly-exported/otca"
# common
# duration = 147 #int( g.getgen() )
otcaDur = 35328
worldWidth = 30730
# 1/16x (1920)
duration = 64
ratio = 8
subdiv = 1
skipBound = 1
# skipFrame = int(genDur / duration)
# ad
# name = "ad_x%02d" % ratio
# bound = [4, 3, 1, 1]
# da
# name = "da_x%02d" % ratio
# bound = [5, 2, 1, 1]
# dd
# name = "dd_x%02d" % ratio
# bound = [0, 0, 1, 1]
# aa
name = "aa_x%02d" % ratio
bound = [8, 3, 1, 1]
bound[0] -= 8
bound[1] -= 8
# 1/8x
# ratio = 8
# subdiv = 2
# skipBound = 2
# skipFrame = 1000
# bound = [2, 2, 11, 11]
# 1/4x
# ratio = 4
# subdiv = 2
# skipBound = 2
# skipFrame = 1000
# bound = [2, 2, 9, 8]
# 1/2x
# ratio = 2
# subdiv = 2
# skipBound = 2
# skipFrame = 1000
# bound = [3, 3, 6, 6]
# 1/1x
# ratio = 1
# subdiv = 1
# skipBound = 1
# skipFrame = 1
# dead or alive
# bound = [0, 0, 1, 1]
# mode = "dead"
# bound = [2, 5, 1, 1]
# mode = "aliv"
#---------------------------------------------
# helpers
def log( data ):
g.note( json.dumps(data) )
def getPalette():
colors = g.getcolors()
palette = {}
for i in xrange( 0, len(colors), 4 ):
state = colors[ i ]
rVal = colors[ i + 1 ]
gVal = colors[ i + 2 ]
bVal = colors[ i + 3 ]
palette[ state ] = ( rVal, gVal, bVal )
return palette
def main( step = 1, start = 0 ):
global bound, expand
g.reset()
g.run(otcaDur)
# get current Golly states
cellWidth = 2048
bound[ 0 ] = bound[0] * cellWidth
bound[ 1 ] = bound[1] * cellWidth
bound[ 2 ] = bound[2] * cellWidth
bound[ 3 ] = bound[3] * cellWidth
left = bound[ 0 ]
top = bound[ 1 ]
width = bound[ 2 ]
height = bound[ 3 ]
palette = getPalette()
cells = g.getcells( [0, 0, 1, 1] )
isMultiStates = len(cells) % 2 == 1
cellW = 2
# create image and destination directory
dstDir = "%s/%s" % (exportDir, name)
if not os.path.exists( dstDir ):
os.makedirs( dstDir )
# else:
# g.note( "destination folder already exists" )
# g.exit()
imgWidth = int( width / ratio )
imgHeight = int ( height / ratio )
boundWidth = ratio / subdiv
pb = [0, 0, boundWidth, boundWidth] # pixel bound
i = x = y = bx = by = 0
frameCount = 0
step = int(otcaDur / duration)
for i in xrange(0, duration):
g.show("Processing... %d / %d" % (i+1, duration))
g.run(step)
g.update()
img = Image.new("RGB", (imgWidth, imgHeight))
for y in xrange(imgHeight):
for x in xrange(imgWidth):
for by in xrange(0, subdiv, skipBound):
for bx in xrange(0, subdiv, skipBound):
pb[0] = left + x * ratio + bx * boundWidth
pb[1] = top + y * ratio + by * boundWidth
cells = g.getcells(pb)
if len( cells ) > 0:
img.putpixel((x, y), (255, 255, 255))
break
else:
continue
break
# save
# img.save( "%s/%s_%02dx_%s_%08d.png" % (dstDir, name, ratio, mode, i) )
# img.save( "%s/%s_%02dx_%08d.png" % (dstDir, name, ratio, i) )
img.save("%s/%s_%04d.png" % (dstDir, name, i))
g.show("Done.")
#---------------------------------------------
# main
main()
| mit | -5,444,031,110,624,764,000 | 16.390374 | 74 | 0.52399 | false | 2.566418 | false | false | false |
tempbottle/chillaxd | chillaxd/raft/serverstate.py | 3 | 7641 | # -*- coding: utf-8 -*-
# Copyright Yassine Lamgarchal <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from chillaxd import commands
from chillaxd.raft import message
LOG = logging.getLogger(__name__)
class ServerState(object):
# The different Raft states.
_LEADER = "LEADER"
_CANDIDATE = "CANDIDATE"
_FOLLOWER = "FOLLOWER"
def __init__(self, server, log, queued_commands, private_endpoint):
self._server = server
self._log = log
self._queued_commands = queued_commands
self._private_endpoint = private_endpoint
# The server state, initially a follower.
self._current_state = ServerState._FOLLOWER
# The current known leader.
self._leader = None
# Set of peers that voted for this server in current term.
self._voters = set()
# TODO(yassine): must be persisted
# The candidate the server has voted for in current term.
self._voted_for = None
# Index of highest log entry known to be committed
# (initialized to 0, increases monotonically)
self._commit_index = 0
# Index of highest log entry applied to state machine
# (initialized to 0, increases monotonically)
self._last_applied = 0
# For each remote peer, index of the next log entry to send
# (initialized to leader last log index + 1)
self._next_index = {}
# For each remote peer, index of the highest log entry known to be
# replicated on that peer (initialized to 0, increases monotonically)
self._match_index = {}
# Latest term the server has seen
# (initialized to 0 on first boot, increases monotonically).
self._current_term = 0
def is_leader(self):
return self._current_state == ServerState._LEADER
def is_candidate(self):
return self._current_state == ServerState._CANDIDATE
def is_follower(self):
return self._current_state == ServerState._FOLLOWER
def switch_to_leader(self):
"""Switch to leader state.
Enable the heartbeat periodic call and
stop to check if a leader is still alive.
"""
if (not self._server.is_standalone() and
self._current_state != ServerState._CANDIDATE):
raise InvalidState(
"Invalid state '%s' while transiting to leader state." %
self._current_state)
self._current_state = ServerState._LEADER
self._voters.clear()
self._voted_for = None
LOG.info("switched to leader, term='%d'" % self._current_term)
for remote_peer in self._server.remote_peers():
self._next_index[remote_peer] = self._log.last_index() + 1
self._match_index[remote_peer] = 0
if not self._server.is_standalone():
self._server.broadcast_append_entries()
self._server.heartbeating.start()
self._server.checking_leader_timeout.stop()
if not self._server.is_standalone():
command_id, noop_message = commands.build_no_operation()
self._log.append_entry(self._current_term, noop_message)
self._queued_commands[command_id] = (None, -1)
def switch_to_follower(self, m_term, m_leader):
"""Switch to follower state.
Disable the heartbeat periodic call and
start to check if the leader is still alive.
:param m_term: The last recent known term.
:type m_term: int
:param m_leader: The leader if a valid append entry has
been received, None otherwise.
:type: str
"""
if self._current_state == ServerState._LEADER:
self._server.checking_leader_timeout.start()
self._server.heartbeating.stop()
self._current_state = ServerState._FOLLOWER
self._leader = m_leader
self._current_term = max(m_term, self._current_term)
self._voters.clear()
self._voted_for = None
LOG.info("switched to follower, term='%d'" % self._current_term)
def switch_to_candidate(self):
"""Switch to candidate state.
Increment the current term, vote for self, and broadcast a
request vote. The election timeout is randomly reinitialized.
"""
if self._current_state == ServerState._LEADER:
raise InvalidState(
"Invalid state '%s' while transiting to candidate state." %
self._current_state)
self._current_term += 1
self._current_state = ServerState._CANDIDATE
LOG.debug("switched to candidate, term='%d'" % self._current_term)
self._voters.clear()
self._voters.add(self._private_endpoint)
self._voted_for = self._private_endpoint
l_l_i, l_l_t = self._log.index_and_term_of_last_entry()
rv_message = message.build_request_vote(self._current_term, l_l_i,
l_l_t)
# Broadcast request vote and reset election timeout.
self._server.broadcast_message(rv_message)
self._server.reset_election_timeout()
def init_indexes(self, remote_peer_id):
"""Initialize next_index and match_index of a remote peer.
:param remote_peer_id: The id of the remote peer.
:type remote_peer_id: six.binary_type
"""
self._next_index[remote_peer_id] = self._log.last_index() + 1
self._match_index[remote_peer_id] = 0
def next_index(self, peer_id):
return self._next_index[peer_id]
def update_next_index(self, peer_id, new_next_index):
self._next_index[peer_id] = new_next_index
def match_index_values(self):
return self._match_index.values()
def update_match_index(self, peer_id, new_match_index):
self._match_index[peer_id] = max(self._match_index[peer_id],
new_match_index)
def commit_index(self):
return self._commit_index
def update_commit_index(self, new_commit_index):
self._commit_index = new_commit_index
def no_commands_to_apply(self):
return self._last_applied == self._commit_index
def last_applied(self):
return self._last_applied
def update_last_applied(self):
self._last_applied = self._commit_index
def clear(self):
self._next_index.clear()
self._match_index.clear()
def term(self):
return self._current_term
def add_voter(self, peer_id):
self._voters.add(peer_id)
def grant_vote(self, peer_id):
if not self._voted_for or self._voted_for == peer_id:
self._voted_for = peer_id
return True
return False
def number_of_voters(self):
return len(self._voters)
def update_leader(self, leader):
self._leader = leader
def is_leader_alive(self):
return self._leader is not None
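# Illustrative sketch (not part of chillaxd): the transition sequence the
# methods above are designed for. ``server`` stands for an object providing
# the timers and broadcast hooks ServerState expects, and ``cluster_size`` is
# a made-up name for the number of peers.
#
#     state = ServerState(server, log, {}, "tcp://127.0.0.1:27001")
#     state.switch_to_candidate()               # term += 1, votes for itself
#     state.add_voter("tcp://127.0.0.1:27002")
#     if state.number_of_voters() > cluster_size // 2:
#         state.switch_to_leader()              # starts heartbeating
#     # on seeing a higher term or a valid append entry:
#     state.switch_to_follower(m_term=state.term(), m_leader="tcp://127.0.0.1:27003")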
class InvalidState(Exception):
    """Exception raised when the server tries to perform an action which
    is not allowed in its current state.
"""
| apache-2.0 | -6,309,576,981,796,895,000 | 32.809735 | 77 | 0.621777 | false | 3.930556 | false | false | false |
djnugent/GameNet | duel_dqn_cartpole.py | 1 | 2104 | import numpy as np
import gym
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.optimizers import Adam
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory
ENV_NAME = 'CartPole-v0'
# Get the environment and extract the number of actions.
env = gym.make(ENV_NAME)
np.random.seed(123)
env.seed(123)
nb_actions = env.action_space.n
# Next, we build a very simple model regardless of the dueling architecture.
# If you enable the dueling network in DQN, DQN will build a dueling network based on your model automatically.
# Alternatively, you can build a dueling network yourself and turn off the dueling network in DQN.
model = Sequential()
model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(nb_actions, activation='linear'))
print(model.summary())
# Finally, we configure and compile our agent. You can use every built-in Keras optimizer and
# even the metrics!
memory = SequentialMemory(limit=50000, window_length=1)
policy = BoltzmannQPolicy()
# enable the dueling network
# you can specify the dueling_type to one of {'avg','max','naive'}
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=10,
enable_dueling_network=True, dueling_type='avg', target_model_update=1e-2, policy=policy)
dqn.compile(Adam(lr=1e-3), metrics=['mae'])
# Okay, now it's time to learn something! Visualization is disabled here because it
# slows down training quite a lot; set visualize=True to watch. You can always safely
# abort the training prematurely using Ctrl + C.
dqn.fit(env, nb_steps=50000, visualize=False, verbose=2)
# After training is done, we save the final weights.
dqn.save_weights('duel_dqn_{}_weights.h5f'.format(ENV_NAME), overwrite=True)
# Finally, evaluate our algorithm for 5 episodes.
dqn.test(env, nb_episodes=5, visualize=False)
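# Illustrative alternative (not part of the original script): dueling_type can
# also be 'max' or 'naive'. A hedged sketch of the same agent with 'max'
# aggregation; all other parameters are kept identical to the run above.
#
#     dqn_max = DQNAgent(model=model, nb_actions=nb_actions, memory=memory,
#                        nb_steps_warmup=10, enable_dueling_network=True,
#                        dueling_type='max', target_model_update=1e-2,
#                        policy=policy)
#     dqn_max.compile(Adam(lr=1e-3), metrics=['mae'])
#     dqn_max.fit(env, nb_steps=50000, visualize=False, verbose=2)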
| gpl-3.0 | 6,829,196,167,816,234,000 | 36.254545 | 106 | 0.738593 | false | 3.168675 | false | false | false |
indodutch/sim-city-client | simcity/ensemble.py | 2 | 2821 | # SIM-CITY client
#
# Copyright 2015 Netherlands eScience Center, Jan Bot
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Ensemble metadata
"""
def ensemble_view(task_db, name, version, url=None, ensemble=None):
"""
Create a view for an ensemble.
This checks if the view already exists. If not, a new view is created with
a new design document name. The view is then called all_docs under that
design document.
@param task_db: task database
@param name: simulator name
@param version: simulator version
@param url: base url of the database. If none, use the task database url.
@param ensemble: ensemble name. If None, no selection on ensemble is made.
@return design document
"""
if ensemble is None:
design_doc = '{0}_{1}'.format(name, version)
ensemble_condition = ''
else:
design_doc = '{0}_{1}_{2}'.format(name, version, ensemble)
ensemble_condition = ' && doc.ensemble === "{0}"'.format(ensemble)
doc_id = '_design/{0}'.format(design_doc)
try:
task_db.get(doc_id)
except ValueError:
if url is None:
url = task_db.url
if not url.endswith('/'):
url += '/'
map_fun = '''
function(doc) {{
if (doc.type === "task" && doc.name === "{name}" &&
doc.version === "{version}" && !doc.archive
{ensemble_condition}) {{
emit(doc._id, {{
_id: doc._id,
_rev: doc._rev,
url: "{url}" + doc._id,
error: doc.error,
lock: doc.lock,
done: doc.done,
input: doc.input,
files: doc.files,
_attachments: doc._attachments,
typeUrl: doc.typeUrl,
defaultFeatureType: doc.defaultFeatureType
}});
}}
}}'''.format(name=name, version=version,
ensemble_condition=ensemble_condition, url=url)
task_db.add_view('all_docs', map_fun, design_doc=design_doc)
return design_doc
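# Illustrative usage sketch (not part of the original module). It assumes a
# CouchDB-backed task database object exposing get()/add_view() as used above;
# the simulator name, version and ensemble label are made up.
#
#     design_doc = ensemble_view(task_db, 'matsim', '0', ensemble='run-2015-06')
#     # tasks for that ensemble can then be read from
#     #     <db url>/_design/<design_doc>/_view/all_docs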
| apache-2.0 | -6,904,070,903,719,162,000 | 33.402439 | 78 | 0.564693 | false | 4.223054 | false | false | false |
sibis-platform/sibis-platform.github.io | pelicanconf.py | 1 | 3272 | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
import os
AUTHOR = "SRI International"
SITENAME = "Scalable Informatics for Biomedical Imaging Studies"
SITESUBTITLE = ""
SITEURL = ""
TIMEZONE = "America/Vancouver"
DEFAULT_LANG = "en"
conda_env = os.environ.get('CONDA_PREFIX', "")
# Theme
if conda_env == "":
THEME = "/Users/nicholsn/Repos/pelican-themes/pelican-bootstrap3"
else:
THEME = conda_env + "/lib/python2.7/site-packages/pelican-themes/pelican-bootstrap3"
# Theme specific config
MENUITEMS = [['Scalable Informatics for Biomedical Imaging Studies', '/index.html'],
['About', '/pages/about.html'],
['Team', '/pages/team.html'],
['Contact', '/pages/contact.html']]
BOOTSTRAP_THEME = "spacelab"
PYGMENTS_STYLE = 'solarizedlight'
SITELOGO = "images/logo/SIBIS-logo-header.png"
SITELOGO_SIZE = "60%"
HIDE_SITENAME = True
#DISPLAY_BREADCRUMBS = True
#DISPLAY_CATEGORY_IN_BREADCRUMBS = True
BOOTSTRAP_NAVBAR_INVERSE = False
FAVICON = "images/logo/favicon.png"
DISPLAY_ARTICLE_INFO_ON_INDEX = True
ABOUT_ME = ""
AVATAR = ""
CC_LICENSE = "CC-BY"
SHOW_ARTICLE_AUTHOR = True
SHOW_ARTICLE_CATEGORY = True
USE_PAGER = True
BOOTSTRAP_FLUID = True
RELATED_POSTS_MAX = 10
USE_OPEN_GRAPH = True
# Notebook Rendering
NOTEBOOK_DIR = 'notebooks'
EXTRA_HEADER = open('_nb_header.html').read().decode('utf-8')
CUSTOM_CSS = 'static/custom.css'
# Template settings
DISPLAY_PAGES_ON_MENU = False
DISPLAY_CATEGORIES_ON_MENU = False
DIRECT_TEMPLATES = ('index', 'categories', 'authors', 'archives', 'search')
# Blogroll
LINKS = (('Pelican', 'http://getpelican.com/'),)
TAG_CLOUD_MAX_ITEMS = 20
DISPLAY_TAGS_ON_SIDEBAR = True
DISPLAY_TAGS_INLINE = True
DISPLAY_CATEGORIES_ON_SIDEBAR = False
HIDE_SIDEBAR = True
# Articles per page
DEFAULT_PAGINATION = 10
RECENT_POST_COUNT = 5
# Plugins
if conda_env == "":
PLUGIN_PATHS = ["/Users/nicholsn/Repos/pelican-plugins"]
else:
PLUGIN_PATHS = [conda_env + "/lib/python2.7/site-packages/pelican-plugins"]
PLUGINS = ['related_posts', 'tipue_search', 'liquid_tags.img',
'liquid_tags.video', 'liquid_tags.youtube',
'liquid_tags.vimeo', 'liquid_tags.include_code',
'liquid_tags.notebook']
# Static paths and cname mapping
PATH = "content"
STATIC_PATHS = ['images', 'extra/custom.css', 'form']
EXTRA_PATH_METADATA = {
'extra/custom.css': {'path': 'static/custom.css'}
}
PAGE_PATHS = ['pages']
ARTICLE_PATHS = ['articles']
ARTICLE_EXCLUDES = ['.']
# Social widget
SOCIAL = (('Github', 'https://github.com/sibis-platform'),)
# Disqus config
DISQUS_SITENAME = ""
# Addthis
ADDTHIS_PROFILE = ""
ADDTHIS_DATA_TRACK_ADDRESSBAR = False
# Github
GITHUB_USER = "sibis-platform"
GITHUB_REPO_COUNT = 3
GITHUB_SKIP_FORK = True
GITHUB_SHOW_USER_LINK = True
ARTICLE_EDIT_LINK = 'https://github.com/sibis-platform/sibis-platform.github.io/blob/gh-pages/content/%(slug)s.md'
# Google registration
GOOGLE_SEARCH = ""
GOOGLE_ANALYTICS_UNIVERSAL = ""
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = False
CATEGORY_FEED_ATOM = False
TRANSLATION_FEED_ATOM = None
# Uncomment following line if you want document-relative URLs when developing
RELATIVE_URLS = True
| apache-2.0 | -3,414,933,232,028,860,000 | 26.266667 | 114 | 0.694377 | false | 2.87522 | false | false | false |
Codeepy/Share.it | foodbank/settings.py | 1 | 2503 | """
Django settings for foodbank project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'h4ker#*e3a^ke)-rc4#$h4^j27ct^l8uktv&o0!tid+p%@e_+0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'share_it',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'foodbank.urls'
WSGI_APPLICATION = 'foodbank.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
# STATIC_ROOT = os.path.join(BASE_DIR, "static/")
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 465
EMAIL_HOST_USER = '[email protected]'
EMAIL_HOST_PASSWORD = 'fr33l0v3'
EMAIL_USE_SSL = True
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
'django.contrib.auth.context_processors.auth',
) | apache-2.0 | 7,187,523,240,684,971,000 | 23.31068 | 71 | 0.718738 | false | 3.156368 | false | false | false |
rvanlaar/easy-transifex | src/transifex/transifex/resources/formats/registry.py | 1 | 7208 | # -*- coding: utf-8 -*-
"""
Register the available formats and their capabilities.
"""
import magic
from django.conf import settings
from transifex.txcommon import import_to_python
from transifex.txcommon.log import logger
class _FormatsRegistry(object):
"""Registry class for the formats."""
def __init__(self, methods=None, handlers=None):
"""It initializes the variables of the registry.
The variables are:
methods: A dictionary of the available methods.
handlers: A dictionary of the available handlers.
"""
self.methods = methods or settings.I18N_METHODS
self.handlers = {}
handlers = handlers or settings.I18N_HANDLER_CLASS_NAMES
for method, klass in handlers.iteritems():
self.handlers[method] = import_to_python(klass)
def _string_to_list(self, string):
"""
Convert a string of multiple items separated by commas and spaces
to a list.
"""
return string.split(', ')
def add_handler(self, m, klass, priority=False):
"""Register a new handler for the type m.
Args:
m: A i18n_method.
klass: A handler class for the specified method.
priority: if this is a priority request, then register the
handler for the method anyway. Else, ignore the request.
Returns:
True, if the handler was added successfully, False otherwise.
"""
if m in self.handlers and not priority:
return False
self.handlers[m] = klass
return True
@property
def available_methods(self):
"""Get the available methods."""
methods = self.methods.copy()
del methods['POT']
return methods
def descriptions(self):
"""Get the available descriptions along with the
method they correspond to.
"""
return [
(m, '%s (%s)' % (v['description'], v['file-extensions']))
for m, v in self.methods.items()
if m != 'POT'
]
def extensions_for(self, m):
"""Get the extensions for the specified method.
Returns:
A list of file extensions or an empty list,
in case no such method exists.
"""
if m not in self.methods:
return []
return self._string_to_list(self.methods[m]['file-extensions'])
def guess_method(self, filename=None, mimetype=None):
"""
Return an appropriate Handler class for given file.
The handler is selected based on libmagic and the file extension
or the mime type.
Args:
filename: The path to the file.
mimetype: The mime type of the file.
Returns:
An appropriate handler class for the file.
"""
i18n_type = None
if filename is not None:
try:
m = magic.Magic(mime=True)
# guess mimetype and remove charset
mime_type = m.from_file(filename)
except AttributeError, e:
m = magic.open(magic.MAGIC_NONE)
m.load()
mime_type = m.file(filename)
m.close()
except IOError, e:
# file does not exist in the storage
mime_type = None
except Exception, e:
logger.error("Uncaught exception: %s" % e.message, exc_info=True)
# We don't have the actual file. Depend on the filename only
mime_type = None
for method, info in self.methods.items():
if filter(filename.endswith, info['file-extensions'].split(', ')) or\
mime_type in info['mimetype'].split(', '):
i18n_type = method
break
elif mimetype is not None:
for method in self.handlers:
if mimetype in self.mimetypes_for(method):
i18n_type = method
break
return i18n_type
def is_supported(self, m):
"""Check whether the method is supported.
Args:
m: The method to check.
Returns:
True, if it is supported. Else, False.
"""
return m in self.methods
def mimetypes_for(self, m):
"""Get the mimetypes for the specified method.
Args:
m: The method which we want the mimetypes for.
Returns:
The mimetypes for the method or an empty list.
"""
if m not in self.methods:
return []
return self._string_to_list(self.methods[m]['mimetype'])
def handler_for(self, m):
"""Return a handler for the i18n type specified.
Args:
m: A i18n_method.
filename: The filename (if available).
Returns:
A particular handler for the method or None, in case the method
has not been registered.
"""
if m not in self.handlers:
return None
return self.handlers[m]()
def appropriate_handler(self, resource, language, **kwargs):
"""Return the appropriate handler based on the arguments.
The arguments may be the filename of the resource or whether
a pot file has been requested.
Args:
resource: The resource the handler is for.
language: The language the handler is asked for.
Returns:
A subclass of formats.core.Handler or None.
"""
method = resource.i18n_type
handler = registry.handler_for
# Only PO/POT files need special treatment
if method != 'PO':
return handler(method)
# Override the behavior manually
wants_pot = kwargs.get('wants_pot')
if wants_pot:
return handler('POT')
# Check file extension
filename = kwargs.get('filename')
if filename is not None:
if filename.endswith('po'):
return handler('PO')
else:
return handler('POT')
# Return POT, when no language has been asked
if language is None:
return handler('POT')
return handler('PO')
# # Always return PO for non-source language files
# slang = resource.source_language
# if language != slang:
# return handler('PO')
# Should never be here
return None
def file_extension_for(self, resource, language):
"""Return the filename extension that should be used
for the specific resource-language pair.
"""
resource_method = resource.i18n_method
try:
if resource_method != 'PO':
return self.extensions_for(resource_method)[0]
if language is None:
return self.extensions_for('POT')[0]
return self.extensions_for(resource_method)[0]
except IndexError, e:
msg = "No extensions for resource %s: %s"
logger.error(msg % (resource, e), exc_info=True)
raise
registry = _FormatsRegistry()
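# Illustrative usage sketch (not part of the original module): how callers are
# expected to query the registry. The filename is hypothetical and the handler
# returned depends on the I18N_METHODS / I18N_HANDLER_CLASS_NAMES settings.
#
#     method = registry.guess_method(filename='locale/el/LC_MESSAGES/django.po')
#     if method and registry.is_supported(method):
#         handler = registry.handler_for(method)
#         extensions = registry.extensions_for(method)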
| bsd-2-clause | 56,344,700,331,536,184 | 31.178571 | 85 | 0.561182 | false | 4.668394 | false | false | false |
jonathanslenders/python-prompt-toolkit | examples/progress-bar/many-parallel-tasks.py | 1 | 1591 | #!/usr/bin/env python
"""
More complex demonstration of what's possible with the progress bar.
"""
import threading
import time
from prompt_toolkit import HTML
from prompt_toolkit.shortcuts import ProgressBar
def main():
with ProgressBar(
title=HTML("<b>Example of many parallel tasks.</b>"),
bottom_toolbar=HTML("<b>[Control-L]</b> clear <b>[Control-C]</b> abort"),
) as pb:
def run_task(label, total, sleep_time):
for i in pb(range(total), label=label):
time.sleep(sleep_time)
threads = [
threading.Thread(target=run_task, args=("First task", 50, 0.1)),
threading.Thread(target=run_task, args=("Second task", 100, 0.1)),
threading.Thread(target=run_task, args=("Third task", 8, 3)),
threading.Thread(target=run_task, args=("Fourth task", 200, 0.1)),
threading.Thread(target=run_task, args=("Fifth task", 40, 0.2)),
threading.Thread(target=run_task, args=("Sixth task", 220, 0.1)),
threading.Thread(target=run_task, args=("Seventh task", 85, 0.05)),
threading.Thread(target=run_task, args=("Eight task", 200, 0.05)),
]
for t in threads:
t.daemon = True
t.start()
# Wait for the threads to finish. We use a timeout for the join() call,
# because on Windows, join cannot be interrupted by Control-C or any other
# signal.
for t in threads:
while t.is_alive():
t.join(timeout=0.5)
if __name__ == "__main__":
main()
| bsd-3-clause | -3,867,059,811,151,878,000 | 33.586957 | 82 | 0.583281 | false | 3.7 | false | false | false |
JoeMido/networking-midonet | midonet/neutron/db/migration/alembic_migration/versions/4105f6d52b82_add_router_service_insertion_tables.py | 2 | 1984 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2014 Midokura SARL.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Add router service insertion tables
Revision ID: 4105f6d52b82
Revises: 4cedd30aadf6
Create Date: 2014-12-24 19:25:38.042068
"""
# revision identifiers, used by Alembic.
revision = '4105f6d52b82'
down_revision = '4cedd30aadf6'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'midonet_servicerouterbindings',
sa.Column('resource_id', sa.String(length=36), nullable=False),
sa.Column('resource_type', sa.String(length=36), nullable=False),
sa.Column('router_id', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['router_id'], [u'routers.id'],
name='midonet_servicerouterbindings_ibfk_1'),
sa.PrimaryKeyConstraint('resource_id', 'resource_type'))
op.create_table(
'midonet_routerservicetypebindings',
sa.Column('router_id', sa.String(length=36), nullable=False),
sa.Column('service_type_id', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(
['router_id'], ['routers.id'],
name='midonet_routerservicetypebindings_ibfk_1'),
sa.PrimaryKeyConstraint(u'router_id'))
def downgrade():
op.drop_table(u'midonet_routerservicetypebindings')
op.drop_table(u'midonet_servicerouterbindings')
| apache-2.0 | 7,861,029,487,135,160,000 | 35.740741 | 78 | 0.690524 | false | 3.574775 | false | false | false |
mtunique/knows | algorithm/std_functions.py | 1 | 2587 | __author__ = 'mt'
# -*- coding: utf-8 -*-
import math
from counts import *
import jieba
import jieba.analyse
from bs4 import BeautifulSoup
def to_string(content, strip=True):
return BeautifulSoup(content).html.body.get_text('\n', strip=strip)
def _cos(x, y):
ans = 0.
len_x = 0
len_y = 0
for i in range(len(x)):
ans += x[i] * y[i]
len_x += x[i] ** 2
len_y += y[i] ** 2
return math.sqrt(math.fabs(ans)) / math.sqrt(len_x) / math.sqrt(len_y)
def cos(x, y):
if len(x) == len(y):
return _cos(x, y)
else:
print "Vectors' lengths are different"
def parse_doc_list(docs, vocab):
"""
@param docs: A List of documents. Each document must be a string
@param vocab: No_stop_words vocabularies, that's to say only when the word is in this list will it not be ignored
@return:
Returns a pair of lists of lists.
The first, wordids, says what vocabulary tokens are present in
each document. wordids[i][j] gives the jth unique token present in
document i. (Don't count on these tokens being in any particular
order.)
The second, wordcts, says how many times each vocabulary token is
present. wordcts[i][j] is the number of times that the token given
by wordids[i][j] appears in document i.
"""
#jieba.initialize()
D = len(docs)
wordids = list()
wordcts = list()
for d in range(0, D):
words = jieba.cut(docs[d])
ddict = dict()
for word in words:
if word in vocab:
wordtoken = vocab[word]
if not wordtoken in ddict:
ddict[wordtoken] = 0
ddict[wordtoken] += 1
wordids.append([i-1 for i in ddict.keys()])
wordcts.append(ddict.values())
return wordids, wordcts
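# Illustrative sketch (not part of the original file) of the shapes the
# docstring above describes, using a made-up two-token vocabulary that maps
# words to 1-based ids.
#
#     vocab = {'data': 1, 'mining': 2}
#     wordids, wordcts = parse_doc_list(['data mining data'], vocab)
#     # wordids -> [[0, 1]]  (0-based ids of the vocab tokens seen in doc 0)
#     # wordcts -> [[2, 1]]  (matching counts; pair order follows dict order)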
def doc_to_vector(doc, vocab):
ids, count = parse_doc_list([doc], vocab)
ids, count = ids[0], count[0]
temp_dict = {}
if len(ids):
for index in range(len(ids)):
temp_dict.setdefault(str(ids[index]), count[index])
ans = []
for tmp_id in range(VECTOR_LEN):
try:
ans.append(temp_dict[str(tmp_id)])
except KeyError:
ans.append(0.)
return ans
def get_base_vectors(db=None):
if not db:
from dbs.mongodb import db
return [db.vector.find_one({'_id': str(i)})['v'] for i in range(20)]
def vector_to_topic_vector(vector, base_vector):
return [cos(vector, base_vector[i]) for i in range(20)]
if __name__ == '__main__':
print cos([0.1, -0.1], [1.1, -0.9]) | gpl-2.0 | 7,264,268,008,693,762,000 | 25.141414 | 117 | 0.58678 | false | 3.262295 | false | false | false |
isb-cgc/ISB-CGC-Webapp | visualizations/data_request.py | 1 | 6140 | ###
# Copyright 2015-2019, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from django.conf import settings
#############################################
# this is file is an abstraction for all visualizations to access for gathering data.
#############################################
# static endpoints
MAF_ENDPOINT_URI_TEMPLATE = settings.BASE_API_URL + '/_ah/api/maf_api/v1/maf_search?gene={gene}&{tumor_parameters}'
BQ_ENDPOINT_URL = settings.BASE_API_URL + '/_ah/api/bq_api/v1'
INTERPRO_BQ_ENDPOINT_URI_TEMPLATE = settings.BASE_API_URL + '/_ah/api/bq_api/v1/bq_interpro?uniprot_id={uniprot_id}'
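# Illustrative sketch (not part of the original file): how the URI templates
# above are meant to be filled in. The gene symbol, tumor parameter string and
# UniProt id are made-up examples.
#
#     maf_uri = MAF_ENDPOINT_URI_TEMPLATE.format(
#         gene='EGFR', tumor_parameters='tumor_type=GBM&tumor_type=LGG')
#     interpro_uri = INTERPRO_BQ_ENDPOINT_URI_TEMPLATE.format(uniprot_id='P00533')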
# Static definitions
SEQPEEK_VIEW_DEBUG_MODE = False
SAMPLE_ID_FIELD_NAME = 'tumor_sample_barcode'
TUMOR_TYPE_FIELD = "tumor"
COORDINATE_FIELD_NAME = 'amino_acid_position'
PROTEIN_DOMAIN_DB = 'PFAM'
# Static definitions
friendly_name_map = {
'disease_code':'Disease Code',
'gender':'Gender',
'mirnPlatform':'microRNA expression platform',
'gexpPlatform':'gene (mRNA) expression platform',
'methPlatform':'DNA methylation platform',
'rppaPlatform':'protein quantification platform',
'cnvrPlatform':'copy-number platform',
'age_at_initial_pathologic_diagnosis':'age at diagnosis',
'hsa_miR_146a_5p':'hsa-miR-146a-5p expression (log2[normalized_counts+1])',
'hsa_miR_7_7p':'hsa-miR-7-5p expression (log2[normalized_counts+1])',
'CNVR_EGFR':'EGFR copy-number (log2[CN/2])',
'EGFR_chr7_55086714_55324313':'EGFR expression (log2[normalized_counts+1])',
'EGFR_chr7_55086714_55324313_EGFR':'EGFR protein quantification',
'EGFR_chr7_55086288_cg03860890_TSS1500_Island':'EGFR methylation (TSS1500, CpG island)',
'EGFR_chr7_55086890_cg14094960_5pUTR_Island':"EGFR methylation (5' UTR, CpG island)",
'EGFR_chr7_55089770_cg10002850_Body_SShore':'EGFR methylation (first intron, cg10002850)',
'EGFR_chr7_55177623_cg18809076_Body':'EGFR methylation (first intron, cg18809076)'
}
numerical_attributes = [
'age_at_initial_pathologic_diagnosis',
'hsa_miR_146a_5p',
'hsa_miR_7_7p',
'CNVR_EGFR',
'EGFR_chr7_55086714_55324313',
'EGFR_chr7_55086714_55324313_EGFR',
'EGFR_chr7_55086288_cg03860890_TSS1500_Island',
'EGFR_chr7_55086890_cg14094960_5pUTR_Island',
'EGFR_chr7_55089770_cg10002850_Body_SShore',
'EGFR_chr7_55177623_cg18809076_Body'
]
categorical_attributes = [
'disease_code',
'gender',
'mirnPlatform',
'gexpPlatform',
'methPlatform',
'rppaPlatform',
'cnvrPlatform'
]
fm_friendly_name_map = {
'percent_lymphocyte_infiltration':'Percent Lymphocyte Infiltration',
'percent_monocyte_infiltration':'Percent Monocyte Infiltration',
'percent_necrosis':'Percent Necrosis',
'percent_neutrophil_infiltration':'Percent Neutrophil Infiltration',
'percent_normal_cells':'Percent Normal Cells',
'percent_stromal_cells':'Percent Stromal Cells',
'percent_tumor_cells':'Percent Tumor Cells',
'percent_tumor_nuclei':'Percent Tumor Nuclei',
'age_at_initial_pathologic_diagnosis':'Age at Diagnosis',
'days_to_birth':'Days to Birth',
'days_to_initial_pathologic_diagnosis':'Days to Diagnosis',
'year_of_initial_pathologic_diagnosis':'Year of Diagnosis',
'days_to_last_known_alive':'Days to Last Known Alive',
'tumor_necrosis_percent':'Tumor Necrosis Percent',
'tumor_nuclei_percent':'Tumor Nuclei Percent',
'tumor_weight':'Tumor Weight',
'days_to_last_followup':'Days to Last Followup',
'gender':'Gender',
'history_of_neoadjuvant_treatment':'History of Neoadjuvant Treatment',
'icd_o_3_histology':'ICD-O-3 Code',
'other_dx':'Prior Diagnosis',
'vital_status':'Vital Status',
'country':'Country',
'disease_code':'Disease Code',
'histological_type':'Histological Type',
'icd_10':'ICD-10 Category',
'icd_o_3_site':'ICD-O-3 Site',
'tumor_tissue_site':'Tumor Tissue Site',
'tumor_type':'Tumor Type',
'person_neoplasm_cancer_status':'Neoplasm Cancer Status',
'pathologic_N':'Pathologic N Stage',
'radiation_therapy':'Radiation Therapy',
'pathologic_T':'Pathologic T Stage',
'race':'Race',
'ethnicity':'Ethnicity',
'sampleType':'Sample Type',
'DNAseq_data':'DNA Sequencing Data',
'mirnPlatform':'microRNA expression platform',
'gexpPlatform':'gene (mRNA) expression platform',
'methPlatform':'DNA methylation platform',
'rppaPlatform':'protein quantification platform',
'cnvrPlatform':'copy-number platform',
}
fm_numerical_attributes = [
'percent_lymphocyte_infiltration',
'percent_monocyte_infiltration',
'percent_necrosis',
'percent_neutrophil_infiltration',
'percent_normal_cells',
'percent_stromal_cells',
'percent_tumor_cells',
'percent_tumor_nuclei',
'age_at_initial_pathologic_diagnosis',
'days_to_birth',
'days_to_initial_pathologic_diagnosis',
'year_of_initial_pathologic_diagnosis',
'days_to_last_known_alive',
'tumor_necrosis_percent',
'tumor_nuclei_percent',
'tumor_weight',
'days_to_last_followup'
]
fm_categorical_attributes = [
'gender',
'history_of_neoadjuvant_treatment',
'icd_o_3_histology',
'other_dx',
'vital_status',
'country',
'disease_code',
'histological_type',
'icd_10',
'icd_o_3_site',
'tumor_tissue_site',
'tumor_type',
'person_neoplasm_cancer_status',
'pathologic_N',
'radiation_therapy',
'pathologic_T',
'race',
'ethnicity',
'sampleType',
'DNAseq_data',
'mirnPlatform',
'cnvrPlatform',
'methPlatform',
'gexpPlatform',
'rppaPlatform'
]
| apache-2.0 | -5,620,259,480,021,835,000 | 34.697674 | 116 | 0.682899 | false | 2.839963 | false | false | false |
magfest/ubersystem | uber/badge_funcs.py | 1 | 5493 | from uber.models import Attendee
from uber.config import c
def check_range(badge_num, badge_type):
if badge_num is not None:
try:
badge_num = int(badge_num)
except Exception:
return '"{}" is not a valid badge number (should be an integer)'.format(badge_num)
if badge_num:
min_num, max_num = c.BADGE_RANGES[int(badge_type)]
if not min_num <= badge_num <= max_num:
return '{} badge numbers must fall within the range {} - {}'.format(
dict(c.BADGE_OPTS)[badge_type], min_num, max_num)
def is_badge_unchanged(attendee, old_badge_type, old_badge_num):
old_badge_num = int(old_badge_num or 0) or None
return old_badge_type == attendee.badge_type and (
not attendee.badge_num or old_badge_num == attendee.badge_num)
def reset_badge_if_unchanged(attendee, old_badge_type, old_badge_num):
"""
The "change badge" page can pass an empty string for the badge number,
but if nothing actually changed about the attendee's badge, we need the
old number back!
"""
if is_badge_unchanged(attendee, old_badge_type, old_badge_num):
attendee.badge_num = old_badge_num
return 'Attendee is already {} with badge {}'.format(c.BADGES[old_badge_type], old_badge_num)
# TODO: returning (result, error) is not a convention we're using anywhere else,
# so maybe change this to be more idiomatic if convenient, but not a big deal
def get_badge_type(badge_num):
if not c.NUMBERED_BADGES:
return c.ATTENDEE_BADGE, ''
else:
try:
for (badge_type, (lowest, highest)) in c.BADGE_RANGES.items():
if int(badge_num) in range(lowest, highest + 1):
return badge_type, ''
return None, "{0!r} isn't a valid badge number; it's not in the range of any badge type".format(badge_num)
except Exception:
return None, '{0!r} is not a valid integer'.format(badge_num)
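# Illustrative sketch (not part of the original module): what get_badge_type
# returns, assuming a hypothetical c.STAFF_BADGE whose range in c.BADGE_RANGES
# is (1, 399) and that c.NUMBERED_BADGES is enabled.
#
#     badge_type, error = get_badge_type(25)     # -> (c.STAFF_BADGE, '')
#     badge_type, error = get_badge_type('abc')  # -> (None, "'abc' is not a valid integer")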
def get_real_badge_type(badge_type):
return c.ATTENDEE_BADGE if badge_type in [c.PSEUDO_DEALER_BADGE, c.PSEUDO_GROUP_BADGE] else badge_type
# TODO: perhaps a check_leaderless() for checking for leaderless groups, since those don't get emails
# run through all badges and check three things:
# 1) all badge numbers are in the ranges set by c.BADGE_RANGES
# 2) no badge number is assigned to more than one badge
# 3) there are no gaps in badge numbers within each badge type
def badge_consistency_check(session):
errors = []
# check 1, see if anything is out of range, or has a duplicate badge number
badge_nums_seen = []
attendees = session.query(Attendee).filter(Attendee.first_name != '', Attendee.badge_num != 0) \
.order_by('badge_num').all()
for attendee in attendees:
out_of_range_error = check_range(attendee.badge_num, attendee.badge_type)
if out_of_range_error:
msg = '{a.full_name}: badge #{a.badge_num}: {err}'.format(a=attendee, err=out_of_range_error)
errors.append(msg)
if attendee.badge_num in badge_nums_seen:
msg = '{a.full_name}: badge #{a.badge_num}: Has been assigned the same badge number ' \
'of another badge, which is not supposed to happen'.format(a=attendee)
errors.append(msg)
badge_nums_seen.append(attendee.badge_num)
# check 2: see if there are any gaps in each of the badge ranges
for badge_type_val, badge_type_desc in c.BADGE_OPTS:
prev_badge_num = -1
prev_attendee_name = ""
attendees = session.query(Attendee) \
.filter(Attendee.badge_type == badge_type_val, Attendee.first_name != '', Attendee.badge_num != 0) \
.order_by('badge_num').all()
for attendee in attendees:
if prev_badge_num == -1:
prev_badge_num = attendee.badge_num
prev_attendee_name = attendee.full_name
continue
if attendee.badge_num - 1 != prev_badge_num:
msg = "gap in badge sequence between " + badge_type_desc + " " + \
"badge# " + str(prev_badge_num) + "(" + prev_attendee_name + ")" + " and " + \
"badge# " + str(attendee.badge_num) + "(" + attendee.full_name + ")"
errors.append(msg)
prev_badge_num = attendee.badge_num
prev_attendee_name = attendee.full_name
return errors
def needs_badge_num(attendee=None, badge_type=None):
"""
Takes either an Attendee object, a badge_type, or both and returns whether or not the attendee should be
    assigned a badge number. If neither parameter is given, returns None.
:param attendee: Passing an existing attendee allows us to check for a new badge num whenever the attendee
is updated, particularly for when they are checked in.
:param badge_type: Must be an integer. Allows checking for a new badge number before adding/updating the
Attendee() object.
    :return: True if a badge number should be assigned, otherwise False or None.
"""
if not badge_type and attendee:
badge_type = attendee.badge_type
elif not badge_type and not attendee:
return None
if c.NUMBERED_BADGES:
if attendee:
return (badge_type in c.PREASSIGNED_BADGE_TYPES or attendee.checked_in) \
and attendee.paid != c.NOT_PAID and attendee.badge_status != c.INVALID_STATUS
else:
return badge_type in c.PREASSIGNED_BADGE_TYPES
| agpl-3.0 | 3,580,210,522,217,002,000 | 40.931298 | 118 | 0.631167 | false | 3.562257 | false | false | false |
sweet-st/seedvpn | test/hander.py | 1 | 2274 | import socket
import logging
import AES_Encrypt
import shell
'''
class Hander():
def __init__(self, ip, port):
self.ip = ip
self.port = port
self.fd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def login():
pass
'''
class Hander():
def __init__(self, IS_SERVER, config_dictionary):
if not IS_SERVER:
self.IS_SERVER = False
self.ip = config_dictionary["addr"]
self.port = config_dictionary["port"]
self.password = config_dictionary["password"]
else:
self.IS_SERVER = True
self.password = config_dictionary["password"]
self.udpfd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def login(self):
data = "LOGIN:" + self.password + ":" + "tihs is random message"
en_data = AES_Encrypt.encrypt(data)
self.udpfd.sendto(en_data, (self.ip, self.port))
def check_loggin(self, data, log_times):
de_data = AES_Encrypt.decrypt(data)
try:
de_data = de_data.decode()
except:
logging.debug("check login decode error")
return None
# success de_data : LOGIN:SUCCESS:10.10.0.2
# error de_data : LOGIN:PASSWORD
if de_data.startswith("LOGIN:SUCCESS"):
recv_ip = de_data.split(":")[2]
return recv_ip
else:
return 1 # login error
def check_passwd(self, data):
de_data = AES_Encrypt.decrypt(data)
try:
de_data = de_data.decode()
except UnicodeDecodeError:
logging.debug("de_data decode error")
return 1 # passwd decode error
        if de_data.startswith("LOGIN:") and de_data.split(":")[1] == self.password:
return 0 # password right
else:
return 2 # password error
def send_public_key(self, ip, port):
        if not self.IS_SERVER:
            ip = self.ip
            port = self.port
with open("public.pem", 'r') as public_pem:
public_key = public_pem.read()
en_public_key = AES_Encrypt.encrypt(public_key)
self.udpfd.sendto(en_public_key, (ip, port))
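# Illustrative sketch (not part of the original file): the client-side login
# round trip the methods above implement. The server address and password are
# made up and socket handling is simplified.
#
#     client = Hander(False, {"addr": "203.0.113.10", "port": 5353,
#                             "password": "secret"})
#     client.login()                          # sends LOGIN:<password>:<nonce>
#     data, _ = client.udpfd.recvfrom(2048)
#     result = client.check_loggin(data, 0)   # assigned IP string on success, 1 on error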
| gpl-3.0 | -3,187,089,210,640,430,600 | 28.921053 | 79 | 0.543975 | false | 3.860781 | false | false | false |
inbloom/legacy-projects | lri-b/neorest.py | 2 | 11904 | #!/usr/bin/env python
# Copyright 2012-2013 inBloom, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import lridb,json,schema,datetime,copy,prop,timer,time,traceback
import py2neo.neo4j,py2neo.cypher
class neorest(lridb.lridb):
def open(self):
self.log.debug(self.server_url)
self.ndb = py2neo.neo4j.GraphDatabaseService(self.server_url)
self.ndb.get_indexes(py2neo.neo4j.Node)
self.log.debug("RAW NODE COUNT =",self.ndb.get_node_count())
self.log.debug("RAW LINK COUNT =",self.ndb.get_relationship_count())
def close(self):
pass
def boot(self,create_indices=False):
self.schema = schema.schema(bootstrap_filenames = self.bootstrap_filenames,in_test_mode=self.in_test_mode)
if self.in_test_mode:
return
# Create a right hand side for literal properties stored in link
self.null_node=self.ndb.get_node(0)
# Make sure our indices are set up
#self.init_indices()
self.init_indices(create_indices=create_indices)
# Get our bootstrap schema
if create_indices:
self.schema.load_bootstrap() # First time load
self.log.debug(self.schema.errors)
else:
self.schema.load_from_neodb(self)
def init_indices(self,create_indices=True):
self.node_index = self.ndb.get_or_create_index(py2neo.neo4j.Node,"node_index")
self.link_index = self.ndb.get_or_create_index(py2neo.neo4j.Relationship,"link_index")
def create_entity(self,creator=None,max_tries=3):
self.log.debug("CREATE ENTITY")
if not creator:
creator=self.creator_guid
rec={}
rec["guid"]=lridb.random_guid()
rec["timestamp"] = datetime.datetime.utcnow().isoformat()
rec["creator"] = creator
self.log.debug("CREATE ENTITY VIA REST",rec)
success = False
tries = 0
        while not success and tries < max_tries:
try:
n = self.ndb.create(rec)[0]
success = True
except Exception, e:
neoerrs = traceback.format_exc().split("\n")
time.sleep(0.25)
tries +=1
if not success:
self.errors.append("Too many Neo4J BadRequest errors in node creation!")
self.errors.extend(neoerrs)
return None
self.node_index.add("guid",rec["guid"],n)
return n
def internal_properties(self,x):
return x.get_properties()
def links_of_node_gen(self,n):
for link in n.get_relationships():
yield link
def write_and_index_property_old(self,node,rec,primary_proptype,target_node,max_tries=3):
# Now create link to hold actual property and add the literal properties
success = False
tries = 0
while not success and tries < max_tries:
try:
link = node.create_relationship_to(target_node,primary_proptype,rec)
success = True
except Exception, e:
tries += 1
neoerrs = traceback.format_exc().split("\n")
time.sleep(0.25)
if not success:
self.errors.append("Too many Neo4J errors in relationship creation!")
self.errors.extend(neoerrs)
return None
link["rec"]=json.dumps(rec)
self.log.debug("CREATING LINK INDEX ENTRIES")
# Index links by from, guid, type, and value, and maybe to
for f in ["from","to","guid","proptype","value","timestamp","creator","alive","replaced_by","complete"]:
if f in rec:
if isinstance(rec[f],basestring):
self.link_index.add(f,rec[f].lower(),link)
else:
self.link_index.add(f,str(rec[f]).lower(),link)
self.log.debug("CREATED LINK INDEX ENTRY",f,rec[f])
return link
def write_and_index_property(self,node,rec,primary_proptype,target_node,max_tries=3):
# Now create link to hold actual property and add the literal properties
success = False
tries = 0
while not success and tries < max_tries:
try:
link = node.create_relationship_to(target_node,primary_proptype,rec)
success = True
except Exception, e:
tries += 1
neoerrs = traceback.format_exc().split("\n")
time.sleep(0.25)
if not success:
self.errors.append("Too many Neo4J errors in relationship creation!")
self.errors.extend(neoerrs)
return None
link["rec"]=json.dumps(rec)
self.log.debug("CREATING LINK INDEX ENTRIES")
# Index links by from, guid, type, and value, and maybe to
batch = py2neo.neo4j.WriteBatch(self.ndb)
for f in ["from","to","guid","proptype","value","timestamp","creator","alive","replaced_by","complete"]:
if f in rec:
if isinstance(rec[f],basestring):
batch.add_indexed_relationship(self.link_index,f,rec[f].lower(),link)
else:
batch.add_indexed_relationship(self.link_index,f,str(rec[f]).lower(),link)
self.log.debug("CREATED LINK INDEX ENTRY",f,rec[f])
batch.submit()
return link
def get_entity(self,guid):
hits = self.node_index.get("guid",guid)
if hits:
return hits[0]
return None
def get_prop(self,guid):
self.log.debug("GET PROP BY GUID:",guid)
hits = self.link_index.get("guid",guid)
self.log.debug(hits)
if hits:
return hits[0]
return None
def form_cypher_query(self,constraints,limit,skip):
# Normalize for non-strings and escape quotes for strings
clean_constraints = copy.deepcopy(constraints)
for k,v in clean_constraints.items():
if isinstance(v,bool) or isinstance(v,int) or isinstance(v,float):
clean_constraints[k]=str(clean_constraints[k]).lower()
elif isinstance(v,basestring):
clean_constraints[k] = v.replace('\\','\\\\').replace('"','\\"').lower()
wildcard_search=False
if 'proptype' in clean_constraints and clean_constraints['proptype'] in lridb.wildcard_allowed_properties and 'value' in clean_constraints and "*" in clean_constraints['value']:
value = clean_constraints['value']
value = value.replace('\\','\\\\').replace('"','\\"')
del clean_constraints['value']
wildcard_search=True
# First make a lucene query
lq = ' AND '.join([k+':"'+v+'"' for k,v in clean_constraints.items() if isinstance(v,basestring)])
self.log.debug("PROPERTY SEARCH LUCENE QUERY:",repr(lq))
# And then a cypher query from that
lq = lq.replace('\\','\\\\').replace('"','\\"')
        # If the value contained a "*" wildcard for a property that allows it,
        # match it case-insensitively with a regex WHERE clause
if wildcard_search:
where = ' WHERE r.value =~ "(?i)%s"' % (value)
else:
where = ""
#where = ''
q = 'START r=relationship:link_index("%s") %s RETURN r' % (lq,where)
#q = 'START r=relationship:link_index("%s") RETURN r' % (lq)
if skip:
q += " SKIP %d" % (skip)
if limit:
q += " LIMIT %d" % (limit)
return q.encode('utf-8')
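    # Illustrative sketch (not part of the original class) of the query text the
    # method above produces for a simple constraint set with no wildcard value.
    # Constraint order follows dict iteration, so the exact string may vary.
    #
    #     q = self.form_cypher_query({'proptype': 'name', 'alive': True}, 10, None)
    #     # q is roughly:
    #     # START r=relationship:link_index("proptype:\"name\" AND alive:\"true\"")  RETURN r LIMIT 10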
def link_search(self,constraints,limit=None,start=None,max_tries=3):
# We use cypher simply to support pagination
q = self.form_cypher_query(constraints,limit,start)
self.log.debug("LINK SEARCH CYPHER QUERY:",q)
# We try a few times due to py2neo bug that causes timeouts
success = False
tries = 0
while not success and tries < max_tries:
try:
hits, metadata = py2neo.cypher.execute(self.ndb,q)
success = True
except Exception, e:
tries += 1
neoerrs = traceback.format_exc().split("\n")
                self.log.debug("FAILING CYPHER QUERY =",repr(q),"-- TRYING %d more times." % (max_tries-tries))
time.sleep(0.1)
if not success:
self.errors.append("Too many Neo4J errors in cypher query execution!")
self.errors.extend(neoerrs)
return None
return [h[0] for h in hits] # Need only to return the first column
def update_property(self,oldrec=None,newrec={}):
self.log.debug("UPDATE PROP REC",oldrec)
if "proptype" in newrec and oldrec["proptype"] != newrec["proptype"]:
self.errors.append("UPDATE: Changing proptype is not allowed.")
oldrec["internal"].append("UPDATE: Changing proptype is not allowed.")
return None
# Create the record for our replacement property
finalrec={"proptype":oldrec["proptype"],
"creator":oldrec["creator"]}
for k in ["from","to","value","complete"]:
if k in newrec:
finalrec[k]=newrec[k]
elif k in oldrec:
finalrec[k]=oldrec[k]
if "to" in finalrec and "from" in finalrec and "value" in finalrec:
del finalrec["value"] # Can't be both link and literal
if newrec.get("alive") == False:
# This update is a property deletion
finalrec["alive"] = False
# Make the new property
self.log.debug("MAKING REPLACEMENT PROP REC",oldrec)
newp = prop.prop(rec=finalrec,db=self)
if newp.is_valid:
self.log.debug("CREATE UPDATED PROP:",finalrec)
newp.create(is_update=True)
if newp.errors:
self.errors.append("PROPERTY UPDATE: "+(" ".join(newp.errors)))
return None
self.log.debug("CREATE UPDATED PROP FINAL:",newp.link["rec"])
# Point old property to its replacement
oldrec["internal"]["replaced_by"] = newp.link["guid"]
#oldrec["internal"]["alive"] = False
oldrec["replaced_by"] = newp.link["guid"]
#oldrec["alive"] = False -- Don't make old property dead
oldrec["internal"]["rec"] = json.dumps(dict([(k,v) for k,v in oldrec.items() if k not in ["internal","rec"]]))
# Update our index
self.log.debug(oldrec)
self.link_index.remove(key="replaced_by",entity=oldrec["internal"])
self.link_index.add(key="replaced_by",value=newp.rec["guid"],entity=oldrec["internal"])
#self.link_index.remove(key="alive",value="true",entity=oldrec["internal"])
#self.link_index.add(key="alive",value="false",entity=oldrec["internal"])
return newp
else:
self.errors.append("PROPERTY UPDATE: "+(" ".join(newp.errors)))
oldrec["internal"].errors.append("PROPERTY UPDATE: "+(" ".join(newp.errors)))
return None
def destroy_node(self,n):
n.delete()
def destroy_link(self,n):
        n.delete()
| apache-2.0 | 5,365,534,441,605,054,000 | 35.182371 | 185 | 0.572329 | false | 3.876262 | false | false | false |
PetePriority/home-assistant | homeassistant/components/freebox/__init__.py | 1 | 2731 | """
Support for Freebox devices (Freebox v6 and Freebox mini 4K).
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/freebox/
"""
import logging
import socket
import voluptuous as vol
from homeassistant.components.discovery import SERVICE_FREEBOX
from homeassistant.const import CONF_HOST, CONF_PORT, EVENT_HOMEASSISTANT_STOP
from homeassistant.helpers import config_validation as cv, discovery
from homeassistant.helpers.discovery import async_load_platform
REQUIREMENTS = ['aiofreepybox==0.0.6']
_LOGGER = logging.getLogger(__name__)
DOMAIN = "freebox"
DATA_FREEBOX = DOMAIN
FREEBOX_CONFIG_FILE = 'freebox.conf'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PORT): cv.port
})
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
"""Set up the Freebox component."""
conf = config.get(DOMAIN)
async def discovery_dispatch(service, discovery_info):
if conf is None:
host = discovery_info.get('properties', {}).get('api_domain')
port = discovery_info.get('properties', {}).get('https_port')
_LOGGER.info("Discovered Freebox server: %s:%s", host, port)
await async_setup_freebox(hass, config, host, port)
discovery.async_listen(hass, SERVICE_FREEBOX, discovery_dispatch)
if conf is not None:
host = conf.get(CONF_HOST)
port = conf.get(CONF_PORT)
await async_setup_freebox(hass, config, host, port)
return True
async def async_setup_freebox(hass, config, host, port):
"""Start up the Freebox component platforms."""
from aiofreepybox import Freepybox
from aiofreepybox.exceptions import HttpRequestError
app_desc = {
'app_id': 'hass',
'app_name': 'Home Assistant',
'app_version': '0.65',
'device_name': socket.gethostname()
}
token_file = hass.config.path(FREEBOX_CONFIG_FILE)
api_version = 'v1'
fbx = Freepybox(
app_desc=app_desc,
token_file=token_file,
api_version=api_version)
try:
await fbx.open(host, port)
except HttpRequestError:
_LOGGER.exception('Failed to connect to Freebox')
else:
hass.data[DATA_FREEBOX] = fbx
hass.async_create_task(async_load_platform(
hass, 'sensor', DOMAIN, {}, config))
hass.async_create_task(async_load_platform(
hass, 'device_tracker', DOMAIN, {}, config))
async def close_fbx(event):
"""Close Freebox connection on HA Stop."""
await fbx.close()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, close_fbx)
| apache-2.0 | -2,485,271,579,243,107,000 | 29.010989 | 78 | 0.659099 | false | 3.670699 | true | false | false |
prometheus42/MangaLoader | src/data.py | 1 | 2753 |
import logging
logger = logging.getLogger('MangaLoader.data')
# -------------------------------------------------------------------------------------------------
# Manga class
# -------------------------------------------------------------------------------------------------
class Manga(object):
def __init__(self, name):
self.name = name
self.chapter_list = []
self.url = ''
self.internalName = ''
self.cover_url = ''
self.is_open = None
def __str__(self):
return str(self.name)
def add_chapter(self, chapter):
chapter.manga = self
self.chapter_list.append(chapter)
def get_chapter(self, number):
for chapter in self.chapter_list:
if chapter.chapterNo == number:
return chapter
return None
def get_chapters(self, numbers):
result = []
for chapter in self.chapter_list:
if chapter.chapterNo == numbers or chapter.chapterNo in numbers:
result.append(chapter)
return result
# -------------------------------------------------------------------------------------------------
# Chapter class
# -------------------------------------------------------------------------------------------------
class Chapter(object):
def __init__(self, manga, chapter_no):
self.manga = manga
self.chapterNo = chapter_no
self.chapterTitle = ''
self.url = ''
self.image_list = []
self.text = ''
self.title = ''
def __str__(self):
if self.manga is not None:
return str(self.manga) + ' ' + str(self.chapterNo)
else:
return str(self.chapterNo)
def add_image(self, image):
image.chapter = self
self.image_list.append(image)
def get_image(self, number):
for image in self.image_list:
if image.imageNo == number:
return image
return None
def get_images(self, numbers):
result = []
for image in self.image_list:
if image.imageNo == numbers or image.imageNo in numbers:
result.append(image)
return result
# -------------------------------------------------------------------------------------------------
# Image class
# -------------------------------------------------------------------------------------------------
class Image(object):
def __init__(self, chapter, image_no):
self.chapter = chapter
self.imageNo = image_no
self.url = None
def __str__(self):
if self.chapter is not None:
return str(self.chapter) + ' - ' + str(self.imageNo)
else:
return str(self.imageNo)
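# Illustrative usage (names and numbers below are made up; the expected strings
# were worked out by hand from the __str__ methods above):
#
#   manga = Manga('Example Title')
#   chapter = Chapter(manga, 1)
#   manga.add_chapter(chapter)
#   chapter.add_image(Image(chapter, 1))
#   str(chapter)                 # -> 'Example Title 1'
#   str(chapter.get_image(1))    # -> 'Example Title 1 - 1'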
| mit | 675,683,230,206,739,100 | 28.602151 | 99 | 0.432982 | false | 4.705983 | false | false | false |
J08nY/PGPy | pgpy/types.py | 1 | 25555 | """ types.py
"""
from __future__ import division
import abc
import base64
import binascii
import bisect
import codecs
import collections
import operator
import os
import re
import warnings
import weakref
from enum import EnumMeta
from enum import IntEnum
import six
from ._author import __version__
from .decorators import sdproperty
from .errors import PGPError
__all__ = ['Armorable',
'ParentRef',
'PGPObject',
'Field',
'Header',
'MetaDispatchable',
'Dispatchable',
'SignatureVerification',
'FlagEnumMeta',
'FlagEnum',
'Fingerprint',
'SorteDeque']
if six.PY2:
FileNotFoundError = IOError
re.ASCII = 0
class Armorable(six.with_metaclass(abc.ABCMeta)):
__crc24_init = 0x0B704CE
__crc24_poly = 0x1864CFB
__armor_fmt = '-----BEGIN PGP {block_type}-----\n' \
'{headers}\n' \
'{packet}\n' \
'={crc}\n' \
'-----END PGP {block_type}-----\n'
# the re.VERBOSE flag allows for:
# - whitespace is ignored except when in a character class or escaped
# - anything after a '#' that is not escaped or in a character class is ignored, allowing for comments
__armor_regex = re.compile(r"""# This capture group is optional because it will only be present in signed cleartext messages
(^-{5}BEGIN\ PGP\ SIGNED\ MESSAGE-{5}(?:\r?\n)
(Hash:\ (?P<hashes>[A-Za-z0-9\-,]+)(?:\r?\n){2})?
(?P<cleartext>(.*\r?\n)*(.*(?=\r?\n-{5})))(?:\r?\n)
)?
# armor header line; capture the variable part of the magic text
^-{5}BEGIN\ PGP\ (?P<magic>[A-Z0-9 ,]+)-{5}(?:\r?\n)
# try to capture all the headers into one capture group
# if this doesn't match, m['headers'] will be None
(?P<headers>(^.+:\ .+(?:\r?\n))+)?(?:\r?\n)?
# capture all lines of the body, up to 76 characters long,
# including the newline, and the pad character(s)
(?P<body>([A-Za-z0-9+/]{1,75}={,2}(?:\r?\n))+)
# capture the armored CRC24 value
^=(?P<crc>[A-Za-z0-9+/]{4})(?:\r?\n)
# finally, capture the armor tail line, which must match the armor header line
^-{5}END\ PGP\ (?P=magic)-{5}(?:\r?\n)?
""", flags=re.MULTILINE | re.VERBOSE)
@property
def charset(self):
return self.ascii_headers.get('Charset', 'utf-8')
@charset.setter
def charset(self, encoding):
self.ascii_headers['Charset'] = codecs.lookup(encoding).name
@staticmethod
def is_ascii(text):
if isinstance(text, six.string_types):
return bool(re.match(r'^[ -~\r\n]*$', text, flags=re.ASCII))
if isinstance(text, (bytes, bytearray)):
return bool(re.match(br'^[ -~\r\n]*$', text, flags=re.ASCII))
raise TypeError("Expected: ASCII input of type str, bytes, or bytearray") # pragma: no cover
@staticmethod
def is_armor(text):
"""
Whether the ``text`` provided is an ASCII-armored PGP block.
:param text: A possible ASCII-armored PGP block.
:raises: :py:exc:`TypeError` if ``text`` is not a ``str``, ``bytes``, or ``bytearray``
:returns: Whether the text is ASCII-armored.
"""
if isinstance(text, (bytes, bytearray)): # pragma: no cover
text = text.decode('latin-1')
return Armorable.__armor_regex.search(text) is not None
@staticmethod
def ascii_unarmor(text):
"""
Takes an ASCII-armored PGP block and returns the decoded byte value.
:param text: An ASCII-armored PGP block, to un-armor.
:raises: :py:exc:`ValueError` if ``text`` did not contain an ASCII-armored PGP block.
:raises: :py:exc:`TypeError` if ``text`` is not a ``str``, ``bytes``, or ``bytearray``
:returns: A ``dict`` containing information from ``text``, including the de-armored data.
It can contain the following keys: ``magic``, ``headers``, ``hashes``, ``cleartext``, ``body``, ``crc``.
"""
m = {'magic': None, 'headers': None, 'body': bytearray(), 'crc': None}
if not Armorable.is_ascii(text):
m['body'] = bytearray(text)
return m
if isinstance(text, (bytes, bytearray)): # pragma: no cover
text = text.decode('latin-1')
m = Armorable.__armor_regex.search(text)
if m is None: # pragma: no cover
raise ValueError("Expected: ASCII-armored PGP data")
m = m.groupdict()
if m['hashes'] is not None:
m['hashes'] = m['hashes'].split(',')
if m['headers'] is not None:
m['headers'] = collections.OrderedDict(re.findall('^(?P<key>.+): (?P<value>.+)$\n?', m['headers'], flags=re.MULTILINE))
if m['body'] is not None:
try:
m['body'] = bytearray(base64.b64decode(m['body'].encode()))
except (binascii.Error, TypeError) as ex:
six.raise_from(PGPError, ex)
if m['crc'] is not None:
m['crc'] = Header.bytes_to_int(base64.b64decode(m['crc'].encode()))
if Armorable.crc24(m['body']) != m['crc']:
warnings.warn('Incorrect crc24', stacklevel=3)
return m
@staticmethod
def crc24(data):
# CRC24 computation, as described in the RFC 4880 section on Radix-64 Conversions
#
# The checksum is a 24-bit Cyclic Redundancy Check (CRC) converted to
# four characters of radix-64 encoding by the same MIME base64
# transformation, preceded by an equal sign (=). The CRC is computed
# by using the generator 0x864CFB and an initialization of 0xB704CE.
# The accumulation is done on the data before it is converted to
# radix-64, rather than on the converted data.
crc = Armorable.__crc24_init
if not isinstance(data, bytearray):
data = six.iterbytes(data)
for b in data:
crc ^= b << 16
for i in range(8):
crc <<= 1
if crc & 0x1000000:
crc ^= Armorable.__crc24_poly
return crc & 0xFFFFFF
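    # Usage sketch: __str__ below runs the full binary serialization through
    # crc24() and base64-encodes the 3-byte big-endian result to form the
    # '=XXXX' armor tail, roughly:
    #
    #   tail = base64.b64encode(PGPObject.int_to_bytes(Armorable.crc24(data), 3))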
@abc.abstractproperty
def magic(self):
"""The magic string identifier for the current PGP type"""
@classmethod
def from_file(cls, filename):
with open(filename, 'rb') as file:
obj = cls()
data = bytearray(os.path.getsize(filename))
file.readinto(data)
po = obj.parse(data)
if po is not None:
return (obj, po)
return obj # pragma: no cover
@classmethod
def from_blob(cls, blob):
obj = cls()
if (not isinstance(blob, six.binary_type)) and (not isinstance(blob, bytearray)):
po = obj.parse(bytearray(blob, 'latin-1'))
else:
po = obj.parse(bytearray(blob))
if po is not None:
return (obj, po)
return obj # pragma: no cover
def __init__(self):
super(Armorable, self).__init__()
self.ascii_headers = collections.OrderedDict()
self.ascii_headers['Version'] = 'PGPy v' + __version__ # Default value
def __str__(self):
payload = base64.b64encode(self.__bytes__()).decode('latin-1')
payload = '\n'.join(payload[i:(i + 64)] for i in range(0, len(payload), 64))
return self.__armor_fmt.format(
block_type=self.magic,
headers=''.join('{key}: {val}\n'.format(key=key, val=val) for key, val in self.ascii_headers.items()),
packet=payload,
crc=base64.b64encode(PGPObject.int_to_bytes(self.crc24(self.__bytes__()), 3)).decode('latin-1')
)
def __copy__(self):
obj = self.__class__()
obj.ascii_headers = self.ascii_headers.copy()
return obj
class ParentRef(object):
# mixin class to handle weak-referencing a parent object
@property
def _parent(self):
if isinstance(self.__parent, weakref.ref):
return self.__parent()
return self.__parent
@_parent.setter
def _parent(self, parent):
try:
self.__parent = weakref.ref(parent)
except TypeError:
self.__parent = parent
@property
def parent(self):
return self._parent
def __init__(self):
super(ParentRef, self).__init__()
self._parent = None
class PGPObject(six.with_metaclass(abc.ABCMeta, object)):
__metaclass__ = abc.ABCMeta
@staticmethod
def int_byte_len(i):
return (i.bit_length() + 7) // 8
@staticmethod
def bytes_to_int(b, order='big'): # pragma: no cover
"""convert bytes to integer"""
if six.PY2:
# save the original type of b without having to copy any data
_b = b.__class__()
if order != 'little':
b = reversed(b)
if not isinstance(_b, bytearray):
b = six.iterbytes(b)
return sum(c << (i * 8) for i, c in enumerate(b))
return int.from_bytes(b, order)
@staticmethod
def int_to_bytes(i, minlen=1, order='big'): # pragma: no cover
"""convert integer to bytes"""
blen = max(minlen, PGPObject.int_byte_len(i), 1)
if six.PY2:
r = iter(_ * 8 for _ in (range(blen) if order == 'little' else range(blen - 1, -1, -1)))
return bytes(bytearray((i >> c) & 0xff for c in r))
return i.to_bytes(blen, order)
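    # Round-trip sketch (values worked out by hand from the two helpers above):
    #
    #   PGPObject.int_to_bytes(258, 2)        # -> b'\x01\x02'
    #   PGPObject.bytes_to_int(b'\x01\x02')   # -> 258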
@staticmethod
def text_to_bytes(text):
if text is None:
return text
# if we got bytes, just return it
if isinstance(text, (bytearray, six.binary_type)):
return text
# if we were given a unicode string, or if we translated the string into utf-8,
# we know that Python already has it in utf-8 encoding, so we can now just encode it to bytes
return text.encode('utf-8')
@staticmethod
def bytes_to_text(text):
if text is None or isinstance(text, six.text_type):
return text
return text.decode('utf-8')
@abc.abstractmethod
def parse(self, packet):
"""this method is too abstract to understand"""
@abc.abstractmethod
def __bytearray__(self):
"""
Returns the contents of concrete subclasses in a binary format that can be understood by other OpenPGP
implementations
"""
def __bytes__(self):
"""
Return the contents of concrete subclasses in a binary format that can be understood by other OpenPGP
implementations
"""
# this is what all subclasses will do anyway, so doing this here we can reduce code duplication significantly
return bytes(self.__bytearray__())
class Field(PGPObject):
@abc.abstractmethod
def __len__(self):
"""Return the length of the output of __bytes__"""
class Header(Field):
@staticmethod
def encode_length(l, nhf=True, llen=1):
def _new_length(l):
if 192 > l:
return Header.int_to_bytes(l)
elif 8384 > l:
elen = ((l & 0xFF00) + (192 << 8)) + ((l & 0xFF) - 192)
return Header.int_to_bytes(elen, 2)
return b'\xFF' + Header.int_to_bytes(l, 4)
def _old_length(l, llen):
return Header.int_to_bytes(l, llen) if llen > 0 else b''
return _new_length(l) if nhf else _old_length(l, llen)
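    # New-format length encoding sketch (octet values worked out by hand from
    # _new_length above, matching the RFC 4880 new-format length scheme):
    #
    #   Header.encode_length(100)      -> one octet   0x64
    #   Header.encode_length(1000)     -> two octets  0xC3 0x28
    #   Header.encode_length(100000)   -> five octets 0xFF 0x00 0x01 0x86 0xA0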
@sdproperty
def length(self):
return self._len
@length.register(int)
def length_int(self, val):
self._len = val
@length.register(six.binary_type)
@length.register(bytearray)
def length_bin(self, val):
def _new_len(b):
def _parse_len(a, offset=0):
# returns (the parsed length, size of length field, whether the length was of partial type)
fo = a[offset]
if 192 > fo:
return (self.bytes_to_int(a[offset:offset + 1]), 1, False)
elif 224 > fo: # >= 192 is implied
dlen = self.bytes_to_int(b[offset:offset + 2])
return (((dlen - (192 << 8)) & 0xFF00) + ((dlen & 0xFF) + 192), 2, False)
elif 255 > fo: # >= 224 is implied
# this is a partial-length header
return (1 << (fo & 0x1f), 1, True)
elif 255 == fo:
return (self.bytes_to_int(b[offset + 1:offset + 5]), 5, False)
else: # pragma: no cover
raise ValueError("Malformed length: 0x{:02x}".format(fo))
part_len, size, partial = _parse_len(b)
del b[:size]
if partial:
total = part_len
while partial:
part_len, size, partial = _parse_len(b, total)
del b[total:total + size]
total += part_len
self._len = total
else:
self._len = part_len
def _old_len(b):
if self.llen > 0:
self._len = self.bytes_to_int(b[:self.llen])
del b[:self.llen]
else: # pragma: no cover
self._len = 0
_new_len(val) if self._lenfmt == 1 else _old_len(val)
@sdproperty
def llen(self):
l = self.length
lf = self._lenfmt
if lf == 1:
# new-format length
if 192 > l:
return 1
elif 8384 > self.length: # >= 192 is implied
return 2
else:
return 5
else:
# old-format length
##TODO: what if _llen needs to be (re)computed?
return self._llen
@llen.register(int)
def llen_int(self, val):
if self._lenfmt == 0:
self._llen = {0: 1, 1: 2, 2: 4, 3: 0}[val]
def __init__(self):
super(Header, self).__init__()
self._len = 1
self._llen = 1
self._lenfmt = 1
self._partial = False
class MetaDispatchable(abc.ABCMeta):
"""
MetaDispatchable is a metaclass for objects that subclass Dispatchable
"""
_roots = set()
"""
_roots is a set of all currently registered RootClass class objects
A RootClass is successfully registered if the following things are true:
- it inherits (directly or indirectly) from Dispatchable
- __typeid__ == -1
"""
_registry = {}
"""
_registry is the Dispatchable class registry. It uses the following format:
{ (RootClass, None): OpaqueClass }:
denotes the default ("opaque") for a given RootClass.
An OpaqueClass is successfully registered as such provided the following conditions are met:
- it inherits directly from a RootClass
- __typeid__ is None
{ (RootClass, TypeID): SubClass }:
denotes the class that handles the type given in TypeID
a SubClass is successfully registered as such provided the following conditions are met:
- it inherits (directly or indirectly) from a RootClass
- __typeid__ is a positive int
- the given typeid is not already registered
{ (RootClass, TypeID): VerSubClass }:
denotes that a given TypeID has multiple versions, and that this is class' subclasses handle those.
A VerSubClass is registered identically to a normal SubClass.
{ (RootClass, TypeID, Ver): VerSubClass }:
denotes the class that handles the type given in TypeID and the version of that type given in Ver
a Versioned SubClass is successfully registered as such provided the following conditions are met:
- it inherits from a VerSubClass
- __ver__ > 0
- the given typeid/ver combination is not already registered
"""
def __new__(mcs, name, bases, attrs): # NOQA
ncls = super(MetaDispatchable, mcs).__new__(mcs, name, bases, attrs)
if not hasattr(ncls.__typeid__, '__isabstractmethod__'):
if ncls.__typeid__ == -1 and not issubclass(ncls, tuple(MetaDispatchable._roots)):
# this is a root class
MetaDispatchable._roots.add(ncls)
elif issubclass(ncls, tuple(MetaDispatchable._roots)) and ncls.__typeid__ != -1:
for rcls in [ root for root in MetaDispatchable._roots if issubclass(ncls, root) ]:
if (rcls, ncls.__typeid__) not in MetaDispatchable._registry:
MetaDispatchable._registry[(rcls, ncls.__typeid__)] = ncls
if (ncls.__ver__ is not None and ncls.__ver__ > 0 and
(rcls, ncls.__typeid__, ncls.__ver__) not in MetaDispatchable._registry):
MetaDispatchable._registry[(rcls, ncls.__typeid__, ncls.__ver__)] = ncls
# finally, return the new class object
return ncls
def __call__(cls, packet=None): # NOQA
def _makeobj(cls):
obj = object.__new__(cls)
obj.__init__()
return obj
if packet is not None:
if cls in MetaDispatchable._roots:
rcls = cls
elif issubclass(cls, tuple(MetaDispatchable._roots)): # pragma: no cover
rcls = next(root for root in MetaDispatchable._roots if issubclass(cls, root))
##TODO: else raise an exception of some kind, but this should never happen
header = rcls.__headercls__()
header.parse(packet)
ncls = None
if (rcls, header.typeid) in MetaDispatchable._registry:
ncls = MetaDispatchable._registry[(rcls, header.typeid)]
if ncls.__ver__ == 0:
if header.__class__ != ncls.__headercls__:
nh = ncls.__headercls__()
nh.__dict__.update(header.__dict__)
try:
nh.parse(packet)
except Exception as ex:
six.raise_from(PGPError, ex)
header = nh
if (rcls, header.typeid, header.version) in MetaDispatchable._registry:
ncls = MetaDispatchable._registry[(rcls, header.typeid, header.version)]
else: # pragma: no cover
ncls = None
if ncls is None:
ncls = MetaDispatchable._registry[(rcls, None)]
obj = _makeobj(ncls)
obj.header = header
try:
obj.parse(packet)
except Exception as ex:
six.raise_from(PGPError, ex)
else:
obj = _makeobj(cls)
return obj
class Dispatchable(six.with_metaclass(MetaDispatchable, PGPObject)):
__metaclass__ = MetaDispatchable
@abc.abstractproperty
def __headercls__(self): # pragma: no cover
return False
@abc.abstractproperty
def __typeid__(self): # pragma: no cover
return False
__ver__ = None
class SignatureVerification(object):
_sigsubj = collections.namedtuple('sigsubj', ['verified', 'by', 'signature', 'subject'])
@property
def good_signatures(self):
"""
A generator yielding namedtuples of all signatures that were successfully verified
in the operation that returned this instance. The namedtuple has the following attributes:
``sigsubj.verified`` - ``bool`` of whether the signature verified successfully or not.
``sigsubj.by`` - the :py:obj:`~pgpy.PGPKey` that was used in this verify operation.
``sigsubj.signature`` - the :py:obj:`~pgpy.PGPSignature` that was verified.
``sigsubj.subject`` - the subject that was verified using the signature.
"""
for s in [ i for i in self._subjects if i.verified ]:
yield s
@property
def bad_signatures(self): # pragma: no cover
"""
A generator yielding namedtuples of all signatures that were not verified
in the operation that returned this instance. The namedtuple has the following attributes:
``sigsubj.verified`` - ``bool`` of whether the signature verified successfully or not.
``sigsubj.by`` - the :py:obj:`~pgpy.PGPKey` that was used in this verify operation.
``sigsubj.signature`` - the :py:obj:`~pgpy.PGPSignature` that was verified.
``sigsubj.subject`` - the subject that was verified using the signature.
"""
for s in [ i for i in self._subjects if not i.verified ]:
yield s
def __init__(self):
"""
Returned by :py:meth:`PGPKey.verify`
Can be compared directly as a boolean to determine whether or not the specified signature verified.
"""
super(SignatureVerification, self).__init__()
self._subjects = []
def __contains__(self, item):
return item in {ii for i in self._subjects for ii in [i.signature, i.subject]}
def __len__(self):
return len(self._subjects)
def __bool__(self):
return all(s.verified for s in self._subjects)
def __nonzero__(self):
return self.__bool__()
def __and__(self, other):
if not isinstance(other, SignatureVerification):
raise TypeError(type(other))
self._subjects += other._subjects
return self
def __repr__(self):
return "<SignatureVerification({verified})>".format(verified=str(bool(self)))
def add_sigsubj(self, signature, by, subject=None, verified=False):
self._subjects.append(self._sigsubj(verified, by, signature, subject))
class FlagEnumMeta(EnumMeta):
def __and__(self, other):
return { f for f in iter(self) if f.value & other }
def __rand__(self, other): # pragma: no cover
return self & other
if six.PY2:
class FlagEnum(IntEnum):
__metaclass__ = FlagEnumMeta
else:
namespace = FlagEnumMeta.__prepare__('FlagEnum', (IntEnum,))
FlagEnum = FlagEnumMeta('FlagEnum', (IntEnum,), namespace)
class Fingerprint(str):
"""
A subclass of ``str``. Can be compared using == and != to ``str``, ``unicode``, and other :py:obj:`Fingerprint` instances.
Primarily used as a key for internal dictionaries, so it ignores spaces when comparing and hashing
"""
@property
def keyid(self):
return str(self).replace(' ', '')[-16:]
@property
def shortid(self):
return str(self).replace(' ', '')[-8:]
def __new__(cls, content):
if isinstance(content, Fingerprint):
return content
# validate input before continuing: this should be a string of 40 hex digits
content = content.upper().replace(' ', '')
if not bool(re.match(r'^[A-F0-9]{40}$', content)):
raise ValueError("Expected: String of 40 hex digits")
# store in the format: "AAAA BBBB CCCC DDDD EEEE FFFF 0000 1111 2222 3333"
# ^^ note 2 spaces here
spaces = [ ' ' if i != 4 else ' ' for i in range(10) ]
chunks = [ ''.join(g) for g in six.moves.zip_longest(*[iter(content)] * 4) ]
content = ''.join(j for i in six.moves.zip_longest(chunks, spaces, fillvalue='') for j in i).strip()
return str.__new__(cls, content)
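    # Formatting sketch (the 40 hex digits below are made up):
    #
    #   fp = Fingerprint('0123456789abcdef0123456789abcdef01234567')
    #   str(fp)      # -> '0123 4567 89AB CDEF 0123  4567 89AB CDEF 0123 4567'
    #   fp.keyid     # -> '89ABCDEF01234567'
    #   fp.shortid   # -> '01234567'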
def __eq__(self, other):
if isinstance(other, Fingerprint):
return str(self) == str(other)
if isinstance(other, (six.text_type, bytes, bytearray)):
if isinstance(other, (bytes, bytearray)): # pragma: no cover
other = other.decode('latin-1')
other = str(other).replace(' ', '')
return any([self.replace(' ', '') == other,
self.keyid == other,
self.shortid == other])
return False # pragma: no cover
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(str(self.replace(' ', '')))
def __bytes__(self):
return binascii.unhexlify(six.b(self.replace(' ', '')))
class SorteDeque(collections.deque):
"""A deque subclass that tries to maintain sorted ordering using bisect"""
def insort(self, item):
i = bisect.bisect_left(self, item)
self.rotate(- i)
self.appendleft(item)
self.rotate(i)
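    # Worked example (checked by hand against deque.rotate semantics):
    #
    #   d = SorteDeque([1, 3, 5])
    #   d.insort(4)    # bisect_left gives 2; d becomes deque([1, 3, 4, 5])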
def resort(self, item): # pragma: no cover
if item in self:
# if item is already in self, see if it is still in sorted order.
# if not, re-sort it by removing it and then inserting it into its sorted order
i = bisect.bisect_left(self, item)
if i == len(self) or self[i] is not item:
self.remove(item)
self.insort(item)
else:
# if item is not in self, just insert it in sorted order
self.insort(item)
def check(self): # pragma: no cover
"""re-sort any items in self that are not sorted"""
for unsorted in iter(self[i] for i in range(len(self) - 2) if not operator.le(self[i], self[i + 1])):
self.resort(unsorted)
| bsd-3-clause | -3,138,562,924,995,488,000 | 32.892573 | 131 | 0.555469 | false | 4.015556 | false | false | false |
Linaf/Convae-Destin | scripts/feature_extract_test.py | 1 | 1423 | """Feature extraction test"""
import numpy as np;
import sys
import theano;
import theano.tensor as T;
sys.path.append("..")
import scae_destin.datasets as ds;
from scae_destin.convnet import ReLUConvLayer;
from scae_destin.convnet import LCNLayer
n_epochs=1;
batch_size=100;
Xtr, Ytr, Xte, Yte=ds.load_CIFAR10("/home/icog/convAE+destin/cifar-10-batches-py");
Xtr=np.mean(Xtr, 3);
Xte=np.mean(Xte, 3);
Xtrain=Xtr.reshape(Xtr.shape[0], Xtr.shape[1]*Xtr.shape[2])
Xtest=Xte.reshape(Xte.shape[0], Xte.shape[1]*Xte.shape[2])
train_set_x, train_set_y=ds.shared_dataset((Xtrain, Ytr));
test_set_x, test_set_y=ds.shared_dataset((Xtest, Yte));
n_train_batches=train_set_x.get_value(borrow=True).shape[0]/batch_size;
n_test_batches=test_set_x.get_value(borrow=True).shape[0]/batch_size;
print "[MESSAGE] The data is loaded"
X=T.matrix("data");
y=T.ivector("label");
idx=T.lscalar();
images=X.reshape((batch_size, 1, 32, 32))
layer_0=LCNLayer(filter_size=(7,7),
num_filters=50,
num_channels=1,
fm_size=(32,32),
batch_size=batch_size,
border_mode="full");
extract=theano.function(inputs=[idx],
outputs=layer_0.apply(images),
givens={X: train_set_x[idx * batch_size: (idx + 1) * batch_size]});
print extract(1).shape
| apache-2.0 | -5,373,216,358,462,393,000 | 28.040816 | 91 | 0.61279 | false | 2.779297 | true | true | false |
robisen1/AndroidWifiCracker | services/gps/TestNmea.py | 1 | 5151 | #!/usr/bin/env python
import unittest
import NmeaSentences
import NmeaParser
import datetime
from Utc import Utc
class TestNmeaSentence(unittest.TestCase):
def setUp(self):
self.__nmea = NmeaSentences.NmeaSentence("XYZ")
def test_getType(self):
self.assertEqual(self.__nmea.getType(), "XYZ")
class TestGpggaSentence(unittest.TestCase):
def setUp(self):
sentence1 = "GPGGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,"
self.__gpgga1 = NmeaSentences.GpggaSentence( sentence1.split(',') )
self.__sentence2 = "GPXXX,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,"
self.__sentence3 = "GPGGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,"
def test_getType(self):
self.assertEqual(self.__gpgga1.getType(), "GPGGA")
def test_getTime(self):
dt = datetime.datetime.now( tz = Utc() )
dt = dt.replace(hour = 12, minute = 35, second = 19, microsecond = 0)
self.assertEqual(self.__gpgga1.getTime(), dt)
def test_getLatitude(self):
self.assertAlmostEqual(self.__gpgga1.getLatitude(), 48.0 + 7.038 / 60.0)
def test_getLongitude(self):
self.assertAlmostEqual(self.__gpgga1.getLongitude(), 11.0 + 31.000 / 60.0)
def test_getQuality(self):
self.assertEqual(self.__gpgga1.getQuality(), 1)
def test_getNumSatellites(self):
self.assertEqual(self.__gpgga1.getNumSatellites(), 8)
def test_getHorizontalDop(self):
self.assertAlmostEqual(self.__gpgga1.getHorizontalDop(), 0.9)
def test_getAltitude(self):
self.assertAlmostEqual(self.__gpgga1.getAltitude(), 545.4)
def test_getAltitudeUnits(self):
self.assertEqual(self.__gpgga1.getAltitudeUnits(), "M")
def test_getGeoidHeight(self):
self.assertAlmostEqual(self.__gpgga1.getGeoidHeight(), 46.9)
def test_getGeoidHeightUnits(self):
self.assertEqual(self.__gpgga1.getGeoidHeightUnits(), "M")
def test_getSecondsSinceLastDgpsUpdate(self):
self.assertEqual(self.__gpgga1.getSecondsSinceLastDgpsUpdate(), 0)
def test_getDgpsStationId(self):
self.assertEqual(self.__gpgga1.getDgpsStationId(), "")
def test_GpggaSentence1(self):
self.assertRaises(NmeaSentences.InvalidGpggaSentence, NmeaSentences.GpggaSentence, self.__sentence2)
def test_GpggaSentence2(self):
self.assertRaises(NmeaSentences.InvalidGpggaSentence, NmeaSentences.GpggaSentence, self.__sentence3)
class TestNmeaParser(unittest.TestCase):
def setUp(self):
self.__gpggaRaw1 = "$GPGGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,*47\r\n"
self.__gpgga1 = NmeaParser.NmeaParser.Parse(self.__gpggaRaw1)
self.__gpggaRaw2 = "GPGGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,*47\r\n"
self.__gpggaRaw3 = "$GPGGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,*47\n\n"
self.__gpggaRaw4 = "$GPGGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,*47\r\r"
self.__gpggaRaw5 = "$GPGGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,447\r\n"
self.__gpggaRaw6 = "$GPGGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,*46\r\n"
def test_Parse1(self):
self.assertEqual(self.__gpgga1.getType(), "GPGGA")
dt = datetime.datetime.now( tz = Utc() )
dt = dt.replace(hour = 12, minute = 35, second = 19, microsecond = 0)
self.assertEqual(self.__gpgga1.getTime(), dt)
self.assertAlmostEqual(self.__gpgga1.getLatitude(), 48.0 + 7.038 / 60.0)
self.assertAlmostEqual(self.__gpgga1.getLongitude(), 11.0 + 31.000 / 60.0)
self.assertEqual(self.__gpgga1.getQuality(), 1)
self.assertEqual(self.__gpgga1.getNumSatellites(), 8)
self.assertAlmostEqual(self.__gpgga1.getHorizontalDop(), 0.9)
self.assertAlmostEqual(self.__gpgga1.getAltitude(), 545.4)
self.assertEqual(self.__gpgga1.getAltitudeUnits(), "M")
self.assertAlmostEqual(self.__gpgga1.getGeoidHeight(), 46.9)
self.assertEqual(self.__gpgga1.getGeoidHeightUnits(), "M")
self.assertEqual(self.__gpgga1.getSecondsSinceLastDgpsUpdate(), 0)
self.assertEqual(self.__gpgga1.getDgpsStationId(), "")
def test_Parse2(self):
self.assertRaises(NmeaParser.InvalidNmeaSentence, NmeaParser.NmeaParser.Parse, self.__gpggaRaw2)
def test_Parse3(self):
self.assertRaises(NmeaParser.InvalidNmeaSentence, NmeaParser.NmeaParser.Parse, self.__gpggaRaw3)
def test_Parse4(self):
self.assertRaises(NmeaParser.InvalidNmeaSentence, NmeaParser.NmeaParser.Parse, self.__gpggaRaw4)
def test_Parse5(self):
self.assertRaises(NmeaParser.InvalidNmeaSentence, NmeaParser.NmeaParser.Parse, self.__gpggaRaw5)
def test_Parse6(self):
self.assertRaises(NmeaParser.InvalidNmeaSentence, NmeaParser.NmeaParser.Parse, self.__gpggaRaw6)
if __name__ == '__main__':
unittest.main()
| mit | -4,426,617,892,255,343,000 | 41.933333 | 108 | 0.647835 | false | 2.828666 | true | false | false |
googleapis/python-translate | owlbot.py | 1 | 2125 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
from synthtool import gcp
from synthtool.languages import python
common = gcp.CommonTemplates()
default_version = "v3"
for library in s.get_staging_dirs(default_version):
if library.name.startswith("v3"):
# TODO(danoscarmike): remove once upstream protos have been fixed
# Escape underscores in gs:\\ URLs
s.replace(
library / "google/cloud/translate_v3*/types/translation_service.py",
"a_b_c_",
"a_b_c\_"
)
excludes = [
"setup.py",
"nox*.py",
"README.rst",
"docs/conf.py",
"docs/index.rst",
"translation.py",
]
s.move(library, excludes=excludes)
s.remove_staging_dirs()
# ----------------------------------------------------------------------------
# Add templated files
# ----------------------------------------------------------------------------
templated_files = common.py_library(
samples=True,
microgenerator=True,
cov_level=99,
)
s.move(templated_files, excludes=[".coveragerc"]) # microgenerator has a good .coveragerc file
# Correct namespace in noxfile
s.replace("noxfile.py", "google.cloud.translation", "google.cloud.translate")
# ----------------------------------------------------------------------------
# Samples templates
# ----------------------------------------------------------------------------
python.py_samples()
s.shell.run(["nox", "-s", "blacken"], hide_output=False)
| apache-2.0 | -7,526,043,277,117,997,000 | 30.716418 | 95 | 0.582118 | false | 4.126214 | false | false | false |
ngokevin/cyder | cyder/maintain2cyder/printer.py | 1 | 2133 | import sys
"""
A class to print BIND records.
"""
class Printer(object):
def __init__( self, fd=sys.stdout, AJUST1=30, AJUST2=10 ):
self.fd = fd
self.AJUST1 = AJUST1
self.AJUST2 = AJUST2
def print_raw( self, message ):
self.fd.write( message )
def print_rr( self, name, data , rr_type, terminal="" ):
name = name.ljust(self.AJUST1," ")
self.fd.write("%s %s%s%s%s\n" % (name,rr_type," "*self.AJUST2,data,terminal))
def print_PTR( self, ip, dname ):
ip = '.'.join(list(reversed(ip.split('.')))) # Reverse the ip along '.' boundaries
self.print_rr( ip, dname, "PTR", "." )
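    # Illustrative call (address and name are made up); the owner name is the
    # reversed IP, left-justified to AJUST1 (default 30) columns before the record type:
    #
    #   Printer().print_PTR('192.0.2.10', 'host.example.com')
    #   # writes a zone line like:  10.2.0.192 ... PTR ... host.example.com.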
def print_A( self, dname, ip , terminal=""):
self.print_rr( dname, ip, "A", terminal )
def print_CNAME( self, dname, cname ):
self.print_rr( dname, cname, "CNAME", "." )
def print_MX( self, name, domain, ttl, prio, server ):
self.print_rr( name, str(prio)+" ".rjust(3," ") , 'MX', server )
def print_NS( self, dname, nameservers ):
if not nameservers:
return
dname = dname.ljust(self.AJUST1," ")
padding = "@".ljust(self.AJUST1," ")
for ns in nameservers:
self.fd.write("%s NS%s%s.\n" % (padding," "*self.AJUST2,ns))
    def print_SOA( self, ttl, dname, primary_master, contact, serial, refresh, retry, expire, minimum ):
dname = dname+"."
dname = dname.ljust(self.AJUST1," ")
off = 9
soa = ""
soa += "$TTL %s\n" % (ttl)
soa += "%s IN SOA %s. %s. (\n" % (dname, primary_master, contact)
soa +="\n"
soa += str(serial).rjust(self.AJUST1+off," ")
soa +=" ;Serial"
soa +="\n"
soa += str(refresh).rjust(self.AJUST1+off," ")
soa +=" ;Refresh"
soa +="\n"
soa += str(retry).rjust(self.AJUST1+off," ")
soa +=" ;Retry"
soa +="\n"
soa += str(expire).rjust(self.AJUST1+off," ")
soa +=" ;Expire"
soa +="\n"
soa += str(minimum).rjust(self.AJUST1+off," ")+" )"
soa +=" ;Minimum"
soa +="\n"
self.fd.write(soa)
| bsd-3-clause | 8,471,048,928,994,542,000 | 32.328125 | 103 | 0.51805 | false | 2.987395 | false | false | false |
JT5D/Alfred-Popclip-Sublime | Sublime Text 2/SideBarGit/StatusBarBranch.py | 3 | 1240 | import sublime, sublime_plugin
from sidebar.SideBarGit import SideBarGit
from sidebar.SideBarSelection import SideBarSelection
import threading
class Object():
pass
s = sublime.load_settings('SideBarGit.sublime-settings')
class StatusBarBranch(sublime_plugin.EventListener):
def on_load(self, v):
if s.get('statusbar_branch') and v.file_name():
StatusBarBranchGet(v.file_name(), v).start()
def on_activated(self, v):
if s.get('statusbar_branch') and v.file_name():
StatusBarBranchGet(v.file_name(), v).start()
class StatusBarBranchGet(threading.Thread):
def __init__(self, file_name, v):
threading.Thread.__init__(self)
self.file_name = file_name
self.v = v
def run(self):
for repo in SideBarGit().getSelectedRepos(SideBarSelection([self.file_name]).getSelectedItems()):
object = Object()
object.item = repo.repository
object.command = ['git', 'branch']
object.silent = True
SideBarGit().run(object)
sublime.set_timeout(lambda:self.on_done(SideBarGit.last_stdout.decode('utf-8')), 0)
return
def on_done(self, branches):
branches = branches.split('\n')
for branch in branches:
if branch.startswith("*"):
self.v.set_status('statusbar_sidebargit_branch', branch)
return | gpl-2.0 | -9,210,344,488,855,552,000 | 27.860465 | 99 | 0.716129 | false | 3.076923 | false | false | false |
karmux/cid | cid/cursor.py | 1 | 1286 | from .locals import get_cid
class CidCursorWrapper(object):
"""
A cursor wrapper that attempts to add a cid comment to each query
"""
def __init__(self, cursor):
self.cursor = cursor
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def add_comment(self, sql):
cid = get_cid()
if cid:
cid = cid.replace('/*', '\/\*').replace('*/', '\*\/')
return "/* cid: {} */\n{}".format(cid, sql)
return sql
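    # Illustrative result (the correlation id value is made up): if get_cid()
    # returns 'abc123', add_comment('SELECT 1') yields
    #
    #   /* cid: abc123 */
    #   SELECT 1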
# The following methods cannot be implemented in __getattr__, because the
# code must run when the method is invoked, not just when it is accessed.
def callproc(self, procname, params=None):
return self.cursor.callproc(procname, params)
def execute(self, sql, params=None):
sql = self.add_comment(sql)
return self.cursor.execute(sql, params)
def executemany(self, sql, param_list):
sql = self.add_comment(sql)
return self.cursor.executemany(sql, param_list)
| bsd-3-clause | 7,719,256,106,157,359,000 | 27.577778 | 77 | 0.580093 | false | 3.981424 | false | false | false |
oihane/odoo-addons | stock_picking_analytic/models/stock.py | 2 | 1797 | # Copyright (c) 2017 Alfredo de la fuente <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import api, fields, models
class StockPicking(models.Model):
_inherit = 'stock.picking'
analytic_account_id = fields.Many2one(
comodel_name='account.analytic.account', string='Analytic account',
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]})
@api.onchange('analytic_account_id')
def onchange_analytic_account_id(self):
for picking in self.filtered(lambda x: x.analytic_account_id and
x.analytic_account_id.partner_id):
picking.partner_id = picking.analytic_account_id.partner_id.id
class StockMove(models.Model):
_inherit = 'stock.move'
def _action_done(self):
moves = super(StockMove, self)._action_done()
for move in moves.filtered(
lambda x: x.picking_id and
x.picking_id.analytic_account_id and
x.picking_id.picking_type_code in ('outgoing', 'incoming')):
vals = move._prepare_data_for_create_analytic_line()
self.env['account.analytic.line'].create(vals)
return moves
def _prepare_data_for_create_analytic_line(self):
self.ensure_one()
vals = {
'stock_move_id': self.id,
'account_id': self.picking_id.analytic_account_id.id,
'partner_id': self.picking_id.partner_id.id,
'product_id': self.product_id.id,
'product_uom_id': self.product_uom.id,
'unit_amount': self.product_qty,
'amount': self.product_qty * self.price_unit,
'name': u"{} {}".format(self.picking_id.name, self.name),
}
return vals
| agpl-3.0 | 3,870,059,004,579,822,600 | 38.065217 | 78 | 0.60601 | false | 3.594 | false | false | false |
pudo/aleph | services/ingest-file/ingestors/support/pdf.py | 1 | 1838 | import uuid
from pdflib import Document
from followthemoney import model
from normality import collapse_spaces # noqa
from ingestors.support.ocr import OCRSupport
from ingestors.support.convert import DocumentConvertSupport
class PDFSupport(DocumentConvertSupport, OCRSupport):
"""Provides helpers for PDF file context extraction."""
def pdf_extract(self, entity, pdf):
"""Extract pages and page text from a PDF file."""
entity.schema = model.get('Pages')
temp_dir = self.make_empty_directory()
for page in pdf:
self.pdf_extract_page(entity, temp_dir, page)
def pdf_alternative_extract(self, entity, pdf_path):
checksum = self.manager.store(pdf_path)
entity.set('pdfHash', checksum)
pdf = Document(bytes(pdf_path))
self.pdf_extract(entity, pdf)
def pdf_extract_page(self, document, temp_dir, page):
"""Extract the contents of a single PDF page, using OCR if need be."""
texts = page.lines
image_path = temp_dir.joinpath(str(uuid.uuid4()))
page.extract_images(path=bytes(image_path), prefix=b'img')
languages = self.manager.context.get('languages')
for image_file in image_path.glob("*.png"):
with open(image_file, 'rb') as fh:
data = fh.read()
text = self.extract_ocr_text(data, languages=languages)
if text is not None:
texts.append(text)
text = ' \n'.join(texts).strip()
entity = self.manager.make_entity('Page')
entity.make_id(document.id, page.page_no)
entity.set('document', document)
entity.set('index', page.page_no)
entity.add('bodyText', text)
self.manager.emit_entity(entity)
self.manager.emit_text_fragment(document, text, entity.id)
| mit | 3,929,945,399,312,507,000 | 38.106383 | 78 | 0.638738 | false | 3.837161 | false | false | false |
blockbomb/plover | setup.py | 8 | 1745 | #!/usr/bin/env python2.6
# Copyright (c) 2010 Joshua Harlan Lifton.
# See LICENSE.txt for details.
from distutils.core import setup
from plover import __version__
from plover import __description__
from plover import __long_description__
from plover import __url__
from plover import __download_url__
from plover import __license__
setup(name='plover',
version=__version__,
description=__description__,
long_description=__long_description__,
url=__url__,
download_url=__download_url__,
license=__license__,
author='Joshua Harlan Lifton',
author_email='[email protected]',
maintainer='Joshua Harlan Lifton',
maintainer_email='[email protected]',
package_dir={'plover':'plover'},
packages=['plover', 'plover.machine', 'plover.gui', 'plover.oslayer',
'plover.dictionary'],
package_data={'plover' : ['assets/*']},
data_files=[('/usr/share/applications', ['application/Plover.desktop']),
('/usr/share/pixmaps', ['plover/assets/plover_on.png']),],
scripts=['application/plover'],
requires=['serial', 'Xlib', 'wx', 'appdirs', 'wxversion'],
platforms=['GNU/Linux'],
classifiers=['Programming Language :: Python',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Development Status :: 4 - Beta',
'Environment :: X11 Applications',
'Intended Audience :: End Users/Desktop',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Topic :: Adaptive Technologies',
'Topic :: Desktop Environment',]
)
| gpl-2.0 | -7,679,780,743,119,549,000 | 40.547619 | 81 | 0.593123 | false | 4.096244 | false | false | false |
TNT-Samuel/Coding-Projects | DNS Server/Source/Lib/site-packages/idlexlib/extensionManager.py | 4 | 9491 | ## """
## Copyright(C) 2011-2012 The Board of Trustees of the University of Illinois.
## All rights reserved.
##
## Developed by: Roger D. Serwy
## University of Illinois
##
## Permission is hereby granted, free of charge, to any person obtaining
## a copy of this software and associated documentation files (the
## "Software"), to deal with the Software without restriction, including
## without limitation the rights to use, copy, modify, merge, publish,
## distribute, sublicense, and/or sell copies of the Software, and to
## permit persons to whom the Software is furnished to do so, subject to
## the following conditions:
##
## + Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimers.
## + Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimers in the
## documentation and/or other materials provided with the distribution.
## + Neither the names of Roger D. Serwy, the University of Illinois, nor
## the names of its contributors may be used to endorse or promote
## products derived from this Software without specific prior written
## permission.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
## OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
## MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
## IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR
## ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
## CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH
## THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE.
##
import sys
if sys.version < '3':
from StringIO import StringIO
from Tkinter import *
import tkFileDialog
import tkMessageBox
else:
from io import StringIO
from tkinter import *
import tkinter.filedialog as tkFileDialog
import tkinter.messagebox as tkMessageBox
import imp
try:
import importlib
HAS_IMPORTLIB = True
except ImportError:
HAS_IMPORTLIB = False
from idlelib.configHandler import idleConf, IdleConfParser
import os
def make_config_parser(cfg):
""" Stuff Configration String into a fake file and return an IDLE config parser """
fp = StringIO()
fp.write(cfg)
fp.write('\n')
fp.seek(0)
# parse the configuration from the fake file
confparse = IdleConfParser('')
try:
confparse.readfp(fp)
except BaseException as e:
print('\n Configuration Parse Error', e)
return None
return confparse
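# Illustrative call (the section and option names are made up):
#
#   confparse = make_config_parser('[myext]\nenable=1\n')
#   confparse.GetOptionList('myext')   # -> ['enable']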
class ExtensionManager(object):
""" Manages extensions for IdleX
"""
def __init__(self, path):
head,tail = os.path.split(path)
self.extension_dir = head
self.IDLEX_EXTENSIONS = self.get_idlex_extensions(head)
IDLE_EXTENSIONS = [] # A list of default extensions in IDLE - those that come with the standard distribution
for i in idleConf.defaultCfg['extensions'].sections():
if i.endswith('_cfgBindings') or i.endswith('_bindings'):
continue
IDLE_EXTENSIONS.append(i)
self.IDLE_EXTENSIONS = IDLE_EXTENSIONS
def get_idlex_extensions(self, directory):
""" Get a list of user extensions from 'directory' """
contents = os.listdir(directory)
contents.sort()
contents = [x for x in contents if not x.startswith('_')]
user_extensions = []
for i in contents:
fullpath = os.path.join(directory, i)
if fullpath.endswith('.py') \
and os.path.isfile(fullpath):
try:
txt = open(fullpath, 'r').read(1000)
except IOError:
                    print(' IOError while loading extension: %r' % fullpath)
                    continue
if '# IDLEX EXTENSION' in txt:
name = i[:-3] # truncate .py
user_extensions.append(name)
else:
print(' Not an IdleX extension: %r' % fullpath)
return user_extensions
def load_extension(self, name):
""" Imports an extension by name and returns a reference to the module.
Invalid modules return None.
"""
fullname = 'extensions.%s' % name
try:
if HAS_IMPORTLIB:
mod = importlib.import_module('.' + fullname, package=__package__)
else:
mod = __import__(fullname, globals(), locals(), [''], 1)
except Exception as err:
import traceback
traceback.print_exc()
mod = None
return mod
def find_extension(self, name):
""" Locates an extension """
path = self.extension_dir
        info = imp.find_module(name, [path])
        return info
def load_extension_cfg(self, extName):
""" Load the extension. get its default config string
from the "config_extension_def" variable."""
mod = self.load_extension(extName)
if mod is None:
print("could not load %s" % extName)
return
if hasattr(mod, "config_extension_def"):
return mod.config_extension_def
else:
print("\n Missing 'config_extension_def' in %s. Not loading." % extName)
return None
def copy_options(self, name, cfgdict, confparse, blank=False):
d = cfgdict["extensions"]
optionlist = confparse.GetOptionList(name)
for option in optionlist:
try:
value = confparse.get(name, option, raw=True)
except BaseException as e:
print(' Error during extension settings copy:\n', e)
return False
if not d.has_section(name):
d.add_section(name)
if not blank:
d.set(name, option, value)
else:
d.set(name, option, '')
return True
def transfer_cfg(self, extName, confparse, keys=True):
""" Transfer the configuration from the extension
into IDLE's configuration. Returns True if successful. """
if confparse is None:
return False
# copy the user extension configuration in IDLE
retval = self.copy_options(extName, idleConf.userCfg, confparse)
if 0: # DEVELOPERS - this takes a long time to process
# Report Any keybinding conflicts the user extension may have
keyset = idleConf.GetCurrentKeySet()
name_cfg = extName+'_cfgBindings'
optionlist = confparse.GetOptionList(name_cfg)
for option in optionlist:
b = '<<%s>>' % option
value = confparse.get(name_cfg, option)
if value == '<Control-Key-l>': continue # WORKAROUND: skip clear window binding
for event, binding in list(keyset.items()):
if value in binding and event != b and value:
print('\n Warning: [%s] has an event binding conflict with' % name_cfg)
print(' ', event, value)
# idleConf.GetExtensionBindings pulls only from the default configuration.
# Must transfer bindings to defaultCfg dictionary instead.
if keys:
self.copy_options(extName+'_cfgBindings', idleConf.defaultCfg,
confparse)
return retval
def load_idlex_extensions(self, userExt=None):
""" Load extensions. Returns number of extensions loaded. """
if userExt is None:
userExt = self.IDLEX_EXTENSIONS
# get already-saved settings
d = idleConf.GetUserCfgDir()
usercfgfile = os.path.join(d, 'idlex-config-extensions.cfg')
if os.path.isfile(usercfgfile):
U = open(usercfgfile).read()
else:
U = ''
count = 0
userConfParser = make_config_parser(U)
key_isdefault = idleConf.GetOption('main','Keys','default', type="bool")
for extName in userExt:
if self.reload_cfg(extName):
count += 1
# transfer already-saved settings, otherwise IDLE forgets them
# when idleConf.SaveUserCfgFiles is called from within IDLE. Bug?
self.transfer_cfg(extName, userConfParser,
keys=not key_isdefault) # Overwrite defaults with user config
idleConf.SaveUserCfgFiles()
return count
def reload_cfg(self, extName):
# get the default configuration for the individual extension
cfg = self.load_extension_cfg(extName)
if cfg is None:
return False
# shove the conf string into a ConfigParse object
extConfParser = make_config_parser(cfg)
if extConfParser is None:
print('\n Unable to parse configuration for %s' % extName)
return False
# transfer the configuration to IDLE
if not self.transfer_cfg(extName, extConfParser, keys=True):
print('\n Unable to transfer configuration for %s' % extName)
return False
return True
try:
from . import extensions
except (ImportError, ValueError) as err:
import extensions
path = extensions.__file__
extensionManager = ExtensionManager(path)
| gpl-3.0 | -8,490,886,088,189,031,000 | 34.02214 | 120 | 0.610789 | false | 4.443352 | true | false | false |
blueshed/blueshed-py | src/blueshed/utils/pika_broadcaster.py | 1 | 12533 | '''
Created on 23 Sep 2015
@author: peterb
'''
from pika import adapters
import pika
import logging
import os
class PikaBroadcaster(object):
"""This is an example consumer that will handle unexpected interactions
with RabbitMQ such as channel and connection closures.
If RabbitMQ closes the connection, it will reopen it. You should
look at the output, as there are limited reasons why the connection may
be closed, which usually are tied to permission related issues or
socket timeouts.
If the channel is closed, it will indicate a problem with one of the
commands that were issued and that should surface in the output as well.
"""
EXCHANGE = 'chat-messages'
EXCHANGE_TYPE = 'fanout'#'topic'
QUEUE = 'chat'
ROUTING_KEY = ''
def __init__(self, amqp_url=None):
"""Create a new instance of the consumer class, passing in the AMQP
URL used to connect to RabbitMQ.
:param str amqp_url: The AMQP url to connect with
"""
self._clients = None
self._connection = None
self._channel = None
self._closing = False
self._consumer_tag = None
self._url = amqp_url
def set_clients(self, clients):
"""used to call clients"""
self._clients = clients
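    # Illustrative wiring (URL and variable names are assumptions, not part of
    # this module); the URL is whatever pika.URLParameters accepts:
    #
    #   broadcaster = PikaBroadcaster('amqp://guest:guest@localhost:5672/%2F')
    #   broadcaster.set_clients(connected_websocket_clients)
    #   broadcaster.connect()   # opens a TornadoConnection on the current IOLoop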
def connect(self):
"""This method connects to RabbitMQ, returning the connection handle.
When the connection is established, the on_connection_open method
will be invoked by pika.
:rtype: pika.SelectConnection
"""
logging.info('Connecting to %s', self._url)
return adapters.TornadoConnection(pika.URLParameters(self._url),
self.on_connection_open)
def close_connection(self):
"""This method closes the connection to RabbitMQ."""
logging.info('Closing connection')
self._connection.close()
def add_on_connection_close_callback(self):
"""This method adds an on close callback that will be invoked by pika
when RabbitMQ closes the connection to the publisher unexpectedly.
"""
logging.info('Adding connection close callback')
self._connection.add_on_close_callback(self.on_connection_closed)
def on_connection_closed(self, connection, reply_code, reply_text):
"""This method is invoked by pika when the connection to RabbitMQ is
closed unexpectedly. Since it is unexpected, we will reconnect to
RabbitMQ if it disconnects.
:param pika.connection.Connection connection: The closed connection obj
:param int reply_code: The server provided reply_code if given
:param str reply_text: The server provided reply_text if given
"""
self._channel = None
if self._closing:
self._connection.ioloop.stop()
else:
logging.warning('Connection closed, reopening in 5 seconds: (%s) %s',
reply_code, reply_text)
self._connection.add_timeout(5, self.reconnect)
def on_connection_open(self, unused_connection):
"""This method is called by pika once the connection to RabbitMQ has
been established. It passes the handle to the connection object in
case we need it, but in this case, we'll just mark it unused.
:type unused_connection: pika.SelectConnection
"""
logging.info('Connection opened')
self._connection = unused_connection
self.add_on_connection_close_callback()
self.open_channel()
def reconnect(self):
"""Will be invoked by the IOLoop timer if the connection is
closed. See the on_connection_closed method.
"""
if not self._closing:
# Create a new connection
self._connection = self.connect()
def add_on_channel_close_callback(self):
"""This method tells pika to call the on_channel_closed method if
RabbitMQ unexpectedly closes the channel.
"""
logging.info('Adding channel close callback')
self._channel.add_on_close_callback(self.on_channel_closed)
def on_channel_closed(self, channel, reply_code, reply_text):
"""Invoked by pika when RabbitMQ unexpectedly closes the channel.
Channels are usually closed if you attempt to do something that
violates the protocol, such as re-declare an exchange or queue with
different parameters. In this case, we'll close the connection
to shutdown the object.
:param pika.channel.Channel: The closed channel
:param int reply_code: The numeric reason the channel was closed
:param str reply_text: The text reason the channel was closed
"""
logging.warning('Channel %i was closed: (%s) %s',
channel, reply_code, reply_text)
self._connection.close()
def on_channel_open(self, channel):
"""This method is invoked by pika when the channel has been opened.
The channel object is passed in so we can make use of it.
Since the channel is now open, we'll declare the exchange to use.
:param pika.channel.Channel channel: The channel object
"""
logging.info('Channel opened')
self._channel = channel
self.add_on_channel_close_callback()
self.setup_exchange(self.EXCHANGE)
def setup_exchange(self, exchange_name):
"""Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC
command. When it is complete, the on_exchange_declareok method will
be invoked by pika.
:param str|unicode exchange_name: The name of the exchange to declare
"""
logging.info('Declaring exchange %s', exchange_name)
self._channel.exchange_declare(self.on_exchange_declareok,
exchange_name,
self.EXCHANGE_TYPE)
def on_exchange_declareok(self, unused_frame):
"""Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC
command.
:param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame
"""
logging.info('Exchange declared')
self.setup_queue(self.QUEUE)
def setup_queue(self, queue_name):
"""Setup the queue on RabbitMQ by invoking the Queue.Declare RPC
command. When it is complete, the on_queue_declareok method will
be invoked by pika.
:param str|unicode queue_name: The name of the queue to declare.
"""
logging.info('Declaring queue %s', queue_name)
self._channel.queue_declare(self.on_queue_declareok, exclusive=True)
def on_queue_declareok(self, method_frame):
"""Method invoked by pika when the Queue.Declare RPC call made in
setup_queue has completed. In this method we will bind the queue
and exchange together with the routing key by issuing the Queue.Bind
RPC command. When this command is complete, the on_bindok method will
be invoked by pika.
:param pika.frame.Method method_frame: The Queue.DeclareOk frame
"""
self.QUEUE = method_frame.method.queue
logging.info('Binding %s to %s with %s',
self.EXCHANGE, self.QUEUE, self.ROUTING_KEY)
self._channel.queue_bind(self.on_bindok, self.QUEUE,
self.EXCHANGE, self.ROUTING_KEY)
def add_on_cancel_callback(self):
"""Add a callback that will be invoked if RabbitMQ cancels the consumer
for some reason. If RabbitMQ does cancel the consumer,
on_consumer_cancelled will be invoked by pika.
"""
logging.info('Adding consumer cancellation callback')
self._channel.add_on_cancel_callback(self.on_consumer_cancelled)
def on_consumer_cancelled(self, method_frame):
"""Invoked by pika when RabbitMQ sends a Basic.Cancel for a consumer
receiving messages.
:param pika.frame.Method method_frame: The Basic.Cancel frame
"""
logging.info('Consumer was cancelled remotely, shutting down: %r',
method_frame)
if self._channel:
self._channel.close()
def acknowledge_message(self, delivery_tag):
"""Acknowledge the message delivery from RabbitMQ by sending a
Basic.Ack RPC method for the delivery tag.
:param int delivery_tag: The delivery tag from the Basic.Deliver frame
"""
logging.info('Acknowledging message %s', delivery_tag)
self._channel.basic_ack(delivery_tag)
def on_message(self, unused_channel, basic_deliver, properties, body):
"""Invoked by pika when a message is delivered from RabbitMQ. The
channel is passed for your convenience. The basic_deliver object that
is passed in carries the exchange, routing key, delivery tag and
a redelivered flag for the message. The properties passed in is an
instance of BasicProperties with the message properties and the body
is the message that was sent.
:param pika.channel.Channel unused_channel: The channel object
        :param pika.Spec.Basic.Deliver basic_deliver: The Basic.Deliver method frame
        :param pika.Spec.BasicProperties properties: The message properties
:param str|unicode body: The message body
"""
logging.info('Received message # %s from %s: %s',
basic_deliver.delivery_tag, properties.app_id, body)
if self._clients:
            for client in self._clients:
client.write_message(body)
self.acknowledge_message(basic_deliver.delivery_tag)
def on_cancelok(self, unused_frame):
"""This method is invoked by pika when RabbitMQ acknowledges the
cancellation of a consumer. At this point we will close the channel.
This will invoke the on_channel_closed method once the channel has been
closed, which will in-turn close the connection.
:param pika.frame.Method unused_frame: The Basic.CancelOk frame
"""
logging.info('RabbitMQ acknowledged the cancellation of the consumer')
self.close_channel()
def stop_consuming(self):
"""Tell RabbitMQ that you would like to stop consuming by sending the
Basic.Cancel RPC command.
"""
if self._channel:
logging.info('Sending a Basic.Cancel RPC command to RabbitMQ')
self._channel.basic_cancel(self.on_cancelok, self._consumer_tag)
def start_consuming(self):
"""This method sets up the consumer by first calling
add_on_cancel_callback so that the object is notified if RabbitMQ
cancels the consumer. It then issues the Basic.Consume RPC command
which returns the consumer tag that is used to uniquely identify the
consumer with RabbitMQ. We keep the value to use it when we want to
cancel consuming. The on_message method is passed in as a callback pika
will invoke when a message is fully received.
"""
logging.info('Issuing consumer related RPC commands')
self.add_on_cancel_callback()
self._consumer_tag = self._channel.basic_consume(self.on_message,
self.QUEUE)
def on_bindok(self, unused_frame):
"""Invoked by pika when the Queue.Bind method has completed. At this
point we will start consuming messages by calling start_consuming
which will invoke the needed RPC commands to start the process.
:param pika.frame.Method unused_frame: The Queue.BindOk response frame
"""
logging.info('Queue bound')
self.start_consuming()
def close_channel(self):
"""Call to close the channel with RabbitMQ cleanly by issuing the
Channel.Close RPC command.
"""
logging.info('Closing the channel')
self._channel.close()
def open_channel(self):
"""Open a new channel with RabbitMQ by issuing the Channel.Open RPC
command. When RabbitMQ responds that the channel is open, the
on_channel_open callback will be invoked by pika.
"""
logging.info('Creating a new channel')
self._connection.channel(on_open_callback=self.on_channel_open)
    def post(self, msg):
        """Publish a message to the exchange with the configured routing key.

        :param str|unicode msg: The message body to publish
        """
        if self._channel:
            self._channel.basic_publish(self.EXCHANGE, routing_key=self.ROUTING_KEY, body=msg)
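
# --- Hedged usage sketch (added by the editor; not part of the original module) ---
# The consumer above is driven by pika's TornadoConnection, so it is normally
# started by creating an instance and running the Tornado IOLoop.  The class
# name "Consumer" and the AMQP URL below are placeholders for whatever the
# enclosing class and broker actually are (assumes `from tornado import ioloop`):
#
#   consumer = Consumer('amqp://guest:guest@localhost:5672/%2F')
#   consumer.connect()
#   ioloop.IOLoop.instance().start()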
| mit | 3,863,031,073,489,841,000 | 37.804954 | 92 | 0.646533 | false | 4.318746 | false | false | false |
james-nichols/dtrw | viral_penetration/fit_EDTA_sites.py | 1 | 7076 | #!/usr/local/bin/python3
# Libraries are in parent directory
import sys
sys.path.append('../')
import numpy as np
import scipy
import time, csv, math, collections
from dtrw import *
# Local fit functions for a variety of scripts
from fit_functions import *
import mpmath
import scipy.integrate
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import animation
from matplotlib import cm
from matplotlib.backends.backend_pdf import PdfPages
import pdb
output_pdf = sys.argv[1]
def append_string_as_int(array, item):
try:
array = np.append(array, np.int32(item))
except ValueError:
array = np.append(array, np.nan)
return array
def append_string_as_float(array, item):
try:
array = np.append(array, np.float64(item))
except ValueError:
array = np.append(array, np.nan)
return array
labels = []
image_index = []
cervix = []
EDTA = []
p24 = np.array([], dtype=np.int32)
virions = np.array([], dtype=np.int32)
penetrators = np.array([], dtype=np.int32)
depth = np.array([], dtype=np.float64)
no_mucous_data = 'SMEG_Data/NeuraminidaseNOBAFLinear.csv'
with_mucous_data = 'SMEG_Data/PenetrationMLoadnewestOMITAngelafixed.csv'
EDTA_data = 'SMEG_Data/EctoCervixEDTABaLAngelafixed.csv'
with open(EDTA_data, 'r', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
labels = next(reader)
for row in reader:
image_index.append(row[0])
cervix.append(row[1])
EDTA.append(row[2])
p24 = append_string_as_int(p24, row[3])
virions = append_string_as_int(virions, row[4])
penetrators = append_string_as_int(penetrators, row[5])
depth = append_string_as_float(depth, row[6])
count = collections.Counter(image_index)
image_yes = collections.Counter([image_index[i] for i in range(len(image_index)) if EDTA[i] == 'Y'])
image_no = collections.Counter([image_index[i] for i in range(len(image_index)) if EDTA[i] == 'N'])
# The number of sites we analyse...
number_of_sites = 10
# Pick out the most common sites
sites = count.most_common(number_of_sites)
sites_yes = image_yes.most_common(number_of_sites)
sites_no = image_no.most_common(number_of_sites)
pp = PdfPages(output_pdf + '{0}.pdf'.format(sys.argv[2]))
#for site in [sites_yes, sites_no]:
# for site in sites:
if sys.argv[3] == 'Y':
site = sites_yes[int(sys.argv[2])]
else:
site = sites_no[int(sys.argv[2])]
# All locations of this particular image
img_loc = [i for i,x in enumerate(image_index) if x == site[0]]
depth_loc = depth[img_loc]
# Ok, data loaded, now lets get to business. Prune zeros and NaNs from depth
# (may want to infact double check a 0.0 depth is valid if it's seen as a penetrator)
nz_depth = depth_loc[np.nonzero(depth_loc)]
nz_depth = nz_depth[~np.isnan(nz_depth)]
num_depth_bins = 20
# Depth Histogram
depth_hist, depth_bins = np.histogram(nz_depth, num_depth_bins, density=True)
bin_cent = (depth_bins[1:]+depth_bins[:-1])/2.0
# Depth based survival function - sometimes a better function to fit to, and we certainly don't lose resolution
surv_func = scipy.stats.itemfreq(nz_depth-1.0)
surv_func_x = surv_func[:,0]
surv_func_y = 1.0 - np.insert(np.cumsum(surv_func[:,1]), 0, 0.0)[:-1] / surv_func[:,1].sum()
if surv_func_x[0] != 0.0:
surv_func_x = np.insert(surv_func_x, 0, 0.0)
surv_func_y = np.insert(surv_func_y, 0, 1.0)
T = 4.0
L = surv_func_x.max() #nz_depth.max()
dX = L / 100.0
D_alpha = 20.0
alpha = 0.75
# Last minimisation got close to:
#diff_fit = [ 5.28210775, 0.95847065]
#subdiff_fit = [ 15.07811124, 0.55, 0.99997347]
xs = np.arange(0.0, L+dX, dX)
#
# FIT Diffusion model - analytic
#
diff_init_params = [D_alpha]
diff_fit = scipy.optimize.fmin_slsqp(lsq_diff, diff_init_params, args=(T, surv_func_x, surv_func_y), \
bounds=[(0.0, np.Inf)], epsilon = 1.0e-8, acc=1.0e-6, full_output=True)
diff_sq_err = diff_fit[1]
diff_fit = diff_fit[0]
print('Diffusion fit parameters:', diff_fit)
diff_analytic_soln_survival = produce_diff_soln_survival(diff_fit, T, xs)
diff_analytic_soln = produce_diff_soln(diff_fit, T, xs)
#
# FIT Subdiffusion model - numerical (DTRW algorithm)
#
#history_truncation = 0
# New regime: start at diff parameter fit
#subdiff_init_params = [diff_fit[0], alpha]
#subdiff_fit = scipy.optimize.fmin_slsqp(lsq_subdiff, subdiff_init_params, args=(T, 4.0 * L, dX, surv_func_x, surv_func_y, history_truncation), \
# bounds=[(0.0, 50.0),(0.51, 1.0)], epsilon = 1.0e-3, acc=1.0e-6, full_output=True)
#subdiff_sq_err = subdiff_fit[1]
#subdiff_fit = subdiff_fit[0]
#print 'Subdiffusion fit parameters:', subdiff_fit
#dtrw_sub_soln = produce_subdiff_soln(subdiff_fit, T, 4.0*L, dX)
#dtrw_sub_soln_survival = produce_subdiff_soln_survival(subdiff_fit, T, 4.0*L, dX)
#
# FIT Subdiffusion model - analytic
#
subdiff_anal_init_params = [D_alpha]
subdiff_anal_fit = scipy.optimize.fmin_slsqp(lsq_subdiff_analytic, subdiff_anal_init_params, args=(T, surv_func_x, surv_func_y), \
bounds=[(0.0, np.Inf)], epsilon = 1.0e-3, acc=1.0e-6, full_output=True)
subdiff_anal_sq_err = subdiff_anal_fit[1]
subdiff_anal_fit = subdiff_anal_fit[0]
print('Subdiffusion analytic fit parameters:', subdiff_anal_fit)
anal_sub_soln = produce_subdiff_analytic_soln(subdiff_anal_fit, T, xs)
anal_sub_soln_survival = produce_subdiff_analytic_survival(subdiff_anal_fit, T, xs)
#
# FIT Exponential... for fun
#
slope, offset = np.linalg.lstsq(np.vstack([surv_func_x, np.ones(len(surv_func_x))]).T, np.log(surv_func_y).T)[0]
exp_fit = np.exp(offset + xs * slope)
#
# PLOT IT ALL
#
fig = plt.figure(figsize=(16,8))
ax1 = fig.add_subplot(1, 2, 1)
bar1, = ax1.plot(surv_func_x, surv_func_y, 'b.-')
#line1, = ax1.plot(xs, dtrw_sub_soln_survival.T[:xs.size], 'r.-')
line2, = ax1.plot(xs, anal_sub_soln_survival, 'y.-')
line3, = ax1.plot(xs, diff_analytic_soln_survival, 'g.-')
line4, = ax1.plot(xs, exp_fit, 'b')
ax1.set_title('Survival function vs fits, ' + site[0] + ', {0} virions'.format(site[1]))
ax2 = fig.add_subplot(1, 2, 2)
ax2.semilogy(surv_func_x, surv_func_y, 'b.-')
#ax2.semilogy(xs, dtrw_sub_soln_survival.T[:xs.size], 'r.-')
ax2.semilogy(xs, anal_sub_soln_survival, 'y.-')
ax2.semilogy(xs, diff_analytic_soln_survival, 'g.-')
ax2.semilogy(xs, exp_fit, 'b')
ax2.set_title('Logarithm of survival function vs fits, ' + site[0] + ', {0} virions'.format(site[1]))
#plt.legend([bar1, line1, line2, line3, line4], ["Viral survival func", "Subdiffusion fit, alpha={0:.2f}, D_alpha={1:.2f}, sq_err={2:.4f}".format(subdiff_fit[1],subdiff_fit[0],subdiff_sq_err), \
plt.legend([bar1, line2, line3, line4], ["Viral survival func", \
"Analytic subdiff fit, alpha=1/2, D_alpha={0:.2f}, sq_err={1:.4f}".format(subdiff_anal_fit[0], subdiff_anal_sq_err), \
"Diffusion fit, D_alpha={0:.2f}, sq_err={1:.2f}".format(diff_fit[0], diff_sq_err), "Exponential fit"], loc=3)
pp.savefig()
pp.close()
#plt.show()
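
# Hedged example invocation (added by the editor; argument values are placeholders):
# argv[1] is the output PDF prefix, argv[2] the site index, argv[3] 'Y'/'N' for EDTA.
#
#   python3 fit_EDTA_sites.py EDTA_fit 0 Y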
| gpl-2.0 | 5,539,513,225,448,222,000 | 35.663212 | 194 | 0.663228 | false | 2.548073 | false | false | false |
d3str0h4x/C.Y.B.O.R.G | scripts/shellowned.py | 1 | 4885 | #!/usr/bin/python3.5
#-*- coding: utf-8 -*-
# Task automation on unix systems
# Non-Commercial Purposes License
# DO NOT REMOVE THE CREDITS!
# Open Source script | Author: d3str0 | Telegram: @phoenix_burning
# See more of my scripts on github: https://github.com/d3str0h4x
import time
import os
import subprocess
import sys
import socket
banner = '''
███████╗██╗ ██╗███████╗██╗ ██╗ ██████╗ ██╗ ██╗███╗ ██╗███████╗██████╗
██╔════╝██║ ██║██╔════╝██║ ██║ ██╔═████╗██║ ██║████╗ ██║██╔════╝██╔══██╗
███████╗███████║█████╗ ██║ ██║ ██║██╔██║██║ █╗ ██║██╔██╗ ██║█████╗ ██║ ██║
╚════██║██╔══██║██╔══╝ ██║ ██║ ████╔╝██║██║███╗██║██║╚██╗██║██╔══╝ ██║ ██║
███████║██║ ██║███████╗███████╗███████╗╚██████╔╝╚███╔███╔╝██║ ╚████║███████╗██████╔╝
╚══════╝╚═╝ ╚═╝╚══════╝╚══════╝╚══════╝ ╚═════╝ ╚══╝╚══╝ ╚═╝ ╚═══╝╚══════╝╚═════╝
'''
skull = ''' .xx"""" """$$$$be.
-" ^""**$$$e.
." ENJOY!! '$$$c
/ Coded by d3str0 "4$$b
d 3 $$$$
$ * .$$$$$$
.$ ^c $$$$$e$$$$$$$$.
d$L 4. 4$$$$$$$$$$$$$$b
$$$$b ^ceeeee. 4$$ECL.F*$$$$$$$
e$""=. $$$$P d$$$$F $ $$$$$$$$$- $$$$$$
z$$b. ^c 3$$$F "$$$$b $"$$$$$$$ $$$$*" .=""$c
4$$$$L $$P" "$$b .$ $$$$$...e$$ .= e$$$.
^*$$$$$c %.. *c .. $$ 3$$$$$$$$$$eF zP d$$$$$
"**$$$ec " %ce"" $$$ $$$$$$$$$$* .r" =$$$$P""
"*$b. "c *$e. *** d$$$$$"L$$ .d" e$$***"
^*$$c ^$c $$$ 4J$$$$$% $$$ .e*".eeP"
"$$$$$$"'$=e....$*$$**$cz$$" "..d$*"
"*$$$ *=%4.$ L L$ P3$$$F $$$P"
"$ "%*ebJLzb$e$$$$$b $P"
%.. 4$$$$$$$$$$ "
$$$e z$$$$$$$$$$%
"*$c "$$$$$$$P"
."""*$$$$$$$$bc
.-" .$***$$$"""*e.
.-" .e$" "*$c ^*b.
.=*"""" .e$*" "*bc "*$e..
.$" .z*" ^*$e. "*****e.
$$ee$c .d" "*$. 3.
^*$E")$..$" * .ee==d%
$.d$$$* * J$$$e*
""""" "$$$"
'''
green = '\033[1;32m'
blue = '\033[34m'
purple = '\033[35m'
red = '\033[31m'
options = '''
1 Metasploit
2 Neofetch
3 Editar repositorios
4 Atualizar sistema
1337 INFO
3301 HELP
99 sair'''
sep = "-"
info = '''
Projeto: Shellowned
Autor: d3str0
Telegram: @phoenix_burning
Veja mais dos meus scripts no github: https://github.com/d3str0h4x'''
description = "Automação_de_comandos unix\n"
help = (green+"type '3301' for tutorial")
time.sleep(2)
def init():
os.system('clear')
print(green+banner)
time.sleep(3)
print(red+skull)
time.sleep(2)
os.system('clear')
print(purple+description)
print(red+'---------------------------------------')
print(blue+help)
print(red+'---------------------------------------')
while True:
print(blue+options)
y = input(purple+'Escolha uma opção: ')
if y == '1':
os.system('clear')
print(green+'Executando Metasploit Framework..')
os.system('sudo msfconsole')
elif y == '2':
os.system('clear')
os.system('neofetch')
elif y == '3':
print(green+'Abrindo lista de repositorios..')
time.sleep(2)
os.system('sudo nano /etc/apt/sources.list')
elif y == '4':
print(green+'Atualizando seu sistema..')
os.system('sudo apt-get update -y && sudo apt-get upgrade -y')
elif y == '1337':
print(green+sep+info)
elif y == '3301':
os.system('clear')
print('Escolha uma opção inserindo os numeros correspodentes')
print(green+"Para sair digite '99'")
        elif y == '99':
            break
        else:
            print(red+'invalid option')
    os.system('exit')
init()
| gpl-3.0 | -4,639,166,577,025,929,000 | 30.732283 | 85 | 0.339206 | false | 2.301542 | false | false | false |
supriyasingh01/github_basics | Internetworking Distributed Project/finalProject/ovs/pox-master/pox/lib/util.py | 4 | 10875 | # Copyright 2011 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
Various utility functions
"""
import traceback
import struct
import sys
import os
import time
import socket
#FIXME: ugh, why can't I make importing pox.core work here?
import logging
log = logging.getLogger("util")
class DirtyList (list):
#TODO: right now the callback may be called more often than needed
# and it may not be called with good names/parameters.
# All you can really rely on is that it will be called in
# some way if something may have changed.
def __init__ (self, *args, **kw):
list.__init__(self, *args, **kw)
self.dirty = False
self.callback = None
def __setslice__ (self, k, v):
#TODO: actually check for change
self._smudge('__setslice__', k, v)
list.__setslice__(self, k, v)
def __delslice__ (self, k):
#TODO: actually check for change
self._smudge('__delslice__', k, None)
list.__delslice__(self, k)
def append (self, v):
self._smudge('append', None, v)
list.append(self, v)
def extend (self, v):
self._smudge('extend', None, v)
list.extend(self, v)
  def insert (self, i, v):
    self._smudge('insert', i, v)
    list.insert(self, i, v)
def pop (self, i=-1):
self._smudge('pop', i, None)
list.pop(self, i)
def remove (self, v):
if v in self:
self._smudge('remove', None, v)
list.remove(self, v)
def reverse (self):
if len(self):
self._smudge('reverse', None, None)
list.reverse(self)
def sort (self, *arg, **kw):
#TODO: check for changes?
self._smudge('sort', None, None)
list.sort(self, *arg, **kw)
def __setitem__ (self, k, v):
if isinstance(k, slice):
#TODO: actually check for change
self._smudge('__setitem__slice',k,v)
elif self[k] != v:
self._smudge('__setitem__',k,v)
list.__setitem__(self, k, v)
def __delitem__ (self, k):
list.__delitem__(self, k)
if isinstance(k, slice):
#TODO: actually check for change
      self._smudge('__delitem__slice', k, None)
else:
self._smudge('__delitem__', k, None)
def _smudge (self, reason, k, v):
if self.callback:
if self.callback(reason, k, v) is not True:
self.dirty = True
else:
self.dirty = True
class DirtyDict (dict):
"""
A dict that tracks whether values have been changed shallowly.
If you set a callback, it will be called when the value changes, and
passed three values: "add"/"modify"/"delete", key, value
"""
def __init__ (self, *args, **kw):
dict.__init__(self, *args, **kw)
self.dirty = False
self.callback = None
def _smudge (self, reason, k, v):
if self.callback:
if self.callback(reason, k, v) is not True:
self.dirty = True
else:
self.dirty = True
def __setitem__ (self, k, v):
if k not in self:
self._smudge('__setitem__add',k,v)
elif self[k] != v:
self._smudge('__setitem__modify',k,v)
dict.__setitem__(self, k, v)
def __delitem__ (self, k):
self._smudge('__delitem__', k, None)
dict.__delitem__(self, k)
def set_extend (l, index, item, emptyValue = None):
"""
Adds item to the list l at position index. If index is beyond the end
of the list, it will pad the list out until it's large enough, using
emptyValue for the new entries.
"""
if index >= len(l):
    l += ([emptyValue] * (index - len(l) + 1))
l[index] = item
def strToDPID (s):
"""
Convert a DPID in the canonical string form into a long int.
"""
s = s.replace("-", "").split("|", 2)
a = int(s[0], 16)
b = 0
if len(s) == 2:
b = int(s[1])
return a | (b << 48)
def dpidToStr (dpid, alwaysLong = False):
"""
Convert a DPID from a long into into the canonical string form.
"""
""" In flux. """
if type(dpid) is long or type(dpid) is int:
# Not sure if this is right
dpid = struct.pack('!Q', dpid)
assert len(dpid) == 8
r = '-'.join(['%02x' % (ord(x),) for x in dpid[2:]])
if alwaysLong or dpid[0:2] != (b'\x00'*2):
r += '|' + str(struct.unpack('!H', dpid[0:2])[0])
return r
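
# Example round trip for the two helpers above (illustrative):
#   dpidToStr(1)                    -> '00-00-00-00-00-01'
#   strToDPID('00-00-00-00-00-01')  -> 1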
def assert_type(name, obj, types, none_ok=True):
"""
Assert that a parameter is of a given type.
Raise an Assertion Error with a descriptive error msg if not.
name: name of the parameter for error messages
obj: parameter value to be checked
types: type or list or tuple of types that is acceptable
none_ok: whether 'None' is an ok value
"""
if obj is None:
if none_ok:
return True
else:
raise AssertionError("%s may not be None" % name)
if not isinstance(types, (tuple, list)):
types = [ types ]
for cls in types:
if isinstance(obj, cls):
return True
allowed_types = "|".join(map(lambda x: str(x), types))
stack = traceback.extract_stack()
stack_msg = "Function call %s() in %s:%d" % (stack[-2][2], stack[-3][0], stack[-3][1])
type_msg = "%s must be instance of %s (but is %s)" % (name, allowed_types , str(type(obj)))
raise AssertionError(stack_msg + ": " + type_msg)
def initHelper (obj, kw):
"""
Inside a class's __init__, this will copy keyword arguments to fields
of the same name. See libopenflow for an example.
"""
for k,v in kw.iteritems():
if not hasattr(obj, k):
raise TypeError(obj.__class__.__name__ + " constructor got "
+ "unexpected keyword argument '" + k + "'")
setattr(obj, k, v)
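
# Illustrative sketch (hypothetical class, not from libopenflow) of how
# initHelper is meant to be used:
#
#   class Point (object):
#     def __init__ (self, **kw):
#       self.x = 0
#       self.y = 0
#       initHelper(self, kw)   # copies x=..., y=... keyword args onto fields
#
#   Point(x=3, y=4)            # .x == 3, .y == 4
#   Point(z=1)                 # raises TypeError: unexpected keyword argument 'z'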
def makePinger ():
"""
A pinger is basically a thing to let you wake a select().
On Unix systems, this makes a pipe pair. But on Windows, select() only
works with sockets, so it makes a pair of connected sockets.
"""
class PipePinger (object):
def __init__ (self, pair):
self._w = pair[1]
self._r = pair[0]
assert os is not None
def ping (self):
if os is None: return #TODO: Is there a better fix for this?
os.write(self._w, ' ')
def fileno (self):
return self._r
def pongAll (self):
#TODO: make this actually read all
os.read(self._r, 1024)
def pong (self):
os.read(self._r, 1)
def __del__ (self):
try:
os.close(self._w)
except:
pass
try:
os.close(self._r)
except:
pass
class SocketPinger (object):
def __init__ (self, pair):
self._w = pair[1]
self._r = pair[0]
def ping (self):
self._w.send(' ')
def pong (self):
self._r.recv(1)
def pongAll (self):
#TODO: make this actually read all
self._r.recv(1024)
def fileno (self):
return self._r.fileno()
#return PipePinger((os.pipe()[0],os.pipe()[1])) # To test failure case
if os.name == "posix":
return PipePinger(os.pipe())
#TODO: clean up sockets?
localaddress = '127.127.127.127'
startPort = 10000
import socket
import select
def tryConnect ():
l = socket.socket()
l.setblocking(0)
port = startPort
while True:
try:
l.bind( (localaddress, port) )
break
except:
port += 1
if port - startPort > 1000:
raise RuntimeError("Could not find a free socket")
l.listen(0)
r = socket.socket()
try:
r.connect((localaddress, port))
except:
import traceback
ei = sys.exc_info()
ei = traceback.format_exception_only(ei[0], ei[1])
ei = ''.join(ei).strip()
log.warning("makePinger: connect exception:\n" + ei)
return False
rlist, wlist,elist = select.select([l], [], [l], 2)
if len(elist):
log.warning("makePinger: socket error in select()")
return False
if len(rlist) == 0:
log.warning("makePinger: socket didn't connect")
return False
try:
w, addr = l.accept()
except:
return False
#w.setblocking(0)
if addr != r.getsockname():
log.info("makePinger: pair didn't connect to each other!")
return False
r.setblocking(1)
# Turn off Nagle
r.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
w.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
return (r, w)
# Try a few times
for i in range(0, 3):
result = tryConnect()
if result is not False:
return SocketPinger(result)
raise RuntimeError("Could not allocate a local socket pair")
def str_to_bool (s):
"""
Given a string, parses out whether it is meant to be True or not
"""
s = str(s).lower() # Make sure
if s in ['true', 't', 'yes', 'y', 'on', 'enable', 'enabled', 'ok',
'okay', '1', 'allow', 'allowed']:
return True
try:
r = 10
if s.startswith("0x"):
s = s[2:]
r = 16
i = int(s, r)
if i != 0:
return True
except:
pass
return False
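
# Examples (illustrative):
#   str_to_bool('yes')  -> True
#   str_to_bool('0x10') -> True   (non-zero hex literal)
#   str_to_bool('off')  -> False
#   str_to_bool('0')    -> False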
def hexdump (data):
if isinstance(data, str):
data = [ord(c) for c in data]
o = ""
def chunks (data, length):
return (data[i:i+length] for i in xrange(0, len(data), length))
def filt (c):
if c >= 32 and c <= 126: return chr(c)
return '.'
for i,chunk in enumerate(chunks(data,16)):
if i > 0: o += "\n"
o += "%04x: " % (i * 16,)
l = ' '.join("%02x" % (c,) for c in chunk)
l = "%-48s" % (l,)
l = l[:3*8-1] + " " + l[3*8:]
t = ''.join([filt(x) for x in chunk])
l += ' %-16s' % (t,)
o += l
return o
def connect_socket_with_backoff(address, port, max_backoff_seconds=32):
'''
Connect to the given address and port. If the connection attempt fails,
exponentially back off, up to the max backoff
return the connected socket, or raise an exception if the connection was unsuccessful
'''
backoff_seconds = 1
sock = None
print >>sys.stderr, "connect_socket_with_backoff(address=%s, port=%d)" % (address, port)
while True:
try:
sock = socket.socket()
sock.connect( (address, port) )
break
except socket.error as e:
print >>sys.stderr, "%s. Backing off %d seconds ..." % (str(e), backoff_seconds)
if backoff_seconds >= max_backoff_seconds:
raise RuntimeError("Could not connect to controller %s:%d" % (address, port))
else:
time.sleep(backoff_seconds)
backoff_seconds <<= 1
return sock
if __name__ == "__main__":
def cb (t,k,v): print v
l = DirtyList([10,20,30,40,50])
l.callback = cb
l.append(3)
print l
| cc0-1.0 | -2,835,207,877,191,790,000 | 25.141827 | 93 | 0.597793 | false | 3.323655 | false | false | false |
fgolemo/dataCollector | dataCollector2.py | 2 | 10401 | import csv
import glob
import re
import os
import sys
import cPickle as pickle
from featureExtractors.AbsoluteCellCountOriginal import AbsoluteCellCountOriginal
from featureExtractors.RelativeCellCountOriginal import RelativeCellCountOriginal
from featureExtractors.AbsoluteCellCountAlt import AbsoluteCellCountAlt
from featureExtractors.RelativeCellCountAlt import RelativeCellCountAlt
from featureExtractors.BasicInfo import BasicInfo
from featureExtractors.DistanceAlt import DistanceAlt
from featureExtractors.DistanceOriginal import DistanceOriginal
from featureExtractors.MutProbability import MutProbability
from featureExtractors.Lifetime import Lifetime
from featureExtractors.SizeOnAxis import SizeOnAxis
from featureExtractors.RelHeight import RelHeight
from featureExtractors.MuscleLocation import MuscleLocation
from featureExtractors.Symmetry import Symmetry
from featureExtractors.Arc import Arc
from featureExtractors.Monotony import Monotony
from featureExtractors.Gait import Gait
from featureExtractors.ShapeComplexity import ShapeComplexity
from featureExtractors.CompositionComplexity import CompositionComplexity
from helpers.config import PathConfig
__author__ = 'meta'
docString = """ DataCollector 2 main script (rewrite of the original)
This script can be run standalone with 2 optional command line parameters:
[output file name] - (string, default: 'data.csv'), this defines the filename of the CSV output that this script generates
[search pattern] - (string, default: '../EC14-Exp-*'), this defines what folders are searched. Can also be set to "null" to use the default
[limit] - (integer, default: no limit) max number of individuals to get for each experiment
[continue] - (string, default: false) if this is "continue" or "true", then the data collection will not repeat completed experiments
"""
class DataCollector2:
def __init__(self, pattern, outputFile, limit, cont):
if not pattern:
self.pattern = '../EC14-Exp-*'
else:
self.pattern = pattern
if not outputFile:
self.outputFile = 'data.csv'
else:
self.outputFile = outputFile
if not limit:
self.limit = 99999
else:
self.limit = int(limit)
if not cont:
self.cont = False
else:
self.cont = True
print "Using the following parmeters:\n" \
"pattern: {pattern}\n" \
"output file: {outfile}\n" \
"limit: {limit}\n" \
"continue: {cont}".format(
pattern=self.pattern,
outfile=self.outputFile,
limit=self.limit,
cont=self.cont
)
self.experimentsDone = []
self.rowCount = 0
self.headers = []
self.headersWritten = False
self.writer = False
self.outputFileHandle = False
self.previousPercentDone = 0
self.expNumberRegex = re.compile('([0-9]+)$')
self.featureExtractors = [
BasicInfo(),
MutProbability(),
Lifetime(),
DistanceOriginal(),
DistanceAlt(),
AbsoluteCellCountOriginal(),
RelativeCellCountOriginal(),
AbsoluteCellCountAlt(),
RelativeCellCountAlt(),
SizeOnAxis(),
RelHeight(),
MuscleLocation(),
Symmetry(),
Arc(),
Monotony(),
Gait(),
ShapeComplexity(),
CompositionComplexity()
]
self.pickleLocation = os.path.dirname(
os.path.realpath(__file__)) + os.path.sep + ".datacollector2-progress.pickle"
def getExperiments(self):
expFolders = glob.glob(self.pattern)
output = [(self.getExpNumber(os.path.basename(expFolder)),
os.path.basename(expFolder),
expFolder) for expFolder in expFolders if os.path.isdir(expFolder)]
return output
def getExpNumber(self, haystack):
m = self.expNumberRegex.search(haystack)
if m is not None:
return m.group(1)
else:
return haystack
def collectData(self):
experiments = self.getExperiments()
print "I found the following experiments: \n", [exp[0] for exp in experiments]
if self.cont:
experiments = self.filterExperimentsIfContinue(experiments)
print "Because the 'continue' flag was set, I will only parse the following\n" \
" experiments (because I think I already did the other ones before):\n", \
[exp[0] for exp in experiments]
for exp in experiments:
type = self.getType(exp)
# print exp[0],type
individuals = self.getIndividuals(exp)
print "parsing experiment {exp} (type: {type}) with {indivs} individuals".format(
exp=exp[0],
type=type,
indivs=len(individuals)
)
count = 0
for indiv in individuals[:self.limit]:
features = self.getFeatures(exp, type, indiv)
self.writeFeatures(features)
count += 1
self.printExperimentProgress(len(individuals), count)
self.saveProgress(exp)
self.closeFile()
print "wrote {} lines to {}".format(self.rowCount, self.outputFile)
def saveProgress(self, experiment):
self.experimentsDone.append(experiment)
if os.path.isfile(self.pickleLocation):
os.remove(self.pickleLocation)
pickle.dump(self.experimentsDone, open(self.pickleLocation, "wb"))
def loadProgress(self):
self.experimentsDone = pickle.load(open(self.pickleLocation, "rb"))
def filterExperimentsIfContinue(self, experiments):
self.loadProgress()
out = [experiment for experiment in experiments if experiment not in self.experimentsDone]
return out
def getIndividuals(self, experiment):
indivs = glob.glob(experiment[2] + os.path.sep + PathConfig.populationFolderNormal + os.path.sep + "*.vxa")
output = [(os.path.basename(indiv).split("_")[0], indiv) for indiv in indivs]
output.sort(key=lambda x: int(x[0]))
return output
def getType(self, experiment):
# if the alternative population DOES have a disease then the main experiment DIDN'T have a disease
if self.hasAltPopWithDisease(experiment):
if not self.hasAltPopWithoutDisease(experiment):
return "with disease"
else:
self.errorHasBothPopFiles(experiment)
# if the alternative population DOESN'T have a disease then the main experiment DID have a disease
if self.hasAltPopWithoutDisease(experiment):
if not self.hasAltPopWithDisease(experiment):
return "no disease"
else:
self.errorHasBothPopFiles(experiment)
# if neither is the case, then there are no population files for this experiment... abort
self.errorHasNoPop(experiment)
def hasAltPopWithoutDisease(self, experiment):
return self.hasAltPop(experiment, "no disease")
def hasAltPopWithDisease(self, experiment):
return self.hasAltPop(experiment, "with disease")
def hasAltPop(self, experiment, condition):
altPopPath = experiment[2] + os.path.sep + PathConfig.populationFoldersAlt[condition]
if not os.path.isdir(altPopPath):
return False
if len(os.listdir(altPopPath)) > 0:
return True
return False
def getFeatures(self, experiment, type, indiv):
output = []
for feature in self.featureExtractors:
output += feature.extract(experiment, type, indiv)
return output
def printExperimentProgress(self, total, current):
percentDone = round(100 * current * 1.0 / total)
if percentDone != self.previousPercentDone:
sys.stdout.write('{}% done\r'.format(int(percentDone)))
sys.stdout.flush()
self.previousPercentDone = percentDone
def writeFeatures(self, features):
if not self.headersWritten:
self.headers = self.getFeatureHeader()
writeOption = "wb"
if self.cont:
writeOption = "ab"
self.outputFileHandle = open(self.outputFile, writeOption)
self.writer = csv.DictWriter(self.outputFileHandle, fieldnames=self.headers)
if not self.cont:
self.writer.writeheader()
self.headersWritten = True
self.rowCount += 1
rowDict = dict(zip(self.headers, features))
self.writer.writerow(rowDict)
def closeFile(self):
if not not self.outputFileHandle:
self.outputFileHandle.close()
def getFeatureHeader(self):
output = []
for feature in self.featureExtractors:
output += feature.getCSVheader()
return output
@staticmethod
def errorHasBothPopFiles(experiment):
print "ERROR: this shouldn't happen - an experiment has alternative population files " \
"both WITH and WITHOUT disease in addition to the normal experiment traces:"
print experiment
print "...Please fix this before continuing. Exiting."
quit()
@staticmethod
def errorHasNoPop(experiment):
print "ERROR: the following experiment has no alternative population files (neither with disease nor without):"
print experiment
print "...Please fix this before continuing. Exiting."
quit()
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print docString
quit()
pattern = False
outputFile = False
limit = False
con = False
if len(sys.argv) >= 2:
outputFile = sys.argv[1]
if len(sys.argv) >= 3:
pattern = sys.argv[2]
if pattern.lower() == "null" or pattern.lower() == "false":
pattern = False
if len(sys.argv) >= 4:
limit = sys.argv[3]
if len(sys.argv) == 5:
cont = sys.argv[4]
if cont.lower() in ["cont", "continue", "c", "true", "y"]:
con = True
else:
con = False
dataCol = DataCollector2(pattern, outputFile, limit, con)
dataCol.collectData()
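
# Hedged example invocation (added by the editor; the CSV name and folder pattern
# below are placeholders -- see docString at the top of this file):
#   python dataCollector2.py data.csv '../EC14-Exp-*' 100 continue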
| apache-2.0 | -8,523,537,037,601,760,000 | 36.684783 | 139 | 0.63292 | false | 4.24704 | false | false | false |
xaratustrah/iq_suite | iqtools/tiqdata.py | 1 | 10520 | """
Class for IQ Data
TIQ format
Xaratustrah Aug-2015
"""
import os
import logging as log
import numpy as np
import xml.etree.ElementTree as et
from iqtools.iqbase import IQBase
class TIQData(IQBase):
def __init__(self, filename):
super().__init__(filename)
# Additional fields in this subclass
self.acq_bw = 0.0
self.rbw = 0.0
self.rf_att = 0.0
self.span = 0.0
self.scale = 0.0
self.header = ''
self.data_offset = 0
@property
def dictionary(self):
return {'center': self.center,
'nsamples_total': self.nsamples_total,
'fs': self.fs,
'nframes': self.nframes,
'lframes': self.lframes,
'data': self.data_array,
'nframes_tot': self.nframes_tot,
'DateTime': self.date_time,
'rf_att': self.rf_att,
'span': self.span,
'acq_bw': self.acq_bw,
'file_name': self.filename,
'rbw': self.rbw}
def __str__(self):
return \
'<font size="4" color="green">Record length:</font> {:.2e} <font size="4" color="green">[s]</font><br>'.format(
self.nsamples_total / self.fs) + '\n' + \
'<font size="4" color="green">No. Samples:</font> {} <br>'.format(self.nsamples_total) + '\n' + \
'<font size="4" color="green">Sampling rate:</font> {} <font size="4" color="green">[sps]</font><br>'.format(
self.fs) + '\n' + \
'<font size="4" color="green">Center freq.:</font> {} <font size="4" color="green">[Hz]</font><br>'.format(
self.center) + '\n' + \
'<font size="4" color="green">Span:</font> {} <font size="4" color="green">[Hz]</font><br>'.format(
self.span) + '\n' + \
'<font size="4" color="green">Acq. BW.:</font> {} <br>'.format(self.acq_bw) + '\n' + \
'<font size="4" color="green">RBW:</font> {} <br>'.format(self.rbw) + '\n' + \
'<font size="4" color="green">RF Att.:</font> {} <br>'.format(self.rf_att) + '\n' + \
'<font size="4" color="green">Date and Time:</font> {} <br>'.format(self.date_time) + '\n'
def read(self, nframes=10, lframes=1024, sframes=1):
"""Process the tiq input file.
        The following information is extracted; except for the data offset, everything else is stored in the dict. Data needs to be normalized over 50 ohm.
AcquisitionBandwidth
Frequency
File name
Data I and Q [Unit is Volt]
Data Offset
DateTime
NumberSamples
Resolution Bandwidth
RFAttenuation (it is already considered in the data scaling, no need to use this value, only for info)
Sampling Frequency
Span
Voltage Scaling
"""
self.lframes = lframes
self.nframes = nframes
self.sframes = sframes
filesize = os.path.getsize(self.filename)
log.info("File size is {} bytes.".format(filesize))
with open(self.filename) as f:
line = f.readline()
self.data_offset = int(line.split("\"")[1])
with open(self.filename, 'rb') as f:
ba = f.read(self.data_offset)
xml_tree_root = et.fromstring(ba)
for elem in xml_tree_root.iter(tag='{http://www.tektronix.com}AcquisitionBandwidth'):
self.acq_bw = float(elem.text)
for elem in xml_tree_root.iter(tag='{http://www.tektronix.com}Frequency'):
self.center = float(elem.text)
for elem in xml_tree_root.iter(tag='{http://www.tektronix.com}DateTime'):
self.date_time = str(elem.text)
for elem in xml_tree_root.iter(tag='{http://www.tektronix.com}NumberSamples'):
self.nsamples_total = int(elem.text) # this entry matches (filesize - self.data_offset) / 8) well
for elem in xml_tree_root.iter('NumericParameter'):
if 'name' in elem.attrib and elem.attrib['name'] == 'Resolution Bandwidth' and elem.attrib['pid'] == 'rbw':
self.rbw = float(elem.find('Value').text)
for elem in xml_tree_root.iter(tag='{http://www.tektronix.com}RFAttenuation'):
self.rf_att = float(elem.text)
for elem in xml_tree_root.iter(tag='{http://www.tektronix.com}SamplingFrequency'):
self.fs = float(elem.text)
for elem in xml_tree_root.iter('NumericParameter'):
if 'name' in elem.attrib and elem.attrib['name'] == 'Span' and elem.attrib['pid'] == 'globalrange':
self.span = float(elem.find('Value').text)
for elem in xml_tree_root.iter(tag='{http://www.tektronix.com}Scaling'):
self.scale = float(elem.text)
log.info("Center {0} Hz, span {1} Hz, sampling frequency {2} scale factor {3}.".format(self.center, self.span,
self.fs, self.scale))
log.info("Header size {} bytes.".format(self.data_offset))
log.info("Proceeding to read binary section, 32bit (4 byte) little endian.")
log.info('Total number of samples: {}'.format(self.nsamples_total))
log.info("Frame length: {0} data points = {1}s".format(lframes, lframes / self.fs))
self.nframes_tot = int(self.nsamples_total / lframes)
log.info("Total number of frames: {0} = {1}s".format(self.nframes_tot, self.nsamples_total / self.fs))
log.info("Start reading at offset: {0} = {1}s".format(sframes, sframes * lframes / self.fs))
log.info("Reading {0} frames = {1}s.".format(nframes, nframes * lframes / self.fs))
self.header = ba
total_n_bytes = 8 * nframes * lframes # 8 comes from 2 times 4 byte integer for I and Q
start_n_bytes = 8 * (sframes - 1) * lframes
try:
with open(self.filename, 'rb') as f:
f.seek(self.data_offset + start_n_bytes)
ba = f.read(total_n_bytes)
except:
log.error('File seems to end here!')
return
# return a numpy array of little endian 8 byte floats (known as doubles)
self.data_array = np.fromstring(ba, dtype='<i4') # little endian 4 byte ints.
# Scale to retrieve value in Volts. Augmented assignment does not work here!
self.data_array = self.data_array * self.scale
self.data_array = self.data_array.view(
dtype='c16') # reinterpret the bytes as a 16 byte complex number, which consists of 2 doubles.
log.info("Output complex array has a size of {}.".format(self.data_array.size))
# in order to read you may use: data = x.item()['data'] or data = x[()]['data'] other wise you get 0-d error
def read_samples(self, nsamples, offset=0):
"""
Read a specific number of samples
Parameters
----------
nsamples How many samples to read
offset Either start from the beginning, i.e. 0 or start at a different offset.
Returns
-------
"""
self.read_header()
assert nsamples < (self.nsamples_total - offset)
total_n_bytes = 8 * nsamples # 8 comes from 2 times 4 byte integer for I and Q
start_n_bytes = 8 * offset
try:
with open(self.filename, 'rb') as f:
f.seek(self.data_offset + start_n_bytes)
ba = f.read(total_n_bytes)
except:
log.error('File seems to end here!')
return
# return a numpy array of little endian 8 byte floats (known as doubles)
self.data_array = np.fromstring(ba, dtype='<i4') # little endian 4 byte ints.
# Scale to retrieve value in Volts. Augmented assignment does not work here!
self.data_array = self.data_array * self.scale
self.data_array = self.data_array.view(
dtype='c16') # reinterpret the bytes as a 16 byte complex number, which consists of 2 doubles.
log.info("Output complex array has a size of {}.".format(self.data_array.size))
# in order to read you may use: data = x.item()['data'] or data = x[()]['data'] other wise you get 0-d error
def read_header(self):
with open(self.filename) as f:
line = f.readline()
self.data_offset = int(line.split("\"")[1])
with open(self.filename, 'rb') as f:
ba = f.read(self.data_offset)
xml_tree_root = et.fromstring(ba)
for elem in xml_tree_root.iter(tag='{http://www.tektronix.com}AcquisitionBandwidth'):
self.acq_bw = float(elem.text)
for elem in xml_tree_root.iter(tag='{http://www.tektronix.com}Frequency'):
self.center = float(elem.text)
for elem in xml_tree_root.iter(tag='{http://www.tektronix.com}DateTime'):
self.date_time = str(elem.text)
for elem in xml_tree_root.iter(tag='{http://www.tektronix.com}NumberSamples'):
self.nsamples_total = int(elem.text) # this entry matches (filesize - self.data_offset) / 8) well
for elem in xml_tree_root.iter('NumericParameter'):
if 'name' in elem.attrib and elem.attrib['name'] == 'Resolution Bandwidth' and elem.attrib['pid'] == 'rbw':
self.rbw = float(elem.find('Value').text)
for elem in xml_tree_root.iter(tag='{http://www.tektronix.com}RFAttenuation'):
self.rf_att = float(elem.text)
for elem in xml_tree_root.iter(tag='{http://www.tektronix.com}SamplingFrequency'):
self.fs = float(elem.text)
for elem in xml_tree_root.iter('NumericParameter'):
if 'name' in elem.attrib and elem.attrib['name'] == 'Span' and elem.attrib['pid'] == 'globalrange':
self.span = float(elem.find('Value').text)
for elem in xml_tree_root.iter(tag='{http://www.tektronix.com}Scaling'):
self.scale = float(elem.text)
log.info("Center {0} Hz, span {1} Hz, sampling frequency {2} scale factor {3}.".format(self.center, self.span,
self.fs, self.scale))
log.info("Header size {} bytes.".format(self.data_offset))
self.header = ba
def save_header(self):
"""Saves the header byte array into a txt tile."""
with open(self.filename_wo_ext + '.xml', 'wb') as f3:
f3.write(self.header)
log.info("Header saved in an xml file.")
| gpl-2.0 | -1,870,584,986,021,713,000 | 45.140351 | 138 | 0.570817 | false | 3.542088 | false | false | false |
kg-bot/SupyBot | plugins/Darkfallonline/plugin.py | 1 | 5268 | ###
# Copyright (c) 2012, Valentin Lorentz
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import requests
from BeautifulSoup import BeautifulSoup
import supybot.utils as utils
import supybot.world as world
from supybot.commands import *
import supybot.plugins as plugins
import supybot.ircmsgs as ircmsgs
import supybot.schedule as schedule
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
from supybot.i18n import PluginInternationalization, internationalizeDocstring
_ = PluginInternationalization('Darkfallonline')
servers = (('US1', 'http://www.us1.darkfallonline.com/news'),
('EU1', 'http://www.eu1.darkfallonline.com/news'),
)
login = 'https://ams.darkfallonline.com/AMS/'
CHANNEL = '#progval'
def check_status(url):
    soup = BeautifulSoup(requests.get(url).text)
    status = {'players': False, 'gms': False, 'mastergms': False,
              'admins': False}
for img in soup.findAll('img'):
for type_ in status:
if img["src"].startswith("images/%s_online" % type_):
status[type_] = True
return status
def check_login_status(url):
return requests.head(url).status_code == 200
def write_errors(f):
def newf(*args, **kwargs):
try:
f(*args, **kwargs)
except Exception as e:
import traceback
traceback.print_exc(e)
return
return newf
@internationalizeDocstring
class Darkfallonline(callbacks.Plugin):
"""Add the help for "@plugin help Darkfallonline" here
This should describe *how* to use this plugin."""
threaded = True
def __init__(self, irc):
super(Darkfallonline, self).__init__(irc)
self._state = {}
for server, url in servers:
self._state[server] = check_status(url)
self._login = check_login_status(login)
schedule.addPeriodicEvent(self._announcer, 10,
'Darkfallonline_checkstatus')
def die(self):
schedule.removeEvent('Darkfallonline_checkstatus')
@write_errors
def _announcer(self):
for server, url in servers:
status = self._state[server]
new_status = check_status(url)
for irc in world.ircs:
if CHANNEL in irc.state.channels:
for type_ in new_status:
if new_status[type_] == status[type_]:
continue
elif new_status[type_]:
msg = '[%s] %s is going up' % (server,
type_.capitalize())
else:
msg = '[%s] %s is going down' % (server,
type_.capitalize())
irc.queueMsg(ircmsgs.privmsg(CHANNEL, msg))
self._state[server] = new_status
        new_login_status = check_login_status(login)
        if new_login_status != self._login:
            msg = '[login] Going up' if new_login_status else '[login] Going down'
            for irc in world.ircs:
                if CHANNEL in irc.state.channels:
                    irc.queueMsg(ircmsgs.privmsg(CHANNEL, msg))
        self._login = new_login_status
def status(self, irc, msg, args):
"""takes no arguments
Return the status of all servers."""
for server, status in self._state.items():
irc.reply('Up on %s: %s' % (server,
format('%L', [x.capitalize() for x,y in status.items() if y]) or 'none'),
private=True)
irc.reply('Login: %s' % ('on' if self._login else 'off'), private=True)
Class = Darkfallonline
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| gpl-3.0 | 742,745,633,977,731,500 | 37.173913 | 89 | 0.637054 | false | 4.096423 | false | false | false |
tabango94/PlugAndPlayFirmwareGenerator | app_firmware/views.py | 1 | 17330 | #! /usr/bin/python
# -*- coding: utf-8 -*-
import json, os
from django.shortcuts import redirect
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from app_proyecto.models import Proyecto
from app_firmware.models import Firmware, VersionFirmware
from django.conf import settings
from django_angular import General
@csrf_exempt
def insertarVersionFirmware(request):
if request.user.is_authenticated():
idioma = request.user.persona.idioma
if request.is_ajax():
try:
_firmware = Firmware.objects.get(pk=request.POST['versionFirmwarePK'])
valor = request.POST['estadoVersionFirmware']
if 'clave' in request.POST['txtSintaxisVersion'] and 'valor' in request.POST['txtSintaxisVersion']:
_versionFirmware = VersionFirmware(firmware=_firmware,
version=request.POST['txtVersionFirmware'],
propiedadesJSON="--",
modulosJSON="--",
pinesJSON="__",
archivo=request.FILES['txtIconoFirmware'],
sintaxis=request.POST['txtSintaxisVersion'],
estado=True)
if (valor == "0"):
_versionFirmware.estado = False
else:
_versionFirmware.estado = True
_versionFirmware.save()
dict = obtenerJSON(_versionFirmware, idioma)
else:
if idioma == 'ES':
mensaje = "Debe ingresar 'clave y valor en la sintaxis'"
else:
mensaje = "You must enter 'key and value in the syntax'"
dict = {
"codError": General.codError,
"mensaje": mensaje
}
except Exception as ex:
if idioma == 'ES':
mensaje = "Ha ocurrido error interno"
else:
mensaje = 'Internal error occurred'
dict = {
"codError": General.codError,
"mensaje": mensaje
}
data_json = json.dumps(dict)
            return HttpResponse(data_json, content_type="application/json")
else:
return redirect('/')
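
# Hedged note (added by the editor; not part of the original view): the view above
# expects an authenticated AJAX POST whose form data carries roughly these fields
# (names taken from the request.POST / request.FILES lookups above):
#   versionFirmwarePK        -> pk of the parent Firmware
#   txtVersionFirmware       -> version label
#   estadoVersionFirmware    -> '0' (inactive) or anything else (active)
#   txtSintaxisVersion       -> syntax string that must mention 'clave' and 'valor'
#   txtIconoFirmware         -> uploaded firmware file (request.FILES)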
@csrf_exempt
def getListVersionPorFirmware(request):
if request.user.is_authenticated():
idioma = request.user.persona.idioma
if request.is_ajax():
try:
_firmware = Firmware.objects.get(pk=request.POST['versionFirmwarePK'])
_VersionFirmware = VersionFirmware.objects.filter(firmware=_firmware)
__Firmware = {'pk': _firmware.pk,
'nombre': _firmware.nombre,
'lenguaje': _firmware.lenguaje,
'icono': _firmware.icono.url,
'proyecto': _firmware.tipoProyectoCompilador.pk,
}
_listaVersionfirmware = [{'pk': i.pk,
'version': i.version,
'archivo': i.archivo.url,
'estado': i.estado
} for i in _VersionFirmware]
dict = {
"firmware": __Firmware,
"listaVersionFirmware": _listaVersionfirmware,
"codError": General.codExito
}
except:
if idioma == 'ES':
mensaje = "Ha ocurrido error interno"
else:
mensaje = 'Internal error occurred'
dict = {
"codError": General.codError,
"mensaje": mensaje
}
data_json = json.dumps(dict)
            return HttpResponse(data_json, content_type="application/json")
else:
return redirect('/')
@csrf_exempt
def eliminarVersionFirmware(request):
if request.user.is_authenticated():
idioma = request.user.persona.idioma
if request.is_ajax():
try:
_firmware= Firmware.objects.get(pk=request.POST['FirmwarePK'])
_listaVersionFirmware = VersionFirmware.objects.filter(firmware=_firmware)
if (len(_listaVersionFirmware) == 0):
_firmware.delete()
else:
_versionFirmware = VersionFirmware.objects.get(pk=request.POST['versionFirmwarePK'])
_versionFirmware.delete()
if idioma == 'ES':
mensaje = "El firmware se ha eliminado Correctamente"
else:
mensaje = 'The firmware has been successfully deleted'
dict = {
"codError": General.codExito,
"mensaje": mensaje
}
except Exception as e:
if idioma == 'ES':
mensaje = "Ha ocurrido error interno"
else:
mensaje = 'Internal error occurred'
dict = {
"codError": General.codError,
"mensaje": mensaje
}
data_json = json.dumps(dict)
            return HttpResponse(data_json, content_type="application/json")
else:
return redirect('/')
@csrf_exempt
def postFirmware(request):
if request.user.is_authenticated():
idioma = request.user.persona.idioma
if request.is_ajax():
try:
_proyecto = Proyecto.objects.get(pk=request.POST['proyectoCompilador'])
_firmware = Firmware(
nombre=request.POST['txtNombre'],
lenguaje=request.POST['txtLenguaje'],
icono=request.FILES['txtIcono'],
tipoProyectoCompilador=_proyecto
)
_firmware.save()
valor = request.POST['estado']
_versionFirmware = VersionFirmware(firmware=_firmware,
version="Version 1",
propiedadesJSON="--",
modulosJSON="--",
pinesJSON="__",
archivo=request.FILES['txtArchivoFirmware'],
sintaxis=request.POST['txtSintaxis'],
estado=True)
if (valor == "0"):
_versionFirmware.estado = False
else:
_versionFirmware.estado = True
_versionFirmware.save()
dict = obtenerJSON(_versionFirmware, idioma)
            except Exception as ex:
                if '_versionFirmware' in locals():
                    _versionFirmware.delete()
if idioma == 'ES':
mensaje = "Ha ocurrido error interno"
else:
mensaje = 'Internal error occurred'
dict = {
"codError": "1111",
"mensaje": mensaje
}
data_json = json.dumps(dict)
        return HttpResponse(data_json, content_type="application/json")
else:
return redirect('/')
def obtenerJSON(_versionFirmware, idioma):
_urlFirmware = _versionFirmware.archivo.name
_nombreFirmware = _urlFirmware.split("/")
infile = open(os.path.join(settings.BASE_DIR, 'media', 'archivo_firmware', _nombreFirmware[1]), 'r')
auxiliarInicio = False
contadorInicio = 0
mensajeJson = "{"
auxiliarModuloInicio = False
moduloJSON = "[{"
auxiliarPinesInicio = False
pinesJSON = "{"
for line in infile:
if line[:-1] == General.etiquetaInicio:
auxiliarInicio = True
contadorInicio = contadorInicio + 1
if line[:-1] == General.etiquetaFin:
contadorInicio = contadorInicio + 1
auxiliarInicio = False
if line == General.etiquetaFin:
contadorInicio = contadorInicio + 1
if line[:-1] == General.etiquetaModuloInicio:
auxiliarModuloInicio = True
if line[:-1] == General.etiquetaModuloFin:
moduloJSON = moduloJSON[:-1] + " } , { "
auxiliarModuloInicio = False
if line == General.etiquetaModuloFin:
moduloJSON = moduloJSON[:-1] + " } , { "
auxiliarModuloInicio = False
if line[:-1] == General.etiquetaPinesInicio:
auxiliarPinesInicio = True
if line[:-1] == General.etiquetaPinesFin:
auxiliarPinesInicio = False
if line == General.etiquetaPinesFin:
auxiliarPinesInicio = False
if auxiliarPinesInicio == True:
if line[:-1] != General.etiquetaPinesInicio and line[
:-1] != General.etiquetaPinesFin and line != General.etiquetaPinesFin:
pinesJSON = pinesJSON + line[:-1] + ","
if auxiliarInicio == True:
if line[:-1] != General.etiquetaInicio and line[:-1] != General.etiquetaFin and line != General.etiquetaFin:
mensajeJson = mensajeJson + line[:-1] + ","
if auxiliarModuloInicio == True:
if line[:-1] != General.etiquetaModuloInicio and line[
:-1] != General.etiquetaModuloFin and line != General.etiquetaModuloFin:
moduloJSON = moduloJSON + line[:-1] + ","
pinesJSON = pinesJSON[:-2] + "}"
mensajeJson = mensajeJson[:-1] + "}"
moduloJSON = moduloJSON[:-4] + "]"
if contadorInicio == 2:
_versionFirmware.pinesJSON = pinesJSON
_versionFirmware.propiedadesJSON = mensajeJson
_versionFirmware.modulosJSON = moduloJSON
_versionFirmware.save()
if idioma == 'ES':
mensaje = "se ha Registrado correctamente el Firmware"
else:
mensaje = 'The Firmware has been successfully registered'
dict = {
"codError": "0000",
"mensaje": mensaje
}
else:
_versionFirmware.delete()
if idioma == 'ES':
mensaje = "No se ha registrado el Firmware, Por Favor Revise el Archivo"
else:
mensaje = 'You have not registered the Firmware, Please Check the File'
dict = {
"codError": "1111",
"mensaje": mensaje
}
return dict
@csrf_exempt
def getListFirmwareTodosActivos(request):
if request.user.is_authenticated():
idioma = request.user.persona.idioma
if request.is_ajax():
try:
_version_firmware = VersionFirmware.objects.filter(estado=True)
_listafirmware = [{'pk': i.pk,
'nombre': i.firmware.nombre,
'version': i.version,
'archivo': i.archivo.url,
'propiedadesJSON': i.propiedadesJSON,
'modulosJSON': i.modulosJSON,
'pinJSON': i.pinesJSON,
'estado': False
} for i in _version_firmware]
dict = {
"listaFirmware": _listafirmware,
"codError": "0000"
}
except:
if idioma == 'ES':
mensaje = 'No se encuentra el Nombre del firmware dentro de la Línea Comando'
else:
mensaje = 'The Firmware Name is not found inside the Command Line'
dict = {
"codError": "1111",
"mensaje": mensaje
}
data_json = json.dumps(dict)
            return HttpResponse(data_json, content_type="application/json")
else:
return redirect('/')
@csrf_exempt
def getListFirmwareActivos(request):
if request.user.is_authenticated():
idioma = request.user.persona.idioma
if request.is_ajax():
try:
_proyecto = Proyecto.objects.get(pk=request.POST['proyectoCompilador'])
_firmware = Firmware.objects.filter(tipoProyectoCompilador=_proyecto)
                _version_firmware = VersionFirmware.objects.filter(firmware__in=_firmware, estado=True)
_listafirmware = [{'pk': i.firmware.id,
'nombre': i.firmware.nombre,
'lenguaje': i.firmware.lenguaje,
'version': i.version,
'icono': i.firmware.icono.url,
'archivo': i.archivo.url,
'proyecto': i.firmware.tipoProyectoCompilador.nombreCarpeta,
'estado': i.estado,
'firmwarePK':i.firmware.pk,
'versionPK':i.pk
} for i in _version_firmware]
dict = {
"listaFirmware": _listafirmware,
"codError": "0000"
}
except:
if idioma == 'ES':
mensaje = 'No se encuentra el Nombre del firmware dentro de la Línea Comando'
else:
mensaje = 'The Firmware Name is not found inside the Command Line'
dict = {
"codError": "1111",
"mensaje": mensaje
}
data_json = json.dumps(dict)
            return HttpResponse(data_json, content_type="application/json")
else:
return redirect('/')
@csrf_exempt
def getListFirmwareMinimoActivos(request):
if request.user.is_authenticated():
idioma = request.user.persona.idioma
if request.is_ajax():
try:
_firmware = Firmware.objects.all()
_version_firmware = VersionFirmware.objects.filter(firmware=_firmware, estado=True)
_listafirmware = [{'pk': i.firmware.id,
'nombre': i.firmware.nombre,
'version': i.version,
'modulos': i.modulosJSON,
} for i in _version_firmware]
dict = {
"listaFirmware": _listafirmware,
"codError": "0000"
}
except:
if idioma == 'ES':
mensaje = 'No se encuentra el Nombre del firmware dentro de la Línea Comando'
else:
mensaje = 'The Firmware Name is not found inside the Command Line'
dict = {
"codError": "1111",
"mensaje": mensaje
}
data_json = json.dumps(dict)
            return HttpResponse(data_json, content_type="application/json")
else:
return redirect('/')
@csrf_exempt
def getListFirmwareIncluirInactivos(request):
if request.user.is_authenticated():
idioma = request.user.persona.idioma
if request.is_ajax():
try:
_proyecto = Proyecto.objects.get(pk=request.POST['proyectoCompilador'])
_firmware = Firmware.objects.filter(tipoProyectoCompilador=_proyecto)
_version_firmware = VersionFirmware.objects.filter(firmware=_firmware)
_listafirmware = [{'pk': i.firmware.id,
'nombre': i.firmware.nombre,
'lenguaje': i.firmware.lenguaje,
'version': i.version,
'icono': i.firmware.icono.url,
'archivo': i.archivo.url,
'proyecto': i.firmware.tipoProyectoCompilador.nombreCarpeta,
'estado': i.estado
} for i in _version_firmware]
dict = {
"listaFirmware": _listafirmware,
"codError": "0000"
}
except:
if idioma == 'ES':
mensaje = 'No se encuentra el Nombre del firmware dentro de la Línea Comando'
else:
mensaje = 'The Firmware Name is not found inside the Command Line'
dict = {
"codError": "1111",
"mensaje": mensaje
}
data_json = json.dumps(dict)
            return HttpResponse(data_json, content_type="application/json")
else:
return redirect('/') | gpl-3.0 | 3,974,226,344,714,469,400 | 42.644836 | 125 | 0.480203 | false | 4.262239 | false | false | false |
tjguk/networkzero | setup.py | 1 | 1317 | #
# Initially copied from:
# https://raw.githubusercontent.com/pypa/sampleproject/master/setup.py
#
from setuptools import setup, find_packages
import os
import codecs
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='networkzero',
version='1.0b1',
description='Making networking simple for teachers',
long_description=long_description,
url='https://github.com/tjguk/networkzero',
author='Tim Golden',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords='networking education',
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
install_requires=[
'pyzmq==16.0.3',
'netifaces==0.10.6',
],
)
| mit | 3,273,137,443,079,042,600 | 23.388889 | 74 | 0.621868 | false | 3.762857 | false | false | false |
mdietrichc2c/vertical-ngo | logistic_consignee/model/sale_order.py | 1 | 1365 | # -*- coding: utf-8 -*-
#
#
# Author: Yannick Vaucher, Leonardo Pistone
# Copyright 2014-2015 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp import models, fields
class SaleOrder(models.Model):
_inherit = 'sale.order'
LO_STATES = {
'cancel': [('readonly', True)],
'progress': [('readonly', True)],
'manual': [('readonly', True)],
'shipping_except': [('readonly', True)],
'invoice_except': [('readonly', True)],
'done': [('readonly', True)],
}
consignee_id = fields.Many2one(
'res.partner',
string='Consignee',
states=LO_STATES,
help="The person to whom the shipment is to be delivered.")
| agpl-3.0 | -7,047,719,712,801,923,000 | 33.125 | 77 | 0.650549 | false | 3.866856 | false | false | false |
abdullahalmasum/a2billing-1.9.4-untar | CallBack/callback-daemon-py/callback_daemon/database.py | 2 | 9261 | #!/usr/bin/env python
# vim: set expandtab shiftwidth=4:
'''
* This file is part of A2Billing (http://www.a2billing.net/)
*
* A2Billing, Commercial Open Source Telecom Billing platform,
* powered by Star2billing S.L. <http://www.star2billing.com/>
*
* @copyright Copyright (C) 2004-2009 - Star2billing S.L.
* @author Belaid Arezqui <[email protected]>
* @license http://www.fsf.org/licensing/licenses/agpl-3.0.html
* @package A2Billing
*
* Software License Agreement (GNU Affero General Public License)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
database.py
module to connect to Postgresql & Mysql Database and manipulate database information .
'''
__author__ = "Belaid Arezqui ([email protected])"
__copyright__ = "Copyright (C) Belaid Arezqui"
__revision__ = "$Id$"
__version__ = "1.00"
# ------------------------------ IMPORT ------------------------------
import sys
INTP_VER = sys.version_info[:2]
if INTP_VER < (2, 2):
raise RuntimeError("Python v.2.2 or later needed")
import ConfigParser
from sqlalchemy import *
from sqlalchemy import orm
from sqlalchemy.orm import sessionmaker
import datetime, time
# ------------------------------ CLASS ------------------------------
class SQLError(Exception):
''' Error exception class '''
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class ConnectionError(Exception): pass
class SQlRow_Empty(Exception): pass
# Class for the ORM
# These are the empty classes that will become our data classes
class CallBack_Spool(object):
pass
class Server_Group(object):
pass
class Server_Manager(object):
pass
class callback_database:
"""Daemon base class"""
config_filename = None
section = 'database' # override this
"A class to handle all modification on DB"
dbname = ''
dbhost = ''
dbport = None
dbopt = ''
dbtty = ''
dbuser = ''
dbpasswd = ''
dbtype = ''
count_server_manager = 0
# TODO : create it with protected __ for better design
def __init__(self):
# cool to call a function to fetch the conf
self.read_basic_config()
self.db_connect()
def read_basic_config(self):
"""Read basic options from the daemon config file"""
cp = ConfigParser.ConfigParser()
cp.read([self.config_filename])
self.config_parser = cp
self.dbname = cp.get(self.section, 'dbname')
self.dbhost = cp.get(self.section, 'hostname')
self.dbport = cp.get(self.section, 'port')
self.dbuser = cp.get(self.section, 'user')
self.dbpasswd = cp.get(self.section, 'password')
self.dbtype = cp.get(self.section, 'dbtype')
def status_on (self, status):
if (status.lower()=='on') :
return 'ACTIVE'
else :
return 'INACTIVE'
def db_connect (self):
if (len(self.dbpasswd) > 0) :
connection_string = self.dbtype + "://" + self.dbuser + ":" + self.dbpasswd + "@" + self.dbhost + "/" + self.dbname
else :
connection_string = self.dbtype + "://" + self.dbuser + "@" + self.dbhost + "/" + self.dbname
try:
self.engine = create_engine(connection_string)
self.engine.echo = False # Try changing this to True and see what happens
self.metadata = MetaData(self.engine)
Session = sessionmaker(bind=self.engine, autoflush=True)
# create a Session
self.session = Session()
self.cc_callback_spool = Table('cc_callback_spool', self.metadata, autoload=True)
self.cc_server_group = Table('cc_server_group', self.metadata, autoload=True)
self.cc_server_manager = Table('cc_server_manager', self.metadata, autoload=True)
# map to the class
CallBack_Spool_mapper = orm.mapper(CallBack_Spool, self.cc_callback_spool)
Server_Group_mapper = orm.mapper(Server_Group, self.cc_server_group)
Server_Manager_mapper = orm.mapper(Server_Manager, self.cc_server_manager)
self.CallBack_Spool_q = self.session.query(CallBack_Spool)
self.Server_Manager_q = self.session.query(Server_Manager)
except Exception, error_message:
#print "connection error to " + connection_string
raise ConnectionError(error_message)
def db_close (self):
try:
self.session.flush()
except Exception, error_message:
raise SQLError(error_message)
def count_callback_spool(self):
return self.CallBack_Spool_q.filter((self.cc_callback_spool.c.status=='PENDING')).count()
def find_server_manager(self, c_id_group):
get_Server_Manager = self.Server_Manager_q.filter(
(self.cc_server_manager.c.id_group==c_id_group)
).all()
return get_Server_Manager
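    # Round-robin selection below: a running counter modulo the number of managers picks the
    # next one via Query.get(), i.e. by primary key -- this appears to assume manager ids 1..N.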
def find_server_manager_roundrobin(self, c_id_group):
nball_Server_Manager = self.Server_Manager_q.filter(
(self.cc_server_manager.c.id_group==c_id_group)
).count()
if (nball_Server_Manager == 0):
raise SQlRow_Empty("No Server_Manager has been found for this idgroup : "+ str(c_id_group))
nb_sel_Server_Manager = (self.count_server_manager % nball_Server_Manager) + 1
selected_Server_Manager = self.Server_Manager_q.get(nb_sel_Server_Manager)
self.count_server_manager = self.count_server_manager + 1
return selected_Server_Manager
def find_callback_request(self, c_status = 'PENDING', c_hours = 24):
get_CallBack_Spool = self.CallBack_Spool_q.filter(
(self.cc_callback_spool.c.status==c_status) &
(self.cc_callback_spool.c.entry_time > datetime.datetime.now() - datetime.timedelta(hours=c_hours)) &
((self.cc_callback_spool.c.callback_time==None) | (self.cc_callback_spool.c.callback_time < datetime.datetime.now()))
).all()
return get_CallBack_Spool
def update_callback_request (self, c_id, c_status):
try:
get_CallBack_Spool = self.CallBack_Spool_q.filter((self.cc_callback_spool.c.id == c_id)).one()
get_CallBack_Spool.status = c_status
self.session.flush()
except:
#print "--- nothing to update ---"
pass
def update_callback_request_server (self, c_id, c_status, c_id_server, c_manager_result):
try:
get_CallBack_Spool = self.CallBack_Spool_q.filter((self.cc_callback_spool.c.id == c_id)).one()
get_CallBack_Spool.status = c_status
get_CallBack_Spool.id_server = c_id_server
get_CallBack_Spool.manager_result = c_manager_result
get_CallBack_Spool.num_attempt += 1
get_CallBack_Spool.last_attempt_time = func.now();
self.session.flush()
except:
#print "--- nothing to update ---"
pass
# ------------------------------ MAIN ------------------------------
if __name__ == "__main__":
"""
print "\n\n"
inst_cb_db = callback_database()
print inst_cb_db.count_callback_spool()
print
get_CallBack_Spool = inst_cb_db.find_callback_request('SENT', 121212)
for p in get_CallBack_Spool[0:5]:
print p.id,' ===========>>> >>> ',p.uniqueid, '>> ',p.status, '>> ',p.num_attempt, ' ::>> ',p.id_server, ' ::>> ',p.manager_result
inst_cb_db.update_callback_request (5, 'SENT')
inst_cb_db.update_callback_request (5, 'SENT')
inst_cb_db.update_callback_request_server (5, 'SENT', 77, 'rhaaaaaaaa')
print
get_Server_Manager = inst_cb_db.find_server_manager(1)
for p in get_Server_Manager[0:5]:
print p.id,' ===========>>> >>> ',p.id_group, '>> ',p.server_ip, '>> ',p.manager_username
try:
get_Server_Manager = inst_cb_db.find_server_manager_roundrobin(11)
print get_Server_Manager.id,' ===========>>> >>> ',get_Server_Manager.id_group, '>> ',get_Server_Manager.server_ip, '>> ',get_Server_Manager.manager_username
except:
print "--- no manager ---"
pass
"""
| agpl-3.0 | 5,660,961,621,840,508,000 | 33.427509 | 165 | 0.579635 | false | 3.732769 | true | false | false |
ActiveState/code | recipes/Python/580658_Function_guards_for_Python_3/recipe-580658.py | 1 | 13853 | #!/usr/bin/env python3
#-*- coding: iso-8859-1 -*-
################################################################################
#
# Function guards for Python 3.
#
# (c) 2016, Dmitry Dvoinikov <[email protected]>
# Distributed under MIT license.
#
# Samples:
#
# from funcguard import guard
#
# @guard
# def abs(a, _when = "a >= 0"):
# return a
#
# @guard
# def abs(a, _when = "a < 0"):
# return -a
#
# assert abs(1) == abs(-1) == 1
#
# @guard
# def factorial(n): # no _when expression => default
# return 1
#
# @guard
# def factorial(n, _when = "n > 1"):
# return n * factorial(n - 1)
#
# assert factorial(10) == 3628800
#
# class TypeTeller:
# @staticmethod
# @guard
# def typeof(value, _when = "isinstance(value, int)"):
# return int
# @staticmethod
# @guard
# def typeof(value, _when = "isinstance(value, str)"):
# return str
#
# assert TypeTeller.typeof(0) is int
# TypeTeller.typeof(0.0) # throws
#
# class AllowedProcessor:
# def __init__(self, allowed):
# self._allowed = allowed
# @guard
# def process(self, value, _when = "value in self._allowed"):
# return "ok"
# @guard
# def process(self, value): # no _when expression => default
# return "fail"
#
# ap = AllowedProcessor({1, 2, 3})
# assert ap.process(1) == "ok"
# assert ap.process(0) == "fail"
#
# guard.default_eval_args( # values to insert to all guards scopes
# office_hours = lambda: 9 <= datetime.now().hour < 18)
#
# @guard
# def at_work(*args, _when = "office_hours()", **kwargs):
# print("welcome")
#
# @guard
# def at_work(*args, **kwargs):
# print("come back tomorrow")
#
# at_work() # either "welcome" or "come back tomorrow"
#
# The complete source code with self-tests is available from:
# https://github.com/targeted/funcguard
#
################################################################################
__all__ = [ "guard", "GuardException", "IncompatibleFunctionsException",
"FunctionArgumentsMatchException", "GuardExpressionException",
"DuplicateDefaultGuardException", "GuardEvalException",
"NoMatchingFunctionException" ]
################################################################################
import inspect; from inspect import getfullargspec
import functools; from functools import wraps
import sys; from sys import modules
try:
(lambda: None).__qualname__
except AttributeError:
import qualname; from qualname import qualname # prior to Python 3.3 workaround
else:
qualname = lambda f: f.__qualname__
################################################################################
class GuardException(Exception): pass
class IncompatibleFunctionsException(GuardException): pass
class FunctionArgumentsMatchException(GuardException): pass
class GuardExpressionException(GuardException): pass
class DuplicateDefaultGuardException(GuardException): pass
class GuardEvalException(GuardException): pass
class NoMatchingFunctionException(GuardException): pass
################################################################################
# takes an argument specification for a function and a set of actual call
# positional and keyword arguments, returns a flat namespace-like dict
# mapping parameter names to their actual values
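# Illustrative example: for "def f(a, b=2, *args, c, **kw)" called as f(1, 3, 4, c=5, d=6)
# the returned dict is {"a": 1, "b": 3, "args": (4,), "c": 5, "kw": {"d": 6}}.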
def _eval_args(argspec, args, kwargs):
# match positional arguments
matched_args = {}
expected_args = argspec.args
default_args = argspec.defaults or ()
_many = lambda t: "argument" + ("s" if len(t) != 1 else "")
# copy provided args to expected, append defaults if necessary
for i, name in enumerate(expected_args):
if i < len(args):
value = args[i]
elif i >= len(expected_args) - len(default_args):
value = argspec.defaults[i - len(expected_args) + len(default_args)]
else:
missing_args = expected_args[len(args):len(expected_args) - len(default_args)]
raise FunctionArgumentsMatchException("missing required positional {0:s}: {1:s}".\
format(_many(missing_args), ", ".join(missing_args)))
matched_args[name] = value
# put extra provided args to *args if the function allows
if argspec.varargs:
matched_args[argspec.varargs] = args[len(expected_args):] if len(args) > len(expected_args) else ()
elif len(args) > len(expected_args):
raise FunctionArgumentsMatchException(
"takes {0:d} positional {1:s} but {2:d} {3:s} given".
format(len(expected_args), _many(expected_args),
len(args), len(args) == 1 and "was" or "were"))
# match keyword arguments
matched_kwargs = {}
expected_kwargs = argspec.kwonlyargs
default_kwargs = argspec.kwonlydefaults or {}
# extract expected kwargs from provided, using defaults if necessary
missing_kwargs = []
for name in expected_kwargs:
if name in kwargs:
matched_kwargs[name] = kwargs[name]
elif name in default_kwargs:
matched_kwargs[name] = default_kwargs[name]
else:
missing_kwargs.append(name)
if missing_kwargs:
raise FunctionArgumentsMatchException("missing required keyword {0:s}: {1:s}".\
format(_many(missing_kwargs), ", ".join(missing_kwargs)))
extra_kwarg_names = [ name for name in kwargs if name not in matched_kwargs ]
if argspec.varkw:
if extra_kwarg_names:
extra_kwargs = { name: kwargs[name] for name in extra_kwarg_names }
else:
extra_kwargs = {}
matched_args[argspec.varkw] = extra_kwargs
elif extra_kwarg_names:
raise FunctionArgumentsMatchException("got unexpected keyword {0:s}: {1:s}".\
format(_many(extra_kwarg_names), ", ".join(extra_kwarg_names)))
# both positional and keyword argument are returned in the same scope-like dict
for name, value in matched_kwargs.items():
matched_args[name] = value
return matched_args
################################################################################
# takes an argument specification for a function, from it extracts and returns
# a compiled expression which is to be matched against call arguments
def _get_guard_expr(func_name, argspec):
guard_expr_text = None
if "_when" in argspec.args:
defaults = argspec.defaults or ()
i = argspec.args.index("_when")
if i >= len(argspec.args) - len(defaults):
guard_expr_text = defaults[i - len(argspec.args) + len(defaults)]
elif "_when" in argspec.kwonlyargs:
guard_expr_text = (argspec.kwonlydefaults or {}).get("_when")
else:
return None # indicates default guard
if guard_expr_text is None:
raise GuardExpressionException("guarded function {0:s}() requires a \"_when\" "
"argument with guard expression text as its "
"default value".format(func_name))
try:
guard_expr = compile(guard_expr_text, func_name, "eval")
except Exception as e:
error = str(e)
else:
error = None
if error is not None:
raise GuardExpressionException("invalid guard expression for {0:s}(): "
"{1:s}".format(func_name, error))
return guard_expr
################################################################################
# checks whether two functions' argspecs are compatible to be guarded as one,
# compatible argspecs have identical positional and keyword parameters except
# for "_when" and annotations
def _compatible_argspecs(argspec1, argspec2):
return _stripped_argspec(argspec1) == _stripped_argspec(argspec2)
def _stripped_argspec(argspec):
args = argspec.args[:]
defaults = list(argspec.defaults or ())
kwonlyargs = argspec.kwonlyargs[:]
kwonlydefaults = (argspec.kwonlydefaults or {}).copy()
if "_when" in args:
i = args.index("_when")
if i >= len(args) - len(defaults):
del defaults[i - len(args) + len(defaults)]
del args[i]
elif "_when" in kwonlyargs and "_when" in kwonlydefaults:
i = kwonlyargs.index("_when")
del kwonlyargs[i]
del kwonlydefaults["_when"]
return (args, defaults, kwonlyargs, kwonlydefaults, argspec.varargs, argspec.varkw)
################################################################################
def guard(func, module = None): # the main decorator function
# see if it is a function of a lambda
try:
eval(func.__name__)
except SyntaxError:
return func # <lambda> => not guarded
except NameError:
pass # valid name
# get to the bottom of a possible decorator chain
# to get the original function's specification
original_func = func
while hasattr(original_func, "__wrapped__"):
original_func = original_func.__wrapped__
func_name = qualname(original_func)
func_module = module or modules[func.__module__] # module serves only as a place to keep state
argspec = getfullargspec(original_func)
    # the registry of known guarded functions is attached to the module containing them
guarded_functions = getattr(func_module, "__guarded_functions__", None)
if guarded_functions is None:
guarded_functions = func_module.__guarded_functions__ = {}
original_argspec, first_guard, last_guard = guard_info = \
guarded_functions.setdefault(func_name, [argspec, None, None])
# all the guarded functions with the same name must have identical signature
if argspec is not original_argspec and not _compatible_argspecs(argspec, original_argspec):
raise IncompatibleFunctionsException("function signature is incompatible "
"with the previosly registered {0:s}()".format(func_name))
@wraps(func)
def func_guard(*args, **kwargs): # the call proxy function
# since all versions of the function have essentially identical signatures,
# their mapping to the actually provided arguments can be calculated once
# for each call and not against every version of the function
try:
eval_args = _eval_args(argspec, args, kwargs)
except FunctionArgumentsMatchException as e:
error = str(e)
else:
error = None
if error is not None:
raise FunctionArgumentsMatchException("{0:s}() {1:s}".format(func_name, error))
for name, value in guard.__default_eval_args__.items():
eval_args.setdefault(name, value)
# walk the chain of function versions starting with the first, looking
# for the one for which the guard expression evaluates to truth
current_guard = func_guard.__first_guard__
while current_guard:
try:
if not current_guard.__guard_expr__ or \
eval(current_guard.__guard_expr__, globals(), eval_args):
break
except Exception as e:
error = str(e)
else:
error = None
if error is not None:
raise GuardEvalException("guard expression evaluation failed for "
"{0:s}(): {1:s}".format(func_name, error))
current_guard = current_guard.__next_guard__
else:
raise NoMatchingFunctionException("none of the guard expressions for {0:s}() "
"matched the call arguments".format(func_name))
return current_guard.__wrapped__(*args, **kwargs) # call the winning function version
# in different version of Python @wraps behaves differently with regards
# to __wrapped__, therefore we set it the way we need it here
func_guard.__wrapped__ = func
# the guard expression is attached
func_guard.__guard_expr__ = _get_guard_expr(func_name, argspec)
# maintain a linked list for all versions of the function
if last_guard and not last_guard.__guard_expr__: # the list is not empty and the
# last guard is already a default
if not func_guard.__guard_expr__:
raise DuplicateDefaultGuardException("the default version of {0:s}() has already "
"been specified".format(func_name))
# the new guard has to be inserted one before the last
if first_guard is last_guard: # the list contains just one guard
# new becomes first, last is not changed
first_guard.__first_guard__ = func_guard.__first_guard__ = func_guard
func_guard.__next_guard__ = first_guard
first_guard = guard_info[1] = func_guard
else: # the list contains more than one guard
# neither first nor last are changed
prev_guard = first_guard
while prev_guard.__next_guard__ is not last_guard:
prev_guard = prev_guard.__next_guard__
func_guard.__first_guard__ = first_guard
func_guard.__next_guard__ = last_guard
prev_guard.__next_guard__ = func_guard
else: # the new guard is inserted last
if not first_guard:
first_guard = guard_info[1] = func_guard
func_guard.__first_guard__ = first_guard
func_guard.__next_guard__ = None
if last_guard:
last_guard.__next_guard__ = func_guard
last_guard = guard_info[2] = func_guard
return func_guard
guard.__default_eval_args__ = {}
guard.default_eval_args = lambda *args, **kwargs: guard.__default_eval_args__.update(*args, **kwargs)
################################################################################
# EOF
| mit | -7,951,982,927,786,345,000 | 35.551451 | 107 | 0.593157 | false | 4.217047 | false | false | false |
guildai/guild | guild/ipy.py | 1 | 19332 | # Copyright 2017-2021 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import functools
import importlib
import inspect
import logging
import os
import sys
import threading
import warnings
import six
with warnings.catch_warnings():
warnings.simplefilter("ignore", Warning)
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
try:
import pandas as pd
except ImportError:
raise RuntimeError(
"guild.ipy requires pandas - install it first before using "
"this module (see https://pandas.pydata.org/pandas-docs/stable/"
"install.html for help)"
)
# ipy makes use of the full Guild API and so, like main_bootstrap,
# requires the external modules.
from guild import main_bootstrap
main_bootstrap.ensure_external_path()
from guild import batch_util
from guild import click_util
from guild import config
from guild import exit_code
from guild import index as indexlib
from guild import model_proxy
from guild import op_util
from guild import opref as opreflib
from guild import run as runlib
from guild import run_util
from guild import summary
from guild import util
from guild import var
from guild.commands import runs_impl
log = logging.getLogger("guild")
RUN_DETAIL = [
"id",
"operation",
"status",
"started",
"stopped",
"label",
"run_dir",
]
DEFAULT_MAX_TRIALS = 20
class RunException(Exception):
def __init__(self, run, from_exc):
super(RunException, self).__init__(run, from_exc)
self.run = run
self.from_exc = from_exc
class RunError(RunException):
pass
class RunTerminated(RunException):
pass
class OutputTee(object):
def __init__(self, fs, lock):
self._fs = fs
self._lock = lock
def write(self, s):
with self._lock:
for f in self._fs:
f.write(s)
def flush(self):
with self._lock:
for f in self._fs:
f.flush()
class RunOutput(object):
def __init__(self, run, summary=None):
self.run = run
self.summary = summary
self._f = None
self._f_lock = None
self._stdout = None
self._stderr = None
def __enter__(self):
self._f = open(self.run.guild_path("output"), "w")
self._f_lock = threading.Lock()
self._stdout = sys.stdout
sys.stdout = OutputTee(self._tee_fs(sys.stdout), self._f_lock)
self._stderr = sys.stderr
sys.stderr = OutputTee(self._tee_fs(sys.stderr), self._f_lock)
def _tee_fs(self, iof):
fs = [iof, self._f]
if self.summary:
fs.append(self.summary)
return fs
def __exit__(self, *exc):
with self._f_lock:
self._f.close()
if self.summary:
self.summary.close()
sys.stdout = self._stdout
sys.stderr = self._stderr
@functools.total_ordering
class RunIndex(object):
def __init__(self, run, fmt):
self.value = run
self.run = run # backward compatible alias
self.fmt = fmt
def __str__(self):
return self.value.short_id
def __eq__(self, x):
return self._x_id(x) == self.value.id
def __lt__(self, x):
return self.value.id < self._x_id(x)
@staticmethod
def _x_id(x):
if isinstance(x, six.string_types):
return x
elif isinstance(x, RunIndex):
return x.value.id
return None
class RunsSeries(pd.Series):
@property
def _constructor(self):
return RunsSeries
@property
def _constructor_expanddim(self):
return RunsDataFrame
def delete(self, **kw):
self.to_frame().delete(**kw)
def info(self, **kw):
_print_run_info(self[0], **kw)
def scalars(self):
return _runs_scalars([self[0].value])
def scalars_detail(self):
return _runs_scalars_detail([self[0].value])
def flags(self):
return _runs_flags([self[0].value])
def compare(self):
return _runs_compare([self[0]])
class RunsDataFrame(pd.DataFrame):
@property
def _constructor(self):
return RunsDataFrame
@property
def _constructor_sliced(self):
return RunsSeries
@property
def _constructor_expanddim(self):
return RunsDataFrame
def delete(self, permanent=False):
runs = self._runs()
var.delete_runs(runs, permanent)
return [run.id for run in runs]
def _runs(self):
return [row[1][0].value for row in self.iterrows()]
def _items(self):
return [row[1][0] for row in self.iterrows()]
# pylint: disable=arguments-differ
def info(self, *args, **kw):
self.loc[0].info(*args, **kw)
def scalars(self):
return _runs_scalars(self._runs())
def scalars_detail(self):
return _runs_scalars_detail(self._runs())
def flags(self):
return _runs_flags(self._runs())
def compare(self):
return _runs_compare(self._items())
class Batch(object):
def __init__(self, gen_trials, op, flag_vals, opts):
self.gen_trials = gen_trials
self.op = op
self.flag_vals = _coerce_range_functions(flag_vals)
self.opts = opts
def __call__(self):
runs = []
results = []
prev_results_cb = lambda: (runs, results)
for trial in self.gen_trials(self.flag_vals, prev_results_cb, **self.opts):
trial_flag_vals, trial_attrs = _split_gen_trial(trial)
print(
"Running %s (%s):"
% (self.op.__name__, op_util.flags_desc(trial_flag_vals))
)
run, result = _run(self.op, trial_flag_vals, self.opts, trial_attrs)
runs.append(run)
results.append(result)
return runs, results
def _split_gen_trial(trial):
if isinstance(trial, tuple):
assert len(trial) == 2, ("generated trial must be a two-tuple or a dict", trial)
return trial
else:
return trial, {}
def _coerce_range_functions(flag_vals):
return {name: _coerce_range_function(val) for name, val in flag_vals.items()}
def _coerce_range_function(val):
if isinstance(val, RangeFunction):
return str(val)
return val
class RangeFunction(object):
def __init__(self, name, *args):
self.name = name
self.args = args
def __str__(self):
args = ":".join([str(arg) for arg in self.args])
return "%s[%s]" % (self.name, args)
def batch_gen_trials(flag_vals, _prev_trials_cb, max_trials=None, **kw):
if kw:
log.warning("ignoring batch config: %s", kw)
max_trials = max_trials or DEFAULT_MAX_TRIALS
trials = 0
for trial_flag_vals in batch_util.expand_flags(flag_vals):
if trials >= max_trials:
return
trials += 1
yield trial_flag_vals
def optimizer_trial_generator(model_op):
main_mod = _optimizer_module(model_op.module_name)
try:
return main_mod.gen_trials
except AttributeError:
raise TypeError(
"%s optimizer module does not implement gen_trials" % main_mod.__name__
)
def _optimizer_module(module_name):
return importlib.import_module(module_name)
def uniform(low, high):
return RangeFunction("uniform", low, high)
def loguniform(low, high):
return RangeFunction("loguniform", low, high)
def run(op, *args, **kw):
if not callable(op):
raise ValueError("op must be callable")
opts = _pop_opts(kw)
flag_vals = _init_flag_vals(op, args, kw)
run = _init_runner(op, flag_vals, opts)
return run()
def _pop_opts(kw):
opts = {}
for name in list(kw):
if name[:1] == "_":
opts[name[1:]] = kw.pop(name)
return opts
def _init_flag_vals(op, args, kw):
# pylint: disable=deprecated-method
op_f = _op_f(op)
op_flag_vals = inspect.getcallargs(op_f, *args, **kw)
_remove_bound_method_self(op_f, op_flag_vals)
return _coerce_slice_vals(op_flag_vals)
def _op_f(op):
assert callable(op), repr(op)
if inspect.isfunction(op) or inspect.ismethod(op):
return op
assert hasattr(op, "__call__")
return op.__call__
def _remove_bound_method_self(op, op_flag_vals):
im_self = util.find_apply(
[
lambda: getattr(op, "__self__", None),
lambda: getattr(op, "im_self", None),
]
)
if im_self:
for key, val in op_flag_vals.items():
if val is im_self:
del op_flag_vals[key]
break
else:
assert False, (op_flag_vals, im_self)
def _coerce_slice_vals(flag_vals):
return {name: _coerce_slice_val(val) for name, val in flag_vals.items()}
def _coerce_slice_val(val):
if isinstance(val, slice):
return uniform(val.start, val.stop)
return val
def _init_runner(op, flag_vals, opts):
return util.find_apply(
[_optimize_runner, _batch_runner, _single_runner], op, flag_vals, opts
)
def _optimize_runner(op, flag_vals, opts):
optimizer = opts.get("optimizer")
if not optimizer:
return _maybe_random_runner(op, flag_vals, opts)
opts = _filter_kw(opts, ["optimizer"])
return Batch(_init_gen_trials(optimizer), op, flag_vals, opts)
def _filter_kw(opts, keys):
return {k: v for k, v in opts.items() if k not in keys}
def _maybe_random_runner(op, flag_vals, opts):
assert not opts.get("optimizer"), opts
for val in flag_vals.values():
if isinstance(val, RangeFunction):
return Batch(_init_gen_trials("random"), op, flag_vals, opts)
return None
def _init_gen_trials(optimizer):
try:
model_op, _name = model_proxy.resolve_plugin_model_op(optimizer)
except model_proxy.NotSupported:
raise TypeError("optimizer %r is not supported" % optimizer)
else:
return optimizer_trial_generator(model_op)
def _batch_runner(op, flag_vals, opts):
for val in flag_vals.values():
if isinstance(val, list):
return Batch(batch_gen_trials, op, flag_vals, opts)
return None
def _single_runner(op, flag_vals, opts):
return lambda: _run(op, flag_vals, opts)
def _run(op, flag_vals, opts, extra_attrs=None):
run = _init_run()
_init_run_attrs(run, op, flag_vals, opts, extra_attrs)
summary = _init_output_scalars(run, opts)
try:
with RunOutput(run, summary):
_write_proc_lock(run)
with util.Chdir(run.path):
result = op(**flag_vals)
except KeyboardInterrupt as e:
exit_status = exit_code.KEYBOARD_INTERRUPT
util.raise_from(RunTerminated(run, e), e)
except Exception as e:
exit_status = exit_code.DEFAULT_ERROR
util.raise_from(RunError(run, e), e)
else:
exit_status = 0
return run, result
finally:
_finalize_run(run, exit_status)
def _init_run():
run_id = runlib.mkid()
run_dir = os.path.join(var.runs_dir(), run_id)
run = runlib.Run(run_id, run_dir)
run.init_skel()
return run
def _init_run_attrs(run, op, flag_vals, opts, extra_attrs):
opref = opreflib.OpRef("func", "", "", "", _op_name(op, opts))
run.write_opref(opref)
run.write_attr("started", runlib.timestamp())
run.write_attr("flags", flag_vals)
run.write_attr("label", _run_label(flag_vals, opts))
if extra_attrs:
for name, val in extra_attrs.items():
run.write_attr(name, val)
def _op_name(op, opts):
return opts.get("op_name") or _default_op_name(op)
def _default_op_name(op):
if inspect.isfunction(op) or inspect.ismethod(op):
return op.__name__
return op.__class__.__name__
def _run_label(flag_vals, opts):
return op_util.run_label(_label_template(opts), flag_vals)
def _label_template(opts):
return util.find_apply([_explicit_label, _tagged_label], opts)
def _explicit_label(opts):
return opts.get("label")
def _tagged_label(opts):
try:
tag = opts["tag"]
except KeyError:
return None
else:
return "%s ${default_label}" % tag
def _init_output_scalars(run, opts):
config = opts.get("output_scalars", summary.DEFAULT_OUTPUT_SCALARS)
if not config:
return None
abs_guild_path = os.path.abspath(run.guild_path())
return summary.OutputScalars(config, abs_guild_path)
def _write_proc_lock(run):
op_util.write_proc_lock(os.getpid(), run)
def _finalize_run(run, exit_status):
run.write_attr("exit_status", exit_status)
run.write_attr("stopped", runlib.timestamp())
op_util.delete_proc_lock(run)
def runs(**kw):
runs = runs_impl.filtered_runs(_runs_cmd_args(**kw))
data, cols = _format_runs(runs)
return RunsDataFrame(data=data, columns=cols)
def _runs_cmd_args(
operations=None,
labels=None,
tags=None,
comments=None,
running=False,
completed=False,
error=False,
terminated=False,
pending=False,
staged=False,
unlabeled=None,
marked=False,
unmarked=False,
started=None,
digest=None,
deleted=None,
remote=None,
):
operations = operations or ()
labels = labels or ()
tags = tags or ()
comments = comments or ()
return click_util.Args(
filter_ops=operations,
filter_labels=labels,
filter_tags=tags,
filter_comments=comments,
status_running=running,
status_completed=completed,
status_error=error,
status_terminated=terminated,
status_pending=pending,
status_staged=staged,
filter_unlabeled=unlabeled,
filter_marked=marked,
filter_unmarked=unmarked,
filter_started=started,
filter_digest=digest,
deleted=deleted,
remote=remote,
)
def _format_runs(runs):
cols = (
"run",
"operation",
"started",
"status",
"label",
)
data = [_format_run(run, cols) for run in runs]
return data, cols
def _format_run(run, cols):
fmt = run_util.format_run(run)
return [_run_attr(run, name, fmt) for name in cols]
def _run_attr(run, name, fmt):
if name == "run":
return RunIndex(run, fmt)
elif name in ("operation",):
return fmt[name]
elif name in ("started", "stopped"):
return _datetime(run.get(name))
elif name in ("label",):
return run.get(name, "")
elif name == "time":
return _run_time(run)
else:
return getattr(run, name)
def _datetime(ts):
if ts is None:
return None
return datetime.datetime.fromtimestamp(int(ts / 1000000))
def _run_time(run):
formatted_time = util.format_duration(run.get("started"), run.get("stopped"))
return pd.to_timedelta(formatted_time)
def _print_run_info(item, output=False, scalars=False):
for name in RUN_DETAIL:
print("%s: %s" % (name, item.fmt.get(name, "")))
print("flags:", end="")
print(run_util.format_attr(item.value.get("flags", "")))
if scalars:
print("scalars:")
for s in indexlib.iter_run_scalars(item.value):
print(" %s: %f (step %i)" % (s["tag"], s["last_val"], s["last_step"]))
if output:
print("output:")
for line in run_util.iter_output(item.value):
print(" %s" % line, end="")
def _runs_scalars(runs):
data = []
cols = [
"run",
"prefix",
"tag",
"first_val",
"first_step",
"last_val",
"last_step",
"min_val",
"min_step",
"max_val",
"max_step",
"avg_val",
"count",
"total",
]
for run in runs:
for s in indexlib.iter_run_scalars(run):
data.append(s)
return pd.DataFrame(data, columns=cols)
def _runs_scalars_detail(runs):
from guild import tfevent
data = []
cols = [
"run",
"path",
"tag",
"val",
"step",
]
for run in runs:
for path, _run_id, scalars in tfevent.scalar_readers(run.dir):
rel_path = os.path.relpath(path, run.dir)
for tag, val, step in scalars:
data.append([run, rel_path, tag, val, step])
return pd.DataFrame(data, columns=cols)
def _runs_flags(runs):
data = [_run_flags_data(run) for run in runs]
return pd.DataFrame(data)
def _run_flags_data(run):
data = run.get("flags") or {}
data[_run_flags_key(data)] = run.id
return data
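# If a flag is literally named "run", the run id is stored under "_run" (and so on) to avoid
# clobbering the flag value in the flags dataframe.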
def _run_flags_key(flag_vals):
run_key = "run"
while run_key in flag_vals:
run_key = "_" + run_key
return run_key
def _runs_compare(items):
core_cols = ["run", "operation", "started", "time", "status", "label"]
flag_cols = set()
scalar_cols = set()
data = []
for item in items:
row_data = {}
data.append(row_data)
# Order matters here - we want flag vals to take precedence
# over scalar vals with the same name.
_apply_scalar_data(item.value, scalar_cols, row_data)
_apply_flag_data(item.value, flag_cols, row_data)
_apply_run_core_data(item, core_cols, row_data)
cols = core_cols + sorted(flag_cols) + _sort_scalar_cols(scalar_cols, flag_cols)
return pd.DataFrame(data, columns=cols)
def _apply_scalar_data(run, cols, data):
for name, val in _run_scalar_data(run).items():
cols.add(name)
data[name] = val
def _run_scalar_data(run):
data = {}
step = None
last_step = None
for s in indexlib.iter_run_scalars(run):
key = s["tag"]
data[key] = s["last_val"]
last_step = s["last_step"]
if key == "loss":
step = last_step
if data:
if step is None:
step = last_step
data["step"] = step
return data
def _apply_flag_data(run, cols, data):
for name, val in _run_flags_data(run).items():
if name == "run":
continue
cols.add(name)
data[name] = val
def _apply_run_core_data(item, cols, data):
for name in cols:
data[name] = _run_attr(item.value, name, item.fmt)
def _sort_scalar_cols(scalar_cols, flag_cols):
# - List step first if it exists
# - Don't include flag cols in result
cols = []
if "step" in scalar_cols:
cols.append("step")
for col in sorted(scalar_cols):
if col == "step" or col in flag_cols:
continue
cols.append(col)
return cols
def guild_home():
return config.guild_home()
def set_guild_home(path):
config.set_guild_home(path)
| apache-2.0 | 5,983,932,685,837,985,000 | 24.470356 | 88 | 0.600093 | false | 3.459556 | false | false | false |
UMONS-GFA/bdas | parsing/bin_to_influx.py | 1 | 4781 | import glob
import sys
import logging
import datetime
import pandas as pd
from os import path, makedirs, rename
from influxdb import DataFrameClient
from time import gmtime
from parsing import bin_to_df
from bdas.settings import DATABASE, BIN_DIR, PROCESSED_DIR, UNPROCESSED_DIR, LOG_DIR, LOG_FILE, MASK
def bin_to_influx(bin_filename, last_date):
df, metadata, status = bin_to_df.bin_to_df(bin_filename)
if status == 0:
df2 = df[df.index > last_date]
if df2.size > 0:
for col in df2.columns:
df3 = pd.DataFrame({'date': df2[col].index, 'value': df2[col].values, 'sensor': col,
'das': metadata['NetId']})
df3.set_index('date', inplace=True)
client.write_points(df3, 'measurement', {'sensor': metadata['NetId'] + '-' + col})
return status
if __name__ == "__main__":
i = 1
status = None
log_path = path.join(BIN_DIR, LOG_DIR)
if not path.exists(log_path):
makedirs(log_path)
processed_path = path.join(BIN_DIR, PROCESSED_DIR)
if not path.exists(processed_path):
makedirs(processed_path)
logging_level = logging.DEBUG
logging.Formatter.converter = gmtime
log_format = '%(asctime)-15s %(levelname)s:%(message)s'
logging.basicConfig(format=log_format, datefmt='%Y/%m/%d %H:%M:%S UTC', level=logging_level,
handlers=[logging.FileHandler(path.join(BIN_DIR, LOG_DIR, LOG_FILE)), logging.StreamHandler()])
logging.info('_____ Started _____')
if len(sys.argv) > 1:
if len(sys.argv) % 2 == 1:
while i < len(sys.argv)-1:
if sys.argv[i] == 'MASK':
MASK = str(sys.argv[i+1])
elif sys.argv[i] == 'binpath':
BIN_DIR = str(sys.argv[i+1])
elif sys.argv[i] == 'dbname':
DATABASE['NAME'] = str(sys.argv[i+1])
else:
logging.warning('*** Unknown argument : ' + sys.argv[i])
pass
i += 2
else:
logging.error('*** Parsing failed : arguments should be given by pairs [key value]...')
status = 2
logging.info('_____ Ended _____')
sys.exit(status)
else:
logging.warning('*** No argument found...')
bin_filenames = sorted(glob.iglob(BIN_DIR+MASK+'.bin'))
logging.info('%d bin files to process...' % len(bin_filenames))
if len(bin_filenames) > 0:
client = DataFrameClient(DATABASE['HOST'], DATABASE['PORT'], DATABASE['USER'],
DATABASE['PASSWORD'], DATABASE['NAME'])
for f in bin_filenames:
metadata = bin_to_df.get_metadata(f)
if metadata is not None:
if metadata['NetId'] is not None:
net_id = metadata['NetId']
first_channel = metadata['Channels'][0]
tag_to_search = net_id + '-' + first_channel
                    last_measurement = client.query("select last(*) from \"measurement\" where \"sensor\" = '%s';" % tag_to_search)
if not last_measurement:
ld = datetime.datetime(1970, 1, 1, 0, 0, 0).replace(tzinfo=datetime.timezone.utc)
else:
ld = last_measurement['measurement'].index.to_pydatetime()[0]
status = bin_to_influx(f, ld)
if status == 0 or status == 1:
rename(f, path.join(path.dirname(f), PROCESSED_DIR, path.basename(f)))
rename(f + '.jsn', path.join(path.dirname(f), PROCESSED_DIR, path.basename(f) + '.jsn'))
else:
logging.warning('%s could not be processed...' % f)
if not path.exists(path.join(BIN_DIR, UNPROCESSED_DIR)):
makedirs(path.join(BIN_DIR, UNPROCESSED_DIR))
rename(f, path.join(path.dirname(f), UNPROCESSED_DIR, path.basename(f)))
rename(f + '.jsn', path.join(path.dirname(f), UNPROCESSED_DIR, path.basename(f) + '.jsn'))
else:
logging.warning('%s could not be processed because NetID is null' % f)
if not path.exists(path.join(BIN_DIR, UNPROCESSED_DIR)):
makedirs(path.join(BIN_DIR, UNPROCESSED_DIR))
rename(f, path.join(path.dirname(f), UNPROCESSED_DIR, path.basename(f)))
rename(f + '.jsn', path.join(path.dirname(f), UNPROCESSED_DIR, path.basename(f) + '.jsn'))
else:
status = 1
logging.warning('No files to process...')
logging.info('_____ Ended _____')
sys.exit(status)
| gpl-3.0 | -6,081,361,585,220,011,000 | 45.872549 | 120 | 0.53357 | false | 3.779447 | false | false | false |
numirias/firefed | firefed/feature/permissions.py | 1 | 1123 | import attr
from attr import attrib, attrs
from firefed.feature import Feature, formatter
from firefed.output import out
from firefed.util import tabulate
@attrs
class Permissions(Feature):
"""List host permissions (e.g. location sharing).
This feature extracts the stored permissions which the user has granted to
particular hosts (e.g. popups, location sharing, desktop notifications).
"""
perms = attrib(default=None, init=False)
def prepare(self):
self.perms = self.load_sqlite(
db='permissions.sqlite',
table='moz_perms',
cls=attr.make_class('Permission', ['host', 'permission']),
column_map={'origin': 'host', 'type': 'permission'},
)
def summarize(self):
out('%d permissions found.' % len(list(self.perms)))
def run(self):
self.build_format()
@formatter('table', default=True)
def table(self):
rows = [attr.astuple(p) for p in self.perms]
tabulate(rows, headers=('Host', 'Permission'))
@formatter('csv')
def csv(self):
Feature.csv_from_items(self.perms)
| mit | 123,947,964,493,940,500 | 27.075 | 78 | 0.637578 | false | 3.968198 | false | false | false |
OBIGOGIT/etch | binding-python/runtime/src/main/python/etch/binding/support/DefaultValueFactory.py | 2 | 4435 | """
# Licensed to the Apache Software Foundation (ASF) under one *
# or more contributor license agreements. See the NOTICE file *
# distributed with this work for additional information *
# regarding copyright ownership. The ASF licenses this file *
# to you under the Apache License, Version 2.0 (the *
# "License"); you may not use this file except in compliance *
# with the License. You may obtain a copy of the License at *
# *
# http://www.apache.org/licenses/LICENSE-2.0 *
# *
# Unless required by applicable law or agreed to in writing, *
# software distributed under the License is distributed on an *
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
# KIND, either express or implied. See the License for the *
# specific language governing permissions and limitations *
# under the License.
"""
from __future__ import absolute_import
from ..msg.Field import *
from ..msg.ImportExportHelper import *
from ..msg.Message import *
from ..msg.StructValue import *
from ..msg.Type import *
from ..msg.ValueFactory import *
from ..util.DateSerializer import *
from ..util.ListSerializer import *
from ..util.MapSerializer import *
from ..util.SetSerializer import *
from .Validator_RuntimeException import *
from .Validator_long import *
class DefaultValueFactory(ValueFactory):
"""
Default implementation of ValueFactory which provides some
dynamic type and field support, as well as standard value
conversions and import and rt.
"""
# Names
ETCH_RUNTIME_EXCEPTION_TYPE_NAME = "_Etch_RuntimeException"
ETCH_LIST_TYPE_NAME = "_Etch_List"
ETCH_MAP_TYPE_NAME = "_Etch_Map"
ETCH_SET_TYPE_NAME = "_Etch_Set"
ETCH_DATETIME_TYPE_NAME = "_Etch_Datetime"
ETCH_AUTH_EXCEPTION_TYPE_NAME = "_Etch_AuthException"
ETCH_EXCEPTION_MESSAGE_NAME = "_exception"
MSG_FIELD_NAME = "msg"
MESSAGE_ID_FIELD_NAME = "_messageId"
IN_REPLY_TO_FIELD_NAME = "_inReplyTo"
RESULT_FIELD_NAME = "result"
# Fields
_mf_msg = Field(MSG_FIELD_NAME)
"""The msg field of the standard unchecked exception"""
_mf__messageId = Field(MESSAGE_ID_FIELD_NAME)
"""The well-known _messageId field"""
_mf__inReplyTo = Field(IN_REPLY_TO_FIELD_NAME)
"""The well-known _inReplyTo field"""
_mf_result = Field(RESULT_FIELD_NAME)
"""The well-known result field"""
@staticmethod
def init(typs, class2type):
"""
Initializes the standard types and fields needed by all
etch generated value factories.
@param types
@param class2type
"""
cls = DefaultValueFactory
        RuntimeExceptionSerializer.init(typs[cls.ETCH_RUNTIME_EXCEPTION_TYPE_NAME], class2type)
        ListSerializer.init(typs[cls.ETCH_LIST_TYPE_NAME], class2type)
        MapSerializer.init(typs[cls.ETCH_MAP_TYPE_NAME], class2type)
        SetSerializer.init(typs[cls.ETCH_SET_TYPE_NAME], class2type)
        DateSerializer.init(typs[cls.ETCH_DATETIME_TYPE_NAME], class2type)
        AuthExceptionSerializer.init(typs[cls.ETCH_AUTH_EXCEPTION_TYPE_NAME], class2type)
# _mt__Etch_AuthException
t = typs.get(cls.ETCH_EXCEPTION_MESSAGE_NAME)
t.putValidator( cls._mf_result, Validator_RuntimeException.get())
t.putValidator( cls._mf__messageId, Validator_long.get(0))
t.putValidator( cls._mf__inReplyTo, Validator_long.get(0))
def __init__(self, typs, class2type):
"""
Constructs the DefaultValueFactory.
@param typs
@param class2type
"""
cls = self.__class__
self.__types = typs
self.__class2type = class2type
self._mt__Etch_RuntimeException = typs.get(cls.ETCH_RUNTIME_EXCEPTION_TYPE_NAME)
self._mt__Etch_List = typs.get(cls.ETCH_LIST_TYPE_NAME)
self._mt__Etch_Map = typs.get(cls.ETCH_MAP_TYPE_NAME)
self._mt__Etch_Set = typs.get(cls.ETCH_SET_TYPE_NAME)
self._mt__Etch_Datetime = typs.get(cls.ETCH_DATETIME_TYPE_NAME)
self._mt__Etch_AuthException = typs.get(cls.ETCH_AUTH_EXCEPTION_TYPE_NAME)
self._mt__exception = typs.get(cls.ETCH_EXCEPTION_MESSAGE_NAME)
def get_mt__Etch_RuntimeException(self):
        return self._mt__Etch_RuntimeException
| apache-2.0 | -6,336,460,698,594,596,000 | 37.565217 | 95 | 0.64938 | false | 3.548 | false | false | false |
sesh/flexx | make/_sphinx.py | 21 | 3982 | """ Tools for Sphinx to build docs and/or websites.
"""
import os
import os.path as op
import sys
import shutil
import re
import subprocess
if sys.version_info[0] < 3:
input = raw_input # noqa
def sh(cmd):
"""Execute command in a subshell, return status code."""
return subprocess.check_call(cmd, shell=True)
def sh2(cmd):
"""Execute command in a subshell, return stdout.
Stderr is unbuffered from the subshell."""
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
out = p.communicate()[0]
retcode = p.returncode
if retcode:
raise subprocess.CalledProcessError(retcode, cmd)
else:
return out.rstrip().decode('utf-8', 'ignore')
def sphinx_clean(build_dir):
if op.isdir(build_dir):
shutil.rmtree(build_dir)
os.mkdir(build_dir)
print('Cleared build directory.')
def sphinx_build(src_dir, build_dir):
import sphinx
try:
ret = 0
ret = sphinx.main(['sphinx-build', # Dummy
'-b', 'html',
'-d', op.join(build_dir, 'doctrees'),
src_dir, # Source
op.join(build_dir, 'html'), # Dest
])
except SystemExit:
pass
if ret != 0:
raise RuntimeError('Sphinx error: %s' % ret)
print("Build finished. The HTML pages are in %s/html." % build_dir)
def sphinx_show(html_dir):
index_html = op.join(html_dir, 'index.html')
if not op.isfile(index_html):
sys.exit('Cannot show pages, build the html first.')
import webbrowser
webbrowser.open_new_tab(index_html)
def sphinx_copy_pages(html_dir, pages_dir, pages_repo):
print('COPYING PAGES')
# Create the pages repo if needed
if not op.isdir(pages_dir):
os.chdir(ROOT_DIR)
sh("git clone %s %s" % (pages_repo, pages_dir))
# Ensure that its up to date
os.chdir(pages_dir)
sh('git checkout master -q')
sh('git pull -q')
os.chdir('..')
# This is pretty unforgiving: we unconditionally nuke the destination
# directory, and then copy the html tree in there
tmp_git_dir = op.join(ROOT_DIR, pages_dir + '_git')
shutil.move(op.join(pages_dir, '.git'), tmp_git_dir)
try:
shutil.rmtree(pages_dir)
shutil.copytree(html_dir, pages_dir)
shutil.move(tmp_git_dir, op.join(pages_dir, '.git'))
finally:
if op.isdir(tmp_git_dir):
shutil.rmtree(tmp_git_dir)
# Copy individual files
open(op.join(pages_dir, 'README.md'), 'wb').write(
'Autogenerated website - do not edit\n'.encode('utf-8'))
for fname in ['CNAME', '.nojekyll']: # nojekyll or website wont work
if op.isfile(op.join(WEBSITE_DIR, fname)):
shutil.copyfile(op.join(WEBSITE_DIR, fname),
op.join(pages_dir, fname))
# Messages
os.chdir(pages_dir)
sh('git status')
print()
print("Website copied to _gh-pages. Above you can see its status:")
print(" Run 'make website show' to view.")
print(" Run 'make website upload' to commit and push.")
def sphinx_upload(repo_dir):
# Check head
os.chdir(repo_dir)
status = sh2('git status | head -1')
branch = re.match('On branch (.*)$', status).group(1)
if branch != 'master':
e = 'On %r, git branch is %r, MUST be "master"' % (repo_dir,
branch)
raise RuntimeError(e)
# Show repo and ask confirmation
print()
print('You are about to commit to:')
sh('git config --get remote.origin.url')
print()
print('Most recent 3 commits:')
sys.stdout.flush()
sh('git --no-pager log --oneline -n 3')
ok = input('Are you sure you want to commit and push? (y/[n]): ')
ok = ok or 'n'
# If ok, add, commit, push
if ok.lower() == 'y':
sh('git add .')
sh('git commit -am"Update (automated commit)"')
print()
sh('git push')
| bsd-2-clause | 4,251,001,334,143,003,000 | 30.603175 | 73 | 0.580864 | false | 3.520778 | false | false | false |
paolodedios/pybuilder | src/unittest/python/plugins/python/cram_plugin_tests.py | 3 | 12634 | # -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2020 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os.path import pathsep
import unittest
from pybuilder.core import Project
from pybuilder.errors import BuildFailedException
from pybuilder.plugins.python.cram_plugin import (_cram_command_for,
_find_files,
_report_file,
run_cram_tests,
)
from pybuilder.utils import jp, np
from test_utils import patch, Mock, call
class CramPluginTests(unittest.TestCase):
def test_command_respects_no_verbose(self):
project = Project('.')
project.set_property('verbose', False)
expected = ['-m', 'cram', '-E']
received = _cram_command_for(project)
self.assertEqual(expected, received)
def test_command_respects_verbose(self):
project = Project('.')
project.set_property('verbose', True)
expected = ['-m', 'cram', '-E', '--verbose']
received = _cram_command_for(project)
self.assertEqual(expected, received)
@patch('pybuilder.plugins.python.cram_plugin.discover_files_matching')
def test_find_files(self, discover_mock):
project = Project('.')
project.set_property('dir_source_cmdlinetest', np('any/dir'))
project.set_property('cram_test_file_glob', '*.t')
expected = [np(jp(project.basedir, './any/dir/test.cram'))]
discover_mock.return_value = expected
received = _find_files(project)
self.assertEqual(expected, received)
discover_mock.assert_called_once_with(np('any/dir'), '*.t')
def test_report(self):
project = Project('.')
project.set_property('dir_reports', np('any/dir'))
expected = np(jp(project.basedir, 'any/dir/cram.err'))
received = _report_file(project)
self.assertEqual(expected, received)
@patch('pybuilder.plugins.python.cram_plugin._cram_command_for')
@patch('pybuilder.plugins.python.cram_plugin._find_files')
@patch('pybuilder.plugins.python.cram_plugin._report_file')
@patch('pybuilder.plugins.python.cram_plugin.read_file')
def test_running_plugin_cram_from_target(self,
read_file_mock,
report_mock,
find_files_mock,
command_mock
):
project = Project('.')
project.set_property('cram_run_test_from_target', True)
project.set_property('dir_dist', 'python')
project.set_property('dir_dist_scripts', 'scripts')
project.set_property('verbose', False)
project._plugin_env = {}
logger = Mock()
reactor = Mock()
reactor.python_env_registry = {}
reactor.python_env_registry["pybuilder"] = pyb_env = Mock()
reactor.pybuilder_venv = pyb_env
pyb_env.environ = {}
pyb_env.executable = ["a/b"]
execute_mock = pyb_env.execute_command = Mock()
command_mock.return_value = ['cram']
find_files_mock.return_value = ['test1.cram', 'test2.cram']
report_mock.return_value = 'report_file'
read_file_mock.return_value = ['test failes for file', '# results']
execute_mock.return_value = 0
run_cram_tests(project, logger, reactor)
execute_mock.assert_called_once_with(
['a/b', 'cram', 'test1.cram', 'test2.cram'], 'report_file',
error_file_name='report_file',
env={'PYTHONPATH': np(jp(project.basedir, 'python')) + pathsep,
'PATH': np(jp(project.basedir, 'python/scripts')) + pathsep}
)
expected_info_calls = [call('Running Cram command line tests'),
call('Cram tests were fine'),
call('results'),
]
self.assertEqual(expected_info_calls, logger.info.call_args_list)
@patch('pybuilder.plugins.python.cram_plugin._cram_command_for')
@patch('pybuilder.plugins.python.cram_plugin._find_files')
@patch('pybuilder.plugins.python.cram_plugin._report_file')
@patch('pybuilder.plugins.python.cram_plugin.read_file')
def test_running_plugin_from_scripts(self,
read_file_mock,
report_mock,
find_files_mock,
command_mock
):
project = Project('.')
project.set_property('cram_run_test_from_target', False)
project.set_property('dir_source_main_python', 'python')
project.set_property('dir_source_main_scripts', 'scripts')
project.set_property('verbose', False)
project._plugin_env = {}
logger = Mock()
reactor = Mock()
reactor.python_env_registry = {}
reactor.python_env_registry["pybuilder"] = pyb_env = Mock()
reactor.pybuilder_venv = pyb_env
pyb_env.environ = {}
pyb_env.executable = ["a/b"]
execute_mock = pyb_env.execute_command = Mock()
command_mock.return_value = ['cram']
find_files_mock.return_value = ['test1.cram', 'test2.cram']
report_mock.return_value = 'report_file'
read_file_mock.return_value = ['test fails for file', '# results']
execute_mock.return_value = 0
run_cram_tests(project, logger, reactor)
execute_mock.assert_called_once_with(
['a/b', 'cram', 'test1.cram', 'test2.cram'], 'report_file',
error_file_name='report_file',
env={'PYTHONPATH': np(jp(project.basedir, 'python')) + pathsep,
'PATH': np(jp(project.basedir, 'scripts')) + pathsep}
)
expected_info_calls = [call('Running Cram command line tests'),
call('Cram tests were fine'),
call('results'),
]
self.assertEqual(expected_info_calls, logger.info.call_args_list)
@patch('pybuilder.plugins.python.cram_plugin.tail_log')
@patch('pybuilder.plugins.python.cram_plugin._cram_command_for')
@patch('pybuilder.plugins.python.cram_plugin._find_files')
@patch('pybuilder.plugins.python.cram_plugin._report_file')
@patch('pybuilder.plugins.python.cram_plugin.read_file')
def test_running_plugin_fails(self,
read_file_mock,
report_mock,
find_files_mock,
command_mock,
tail_mock,
):
project = Project('.')
project.set_property('verbose', False)
project.set_property('dir_source_main_python', 'python')
project.set_property('dir_source_main_scripts', 'scripts')
logger = Mock()
reactor = Mock()
reactor.python_env_registry = {}
reactor.python_env_registry["pybuilder"] = pyb_env = Mock()
reactor.pybuilder_venv = pyb_env
pyb_env.environ = {}
pyb_env.executable = ["a/b"]
execute_mock = pyb_env.execute_command = Mock()
command_mock.return_value = ['cram']
find_files_mock.return_value = ['test1.cram', 'test2.cram']
report_mock.return_value = 'report_file'
read_file_mock.return_value = ['test failes for file', '# results']
execute_mock.return_value = 1
tail_mock.return_value = "tail data"
self.assertRaises(
BuildFailedException, run_cram_tests, project, logger, reactor)
execute_mock.assert_called_once_with(
['a/b', 'cram', 'test1.cram', 'test2.cram'], 'report_file',
error_file_name='report_file',
env={'PYTHONPATH': np(jp(project.basedir, 'python')) + pathsep,
'PATH': np(jp(project.basedir, 'scripts')) + pathsep}
)
expected_info_calls = [call('Running Cram command line tests'),
]
expected_error_calls = [call('Cram tests failed! See report_file for full details:\ntail data'),
]
self.assertEqual(expected_info_calls, logger.info.call_args_list)
self.assertEqual(expected_error_calls, logger.error.call_args_list)
@patch('pybuilder.plugins.python.cram_plugin._cram_command_for')
@patch('pybuilder.plugins.python.cram_plugin._find_files')
@patch('pybuilder.plugins.python.cram_plugin._report_file')
@patch('pybuilder.plugins.python.cram_plugin.read_file')
def test_running_plugin_no_failure_no_tests(self,
read_file_mock,
report_mock,
find_files_mock,
command_mock
):
project = Project('.')
project.set_property('verbose', True)
project.set_property('dir_source_main_python', 'python')
project.set_property('dir_source_main_scripts', 'scripts')
project.set_property("cram_fail_if_no_tests", False)
project._plugin_env = {}
logger = Mock()
reactor = Mock()
reactor.python_env_registry = {}
reactor.python_env_registry["pybuilder"] = pyb_env = Mock()
reactor.pybuilder_venv = pyb_env
pyb_env.environ = {}
pyb_env.executable = ["a/b"]
execute_mock = pyb_env.execute_command = Mock()
command_mock.return_value = ['cram']
find_files_mock.return_value = []
report_mock.return_value = 'report_file'
read_file_mock.return_value = ['test failes for file', '# results']
execute_mock.return_value = 1
run_cram_tests(project, logger, reactor)
execute_mock.assert_not_called()
expected_info_calls = [call('Running Cram command line tests'),
]
self.assertEqual(expected_info_calls, logger.info.call_args_list)
@patch('pybuilder.plugins.python.cram_plugin._cram_command_for')
@patch('pybuilder.plugins.python.cram_plugin._find_files')
@patch('pybuilder.plugins.python.cram_plugin._report_file')
@patch('pybuilder.plugins.python.cram_plugin.read_file')
def test_running_plugin_failure_no_tests(self,
read_file_mock,
report_mock,
find_files_mock,
command_mock
):
project = Project('.')
project.set_property('verbose', True)
project.set_property('dir_source_main_python', 'python')
project.set_property('dir_source_main_scripts', 'scripts')
project.set_property("cram_fail_if_no_tests", True)
project._plugin_env = {}
logger = Mock()
reactor = Mock()
reactor.python_env_registry = {}
reactor.python_env_registry["pybuilder"] = pyb_env = Mock()
pyb_env.environ = {}
execute_mock = pyb_env.execute_command = Mock()
command_mock.return_value = ['cram']
find_files_mock.return_value = []
report_mock.return_value = 'report_file'
read_file_mock.return_value = ['test failes for file', '# results']
execute_mock.return_value = 1
self.assertRaises(
BuildFailedException, run_cram_tests, project, logger, reactor)
execute_mock.assert_not_called()
expected_info_calls = [call('Running Cram command line tests'),
]
self.assertEqual(expected_info_calls, logger.info.call_args_list)
| apache-2.0 | -8,196,437,905,931,931,000 | 44.610108 | 104 | 0.559205 | false | 4.01717 | true | false | false |
google-research/episodic-curiosity | third_party/keras_resnet/models.py | 1 | 12574 | # coding=utf-8
# COPYRIGHT
#
# All contributions by Raghavendra Kotikalapudi:
# Copyright (c) 2016, Raghavendra Kotikalapudi.
# All rights reserved.
#
# All other contributions:
# Copyright (c) 2016, the respective contributors.
# All rights reserved.
#
# Copyright (c) 2018 Google LLC
# All rights reserved.
#
# Each contributor holds copyright over their respective contributions.
# The project versioning (Git) records all such contribution source information.
#
# LICENSE
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Model definitions for the R-network.
Forked from https://github.com/raghakot/keras-resnet/blob/master/resnet.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin
# pytype: disable=import-error
from tensorflow.keras import backend as K
from tensorflow.keras.activations import sigmoid
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import add
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import concatenate
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dot
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Lambda
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.models import Model
from tensorflow.keras.regularizers import l2
# pytype: enable=import-error
EMBEDDING_DIM = 512
TOP_HIDDEN = 4
def _bn_relu(inpt):
"""Helper to build a BN -> relu block."""
norm = BatchNormalization(axis=3)(inpt)
return Activation("relu")(norm)
def _conv_bn_relu(**conv_params):
"""Helper to build a conv -> BN -> relu block."""
filters = conv_params["filters"]
kernel_size = conv_params["kernel_size"]
strides = conv_params.setdefault("strides", (1, 1))
kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
padding = conv_params.setdefault("padding", "same")
kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))
def f(inpt):
conv = Conv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer)(
inpt)
return _bn_relu(conv)
return f
def _bn_relu_conv(**conv_params):
"""Helper to build a BN -> relu -> conv block."""
# This is an improved scheme proposed in http://arxiv.org/pdf/1603.05027v2.pdf
filters = conv_params["filters"]
kernel_size = conv_params["kernel_size"]
strides = conv_params.setdefault("strides", (1, 1))
kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
padding = conv_params.setdefault("padding", "same")
kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))
def f(inpt):
activation = _bn_relu(inpt)
return Conv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer)(
activation)
return f
def _shortcut(inpt, residual):
"""Adds shortcut between inpt and residual block and merges with "sum"."""
# Expand channels of shortcut to match residual.
# Stride appropriately to match residual (width, height)
# Should be int if network architecture is correctly configured.
input_shape = K.int_shape(inpt)
residual_shape = K.int_shape(residual)
stride_width = int(round(input_shape[1] / residual_shape[1]))
stride_height = int(round(input_shape[2] / residual_shape[2]))
equal_channels = input_shape[3] == residual_shape[3]
shortcut = inpt
# 1 X 1 conv if shape is different. Else identity.
if stride_width > 1 or stride_height > 1 or not equal_channels:
shortcut = Conv2D(
filters=residual_shape[3],
kernel_size=(1, 1),
strides=(stride_width, stride_height),
padding="valid",
kernel_initializer="he_normal",
kernel_regularizer=l2(0.0001))(
inpt)
return add([shortcut, residual])
def _residual_block(block_function, filters, repetitions, is_first_layer=False):
"""Builds a residual block with repeating bottleneck blocks."""
def f(inpt):
"""Helper function."""
for i in range(repetitions):
init_strides = (1, 1)
if i == 0 and not is_first_layer:
init_strides = (2, 2)
inpt = block_function(
filters=filters,
init_strides=init_strides,
is_first_block_of_first_layer=(is_first_layer and i == 0))(
inpt)
return inpt
return f
def basic_block(filters,
init_strides=(1, 1),
is_first_block_of_first_layer=False):
"""Basic 3 X 3 convolution blocks for use on resnets with layers <= 34."""
# Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf
def f(inpt):
"""Helper function."""
if is_first_block_of_first_layer:
# don't repeat bn->relu since we just did bn->relu->maxpool
conv1 = Conv2D(
filters=filters,
kernel_size=(3, 3),
strides=init_strides,
padding="same",
kernel_initializer="he_normal",
kernel_regularizer=l2(1e-4))(
inpt)
else:
conv1 = _bn_relu_conv(
filters=filters, kernel_size=(3, 3), strides=init_strides)(
inpt)
residual = _bn_relu_conv(filters=filters, kernel_size=(3, 3))(conv1)
return _shortcut(inpt, residual)
return f
def _bn_relu_for_dense(inpt):
norm = BatchNormalization(axis=1)(inpt)
return Activation("relu")(norm)
def _top_network(input_shape):
"""Add top classification layers.
Args:
input_shape: shape of the embedding of the input image.
Returns:
A model taking a batch of input image embeddings, returning a batch of
similarities (shape [batch, 2])
"""
x1 = Input(shape=input_shape, name="top_deep_net_x1")
x2 = Input(shape=input_shape, name="top_deep_net_x2")
x = concatenate([x1, x2])
raw_result = _bn_relu_for_dense(x)
for _ in range(TOP_HIDDEN):
raw_result = Dense(
units=EMBEDDING_DIM, kernel_initializer="he_normal")(
raw_result)
raw_result = _bn_relu_for_dense(raw_result)
output = Dense(
units=2, activation="softmax", kernel_initializer="he_normal")(
raw_result)
model = Model(inputs=[x1, x2], outputs=output)
model.summary()
return model
def _metric_top_network(input_shape):
"""A simple top network that basically computes sigmoid(dot_product(x1, x2)).
Args:
input_shape: shape of the embedding of the input image.
Returns:
A model taking a batch of input image embeddings, returning a batch of
similarities (shape [batch, 2])
"""
x1 = Input(shape=input_shape, name="top_metric_net_x1")
x2 = Input(shape=input_shape, name="top_metric_net_x2")
def one_hot_sigmoid(x):
return K.concatenate([1 - sigmoid(x), sigmoid(x)], axis=1)
dot_product = Dot(axes=-1)([x1, x2])
output = Lambda(one_hot_sigmoid)(dot_product)
model = Model(inputs=[x1, x2], outputs=output)
model.summary()
return model
class ResnetBuilder(object):
"""Factory class for creating Resnet models."""
@staticmethod
def build(input_shape, num_outputs, block_fn, repetitions, is_classification):
"""Builds a custom ResNet like architecture.
Args:
input_shape: The inpt shape in the form (nb_rows, nb_cols, nb_channels)
num_outputs: The number of outputs at final softmax layer
block_fn: The block function to use. This is either `basic_block` or
`bottleneck`. The original paper used basic_block for layers < 50
repetitions: Number of repetitions of various block units. At each block
unit, the number of filters are doubled and the inpt size is halved
is_classification: if True add softmax layer on top
Returns:
The keras `Model`.
The model's input is an image tensor. Its shape is [batch, height, width,
channels] if the backend is tensorflow.
The model's output is the embedding with shape [batch, num_outputs].
Raises:
Exception: wrong input shape.
"""
if len(input_shape) != 3:
raise Exception(
"Input shape should be a tuple (nb_rows, nb_cols, nb_channels)")
inpt = Input(shape=input_shape)
conv1 = _conv_bn_relu(filters=64, kernel_size=(7, 7), strides=(2, 2))(inpt)
pool1 = MaxPooling2D(
pool_size=(3, 3), strides=(2, 2), padding="same")(
conv1)
block = pool1
filters = 64
for i, r in enumerate(repetitions):
block = _residual_block(
block_fn, filters=filters, repetitions=r, is_first_layer=(i == 0))(
block)
filters *= 2
# Last activation
block = _bn_relu(block)
# Classifier block
block_shape = K.int_shape(block)
pool2 = AveragePooling2D(
pool_size=(block_shape[1], block_shape[2]),
strides=(1, 1))(
block)
flatten1 = Flatten()(pool2)
last_activation = None
if is_classification:
last_activation = "softmax"
dense = Dense(
units=num_outputs,
kernel_initializer="he_normal",
activation=last_activation)(
flatten1)
model = Model(inputs=inpt, outputs=dense)
model.summary()
return model
@staticmethod
def build_resnet_18(input_shape, num_outputs, is_classification=True):
"""Create Resnet-18."""
return ResnetBuilder.build(input_shape, num_outputs, basic_block,
[2, 2, 2, 2], is_classification)
@staticmethod
@gin.configurable
def build_siamese_resnet_18(input_shape,
use_deep_top_network=True,
trainable_bottom_network=True):
"""Create siamese architecture for R-network.
Args:
input_shape: Shape of the input images, (height, width, channels)
use_deep_top_network: If true (default), a deep network will be used for
comparing embeddings. Otherwise, we use a simple
distance metric.
trainable_bottom_network: Whether the bottom (embedding) model is
trainable.
Returns:
A tuple:
- The model mapping two images [batch, height, width, channels] to
similarities [batch, 2].
- The embedding model mapping one image [batch, height, width, channels]
to embedding [batch, EMBEDDING_DIM].
- The similarity model mapping two embedded images
[batch, 2*EMBEDDING_DIM] to similariries [batch, 2].
The returned models share weights. In particular, loading the weights of
the first model also loads the weights of the other two models.
"""
branch = ResnetBuilder.build_resnet_18(
input_shape, EMBEDDING_DIM, is_classification=False)
branch.trainable = trainable_bottom_network
x1 = Input(shape=input_shape, name="x1")
x2 = Input(shape=input_shape, name="x2")
y1 = branch(x1)
y2 = branch(x2)
if use_deep_top_network:
similarity_network = _top_network((EMBEDDING_DIM,))
else:
similarity_network = _metric_top_network((EMBEDDING_DIM,))
output = similarity_network([y1, y2])
return Model(inputs=[x1, x2], outputs=output), branch, similarity_network
| apache-2.0 | -5,892,137,180,987,779,000 | 33.355191 | 80 | 0.672737 | false | 3.744491 | false | false | false |
SuLab/scheduled-bots | scheduled_bots/geneprotein/OrthologBot.py | 1 | 7744 | """
https://bitbucket.org/sulab/wikidatabots/src/4f2e4bdf3d7328eb6fd94cc67af61e194bda0a96/genes/orthologs/human/parseHomologene.py?at=dronetest_DiseaseBot&fileviewer=file-view-default
https://www.wikidata.org/wiki/Q14911732#P684
https://www.wikidata.org/wiki/Q18049645
homologene release 68
https://www.wikidata.org/wiki/Q20976936
"""
import argparse
import json
import os
from collections import defaultdict
from datetime import datetime
from tqdm import tqdm
from scheduled_bots import get_default_core_props, PROPS
from scheduled_bots.geneprotein import HelperBot
from scheduled_bots.geneprotein.Downloader import MyGeneDownloader
from wikidataintegrator import wdi_login, wdi_core, wdi_helpers
core_props = get_default_core_props()
try:
from scheduled_bots.local import WDUSER, WDPASS
except ImportError:
if "WDUSER" in os.environ and "WDPASS" in os.environ:
WDUSER = os.environ['WDUSER']
WDPASS = os.environ['WDPASS']
else:
raise ValueError("WDUSER and WDPASS must be specified in local.py or as environment variables")
__metadata__ = {'name': 'OrthologBot',
'maintainer': 'GSS',
'tags': ['gene', 'ortholog'],
}
def main(metadata, log_dir="./logs", fast_run=True, write=True):
"""
Main function for creating/updating genes
:param metadata: looks like: {"ensembl" : 84, "cpdb" : 31, "netaffy" : "na35", "ucsc" : "20160620", .. }
:type metadata: dict
:param log_dir: dir to store logs
:type log_dir: str
:param fast_run: use fast run mode
:type fast_run: bool
:param write: actually perform write
:type write: bool
:return: None
"""
# login
login = wdi_login.WDLogin(user=WDUSER, pwd=WDPASS)
wdi_core.WDItemEngine.setup_logging(log_dir=log_dir, logger_name='WD_logger', log_name=log_name,
header=json.dumps(__metadata__))
# get all ids mappings
entrez_wdid = wdi_helpers.id_mapper(PROPS['Entrez Gene ID'])
wdid_entrez = {v: k for k, v in entrez_wdid.items()}
homo_wdid = wdi_helpers.id_mapper(PROPS['HomoloGene ID'], return_as_set=True)
wdid_homo = dict()
for homo, wdids in homo_wdid.items():
for wdid in wdids:
wdid_homo[wdid] = homo
entrez_homo = {wdid_entrez[wdid]: homo for wdid, homo in wdid_homo.items() if wdid in wdid_entrez}
taxon_wdid = wdi_helpers.id_mapper(PROPS['NCBI Taxonomy ID'])
# only do certain records
mgd = MyGeneDownloader(q="_exists_:homologene AND type_of_gene:protein-coding",
fields=','.join(['taxid', 'homologene', 'entrezgene']))
docs, total = mgd.query()
docs = list(tqdm(docs, total=total))
records = HelperBot.tag_mygene_docs(docs, metadata)
# group together all orthologs
# d[taxid][entrezgene] = { set of entrezgene ids for orthologs }
d = defaultdict(lambda: defaultdict(set))
entrez_taxon = dict() # keep this for the qualifier on the statements
for doc in records:
this_taxid = doc['taxid']['@value']
this_entrez = doc['entrezgene']['@value']
entrez_taxon[str(this_entrez)] = str(this_taxid)
if str(this_entrez) not in entrez_wdid:
continue
for taxid, entrez in doc['homologene']['@value']['genes']:
if taxid == 4932 and this_taxid == 559292:
# ridiculous workaround because entrez has the taxid for the strain and homologene has it for the species
# TODO: This needs to be fixed if you want to use other things that may have species/strains .. ?`
continue
if taxid != this_taxid and str(entrez) in entrez_wdid:
d[str(this_taxid)][str(this_entrez)].add(str(entrez))
print("taxid: # of genes : {}".format({k: len(v) for k, v in d.items()}))
homogene_ver = metadata['homologene']
release = wdi_helpers.Release("HomoloGene build{}".format(homogene_ver), "Version of HomoloGene", homogene_ver,
edition_of_wdid='Q468215',
archive_url='ftp://ftp.ncbi.nih.gov/pub/HomoloGene/build{}/'.format(
homogene_ver)).get_or_create(login)
reference = lambda homogeneid: [wdi_core.WDItemID(release, PROPS['stated in'], is_reference=True),
wdi_core.WDExternalID(homogeneid, PROPS['HomoloGene ID'], is_reference=True)]
ec = 0
for taxid, subd in tqdm(d.items()):
for entrezgene, orthologs in tqdm(subd.items(), leave=False):
try:
do_item(entrezgene, orthologs, reference, entrez_homo, entrez_taxon, taxon_wdid, entrez_wdid, login,
write)
except Exception as e:
wdi_helpers.format_msg(entrezgene, PROPS['Entrez Gene ID'], None, str(e), type(e))
ec += 1
# clear the fast run store once we move on to the next taxon
wdi_core.WDItemEngine.fast_run_store = []
wdi_core.WDItemEngine.fast_run_container = None
print("Completed succesfully with {} exceptions".format(ec))
def do_item(entrezgene, orthologs, reference, entrez_homo, entrez_taxon, taxon_wdid, entrez_wdid, login, write):
entrezgene = str(entrezgene)
s = []
this_ref = reference(entrez_homo[entrezgene])
for ortholog in orthologs:
ortholog = str(ortholog)
if ortholog == entrezgene:
continue
if ortholog not in entrez_taxon:
raise ValueError("missing taxid for: " + ortholog)
qualifier = wdi_core.WDItemID(taxon_wdid[entrez_taxon[ortholog]], PROPS['found in taxon'], is_qualifier=True)
s.append(wdi_core.WDItemID(entrez_wdid[ortholog], PROPS['ortholog'],
references=[this_ref], qualifiers=[qualifier]))
item = wdi_core.WDItemEngine(wd_item_id=entrez_wdid[entrezgene], data=s, fast_run=fast_run,
fast_run_base_filter={PROPS['Entrez Gene ID']: '',
PROPS['found in taxon']: taxon_wdid[entrez_taxon[entrezgene]]},
core_props=core_props)
wdi_helpers.try_write(item, entrezgene, PROPS['Entrez Gene ID'], edit_summary="edit orthologs", login=login,
write=write)
# print(item.wd_item_id)
if __name__ == "__main__":
"""
Data to be used is retrieved from mygene.info
"""
parser = argparse.ArgumentParser(description='run wikidata gene bot')
parser.add_argument('--log-dir', help='directory to store logs', type=str)
parser.add_argument('--dummy', help='do not actually do write', action='store_true')
parser.add_argument('--fastrun', dest='fastrun', action='store_true')
parser.add_argument('--no-fastrun', dest='fastrun', action='store_false')
parser.set_defaults(fastrun=True)
args = parser.parse_args()
log_dir = args.log_dir if args.log_dir else "./logs"
run_id = datetime.now().strftime('%Y%m%d_%H:%M')
__metadata__['run_id'] = run_id
fast_run = args.fastrun
# get metadata about sources
mgd = MyGeneDownloader()
metadata = dict()
src = mgd.get_metadata()['src']
for source in src.keys():
metadata[source] = src[source]["version"]
log_name = '{}-{}.log'.format(__metadata__['name'], run_id)
if wdi_core.WDItemEngine.logger is not None:
wdi_core.WDItemEngine.logger.handles = []
wdi_core.WDItemEngine.setup_logging(log_dir=log_dir, log_name=log_name, header=json.dumps(__metadata__),
logger_name='orthologs')
main(metadata, log_dir=log_dir, fast_run=fast_run, write=not args.dummy)
| mit | 1,585,796,242,095,598,600 | 42.751412 | 179 | 0.625646 | false | 3.259259 | false | false | false |
harti768/Watch-my-Pi | create_email_data.py | 1 | 1473 | import send_email
def main():
#Eingabe von Informationen
print("Willkomen bei der Installation von Watch My Pi! \nZum Nutzen der Software benötigt Watch My Pi! ihre E-Mail Adresse und Anmelde Daten \nDiese werden nur lokal gespeichert und kommen an keiner Stellle online. \nACHTUNG: Die Anmelde Daten inbegriffen dem Passwort sind zurzeit lokal zugreifbar, wir empfehlen daher eine eigene E-Mail nur für Watch My Pi! zu verwenden.")
emailData = input("\n E_Mail: \n ") +"|"
emailData += input("\n SMTP Adresse (Form: smtp.domain.com): \n ") +"|"
emailData += input("\n Passwort: \n ")
rightKey = False;
#TODO: Random key generation
key = "7T23C"
#Speichern der Daten als .txt Datei
path = "C:\\Users\\Hartmut\\Desktop\\testDatei.txt"
file = open(path, "w")
file.write(emailData)
file.close()
#Senden einer Email und bestätigen des Keys
send_email.main("Bitte übertragen sie diesen Schlüssel in die Konsole:\n\n"+key)
print("\nIhn wurde ein Schlüssel per E-Mail gesendet. Bitte bestätigen sie die Richtigkeit der angegebenen Daten indem sie den Schlüssel in die Konsole schreiben.")
while(rightKey):
if(input("Schlüssel: ")==key):
rightKey = True
else:
print("Der Schlüssel war leider falsch")
print("Se haben Watch My Pi erfolgreich instaliert. Viel Spaß damit! :)")
return True
if __name__ == '__main__':
main()
| mit | -2,137,441,953,899,311,900 | 42 | 379 | 0.668263 | false | 2.877953 | false | false | false |
h4ck3rm1k3/gcc-ontology | tests/pythoscope_test.py | 1 | 1680 | import sys
sys.path.append("/home/mdupont/experiments/pythoscope/")
sys.path.append("/home/mdupont/experiments/pythoscope/pythoscope")
import pprint
import pythoscope
from pythoscope.store import Project,Function
import sys
sys.path.append("/home/mdupont/experiments/py-loadr-forkr-debugr")
import forkr
# forkr.set_logging() # turn on all logging
import inspect
import ast
import sys
sys.path.append("/home/mdupont/experiments/astunparse/lib/")
import astunparse
import pprint
from ast import *
def test_unparse_ast() :
print "Hello Python!"
def pythoscope_t3st():
project = Project.from_directory(".")
#inspect_project(project)
#add_tests_to_project(project, modules, template, force)
#modname = "foo"
#module = project.create_test_module_from_name(modname)
#pprint.pprint(module)
foo = Function("testfoo")
#module = project.find_module_by_full_path(modname)
#pprint.pprint(module)
#pprint.pprint(module.__dict__)
#pprint.pprint(dir(module))
#module.objects.append(foo)
#template = "unittest"
#generator = pythoscope.generator.TestGenerator.from_template(template)
#generator._add_tests_for_module(module, project, True)
code = test_unparse_ast
ast2 = ast.parse(inspect.getsource(code))
code2 = astunparse.unparse(ast2)
m = project.create_module("tests/foo123.py", code=code2)
#pprint.pprint(m.__dict__)
pprint.pprint(project.__dict__)
for module in project.get_modules():
module.changed =True
# print("Calling save() on module %r" % module.subpath)
# module.save()
project.save()
if __name__ == '__main__' :
pythoscope_test()
| gpl-3.0 | -5,380,355,806,445,398,000 | 28.473684 | 75 | 0.691667 | false | 3.307087 | true | false | false |
bbc/kamaelia | Sketches/MPS/BugReports/FixTests/Axon/Examples/Handles/TestHandle.py | 6 | 1109 | #!/usr/bin/python
import time
from Axon.background import background
from Kamaelia.UI.Pygame.Text import Textbox, TextDisplayer
from Axon.Handle import Handle
background().start()
try:
import Queue
queue = Queue # Python 3 compatibility change
except ImportError:
# Python 3 compatibility change
import queue
TD = Handle(
TextDisplayer(position=(20, 90),
text_height=36,
screen_width=900,
screen_height=200,
background_color=(130,0,70),
text_color=(255,255,255)
)
).activate()
TB = Handle(
Textbox(position=(20, 340),
text_height=36,
screen_width=900,
screen_height=400,
background_color=(130,0,70),
text_color=(255,255,255)
)
).activate()
message = "hello\n"
while 1:
time.sleep(1)
try:
data = TB.get("outbox")
print (data)
message = data
except queue.Empty:
pass
TD.put(message, "inbox")
| apache-2.0 | 8,626,075,919,318,856,000 | 24.204545 | 58 | 0.533814 | false | 4.047445 | false | false | false |
dwfreed/mitmproxy | mitmproxy/contentviews/protobuf.py | 1 | 1279 | import subprocess
from . import base
class ViewProtobuf(base.View):
"""Human friendly view of protocol buffers
The view uses the protoc compiler to decode the binary
"""
name = "Protocol Buffer"
prompt = ("protobuf", "p")
content_types = [
"application/x-protobuf",
"application/x-protobuffer",
]
@staticmethod
def is_available():
try:
p = subprocess.Popen(
["protoc", "--version"],
stdout=subprocess.PIPE
)
out, _ = p.communicate()
return out.startswith("libprotoc")
except:
return False
def decode_protobuf(self, content):
# if Popen raises OSError, it will be caught in
# get_content_view and fall back to Raw
p = subprocess.Popen(['protoc', '--decode_raw'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate(input=content)
if out:
return out
else:
return err
def __call__(self, data, **metadata):
decoded = self.decode_protobuf(data)
return "Protobuf", base.format_text(decoded)
| mit | -6,204,324,615,913,677,000 | 27.422222 | 58 | 0.53792 | false | 4.684982 | false | false | false |
openpolis/open_municipio | open_municipio/people/models.py | 1 | 62224 | import datetime # use this only for checking types. use django.utils.datetime_safe for handling actual dates
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.db import models, transaction
from django.db.models import permalink, Q
from django.db.models.query import EmptyQuerySet
from django.utils.datetime_safe import date
from django.utils.translation import ugettext_lazy as _
from django.template.defaultfilters import slugify
from django.core.urlresolvers import reverse
from django.contrib.contenttypes.models import ContentType
from model_utils import Choices
from model_utils.managers import PassThroughManager
from model_utils.models import TimeStampedModel
from sorl.thumbnail import ImageField
from open_municipio.monitoring.models import MonitorizedItem
from open_municipio.newscache.models import NewsTargetMixin
from open_municipio.people.managers import ( TimeFramedQuerySet, GroupQuerySet,
ChargeQuerySet )
from open_municipio.om_utils.models import SlugModel
import open_municipio
from collections import Counter
#
# Persons, charges and groups
#
class Person(models.Model, MonitorizedItem):
"""
The ``related_news`` attribute can be used to fetch news related to a given person.
"""
FEMALE_SEX = 0
MALE_SEX = 1
SEX = Choices(
(MALE_SEX, _('Male')),
(FEMALE_SEX, _('Female')),
)
first_name = models.CharField(_('first name'), max_length=128)
last_name = models.CharField(_('last name'), max_length=128)
birth_date = models.DateField(_('birth date'))
birth_location = models.CharField(_('birth location'), blank=True, max_length=128)
slug = models.SlugField(unique=True, blank=True, null=True, max_length=128)
sex = models.IntegerField(_('sex'), choices=SEX)
op_politician_id = models.IntegerField(_('openpolis politician ID'), blank=True, null=True)
img = ImageField(upload_to="person_images", blank=True, null=True)
# manager to handle the list of monitoring having as content_object this instance
#monitoring_set = generic.GenericRelation(Monitoring, object_id_field='object_pk')
class Meta:
verbose_name = _('person')
verbose_name_plural = _('people')
def __unicode__(self):
return u'%s, %s' % (self.last_name, self.first_name)
def save(self, *args, **kwargs):
if self.slug is None:
self.slug = slugify("%s %s %s" % (self.first_name, self.last_name, self.birth_date))
super(Person, self).save(*args, **kwargs)
@permalink
def get_absolute_url(self):
return 'om_politician_detail', (), { 'slug': self.slug }
@property
def openpolis_link(self):
link = None
if self.op_politician_id:
link = settings.OP_URL_TEMPLATE % { "op_id":self.op_politician_id }
return link
@property
def is_om_user(self):
"""
        Check whether the person is a registered OpenMunicipio user.
"""
try:
prof = self.userprofile
return True
except ObjectDoesNotExist:
return False
@property
def full_name(self):
return "%s %s" % (self.first_name, self.last_name)
@property
def all_institution_charges(self):
"""
Returns the QuerySet of all institution charges held by this person during his/her career.
"""
return self.institutioncharge_set.select_related().all()
def get_past_institution_charges(self, moment=None):
return self.institutioncharge_set.select_related().past(moment=moment)\
.exclude(institution__institution_type__in=(Institution.COMMITTEE, Institution.JOINT_COMMITTEE))\
.order_by('-start_date')
past_institution_charges = property(get_past_institution_charges)
def get_current_institution_charges(self, moment=None):
"""
Returns the current institution charges at the given moment (no committees).
"""
return self.institutioncharge_set.select_related().current(moment=moment).exclude(
institution__institution_type__in=(Institution.COMMITTEE, Institution.JOINT_COMMITTEE)
)
current_institution_charges = property(get_current_institution_charges)
def get_current_committee_charges(self, moment=None):
"""
Returns the current committee charges, at the given moment.
"""
return self.institutioncharge_set.select_related().current(moment=moment).filter(
institution__institution_type__in=(Institution.COMMITTEE, Institution.JOINT_COMMITTEE)
).order_by('-institutionresponsability__charge_type','institution__position')
current_committee_charges = property(get_current_committee_charges)
def get_current_charge_in_institution(self, institution, moment=None):
"""
Returns the current charge in the given institution at the given moment.
        Raises ObjectDoesNotExist if no charge is found, MultipleObjectsReturned if more than one matches.
"""
charges = self.institutioncharge_set.select_related().current(moment=moment).filter(
institution=institution
)
if charges.count() == 1:
return charges[0]
elif charges.count() == 0:
raise ObjectDoesNotExist
else:
raise MultipleObjectsReturned
def has_current_charges(self, moment=None):
"""
Used for admin interface
"""
if self.institutioncharge_set.current(moment).count() > 0:
return True
else:
return False
has_current_charges.short_description = _('Current')
def is_counselor(self, moment=None):
"""
check if the person is a member of the council at the given moment
"""
if self.current_counselor_charge(moment):
return True
else:
return False
def current_counselor_charge(self, moment=None):
"""
fetch the current charge in Council, if any
"""
i = Institution.objects.get(institution_type=Institution.COUNCIL)
try:
ic = self.get_current_charge_in_institution(i, moment)
return ic
except ObjectDoesNotExist:
return None
def last_charge(self, moment=None):
"""
last charge, if any
"""
charges = self.current_institution_charges if self.has_current_charges() else self.past_institution_charges
if charges.count() > 0:
return charges[0]
else:
raise ObjectDoesNotExist
def get_historical_groupcharges(self, moment=None):
"""
        Returns the past group charges linked to the person's council charge at the given moment (None if the person holds no council charge)
"""
i = Institution.objects.get(institution_type=Institution.COUNCIL)
try:
ic = self.get_current_charge_in_institution(i, moment)
gc = GroupCharge.objects.select_related().past(moment).filter(charge=ic)
except ObjectDoesNotExist:
gc = None
return gc
historical_groupcharges = property(get_historical_groupcharges)
def get_current_groupcharge(self, moment=None):
"""
Returns GroupCharge at given moment in time (now if moment is None)
        The charge considered is the person's InstitutionCharge in the council
"""
i = Institution.objects.get(institution_type=Institution.COUNCIL)
try:
ic = self.get_current_charge_in_institution(i, moment)
gc = GroupCharge.objects.select_related().current(moment).get(charge=ic)
except ObjectDoesNotExist:
gc = None
return gc
current_groupcharge = property(get_current_groupcharge)
def get_current_group(self, moment=None):
"""
Returns group at given moment in time (now if moment is None)
        The group is computed from the GroupCharge whose charge is the person's InstitutionCharge in the council
Returns None if there is no current group.
"""
gc = self.get_current_groupcharge(moment)
if gc is None:
return None
return gc.group
current_group = property(get_current_group)
@property
def resources(self):
"""
Returns the list of resources associated with this person
"""
return self.resource_set.all()
@property
def content_type_id(self):
"""
Return id of the content type associated with this instance.
"""
return ContentType.objects.get_for_model(self).id
@property
def age(self):
"""
        Returns the person's age in years (approximate, computed as days since birth_date divided by 365)
"""
#end_date = in_date if in_date else date.today()
return (date.today() - self.birth_date).days / 365
@property
def related_news(self):
"""
        News related to a politician are the union of the news related to all of the
        politician's current and past institution charges
"""
news = EmptyQuerySet()
for c in self.all_institution_charges:
news |= c.related_news
return news
@property
def speeches(self):
"""
Speeches of a politician
"""
from open_municipio.acts.models import Speech
return Speech.objects.filter(author=self)
@property
def n_speeches(self):
"""
Number of speeches of a politician
"""
return self.speeches.count()
@property
def speeches_size(self):
"""
        Total text size of the politician's speeches
"""
return sum([s.text_size for s in self.speeches.all()])
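# A minimal usage sketch (kept as a comment so it is never executed at import
# time): how a person's current position is typically resolved. The slug below
# is hypothetical and the lookups assume the relevant fixtures are loaded.
#
#   person = Person.objects.get(slug='mario-rossi-1950-01-01')
#   council = Institution.objects.get(institution_type=Institution.COUNCIL)
#   if person.is_counselor():
#       charge = person.get_current_charge_in_institution(council)
#       group = person.current_group  # None when the person sits in no group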
class Resource(models.Model):
"""
    This class maps internet and contact resources (email, web sites, RSS, Facebook, Twitter, ...).
    It must be subclassed by PersonResource, InstitutionResource or GroupResource.
    The `value` field contains the resource itself.
    The `description` field may be used to specify the context.
    A `PERSON` resource may be a secretary or other contact person: only the
    name is of interest, so it is not mapped as an entity in the system.
"""
RES_TYPE = Choices(
('EMAIL', 'email', _('email')),
('URL', 'url', _('url')),
('PHONE', 'phone', _('phone')),
('FAX', 'fax', _('fax')),
('SNAIL', 'snail', _('snail mail')),
('PERSON', 'person', _('person')),
('TWITTER', 'twitter', _('twitter')),
('FACEBOOK', 'facebook', _('facebook')),
('FINANCIAL', 'financial', _('financial information')),
)
resource_type = models.CharField(verbose_name=_('type'), max_length=10, choices=RES_TYPE)
# 2000 chars is the maximum length suggested for url length (see: http://stackoverflow.com/questions/417142/what-is-the-maximum-length-of-a-url-in-different-browsers )
value = models.CharField(verbose_name=_('value'), max_length=2000)
description = models.CharField(verbose_name=_('description'), max_length=255, blank=True)
class Meta:
abstract = True
verbose_name = _('Resource')
        verbose_name_plural = _('Resources')
class PersonResource(Resource):
person = models.ForeignKey('Person', verbose_name=_('person'), related_name='resource_set')
class InstitutionResource(Resource):
institution = models.ForeignKey('Institution', verbose_name=_('institution'), related_name='resource_set')
class GroupResource(Resource):
group = models.ForeignKey('Group', verbose_name=_('group'), related_name='resource_set')
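# Hedged sketch of attaching a contact resource to a person; the person object
# and the values are made up, and ``RES_TYPE.email`` resolves to the 'EMAIL'
# db value defined on Resource above.
#
#   PersonResource.objects.create(
#       person=person,
#       resource_type=PersonResource.RES_TYPE.email,
#       value='mario.rossi@example.com',
#       description='institutional email',
#   )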
class Charge(NewsTargetMixin, models.Model):
"""
This is the base class for the different macro-types of charges (institution, organization, administration).
The ``related_news`` attribute can be used to fetch news items related to a given charge.
"""
person = models.ForeignKey('Person', verbose_name=_('person'))
start_date = models.DateField(_('start date'))
end_date = models.DateField(_('end date'), blank=True, null=True)
end_reason = models.CharField(_('end reason'), blank=True, max_length=255)
description = models.CharField(_('description'), blank=True, max_length=255,
help_text=_('Insert the complete description of the charge, if it gives more information than the charge type'))
# objects = PassThroughManager.for_queryset_class(TimeFramedQuerySet)()
objects = PassThroughManager.for_queryset_class(ChargeQuerySet)()
class Meta:
abstract = True
def get_absolute_url(self):
return self.person.get_absolute_url()
# @property
def is_in_charge(self, as_of=None):
if not as_of:
#as_of = datetime.now()
as_of = date.today()
# if a datetime, extract the date part
if isinstance(as_of, datetime.datetime):
as_of = as_of.date()
# check we receive a date (note: a datetime is also a date, but
# we already took care of this case in the previous lines)
if not isinstance(as_of, datetime.date):
raise ValueError("The passed parameter is not a date")
return as_of >= self.start_date and (not self.end_date or as_of <= self.end_date)
@property
def duration(self):
if not self.start_date: return None
# return (self.end_date if self.end_date else datetime.datetime.now().date()) - self.start_date
return (self.end_date if self.end_date else date.today()) - self.start_date
@property
def speeches(self):
"""
        Speeches delivered by the person while holding this charge
        """
        from open_municipio.acts.models import Speech

        start_date = self.start_date
        end_date = self.end_date if self.end_date else date.today()
        return Speech.objects.filter(
            author=self.person,
            sitting_item__sitting__date__range=(start_date, end_date))
@property
def n_speeches(self):
"""
Number of speeches of a charge
"""
return self.speeches.count()
@property
def speeches_size(self):
"""
        Total text size of the speeches delivered while holding this charge
"""
return sum([s.text_size for s in self.speeches.all()])
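# Hedged example of the time-frame helpers defined on Charge and inherited by
# its concrete subclasses; it assumes at least one current InstitutionCharge
# exists and uses the ``date`` imported at the top of this module.
#
#   charge = InstitutionCharge.objects.current()[0]
#   charge.is_in_charge(date(2013, 1, 1))  # True if the date falls within the mandate
#   charge.duration                        # timedelta from start_date to end_date/today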
class ChargeResponsability(models.Model):
"""
    Describes a responsibility that the charge holds
    inside the charge's *container*; it integrates the composition relation.
    For example: a counselor may be the president of the council.
    This is an abstract class that must be subclassed in order to specify
    the context (institution charge or group charge).
"""
start_date = models.DateField(_('start date'))
end_date = models.DateField(_('end date'), blank=True, null=True)
description = models.CharField(_('description'), blank=True, max_length=255,
help_text=_('Insert an extended description of the responsability'))
objects = PassThroughManager.for_queryset_class(TimeFramedQuerySet)()
class Meta:
abstract = True
class InstitutionCharge(Charge):
"""
This is a charge in the institution (city council, city government, mayor, committee).
"""
substitutes = models.OneToOneField('InstitutionCharge', blank=True, null=True,
related_name='reverse_substitute_set',
on_delete=models.PROTECT,
verbose_name=_('in substitution of'))
substituted_by = models.OneToOneField('InstitutionCharge', blank=True, null=True,
related_name='reverse_substituted_by_set',
on_delete=models.PROTECT,
verbose_name=_('substituted by'))
institution = models.ForeignKey('Institution', on_delete=models.PROTECT, verbose_name=_('institution'), related_name='charge_set')
op_charge_id = models.IntegerField(_('openpolis institution charge ID'), blank=True, null=True)
original_charge = models.ForeignKey('InstitutionCharge', blank=True, null=True,
related_name='committee_charge_set',
verbose_name=_('original institution charge'))
n_rebel_votations = models.IntegerField(default=0)
n_present_votations = models.IntegerField(default=0, verbose_name=_("number of presences during votes"))
n_absent_votations = models.IntegerField(default=0, verbose_name=_("number of absences during votes"))
n_present_attendances = models.IntegerField(default=0, verbose_name=_("number of present attendances"))
n_absent_attendances = models.IntegerField(default=0, verbose_name=_("number of absent attendances"))
can_vote = models.BooleanField(default=True, verbose_name=_("in case of a city council member, specifies whether he/she can vote"))
def get_absolute_url(self):
url = None
if self.institution.institution_type == Institution.COMMITTEE:
url = self.person.get_absolute_url()
else:
url = reverse("om_politician_detail",
kwargs={"slug":self.person.slug,
"institution_slug": self.institution.slug,
"year":self.start_date.year, "month": self.start_date.month,
"day":self.start_date.day })
return url
def is_counselor(self):
return self.institution.institution_type == Institution.COUNCIL
@property
def is_in_city_government(self):
return (self.institution.institution_type == Institution.CITY_GOVERNMENT or \
self.institution.institution_type == Institution.MAYOR)
class Meta(Charge.Meta):
db_table = u'people_institution_charge'
verbose_name = _('institution charge')
verbose_name_plural = _('institution charges')
ordering = ['person__first_name', 'person__last_name']
def __unicode__(self):
if self.denomination:
return u"%s %s - %s" % (self.person.first_name, self.person.last_name, self.denomination)
else:
return u"%s %s" % (self.person.first_name, self.person.last_name)
# TODO: model validation: check that ``substitutes`` and ``substituted_by`` fields
# point to ``InstitutionCharge``s of the same kind
@property
def denomination(self):
if self.institution.institution_type == Institution.MAYOR:
denomination = _('Mayor') #.translate(settings.LANGUAGE_CODE) #-FS why?
if self.description != "":
denomination += ", %s" % self.description
return denomination
elif self.institution.institution_type == Institution.CITY_GOVERNMENT:
if self.responsabilities.count():
s = self.responsabilities[0].get_charge_type_display()
if self.responsabilities[0].charge_type == InstitutionResponsability.CHARGE_TYPES.firstdeputymayor:
s += ", %s" % self.description
return "%s" % (s, )
else:
return " %s" % self.description
elif self.institution.institution_type == Institution.COUNCIL:
if self.responsabilities.count():
return "%s Consiglio Comunale" % (self.responsabilities[0].get_charge_type_display(),)
else:
return _('Counselor')
elif self.institution.institution_type == Institution.COMMITTEE:
if self.responsabilities.count():
return "%s" % (self.responsabilities[0].get_charge_type_display())
else:
                return _('Member')
else:
return ''
@property
def committee_charges(self):
return self.committee_charge_set.all()
@property
def responsabilities(self):
return self.institutionresponsability_set.all()
def get_current_responsability(self, moment=None):
"""
        Returns the current institutional responsibility, if any
"""
if self.responsabilities.current(moment=moment).count() == 0:
return None
if self.responsabilities.current(moment=moment).count() == 1:
return self.responsabilities.current(moment=moment)[0]
raise MultipleObjectsReturned
current_responsability = property(get_current_responsability)
@property
def presented_acts(self):
"""
The QuerySet of acts presented by this charge.
"""
return self.presented_act_set.all()
@property
def n_presented_acts(self):
"""
The number of acts presented by this charge
"""
return self.presented_acts.count()
@property
def received_acts(self):
"""
The QuerySet of acts received by this charge.
"""
return self.received_act_set.all()
@property
def n_received_acts(self):
"""
        The number of acts received by this charge.
"""
return self.received_act_set.count()
@property
def charge_type(self):
"""
Returns the basic charge type translated string, according to the institution.
For example: the council president's basic type is counselor.
"""
if self.institution.institution_type == Institution.MAYOR:
return _('Mayor')
elif self.institution.institution_type == Institution.CITY_GOVERNMENT:
return _('City government member')
elif self.institution.institution_type == Institution.COUNCIL:
return _('Counselor')
elif self.institution.institution_type == Institution.COMMITTEE:
return _('Committee member')
else:
return 'Unknown charge type!'
@property
def charge_type_verbose(self):
"""
"""
s = self.charge_type
if self.start_date:
if self.end_date and self.start_date.year == self.end_date.year:
s += ' nel ' + str(self.start_date.year)
else:
s += ' dal ' + str(self.start_date.year)
if self.end_date:
s += ' al ' + str(self.end_date.year)
return s
@property
def council_group(self):
"""
DEPRECATED: use `self.current_groupcharge.group`
Returns the city council's group this charge currently belongs to (if any).
"""
return self.current_groupcharge.group
@property
def current_groupcharge(self):
"""
Returns the current group related to a council charge (end_date is null).
A single GroupCharge object is returned. The group may be accessed by the `.group` attribute
A Council Institution charge MUST have one group.
Other types of charge do not have a group, so None is returned.
"""
return self.current_at_moment_groupcharge()
def current_at_moment_groupcharge(self, moment=None):
"""
Returns groupcharge at given moment in time.
If moment is None, then current groupcharge is returned
"""
if self.institution.institution_type == Institution.COUNCIL:
try:
return GroupCharge.objects.select_related().current(moment=moment).get(charge__id=self.id)
except GroupCharge.DoesNotExist:
return None
elif self.original_charge and \
(self.institution.institution_type == Institution.COMMITTEE or \
self.institution.institution_type == Institution.JOINT_COMMITTEE):
try:
return GroupCharge.objects.select_related().current(moment=moment).get(charge=self.original_charge)
except GroupCharge.DoesNotExist:
return None
else:
return None
@property
def historical_groupcharges(self):
"""
Returns the list of past groups related to a council charge (end_date is not null).
A list of GroupCharge objects is returned. The group may be accessed by the `.group` attribute
"""
if self.institution.institution_type == Institution.COUNCIL:
return GroupCharge.objects.select_related().past().filter(charge__id=self.id)
else:
return []
def update_rebellion_cache(self):
"""
        Re-compute the number of votations in which the charge voted differently from her group
and update the n_rebel_votations counter
"""
self.n_rebel_votations = self.chargevote_set.filter(is_rebel=True).count()
self.save()
def update_presence_cache(self):
"""
Re-compute the number of votations where the charge was present/absent
and update the respective counters
"""
from open_municipio.votations.models import ChargeVote
from open_municipio.attendances.models import ChargeAttendance
absent = ChargeVote.VOTES.absent
self.n_present_votations = self.chargevote_set.exclude(vote=absent).count()
self.n_absent_votations = self.chargevote_set.filter(vote=absent).count()
self.n_present_attendances = self.chargeattendance_set.filter(value=ChargeAttendance.VALUES.pres).count()
self.n_absent_attendances = self.chargeattendance_set.exclude(value=ChargeAttendance.VALUES.pres).count()
self.save()
@property
def taxonomy_count(self):
count = { 'categories' : Counter(), 'tags' : Counter(), 'topics' : Counter(), 'locations' : Counter() }
for act in self.presented_acts:
count['categories'].update(act.categories)
count['tags'].update(act.tags)
count['locations'].update(act.locations)
return count
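# Illustrative InstitutionCharge lookups (counts and group membership depend on
# the data actually imported in an installation):
#
#   council = Institution.objects.get(institution_type=Institution.COUNCIL)
#   counselors = InstitutionCharge.objects.current().filter(institution=council)
#   for c in counselors:
#       gc = c.current_groupcharge   # GroupCharge or None
#       c.update_presence_cache()    # refreshes the n_present_*/n_absent_* counters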
class InstitutionResponsability(ChargeResponsability):
"""
    Responsibility for institutional charges.
"""
CHARGE_TYPES = Choices(
('MAYOR', 'mayor', _('Mayor')),
('FIRSTDEPUTYMAYOR', 'firstdeputymayor', _('First deputy mayor')),
('PRESIDENT', 'president', _('President')),
('VICE', 'vice', _('Vice president')),
('VICEVICE', 'vicevice', _('Vice vice president')),
)
charge = models.ForeignKey(InstitutionCharge, verbose_name=_('charge'))
charge_type = models.CharField(_('charge type'), max_length=16, choices=CHARGE_TYPES)
class Meta:
verbose_name = _('institutional responsability')
verbose_name_plural = _('institutional responsabilities')
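# Hedged sketch: recording that an existing council charge became president of
# the council (the ``charge`` instance is assumed to exist).
#
#   InstitutionResponsability.objects.create(
#       charge=charge,
#       charge_type=InstitutionResponsability.CHARGE_TYPES.president,
#       start_date=date(2012, 6, 1),
#   )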
class CompanyCharge(Charge):
"""
    This is a charge in a company controlled by the municipality (in Italian: 'partecipate').
"""
CEO_CHARGE = 1
PRES_CHARGE = 2
VICE_CHARGE = 3
DIR_CHARGE = 4
CHARGE_TYPES = Choices(
(CEO_CHARGE, _('Chief Executive Officer')),
(PRES_CHARGE, _('President')),
(VICE_CHARGE, _('Vice president')),
(DIR_CHARGE, _('Member of the board')),
)
company = models.ForeignKey('Company', on_delete=models.PROTECT, verbose_name=_('company'), related_name='charge_set')
charge_type = models.IntegerField(_('charge type'), choices=CHARGE_TYPES)
class Meta(Charge.Meta):
db_table = u'people_organization_charge'
verbose_name = _('organization charge')
verbose_name_plural = _('organization charges')
def __unicode__(self):
# TODO: implement ``get_charge_type_display()`` method
return u'%s - %s' % (self.get_charge_type_display(), self.company.name)
class AdministrationCharge(Charge):
"""
This is a charge in the internal municipality administration.
"""
DIR_CHARGE = 1
EXEC_CHARGE = 2
CHARGE_TYPES = Choices(
(DIR_CHARGE, _('Director')),
(EXEC_CHARGE, _('Executive')),
)
office = models.ForeignKey('Office', on_delete=models.PROTECT, verbose_name=_('office'), related_name='charge_set')
charge_type = models.IntegerField(_('charge type'), choices=CHARGE_TYPES)
class Meta(Charge.Meta):
db_table = u'people_administration_charge'
verbose_name = _('administration charge')
verbose_name_plural = _('administration charges')
def __unicode__(self):
# TODO: implement ``get_charge_type_display()`` method
return u'%s - %s' % (self.get_charge_type_display(), self.office.name)
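# Hedged sketch: the integer constants above are the stored values, while
# Django's generated ``get_charge_type_display()`` returns the label; the
# ``office`` and ``person`` instances are assumed to exist.
#
#   ac = AdministrationCharge(office=office, person=person,
#                             charge_type=AdministrationCharge.DIR_CHARGE,
#                             start_date=date(2012, 1, 1))
#   ac.get_charge_type_display()  # u'Director'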
class Group(models.Model):
"""
This model represents a group of counselors.
"""
name = models.CharField(max_length=100)
acronym = models.CharField(blank=True, max_length=16)
charge_set = models.ManyToManyField('InstitutionCharge', through='GroupCharge')
slug = models.SlugField(unique=True, blank=True, null=True, help_text=_('Suggested value automatically generated from name, must be unique'))
img = ImageField(upload_to="group_images", blank=True, null=True)
start_date = models.DateField(blank=True, null=True, verbose_name=_("start date"))
end_date = models.DateField(blank=True, null=True, verbose_name=_("end date"))
objects = PassThroughManager.for_queryset_class(GroupQuerySet)()
class Meta:
verbose_name = _('group')
verbose_name_plural = _('groups')
ordering = ("name", "acronym", )
def get_absolute_url(self):
return reverse("om_institution_group", kwargs={'slug': self.slug})
def __unicode__(self):
if self.start_date:
return u'%s (%s, %s)' % (self.name, self.acronym, self.start_date.year)
else:
return u'%s (%s)' % (self.name, self.acronym)
@property
def leader(self):
"""
The current leader of the Group as GroupResponsability.
None if not found.
        To fetch the related InstitutionCharge, use ``.charge.charge``.
"""
try:
leader = GroupResponsability.objects.select_related().get(
charge__group=self,
charge_type=GroupResponsability.CHARGE_TYPES.leader,
end_date__isnull=True
)
return leader
except ObjectDoesNotExist:
return None
@property
def deputy(self):
"""
The current deputy leader of the Group as GroupResponsability.
None if not found.
        To fetch the related InstitutionCharge, use ``.charge.charge``.
"""
try:
deputy = GroupResponsability.objects.select_related().get(
charge__group=self,
charge_type=GroupResponsability.CHARGE_TYPES.deputy,
end_date__isnull=True
)
return deputy
except ObjectDoesNotExist:
return None
@property
def members(self):
"""
        Current members of the group, as institution charges; the group leader
        and deputy leader are **excluded**.
"""
group_members = self.groupcharge_set.current().exclude(
groupresponsability__charge_type__in=(
GroupResponsability.CHARGE_TYPES.leader,
GroupResponsability.CHARGE_TYPES.deputy
),
groupresponsability__end_date__isnull=True
)
return self.institution_charges.filter(groupcharge__in=group_members)
"""
President and vice-president may be excluded
.exclude(
groupcharge__charge__institutionresponsability__charge_type__in=(
InstitutionResponsability.CHARGE_TYPES.president,
InstitutionResponsability.CHARGE_TYPES.vice
)
)
"""
@property
def alpha_members(self):
"""
Alphabetically sorted members
"""
return self.members.order_by('person__last_name')
def get_institution_charges(self, moment=None):
"""
All current institution charges in the group, leader **included**
"""
return self.charge_set.all().current(moment=moment)
institution_charges = property(get_institution_charges)
@property
def current_size(self):
"""
returns number of current charges
"""
return self.groupcharge_set.current().count()
@property
def is_current(self):
"""
returns True if the group has at least one current charge
"""
return self.groupcharge_set.current().count() > 0
@property
def majority_records(self):
return self.groupismajority_set.all()
@property
def in_council_now(self):
today = date.today()
found = self.majority_records.filter(Q(end_date__gt=today) | Q(end_date__isnull=True))
return found.count() > 0
@property
def is_majority_now(self):
# only one majority record with no ``end_date`` (or with an ``end_date``
# set in the future) should exists at a time (i.e. the current one)
today = date.today()
found = self.majority_records.filter(is_majority=True).exclude(end_date__lt=today)
return found.count() > 0
@property
def resources(self):
return self.resource_set.all()
class GroupCharge(models.Model):
"""
This model records the historical composition of council groups.
This only makes sense for ``InstitutionCharges``.
"""
group = models.ForeignKey('Group', verbose_name=_("group"))
charge = models.ForeignKey('InstitutionCharge', verbose_name=_("charge"))
charge_description = models.CharField(blank=True, max_length=255, verbose_name=_("charge description"))
start_date = models.DateField(verbose_name=_("start date"))
end_date = models.DateField(blank=True, null=True, verbose_name=_("end date"))
end_reason = models.CharField(blank=True, max_length=255, verbose_name=_("end reason"))
objects = PassThroughManager.for_queryset_class(TimeFramedQuerySet)()
@property
def responsabilities(self):
return self.groupresponsability_set.all()
def get_current_responsability(self, moment=None):
"""
Returns the current group responsability, if any
"""
if self.responsabilities.current(moment=moment).count() == 0:
return None
if self.responsabilities.current(moment=moment).count() == 1:
return self.responsabilities.current(moment=moment)[0]
raise MultipleObjectsReturned
current_responsability = property(get_current_responsability)
@property
def responsability(self):
if self.responsabilities.count() == 1:
r = self.responsabilities[0]
end_date = ""
if r.end_date:
end_date = " - %s" % r.end_date
s = "%s: %s%s" % (r.get_charge_type_display(), r.start_date, end_date)
return s
else:
return ""
class Meta:
db_table = u'people_group_charge'
verbose_name = _('group charge')
verbose_name_plural = _('group charges')
def __unicode__(self):
if self.responsability:
return u"%s - %s - %s" % (self.group.acronym, self.charge.person, self.responsability)
else:
return u"%s - %s" % (self.group.acronym, self.charge.person)
class GroupResponsability(ChargeResponsability):
"""
Responsibility for group charges.
"""
CHARGE_TYPES = Choices(
('LEADER', 'leader', _('Group leader')),
('DEPUTY', 'deputy', _('Group deputy leader')),
)
charge_type = models.CharField(_('charge type'), max_length=16, choices=CHARGE_TYPES)
charge = models.ForeignKey(GroupCharge, verbose_name=_('charge'))
def __unicode__(self):
end_date = ""
if self.end_date:
end_date = " - %s" % self.end_date
return u"%s (%s%s)" % (self.get_charge_type_display(), self.start_date, end_date)
class Meta:
verbose_name = _("group responsibility")
verbose_name_plural = _("group responsibilities")
class GroupIsMajority(models.Model):
"""
This model records the historical composition of the majority
"""
group = models.ForeignKey('Group')
is_majority = models.NullBooleanField(_('Is majority'), default=False, null=True)
start_date = models.DateField(_('Start date'))
end_date = models.DateField(_('End date'), blank=True, null=True)
objects = PassThroughManager.for_queryset_class(TimeFramedQuerySet)()
class Meta:
verbose_name = _('group majority')
verbose_name_plural = _('group majorities')
def __unicode__(self):
if self.is_majority:
return u'yes'
elif self.is_majority is False:
return u'no'
else:
return u'na'
#
# Bodies
#
class Body(SlugModel):
"""
The base model for bodies.
Uses the *abstract base class* inheritance model.
"""
name = models.CharField(_('name'), max_length=255)
slug = models.SlugField(unique=True, blank=True, null=True, help_text=_('Suggested value automatically generated from name, must be unique'))
description = models.TextField(_('description'), blank=True)
@property
def lowername(self):
return self.name.lower()
class Meta:
abstract = True
def __unicode__(self):
return u'%s' % (self.name,)
class Institution(Body):
"""
Institutional bodies can be of different types (as specified by the ``institution_type`` field).
This model has a relation with itself, in order to map hierarchical bodies (joint committees, ...).
"""
MAYOR = 1
CITY_GOVERNMENT = 2
COUNCIL = 3
COMMITTEE = 4
JOINT_COMMITTEE = 5
INSTITUTION_TYPES = Choices(
(MAYOR, _('Mayor')),
(COUNCIL, _('Council')),
(CITY_GOVERNMENT, _('Town government')),
(COMMITTEE, _('Committee')),
(JOINT_COMMITTEE, _('Joint committee')),
)
parent = models.ForeignKey('Institution', related_name='sub_body_set', blank=True, null=True)
institution_type = models.IntegerField(choices=INSTITUTION_TYPES)
position = models.PositiveIntegerField(editable=False, default=0)
class Meta(Body.Meta):
verbose_name = _('institution')
verbose_name_plural = _('institutions')
ordering = ('position',)
def save(self, *args, **kwargs):
"""slugify name on first save"""
if not self.id:
self.slug = slugify(self.name)
# set position
qs = self.__class__.objects.order_by('-position')
try:
self.position = qs[0].position + 1
except IndexError:
self.position = 0
super(Institution, self).save(*args, **kwargs)
def get_absolute_url(self):
if self.institution_type == self.MAYOR:
return reverse("om_institution_mayor")
elif self.institution_type == self.CITY_GOVERNMENT:
return reverse("om_institution_citygov")
elif self.institution_type == self.COUNCIL:
return reverse("om_institution_council")
elif self.institution_type == self.COMMITTEE:
return reverse("om_institution_committee", kwargs={'slug': self.slug})
@property
def sittings(self):
"""
        A Sitting is linked to an Institution through fields "institution" and
"other_institution". The related name of the former is "sitting_set",
while the related name of the latter is "other_sittings". If you want to
know all the sittings of this Institution you must take the (distinct)
union of the two
"""
qs = (self.sitting_set.all() | self.other_sittings.all()).distinct()
return qs
@property
def name_with_preposition(self):
"""
returns name with preposition
"""
if self.institution_type == self.MAYOR:
return "del %s" % self.name
elif self.institution_type == self.CITY_GOVERNMENT:
return "della %s" % self.name
elif self.institution_type == self.COUNCIL:
return "del %s" % self.name
elif self.institution_type == self.COMMITTEE:
return "della %s" % self.name
return self.name
@property
def charges(self):
"""
The QuerySet of all *current* charges (``InstitutionCharge`` instances)
associated with this institution.
"""
return self.get_current_charges(moment=None)
def get_current_charges(self, moment=None):
"""
        The QuerySet of all charges current at the specified moment
"""
return self.charge_set.all().current(moment)
@property
def firstdeputy(self):
"""
The current firstdeputy mayor of the institution as InstitutionResponsability.
None if not found.
To access the charge: firstdeputy.charge
"""
try:
return InstitutionResponsability.objects.select_related().get(
charge__institution=self,
charge_type=InstitutionResponsability.CHARGE_TYPES.firstdeputymayor,
end_date__isnull=True
)
except ObjectDoesNotExist:
return None
@property
def president(self):
"""
The current president of the institution as InstitutionResponsability.
None if not found.
To access the charge: pres.charge
"""
try:
pres = InstitutionResponsability.objects.select_related().get(
charge__institution=self,
charge_type=InstitutionResponsability.CHARGE_TYPES.president,
end_date__isnull=True
)
return pres
except ObjectDoesNotExist:
return None
@property
def vicepresidents(self):
"""
The current vice presidents of the institution, as InstitutionResponsabilities
There can be more than one vicepresident.
To access the charge: vp.charge
"""
return InstitutionResponsability.objects.select_related().filter(
charge__institution=self,
charge_type=InstitutionResponsability.CHARGE_TYPES.vice,
end_date__isnull=True
)
@property
def members(self):
"""
Members of the institution, as charges.
Current mayor, first deputy, president and vice presidents **excluded**.
"""
return self.charges.exclude(
institutionresponsability__charge_type__in=(
InstitutionResponsability.CHARGE_TYPES.mayor,
InstitutionResponsability.CHARGE_TYPES.firstdeputymayor,
InstitutionResponsability.CHARGE_TYPES.president,
InstitutionResponsability.CHARGE_TYPES.vice,
),
institutionresponsability__end_date__isnull=True
).select_related()
@property
def emitted_acts(self):
"""
The QuerySet of all acts emitted by this institution.
Note that the objects comprising the resulting QuerySet aren't generic ``Act`` instances,
but instances of specific ``Act`` subclasses (i.e. ``Deliberation``, ``Motion``, etc.).
This is made possible by the fact that the default manager for the ``Act`` model is
``model_utils.managers.InheritanceManager``, and this manager class declares
``use_for_related_fields = True``. See `Django docs`_ for details.
.. _`Django docs`: https://docs.djangoproject.com/en/1.3/topics/db/managers/#controlling-automatic-manager-types
"""
# NOTE: See also Django bug #14891
return self.emitted_act_set.all().select_subclasses()
@property
def resources(self):
return self.resource_set.all()
@transaction.commit_on_success
def _move(self, up):
"""
        Moving an object may require updating the positions of all objects in
        the list: we cannot assume that the position values are consecutive,
        since insertions and deletions can create "bubbles" and duplicates
        among them. The reordering algorithm goes like this:
- assign everyone a consecutive and unique position value
- detect the previous and next institution, w.r.t. self
- if up, switch position with previous and save previous
- if down, switch position with next and save next
- save self
"""
qs = self.__class__._default_manager
        qs = qs.order_by("position")
p = 0
prev_inst = None
next_inst = None
found = False
for curr_inst in qs.all():
found = found or (curr_inst == self)
if curr_inst.position != p:
curr_inst.position = p
curr_inst.save()
p = p + 1
if not found:
prev_inst = curr_inst
elif next_inst is None and curr_inst != self:
next_inst = curr_inst
if up:
if prev_inst:
prev_inst.position,self.position = self.position,prev_inst.position
prev_inst.save()
else:
if next_inst:
next_inst.position,self.position = self.position,next_inst.position
next_inst.save()
self.save()
def move_down(self):
"""
Move this object down one position.
"""
return self._move(up=False)
def move_up(self):
"""
Move this object up one position.
"""
return self._move(up=True)
class Company(Body):
"""
A company owned by the municipality, whose executives are nominated politically.
"""
class Meta(Body.Meta):
verbose_name = _('company')
verbose_name_plural = _('companies')
def get_absolute_url(self):
return reverse("om_company_detail", kwargs={'slug': self.slug})
@property
def charges(self):
"""
The QuerySet of all *current* charges (``CompanyCharge`` instances)
associated with this company.
"""
return self.charge_set.current()
class Office(Body):
"""
Internal municipality office, playing a role in municipality's administration.
"""
parent = models.ForeignKey('Office', blank=True, null=True, default=None, verbose_name=_("the parent office, in a hierarchy"))
class Meta(Body.Meta):
verbose_name = _('office')
verbose_name_plural = _('offices')
    def get_absolute_url(self):
return reverse("om_office_detail", kwargs={'slug': self.slug})
@property
def charges(self):
"""
The QuerySet of all *current* charges (``AdministrationCharge`` instances)
associated with this office.
"""
return self.charge_set.current()
#
# Sittings
#
class Sitting(TimeStampedModel):
"""
    A sitting models a gathering of people in a given institution.
    Usually votations and speeches occur during a sitting.
A sitting is broken down into SittingItems, and each item may be related to one or more acts.
Each item contains Speeches, which are a very special extension of Document
(audio attachments, with complex relations with votations, charges and acts).
"""
idnum = models.CharField(blank=True, max_length=64, verbose_name=_("identifier"))
date = models.DateField(verbose_name=_("date"))
number = models.IntegerField(blank=True, null=True, verbose_name=_("number"))
call = models.IntegerField(blank=True, null=True, verbose_name=_("call"))
institution = models.ForeignKey(Institution, on_delete=models.PROTECT, verbose_name=_("institution"))
other_institution_set = models.ManyToManyField(Institution, blank=True, null=True, verbose_name=_("other institutions"), related_name="other_sittings")
minute = models.ForeignKey('acts.Minute', null=True, blank=True, related_name="sitting_set", verbose_name=_("minute"))
class Meta:
verbose_name = _('sitting')
verbose_name_plural = _('sittings')
def __unicode__(self):
num = ""
if self.number:
num = " num. %s " % self.number
return u'Seduta %s del %s (%s)' % (num, self.date.strftime('%d/%m/%Y'), self.institution.name)
@property
def other_institutions(self):
return self.other_institution_set.all()
@property
def institutions(self):
qs = Institution.objects.none()
if self.institution_id != None:
qs = Institution.objects.filter(id=self.institution_id)
qs = (qs | self.other_institution_set.all()).distinct()
return qs
@property
def sitting_items(self):
return SittingItem.objects.filter(sitting=self)
@property
def num_items(self):
return self.sitting_items.count()
@permalink
def get_absolute_url(self):
prefix = "%s-%s-%s" % (self.institution.slug, self.idnum, self.date, )
sitting_url = 'om_sitting_detail', (), { 'prefix':prefix, 'pk':self.pk, }
return sitting_url
@property
def sitting_next(self):
next = Sitting.objects.filter(date__gt=self.date,institution=self.institution).order_by("date")[:1]
if len(next) == 0:
return None
else:
return next[0]
@property
def sitting_prev(self):
prev = Sitting.objects.filter(date__lt=self.date,institution=self.institution).order_by("-date")[:1]
if len(prev) == 0:
return None
else:
return prev[0]
class SittingItem(models.Model):
"""
A SittingItem maps a single point of discussion in a Sitting.
It can be of type:
    - odg - a true item of discussion
    - procedural - a procedural issue; discussed, but usually less relevant
    - intt - interrogations and interpellations (questions and answers), usually discussed at the beginning of the sitting
SittingItems are ordered through the seq_order field.
"""
ITEM_TYPE = Choices(
('ODG', 'odg', _('ordine del giorno')),
('PROC', 'procedural', _('questione procedurale')),
('INTT', 'intt', _('interrogation')),
)
sitting = models.ForeignKey(Sitting)
title = models.CharField(max_length=512)
item_type = models.CharField(choices=ITEM_TYPE, max_length=4)
seq_order = models.IntegerField(default=0,verbose_name=_('seq_order'))
related_act_set = models.ManyToManyField('acts.Act', blank=True, null=True)
class Meta:
verbose_name = _('sitting item')
verbose_name_plural = _('sitting items')
def __unicode__(self):
return unicode(self.title)
@permalink
def get_absolute_url(self):
return 'om_sittingitem_detail', (), { 'pk': self.pk }
@property
def num_related_acts(self):
return self.related_act_set.count()
@property
def long_repr(self):
"""
long unicode representation, contains the sitting details
"""
return u'%s - %s' % (self.sitting, self)
@property
def num_speeches(self):
"""
        the number of speeches that refer to this sitting item
"""
return open_municipio.acts.models.Speech.objects.filter(sitting_item=self).count()
## Private DB access API
class Mayor(object):
"""
A municipality mayor (both as a charge and an institution).
"""
@property
def as_institution(self):
"""
A municipality mayor, as an *institution*.
"""
mayor = None
try:
mayor = Institution.objects.select_related().get(institution_type=Institution.MAYOR)
except Institution.DoesNotExist:
# mayor does not exist, currently
pass
return mayor
@property
def as_charge(self):
"""
A municipality mayor, as a *charge*.
"""
mayor = None
try:
mayor = InstitutionCharge.objects.select_related().filter(end_date__isnull=True).get(institution__institution_type=Institution.MAYOR)
except InstitutionCharge.DoesNotExist:
# mayor has not been created
pass
return mayor
@property
def acts(self):
"""
The QuerySet of all acts emitted by the mayor (as an institution).
Note that the objects comprising the resulting QuerySet aren't generic ``Act`` instances,
but instances of specific ``Act`` subclasses (i.e. ``Deliberation``, ``Motion``, etc.).
"""
return self.as_institution.emitted_acts
class CityCouncil(object):
@property
def as_institution(self):
"""
A municipality council, as an *institution*.
"""
city_council = None
try:
city_council = Institution.objects.get(institution_type=Institution.COUNCIL)
except Institution.DoesNotExist:
# the city council has not been created
pass
return city_council
@property
def charges(self):
"""
All current members of the municipality council (aka *counselors*), as charges.
President and vice-presidents **included**.
"""
charges = InstitutionCharge.objects.none()
if self.as_institution:
charges = self.as_institution.charges.select_related()
return charges
@property
def president(self):
"""
The current president of the city council as InstitutionResponsability
None if not found.
"""
president = None
if self.as_institution:
president = self.as_institution.president
return president
@property
def vicepresidents(self):
"""
The current vice presidents of the city council, as InstitutionResponsabilities
There can be more than one vicepresident
"""
vp = None
if self.as_institution:
vp = self.as_institution.vicepresidents.select_related()
return vp
@property
def members(self):
"""
Members of the municipality council (aka *counselors*), as charges.
Current president and vice presidents **excluded**.
"""
members = InstitutionCharge.objects.none()
if self.as_institution:
members = self.as_institution.members.select_related()
return members
@property
def majority_members(self):
"""
Majority counselors, as charges.
"""
        # FIXME: this method should return a QuerySet, not a Set
result = set()
for majority_group in self.majority_groups:
result.add(majority_group.counselors)
return result
@property
def minority_members(self):
"""
Minority counselors, as charges.
"""
        # FIXME: this method should return a QuerySet, not a Set
result = set()
for minority_group in self.minority_groups:
result.add(minority_group.counselors)
return result
@property
def groups(self):
"""
        Groups of counselors within a municipality council.
"""
return Group.objects.select_related().all()
@property
def majority_groups(self):
"""
Counselors' groups belonging to majority.
"""
qs = Group.objects.select_related().filter(groupismajority__end_date__isnull=True).filter(groupismajority__is_majority=True)
return qs
@property
def minority_groups(self):
"""
Counselors' groups belonging to minority.
"""
qs = Group.objects.select_related().filter(groupismajority__end_date__isnull=True).filter(groupismajority__is_majority=False)
return qs
@property
def acts(self):
"""
The QuerySet of all acts emitted by the City Council.
Note that the objects comprising the resulting QuerySet aren't generic ``Act`` instances,
but instances of specific ``Act`` subclasses (i.e. ``Deliberation``, ``Motion``, etc.).
"""
return self.as_institution.select_related().emitted_acts
@property
def deliberations(self):
"""
The QuerySet of all deliberations emitted by the City Council.
"""
from open_municipio.acts.models import Deliberation
return Deliberation.objects.select_related().filter(emitting_institution=self.as_institution)
@property
def interrogations(self):
"""
The QuerySet of all interrogations emitted by the City Council.
"""
from open_municipio.acts.models import Interrogation
return Interrogation.objects.select_related().filter(emitting_institution=self.as_institution)
@property
def interpellations(self):
"""
The QuerySet of all interpellations emitted by the City Council.
"""
from open_municipio.acts.models import Interpellation
return Interpellation.objects.select_related().filter(emitting_institution=self.as_institution)
@property
def motions(self):
"""
The QuerySet of all motions emitted by the City Council.
"""
from open_municipio.acts.models import Motion
return Motion.objects.select_related().filter(emitting_institution=self.as_institution)
@property
def agendas(self):
"""
The QuerySet of all agendas emitted by the City Council.
"""
from open_municipio.acts.models import Agenda
return Agenda.objects.select_related().filter(emitting_institution=self.as_institution)
@property
def amendments(self):
"""
The QuerySet of all amendments emitted by the City Council.
"""
from open_municipio.acts.models import Amendment
return Amendment.objects.select_related().filter(emitting_institution=self.as_institution)
class CityGovernment(object):
@property
def as_institution(self):
"""
A municipality government, as an *institution*.
"""
city_gov = None
try:
city_gov = Institution.objects.get(institution_type=Institution.CITY_GOVERNMENT)
except Institution.DoesNotExist:
# city gov has not been created, yet
pass
return city_gov
@property
def charges(self):
"""
Members of a municipality government (mayor and first deputy included), as charges.
"""
return self.as_institution.charges.select_related()
@property
def firstdeputy(self):
"""
        Returns the first deputy mayor, or None if there is none.
"""
firstdeputy = None
if self.as_institution:
firstdeputy = self.as_institution.firstdeputy
return firstdeputy
@property
def members(self):
"""
Members of a municipality government (mayor and first deputy excluded), as charges.
"""
members = InstitutionCharge.objects.none()
if self.as_institution:
members = self.as_institution.members.select_related()
return members
@property
def acts(self):
"""
The QuerySet of all acts emitted by the city government (as an institution).
Note that the objects comprising the resulting QuerySet aren't generic ``Act`` instances,
but instances of specific ``Act`` subclasses (i.e. ``Deliberation``, ``Motion``, etc.).
"""
return self.as_institution.emitted_acts
@property
def deliberations(self):
"""
The QuerySet of all deliberations emitted by the City Government.
"""
from open_municipio.acts.models import Deliberation
return Deliberation.objects.select_related().filter(emitting_institution=self.as_institution)
@property
def interrogations(self):
"""
The QuerySet of all interrogations emitted by the City Government.
"""
from open_municipio.acts.models import Interrogation
return Interrogation.objects.select_related().filter(emitting_institution=self.as_institution)
@property
def interpellations(self):
"""
The QuerySet of all interpellations emitted by the City Government.
"""
from open_municipio.acts.models import Interpellation
return Interpellation.objects.select_related().filter(emitting_institution=self.as_institution)
@property
def motions(self):
"""
The QuerySet of all motions emitted by the City Government.
"""
from open_municipio.acts.models import Motion
return Motion.objects.select_related().filter(emitting_institution=self.as_institution)
@property
def agendas(self):
"""
The QuerySet of all agendas emitted by the City Government.
"""
from open_municipio.acts.models import Agenda
return Agenda.objects.select_related().filter(emitting_institution=self.as_institution)
@property
def amendments(self):
"""
The QuerySet of all amendments emitted by the City Government.
"""
from open_municipio.acts.models import Amendment
return Amendment.objects.select_related().filter(emitting_institution=self.as_institution)
class Committees(object):
def as_institution(self):
"""
Municipality committees, as *institutions*.
"""
# FIXME: Should we include joint committees here?
# (Institution.JOINT_COMMITTEE)
return Institution.objects.select_related().filter(
institution_type__in=(Institution.COMMITTEE, Institution.JOINT_COMMITTEE)
)
class Municipality(object):
"""
A hierarchy of objects representing a municipality.
    Provides convenient access to institutions, charges, groups and the like.
"""
def __init__(self):
self.mayor = Mayor()
self.gov = CityGovernment()
self.council = CityCouncil()
self.committees = Committees()
municipality = Municipality()
| agpl-3.0 | 937,666,606,277,034,800 | 33.020776 | 171 | 0.618539 | false | 4.017562 | false | false | false |
rh-s/heat | heat/engine/service_software_config.py | 1 | 12137 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_service import service
from oslo_utils import timeutils
import requests
import six
from six.moves.urllib import parse as urlparse
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import api
from heat.objects import resource as resource_object
from heat.objects import software_config as software_config_object
from heat.objects import software_deployment as software_deployment_object
from heat.rpc import api as rpc_api
LOG = logging.getLogger(__name__)
class SoftwareConfigService(service.Service):
def show_software_config(self, cnxt, config_id):
sc = software_config_object.SoftwareConfig.get_by_id(cnxt, config_id)
return api.format_software_config(sc)
def list_software_configs(self, cnxt, limit=None, marker=None,
tenant_safe=True):
scs = software_config_object.SoftwareConfig.get_all(
cnxt,
limit=limit,
marker=marker,
tenant_safe=tenant_safe)
result = [api.format_software_config(sc, detail=False) for sc in scs]
return result
def create_software_config(self, cnxt, group, name, config,
inputs, outputs, options):
sc = software_config_object.SoftwareConfig.create(cnxt, {
'group': group,
'name': name,
'config': {
'inputs': inputs,
'outputs': outputs,
'options': options,
'config': config
},
'tenant': cnxt.tenant_id})
return api.format_software_config(sc)
def delete_software_config(self, cnxt, config_id):
software_config_object.SoftwareConfig.delete(cnxt, config_id)
def list_software_deployments(self, cnxt, server_id):
all_sd = software_deployment_object.SoftwareDeployment.get_all(
cnxt, server_id)
result = [api.format_software_deployment(sd) for sd in all_sd]
return result
def metadata_software_deployments(self, cnxt, server_id):
if not server_id:
raise ValueError(_('server_id must be specified'))
all_sd = software_deployment_object.SoftwareDeployment.get_all(
cnxt, server_id)
# sort the configs by config name, to give the list of metadata a
# deterministic and controllable order.
all_sd_s = sorted(all_sd, key=lambda sd: sd.config.name)
result = [api.format_software_config(sd.config) for sd in all_sd_s]
return result
def _push_metadata_software_deployments(self, cnxt, server_id, sd):
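        # Store the current list of deployments in the server resource's
        # metadata, then push the updated metadata to the server via its
        # Swift TempURL (metadata_put_url) or Zaqar queue (metadata_queue_id),
        # whichever is configured in the resource data.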
rs = (resource_object.Resource.
get_by_physical_resource_id(cnxt, server_id))
if not rs:
return
deployments = self.metadata_software_deployments(cnxt, server_id)
md = rs.rsrc_metadata or {}
md['deployments'] = deployments
rs.update_and_save({'rsrc_metadata': md})
metadata_put_url = None
metadata_queue_id = None
for rd in rs.data:
if rd.key == 'metadata_put_url':
metadata_put_url = rd.value
break
elif rd.key == 'metadata_queue_id':
metadata_queue_id = rd.value
break
if metadata_put_url:
json_md = jsonutils.dumps(md)
requests.put(metadata_put_url, json_md)
elif metadata_queue_id:
zaqar_plugin = cnxt.clients.client_plugin('zaqar')
zaqar = zaqar_plugin.create_for_tenant(sd.stack_user_project_id)
queue = zaqar.queue(metadata_queue_id)
queue.post({'body': md, 'ttl': zaqar_plugin.DEFAULT_TTL})
def _refresh_swift_software_deployment(self, cnxt, sd, deploy_signal_id):
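        # deploy_signal_id is expected to be a Swift (TempURL) object URL; the
        # last two path segments are the container and the object name, e.g.
        # .../v1/AUTH_<tenant>/<container>/<object>.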
container, object_name = urlparse.urlparse(
deploy_signal_id).path.split('/')[-2:]
swift_plugin = cnxt.clients.client_plugin('swift')
swift = swift_plugin.client()
try:
headers = swift.head_object(container, object_name)
except Exception as ex:
# ignore not-found, in case swift is not consistent yet
if swift_plugin.is_not_found(ex):
LOG.info(_LI('Signal object not found: %(c)s %(o)s'), {
'c': container, 'o': object_name})
return sd
raise ex
lm = headers.get('last-modified')
last_modified = swift_plugin.parse_last_modified(lm)
prev_last_modified = sd.updated_at
if prev_last_modified:
# assume stored as utc, convert to offset-naive datetime
prev_last_modified = prev_last_modified.replace(tzinfo=None)
if prev_last_modified and (last_modified <= prev_last_modified):
return sd
try:
(headers, obj) = swift.get_object(container, object_name)
except Exception as ex:
# ignore not-found, in case swift is not consistent yet
if swift_plugin.is_not_found(ex):
LOG.info(_LI(
'Signal object not found: %(c)s %(o)s'), {
'c': container, 'o': object_name})
return sd
raise ex
if obj:
self.signal_software_deployment(
cnxt, sd.id, jsonutils.loads(obj),
last_modified.isoformat())
return software_deployment_object.SoftwareDeployment.get_by_id(
cnxt, sd.id)
def _refresh_zaqar_software_deployment(self, cnxt, sd, deploy_queue_id):
zaqar_plugin = cnxt.clients.client_plugin('zaqar')
zaqar = zaqar_plugin.create_for_tenant(sd.stack_user_project_id)
queue = zaqar.queue(deploy_queue_id)
messages = list(queue.pop())
if messages:
self.signal_software_deployment(
cnxt, sd.id, messages[0].body, None)
return software_deployment_object.SoftwareDeployment.get_by_id(
cnxt, sd.id)
def show_software_deployment(self, cnxt, deployment_id):
sd = software_deployment_object.SoftwareDeployment.get_by_id(
cnxt, deployment_id)
if sd.status == rpc_api.SOFTWARE_DEPLOYMENT_IN_PROGRESS:
c = sd.config.config
input_values = dict((i['name'], i['value']) for i in c['inputs'])
transport = input_values.get('deploy_signal_transport')
if transport == 'TEMP_URL_SIGNAL':
sd = self._refresh_swift_software_deployment(
cnxt, sd, input_values.get('deploy_signal_id'))
elif transport == 'ZAQAR_SIGNAL':
sd = self._refresh_zaqar_software_deployment(
cnxt, sd, input_values.get('deploy_queue_id'))
return api.format_software_deployment(sd)
def create_software_deployment(self, cnxt, server_id, config_id,
input_values, action, status,
status_reason, stack_user_project_id):
sd = software_deployment_object.SoftwareDeployment.create(cnxt, {
'config_id': config_id,
'server_id': server_id,
'input_values': input_values,
'tenant': cnxt.tenant_id,
'stack_user_project_id': stack_user_project_id,
'action': action,
'status': status,
'status_reason': status_reason})
self._push_metadata_software_deployments(cnxt, server_id, sd)
return api.format_software_deployment(sd)
def signal_software_deployment(self, cnxt, deployment_id, details,
updated_at):
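        # Interpret signal data posted by the in-instance tools: collect
        # output values, decide COMPLETE/FAILED from the status code and any
        # error outputs, and persist the result via update_software_deployment.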
if not deployment_id:
raise ValueError(_('deployment_id must be specified'))
sd = software_deployment_object.SoftwareDeployment.get_by_id(
cnxt, deployment_id)
status = sd.status
if not status == rpc_api.SOFTWARE_DEPLOYMENT_IN_PROGRESS:
# output values are only expected when in an IN_PROGRESS state
return
details = details or {}
output_status_code = rpc_api.SOFTWARE_DEPLOYMENT_OUTPUT_STATUS_CODE
ov = sd.output_values or {}
status = None
status_reasons = {}
status_code = details.get(output_status_code)
if status_code and str(status_code) != '0':
status = rpc_api.SOFTWARE_DEPLOYMENT_FAILED
status_reasons[output_status_code] = _(
'Deployment exited with non-zero status code: %s'
) % details.get(output_status_code)
event_reason = 'deployment failed (%s)' % status_code
else:
event_reason = 'deployment succeeded'
for output in sd.config.config['outputs'] or []:
out_key = output['name']
if out_key in details:
ov[out_key] = details[out_key]
if output.get('error_output', False):
status = rpc_api.SOFTWARE_DEPLOYMENT_FAILED
status_reasons[out_key] = details[out_key]
event_reason = 'deployment failed'
for out_key in rpc_api.SOFTWARE_DEPLOYMENT_OUTPUTS:
ov[out_key] = details.get(out_key)
if status == rpc_api.SOFTWARE_DEPLOYMENT_FAILED:
# build a status reason out of all of the values of outputs
# flagged as error_output
status_reasons = [' : '.join((k, six.text_type(status_reasons[k])))
for k in status_reasons]
status_reason = ', '.join(status_reasons)
else:
status = rpc_api.SOFTWARE_DEPLOYMENT_COMPLETE
status_reason = _('Outputs received')
self.update_software_deployment(
cnxt, deployment_id=deployment_id,
output_values=ov, status=status, status_reason=status_reason,
config_id=None, input_values=None, action=None,
updated_at=updated_at)
# Return a string describing the outcome of handling the signal data
return event_reason
def update_software_deployment(self, cnxt, deployment_id, config_id,
input_values, output_values, action,
status, status_reason, updated_at):
update_data = {}
if config_id:
update_data['config_id'] = config_id
if input_values:
update_data['input_values'] = input_values
if output_values:
update_data['output_values'] = output_values
if action:
update_data['action'] = action
if status:
update_data['status'] = status
if status_reason:
update_data['status_reason'] = status_reason
if updated_at:
update_data['updated_at'] = timeutils.normalize_time(
timeutils.parse_isotime(updated_at))
else:
update_data['updated_at'] = timeutils.utcnow()
sd = software_deployment_object.SoftwareDeployment.update_by_id(
cnxt, deployment_id, update_data)
# only push metadata if this update resulted in the config_id
# changing, since metadata is just a list of configs
if config_id:
self._push_metadata_software_deployments(cnxt, sd.server_id, sd)
return api.format_software_deployment(sd)
def delete_software_deployment(self, cnxt, deployment_id):
software_deployment_object.SoftwareDeployment.delete(
cnxt, deployment_id)
| apache-2.0 | -4,000,311,948,028,636,000 | 40.003378 | 79 | 0.59677 | false | 4.064635 | true | false | false |
tchar/ctypesgen | ctypesgencore/parser/cparser.py | 13 | 6895 | #!/usr/bin/env python
'''
Parse a C source file.
To use, subclass CParser and override its handle_* methods. Then instantiate
the class and call its `parse` method with the path of the file to parse.
'''
__docformat__ = 'restructuredtext'
import operator
import os.path
import re
import sys
import time
import warnings
import preprocessor
import yacc
import cgrammar
import cdeclarations
# --------------------------------------------------------------------------
# Lexer
# --------------------------------------------------------------------------
class CLexer(object):
def __init__(self, cparser):
self.cparser = cparser
self.type_names = set()
self.in_define = False
def input(self, tokens):
self.tokens = tokens
self.pos = 0
def token(self):
while self.pos < len(self.tokens):
t = self.tokens[self.pos]
self.pos += 1
if not t:
break
if t.type == 'PP_DEFINE':
self.in_define = True
elif t.type == 'PP_END_DEFINE':
self.in_define = False
# Transform PP tokens into C tokens
elif t.type == 'LPAREN':
t.type = '('
elif t.type == 'PP_NUMBER':
t.type = 'CONSTANT'
elif t.type == 'IDENTIFIER' and t.value in cgrammar.keywords:
t.type = t.value.upper()
elif t.type == 'IDENTIFIER' and t.value in self.type_names:
if (self.pos < 2 or self.tokens[self.pos-2].type not in
('ENUM', 'STRUCT', 'UNION')):
t.type = 'TYPE_NAME'
t.lexer = self
t.clexpos = self.pos - 1
return t
return None
# --------------------------------------------------------------------------
# Parser
# --------------------------------------------------------------------------
class CParser(object):
'''Parse a C source file.
    Subclass and override the handle_* methods. Call `parse` with the path
    of a file to parse.
'''
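    # Typical usage (illustrative): subclass CParser (see DebugCParser below),
    # override the handle_* callbacks, construct it with an options object,
    # and call parse() with the path of a header file.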
def __init__(self, options):
self.preprocessor_parser = preprocessor.PreprocessorParser(options,self)
self.parser = yacc.Parser()
prototype = yacc.yacc(method = 'LALR',
debug = False,
module = cgrammar,
write_tables = True,
outputdir = os.path.dirname(__file__),
optimize = True)
# If yacc is reading tables from a file, then it won't find the error
# function... need to set it manually
prototype.errorfunc = cgrammar.p_error
prototype.init_parser(self.parser)
self.parser.cparser = self
self.lexer = CLexer(self)
if not options.no_stddef_types:
self.lexer.type_names.add('wchar_t')
self.lexer.type_names.add('ptrdiff_t')
self.lexer.type_names.add('size_t')
if not options.no_gnu_types:
self.lexer.type_names.add('__builtin_va_list')
if sys.platform == 'win32' and not options.no_python_types:
self.lexer.type_names.add('__int64')
def parse(self, filename, debug=False):
'''Parse a file.
If `debug` is True, parsing state is dumped to stdout.
'''
self.handle_status('Preprocessing %s' % filename)
self.preprocessor_parser.parse(filename)
self.lexer.input(self.preprocessor_parser.output)
self.handle_status('Parsing %s' % filename)
self.parser.parse(lexer=self.lexer, debug=debug)
# ----------------------------------------------------------------------
# Parser interface. Override these methods in your subclass.
# ----------------------------------------------------------------------
def handle_error(self, message, filename, lineno):
    '''A parse error occurred.
The default implementation prints `lineno` and `message` to stderr.
The parser will try to recover from errors by synchronising at the
next semicolon.
'''
print >> sys.stderr, '%s:%s %s' % (filename, lineno, message)
def handle_pp_error(self, message):
'''The C preprocessor emitted an error.
        The default implementation prints the error to stderr. If processing
can continue, it will.
'''
print >> sys.stderr, 'Preprocessor:', message
def handle_status(self, message):
'''Progress information.
        The default implementation prints the message to stderr.
'''
print >> sys.stderr, message
def handle_define(self, name, params, value, filename, lineno):
'''#define `name` `value`
or #define `name`(`params`) `value`
name is a string
params is None or a list of strings
value is a ...?
'''
def handle_define_constant(self, name, value, filename, lineno):
'''#define `name` `value`
name is a string
value is an ExpressionNode or None
'''
def handle_define_macro(self, name, params, value, filename, lineno):
'''#define `name`(`params`) `value`
name is a string
params is a list of strings
value is an ExpressionNode or None
'''
def impl_handle_declaration(self, declaration, filename, lineno):
'''Internal method that calls `handle_declaration`. This method
also adds any new type definitions to the lexer's list of valid type
names, which affects the parsing of subsequent declarations.
'''
if declaration.storage == 'typedef':
declarator = declaration.declarator
if not declarator:
# XXX TEMPORARY while struct etc not filled
return
while declarator.pointer:
declarator = declarator.pointer
self.lexer.type_names.add(declarator.identifier)
self.handle_declaration(declaration, filename, lineno)
def handle_declaration(self, declaration, filename, lineno):
'''A declaration was encountered.
`declaration` is an instance of Declaration. Where a declaration has
multiple initialisers, each is returned as a separate declaration.
'''
pass
class DebugCParser(CParser):
'''A convenience class that prints each invocation of a handle_* method to
stdout.
'''
def handle_define(self, name, value, filename, lineno):
print '#define name=%r, value=%r' % (name, value)
def handle_define_constant(self, name, value, filename, lineno):
print '#define constant name=%r, value=%r' % (name, value)
def handle_declaration(self, declaration, filename, lineno):
print declaration
if __name__ == '__main__':
DebugCParser().parse(sys.argv[1], debug=True)
| bsd-3-clause | 7,333,108,137,968,191,000 | 32.149038 | 80 | 0.545323 | false | 4.491857 | false | false | false |
macchina-io/macchina.io | platform/JS/V8/v8/tools/sanitizers/sancov_formatter.py | 4 | 15598 | #!/usr/bin/env python
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to transform and merge sancov files into human readable json-format.
The script supports three actions:
all: Writes a json file with all instrumented lines of all executables.
merge: Merges sancov files with coverage output into an existing json file.
split: Split json file into separate files per covered source file.
The json data is structured as follows:
{
"version": 1,
"tests": ["executable1", "executable2", ...],
"files": {
"file1": [[<instr line 1>, <bit_mask>], [<instr line 2>, <bit_mask>], ...],
"file2": [...],
...
}
}
The executables are sorted and determine the test bit mask. Their index+1 is
the bit, e.g. executable1 = 1, executable3 = 4, etc. Hence, a line covered by
executable1 and executable3 will have bit_mask == 5 == 0b101. The number of
tests is restricted to 52 in version 1, to allow javascript JSON parsing of
the bitsets encoded as numbers. JS max safe int is (1 << 53) - 1.
The line-number-bit_mask pairs are sorted by line number and don't contain
duplicates.
Split json data preserves the same format, but only contains one file per
json file.
The sancov tool is expected to be in the llvm compiler-rt third-party
directory. It's not checked out by default and must be added as a custom deps:
'v8/third_party/llvm/projects/compiler-rt':
'https://chromium.googlesource.com/external/llvm.org/compiler-rt.git'
"""
import argparse
import json
import logging
import os
import re
import subprocess
import sys
from multiprocessing import Pool, cpu_count
logging.basicConfig(level=logging.INFO)
# Files to exclude from coverage. Dropping their data early adds more speed.
# The contained cc files are already excluded from instrumentation, but inlined
# data is referenced through v8's object files.
EXCLUSIONS = [
'buildtools',
'src/third_party',
'third_party',
'test',
'testing',
]
# Executables found in the build output for which no coverage is generated.
# Exclude them from the coverage data file.
EXE_BLACKLIST = [
'generate-bytecode-expectations',
'hello-world',
'mksnapshot',
'parser-shell',
'process',
'shell',
]
# V8 checkout directory.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__))))
# The sancov tool location.
SANCOV_TOOL = os.path.join(
BASE_DIR, 'third_party', 'llvm', 'projects', 'compiler-rt',
'lib', 'sanitizer_common', 'scripts', 'sancov.py')
# Simple script to sanitize the PCs from objdump.
SANITIZE_PCS = os.path.join(BASE_DIR, 'tools', 'sanitizers', 'sanitize_pcs.py')
# The llvm symbolizer location.
SYMBOLIZER = os.path.join(
BASE_DIR, 'third_party', 'llvm-build', 'Release+Asserts', 'bin',
'llvm-symbolizer')
# Number of cpus.
CPUS = cpu_count()
# Regexp to find sancov files as output by sancov_merger.py. Also grabs the
# executable name in group 1.
SANCOV_FILE_RE = re.compile(r'^(.*)\.result.sancov$')
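# E.g. 'd8.result.sancov' matches, and group(1) yields the executable name 'd8'.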
def executables(build_dir):
"""Iterates over executable files in the build directory."""
for f in os.listdir(build_dir):
file_path = os.path.join(build_dir, f)
if (os.path.isfile(file_path) and
os.access(file_path, os.X_OK) and
f not in EXE_BLACKLIST):
yield file_path
def process_symbolizer_output(output, build_dir):
"""Post-process llvm symbolizer output.
Excludes files outside the v8 checkout or given in exclusion list above
from further processing. Drops the character index in each line.
Returns: A mapping of file names to lists of line numbers. The file names
have relative paths to the v8 base directory. The lists of line
numbers don't contain duplicate lines and are sorted.
"""
# Path prefix added by the llvm symbolizer including trailing slash.
output_path_prefix = os.path.join(build_dir, '..', '..', '')
# Drop path prefix when iterating lines. The path is redundant and takes
# too much space. Drop files outside that path, e.g. generated files in
# the build dir and absolute paths to c++ library headers.
def iter_lines():
for line in output.strip().splitlines():
if line.startswith(output_path_prefix):
yield line[len(output_path_prefix):]
# Map file names to sets of instrumented line numbers.
file_map = {}
for line in iter_lines():
# Drop character number, we only care for line numbers. Each line has the
# form: <file name>:<line number>:<character number>.
file_name, number, _ = line.split(':')
file_map.setdefault(file_name, set([])).add(int(number))
# Remove exclusion patterns from file map. It's cheaper to do it after the
# mapping, as there are few excluded files and we don't want to do this
# check for numerous lines in ordinary files.
def keep(file_name):
for e in EXCLUSIONS:
if file_name.startswith(e):
return False
return True
# Return in serializable form and filter.
return {k: sorted(file_map[k]) for k in file_map if keep(k)}
def get_instrumented_lines(executable):
"""Return the instrumented lines of an executable.
  Called through a multiprocessing pool.
Returns: Post-processed llvm output as returned by process_symbolizer_output.
"""
# The first two pipes are from llvm's tool sancov.py with 0x added to the hex
# numbers. The results are piped into the llvm symbolizer, which outputs for
# each PC: <file name with abs path>:<line number>:<character number>.
# We don't call the sancov tool to get more speed.
process = subprocess.Popen(
'objdump -d %s | '
'grep \'^\s\+[0-9a-f]\+:.*\scall\(q\|\)\s\+[0-9a-f]\+ '
'<__sanitizer_cov\(_with_check\|\|_trace_pc_guard\)\(@plt\|\)>\' | '
'grep \'^\s\+[0-9a-f]\+\' -o | '
'%s | '
'%s --obj %s -functions=none' %
(executable, SANITIZE_PCS, SYMBOLIZER, executable),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
cwd=BASE_DIR,
shell=True,
)
output, _ = process.communicate()
assert process.returncode == 0
return process_symbolizer_output(output, os.path.dirname(executable))
def merge_instrumented_line_results(exe_list, results):
"""Merge multiprocessing results for all instrumented lines.
Args:
exe_list: List of all executable names with absolute paths.
results: List of results as returned by get_instrumented_lines.
Returns: Dict to be used as json data as specified on the top of this page.
The dictionary contains all instrumented lines of all files
referenced by all executables.
"""
def merge_files(x, y):
for file_name, lines in y.iteritems():
x.setdefault(file_name, set([])).update(lines)
return x
result = reduce(merge_files, results, {})
# Return data as file->lines mapping. The lines are saved as lists
# with (line number, test bits (as int)). The test bits are initialized with
# 0, meaning instrumented, but no coverage.
# The order of the test bits is given with key 'tests'. For now, these are
# the executable names. We use a _list_ with two items instead of a tuple to
# ease merging by allowing mutation of the second item.
return {
'version': 1,
'tests': sorted(map(os.path.basename, exe_list)),
'files': {f: map(lambda l: [l, 0], sorted(result[f])) for f in result},
}
def write_instrumented(options):
"""Implements the 'all' action of this tool."""
exe_list = list(executables(options.build_dir))
logging.info('Reading instrumented lines from %d executables.',
len(exe_list))
pool = Pool(CPUS)
try:
results = pool.imap_unordered(get_instrumented_lines, exe_list)
finally:
pool.close()
# Merge multiprocessing results and prepare output data.
data = merge_instrumented_line_results(exe_list, results)
logging.info('Read data from %d executables, which covers %d files.',
len(data['tests']), len(data['files']))
logging.info('Writing results to %s', options.json_output)
# Write json output.
with open(options.json_output, 'w') as f:
json.dump(data, f, sort_keys=True)
def get_covered_lines(args):
"""Return the covered lines of an executable.
  Called through a multiprocessing pool. The args are expected to unpack to:
cov_dir: Folder with sancov files merged by sancov_merger.py.
executable: Absolute path to the executable that was called to produce the
given coverage data.
sancov_file: The merged sancov file with coverage data.
Returns: A tuple of post-processed llvm output as returned by
process_symbolizer_output and the executable name.
"""
cov_dir, executable, sancov_file = args
# Let the sancov tool print the covered PCs and pipe them through the llvm
# symbolizer.
process = subprocess.Popen(
'%s print %s 2> /dev/null | '
'%s --obj %s -functions=none' %
(SANCOV_TOOL,
os.path.join(cov_dir, sancov_file),
SYMBOLIZER,
executable),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
cwd=BASE_DIR,
shell=True,
)
output, _ = process.communicate()
assert process.returncode == 0
return (
process_symbolizer_output(output, os.path.dirname(executable)),
os.path.basename(executable),
)
def merge_covered_line_results(data, results):
"""Merge multiprocessing results for covered lines.
The data is mutated, the results are merged into it in place.
Args:
data: Existing coverage data from json file containing all instrumented
lines.
results: List of results as returned by get_covered_lines.
"""
# List of executables and mapping to the test bit mask. The number of
# tests is restricted to 52, to allow javascript JSON parsing of
# the bitsets encoded as numbers. JS max safe int is (1 << 53) - 1.
exe_list = data['tests']
assert len(exe_list) <= 52, 'Max 52 different tests are supported.'
test_bit_masks = {exe:1<<i for i, exe in enumerate(exe_list)}
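  # E.g. with tests ['d8', 'unittests'] the masks are {'d8': 1, 'unittests': 2},
  # so a line covered by both carries bit_mask 3 (0b11).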
def merge_lines(old_lines, new_lines, mask):
"""Merge the coverage data of a list of lines.
Args:
old_lines: Lines as list of pairs with line number and test bit mask.
The new lines will be merged into the list in place.
new_lines: List of new (covered) lines (sorted).
mask: The bit to be set for covered lines. The bit index is the test
index of the executable that covered the line.
"""
i = 0
# Iterate over old and new lines, both are sorted.
for l in new_lines:
while old_lines[i][0] < l:
# Forward instrumented lines not present in this coverage data.
i += 1
# TODO: Add more context to the assert message.
assert i < len(old_lines), 'Covered line %d not in input file.' % l
assert old_lines[i][0] == l, 'Covered line %d not in input file.' % l
# Add coverage information to the line.
old_lines[i][1] |= mask
def merge_files(data, result):
"""Merge result into data.
The data is mutated in place.
Args:
data: Merged coverage data from the previous reduce step.
result: New result to be merged in. The type is as returned by
get_covered_lines.
"""
file_map, executable = result
files = data['files']
for file_name, lines in file_map.iteritems():
merge_lines(files[file_name], lines, test_bit_masks[executable])
return data
reduce(merge_files, results, data)
def merge(options):
"""Implements the 'merge' action of this tool."""
# Check if folder with coverage output exists.
assert (os.path.exists(options.coverage_dir) and
os.path.isdir(options.coverage_dir))
# Inputs for multiprocessing. List of tuples of:
# Coverage dir, absoluate path to executable, sancov file name.
inputs = []
for sancov_file in os.listdir(options.coverage_dir):
match = SANCOV_FILE_RE.match(sancov_file)
if match:
inputs.append((
options.coverage_dir,
os.path.join(options.build_dir, match.group(1)),
sancov_file,
))
logging.info('Merging %d sancov files into %s',
len(inputs), options.json_input)
# Post-process covered lines in parallel.
pool = Pool(CPUS)
try:
results = pool.imap_unordered(get_covered_lines, inputs)
finally:
pool.close()
# Load existing json data file for merging the results.
with open(options.json_input, 'r') as f:
data = json.load(f)
# Merge muliprocessing results. Mutates data.
merge_covered_line_results(data, results)
logging.info('Merged data from %d executables, which covers %d files.',
len(data['tests']), len(data['files']))
logging.info('Writing results to %s', options.json_output)
# Write merged results to file.
with open(options.json_output, 'w') as f:
json.dump(data, f, sort_keys=True)
def split(options):
"""Implements the 'split' action of this tool."""
# Load existing json data file for splitting.
with open(options.json_input, 'r') as f:
data = json.load(f)
logging.info('Splitting off %d coverage files from %s',
len(data['files']), options.json_input)
for file_name, coverage in data['files'].iteritems():
# Preserve relative directories that are part of the file name.
file_path = os.path.join(options.output_dir, file_name + '.json')
try:
os.makedirs(os.path.dirname(file_path))
except OSError:
# Ignore existing directories.
pass
with open(file_path, 'w') as f:
# Flat-copy the old dict.
new_data = dict(data)
# Update current file.
new_data['files'] = {file_name: coverage}
# Write json data.
json.dump(new_data, f, sort_keys=True)
def main(args=None):
parser = argparse.ArgumentParser()
# TODO(machenbach): Make this required and deprecate the default.
parser.add_argument('--build-dir',
default=os.path.join(BASE_DIR, 'out', 'Release'),
help='Path to the build output directory.')
parser.add_argument('--coverage-dir',
help='Path to the sancov output files.')
parser.add_argument('--json-input',
help='Path to an existing json file with coverage data.')
parser.add_argument('--json-output',
help='Path to a file to write json output to.')
parser.add_argument('--output-dir',
help='Directory where to put split output files to.')
parser.add_argument('action', choices=['all', 'merge', 'split'],
help='Action to perform.')
options = parser.parse_args(args)
options.build_dir = os.path.abspath(options.build_dir)
if options.action.lower() == 'all':
if not options.json_output:
print '--json-output is required'
return 1
write_instrumented(options)
elif options.action.lower() == 'merge':
if not options.coverage_dir:
print '--coverage-dir is required'
return 1
if not options.json_input:
print '--json-input is required'
return 1
if not options.json_output:
print '--json-output is required'
return 1
merge(options)
elif options.action.lower() == 'split':
if not options.json_input:
print '--json-input is required'
return 1
if not options.output_dir:
print '--output-dir is required'
return 1
split(options)
return 0
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 | 1,672,534,452,440,716,300 | 33.281319 | 79 | 0.669637 | false | 3.694458 | true | false | false |
nick41496/Beatnik | beatnik/migrations/0011_musicclick.py | 1 | 1054 | # Generated by Django 2.1.5 on 2019-01-22 05:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('beatnik', '0010_musicaccess'),
]
operations = [
migrations.CreateModel(
name='MusicClick',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_agent', models.TextField(null=True, verbose_name="Client's user agent")),
('ip_address', models.CharField(max_length=45, null=True, verbose_name="Client's IP address")),
('referer', models.URLField(null=True, verbose_name='HTTP referer')),
('link', models.URLField(verbose_name='The web address of the link clicked')),
('link_type', models.CharField(choices=[('apple', 'Apple Music'), ('gpm', 'Google Play Music'), ('soundcloud', 'Soundcloud'), ('spotify', 'Spotify')], max_length=10, verbose_name='Type of link')),
],
),
]
| gpl-3.0 | -8,645,648,373,081,352,000 | 42.916667 | 212 | 0.593928 | false | 4.053846 | false | false | false |
Marchowes/dyn-python | dyn/tm/task.py | 2 | 3775 | # -*- coding: utf-8 -*-
"""This module contains interfaces for all Task management features of the
REST API
"""
from dyn.compat import force_unicode
from dyn.tm.session import DynectSession
__author__ = 'mhowes'
def get_tasks():
response = DynectSession.get_session().execute('/Task', 'GET',
{})
return [Task(task.pop('task_id'), api=False, **task)
for task in response['data']]
class Task(object):
"""A class representing a DynECT Task"""
def __init__(self, task_id, *args, **kwargs):
super(Task, self).__init__()
self._task_id = task_id
self._blocking = self._created_ts = None
self._customer_name = self._debug = None
self._message = self._modified_ts = None
self._name = self._status = None
self._step_count = None
self._total_steps = self._zone_name = None
self._args = None
if 'api' in kwargs:
del kwargs['api']
self._build(kwargs)
self.uri = '/Task/{}'.format(self._task_id)
def _build(self, data):
"""Build this object from the data returned in an API response"""
for key, val in data.items():
if key == 'args':
self._args = [{varg['name']: varg['value']}
for varg in val]
else:
setattr(self, '_' + key, val)
@property
def args(self):
"""Returns List of args, and their value"""
return self._args
@property
def blocking(self):
"""Returns whether this task is in a blocking state."""
return self._blocking
@property
def created_ts(self):
"""Returns Task Creation timestamp"""
return self._created_ts
@property
def customer_name(self):
"""Returns Customer Name"""
return self._customer_name
@property
def debug(self):
"""Returns Debug Information"""
return self._debug
@property
def message(self):
"""Returns Task Message"""
return self._message
@property
def modified_ts(self):
"""Returns Modified Timestamp"""
return self._modified_ts
@property
def name(self):
"""Returns Task Name"""
return self._name
@property
def status(self):
"""Returns Task Status"""
return self._status
@property
def step_count(self):
"""Returns Task Step Count"""
return self._step_count
@property
def task_id(self):
"""Returns Task_id"""
return self._task_id
@property
def total_steps(self):
"""Returns Total number of steps for this task"""
return self._total_steps
@property
def zone_name(self):
"""Returns Zone name for this task"""
return self._zone_name
def refresh(self):
"""Updates :class:'Task' with current data on system. """
api_args = dict()
response = DynectSession.get_session().execute(self.uri, 'GET',
api_args)
self._build(response['data'])
def cancel(self):
"""Cancels Task"""
api_args = dict()
response = DynectSession.get_session().execute(self.uri, 'DELETE',
api_args)
self._build(response['data'])
def __str__(self):
return force_unicode('<Task>: {} - {} - {} - {} - {}').format(
self._task_id, self._zone_name,
self._name, self._message, self._status)
__repr__ = __unicode__ = __str__
def __bytes__(self):
"""bytes override"""
return bytes(self.__str__())
| bsd-3-clause | 2,144,039,428,772,708,400 | 26.962963 | 74 | 0.526887 | false | 4.334099 | false | false | false |
BtXin/VirtualIRLab | util/util.py | 1 | 1335 | from flask import current_app
from flask_login import current_user
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in {'txt', 'toml', 'dat'}
def check_role(role):
if not current_user.is_authenticated or current_user.role != role:
return current_app.login_manager.unauthorized()
# return redirect(url_for(current_user.role + 'api'))
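# Illustrative sketch (not part of the original module): parse_multi_form below
# rebuilds nested structures from flat form keys that use bracket notation.
# The input dict is hypothetical; a Flask request.form mapping works the same way.
#
#   parse_multi_form({'person[name][first]': 'Al', 'person[pets][0]': 'cat'})
#   # -> {'person': {'name': {'first': 'Al'}, 'pets': {0: 'cat'}}}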
def parse_multi_form(form):
data = {}
for url_k in form:
v = form[url_k]
ks = []
while url_k:
if '[' in url_k:
k, r = url_k.split('[', 1)
ks.append(k)
if r[0] == ']':
ks.append('')
url_k = r.replace(']', '', 1)
else:
ks.append(url_k)
break
sub_data = data
for i, k in enumerate(ks):
if k.isdigit():
k = int(k)
if i + 1 < len(ks):
if not isinstance(sub_data, dict):
break
if k in sub_data:
sub_data = sub_data[k]
else:
sub_data[k] = {}
sub_data = sub_data[k]
else:
if isinstance(sub_data, dict):
sub_data[k] = v
return data
| mit | -4,940,491,134,228,611,000 | 28.666667 | 91 | 0.444195 | false | 3.903509 | false | false | false |
BasementCat/bottle-utils | bottleutils/response.py | 1 | 1563 | import json
import bottle
def JsonResponse(callback):
return JsonResponsePlugin().apply(callback, None)
class JsonResponsePlugin(object):
name = 'JsonResponsePlugin'
api = 2
def apply(self, callback, route):
def wrapper(*args, **kwargs):
try:
out = callback(*args, **kwargs)
if isinstance(out, dict):
if 'result' in out or 'error' in out:
return out
return dict(result = out)
elif isinstance(out, list):
return dict(result = out)
else:
return out
except bottle.HTTPResponse as e:
if isinstance(e.body, dict):
message = e.body
else:
message = dict(message = e.body, code = e.status_code)
headers = [(k,v) for k,v in e.headers.items()]
headers.append(('Content-Type', 'application/json'))
raise bottle.HTTPResponse(json.dumps(dict(error = message)), e.status_code, headers = headers)
return wrapper
@staticmethod
def getErrorHandler(code):
def wrapper(*args, **kwargs):
return JsonResponsePlugin.errorHandler(code, *args, **kwargs)
return wrapper
@staticmethod
def errorHandler(code, *args, **kwargs):
return json.dumps({
'error': {
'code': code,
'message': bottle.HTTP_CODES[code]
}
})
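# Illustrative usage sketch (not part of the original module; the app and route are
# hypothetical, and registering handlers through the error_handler dict is only one
# of several options in Bottle). Installing the plugin wraps plain dict/list return
# values in {"result": ...} and turns HTTPResponse errors into {"error": ...} bodies.
#
#   import bottle
#   app = bottle.Bottle()
#   app.install(JsonResponsePlugin())
#   app.error_handler[404] = JsonResponsePlugin.getErrorHandler(404)
#
#   @app.get('/ping')
#   def ping():
#       return {'pong': True}   # served as {"result": {"pong": true}}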
| mit | 2,757,041,014,070,183,000 | 32.255319 | 110 | 0.513756 | false | 4.665672 | false | false | false |
noironetworks/group-based-policy | gbpservice/neutron/db/migration/alembic_migrations/versions/27583c259fa7_missing_nested.py | 1 | 1638 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fix missing nested domain DB migration
Revision ID: 27583c259fa7
Revises: 799f0516bc08
Create Date: 2020-05-27 14:18:11.909757
"""
# revision identifiers, used by Alembic.
revision = '27583c259fa7'
down_revision = '799f0516bc08'
import os
import sys
from neutron.db import migration
from oslo_utils import importutils
from gbpservice.neutron.db.migration import alembic_migrations as am
# This is a hack to get around the fact that the versions
# directory has no __init__.py
filepath = os.path.abspath(am.__file__)
basepath = filepath[:filepath.rfind("/")] + "/versions"
sys.path.append(basepath)
DB_4967af35820f = '4967af35820f_cisco_apic_nested_domain'
def ensure_4967af35820f_migration():
if not migration.schema_has_table(
'apic_aim_network_nested_domain_allowed_vlans'):
db_4967af35820f = importutils.import_module(DB_4967af35820f)
db_4967af35820f.upgrade()
def upgrade():
ensure_4967af35820f_migration()
# remove the appended path
del sys.path[sys.path.index(basepath)]
def downgrade():
pass
| apache-2.0 | 366,712,495,360,453,060 | 27.736842 | 78 | 0.73138 | false | 3.448421 | false | false | false |
mancoast/CPythonPyc_test | cpython/220_test_marshal.py | 3 | 1227 | from test_support import TestFailed
import marshal
import sys
# XXX Much more needed here.
# Test the full range of Python ints.
n = sys.maxint
while n:
for expected in (-n, n):
s = marshal.dumps(expected)
got = marshal.loads(s)
if expected != got:
raise TestFailed("for int %d, marshal string is %r, loaded "
"back as %d" % (expected, s, got))
n = n >> 1
# Simulate int marshaling on a 64-bit box. This is most interesting if
# we're running the test on a 32-bit box, of course.
def to_little_endian_string(value, nbytes):
bytes = []
for i in range(nbytes):
bytes.append(chr(value & 0xff))
value >>= 8
return ''.join(bytes)
maxint64 = (1L << 63) - 1
minint64 = -maxint64-1
for base in maxint64, minint64, -maxint64, -(minint64 >> 1):
while base:
s = 'I' + to_little_endian_string(base, 8)
got = marshal.loads(s)
if base != got:
raise TestFailed("for int %d, simulated marshal string is %r, "
"loaded back as %d" % (base, s, got))
if base == -1: # a fixed-point for shifting right 1
base = 0
else:
base >>= 1
| gpl-3.0 | 8,361,069,284,016,545,000 | 28.926829 | 75 | 0.563162 | false | 3.436975 | true | false | false |
istb-mia/miapy | miapy/data/extraction/reader.py | 1 | 4397 | import abc
import os
import h5py
import numpy as np
import miapy.data.indexexpression as expr
import miapy.data.definition as df
class Reader(metaclass=abc.ABCMeta):
"""Represents the abstract dataset reader."""
def __init__(self, file_path: str) -> None:
"""Initializes a new instance.
Args:
file_path(str): The path to the dataset file.
"""
super().__init__()
self.file_path = file_path
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def __del__(self):
self.close()
@abc.abstractmethod
def get_subject_entries(self) -> list:
"""Get the dataset entries holding the subject's data.
Returns:
list: The list of subject entry strings.
"""
pass
@abc.abstractmethod
def get_shape(self, entry: str) -> list:
"""Get the shape from an entry.
Args:
entry(str): The dataset entry.
Returns:
list: The shape of each dimension.
"""
pass
@abc.abstractmethod
def get_subjects(self) -> list:
"""Get the subject names in the dataset.
Returns:
list: The list of subject names.
"""
pass
@abc.abstractmethod
def read(self, entry: str, index: expr.IndexExpression=None):
"""Read a dataset entry.
Args:
entry(str): The dataset entry.
index(expr.IndexExpression): The slicing expression.
Returns:
The read data.
"""
pass
@abc.abstractmethod
def has(self, entry: str) -> bool:
"""Check whether a dataset entry exists.
Args:
entry(str): The dataset entry.
Returns:
bool: Whether the entry exists.
"""
pass
@abc.abstractmethod
def open(self):
"""Open the reader."""
pass
@abc.abstractmethod
def close(self):
"""Close the reader."""
pass
class Hdf5Reader(Reader):
"""Represents the dataset reader for HDF5 files."""
def __init__(self, file_path: str, category='images') -> None:
"""Initializes a new instance.
Args:
file_path(str): The path to the dataset file.
category(str): The category of an entry that contains data of all subjects
"""
super().__init__(file_path)
self.h5 = None # type: h5py.File
self.category = category
def get_subject_entries(self) -> list:
group = df.DATA_PLACEHOLDER.format(self.category)
return ['{}/{}'.format(group, k) for k in sorted(self.h5[group].keys())]
def get_shape(self, entry: str) -> list:
return self.h5[entry].shape
def get_subjects(self) -> list:
return self.read(df.SUBJECT)
def read(self, entry: str, index: expr.IndexExpression=None):
if index is None:
data = self.h5[entry][()] # need () instead of util.IndexExpression(None) [which is equal to slice(None)]
else:
data = self.h5[entry][index.expression]
if isinstance(data, np.ndarray) and data.dtype == np.object:
return data.tolist()
# if h5py.check_dtype(vlen=self.h5[entry].dtype) == str and not isinstance(data, str):
# return data.tolist()
return data
def has(self, entry: str) -> bool:
return entry in self.h5
def open(self):
self.h5 = h5py.File(self.file_path, mode='r', libver='latest')
def close(self):
if self.h5 is not None:
self.h5.close()
self.h5 = None
def get_reader(file_path: str, direct_open: bool=False) -> Reader:
""" Get the dataset reader corresponding to the file extension.
Args:
file_path(str): The path to the dataset file.
direct_open(bool): Whether the file should directly be opened.
Returns:
Reader: Reader corresponding to dataset file extension.
"""
extension = os.path.splitext(file_path)[1]
if extension not in reader_registry:
raise ValueError('unknown dataset file extension "{}"'.format(extension))
reader = reader_registry[extension](file_path)
if direct_open:
reader.open()
return reader
reader_registry = {'.h5': Hdf5Reader, '.hdf5': Hdf5Reader}
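# Illustrative usage sketch (not part of the original module; 'dataset.h5' is a
# hypothetical file path). get_reader() picks the reader class from the file
# extension, and the context manager protocol opens and closes the HDF5 file.
#
#   with get_reader('dataset.h5') as reader:
#       subjects = reader.get_subjects()
#       first_entry = reader.get_subject_entries()[0]
#       data = reader.read(first_entry)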
| apache-2.0 | 2,621,964,073,019,291,600 | 25.172619 | 118 | 0.581533 | false | 4.030247 | false | false | false |
lseek/empdb | employees/search_engine.py | 1 | 6556 | """Module for supporting searching/filtering of department employee lists.
In a logical sense this module implements:
select <fixed_fields> from <model> where <filter_conditions>
where:
- fixed_fields is the fixed list of fields mentioned above
- model is automatically determined by the module
- filter_conditions is provided by the user (and needs to be parsed by the
query string parser).
For simplicity ATM we only support a simple AND of the fields (first_name=foo
AND department=bar). In future a more elaborate query language can be
implemented.
Note that the only fields which can be queried are:
first_name
last_name
gender
job_title
hire_date
it is only the filter conditions that change.
If the field being filtered is a string type then we treat the value as a
substring to match against else we try to match the exact value."""
import datetime
import logging
import sqlalchemy as sqla
from models.employee import Employee
from models.dept_employee import DeptEmployee
from models.title import Title
import exceptions
LOG = logging.getLogger(__name__)
class FilterExpr(object):
"""Query string parsers must ultimately return a filter expression object."""
def to_sqla(self):
"""Return the SQLAlchemy object corresponding to this expression.
Subclasses must override this"""
return None
class EqualExpr(FilterExpr):
"""Match a value exactly."""
def __init__(self, field, val):
self.field = field
self.val = val
def to_sqla(self):
LOG.debug("converting: {} = {}".format(self.field, self.val))
return (self.field == self.val)
class LikeExpr(FilterExpr):
"""Match a pattern (case insensitive)."""
def __init__(self, field, pattern):
self.field = field
self.pattern = pattern
def to_sqla(self):
LOG.debug("converting: {} ILIKE {}".format(self.field, self.pattern))
return self.field.ilike(self.pattern)
class NotExpr(FilterExpr):
"""Negate an expression."""
sqla_operator = sqla.not_
def __init__(self, expr):
self.expr = expr
def to_sqla(self):
LOG.debug("converting: NOT({})".format(self.expr))
return sqla.not_(self.expr.to_sqla())
class AndExpr(FilterExpr):
def __init__(self, *exprs):
self.exprs = exprs
def to_sqla(self):
sqla_exprs = [expr_obj.to_sqla() for expr_obj in self.exprs]
LOG.debug("converting: AND({})".format(sqla_exprs))
return sqla.and_(*sqla_exprs)
class OrExpr(FilterExpr):
def __init__(self, *exprs):
self.exprs = exprs
def to_sqla(self):
sqla_exprs = [expr_obj.to_sqla() for expr_obj in self.exprs]
LOG.debug("converting: OR({})".format(sqla_exprs))
return sqla.or_(*sqla_exprs)
class QueryParser(object):
"""Base class for parsing query strings.
This class should be used in the following manner:
1. Global config instantiates a QueryParser (sub)class instance during
startup.
2. Caller calls gets parser instance from request.registry.settings
3. Caller calls instance.parse(<query args>)
4. Caller calls instance.search(session, dept_no) where session is a DB
session instance.
5. Search view passes on the result to the template."""
valid_fields = {
"first_name": Employee.first_name,
"last_name": Employee.last_name,
"gender": Employee.gender,
"hire_date": Employee.hire_date,
"curr_title": Title.title,
}
def __init__(self):
self.expr = None
def parse(self):
"""Parse the query inputs and set 'expr' to the corresponding
FilterExpr object.
If the query input was parsed successfully then return True else False
Subclasses must override this. In particular, the arguments to the
parse method must be defined by each subclass. Subclasses should set
the 'expr' attribute to the appropriate FilterExpr instance
representing the parsed query"""
return False
def search(self, session, dept_no):
"""Perform the actual search.
'session' is the SQLAlchemy session object.
'dept_no' is the department number to which the search should be
limited.
This method returns the query object. The caller can make further
modifications to the query (e.g. add limit and offset)
Subclasses should not need to override this"""
# always implicitly add dept_no and filters to select current title
today = datetime.date.today()
title_is_curr = sqla.or_(Title.to_date == None, Title.to_date >= today)
return session.query(Employee.emp_no,
Employee.first_name,
Employee.last_name,
Employee.gender,
Employee.hire_date,
Title.title.label('curr_title')).\
filter(DeptEmployee.dept_no == dept_no).\
filter(DeptEmployee.emp_no == Employee.emp_no).\
filter(DeptEmployee.to_date >= today).\
filter(Title.emp_no == Employee.emp_no).\
filter(title_is_curr).\
filter(self.expr.to_sqla())
class FormQueryParser(QueryParser):
"""A simple query parser.
All the fields are ANDed. If a field is of string type then a substring
match is performed else an exact match is performed."""
def parse(self, **kwargs):
"""Build a filter expression out of the arguments
kwargs contains the fields to be queried (e.g. {"first_name": "foo"})."""
if not kwargs:
self.expr = None
return self
expr_list = []
for field, value in kwargs.items():
try:
field_obj = self.valid_fields[field]
except KeyError:
raise exceptions.UnknownField(field=field)
pat_types = (sqla.String, sqla.CHAR, sqla.VARCHAR)
if isinstance(field_obj.type, pat_types):
expr = LikeExpr(field_obj, '%{}%'.format(value))
else:
expr = EqualExpr(field_obj, value)
expr_list.append(expr)
self.expr = AndExpr(*expr_list) if len(expr_list) > 1 else expr_list[0]
return self
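# Illustrative usage sketch (not part of the original module), following the workflow
# described in the QueryParser docstring; the session object, the department number
# 'd005' and the field values are hypothetical.
#
#   parser = FormQueryParser()
#   parser.parse(first_name='ann', gender='F')
#   query = parser.search(session, 'd005')
#   employees = query.limit(20).all()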
| gpl-3.0 | 2,330,424,282,408,638,500 | 31.944724 | 81 | 0.622178 | false | 4.151995 | false | false | false |
kennethreitz/pipenv | pipenv/vendor/passa/internals/reporters.py | 1 | 2682 | # -*- coding=utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import resolvelib
from .traces import trace_graph
def print_title(text):
print('\n{:=^84}\n'.format(text))
def print_requirement(r, end='\n'):
print('{:>40}'.format(r.as_line(include_hashes=False)), end=end)
def print_dependency(state, key):
print_requirement(state.mapping[key], end='')
parents = sorted(
state.graph.iter_parents(key),
key=lambda n: (-1, '') if n is None else (ord(n[0].lower()), n),
)
for i, p in enumerate(parents):
if p is None:
line = '(user)'
else:
line = state.mapping[p].as_line(include_hashes=False)
if i == 0:
padding = ' <= '
else:
padding = ' ' * 44
print('{pad}{line}'.format(pad=padding, line=line))
class StdOutReporter(resolvelib.BaseReporter):
"""Simple reporter that prints things to stdout.
"""
def __init__(self, requirements):
super(StdOutReporter, self).__init__()
self.requirements = requirements
def starting(self):
self._prev = None
print_title(' User requirements ')
for r in self.requirements:
print_requirement(r)
def ending_round(self, index, state):
print_title(' Round {} '.format(index))
mapping = state.mapping
if self._prev is None:
difference = set(mapping.keys())
changed = set()
else:
difference = set(mapping.keys()) - set(self._prev.keys())
changed = set(
k for k, v in mapping.items()
if k in self._prev and self._prev[k] != v
)
self._prev = mapping
if difference:
print('New pins: ')
for k in difference:
print_dependency(state, k)
print()
if changed:
print('Changed pins:')
for k in changed:
print_dependency(state, k)
print()
def ending(self, state):
print_title(" STABLE PINS ")
path_lists = trace_graph(state.graph)
for k in sorted(state.mapping):
print(state.mapping[k].as_line(include_hashes=False))
paths = path_lists[k]
for path in paths:
if path == [None]:
print(' User requirement')
continue
print(' ', end='')
for v in reversed(path[1:]):
line = state.mapping[v].as_line(include_hashes=False)
print(' <=', line, end='')
print()
print()
| mit | -5,291,078,392,013,713,000 | 28.8 | 73 | 0.52088 | false | 4.113497 | false | false | false |
shadowfax-chc/django-natural-key-cache | tests/settings.py | 1 | 1699 | # vim: set et ts=4 sw=4 fileencoding=utf-8:
'''
Django settings for test project
'''
import os
import dj_config_url
from getenv import env
PROJ_ROOT = os.path.dirname(__file__)
SECRET_KEY = 'not_so_secret'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJ_ROOT, 'test.db'),
},
}
INSTALLED_APPS = (
'test_app',
)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
CACHE_URI = env('TEST_CACHE_URI', 'memcache://127.0.0.1:11211')
CUSTOM_CACHE_BACKEND = env('TEST_CACHE_BACKEND')
CACHES = {
'default': dj_config_url.parse(CACHE_URI)
}
if CUSTOM_CACHE_BACKEND:
CACHES['default']['BACKEND'] = CUSTOM_CACHE_BACKEND
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = False
USE_L10N = False
USE_TZ = False
DEBUG = True
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'simple': {
'format': "%(levelname)s [%(name)s:%(lineno)s] %(message)s",
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
},
'root': {
'level': 'DEBUG',
'handlers': ['console'],
},
'loggers': {
'django': {
'handlers': ['console'],
'propagate': False,
'level': 'ERROR',
},
'factory': {
'handlers': ['console'],
'propagate': False,
'level': 'ERROR',
},
'natural_key_cache': {
'handlers': ['console'],
'propagate': False,
'level': 'ERROR',
},
},
}
MIDDLEWARE_CLASSES = ()
| isc | -4,793,782,855,686,300,000 | 19.46988 | 72 | 0.517363 | false | 3.305447 | true | false | false |
wangfengfighting/Caoliu-master | tt.py | 1 | 1632 | # -*- coding:utf-8 -*-
__author__ = 'Administrator'
# import urllib.request
# path = "D:\\Download"
# url = "http://img.picuphost.com/img/upload/image/20151130/113000016301.jpeg"
# name ="D:\\download\\2.jpeg"
# # Note: the file type must match when saving; if the image being saved is a jpg, the target file name must also use the jpg extension, otherwise the saved image will be invalid
# conn = urllib.request.urlopen(url)
# f = open(name,'wb')
# f.write(conn.read())
# f.close()
# print('Pic Saved!')
import whyspider
# Initialize the crawler object
my_spider = whyspider.WhySpider()
# # Simulate a GET request
# path="G:\PostgraduatePROJECT\Caoliu-master"
# fname='22.jpeg'
# path2 = path+'\\'+fname
# name='G:\\PostgraduatePROJECT\\Caoliu-master\\down\\22.jpeg'
# f = open(name,'wb')
# data= my_spider.send_get('http://img.picuphost.com/img/upload/image/20151130/113000016301.jpeg')
# f.write(data)
# f.close()
# # Simulate a POST request
# print my_spider.send_post('http://3.apitool.sinaapp.com/','why=PostString2333')
#
# # Simulate a GET request
# print my_spider.send_get('http://www.baidu.com/')
#
# # Switch to mobile mode
#my_spider.set_mobile()
#
# # Simulate a GET request
# print my_spider.send_get('http://www.baidu.com/')
# import time
# time1= time.time()
#
# time2= time1+3
# print(time2-time1)
import urllib2
import whyspider
request = urllib2.Request('http://ipoock.com/img/g4/201512242250036siyu.jpeg')
request.add_header('User-Agent', 'fake-client')
#response = urllib2.urlopen(request,timeout=10)
response = urllib2.urlopen('http://ipoock.com/img/g4/201512242250036siyu.jpeg', timeout=10)
print(response)
f=open('J:\\caoliu\\ff.jpeg','wb')
f.write(response.read())  # read() the response body; writing the response object itself would raise a TypeError
f.close() | apache-2.0 | 7,013,451,213,521,124,000 | 26.90566 | 98 | 0.7023 | false | 2.167155 | false | true | false |
scallopedllama/bootdreams_linux | bootdreams.py | 1 | 8158 | #!/usr/bin/python2
# Bootdreams python
# Written by Joe Balough (sallopedllama at gmail.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
version = 0.3
print ("Bootdreams dot py Version " + str(version))
do_burn = True
# Import relevant modules
import sys
# For running commands and getting their output to stdout
import subprocess
# For string.lower
import string
# To determine if file exits
import os
# For rmtree
import shutil
# Regular expressions
import re
# Query wodim for burners
# Oddly enough, wodim returns an error code if you have a burner but returns 0 if you don't.
def query_burners():
try:
output = subprocess.Popen(['wodim', '--devices'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()[0]
return re.findall("dev='(\S*)'", output)
except subprocess.CalledProcessError, (exception):
return re.findall("dev='(\S*)'", exception.output)
# Help printing function
def print_help():
print ("Usage: " + sys.argv[0] + " Image_File [Write Speed] [/path/to/burner]")
print ("Acceptable image formats are Discjuggler (CDI), ISO, and BIN/CUE.")
print ("Write speed and burner path are optional. If omitted, lowest speed and the burner at " + drive_path + " is used.")
print ("All burner paths can be found by running 'wodim --devices'.")
# Asks user a yes / no question and quits if the user says no. Default question formatted to fit below a "WARNING: ... " string
def ask_for_continue(question = " Would you like to continue (Y/n)? "):
to_continue = string.lower(raw_input(question))
if to_continue != "" and to_continue[0] == 'n':
exit(1)
# Drive index
try:
drive_path = sys.argv[3]
except IndexError:
try:
drive_path = query_burners()[0]
except IndexError:
print ("Warning: No burner in system. A burner is obviously required.")
exit(1)
# The file to process
try:
input_image = sys.argv[1]
except IndexError:
print ("ERROR: No File Specified.")
print_help()
sys.exit(1)
# Burn Speed
try:
burn_speed = sys.argv[2]
except IndexError:
burn_speed = 0
# See if user was trying to get help
if string.lower(input_image) == "help" or string.lower(input_image) == "--help" or string.lower(input_image) == "-h":
print_help()
sys.exit(1)
# Make sure file exists
if not os.path.isfile(input_image):
print ("ERROR: File not found.")
print_help()
sys.exit(1)
# Convert extension to lower case to properly handle it
input_ext = string.lower(input_image[-3:])
# CDI AND NRG FILE HANDLING
if input_ext == "cdi" or input_ext == "nrg":
# Set some CDI / NRG specific options here
# Default for discjuggler
image_type = "DiscJuggler"
image_info_call = ["cdirip", input_image, "-info"]
# Special case for nero
if input_ext == "nrg":
image_type = "Nero"
image_info_call = ["nerorip", "-i", input_image]
# Print some helpful information
print ("Going to burn " + image_type + " image " + input_image + " at " + str(burn_speed) + "x on burner at " + drive_path)
# Get information about this image file
image_info = subprocess.Popen(image_info_call, stdout=subprocess.PIPE).communicate()[0]
# Make a list containing lists of track types for each session.
# First dimension is Session number, second is Track number
session_data = []
print ("Getting Session and Track information")
# Split the image_info string by the Session i has d track(s) string. Discard the first because it offers no data
for i in re.split('Session \d+ has \d+ track\(s\)', image_info)[1:]:
# Get all the track types in a list and append it to the list of session data
session_data.append(re.findall('Type: (\S*)', i))
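  # For example, a two-session selfboot image might yield
  # session_data == [['Audio/2352'], ['Mode2/2352']] (hypothetical values, shown only
  # to illustrate the [session][track] layout described above).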
# Check for situations to warn the user about:
# More than 2 sessions:
if len(session_data) > 2:
print ("Warning: Image has more than 2 sessions. Continuing anyway though this is untested.")
# Unsupported session type
for s in session_data:
for t in s:
if not t in ["Mode1/2048", "Mode2/2336", "Mode2/2352", "Audio/2352"]:
print ("ERROR: Unsupported session type " + t + ". Only Mode1/2048, Mode2/2336, Mode2/2352, and Audio/2352 are supported.")
exit(1)
# data/data image with CDDA
if session_data[0] == ["Mode2/2336", "Audio/2352"]:
print ("Warning: CDRecord cannot properly burn a data/data DiscJuggler image with CDDA.")
print (" You can continue anyway though it may be a coaster if there is very little space left in the image.")
ask_for_continue()
# Delete the temp dir if it already exists and create it again
print ("Clearing Temp Directory")
if os.path.isdir('/tmp/bootdreams'):
shutil.rmtree('/tmp/bootdreams', True)
os.mkdir('/tmp/bootdreams')
# Rip the Image
print ("Ripping " + input_ext + " image")
print ("")
# The last version (which did not fail to burn any images for me) did this bit wrong and only -iso was ever passed to cdirip.
# It never got the -cut and -cutall options which together don't work the way the readme says they should.
# Just going to make it not -cutall and fix it if a user tells me they had a bad burn that would have been fixed by it
rip_options = []
if input_ext == "cdi":
rip_options = ["cdirip", input_image, "/tmp/bootdreams", "-iso"]
if session_data[0][0] != "Audio/2352":
rip_options += ["-cut"]
else:
rip_options += ["-full"]
else:
rip_options = ["nerorip"]
if session_data[0][0] != "Audio/2352":
rip_options += ["--trim"]
else:
rip_options += ["--full"]
rip_options += [input_image, "/tmp/bootdreams"]
if subprocess.call(rip_options) != 0:
print ("ERROR: " + input_ext + "rip failed to extract image data. Please check its output for more information.")
exit(1)
# Burn the CD
if do_burn:
print ("Burning CD")
print ("")
index = 1
for s in session_data:
cdrecord_opts = []
for t in s:
if t == "Mode1/2048":
cdrecord_opts += ["-data", "/tmp/bootdreams/tdata" + str(index).zfill(2) + ".iso"]
elif t == "Mode2/2336" or t == "Mode2/2352":
cdrecord_opts += ["-xa", "/tmp/bootdreams/tdata" + str(index).zfill(2) + ".iso"]
elif t == "Audio/2352":
cdrecord_opts += ["-audio", "/tmp/bootdreams/taudio" + str(index).zfill(2) + ".wav"]
index += 1
# Build options list for cdrecord
cdrecord_call = ["cdrecord", "-dev=" + str(drive_path), "gracetime=2", "-v", "driveropts=burnfree", "speed=" + str(burn_speed)]
if index == len(session_data) + 1:
cdrecord_call.append("-eject")
else:
cdrecord_call.append("-multi")
if "-xa" in cdrecord_opts or "-data" in cdrecord_opts:
cdrecord_call.append("-tao")
else:
cdrecord_call.append("-dao")
cdrecord_call += cdrecord_opts
if not do_burn:
print(cdrecord_call)
# Burn the session
if do_burn and subprocess.call(cdrecord_call) != 0:
print ("ERROR: CDRecord failed. Please check its output for mroe information.")
exit(1)
if do_burn:
print ("Image burn complete.")
elif input_ext == "iso":
# TODO: Isos have checkbox for multisesion and menu option for record mode: mode1 or mode 2 form 1
cdrecord_call = ['cdrecord', 'dev=' + str(drive_path), 'gracetime=2', '-v', 'driveropts=burnfree', 'speed=' + str(burn_speed), '-eject', '-tao']
if iso_multi == True:
cdrecord_call += ['-multi']
if iso_mode1 == True:
cdrecord_call += ['-data']
else:
cdrecord_call += ['-xa']
cdrecord_call += [input_image]
| gpl-3.0 | 1,704,856,972,753,791,500 | 33.863248 | 146 | 0.655798 | false | 3.353062 | false | false | false |
TrampolineRTOS/GTL | build/libpm/python-makefiles/generic_galgas_makefile.py | 1 | 12683 | #! /usr/bin/env python
# -*- coding: UTF-8 -*-
#----------------------------------------------------------------------------------------------------------------------*
import sys, time, os
import makefile, default_build_options
#----------------------------------------------------------------------------------------------------------------------*
# displayDurationFromStartTime
#----------------------------------------------------------------------------------------------------------------------*
def displayDurationFromStartTime (startTime) :
totalDurationInSeconds = int (time.time () - startTime)
durationInSecondes = totalDurationInSeconds % 60
durationInMinutes = (totalDurationInSeconds // 60) % 60
durationInHours = totalDurationInSeconds // 3600
s = ""
if durationInHours > 0:
s += str (durationInHours) + "h"
if durationInMinutes > 0:
s += str (durationInMinutes) + "min"
s += str (durationInSecondes) + "s"
print ("Done at +" + s)
#----------------------------------------------------------------------------------------------------------------------*
class GenericGalgasMakefile :
mJSONfilePath = ""
mDictionary = {}
mExecutable = ""
mGoal = ""
mMaxParallelJobs = 0
mDisplayCommands = False
mCompilerTool = []
mLinkerTool = []
mStripTool = []
mSudoTool = ""
mCompilationMessage = ""
mLinkingMessage = ""
mInstallationgMessage = ""
mStripMessage = ""
mAllCompilerOptions = []
mCompilerReleaseOptions = []
mCompilerDebugOptions = []
m_C_CompilerOptions = []
m_Cpp_CompilerOptions = []
m_ObjectiveC_CompilerOptions = []
m_ObjectiveCpp_CompilerOptions = []
mTargetName = ""
mLinkerOptions = []
mExecutableSuffix = ""
mCrossCompilation = ""
def run (self) :
startTime = time.time ()
#--- Source file list
SOURCES = self.mDictionary ["SOURCES"]
#--- LIBPM
LIBPM_DIRECTORY_PATH = self.mDictionary ["LIBPM_DIRECTORY_PATH"]
#--------------------------------------------------------------------------- System
if self.mCrossCompilation == "":
(SYSTEM_NAME, MODE_NAME, SYSTEM_RELEASE, SYSTEM_VERSION, MACHINE) = os.uname ()
if SYSTEM_NAME == "Darwin":
MACHINE = "Intel"
SYSTEM_MACHINE = SYSTEM_NAME + "-" + MACHINE
else:
SYSTEM_MACHINE = self.mCrossCompilation
#--- GMP
GMP_DIRECTORY_PATH = LIBPM_DIRECTORY_PATH + "/gmp"
#--- Source directory list
SOURCES_DIR = self.mDictionary ["SOURCES_DIR"]
#--------------------------------------------------------------------------- Include dirs
SOURCES_DIR.append (LIBPM_DIRECTORY_PATH + "/bdd")
SOURCES_DIR.append (LIBPM_DIRECTORY_PATH + "/command_line_interface")
SOURCES_DIR.append (LIBPM_DIRECTORY_PATH + "/files")
SOURCES_DIR.append (LIBPM_DIRECTORY_PATH + "/galgas")
SOURCES_DIR.append (LIBPM_DIRECTORY_PATH + "/galgas2")
SOURCES_DIR.append (LIBPM_DIRECTORY_PATH + "/gmp")
SOURCES_DIR.append (LIBPM_DIRECTORY_PATH + "/streams")
SOURCES_DIR.append (LIBPM_DIRECTORY_PATH + "/time")
SOURCES_DIR.append (LIBPM_DIRECTORY_PATH + "/strings")
SOURCES_DIR.append (LIBPM_DIRECTORY_PATH + "/utilities")
includeDirs = ["-I" + GMP_DIRECTORY_PATH]
for d in SOURCES_DIR:
includeDirs.append ("-I" + d)
#--- Make object
make = makefile.Make (self.mGoal, self.mMaxParallelJobs == 1) # Display command utility tool path if sequential build
#--------------------------------------------------------------------------- Add Compile rule for sources (release)
#--- Object file directory
objectDirectory = "../build/cli-objects/makefile-" + self.mTargetName + "-objects"
#---
objectFileList = []
for source in SOURCES:
objectFile = objectDirectory + "/" + source + ".o"
objectFileList.append (objectFile)
sourcePath = make.searchFileInDirectories (source, SOURCES_DIR)
if sourcePath != "" :
extension = os.path.splitext (source) [1]
rule = makefile.Rule ([objectFile], self.mCompilationMessage + ": " + source)
rule.deleteTargetDirectoryOnClean ()
rule.mDependences.append (sourcePath)
rule.enterSecondaryDependanceFile (objectFile + ".dep", make)
rule.mCommand += self.mCompilerTool
rule.mCommand += self.mCompilerReleaseOptions
rule.mCommand += self.mAllCompilerOptions
if extension == ".c":
rule.mCommand += self.m_C_CompilerOptions
elif extension == ".cpp":
rule.mCommand += self.m_Cpp_CompilerOptions
rule.mCommand += ["-c", sourcePath]
rule.mCommand += ["-o", objectFile]
rule.mCommand += includeDirs
rule.mCommand += ["-MD", "-MP", "-MF", objectFile + ".dep"]
make.addRule (rule) ;
#--------------------------------------------------------------------------- Add EXECUTABLE link rule
EXECUTABLE = self.mExecutable + self.mExecutableSuffix
rule = makefile.Rule ([EXECUTABLE], self.mLinkingMessage + ": " + EXECUTABLE)
rule.mOnErrorDeleteTarget = True
rule.deleteTargetFileOnClean ()
rule.mDependences += objectFileList
rule.mDependences.append (self.mJSONfilePath)
rule.mCommand += self.mLinkerTool
rule.mCommand += objectFileList
rule.mCommand += ["-o", EXECUTABLE]
rule.mCommand += self.mLinkerOptions
postCommand = makefile.PostCommand (self.mStripMessage + " " + EXECUTABLE)
postCommand.mCommand += self.mStripTool
postCommand.mCommand.append (EXECUTABLE)
rule.mPostCommands.append (postCommand)
rule.mPriority = 1
make.addRule (rule) ;
#--------------------------------------------------------------------------- Add Compile rule for sources (debug)
#--- Object file directory
debugObjectDirectory = "../build/cli-objects/makefile-" + self.mTargetName + "-debug-objects"
#---
debugObjectFileList = []
for source in SOURCES:
objectFile = debugObjectDirectory + "/" + source + ".o"
debugObjectFileList.append (objectFile)
sourcePath = make.searchFileInDirectories (source, SOURCES_DIR)
if sourcePath != "" :
extension = os.path.splitext (source) [1]
rule = makefile.Rule ([objectFile], self.mCompilationMessage + " (debug): " + source)
rule.deleteTargetDirectoryOnClean ()
rule.mDependences.append (sourcePath)
rule.enterSecondaryDependanceFile (objectFile + ".dep", make)
rule.mCommand += self.mCompilerTool
rule.mCommand += self.mCompilerDebugOptions
rule.mCommand += self.mAllCompilerOptions
if extension == ".c":
rule.mCommand += self.m_C_CompilerOptions
elif extension == ".cpp":
rule.mCommand += self.m_Cpp_CompilerOptions
rule.mCommand += ["-c", sourcePath]
rule.mCommand += ["-o", objectFile]
rule.mCommand += includeDirs
rule.mCommand += ["-MD", "-MP", "-MF", objectFile + ".dep"]
make.addRule (rule) ;
#--------------------------------------------------------------------------- Add EXECUTABLE_DEBUG link rule
EXECUTABLE_DEBUG = self.mExecutable + "-debug" + self.mExecutableSuffix
rule = makefile.Rule ([EXECUTABLE_DEBUG], self.mLinkingMessage + " (debug): " + EXECUTABLE_DEBUG)
rule.mOnErrorDeleteTarget = True
rule.deleteTargetFileOnClean ()
rule.mDependences += debugObjectFileList
rule.mDependences.append (self.mJSONfilePath)
rule.mCommand += self.mLinkerTool
rule.mCommand += debugObjectFileList
rule.mCommand += ["-o", EXECUTABLE_DEBUG]
rule.mCommand += self.mLinkerOptions
make.addRule (rule) ;
#--------------------------------------------------------------------------- Add Compile rule for sources (lto)
#--- Object file directory
objectLTODirectory = "../build/cli-objects/makefile-" + self.mTargetName + "-objects-lto"
#---
ltoObjectFileList = []
for source in SOURCES:
objectFile = objectLTODirectory + "/" + source + ".o"
ltoObjectFileList.append (objectFile)
sourcePath = make.searchFileInDirectories (source, SOURCES_DIR)
if sourcePath != "" :
extension = os.path.splitext (source) [1]
rule = makefile.Rule ([objectFile], self.mCompilationMessage + " (lto): " + source)
rule.deleteTargetDirectoryOnClean ()
rule.mDependences.append (sourcePath)
rule.enterSecondaryDependanceFile (objectFile + ".dep", make)
rule.mCommand += self.mCompilerTool
rule.mCommand += self.mCompilerReleaseOptions
rule.mCommand += self.mAllCompilerOptions
rule.mCommand += ["-flto"]
if extension == ".c":
rule.mCommand += self.m_C_CompilerOptions
elif extension == ".cpp":
rule.mCommand += self.m_Cpp_CompilerOptions
rule.mCommand += ["-c", sourcePath]
rule.mCommand += ["-o", objectFile]
rule.mCommand += includeDirs
rule.mCommand += ["-MD", "-MP", "-MF", objectFile + ".dep"]
make.addRule (rule) ;
#--------------------------------------------------------------------------- Add EXECUTABLE link rule
EXECUTABLE_LTO = self.mExecutable + "-lto" + self.mExecutableSuffix
rule = makefile.Rule ([EXECUTABLE_LTO], self.mLinkingMessage + ": " + EXECUTABLE_LTO)
rule.mOnErrorDeleteTarget = True
rule.deleteTargetFileOnClean ()
rule.mDependences += ltoObjectFileList
rule.mDependences.append (self.mJSONfilePath)
rule.mCommand += self.mLinkerTool
rule.mCommand += ltoObjectFileList
rule.mCommand += ["-o", EXECUTABLE_LTO]
rule.mCommand += self.mLinkerOptions
rule.mCommand += ["-flto"]
postCommand = makefile.PostCommand (self.mStripMessage + " " + EXECUTABLE_LTO)
postCommand.mCommand += self.mStripTool
postCommand.mCommand.append (EXECUTABLE_LTO)
rule.mPostCommands.append (postCommand)
rule.mPriority = 1
make.addRule (rule) ;
#--------------------------------------------------------------------------- Add install EXECUTABLE file rule
if len (self.mSudoTool) > 0:
INSTALL_EXECUTABLE = "/usr/local/bin/" + EXECUTABLE
rule = makefile.Rule ([INSTALL_EXECUTABLE], self.mInstallationgMessage + ": " + INSTALL_EXECUTABLE)
rule.mDependences.append (EXECUTABLE)
rule.mCommand += self.mSudoTool
rule.mCommand += ["cp", EXECUTABLE, INSTALL_EXECUTABLE]
make.addRule (rule) ;
#--------------------------------------------------------------------------- Add install EXECUTABLE-lto file rule
if len (self.mSudoTool) > 0:
INSTALL_EXECUTABLE_LTO = "/usr/local/bin/" + EXECUTABLE_LTO
rule = makefile.Rule ([INSTALL_EXECUTABLE_LTO], self.mInstallationgMessage + ": " + INSTALL_EXECUTABLE_LTO)
rule.mDependences.append (EXECUTABLE)
rule.mCommand += self.mSudoTool
rule.mCommand += ["cp", EXECUTABLE_LTO, INSTALL_EXECUTABLE_LTO]
make.addRule (rule) ;
#--------------------------------------------------------------------------- Add install EXECUTABLE-debug file rule
if len (self.mSudoTool) > 0:
INSTALL_EXECUTABLE_DEBUG = "/usr/local/bin/" + EXECUTABLE_DEBUG
rule = makefile.Rule ([INSTALL_EXECUTABLE_DEBUG], self.mInstallationgMessage + " (debug): " + INSTALL_EXECUTABLE_DEBUG)
rule.mDependences.append (INSTALL_EXECUTABLE_DEBUG)
rule.mCommand += self.mSudoTool
rule.mCommand += ["cp", EXECUTABLE_DEBUG, INSTALL_EXECUTABLE_DEBUG]
make.addRule (rule) ;
#--------------------------------------------------------------------------- Compute jobs
# make.printRules ()
make.addGoal ("all", [EXECUTABLE, EXECUTABLE_DEBUG], "Build " + EXECUTABLE + " and " + EXECUTABLE_DEBUG)
make.addGoal ("debug", [EXECUTABLE_DEBUG], "Build " + EXECUTABLE_DEBUG)
make.addGoal ("release", [EXECUTABLE], "Build " + EXECUTABLE)
make.addGoal ("lto", [EXECUTABLE_LTO], "Build " + EXECUTABLE_LTO)
if len (self.mSudoTool) > 0:
make.addGoal ("install-lto", [INSTALL_EXECUTABLE_LTO], "Build and install " + INSTALL_EXECUTABLE_LTO)
make.addGoal ("install-release", [INSTALL_EXECUTABLE], "Build and install " + INSTALL_EXECUTABLE)
make.addGoal ("install-debug", [INSTALL_EXECUTABLE_DEBUG], "Build and install " + INSTALL_EXECUTABLE_DEBUG)
#--------------------------------------------------------------------------- Run jobs
# make.printGoals ()
make.runGoal (self.mMaxParallelJobs, self.mDisplayCommands)
#--------------------------------------------------------------------------- Ok ?
make.printErrorCountAndExitOnError ()
displayDurationFromStartTime (startTime)
#----------------------------------------------------------------------------------------------------------------------*
| gpl-2.0 | 3,188,628,503,302,512,000 | 47.969112 | 125 | 0.57581 | false | 3.988365 | false | false | false |
huxh10/iSDX | examples/test-sdx/generate_policies.py | 1 | 2878 | #!/usr/bin/env python
# Author:
# Arpit Gupta ([email protected])
import json
import os
from random import shuffle, randint
import sys
import argparse
def getMatchHash(part, peer_id, count):
return int(1 * part + 1 * peer_id + count)
def generatePoliciesParticipant(part, asn_2_ip, peers, frac, limit_out, cfg_dir):
# randomly select fwding participants
shuffle(peers)
count = int(frac * len(peers))
fwding_peers = set(peers[:count])
# Generate Outbound policies
cookie_id = 1
policy = {}
policy["outbound"] = []
for peer_id in fwding_peers:
peer_count = randint(1, limit_out)
for ind in range(1, peer_count+1):
tmp_policy = {}
# Assign Cookie ID
tmp_policy["cookie"] = cookie_id
cookie_id += 1
# Match
match_hash = getMatchHash(int(part), peer_id, ind)
tmp_policy["match"] = {}
tmp_policy["match"]["tcp_dst"] = match_hash
tmp_policy["match"]["in_port"] = asn_2_ip[part].values()[0]
# Action: fwd to peer's first port (visible to part)
tmp_policy["action"] = {"fwd": peer_id}
# Add this to participants' outbound policies
policy["outbound"].append(tmp_policy)
policy["inbound"] = []
inbound_count = randint(1, limit_out)
    for ind in range(1, inbound_count+1):
tmp_policy = {}
# Assign Cookie ID
tmp_policy["cookie"] = cookie_id
cookie_id += 1
# Match
match_hash = getMatchHash(int(part), 0, ind)
tmp_policy["match"] = {}
tmp_policy["match"]["tcp_dst"] = match_hash
# Action: fwd to peer's first port (visible to part)
tmp_policy["action"] = {"fwd": asn_2_ip[part].values()[0]}
# Add this to participants' outbound policies
policy["inbound"].append(tmp_policy)
# Dump the policies to appropriate directory
policy_filename = "participant_" + "AS" + part + ".py"
policy_file = cfg_dir + "policies/" + policy_filename
with open(policy_file,'w') as f:
json.dump(policy, f)
''' main '''
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('cfg_dir', type=str, help='specify the config file directory, e.g. ./config/')
    parser.add_argument('-f', '--frac', type=str, default='1.0', help='fraction of SDN forwarding peers')
args = parser.parse_args()
frac = float(args.frac)
asn_2_ip = json.load(open(args.cfg_dir + "asn_2_ip.json", 'r'))
asn_2_id = json.load(open(args.cfg_dir + "asn_2_id.json", 'r'))
config = json.load(open(args.cfg_dir + "sdx_global.cfg", "r"))
# Params
limit_out = 4
for part in asn_2_ip:
generatePoliciesParticipant(part, asn_2_ip, config["Participants"][str(asn_2_id[part])]["Peers"], frac, limit_out, args.cfg_dir)
| apache-2.0 | -7,312,323,639,624,161,000 | 31.704545 | 136 | 0.59173 | false | 3.393868 | false | false | false |
limafabio/Pythings | Chapter3/3.8/src/binary.py | 1 | 1219 | #!/usr/bin/py
#solved by FabioLima
#
#NameScript: binary.py
#
#Author and Maintaining: Fabio Lima
#
#-----------------------------------
#Description:
#
#
#-----------------------------------
#
#Example:
#
#
#-----------------------------------
#
#History
#
#v1.0 2017/02/08, FabioLima
#
#-----------------------------------
#
#License: GPL
#
import os,sys
sys.path.append(os.path.abspath('../../3.5/src/'))
from stack import Stack
class Stack(Stack):
def divideBy2(self,number):
test = Stack()
while number > 0:
if (number%2) == 1:
test.push(1)
else:
test.push(0)
number /= 2
seqBin = ''
while not test.isEmpty():
seqBin = seqBin + str(test.pop())
return seqBin
def baseConverter(self,decimalNumber,base):
digits = "0123456789ABCEF"
test = Stack()
while decimalNumber > 0:
remainder = int(decimalNumber) % int(base)
test.push(remainder)
decimalNumber = int(decimalNumber) // int(base)
answer = ''
while not test.isEmpty():
index = test.pop()
answer += digits[index]
return answer
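# Illustrative usage (not part of the original exercise file):
#
#   s = Stack()
#   s.divideBy2(42)           # -> '101010'
#   s.baseConverter(233, 16)  # -> 'E9'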
| mit | 1,768,685,114,516,813,000 | 20.767857 | 59 | 0.477441 | false | 3.693939 | true | false | false |
wazo-pbx/xivo-tools | visualplan/src/visualplan/analyzer.py | 1 | 3654 | # -*- coding: utf-8 -*-
# Copyright (C) 2013-2014 Avencall
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
import re
class DialplanExecutionAnalyzer(object):
def analyze(self, dialplan_parse_result, log_parse_result):
line_analyses = self._do_lines_analyses(dialplan_parse_result, log_parse_result)
return _Analysis(dialplan_parse_result.filename, line_analyses)
def _do_lines_analyses(self, dialplan_parse_result, log_parse_result):
line_analyses = []
for line in dialplan_parse_result.lines:
is_executed = self._is_line_executed(line, log_parse_result, dialplan_parse_result)
line_analysis = _LineAnalysis(line.content, line.is_executable, is_executed)
line_analyses.append(line_analysis)
return line_analyses
def _is_line_executed(self, line, log_parse_result, dialplan_parse_result):
if not line.is_executable:
return False
elif line.extension.startswith('_'):
pattern = line.extension[1:]
for extension in log_parse_result.list_executed_extensions(line.context, line.priority):
if not dialplan_parse_result.has_extension(line.context, extension) and\
_is_extension_match_pattern(extension, pattern):
return log_parse_result.is_executed(line.context, extension, line.priority)
return False
else:
return log_parse_result.is_executed(line.context, line.extension, line.priority)
def _is_extension_match_pattern(extension, pattern):
regex_pattern = _convert_ast_pattern_to_regex_pattern(pattern)
if re.match(regex_pattern, extension):
return True
else:
return False
def _convert_ast_pattern_to_regex_pattern(ast_pattern):
regex_pattern_list = ['^']
index = 0
length = len(ast_pattern)
while index < length:
cur_char = ast_pattern[index]
if cur_char == 'X':
regex_pattern_list.append('[0-9]')
elif cur_char == 'Z':
regex_pattern_list.append('[1-9]')
elif cur_char == 'N':
regex_pattern_list.append('[2-9]')
elif cur_char == '[':
            close_index = ast_pattern.find(']', index)
            # copy the characters between '[' and ']' into the regex character class
            regex_pattern_list.append('[{}]'.format(ast_pattern[index + 1:close_index]))
            index = close_index  # the trailing index += 1 then moves past ']'
elif cur_char == '.':
regex_pattern_list.append('.+')
break
elif cur_char == '!':
regex_pattern_list.append('.*')
break
else:
regex_pattern_list.append(re.escape(cur_char))
index += 1
regex_pattern_list.append('$')
return ''.join(regex_pattern_list)
class _Analysis(object):
def __init__(self, filename, line_analyses):
self.filename = filename
self.line_analyses = line_analyses
class _LineAnalysis(object):
def __init__(self, content, is_executable, is_executed):
self.content = content
self.is_executable = is_executable
self.is_executed = is_executed
| gpl-3.0 | 7,107,767,899,599,299,000 | 37.463158 | 100 | 0.640941 | false | 3.834208 | false | false | false |
fabiencro/knmt | nmt_chainer/utilities/file_infos.py | 1 | 1191 | from __future__ import absolute_import, division, print_function, unicode_literals
import time
import os.path
import hashlib
from nmt_chainer.utilities.argument_parsing_tools import OrderedNamespace
##########################################
# A function to compute the hash of a file
# Taken from http://stackoverflow.com/questions/3431825/generating-an-md5-checksum-of-a-file
#
def hash_bytestr_iter(bytesiter, hasher, ashexstr=False):
for block in bytesiter:
hasher.update(block)
return (hasher.hexdigest() if ashexstr else hasher.digest())
def file_as_blockiter(afile, blocksize=65536):
with afile:
block = afile.read(blocksize)
while len(block) > 0:
yield block
block = afile.read(blocksize)
def compute_hash_of_file(filename):
return hash_bytestr_iter(file_as_blockiter(open(filename, 'rb')), hashlib.sha256(), ashexstr = True)
def create_filename_infos(model_filename):
model_infos = OrderedNamespace()
model_infos["path"] = model_filename
model_infos["last_modif"] = time.ctime(os.path.getmtime(model_filename))
model_infos["hash"] = compute_hash_of_file(model_filename)
return model_infos
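# Illustrative usage sketch (not part of the original module; 'model.npz' is a
# hypothetical file name):
#
#   infos = create_filename_infos('model.npz')
#   print(infos["path"], infos["last_modif"], infos["hash"])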
| gpl-3.0 | -1,077,444,829,948,523,000 | 33.028571 | 104 | 0.691856 | false | 3.565868 | false | false | false |