Dataset columns (one row per source file):

- repo_name: string (length 5 to 92)
- path: string (length 4 to 232)
- copies: string (19 distinct values)
- size: string (length 4 to 7)
- content: string (length 721 to 1.04M)
- license: string (15 distinct values)
- hash: int64 (-9,223,277,421,539,062,000 to 9,223,102,107B)
- line_mean: float64 (6.51 to 99.9)
- line_max: int64 (15 to 997)
- alpha_frac: float64 (0.25 to 0.97)
- autogenerated: bool (1 class)
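Each row corresponds to one source file. As a minimal sketch of how a dump with these columns could be loaded and filtered with the Hugging Face `datasets` library (the dataset path and the thresholds below are placeholders, not part of the original):

from datasets import load_dataset

ds = load_dataset("example/python-code-dump", split="train")  # placeholder path

# Keep human-written files with a reasonable alphabetic fraction.
filtered = ds.filter(lambda row: not row["autogenerated"] and row["alpha_frac"] > 0.4)
print(len(filtered), "files kept")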

repo: Micronaet/micronaet-order | path: order_partner_default_address/__openerp__.py | copies: 1 | size: 1540

###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
{
'name': 'Order default address',
'version': '0.1',
'category': 'Sale',
'description': '''
Set default address during order operation
''',
'author': 'Micronaet S.r.l. - Nicola Riolini',
'website': 'http://www.micronaet.it',
'license': 'AGPL-3',
'depends': [
'base',
'sale',
'partner_addresses',
'order_destination', # set false this module
],
'init_xml': [],
'demo': [],
'data': [
'default_address_view.xml',
],
'active': False,
'installable': True,
'auto_install': False,
}
license: agpl-3.0 | hash: -7,340,566,109,645,672,000 | line_mean: 34 | line_max: 79 | alpha_frac: 0.555195 | autogenerated: false

repo: DaMonkey/hankypanky | path: lazylibrarian/gr.py | copies: 1 | size: 2378

import time, threading, urllib, urllib2, sys
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, SubElement
import lazylibrarian
from lazylibrarian import logger, formatter, database
class GoodReads:
# http://www.goodreads.com/api/
def __init__(self, name=None, type=None):
self.name = name.replace('.', '')
self.type = type
self.params = {"key": lazylibrarian.GR_API}
def find_author_id(self):
URL = 'http://www.goodreads.com/api/author_url/' + urllib.quote(self.name) + '.xml?' + urllib.urlencode(self.params)
logger.info("Searching for author with name: %s" % self.name)
try:
sourcexml = ElementTree.parse(urllib2.urlopen(URL, timeout=20))
        except (urllib2.URLError, IOError, EOFError), e:
            logger.error("Error fetching authorid: %s" % e)
            return []  # bail out rather than parsing an unset response
rootxml = sourcexml.getroot()
resultxml = rootxml.getiterator('author')
authorlist = []
if not len(rootxml):
logger.info('No authors found with name: %s' % self.name)
return authorlist
else:
for author in resultxml:
authorid = author.attrib.get("id")
logger.info('Found author: %s with GoodReads-id: %s' % (author[0].text, authorid))
time.sleep(1)
authorlist = self.get_author_info(authorid)
return authorlist
def get_author_info(self, authorid=None):
URL = 'http://www.goodreads.com/author/show/' + authorid + '.xml?' + urllib.urlencode(self.params)
sourcexml = ElementTree.parse(urllib2.urlopen(URL, timeout=20))
rootxml = sourcexml.getroot()
resultxml = rootxml.find('author')
author_dict = {}
if not len(rootxml):
logger.info('No author found with ID: ' + authorid)
else:
logger.info("Processing info for authorID: %s" % authorid)
author_dict = {
'authorid': resultxml[0].text,
'authorlink': resultxml.find('link').text,
'authorimg': resultxml.find('image_url').text,
'authorborn': resultxml.find('born_at').text,
'authordeath': resultxml.find('died_at').text,
'totalbooks': resultxml.find('works_count').text
}
return author_dict
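# Minimal usage sketch, assuming lazylibrarian.GR_API already holds a valid
# GoodReads API key; the key and author name below are placeholders only.
if __name__ == '__main__':
    lazylibrarian.GR_API = "your-goodreads-api-key"
    gr = GoodReads(name="Iain M. Banks")
    author_info = gr.find_author_id()   # [] if nothing matched, else a dict of author fields
    print author_info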
license: gpl-3.0 | hash: 303,320,833,390,499,000 | line_mean: 36.15625 | line_max: 124 | alpha_frac: 0.589151 | autogenerated: false

repo: merlinpatt/py-trello | path: trello/board.py | copies: 1 | size: 7269

from __future__ import absolute_import
from .card import Card
from .list import List
from .member import Member
class Board(object):
"""
Class representing a Trello board. Board attributes are stored as normal
Python attributes; access to all sub-objects, however, is always
an API call (Lists, Cards).
"""
@property
def lists(self):
"""
Lazily loads and returns the lists
"""
if self._lists is None:
self._lists = self.all_lists()
return self._lists
@property
def members(self):
"""
Lazily loads and returns the members
"""
if self._members is None:
self._members = self.all_members()
return self._members
def __init__(self, client=None, board_id=None, organization=None, name=''):
"""
:trello: Reference to a Trello object
:board_id: ID for the board
Alternative Constructor
:organization: reference to the parent organization
:board_id: ID for this board
"""
if organization is None:
self.client = client
else:
self.organization = organization
self.client = organization.client
self.id = board_id
self.name = name
self._lists = None
self._members = None
@classmethod
def from_json(cls, trello_client=None, organization=None, json_obj=None):
"""
Deserialize the board json object to a Board object
:trello_client: the trello client
:json_obj: the board json object
        Alternative construction:
Deserialize the board json object to a board object
:organization: the organization object that the board belongs to
:json_obj: the json board object
"""
if organization is None:
board = Board(client=trello_client, board_id=json_obj['id'], name=json_obj['name'].encode('utf-8'))
else:
board = Board(organization=organization, board_id=json_obj['id'], name=json_obj['name'].encode('utf-8'))
board.description = json_obj.get('desc', '').encode('utf-8')
board.closed = json_obj['closed']
board.url = json_obj['url']
return board
def __repr__(self):
return '<Board %s>' % self.name
def search_lists(self, query):
lists = [tlist for tlist in self.lists if query.lower() in tlist.name.lower()]
return lists[0] if len(lists) == 1 else lists
def search_members(self, query):
members = [member for member in self.members if query.lower() in member.full_name.lower()]
return members[0] if len(members) == 1 else members
def fetch(self):
"""Fetch all attributes for this board"""
json_obj = self.client.fetch_json('/boards/' + self.id)
self.name = json_obj['name']
self.description = json_obj.get('desc', '')
self.closed = json_obj['closed']
self.url = json_obj['url']
def save(self):
pass
def close(self):
self.client.fetch_json(
'/boards/' + self.id + '/closed',
http_method='PUT',
post_args={'value': 'true', }, )
self.closed = True
def get_list(self, list_id):
obj = self.client.fetch_json('/lists/' + list_id)
return List.from_json(board=self, json_obj=obj)
def all_lists(self):
"""Returns all lists on this board"""
return self.get_lists('all')
def open_lists(self):
"""Returns all open lists on this board"""
return self.get_lists('open')
def closed_lists(self):
"""Returns all closed lists on this board"""
return self.get_lists('closed')
def get_lists(self, list_filter):
# error checking
json_obj = self.client.fetch_json(
'/boards/' + self.id + '/lists',
query_params={'cards': 'none', 'filter': list_filter})
return [List.from_json(board=self, json_obj=obj) for obj in json_obj]
def add_list(self, name):
"""Add a list to this board
:name: name for the list
:return: the list
"""
obj = self.client.fetch_json(
'/lists',
http_method='POST',
post_args={'name': name, 'idBoard': self.id}, )
return List.from_json(board=self, json_obj=obj)
def all_cards(self):
"""Returns all cards on this board"""
filters = {
'filter': 'all',
'fields': 'all'
}
return self.get_cards(filters)
def open_cards(self):
"""Returns all open cards on this board"""
filters = {
'filter': 'open',
'fields': 'all'
}
return self.get_cards(filters)
def closed_cards(self):
"""Returns all closed cards on this board"""
filters = {
'filter': 'closed',
'fields': 'all'
}
return self.get_cards(filters)
def get_cards(self, filters=None):
"""
:card_filter: filters on card status ('open', 'closed', 'all')
:query_params: dict containing query parameters. Eg. {'fields': 'all'}
More info on card queries:
https://trello.com/docs/api/board/index.html#get-1-boards-board-id-cards
"""
json_obj = self.client.fetch_json(
'/boards/' + self.id + '/cards',
query_params=filters
)
return list([Card.from_json(self, json) for json in json_obj])
def all_members(self):
"""Returns all members on this board"""
filters = {
'filter': 'all',
'fields': 'all'
}
return self.get_members(filters)
def normal_members(self):
"""Returns all normal members on this board"""
filters = {
'filter': 'normal',
'fields': 'all'
}
return self.get_members(filters)
def admin_members(self):
"""Returns all admin members on this board"""
filters = {
'filter': 'admins',
'fields': 'all'
}
return self.get_members(filters)
def owner_members(self):
"""Returns all owner members on this board"""
filters = {
'filter': 'owners',
'fields': 'all'
}
return self.get_members(filters)
def get_members(self, filters=None):
json_obj = self.client.fetch_json(
'/boards/' + self.id + '/members',
query_params=filters)
members = list()
for obj in json_obj:
m = Member(self.client, obj['id'])
m.status = obj['status'].encode('utf-8')
m.id = obj.get('id', '')
m.bio = obj.get('bio', '')
m.url = obj.get('url', '')
m.username = obj['username'].encode('utf-8')
m.full_name = obj['fullName'].encode('utf-8')
m.initials = obj['initials'].encode('utf-8')
members.append(m)
return members
def fetch_actions(self, action_filter):
json_obj = self.client.fetch_json(
'/boards/' + self.id + '/actions',
query_params={'filter': action_filter})
self.actions = json_obj
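# Short usage sketch, assuming a TrelloClient from this same package and
# placeholder credentials/board id supplied by the caller.
if __name__ == '__main__':
    from trello import TrelloClient
    client = TrelloClient(api_key='your-key', token='your-token')
    board = Board(client=client, board_id='4d5ea62fd76aa1136000000c')
    board.fetch()                        # pulls name/description/closed/url
    print board.name, len(board.open_cards())
    print board.search_lists('to do')    # lists are fetched lazily on first access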
license: bsd-3-clause | hash: -675,035,804,408,850,800 | line_mean: 29.670886 | line_max: 116 | alpha_frac: 0.551245 | autogenerated: false

repo: waterblue13/tensor2tensor | path: tensor2tensor/data_generators/ptb.py | copies: 1 | size: 5009

# coding=utf-8
# Copyright 2017 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data generators for PTB data-sets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import sys
import tarfile
# Dependency imports
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.utils import registry
import tensorflow as tf
EOS = text_encoder.EOS
PTB_URL = "http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz"
def _read_words(filename):
"""Reads words from a file."""
with tf.gfile.GFile(filename, "r") as f:
if sys.version_info[0] >= 3:
return f.read().replace("\n", " %s " % EOS).split()
else:
return f.read().decode("utf-8").replace("\n", " %s " % EOS).split()
def _build_vocab(filename, vocab_path, vocab_size):
"""Reads a file to build a vocabulary of `vocab_size` most common words.
The vocabulary is sorted by occurrence count and has one word per line.
Originally from:
https://github.com/tensorflow/models/blob/master/tutorials/rnn/ptb/reader.py
Args:
filename: file to read list of words from.
vocab_path: path where to save the vocabulary.
    vocab_size: size of the vocabulary to generate.
"""
data = _read_words(filename)
counter = collections.Counter(data)
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
words, _ = list(zip(*count_pairs))
words = words[:vocab_size]
with open(vocab_path, "w") as f:
f.write("\n".join(words))
def _get_token_encoder(vocab_dir, vocab_name, filename):
"""Reads from file and returns a `TokenTextEncoder` for the vocabulary."""
vocab_path = os.path.join(vocab_dir, vocab_name)
if not tf.gfile.Exists(vocab_path):
_build_vocab(filename, vocab_path, 10000)
return text_encoder.TokenTextEncoder(vocab_path)
class PTBProblem(problem.Text2TextProblem):
"""A class for generating PTB data."""
@property
def has_inputs(self):
return False
@property
def target_space_id(self):
if self.is_character_level:
return problem.SpaceID.EN_CHR
return problem.SpaceID.EN_TOK
@property
def num_shards(self):
return 10
@property
def vocab_name(self):
return "vocab.lmptb_10k"
@property
def use_subword_tokenizer(self):
return False
@property
def targeted_vocab_size(self):
return 10000
def generator(self, data_dir, tmp_dir, train):
filename = os.path.basename(PTB_URL)
compressed_filepath = generator_utils.maybe_download(
tmp_dir, filename, PTB_URL)
ptb_files = []
ptb_char_files = []
with tarfile.open(compressed_filepath, "r:gz") as tgz:
files = []
# Selecting only relevant files.
for m in tgz.getmembers():
if "ptb" in m.name and ".txt" in m.name:
if "char" in m.name:
ptb_char_files += [m.name]
else:
ptb_files += [m.name]
files += [m]
tgz.extractall(tmp_dir, members=files)
if self.is_character_level:
files = ptb_char_files
else:
files = ptb_files
train_file, valid_file = None, None
for filename in files:
if "train" in filename:
train_file = os.path.join(tmp_dir, filename)
elif "valid" in filename:
valid_file = os.path.join(tmp_dir, filename)
assert train_file, "Training file not found"
assert valid_file, "Validation file not found"
if self.is_character_level:
encoder = text_encoder.ByteTextEncoder()
else:
encoder = _get_token_encoder(data_dir, self.vocab_file, train_file)
if train:
return self._generator(train_file, encoder)
return self._generator(valid_file, encoder)
def _generator(self, filename, encoder):
with tf.gfile.GFile(filename, "r") as f:
for line in f:
line = " ".join(line.replace("\n", " %s " % EOS).split())
tok = encoder.encode(line)
if tok:
yield {"inputs": [0], "targets": tok}
@registry.register_problem
class LanguagemodelPtb10k(PTBProblem):
"""A class for generating PTB data, 10k vocab."""
@property
def is_character_level(self):
return False
@registry.register_problem
class LanguagemodelPtbCharacters(PTBProblem):
"""A class for generating PTB data, character-level."""
@property
def is_character_level(self):
return True
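# Small sketch: pull the registered problem back out of the registry and
# generate its data, assuming a standard tensor2tensor setup; the directory
# paths below are placeholders.
if __name__ == "__main__":
  ptb_problem = registry.problem("languagemodel_ptb10k")
  ptb_problem.generate_data("/tmp/t2t_data", "/tmp/t2t_tmp")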
license: apache-2.0 | hash: 6,008,215,858,963,581,000 | line_mean: 27.622857 | line_max: 79 | alpha_frac: 0.678978 | autogenerated: false

repo: trabucayre/gnuradio | path: grc/core/blocks/dummy.py | copies: 1 | size: 1190

# Copyright 2016 Free Software Foundation, Inc.
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
from . import Block, register_build_in
from ._build import build_params
@register_build_in
class DummyBlock(Block):
is_dummy_block = True
label = 'Missing Block'
key = '_dummy'
def __init__(self, parent, missing_block_id, parameters, **_):
self.key = missing_block_id
        self.parameters_data = build_params([], False, False, self.flags, self.key)
super(DummyBlock, self).__init__(parent=parent)
param_factory = self.parent_platform.make_param
for param_id in parameters:
self.params.setdefault(param_id, param_factory(parent=self, id=param_id, dtype='string'))
def is_valid(self):
return False
@property
def enabled(self):
return False
def add_missing_port(self, port_id, direction):
port = self.parent_platform.make_port(
parent=self, direction=direction, id=port_id, name='?', dtype='',
)
if port.is_source:
self.sources.append(port)
else:
self.sinks.append(port)
return port
license: gpl-3.0 | hash: 8,029,031,467,999,280,000 | line_mean: 26.045455 | line_max: 101 | alpha_frac: 0.626891 | autogenerated: false

repo: sagiss/sardana | path: test/test_ctrl/WaterPapCtrl_stat1.py | copies: 1 | size: 4673

import PyTango
import socket
import MotorController
class IcePapController(MotorController.MotorController):
MaxDevice = 6
def __init__(self,inst,props):
print "PYTHON -> IcePapController ctor for instance",inst
MotorController.MotorController.__init__(self,inst,props)
self.nb_call = 0;
self.socket_connected = False;
self.db = PyTango.Database()
self.ct_name = "IcePapController/" + self.inst_name
#
# Get controller properties
#
prop_list = ['host','port','timeout']
prop = self.db.get_property(self.ct_name,prop_list)
if len(prop["host"]) != 0:
self.host = prop["host"][0]
else:
print "Property host not defined for controller",self.ct_name
self.host = "nada"
if len(prop["port"]) != 0:
self.port = int(prop["port"][0])
else:
print "Property port not defined for controller",self.ct_name
self.port = 0
if len(prop["timeout"]) != 0:
self.timeout = int(prop["timeout"][0])
else:
print "Property timeout not defined for controller",self.ct_name
self.timeout = 3
#
# Connect to the icepap
#
print "PYTHON -> IcePap on",self.host," and port",self.port," with timeout = ",self.timeout
# self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# self.sock.settimeout(self.timeout)
# self.sock.connect(("icepap", self.port))
# self.socket_connected = True
print "PYTHON -> Connected to", self.host, " on port", self.port
#
# Check that the Icepap is OK
#
# ans = self.IceWriteRead("?ID")
def AddDevice(self,axis):
print "PYTHON -> IcePapController/",self.inst_name,": In AddDevice method for axis",axis
# raise RuntimeError,"Hola la la"
def DeleteDevice(self,axis):
print "PYTHON -> IcePapController/",self.inst_name,": In DeleteDevice method for axis",axis
def StateOne(self,axis):
print "PYTHON -> IcePapController/",self.inst_name,": In StateOne method for axis",axis
tup = (PyTango.DevState.FAULT,0,"Hola tio")
return tup
def PreReadAll(self):
print "PYTHON -> IcePapController/",self.inst_name,": In PreReadAll method"
def PreReadOne(self,axis):
print "PYTHON -> IcePapController/",self.inst_name,": In PreReadOne method for axis",axis
def ReadAll(self):
print "PYTHON -> IcePapController/",self.inst_name,": In ReadAll method"
def ReadOne(self,axis):
print "PYTHON -> IcePapController/",self.inst_name,": In ReadOne method for axis",axis
return 123
def PreStartAll(self):
print "PYTHON -> IcePapController/",self.inst_name,": In PreStartAll method"
def PreStartOne(self,axis,pos):
print "PYTHON -> IcePapController/",self.inst_name,": In PreStartOne method for axis",axis," with pos",pos
return True
def StartOne(self,axis,pos):
print "PYTHON -> IcePapController/",self.inst_name,": In StartOne method for axis",axis," with pos",pos
def StartAll(self):
print "PYTHON -> IcePapController/",self.inst_name,": In StartAll method"
def SetPar(self,axis,name,value):
print "PYTHON -> IcePapController/",self.inst_name,": In SetPar method for axis",axis," name=",name," value=",value
def GetPar(self,axis,name):
print "PYTHON -> IcePapController/",self.inst_name,": In GetPar method for axis",axis," name=",name
return 12.34
def IceWrite(self,data):
data = data + "\n"
byteSent = self.sock.send(data)
print "PYTHON -> Sent", byteSent, "bytes to icepap"
def IceWriteRead(self,data):
self.IceWrite(data)
byteReceived = self.sock.recv(1024)
print "PYTHON -> Icepap answered:",byteReceived
return byteReceived
def IceResetFifo(self):
self.IceWrite("fiforst")
def IceCheckError(self,ice_answer):
if (ice_answer.find("ERROR") != -1):
new_ans = self.IceWriteRead("?ERR 1")
print "Error from IcePap =",new_ans
def __del__(self):
print "PYTHON -> IcePapController/",self.inst_name,": Aarrrrrg, I am dying"
#
# Reset IcePap FIFO
#
if (self.socket_connected == True):
print "PYTHON -> Closing connection"
self.IceResetFifo()
self.sock.close()
if __name__ == "__main__":
obj = IcePapController('test')
# obj.AddDevice(2)
# obj.DeleteDevice(2)
license: lgpl-3.0 | hash: -6,052,067,277,264,365,000 | line_mean: 31.908451 | line_max: 123 | alpha_frac: 0.598973 | autogenerated: false

repo: jmread/cerebro | path: cerebro/RTF.py | copies: 1 | size: 3665

from numpy import *
from functions import sigmoid
set_printoptions(precision=4)
class RTF():
'''
Recurrent Basis/Transformation Function
---------------------------------------
Turn x into \phi in a recurrent manner.
'''
W_hh = None
W_ih = None
z = None
def __init__(self, N_i, N_h, f=sigmoid, density=0.1):
'''
'''
self.f = f # non-linearity
self.N_i = N_i # inputs
# Generate nodes
self.z = zeros(N_h) # nodes
self.z[0] = 1. # output bias node
# Generate random weights
self.W_ih = random.randn(N_i,N_h-1) * 1.0 * (random.rand(N_i,N_h-1) <= density)
self.W_hh = random.randn(N_h-1,N_h-1) * 1.0 * (random.rand(N_h-1,N_h-1) <= density)
# Calculate the eigenvectors (V) of W_hh
V,U = linalg.eig(self.W_hh)
# Check that we won't be dividing by 0
if max(absolute(V)) <= 0.:
V = V + 0.01
# Scale the initial weights to a spectral radius of 1.
self.W_hh = self.W_hh / max(absolute(V))
#self.b_ih = random.randn(N_h-1) * 0.1
def store_y(self,y):
print "we can store y (the PREVIOUS output) so as to use it in the transformamtion"
def phi(self,x):
#print "+++++++++++"
#print self.W_hh.shape
#print self.W_ih.shape
##print self.b_ih.shape
#print x.shape
#print self.z.shape
#print "==========="
self.z[1:] = self.f( dot(self.W_hh, self.z[1:]) + dot(self.W_ih.T, x) ) #self.b_ih + <--- I don't think bias is needed for ESN??
return self.z
def reset(self):
self.z = self.z * 0.
self.z[0] = 1.
class RTFv2(RTF):
'''
Like RTF, but includes (@TODO)
- output feedback loop
- regularization (noise to the input)
- efficient sparse solution (each node is connected to exactly N other nodes) -- similary to Markov Chain code for Jaakko's seminar course.
'''
W_oh = None
y = None
v = None
def __init__(self, N_i, N_h, N_o, f=sigmoid, density=0.1, state_noise=0.01):
RTF.__init__(self,N_i,N_h,f,density)
self.N_o = N_o # outputs
self.W_oh = random.randn(N_o,N_h-1) * 1.0 * (random.rand(N_o,N_h-1) <= density) # NEW
self.v = state_noise
def store_y(self,y):
self.y = y
def phi(self,x):
self.z[0:-1] = self.f( dot(self.W_hh, self.z[0:-1]) + dot(self.W_ih.T, x) + dot(self.W_oh.T, self.y)) + random.randn(len(self.z)-1) * self.v
return self.z
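# The demo() below only exercises the plain RTF; a minimal sketch for RTFv2
# with the output feedback loop (dimensions are arbitrary, and the random
# stand-in prediction would be replaced by a real readout).
def demo_v2(T=5):
    rtf2 = RTFv2(N_i=3, N_h=20, N_o=2, density=0.2, state_noise=0.01)
    rtf2.store_y(zeros(2))            # previous output, fed back at each step
    for t in range(T):
        x = random.randn(3)
        z = rtf2.phi(x)               # recurrent transformation of the input
        y_hat = random.randn(2)       # stand-in for a readout's prediction
        rtf2.store_y(y_hat)           # feed the prediction back for step t+1
    return z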
def demo():
D = 2
H = 10
N = 100
rtf = RTF(D,H,f=sigmoid,density=0.2)
#print rtf.W
X = random.randn(N,D) #(random.rand(N,D) > 0.5) * 1.
X[:,0] = 1.
X[10:20,:] = 0.
X[40:60,:] = 0.
X[80:100,:] = 0.
Z = zeros((N,H))
for i in range(N):
Z[i] = rtf.phi(X[i])
import matplotlib
matplotlib.use('Qt4Agg')
from matplotlib.pyplot import *
fig = figure()
ax = fig.add_subplot(111)
ax.set_xlim([0,N])
ax.set_ylim([-0.1,1.1])
lines = [None for i in range(H+D)]
for j in range(D):
lines[j], = ax.plot([0,0],"k:",label=""+str(j),linewidth=2)
for j in range(D,H+D):
lines[j], = ax.plot([0,0],label=""+str(j),linewidth=2)
ion()
for lim in range(1,N):
for j in range(D):
lines[j].set_data(range(0,lim),X[0:lim,j])
for j in range(H):
            lines[j+D].set_data(range(0,lim),Z[0:lim,j])  # offset by D: lines[0:D] show the inputs
pause(0.1)
grid(True)
legend()
show()
ioff()
if __name__ == '__main__':
demo()
license: gpl-3.0 | hash: -5,572,325,840,859,599,000 | line_mean: 27.192308 | line_max: 151 | alpha_frac: 0.505048 | autogenerated: false

repo: vlimant/IntelROCCS | path: Monitor/datasetSummary.py | copies: 1 | size: 10120

#!/usr/bin/python
#-------------------------------------------------------------------------------------------------
#quick script to give basic information about usage of a class of datasets
#originally written to provide PromptReco RECO info to Dima
#---------------------------------------------------------------------------------------------------
import os, sys
if not os.environ.get('DETOX_DB') or not os.environ.get('MONITOR_DB'):
print '\n ERROR - DETOX environment not defined: source setup.sh\n'
sys.exit(0)
import re, glob, subprocess, time, json, MySQLdb
from Dataset import *
import getAccessInfo
genesis=1378008000
# genesis=int(time.mktime(time.strptime("2014-09-01","%Y-%m-%d")))
nowish = time.time()
# get the dataset pattern to consider (careful the pattern will be translated, better implementation
# should be done at some point)
datasetPattern = '/.*/.*PromptReco.*/RECO'
#===================================================================================================
# H E L P E R S
#===================================================================================================
def processPhedexCacheFile(fileName,debug=0):
# processing the contents of a simple file into a hash array
sizesPerSite = {}
iFile = open(fileName,'r')
# loop through all lines
for line in iFile.xreadlines():
line = line[:-1]
f = line.split()
if len(f) == 9:
dataset = f[0]
group = f[1]
size = float(f[2])
nFiles = int(f[3])
custd = int(f[4])
site = f[5]
date = int(f[6])
valid = int(f[8])
else:
print 'Columns not equal 9: \n %s'%(line)
sys.exit(1)
# first step, find the sizes per site per dataset hash array
if site in sizesPerSite:
sizesPerSitePerDataset = sizesPerSite[site]
else:
sizesPerSitePerDataset = {}
sizesPerSite[site] = sizesPerSitePerDataset
if dataset in sizesPerSitePerDataset:
sizesPerSitePerDataset[dataset] += size
else:
sizesPerSitePerDataset[dataset] = size
iFile.close();
# return the sizes per site
return sizesPerSite
def processFile(fileName,debug=0):
# processing the contents of a simple file into a hash array
nSkipped = 0
nAccessed = {} # the result is a hash array
# read the data from the json file
try:
with open(fileName) as data_file:
data = json.load(data_file)
except ValueError:
# if the file doesn't exist, then there were no accesses that day
print fileName
return (0,{})
# generic full print (careful this can take very long)
if debug>1:
pprint.pprint(data)
if debug>0:
print " SiteName: " + data["SITENAME"]
# loop through the full json data an extract the accesses per dataset
for entry in data["DATA"]:
key = entry["COLLNAME"]
value = entry["NACC"]
if debug>0:
print " NAccess - %6d %s"%(value,key)
# filter out non-AOD data
if not re.match(datasetPattern,key):
if debug>0:
print ' WARNING -- rejecting *USER* type data: ' + key
nSkipped += 1
continue
# here is where we assign the values to the hash (checking if duplicates are registered)
if key in nAccessed:
print ' WARNING - suspicious entry - the entry exists already (continue)'
nAccessed[key] += value
else:
nAccessed[key] = value
# return the datasets
return (nSkipped, nAccessed)
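# For reference, the JSON layout processFile() expects, written out as a
# Python literal; the site and dataset names below are made-up examples.
exampleAccessRecord = {
    "SITENAME": "T2_US_MIT",
    "DATA": [
        {"COLLNAME": "/MinimumBias/Run2015A-PromptReco-v1/RECO", "NACC": 42},
        {"COLLNAME": "/ZeroBias/Run2015A-PromptReco-v1/RECO", "NACC": 7},
    ]
}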
def addData(nAllAccessed,nAccessed,debug=0):
# adding a hash array (nAccessed) to the mother of all hash arrays (nAllAccessed)
# loop through the hash array
for key in nAccessed:
# add the entries to our all access hash array
if key in nAllAccessed:
nAllAccessed[key] += nAccessed[key]
else:
nAllAccessed[key] = nAccessed[key]
# return the updated all hash array
return nAllAccessed
def addSites(nSites,nAccessed,debug=0):
# adding up the number of sites for each dataset
# loop through the hash array
for key in nAccessed:
# add the entries to our all access hash array
if key in nSites:
nSites[key] += 1
else:
nSites[key] = 1
# return the updated all hash array
return nSites
def getDbCursor():
# get access to the detox database
# configuration
db = os.environ.get('DETOX_SITESTORAGE_DB')
server = os.environ.get('DETOX_SITESTORAGE_SERVER')
user = os.environ.get('DETOX_SITESTORAGE_USER')
pw = os.environ.get('DETOX_SITESTORAGE_PW')
# open database connection
db = MySQLdb.connect(host=server,db=db, user=user,passwd=pw)
# prepare a cursor object using cursor() method
return db.cursor()
def readDatasetProperties():
# read dataset properties fromt the database
sizesGb = {}
fileNumbers = {}
# get access to the database
cursor = getDbCursor()
sql = "select Datasets.DatasetName, DatasetProperties.NFiles, DatasetProperties.Size "
sql += "from Datasets join DatasetProperties "
sql += "on Datasets.DatasetId = DatasetProperties.DatasetId";
# go ahead and try
try:
cursor.execute(sql)
results = cursor.fetchall()
for row in results:
dataset = row[0]
nFiles = row[1]
sizeGb = row[2]
# add to our memory
# print dataset,nFiles,sizeGb
fileNumbers[dataset] = int(nFiles)
sizesGb[dataset] = float(sizeGb)
except:
pass
return (fileNumbers, sizesGb)
#===================================================================================================
# M A I N
#===================================================================================================
debug = 0
site = 'T2*'
siterx=re.sub("\*",".*",site) # to deal with stuff like T2* --> T2.*
nSiteAccess = {}
monitorDB = os.environ['MONITOR_DB']
# form date regexps
starttmp = time.gmtime(genesis)
endtmp = time.gmtime(nowish)
dates=[]
for year in range(starttmp[0],endtmp[0]+1):
for month in range(1,13):
if year==starttmp[0] and month<starttmp[1]:
continue
elif year==endtmp[0] and month>endtmp[1]:
continue
else:
dates.append("%i-%02i-??"%(year,month))
# --------------------------------------------------------------------------------------------------
# loop through all matching snapshot files
# --------------------------------------------------------------------------------------------------
# figure out which snapshot files to consider
workDirectory = os.environ['DETOX_DB'] + '/' + os.environ['DETOX_STATUS']
files=[]
Dataset.siteList = [ x.split("/")[-1] for x in glob.glob(workDirectory + '/'+site)]
if os.environ['UPDATE_CACHE']=="True":
# should only be set to False for testing purposes
getAccessInfo.get() # update access cache
for date in dates:
files += glob.glob(monitorDB+'/sitesInfo/'+site+'/'+date)
# files += glob.glob(workDirectory + '/' + site + '/' + os.environ['DETOX_SNAPSHOTS'] + '/' + date)
phedexFile = open(workDirectory+'/DatasetsInPhedexAtSites.dat','r')
groupPattern = '.*'
# say what we are going to do
print "\n = = = = S T A R T A N A L Y S I S = = = =\n"
print " Pattern: %s"%(datasetPattern)
print " Group: %s"%(groupPattern)
print " Logfiles in: %s"%(workDirectory)
print " Site pattern: %s"%(siterx)
fileNumbers,sizesGb = readDatasetProperties()
# datasetsOnSites[k]=v, where k is a dataset and v is a set of sites
# this can probably be done smarter and combined with nSites computation
datasetsOnSites={}
datasetSet={}
for line in phedexFile:
l = line.split()
datasetName=l[0]
siteName=l[5]
if not re.match(datasetPattern,datasetName):
continue
if re.match(".*BUNNIES.*",datasetName):
# get rid of T0 testing datasets
# why are these even in DDM?
continue
if not re.match(siterx,siteName):
continue
if datasetName not in datasetSet:
datasetObject = Dataset(datasetName)
datasetSet[datasetName] = datasetObject
else:
datasetObject = datasetSet[datasetName]
datasetObject.isDeleted = False
datasetObject.addCurrentSite(siteName,l[6],l[7])
datasetObject = None
# remove blacklisted datasets
blacklistFile = open(os.environ.get('MONITOR_DB')+'/datasets/blacklist.log','r')
blacklistSet = set(map(lambda x : x.split()[0], list(blacklistFile)))
removeByKey(datasetSet,blacklistSet)
for fileName in sorted(files):
if debug>0:
print ' Analyzing: ' + fileName
g = fileName.split("/")
siteName = g[-2]
if siteName in nSiteAccess:
nSiteAccessEntry = nSiteAccess[siteName]
else:
nSiteAccessEntry = {}
nSiteAccess[siteName] = nSiteAccessEntry
# analyze this file
(nSkipped, nAccessed) = processFile(fileName,debug)
# add the results to our 'all' record
nSiteAccessEntry = addData(nSiteAccessEntry,nAccessed,debug)
utime = time.mktime(time.strptime(g[-1],"%Y-%m-%d"))
for datasetName in nAccessed:
try:
datasetSet[datasetName].addAccesses(siteName,nAccessed[datasetName],utime)
except KeyError:
pass
for d in datasetSet:
try:
datasetObject = datasetSet[d]
datasetObject.nFiles = fileNumbers[d]
datasetObject.sizeGB = sizesGb[d]
datasetObject = None
except KeyError:
datasetObject = None
pass
for run in ['2015A-','2015B-','2015C-','2015D-','2013-','2013A','2012-','2012A-','2012C-','2012D-']:
for k,v in datasetSet.iteritems():
if k.find(run)<0:
continue
s='<tr><td>%s</td>'%(k)
for startTime in [nowish-30*86400,nowish-90*86400,nowish-180*86400,genesis]:
s+='<td>%.1f</td>'%(v.getTotalAccesses(startTime,nowish)/float(v.nFiles))
s+='</tr>'
print s
sys.exit(0)
license: mit | hash: -671,869,903,044,938,800 | line_mean: 32.509934 | line_max: 103 | alpha_frac: 0.578953 | autogenerated: false

repo: benauthor/pykafka | path: pykafka/test/utils.py | copies: 1 | size: 1395

import time
import os
from pykafka.test.kafka_instance import KafkaInstance, KafkaConnection
def get_cluster():
"""Gets a Kafka cluster for testing, using one already running is possible.
An already-running cluster is determined by environment variables:
BROKERS, ZOOKEEPER, KAFKA_BIN. This is used primarily to speed up tests
in our Travis-CI environment.
"""
if os.environ.get('BROKERS', None) and \
os.environ.get('ZOOKEEPER', None) and \
os.environ.get('KAFKA_BIN', None):
# Broker is already running. Use that.
return KafkaConnection(os.environ['KAFKA_BIN'],
os.environ['BROKERS'],
os.environ['ZOOKEEPER'])
else:
return KafkaInstance(num_instances=3)
def stop_cluster(cluster):
"""Stop a created cluster, or merely flush a pre-existing one."""
if isinstance(cluster, KafkaInstance):
cluster.terminate()
else:
cluster.flush()
def retry(assertion_callable, retry_time=10, wait_between_tries=0.1, exception_to_retry=AssertionError):
"""Retry assertion callable in a loop"""
start = time.time()
while True:
try:
return assertion_callable()
except exception_to_retry as e:
if time.time() - start >= retry_time:
raise e
time.sleep(wait_between_tries)
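# Small usage sketch for retry(): poll a condition until it stops raising
# AssertionError or the retry window runs out; the warm-up check is illustrative.
def _example_retry_usage():
    start = time.time()

    def assert_warmed_up():
        assert time.time() - start > 2, "still warming up"

    retry(assert_warmed_up, retry_time=10, wait_between_tries=0.5)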
license: apache-2.0 | hash: -317,677,155,204,829,200 | line_mean: 32.214286 | line_max: 104 | alpha_frac: 0.630824 | autogenerated: false

repo: ESA-VirES/eoxserver-magnetism | path: eoxsmagnetism/ows/wms/capabilitiesrenderer.py | copies: 1 | size: 3134

#-------------------------------------------------------------------------------
# $Id$
#
# Project: EOxServer <http://eoxserver.org>
# Authors: Fabian Schindler <[email protected]>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2011 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
import logging
from eoxserver.core import Component, implements
from eoxserver.core.config import get_eoxserver_config
from eoxserver.contrib import mapserver as ms
from eoxserver.resources.coverages.crss import CRSsConfigReader
from eoxserver.services.result import (
result_set_from_raw_data, get_content_type
)
from eoxserver.services.exceptions import RenderException
from eoxserver.services.ows.wms.exceptions import InvalidCRS, InvalidFormat
from eoxserver.services.ows.wms.interfaces import (
WMSCapabilitiesRendererInterface
)
logger = logging.getLogger(__name__)
class MapServerCapabilitiesRenderer(Component):
""" Base class for various WMS render components using MapServer.
"""
implements(WMSCapabilitiesRendererInterface)
def render(self):
mapfile_path = get_eoxserver_config().get("wmm", "mapfile")
map_ = ms.mapObj(mapfile_path) #TODO: path to map
map_.setMetaData("ows_enable_request", "*")
map_.setProjection("EPSG:4326")
map_.imagecolor.setRGB(0, 0, 0)
# set supported CRSs
decoder = CRSsConfigReader(get_eoxserver_config())
crss_string = " ".join(
map(lambda crs: "EPSG:%d" % crs, decoder.supported_crss_wms)
)
map_.setMetaData("ows_srs", crss_string)
map_.setMetaData("wms_srs", crss_string)
ms_request = ms.create_request((
("service", "WMS"),
("version", "1.3.0"),
("request", "GetCapabilities"),
))
raw_result = map_.dispatch(ms_request)
result = result_set_from_raw_data(raw_result)
return result, get_content_type(result)
license: mit | hash: 4,156,629,531,796,532,700 | line_mean: 38.670886 | line_max: 80 | alpha_frac: 0.659221 | autogenerated: false

repo: iLoveTux/data_store | path: test/test_api.py | copies: 1 | size: 2541

import data.store
import bottle
from io import BytesIO
from data.store import api
def test_api_exists():
assert hasattr(data.store, "api")
def test_get_collections_returns_list_of_collections():
assert data.store.api.get_collections() == {}
def test_del_collection_deletes_a_collection():
api.post_collection("new1")
length = len(api.collections)
api.del_collection("new1")
assert (length - 1) == len(api.collections)
def test_post_to_collections_collection_creates_new_collection():
data.store.api.post_collection("new")
assert "new" in data.store.api.collections
def test_post_to_post_record_adds_a_record_to_collection():
body = '{"name": "cliff", "email": "[email protected]"}'
bottle.request.environ['CONTENT_LENGTH'] = str(len(bottle.tob(body)))
bottle.request.environ['CONTENT_TYPE'] = "application/json"
bottle.request.environ['wsgi.input'] = BytesIO()
bottle.request.environ['wsgi.input'].write(bottle.tob(body))
bottle.request.environ['wsgi.input'].seek(0)
api.post_record("new")
assert "new" in data.store.api.collections
assert len(data.store.api.collections["new"]) == 1
def test_get_to_get_records_returns_records():
body = "name=cliff"
bottle.request.environ['CONTENT_LENGTH'] = str(len(bottle.tob(body)))
bottle.request.environ['wsgi.input'] = BytesIO()
bottle.request.environ['wsgi.input'].write(bottle.tob(body))
bottle.request.environ['wsgi.input'].seek(0)
results = api.get_records("new")
assert len(results) == 1
def test_delete_to_delete_record_deletes_a_record():
body = "name=cliff"
bottle.request.environ['CONTENT_LENGTH'] = str(len(bottle.tob(body)))
bottle.request.environ['wsgi.input'] = BytesIO()
bottle.request.environ['wsgi.input'].write(bottle.tob(body))
bottle.request.environ['wsgi.input'].seek(0)
results = api.delete_record("new")
assert len(api.collections["new"]) == 0
def test_update_record_updates_a_record():
api.collections["new"].add_record({"name": "Cliff", "_id": "test"})
body = '{"email": "[email protected]"}'
bottle.request.environ['CONTENT_LENGTH'] = str(len(bottle.tob(body)))
bottle.request.environ['CONTENT_TYPE'] = "application/json"
bottle.request.environ['wsgi.input'] = BytesIO()
bottle.request.environ['wsgi.input'].write(bottle.tob(body))
bottle.request.environ['wsgi.input'].seek(0)
results = api.update_record("new", "test")
assert api.collections["new"].find({"_id": "test"})[0]["email"] == "[email protected]"
license: gpl-2.0 | hash: -7,675,300,760,249,336,000 | line_mean: 40.655738 | line_max: 88 | alpha_frac: 0.683983 | autogenerated: false

repo: psiwczak/openstack | path: nova/db/sqlalchemy/models.py | copies: 1 | size: 38288

# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models for nova data.
"""
from sqlalchemy.orm import relationship, backref, object_mapper
from sqlalchemy import Column, Integer, BigInteger, String, schema
from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.schema import ForeignKeyConstraint
from nova.db.sqlalchemy.session import get_session
from nova import exception
from nova import flags
from nova import utils
FLAGS = flags.FLAGS
BASE = declarative_base()
class NovaBase(object):
"""Base class for Nova Models."""
__table_args__ = {'mysql_engine': 'InnoDB'}
__table_initialized__ = False
created_at = Column(DateTime, default=utils.utcnow)
updated_at = Column(DateTime, onupdate=utils.utcnow)
deleted_at = Column(DateTime)
deleted = Column(Boolean, default=False)
metadata = None
def save(self, session=None):
"""Save this object."""
if not session:
session = get_session()
session.add(self)
try:
session.flush()
except IntegrityError, e:
if str(e).endswith('is not unique'):
raise exception.Duplicate(str(e))
else:
raise
def delete(self, session=None):
"""Delete this object."""
self.deleted = True
self.deleted_at = utils.utcnow()
self.save(session=session)
def __setitem__(self, key, value):
setattr(self, key, value)
def __getitem__(self, key):
return getattr(self, key)
def get(self, key, default=None):
return getattr(self, key, default)
def __iter__(self):
self._i = iter(object_mapper(self).columns)
return self
def next(self):
n = self._i.next().name
return n, getattr(self, n)
def update(self, values):
"""Make the model object behave like a dict"""
for k, v in values.iteritems():
setattr(self, k, v)
def iteritems(self):
"""Make the model object behave like a dict.
Includes attributes from joins."""
local = dict(self)
joined = dict([(k, v) for k, v in self.__dict__.iteritems()
if not k[0] == '_'])
local.update(joined)
return local.iteritems()
class Service(BASE, NovaBase):
"""Represents a running service on a host."""
__tablename__ = 'services'
id = Column(Integer, primary_key=True)
host = Column(String(255)) # , ForeignKey('hosts.id'))
binary = Column(String(255))
topic = Column(String(255))
report_count = Column(Integer, nullable=False, default=0)
disabled = Column(Boolean, default=False)
availability_zone = Column(String(255), default='nova')
class ComputeNode(BASE, NovaBase):
"""Represents a running compute service on a host."""
__tablename__ = 'compute_nodes'
id = Column(Integer, primary_key=True)
service_id = Column(Integer, ForeignKey('services.id'), nullable=True)
service = relationship(Service,
backref=backref('compute_node'),
foreign_keys=service_id,
primaryjoin='and_('
'ComputeNode.service_id == Service.id,'
'ComputeNode.deleted == False)')
vcpus = Column(Integer)
memory_mb = Column(Integer)
local_gb = Column(Integer)
vcpus_used = Column(Integer)
memory_mb_used = Column(Integer)
local_gb_used = Column(Integer)
hypervisor_type = Column(Text)
hypervisor_version = Column(Integer)
hypervisor_hostname = Column(String(255))
# Free Ram, amount of activity (resize, migration, boot, etc) and
# the number of running VM's are a good starting point for what's
# important when making scheduling decisions.
#
# NOTE(sandy): We'll need to make this extensible for other schedulers.
free_ram_mb = Column(Integer)
free_disk_gb = Column(Integer)
current_workload = Column(Integer)
running_vms = Column(Integer)
# Note(masumotok): Expected Strings example:
#
# '{"arch":"x86_64",
# "model":"Nehalem",
# "topology":{"sockets":1, "threads":2, "cores":3},
# "features":["tdtscp", "xtpr"]}'
#
# Points are "json translatable" and it must have all dictionary keys
# above, since it is copied from <cpu> tag of getCapabilities()
# (See libvirt.virtConnection).
cpu_info = Column(Text, nullable=True)
disk_available_least = Column(Integer)
class Certificate(BASE, NovaBase):
"""Represents a an x509 certificate"""
__tablename__ = 'certificates'
id = Column(Integer, primary_key=True)
user_id = Column(String(255))
project_id = Column(String(255))
file_name = Column(String(255))
class Instance(BASE, NovaBase):
"""Represents a guest vm."""
__tablename__ = 'instances'
injected_files = []
id = Column(Integer, primary_key=True, autoincrement=True)
@property
def name(self):
try:
base_name = FLAGS.instance_name_template % self.id
except TypeError:
# Support templates like "uuid-%(uuid)s", etc.
info = {}
for key, value in self.iteritems():
# prevent recursion if someone specifies %(name)s
# %(name)s will not be valid.
if key == 'name':
continue
info[key] = value
try:
base_name = FLAGS.instance_name_template % info
except KeyError:
base_name = self.uuid
if getattr(self, '_rescue', False):
base_name += "-rescue"
return base_name
user_id = Column(String(255))
project_id = Column(String(255))
image_ref = Column(String(255))
kernel_id = Column(String(255))
ramdisk_id = Column(String(255))
server_name = Column(String(255))
# image_ref = Column(Integer, ForeignKey('images.id'), nullable=True)
# kernel_id = Column(Integer, ForeignKey('images.id'), nullable=True)
# ramdisk_id = Column(Integer, ForeignKey('images.id'), nullable=True)
# ramdisk = relationship(Ramdisk, backref=backref('instances', order_by=id))
# kernel = relationship(Kernel, backref=backref('instances', order_by=id))
# project = relationship(Project, backref=backref('instances', order_by=id))
launch_index = Column(Integer)
key_name = Column(String(255))
key_data = Column(Text)
power_state = Column(Integer)
vm_state = Column(String(255))
task_state = Column(String(255))
memory_mb = Column(Integer)
vcpus = Column(Integer)
root_gb = Column(Integer)
ephemeral_gb = Column(Integer)
hostname = Column(String(255))
host = Column(String(255)) # , ForeignKey('hosts.id'))
# *not* flavor_id
instance_type_id = Column(Integer)
user_data = Column(Text)
reservation_id = Column(String(255))
scheduled_at = Column(DateTime)
launched_at = Column(DateTime)
terminated_at = Column(DateTime)
availability_zone = Column(String(255))
# User editable field for display in user-facing UIs
display_name = Column(String(255))
display_description = Column(String(255))
# To remember on which host a instance booted.
# An instance may have moved to another host by live migraiton.
launched_on = Column(Text)
locked = Column(Boolean)
os_type = Column(String(255))
architecture = Column(String(255))
vm_mode = Column(String(255))
uuid = Column(String(36))
root_device_name = Column(String(255))
default_ephemeral_device = Column(String(255), nullable=True)
default_swap_device = Column(String(255), nullable=True)
config_drive = Column(String(255))
# User editable field meant to represent what ip should be used
# to connect to the instance
access_ip_v4 = Column(String(255))
access_ip_v6 = Column(String(255))
auto_disk_config = Column(Boolean())
progress = Column(Integer)
# EC2 instance_initiated_shutdown_teminate
# True: -> 'terminate'
# False: -> 'stop'
shutdown_terminate = Column(Boolean(), default=True, nullable=False)
# EC2 disable_api_termination
disable_terminate = Column(Boolean(), default=False, nullable=False)
# OpenStack compute cell name
cell_name = Column(String(255))
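def _example_dict_access():
    # Small sketch of the dict-style helpers inherited from NovaBase, exercised
    # on an unsaved instance (no database session involved; values are examples).
    inst = Instance(memory_mb=2048, vcpus=2, host='compute-01')
    inst['display_name'] = 'test-vm'          # __setitem__ -> setattr
    inst.update({'vm_state': 'building'})     # dict-style bulk update
    return inst.get('vcpus'), dict(inst)['display_name']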
class InstanceInfoCache(BASE, NovaBase):
"""
Represents a cache of information about an instance
"""
__tablename__ = 'instance_info_caches'
id = Column(Integer, primary_key=True, autoincrement=True)
# text column used for storing a json object of network data for api
network_info = Column(Text)
instance_id = Column(String(36), ForeignKey('instances.uuid'),
nullable=False, unique=True)
instance = relationship(Instance,
backref=backref('info_cache', uselist=False),
foreign_keys=instance_id,
primaryjoin=instance_id == Instance.uuid)
class InstanceTypes(BASE, NovaBase):
"""Represent possible instance_types or flavor of VM offered"""
__tablename__ = "instance_types"
id = Column(Integer, primary_key=True)
name = Column(String(255))
memory_mb = Column(Integer)
vcpus = Column(Integer)
root_gb = Column(Integer)
ephemeral_gb = Column(Integer)
flavorid = Column(String(255))
swap = Column(Integer, nullable=False, default=0)
rxtx_factor = Column(Float, nullable=False, default=1)
vcpu_weight = Column(Integer, nullable=True)
instances = relationship(Instance,
backref=backref('instance_type', uselist=False),
foreign_keys=id,
primaryjoin='and_('
'Instance.instance_type_id == '
'InstanceTypes.id, '
'InstanceTypes.deleted == False)')
class Volume(BASE, NovaBase):
"""Represents a block storage device that can be attached to a vm."""
__tablename__ = 'volumes'
id = Column(String(36), primary_key=True)
@property
def name(self):
return FLAGS.volume_name_template % self.id
ec2_id = Column(Integer)
user_id = Column(String(255))
project_id = Column(String(255))
snapshot_id = Column(String(36))
host = Column(String(255)) # , ForeignKey('hosts.id'))
size = Column(Integer)
availability_zone = Column(String(255)) # TODO(vish): foreign key?
instance_uuid = Column(String(36))
mountpoint = Column(String(255))
attach_time = Column(String(255)) # TODO(vish): datetime
status = Column(String(255)) # TODO(vish): enum?
attach_status = Column(String(255)) # TODO(vish): enum
scheduled_at = Column(DateTime)
launched_at = Column(DateTime)
terminated_at = Column(DateTime)
display_name = Column(String(255))
display_description = Column(String(255))
provider_location = Column(String(255))
provider_auth = Column(String(255))
volume_type_id = Column(Integer)
class VolumeMetadata(BASE, NovaBase):
"""Represents a metadata key/value pair for a volume"""
__tablename__ = 'volume_metadata'
id = Column(Integer, primary_key=True)
key = Column(String(255))
value = Column(String(255))
volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=False)
volume = relationship(Volume, backref="volume_metadata",
foreign_keys=volume_id,
primaryjoin='and_('
'VolumeMetadata.volume_id == Volume.id,'
'VolumeMetadata.deleted == False)')
class VolumeTypes(BASE, NovaBase):
"""Represent possible volume_types of volumes offered"""
__tablename__ = "volume_types"
id = Column(Integer, primary_key=True)
name = Column(String(255))
volumes = relationship(Volume,
backref=backref('volume_type', uselist=False),
foreign_keys=id,
primaryjoin='and_('
'Volume.volume_type_id == VolumeTypes.id, '
'VolumeTypes.deleted == False)')
class VolumeTypeExtraSpecs(BASE, NovaBase):
"""Represents additional specs as key/value pairs for a volume_type"""
__tablename__ = 'volume_type_extra_specs'
id = Column(Integer, primary_key=True)
key = Column(String(255))
value = Column(String(255))
volume_type_id = Column(Integer, ForeignKey('volume_types.id'),
nullable=False)
volume_type = relationship(VolumeTypes, backref="extra_specs",
foreign_keys=volume_type_id,
primaryjoin='and_('
'VolumeTypeExtraSpecs.volume_type_id == VolumeTypes.id,'
'VolumeTypeExtraSpecs.deleted == False)')
class Quota(BASE, NovaBase):
"""Represents a single quota override for a project.
If there is no row for a given project id and resource, then the
default for the quota class is used. If there is no row for a
given quota class and resource, then the default for the
deployment is used. If the row is present but the hard limit is
Null, then the resource is unlimited.
"""
__tablename__ = 'quotas'
id = Column(Integer, primary_key=True)
project_id = Column(String(255), index=True)
resource = Column(String(255))
hard_limit = Column(Integer, nullable=True)
class QuotaClass(BASE, NovaBase):
"""Represents a single quota override for a quota class.
If there is no row for a given quota class and resource, then the
default for the deployment is used. If the row is present but the
hard limit is Null, then the resource is unlimited.
"""
__tablename__ = 'quota_classes'
id = Column(Integer, primary_key=True)
class_name = Column(String(255), index=True)
resource = Column(String(255))
hard_limit = Column(Integer, nullable=True)
class Snapshot(BASE, NovaBase):
"""Represents a block storage device that can be attached to a vm."""
__tablename__ = 'snapshots'
id = Column(String(36), primary_key=True)
@property
def name(self):
return FLAGS.snapshot_name_template % self.id
@property
def volume_name(self):
return FLAGS.volume_name_template % self.volume_id
user_id = Column(String(255))
project_id = Column(String(255))
volume_id = Column(String(36))
status = Column(String(255))
progress = Column(String(255))
volume_size = Column(Integer)
display_name = Column(String(255))
display_description = Column(String(255))
class BlockDeviceMapping(BASE, NovaBase):
"""Represents block device mapping that is defined by EC2"""
__tablename__ = "block_device_mapping"
id = Column(Integer, primary_key=True, autoincrement=True)
instance_uuid = Column(Integer, ForeignKey('instances.uuid'),
nullable=False)
instance = relationship(Instance,
                            backref=backref('block_device_mapping'),
foreign_keys=instance_uuid,
primaryjoin='and_(BlockDeviceMapping.'
'instance_uuid=='
'Instance.uuid,'
'BlockDeviceMapping.deleted=='
'False)')
device_name = Column(String(255), nullable=False)
# default=False for compatibility of the existing code.
# With EC2 API,
# default True for ami specified device.
# default False for created with other timing.
delete_on_termination = Column(Boolean, default=False)
# for ephemeral device
virtual_name = Column(String(255), nullable=True)
# for snapshot or volume
snapshot_id = Column(String(36), ForeignKey('snapshots.id'))
# outer join
snapshot = relationship(Snapshot,
foreign_keys=snapshot_id)
volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=True)
volume = relationship(Volume,
foreign_keys=volume_id)
volume_size = Column(Integer, nullable=True)
# for no device to suppress devices.
no_device = Column(Boolean, nullable=True)
connection_info = Column(Text, nullable=True)
class IscsiTarget(BASE, NovaBase):
"""Represates an iscsi target for a given host"""
__tablename__ = 'iscsi_targets'
__table_args__ = (schema.UniqueConstraint("target_num", "host"),
{'mysql_engine': 'InnoDB'})
id = Column(Integer, primary_key=True)
target_num = Column(Integer)
host = Column(String(255))
volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=True)
volume = relationship(Volume,
backref=backref('iscsi_target', uselist=False),
foreign_keys=volume_id,
primaryjoin='and_(IscsiTarget.volume_id==Volume.id,'
'IscsiTarget.deleted==False)')
class SecurityGroupInstanceAssociation(BASE, NovaBase):
__tablename__ = 'security_group_instance_association'
id = Column(Integer, primary_key=True)
security_group_id = Column(Integer, ForeignKey('security_groups.id'))
instance_id = Column(Integer, ForeignKey('instances.id'))
class SecurityGroup(BASE, NovaBase):
"""Represents a security group."""
__tablename__ = 'security_groups'
id = Column(Integer, primary_key=True)
name = Column(String(255))
description = Column(String(255))
user_id = Column(String(255))
project_id = Column(String(255))
instances = relationship(Instance,
secondary="security_group_instance_association",
primaryjoin='and_('
'SecurityGroup.id == '
'SecurityGroupInstanceAssociation.security_group_id,'
'SecurityGroupInstanceAssociation.deleted == False,'
'SecurityGroup.deleted == False)',
secondaryjoin='and_('
'SecurityGroupInstanceAssociation.instance_id == Instance.id,'
# (anthony) the condition below shouldn't be necessary now that the
# association is being marked as deleted. However, removing this
# may cause existing deployments to choke, so I'm leaving it
'Instance.deleted == False)',
backref='security_groups')
class SecurityGroupIngressRule(BASE, NovaBase):
"""Represents a rule in a security group."""
__tablename__ = 'security_group_rules'
id = Column(Integer, primary_key=True)
parent_group_id = Column(Integer, ForeignKey('security_groups.id'))
parent_group = relationship("SecurityGroup", backref="rules",
foreign_keys=parent_group_id,
primaryjoin='and_('
'SecurityGroupIngressRule.parent_group_id == SecurityGroup.id,'
'SecurityGroupIngressRule.deleted == False)')
protocol = Column(String(5)) # "tcp", "udp", or "icmp"
from_port = Column(Integer)
to_port = Column(Integer)
cidr = Column(String(255))
# Note: This is not the parent SecurityGroup. It's SecurityGroup we're
# granting access for.
group_id = Column(Integer, ForeignKey('security_groups.id'))
grantee_group = relationship("SecurityGroup",
foreign_keys=group_id,
primaryjoin='and_('
'SecurityGroupIngressRule.group_id == SecurityGroup.id,'
'SecurityGroupIngressRule.deleted == False)')
class ProviderFirewallRule(BASE, NovaBase):
"""Represents a rule in a security group."""
__tablename__ = 'provider_fw_rules'
id = Column(Integer, primary_key=True)
protocol = Column(String(5)) # "tcp", "udp", or "icmp"
from_port = Column(Integer)
to_port = Column(Integer)
cidr = Column(String(255))
class KeyPair(BASE, NovaBase):
"""Represents a public key pair for ssh."""
__tablename__ = 'key_pairs'
id = Column(Integer, primary_key=True)
name = Column(String(255))
user_id = Column(String(255))
fingerprint = Column(String(255))
public_key = Column(Text)
class Migration(BASE, NovaBase):
"""Represents a running host-to-host migration."""
__tablename__ = 'migrations'
id = Column(Integer, primary_key=True, nullable=False)
# NOTE(tr3buchet): the ____compute variables are instance['host']
source_compute = Column(String(255))
dest_compute = Column(String(255))
# NOTE(tr3buchet): dest_host, btw, is an ip address
dest_host = Column(String(255))
old_instance_type_id = Column(Integer())
new_instance_type_id = Column(Integer())
instance_uuid = Column(String(255), ForeignKey('instances.uuid'),
nullable=True)
#TODO(_cerberus_): enum
status = Column(String(255))
class Network(BASE, NovaBase):
"""Represents a network."""
__tablename__ = 'networks'
__table_args__ = (schema.UniqueConstraint("vpn_public_address",
"vpn_public_port"),
{'mysql_engine': 'InnoDB'})
id = Column(Integer, primary_key=True)
label = Column(String(255))
injected = Column(Boolean, default=False)
cidr = Column(String(255), unique=True)
cidr_v6 = Column(String(255), unique=True)
multi_host = Column(Boolean, default=False)
gateway_v6 = Column(String(255))
netmask_v6 = Column(String(255))
netmask = Column(String(255))
bridge = Column(String(255))
bridge_interface = Column(String(255))
gateway = Column(String(255))
broadcast = Column(String(255))
dns1 = Column(String(255))
dns2 = Column(String(255))
vlan = Column(Integer)
vpn_public_address = Column(String(255))
vpn_public_port = Column(Integer)
vpn_private_address = Column(String(255))
dhcp_start = Column(String(255))
rxtx_base = Column(Integer)
project_id = Column(String(255))
priority = Column(Integer)
host = Column(String(255)) # , ForeignKey('hosts.id'))
uuid = Column(String(36))
class VirtualInterface(BASE, NovaBase):
"""Represents a virtual interface on an instance."""
__tablename__ = 'virtual_interfaces'
id = Column(Integer, primary_key=True)
address = Column(String(255), unique=True)
network_id = Column(Integer, nullable=False)
instance_id = Column(Integer, nullable=False)
uuid = Column(String(36))
# TODO(vish): can these both come from the same baseclass?
class FixedIp(BASE, NovaBase):
"""Represents a fixed ip for an instance."""
__tablename__ = 'fixed_ips'
id = Column(Integer, primary_key=True)
address = Column(String(255))
network_id = Column(Integer, nullable=True)
virtual_interface_id = Column(Integer, nullable=True)
instance_id = Column(Integer, nullable=True)
# associated means that a fixed_ip has its instance_id column set
    # allocated means that a fixed_ip has its virtual_interface_id column set
allocated = Column(Boolean, default=False)
# leased means dhcp bridge has leased the ip
leased = Column(Boolean, default=False)
reserved = Column(Boolean, default=False)
host = Column(String(255))
class FloatingIp(BASE, NovaBase):
"""Represents a floating ip that dynamically forwards to a fixed ip."""
__tablename__ = 'floating_ips'
id = Column(Integer, primary_key=True)
address = Column(String(255))
fixed_ip_id = Column(Integer, nullable=True)
project_id = Column(String(255))
host = Column(String(255)) # , ForeignKey('hosts.id'))
auto_assigned = Column(Boolean, default=False, nullable=False)
pool = Column(String(255))
interface = Column(String(255))
class AuthToken(BASE, NovaBase):
"""Represents an authorization token for all API transactions.
Fields are a string representing the actual token and a user id for
mapping to the actual user
"""
__tablename__ = 'auth_tokens'
token_hash = Column(String(255), primary_key=True)
user_id = Column(String(255))
server_management_url = Column(String(255))
storage_url = Column(String(255))
cdn_management_url = Column(String(255))
class User(BASE, NovaBase):
"""Represents a user."""
__tablename__ = 'users'
id = Column(String(255), primary_key=True)
name = Column(String(255))
access_key = Column(String(255))
secret_key = Column(String(255))
is_admin = Column(Boolean)
class Project(BASE, NovaBase):
"""Represents a project."""
__tablename__ = 'projects'
id = Column(String(255), primary_key=True)
name = Column(String(255))
description = Column(String(255))
project_manager = Column(String(255), ForeignKey(User.id))
members = relationship(User,
secondary='user_project_association',
backref='projects')
class DNSDomain(BASE, NovaBase):
"""Represents a DNS domain with availability zone or project info."""
__tablename__ = 'dns_domains'
domain = Column(String(512), primary_key=True)
scope = Column(String(255))
availability_zone = Column(String(255))
project_id = Column(String(255))
project = relationship(Project,
primaryjoin=project_id == Project.id,
foreign_keys=[Project.id],
uselist=False)
class UserProjectRoleAssociation(BASE, NovaBase):
__tablename__ = 'user_project_role_association'
user_id = Column(String(255), primary_key=True)
user = relationship(User,
primaryjoin=user_id == User.id,
foreign_keys=[User.id],
uselist=False)
project_id = Column(String(255), primary_key=True)
project = relationship(Project,
primaryjoin=project_id == Project.id,
foreign_keys=[Project.id],
uselist=False)
role = Column(String(255), primary_key=True)
ForeignKeyConstraint(['user_id',
'project_id'],
['user_project_association.user_id',
'user_project_association.project_id'])
class UserRoleAssociation(BASE, NovaBase):
__tablename__ = 'user_role_association'
user_id = Column(String(255), ForeignKey('users.id'), primary_key=True)
user = relationship(User, backref='roles')
role = Column(String(255), primary_key=True)
class UserProjectAssociation(BASE, NovaBase):
__tablename__ = 'user_project_association'
user_id = Column(String(255), ForeignKey(User.id), primary_key=True)
project_id = Column(String(255), ForeignKey(Project.id), primary_key=True)
class ConsolePool(BASE, NovaBase):
"""Represents pool of consoles on the same physical node."""
__tablename__ = 'console_pools'
id = Column(Integer, primary_key=True)
address = Column(String(255))
username = Column(String(255))
password = Column(String(255))
console_type = Column(String(255))
public_hostname = Column(String(255))
host = Column(String(255))
compute_host = Column(String(255))
class Console(BASE, NovaBase):
"""Represents a console session for an instance."""
__tablename__ = 'consoles'
id = Column(Integer, primary_key=True)
instance_name = Column(String(255))
instance_id = Column(Integer)
password = Column(String(255))
port = Column(Integer, nullable=True)
pool_id = Column(Integer, ForeignKey('console_pools.id'))
pool = relationship(ConsolePool, backref=backref('consoles'))
class InstanceMetadata(BASE, NovaBase):
"""Represents a user-provided metadata key/value pair for an instance"""
__tablename__ = 'instance_metadata'
id = Column(Integer, primary_key=True)
key = Column(String(255))
value = Column(String(255))
instance_id = Column(Integer, ForeignKey('instances.id'), nullable=False)
instance = relationship(Instance, backref="metadata",
foreign_keys=instance_id,
primaryjoin='and_('
'InstanceMetadata.instance_id == Instance.id,'
'InstanceMetadata.deleted == False)')
class InstanceSystemMetadata(BASE, NovaBase):
"""Represents a system-owned metadata key/value pair for an instance"""
__tablename__ = 'instance_system_metadata'
id = Column(Integer, primary_key=True)
key = Column(String(255))
value = Column(String(255))
instance_uuid = Column(String(36),
ForeignKey('instances.uuid'),
nullable=False)
primary_join = ('and_(InstanceSystemMetadata.instance_uuid == '
'Instance.uuid, InstanceSystemMetadata.deleted == False)')
instance = relationship(Instance, backref="system_metadata",
foreign_keys=instance_uuid,
primaryjoin=primary_join)
class InstanceTypeExtraSpecs(BASE, NovaBase):
"""Represents additional specs as key/value pairs for an instance_type"""
__tablename__ = 'instance_type_extra_specs'
id = Column(Integer, primary_key=True)
key = Column(String(255))
value = Column(String(255))
instance_type_id = Column(Integer, ForeignKey('instance_types.id'),
nullable=False)
instance_type = relationship(InstanceTypes, backref="extra_specs",
foreign_keys=instance_type_id,
primaryjoin='and_('
'InstanceTypeExtraSpecs.instance_type_id == InstanceTypes.id,'
'InstanceTypeExtraSpecs.deleted == False)')
class Cell(BASE, NovaBase):
"""Represents parent and child cells of this cell."""
__tablename__ = 'cells'
id = Column(Integer, primary_key=True)
name = Column(String(255))
api_url = Column(String(255))
username = Column(String(255))
password = Column(String(255))
weight_offset = Column(Float(), default=0.0)
weight_scale = Column(Float(), default=1.0)
is_parent = Column(Boolean())
rpc_host = Column(String(255))
rpc_port = Column(Integer())
rpc_virtual_host = Column(String(255))
class AggregateHost(BASE, NovaBase):
"""Represents a host that is member of an aggregate."""
__tablename__ = 'aggregate_hosts'
id = Column(Integer, primary_key=True, autoincrement=True)
host = Column(String(255), unique=True)
aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False)
class AggregateMetadata(BASE, NovaBase):
"""Represents a metadata key/value pair for an aggregate."""
__tablename__ = 'aggregate_metadata'
id = Column(Integer, primary_key=True)
key = Column(String(255), nullable=False)
value = Column(String(255), nullable=False)
aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False)
class Aggregate(BASE, NovaBase):
"""Represents a cluster of hosts that exists in this zone."""
__tablename__ = 'aggregates'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(255), unique=True)
operational_state = Column(String(255), nullable=False)
availability_zone = Column(String(255), nullable=False)
_hosts = relationship(AggregateHost,
secondary="aggregate_hosts",
primaryjoin='and_('
'Aggregate.id == AggregateHost.aggregate_id,'
'AggregateHost.deleted == False,'
'Aggregate.deleted == False)',
secondaryjoin='and_('
'AggregateHost.aggregate_id == Aggregate.id, '
'AggregateHost.deleted == False,'
'Aggregate.deleted == False)',
backref='aggregates')
_metadata = relationship(AggregateMetadata,
secondary="aggregate_metadata",
primaryjoin='and_('
'Aggregate.id == AggregateMetadata.aggregate_id,'
'AggregateMetadata.deleted == False,'
'Aggregate.deleted == False)',
secondaryjoin='and_('
'AggregateMetadata.aggregate_id == Aggregate.id, '
'AggregateMetadata.deleted == False,'
'Aggregate.deleted == False)',
backref='aggregates')
@property
def hosts(self):
return [h.host for h in self._hosts]
@property
def metadetails(self):
return dict([(m.key, m.value) for m in self._metadata])
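    # Illustrative note (hypothetical data, not part of the original model): for
    # an Aggregate joined to two AggregateHost rows, .hosts returns something
    # like ['node1', 'node2'], and .metadetails flattens the AggregateMetadata
    # rows into a plain {key: value} dict.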
class AgentBuild(BASE, NovaBase):
"""Represents an agent build."""
__tablename__ = 'agent_builds'
id = Column(Integer, primary_key=True)
hypervisor = Column(String(255))
os = Column(String(255))
architecture = Column(String(255))
version = Column(String(255))
url = Column(String(255))
md5hash = Column(String(255))
class BandwidthUsage(BASE, NovaBase):
"""Cache for instance bandwidth usage data pulled from the hypervisor"""
__tablename__ = 'bw_usage_cache'
id = Column(Integer, primary_key=True, nullable=False)
uuid = Column(String(36), nullable=False)
mac = Column(String(255), nullable=False)
start_period = Column(DateTime, nullable=False)
last_refreshed = Column(DateTime)
bw_in = Column(BigInteger)
bw_out = Column(BigInteger)
class S3Image(BASE, NovaBase):
"""Compatibility layer for the S3 image service talking to Glance"""
__tablename__ = 's3_images'
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
class VolumeIdMapping(BASE, NovaBase):
"""Compatability layer for the EC2 volume service"""
__tablename__ = 'volume_id_mappings'
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
class SnapshotIdMapping(BASE, NovaBase):
"""Compatability layer for the EC2 snapshot service"""
__tablename__ = 'snapshot_id_mappings'
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
class SMFlavors(BASE, NovaBase):
"""Represents a flavor for SM volumes."""
__tablename__ = 'sm_flavors'
id = Column(Integer(), primary_key=True)
label = Column(String(255))
description = Column(String(255))
class SMBackendConf(BASE, NovaBase):
"""Represents the connection to the backend for SM."""
__tablename__ = 'sm_backend_config'
id = Column(Integer(), primary_key=True)
flavor_id = Column(Integer, ForeignKey('sm_flavors.id'), nullable=False)
sr_uuid = Column(String(255))
sr_type = Column(String(255))
config_params = Column(String(2047))
class SMVolume(BASE, NovaBase):
__tablename__ = 'sm_volume'
id = Column(String(36), ForeignKey(Volume.id), primary_key=True)
backend_id = Column(Integer, ForeignKey('sm_backend_config.id'),
nullable=False)
vdi_uuid = Column(String(255))
class InstanceFault(BASE, NovaBase):
__tablename__ = 'instance_faults'
id = Column(Integer(), primary_key=True, autoincrement=True)
instance_uuid = Column(String(36),
ForeignKey('instances.uuid'),
nullable=False)
code = Column(Integer(), nullable=False)
message = Column(String(255))
details = Column(Text)
def register_models():
"""Register Models and create metadata.
Called from nova.db.sqlalchemy.__init__ as part of loading the driver,
it will never need to be called explicitly elsewhere unless the
connection is lost and needs to be reestablished.
"""
from sqlalchemy import create_engine
models = (AgentBuild,
Aggregate,
AggregateHost,
AggregateMetadata,
AuthToken,
Certificate,
Cell,
Console,
ConsolePool,
FixedIp,
FloatingIp,
Instance,
InstanceFault,
InstanceMetadata,
InstanceTypeExtraSpecs,
InstanceTypes,
IscsiTarget,
Migration,
Network,
Project,
SecurityGroup,
SecurityGroupIngressRule,
SecurityGroupInstanceAssociation,
Service,
SMBackendConf,
SMFlavors,
SMVolume,
User,
Volume,
VolumeMetadata,
VolumeTypeExtraSpecs,
VolumeTypes,
VolumeIdMapping,
SnapshotIdMapping,
)
engine = create_engine(FLAGS.sql_connection, echo=False)
for model in models:
model.metadata.create_all(engine)
| apache-2.0 | -8,264,634,571,435,047,000 | 34.951174 | 79 | 0.619385 | false |
cevaris/pants | tests/python/pants_test/backend/python/tasks2/test_gather_sources.py | 1 | 2951 | # coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pex.interpreter import PythonInterpreter
from pants.backend.python.interpreter_cache import PythonInterpreterCache
from pants.backend.python.python_setup import PythonRepos
from pants.backend.python.python_setup import PythonSetup
from pants.backend.python.targets.python_library import PythonLibrary
from pants.backend.python.tasks2.gather_sources import GatherSources
from pants.build_graph.resources import Resources
from pants_test.tasks.task_test_base import TaskTestBase
class GatherSourcesTest(TaskTestBase):
@classmethod
def task_type(cls):
return GatherSources
def test_gather_sources(self):
filemap = {
'src/python/foo.py': 'foo_py_content',
'src/python/bar.py': 'bar_py_content',
'src/python/baz.py': 'baz_py_content',
'resources/qux/quux.txt': 'quux_txt_content',
}
for rel_path, content in filemap.items():
self.create_file(rel_path, content)
sources1 = self.make_target(spec='//:sources1_tgt', target_type=PythonLibrary,
sources=['src/python/foo.py', 'src/python/bar.py'])
sources2 = self.make_target(spec='//:sources2_tgt', target_type=PythonLibrary,
sources=['src/python/baz.py'])
resources = self.make_target(spec='//:resources_tgt', target_type=Resources,
sources=['resources/qux/quux.txt'])
pex = self._gather_sources([sources1, sources2, resources])
pex_root = pex.cmdline()[1]
for rel_path, expected_content in filemap.items():
with open(os.path.join(pex_root, rel_path)) as infile:
content = infile.read()
self.assertEquals(expected_content, content)
def _gather_sources(self, target_roots):
context = self.context(target_roots=target_roots, for_subsystems=[PythonSetup, PythonRepos])
# We must get an interpreter via the cache, instead of using PythonInterpreter.get() directly,
# to ensure that the interpreter has setuptools and wheel support.
interpreter = PythonInterpreter.get()
interpreter_cache = PythonInterpreterCache(PythonSetup.global_instance(),
PythonRepos.global_instance(),
logger=context.log.debug)
interpreters = interpreter_cache.setup(paths=[os.path.dirname(interpreter.binary)],
filters=[str(interpreter.identity.requirement)])
context.products.get_data(PythonInterpreter, lambda: interpreters[0])
task = self.create_task(context)
task.execute()
return context.products.get_data(GatherSources.PYTHON_SOURCES)
| apache-2.0 | 4,771,192,398,828,748,000 | 43.044776 | 98 | 0.680108 | false |
kyamagu/psd2svg | src/psd2svg/__main__.py | 1 | 1695 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import argparse
import logging
import os
from psd2svg import psd2svg
def main():
parser = argparse.ArgumentParser(description='Convert PSD file to SVG')
parser.add_argument(
'input', metavar='INPUT', type=str, help='Input PSD file path or URL')
parser.add_argument(
'output', metavar='PATH', type=str, nargs='?', default='.',
help='Output file or directory. When directory is specified, filename'
' is automatically inferred from input')
parser.add_argument(
'--resource-path', metavar='PATH', type=str, default=None,
help='Resource path relative to output.')
parser.add_argument(
'--rasterizer', metavar='METHOD', default='chromium', type=str,
help='Specify which rasterizer to use. default chromium.')
parser.add_argument(
'--loglevel', metavar='LEVEL', default='WARNING',
help='Logging level, default WARNING')
args = parser.parse_args()
logging.basicConfig(level=getattr(logging, args.loglevel.upper(),
'WARNING'))
prefix, ext = os.path.splitext(args.output)
    if ext.lower() in (".png", ".jpg", ".jpeg", ".gif", ".tiff"):
from psd2svg.rasterizer import create_rasterizer
rasterizer = create_rasterizer(args.rasterizer)
svg_file = prefix + ".svg"
psd2svg(args.input, svg_file, resource_path=args.resource_path)
image = rasterizer.rasterize(svg_file)
image.save(args.output)
else:
psd2svg(args.input, args.output, resource_path=args.resource_path)
if __name__ == '__main__':
main()
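# Usage sketch (assumed invocation, not from the original file):
# `python -m psd2svg input.psd out.png --rasterizer chromium` writes an
# intermediate out.svg and rasterizes it; non-raster output extensions are
# passed straight to psd2svg() without rasterization.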
| mit | 7,406,820,582,032,174,000 | 38.418605 | 78 | 0.637168 | false |
hdb3/ministack | merlyn.py | 1 | 1184 |
spec = {
'name' : "The devil's work...",
'external network name' : "exnet3",
'keypair' : "openstack_rsa",
'controller' : "r720",
'dns' : "10.30.65.200",
'credentials' : { 'user' : "nic", 'password' : "nic", 'project' : "nic" },
'Networks' : [
    { 'name' : "merlynctl" , "start": "172.16.1.2", "end": "172.16.1.100", "subnet": "172.16.1.0/24", "gateway": "172.16.1.1" },
    { 'name' : "merlyn201" , "start": "192.168.1.201", "end": "192.168.1.202", "subnet": "192.168.1.0/24", "vlan": 201, "physical_network": "vlannet" },
    { 'name' : "merlyn202" , "start": "192.168.1.202", "end": "192.168.1.203", "subnet": "192.168.1.0/24", "vlan": 202, "physical_network": "vlannet" }
],
'Hosts' : [
{ 'name' : "monos" , 'image' : "centos7.2" , 'flavor':"m1.large" , 'net' : [ ("merlynctl","*","10.30.65.130")] },
{ 'name' : "m201" , 'image' : "centos7.2" , 'flavor':"m1.medium" , 'net' : [ ("merlynctl","*","10.30.65.131"),("merlyn201" , "192.168.1.201") ] },
{ 'name' : "m202" , 'image' : "centos7.2" , 'flavor':"m1.medium" , 'net' : [ ("merlynctl","*","10.30.65.132"),("merlyn202" , "192.168.1.202") ] },
]
}
| apache-2.0 | 3,854,112,634,913,775,600 | 61.315789 | 157 | 0.495777 | false |
phockett/ePSproc | epsproc/vol/setOptions.py | 1 | 3675 |
"""
ePSproc vol module setOptions
Functions to read & write default set of plotting options to file.
If run as main:
- Check existing file from passed arg, or in default location (epsproc/vol/plotOptions.json)
- Read file if exists.
- If file is missing, prompt to write defaults to file.
08/08/20 v1, dev. See also set_plot_options_json.ipynb
"""
import json
import pprint
pp = pprint.PrettyPrinter(indent=4)
import sys
import os
import inspect
from pathlib import Path
def setLocalOptions():
optionsLocal = {}
globalSettings = {"note":"Global plot settings, used as defaults. To change for session, overwrite in local dict. To change permanently, overwrite in file plotOptions.json. To reset, use `epsproc/vol/set_plot_options_json.ipynb` or .py.",
"pType":"Abs", "interactive":True, "inline":True, "animate":False,
"isoLevels":6, "isoValsAbs":None, "isoValsPC":None, "isoValsGlobal":True,
"opacity":0.5,
"subplot":False
# "plotter":"" # Set plotter dynamically based on options above...?
}
optionsLocal["global"] = globalSettings
BGplotterSettings = {"addAxis" : True,
"kwargs" : {} # Set empty kwargs dict for passing any other params at run time.
}
optionsLocal["BGplotter"] = BGplotterSettings
return optionsLocal
# def setOptionsFile(optionsFile = None):
def readOptionsFile(optionsFile, verbose = False):
# Set path wrapper in case str was passed.
optionsFile = Path(optionsFile)
if optionsFile.is_file():
# try:
with open(optionsFile) as json_file:
optionsFileJSON = json.load(json_file)
print(f"\n*** Read existing plot options from file {optionsFile} OK.")
if verbose:
print(json.dumps(optionsFileJSON, sort_keys=False, indent=4))
return optionsFileJSON
else:
print(f"\n*** Plot options file {optionsFile} not found, using defaults.")
return setLocalOptions()
def writeOptionsFile(optionsFile, optionsLocal, owFlag = False):
# Set path wrapper in case str was passed.
optionsFile = Path(optionsFile)
print(f"*** Writing plot options to file {optionsFile}")
if optionsFile.is_file():
owFlag = input(f"File {optionsFile} exists, overwrite (y/n)? ")
else:
owFlag = 'y'
    if owFlag == 'y':
        with open(optionsFile, 'w') as json_file:
            json.dump(optionsLocal, json_file, indent=4, sort_keys=False) # Set indent + sort keys for nicer (HF) file output.
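# Usage sketch (assumed file name): optionsLocal = readOptionsFile('plotOptions.json')
# returns the saved options if the file exists, otherwise the defaults from
# setLocalOptions(); writeOptionsFile('plotOptions.json', optionsLocal) persists
# them, prompting before overwriting an existing file.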
if __name__ == "__main__":
# Check passed args
    if len(sys.argv) > 1:
optionsFile = Path(sys.argv[1])
else:
optionsFile = None
# Set default path based on file location - may not be robust?
# From https://stackoverflow.com/a/12154601
if optionsFile is None:
optionsFile = Path((os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))),'plotOptions.json')
writeFlag = True
# Read file
optionsLocal = setLocalOptions()
# if optionsFile.is_file():
# with open(optionsFile) as json_file:
# optionsFileJSON = json.load(json_file)
#
# print(f"*** Read existing file {optionsFile} OK, contents:")
# print(json.dumps(optionsFileJSON, sort_keys=False, indent=4))
#
# else:
# print(f"*** File {optionsFile} not found.")
if writeFlag:
        ow = input(f"Write defaults to {optionsFile} (y/n)? ")
if ow == 'y':
writeOptionsFile(optionsFile, setLocalOptions())
| gpl-3.0 | -1,978,875,038,658,223,400 | 29.122951 | 242 | 0.626939 | false |
jralls/gramps | gramps/gui/views/treemodels/flatbasemodel.py | 1 | 32063 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2009 Benny Malengier
# Copyright (C) 2010 Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
This module provides the flat treemodel that is used for all flat treeviews.
For performance, Gramps does not use Gtk.TreeStore, as that would mean keeping
the entire database table of an object in memory.
Instead, it suffices to keep in memory the sortkey and the matching handle,
as well as a map of sortkey,handle to treeview path, and vice versa.
For a flat view, the index of sortkey,handle will be the path, so it suffices
to keep in memory a map that given a sortkey,handle returns the path.
As we need to be able to insert/delete/update objects, and for that the handle
is all we know initially, and as sortkey,handle is uniquely determined by
handle, instead of keeping a map of sortkey,handle to path, we keep a map of
handle to path.
As a user selects another column to sort, the sortkey must be rebuild, and the
map remade.
The class FlatNodeMap keeps a sortkeyhandle list with (sortkey, handle) entries,
and a handle2path dictionary. As the Map is flat, the index in sortkeyhandle
corresponds to the path.
The class FlatBaseModel, is the base class for all flat treeview models.
It keeps a FlatNodeMap, and obtains data from database as needed
"""
#-------------------------------------------------------------------------
#
# python modules
#
#-------------------------------------------------------------------------
import logging
import bisect
import time
_LOG = logging.getLogger(".gui.basetreemodel")
#-------------------------------------------------------------------------
#
# GNOME/GTK modules
#
#-------------------------------------------------------------------------
from gi.repository import GObject
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.filters import SearchFilter, ExactSearchFilter
from gramps.gen.const import GRAMPS_LOCALE as glocale
from .basemodel import BaseModel
#-------------------------------------------------------------------------
#
# FlatNodeMap
#
#-------------------------------------------------------------------------
UEMPTY = ""
class FlatNodeMap:
"""
A NodeMap for a flat treeview. In such a TreeView, the paths possible are
0, 1, 2, ..., n-1, where n is the number of items to show. For the model
it is needed to keep the Path to Iter mappings of the TreeView in memory
The order of what is shown is based on the unique key: (sortkey, handle)
Naming:
* srtkey : key on which to sort
* hndl : handle of the object, makes it possible to retrieve the
object from the database. As handle is unique, it is used
in the iter for the TreeView
* index : the index in the internal lists. When a view is in reverse,
this is not kept physically, but instead via an offset
* path : integer path in the TreeView. This will be index if view is
ascending, but will begin at back of list if view shows
the entries in reverse.
* index2hndl : list of (srtkey, hndl) tuples. The index gives the
(srtkey, hndl) it belongs to.
This normally is only a part of all possible data
* hndl2index : dictionary of *hndl: index* values
The implementation provides a list of (srtkey, hndl) of which the index is
the path, and a dictionary mapping hndl to index.
To obtain index given a path, method real_index() is available
..Note: glocale.sort_key is applied to the underlying sort key,
so as to have localized sort
"""
def __init__(self):
"""
Create a new instance.
"""
self._index2hndl = []
self._fullhndl = self._index2hndl
self._identical = True
self._hndl2index = {}
self._reverse = False
self.__corr = (0, 1)
#We create a stamp to recognize invalid iterators. From the docs:
#Set the stamp to be equal to your model's stamp, to mark the
#iterator as valid. When your model's structure changes, you should
#increment your model's stamp to mark all older iterators as invalid.
#They will be recognised as invalid because they will then have an
#incorrect stamp.
self.stamp = 0
def destroy(self):
"""
Unset all elements that can prevent garbage collection
"""
self._index2hndl = None
self._fullhndl = None
self._hndl2index = None
def set_path_map(self, index2hndllist, fullhndllist, identical=True,
reverse=False):
"""
This is the core method to set up the FlatNodeMap
Input is a list of (srtkey, handle), of which the index is the path
Calling this method sets the index2hndllist, and creates the hndl2index
map.
fullhndllist is the entire list of (srtkey, handle) that is possible,
normally index2hndllist is only part of this list as determined by
        filtering. To save memory, if both lists are the same, pass only one
list twice and set identical to True.
Reverse sets up how the path is determined from the index. If True the
first index is the last path
:param index2hndllist: the ascending sorted (sortkey, handle) values
as they will appear in the flat treeview. This often is
a subset of all possible data.
:type index2hndllist: a list of (sortkey, handle) tuples
        :param fullhndllist: the list of all possible ascending sorted
(sortkey, handle) values as they will appear in the flat
treeview if all data is shown.
:type fullhndllist: a list of (sortkey, handl) tuples
:param identical: identify if index2hndllist and fullhndllist are the
same list, so only one is kept in memory.
:type identical: bool
"""
self.stamp += 1
self._index2hndl = index2hndllist
self._hndl2index = {}
self._identical = identical
self._fullhndl = self._index2hndl if identical else fullhndllist
self._reverse = reverse
self.reverse_order()
def full_srtkey_hndl_map(self):
"""
The list of all possible (sortkey, handle) tuples.
        This is stored in FlatNodeMap so that it is not necessary to iterate
        over the database again to obtain all possibilities.
"""
return self._fullhndl
def reverse_order(self):
"""
        This method keeps the index2hndl map, but sets up the index in
        reverse order. If the hndl2index map does not exist yet, it is created
        in the ascending order as given in index2hndl.
        The result is always a correct hndl2index map, in either ascending
        or reverse order.
"""
if self._hndl2index:
#if hndl2index is build already, invert order, otherwise keep
# requested order
self._reverse = not self._reverse
if self._reverse:
self.__corr = (len(self._index2hndl) - 1, -1)
else:
self.__corr = (0, 1)
if not self._hndl2index:
self._hndl2index = dict((key[1], index)
for index, key in enumerate(self._index2hndl))
def real_path(self, index):
"""
Given the index in the maps, return the real path.
If reverse = False, then index is path, otherwise however, the
path must be calculated so that the last index is the first path
"""
return self.__corr[0] + self.__corr[1] * index
def real_index(self, path):
"""
Given the path in the view, return the real index.
If reverse = False, then path is index, otherwise however, the
index must be calculated so that the last index is the first path
"""
return self.__corr[0] + self.__corr[1] * path
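        # Sketch (assumed 3-row map): ascending order gives __corr == (0, 1) so
        # path == index; reversed order gives __corr == (2, -1) so path 0 maps
        # to index 2 and vice versa.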
def clear_map(self):
"""
Clears out the index2hndl and the hndl2index
"""
self._index2hndl = []
self._hndl2index = {}
self._fullhndl = self._index2hndl
self._identical = True
def get_path(self, iter):
"""
Return the path from the passed iter.
        :param iter: the iter of the object for which the path in the treeview
            is needed
        :type iter: Gtk.TreeIter
        :Returns: the path, or None if the iter does not link to a path
"""
index = iter.user_data
##GTK3: user data may only be an integer, we store the index
##PROBLEM: pygobject 3.8 stores 0 as None, we need to correct
## when using user_data for that!
##upstream bug: https://bugzilla.gnome.org/show_bug.cgi?id=698366
if index is None:
index = 0
return Gtk.TreePath((self.real_path(index),))
def get_path_from_handle(self, handle):
"""
Return the path from the passed handle
:param handle: the key of the object for which the path in the treeview
is needed
:type handle: an object handle
:Returns: the path, or None if handle does not link to a path
"""
index = self._hndl2index.get(handle)
if index is None:
return None
return Gtk.TreePath((self.real_path(index),))
def get_sortkey(self, handle):
"""
Return the sortkey used for the passed handle.
:param handle: the key of the object for which the sortkey
is needed
:type handle: an object handle
:Returns: the sortkey, or None if handle is not present
"""
index = self._hndl2index.get(handle)
return None if index is None else self._index2hndl[index][0]
def new_iter(self, handle):
"""
Return a new iter containing the handle
"""
iter = Gtk.TreeIter()
iter.stamp = self.stamp
##GTK3: user data may only be an integer, we store the index
##PROBLEM: pygobject 3.8 stores 0 as None, we need to correct
## when using user_data for that!
##upstream bug: https://bugzilla.gnome.org/show_bug.cgi?id=698366
iter.user_data = self._hndl2index[handle]
return iter
def get_iter(self, path):
"""
Return an iter from the path. The path is assumed to be an integer.
This is accomplished by indexing into the index2hndl
iters are always created afresh
Will raise IndexError if the maps are not filled yet, or if it is empty.
Caller should take care of this if it allows calling with invalid path
:param path: path as it appears in the treeview
:type path: integer
"""
iter = self.new_iter(self._index2hndl[self.real_index(path)][1])
return iter
def get_handle(self, path):
"""
Return the handle from the path. The path is assumed to be an integer.
This is accomplished by indexing into the index2hndl
Will raise IndexError if the maps are not filled yet, or if it is empty.
Caller should take care of this if it allows calling with invalid path
:param path: path as it appears in the treeview
:type path: integer
:return handle: unicode form of the handle
"""
return self._index2hndl[self.real_index(path)][1]
def iter_next(self, iter):
"""
        Increments the iter by finding the index associated with the iter,
        adding or subtracting one.
False is returned if no next handle
:param iter: Gtk.TreeModel iterator
:param type: Gtk.TreeIter
"""
index = iter.user_data
if index is None:
##GTK3: user data may only be an integer, we store the index
##PROBLEM: pygobject 3.8 stores 0 as None, we need to correct
## when using user_data for that!
##upstream bug: https://bugzilla.gnome.org/show_bug.cgi?id=698366
index = 0
if self._reverse :
index -= 1
if index < 0:
# -1 does not raise IndexError, as -1 is last element. Catch.
return False
else:
index += 1
if index >= len(self._index2hndl):
return False
iter.user_data = index
return True
def get_first_iter(self):
"""
Return the first handle that must be shown (corresponding to path 0)
Will raise IndexError if the maps are not filled yet, or if it is empty.
Caller should take care of this if it allows calling with invalid path
"""
return self.get_iter(0)
def __len__(self):
"""
Return the number of entries in the map.
"""
return len(self._index2hndl)
def max_rows(self):
"""
Return maximum number of entries that might be present in the
map
"""
return len(self._fullhndl)
def insert(self, srtkey_hndl, allkeyonly=False):
"""
Insert a node. Given is a tuple (sortkey, handle), and this is added
in the correct place, while the hndl2index map is updated.
Returns the path of the inserted row
:param srtkey_hndl: the (sortkey, handle) tuple that must be inserted
:type srtkey_hndl: sortkey key already transformed by self.sort_func, object handle
:Returns: path of the row inserted in the treeview
:Returns type: Gtk.TreePath or None
"""
if srtkey_hndl[1] in self._hndl2index:
print(('WARNING: Attempt to add row twice to the model (%s)' %
srtkey_hndl[1]))
return
if not self._identical:
bisect.insort_left(self._fullhndl, srtkey_hndl)
if allkeyonly:
#key is not part of the view
return None
insert_pos = bisect.bisect_left(self._index2hndl, srtkey_hndl)
self._index2hndl.insert(insert_pos, srtkey_hndl)
#make sure the index map is updated
for srt_key,hndl in self._index2hndl[insert_pos+1:]:
self._hndl2index[hndl] += 1
self._hndl2index[srtkey_hndl[1]] = insert_pos
#update self.__corr so it remains correct
if self._reverse:
self.__corr = (len(self._index2hndl) - 1, -1)
return Gtk.TreePath((self.real_path(insert_pos),))
def delete(self, srtkey_hndl):
"""
Delete the row with the given (sortkey, handle).
This then rebuilds the hndl2index, subtracting one from each item
greater than the deleted index.
path of deleted row is returned
If handle is not present, None is returned
        :param srtkey_hndl: the (sortkey, handle) tuple that must be deleted
:Returns: path of the row deleted from the treeview
:Returns type: Gtk.TreePath or None
"""
#remove it from the full list first
if not self._identical:
del_pos = bisect.bisect_left(self._fullhndl, srtkey_hndl)
#check that indeed this is correct:
if not self._fullhndl[del_pos][1] == srtkey_hndl[1]:
raise KeyError('Handle %s not in list of all handles' % \
srtkey_hndl[1])
del self._fullhndl[del_pos]
#now remove it from the index maps
handle = srtkey_hndl[1]
try:
index = self._hndl2index[handle]
except KeyError:
# key not present in the treeview
return None
del self._index2hndl[index]
del self._hndl2index[handle]
#update self.__corr so it remains correct
delpath = self.real_path(index)
if self._reverse:
self.__corr = (len(self._index2hndl) - 1, -1)
#update the handle2path map so it remains correct
for srt_key,hndl in self._index2hndl[index:]:
self._hndl2index[hndl] -= 1
return Gtk.TreePath((delpath,))
#-------------------------------------------------------------------------
#
# FlatBaseModel
#
#-------------------------------------------------------------------------
class FlatBaseModel(GObject.GObject, Gtk.TreeModel, BaseModel):
"""
The base class for all flat treeview models.
It keeps a FlatNodeMap, and obtains data from database as needed
..Note: glocale.sort_key is applied to the underlying sort key,
so as to have localized sort
"""
def __init__(self, db, uistate, scol=0, order=Gtk.SortType.ASCENDING,
search=None, skip=set(),
sort_map=None):
cput = time.clock()
GObject.GObject.__init__(self)
BaseModel.__init__(self)
#inheriting classes must set self.map to obtain the data
self.prev_handle = None
self.prev_data = None
#GTK3 We leak ref, yes??
#self.set_property("leak_references", False)
self.db = db
#normally sort on first column, so scol=0
if sort_map:
#sort_map is the stored order of the columns and if they are
#enabled or not. We need to store on scol of that map
self.sort_map = [ f for f in sort_map if f[0]]
#we need the model col, that corresponds with scol
col = self.sort_map[scol][1]
else:
col = scol
# get the function that maps data to sort_keys
self.sort_func = lambda x: glocale.sort_key(self.smap[col](x))
self.sort_col = scol
self.skip = skip
self._in_build = False
self.node_map = FlatNodeMap()
self.set_search(search)
self._reverse = (order == Gtk.SortType.DESCENDING)
self.rebuild_data()
_LOG.debug(self.__class__.__name__ + ' __init__ ' +
str(time.clock() - cput) + ' sec')
def destroy(self):
"""
Unset all elements that prevent garbage collection
"""
BaseModel.destroy(self)
self.db = None
self.sort_func = None
if self.node_map:
self.node_map.destroy()
self.node_map = None
self.rebuild_data = None
self.search = None
def set_search(self, search):
"""
Change the search function that filters the data in the model.
When this method is called, make sure:
# you call self.rebuild_data() to recalculate what should be seen
in the model
# you reattach the model to the treeview so that the treeview updates
with the new entries
"""
if search:
if search[0]:
#following is None if no data given in filter sidebar
self.search = search[1]
self.rebuild_data = self._rebuild_filter
else:
if search[1]: # Search from topbar in columns
# we have search[1] = (index, text_unicode, inversion)
col = search[1][0]
text = search[1][1]
inv = search[1][2]
func = lambda x: self._get_value(x, col) or UEMPTY
if search[2]:
self.search = ExactSearchFilter(func, text, inv)
else:
self.search = SearchFilter(func, text, inv)
else:
self.search = None
self.rebuild_data = self._rebuild_search
else:
self.search = None
self.rebuild_data = self._rebuild_search
def total(self):
"""
Total number of items that maximally can be shown
"""
return self.node_map.max_rows()
def displayed(self):
"""
Number of items that are currently displayed
"""
return len(self.node_map)
def reverse_order(self):
"""
reverse the sort order of the sort column
"""
self._reverse = not self._reverse
self.node_map.reverse_order()
def color_column(self):
"""
Return the color column.
"""
return None
def sort_keys(self):
"""
Return the (sort_key, handle) list of all data that can maximally
be shown.
This list is sorted ascending, via localized string sort.
"""
# use cursor as a context manager
with self.gen_cursor() as cursor:
#loop over database and store the sort field, and the handle
srt_keys=[(self.sort_func(data), key)
for key, data in cursor]
srt_keys.sort()
return srt_keys
def _rebuild_search(self, ignore=None):
""" function called when view must be build, given a search text
in the top search bar
"""
self.clear_cache()
self._in_build = True
if (self.db is not None) and self.db.is_open():
allkeys = self.node_map.full_srtkey_hndl_map()
if not allkeys:
allkeys = self.sort_keys()
if self.search and self.search.text:
dlist = [h for h in allkeys
if self.search.match(h[1], self.db) and
h[1] not in self.skip and h[1] != ignore]
ident = False
elif ignore is None and not self.skip:
#nothing to remove from the keys present
ident = True
dlist = allkeys
else:
ident = False
dlist = [h for h in allkeys
if h[1] not in self.skip and h[1] != ignore]
self.node_map.set_path_map(dlist, allkeys, identical=ident,
reverse=self._reverse)
else:
self.node_map.clear_map()
self._in_build = False
def _rebuild_filter(self, ignore=None):
""" function called when view must be build, given filter options
in the filter sidebar
"""
self.clear_cache()
self._in_build = True
if (self.db is not None) and self.db.is_open():
allkeys = self.node_map.full_srtkey_hndl_map()
if not allkeys:
allkeys = self.sort_keys()
if self.search:
ident = False
if ignore is None:
dlist = self.search.apply(self.db, allkeys, tupleind=1)
else:
dlist = self.search.apply(self.db,
[ k for k in allkeys if k[1] != ignore],
tupleind=1)
elif ignore is None :
ident = True
dlist = allkeys
else:
ident = False
dlist = [ k for k in allkeys if k[1] != ignore ]
self.node_map.set_path_map(dlist, allkeys, identical=ident,
reverse=self._reverse)
else:
self.node_map.clear_map()
self._in_build = False
def add_row_by_handle(self, handle):
"""
Add a row. This is called after object with handle is created.
Row is only added if search/filter data is such that it must be shown
"""
assert isinstance(handle, str)
if self.node_map.get_path_from_handle(handle) is not None:
return # row is already displayed
data = self.map(handle)
insert_val = (self.sort_func(data), handle)
if not self.search or \
(self.search and self.search.match(handle, self.db)):
#row needs to be added to the model
insert_path = self.node_map.insert(insert_val)
if insert_path is not None:
node = self.do_get_iter(insert_path)[1]
self.row_inserted(insert_path, node)
else:
self.node_map.insert(insert_val, allkeyonly=True)
def delete_row_by_handle(self, handle):
"""
Delete a row, called after the object with handle is deleted
"""
assert isinstance(handle, str)
if self.node_map.get_path_from_handle(handle) is None:
return # row is not currently displayed
self.clear_cache(handle)
delete_val = (self.node_map.get_sortkey(handle), handle)
delete_path = self.node_map.delete(delete_val)
#delete_path is an integer from 0 to n-1
if delete_path is not None:
self.row_deleted(delete_path)
def update_row_by_handle(self, handle):
"""
Update a row, called after the object with handle is changed
"""
if self.node_map.get_path_from_handle(handle) is None:
return # row is not currently displayed
self.clear_cache(handle)
oldsortkey = self.node_map.get_sortkey(handle)
newsortkey = self.sort_func(self.map(handle))
if oldsortkey is None or oldsortkey != newsortkey:
#or the changed object is not present in the view due to filtering
#or the order of the object must change.
self.delete_row_by_handle(handle)
self.add_row_by_handle(handle)
else:
#the row is visible in the view, is changed, but the order is fixed
path = self.node_map.get_path_from_handle(handle)
node = self.do_get_iter(path)[1]
self.row_changed(path, node)
def get_iter_from_handle(self, handle):
"""
Get the iter for a gramps handle.
"""
if self.node_map.get_path_from_handle(handle) is None:
return None
return self.node_map.new_iter(handle)
def get_handle_from_iter(self, iter):
"""
Get the gramps handle for an iter.
"""
index = iter.user_data
if index is None:
##GTK3: user data may only be an integer, we store the index
##PROBLEM: pygobject 3.8 stores 0 as None, we need to correct
## when using user_data for that!
##upstream bug: https://bugzilla.gnome.org/show_bug.cgi?id=698366
index = 0
path = self.node_map.real_path(index)
return self.node_map.get_handle(path)
# The following implement the public interface of Gtk.TreeModel
def do_get_flags(self):
"""
Returns the GtkTreeModelFlags for this particular type of model
See Gtk.TreeModel
"""
#print 'do_get_flags'
return Gtk.TreeModelFlags.LIST_ONLY #| Gtk.TreeModelFlags.ITERS_PERSIST
def do_get_n_columns(self):
"""Internal method. Don't inherit"""
return self.on_get_n_columns()
def on_get_n_columns(self):
"""
Return the number of columns. Must be implemented in the child objects
See Gtk.TreeModel. Inherit as needed
"""
#print 'do_get_n_col'
raise NotImplementedError
def do_get_path(self, iter):
"""
Return the tree path (a tuple of indices at the various
levels) for a particular iter. We use handles for unique key iters
See Gtk.TreeModel
"""
#print 'do_get_path', iter
return self.node_map.get_path(iter)
def do_get_column_type(self, index):
"""
See Gtk.TreeModel
"""
#print 'do_get_col_type'
return str
def do_get_iter_first(self):
#print 'get iter first'
raise NotImplementedError
def do_get_iter(self, path):
"""
See Gtk.TreeModel
"""
#print 'do_get_iter', path
for p in path:
break
try:
return True, self.node_map.get_iter(p)
except IndexError:
return False, Gtk.TreeIter()
def _get_value(self, handle, col):
"""
Given handle and column, return unicode value in the column
We need this to search in the column in the GUI
"""
if handle != self.prev_handle:
cached, data = self.get_cached_value(handle, col)
if not cached:
data = self.map(handle)
self.set_cached_value(handle, col, data)
if data is None:
#object is no longer present
return ''
self.prev_data = data
self.prev_handle = handle
return self.fmap[col](self.prev_data)
def do_get_value(self, iter, col):
"""
See Gtk.TreeModel.
col is the model column that is needed, not the visible column!
"""
#print ('do_get_val', iter, iter.user_data, col)
index = iter.user_data
if index is None:
##GTK3: user data may only be an integer, we store the index
##PROBLEM: pygobject 3.8 stores 0 as None, we need to correct
## when using user_data for that!
##upstream bug: https://bugzilla.gnome.org/show_bug.cgi?id=698366
index = 0
handle = self.node_map._index2hndl[index][1]
val = self._get_value(handle, col)
#print 'val is', val, type(val)
return val
def do_iter_previous(self, iter):
#print 'do_iter_previous'
raise NotImplementedError
def do_iter_next(self, iter):
"""
Sets iter to the next node at this level of the tree
See Gtk.TreeModel
"""
return self.node_map.iter_next(iter)
def do_iter_children(self, iterparent):
"""
Return the first child of the node
See Gtk.TreeModel
"""
#print 'do_iter_children'
print('ERROR: iter children, should not be called in flat base!!')
raise NotImplementedError
def do_iter_has_child(self, iter):
"""
Returns true if this node has children
See Gtk.TreeModel
"""
#print 'do_iter_has_child'
print('ERROR: iter has_child', iter, 'should not be called in flat base')
return False
def do_iter_n_children(self, iter):
"""
See Gtk.TreeModel
"""
#print 'do_iter_n_children'
print('ERROR: iter_n_children', iter, 'should not be called in flat base')
return 0
def do_iter_nth_child(self, iter, nth):
"""
See Gtk.TreeModel
"""
#print 'do_iter_nth_child', iter, nth
if iter is None:
return True, self.node_map.get_iter(nth)
return False, None
def do_iter_parent(self, iter):
"""
Returns the parent of this node
See Gtk.TreeModel
"""
#print 'do_iter_parent'
return False, None
| gpl-2.0 | 737,878,693,955,550,700 | 36.32596 | 91 | 0.571781 | false |
tensorflow/models | official/vision/beta/ops/box_ops.py | 1 | 24043 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Box related ops."""
# Import libraries
import numpy as np
import tensorflow as tf
EPSILON = 1e-8
BBOX_XFORM_CLIP = np.log(1000. / 16.)
def yxyx_to_xywh(boxes):
"""Converts boxes from ymin, xmin, ymax, xmax to xmin, ymin, width, height.
Args:
boxes: a numpy array whose last dimension is 4 representing the coordinates
of boxes in ymin, xmin, ymax, xmax order.
Returns:
boxes: a numpy array whose shape is the same as `boxes` in new format.
Raises:
ValueError: If the last dimension of boxes is not 4.
"""
if boxes.shape[-1] != 4:
raise ValueError(
'boxes.shape[-1] is {:d}, but must be 4.'.format(boxes.shape[-1]))
boxes_ymin = boxes[..., 0]
boxes_xmin = boxes[..., 1]
boxes_width = boxes[..., 3] - boxes[..., 1]
boxes_height = boxes[..., 2] - boxes[..., 0]
new_boxes = np.stack(
[boxes_xmin, boxes_ymin, boxes_width, boxes_height], axis=-1)
return new_boxes
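# Minimal sketch (hypothetical values, not from the original file): a box
# [ymin=1., xmin=2., ymax=5., xmax=6.] becomes [xmin=2., ymin=1., width=4.,
# height=4.] after yxyx_to_xywh.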
def jitter_boxes(boxes, noise_scale=0.025):
"""Jitter the box coordinates by some noise distribution.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates of
boxes in ymin, xmin, ymax, xmax order.
noise_scale: a python float which specifies the magnitude of noise. The rule
of thumb is to set this between (0, 0.1]. The default value is found to
mimic the noisy detections best empirically.
Returns:
jittered_boxes: a tensor whose shape is the same as `boxes` representing
the jittered boxes.
Raises:
ValueError: If the last dimension of boxes is not 4.
"""
if boxes.shape[-1] != 4:
raise ValueError(
'boxes.shape[-1] is {:d}, but must be 4.'.format(boxes.shape[-1]))
with tf.name_scope('jitter_boxes'):
bbox_jitters = tf.random.normal(tf.shape(boxes), stddev=noise_scale)
ymin = boxes[..., 0:1]
xmin = boxes[..., 1:2]
ymax = boxes[..., 2:3]
xmax = boxes[..., 3:4]
width = xmax - xmin
height = ymax - ymin
new_center_x = (xmin + xmax) / 2.0 + bbox_jitters[..., 0:1] * width
new_center_y = (ymin + ymax) / 2.0 + bbox_jitters[..., 1:2] * height
new_width = width * tf.math.exp(bbox_jitters[..., 2:3])
new_height = height * tf.math.exp(bbox_jitters[..., 3:4])
jittered_boxes = tf.concat(
[new_center_y - new_height * 0.5, new_center_x - new_width * 0.5,
new_center_y + new_height * 0.5, new_center_x + new_width * 0.5],
axis=-1)
return jittered_boxes
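# Sketch (assumed default noise_scale=0.025): box centers are shifted by
# Gaussian noise proportional to the box width/height, and the sizes are
# rescaled by exp(noise), which loosely mimics noisy detector outputs.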
def normalize_boxes(boxes, image_shape):
"""Converts boxes to the normalized coordinates.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates
of boxes in ymin, xmin, ymax, xmax order.
image_shape: a list of two integers, a two-element vector or a tensor such
that all but the last dimensions are `broadcastable` to `boxes`. The last
dimension is 2, which represents [height, width].
Returns:
normalized_boxes: a tensor whose shape is the same as `boxes` representing
the normalized boxes.
Raises:
ValueError: If the last dimension of boxes is not 4.
"""
if boxes.shape[-1] != 4:
raise ValueError(
'boxes.shape[-1] is {:d}, but must be 4.'.format(boxes.shape[-1]))
with tf.name_scope('normalize_boxes'):
if isinstance(image_shape, list) or isinstance(image_shape, tuple):
height, width = image_shape
else:
image_shape = tf.cast(image_shape, dtype=boxes.dtype)
height = image_shape[..., 0:1]
width = image_shape[..., 1:2]
ymin = boxes[..., 0:1] / height
xmin = boxes[..., 1:2] / width
ymax = boxes[..., 2:3] / height
xmax = boxes[..., 3:4] / width
normalized_boxes = tf.concat([ymin, xmin, ymax, xmax], axis=-1)
return normalized_boxes
def denormalize_boxes(boxes, image_shape):
"""Converts boxes normalized by [height, width] to pixel coordinates.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates
of boxes in ymin, xmin, ymax, xmax order.
image_shape: a list of two integers, a two-element vector or a tensor such
that all but the last dimensions are `broadcastable` to `boxes`. The last
dimension is 2, which represents [height, width].
Returns:
denormalized_boxes: a tensor whose shape is the same as `boxes` representing
the denormalized boxes.
Raises:
ValueError: If the last dimension of boxes is not 4.
"""
with tf.name_scope('denormalize_boxes'):
if isinstance(image_shape, list) or isinstance(image_shape, tuple):
height, width = image_shape
else:
image_shape = tf.cast(image_shape, dtype=boxes.dtype)
height, width = tf.split(image_shape, 2, axis=-1)
ymin, xmin, ymax, xmax = tf.split(boxes, 4, axis=-1)
ymin = ymin * height
xmin = xmin * width
ymax = ymax * height
xmax = xmax * width
denormalized_boxes = tf.concat([ymin, xmin, ymax, xmax], axis=-1)
return denormalized_boxes
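# Usage sketch (assumed values): with image_shape = [100, 200],
# normalize_boxes([[10., 20., 50., 100.]], image_shape) gives
# [[0.1, 0.1, 0.5, 0.5]], and denormalize_boxes inverts it; the two functions
# are per-axis inverse transforms.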
def clip_boxes(boxes, image_shape):
"""Clips boxes to image boundaries.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates
of boxes in ymin, xmin, ymax, xmax order.
image_shape: a list of two integers, a two-element vector or a tensor such
that all but the last dimensions are `broadcastable` to `boxes`. The last
dimension is 2, which represents [height, width].
Returns:
clipped_boxes: a tensor whose shape is the same as `boxes` representing the
clipped boxes.
Raises:
ValueError: If the last dimension of boxes is not 4.
"""
if boxes.shape[-1] != 4:
raise ValueError(
'boxes.shape[-1] is {:d}, but must be 4.'.format(boxes.shape[-1]))
with tf.name_scope('clip_boxes'):
if isinstance(image_shape, list) or isinstance(image_shape, tuple):
height, width = image_shape
max_length = [height, width, height, width]
else:
image_shape = tf.cast(image_shape, dtype=boxes.dtype)
height, width = tf.unstack(image_shape, axis=-1)
max_length = tf.stack([height, width, height, width], axis=-1)
clipped_boxes = tf.math.maximum(tf.math.minimum(boxes, max_length), 0.0)
return clipped_boxes
def compute_outer_boxes(boxes, image_shape, scale=1.0):
"""Compute outer box encloses an object with a margin.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates of
boxes in ymin, xmin, ymax, xmax order.
image_shape: a list of two integers, a two-element vector or a tensor such
that all but the last dimensions are `broadcastable` to `boxes`. The last
dimension is 2, which represents [height, width].
scale: a float number specifying the scale of output outer boxes to input
`boxes`.
Returns:
outer_boxes: a tensor whose shape is the same as `boxes` representing the
outer boxes.
"""
if scale < 1.0:
raise ValueError(
'scale is {}, but outer box scale must be greater than 1.0.'.format(
scale))
centers_y = (boxes[..., 0] + boxes[..., 2]) / 2.0
centers_x = (boxes[..., 1] + boxes[..., 3]) / 2.0
box_height = (boxes[..., 2] - boxes[..., 0]) * scale
box_width = (boxes[..., 3] - boxes[..., 1]) * scale
outer_boxes = tf.stack(
[centers_y - box_height / 2.0, centers_x - box_width / 2.0,
centers_y + box_height / 2.0, centers_x + box_width / 2.0],
axis=1)
outer_boxes = clip_boxes(outer_boxes, image_shape)
return outer_boxes
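# Example (assumed scale=1.5): a 10x10 box centered at (20, 20) grows to a
# 15x15 outer box with the same center, then is clipped to the image bounds.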
def encode_boxes(boxes, anchors, weights=None):
"""Encode boxes to targets.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates
of boxes in ymin, xmin, ymax, xmax order.
anchors: a tensor whose shape is the same as, or `broadcastable` to `boxes`,
representing the coordinates of anchors in ymin, xmin, ymax, xmax order.
weights: None or a list of four float numbers used to scale coordinates.
Returns:
encoded_boxes: a tensor whose shape is the same as `boxes` representing the
encoded box targets.
Raises:
ValueError: If the last dimension of boxes is not 4.
"""
if boxes.shape[-1] != 4:
raise ValueError(
'boxes.shape[-1] is {:d}, but must be 4.'.format(boxes.shape[-1]))
with tf.name_scope('encode_boxes'):
boxes = tf.cast(boxes, dtype=anchors.dtype)
ymin = boxes[..., 0:1]
xmin = boxes[..., 1:2]
ymax = boxes[..., 2:3]
xmax = boxes[..., 3:4]
box_h = ymax - ymin
box_w = xmax - xmin
box_yc = ymin + 0.5 * box_h
box_xc = xmin + 0.5 * box_w
anchor_ymin = anchors[..., 0:1]
anchor_xmin = anchors[..., 1:2]
anchor_ymax = anchors[..., 2:3]
anchor_xmax = anchors[..., 3:4]
anchor_h = anchor_ymax - anchor_ymin
anchor_w = anchor_xmax - anchor_xmin
anchor_yc = anchor_ymin + 0.5 * anchor_h
anchor_xc = anchor_xmin + 0.5 * anchor_w
encoded_dy = (box_yc - anchor_yc) / anchor_h
encoded_dx = (box_xc - anchor_xc) / anchor_w
encoded_dh = tf.math.log(box_h / anchor_h)
encoded_dw = tf.math.log(box_w / anchor_w)
if weights:
encoded_dy *= weights[0]
encoded_dx *= weights[1]
encoded_dh *= weights[2]
encoded_dw *= weights[3]
encoded_boxes = tf.concat(
[encoded_dy, encoded_dx, encoded_dh, encoded_dw], axis=-1)
return encoded_boxes
def decode_boxes(encoded_boxes, anchors, weights=None):
"""Decode boxes.
Args:
encoded_boxes: a tensor whose last dimension is 4 representing the
coordinates of encoded boxes in ymin, xmin, ymax, xmax order.
anchors: a tensor whose shape is the same as, or `broadcastable` to `boxes`,
representing the coordinates of anchors in ymin, xmin, ymax, xmax order.
weights: None or a list of four float numbers used to scale coordinates.
Returns:
    decoded_boxes: a tensor whose shape is the same as `encoded_boxes`
      representing the decoded box targets.
"""
if encoded_boxes.shape[-1] != 4:
raise ValueError(
'encoded_boxes.shape[-1] is {:d}, but must be 4.'
.format(encoded_boxes.shape[-1]))
with tf.name_scope('decode_boxes'):
encoded_boxes = tf.cast(encoded_boxes, dtype=anchors.dtype)
dy = encoded_boxes[..., 0:1]
dx = encoded_boxes[..., 1:2]
dh = encoded_boxes[..., 2:3]
dw = encoded_boxes[..., 3:4]
if weights:
dy /= weights[0]
dx /= weights[1]
dh /= weights[2]
dw /= weights[3]
dh = tf.math.minimum(dh, BBOX_XFORM_CLIP)
dw = tf.math.minimum(dw, BBOX_XFORM_CLIP)
anchor_ymin = anchors[..., 0:1]
anchor_xmin = anchors[..., 1:2]
anchor_ymax = anchors[..., 2:3]
anchor_xmax = anchors[..., 3:4]
anchor_h = anchor_ymax - anchor_ymin
anchor_w = anchor_xmax - anchor_xmin
anchor_yc = anchor_ymin + 0.5 * anchor_h
anchor_xc = anchor_xmin + 0.5 * anchor_w
decoded_boxes_yc = dy * anchor_h + anchor_yc
decoded_boxes_xc = dx * anchor_w + anchor_xc
decoded_boxes_h = tf.math.exp(dh) * anchor_h
decoded_boxes_w = tf.math.exp(dw) * anchor_w
decoded_boxes_ymin = decoded_boxes_yc - 0.5 * decoded_boxes_h
decoded_boxes_xmin = decoded_boxes_xc - 0.5 * decoded_boxes_w
decoded_boxes_ymax = decoded_boxes_ymin + decoded_boxes_h
decoded_boxes_xmax = decoded_boxes_xmin + decoded_boxes_w
decoded_boxes = tf.concat(
[decoded_boxes_ymin, decoded_boxes_xmin,
decoded_boxes_ymax, decoded_boxes_xmax],
axis=-1)
return decoded_boxes
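# Illustrative note (not part of the original file): decode_boxes is the inverse
# of encode_boxes for the same anchors and weights (up to the BBOX_XFORM_CLIP
# limit on dh/dw), so decode_boxes(encode_boxes(b, a), a) recovers b for
# well-formed boxes.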
def filter_boxes(boxes, scores, image_shape, min_size_threshold):
"""Filter and remove boxes that are too small or fall outside the image.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates of
boxes in ymin, xmin, ymax, xmax order.
scores: a tensor whose shape is the same as tf.shape(boxes)[:-1]
representing the original scores of the boxes.
image_shape: a tensor whose shape is the same as, or `broadcastable` to
`boxes` except the last dimension, which is 2, representing [height,
width] of the scaled image.
min_size_threshold: a float representing the minimal box size in each side
(w.r.t. the scaled image). Boxes whose sides are smaller than it will be
filtered out.
Returns:
    filtered_boxes: a tensor whose shape is the same as `boxes` but with
      the positions of the filtered boxes filled with 0.
    filtered_scores: a tensor whose shape is the same as 'scores' but with
      the positions of the filtered boxes filled with 0.
"""
if boxes.shape[-1] != 4:
raise ValueError(
        'boxes.shape[-1] is {:d}, but must be 4.'.format(boxes.shape[-1]))
with tf.name_scope('filter_boxes'):
if isinstance(image_shape, list) or isinstance(image_shape, tuple):
height, width = image_shape
else:
image_shape = tf.cast(image_shape, dtype=boxes.dtype)
height = image_shape[..., 0]
width = image_shape[..., 1]
ymin = boxes[..., 0]
xmin = boxes[..., 1]
ymax = boxes[..., 2]
xmax = boxes[..., 3]
h = ymax - ymin
w = xmax - xmin
yc = ymin + 0.5 * h
xc = xmin + 0.5 * w
min_size = tf.cast(
tf.math.maximum(min_size_threshold, 0.0), dtype=boxes.dtype)
filtered_size_mask = tf.math.logical_and(
tf.math.greater(h, min_size), tf.math.greater(w, min_size))
filtered_center_mask = tf.logical_and(
tf.math.logical_and(tf.math.greater(yc, 0.0), tf.math.less(yc, height)),
tf.math.logical_and(tf.math.greater(xc, 0.0), tf.math.less(xc, width)))
filtered_mask = tf.math.logical_and(
filtered_size_mask, filtered_center_mask)
filtered_scores = tf.where(filtered_mask, scores, tf.zeros_like(scores))
filtered_boxes = tf.cast(
tf.expand_dims(filtered_mask, axis=-1), dtype=boxes.dtype) * boxes
return filtered_boxes, filtered_scores
def filter_boxes_by_scores(boxes, scores, min_score_threshold):
"""Filter and remove boxes whose scores are smaller than the threshold.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates of
boxes in ymin, xmin, ymax, xmax order.
scores: a tensor whose shape is the same as tf.shape(boxes)[:-1]
representing the original scores of the boxes.
min_score_threshold: a float representing the minimal box score threshold.
Boxes whose score are smaller than it will be filtered out.
Returns:
    filtered_boxes: a tensor whose shape is the same as `boxes` but with
      the positions of the filtered boxes filled with 0.
    filtered_scores: a tensor whose shape is the same as 'scores' but with
      the positions of the filtered boxes filled with -1.
"""
if boxes.shape[-1] != 4:
    raise ValueError('boxes.shape[-1] is {:d}, but must be 4.'.format(
boxes.shape[-1]))
with tf.name_scope('filter_boxes_by_scores'):
filtered_mask = tf.math.greater(scores, min_score_threshold)
filtered_scores = tf.where(filtered_mask, scores, -tf.ones_like(scores))
filtered_boxes = tf.cast(
tf.expand_dims(filtered_mask, axis=-1), dtype=boxes.dtype) * boxes
return filtered_boxes, filtered_scores
def gather_instances(selected_indices, instances, *aux_instances):
"""Gather instances by indices.
Args:
selected_indices: a Tensor of shape [batch, K] which indicates the selected
indices in instance dimension (2nd dimension).
instances: a Tensor of shape [batch, N, ...] where the 2nd dimension is
the instance dimension to be selected from.
*aux_instances: the additional Tensors whose shapes are in [batch, N, ...]
which are the tensors to be selected from using the `selected_indices`.
Returns:
selected_instances: the tensor of shape [batch, K, ...] which corresponds to
the selected instances of the `instances` tensor.
    selected_aux_instances: the additional tensors of shape [batch, K, ...]
      which correspond to the selected instances of the `aux_instances`
      tensors.
"""
batch_size = instances.shape[0]
if batch_size == 1:
selected_instances = tf.squeeze(
tf.gather(instances, selected_indices, axis=1), axis=1)
if aux_instances:
selected_aux_instances = [
tf.squeeze(
tf.gather(a, selected_indices, axis=1), axis=1)
for a in aux_instances
]
return tuple([selected_instances] + selected_aux_instances)
else:
return selected_instances
else:
indices_shape = tf.shape(selected_indices)
batch_indices = (
tf.expand_dims(tf.range(indices_shape[0]), axis=-1) *
tf.ones([1, indices_shape[-1]], dtype=tf.int32))
gather_nd_indices = tf.stack(
[batch_indices, selected_indices], axis=-1)
selected_instances = tf.gather_nd(instances, gather_nd_indices)
if aux_instances:
selected_aux_instances = [
tf.gather_nd(a, gather_nd_indices) for a in aux_instances
]
return tuple([selected_instances] + selected_aux_instances)
else:
return selected_instances
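# Illustrative shape note (not part of the original file): with `instances` of
# shape [batch, N, 4] (e.g. boxes) and `selected_indices` of shape [batch, K],
# gather_instances(selected_indices, instances) returns a [batch, K, 4] tensor;
# any extra tensors passed as *aux_instances are gathered with the same indices.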
def top_k_boxes(boxes, scores, k):
"""Sort and select top k boxes according to the scores.
Args:
boxes: a tensor of shape [batch_size, N, 4] representing the coordinate of
the boxes. N is the number of boxes per image.
    scores: a tensor of shape [batch_size, N] representing the score of the
boxes.
k: an integer or a tensor indicating the top k number.
Returns:
selected_boxes: a tensor of shape [batch_size, k, 4] representing the
selected top k box coordinates.
selected_scores: a tensor of shape [batch_size, k] representing the selected
top k box scores.
"""
with tf.name_scope('top_k_boxes'):
selected_scores, top_k_indices = tf.nn.top_k(scores, k=k, sorted=True)
selected_boxes = gather_instances(top_k_indices, boxes)
return selected_boxes, selected_scores
def get_non_empty_box_indices(boxes):
"""Get indices for non-empty boxes."""
  # Selects indices where both box height and width are greater than 0.
height = boxes[:, 2] - boxes[:, 0]
width = boxes[:, 3] - boxes[:, 1]
indices = tf.where(tf.logical_and(tf.greater(height, 0),
tf.greater(width, 0)))
return indices[:, 0]
def bbox_overlap(boxes, gt_boxes):
"""Calculates the overlap between proposal and ground truth boxes.
Some `boxes` or `gt_boxes` may have been padded. The returned `iou` tensor
for these boxes will be -1.
Args:
boxes: a tensor with a shape of [batch_size, N, 4]. N is the number of
proposals before groundtruth assignment (e.g., rpn_post_nms_topn). The
last dimension is the pixel coordinates in [ymin, xmin, ymax, xmax] form.
gt_boxes: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES, 4]. This
tensor might have paddings with a negative value.
Returns:
    iou: a tensor with a shape of [batch_size, N, MAX_NUM_INSTANCES].
"""
with tf.name_scope('bbox_overlap'):
bb_y_min, bb_x_min, bb_y_max, bb_x_max = tf.split(
value=boxes, num_or_size_splits=4, axis=2)
gt_y_min, gt_x_min, gt_y_max, gt_x_max = tf.split(
value=gt_boxes, num_or_size_splits=4, axis=2)
# Calculates the intersection area.
i_xmin = tf.math.maximum(bb_x_min, tf.transpose(gt_x_min, [0, 2, 1]))
i_xmax = tf.math.minimum(bb_x_max, tf.transpose(gt_x_max, [0, 2, 1]))
i_ymin = tf.math.maximum(bb_y_min, tf.transpose(gt_y_min, [0, 2, 1]))
i_ymax = tf.math.minimum(bb_y_max, tf.transpose(gt_y_max, [0, 2, 1]))
i_area = (
tf.math.maximum((i_xmax - i_xmin), 0) *
tf.math.maximum((i_ymax - i_ymin), 0))
# Calculates the union area.
bb_area = (bb_y_max - bb_y_min) * (bb_x_max - bb_x_min)
gt_area = (gt_y_max - gt_y_min) * (gt_x_max - gt_x_min)
# Adds a small epsilon to avoid divide-by-zero.
u_area = bb_area + tf.transpose(gt_area, [0, 2, 1]) - i_area + 1e-8
# Calculates IoU.
iou = i_area / u_area
# Fills -1 for IoU entries between the padded ground truth boxes.
gt_invalid_mask = tf.less(
tf.reduce_max(gt_boxes, axis=-1, keepdims=True), 0.0)
padding_mask = tf.logical_or(
tf.zeros_like(bb_x_min, dtype=tf.bool),
tf.transpose(gt_invalid_mask, [0, 2, 1]))
iou = tf.where(padding_mask, -tf.ones_like(iou), iou)
    # Fills -1 for invalid (-1) boxes.
boxes_invalid_mask = tf.less(
tf.reduce_max(boxes, axis=-1, keepdims=True), 0.0)
iou = tf.where(boxes_invalid_mask, -tf.ones_like(iou), iou)
return iou
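# Illustrative example (not part of the original file), assuming eager execution
# and hypothetical values:
#   b = tf.constant([[[0.0, 0.0, 10.0, 10.0]]])  # [batch=1, N=1, 4]
#   g = tf.constant([[[0.0, 0.0, 10.0, 5.0]]])   # [batch=1, MAX_NUM_INSTANCES=1, 4]
#   bbox_overlap(b, g)  # -> approximately [[[0.5]]]; the intersection area (50)
#                       #    over the union area (100) of the two boxes.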
def box_matching(boxes, gt_boxes, gt_classes):
"""Match boxes to groundtruth boxes.
Given the proposal boxes and the groundtruth boxes and classes, perform the
groundtruth matching by taking the argmax of the IoU between boxes and
groundtruth boxes.
Args:
boxes: a tensor of shape of [batch_size, N, 4] representing the box
      coordinates to be matched to groundtruth boxes.
gt_boxes: a tensor of shape of [batch_size, MAX_INSTANCES, 4] representing
the groundtruth box coordinates. It is padded with -1s to indicate the
invalid boxes.
gt_classes: [batch_size, MAX_INSTANCES] representing the groundtruth box
classes. It is padded with -1s to indicate the invalid classes.
Returns:
matched_gt_boxes: a tensor of shape of [batch_size, N, 4], representing
the matched groundtruth box coordinates for each input box. If the box
does not overlap with any groundtruth boxes, the matched boxes of it
will be set to all 0s.
matched_gt_classes: a tensor of shape of [batch_size, N], representing
the matched groundtruth classes for each input box. If the box does not
overlap with any groundtruth boxes, the matched box classes of it will
be set to 0, which corresponds to the background class.
matched_gt_indices: a tensor of shape of [batch_size, N], representing
the indices of the matched groundtruth boxes in the original gt_boxes
tensor. If the box does not overlap with any groundtruth boxes, the
index of the matched groundtruth will be set to -1.
matched_iou: a tensor of shape of [batch_size, N], representing the IoU
between the box and its matched groundtruth box. The matched IoU is the
maximum IoU of the box and all the groundtruth boxes.
iou: a tensor of shape of [batch_size, N, K], representing the IoU matrix
between boxes and the groundtruth boxes. The IoU between a box and the
invalid groundtruth boxes whose coordinates are [-1, -1, -1, -1] is -1.
"""
# Compute IoU between boxes and gt_boxes.
# iou <- [batch_size, N, K]
iou = bbox_overlap(boxes, gt_boxes)
# max_iou <- [batch_size, N]
# 0.0 -> no match to gt, or -1.0 match to no gt
matched_iou = tf.reduce_max(iou, axis=-1)
# background_box_mask <- bool, [batch_size, N]
background_box_mask = tf.less_equal(matched_iou, 0.0)
argmax_iou_indices = tf.argmax(iou, axis=-1, output_type=tf.int32)
matched_gt_boxes, matched_gt_classes = gather_instances(
argmax_iou_indices, gt_boxes, gt_classes)
matched_gt_boxes = tf.where(
tf.tile(tf.expand_dims(background_box_mask, axis=-1), [1, 1, 4]),
tf.zeros_like(matched_gt_boxes, dtype=matched_gt_boxes.dtype),
matched_gt_boxes)
matched_gt_classes = tf.where(
background_box_mask,
tf.zeros_like(matched_gt_classes),
matched_gt_classes)
matched_gt_indices = tf.where(
background_box_mask,
-tf.ones_like(argmax_iou_indices),
argmax_iou_indices)
return (matched_gt_boxes, matched_gt_classes, matched_gt_indices,
matched_iou, iou)
| apache-2.0 | 3,766,865,018,543,478,000 | 36.625978 | 80 | 0.655284 | false |
jo-tez/aima-python | nlp.py | 1 | 21959 | """Natural Language Processing; Chart Parsing and PageRanking (Chapter 22-23)"""
from collections import defaultdict
from utils import weighted_choice
import urllib.request
import re
# ______________________________________________________________________________
# Grammars and Lexicons
def Rules(**rules):
"""Create a dictionary mapping symbols to alternative sequences.
>>> Rules(A = "B C | D E")
{'A': [['B', 'C'], ['D', 'E']]}
"""
for (lhs, rhs) in rules.items():
rules[lhs] = [alt.strip().split() for alt in rhs.split('|')]
return rules
def Lexicon(**rules):
"""Create a dictionary mapping symbols to alternative words.
>>> Lexicon(Article = "the | a | an")
{'Article': ['the', 'a', 'an']}
"""
for (lhs, rhs) in rules.items():
rules[lhs] = [word.strip() for word in rhs.split('|')]
return rules
class Grammar:
def __init__(self, name, rules, lexicon):
"""A grammar has a set of rules and a lexicon."""
self.name = name
self.rules = rules
self.lexicon = lexicon
self.categories = defaultdict(list)
for lhs in lexicon:
for word in lexicon[lhs]:
self.categories[word].append(lhs)
def rewrites_for(self, cat):
"""Return a sequence of possible rhs's that cat can be rewritten as."""
return self.rules.get(cat, ())
def isa(self, word, cat):
"""Return True iff word is of category cat"""
return cat in self.categories[word]
def cnf_rules(self):
"""Returns the tuple (X, Y, Z) for rules in the form:
X -> Y Z"""
cnf = []
for X, rules in self.rules.items():
for (Y, Z) in rules:
cnf.append((X, Y, Z))
return cnf
def generate_random(self, S='S'):
"""Replace each token in S by a random entry in grammar (recursively)."""
import random
def rewrite(tokens, into):
for token in tokens:
if token in self.rules:
rewrite(random.choice(self.rules[token]), into)
elif token in self.lexicon:
into.append(random.choice(self.lexicon[token]))
else:
into.append(token)
return into
return ' '.join(rewrite(S.split(), []))
def __repr__(self):
return '<Grammar {}>'.format(self.name)
def ProbRules(**rules):
"""Create a dictionary mapping symbols to alternative sequences,
with probabilities.
>>> ProbRules(A = "B C [0.3] | D E [0.7]")
{'A': [(['B', 'C'], 0.3), (['D', 'E'], 0.7)]}
"""
for (lhs, rhs) in rules.items():
rules[lhs] = []
rhs_separate = [alt.strip().split() for alt in rhs.split('|')]
for r in rhs_separate:
prob = float(r[-1][1:-1]) # remove brackets, convert to float
rhs_rule = (r[:-1], prob)
rules[lhs].append(rhs_rule)
return rules
def ProbLexicon(**rules):
"""Create a dictionary mapping symbols to alternative words,
with probabilities.
>>> ProbLexicon(Article = "the [0.5] | a [0.25] | an [0.25]")
{'Article': [('the', 0.5), ('a', 0.25), ('an', 0.25)]}
"""
for (lhs, rhs) in rules.items():
rules[lhs] = []
rhs_separate = [word.strip().split() for word in rhs.split('|')]
for r in rhs_separate:
prob = float(r[-1][1:-1]) # remove brackets, convert to float
word = r[:-1][0]
rhs_rule = (word, prob)
rules[lhs].append(rhs_rule)
return rules
class ProbGrammar:
def __init__(self, name, rules, lexicon):
"""A grammar has a set of rules and a lexicon.
Each rule has a probability."""
self.name = name
self.rules = rules
self.lexicon = lexicon
self.categories = defaultdict(list)
for lhs in lexicon:
for word, prob in lexicon[lhs]:
self.categories[word].append((lhs, prob))
def rewrites_for(self, cat):
"""Return a sequence of possible rhs's that cat can be rewritten as."""
return self.rules.get(cat, ())
def isa(self, word, cat):
"""Return True iff word is of category cat"""
return cat in [c for c, _ in self.categories[word]]
def cnf_rules(self):
"""Returns the tuple (X, Y, Z, p) for rules in the form:
X -> Y Z [p]"""
cnf = []
for X, rules in self.rules.items():
for (Y, Z), p in rules:
cnf.append((X, Y, Z, p))
return cnf
def generate_random(self, S='S'):
"""Replace each token in S by a random entry in grammar (recursively).
Returns a tuple of (sentence, probability)."""
import random
def rewrite(tokens, into):
for token in tokens:
if token in self.rules:
non_terminal, prob = weighted_choice(self.rules[token])
into[1] *= prob
rewrite(non_terminal, into)
elif token in self.lexicon:
terminal, prob = weighted_choice(self.lexicon[token])
into[0].append(terminal)
into[1] *= prob
else:
into[0].append(token)
return into
rewritten_as, prob = rewrite(S.split(), [[], 1])
return (' '.join(rewritten_as), prob)
def __repr__(self):
return '<Grammar {}>'.format(self.name)
E0 = Grammar('E0',
Rules( # Grammar for E_0 [Figure 22.4]
S='NP VP | S Conjunction S',
NP='Pronoun | Name | Noun | Article Noun | Digit Digit | NP PP | NP RelClause',
VP='Verb | VP NP | VP Adjective | VP PP | VP Adverb',
PP='Preposition NP',
RelClause='That VP'),
Lexicon( # Lexicon for E_0 [Figure 22.3]
Noun="stench | breeze | glitter | nothing | wumpus | pit | pits | gold | east",
Verb="is | see | smell | shoot | fell | stinks | go | grab | carry | kill | turn | feel", # noqa
Adjective="right | left | east | south | back | smelly",
Adverb="here | there | nearby | ahead | right | left | east | south | back",
Pronoun="me | you | I | it",
Name="John | Mary | Boston | Aristotle",
Article="the | a | an",
Preposition="to | in | on | near",
Conjunction="and | or | but",
Digit="0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9",
That="that"
))
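# Illustrative usage (not part of the original file): E0 supports both random
# generation and parsing, e.g. E0.generate_random('S') returns a random sentence
# drawn from the rules above, and Chart(E0).parses('I shoot the wumpus') should
# yield at least one complete parse (see the Chart class further below).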
E_ = Grammar('E_', # Trivial Grammar and lexicon for testing
Rules(
S='NP VP',
NP='Art N | Pronoun',
VP='V NP'),
Lexicon(
Art='the | a',
N='man | woman | table | shoelace | saw',
Pronoun='I | you | it',
V='saw | liked | feel'
))
E_NP_ = Grammar('E_NP_', # Another Trivial Grammar for testing
Rules(NP='Adj NP | N'),
Lexicon(Adj='happy | handsome | hairy',
N='man'))
E_Prob = ProbGrammar('E_Prob', # The Probabilistic Grammar from the notebook
ProbRules(
S="NP VP [0.6] | S Conjunction S [0.4]",
NP="Pronoun [0.2] | Name [0.05] | Noun [0.2] | Article Noun [0.15] \
| Article Adjs Noun [0.1] | Digit [0.05] | NP PP [0.15] | NP RelClause [0.1]",
VP="Verb [0.3] | VP NP [0.2] | VP Adjective [0.25] | VP PP [0.15] | VP Adverb [0.1]",
Adjs="Adjective [0.5] | Adjective Adjs [0.5]",
PP="Preposition NP [1]",
RelClause="RelPro VP [1]"
),
ProbLexicon(
Verb="is [0.5] | say [0.3] | are [0.2]",
Noun="robot [0.4] | sheep [0.4] | fence [0.2]",
Adjective="good [0.5] | new [0.2] | sad [0.3]",
Adverb="here [0.6] | lightly [0.1] | now [0.3]",
Pronoun="me [0.3] | you [0.4] | he [0.3]",
RelPro="that [0.5] | who [0.3] | which [0.2]",
Name="john [0.4] | mary [0.4] | peter [0.2]",
Article="the [0.5] | a [0.25] | an [0.25]",
Preposition="to [0.4] | in [0.3] | at [0.3]",
Conjunction="and [0.5] | or [0.2] | but [0.3]",
Digit="0 [0.35] | 1 [0.35] | 2 [0.3]"
))
E_Chomsky = Grammar('E_Prob_Chomsky', # A Grammar in Chomsky Normal Form
Rules(
S='NP VP',
NP='Article Noun | Adjective Noun',
VP='Verb NP | Verb Adjective',
),
Lexicon(
Article='the | a | an',
Noun='robot | sheep | fence',
Adjective='good | new | sad',
Verb='is | say | are'
))
E_Prob_Chomsky = ProbGrammar('E_Prob_Chomsky', # A Probabilistic Grammar in CNF
ProbRules(
S='NP VP [1]',
NP='Article Noun [0.6] | Adjective Noun [0.4]',
VP='Verb NP [0.5] | Verb Adjective [0.5]',
),
ProbLexicon(
Article='the [0.5] | a [0.25] | an [0.25]',
Noun='robot [0.4] | sheep [0.4] | fence [0.2]',
Adjective='good [0.5] | new [0.2] | sad [0.3]',
Verb='is [0.5] | say [0.3] | are [0.2]'
))
E_Prob_Chomsky_ = ProbGrammar('E_Prob_Chomsky_',
ProbRules(
S='NP VP [1]',
NP='NP PP [0.4] | Noun Verb [0.6]',
PP='Preposition NP [1]',
VP='Verb NP [0.7] | VP PP [0.3]',
),
ProbLexicon(
Noun='astronomers [0.18] | eyes [0.32] | stars [0.32] | telescopes [0.18]',
Verb='saw [0.5] | \'\' [0.5]',
Preposition='with [1]'
))
# ______________________________________________________________________________
# Chart Parsing
class Chart:
"""Class for parsing sentences using a chart data structure.
>>> chart = Chart(E0)
>>> len(chart.parses('the stench is in 2 2'))
1
"""
def __init__(self, grammar, trace=False):
"""A datastructure for parsing a string; and methods to do the parse.
self.chart[i] holds the edges that end just before the i'th word.
Edges are 5-element lists of [start, end, lhs, [found], [expects]]."""
self.grammar = grammar
self.trace = trace
def parses(self, words, S='S'):
"""Return a list of parses; words can be a list or string."""
if isinstance(words, str):
words = words.split()
self.parse(words, S)
# Return all the parses that span the whole input
# 'span the whole input' => begin at 0, end at len(words)
return [[i, j, S, found, []]
for (i, j, lhs, found, expects) in self.chart[len(words)]
# assert j == len(words)
if i == 0 and lhs == S and expects == []]
def parse(self, words, S='S'):
"""Parse a list of words; according to the grammar.
Leave results in the chart."""
self.chart = [[] for i in range(len(words)+1)]
self.add_edge([0, 0, 'S_', [], [S]])
for i in range(len(words)):
self.scanner(i, words[i])
return self.chart
def add_edge(self, edge):
"""Add edge to chart, and see if it extends or predicts another edge."""
start, end, lhs, found, expects = edge
if edge not in self.chart[end]:
self.chart[end].append(edge)
if self.trace:
print('Chart: added {}'.format(edge))
if not expects:
self.extender(edge)
else:
self.predictor(edge)
def scanner(self, j, word):
"""For each edge expecting a word of this category here, extend the edge."""
for (i, j, A, alpha, Bb) in self.chart[j]:
if Bb and self.grammar.isa(word, Bb[0]):
self.add_edge([i, j+1, A, alpha + [(Bb[0], word)], Bb[1:]])
def predictor(self, edge):
"""Add to chart any rules for B that could help extend this edge."""
(i, j, A, alpha, Bb) = edge
B = Bb[0]
if B in self.grammar.rules:
for rhs in self.grammar.rewrites_for(B):
self.add_edge([j, j, B, [], rhs])
def extender(self, edge):
"""See what edges can be extended by this edge."""
(j, k, B, _, _) = edge
for (i, j, A, alpha, B1b) in self.chart[j]:
if B1b and B == B1b[0]:
self.add_edge([i, k, A, alpha + [edge], B1b[1:]])
# ______________________________________________________________________________
# CYK Parsing
def CYK_parse(words, grammar):
""" [Figure 23.5] """
# We use 0-based indexing instead of the book's 1-based.
N = len(words)
P = defaultdict(float)
# Insert lexical rules for each word.
for (i, word) in enumerate(words):
for (X, p) in grammar.categories[word]:
P[X, i, 1] = p
# Combine first and second parts of right-hand sides of rules,
# from short to long.
for length in range(2, N+1):
for start in range(N-length+1):
for len1 in range(1, length): # N.B. the book incorrectly has N instead of length
len2 = length - len1
for (X, Y, Z, p) in grammar.cnf_rules():
P[X, start, length] = max(P[X, start, length],
P[Y, start, len1] * P[Z, start+len1, len2] * p)
return P
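# Illustrative usage (not part of the original file): CYK_parse expects a grammar
# in Chomsky Normal Form, e.g.
#   P = CYK_parse(['the', 'robot', 'is', 'good'], E_Prob_Chomsky)
# P is a defaultdict keyed by (symbol, start, length); P['S', 0, 4] then holds
# the probability of the most likely parse of the whole four-word sentence.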
# ______________________________________________________________________________
# Page Ranking
# First entry in list is the base URL, and then following are relative URL pages
examplePagesSet = ["https://en.wikipedia.org/wiki/", "Aesthetics", "Analytic_philosophy",
"Ancient_Greek", "Aristotle", "Astrology", "Atheism", "Baruch_Spinoza",
"Belief", "Betrand Russell", "Confucius", "Consciousness",
"Continental Philosophy", "Dialectic", "Eastern_Philosophy",
"Epistemology", "Ethics", "Existentialism", "Friedrich_Nietzsche",
"Idealism", "Immanuel_Kant", "List_of_political_philosophers", "Logic",
"Metaphysics", "Philosophers", "Philosophy", "Philosophy_of_mind", "Physics",
"Plato", "Political_philosophy", "Pythagoras", "Rationalism",
"Social_philosophy", "Socrates", "Subjectivity", "Theology",
"Truth", "Western_philosophy"]
def loadPageHTML(addressList):
"""Download HTML page content for every URL address passed as argument"""
contentDict = {}
for addr in addressList:
with urllib.request.urlopen(addr) as response:
raw_html = response.read().decode('utf-8')
            # Strip raw html of unnecessary content. Basically everything that isn't link or text
html = stripRawHTML(raw_html)
contentDict[addr] = html
return contentDict
def initPages(addressList):
"""Create a dictionary of pages from a list of URL addresses"""
pages = {}
for addr in addressList:
pages[addr] = Page(addr)
return pages
def stripRawHTML(raw_html):
"""Remove the <head> section of the HTML which contains links to stylesheets etc.,
    and remove all other unnecessary HTML"""
# TODO: Strip more out of the raw html
return re.sub("<head>.*?</head>", "", raw_html, flags=re.DOTALL) # remove <head> section
def determineInlinks(page):
"""Given a set of pages that have their outlinks determined, we can fill
out a page's inlinks by looking through all other page's outlinks"""
inlinks = []
for addr, indexPage in pagesIndex.items():
if page.address == indexPage.address:
continue
elif page.address in indexPage.outlinks:
inlinks.append(addr)
return inlinks
def findOutlinks(page, handleURLs=None):
"""Search a page's HTML content for URL links to other pages"""
urls = re.findall(r'href=[\'"]?([^\'" >]+)', pagesContent[page.address])
if handleURLs:
urls = handleURLs(urls)
return urls
def onlyWikipediaURLS(urls):
"""Some example HTML page data is from wikipedia. This function converts
relative wikipedia links to full wikipedia URLs"""
wikiURLs = [url for url in urls if url.startswith('/wiki/')]
return ["https://en.wikipedia.org"+url for url in wikiURLs]
# ______________________________________________________________________________
# HITS Helper Functions
def expand_pages(pages):
"""Adds in every page that links to or is linked from one of
the relevant pages."""
expanded = {}
for addr, page in pages.items():
if addr not in expanded:
expanded[addr] = page
for inlink in page.inlinks:
if inlink not in expanded:
expanded[inlink] = pagesIndex[inlink]
for outlink in page.outlinks:
if outlink not in expanded:
expanded[outlink] = pagesIndex[outlink]
return expanded
def relevant_pages(query):
"""Relevant pages are pages that contain all of the query words. They are obtained by
intersecting the hit lists of the query words."""
hit_intersection = {addr for addr in pagesIndex}
query_words = query.split()
for query_word in query_words:
hit_list = set()
for addr in pagesIndex:
if query_word.lower() in pagesContent[addr].lower():
hit_list.add(addr)
hit_intersection = hit_intersection.intersection(hit_list)
return {addr: pagesIndex[addr] for addr in hit_intersection}
def normalize(pages):
"""Normalize divides each page's score by the sum of the squares of all
pages' scores (separately for both the authority and hub scores).
"""
summed_hub = sum(page.hub**2 for _, page in pages.items())
summed_auth = sum(page.authority**2 for _, page in pages.items())
for _, page in pages.items():
page.hub /= summed_hub**0.5
page.authority /= summed_auth**0.5
class ConvergenceDetector(object):
"""If the hub and authority values of the pages are no longer changing, we have
    reached convergence and further iterations will have no effect. This detects convergence
so that we can stop the HITS algorithm as early as possible."""
def __init__(self):
self.hub_history = None
self.auth_history = None
def __call__(self):
return self.detect()
def detect(self):
curr_hubs = [page.hub for addr, page in pagesIndex.items()]
curr_auths = [page.authority for addr, page in pagesIndex.items()]
if self.hub_history is None:
self.hub_history, self.auth_history = [], []
else:
diffsHub = [abs(x-y) for x, y in zip(curr_hubs, self.hub_history[-1])]
diffsAuth = [abs(x-y) for x, y in zip(curr_auths, self.auth_history[-1])]
aveDeltaHub = sum(diffsHub)/float(len(pagesIndex))
aveDeltaAuth = sum(diffsAuth)/float(len(pagesIndex))
if aveDeltaHub < 0.01 and aveDeltaAuth < 0.01: # may need tweaking
return True
if len(self.hub_history) > 2: # prevent list from getting long
del self.hub_history[0]
del self.auth_history[0]
self.hub_history.append([x for x in curr_hubs])
self.auth_history.append([x for x in curr_auths])
return False
def getInlinks(page):
if not page.inlinks:
page.inlinks = determineInlinks(page)
return [addr for addr, p in pagesIndex.items() if addr in page.inlinks]
def getOutlinks(page):
if not page.outlinks:
page.outlinks = findOutlinks(page)
return [addr for addr, p in pagesIndex.items() if addr in page.outlinks]
# ______________________________________________________________________________
# HITS Algorithm
class Page(object):
def __init__(self, address, inlinks=None, outlinks=None, hub=0, authority=0):
self.address = address
self.hub = hub
self.authority = authority
self.inlinks = inlinks
self.outlinks = outlinks
pagesContent = {} # maps Page relative or absolute URL/location to page's HTML content
pagesIndex = {}
convergence = ConvergenceDetector() # assign function to variable to mimic pseudocode's syntax
def HITS(query):
"""The HITS algorithm for computing hubs and authorities with respect to a query."""
pages = expand_pages(relevant_pages(query))
for p in pages.values():
p.authority = 1
p.hub = 1
while not convergence():
authority = {p: pages[p].authority for p in pages}
hub = {p: pages[p].hub for p in pages}
for p in pages:
# p.authority ← ∑i Inlinki(p).Hub
pages[p].authority = sum(hub[x] for x in getInlinks(pages[p]))
# p.hub ← ∑i Outlinki(p).Authority
pages[p].hub = sum(authority[x] for x in getOutlinks(pages[p]))
normalize(pages)
return pages
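# Illustrative usage sketch (not part of the original file; network access
# required): the module-level pagesContent and pagesIndex dictionaries must be
# populated, and every Page's inlinks/outlinks determined, before HITS is run:
#   pagesContent = loadPageHTML([examplePagesSet[0] + p for p in examplePagesSet[1:]])
#   pagesIndex = initPages(list(pagesContent.keys()))
#   for page in pagesIndex.values():
#       page.outlinks = findOutlinks(page, handleURLs=onlyWikipediaURLS)
#   for page in pagesIndex.values():
#       page.inlinks = determineInlinks(page)
#   ranked = HITS('philosophy')  # each Page in `ranked` now has hub/authority scores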
| mit | 3,861,374,006,818,428,000 | 37.578207 | 114 | 0.515056 | false |
w495/python-video-shot-detector | etc/experiments/test_pyav.py | 1 | 2469 | # -*- coding: utf8 -*-
from __future__ import absolute_import, division, print_function
import av
from av.video.frame import VideoFrame
from av.video.stream import VideoStream
# This list will hold the frames as numpy arrays.
array_list = []
# Open the input container for reading.
input_container = av.open('input.mp4')
# Apply "inverse multiplexing" =)
# Get the packets from the stream.
input_packets = input_container.demux()
# Get all the video frames and put them into `array_list`.
for packet in input_packets:
if isinstance(packet.stream, VideoStream):
        # Get all the frames in the packet.
frames = packet.decode()
for raw_frame in frames:
            # Reformat the frames to the required size and format.
            # It is better to do this with pyav (libav) itself,
            # because it is faster.
frame = raw_frame.reformat(32, 32, 'rgb24')
            # Turn each frame into a numpy array (dtype=int).
array = frame.to_nd_array()
            # Append it to the list of numpy arrays.
array_list += [array]
# Open the output container for writing.
output_container = av.open('out.mp4', mode='w', format='mp4')
# Add a stream with the h264 codec to the container.
output_stream = output_container.add_stream('h264', rate=25)
# This list will hold the packets of the output stream.
output_packets = []
# Go over the list of arrays and pack them into output stream packets.
for array in array_list:
    # Build a video frame from the array.
frame = VideoFrame.from_ndarray(array, format='rgb24')
    # Encode the resulting frame.
packet = output_stream.encode(frame)
    # Append it to the list of packets.
output_packets += [packet]
# Apply "direct multiplexing" =)
# Call the muxer for each packet.
for packet in output_packets:
if packet:
output_container.mux(packet)
output_container.close()
| bsd-3-clause | -1,050,391,235,802,443,000 | 31.241379 | 69 | 0.688235 | false |
clgplus/sample | retrain.py | 1 | 37008 | # -*- coding: utf-8 -*-
# ==============================================================================
"""
运行以下命令
python retrain.py \
--bottleneck_dir=/home/clg/tf_files/bottlenecks \
--how_many_training_steps 60 \
--model_dir=/home/clg/tf_files/inception \
--output_graph=/home/clg/tf_files/retrained_graph.pb \
--output_labels=/home/clg/tf_files/retrained_labels.txt \
--image_dir /home/clg/tf_files/flower_photos
上面命令只学了500张照片,以下命令是学习所有的照片4000张左右。
python tensorflow/examples/image_retraining/retrain.py \
--bottleneck_dir=/tf_files/bottlenecks \
--model_dir=/tf_files/inception \
--output_graph=/tf_files/retrained_graph.pb \
--output_labels=/tf_files/retrained_labels.txt \
--image_dir /tf_files/flower_photos
To use with TensorBoard:
By default, this script will log summaries to /tmp/retrain_logs directory
To visualize the training, run:
tensorboard --logdir /tmp/retrain_logs
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import glob
import hashlib
import os.path
import random
import re
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
from tensorflow.python.client import graph_util
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import gfile
FLAGS = tf.app.flags.FLAGS
# Input and output file flags.
tf.app.flags.DEFINE_string('image_dir', '',
"""Path to folders of labeled images.""")
tf.app.flags.DEFINE_string('output_graph', '/tmp/output_graph.pb',
"""Where to save the trained graph.""")
tf.app.flags.DEFINE_string('output_labels', '/tmp/output_labels.txt',
"""Where to save the trained graph's labels.""")
tf.app.flags.DEFINE_string('summaries_dir', '/tmp/retrain_logs',
"""Where to save summary logs for TensorBoard.""")
# Details of the training configuration.
tf.app.flags.DEFINE_integer('how_many_training_steps', 4000,
"""How many training steps to run before ending.""")
tf.app.flags.DEFINE_float('learning_rate', 0.01,
"""How large a learning rate to use when training.""")
tf.app.flags.DEFINE_integer(
'testing_percentage', 10,
"""What percentage of images to use as a test set.""")
tf.app.flags.DEFINE_integer(
'validation_percentage', 10,
"""What percentage of images to use as a validation set.""")
tf.app.flags.DEFINE_integer('eval_step_interval', 10,
"""How often to evaluate the training results.""")
tf.app.flags.DEFINE_integer('train_batch_size', 100,
"""How many images to train on at a time.""")
tf.app.flags.DEFINE_integer('test_batch_size', 500,
"""How many images to test on at a time. This"""
""" test set is only used infrequently to verify"""
""" the overall accuracy of the model.""")
tf.app.flags.DEFINE_integer(
'validation_batch_size', 100,
"""How many images to use in an evaluation batch. This validation set is"""
""" used much more often than the test set, and is an early indicator of"""
""" how accurate the model is during training.""")
# File-system cache locations.
tf.app.flags.DEFINE_string('model_dir', '/tmp/imagenet',
"""Path to classify_image_graph_def.pb, """
"""imagenet_synset_to_human_label_map.txt, and """
"""imagenet_2012_challenge_label_map_proto.pbtxt.""")
tf.app.flags.DEFINE_string(
'bottleneck_dir', '/tmp/bottleneck',
"""Path to cache bottleneck layer values as files.""")
tf.app.flags.DEFINE_string('final_tensor_name', 'final_result',
"""The name of the output classification layer in"""
""" the retrained graph.""")
# Controls the distortions used during training.
tf.app.flags.DEFINE_boolean(
'flip_left_right', False,
"""Whether to randomly flip half of the training images horizontally.""")
tf.app.flags.DEFINE_integer(
'random_crop', 0,
"""A percentage determining how much of a margin to randomly crop off the"""
""" training images.""")
tf.app.flags.DEFINE_integer(
'random_scale', 0,
"""A percentage determining how much to randomly scale up the size of the"""
""" training images by.""")
tf.app.flags.DEFINE_integer(
'random_brightness', 0,
"""A percentage determining how much to randomly multiply the training"""
""" image input pixels up or down by.""")
# These are all parameters that are tied to the particular model architecture
# we're using for Inception v3. These include things like tensor names and their
# sizes. If you want to adapt this script to work with another model, you will
# need to update these to reflect the values in the network you're using.
# pylint: disable=line-too-long
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
# pylint: enable=line-too-long
BOTTLENECK_TENSOR_NAME = 'pool_3/_reshape:0'
BOTTLENECK_TENSOR_SIZE = 2048
MODEL_INPUT_WIDTH = 299
MODEL_INPUT_HEIGHT = 299
MODEL_INPUT_DEPTH = 3
JPEG_DATA_TENSOR_NAME = 'DecodeJpeg/contents:0'
RESIZED_INPUT_TENSOR_NAME = 'ResizeBilinear:0'
def create_image_lists(image_dir, testing_percentage, validation_percentage):
"""Builds a list of training images from the file system.
Analyzes the sub folders in the image directory, splits them into stable
training, testing, and validation sets, and returns a data structure
describing the lists of images for each label and their paths.
Args:
image_dir: String path to a folder containing subfolders of images.
testing_percentage: Integer percentage of the images to reserve for tests.
validation_percentage: Integer percentage of images reserved for validation.
Returns:
A dictionary containing an entry for each label subfolder, with images split
into training, testing, and validation sets within each label.
"""
if not gfile.Exists(image_dir):
print("Image directory '" + image_dir + "' not found.")
return None
result = {}
sub_dirs = [x[0] for x in os.walk(image_dir)]
# The root directory comes first, so skip it.
is_root_dir = True
for sub_dir in sub_dirs:
if is_root_dir:
is_root_dir = False
continue
extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']
file_list = []
dir_name = os.path.basename(sub_dir)
if dir_name == image_dir:
continue
print("Looking for images in '" + dir_name + "'")
for extension in extensions:
file_glob = os.path.join(image_dir, dir_name, '*.' + extension)
file_list.extend(glob.glob(file_glob))
if not file_list:
print('No files found')
continue
if len(file_list) < 20:
print('WARNING: Folder has less than 20 images, which may cause issues.')
label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())
training_images = []
testing_images = []
validation_images = []
for file_name in file_list:
base_name = os.path.basename(file_name)
# We want to ignore anything after '_nohash_' in the file name when
# deciding which set to put an image in, the data set creator has a way of
# grouping photos that are close variations of each other. For example
# this is used in the plant disease data set to group multiple pictures of
# the same leaf.
hash_name = re.sub(r'_nohash_.*$', '', file_name)
# This looks a bit magical, but we need to decide whether this file should
# go into the training, testing, or validation sets, and we want to keep
# existing files in the same set even if more files are subsequently
# added.
# To do that, we need a stable way of deciding based on just the file name
# itself, so we do a hash of that and then use that to generate a
# probability value that we use to assign it.
hash_name_hashed = hashlib.sha1(hash_name.encode('utf-8')).hexdigest()
percentage_hash = (int(hash_name_hashed, 16) % (65536)) * (100 / 65535.0)
if percentage_hash < validation_percentage:
validation_images.append(base_name)
elif percentage_hash < (testing_percentage + validation_percentage):
testing_images.append(base_name)
else:
training_images.append(base_name)
result[label_name] = {
'dir': dir_name,
'training': training_images,
'testing': testing_images,
'validation': validation_images,
}
return result
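# Illustrative note (not part of the original file): for an image_dir containing
# e.g. 'daisy' and 'roses' subfolders, the returned dictionary looks roughly like
#   {'daisy': {'dir': 'daisy', 'training': [...], 'testing': [...],
#              'validation': [...]},
#    'roses': {...}}
# with file base names assigned to the three sets by the hash trick above.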
def get_image_path(image_lists, label_name, index, image_dir, category):
""""Returns a path to an image for a label at the given index.
Args:
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Int offset of the image we want. This will be moduloed by the
available number of images for the label, so it can be arbitrarily large.
image_dir: Root folder string of the subfolders containing the training
images.
category: Name string of set to pull images from - training, testing, or
validation.
Returns:
File system path string to an image that meets the requested parameters.
"""
if label_name not in image_lists:
tf.logging.fatal('Label does not exist %s.', label_name)
label_lists = image_lists[label_name]
if category not in label_lists:
tf.logging.fatal('Category does not exist %s.', category)
category_list = label_lists[category]
if not category_list:
tf.logging.fatal('Category has no images - %s.', category)
mod_index = index % len(category_list)
base_name = category_list[mod_index]
sub_dir = label_lists['dir']
full_path = os.path.join(image_dir, sub_dir, base_name)
return full_path
def get_bottleneck_path(image_lists, label_name, index, bottleneck_dir,
category):
""""Returns a path to a bottleneck file for a label at the given index.
Args:
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Integer offset of the image we want. This will be moduloed by the
available number of images for the label, so it can be arbitrarily large.
bottleneck_dir: Folder string holding cached files of bottleneck values.
category: Name string of set to pull images from - training, testing, or
validation.
Returns:
File system path string to an image that meets the requested parameters.
"""
return get_image_path(image_lists, label_name, index, bottleneck_dir,
category) + '.txt'
def create_inception_graph():
""""Creates a graph from saved GraphDef file and returns a Graph object.
Returns:
Graph holding the trained Inception network, and various tensors we'll be
manipulating.
"""
with tf.Session() as sess:
model_filename = os.path.join(
FLAGS.model_dir, 'classify_image_graph_def.pb')
with gfile.FastGFile(model_filename, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (
tf.import_graph_def(graph_def, name='', return_elements=[
BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,
RESIZED_INPUT_TENSOR_NAME]))
return sess.graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
def run_bottleneck_on_image(sess, image_data, image_data_tensor,
bottleneck_tensor):
"""Runs inference on an image to extract the 'bottleneck' summary layer.
Args:
sess: Current active TensorFlow Session.
image_data: Numpy array of image data.
image_data_tensor: Input data layer in the graph.
bottleneck_tensor: Layer before the final softmax.
Returns:
Numpy array of bottleneck values.
"""
bottleneck_values = sess.run(
bottleneck_tensor,
{image_data_tensor: image_data})
bottleneck_values = np.squeeze(bottleneck_values)
return bottleneck_values
def maybe_download_and_extract():
"""Download and extract model tar file.
If the pretrained model we're using doesn't already exist, this function
downloads it from the TensorFlow.org website and unpacks it into a directory.
"""
dest_directory = FLAGS.model_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' %
(filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL,
filepath,
_progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def ensure_dir_exists(dir_name):
"""Makes sure the folder exists on disk.
Args:
dir_name: Path string to the folder we want to create.
"""
if not os.path.exists(dir_name):
os.makedirs(dir_name)
def get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir,
category, bottleneck_dir, jpeg_data_tensor,
bottleneck_tensor):
"""Retrieves or calculates bottleneck values for an image.
If a cached version of the bottleneck data exists on-disk, return that,
otherwise calculate the data and save it to disk for future use.
Args:
sess: The current active TensorFlow Session.
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Integer offset of the image we want. This will be modulo-ed by the
available number of images for the label, so it can be arbitrarily large.
image_dir: Root folder string of the subfolders containing the training
images.
category: Name string of which set to pull images from - training, testing,
or validation.
bottleneck_dir: Folder string holding cached files of bottleneck values.
jpeg_data_tensor: The tensor to feed loaded jpeg data into.
bottleneck_tensor: The output tensor for the bottleneck values.
Returns:
Numpy array of values produced by the bottleneck layer for the image.
"""
label_lists = image_lists[label_name]
sub_dir = label_lists['dir']
sub_dir_path = os.path.join(bottleneck_dir, sub_dir)
ensure_dir_exists(sub_dir_path)
bottleneck_path = get_bottleneck_path(image_lists, label_name, index,
bottleneck_dir, category)
if not os.path.exists(bottleneck_path):
print('Creating bottleneck at ' + bottleneck_path)
image_path = get_image_path(image_lists, label_name, index, image_dir,
category)
if not gfile.Exists(image_path):
tf.logging.fatal('File does not exist %s', image_path)
image_data = gfile.FastGFile(image_path, 'rb').read()
bottleneck_values = run_bottleneck_on_image(sess, image_data,
jpeg_data_tensor,
bottleneck_tensor)
bottleneck_string = ','.join(str(x) for x in bottleneck_values)
with open(bottleneck_path, 'w') as bottleneck_file:
bottleneck_file.write(bottleneck_string)
with open(bottleneck_path, 'r') as bottleneck_file:
bottleneck_string = bottleneck_file.read()
bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
return bottleneck_values
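# Illustrative note (not part of the original file): each cached bottleneck file
# is a plain text file holding the 2048 penultimate-layer activations of one
# image as comma-separated floats (e.g. '0.234,1.871,0.0,...'), so a stale cache
# entry can be regenerated simply by deleting its .txt file.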
def cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir,
jpeg_data_tensor, bottleneck_tensor):
"""Ensures all the training, testing, and validation bottlenecks are cached.
Because we're likely to read the same image multiple times (if there are no
distortions applied during training) it can speed things up a lot if we
calculate the bottleneck layer values once for each image during
preprocessing, and then just read those cached values repeatedly during
training. Here we go through all the images we've found, calculate those
values, and save them off.
Args:
sess: The current active TensorFlow Session.
image_lists: Dictionary of training images for each label.
image_dir: Root folder string of the subfolders containing the training
images.
bottleneck_dir: Folder string holding cached files of bottleneck values.
jpeg_data_tensor: Input tensor for jpeg data from file.
bottleneck_tensor: The penultimate output layer of the graph.
Returns:
Nothing.
"""
how_many_bottlenecks = 0
ensure_dir_exists(bottleneck_dir)
for label_name, label_lists in image_lists.items():
for category in ['training', 'testing', 'validation']:
category_list = label_lists[category]
for index, unused_base_name in enumerate(category_list):
get_or_create_bottleneck(sess, image_lists, label_name, index,
image_dir, category, bottleneck_dir,
jpeg_data_tensor, bottleneck_tensor)
how_many_bottlenecks += 1
if how_many_bottlenecks % 100 == 0:
print(str(how_many_bottlenecks) + ' bottleneck files created.')
def get_random_cached_bottlenecks(sess, image_lists, how_many, category,
bottleneck_dir, image_dir, jpeg_data_tensor,
bottleneck_tensor):
"""Retrieves bottleneck values for cached images.
If no distortions are being applied, this function can retrieve the cached
bottleneck values directly from disk for images. It picks a random set of
images from the specified category.
Args:
sess: Current TensorFlow Session.
image_lists: Dictionary of training images for each label.
how_many: The number of bottleneck values to return.
category: Name string of which set to pull from - training, testing, or
validation.
bottleneck_dir: Folder string holding cached files of bottleneck values.
image_dir: Root folder string of the subfolders containing the training
images.
jpeg_data_tensor: The layer to feed jpeg image data into.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
Returns:
List of bottleneck arrays and their corresponding ground truths.
"""
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
for unused_i in range(how_many):
label_index = random.randrange(class_count)
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(65536)
bottleneck = get_or_create_bottleneck(sess, image_lists, label_name,
image_index, image_dir, category,
bottleneck_dir, jpeg_data_tensor,
bottleneck_tensor)
ground_truth = np.zeros(class_count, dtype=np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck)
ground_truths.append(ground_truth)
return bottlenecks, ground_truths
def get_random_distorted_bottlenecks(
sess, image_lists, how_many, category, image_dir, input_jpeg_tensor,
distorted_image, resized_input_tensor, bottleneck_tensor):
"""Retrieves bottleneck values for training images, after distortions.
If we're training with distortions like crops, scales, or flips, we have to
recalculate the full model for every image, and so we can't use cached
bottleneck values. Instead we find random images for the requested category,
run them through the distortion graph, and then the full graph to get the
bottleneck results for each.
Args:
sess: Current TensorFlow Session.
image_lists: Dictionary of training images for each label.
how_many: The integer number of bottleneck values to return.
category: Name string of which set of images to fetch - training, testing,
or validation.
image_dir: Root folder string of the subfolders containing the training
images.
input_jpeg_tensor: The input layer we feed the image data to.
distorted_image: The output node of the distortion graph.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
Returns:
List of bottleneck arrays and their corresponding ground truths.
"""
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
for unused_i in range(how_many):
label_index = random.randrange(class_count)
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(65536)
image_path = get_image_path(image_lists, label_name, image_index, image_dir,
category)
if not gfile.Exists(image_path):
tf.logging.fatal('File does not exist %s', image_path)
jpeg_data = gfile.FastGFile(image_path, 'rb').read()
# Note that we materialize the distorted_image_data as a numpy array before
    # running inference on the image. This involves 2 memory copies and
# might be optimized in other implementations.
distorted_image_data = sess.run(distorted_image,
{input_jpeg_tensor: jpeg_data})
bottleneck = run_bottleneck_on_image(sess, distorted_image_data,
resized_input_tensor,
bottleneck_tensor)
ground_truth = np.zeros(class_count, dtype=np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck)
ground_truths.append(ground_truth)
return bottlenecks, ground_truths
def should_distort_images(flip_left_right, random_crop, random_scale,
random_brightness):
"""Whether any distortions are enabled, from the input flags.
Args:
flip_left_right: Boolean whether to randomly mirror images horizontally.
random_crop: Integer percentage setting the total margin used around the
crop box.
random_scale: Integer percentage of how much to vary the scale by.
random_brightness: Integer range to randomly multiply the pixel values by.
Returns:
Boolean value indicating whether any distortions should be applied.
"""
return (flip_left_right or (random_crop != 0) or (random_scale != 0) or
(random_brightness != 0))
def add_input_distortions(flip_left_right, random_crop, random_scale,
random_brightness):
"""Creates the operations to apply the specified distortions.
During training it can help to improve the results if we run the images
through simple distortions like crops, scales, and flips. These reflect the
kind of variations we expect in the real world, and so can help train the
model to cope with natural data more effectively. Here we take the supplied
parameters and construct a network of operations to apply them to an image.
Cropping
~~~~~~~~
Cropping is done by placing a bounding box at a random position in the full
image. The cropping parameter controls the size of that box relative to the
input image. If it's zero, then the box is the same size as the input and no
cropping is performed. If the value is 50%, then the crop box will be half the
width and height of the input. In a diagram it looks like this:
< width >
+---------------------+
| |
| width - crop% |
| < > |
| +------+ |
| | | |
| | | |
| | | |
| +------+ |
| |
| |
+---------------------+
Scaling
~~~~~~~
Scaling is a lot like cropping, except that the bounding box is always
centered and its size varies randomly within the given range. For example if
the scale percentage is zero, then the bounding box is the same size as the
input and no scaling is applied. If it's 50%, then the bounding box will be in
a random range between half the width and height and full size.
Args:
flip_left_right: Boolean whether to randomly mirror images horizontally.
random_crop: Integer percentage setting the total margin used around the
crop box.
random_scale: Integer percentage of how much to vary the scale by.
random_brightness: Integer range to randomly multiply the pixel values by.
Returns:
The jpeg input layer and the distorted result tensor.
"""
jpeg_data = tf.placeholder(tf.string, name='DistortJPGInput')
decoded_image = tf.image.decode_jpeg(jpeg_data, channels=MODEL_INPUT_DEPTH)
decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)
decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
margin_scale = 1.0 + (random_crop / 100.0)
resize_scale = 1.0 + (random_scale / 100.0)
margin_scale_value = tf.constant(margin_scale)
resize_scale_value = tf.random_uniform(tensor_shape.scalar(),
minval=1.0,
maxval=resize_scale)
scale_value = tf.mul(margin_scale_value, resize_scale_value)
precrop_width = tf.mul(scale_value, MODEL_INPUT_WIDTH)
precrop_height = tf.mul(scale_value, MODEL_INPUT_HEIGHT)
precrop_shape = tf.pack([precrop_height, precrop_width])
precrop_shape_as_int = tf.cast(precrop_shape, dtype=tf.int32)
precropped_image = tf.image.resize_bilinear(decoded_image_4d,
precrop_shape_as_int)
precropped_image_3d = tf.squeeze(precropped_image, squeeze_dims=[0])
cropped_image = tf.random_crop(precropped_image_3d,
[MODEL_INPUT_HEIGHT, MODEL_INPUT_WIDTH,
MODEL_INPUT_DEPTH])
if flip_left_right:
flipped_image = tf.image.random_flip_left_right(cropped_image)
else:
flipped_image = cropped_image
brightness_min = 1.0 - (random_brightness / 100.0)
brightness_max = 1.0 + (random_brightness / 100.0)
brightness_value = tf.random_uniform(tensor_shape.scalar(),
minval=brightness_min,
maxval=brightness_max)
brightened_image = tf.mul(flipped_image, brightness_value)
distort_result = tf.expand_dims(brightened_image, 0, name='DistortResult')
return jpeg_data, distort_result
def variable_summaries(var, name):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.scalar_summary('mean/' + name, mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_sum(tf.square(var - mean)))
      tf.scalar_summary('stddev/' + name, stddev)
tf.scalar_summary('max/' + name, tf.reduce_max(var))
tf.scalar_summary('min/' + name, tf.reduce_min(var))
tf.histogram_summary(name, var)
def add_final_training_ops(class_count, final_tensor_name, bottleneck_tensor):
"""Adds a new softmax and fully-connected layer for training.
We need to retrain the top layer to identify our new classes, so this function
adds the right operations to the graph, along with some variables to hold the
weights, and then sets up all the gradients for the backward pass.
The set up for the softmax and fully-connected layers is based on:
https://tensorflow.org/versions/master/tutorials/mnist/beginners/index.html
Args:
class_count: Integer of how many categories of things we're trying to
recognize.
final_tensor_name: Name string for the new final node that produces results.
bottleneck_tensor: The output of the main CNN graph.
Returns:
The tensors for the training and cross entropy results, and tensors for the
bottleneck input and ground truth input.
"""
with tf.name_scope('input'):
bottleneck_input = tf.placeholder_with_default(
bottleneck_tensor, shape=[None, BOTTLENECK_TENSOR_SIZE],
name='BottleneckInputPlaceholder')
ground_truth_input = tf.placeholder(tf.float32,
[None, class_count],
name='GroundTruthInput')
# Organizing the following ops as `final_training_ops` so they're easier
# to see in TensorBoard
layer_name = 'final_training_ops'
with tf.name_scope(layer_name):
with tf.name_scope('weights'):
layer_weights = tf.Variable(tf.truncated_normal([BOTTLENECK_TENSOR_SIZE, class_count], stddev=0.001), name='final_weights')
variable_summaries(layer_weights, layer_name + '/weights')
with tf.name_scope('biases'):
layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')
variable_summaries(layer_biases, layer_name + '/biases')
with tf.name_scope('Wx_plus_b'):
logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases
tf.histogram_summary(layer_name + '/pre_activations', logits)
final_tensor = tf.nn.softmax(logits, name=final_tensor_name)
tf.histogram_summary(final_tensor_name + '/activations', final_tensor)
with tf.name_scope('cross_entropy'):
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
logits, ground_truth_input)
with tf.name_scope('total'):
cross_entropy_mean = tf.reduce_mean(cross_entropy)
tf.scalar_summary('cross entropy', cross_entropy_mean)
with tf.name_scope('train'):
train_step = tf.train.GradientDescentOptimizer(FLAGS.learning_rate).minimize(
cross_entropy_mean)
return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input,
final_tensor)
def add_evaluation_step(result_tensor, ground_truth_tensor):
"""Inserts the operations we need to evaluate the accuracy of our results.
Args:
result_tensor: The new final node that produces results.
ground_truth_tensor: The node we feed ground truth data
into.
Returns:
Nothing.
"""
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
      correct_prediction = tf.equal(tf.argmax(result_tensor, 1),
                                    tf.argmax(ground_truth_tensor, 1))
with tf.name_scope('accuracy'):
evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.scalar_summary('accuracy', evaluation_step)
return evaluation_step
def main(_):
# Setup the directory we'll write summaries to for TensorBoard
if tf.gfile.Exists(FLAGS.summaries_dir):
tf.gfile.DeleteRecursively(FLAGS.summaries_dir)
tf.gfile.MakeDirs(FLAGS.summaries_dir)
# Set up the pre-trained graph.
maybe_download_and_extract()
graph, bottleneck_tensor, jpeg_data_tensor, resized_image_tensor = (
create_inception_graph())
# Look at the folder structure, and create lists of all the images.
image_lists = create_image_lists(FLAGS.image_dir, FLAGS.testing_percentage,
FLAGS.validation_percentage)
class_count = len(image_lists.keys())
if class_count == 0:
print('No valid folders of images found at ' + FLAGS.image_dir)
return -1
if class_count == 1:
print('Only one valid folder of images found at ' + FLAGS.image_dir +
' - multiple classes are needed for classification.')
return -1
# See if the command-line flags mean we're applying any distortions.
do_distort_images = should_distort_images(
FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
FLAGS.random_brightness)
sess = tf.Session()
if do_distort_images:
# We will be applying distortions, so setup the operations we'll need.
distorted_jpeg_data_tensor, distorted_image_tensor = add_input_distortions(
FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
FLAGS.random_brightness)
else:
# We'll make sure we've calculated the 'bottleneck' image summaries and
# cached them on disk.
cache_bottlenecks(sess, image_lists, FLAGS.image_dir, FLAGS.bottleneck_dir,
jpeg_data_tensor, bottleneck_tensor)
# Add the new layer that we'll be training.
(train_step, cross_entropy, bottleneck_input, ground_truth_input,
final_tensor) = add_final_training_ops(len(image_lists.keys()),
FLAGS.final_tensor_name,
bottleneck_tensor)
# Create the operations we need to evaluate the accuracy of our new layer.
evaluation_step = add_evaluation_step(final_tensor, ground_truth_input)
# Merge all the summaries and write them out to /tmp/retrain_logs (by default)
merged = tf.merge_all_summaries()
train_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/train',
sess.graph)
validation_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/validation')
# Set up all our weights to their initial default values.
init = tf.initialize_all_variables()
sess.run(init)
# Run the training for as many cycles as requested on the command line.
for i in range(FLAGS.how_many_training_steps):
    # Get a batch of input bottleneck values, either calculated fresh every time
# with distortions applied, or from the cache stored on disk.
if do_distort_images:
train_bottlenecks, train_ground_truth = get_random_distorted_bottlenecks(
sess, image_lists, FLAGS.train_batch_size, 'training',
FLAGS.image_dir, distorted_jpeg_data_tensor,
distorted_image_tensor, resized_image_tensor, bottleneck_tensor)
else:
train_bottlenecks, train_ground_truth = get_random_cached_bottlenecks(
sess, image_lists, FLAGS.train_batch_size, 'training',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
bottleneck_tensor)
# Feed the bottlenecks and ground truth into the graph, and run a training
# step. Capture training summaries for TensorBoard with the `merged` op.
train_summary, _ = sess.run([merged, train_step],
feed_dict={bottleneck_input: train_bottlenecks,
ground_truth_input: train_ground_truth})
train_writer.add_summary(train_summary, i)
# Every so often, print out how well the graph is training.
is_last_step = (i + 1 == FLAGS.how_many_training_steps)
if (i % FLAGS.eval_step_interval) == 0 or is_last_step:
train_accuracy, cross_entropy_value = sess.run(
[evaluation_step, cross_entropy],
feed_dict={bottleneck_input: train_bottlenecks,
ground_truth_input: train_ground_truth})
print('%s: Step %d: Train accuracy = %.1f%%' % (datetime.now(), i,
train_accuracy * 100))
print('%s: Step %d: Cross entropy = %f' % (datetime.now(), i,
cross_entropy_value))
validation_bottlenecks, validation_ground_truth = (
get_random_cached_bottlenecks(
sess, image_lists, FLAGS.validation_batch_size, 'validation',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
bottleneck_tensor))
# Run a validation step and capture training summaries for TensorBoard
# with the `merged` op.
validation_summary, validation_accuracy = sess.run(
[merged, evaluation_step],
feed_dict={bottleneck_input: validation_bottlenecks,
ground_truth_input: validation_ground_truth})
validation_writer.add_summary(validation_summary, i)
print('%s: Step %d: Validation accuracy = %.1f%%' %
(datetime.now(), i, validation_accuracy * 100))
# We've completed all our training, so run a final test evaluation on
# some new images we haven't used before.
test_bottlenecks, test_ground_truth = get_random_cached_bottlenecks(
sess, image_lists, FLAGS.test_batch_size, 'testing',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
bottleneck_tensor)
test_accuracy = sess.run(
evaluation_step,
feed_dict={bottleneck_input: test_bottlenecks,
ground_truth_input: test_ground_truth})
print('Final test accuracy = %.1f%%' % (test_accuracy * 100))
# Write out the trained graph and labels with the weights stored as constants.
output_graph_def = graph_util.convert_variables_to_constants(
sess, graph.as_graph_def(), [FLAGS.final_tensor_name])
with gfile.FastGFile(FLAGS.output_graph, 'wb') as f:
f.write(output_graph_def.SerializeToString())
with gfile.FastGFile(FLAGS.output_labels, 'w') as f:
f.write('\n'.join(image_lists.keys()) + '\n')
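# Illustrative invocation sketch (not part of the original script). The flag
# names match the FLAGS referenced in main(); the script filename and the
# image directory are hypothetical placeholders:
#
#   python retrain_script.py --image_dir /path/to/labeled_photos \
#       --output_graph /tmp/output_graph.pb \
#       --output_labels /tmp/output_labels.txt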
if __name__ == '__main__':
tf.app.run()
| apache-2.0 | 1,741,741,162,625,397,500 | 41.977881 | 129 | 0.666829 | false |
vathpela/anaconda | tests/nosetests/regex_tests/iscsi_name_test.py | 1 | 2690 | #!/usr/bin/python
# vim:set fileencoding=utf-8
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from regexcheck import regex_match
from pyanaconda.core.regexes import ISCSI_IQN_NAME_REGEX, ISCSI_EUI_NAME_REGEX
class iSCSIiqnnameRegexTestCase(unittest.TestCase):
def iqnname_test(self):
good_tests = [
'iqn.2014-15.com.example',
'iqn.2014-15.com.example:iscsi',
'iqn.2014-15.c-om.example:iscsi',
'iqn.2014-15.c.om.example:iscsi',
'iqn.2014-15.com.example:...',
'iqn.2014-15.com.example:iscsi_@nything_except_colon_after_colon!'
]
bad_tests = [
'iqn',
'iqn.',
'iqn.2014-15',
'iqn.2014-15.',
'iqn.2014-15..',
'iqn.2014-15.com.example.',
'iqn.2014-15.com.example...',
'iqn.2014-15.com.example:',
'iqn.2014-15.-com.example',
'iqn.2014-15.com-.example',
'iqn.2014-15.-.example',
'iqn.2014-15.com.example-:iscsi',
'abciqn.2014-15.com.example:iscsi',
'iqn.2014-15.-.example:iscsi',
'iqn.2014-15.com&com.example:iscsi',
'iqn.2014-15.com.example:iscsi:doublecolon',
'iqn.2014-15..om.example:iscsi',
]
if not regex_match(ISCSI_IQN_NAME_REGEX, good_tests, bad_tests):
self.fail()
class iSCSIeuinameRegexTestCase(unittest.TestCase):
def euiname_test(self):
good_tests = [
'eui.ABCDEF0123456789',
'eui.abcdef0123456789',
'eui.0123456789ABCDEF'
]
bad_tests = [
'eui',
'eui.',
'eui.2014-',
            'eui.exampleeui789abc',
'eui.AAAABBBBCCC2345',
'eui.AAAABBBBCCCCD4567'
]
if not regex_match(ISCSI_EUI_NAME_REGEX, good_tests, bad_tests):
self.fail()
| gpl-2.0 | -5,264,170,302,068,901,000 | 35.351351 | 82 | 0.555762 | false |
mburakergenc/Malware-Detection-using-Machine-Learning | cuckoo/analyzer/windows/modules/packages/ppt.py | 1 | 2066 | # Copyright (C) 2010-2013 Claudio Guarnieri.
# Copyright (C) 2014-2016 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
from _winreg import HKEY_CURRENT_USER
from lib.common.abstracts import Package
class PPT(Package):
"""PowerPoint analysis package."""
PATHS = [
("ProgramFiles", "Microsoft Office", "POWERPNT.EXE"),
("ProgramFiles", "Microsoft Office", "Office10", "POWERPNT.EXE"),
("ProgramFiles", "Microsoft Office", "Office11", "POWERPNT.EXE"),
("ProgramFiles", "Microsoft Office", "Office12", "POWERPNT.EXE"),
("ProgramFiles", "Microsoft Office", "Office14", "POWERPNT.EXE"),
("ProgramFiles", "Microsoft Office", "Office15", "POWERPNT.EXE"),
("ProgramFiles", "Microsoft Office", "Office16", "POWERPNT.EXE"),
("ProgramFiles", "Microsoft Office 15", "root", "office15", "POWERPNT.EXE"),
]
REGKEYS = [
[
HKEY_CURRENT_USER,
"Software\\Microsoft\\Office\\12.0\\Common\\General",
{
# "Welcome to the 2007 Microsoft Office system"
"ShownOptIn": 1,
},
],
[
HKEY_CURRENT_USER,
"Software\\Microsoft\\Office\\12.0\\Powerpoint\\Security",
{
# Enable VBA macros in Office 2007.
"VBAWarnings": 1,
"AccessVBOM": 1,
# "The file you are trying to open .xyz is in a different
# format than specified by the file extension. Verify the file
# is not corrupted and is from trusted source before opening
# the file. Do you want to open the file now?"
"ExtensionHardening": 0,
},
],
]
def start(self, path):
powerpoint = self.get_path("Microsoft Office PowerPoint")
return self.execute(
powerpoint, args=["/S", path], mode="office",
trigger="file:%s" % path
)
| mit | 172,675,020,858,053,400 | 37.259259 | 84 | 0.56728 | false |
Notxor/Neuronal | neuronal/capa.py | 1 | 1911 | # -*- coding: utf-8 -*-
# Neuronal - Framework for Neural Networks and Artificial Intelligence
#
# Copyright (C) 2012 dddddd <[email protected]>
# Copyright (C) 2012 Notxor <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from neurona import Neurona
class Capa(object):
"""La capa es un conjunto de neuronas que presenta algunas restricciones.
Dado el problema que representa la 'cuantización' para simular el
procesamiento paralelo las restricciones dentro de una capa son las
siguientes:
1. Las neuronas que pertenezcan a la misma capa no pueden hacer
sinapsis entre sí.
2. Todas las neuronas que pertenezcan a la misma capa deben encontrarse
en el mismo estado: 'cargando' o 'disparando'."""
def __init__(self):
self.neuronas = []
def add_neurona(self, neurona):
"""Añade una neurona a la capa si cumple con las restricciones."""
if (self.cumple_condiciones(neurona)):
neurona.capa = self
self.neuronas.append(neurona)
def numero_neuronas(self):
return len(self.neuronas)
def cumple_condiciones(self, neurona):
condiciones = False
if (isinstance(neurona, Neurona)):
condiciones = True
return condiciones
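# Illustrative usage sketch (not part of the original module). It assumes a
# Neurona can be constructed without arguments, which may not match the real
# constructor in neurona.py:
#
#   capa = Capa()
#   capa.add_neurona(Neurona())
#   capa.add_neurona(Neurona())
#   print(capa.numero_neuronas())   # -> 2 if both neurons were accepted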
| agpl-3.0 | 5,911,570,045,528,917,000 | 38.75 | 79 | 0.702306 | false |
kaleoyster/ProjectNBI | nbi-utilities/data_gen/maps.py | 1 | 9134 | """ Contains all dictionaries and lists from National Bridge Inventory Records"""
__author__ = "Akshay Kale"
__copyright__ = "GPL"
__credit__ = []
__email__ = '[email protected]'
# key: State code
# Value: Abbreviation of the state
code_state_mapping = { '25':'MA',
'04':'AZ',
'08':'CO',
'38':'ND',
'09':'CT',
'19':'IA',
'26':'MI',
'48':'TX',
'35':'NM',
'17':'IL',
'51':'VA',
'23':'ME',
'16':'ID',
'36':'NY',
'56':'WY',
'29':'MO',
'39':'OH',
'28':'MS',
'11':'DC',
'21':'KY',
'18':'IN',
'06':'CA',
'47':'TN',
'12':'FL',
'24':'MD',
'34':'NJ',
'46':'SD',
'13':'GA',
'55':'WI',
'30':'MT',
'54':'WV',
'15':'HI',
'32':'NV',
'37':'NC',
'10':'DE',
'33':'NH',
'44':'RI',
'50':'VT',
'42':'PA',
'05':'AR',
'20':'KS',
'45':'SC',
'22':'LA',
'40':'OK',
'72':'PR',
'41':'OR',
'27':'MN',
'53':'WA',
'01':'AL',
'31':'NE',
'02':'AK',
'49':'UT'
}
# Global Dictionary
# key: Material code according to the NBI
# Value: Name of the type of material
kind_of_material = { 1:'Concrete',
2:'Concrete Continuous',
3:'Steel',
4:'Steel Continuous',
5:'Prestressed Concrete',
6:'Prestressed Concrete Continuous',
7:'Wood or Timber',
8:'Masonry',
9:'Aluminum, Wrought Iron, or Cast Iron',
10:'Other'
}
# Global Dictionary
# key: Deck protection code according to the NBI
# Value: Name of the type of deck protection
deck_protection = {
'1':'Epoxy Coated Reinforcing',
'2':'Galvanized Reinforcing',
'3':'Other Coated Reinforcing',
'4':'Cathodic Protection',
'6':'Polymer Impregnated',
'7':'Internally Sealed',
'8':'Unknown',
'9':'Other',
'0':'None',
'N':'Not Applicable'
}
# Global Dictionary
# key: Structure type code according to the NBI
# Value: Name of the type of structure
structure_type = {
1:'Slab',
2:'Stringer/Multi-beam or Girder',
3:'Girder and Floorbeam System',
4:'Tee Beam',
5:'Box Beam or Girders - Multiple',
6:'Box Beam or Girders - Single or Spread',
7:'Frame (except frame culverts)',
8:'Orthotropic',
9:'Truss - Deck',
10:'Truss - Thru',
11:'Arch - Deck',
12:'Arch - Thru',
13:'Suspension',
14:'Stayed Girder',
15:'Movable - Lift',
16:'Movable - Bascule',
17:'Movable - Swing',
18:'Tunnel',
19:'Culvert (includes f1rame culverts)',
20:'Mixed types',
21:'Segmental Box Girder',
22:'Channel Beam',
0:'Other'
}
# Global Dictionary
# key: Owner type code according to the NBI
# Value: Name of the type of the owner type
owner_essential = {
1: 'State or Highway Agency',
2: 'County Highway Agency',
3: 'Town or Township Highway Agency',
    4: 'City or Municipal Highway Agency'
}
# Global Dictionary
# key: Owner type code according to the NBI
# Value: Name of the type of the owner type
owner = {
1: 'State or Highway Agency',
2: 'County Highway Agency',
3: 'Town or Township Highway Agency',
    4: 'City or Municipal Highway Agency',
11: 'State Park, Forest, or Reservation Agency',
12: 'Local Park, Forest, or Reservation Agency',
21: 'Other State Agencies',
25: 'Other Local Agencies',
26: 'Private (other than railroad)',
27: 'Railroad',
31: 'State Toll Authority',
32: 'Local Toll Authority',
60: 'Other Federal Agencies',
61: 'Indian Tribal Government',
62: 'Bureau of Indian Affairs',
63: 'Bureau of Fish and Wildlife',
64: 'U.S. Forest Service',
66: 'National Park Service',
67: 'Tennessee Valley Authority',
68: 'Bureau of Land Management',
69: 'Bureau of Reclamation',
70: 'Corps of Engineers (Civil)',
71: 'Corps of Engineers (Military)',
72: 'Air Force',
73: 'Navy/Marine',
74: 'Army',
75: 'NASA',
76: 'Metropolitan Washington Airports Service',
80: 'Unknown',
}
# Global Dictionary
# key: Design type code according to the NBI
# Value: Name of the type of the design code
design_load = {
-1: 'NA',
0: 'Other',
1: 'H 10',
2: 'H 15',
3: 'HS 15',
4: 'H 20',
5: 'HS 20',
6: 'HS 20 + Mod',
7: 'Pedestrian',
8: 'Railroad',
9: 'HS 25'
}
# Global Dictionary
# key: Type of wearing surface
# Value: Name of the type of wearing surface
type_of_wearing_surface = {
"1": "Monolithic Concrete (concurrently placed with structural deck)",
"2": "Integral Concrete (separate non-modified layer of concrete added to structural deck)",
"3": "Latex Concrete or similar additive",
"4": "Low Slump Concrete",
"5": "Epoxy Overlay",
"6": "Bituminous",
"7": "Wood or Timber",
"8": "Gravel",
"9": "Other",
"0": "None (no additional concrete thickness or wearing surface is included in the bridge deck)",
"N":"Not Applicable (applies only to structures with no deck",
"NA": "NA",
}
# Global Dictionary
# key: Transition table
# Value: Transition type / Intervention type
from_to_matrix = {
('8', '9'):'Repair',
('7', '9'):'Repair',
('6', '9'):'Repair / Reconstruction',
('5', '9'):'Repair / Reconstruction',
('4', '9'):'Repair / Reconstruction',
('3', '9'):'Repair / Reconstruction',
('2', '9'):'Repair / Reconstruction',
('1', '9'):'Repair / Reconstruction',
('7', '8'):'Repair',
('6', '8'):'Rehabilitation',
('5', '8'):'Repair / Reconstruction',
('4', '8'):'Repair / Reconstruction',
('3', '8'):'Repair / Reconstruction',
('2', '8'):'Repair / Reconstruction',
('1', '8'):'Repair / Reconstruction',
('6', '7'):'Repair',
('5', '7'):'Rehabilitation',
('4', '7'):'Rehabilitation',
('3', '7'):'Rehabilitation',
('2', '7'):'Rehabilitation',
('1', '7'):'Rehabilitation',
('5', '6'):'Repair',
('4', '6'):'Rehabilitation',
('3', '6'):'Rehabilitation',
('2', '6'):'Rehabilitation',
('1', '6'):'Rehabilitation',
('4', '5'):'Repair',
('3', '5'):'Rehabilitation',
('2', '5'):'Rehabilitation',
('1', '5'):'Rehabilitation',
('3', '4'):'Repair',
('2', '4'):'Repair',
('1', '4'):'Repair',
('2', '3'):'Repair',
('1', '2'):'Repair'
}
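# Illustrative lookups (not part of the original module), using values defined
# in the dictionaries above:
#
#   >>> code_state_mapping['31']
#   'NE'
#   >>> kind_of_material[3]
#   'Steel'
#   >>> from_to_matrix[('6', '8')]
#   'Rehabilitation'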
| gpl-2.0 | -5,501,000,904,377,520,000 | 35.682731 | 113 | 0.378476 | false |
freifunk-darmstadt/ffda-jarvis | willie/willie/modules/seen.py | 1 | 1825 | # coding=utf8
"""
seen.py - Willie Seen Module
Copyright 2008, Sean B. Palmer, inamidst.com
Copyright © 2012, Elad Alfassa <[email protected]>
Licensed under the Eiffel Forum License 2.
http://willie.dftba.net
"""
from __future__ import unicode_literals
import time
import datetime
from willie.tools import Identifier, get_timezone, format_time
from willie.module import commands, rule, priority, thread
@commands('seen')
def seen(bot, trigger):
"""Reports when and where the user was last seen."""
if not trigger.group(2):
bot.say(".seen <nick> - Reports when <nick> was last seen.")
return
nick = trigger.group(2).strip()
timestamp = bot.db.get_nick_value(nick, 'seen_timestamp')
if timestamp:
channel = bot.db.get_nick_value(nick, 'seen_channel')
message = bot.db.get_nick_value(nick, 'seen_message')
tz = get_timezone(bot.db, bot.config, None, trigger.nick,
trigger.sender)
saw = datetime.datetime.utcfromtimestamp(timestamp)
timestamp = format_time(bot.db, bot.config, tz, trigger.nick,
trigger.sender, saw)
msg = "I last saw {} at {}".format(nick, timestamp)
if Identifier(channel) == trigger.sender:
msg = msg + " in here, saying " + message
else:
msg += " in another channel."
bot.say(str(trigger.nick) + ': ' + msg)
else:
bot.say("Sorry, I haven't seen {} around.".format(nick))
@thread(False)
@rule('(.*)')
@priority('low')
def note(bot, trigger):
if not trigger.is_privmsg:
bot.db.set_nick_value(trigger.nick, 'seen_timestamp', time.time())
bot.db.set_nick_value(trigger.nick, 'seen_channel', trigger.sender)
bot.db.set_nick_value(trigger.nick, 'seen_message', trigger)
| mit | -5,590,594,171,366,750,000 | 33.415094 | 75 | 0.631579 | false |
fedoraredteam/elem | setup.py | 1 | 1996 | from distutils.core import setup
from distutils.core import Command
import os
import sys
import unittest
import setuptools
class CleanPycCommand(Command):
user_options = []
def initialize_options(self):
"""Abstract method that is required to be overwritten"""
pass
def finalize_options(self):
"""Abstract method that is required to be overwritten"""
pass
def run(self):
dir_path = os.path.dirname(os.path.realpath(__file__))
filenames = [os.path.join(d, x)
for d, _, files in os.walk(dir_path)
for x in files if os.path.splitext(x)[1] == '.pyc']
for filename in filenames:
os.remove(filename)
ELEM_CONF_ENV = 'ELEMCONFPATH'
if os.getenv(ELEM_CONF_ENV):
path = os.getenv(ELEM_CONF_ENV)
elif hasattr(sys, 'real_prefix'):
path = os.path.join(sys.prefix, '.elem')
else:
path = os.path.join(os.path.expanduser("~"), '.elem')
setup(name='elem',
packages=['elem', 'elem.host', 'elem.score', 'elem.vulnerability', 'elem.exploit'],
package_data={'elem': ['config/elem.conf']},
install_requires=['requests', 'python-dateutil', 'argparse', 'cpe', 'redteamcore'],
data_files=[(path, ['elem/config/elem.conf'])],
version='0.3.0',
description='Tool to correlate published CVE\'s against Enterprise Linux against known exploits.',
author='Kenneth Evensen',
author_email='[email protected]',
license='GPLv3',
url='https://github.com/fedoraredteam/elem',
download_url='https://github.com/fedoraredteam/elem/archive/0.3.0.tar.gz',
keywords=['cve', 'exploit', 'linux'],
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Programming Language :: Python :: 2.7',
],
scripts=['bin/elem'],
platforms=['Linux'],
test_suite='tests',
cmdclass={'tidy': CleanPycCommand})
| gpl-3.0 | -1,796,376,005,776,839,000 | 33.413793 | 104 | 0.617735 | false |
hipnusleo/laserjet | resource/pypi/cffi-1.9.1/cffi/verifier.py | 1 | 11834 | #
# DEPRECATED: implementation for ffi.verify()
#
import sys, os, binascii, shutil, io
from . import __version_verifier_modules__
from . import ffiplatform
if sys.version_info >= (3, 3):
import importlib.machinery
def _extension_suffixes():
return importlib.machinery.EXTENSION_SUFFIXES[:]
else:
import imp
def _extension_suffixes():
return [suffix for suffix, _, type in imp.get_suffixes()
if type == imp.C_EXTENSION]
if sys.version_info >= (3,):
NativeIO = io.StringIO
else:
class NativeIO(io.BytesIO):
def write(self, s):
if isinstance(s, unicode):
s = s.encode('ascii')
super(NativeIO, self).write(s)
def _hack_at_distutils():
# Windows-only workaround for some configurations: see
# https://bugs.python.org/issue23246 (Python 2.7 with
# a specific MS compiler suite download)
if sys.platform == "win32":
try:
import setuptools # for side-effects, patches distutils
except ImportError:
pass
class Verifier(object):
def __init__(self, ffi, preamble, tmpdir=None, modulename=None,
ext_package=None, tag='', force_generic_engine=False,
source_extension='.c', flags=None, relative_to=None, **kwds):
if ffi._parser._uses_new_feature:
raise ffiplatform.VerificationError(
"feature not supported with ffi.verify(), but only "
"with ffi.set_source(): %s" % (ffi._parser._uses_new_feature,))
self.ffi = ffi
self.preamble = preamble
if not modulename:
flattened_kwds = ffiplatform.flatten(kwds)
vengine_class = _locate_engine_class(ffi, force_generic_engine)
self._vengine = vengine_class(self)
self._vengine.patch_extension_kwds(kwds)
self.flags = flags
self.kwds = self.make_relative_to(kwds, relative_to)
#
if modulename:
if tag:
raise TypeError("can't specify both 'modulename' and 'tag'")
else:
key = '\x00'.join([sys.version[:3], __version_verifier_modules__,
preamble, flattened_kwds] +
ffi._cdefsources)
if sys.version_info >= (3,):
key = key.encode('utf-8')
k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff)
k1 = k1.lstrip('0x').rstrip('L')
k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff)
k2 = k2.lstrip('0').rstrip('L')
modulename = '_cffi_%s_%s%s%s' % (tag, self._vengine._class_key,
k1, k2)
suffix = _get_so_suffixes()[0]
self.tmpdir = tmpdir or _caller_dir_pycache()
self.sourcefilename = os.path.join(self.tmpdir, modulename + source_extension)
self.modulefilename = os.path.join(self.tmpdir, modulename + suffix)
self.ext_package = ext_package
self._has_source = False
self._has_module = False
def write_source(self, file=None):
"""Write the C source code. It is produced in 'self.sourcefilename',
which can be tweaked beforehand."""
with self.ffi._lock:
if self._has_source and file is None:
raise ffiplatform.VerificationError(
"source code already written")
self._write_source(file)
def compile_module(self):
"""Write the C source code (if not done already) and compile it.
This produces a dynamic link library in 'self.modulefilename'."""
with self.ffi._lock:
if self._has_module:
raise ffiplatform.VerificationError("module already compiled")
if not self._has_source:
self._write_source()
self._compile_module()
def load_library(self):
"""Get a C module from this Verifier instance.
Returns an instance of a FFILibrary class that behaves like the
objects returned by ffi.dlopen(), but that delegates all
operations to the C module. If necessary, the C code is written
and compiled first.
"""
with self.ffi._lock:
if not self._has_module:
self._locate_module()
if not self._has_module:
if not self._has_source:
self._write_source()
self._compile_module()
return self._load_library()
def get_module_name(self):
basename = os.path.basename(self.modulefilename)
# kill both the .so extension and the other .'s, as introduced
# by Python 3: 'basename.cpython-33m.so'
basename = basename.split('.', 1)[0]
# and the _d added in Python 2 debug builds --- but try to be
# conservative and not kill a legitimate _d
if basename.endswith('_d') and hasattr(sys, 'gettotalrefcount'):
basename = basename[:-2]
return basename
def get_extension(self):
_hack_at_distutils() # backward compatibility hack
if not self._has_source:
with self.ffi._lock:
if not self._has_source:
self._write_source()
sourcename = ffiplatform.maybe_relative_path(self.sourcefilename)
modname = self.get_module_name()
return ffiplatform.get_extension(sourcename, modname, **self.kwds)
def generates_python_module(self):
return self._vengine._gen_python_module
def make_relative_to(self, kwds, relative_to):
if relative_to and os.path.dirname(relative_to):
dirname = os.path.dirname(relative_to)
kwds = kwds.copy()
for key in ffiplatform.LIST_OF_FILE_NAMES:
if key in kwds:
lst = kwds[key]
if not isinstance(lst, (list, tuple)):
raise TypeError("keyword '%s' should be a list or tuple"
% (key,))
lst = [os.path.join(dirname, fn) for fn in lst]
kwds[key] = lst
return kwds
# ----------
def _locate_module(self):
if not os.path.isfile(self.modulefilename):
if self.ext_package:
try:
pkg = __import__(self.ext_package, None, None, ['__doc__'])
except ImportError:
return # cannot import the package itself, give up
# (e.g. it might be called differently before installation)
path = pkg.__path__
else:
path = None
filename = self._vengine.find_module(self.get_module_name(), path,
_get_so_suffixes())
if filename is None:
return
self.modulefilename = filename
self._vengine.collect_types()
self._has_module = True
def _write_source_to(self, file):
self._vengine._f = file
try:
self._vengine.write_source_to_f()
finally:
del self._vengine._f
def _write_source(self, file=None):
if file is not None:
self._write_source_to(file)
else:
# Write our source file to an in memory file.
f = NativeIO()
self._write_source_to(f)
source_data = f.getvalue()
# Determine if this matches the current file
if os.path.exists(self.sourcefilename):
with open(self.sourcefilename, "r") as fp:
needs_written = not (fp.read() == source_data)
else:
needs_written = True
# Actually write the file out if it doesn't match
if needs_written:
_ensure_dir(self.sourcefilename)
with open(self.sourcefilename, "w") as fp:
fp.write(source_data)
# Set this flag
self._has_source = True
def _compile_module(self):
# compile this C source
tmpdir = os.path.dirname(self.sourcefilename)
outputfilename = ffiplatform.compile(tmpdir, self.get_extension())
try:
same = ffiplatform.samefile(outputfilename, self.modulefilename)
except OSError:
same = False
if not same:
_ensure_dir(self.modulefilename)
shutil.move(outputfilename, self.modulefilename)
self._has_module = True
def _load_library(self):
assert self._has_module
if self.flags is not None:
return self._vengine.load_library(self.flags)
else:
return self._vengine.load_library()
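# Illustrative sketch of the deprecated flow this class backs (not part of the
# original module). The cdef/preamble strings are hypothetical examples and a
# C compiler is required at run time:
#
#   from cffi import FFI
#   ffi = FFI()
#   ffi.cdef("int add1(int);")
#   lib = ffi.verify("int add1(int x) { return x + 1; }")  # built via Verifier
#   assert lib.add1(41) == 42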
# ____________________________________________________________
_FORCE_GENERIC_ENGINE = False # for tests
def _locate_engine_class(ffi, force_generic_engine):
if _FORCE_GENERIC_ENGINE:
force_generic_engine = True
if not force_generic_engine:
if '__pypy__' in sys.builtin_module_names:
force_generic_engine = True
else:
try:
import _cffi_backend
except ImportError:
_cffi_backend = '?'
if ffi._backend is not _cffi_backend:
force_generic_engine = True
if force_generic_engine:
from . import vengine_gen
return vengine_gen.VGenericEngine
else:
from . import vengine_cpy
return vengine_cpy.VCPythonEngine
# ____________________________________________________________
_TMPDIR = None
def _caller_dir_pycache():
if _TMPDIR:
return _TMPDIR
result = os.environ.get('CFFI_TMPDIR')
if result:
return result
filename = sys._getframe(2).f_code.co_filename
return os.path.abspath(os.path.join(os.path.dirname(filename),
'__pycache__'))
def set_tmpdir(dirname):
"""Set the temporary directory to use instead of __pycache__."""
global _TMPDIR
_TMPDIR = dirname
def cleanup_tmpdir(tmpdir=None, keep_so=False):
"""Clean up the temporary directory by removing all files in it
called `_cffi_*.{c,so}` as well as the `build` subdirectory."""
tmpdir = tmpdir or _caller_dir_pycache()
try:
filelist = os.listdir(tmpdir)
except OSError:
return
if keep_so:
suffix = '.c' # only remove .c files
else:
suffix = _get_so_suffixes()[0].lower()
for fn in filelist:
if fn.lower().startswith('_cffi_') and (
fn.lower().endswith(suffix) or fn.lower().endswith('.c')):
try:
os.unlink(os.path.join(tmpdir, fn))
except OSError:
pass
clean_dir = [os.path.join(tmpdir, 'build')]
for dir in clean_dir:
try:
for fn in os.listdir(dir):
fn = os.path.join(dir, fn)
if os.path.isdir(fn):
clean_dir.append(fn)
else:
os.unlink(fn)
except OSError:
pass
def _get_so_suffixes():
suffixes = _extension_suffixes()
if not suffixes:
# bah, no C_EXTENSION available. Occurs on pypy without cpyext
if sys.platform == 'win32':
suffixes = [".pyd"]
else:
suffixes = [".so"]
return suffixes
def _ensure_dir(filename):
try:
os.makedirs(os.path.dirname(filename))
except OSError:
pass
| apache-2.0 | -3,877,184,796,602,867,700 | 35.449367 | 86 | 0.533885 | false |
termoshtt/unite-bibtex | src/unite_bibtex.py | 1 | 2073 | # -*- coding: utf-8 -*-
import os.path
from pybtex.database.input import bibtex
class unite_bibtex(object):
"""
Name space for unite_bibtex.vim
(not to pollute global name space)
"""
@staticmethod
def _read_file(filename):
parser = bibtex.Parser()
return parser.parse_file(filename)
@staticmethod
def _check_path(path):
path = os.path.abspath(os.path.expanduser(path))
if not os.path.exists(path):
raise RuntimeError("file:%s not found" % path)
return path
@staticmethod
def entry_to_str(entry):
try:
persons = entry.persons[u'author']
authors = [unicode(au) for au in persons]
except:
authors = [u'unknown']
title = entry.fields[u"title"] if u"title" in entry.fields else ""
journal = entry.fields[u"journal"] if u"journal" in entry.fields else ""
year = entry.fields[u"year"] if u"year" in entry.fields else ""
desc = u"%s %s %s(%s)" % (",".join(authors), title, journal, year)
return desc.replace("'", "").replace("\\", "")
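    # Illustrative output shape (not part of the original module): for a
    # hypothetical entry with one author, entry_to_str() returns roughly
    #   u'Doe, John Some Title Some Journal(2001)'
    # with single quotes and backslashes stripped by the replace() calls above.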
@staticmethod
def get_entries(bibpath_list):
entries = {}
for bibpath in bibpath_list:
try:
path = unite_bibtex._check_path(bibpath)
bibdata = unite_bibtex._read_file(path)
except Exception as e:
print("Fail to read {}".format(bibpath))
print("Message: {}".format(str(e)))
continue
for key in bibdata.entries:
try:
k = key.encode("utf-8")
except:
print("Cannot encode bibtex key, skip: {}".format(k))
continue
entries[k] = unite_bibtex.entry_to_str(bibdata.entries[key]).encode("utf-8")
return entries
if __name__ == '__main__':
import sys
bibpath_list = sys.argv[1:]
entries = unite_bibtex.get_entries(bibpath_list)
for k, v in entries.items():
print("{}:{}".format(k, v))
| mit | -5,800,734,522,061,089,000 | 31.904762 | 92 | 0.544139 | false |
luiscarlosgph/t-Student-Mixture-Models | setup.py | 1 | 1594 | #!/usr/bin/env python
import setuptools
import unittest
# Read the contents of the README file
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setuptools.setup(name='smm',
version='0.1.6',
description='t-Student-Mixture-Models',
author='Luis C. Garcia-Peraza Herrera',
author_email='[email protected]',
license='BSD 3-Clause License',
url='https://github.com/luiscarlosgph/t-Student-Mixture-Models',
packages=['smm'],
package_dir={'smm' : 'src'},
test_suite = 'tests',
long_description=long_description,
long_description_content_type='text/markdown',
install_requires = ['numpy>=1.16.5', 'scikit-learn', 'scipy', 'setuptools'],
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Programming Language :: C',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Scientific/Engineering',
'Topic :: Software Development',
],
)
| bsd-3-clause | 4,214,325,765,447,081,000 | 35.227273 | 80 | 0.622334 | false |
arteria/django-favicon-plus | favicon/models.py | 1 | 3314 | from compat import python_2_unicode_compatible
import sys
from django.db import models
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.core.files.uploadedfile import InMemoryUploadedFile
from PIL import Image
from compat import BytesIO
config = {
'shortcut icon': [16, 32, 48, 128, 192],
'touch-icon': [192],
'icon': [192],
'apple-touch-icon': [57, 72, 114, 144, 180],
'apple-touch-icon-precomposed': [57, 72, 76, 114, 120, 144, 152, 180],
}
config = getattr(settings, 'FAVICON_CONFIG', config)
def pre_delete_image(sender, instance, **kwargs):
instance.del_image()
@python_2_unicode_compatible
class Favicon(models.Model):
title = models.CharField(max_length=100)
faviconImage = models.ImageField(upload_to="favicon")
isFavicon = models.BooleanField(default=True)
class Meta:
verbose_name = 'Favicon'
verbose_name_plural = 'Favicons'
def get_favicons(self):
favicons = []
for rel in config:
for size in config[rel]:
favicons.append(self.get_favicon(size, rel))
return favicons
def __str__(self):
return self.faviconImage.name
def get_absolute_url(self):
return "%s" % self.faviconImage.name
def del_image(self):
self.faviconImage.delete()
def get_favicon(self, size, rel, update=False):
"""
        Get or create a FaviconImg for the given size and rel attribute,
        rendered from the uploaded favicon image.
        Optional:
            update=True -- regenerate the image even if one is cached
"""
fav, _ = FaviconImg.objects.get_or_create(
faviconFK=self, size=size, rel=rel)
if update and fav.faviconImage:
fav.del_image()
if self.faviconImage and not fav.faviconImage:
tmp = Image.open(storage.open(self.faviconImage.name))
tmp.thumbnail((size, size), Image.ANTIALIAS)
tmpIO = BytesIO()
tmp.save(tmpIO, format='PNG')
tmpFile = InMemoryUploadedFile(
tmpIO, None, 'fav-%s.png' %
(size,), 'image/png', sys.getsizeof(tmpIO), None)
fav.faviconImage = tmpFile
fav.save()
return fav
def save(self, *args, **kwargs):
update = False
if self.isFavicon:
for n in Favicon.objects.exclude(pk=self.pk):
n.isFavicon = False
n.save()
super(Favicon, self).save(*args, **kwargs)
if self.faviconImage:
for rel in config:
for size in config[rel]:
self.get_favicon(size=size,rel=rel, update=update)
#make sure default favicon is set
self.get_favicon(size=32, rel='shortcut icon')
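# Illustrative usage sketch (not part of the original module); it assumes at
# least one Favicon row exists and that media storage is configured:
#
#   fav = Favicon.objects.get(isFavicon=True)
#   icon = fav.get_favicon(size=57, rel='apple-touch-icon')
#   print(icon.faviconImage.url)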
class FaviconImg(models.Model):
faviconFK = models.ForeignKey(Favicon, on_delete=models.CASCADE)
size = models.IntegerField()
rel = models.CharField(max_length=250, null=True)
faviconImage = models.ImageField(upload_to='favicon')
def del_image(self):
self.faviconImage.delete()
from django.db.models import signals
from django.db.models.signals import pre_delete
from django.dispatch.dispatcher import receiver
signals.pre_delete.connect(pre_delete_image, sender=Favicon)
signals.pre_delete.connect(pre_delete_image, sender=FaviconImg)
| mit | -2,069,037,191,939,895,000 | 28.327434 | 74 | 0.630658 | false |
salvoventura/pyunsplash | pyunsplash/tests/documentation_test.py | 1 | 9927 | ###############################################################################
# Copyright (c) 2017 Salvatore Ventura <[email protected]>
#
# File: documentation_test.py
#
# Author: Salvatore Ventura <[email protected]>
# Date: 07 Sep 2017
# Purpose: Test examples in documentation
#
# Revision: 1
# Comment: What's new in revision 1
# Unlike the main unit-tests, this requires live connection.
# Given the rate limit of 50/hr, these can't be run in a single
# shot; although all issues are fixed, still valuable to keep
# around. Name is purposely not following unit test standard.
#
###############################################################################
from __future__ import print_function
import pyunsplash
import os
import logging
api_key = os.environ.get('APPLICATION_ID', None)
# Initialize app logging
logger = logging.getLogger()
logging.basicConfig(filename='app.log', level=logging.DEBUG)
# pyunsplash logger defaults to level logging.ERROR
# If you need to change that, use getLogger/setLevel
# on the module logger, like this:
logging.getLogger("pyunsplash").setLevel(logging.DEBUG)
def funzione_1():
pu = pyunsplash.PyUnsplash(api_key=api_key)
return pu
def funzione_2(pu):
logger.info('Funzione_2')
this_user = pu.user('salvoventura', w=100, h=100)
def funzione_3(pu):
logger.info('Funzione_3')
# retrieve a page from the featured collections, with a maximum
# of 5 collections per-page
collections_page = pu.collections(type_='featured', per_page=5)
def funzione_4(pu):
logger.info('Funzione_4')
#
#
search = pu.search(type_='photos', query='red,car')
for entry in search.entries:
print(entry.link_html)
def funzione_5(pu):
logger.info('Funzione_5')
stats = pu.stats()
print(stats.total) # this is json
def funzione_6(pu):
logger.info('Funzione_6')
# use the PyUnsplash objects: all logs will be recorded to log file
# API: Class Collection
def funzione_7(pu):
logger.info('Funzione_7')
collections_page = pu.collections(type_='featured', per_page=5)
for collection in collections_page.entries:
collection.refresh()
print(collection.id)
def funzione_8(pu):
logger.info('Funzione_8')
collections_page = pu.collections(type_='featured', per_page=5)
for collection in collections_page.entries:
print(collection.id)
def funzione_9(pu):
logger.info('Funzione_9')
collections_page = pu.collections(type_='featured', per_page=5)
for collection in collections_page.entries:
print(collection.title)
def funzione_10(pu):
logger.info('Funzione_10')
collections_page = pu.collections(type_='featured', per_page=5)
for collection in collections_page.entries:
print(collection.description)
def funzione_11(pu):
logger.info('Funzione_11')
collections_page = pu.collections(type_='featured', per_page=5)
for collection in collections_page.entries:
print(collection.user)
def funzione_12(pu):
logger.info('Funzione_12')
collections_page = pu.collections(type_='featured', per_page=5)
for collection in collections_page.entries:
print(collection.link_photos)
def funzione_13(pu):
logger.info('Funzione_13')
collections_page = pu.collections(type_='featured', per_page=5)
for collection in collections_page.entries:
print(collection.link_related)
def funzione_14(pu):
logger.info('Funzione_14')
collections_page = pu.collections(type_='featured', per_page=5)
for collection in collections_page.entries:
photos = collection.photos(order_by='popular', per_page=3)
for photo in photos.entries:
print(photo.id, photo.link_download)
def funzione_15(pu):
logger.info('Funzione_15')
collections_page = pu.collections(type_='featured', per_page=5)
for collection in collections_page.entries:
related_collections = collection.related
for rel_collection in related_collections.entries:
print(rel_collection.title, rel_collection.description)
# API: Class Collections
def funzione_16(pu):
logger.info('Funzione_16')
this_user = pu.user('salvoventura', w=100, h=100)
collections = this_user.collections(page=1, per_page=5)
for collection in collections.entries:
print(collection.id, collection.title)
# API: Class Photo
def funzione_17(pu):
logger.info('Funzione_17')
this_user = pu.user('salvoventura', w=100, h=100)
photos = this_user.photos() # photos is an instance of class Photos
for photo in photos.entries:
photo.refresh()
print(photo.id, photo.link_download)
def funzione_18(pu):
logger.info('Funzione_18')
this_user = pu.user('salvoventura', w=100, h=100)
photos = this_user.photos() # photos is an instance of class Photos
for photo in photos.entries:
photo.refresh()
print(photo.id, photo.link_download)
def funzione_19(pu):
logger.info('Funzione_19')
this_user = pu.user('salvoventura', w=100, h=100)
photos = this_user.photos() # photos is an instance of class Photos
for photo in photos.entries:
print(photo.id, photo.link_html)
def funzione_20(pu):
logger.info('Funzione_20')
this_user = pu.user('salvoventura', w=100, h=100)
photos = this_user.photos() # photos is an instance of class Photos
for photo in photos.entries:
print(photo.id, photo.link_download)
def funzione_21(pu):
logger.info('Funzione_21')
this_user = pu.user('salvoventura', w=100, h=100)
photos = this_user.photos() # photos is an instance of class Photos
for photo in photos.entries:
print(photo.stats)
# API: Class Photos
def funzione_22(pu):
logger.info('Funzione_22')
this_user = pu.user('salvoventura', w=100, h=100)
photos = this_user.photos() # photos is an instance of class Photos
for photo in photos.entries:
print(photo.id, photo.link_download)
# API: Class Search
def funzione_23(pu):
logger.info('Funzione_23')
search = pu.search(type_='photos', query='red,car')
for photo in search.entries:
print(photo.id, photo.link_download)
# API: Class Stats
def funzione_24(pu):
logger.info('Funzione_24')
stats = pu.stats()
print(stats.total)
# API: Class User
def funzione_25(pu):
logger.info('Funzione_25')
this_user = pu.user('salvoventura', w=100, h=100)
this_user.refresh()
def funzione_26(pu):
logger.info('Funzione_26')
this_user = pu.user('salvoventura', w=100, h=100)
print(this_user.id)
def funzione_27(pu):
logger.info('Funzione_27')
this_user = pu.user('salvoventura', w=100, h=100)
print(this_user.links)
def funzione_28(pu):
logger.info('Funzione_28')
this_user = pu.user('salvoventura', w=100, h=100)
print(this_user.link_html)
def funzione_29(pu):
logger.info('Funzione_29')
this_user = pu.user('salvoventura', w=100, h=100)
print(this_user.link_portfolio)
def funzione_30(pu):
logger.info('Funzione_30')
this_user = pu.user('salvoventura', w=100, h=100)
print(this_user.link_followers)
def funzione_31(pu):
logger.info('Funzione_31')
this_user = pu.user('salvoventura', w=100, h=100)
print(this_user.link_following)
def funzione_32(pu):
logger.info('Funzione_32')
this_user = pu.user('salvoventura', w=100, h=100)
print(this_user.link_photos)
def funzione_33(pu):
logger.info('Funzione_33')
this_user = pu.user('salvoventura', w=100, h=100)
photos = this_user.photos(per_page=5)
for photo in photos.entries:
print(photo.id, photo.link_download)
def funzione_34(pu):
logger.info('Funzione_34')
this_user = pu.user('salvoventura', w=100, h=100)
followers = this_user.followers()
for user in followers.entries:
print(user.id, user.body.get('first_name'), user.body.get('last_name'))
def funzione_35(pu):
logger.info('Funzione_35')
this_user = pu.user('salvoventura', w=100, h=100)
following = this_user.following()
for user in following.entries:
print(user.id, user.body.get('first_name'), user.body.get('last_name'))
def funzione_36(pu):
logger.info('Funzione_36')
this_user = pu.user('salvoventura', w=100, h=100)
photos = this_user.likes(per_page=5)
for photo in photos.entries:
print(photo.id, photo.link_download)
def funzione_37(pu):
logger.info('Funzione_37')
this_user = pu.user('salvoventura', w=100, h=100)
collections = this_user.collections(page=1, per_page=5)
for collection in collections.entries:
print(collection.id, collection.title)
# API: Class Users
def funzione_38(pu):
logger.info('Funzione_38')
this_user = pu.user('salvoventura', w=100, h=100)
followers = this_user.followers() # followers is an instance of class Users
for user in followers.entries:
print(user.id, user.body.get('first_name'), user.body.get('last_name'))
def main():
pu = funzione_1()
# first chunk
funzione_2(pu)
funzione_3(pu)
funzione_4(pu)
funzione_5(pu)
funzione_6(pu)
funzione_7(pu)
funzione_8(pu)
funzione_9(pu)
funzione_10(pu)
funzione_11(pu)
funzione_12(pu)
funzione_13(pu)
funzione_14(pu)
funzione_15(pu)
funzione_16(pu)
funzione_17(pu)
funzione_18(pu)
# second chunk
funzione_19(pu)
funzione_20(pu)
funzione_21(pu)
funzione_22(pu)
funzione_23(pu)
funzione_24(pu)
funzione_25(pu)
funzione_26(pu)
funzione_27(pu)
funzione_28(pu)
funzione_29(pu)
funzione_30(pu)
funzione_31(pu)
funzione_32(pu)
funzione_33(pu)
funzione_34(pu)
funzione_35(pu)
funzione_36(pu)
funzione_37(pu)
funzione_38(pu)
if __name__ == '__main__':
main()
| mit | -1,336,868,179,278,288,600 | 25.975543 | 80 | 0.652664 | false |
xalt/xalt | py_src/xalt_extract_linker.py | 1 | 1691 | #-----------------------------------------------------------------------
# XALT: A tool that tracks users jobs and environments on a cluster.
# Copyright (C) 2013-2015 University of Texas at Austin
# Copyright (C) 2013-2015 University of Tennessee
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free
# Software Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA
#-----------------------------------------------------------------------
#
# Git Version: @git@
from __future__ import print_function
import os, sys, json
dirNm, execName = os.path.split(os.path.realpath(sys.argv[0]))
sys.path.insert(1,os.path.realpath(os.path.join(dirNm, "../libexec")))
sys.path.insert(1,os.path.realpath(os.path.join(dirNm, "../site")))
from xalt_util import extract_compiler
def main():
compiler, full_path_cmplr, link_line = extract_compiler()
resultT = { 'compiler' : compiler,
'full_path' : full_path_cmplr,
'link_line' : link_line
}
jsonStr = json.dumps(resultT)
print(jsonStr)
if ( __name__ == '__main__'): main()
| lgpl-2.1 | 7,246,546,160,335,109,000 | 36.577778 | 72 | 0.641632 | false |
airbnb/caravel | superset/migrations/versions/6c7537a6004a_models_for_email_reports.py | 1 | 4101 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""models for email reports
Revision ID: 6c7537a6004a
Revises: e502db2af7be
Create Date: 2018-05-15 20:28:51.977572
"""
# revision identifiers, used by Alembic.
revision = '6c7537a6004a'
down_revision = 'a61b40f9f57f'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('dashboard_email_schedules',
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('active', sa.Boolean(), nullable=True),
sa.Column('crontab', sa.String(length=50), nullable=True),
sa.Column('recipients', sa.Text(), nullable=True),
sa.Column('deliver_as_group', sa.Boolean(), nullable=True),
sa.Column('delivery_type', sa.Enum('attachment', 'inline', name='emaildeliverytype'), nullable=True),
sa.Column('dashboard_id', sa.Integer(), nullable=True),
sa.Column('created_by_fk', sa.Integer(), nullable=True),
sa.Column('changed_by_fk', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['dashboard_id'], ['dashboards.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['ab_user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_dashboard_email_schedules_active'), 'dashboard_email_schedules', ['active'], unique=False)
op.create_table('slice_email_schedules',
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('active', sa.Boolean(), nullable=True),
sa.Column('crontab', sa.String(length=50), nullable=True),
sa.Column('recipients', sa.Text(), nullable=True),
sa.Column('deliver_as_group', sa.Boolean(), nullable=True),
sa.Column('delivery_type', sa.Enum('attachment', 'inline', name='emaildeliverytype'), nullable=True),
sa.Column('slice_id', sa.Integer(), nullable=True),
sa.Column('email_format', sa.Enum('visualization', 'data', name='sliceemailreportformat'), nullable=True),
sa.Column('created_by_fk', sa.Integer(), nullable=True),
sa.Column('changed_by_fk', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['slice_id'], ['slices.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['ab_user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_slice_email_schedules_active'), 'slice_email_schedules', ['active'], unique=False)
# ### end Alembic commands ###
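# Note (not part of the original migration): this revision is normally applied
# through Alembic / Flask-Migrate rather than executed directly, e.g.:
#
#   superset db upgrade
#
# which runs upgrade() above and creates the two schedule tables and their
# 'active' indexes.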
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_slice_email_schedules_active'), table_name='slice_email_schedules')
op.drop_table('slice_email_schedules')
op.drop_index(op.f('ix_dashboard_email_schedules_active'), table_name='dashboard_email_schedules')
op.drop_table('dashboard_email_schedules')
# ### end Alembic commands ###
| apache-2.0 | -3,753,114,214,703,253,500 | 47.247059 | 119 | 0.691539 | false |
actuaryzhang/spark | python/pyspark/ml/tests/test_feature.py | 1 | 14100 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import unittest
if sys.version > '3':
basestring = str
from pyspark.ml.feature import Binarizer, CountVectorizer, CountVectorizerModel, HashingTF, IDF, \
NGram, RFormula, StopWordsRemover, StringIndexer, StringIndexerModel, VectorSizeHint
from pyspark.ml.linalg import DenseVector, SparseVector, Vectors
from pyspark.sql import Row
from pyspark.testing.utils import QuietTest
from pyspark.testing.mlutils import check_params, SparkSessionTestCase
class FeatureTests(SparkSessionTestCase):
def test_binarizer(self):
b0 = Binarizer()
self.assertListEqual(b0.params, [b0.inputCol, b0.outputCol, b0.threshold])
        self.assertTrue(all([not b0.isSet(p) for p in b0.params]))
self.assertTrue(b0.hasDefault(b0.threshold))
self.assertEqual(b0.getThreshold(), 0.0)
b0.setParams(inputCol="input", outputCol="output").setThreshold(1.0)
self.assertTrue(all([b0.isSet(p) for p in b0.params]))
self.assertEqual(b0.getThreshold(), 1.0)
self.assertEqual(b0.getInputCol(), "input")
self.assertEqual(b0.getOutputCol(), "output")
b0c = b0.copy({b0.threshold: 2.0})
self.assertEqual(b0c.uid, b0.uid)
self.assertListEqual(b0c.params, b0.params)
self.assertEqual(b0c.getThreshold(), 2.0)
b1 = Binarizer(threshold=2.0, inputCol="input", outputCol="output")
self.assertNotEqual(b1.uid, b0.uid)
self.assertEqual(b1.getThreshold(), 2.0)
self.assertEqual(b1.getInputCol(), "input")
self.assertEqual(b1.getOutputCol(), "output")
def test_idf(self):
dataset = self.spark.createDataFrame([
(DenseVector([1.0, 2.0]),),
(DenseVector([0.0, 1.0]),),
(DenseVector([3.0, 0.2]),)], ["tf"])
idf0 = IDF(inputCol="tf")
self.assertListEqual(idf0.params, [idf0.inputCol, idf0.minDocFreq, idf0.outputCol])
idf0m = idf0.fit(dataset, {idf0.outputCol: "idf"})
self.assertEqual(idf0m.uid, idf0.uid,
"Model should inherit the UID from its parent estimator.")
output = idf0m.transform(dataset)
self.assertIsNotNone(output.head().idf)
self.assertIsNotNone(idf0m.docFreq)
self.assertEqual(idf0m.numDocs, 3)
# Test that parameters transferred to Python Model
check_params(self, idf0m)
def test_ngram(self):
dataset = self.spark.createDataFrame([
Row(input=["a", "b", "c", "d", "e"])])
ngram0 = NGram(n=4, inputCol="input", outputCol="output")
self.assertEqual(ngram0.getN(), 4)
self.assertEqual(ngram0.getInputCol(), "input")
self.assertEqual(ngram0.getOutputCol(), "output")
transformedDF = ngram0.transform(dataset)
self.assertEqual(transformedDF.head().output, ["a b c d", "b c d e"])
def test_stopwordsremover(self):
dataset = self.spark.createDataFrame([Row(input=["a", "panda"])])
stopWordRemover = StopWordsRemover(inputCol="input", outputCol="output")
# Default
self.assertEqual(stopWordRemover.getInputCol(), "input")
transformedDF = stopWordRemover.transform(dataset)
self.assertEqual(transformedDF.head().output, ["panda"])
self.assertEqual(type(stopWordRemover.getStopWords()), list)
self.assertTrue(isinstance(stopWordRemover.getStopWords()[0], basestring))
# Custom
stopwords = ["panda"]
stopWordRemover.setStopWords(stopwords)
self.assertEqual(stopWordRemover.getInputCol(), "input")
self.assertEqual(stopWordRemover.getStopWords(), stopwords)
transformedDF = stopWordRemover.transform(dataset)
self.assertEqual(transformedDF.head().output, ["a"])
# with language selection
stopwords = StopWordsRemover.loadDefaultStopWords("turkish")
dataset = self.spark.createDataFrame([Row(input=["acaba", "ama", "biri"])])
stopWordRemover.setStopWords(stopwords)
self.assertEqual(stopWordRemover.getStopWords(), stopwords)
transformedDF = stopWordRemover.transform(dataset)
self.assertEqual(transformedDF.head().output, [])
# with locale
stopwords = ["BELKİ"]
dataset = self.spark.createDataFrame([Row(input=["belki"])])
stopWordRemover.setStopWords(stopwords).setLocale("tr")
self.assertEqual(stopWordRemover.getStopWords(), stopwords)
transformedDF = stopWordRemover.transform(dataset)
self.assertEqual(transformedDF.head().output, [])
def test_count_vectorizer_with_binary(self):
dataset = self.spark.createDataFrame([
(0, "a a a b b c".split(' '), SparseVector(3, {0: 1.0, 1: 1.0, 2: 1.0}),),
(1, "a a".split(' '), SparseVector(3, {0: 1.0}),),
(2, "a b".split(' '), SparseVector(3, {0: 1.0, 1: 1.0}),),
(3, "c".split(' '), SparseVector(3, {2: 1.0}),)], ["id", "words", "expected"])
cv = CountVectorizer(binary=True, inputCol="words", outputCol="features")
model = cv.fit(dataset)
transformedList = model.transform(dataset).select("features", "expected").collect()
for r in transformedList:
feature, expected = r
self.assertEqual(feature, expected)
def test_count_vectorizer_with_maxDF(self):
dataset = self.spark.createDataFrame([
(0, "a b c d".split(' '), SparseVector(3, {0: 1.0, 1: 1.0, 2: 1.0}),),
(1, "a b c".split(' '), SparseVector(3, {0: 1.0, 1: 1.0}),),
(2, "a b".split(' '), SparseVector(3, {0: 1.0}),),
(3, "a".split(' '), SparseVector(3, {}),)], ["id", "words", "expected"])
cv = CountVectorizer(inputCol="words", outputCol="features")
model1 = cv.setMaxDF(3).fit(dataset)
self.assertEqual(model1.vocabulary, ['b', 'c', 'd'])
transformedList1 = model1.transform(dataset).select("features", "expected").collect()
for r in transformedList1:
feature, expected = r
self.assertEqual(feature, expected)
model2 = cv.setMaxDF(0.75).fit(dataset)
self.assertEqual(model2.vocabulary, ['b', 'c', 'd'])
transformedList2 = model2.transform(dataset).select("features", "expected").collect()
for r in transformedList2:
feature, expected = r
self.assertEqual(feature, expected)
def test_count_vectorizer_from_vocab(self):
model = CountVectorizerModel.from_vocabulary(["a", "b", "c"], inputCol="words",
outputCol="features", minTF=2)
self.assertEqual(model.vocabulary, ["a", "b", "c"])
self.assertEqual(model.getMinTF(), 2)
dataset = self.spark.createDataFrame([
(0, "a a a b b c".split(' '), SparseVector(3, {0: 3.0, 1: 2.0}),),
(1, "a a".split(' '), SparseVector(3, {0: 2.0}),),
(2, "a b".split(' '), SparseVector(3, {}),)], ["id", "words", "expected"])
transformed_list = model.transform(dataset).select("features", "expected").collect()
for r in transformed_list:
feature, expected = r
self.assertEqual(feature, expected)
# Test an empty vocabulary
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, "vocabSize.*invalid.*0"):
CountVectorizerModel.from_vocabulary([], inputCol="words")
# Test model with default settings can transform
model_default = CountVectorizerModel.from_vocabulary(["a", "b", "c"], inputCol="words")
transformed_list = model_default.transform(dataset) \
.select(model_default.getOrDefault(model_default.outputCol)).collect()
self.assertEqual(len(transformed_list), 3)
def test_rformula_force_index_label(self):
df = self.spark.createDataFrame([
(1.0, 1.0, "a"),
(0.0, 2.0, "b"),
(1.0, 0.0, "a")], ["y", "x", "s"])
# Does not index label by default since it's numeric type.
rf = RFormula(formula="y ~ x + s")
model = rf.fit(df)
transformedDF = model.transform(df)
self.assertEqual(transformedDF.head().label, 1.0)
# Force to index label.
rf2 = RFormula(formula="y ~ x + s").setForceIndexLabel(True)
model2 = rf2.fit(df)
transformedDF2 = model2.transform(df)
self.assertEqual(transformedDF2.head().label, 0.0)
def test_rformula_string_indexer_order_type(self):
df = self.spark.createDataFrame([
(1.0, 1.0, "a"),
(0.0, 2.0, "b"),
(1.0, 0.0, "a")], ["y", "x", "s"])
rf = RFormula(formula="y ~ x + s", stringIndexerOrderType="alphabetDesc")
self.assertEqual(rf.getStringIndexerOrderType(), 'alphabetDesc')
transformedDF = rf.fit(df).transform(df)
observed = transformedDF.select("features").collect()
expected = [[1.0, 0.0], [2.0, 1.0], [0.0, 0.0]]
for i in range(0, len(expected)):
self.assertTrue(all(observed[i]["features"].toArray() == expected[i]))
def test_string_indexer_handle_invalid(self):
df = self.spark.createDataFrame([
(0, "a"),
(1, "d"),
(2, None)], ["id", "label"])
si1 = StringIndexer(inputCol="label", outputCol="indexed", handleInvalid="keep",
stringOrderType="alphabetAsc")
model1 = si1.fit(df)
td1 = model1.transform(df)
actual1 = td1.select("id", "indexed").collect()
expected1 = [Row(id=0, indexed=0.0), Row(id=1, indexed=1.0), Row(id=2, indexed=2.0)]
self.assertEqual(actual1, expected1)
si2 = si1.setHandleInvalid("skip")
model2 = si2.fit(df)
td2 = model2.transform(df)
actual2 = td2.select("id", "indexed").collect()
expected2 = [Row(id=0, indexed=0.0), Row(id=1, indexed=1.0)]
self.assertEqual(actual2, expected2)
def test_string_indexer_from_labels(self):
model = StringIndexerModel.from_labels(["a", "b", "c"], inputCol="label",
outputCol="indexed", handleInvalid="keep")
self.assertEqual(model.labels, ["a", "b", "c"])
df1 = self.spark.createDataFrame([
(0, "a"),
(1, "c"),
(2, None),
(3, "b"),
(4, "b")], ["id", "label"])
result1 = model.transform(df1)
actual1 = result1.select("id", "indexed").collect()
expected1 = [Row(id=0, indexed=0.0), Row(id=1, indexed=2.0), Row(id=2, indexed=3.0),
Row(id=3, indexed=1.0), Row(id=4, indexed=1.0)]
self.assertEqual(actual1, expected1)
model_empty_labels = StringIndexerModel.from_labels(
[], inputCol="label", outputCol="indexed", handleInvalid="keep")
actual2 = model_empty_labels.transform(df1).select("id", "indexed").collect()
expected2 = [Row(id=0, indexed=0.0), Row(id=1, indexed=0.0), Row(id=2, indexed=0.0),
Row(id=3, indexed=0.0), Row(id=4, indexed=0.0)]
self.assertEqual(actual2, expected2)
# Test model with default settings can transform
model_default = StringIndexerModel.from_labels(["a", "b", "c"], inputCol="label")
df2 = self.spark.createDataFrame([
(0, "a"),
(1, "c"),
(2, "b"),
(3, "b"),
(4, "b")], ["id", "label"])
transformed_list = model_default.transform(df2) \
.select(model_default.getOrDefault(model_default.outputCol)).collect()
self.assertEqual(len(transformed_list), 5)
def test_vector_size_hint(self):
df = self.spark.createDataFrame(
[(0, Vectors.dense([0.0, 10.0, 0.5])),
(1, Vectors.dense([1.0, 11.0, 0.5, 0.6])),
(2, Vectors.dense([2.0, 12.0]))],
["id", "vector"])
sizeHint = VectorSizeHint(
inputCol="vector",
handleInvalid="skip")
sizeHint.setSize(3)
self.assertEqual(sizeHint.getSize(), 3)
output = sizeHint.transform(df).head().vector
expected = DenseVector([0.0, 10.0, 0.5])
self.assertEqual(output, expected)
class HashingTFTest(SparkSessionTestCase):
def test_apply_binary_term_freqs(self):
df = self.spark.createDataFrame([(0, ["a", "a", "b", "c", "c", "c"])], ["id", "words"])
n = 10
hashingTF = HashingTF()
hashingTF.setInputCol("words").setOutputCol("features").setNumFeatures(n).setBinary(True)
output = hashingTF.transform(df)
features = output.select("features").first().features.toArray()
expected = Vectors.dense([1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).toArray()
for i in range(0, n):
self.assertAlmostEqual(features[i], expected[i], 14, "Error at " + str(i) +
": expected " + str(expected[i]) + ", got " + str(features[i]))
if __name__ == "__main__":
from pyspark.ml.tests.test_feature import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 | -5,482,461,792,148,981,000 | 44.044728 | 98 | 0.602738 | false |
silly-wacky-3-town-toon/SOURCE-COD | toontown/shtiker/OptionsPageGUI.py | 1 | 3792 | from direct.gui.DirectGui import DirectButton, DirectLabel
from panda3d.core import TextNode, Vec4
Preloaded = {}
def loadModels():
if Preloaded:
return
gui = loader.loadModel('phase_3.5/models/gui/fishingBook.bam')
Preloaded['tab1'] = gui.find('**/tabs/polySurface1')
Preloaded['tab2'] = gui.find('**/tabs/polySurface2')
gui.removeNode()
del gui
guiButton = loader.loadModel('phase_3/models/gui/quit_button')
Preloaded['button1'] = guiButton.find('**/QuitBtn_UP')
Preloaded['button2'] = guiButton.find('**/QuitBtn_DN')
Preloaded['button3'] = guiButton.find('**/QuitBtn_RLVR')
guiButton.removeNode()
del guiButton
normalColor = (1, 1, 1, 1)
clickColor = (0.8, 0.8, 0, 1)
rolloverColor = (0.15, 0.82, 1.0, 1)
diabledColor = (1.0, 0.98, 0.15, 1)
class OptionTab(DirectButton):
def __init__(self, tabType=2, parent=None, **kw):
loadModels()
if parent is None:
parent = aspect2d
if tabType == 1:
image = Preloaded['tab1']
elif tabType == 2:
image = Preloaded['tab2']
else:
image = None
optiondefs = (
('relief', None, None),
('text_align', TextNode.ALeft, None),
('text_fg', Vec4(0.2, 0.1, 0, 1), None),
('image', image, None),
('image_color', normalColor, None),
('image1_color', clickColor, None),
('image2_color', rolloverColor, None),
('image3_color', diabledColor, None),
('image_scale', (0.033, 0.033, 0.035), None),
('image_hpr', (0, 0, -90), None)
)
self.defineoptions(kw, optiondefs)
DirectButton.__init__(self, parent)
self.initialiseoptions(OptionTab)
buttonbase_xcoord = 0.35
buttonbase_ycoord = 0.45
class OptionButton(DirectButton):
def __init__(self, parent=None, wantLabel=False, z=buttonbase_ycoord, labelZ=None,
labelOrientation='left', labelPos=None, labelText='', image_scale=(0.7, 1, 1), text='', **kw):
loadModels()
if parent is None:
parent = aspect2d
pos = (buttonbase_xcoord, 0, z) if not kw.get('pos') else kw['pos']
optiondefs = (
('relief', None, None),
('image', (Preloaded['button1'], Preloaded['button2'], Preloaded['button3']), None),
('image_scale', image_scale, None),
('text', text, None),
('text_scale', 0.052, None),
('text_pos', (0, -0.02), None),
('pos', pos, None),
)
self.defineoptions(kw, optiondefs)
DirectButton.__init__(self, parent)
self.initialiseoptions(OptionButton)
if wantLabel:
self.label=OptionLabel(parent=self, z=labelZ, pos=labelPos, orientation=labelOrientation,
text=labelText)
titleHeight = 0.61
textStartHeight = 0.45
leftMargin = -0.72
class OptionLabel(DirectLabel):
def __init__(self, parent=None, z=textStartHeight, text_wordwrap=16, text='',
orientation='left', **kw):
loadModels()
if parent is None:
parent = aspect2d
if orientation == 'left':
pos = (leftMargin, 0, z)
text_align = TextNode.ALeft
else:
pos = kw['pos']
text_align = TextNode.ACenter
optiondefs = (
('relief', None, None),
('pos', pos, None),
('text_align', text_align, None),
('text_scale', 0.052, None),
('text_wordwrap', text_wordwrap, None),
('text', text, None)
)
self.defineoptions(kw, optiondefs)
DirectLabel.__init__(self, parent)
self.initialiseoptions(OptionLabel) | apache-2.0 | -5,793,325,956,545,810,000 | 31.144068 | 111 | 0.554325 | false |
ISISComputingGroup/EPICS-inst_servers | CollisionAvoidanceMonitor/configurations/config_larmor.py | 1 | 4210 | from math import radians
from CollisionAvoidanceMonitor.transform import Transformation
import os
# Config happens here:
# Colors for each body
colors = [(0.6, 0.6, 0.6), (1, 0, 1), (1, 1, 0), (0, 1, 1), (0, 1, 0), (1, 0.5, 0), (0.2, 0.2, 1), (1, 1, 1)]
# PV prefix
pv_prefix = os.environ["MYPVPREFIX"]
# PV prefix for controlling the system
control_pv = "{}COLLIDE:".format(pv_prefix)
# Define the geometry of the system in mm
# Coordinate origin at arc centre, with nominal beam height
z_stage = dict(name="Z_Stage", size=(1000.0, 1000.0, 630.0), color=colors[0])
rot_stage = dict(name="Rotation", size=(600.0, 600.0, 165.0), color=colors[1])
bot_arc = dict(name="Bottom_Arc", size=(600.0, 600.0, 120.0), color=colors[2])
top_arc = dict(name="Top_Arc", size=(600.0, 600.0, 120.0), color=colors[3])
fine_z = dict(name="Fine_Z", size=(600.0, 600.0, 120.0), color=colors[4])
y_base = dict(name="Y_Stage", size=(900.0, 1200.0, 50.0), color=colors[4])
y_stage = dict(name="Y_Carriage", size=(600.0, 300.0, 20.0), color=colors[5])
x_stage = dict(name="X_Carriage", size=(520.0, 300.0, 20.0), color=colors[6])
sample = dict(name="Sample", size=(250.0, 250.0, 150.0), color=colors[6])
snout = dict(name="Snout", position=(-300, 0, 0), size=(500, 70, 70), color=colors[7])
slits = dict(name="Slits", position=(450, 0, 0), size=(100, 300, 300), color=colors[7])
# Define some variables to describe the geometry
centre_arc = 750.0
beam_ref = 1625.0
# Define some search parameters
coarse = 20.0
fine = 0.5
# Define the oversized-ness of each body - a global value in mm
oversize = coarse / 4
# List of pairs to ignore [0, 1]...[7, 8]
ignore = []
for i in range(0, 9):
for j in range(i, 9):
ignore.append([i, j])
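# The loop above lists every pair among the nine movable bodies, including
# self-pairs: [0, 0], [0, 1], ..., [8, 8].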
def move_everything(axes):
# Z stage
t = Transformation()
size = axes[0] + z_stage['size'][2]
t.translate(z=-beam_ref + size / 2)
yield t, dict(z=size)
# Rotation
t = Transformation()
t.translate(z=-beam_ref + axes[0] + z_stage['size'][2] + rot_stage['size'][2] / 2)
t.rotate(rz=radians(axes[1]))
yield t
# Bottom arc
t = Transformation()
t.translate(z=-centre_arc - (bot_arc['size'][2] / 2 + top_arc['size'][2]))
t.rotate(ry=radians(axes[2]))
t.translate(z=centre_arc + (bot_arc['size'][2] / 2 + top_arc['size'][2]))
t.translate(z=-beam_ref + axes[0] + z_stage['size'][2] + rot_stage['size'][2] + bot_arc['size'][2] / 2)
t.rotate(rz=radians(axes[1]))
yield t
# Top arc
t = Transformation(t)
t.translate(z=+(centre_arc + top_arc['size'][2] / 2), forward=False)
t.rotate(rx=radians(axes[3]), forward=False)
t.translate(z=-(centre_arc + top_arc['size'][2] / 2), forward=False)
t.translate(z=top_arc['size'][2] / 2 + bot_arc['size'][2] / 2, forward=False)
yield t
# Fine Z
u = Transformation(t)
size = axes[4] + fine_z['size'][2]
u.translate(z=size / 2 + top_arc['size'][2] / 2, forward=False)
yield u, dict(z=size)
# Base of Y stage (top of fine Z)
t = Transformation(t)
size = axes[4] + fine_z['size'][2]
t.translate(z=size + top_arc['size'][2] / 2 + y_base['size'][2] / 2, forward=False)
yield t
# Y stage
t = Transformation(t)
t.translate(y=axes[5], z=y_base['size'][2] / 2 + y_stage['size'][2] / 2, forward=False)
yield t
# X stage
t = Transformation(t)
t.translate(x=axes[6], z=y_stage['size'][2] / 2 + x_stage['size'][2] / 2, forward=False)
yield t
# Sample
t = Transformation(t)
t.translate(z=x_stage['size'][2] / 2 + sample['size'][2] / 2, forward=False)
yield t
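# Illustrative only: how the generator might be consumed for one snapshot of the
# seven axis readbacks (axis values below are made up); items are Transformation
# objects, optionally paired with a dict of size overrides for resizable bodies.
#   for item in move_everything([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]):
#       transform = item[0] if isinstance(item, tuple) else item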
moves = move_everything
# Put them in a list
geometries = [z_stage, rot_stage, bot_arc, top_arc, fine_z, y_base, y_stage, x_stage, sample, snout, slits]
# Attach monitors to readbacks
pvs = ["{}MOT:MTR0101",
"{}MOT:MTR0102",
"{}MOT:MTR0103",
"{}MOT:MTR0104",
"{}MOT:MTR0105",
"{}MOT:MTR0106",
"{}MOT:MTR0107"]
pvs = [pv.format(pv_prefix) for pv in pvs]
hardlimits = [[-220, 100],
[-180.0, 180.0],
[-20, 20.0],
[-20.0, 20.0],
[0.0, 30.0],
[-300, 300],
[-37.5, 37.5]]
| bsd-3-clause | 8,660,980,917,994,422,000 | 27.255034 | 109 | 0.585273 | false |
mertyildiran/Cerebrum | cerebrum/hearing/utilities.py | 1 | 1960 | __author__ = 'Mehmet Mert Yildiran, [email protected]'
import rethinkdb as r # Rethinkdb Python driver
# Memory class
class Memory(object):
def __init__(self, starting_time, ending_time, data): # Initialize the object
self.starting_time = starting_time # Starting time attribute
self.ending_time = ending_time # Ending time attribute
self.data = data # Data attribute
# Timestamp class
class Timestamp(object):
def __init__(self, starting_time, ending_time): # Initialize the object
self.starting_time = starting_time # Starting time attribute
self.ending_time = ending_time # Ending time attribute
# Convert object to dictionary
def makeit_dict(obj):
if isinstance(obj, set):
return list(obj)
return obj.__dict__
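# Illustrative usage (assumption about intent): makeit_dict is shaped like a
# json.dumps "default" hook, e.g. json.dumps(obj, default=makeit_dict).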
class HearingMemoryUtil():
# Add a memory function
@staticmethod
def add_memory(data, starting_time, ending_time):
conn = r.connect("localhost", 28015)
r.db('test').table("hearing_memory").insert([
{ "starting_time": starting_time.strftime("%Y-%m-%d %H:%M:%S.%f"),
"ending_time": ending_time.strftime("%Y-%m-%d %H:%M:%S.%f"),
"data": r.binary(data)
}
]).run(conn)
r.db('test').table("hearing_timestamps").insert([
{ "starting_time": starting_time.strftime("%Y-%m-%d %H:%M:%S.%f"),
"ending_time": ending_time.strftime("%Y-%m-%d %H:%M:%S.%f")
}
]).run(conn)
conn.close()
# Get a memory function
@staticmethod
def get_memory(starting_time):
conn = r.connect("localhost", 28015)
cursor = r.db('test').table("hearing_memory").filter({'starting_time': starting_time}).run(conn)
#r.db('test').table("hearing_memory").filter({'starting_time': starting_time}).delete().run(conn)
conn.close()
return cursor
# Get timestamps function
@staticmethod
def get_timestamps():
conn = r.connect("localhost", 28015)
cursor = r.db('test').table("hearing_timestamps").run(conn)
r.db('test').table("hearing_timestamps").delete().run(conn)
conn.close()
return cursor
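# Illustrative flow (values are hypothetical): store a captured chunk, then read it
# back by its starting-time string.
#   HearingMemoryUtil.add_memory(chunk, t_start, t_end)
#   rows = HearingMemoryUtil.get_memory(t_start.strftime("%Y-%m-%d %H:%M:%S.%f"))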
| mit | -5,825,145,654,931,058,000 | 32.220339 | 99 | 0.681122 | false |
AndreasHeger/alignlib | python/tests/test_MultAlignment.py | 1 | 9238 | # alignlib - a library for aligning protein sequences
#
# $Id: test_Alignment.py,v 1.3 2004/01/23 17:34:58 aheger Exp $
#
# Copyright (C) 2004 Andreas Heger
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
import unittest, sys, os
from alignlib import *
class MultAlignmentTestCase( unittest.TestCase ):
mReferenceSequence = "0123456789"
mNumSequences = 3
def setUp( self ):
self.mAlignandum = makeSequence( self.mReferenceSequence )
self.mContainer = makeAlignmentBlocks()
def constructMali(self):
mali = makeMultAlignment()
ali = self.mContainer.getNew()
ali.addDiagonal( 0,3,+2 );
ali.addDiagonal( 3,6,+4 );
mali.add( ali );
ali = self.mContainer.getNew()
ali.addDiagonal( 0,1,+1 );
ali.addDiagonal( 1,6,+3 );
mali.add( ali );
mali.add( ali );
seqs = StringVector()
for x in range( self.mNumSequences):
seqs.append( self.mReferenceSequence )
return mali, seqs
def testBuild(self):
mali, seqs = self.constructMali()
self.assertEqual( mali.getNumSequences(), len(seqs) )
self.assertEqual( mali.getLength(), 6 )
def testExpandSimple(self):
"""expand mali without sequences."""
mali, seqs = self.constructMali()
mali.expand( AlignandumVector() )
format = MultAlignmentFormatPlain( mali, seqs )
result = [ x.split("\t") for x in str(format).split("\n") ]
self.assertEqual( result[0], ["2", "2----3456789", "10" ] )
self.assertEqual( result[1], ["1", "123--45--678", "9" ] )
self.assertEqual( result[2], ["1", "1--2345--678", "9" ] )
def testExpandFull(self):
"""expand mali with sequences."""
mali, seqs = self.constructMali()
v = AlignandumVector()
for x in seqs: v.append( makeSequence(x) )
mali.expand( v )
format = MultAlignmentFormatPlain( mali, seqs )
result = [ x.split("\t") for x in str(format).split("\n") ]
self.assertEqual( result[0], ["0", "01--2----3456789--", "10" ] )
self.assertEqual( result[1], ["0", "--0-123--45--6789-", "10" ] )
self.assertEqual( result[2], ["0", "---01--2345--678-9", "10" ] )
def testGetGapsSum(self):
"""test the gap count function."""
mali, seqs = self.constructMali()
counts = mali.getGapCounts( AlignandumVector(), AggSum )
self.assertEqual( tuple(counts), (0,4,0,2,0,0,0) )
def testGetGapsCount(self):
"""test the gap count function."""
mali, seqs = self.constructMali()
counts = mali.getGapCounts( AlignandumVector(), AggCount )
self.assertEqual( tuple(counts), (0,2,0,1,0,0,0) )
def testGetGapsMin(self):
"""test the gap count function."""
mali, seqs = self.constructMali()
counts = mali.getGapCounts( AlignandumVector(), AggMin )
self.assertEqual( tuple(counts[1:-1]), (0,0,0,0,0) )
def testGetGapsMax(self):
"""test the gap count function."""
mali, seqs = self.constructMali()
counts = mali.getGapCounts( AlignandumVector(), AggMax )
self.assertEqual( tuple(counts), (0,2,0,2,0,0,0) )
def testGetGapsSumFull(self):
"""test the gap count function."""
mali, seqs = self.constructMali()
v = AlignandumVector()
for x in seqs: v.append( makeSequence(x) )
counts = mali.getGapCounts( v, AggSum )
self.assertEqual( tuple(counts), (4,4,0,2,0,0,2) )
def testGetGapsCountFull(self):
"""test the gap count function."""
mali, seqs = self.constructMali()
v = AlignandumVector()
for x in seqs: v.append( makeSequence(x) )
counts = mali.getGapCounts( v, AggCount )
self.assertEqual( tuple(counts), (3,2,0,1,0,0,2) )
def testGetGapsMinFull(self):
"""test the gap count function."""
mali, seqs = self.constructMali()
v = AlignandumVector()
for x in seqs: v.append( makeSequence(x) )
counts = mali.getGapCounts( v, AggMin )
self.assertEqual( tuple(counts), (1,0,0,0,0,0,0) )
def testGetGapsMaxFull(self):
"""test the gap count function."""
mali, seqs = self.constructMali()
v = AlignandumVector()
for x in seqs: v.append( makeSequence(x) )
counts = mali.getGapCounts( v, AggMax )
self.assertEqual( tuple(counts), (2,2,0,2,0,0,1) )
def testMatrix(self):
mali, seqs = self.constructMali()
test_matrix = ( "234789",
"145678",
"145678")
matrix = mali.getPositionMatrix()
self.assertEqual( matrix.getNumRows(), len(test_matrix) )
self.assertEqual( matrix.getNumCols(), len(test_matrix[0]) )
for x in range( len(test_matrix ) ):
for y in range( len(test_matrix[0] ) ):
self.assertEqual( matrix.getValue( x, y), int(test_matrix[x][y]) )
def testRealign(self):
"""test realignment."""
mali, seqs = self.constructMali()
v = AlignandumVector()
seqs = [ "IIACDIIEFG" ,
"IAILCDEFGI" ,
"KALKCDEFGK" ,
]
for x in seqs: v.append( makeSequence(x) )
counts = mali.getGapCounts( v, AggCount )
ma = makeMultipleAlignatorSimple( makeAlignatorDPFull( ALIGNMENT_LOCAL, 0, 0 ) )
map_old2new = makeAlignmentVector()
offset = 0
fragments = []
for col in range(len(counts)):
# realign columns with more than one sequence with
# unaligned preceding residues
if counts[col] > 1:
for s in range(len(seqs)):
ali = mali.getRow( s )
y = col - 1
while y >= 0 and ali.mapRowToCol( y ) < 0:
y -= 1
if y < 0: start = 0
else: start = ali.mapRowToCol( y ) + 1
if col == mali.getLength(): end = len(seqs[s])
else: end = ali.mapRowToCol( col )
v[s].useSegment( start, end )
result = makeMultAlignment()
ma.align( result, v )
# sort out where the fragment belongs and move
# into the right place
l = result.getLength()
result.move( col + offset )
fragments.append( result )
offset += l
map_old2new.addPair( col, col+offset )
# insert gaps into the original
mali.map( map_old2new, RC )
# merge the partial alignments inside
for fragment in fragments:
mali.merge( fragment )
format = MultAlignmentFormatPlain( mali, v )
result = [ x.split("\t") for x in str(format).split("\n") ]
self.assertEqual( result[0], ['0', 'II-A---CDEFG--', '10'] )
self.assertEqual( result[1], ['0', 'I--AIL-CDEFGI-', '10'] )
self.assertEqual( result[2], ['0', '--KA-LKCDEFG-K', '10'] )
class MultAlignmentBlocksTestCase( MultAlignmentTestCase ):
def setUp( self ):
MultAlignmentTestCase.setUp( self )
self.mContainer = makeAlignmentBlocks()
class MultAlignmentSetTestCase( MultAlignmentTestCase ):
def setUp( self ):
MultAlignmentTestCase.setUp( self )
self.mContainer = makeAlignmentSet()
class MultAlignmentHashTestCase( MultAlignmentTestCase ):
def setUp( self ):
MultAlignmentTestCase.setUp( self )
self.mContainer = makeAlignmentHash()
class MultAlignmentSetColTestCase( MultAlignmentTestCase ):
def setUp( self ):
MultAlignmentTestCase.setUp( self )
self.mContainer = makeAlignmentSetCol()
class MultAlignmentHashDiagonalTestCase( MultAlignmentTestCase ):
def setUp( self ):
MultAlignmentTestCase.setUp( self )
self.mContainer = makeAlignmentHashDiagonal()
def suite():
suite = unittest.TestSuite()
suite.addTest(MultAlignmentTestCase)
suite.addTest(MultAlignmentBlocksTestCase )
suite.addTest(MultAlignmentSetTestCase)
suite.addTest(MultAlignmentHashTestCase)
suite.addTest(MultAlignmentSetColTestCase)
suite.addTest(MultAlignmentHashDiagonalTestCase)
return suite
if __name__ == "__main__":
unittest.main()
| gpl-2.0 | -4,594,516,625,187,176,000 | 35.513834 | 88 | 0.582485 | false |
sjTaylor/cmd_queue | client.py | 1 | 2997 | import socket
import select
import codes
import funs
import os
import subprocess
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('server_ip', type=str, help='address of the server (e.g. 198.123.1.3)')
parser.add_argument('--server_port', type=int, default=12345, required=False, help='port server is listening on')
args = parser.parse_args()
myid = 0
output_prefix = None
padding = None
try:
connection = socket.create_connection((args.server_ip, args.server_port))
except:
print('could not connect to server', flush=True)
raise SystemError
running = True
cmd_timeout = None
log = funs.get_logger(__name__)
while running:
readable, foo1, foo2 = select.select([connection], [], [], 2)
for qq in funs.getinput():
if 'exit' in qq:
running = False
funs.send(connection, funs.encode(codes.disconnecting))
for s in readable:
message = funs.recv(s)
code, data = funs.decode(message)
json_data = data
if code == codes.send_config:
assert 'client_id' in json_data and 'working_directory' in json_data and 'output_prefix' in json_data
assert 'padding' in json_data and 'timeout' in json_data
os.chdir(json_data['working_directory'])
myid = json_data['client_id']
output_prefix = json_data['output_prefix']
padding = json_data['padding']
cmd_timeout = json_data['timeout']
elif code == codes.send_cmd:
assert 'command' in json_data and 'cmd_number' in json_data
command = json_data['command']
cmdnumber = json_data['cmd_number']
            log.info('Received command number : %d' % cmdnumber)
log.info('--executing : %s' % command)
log.info('will write out to: |%s|' % funs.do_dir(output_prefix, padding, 'stdout', cmdnumber))
log.info('will write err to: |%s|' % funs.do_dir(output_prefix, padding, 'stderr', cmdnumber))
with open(funs.do_dir(output_prefix, padding, 'stdout', cmdnumber), 'w') as sstdout:
with open(funs.do_dir(output_prefix, padding, 'stderr', cmdnumber), 'w') as sstderr:
return_code = subprocess.call(command,
shell=True,
stdout=sstdout,
stderr=sstderr,
timeout=cmd_timeout)
if return_code is None:
log.info('--return_code is None')
return_code = 1
# cmd number, client id, return code
funs.send(connection, funs.encode(codes.finished, (cmdnumber, myid, return_code)))
if code == codes.exiting:
log.info('got signal to stop and shut down')
running = False
else:
funs.send(connection, funs.encode(codes.idle))
connection.close()
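# Illustrative invocation (host and port are placeholders):
#   python client.py 198.123.1.3 --server_port 12345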
| mit | -2,199,648,740,256,074,200 | 36 | 113 | 0.575909 | false |
jrconlin/server-key-exchange | keyexchange/util.py | 1 | 3643 | # ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is Sync Server
#
# The Initial Developer of the Original Code is the Mozilla Foundation.
# Portions created by the Initial Developer are Copyright (C) 2010
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Tarek Ziade ([email protected])
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
""" Various helpers.
"""
import json
from webob import Response
from services.util import randchar
CID_CHARS = '23456789abcdefghijkmnpqrstuvwxyz'
def json_response(data, dump=True, **kw):
"""Returns Response containing a json string"""
if dump:
data = json.dumps(data)
return Response(data, content_type='application/json', **kw)
def generate_cid(size=4):
"""Returns a random channel id."""
return ''.join([randchar(CID_CHARS) for i in range(size)])
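# For illustration: generate_cid() returns a 4-character id drawn from CID_CHARS
# (e.g. something like 'k3vp'); generate_cid(8) returns an 8-character one.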
class MemoryClient(dict):
"""Fallback if a memcache client is not installed.
"""
def __init__(self, servers):
pass
def set(self, key, value, time=0):
self[key] = value
return True
cas = set
def add(self, key, value, time=0):
if key in self:
return False
self[key] = value
return True
def replace(self, key, value, time=0):
if key not in self:
return False
self[key] = value
return True
def delete(self, key):
        if key not in self:
return True # that's how memcache libs do...
del self[key]
return True
def incr(self, key):
val = self[key]
self[key] = str(int(val) + 1)
class PrefixedCache(object):
def __init__(self, cache, prefix=''):
self.cache = cache
        self.prefix = prefix
def incr(self, key):
return self.cache.incr(self.prefix + key)
def get(self, key):
return self.cache.get(self.prefix + key)
def set(self, key, value, **kw):
return self.cache.set(self.prefix + key, value, **kw)
def delete(self, key):
return self.cache.delete(self.prefix + key)
def add(self, key, value, **kw):
return self.cache.add(self.prefix + key, value, **kw)
def get_memcache_class(memory=False):
"""Returns the memcache class."""
if memory:
return MemoryClient
import memcache
return memcache.Client
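# Illustrative wiring (server address and prefix are placeholders):
#   client_class = get_memcache_class(memory=True)
#   cache = PrefixedCache(client_class(['127.0.0.1:11211']), prefix='keyexchange:')
#   cache.set(generate_cid(), 'channel-data', time=300)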
| mpl-2.0 | -8,685,065,241,772,336,000 | 29.613445 | 77 | 0.664562 | false |
jddixon/bindex | setup.py | 1 | 1138 | #!/usr/bin/python3
# bindex/setup.py
""" Setuptools project configuration for bindex. """
from os.path import exists
from setuptools import setup
LONG_DESC = None
if exists('README.md'):
with open('README.md', 'r') as file:
LONG_DESC = file.read()
setup(name='bindex',
version='0.0.24',
author='Jim Dixon',
author_email='[email protected]',
long_description=LONG_DESC,
packages=['bindex'],
package_dir={'': 'src'},
py_modules=[],
include_package_data=False,
zip_safe=False,
scripts=[],
description='index content-keyed files',
url='https://jddixon.github.io/bindex',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python 2.7',
'Programming Language :: Python 3.5',
'Programming Language :: Python 3.6',
'Programming Language :: Python 3.7',
'Topic :: Software Development :: Libraries :: Python Modules',
],)
| mit | -3,196,861,824,986,049,500 | 29.756757 | 73 | 0.593146 | false |
a2ialabelme/LabelMeAnnotationTool | toolBar.py | 1 | 1837 | #
# Copyright (C) 2011 Michael Pitidis, Hussein Abdulwahid.
#
# This file is part of Labelme.
#
# Labelme is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Labelme is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Labelme. If not, see <http://www.gnu.org/licenses/>.
#
#from PyQt4.QtGui import *
#from PyQt4.QtCore import *
from PySide.QtGui import *
from PySide.QtCore import *
class ToolBar(QToolBar):
def __init__(self, title):
super(ToolBar, self).__init__(title)
layout = self.layout()
m = (0, 0, 0, 0)
layout.setSpacing(0)
layout.setContentsMargins(*m)
self.setContentsMargins(*m)
self.setWindowFlags(self.windowFlags() | Qt.FramelessWindowHint)
def addAction(self, action):
if isinstance(action, QWidgetAction):
return super(ToolBar, self).addAction(action)
btn = ToolButton()
btn.setDefaultAction(action)
btn.setToolButtonStyle(self.toolButtonStyle())
self.addWidget(btn)
class ToolButton(QToolButton):
"""ToolBar companion class which ensures all buttons have the same size."""
minSize = (60, 60)
def minimumSizeHint(self):
ms = super(ToolButton, self).minimumSizeHint()
w1, h1 = ms.width(), ms.height()
w2, h2 = self.minSize
ToolButton.minSize = max(w1, w2), max(h1, h2)
return QSize(*ToolButton.minSize)
| gpl-3.0 | 8,428,212,745,408,746,000 | 33.660377 | 79 | 0.684268 | false |
yochow/autotest | client/common_lib/control_data_unittest.py | 1 | 4595 | #!/usr/bin/python
import os, sys, unittest, tempfile
import common
from autotest_lib.client.common_lib import control_data
ControlData = control_data.ControlData
CONTROL = """
AUTHOR = 'Author'
DEPENDENCIES = "console, power"
DOC = \"\"\"\
doc stuff\"\"\"
# EXPERIMENTAL should implicitly be False
NAME = 'nA' "mE"
RUN_VERIFY = False
SYNC_COUNT = 2
TIME='short'
TEST_CLASS=u'Kernel'
TEST_CATEGORY='Stress'
TEST_TYPE='client'
"""
class ParseControlTest(unittest.TestCase):
def setUp(self):
fp, self.control_file = tempfile.mkstemp(text=True)
os.write(fp, CONTROL)
os.close(fp)
def tearDown(self):
os.remove(self.control_file)
def test_parse_control(self):
cd = control_data.parse_control(self.control_file, True)
self.assertEquals(cd.author, "Author")
self.assertEquals(cd.dependencies, set(['console', 'power']))
self.assertEquals(cd.doc, "doc stuff")
self.assertEquals(cd.experimental, False)
self.assertEquals(cd.name, "nAmE")
self.assertEquals(cd.run_verify, False)
self.assertEquals(cd.sync_count, 2)
self.assertEquals(cd.time, "short")
self.assertEquals(cd.test_class, "kernel")
self.assertEquals(cd.test_category, "stress")
self.assertEquals(cd.test_type, "client")
class SetMethodTests(unittest.TestCase):
def setUp(self):
self.required_vars = control_data.REQUIRED_VARS
control_data.REQUIRED_VARS = set()
def tearDown(self):
control_data.REQUIRED_VARS = self.required_vars
def test_bool(self):
cd = ControlData({}, 'filename')
cd._set_bool('foo', 'False')
self.assertEquals(cd.foo, False)
cd._set_bool('foo', True)
self.assertEquals(cd.foo, True)
cd._set_bool('foo', 'FALSE')
self.assertEquals(cd.foo, False)
cd._set_bool('foo', 'true')
self.assertEquals(cd.foo, True)
self.assertRaises(ValueError, cd._set_bool, 'foo', '')
self.assertRaises(ValueError, cd._set_bool, 'foo', 1)
self.assertRaises(ValueError, cd._set_bool, 'foo', [])
self.assertRaises(ValueError, cd._set_bool, 'foo', None)
def test_int(self):
cd = ControlData({}, 'filename')
cd._set_int('foo', 0)
self.assertEquals(cd.foo, 0)
cd._set_int('foo', '0')
self.assertEquals(cd.foo, 0)
cd._set_int('foo', '-1', min=-2, max=10)
self.assertEquals(cd.foo, -1)
self.assertRaises(ValueError, cd._set_int, 'foo', 0, min=1)
self.assertRaises(ValueError, cd._set_int, 'foo', 1, max=0)
self.assertRaises(ValueError, cd._set_int, 'foo', 'x')
self.assertRaises(ValueError, cd._set_int, 'foo', '')
self.assertRaises(TypeError, cd._set_int, 'foo', None)
def test_set(self):
cd = ControlData({}, 'filename')
cd._set_set('foo', 'a')
self.assertEquals(cd.foo, set(['a']))
cd._set_set('foo', 'a,b,c')
self.assertEquals(cd.foo, set(['a', 'b', 'c']))
cd._set_set('foo', ' a , b , c ')
self.assertEquals(cd.foo, set(['a', 'b', 'c']))
cd._set_set('foo', None)
self.assertEquals(cd.foo, set(['None']))
def test_string(self):
cd = ControlData({}, 'filename')
cd._set_string('foo', 'a')
self.assertEquals(cd.foo, 'a')
cd._set_string('foo', 'b')
self.assertEquals(cd.foo, 'b')
cd._set_string('foo', 'B')
self.assertEquals(cd.foo, 'B')
cd._set_string('foo', 1)
self.assertEquals(cd.foo, '1')
cd._set_string('foo', None)
self.assertEquals(cd.foo, 'None')
cd._set_string('foo', [])
self.assertEquals(cd.foo, '[]')
def test_option(self):
options = ['a', 'b']
cd = ControlData({}, 'filename')
cd._set_option('foo', 'a', options)
self.assertEquals(cd.foo, 'a')
cd._set_option('foo', 'b', options)
self.assertEquals(cd.foo, 'b')
cd._set_option('foo', 'B', options)
self.assertEquals(cd.foo, 'B')
self.assertRaises(ValueError, cd._set_option,
'foo', 'x', options)
self.assertRaises(ValueError, cd._set_option,
'foo', 1, options)
self.assertRaises(ValueError, cd._set_option,
'foo', [], options)
self.assertRaises(ValueError, cd._set_option,
'foo', None, options)
# this is so the test can be run in standalone mode
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -6,934,877,084,798,846,000 | 31.588652 | 69 | 0.576279 | false |
patochectp/navitia | source/jormungandr/jormungandr/__init__.py | 1 | 3983 | # encoding: utf-8
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
import logging
import logging.config
import os
from flask import Flask, got_request_exception
from flask_restful import Api
from flask_caching import Cache
from flask_cors import CORS
import sys
import six
from jormungandr import init
app = Flask(__name__) # type: Flask
init.load_configuration(app)
init.logger(app)
# we want to patch gevent as early as possible
if app.config.get(str('PATCH_WITH_GEVENT_SOCKET'), False):
init.patch_http()
from jormungandr import new_relic
new_relic.init(app.config.get(str('NEWRELIC_CONFIG_PATH'), None))
from jormungandr.exceptions import log_exception
from jormungandr.helper import ReverseProxied, NavitiaRequest, NavitiaRule
from jormungandr import compat, utils
app.url_rule_class = NavitiaRule
app.request_class = NavitiaRequest
CORS(
app,
vary_headers=True,
allow_credentials=True,
send_wildcard=False,
headers=['Access-Control-Request-Headers', 'Authorization'],
)
app.config[str('CORS_HEADERS')] = 'Content-Type'
app.wsgi_app = ReverseProxied(app.wsgi_app) # type: ignore
got_request_exception.connect(log_exception, app)
# we want the old behavior for reqparse
compat.patch_reqparse()
rest_api = Api(app, catch_all_404s=True, serve_challenge_on_401=True)
from navitiacommon.models import db
db.init_app(app)
cache = Cache(app, config=app.config[str('CACHE_CONFIGURATION')]) # type: Cache
memory_cache = Cache(app, config=app.config[str('MEMORY_CACHE_CONFIGURATION')]) # type: Cache
if app.config[str('AUTOCOMPLETE_SYSTEMS')] is not None:
global_autocomplete = {k: utils.create_object(v) for k, v in app.config[str('AUTOCOMPLETE_SYSTEMS')].items()}
else:
from jormungandr.autocomplete.kraken import Kraken
global_autocomplete = {'kraken': Kraken()}
from jormungandr.equipments.equipment_provider_manager import EquipmentProviderManager
equipment_provider_manager = EquipmentProviderManager(app.config[str('EQUIPMENT_DETAILS_PROVIDERS')])
from jormungandr.instance_manager import InstanceManager
i_manager = InstanceManager(
instances_dir=app.config.get(str('INSTANCES_DIR'), None),
instance_filename_pattern=app.config.get(str('INSTANCES_FILENAME_PATTERN'), '*.json'),
start_ping=app.config.get(str('START_MONITORING_THREAD'), True),
)
i_manager.initialisation()
from jormungandr.stat_manager import StatManager
stat_manager = StatManager()
bss_provider_manager = init.bss_providers(app)
from jormungandr.parking_space_availability.car.car_park_provider_manager import CarParkingProviderManager
car_park_provider_manager = CarParkingProviderManager(app.config[str('CAR_PARK_PROVIDER')])
from jormungandr import api
def setup_package():
i_manager.stop()
| agpl-3.0 | -1,356,667,356,695,377,400 | 30.611111 | 113 | 0.762491 | false |
OSSESAC/odoopubarquiluz | addons/hr_timesheet_sheet/hr_timesheet_sheet.py | 1 | 30074 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp import netsvc
class hr_timesheet_sheet(osv.osv):
_name = "hr_timesheet_sheet.sheet"
_inherit = "mail.thread"
_table = 'hr_timesheet_sheet_sheet'
_order = "id desc"
_description="Timesheet"
def _total(self, cr, uid, ids, name, args, context=None):
""" Compute the attendances, analytic lines timesheets and differences between them
for all the days of a timesheet and the current day
"""
res = {}
for sheet in self.browse(cr, uid, ids, context=context or {}):
res.setdefault(sheet.id, {
'total_attendance': 0.0,
'total_timesheet': 0.0,
'total_difference': 0.0,
})
for period in sheet.period_ids:
res[sheet.id]['total_attendance'] += period.total_attendance
res[sheet.id]['total_timesheet'] += period.total_timesheet
res[sheet.id]['total_difference'] += period.total_attendance - period.total_timesheet
return res
def check_employee_attendance_state(self, cr, uid, sheet_id, context=None):
ids_signin = self.pool.get('hr.attendance').search(cr,uid,[('sheet_id', '=', sheet_id),('action','=','sign_in')])
ids_signout = self.pool.get('hr.attendance').search(cr,uid,[('sheet_id', '=', sheet_id),('action','=','sign_out')])
if len(ids_signin) != len(ids_signout):
            raise osv.except_osv(_('Warning!'), _('The timesheet cannot be validated as it does not contain an equal number of sign ins and sign outs.'))
return True
def copy(self, cr, uid, ids, *args, **argv):
raise osv.except_osv(_('Error!'), _('You cannot duplicate a timesheet.'))
def create(self, cr, uid, vals, context=None):
if 'employee_id' in vals:
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).user_id:
raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must assign it to a user.'))
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).product_id:
raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link the employee to a product, like \'Consultant\'.'))
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).journal_id:
raise osv.except_osv(_('Configuration Error!'), _('In order to create a timesheet for this employee, you must assign an analytic journal to the employee, like \'Timesheet Journal\'.'))
if vals.get('attendances_ids'):
# If attendances, we sort them by date asc before writing them, to satisfy the alternance constraint
vals['attendances_ids'] = self.sort_attendances(cr, uid, vals['attendances_ids'], context=context)
return super(hr_timesheet_sheet, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
if 'employee_id' in vals:
new_user_id = self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).user_id.id or False
if not new_user_id:
raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must assign it to a user.'))
if not self._sheet_date(cr, uid, ids, forced_user_id=new_user_id, context=context):
raise osv.except_osv(_('Error!'), _('You cannot have 2 timesheets that overlap!\nYou should use the menu \'My Timesheet\' to avoid this problem.'))
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).product_id:
raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link the employee to a product.'))
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).journal_id:
raise osv.except_osv(_('Configuration Error!'), _('In order to create a timesheet for this employee, you must assign an analytic journal to the employee, like \'Timesheet Journal\'.'))
if vals.get('attendances_ids'):
# If attendances, we sort them by date asc before writing them, to satisfy the alternance constraint
# In addition to the date order, deleting attendances are done before inserting attendances
vals['attendances_ids'] = self.sort_attendances(cr, uid, vals['attendances_ids'], context=context)
res = super(hr_timesheet_sheet, self).write(cr, uid, ids, vals, context=context)
if vals.get('attendances_ids'):
for timesheet in self.browse(cr, uid, ids):
if not self.pool['hr.attendance']._altern_si_so(cr, uid, [att.id for att in timesheet.attendances_ids]):
raise osv.except_osv(_('Warning !'), _('Error ! Sign in (resp. Sign out) must follow Sign out (resp. Sign in)'))
return res
def sort_attendances(self, cr, uid, attendance_tuples, context=None):
date_attendances = []
for att_tuple in attendance_tuples:
if att_tuple[0] in [0,1,4]:
if att_tuple[0] in [0,1]:
name = att_tuple[2]['name']
else:
name = self.pool['hr.attendance'].browse(cr, uid, att_tuple[1]).name
date_attendances.append((1, name, att_tuple))
elif att_tuple[0] in [2,3]:
date_attendances.append((0, self.pool['hr.attendance'].browse(cr, uid, att_tuple[1]).name, att_tuple))
else:
date_attendances.append((0, False, att_tuple))
date_attendances.sort()
return [att[2] for att in date_attendances]
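    # Illustrative ordering (values are made up): one2many command tuples end up
    # sorted by attendance datetime, with unlink commands (2, 3) placed before
    # create/update commands, e.g.
    #   [(0, 0, {'name': '2013-07-10 10:00:00'}), (2, 42)] -> [(2, 42), (0, 0, {...})]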
def button_confirm(self, cr, uid, ids, context=None):
for sheet in self.browse(cr, uid, ids, context=context):
if sheet.employee_id and sheet.employee_id.parent_id and sheet.employee_id.parent_id.user_id:
self.message_subscribe_users(cr, uid, [sheet.id], user_ids=[sheet.employee_id.parent_id.user_id.id], context=context)
self.check_employee_attendance_state(cr, uid, sheet.id, context=context)
di = sheet.user_id.company_id.timesheet_max_difference
if (abs(sheet.total_difference) < di) or not di:
wf_service = netsvc.LocalService("workflow")
wf_service.trg_validate(uid, 'hr_timesheet_sheet.sheet', sheet.id, 'confirm', cr)
else:
raise osv.except_osv(_('Warning!'), _('Please verify that the total difference of the sheet is lower than %.2f.') %(di,))
return True
def attendance_action_change(self, cr, uid, ids, context=None):
hr_employee = self.pool.get('hr.employee')
employee_ids = []
for sheet in self.browse(cr, uid, ids, context=context):
if sheet.employee_id.id not in employee_ids: employee_ids.append(sheet.employee_id.id)
return hr_employee.attendance_action_change(cr, uid, employee_ids, context=context)
_columns = {
'name': fields.char('Note', size=64, select=1,
states={'confirm':[('readonly', True)], 'done':[('readonly', True)]}),
'employee_id': fields.many2one('hr.employee', 'Employee', required=True),
'user_id': fields.related('employee_id', 'user_id', type="many2one", relation="res.users", store=True, string="User", required=False, readonly=True),#fields.many2one('res.users', 'User', required=True, select=1, states={'confirm':[('readonly', True)], 'done':[('readonly', True)]}),
'date_from': fields.date('Date from', required=True, select=1, readonly=True, states={'new':[('readonly', False)]}),
'date_to': fields.date('Date to', required=True, select=1, readonly=True, states={'new':[('readonly', False)]}),
'timesheet_ids' : fields.one2many('hr.analytic.timesheet', 'sheet_id',
'Timesheet lines',
readonly=True, states={
'draft': [('readonly', False)],
'new': [('readonly', False)]}
),
'attendances_ids' : fields.one2many('hr.attendance', 'sheet_id', 'Attendances'),
'state' : fields.selection([
('new', 'New'),
('draft','Open'),
('confirm','Waiting Approval'),
('done','Approved')], 'Status', select=True, required=True, readonly=True,
help=' * The \'Draft\' status is used when a user is encoding a new and unconfirmed timesheet. \
                \n* The \'Confirmed\' status is used when the user confirms the timesheet. \
                \n* The \'Done\' status is used when the user\'s timesheet is accepted by his/her senior.'),
'state_attendance' : fields.related('employee_id', 'state', type='selection', selection=[('absent', 'Absent'), ('present', 'Present')], string='Current Status', readonly=True),
'total_attendance': fields.function(_total, method=True, string='Total Attendance', multi="_total"),
'total_timesheet': fields.function(_total, method=True, string='Total Timesheet', multi="_total"),
'total_difference': fields.function(_total, method=True, string='Difference', multi="_total"),
'period_ids': fields.one2many('hr_timesheet_sheet.sheet.day', 'sheet_id', 'Period', readonly=True),
'account_ids': fields.one2many('hr_timesheet_sheet.sheet.account', 'sheet_id', 'Analytic accounts', readonly=True),
'company_id': fields.many2one('res.company', 'Company'),
'department_id':fields.many2one('hr.department','Department'),
}
def _default_date_from(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
r = user.company_id and user.company_id.timesheet_range or 'month'
if r=='month':
return time.strftime('%Y-%m-01')
elif r=='week':
return (datetime.today() + relativedelta(weekday=0, days=-6)).strftime('%Y-%m-%d')
elif r=='year':
return time.strftime('%Y-01-01')
return time.strftime('%Y-%m-%d')
def _default_date_to(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
r = user.company_id and user.company_id.timesheet_range or 'month'
if r=='month':
return (datetime.today() + relativedelta(months=+1,day=1,days=-1)).strftime('%Y-%m-%d')
elif r=='week':
return (datetime.today() + relativedelta(weekday=6)).strftime('%Y-%m-%d')
elif r=='year':
return time.strftime('%Y-12-31')
return time.strftime('%Y-%m-%d')
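    # Worked example (illustrative dates): with a 'week' timesheet range and today
    # being Wednesday 2013-07-10, _default_date_from() gives '2013-07-08' (Monday)
    # and _default_date_to() gives '2013-07-14' (Sunday).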
def _default_employee(self, cr, uid, context=None):
emp_ids = self.pool.get('hr.employee').search(cr, uid, [('user_id','=',uid)], context=context)
return emp_ids and emp_ids[0] or False
_defaults = {
'date_from' : _default_date_from,
'date_to' : _default_date_to,
'state': 'new',
'employee_id': _default_employee,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'hr_timesheet_sheet.sheet', context=c)
}
def _sheet_date(self, cr, uid, ids, forced_user_id=False, context=None):
for sheet in self.browse(cr, uid, ids, context=context):
new_user_id = forced_user_id or sheet.user_id and sheet.user_id.id
if new_user_id:
cr.execute('SELECT id \
FROM hr_timesheet_sheet_sheet \
WHERE (date_from <= %s and %s <= date_to) \
AND user_id=%s \
AND id <> %s',(sheet.date_to, sheet.date_from, new_user_id, sheet.id))
if cr.fetchall():
return False
return True
_constraints = [
(_sheet_date, 'You cannot have 2 timesheets that overlap!\nPlease use the menu \'My Current Timesheet\' to avoid this problem.', ['date_from','date_to']),
]
def action_set_to_draft(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {'state': 'draft'})
wf_service = netsvc.LocalService('workflow')
for id in ids:
wf_service.trg_create(uid, self._name, id, cr)
return True
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
if isinstance(ids, (long, int)):
ids = [ids]
return [(r['id'], _('Week ')+datetime.strptime(r['date_from'], '%Y-%m-%d').strftime('%U')) \
for r in self.read(cr, uid, ids, ['date_from'],
context=context, load='_classic_write')]
def unlink(self, cr, uid, ids, context=None):
sheets = self.read(cr, uid, ids, ['state','total_attendance'], context=context)
for sheet in sheets:
if sheet['state'] in ('confirm', 'done'):
raise osv.except_osv(_('Invalid Action!'), _('You cannot delete a timesheet which is already confirmed.'))
            elif sheet['total_attendance'] != 0.00:
                raise osv.except_osv(_('Invalid Action!'), _('You cannot delete a timesheet which has attendance entries.'))
return super(hr_timesheet_sheet, self).unlink(cr, uid, ids, context=context)
def onchange_employee_id(self, cr, uid, ids, employee_id, context=None):
department_id = False
user_id = False
if employee_id:
empl_id = self.pool.get('hr.employee').browse(cr, uid, employee_id, context=context)
department_id = empl_id.department_id.id
user_id = empl_id.user_id.id
return {'value': {'department_id': department_id, 'user_id': user_id,}}
# ------------------------------------------------
# OpenChatter methods and notifications
# ------------------------------------------------
def _needaction_domain_get(self, cr, uid, context=None):
emp_obj = self.pool.get('hr.employee')
empids = emp_obj.search(cr, uid, [('parent_id.user_id', '=', uid)], context=context)
if not empids:
return False
dom = ['&', ('state', '=', 'confirm'), ('employee_id', 'in', empids)]
return dom
class account_analytic_line(osv.osv):
_inherit = "account.analytic.line"
def _get_default_date(self, cr, uid, context=None):
if context is None:
context = {}
#get the default date (should be: today)
res = super(account_analytic_line, self)._get_default_date(cr, uid, context=context)
#if we got the dates from and to from the timesheet and if the default date is in between, we use the default
#but if the default isn't included in those dates, we use the date start of the timesheet as default
if context.get('timesheet_date_from') and context.get('timesheet_date_to'):
if context['timesheet_date_from'] <= res <= context['timesheet_date_to']:
return res
return context.get('timesheet_date_from')
#if we don't get the dates from the timesheet, we return the default value from super()
return res
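    # Worked example (illustrative dates): with timesheet_date_from='2013-07-01' and
    # timesheet_date_to='2013-07-31' in the context, a line created on '2013-08-02'
    # defaults to '2013-07-01' instead of today's date.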
class hr_timesheet_line(osv.osv):
_inherit = "hr.analytic.timesheet"
def _sheet(self, cursor, user, ids, name, args, context=None):
sheet_obj = self.pool.get('hr_timesheet_sheet.sheet')
res = {}.fromkeys(ids, False)
for ts_line in self.browse(cursor, user, ids, context=context):
sheet_ids = sheet_obj.search(cursor, user,
[('date_to', '>=', ts_line.date), ('date_from', '<=', ts_line.date),
('employee_id.user_id', '=', ts_line.user_id.id)],
context=context)
if sheet_ids:
# [0] because only one sheet possible for an employee between 2 dates
res[ts_line.id] = sheet_obj.name_get(cursor, user, sheet_ids, context=context)[0]
return res
def _get_hr_timesheet_sheet(self, cr, uid, ids, context=None):
ts_line_ids = []
for ts in self.browse(cr, uid, ids, context=context):
cr.execute("""
SELECT l.id
FROM hr_analytic_timesheet l
INNER JOIN account_analytic_line al
ON (l.line_id = al.id)
WHERE %(date_to)s >= al.date
AND %(date_from)s <= al.date
AND %(user_id)s = al.user_id
GROUP BY l.id""", {'date_from': ts.date_from,
'date_to': ts.date_to,
'user_id': ts.employee_id.user_id.id,})
ts_line_ids.extend([row[0] for row in cr.fetchall()])
return ts_line_ids
def _get_account_analytic_line(self, cr, uid, ids, context=None):
ts_line_ids = self.pool.get('hr.analytic.timesheet').search(cr, uid, [('line_id', 'in', ids)])
return ts_line_ids
_columns = {
'sheet_id': fields.function(_sheet, string='Sheet', select="1",
type='many2one', relation='hr_timesheet_sheet.sheet', ondelete="cascade",
store={
'hr_timesheet_sheet.sheet': (_get_hr_timesheet_sheet, ['employee_id', 'date_from', 'date_to'], 10),
'account.analytic.line': (_get_account_analytic_line, ['user_id', 'date'], 10),
'hr.analytic.timesheet': (lambda self,cr,uid,ids,context=None: ids, None, 10),
},
),
}
def _check_sheet_state(self, cr, uid, ids, context=None):
if context is None:
context = {}
for timesheet_line in self.browse(cr, uid, ids, context=context):
if timesheet_line.sheet_id and timesheet_line.sheet_id.state not in ('draft', 'new'):
return False
return True
_constraints = [
(_check_sheet_state, 'You cannot modify an entry in a Confirmed/Done timesheet !', ['state']),
]
def unlink(self, cr, uid, ids, *args, **kwargs):
if isinstance(ids, (int, long)):
ids = [ids]
self._check(cr, uid, ids)
return super(hr_timesheet_line,self).unlink(cr, uid, ids,*args, **kwargs)
def _check(self, cr, uid, ids):
for att in self.browse(cr, uid, ids):
if att.sheet_id and att.sheet_id.state not in ('draft', 'new'):
raise osv.except_osv(_('Error!'), _('You cannot modify an entry in a confirmed timesheet.'))
return True
def multi_on_change_account_id(self, cr, uid, ids, account_ids, context=None):
return dict([(el, self.on_change_account_id(cr, uid, ids, el, context.get('user_id', uid))) for el in account_ids])
hr_timesheet_line()
class hr_attendance(osv.osv):
_inherit = "hr.attendance"
def _get_default_date(self, cr, uid, context=None):
if context is None:
context = {}
if 'name' in context:
return context['name'] + time.strftime(' %H:%M:%S')
return time.strftime('%Y-%m-%d %H:%M:%S')
def _get_hr_timesheet_sheet(self, cr, uid, ids, context=None):
attendance_ids = []
for ts in self.browse(cr, uid, ids, context=context):
cr.execute("""
SELECT a.id
FROM hr_attendance a
INNER JOIN hr_employee e
INNER JOIN resource_resource r
ON (e.resource_id = r.id)
ON (a.employee_id = e.id)
WHERE %(date_to)s >= date_trunc('day', a.name)
AND %(date_from)s <= a.name
AND %(user_id)s = r.user_id
GROUP BY a.id""", {'date_from': ts.date_from,
'date_to': ts.date_to,
'user_id': ts.employee_id.user_id.id,})
attendance_ids.extend([row[0] for row in cr.fetchall()])
return attendance_ids
def _sheet(self, cursor, user, ids, name, args, context=None):
sheet_obj = self.pool.get('hr_timesheet_sheet.sheet')
res = {}.fromkeys(ids, False)
for attendance in self.browse(cursor, user, ids, context=context):
date_to = datetime.strftime(datetime.strptime(attendance.name[0:10], '%Y-%m-%d'), '%Y-%m-%d %H:%M:%S')
sheet_ids = sheet_obj.search(cursor, user,
[('date_to', '>=', date_to), ('date_from', '<=', attendance.name),
('employee_id', '=', attendance.employee_id.id)],
context=context)
if sheet_ids:
# [0] because only one sheet possible for an employee between 2 dates
res[attendance.id] = sheet_obj.name_get(cursor, user, sheet_ids, context=context)[0]
return res
_columns = {
'sheet_id': fields.function(_sheet, string='Sheet',
type='many2one', relation='hr_timesheet_sheet.sheet',
store={
'hr_timesheet_sheet.sheet': (_get_hr_timesheet_sheet, ['employee_id', 'date_from', 'date_to'], 10),
'hr.attendance': (lambda self,cr,uid,ids,context=None: ids, ['employee_id', 'name', 'day'], 10),
},
)
}
_defaults = {
'name': _get_default_date,
}
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if 'sheet_id' in context:
ts = self.pool.get('hr_timesheet_sheet.sheet').browse(cr, uid, context['sheet_id'], context=context)
if ts.state not in ('draft', 'new'):
raise osv.except_osv(_('Error!'), _('You cannot modify an entry in a confirmed timesheet.'))
res = super(hr_attendance,self).create(cr, uid, vals, context=context)
if 'sheet_id' in context:
if context['sheet_id'] != self.browse(cr, uid, res, context=context).sheet_id.id:
raise osv.except_osv(_('User Error!'), _('You cannot enter an attendance ' \
'date outside the current timesheet dates.'))
return res
def unlink(self, cr, uid, ids, *args, **kwargs):
if isinstance(ids, (int, long)):
ids = [ids]
self._check(cr, uid, ids)
return super(hr_attendance,self).unlink(cr, uid, ids,*args, **kwargs)
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
self._check(cr, uid, ids)
res = super(hr_attendance,self).write(cr, uid, ids, vals, context=context)
if 'sheet_id' in context:
for attendance in self.browse(cr, uid, ids, context=context):
if context['sheet_id'] != attendance.sheet_id.id:
raise osv.except_osv(_('User Error!'), _('You cannot enter an attendance ' \
'date outside the current timesheet dates.'))
return res
def _check(self, cr, uid, ids):
for att in self.browse(cr, uid, ids):
if att.sheet_id and att.sheet_id.state not in ('draft', 'new'):
raise osv.except_osv(_('Error!'), _('You cannot modify an entry in a confirmed timesheet'))
return True
hr_attendance()
class hr_timesheet_sheet_sheet_day(osv.osv):
_name = "hr_timesheet_sheet.sheet.day"
_description = "Timesheets by Period"
_auto = False
_order='name'
_columns = {
'name': fields.date('Date', readonly=True),
'sheet_id': fields.many2one('hr_timesheet_sheet.sheet', 'Sheet', readonly=True, select="1"),
'total_timesheet': fields.float('Total Timesheet', readonly=True),
'total_attendance': fields.float('Attendance', readonly=True),
'total_difference': fields.float('Difference', readonly=True),
}
def init(self, cr):
cr.execute("""create or replace view hr_timesheet_sheet_sheet_day as
SELECT
id,
name,
sheet_id,
total_timesheet,
total_attendance,
cast(round(cast(total_attendance - total_timesheet as Numeric),2) as Double Precision) AS total_difference
FROM
((
SELECT
MAX(id) as id,
name,
sheet_id,
SUM(total_timesheet) as total_timesheet,
CASE WHEN SUM(total_attendance) < 0
THEN (SUM(total_attendance) +
CASE WHEN current_date <> name
THEN 1440
ELSE (EXTRACT(hour FROM current_time AT TIME ZONE 'UTC') * 60) + EXTRACT(minute FROM current_time AT TIME ZONE 'UTC')
END
)
ELSE SUM(total_attendance)
END /60 as total_attendance
FROM
((
select
min(hrt.id) as id,
l.date::date as name,
s.id as sheet_id,
sum(l.unit_amount) as total_timesheet,
0.0 as total_attendance
from
hr_analytic_timesheet hrt
JOIN account_analytic_line l ON l.id = hrt.line_id
LEFT JOIN hr_timesheet_sheet_sheet s ON s.id = hrt.sheet_id
group by l.date::date, s.id
) union (
select
-min(a.id) as id,
a.name::date as name,
s.id as sheet_id,
0.0 as total_timesheet,
SUM(((EXTRACT(hour FROM a.name) * 60) + EXTRACT(minute FROM a.name)) * (CASE WHEN a.action = 'sign_in' THEN -1 ELSE 1 END)) as total_attendance
from
hr_attendance a
LEFT JOIN hr_timesheet_sheet_sheet s
ON s.id = a.sheet_id
WHERE action in ('sign_in', 'sign_out')
group by a.name::date, s.id
)) AS foo
GROUP BY name, sheet_id
)) AS bar""")
hr_timesheet_sheet_sheet_day()
class hr_timesheet_sheet_sheet_account(osv.osv):
_name = "hr_timesheet_sheet.sheet.account"
_description = "Timesheets by Period"
_auto = False
_order='name'
_columns = {
'name': fields.many2one('account.analytic.account', 'Project / Analytic Account', readonly=True),
'sheet_id': fields.many2one('hr_timesheet_sheet.sheet', 'Sheet', readonly=True),
'total': fields.float('Total Time', digits=(16,2), readonly=True),
'invoice_rate': fields.many2one('hr_timesheet_invoice.factor', 'Invoice rate', readonly=True),
}
def init(self, cr):
cr.execute("""create or replace view hr_timesheet_sheet_sheet_account as (
select
min(hrt.id) as id,
l.account_id as name,
s.id as sheet_id,
sum(l.unit_amount) as total,
l.to_invoice as invoice_rate
from
hr_analytic_timesheet hrt
left join (account_analytic_line l
LEFT JOIN hr_timesheet_sheet_sheet s
ON (s.date_to >= l.date
AND s.date_from <= l.date
AND s.user_id = l.user_id))
on (l.id = hrt.line_id)
group by l.account_id, s.id, l.to_invoice
)""")
hr_timesheet_sheet_sheet_account()
class res_company(osv.osv):
_inherit = 'res.company'
_columns = {
'timesheet_range': fields.selection(
[('day','Day'),('week','Week'),('month','Month')], 'Timesheet range',
help="Periodicity on which you validate your timesheets."),
        'timesheet_max_difference': fields.float('Timesheet allowed difference (Hours)',
help="Allowed difference in hours between the sign in/out and the timesheet " \
"computation for one sheet. Set this to 0 if you do not want any control."),
}
_defaults = {
'timesheet_range': lambda *args: 'week',
'timesheet_max_difference': lambda *args: 0.0
}
res_company()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 3,142,552,518,188,816,400 | 50.320819 | 290 | 0.557093 | false |
RuiNascimento/krepo | script.module.lambdascrapers/lib/lambdascrapers/sources_ lambdascrapers/pl/trt.py | 1 | 3892 | # -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @Daddy_Blamo wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.blamo
import re, urllib, urlparse, base64, json
from resources.lib.modules import cleantitle
from resources.lib.modules import client
class source:
def __init__(self):
self.priority = 1
self.language = ['pl']
self.domains = ['trt.pl']
self.base_link = 'http://www.trt.pl/'
self.search_link = 'szukaj-filmy/%s'
def movie(self, imdb, title, localtitle, aliases, year):
return title + ' ' + year
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        return tvshowtitle
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
return url + ' s' + season.zfill(2) + 'e' + episode.zfill(2)
def contains_word(self, str_to_check, word):
return re.search(r'\b' + word + r'\b', str_to_check, re.IGNORECASE)
    def contains_all_words(self, str_to_check, words):
for word in words:
if not self.contains_word(str_to_check, word):
return False
return True
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            words = cleantitle.getsearch(url).split(' ')
            search_url = urlparse.urljoin(self.base_link, self.search_link) % urllib.quote_plus(url)
            result = client.request(search_url)
result = client.parseDOM(result, 'div', attrs={'class':'tile-container'})
            for el in result:
                main = client.parseDOM(el, 'h3')
                link = client.parseDOM(main, 'a', ret='href')[0]
                found_title = client.parseDOM(main, 'a')[0]
                if not self.contains_all_words(found_title, words):
                    continue
                quality = client.parseDOM(el, 'a', attrs={'class': 'qualityLink'})
                q = 'SD'
                if quality:
                    if quality[0] == '720p':
                        q = 'HD'
                    elif quality[0] == '1080p':
                        q = '1080p'
                lang, info = self.get_lang_by_type(found_title)
                sources.append({'source': 'trt', 'quality': q, 'language': lang, 'url': link, 'info': info, 'direct': False, 'debridonly': False})
return sources
except:
return sources
def get_lang_by_type(self, lang_type):
if self.contains_word(lang_type, 'lektor') :
return 'pl', 'Lektor'
if self.contains_word(lang_type, 'Dubbing') :
return 'pl', 'Dubbing'
if self.contains_word(lang_type, 'Napisy') :
return 'pl', 'Napisy'
if self.contains_word(lang_type, 'Polski') :
return 'pl', None
return 'en', None
def resolve(self, url):
try:
return urlparse.urljoin(self.base_link, url);
except:
return
| gpl-2.0 | 2,059,342,441,990,086,000 | 37.92 | 146 | 0.460689 | false |
rboman/progs | apps/pdf2ppt/pdf2ppt.py | 1 | 1429 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2017 Romain Boman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Splits a PPT pdf with 4 pages per sheet (requires ImageMagick in the PATH)
#
# . name the pdf "cours.pdf"
# . export the pdf to PNG at 300 DPI
# . run the script
# . in Acrobat: Create PDF => From Multiple Files
#
# ref: http://www-etud.iro.umontreal.ca/~buisteri/info/pdfen.html
import os
import glob
fname = "cours_Page_*.pdf"
for f in glob.glob("Cours_Page_*.png"):
f2 = f.replace('.png', '-crop.png')
cmd = "convert -crop 95x95%%+0+0 %s %s" % (f, f2) # vire le numero
print(cmd)
os.system(cmd)
cmd = "convert -crop 50x50%% %s %s" % (f2, f)
print(cmd)
os.system(cmd)
os.remove(f2)
for g in glob.glob("%s-*.png" % f.replace('.png', '')):
cmd = "mogrify -trim %s" % g
print(cmd)
os.system(cmd)
os.remove(f)
| apache-2.0 | 2,452,702,609,578,649,000 | 30.733333 | 76 | 0.654062 | false |
danja/elfquake | prednet_/ingv_train.py | 1 | 3488 | '''
Train PredNet on INGV sequences.
'''
import os
import numpy as np
np.random.seed(123)
from six.moves import cPickle
from keras import backend as K
from keras.models import Model
from keras.layers import Input, Dense, Flatten
from keras.layers import LSTM
from keras.layers import TimeDistributed
from keras.callbacks import LearningRateScheduler, ModelCheckpoint
from keras.optimizers import Adam
from prednet import PredNet
from data_utils import SequenceGenerator
from ingv_settings import *
save_model = True # if weights will be saved
weights_file = os.path.join(WEIGHTS_DIR, 'prednet_ingv_weights.hdf5') # where weights will be saved
json_file = os.path.join(WEIGHTS_DIR, 'prednet_ingv_model.json')
# Data files
train_file = os.path.join(DATA_DIR, 'X_train.hkl')
train_sources = os.path.join(DATA_DIR, 'sources_train.hkl')
val_file = os.path.join(DATA_DIR, 'X_val.hkl')
val_sources = os.path.join(DATA_DIR, 'sources_val.hkl')
# Training parameters
nb_epoch = 50 # was 150
batch_size = 2 # was 4
samples_per_epoch = 250 # was 500
N_seq_val = 100 # number of sequences to use for validation
# Model parameters
n_channels, im_height, im_width = (3, 128, 160)
input_shape = (n_channels, im_height, im_width) if K.image_data_format() == 'channels_first' else (im_height, im_width, n_channels)
stack_sizes = (n_channels, 48, 96, 192)
R_stack_sizes = stack_sizes
A_filt_sizes = (3, 3, 3)
Ahat_filt_sizes = (3, 3, 3, 3)
R_filt_sizes = (3, 3, 3, 3)
layer_loss_weights = np.array([1., 0., 0., 0.]) # weighting for each layer in final loss; "L_0" model: [1, 0, 0, 0], "L_all": [1, 0.1, 0.1, 0.1]
layer_loss_weights = np.expand_dims(layer_loss_weights, 1)
nt = 10 # number of timesteps used for sequences in training
time_loss_weights = 1./ (nt - 1) * np.ones((nt,1)) # equally weight all timesteps except the first
time_loss_weights[0] = 0
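# Sanity note (values follow from the two lines above, with nt = 10 as set here):
# the per-timestep weights are 0 for the first frame and 1/(nt-1) = 1/9 for the
# remaining ones, so the unpredictable first frame never contributes to the loss:
#   time_loss_weights.ravel() -> [0., 0.111..., 0.111..., ..., 0.111...]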
prednet = PredNet(stack_sizes, R_stack_sizes,
A_filt_sizes, Ahat_filt_sizes, R_filt_sizes,
output_mode='error', return_sequences=True)
inputs = Input(shape=(nt,) + input_shape)
errors = prednet(inputs) # errors will be (batch_size, nt, nb_layers)
errors_by_time = TimeDistributed(Dense(1, weights=[layer_loss_weights, np.zeros(1)], trainable=False), trainable=False)(errors) # calculate weighted error by layer
errors_by_time = Flatten()(errors_by_time) # will be (batch_size, nt)
final_errors = Dense(1, weights=[time_loss_weights, np.zeros(1)], trainable=False)(errors_by_time) # weight errors by time
model = Model(inputs=inputs, outputs=final_errors)
model.compile(loss='mean_absolute_error', optimizer='adam')
train_generator = SequenceGenerator(train_file, train_sources, nt, batch_size=batch_size, shuffle=True)
val_generator = SequenceGenerator(val_file, val_sources, nt, batch_size=batch_size, N_seq=N_seq_val)
lr_schedule = lambda epoch: 0.001 if epoch < 75 else 0.0001 # start with lr of 0.001 and then drop to 0.0001 after 75 epochs
callbacks = [LearningRateScheduler(lr_schedule)]
if save_model:
if not os.path.exists(WEIGHTS_DIR): os.mkdir(WEIGHTS_DIR)
callbacks.append(ModelCheckpoint(filepath=weights_file, monitor='val_loss', save_best_only=True))
history = model.fit_generator(train_generator, samples_per_epoch / batch_size, nb_epoch, callbacks=callbacks,
validation_data=val_generator, validation_steps=N_seq_val / batch_size)
if save_model:
json_string = model.to_json()
with open(json_file, "w") as f:
f.write(json_string)
| apache-2.0 | 4,245,735,531,598,125,000 | 42.061728 | 164 | 0.716456 | false |
TariqEE/PrivEx | S2/S2-netified/exitListener.py | 1 | 5097 | from collections import defaultdict
from privexUtils import q, epoch, dc_start_delay, dc_reg_delay
from router import router
from tkgserver import tkgserver
from twisted.internet import reactor, protocol, task, ssl
from twisted.protocols import basic
import time
import json
import argparse
import pprint
parser = argparse.ArgumentParser(description='')
parser.add_argument('-i','--input', help='Input website list, one on each line',required=True)
parser.add_argument('-tkg','--tkgList', help='Input tkg list, IP and port, one on each line',required=True)
parser.add_argument('-thp','--tally', help='Input tally server IP and port.',required=True)
parser.add_argument('-p','--port', help='port to listen on',required=True)
parser.add_argument('-f','--fingerprint', help='fingerprint file of exit',required=True)
parser.add_argument('-c','--consensus', help='consensus file of exit',required=True)
args = parser.parse_args()
class exitListener(protocol.Protocol):
def dataReceived(self, data):
action, channelID, circuitID, website = data.split(" ", 3)
action = action.strip()
channelID = int(channelID.strip())
circuitID = int(circuitID.strip())
website = website.strip()
if action == "a":
if channelID not in site_seen:
site_seen[channelID] = {}
if circuitID not in site_seen[channelID]:
site_seen[channelID][circuitID] = {}
if website not in site_seen[channelID][circuitID]:
site_seen[channelID][circuitID][website] = 1
if website != "Other" and website != "Censored":
if website in labels:
r.inc(website)
r.inc("Censored")
# print website + " incremented exitListener!\n"
else:
r.inc("Other")
# print "Other incremented exitListener!\n"
class exitRegister(basic.LineReceiver):
def __init__(self):
self.delimiter = '\n'
def connectionMade(self):
self.register_exit()
self.transport.loseConnection()
def register_exit(self):
global msg
print "DC: Registered with a TKG!"
#self.sendLine(msg[0])
#self.send_msg = json.dumps(msg[0])
#pprint.pprint(self.send_msg)
#self.sendLine(self.send_msg)
self.sendLine(repr(msg[0]))
msg.pop(0)
class exitStatSend(basic.LineReceiver):
def connectionMade(self):
self.send_stats()
self.transport.loseConnection()
def send_stats(self):
global r
global msg
global site_seen
self.send_data = json.dumps(r.publish())
print "DC: Sending TS our stats!"
self.sendLine(self.send_data)
#clean up objects and refresh
site_seen.clear()
r = None
msg = []
r = router(q, labels, tkgs, args.fingerprint, args.consensus)
for kid, a in zip(r.keys, tkgs):
msg.append(r.authority_msg(kid))
time.sleep(dc_reg_delay)
for host, port in tkg_info:
reactor.connectSSL(host, int(port), c_factory, ssl.ClientContextFactory())
if __name__ == "__main__":
labels = []
tkgs = []
site_seen = {}
r = None
tkg_info = []
msg = []
with open(args.input,'r') as f1:
for line in f1:
site = line.strip()
if site not in labels:
labels.append(site)
labels.append("Other")
labels.append("Censored")
with open(args.tally,'r') as f3:
for tallyline in f3:
tallyhost, tallyport = tallyline.strip().split()
with open(args.tkgList,'r') as f2:
for tkgline in f2:
tkgs.append(tkgserver(tkgline.strip()))
host, port = tkgline.strip().split()
tkg_info.append((host, port))
r = router(q, labels, tkgs, args.fingerprint, args.consensus)
for kid, a in zip(r.keys, tkgs):
msg.append(r.authority_msg(kid))
time.sleep((epoch - int(time.time())%epoch) + dc_start_delay)
print "DC starting up..."
last_epoch_start = int(time.time())/epoch
def epoch_change():
global last_epoch_start
global should_send
now = int(time.time())/epoch
if now > last_epoch_start:
last_epoch_start = now
print "Epoch Change!\n"
reactor.connectSSL(tallyhost, int(tallyport), sendtallyfactory, ssl.ClientContextFactory())
epoch_check = task.LoopingCall(epoch_change)
epoch_check.start(1)
sendtallyfactory = protocol.ClientFactory()
sendtallyfactory.protocol = exitStatSend
c_factory = protocol.ClientFactory()
c_factory.protocol = exitRegister
time.sleep(dc_reg_delay)
for host, port in tkg_info:
reactor.connectSSL(host, int(port), c_factory, ssl.ClientContextFactory())
s_factory = protocol.ServerFactory()
s_factory.protocol = exitListener
reactor.listenTCP(int(args.port), s_factory, interface='127.0.0.1') # Local Tor connection
print "DC ready!"
reactor.run()
| bsd-3-clause | 1,234,578,571,938,410,000 | 30.85625 | 107 | 0.613106 | false |
exp-publishing/cloudbot-plugins | plugins/tell.py | 1 | 5133 | """
tell.py
Created By:
- CloudBot IRC <https://github.com/ClodbotIRC>
Modified By:
- Josh Elsasser <https://github.com/jaelsasser>
License:
GNU General Public License (Version 3)
"""
import re
from datetime import datetime
from sqlalchemy import Table, Column, String, Boolean, DateTime
from sqlalchemy.sql import select
from cloudbot import hook
from cloudbot.util import timeformat, database
from cloudbot.event import EventType
table = Table(
'expp-tells',
database.metadata,
Column('connection', String(25)),
Column('channel', String(25, collation='NOCASE')),
Column('sender', String(25, collation='NOCASE')),
Column('target', String(25, collation='NOCASE')),
Column('message', String(500)),
Column('is_read', Boolean),
Column('time_sent', DateTime),
Column('time_read', DateTime),
extend_existing=True
)
@hook.on_start
def load_cache(db):
"""
:type db: sqlalchemy.orm.Session
"""
global tell_cache
tell_cache = []
for row in db.execute(table.select().where(table.c.is_read == 0)):
conn = row["connection"]
chan = row["channel"]
target = row["target"]
tell_cache.append((conn, chan, target))
def get_unread(db, server, target, channel='*'):
clauses = [table.c.channel == '*', table.c.channel == channel.lower()]
query = select([table.c.sender, table.c.channel, table.c.message, table.c.time_sent]) \
.where(table.c.connection == server.lower()) \
.where((table.c.channel == '*') | (table.c.channel == channel.lower())) \
.where(table.c.target == target.lower()) \
.where(table.c.is_read == 0) \
.order_by(table.c.time_sent)
return db.execute(query).fetchall()
def count_unread(db, server, target):
query = select([table]) \
.where(table.c.connection == server.lower()) \
.where(table.c.target == target.lower()) \
.where(table.c.is_read == 0) \
.alias("count") \
.count()
return db.execute(query).fetchone()[0]
def read_tell(db, server, channel, target, message):
query = table.update() \
.where(table.c.connection == server.lower()) \
.where(table.c.channel == channel.lower()) \
.where(table.c.target == target) \
.where(table.c.message == message) \
.values(is_read=1)
db.execute(query)
db.commit()
load_cache(db)
def add_tell(db, server, channel, sender, target, message):
query = table.insert().values(
connection=server.lower(),
channel=channel.lower(),
sender=sender,
target=target.lower(),
message=message,
is_read=False,
time_sent=datetime.today()
)
db.execute(query)
db.commit()
load_cache(db)
def tell_check(conn, nick):
for _conn, _chan, _target in tell_cache:
if (conn.lower(), nick.lower()) == (_conn.lower(), _target.lower()):
return True
@hook.event([EventType.message, EventType.action], singlethread=True)
def tell_watch(event, conn, db, chan, nick, ctcp, reply):
"""
:type event: cloudbot.event.Event
:type conn: cloudbot.client.Client
:type db: sqlalchemy.orm.Session
"""
if tell_check(conn.name, nick):
tells = get_unread(db, conn.name, nick, chan)
else:
return
sent = 0
ratelimit = 5
for _from, _channel, _message, _sent in tells:
# format the send time
reltime = timeformat.time_since(_sent, simple=True, count=1)
if reltime == 0:
reltime = "just now"
else:
reltime += " ago"
out = "[{}, {}] {}".format(_from, reltime, _message)
read_tell(db, conn.name, _channel, nick, _message)
if sent < ratelimit:
reply(out)
else:
if sent == ratelimit + 1:
reply("{} more tells sent privately.".format(len(tells) - sent))
ctcp(out)
sent += 1
@hook.command("tell")
def tell_cmd(text, nick, db, notice, conn, chan):
"""tell <nick> <message> -- Relay <message> to <nick> when <nick> is around."""
query = text.split(' ', 1)
if len(query) != 2:
notice(conn.config("command_prefix") + tell_cmd.__doc__)
return
target = query[0]
message = query[1].strip()
sender = nick
if target.lower() == sender.lower():
notice("Bad user. Bad. Stop trying to .tell yourself")
return
# we can't send messages to ourselves
if target.lower() == conn.nick.lower():
notice("Invalid nick '{}'.".format(target))
return
if not re.match("^[a-z0-9_|.\-\]\[]*$", target.lower()):
notice("Invalid nick '{}'.".format(target))
return
# tells received via PM can be received anywhere
if chan.lower() == nick.lower():
chan = '*'
if count_unread(db, conn.name, target) >= 25:
notice("{} has too many messages queued already. Try again later"
.format(target))
return
add_tell(db, conn.name, chan, sender, target, message)
notice("I'll pass that on when {} is around.".format(target))
| gpl-3.0 | -2,896,529,931,401,494,000 | 28.331429 | 91 | 0.591467 | false |
ivmech/iviny-scope | lib/xlsxwriter/test/comparison/test_fit_to_pages05.py | 1 | 1938 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013, John McNamara, [email protected]
#
import unittest
import os
from ...workbook import Workbook
from ..helperfunctions import _compare_xlsx_files
class TestCompareXLSXFiles(unittest.TestCase):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'fit_to_pages05.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = ['xl/printerSettings/printerSettings1.bin',
'xl/worksheets/_rels/sheet1.xml.rels']
self.ignore_elements = {'[Content_Types].xml': ['<Default Extension="bin"'],
'xl/worksheets/sheet1.xml': ['<pageMargins', '<pageSetup']}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with fit to print."""
filename = self.got_filename
####################################################
workbook = Workbook(filename)
worksheet = workbook.add_worksheet()
worksheet.fit_to_pages(1, 0)
worksheet.set_paper(9)
worksheet.write('A1', 'Foo')
workbook.close()
####################################################
got, exp = _compare_xlsx_files(self.got_filename,
self.exp_filename,
self.ignore_files,
self.ignore_elements)
self.assertEqual(got, exp)
def tearDown(self):
# Cleanup.
if os.path.exists(self.got_filename):
os.remove(self.got_filename)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 4,416,242,346,363,676,700 | 27.925373 | 91 | 0.512384 | false |
talon-one/talon_one.py | test/test_set_discount_effect_props.py | 1 | 2126 | # coding: utf-8
"""
Talon.One API
The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns. ### Where is the API? The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id` [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import talon_one
from talon_one.models.set_discount_effect_props import SetDiscountEffectProps # noqa: E501
from talon_one.rest import ApiException
class TestSetDiscountEffectProps(unittest.TestCase):
"""SetDiscountEffectProps unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test SetDiscountEffectProps
        include_optional is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = talon_one.models.set_discount_effect_props.SetDiscountEffectProps() # noqa: E501
if include_optional :
return SetDiscountEffectProps(
name = '0',
value = 1.337,
scope = '0'
)
else :
return SetDiscountEffectProps(
name = '0',
value = 1.337,
)
def testSetDiscountEffectProps(self):
"""Test SetDiscountEffectProps"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| mit | 7,183,432,051,782,559,000 | 36.964286 | 647 | 0.670273 | false |
CanonicalLtd/subiquity | subiquity/models/identity.py | 1 | 1628 | # Copyright 2015 Canonical, Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import attr
log = logging.getLogger('subiquity.models.identity')
@attr.s
class User(object):
realname = attr.ib()
username = attr.ib()
password = attr.ib()
class IdentityModel(object):
""" Model representing user identity
"""
def __init__(self):
self._user = None
self._hostname = None
def add_user(self, identity_data):
self._hostname = identity_data.hostname
d = {}
d['realname'] = identity_data.realname
d['username'] = identity_data.username
d['password'] = identity_data.crypted_password
if not d['realname']:
d['realname'] = identity_data.username
self._user = User(**d)
@property
def hostname(self):
return self._hostname
@property
def user(self):
return self._user
def __repr__(self):
return "<LocalUser: {} {}>".format(self.user, self.hostname)
| agpl-3.0 | 4,035,972,600,284,715,000 | 27.068966 | 74 | 0.665233 | false |
open-craft/opencraft | instance/migrations/0002_auto_20150530_1255.py | 1 | 1562 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('instance', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='LogEntry',
fields=[
('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),
('text', models.TextField()),
('level', models.CharField(max_length=5, default='info', choices=[('debug', 'Debug'), ('info', 'Info'), ('warn', 'Warning'), ('error', 'Error')], db_index=True)),
],
),
migrations.AlterField(
model_name='openedxinstance',
name='email',
field=models.EmailField(max_length=254, default='[email protected]'),
),
migrations.AlterField(
model_name='openedxinstance',
name='github_organization_name',
field=models.CharField(max_length=50, default='open-craft', db_index=True),
),
migrations.AlterField(
model_name='openedxinstance',
name='github_repository_name',
field=models.CharField(max_length=50, default='opencraft', db_index=True),
),
migrations.AddField(
model_name='logentry',
name='instance',
field=models.ForeignKey(to='instance.OpenEdXInstance', on_delete=django.db.models.deletion.CASCADE),
),
]
| agpl-3.0 | -7,473,166,844,276,405,000 | 35.325581 | 178 | 0.576184 | false |
geotrellis/geotrellis-osm-elevation | ingest/src/main/python/geotrellis/osme/ingest/translate.py | 1 | 9548 | # 1. function create_object_links() gets a bucket path and returns a list of the link of each .img file
# 2. s3://azavea-datahub/emr/bootstrap.sh: install python2.7: sudo yum install -y python27;
# install gdal;
# install gdal_retile.py: sudo yum install -y gdal-python.x86_64;
# 3. change spark conf file in the master node:
# sudo sed -i '$ a export PYSPARK_PYTHON=/usr/bin/python2.7' /usr/lib/spark/conf/spark-env.sh
# usage: nohup /usr/lib/spark/bin/spark-submit translate.py /path/of/raw/tiles /path/of/workspace jobId &
# example: nohup /usr/lib/spark/bin/spark-submit translate.py s3://azavea-datahub/raw/ned-13arcsec/ s3://osm-elevation/chunk/geotiff emr-test-job-full &
#!/usr/bin/env python
import os
import sys
import json
import errno
import shutil
import zipfile
import tempfile
import traceback
from urlparse import urlparse
from collections import namedtuple
from subprocess import call, check_output
APP_NAME = "OSM Elevation Data Conversion"
def create_tmp_directory(prefix):
tmp = tempfile.mktemp(prefix=prefix, dir=os.path.join(os.environ['PWD'], "translate-temp"))
return makedirs_p(tmp)
def makedirs_p(d):
if not os.path.exists(d):
os.makedirs(d)
return d
def get_local_copy(uri, local_dir):
parsed = urlparse(uri)
local_path = tempfile.mktemp(dir=local_dir)
if parsed.scheme == "s3":
cmd = ["aws", "s3", "cp", uri, local_path]
elif parsed.scheme == "https":
cmd = ["wget", "-O", local_path, uri]
else:
cmd = ["cp", uri, local_path]
c = call(cmd)
return local_path
def create_object_links(bucket):
cmd = ["aws", "s3", "ls", bucket]
ls = check_output(cmd)
lines = ls.splitlines()
links = []
for line in lines:
obj = line.split()[-1]
if ".img" in obj:
links.append(bucket+obj)
return links
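# Example (the object key below is hypothetical):
#   create_object_links("s3://azavea-datahub/raw/ned-13arcsec/")
#   -> ["s3://azavea-datahub/raw/ned-13arcsec/imgn38w078_13.img", ...]
# only keys containing ".img" are kept, everything else in the listing is skipped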
def unzip(source_path):
unzipped_dir = source_path + "-unzipped"
with zipfile.ZipFile(source_path) as zf:
zf.extractall(unzipped_dir)
names = zf.namelist()
extensions = ['.flt', '.hdr']
unzipped_paths = {}
for name in names:
for extension in extensions:
if extension in name:
unzipped_paths[extension] = unzipped_dir+'/'+name
return unzipped_paths
def upload_to_working(local_src, dest):
parsed = urlparse(dest)
if parsed.scheme == "s3":
cmd = ["aws", "s3", "cp",
local_src, dest]
else:
d = os.path.dirname(dest)
if not os.path.exists(d):
os.makedirs(d)
cmd = ["cp", local_src, dest]
call(cmd)
return dest
def get_filename(uri):
p = urlparse(uri)
return os.path.splitext(os.path.join(p.netloc, p.path[1:]))[0]
def mkdir_p(dir):
try:
os.makedirs(dir)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(dir):
pass
else: raise
UriSet = namedtuple('UriSet', 'source_uri workspace_target workspace_source_uri image_folder order')
def vsi_curlify(uri):
"""
Creates a GDAL-readable path from the given URI
"""
parsed = urlparse(uri)
result_uri = ""
if not parsed.scheme:
result_uri = uri
else:
if parsed.scheme == "s3":
result_uri = "/vsicurl/http://%s.s3.amazonaws.com%s" % (parsed.netloc, parsed.path)
elif parsed.scheme.startswith("http"):
result_uri = "/vsicurl/%s" % uri
else:
raise Exception("Unsupported scheme: %s" % parsed.schem)
return result_uri
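# Examples (hypothetical paths):
#   vsi_curlify("s3://my-bucket/tiles/n38w078.tif")
#     -> "/vsicurl/http://my-bucket.s3.amazonaws.com/tiles/n38w078.tif"
#   vsi_curlify("https://example.com/tiles/n38w078.tif")
#     -> "/vsicurl/https://example.com/tiles/n38w078.tif"
#   a plain local path (no scheme) is returned unchanged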
def process_flt(source_uri, order, workspace_uri):
# Download the file and retile
results = []
workspace_prefix = get_filename(source_uri)
local_dir = create_tmp_directory(workspace_prefix)
try :
MAX_HEIGHT = 1024 * 2
MAX_WIDTH = 1024 * 2
local_path = get_local_copy(source_uri, local_dir)
unzipped_paths = unzip(local_path)
# make sure gdal can recognize flt files
hdr = unzipped_paths['.hdr']
flt = unzipped_paths['.flt']
cmd1 = ["gdalinfo"] + [hdr]
cmd2 = ["gdalinfo"] + [flt]
call(cmd1)
call(cmd2)
local_path = flt
# translate
translated_path = local_path + "-translated.tif"
cmd = ["gdal_translate"] + ["-of", "GTiff",
"-co", "compress=deflate",
"-co", "predictor=3",
"-co", "tiled=yes",
"-co", "blockxsize=512",
"-co", "blockysize=512",
local_path,
translated_path]
call(cmd)
# retile
tiled_dir = local_path + "-tiled"
os.mkdir(tiled_dir)
cmd = ["gdal_retile.py"] + ["-co", "compress=deflate",
"-co", "predictor=3",
"-ps",
str(MAX_WIDTH),
str(MAX_HEIGHT),
"-targetDir",
tiled_dir,
translated_path]
call(cmd)
tile_filenames = os.listdir(tiled_dir)
workspace_basename = os.path.basename(workspace_prefix)
translated_path_name = os.path.splitext(os.path.basename(translated_path))[0]
# upload
for tile_filename in tile_filenames:
workspace_key = os.path.splitext(os.path.join(workspace_prefix, tile_filename.replace(translated_path_name, workspace_basename)))[0]
workspace_target = os.path.join(workspace_uri, workspace_key + "-working.tif")
upload_to_working(os.path.join(tiled_dir, tile_filename), workspace_target)
workspace_source_uri = vsi_curlify(workspace_target)
image_folder = os.path.join(workspace_uri, workspace_key)
uri_set = UriSet(source_uri = source_uri,
workspace_target = workspace_target,
workspace_source_uri = workspace_source_uri,
image_folder = image_folder,
order = order)
results.append(uri_set)
shutil.rmtree(local_dir)
finally:
if local_dir:
shutil.rmtree(local_dir, ignore_errors=True)
return results
def process_img(source_uri, order, workspace_uri):
# Download the file and retile
results = []
workspace_prefix = get_filename(source_uri)
local_dir = create_tmp_directory(workspace_prefix)
try :
MAX_HEIGHT = 1024 * 2
MAX_WIDTH = 1024 * 2
local_path = get_local_copy(source_uri, local_dir)
# translate
translated_path = local_path + "-translated.tif"
cmd = ["gdal_translate"] + ["-of", "GTiff",
"-co", "compress=deflate",
"-co", "predictor=3",
"-co", "tiled=yes",
"-co", "blockxsize=512",
"-co", "blockysize=512",
local_path,
translated_path]
call(cmd)
# retile
tiled_dir = local_path + "-tiled"
os.mkdir(tiled_dir)
cmd = ["gdal_retile.py"] + ["-co", "compress=deflate",
"-co", "predictor=3",
"-ps",
str(MAX_WIDTH),
str(MAX_HEIGHT),
"-targetDir",
tiled_dir,
translated_path]
call(cmd)
tile_filenames = os.listdir(tiled_dir)
workspace_basename = os.path.basename(workspace_prefix)
translated_path_name = os.path.splitext(os.path.basename(translated_path))[0]
# upload
for tile_filename in tile_filenames:
workspace_key = os.path.splitext(os.path.join(workspace_prefix.split("/")[-2], tile_filename.replace(translated_path_name, workspace_basename)))[0]
workspace_target = os.path.join(workspace_uri, workspace_key + ".tif")
upload_to_working(os.path.join(tiled_dir, tile_filename), workspace_target)
workspace_source_uri = vsi_curlify(workspace_target)
image_folder = os.path.join(workspace_uri, workspace_key)
uri_set = UriSet(source_uri = source_uri,
workspace_target = workspace_target,
workspace_source_uri = workspace_source_uri,
image_folder = image_folder,
order = order)
results.append(uri_set)
shutil.rmtree(local_dir)
finally:
if local_dir:
shutil.rmtree(local_dir, ignore_errors=True)
return results
if __name__ == '__main__':
from pyspark import SparkConf, SparkContext
bucket = sys.argv[1]
source_uris = create_object_links(bucket)
workspace = sys.argv[2]
jobId = sys.argv[3]
conf = SparkConf().setAppName(APP_NAME)
sc = SparkContext(conf=conf)
uri_sets = sc.parallelize(enumerate(source_uris)).flatMap(lambda (o, i): process_img(i, o, workspace))
source_tile_count = uri_sets.cache().count()
print "Done." | apache-2.0 | 8,990,720,175,938,981,000 | 33.348921 | 159 | 0.542417 | false |
AutorestCI/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/models/win_rm_listener.py | 1 | 1814 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class WinRMListener(Model):
"""Describes Protocol and thumbprint of Windows Remote Management listener.
:param protocol: Specifies the protocol of listener. <br><br> Possible
values are: <br>**http** <br><br> **https**. Possible values include:
'Http', 'Https'
:type protocol: str or
~azure.mgmt.compute.v2017_03_30.models.ProtocolTypes
:param certificate_url: This is the URL of a certificate that has been
uploaded to Key Vault as a secret. For adding a secret to the Key Vault,
see [Add a key or secret to the key
vault](https://docs.microsoft.com/azure/key-vault/key-vault-get-started/#add).
In this case, your certificate needs to be It is the Base64 encoding of
the following JSON Object which is encoded in UTF-8: <br><br> {<br>
"data":"<Base64-encoded-certificate>",<br> "dataType":"pfx",<br>
"password":"<pfx-file-password>"<br>}
:type certificate_url: str
"""
_attribute_map = {
'protocol': {'key': 'protocol', 'type': 'ProtocolTypes'},
'certificate_url': {'key': 'certificateUrl', 'type': 'str'},
}
def __init__(self, protocol=None, certificate_url=None):
super(WinRMListener, self).__init__()
self.protocol = protocol
self.certificate_url = certificate_url
| mit | -2,108,547,658,817,203,000 | 42.190476 | 83 | 0.624587 | false |
googleapis/googleapis-gen | google/cloud/ondemandscanning/v1/ondemandscanning-v1-py/google/cloud/ondemandscanning_v1/services/scanner_service/transports/base.py | 1 | 7756 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.ondemandscanning_v1.types import scanner_service
from google.longrunning import operations_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
'google-cloud-ondemandscanning',
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
# google.auth.__version__ was added in 1.26.0
_GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
try: # try pkg_resources if it is available
_GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
except pkg_resources.DistributionNotFound: # pragma: NO COVER
_GOOGLE_AUTH_VERSION = None
class ScannerServiceTransport(abc.ABC):
"""Abstract transport class for ScannerService."""
AUTH_SCOPES = (
'https://www.googleapis.com/auth/cloud-platform',
)
DEFAULT_HOST: str = 'ondemandscanning.googleapis.com'
def __init__(
self, *,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ':' not in host:
host += ':443'
self._host = host
scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file,
**scopes_kwargs,
quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
# If the credentials is service account credentials, then always try to use self signed JWT.
if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
# TODO(busunkim): This method is in the base transport
# to avoid duplicating code across the transport classes. These functions
# should be deleted once the minimum required versions of google-auth is increased.
# TODO: Remove this function once google-auth >= 1.25.0 is required
@classmethod
def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
"""Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
scopes_kwargs = {}
if _GOOGLE_AUTH_VERSION and (
packaging.version.parse(_GOOGLE_AUTH_VERSION)
>= packaging.version.parse("1.25.0")
):
scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
else:
scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
return scopes_kwargs
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.analyze_packages: gapic_v1.method.wrap_method(
self.analyze_packages,
default_timeout=None,
client_info=client_info,
),
self.list_vulnerabilities: gapic_v1.method.wrap_method(
self.list_vulnerabilities,
default_timeout=None,
client_info=client_info,
),
}
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def analyze_packages(self) -> Callable[
[scanner_service.AnalyzePackagesRequest],
Union[
operations_pb2.Operation,
Awaitable[operations_pb2.Operation]
]]:
raise NotImplementedError()
@property
def list_vulnerabilities(self) -> Callable[
[scanner_service.ListVulnerabilitiesRequest],
Union[
scanner_service.ListVulnerabilitiesResponse,
Awaitable[scanner_service.ListVulnerabilitiesResponse]
]]:
raise NotImplementedError()
__all__ = (
'ScannerServiceTransport',
)
| apache-2.0 | -98,890,217,601,211,180 | 40.037037 | 161 | 0.640923 | false |
kevana/corpscores | dci_notify/database.py | 1 | 2185 | # -*- coding: utf-8 -*-
'''
Database module, including the SQLAlchemy database object and DB-related
utilities.
'''
from sqlalchemy.orm import relationship
from .extensions import db
# Alias common SQLAlchemy names
Column = db.Column
relationship = relationship
class CRUDMixin(object):
'''Mixin that adds convenience methods for CRUD operations.'''
@classmethod
def create(cls, **kwargs):
'''Create a new record and save it the database.'''
instance = cls(**kwargs)
return instance.save()
def update(self, commit=True, **kwargs):
'''Update specific fields of a record.'''
for attr, value in kwargs.iteritems():
setattr(self, attr, value)
return commit and self.save() or self
def save(self, commit=True):
'''Save the record.'''
db.session.add(self)
if commit:
db.session.commit()
return self
def delete(self, commit=True):
'''Remove the record from the database.'''
db.session.delete(self)
return commit and db.session.commit()
class Model(CRUDMixin, db.Model):
'''Base model class that includes CRUD convenience methods.'''
__abstract__ = True
# From Mike Bayer's "Building the app" talk
# https://speakerdeck.com/zzzeek/building-the-app
class SurrogatePK(object):
'''A mixin that adds a surrogate integer 'primary key' column named
``id`` to any declarative-mapped class.
'''
__table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True)
@classmethod
def get_by_id(cls, id):
if any(
(isinstance(id, basestring) and id.isdigit(),
isinstance(id, (int, float))),
):
return cls.query.get(int(id))
return None
def ReferenceCol(tablename, nullable=False, pk_name='id', **kwargs):
'''Column that adds primary key foreign key reference.
Usage: ::
category_id = ReferenceCol('category')
category = relationship('Category', backref='categories')
'''
return db.Column(
db.ForeignKey("{0}.{1}".format(tablename, pk_name)),
nullable=nullable, **kwargs)
| bsd-3-clause | 5,581,491,092,407,811,000 | 26.658228 | 72 | 0.628375 | false |
Symantec/py-statsd | pystatsd/pystatsagent.py | 1 | 2187 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import socket
import json
try:
import pystats_config
except ImportError:
import pystatsd.pystats_config as pystats_config
class UDPClient(object):
def __init__(self, server_ip, server_port):
"""Initalize client"""
self.server_ip = server_ip
self.server_port = server_port
self.sock = socket.socket(socket.AF_INET,
socket.SOCK_DGRAM)
def send_msg(self, msg):
"""Send message"""
self.sock.sendto(msg, (self.server_ip, self.server_port))
class PystatAgent(object):
def __init__(self):
self.cfg = pystats_config.PyStatConfig()
if self.cfg.parsedyaml is not None:
self.remote_addr = self.cfg.parsedyaml.get('bind_address',
'localhost')
self.remote_port = self.cfg.parsedyaml.get('bind_port', 5090)
else:
self.remote_addr = 'localhost'
self.remote_port = 5090
self.host = socket.gethostname()
self.udpclient = UDPClient(self.remote_addr, self.remote_port)
def trace(self, metric_name, trace_info):
data = self.format_msg_data(metric_name, 'trace', trace_info, None)
self.udpclient.send_msg(data)
def guage(self, metric_name, value, trace_info):
data = self.format_msg_data(metric_name, 'guage', trace_info, value)
self.udpclient.send_msg(data)
def format_msg_data(self, metric_name, metric_type, trace_info, value):
msg = trace_info
msg['metric_name'] = metric_name
msg['metric_type'] = metric_type
msg['host'] = self.host
# Attach additional user provided tags to the msg.
if self.cfg.parsedyaml is not None and \
self.cfg.parsedyaml.get('agent', None) is not None:
agent_tags = self.cfg.parsedyaml['agent'].get('tags', None)
if agent_tags is not None:
for tag in agent_tags:
msg[tag] = agent_tags[tag]
if metric_type == "guage":
msg['value'] = value
jdata = json.dumps(msg)
return jdata
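def _example_usage():
    """Minimal usage sketch, not called anywhere; assumes a pystatsd server is
    listening on the address resolved from the YAML config."""
    agent = PystatAgent()
    # fire-and-forget trace event with caller supplied tags
    agent.trace('api.request', {'path': '/health'})
    # gauge metric with an explicit numeric value
    agent.guage('queue.depth', 42, {'queue': 'default'})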
| apache-2.0 | 7,166,294,631,678,672,000 | 32.646154 | 76 | 0.581619 | false |
lukovkin/ufcnn-keras | models/create_signals_bid_ask.py | 1 | 13710 | from __future__ import absolute_import
from __future__ import print_function
import sys
from copy import copy, deepcopy
import numpy as np
#import matplotlib.pyplot as plt
import pandas as pd
pd.set_option('display.width', 1000)
pd.set_option('display.max_rows', 1000)
from signals import *
def find_all_signals(_df, comission=0.0, max_position_size=1, debug=False):
"""
    Function finds and returns all signals that could result in profitable deals, taking the commission into account.
    E.g. it will return Buy and Sell signals if the ask price at Buy is lower than the bid price at Sell minus the commission.
    Then it will move one step forward and consider the already seen Sell signal and the next Buy for a possible
    profitable short deal.
"""
df = deepcopy(_df)
df['Buy'] = np.zeros(df.shape[0])
df['Sell'] = np.zeros(df.shape[0])
df['Buy Mod'] = np.zeros(df.shape[0])
df['Sell Mod'] = np.zeros(df.shape[0])
inflection_points = pd.DataFrame({'Buy': df["askpx_"].diff().shift(-1) > 0, 'Sell': df["bidpx_"].diff().shift(-1) < 0})
iterator = inflection_points.iterrows()
max_count = 0
position_size = 0
try:
while True:
#for i in range(0, 100):
idx_open, next_idx, row_open, sig_type_open = next_signal(iterator, df)
iterator = inflection_points.loc[next_idx:].iterrows()
iterator.next()
df[sig_type_open][idx_open] = 1
except TypeError:
print("Iteration stopped")
print("Buy candidates: {} Sell candidates: {}".format(df[df['Buy'] != 0].count()['Buy'], df[df['Sell'] != 0].count()['Sell']))
candidates = df[(df['Buy'] != 0) | (df['Sell'] != 0)].iterrows()
idx_open, row_open = candidates.next()
for idx, row in candidates:
if row_open['Buy'] == 1 and (df["bidpx_"][idx] > (df["askpx_"][idx_open] + comission)):
df['Buy Mod'][idx_open] += 1
df['Sell Mod'][idx] += 1
elif row_open['Sell'] == 1 and (df["askpx_"][idx] < (df["bidpx_"][idx_open] - comission)):
df['Sell Mod'][idx_open] += 1
df['Buy Mod'][idx] += 1
idx_open = idx
row_open = row
    df = df.rename(columns={"Buy": "Buy Candidates", "Sell": "Sell Candidates"})
df['Buy'] = np.zeros(df.shape[0])
df['Sell'] = np.zeros(df.shape[0])
df['Buy'][df['Buy Mod'] != 0] = 1
df['Sell'][df['Sell Mod'] != 0] = 1
print("Buy: {} Sell: {}".format(df[df['Buy Mod'] != 0].count()['Buy Mod'], df[df['Sell Mod'] != 0].count()['Sell Mod']))
print("Buy: {} Sell: {}".format(df[df['Buy'] != 0].count()['Buy'], df[df['Sell'] != 0].count()['Sell']))
return df
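def _example_pipeline(df):
    """Typical call order as a sketch, not wired into the script; assumes ``df``
    has ``bidpx_`` and ``askpx_`` columns and uses a hypothetical commission of 2.0."""
    signals = find_all_signals(df, comission=2.0)   # mark profitable Buy/Sell pairs
    positions = set_positions(signals)              # derive the running position
    profit, n_deals = pnl(positions)                # aggregate PnL over executed deals
    return positions, profit, n_deals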
def next_signal(iterator, df=None, sig_type=None, outer_idx=None, outer_row=None):
"""
    Recursive function to find the best signal (Buy or Sell) in the sequence of possible candidates (inflection points).
    It compares the current candidate with the next candidates; if one of the next candidates of the same type is better,
    e.g. if the current candidate is Buy with ask price 20, next candidate (1) is Buy with ask price 10,
    and next candidate (2) is Buy with ask price 15, the function should return next candidate (1) with ask price 10
    when it faces the first consecutive Sell candidate.
"""
prev_idx = outer_idx
best_idx = outer_idx
best_row = outer_row
for idx, row in iterator:
# print(idx, row)
if row['Buy'] or row['Sell']:
inner_sig_type = 'Buy' if row['Buy'] else 'Sell'
print("Inner signal: ", idx, inner_sig_type)
if sig_type:
print("Outer signal: ", outer_idx, sig_type)
if inner_sig_type == sig_type:
print("Compare {} bid: {} ask: {} with {} bid: {} ask: {}".
format(best_idx, df["bidpx_"][best_idx], df["askpx_"][best_idx], idx, df["bidpx_"][idx], df["askpx_"][idx]))
if sig_type == 'Buy' and df["askpx_"][idx] < df["askpx_"][best_idx]:
print("Better {} candidate at {} with price {}".format(sig_type, idx, df["askpx_"][idx]))
best_idx, best_row = idx, row
#return idx, idx, row, sig_type
if sig_type == 'Sell' and df["bidpx_"][idx] > df["bidpx_"][best_idx]:
print("Better {} candidate at {} with price {}".format(sig_type, idx, df["bidpx_"][idx]))
best_idx, best_row = idx, row
#return idx, idx, row, sig_type
prev_idx = idx
else:
print("Best {} candidate at {}, break...".format(sig_type, outer_idx))
return best_idx, prev_idx, best_row, sig_type
else:
print("Recursion")
return next_signal(iterator, df, inner_sig_type, idx, row)
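# Illustration of the recursion above (hypothetical prices):
#   candidates: Buy@ask=20 -> Buy@ask=10 -> Buy@ask=15 -> Sell@bid=25
#   each later Buy is compared against the best one seen so far (20, then 10),
#   and when the Sell candidate is reached the Buy at ask=10 is returned.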
def set_positions(_df):
    df = deepcopy(_df)
    df['Pos'] = np.zeros(df.shape[0])
    last_position = 0
    longs = 0
    shorts = 0
    running_pos = 0
    for idx, row in df.iterrows():
        # accumulate the net position from the executed buy/sell signals
        running_pos += row['Buy Mod'] - row['Sell Mod']
        df.loc[idx, 'Pos'] = running_pos
        if running_pos != last_position and running_pos > 0:
            longs += 1
        elif running_pos != last_position and running_pos < 0:
            shorts += 1
        last_position = running_pos
    print("Long positions: {} Short positions: {}".format(longs, shorts))
    return df
def find_signals(df, sig_type, comission=0.0, debug=False):
colnames = {"Buy": ("Buy", "Sell Close"),
"Sell": ("Sell", "Buy Close")}
inflection_points_buy = df["askpx_"].diff().shift(-1) > 0
inflection_points_sell = df["bidpx_"].diff().shift(-1) < 0
iterator = inflection_points_buy.iteritems() if sig_type == "Buy" else inflection_points_sell.iteritems()
inflection_points = inflection_points_buy if sig_type == "Buy" else inflection_points_sell
inner_inflection_points = inflection_points_sell if sig_type == "Buy" else inflection_points_buy
max_count = 0
(major_colname, minor_colname) = colnames[sig_type]
df[major_colname] = np.zeros(df.shape[0])
df[minor_colname] = np.zeros(df.shape[0])
for idx, val in iterator:
if max_count > 10000 and debug:
print("Max count reached, break...")
break
inner_iterator = inner_inflection_points.loc[idx:].iteritems()
if df[df[minor_colname]==1].empty:
can_open = True
else:
can_open = idx > df[df[minor_colname]==1].index[-1]
max_count += 1
if val and can_open:
print("{} candidate at {} with price {}".format(sig_type, idx, df["askpx_"][idx]))
for inner_idx, inner_val in inner_iterator:
if inner_idx > idx:
if sig_type == "Buy":
if df["askpx_"][inner_idx] < df["askpx_"][idx] and inflection_points[inner_idx]:
print("Better {} candidate at {} with price {}, break...".format(sig_type, inner_idx, df["askpx_"][inner_idx]))
break
if df["bidpx_"][inner_idx] > (df["askpx_"][idx] + comission) and inner_val:
df[major_colname][idx] = 1
df[minor_colname][inner_idx] = 1
print("Buy at {} with price {}".format(idx, df["askpx_"][idx]))
print("Sell at {} with price {}".format(inner_idx, df["bidpx_"][inner_idx]))
break
elif sig_type == "Sell":
if df["bidpx_"][inner_idx] > df["bidpx_"][idx] and inflection_points[inner_idx]:
print("Better {} candidate at {} with price {}, break...".format(sig_type, inner_idx, df["bidpx_"][inner_idx]))
break
if df["askpx_"][inner_idx] < (df["bidpx_"][idx] - comission) and inner_val:
df[major_colname][idx] = 1
df[minor_colname][inner_idx] = 1
print("Sell at {} with price {}".format(idx, df["bidpx_"][idx]))
print("Buy at {} with price {}".format(inner_idx, df["askpx_"][inner_idx]))
break
return df
def filter_signals(df):
buys = df["Buy"] + df["Buy Close"]
df["Buy Mod"] = np.zeros(df.shape[0])
df["Buy Mod"][buys == 2] = 1
sells = df["Sell"] + df["Sell Close"]
df["Sell Mod"] = np.zeros(df.shape[0])
df["Sell Mod"][sells == 2] = 1
iterator = df.iterrows()
current_signal = 0
for idx, row in iterator:
current_signal = row["Buy Mod"] - row["Sell Mod"]
if current_signal != 0:
print("Signal {} at {}".format(current_signal, idx))
inner_iterator = df.loc[idx:].iterrows()
inner_iterator.next()
for inner_idx, inner_row in inner_iterator:
next_signal = inner_row["Buy Mod"] - inner_row["Sell Mod"]
if next_signal == current_signal:
print("Consecutive similar signal {} at {}".format(next_signal, inner_idx))
if current_signal == 1:
df_slice = df.loc[idx:inner_idx]
candidates = df_slice[df_slice["Sell"] == 1]
best_candidate = candidates["bidpx_"].idxmax()
print(df.loc[best_candidate])
df["Sell Mod"].loc[best_candidate] = 1
break
elif current_signal == -1:
df_slice = df.loc[idx:inner_idx]
candidates = df_slice[df_slice["Buy"] == 1]
best_candidate = candidates["askpx_"].idxmin()
print(df.loc[best_candidate])
df["Buy Mod"].loc[best_candidate] = 1
break
elif next_signal != 0 and next_signal != current_signal:
break
df["Buy Open"] = df["Buy"]
df["Sell Open"] = df["Sell"]
df = df.drop(["Buy", "Sell"], axis=1)
print(df.columns)
df = df.rename(columns={"Buy Mod": "Buy", "Sell Mod": "Sell"})
print(df.columns)
# df = df.drop(["Buy Close", "Sell Close"], axis=1)
return df
def make_spans(df, sig_type):
span_colname = "Buys" if sig_type == "Buy" else "Sells"
reversed_df = df[::-1]
df[span_colname] = np.zeros(df.shape[0])
for idx in df[sig_type][df[sig_type] == 1].index:
signal_val = df.loc[idx]
iterator = reversed_df.loc[idx:].iterrows()
_d = print("Outer loop:", idx, signal_val["askpx_"]) if sig_type == "Buy" else print("Outer loop:", idx, signal_val["bidpx_"])
for i, val in iterator:
# _d = print("Inner loop:", i, val["askpx_"]) if sig_type == "Buy" else print("Inner loop:", i, val["bidpx_"])
if sig_type == "Buy":
if val["askpx_"] == signal_val["askpx_"]:
# print("Add to buys")
df[span_colname][i] = 1
else:
break
elif sig_type == "Sell":
if val["bidpx_"] == signal_val["bidpx_"]:
# print("Add to sells")
df[span_colname][i] = 1
else:
break
return df
def pnl(df, chained=False):
deals = []
pnl = 0
if not chained:
for idx, row in df[(df['Buy Mod'] != 0) | (df['Sell Mod'] != 0)].iterrows():
current_trade = row['Sell Mod'] * row["bidpx_"] - row['Buy Mod'] * row["askpx_"]
pnl = pnl + current_trade
deals.append(current_trade)
print("Running PnL: ", pnl)
print("Check PnL: {} vs {}".format(pnl, np.sum(deals)))
return pnl, len(deals)
else:
is_opened = False
for idx, row in df.iterrows():
if row["Buy"]:
if is_opened:
deals.append(-row["askpx_"])
deals.append(-row["askpx_"])
is_opened = True
elif row["Sell"]:
if is_opened:
deals.append(row["bidpx_"])
deals.append(row["bidpx_"])
is_opened = True
print(len(deals))
deals.pop()
print(len(deals))
return np.sum(deals), len(deals)
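# Worked example of the unchained PnL arithmetic with hypothetical prices: one
# round trip bought at ask 10 and sold at bid 12 gives a PnL of 2 over 2 deal legs.
def _pnl_example():
    import pandas as pd
    df = pd.DataFrame({
        "askpx_": [10, 11],
        "bidpx_": [9, 12],
        "Buy Mod": [1, 0],
        "Sell Mod": [0, 1],
    })
    return pnl(df, chained=False)  # -> (2, 2)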
def __main__():
"""
Trading Simulator from curriculumvite trading competition
    see also the arXiv paper from Roni Mittelman http://arxiv.org/pdf/1508.00317v1
Modified by [email protected]
produces data to train a neural net
"""
# Trades smaller than this will be omitted
min_trade_amount = None
comission = 0.0
if len(sys.argv) < 2 :
print ("Usage: day_trading_file, NOT target_price-file ")
sys.exit()
day_file = sys.argv[1]
try:
write_spans = True if sys.argv[2] == "--spans" else False
except IndexError:
write_spans = False
try:
chained_deals = True if sys.argv[3] == "--chained-deals" else False
except IndexError:
chained_deals = False
generate_signals_for_file(day_file, comission, write_spans, chained_deals)
__main__();
| mit | 75,075,838,899,933,100 | 40.41994 | 139 | 0.515536 | false |
ingadhoc/odoo-infrastructure | infrastructure/wizard/instance_update_add_instances.py | 1 | 1450 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import models, fields, api
class instance_update_add_instances(models.TransientModel):
_name = 'instance.update.add_instances'
@api.model
def get_update(self):
return self.env['infrastructure.instance.update'].browse(
self.env.context.get('active_id', False))
update_id = fields.Many2one(
'infrastructure.instance.update',
'Update',
default=get_update,
required=True,
ondelete='cascade',
)
actual_instance_ids = fields.Many2many(
'infrastructure.instance',
compute='get_actual_instances',
)
instance_ids = fields.Many2many(
'infrastructure.instance',
string='Instances',
)
@api.one
@api.depends('update_id')
def get_actual_instances(self):
self.actual_instance_ids = self.update_id.detail_ids.mapped(
'instance_id')
@api.multi
def confirm(self):
self.ensure_one()
for instance in self.instance_ids:
vals = {
'instance_id': instance.id,
'update_id': self.update_id.id,
}
self.update_id.detail_ids.create(vals)
| agpl-3.0 | -7,664,425,907,704,967,000 | 29.851064 | 78 | 0.536552 | false |
joehakimrahme/thawra | thawra/hero.py | 1 | 3453 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from thawra import action
class InvalidHero(Exception):
def __init__(self, msg, value):
self.value = value
self.msg = msg
super(InvalidHero, self).__init__()
def __str__(self):
return self.msg
class Hero(object):
def __repr__(self):
return self.name
def __init__(self, name, skillmap, attributes, element, macros=None):
self.name = name
self.element = element
if len(attributes) != 3 or \
not all(map(lambda x: isinstance(x, int), attributes)):
raise InvalidHero(
"Expected array of 3 integers for attributes, got: %s" %
attributes, attributes)
self.attributes = dict(zip(('str', 'int', 'agi'), attributes))
# TODO(rahmu): validate skillmap input
self.skillmap = skillmap
# TODO(rahmu): validate macros input
self.macros = macros
self.status = None
self.stats = {
'ATK': self.strength * 10,
'DEF': self.strength * 2,
'MAG': self.intelligence * 7,
'MDE': self.intelligence * 2,
'SPD': self.agility * 30
}
self.maxHP = self.strength * 100
self.maxMP = self.intelligence * 100
self._hp = self.maxHP
self._mp = self.maxMP
# TODO(rahmu): fill the rest of the dict with the skills
self.actions = {
'ATK': lambda target: action.Action(self, 'ATK', target, 0),
'MAG': lambda target: action.Action(self, 'MAG', target,
self.maxMP / 15)
}
@property
def level(self):
return self._get_level()
@property
def strength(self):
return self.attributes['str']
@property
def intelligence(self):
return self.attributes['int']
@property
def agility(self):
return self.attributes['agi']
@property
def hp(self):
return self._hp
@hp.setter
def hp(self, value):
switch = {
True: value,
value > self.maxHP: self.maxHP,
value < 0: 0}
self._hp = switch[True]
@property
def mp(self):
return self._mp
@mp.setter
def mp(self, value):
switch = {
True: value,
            value > self.maxMP: self.maxMP,
value < 0: 0}
self._mp = switch[True]
def _get_level(self):
# TODO(rahmu): it should be a max between this and the highest skill
# TODO(rahmu): it should raise an InvalidHero exception in case of a
# problem
return int(sum(self.attributes.values()) / 10)
def choice(self, allies, enemies):
if self.macros:
return self.macros(allies, enemies)
def randattack(allies, enemies):
return 'ATK', [random.choice([h for h in enemies if h.hp > 0])]
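if __name__ == "__main__":
    # Minimal usage sketch with hypothetical attribute values and an empty skillmap:
    # strength 4 gives ATK 40 and 400 max HP, and the combined attributes put the
    # hero at level 1.
    demo = Hero("Demo", {}, [4, 3, 3], "fire")
    print("ATK %d, maxHP %d, level %d" % (demo.stats['ATK'], demo.maxHP, demo.level))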
| apache-2.0 | 5,366,765,848,036,491,000 | 26.404762 | 76 | 0.5766 | false |
raonyguimaraes/mendelmd | analyses/migrations/0001_initial.py | 1 | 2097 | # Generated by Django 2.1.4 on 2018-12-27 08:50
from django.conf import settings
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('tasks', '__first__'),
('projects', '0001_initial'),
('files', '0001_initial'),
('mapps', '__first__'),
('samples', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Analysis',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('params', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True)),
('name', models.CharField(max_length=30)),
('status', models.TextField(blank=True, null=True)),
('apps', models.ManyToManyField(to='mapps.App')),
('files', models.ManyToManyField(to='files.File')),
('project', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='projects.Project')),
('samples', models.ManyToManyField(to='samples.Sample')),
('tasks', models.ManyToManyField(to='tasks.Task')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'analyses',
},
),
migrations.CreateModel(
name='AnalysisType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('repository', models.CharField(blank=True, max_length=600, null=True)),
],
options={
'verbose_name_plural': 'analysis_types',
},
),
]
| bsd-3-clause | -6,945,387,423,287,230,000 | 39.326923 | 138 | 0.567477 | false |
rsteed11/GAT | gat/core/sna/resilience.py | 1 | 1298 | import networkx as nx
import random
import scipy as sp
import scipy.stats  # imported so that sp.stats is available below
from gat.core.sna import ergm
def resilience(cliques_found, ergm_iters=3000):
scaledResilience = {}
scaledBaseline = {}
toScale = []
baselinesToScale = []
traces = []
formatted_traces = {}
cliques, selected = cliques_found
# Find resilience of subgraphs
for clique in cliques:
initShortestPath = nx.average_shortest_path_length(clique)
baselinesToScale.append(initShortestPath)
# creating perturbation by removing random 10% of nodes and averaging result of x iterations
G = clique.copy() # percent of nodes removed can be changed here
rSample = random.sample(G.nodes(), int(G.number_of_nodes() * 0.1))
G.remove_nodes_from(rSample)
coefs, new_trace = ergm.resilience(G, ergm_iters, mu=initShortestPath*.2)
toScale.append(coefs["aspl"])
traces.append(new_trace["aspl"].tolist())
# scale resilience measures on a normal scale
for i in range(len(cliques)):
scaledResilience[selected[i]] = toScale[i]
scaledBaseline[selected[i]] = sp.stats.percentileofscore(baselinesToScale, baselinesToScale[i])
formatted_traces[selected[i]] = traces[i]
return scaledBaseline, scaledResilience, formatted_traces
| mit | 5,061,723,243,135,137,000 | 37.176471 | 103 | 0.68567 | false |
benreynwar/pyvivado | pyvivado/test_utils.py | 1 | 9478 | import os
import unittest
import logging
import shutil
import time
import testfixtures
from pyvivado import filetestbench_project, fpga_project, axi
from pyvivado.synopsys import synopsys_project
from pyvivado import vivado_project, test_info
from pyvivado import config
from pyvivado import base_test_utils
logger = logging.getLogger(__name__)
# Import to make available is register
from pyvivado.hdl.wrapper import file_testbench
default_clock_period = 10
default_extra_clock_periods = 20
def compare_p(a, b, pause):
if (a != b) and pause:
import pdb
pdb.set_trace()
else:
testfixtures.compare(a, b)
def assert_p(a, pause):
if (not a) and pause:
import pdb
pdb.set_trace()
else:
assert(a)
class TestCase(unittest.TestCase):
def simulate(self, *args, **kwargs):
return simulate(*args, **kwargs)
def check_output(self, *args, **kwargs):
return base_test_utils.check_output(*args, **kwargs)
def simulate(directory, data, sim_type,
test_name='test',
interface=None,
params=None,
board=config.default_board,
clock_period=default_clock_period,
extra_clock_periods=default_extra_clock_periods,
force_refresh=False,
overwrite_ok=False,
project_class=filetestbench_project.FileTestBenchProject,
):
if interface is None:
if params is None:
raise ValueError('No params passed.')
else:
logger.warning('Deprecated: Pass parameters rather than interface')
if params:
raise ValueError('Do not pass interface as well as params. Just pass params.')
params = interface.parameters
params['module_name'] = interface.module_name
if force_refresh and os.path.exists(directory):
shutil.rmtree(directory)
# Make the project.
logger.debug('Making a FileTestBench Project')
p = project_class(
params=params, directory=directory,
overwrite_ok=overwrite_ok,
)
logger.debug('Updating input data')
p.update_input_data(input_data=data, test_name=test_name)
if sim_type.startswith('vivado'):
vivado_sim_type = sim_type[len('vivado_'):]
logger.debug('Making a Vivado Project')
v = vivado_project.VivadoProject(
p, overwrite_ok=overwrite_ok, wait_for_creation=True)
# Run the simulation.
runtime = '{} ns'.format((len(data) + extra_clock_periods) *
clock_period)
errors, output_data = v.run_simulation(
test_name=test_name, runtime=runtime, sim_type=vivado_sim_type)
for error in errors:
logger.error(error)
assert(len(errors) == 0)
elif sim_type.startswith('vcs'):
vcs_sim_type = sim_type[len('vcs_'):]
logger.debug('create vcs project')
v = synopsys_project.SynopsysProject(p)
logger.debug('run simulation')
errors, output_data = v.run_simulation(
test_name=test_name, sim_type=vcs_sim_type)
logger.debug('finished run simulation')
for error in errors:
logger.error(error)
assert(len(errors) == 0)
else:
raise ValueError('Unknown sim_type: {}'.format(sim_type))
return output_data[1:]
def deploy(directory, params,
board=config.default_board,
part=None,
force_refresh=False,
overwrite_ok=False,
):
if force_refresh and os.path.exists(directory):
shutil.rmtree(directory)
# Make the project.
p = fpga_project.FPGAProject(
parameters=params,
directory=directory,
board=board,
overwrite_ok=overwrite_ok,
)
v = vivado_project.VivadoProject(
project=p, board=board, wait_for_creation=True, overwrite_ok=overwrite_ok)
t_implement = v.implement()
t_implement.wait()
t_monitor, conn = v.send_to_fpga_and_monitor()
return conn
def run_test(test_class, test_name='default_test',
logging_level=logging.DEBUG):
suite = unittest.TestSuite()
suite.addTest(test_class(test_name))
runner = unittest.TextTestRunner()
runner.run(suite)
def deploy_and_test(
params, directory, tests, board=config.default_board,
part=None, force_refresh=False, overwrite_ok=False):
'''
Deploy design to an FPGA and run tests on it there.
The DUT must have an AXI4-LITE interface.
'''
# Make sure this directory is not already deployed.
v_dir = os.path.join(directory, 'vivado')
# Import connection down here so that if it's not available
# we can use other test_utils.
from pyvivado import connection
hwcode = connection.get_projdir_hwcode(v_dir)
assert(hwcode is None)
conn = deploy(
directory=directory, params=params,
board=board,
part=part,
force_refresh=force_refresh,
overwrite_ok=overwrite_ok,
)
handler = axi.ConnCommandHandler(conn)
for test in tests:
test.set_handler(handler)
test.prepare()
test.check()
# Sleep for 10 seconds so that we can kill monitor
time.sleep(10)
# Destroy monitoring process
connection.kill_free_monitors(v_dir)
def simulate_and_test(
directory, reset_input, tests,
test_name='test',
interface=None,
params=None,
wait_lines=20,
board=config.default_board,
sim_type=test_info.default_sim_type,
clock_period=default_clock_period,
extra_clock_periods=default_extra_clock_periods,
split_tag=base_test_utils.DEFAULT_SPLIT_TAG,
pause=False,
force_refresh=False,
overwrite_ok=False,
project_class=filetestbench_project.FileTestBenchProject,
):
'''
    Run a single Vivado simulation that executes many independent tests
    one after another.
'''
logger.debug('staring simulate and test')
if interface is None:
if params is None:
raise ValueError('No params passed.')
else:
logger.warning('Deprecated: Pass parameters rather than interface')
if params:
raise ValueError('Do not pass interface as well as params. Just pass params.')
params = interface.parameters
params['module_name'] = interface.module_name
logger.debug('Making input data')
input_data = base_test_utils.tests_to_input_data(
reset_input=reset_input, wait_lines=wait_lines, tests=tests)
logger.debug('Start simulate: simtype is {}'.format(sim_type))
output_data = simulate(
interface=None,
params=params,
directory=directory,
data=input_data,
sim_type=sim_type,
test_name=test_name,
overwrite_ok=overwrite_ok,
project_class=project_class,
)
logger.debug('finish simulate')
base_test_utils.validate_output_data_with_tests(
input_data=input_data,
output_data=output_data,
wait_lines=wait_lines,
pause=pause,
tests=tests,
)
class AxiTest():
def __init__(self):
self.handler = None
def set_handler(self, handler):
assert(self.handler is None)
self.handler = handler
def get_handler(self):
if self.handler is None:
raise Exception('Handler on AxiTest not set')
return self.handler
def prepare(self):
raise Exception('Unimplemented')
def check(self, pause=False):
raise Exception('Unimplemented')
def make_input_data(self):
handler = self.get_handler()
self.prepare()
input_data = [
{'reset': 0,
'i': d,
} for d in handler.make_command_dicts()]
assert(len(input_data) > 0)
return input_data
def check_output_data(self, input_data, output_data, pause=False):
handler = self.get_handler()
response_dicts = [d['o'] for d in output_data]
handler.consume_response_dicts(response_dicts)
self.check(pause=pause)
def axi_run_and_test(
directory,
tests,
test_name='test',
params=None,
wait_lines=20,
board=config.default_board,
sim_type=test_info.default_sim_type,
clock_period=default_clock_period,
extra_clock_periods=default_extra_clock_periods,
pause=False,
force_refresh=False,
overwrite_ok=False,
):
if sim_type == 'fpga':
deploy_and_test(
params=params,
directory=directory,
tests=tests,
board=board,
force_refresh=force_refresh,
overwrite_ok=overwrite_ok,
)
else:
handler = axi.DictCommandHandler()
for test in tests:
logger.debug('setting handler to {}'.format(handler))
test.set_handler(handler)
simulate_and_test(
directory=directory,
reset_input={'reset': 1, 'd': axi.make_empty_axi4lite_m2s_dict()},
tests=tests,
params=params,
wait_lines=wait_lines,
sim_type=sim_type,
clock_period=clock_period,
extra_clock_periods=extra_clock_periods,
pause=pause,
force_refresh=force_refresh,
overwrite_ok=overwrite_ok,
)
| mit | 2,229,433,497,618,155,000 | 29.973856 | 90 | 0.614265 | false |
tuulos/ringo | ringogw/py/ringodisco.py | 1 | 3199 | import ringogw
def ringo_reader(fd, sze, fname):
import struct, zlib
MAGIC_HEAD = (0x47da66b5,)
MAGIC_TAIL = (0xacc50f5d,)
def read_really(s):
t = 0
buf = ""
while t < s:
r = fd.read(s - t)
if not r:
return buf
t += len(r)
buf += r
return buf
def check_body(head_body):
time, entryid, flags, keycrc, keylen, valcrc, vallen =\
struct.unpack("<IIIIIII", head_body)
tot = keylen + vallen + 4
body = read_really(tot)
if len(body) < tot:
return False, head_body + body
key = body[:keylen]
val = body[keylen:-4]
if zlib.crc32(key) != keycrc or zlib.crc32(val) != valcrc or\
struct.unpack("<I", body[-4:]) != MAGIC_TAIL:
return False, head_body + body
else:
return True, (entryid, flags, key, val)
def read_entry():
head = read_really(8)
while len(head) >= 8:
if struct.unpack("<I", head[:4]) == MAGIC_HEAD:
if len(head) < 36:
head += read_really(36 - len(head))
if len(head) < 36:
return None
head_crc = struct.unpack("<I", head[4:8])[0]
head_body = head[8:36]
if zlib.crc32(head_body) == head_crc:
ok, cont = check_body(head_body)
if ok:
return cont
head = cont
head = head[1:]
if len(head) < 8:
head += fd.read(1)
else:
return None
prev_id = None
while True:
entry = read_entry()
if not entry:
break
entryid, flags, key, val = entry
if flags & 1 or flags & 2:
continue
if entryid == prev_id:
continue
prev_id = entryid
yield key, val
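# Record layout implied by the parser above (informational sketch):
#   MAGIC_HEAD (4 bytes) | crc32 of head body (4 bytes) |
#   head body (28 bytes: time, entryid, flags, keycrc, keylen, valcrc, vallen,
#   all little-endian uint32) | key (keylen bytes) | value (vallen bytes) |
#   4-byte trailer that must equal MAGIC_TAIL
# Entries whose flags have bit 0 or bit 1 set, or that repeat the previous
# entryid, are skipped by ringo_reader.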
def input_domain(ringo_host, name):
ringo = ringogw.Ringo(ringo_host)
code, res = ringo.request("/mon/domains/domain?name=" + name)
if code != 200:
return []
urls = []
for domainid, name, nodeid, chunk, owner, nrepl in res:
nodename, node = nodeid.split('@')
urls.append("disco://%s/_ringo/%s/rdomain-%s/data"\
% (node, nodename[6:], domainid))
return urls
if __name__ == "__main__":
import sys
print "\n".join(input_domain(sys.argv[1], sys.argv[2]))
| bsd-3-clause | 8,622,937,710,840,089,000 | 36.635294 | 77 | 0.371366 | false |
ColumbiaDVMM/ColumbiaImageSearch | cufacesearch/cufacesearch/ingester/deprecated/kafka_image_processor.py | 1 | 7700 | # DEPRECATED
# import json
# import time
# import multiprocessing
# from .generic_kafka_processor import GenericKafkaProcessor
# from ..imgio.imgio import buffer_to_B64
#
# default_prefix = "KIP_"
# default_prefix_frompkl = "KIPFP_"
#
# # TODO: This class should be rewritten to actually extract features from images...
# # TODO: Work on getting a pycaffe sentibank featurizer. Check we get same feature values than command line in 'sentibank_cmdline'
# # at 'https://github.com/ColumbiaDVMM/ColumbiaImageSearch/blob/master/cu_image_search/feature_extractor/sentibank_cmdline.py'
# # Should we have a generic extractor to inherit from, with just a different process_one_core() method?...
#
# class KafkaImageProcessor(GenericKafkaProcessor):
#
# def __init__(self, global_conf_filename, prefix=default_prefix, pid=None):
# # when running as deamon
# self.pid = pid
# # call GenericKafkaProcessor init (and others potentially)
# super(KafkaImageProcessor, self).__init__(global_conf_filename, prefix)
# # any additional initialization needed, like producer specific output logic
# self.cdr_out_topic = self.get_required_param('producer_cdr_out_topic')
# self.images_out_topic = self.get_required_param('producer_images_out_topic')
# # TODO: get s3 url prefix from actual location
# # for now "object_stored_prefix" in "_meta" of domain CDR
# # but just get from conf
# self.url_prefix = self.get_required_param('obj_stored_prefix')
# self.process_count = 0
# self.process_failed = 0
# self.process_time = 0
# self.set_pp()
#
# def set_pp(self):
# self.pp = "KafkaImageProcessor"
# if self.pid:
# self.pp += ":"+str(self.pid)
#
#
#
# def process_one(self, msg):
# from ..imgio.imgio import get_SHA1_img_info_from_buffer, get_buffer_from_URL
#
# self.print_stats(msg)
#
# msg_value = json.loads(msg.value)
#
# # From msg value get list_urls for image objects only
# list_urls = self.get_images_urls(msg_value)
#
# # Get images data and infos
# dict_imgs = dict()
# for url, obj_pos in list_urls:
# start_process = time.time()
# if self.verbose > 2:
# print_msg = "[{}.process_one: info] Downloading image from: {}"
# print print_msg.format(self.pp, url)
# try:
# img_buffer = get_buffer_from_URL(url)
# if img_buffer:
# sha1, img_type, width, height = get_SHA1_img_info_from_buffer(img_buffer)
# dict_imgs[url] = {'obj_pos': obj_pos, 'img_buffer': img_buffer, 'sha1': sha1, 'img_info': {'format': img_type, 'width': width, 'height': height}}
# self.toc_process_ok(start_process)
# else:
# self.toc_process_failed(start_process)
# if self.verbose > 1:
# print_msg = "[{}.process_one: info] Could not download image from: {}"
# print print_msg.format(self.pp, url)
# except Exception as inst:
# self.toc_process_failed(start_process)
# if self.verbose > 0:
# print_msg = "[{}.process_one: error] Could not download image from: {} ({})"
# print print_msg.format(self.pp, url, inst)
#
# # Push to cdr_out_topic
# self.producer.send(self.cdr_out_topic, self.build_cdr_msg(msg_value, dict_imgs))
#
# # TODO: we could have all extraction registered here, and not pushing an image if it has been processed by all extractions. But that violates the consumer design of Kafka...
# # Push to images_out_topic
# for img_out_msg in self.build_image_msg(dict_imgs):
# self.producer.send(self.images_out_topic, img_out_msg)
#
#
# class KafkaImageProcessorFromPkl(GenericKafkaProcessor):
# # To push list of images to be processed from a pickle file containing a dictionary
# # {'update_ids': update['update_ids'], 'update_images': out_update_images}
# # with 'out_update_images' being a list of tuples (sha1, url)
#
# def __init__(self, global_conf_filename, prefix=default_prefix_frompkl):
# # call GenericKafkaProcessor init (and others potentially)
# super(KafkaImageProcessorFromPkl, self).__init__(global_conf_filename, prefix)
# # any additional initialization needed, like producer specific output logic
# self.images_out_topic = self.get_required_param('images_out_topic')
# self.pkl_path = self.get_required_param('pkl_path')
# self.process_count = 0
# self.process_failed = 0
# self.process_time = 0
# self.display_count = 100
# self.set_pp()
#
# def set_pp(self):
# self.pp = "KafkaImageProcessorFromPkl"
#
# def get_next_img(self):
# import pickle
# update = pickle.load(open(self.pkl_path,'rb'))
# for sha1, url in update['update_images']:
# yield sha1, url
#
# def build_image_msg(self, dict_imgs):
# # Build dict ouput for each image with fields 's3_url', 'sha1', 'img_info' and 'img_buffer'
# img_out_msgs = []
# for url in dict_imgs:
# tmp_dict_out = dict()
# tmp_dict_out['s3_url'] = url
# tmp_dict_out['sha1'] = dict_imgs[url]['sha1']
# tmp_dict_out['img_info'] = dict_imgs[url]['img_info']
# # encode buffer in B64?
# tmp_dict_out['img_buffer'] = buffer_to_B64(dict_imgs[url]['img_buffer'])
# img_out_msgs.append(json.dumps(tmp_dict_out).encode('utf-8'))
# return img_out_msgs
#
# def process(self):
# from ..imgio.imgio import get_SHA1_img_info_from_buffer, get_buffer_from_URL
#
# # Get images data and infos
# for sha1, url in self.get_next_img():
#
# if (self.process_count + self.process_failed) % self.display_count == 0:
# avg_process_time = self.process_time / max(1, self.process_count + self.process_failed)
# print_msg = "[%s] dl count: %d, failed: %d, time: %f"
# print print_msg % (self.pp, self.process_count, self.process_failed, avg_process_time)
#
# dict_imgs = dict()
# start_process = time.time()
# if self.verbose > 2:
# print_msg = "[{}.process_one: info] Downloading image from: {}"
# print print_msg.format(self.pp, url)
# try:
# img_buffer = get_buffer_from_URL(url)
# if img_buffer:
# sha1, img_type, width, height = get_SHA1_img_info_from_buffer(img_buffer)
# dict_imgs[url] = {'img_buffer': img_buffer, 'sha1': sha1,
# 'img_info': {'format': img_type, 'width': width, 'height': height}}
# self.toc_process_ok(start_process)
# else:
# self.toc_process_failed(start_process)
# if self.verbose > 1:
# print_msg = "[{}.process_one: info] Could not download image from: {}"
# print print_msg.format(self.pp, url)
# except Exception as inst:
# self.toc_process_failed(start_process)
# if self.verbose > 0:
# print_msg = "[{}.process_one: error] Could not download image from: {} ({})"
# print print_msg.format(self.pp, url, inst)
#
# # Push to images_out_topic
# for img_out_msg in self.build_image_msg(dict_imgs):
# self.producer.send(self.images_out_topic, img_out_msg)
#
# class DaemonKafkaImageProcessor(multiprocessing.Process):
#
# daemon = True
#
# def __init__(self, conf, prefix=default_prefix):
# super(DaemonKafkaImageProcessor, self).__init__()
# self.conf = conf
# self.prefix = prefix
#
# def run(self):
# try:
# print "Starting worker KafkaImageProcessor.{}".format(self.pid)
# kp = KafkaImageProcessor(self.conf, prefix=self.prefix, pid=self.pid)
# for msg in kp.consumer:
# kp.process_one(msg)
# except Exception as inst:
# print "KafkaImageProcessor.{} died ()".format(self.pid, inst) | apache-2.0 | -2,658,118,316,542,654,000 | 42.264045 | 179 | 0.633636 | false |
schollz/prevent-link-rot | lib.py | 1 | 2472 | # -*- coding: utf-8 -*-
import sys
import re
import json
from multiprocessing import Pool, cpu_count
import requests
def isurl(s):
if re.match(r'(?i)\b((?:[a-z][\w-]+:(?:/{1,3}|[a-z0-9%])|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))', s):
return True
else:
return False
def getWebArchiveLink(url):
# will need to handle cases that it can't find, like for http://w3techs.com/technologies/overview/javascript_library/all
if 'web.archive' in url:
return url,url
try:
r = requests.get('https://web.archive.org/save/' + url)
print "Got permanent link for " + url
except:
return url,url
if r.status_code == 403:
return url,url
else:
try:
return url,'https://web.archive.org' + r.headers['content-location']
except:
print url
return url,url
def getPermaccLink(dat):
# will need to handle cases that it can't find, like for http://w3techs.com/technologies/overview/javascript_library/all
url = dat[0]
apikey = dat[1]
payload = {'url': url, 'title': url}
permacc_url = 'https://api.perma.cc/v1/archives/?api_key=' + apikey
r = requests.post(permacc_url, data = json.dumps(payload))
print r.status_code
if r.status_code == 201:
result = json.loads(r.text)
print json.dumps(result,indent=4)
return url,str('http://perma.cc/' + result['guid'] + '?type=source')
else:
return url,url
def replaceText(text_test,apikey):
urls = []
urls_in_order = []
for url in re.findall(r'(https?://[^\s]+)', text_test):
newurl = url.split('"')[0].split('<')[0]
while newurl[-1] == '.' or newurl[-1] == ')' or newurl[-1] == '!':
newurl = newurl[:-1]
if not apikey:
urls.append(newurl)
else:
urls.append((newurl,apikey))
urls_in_order.append(newurl)
f = getWebArchiveLink
if apikey:
f = getPermaccLink
p = Pool(cpu_count())
conversion = {}
for result in p.map(f, list(set(urls))):
conversion[result[0]] = result[1]
p.terminate()
print conversion
curPos = 0
for url in urls_in_order:
if url in text_test[curPos:]:
print url
print conversion[url]
print text_test[curPos:]
newPos = text_test.index(url)
text_test = text_test[0:curPos] + text_test[curPos:].replace(url,conversion[url],1)
curPos = newPos
return text_test
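if __name__ == "__main__":
    # Minimal usage sketch (hypothetical text; needs network access). With no
    # perma.cc API key the URLs are rewritten to Web Archive links instead.
    sample = "Background reading: https://www.python.org/ and more."
    print replaceText(sample, None)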
| mit | -5,427,227,733,330,338,000 | 26.977273 | 229 | 0.587327 | false |
GoogleCloudPlatform/tf-estimator-tutorials | 00_Miscellaneous/model_evaluation_pipeline/trainer/model.py | 1 | 7151 | #!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
BUCKET = None # set from task.py
PATTERN = 'of' # gets all files
# Determine CSV, label, and key columns
CSV_COLUMNS = 'weight_pounds,is_male,mother_age,plurality,gestation_weeks,key'.split(',')
LABEL_COLUMN = 'weight_pounds'
KEY_COLUMN = 'key'
# Set default values for each CSV column
DEFAULTS = [[0.0], ['null'], [0.0], ['null'], [0.0], ['nokey']]
# Define some hyperparameters
TRAIN_STEPS = 10000
EVAL_STEPS = None
BATCH_SIZE = 512
NEMBEDS = 3
NNSIZE = [64, 16, 4]
# Create an input function reading a file using the Dataset API
# Then provide the results to the Estimator API
def read_dataset(prefix, mode, batch_size):
def _input_fn():
def decode_csv(value_column):
columns = tf.decode_csv(value_column, record_defaults=DEFAULTS)
features = dict(zip(CSV_COLUMNS, columns))
label = features.pop(LABEL_COLUMN)
return features, label
# Use prefix to create file path
file_path = 'gs://{}/babyweight/preproc/{}*{}*'.format(BUCKET, prefix, PATTERN)
# Create list of files that match pattern
file_list = tf.gfile.Glob(file_path)
# Create dataset from file list
dataset = (tf.data.TextLineDataset(file_list) # Read text file
.map(decode_csv)) # Transform each elem by applying decode_csv fn
if mode == tf.estimator.ModeKeys.TRAIN:
num_epochs = None # indefinitely
dataset = dataset.shuffle(buffer_size = 10 * batch_size)
else:
num_epochs = 1 # end-of-input after this
dataset = dataset.repeat(num_epochs).batch(batch_size)
return dataset.make_one_shot_iterator().get_next()
return _input_fn
# Define feature columns
def get_wide_deep():
# Define column types
is_male,mother_age,plurality,gestation_weeks = \
[\
tf.feature_column.categorical_column_with_vocabulary_list('is_male',
['True', 'False', 'Unknown']),
tf.feature_column.numeric_column('mother_age'),
tf.feature_column.categorical_column_with_vocabulary_list('plurality',
['Single(1)', 'Twins(2)', 'Triplets(3)',
'Quadruplets(4)', 'Quintuplets(5)','Multiple(2+)']),
tf.feature_column.numeric_column('gestation_weeks')
]
# Discretize
age_buckets = tf.feature_column.bucketized_column(mother_age,
boundaries=np.arange(15,45,1).tolist())
gestation_buckets = tf.feature_column.bucketized_column(gestation_weeks,
boundaries=np.arange(17,47,1).tolist())
# Sparse columns are wide, have a linear relationship with the output
wide = [is_male,
plurality,
age_buckets,
gestation_buckets]
# Feature cross all the wide columns and embed into a lower dimension
crossed = tf.feature_column.crossed_column(wide, hash_bucket_size=20000)
embed = tf.feature_column.embedding_column(crossed, NEMBEDS)
# Continuous columns are deep, have a complex relationship with the output
deep = [mother_age,
gestation_weeks,
embed]
return wide, deep
# Create serving input function to be able to serve predictions later using provided inputs
def serving_input_fn():
feature_placeholders = {
'is_male': tf.placeholder(tf.string, [None]),
'mother_age': tf.placeholder(tf.float32, [None]),
'plurality': tf.placeholder(tf.string, [None]),
'gestation_weeks': tf.placeholder(tf.float32, [None]),
KEY_COLUMN: tf.placeholder_with_default(tf.constant(['nokey']), [None])
}
features = {
key: tf.expand_dims(tensor, -1)
for key, tensor in feature_placeholders.items()
}
return tf.estimator.export.ServingInputReceiver(features, feature_placeholders)
# create metric for hyperparameter tuning
def my_rmse(labels, predictions):
pred_values = predictions['predictions']
return {'rmse': tf.metrics.root_mean_squared_error(labels, pred_values)}
# forward to key-column to export
def forward_key_to_export(estimator):
estimator = tf.contrib.estimator.forward_features(estimator, KEY_COLUMN)
# return estimator
## This shouldn't be necessary (I've filed CL/187793590 to update extenders.py with this code)
config = estimator.config
def model_fn2(features, labels, mode):
estimatorSpec = estimator._call_model_fn(features, labels, mode, config=config)
if estimatorSpec.export_outputs:
for ekey in ['predict', 'serving_default']:
if (ekey in estimatorSpec.export_outputs and
isinstance(estimatorSpec.export_outputs[ekey],
tf.estimator.export.PredictOutput)):
estimatorSpec.export_outputs[ekey] = \
tf.estimator.export.PredictOutput(estimatorSpec.predictions)
return estimatorSpec
return tf.estimator.Estimator(model_fn=model_fn2, config=config)
##
# Create estimator to train and evaluate
def train_and_evaluate(output_dir):
wide, deep = get_wide_deep()
EVAL_INTERVAL = 300 # seconds
run_config = tf.estimator.RunConfig(save_checkpoints_secs = EVAL_INTERVAL,
keep_checkpoint_max = 3)
estimator = tf.estimator.DNNLinearCombinedRegressor(
model_dir = output_dir,
linear_feature_columns = wide,
dnn_feature_columns = deep,
dnn_hidden_units = NNSIZE,
config = run_config)
estimator = tf.contrib.estimator.add_metrics(estimator, my_rmse)
estimator = forward_key_to_export(estimator)
train_spec = tf.estimator.TrainSpec(
input_fn = read_dataset('train', tf.estimator.ModeKeys.TRAIN, BATCH_SIZE),
max_steps = TRAIN_STEPS)
exporter = tf.estimator.LatestExporter('exporter', serving_input_fn, exports_to_keep=None)
eval_spec = tf.estimator.EvalSpec(
input_fn = read_dataset('eval', tf.estimator.ModeKeys.EVAL, 2**15), # no need to batch in eval
steps = EVAL_STEPS,
start_delay_secs = 60, # start evaluating after N seconds
throttle_secs = EVAL_INTERVAL, # evaluate every N seconds
exporters = exporter)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
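# Minimal invocation sketch (hypothetical bucket and output path; in the real
# pipeline task.py parses the arguments, sets BUCKET, PATTERN and the
# hyperparameters on this module, then calls train_and_evaluate):
#   from trainer import model
#   model.BUCKET = 'my-bucket'
#   model.train_and_evaluate('gs://my-bucket/babyweight/trained_model')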
| apache-2.0 | -1,798,591,389,103,198,500 | 39.40113 | 103 | 0.658369 | false |
breuderink/psychic | psychic/tests/testedf.py | 1 | 2121 | # -*- coding: utf-8 -*-
import unittest, os
from ..edfreader import *
class TestEDFBaseReader(unittest.TestCase):
def test_synthetic_content(self):
'''
Test EDF reader using artifical EDF dataset. Note that this is not an
EDF+ dataset and as such does not contain annotations. Annotations decoding
is separately tested, *but not from a real file*!.
'''
reader = BaseEDFReader(
open(os.path.join('data', 'sine3Hz_block0.2Hz.edf'), 'rb'))
reader.read_header()
h = reader.header
# check important header fields
self.assertEqual(h['label'], ['3Hz +5/-5 V', '0.2Hz Blk 1/0uV'])
self.assertEqual(h['units'], ['V', 'uV'])
self.assertEqual(h['contiguous'], True)
fs = np.asarray(h['n_samples_per_record']) / h['record_length']
# get records
recs = list(reader.records())
time = zip(*recs)[0]
signals = zip(*recs)[1]
annotations = list(zip(*recs)[2])
# check EDF+ fields that are *not present in this file*
np.testing.assert_equal(time, np.zeros(11) * np.nan)
self.assertEqual(annotations, [[]] * 11)
# check 3 Hz sine wave
sine, block = [np.hstack(s) for s in zip(*signals)]
target = 5 * np.sin(3 * np.pi * 2 * np.arange(0, sine.size) / fs[0])
assert np.max((sine - target) ** 2) < 1e-4
# check .2 Hz block wave
target = np.sin(.2 * np.pi * 2 * np.arange(1, block.size + 1) / fs[1]) >= 0
assert np.max((block - target) ** 2) < 1e-4
def test_tal(self):
mult_annotations = '+180\x14Lights off\x14Close door\x14\x00'
with_duration = '+1800.2\x1525.5\x14Apnea\x14\x00'
test_unicode = '+180\x14€\x14\x00\x00'
# test annotation with duration
self.assertEqual(tal(with_duration), [(1800.2, 25.5, [u'Apnea'])])
# test multiple annotations
self.assertEqual(tal('\x00' * 4 + with_duration * 3),
[(1800.2, 25.5, [u'Apnea'])] * 3)
# test multiple annotations for one time point
self.assertEqual(tal(mult_annotations),
[(180., 0., [u'Lights off', u'Close door'])])
# test unicode support
self.assertEqual(tal(test_unicode), [(180., 0., [u'€'])])
| bsd-3-clause | 4,709,166,478,044,871,000 | 34.283333 | 79 | 0.619745 | false |
quarkslab/irma | probe/modules/antivirus/comodo/cavl.py | 1 | 2379 | #
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
import logging
import re
from datetime import datetime
from pathlib import Path
from modules.antivirus.base import AntivirusUnix
log = logging.getLogger(__name__)
class ComodoCAVL(AntivirusUnix):
name = "Comodo Antivirus (Linux)"
# ==================================
# Constructor and destructor stuff
# ==================================
def __init__(self, *args, **kwargs):
# class super class constructor
super().__init__(*args, **kwargs)
# Comodo does not use return value as infection indicator. Distinction
# between INFECTED and CLEAN will be done in the 'false positive
# handler' of Antivirus.scan()
self._scan_retcodes[self.ScanResult.INFECTED] = lambda x: x in [0]
# scan tool variables
self.scan_args = (
"-v", # verbose mode, display more detailed output
"-s", # scan a file or directory
)
self.scan_patterns = [
re.compile('(?P<file>.*) ---> Found .*,' +
' Malware Name is (?P<name>.*)', re.IGNORECASE),
]
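        # A matching cmdscan report line looks roughly like (illustrative only;
        # the exact wording may differ between versions):
        #   /tmp/sample.bin ---> Found Virus, Malware Name is Malware.Generic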
self.scan_path = Path("/opt/COMODO/cmdscan")
# ==========================================
# Antivirus methods (need to be overriden)
# ==========================================
def get_version(self):
"""return the version of the antivirus"""
return Path('/opt/COMODO/cavver.dat').read_text()
def get_database(self):
"""return list of files in the database"""
search_paths = [Path('/opt/COMODO/scanners/'), ]
return self.locate('*.cav', search_paths, syspath=False)
def get_virus_database_version(self):
"""Return the Virus Database version"""
d = Path("/opt/COMODO/scanners/bases.cav").stat().st_mtime
return datetime.fromtimestamp(d).strftime('%Y-%m-%d')
| apache-2.0 | 6,662,217,824,837,010,000 | 34.507463 | 78 | 0.592686 | false |
marksamman/pylinkshortener | app/models.py | 1 | 2721 | # Copyright (c) 2014 Mark Samman <https://github.com/marksamman/pylinkshortener>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import config, math, random, time
from datetime import datetime
from sqlalchemy import create_engine, Column, DateTime, ForeignKey, Integer, String, VARCHAR
from sqlalchemy.dialects.postgresql import BIGINT, CIDR
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import backref, relationship, sessionmaker
from app.constants import url_safe
engine = create_engine(config.SQLALCHEMY_DATABASE_URI)
Session = sessionmaker(bind=engine)
Base = declarative_base()
class Link(Base):
__tablename__ = 'links'
id = Column(Integer, primary_key=True)
url = Column(VARCHAR)
creator_ip = Column(CIDR)
created = Column(BIGINT)
random = Column(String(2))
def __init__(self, url, creator_ip):
self.url = url
self.created = math.floor(time.time())
self.creator_ip = creator_ip
self.random = ''.join(random.choice(url_safe) for _ in range(Link.random.property.columns[0].type.length))
def __repr__(self):
return '<Link %r>' % self.url
class Click(Base):
__tablename__ = 'clicks'
id = Column(Integer, primary_key=True)
inserted = Column(BIGINT)
ip = Column(CIDR)
user_agent = Column(VARCHAR)
link_id = Column(Integer, ForeignKey('links.id'))
link = relationship('Link', backref=backref('clicks', order_by=inserted.desc()))
def __init__(self, ip, user_agent, inserted, link_id):
self.inserted = inserted
self.ip = ip
self.user_agent = user_agent
self.link_id = link_id
def __repr__(self):
return '<Click %r>' % self.id
| mit | -7,008,481,928,122,294,000 | 38.434783 | 114 | 0.715913 | false |
corpnewt/CorpBot.py | Cogs/Xp.py | 1 | 41529 | import asyncio
import discord
import datetime
import time
import random
from discord.ext import commands
from Cogs import Settings, DisplayName, Nullify, CheckRoles, UserTime, Message, PickList
def setup(bot):
# Add the bot and deps
settings = bot.get_cog("Settings")
bot.add_cog(Xp(bot, settings))
# This is the xp module. It's likely to be retarded.
class Xp(commands.Cog):
# Init with the bot reference, and a reference to the settings var
def __init__(self, bot, settings):
self.bot = bot
self.settings = settings
self.is_current = False # Used for stopping loops
global Utils, DisplayName
Utils = self.bot.get_cog("Utils")
DisplayName = self.bot.get_cog("DisplayName")
def _can_xp(self, user, server, requiredXP = None, promoArray = None):
# Checks whether or not said user has access to the xp system
if requiredXP == None:
requiredXP = self.settings.getServerStat(server, "RequiredXPRole", None)
if promoArray == None:
promoArray = self.settings.getServerStat(server, "PromotionArray", [])
if not requiredXP:
return True
for checkRole in user.roles:
if str(checkRole.id) == str(requiredXP):
return True
# Still check if we have enough xp
userXP = self.settings.getUserStat(user, server, "XP")
for role in promoArray:
if str(role["ID"]) == str(requiredXP):
if userXP >= role["XP"]:
return True
break
return False
# Proof of concept stuff for reloading cog/extension
def _is_submodule(self, parent, child):
return parent == child or child.startswith(parent + ".")
@commands.Cog.listener()
async def on_unloaded_extension(self, ext):
# Called to shut things down
if not self._is_submodule(ext.__name__, self.__module__):
return
self.is_current = False
@commands.Cog.listener()
async def on_loaded_extension(self, ext):
# See if we were loaded
if not self._is_submodule(ext.__name__, self.__module__):
return
self.is_current = True
self.bot.loop.create_task(self.addXP())
async def addXP(self):
print("Starting XP loop: {}".format(datetime.datetime.now().time().isoformat()))
await self.bot.wait_until_ready()
while not self.bot.is_closed():
try:
await asyncio.sleep(600) # runs only every 10 minutes (600 seconds)
if not self.is_current:
# Bail if we're not the current instance
return
updates = await self.bot.loop.run_in_executor(None, self.update_xp)
t = time.time()
for update in updates:
await CheckRoles.checkroles(update["user"], update["chan"], self.settings, self.bot, **update["kwargs"])
# Sleep after for testing
except Exception as e:
print(str(e))
def update_xp(self):
responses = []
t = time.time()
print("Adding XP: {}".format(datetime.datetime.now().time().isoformat()))
# Get some values that don't require immediate query
server_dict = {}
for x in self.bot.get_all_members():
memlist = server_dict.get(str(x.guild.id), [])
memlist.append(x)
server_dict[str(x.guild.id)] = memlist
for server_id in server_dict:
server = self.bot.get_guild(int(server_id))
if not server:
continue
# Iterate through the servers and add them
xpAmount = int(self.settings.getServerStat(server, "HourlyXP"))
xpAmount = float(xpAmount/6)
xpRAmount = int(self.settings.getServerStat(server, "HourlyXPReal"))
xpRAmount = float(xpRAmount/6)
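            # The addXP loop wakes roughly every 10 minutes, i.e. six times an hour,
            # so each pass hands out one sixth of the configured hourly values.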
xpLimit = self.settings.getServerStat(server, "XPLimit")
xprLimit = self.settings.getServerStat(server, "XPReserveLimit")
onlyOnline = self.settings.getServerStat(server, "RequireOnline")
requiredXP = self.settings.getServerStat(server, "RequiredXPRole")
promoArray = self.settings.getServerStat(server, "PromotionArray")
xpblock = self.settings.getServerStat(server, "XpBlockArray")
targetChanID = self.settings.getServerStat(server, "DefaultChannel")
kwargs = {
"xp_promote":self.settings.getServerStat(server,"XPPromote"),
"xp_demote":self.settings.getServerStat(server,"XPDemote"),
"suppress_promotions":self.settings.getServerStat(server,"SuppressPromotions"),
"suppress_demotions":self.settings.getServerStat(server,"SuppressDemotions"),
"only_one_role":self.settings.getServerStat(server,"OnlyOneRole")
}
for user in server_dict[server_id]:
# First see if we're current - we want to bail quickly
if not self.is_current:
print("XP Interrupted, no longer current - took {} seconds.".format(time.time() - t))
return responses
if not self._can_xp(user, server, requiredXP, promoArray):
continue
bumpXP = False
if onlyOnline == False:
bumpXP = True
else:
if user.status == discord.Status.online:
bumpXP = True
# Check if we're blocked
if user.id in xpblock:
# No xp for you
continue
for role in user.roles:
if role.id in xpblock:
bumpXP = False
break
if bumpXP:
if xpAmount > 0:
# User is online add hourly xp reserve
# First we check if we'll hit our limit
skip = False
if not xprLimit == None:
# Get the current values
newxp = self.settings.getUserStat(user, server, "XPReserve")
# Make sure it's this xpr boost that's pushing us over
# This would only push us up to the max, but not remove
# any we've already gotten
if newxp + xpAmount > xprLimit:
skip = True
if newxp < xprLimit:
self.settings.setUserStat(user, server, "XPReserve", xprLimit)
if not skip:
xpLeftover = self.settings.getUserStat(user, server, "XPLeftover")
if xpLeftover == None:
xpLeftover = 0
else:
xpLeftover = float(xpLeftover)
gainedXp = xpLeftover+xpAmount
gainedXpInt = int(gainedXp) # Strips the decimal point off
xpLeftover = float(gainedXp-gainedXpInt) # Gets the < 1 value
self.settings.setUserStat(user, server, "XPLeftover", xpLeftover)
self.settings.incrementStat(user, server, "XPReserve", gainedXpInt)
if xpRAmount > 0:
# User is online add hourly xp
# First we check if we'll hit our limit
skip = False
if not xpLimit == None:
# Get the current values
newxp = self.settings.getUserStat(user, server, "XP")
# Make sure it's this xpr boost that's pushing us over
# This would only push us up to the max, but not remove
# any we've already gotten
if newxp + xpRAmount > xpLimit:
skip = True
if newxp < xpLimit:
self.settings.setUserStat(user, server, "XP", xpLimit)
if not skip:
xpRLeftover = self.settings.getUserStat(user, server, "XPRealLeftover")
if xpRLeftover == None:
xpRLeftover = 0
else:
xpRLeftover = float(xpRLeftover)
gainedXpR = xpRLeftover+xpRAmount
gainedXpRInt = int(gainedXpR) # Strips the decimal point off
xpRLeftover = float(gainedXpR-gainedXpRInt) # Gets the < 1 value
self.settings.setUserStat(user, server, "XPRealLeftover", xpRLeftover)
self.settings.incrementStat(user, server, "XP", gainedXpRInt)
# Check our default channels
targetChan = None
if len(str(targetChanID)):
# We *should* have a channel
tChan = self.bot.get_channel(int(targetChanID))
if tChan:
# We *do* have one
targetChan = tChan
responses.append({"user":user, "chan":targetChan if targetChan else self.bot.get_guild(int(server_id)), "kwargs":kwargs})
print("XP Done - took {} seconds.".format(time.time() - t))
return responses
@commands.command(pass_context=True)
async def xp(self, ctx, *, member = None, xpAmount : int = None):
"""Gift xp to other members."""
author = ctx.message.author
server = ctx.message.guild
channel = ctx.message.channel
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(server, "SuppressMentions"):
suppress = True
else:
suppress = False
usage = 'Usage: `{}xp [role/member] [amount]`'.format(ctx.prefix)
isRole = False
if member == None:
await ctx.message.channel.send(usage)
return
# Check for formatting issues
if xpAmount == None:
# Either xp wasn't set - or it's the last section
if type(member) is str:
                # It's a string - the hope continues
roleCheck = DisplayName.checkRoleForInt(member, server)
if not roleCheck:
# Returned nothing - means there isn't even an int
msg = 'I couldn\'t find *{}* on the server.'.format(Nullify.escape_all(member))
await ctx.message.channel.send(msg)
return
if roleCheck["Role"]:
isRole = True
member = roleCheck["Role"]
xpAmount = roleCheck["Int"]
else:
# Role is invalid - check for member instead
nameCheck = DisplayName.checkNameForInt(member, server)
if not nameCheck:
await ctx.message.channel.send(usage)
return
if not nameCheck["Member"]:
msg = 'I couldn\'t find *{}* on the server.'.format(Nullify.escape_all(member))
await ctx.message.channel.send(msg)
return
member = nameCheck["Member"]
xpAmount = nameCheck["Int"]
if xpAmount == None:
# Still no xp - let's run stats instead
if isRole:
await ctx.message.channel.send(usage)
else:
await ctx.invoke(self.stats, member=member)
return
if not type(xpAmount) is int:
await ctx.message.channel.send(usage)
return
# Get our user/server stats
isAdmin = author.permissions_in(channel).administrator
checkAdmin = self.settings.getServerStat(ctx.message.guild, "AdminArray")
# Check for bot admin
isBotAdmin = False
for role in ctx.message.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
isBotAdmin = True
break
botAdminAsAdmin = self.settings.getServerStat(server, "BotAdminAsAdmin")
adminUnlim = self.settings.getServerStat(server, "AdminUnlimited")
reserveXP = self.settings.getUserStat(author, server, "XPReserve")
requiredXP = self.settings.getServerStat(server, "RequiredXPRole")
xpblock = self.settings.getServerStat(server, "XpBlockArray")
approve = True
decrement = True
admin_override = False
# RequiredXPRole
if not self._can_xp(author, server):
approve = False
msg = 'You don\'t have the permissions to give xp.'
if xpAmount > int(reserveXP):
approve = False
msg = 'You can\'t give *{:,} xp*, you only have *{:,}!*'.format(xpAmount, reserveXP)
if author == member:
approve = False
msg = 'You can\'t give yourself xp! *Nice try...*'
if xpAmount < 0:
msg = 'Only admins can take away xp!'
approve = False
# Avoid admins gaining xp
decrement = False
if xpAmount == 0:
msg = 'Wow, very generous of you...'
approve = False
# Check bot admin
if isBotAdmin and botAdminAsAdmin:
# Approve as admin
approve = True
admin_override = True
if adminUnlim:
# No limit
decrement = False
else:
if xpAmount < 0:
# Don't decrement if negative
decrement = False
if xpAmount > int(reserveXP):
# Don't approve if we don't have enough
msg = 'You can\'t give *{:,} xp*, you only have *{:,}!*'.format(xpAmount, reserveXP)
approve = False
# Check admin last - so it overrides anything else
if isAdmin:
# No limit - approve
approve = True
admin_override = True
if adminUnlim:
# No limit
decrement = False
else:
if xpAmount < 0:
# Don't decrement if negative
decrement = False
if xpAmount > int(reserveXP):
# Don't approve if we don't have enough
msg = 'You can\'t give *{:,} xp*, you only have *{:,}!*'.format(xpAmount, reserveXP)
approve = False
# Check author and target for blocks
# overrides admin because admins set this.
if type(member) is discord.Role:
if member.id in xpblock:
msg = "That role cannot receive xp!"
approve = False
else:
# User
if member.id in xpblock:
msg = "That member cannot receive xp!"
approve = False
else:
for role in member.roles:
if role.id in xpblock:
msg = "That member's role cannot receive xp!"
approve = False
if ctx.author.id in xpblock:
msg = "You can't give xp!"
approve = False
else:
for role in ctx.author.roles:
if role.id in xpblock:
msg = "Your role cannot give xp!"
approve = False
if approve:
self.bot.dispatch("xp", member, ctx.author, xpAmount)
if isRole:
# XP was approved - let's iterate through the users of that role,
# starting with the lowest xp
#
# Work through our members
memberList = []
sMemberList = self.settings.getServerStat(server, "Members")
for amem in server.members:
if amem == author:
continue
if amem.id in xpblock:
# Blocked - only if not admin sending it
continue
roles = amem.roles
if member in roles:
# This member has our role
# Add to our list
for smem in sMemberList:
# Find our server entry
if str(smem) == str(amem.id):
# Add it.
sMemberList[smem]["ID"] = smem
memberList.append(sMemberList[smem])
memSorted = sorted(memberList, key=lambda x:int(x['XP']))
if len(memSorted):
# There actually ARE members in said role
totalXP = xpAmount
# Gather presets
xp_p = self.settings.getServerStat(server,"XPPromote")
xp_d = self.settings.getServerStat(server,"XPDemote")
xp_sp = self.settings.getServerStat(server,"SuppressPromotions")
xp_sd = self.settings.getServerStat(server,"SuppressDemotions")
xp_oo = self.settings.getServerStat(server,"OnlyOneRole")
if xpAmount > len(memSorted):
# More xp than members
leftover = xpAmount % len(memSorted)
eachXP = (xpAmount-leftover)/len(memSorted)
for i in range(0, len(memSorted)):
# Make sure we have anything to give
if leftover <= 0 and eachXP <= 0:
break
# Carry on with our xp distribution
cMember = DisplayName.memberForID(memSorted[i]['ID'], server)
if leftover>0:
self.settings.incrementStat(cMember, server, "XP", eachXP+1)
leftover -= 1
else:
self.settings.incrementStat(cMember, server, "XP", eachXP)
await CheckRoles.checkroles(
cMember,
channel,
self.settings,
self.bot,
xp_promote=xp_p,
xp_demote=xp_d,
suppress_promotions=xp_sp,
suppress_demotions=xp_sd,
only_one_role=xp_oo)
else:
for i in range(0, xpAmount):
cMember = DisplayName.memberForID(memSorted[i]['ID'], server)
self.settings.incrementStat(cMember, server, "XP", 1)
await CheckRoles.checkroles(
cMember,
channel,
self.settings,
self.bot,
xp_promote=xp_p,
xp_demote=xp_d,
suppress_promotions=xp_sp,
suppress_demotions=xp_sd,
only_one_role=xp_oo)
# Decrement if needed
if decrement:
self.settings.incrementStat(author, server, "XPReserve", (-1*xpAmount))
msg = '*{:,} collective xp* was given to *{}!*'.format(totalXP, Nullify.escape_all(member.name))
await channel.send(msg)
else:
msg = 'There are no eligible members in *{}!*'.format(Nullify.escape_all(member.name))
await channel.send(msg)
else:
# Decrement if needed
if decrement:
self.settings.incrementStat(author, server, "XPReserve", (-1*xpAmount))
# XP was approved! Let's say it - and check decrement from gifter's xp reserve
msg = '*{}* was given *{:,} xp!*'.format(DisplayName.name(member), xpAmount)
await channel.send(msg)
self.settings.incrementStat(member, server, "XP", xpAmount)
# Now we check for promotions
await CheckRoles.checkroles(member, channel, self.settings, self.bot)
else:
await channel.send(msg)
'''@xp.error
async def xp_error(self, ctx, error):
msg = 'xp Error: {}'.format(error)
await ctx.channel.send(msg)'''
@commands.command(pass_context=True)
async def defaultrole(self, ctx):
"""Lists the default role that new users are assigned."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
role = self.settings.getServerStat(ctx.message.guild, "DefaultRole")
if role == None or role == "":
msg = 'New users are not assigned a role on joining this server.'
await ctx.channel.send(msg)
else:
# Role is set - let's get its name
found = False
for arole in ctx.message.guild.roles:
if str(arole.id) == str(role):
found = True
msg = 'New users will be assigned to **{}**.'.format(Nullify.escape_all(arole.name))
if not found:
msg = 'There is no role that matches id: `{}` - consider updating this setting.'.format(role)
await ctx.message.channel.send(msg)
@commands.command(pass_context=True)
async def gamble(self, ctx, bet : int = None):
"""Gamble your xp reserves for a chance at winning xp!"""
author = ctx.message.author
server = ctx.message.guild
channel = ctx.message.channel
# bet must be a multiple of 10, member must have enough xpreserve to bet
msg = 'Usage: `{}gamble [xp reserve bet] (must be multiple of 10)`'.format(ctx.prefix)
if not (bet or type(bet) == int):
await channel.send(msg)
return
if not type(bet) == int:
await channel.send(msg)
return
isAdmin = author.permissions_in(channel).administrator
checkAdmin = self.settings.getServerStat(ctx.message.guild, "AdminArray")
# Check for bot admin
isBotAdmin = False
for role in ctx.message.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
isBotAdmin = True
break
botAdminAsAdmin = self.settings.getServerStat(server, "BotAdminAsAdmin")
adminUnlim = self.settings.getServerStat(server, "AdminUnlimited")
reserveXP = self.settings.getUserStat(author, server, "XPReserve")
minRole = self.settings.getServerStat(server, "MinimumXPRole")
requiredXP = self.settings.getServerStat(server, "RequiredXPRole")
xpblock = self.settings.getServerStat(server, "XpBlockArray")
approve = True
decrement = True
# Check Bet
if not bet % 10 == 0:
approve = False
msg = 'Bets must be in multiples of *10!*'
if bet > int(reserveXP):
approve = False
msg = 'You can\'t bet *{:,}*, you only have *{:,}* xp reserve!'.format(bet, reserveXP)
if bet < 0:
msg = 'You can\'t bet negative amounts!'
approve = False
if bet == 0:
msg = 'You can\'t bet *nothing!*'
approve = False
# RequiredXPRole
if not self._can_xp(author, server):
approve = False
msg = 'You don\'t have the permissions to gamble.'
# Check bot admin
if isBotAdmin and botAdminAsAdmin:
# Approve as admin
approve = True
if adminUnlim:
# No limit
decrement = False
else:
if bet < 0:
# Don't decrement if negative
decrement = False
if bet > int(reserveXP):
# Don't approve if we don't have enough
msg = 'You can\'t bet *{:,}*, you only have *{:,}* xp reserve!'.format(bet, reserveXP)
approve = False
# Check admin last - so it overrides anything else
if isAdmin:
# No limit - approve
approve = True
if adminUnlim:
# No limit
decrement = False
else:
if bet < 0:
# Don't decrement if negative
decrement = False
if bet > int(reserveXP):
# Don't approve if we don't have enough
msg = 'You can\'t bet *{:,}*, you only have *{:,}* xp reserve!'.format(bet, reserveXP)
approve = False
# Check if we're blocked
if ctx.author.id in xpblock:
msg = "You can't gamble for xp!"
approve = False
else:
for role in ctx.author.roles:
if role.id in xpblock:
msg = "Your role cannot gamble for xp!"
approve = False
if approve:
# Bet was approved - let's take the XPReserve right away
if decrement:
takeReserve = -1*bet
self.settings.incrementStat(author, server, "XPReserve", takeReserve)
# Bet more, less chance of winning, but more winnings!
if bet < 100:
betChance = 5
payout = int(bet/10)
elif bet < 500:
betChance = 15
payout = int(bet/4)
else:
betChance = 25
payout = int(bet/2)
# 1/betChance that user will win - and payout is 1/10th of the bet
randnum = random.randint(1, betChance)
# print('{} : {}'.format(randnum, betChance))
if randnum == 1:
# YOU WON!!
self.settings.incrementStat(author, server, "XP", int(payout))
msg = '*{}* bet *{:,}* and ***WON*** *{:,} xp!*'.format(DisplayName.name(author), bet, int(payout))
# Now we check for promotions
await CheckRoles.checkroles(author, channel, self.settings, self.bot)
else:
msg = '*{}* bet *{:,}* and.... *didn\'t* win. Better luck next time!'.format(DisplayName.name(author), bet)
await ctx.message.channel.send(msg)
@commands.command(pass_context=True)
async def recheckroles(self, ctx):
"""Re-iterate through all members and assign the proper roles based on their xp (admin only)."""
author = ctx.message.author
server = ctx.message.guild
channel = ctx.message.channel
isAdmin = author.permissions_in(channel).administrator
# Only allow admins to change server stats
if not isAdmin:
await channel.send('You do not have sufficient privileges to access this command.')
return
# Gather presets
xp_p = self.settings.getServerStat(server,"XPPromote")
xp_d = self.settings.getServerStat(server,"XPDemote")
xp_sp = self.settings.getServerStat(server,"SuppressPromotions")
xp_sd = self.settings.getServerStat(server,"SuppressDemotions")
xp_oo = self.settings.getServerStat(server,"OnlyOneRole")
message = await ctx.channel.send('Checking roles...')
changeCount = 0
for member in server.members:
# Now we check for promotions
if await CheckRoles.checkroles(
member,
channel,
self.settings,
self.bot,
True,
xp_promote=xp_p,
xp_demote=xp_d,
suppress_promotions=xp_sp,
suppress_demotions=xp_sd,
only_one_role=xp_oo):
changeCount += 1
if changeCount == 1:
await message.edit(content='Done checking roles.\n\n*1 user* updated.')
#await channel.send('Done checking roles.\n\n*1 user* updated.')
else:
await message.edit(content='Done checking roles.\n\n*{:,} users* updated.'.format(changeCount))
#await channel.send('Done checking roles.\n\n*{} users* updated.'.format(changeCount))
@commands.command(pass_context=True)
async def recheckrole(self, ctx, *, user : discord.Member = None):
"""Re-iterate through all members and assign the proper roles based on their xp (admin only)."""
author = ctx.message.author
server = ctx.message.guild
channel = ctx.message.channel
isAdmin = author.permissions_in(channel).administrator
# Only allow admins to change server stats
if not isAdmin:
await channel.send('You do not have sufficient privileges to access this command.')
return
if not user:
user = author
# Now we check for promotions
if await CheckRoles.checkroles(user, channel, self.settings, self.bot):
await channel.send('Done checking roles.\n\n*{}* was updated.'.format(DisplayName.name(user)))
else:
await channel.send('Done checking roles.\n\n*{}* was not updated.'.format(DisplayName.name(user)))
@commands.command(pass_context=True)
async def listxproles(self, ctx):
"""Lists all roles, id's, and xp requirements for the xp promotion/demotion system."""
server = ctx.message.guild
channel = ctx.message.channel
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(server, "SuppressMentions"):
suppress = True
else:
suppress = False
# Get the array
promoArray = self.settings.getServerStat(server, "PromotionArray")
# Sort by XP first, then by name
# promoSorted = sorted(promoArray, key=itemgetter('XP', 'Name'))
promoSorted = sorted(promoArray, key=lambda x:int(x['XP']))
if not len(promoSorted):
roleText = "There are no roles in the xp role list. You can add some with the `{}addxprole [role] [xpamount]` command!\n".format(ctx.prefix)
else:
roleText = "**__Current Roles:__**\n\n"
for arole in promoSorted:
# Get current role name based on id
foundRole = False
for role in server.roles:
if str(role.id) == str(arole['ID']):
# We found it
foundRole = True
roleText = '{}**{}** : *{:,} XP*\n'.format(roleText, Nullify.escape_all(role.name), arole['XP'])
if not foundRole:
roleText = '{}**{}** : *{:,} XP* (removed from server)\n'.format(roleText, Nullify.escape_all(arole['Name']), arole['XP'])
# Get the required role for using the xp system
role = self.settings.getServerStat(ctx.message.guild, "RequiredXPRole")
if role == None or role == "":
roleText = '{}\n**Everyone** can give xp, gamble, and feed the bot.'.format(roleText)
else:
# Role is set - let's get its name
found = False
for arole in ctx.message.guild.roles:
if str(arole.id) == str(role):
found = True
vowels = "aeiou"
if arole.name[:1].lower() in vowels:
roleText = '{}\nYou need to be an **{}** to *give xp*, *gamble*, or *feed* the bot.'.format(roleText, Nullify.escape_all(arole.name))
else:
roleText = '{}\nYou need to be a **{}** to *give xp*, *gamble*, or *feed* the bot.'.format(roleText, Nullify.escape_all(arole.name))
# roleText = '{}\nYou need to be a/an **{}** to give xp, gamble, or feed the bot.'.format(roleText, arole.name)
if not found:
roleText = '{}\nThere is no role that matches id: `{}` for using the xp system - consider updating that setting.'.format(roleText, role)
await channel.send(roleText)
@commands.command(pass_context=True)
async def rank(self, ctx, *, member = None):
"""Say the highest rank of a listed member."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if member is None:
member = ctx.message.author
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(Nullify.escape_all(memberName))
await ctx.message.channel.send(msg)
return
# Create blank embed
stat_embed = discord.Embed(color=member.color)
promoArray = self.settings.getServerStat(ctx.message.guild, "PromotionArray")
# promoSorted = sorted(promoArray, key=itemgetter('XP', 'Name'))
promoSorted = sorted(promoArray, key=lambda x:int(x['XP']))
memName = member.name
# Get member's avatar url
avURL = member.avatar_url
if not len(avURL):
avURL = member.default_avatar_url
if member.nick:
# We have a nickname
# Add to embed
stat_embed.set_author(name='{}, who currently goes by {}'.format(member.name, member.nick), icon_url=avURL)
else:
# Add to embed
stat_embed.set_author(name='{}'.format(member.name), icon_url=avURL)
highestRole = ""
for role in promoSorted:
# We *can* have this role, let's see if we already do
currentRole = None
for aRole in member.roles:
# Get the role that corresponds to the id
if str(aRole.id) == str(role['ID']):
# We found it
highestRole = aRole.name
if highestRole == "":
msg = '*{}* has not acquired a rank yet.'.format(DisplayName.name(member))
# Add Rank
stat_embed.add_field(name="Current Rank", value='None acquired yet', inline=True)
else:
msg = '*{}* is a **{}**!'.format(DisplayName.name(member), highestRole)
# Add Rank
stat_embed.add_field(name="Current Rank", value=highestRole, inline=True)
# await ctx.message.channel.send(msg)
await ctx.message.channel.send(embed=stat_embed)
@rank.error
async def rank_error(self, ctx, error):
msg = 'rank Error: {}'.format(error)
await ctx.channel.send(msg)
async def _show_xp(self, ctx, reverse=False):
# Helper to list xp
message = await Message.EmbedText(title="Counting Xp...",color=ctx.author).send(ctx)
sorted_array = sorted([(int(await self.bot.loop.run_in_executor(None, self.settings.getUserStat,x,ctx.guild,"XP",0)),x) for x in ctx.guild.members],key=lambda x:(x[0],x[1].id),reverse=reverse)
# Update the array with the user's place in the list
xp_array = [{
"name":"{}. {} ({}#{} {})".format(i,x[1].display_name,x[1].name,x[1].discriminator,x[1].id),
"value":"{:,} XP".format(x[0])
} for i,x in enumerate(sorted_array,start=1)]
return await PickList.PagePicker(
title="{} Xp-Holders in {} ({:,} total)".format("Top" if reverse else "Bottom",ctx.guild.name,len(xp_array)),
list=xp_array,
color=ctx.author,
ctx=ctx,
message=message
).pick()
# List the top 10 xp-holders
@commands.command(pass_context=True)
async def leaderboard(self, ctx):
"""List the top xp-holders."""
return await self._show_xp(ctx,reverse=True)
# List the top 10 xp-holders
@commands.command(pass_context=True)
async def bottomxp(self, ctx):
"""List the bottom xp-holders."""
return await self._show_xp(ctx,reverse=False)
# List the xp and xp reserve of a user
@commands.command(pass_context=True)
async def stats(self, ctx, *, member = None):
"""List the xp and xp reserve of a listed member."""
if member is None:
member = ctx.message.author
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(Nullify.escape_all(memberName))
await ctx.message.channel.send(msg)
return
url = member.avatar_url
if not len(url):
url = member.default_avatar_url
# Create blank embed
stat_embed = Message.Embed(color=member.color,thumbnail=url,pm_after=20)
# Get user's xp
newStat = int(self.settings.getUserStat(member, ctx.message.guild, "XP"))
newState = int(self.settings.getUserStat(member, ctx.message.guild, "XPReserve"))
# Add XP and XP Reserve
stat_embed.add_field(name="XP", value="{:,}".format(newStat), inline=True)
stat_embed.add_field(name="XP Reserve", value="{:,}".format(newState), inline=True)
# Get member's avatar url
avURL = member.avatar_url
if not len(avURL):
avURL = member.default_avatar_url
if member.nick:
# We have a nickname
msg = "__***{},*** **who currently goes by** ***{}:***__\n\n".format(member.name, member.nick)
# Add to embed
stat_embed.author = '{}, who currently goes by {}'.format(member.name, member.nick)
else:
msg = "__***{}:***__\n\n".format(member.name)
# Add to embed
stat_embed.author = '{}'.format(member.name)
# Get localized user time
if member.joined_at != None:
local_time = UserTime.getUserTime(ctx.author, self.settings, member.joined_at)
j_time_str = "{} {}".format(local_time['time'], local_time['zone'])
# Add Joined
stat_embed.add_field(name="Joined", value=j_time_str, inline=True)
else:
stat_embed.add_field(name="Joined", value="Unknown", inline=True)
# Get user's current role
promoArray = self.settings.getServerStat(ctx.message.guild, "PromotionArray")
# promoSorted = sorted(promoArray, key=itemgetter('XP', 'Name'))
promoSorted = sorted(promoArray, key=lambda x:int(x['XP']))
highestRole = None
if len(promoSorted):
nextRole = promoSorted[0]
else:
nextRole = None
for role in promoSorted:
if int(nextRole['XP']) < newStat:
nextRole = role
# We *can* have this role, let's see if we already do
currentRole = None
for aRole in member.roles:
# Get the role that corresponds to the id
if str(aRole.id) == str(role['ID']):
# We found it
highestRole = aRole.name
if len(promoSorted) > (promoSorted.index(role)+1):
# There's more roles above this
nRoleIndex = promoSorted.index(role)+1
nextRole = promoSorted[nRoleIndex]
if highestRole:
msg = '{}**Current Rank:** *{}*\n'.format(msg, highestRole)
# Add Rank
stat_embed.add_field(name="Current Rank", value=highestRole, inline=True)
else:
if len(promoSorted):
# Need to have ranks to acquire one
msg = '{}They have not acquired a rank yet.\n'.format(msg)
# Add Rank
stat_embed.add_field(name="Current Rank", value='None acquired yet', inline=True)
if nextRole and (newStat < int(nextRole['XP'])):
# Get role
next_role = DisplayName.roleForID(int(nextRole["ID"]), ctx.guild)
if not next_role:
next_role_text = "Role ID: {} (Removed from server)".format(nextRole["ID"])
else:
next_role_text = next_role.name
msg = '{}\n*{:,}* more *xp* required to advance to **{}**'.format(msg, int(nextRole['XP']) - newStat, next_role_text)
# Add Next Rank
stat_embed.add_field(name="Next Rank", value='{} ({:,} more xp required)'.format(next_role_text, int(nextRole['XP'])-newStat), inline=True)
# Add status
status_text = ":green_heart:"
if member.status == discord.Status.offline:
status_text = ":black_heart:"
elif member.status == discord.Status.dnd:
status_text = ":heart:"
elif member.status == discord.Status.idle:
status_text = ":yellow_heart:"
stat_embed.add_field(name="Status", value=status_text, inline=True)
stat_embed.add_field(name="ID", value=str(member.id), inline=True)
stat_embed.add_field(name="User Name", value="{}#{}".format(member.name, member.discriminator), inline=True)
if member.premium_since:
local_time = UserTime.getUserTime(ctx.author, self.settings, member.premium_since, clock=True)
c_time_str = "{} {}".format(local_time['time'], local_time['zone'])
stat_embed.add_field(name="Boosting Since",value=c_time_str)
if member.activity and member.activity.name:
# Playing a game!
play_list = [ "Playing", "Streaming", "Listening to", "Watching" ]
try:
play_string = play_list[member.activity.type]
except:
play_string = "Playing"
stat_embed.add_field(name=play_string, value=str(member.activity.name), inline=True)
if member.activity.type == 1:
# Add the URL too
stat_embed.add_field(name="Stream URL", value="[Watch Now]({})".format(member.activity.url), inline=True)
# Add joinpos
joinedList = sorted([{"ID":mem.id,"Joined":mem.joined_at} for mem in ctx.guild.members], key=lambda x:x["Joined"].timestamp() if x["Joined"] != None else -1)
if member.joined_at != None:
try:
check_item = { "ID" : member.id, "Joined" : member.joined_at }
total = len(joinedList)
position = joinedList.index(check_item) + 1
stat_embed.add_field(name="Join Position", value="{:,} of {:,}".format(position, total), inline=True)
except:
stat_embed.add_field(name="Join Position", value="Unknown", inline=True)
else:
stat_embed.add_field(name="Join Position", value="Unknown", inline=True)
# Get localized user time
local_time = UserTime.getUserTime(ctx.author, self.settings, member.created_at, clock=False)
c_time_str = "{} {}".format(local_time['time'], local_time['zone'])
# add created_at footer
created = "Created at " + c_time_str
stat_embed.footer = created
await stat_embed.send(ctx)
@stats.error
async def stats_error(self, ctx, error):
msg = 'stats Error: {}'.format(error)
await ctx.channel.send(msg)
# List the xp and xp reserve of a user
@commands.command(pass_context=True)
async def xpinfo(self, ctx):
"""Gives a quick rundown of the xp system."""
server = ctx.message.guild
channel = ctx.message.channel
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(server, "SuppressMentions"):
suppress = True
else:
suppress = False
serverName = Nullify.escape_all(server.name)
hourlyXP = int(self.settings.getServerStat(server, "HourlyXP"))
hourlyXPReal = int(self.settings.getServerStat(server, "HourlyXPReal"))
xpPerMessage = int(self.settings.getServerStat(server, "XPPerMessage"))
xpRPerMessage = int(self.settings.getServerStat(server, "XPRPerMessage"))
if not xpPerMessage:
xpPerMessage = 0
if not xpRPerMessage:
xpRPerMessage = 0
if not hourlyXPReal:
hourlyXPReal = 0
if not hourlyXP:
hourlyXP = 0
onlyOnline = self.settings.getServerStat(server, "RequireOnline")
xpProm = self.settings.getServerStat(server, "XPPromote")
xpDem = self.settings.getServerStat(server, "XPDemote")
xpStr = None
if xpProm and xpDem:
# Bot promote and demote
xpStr = "This is what I check to handle promotions and demotions.\n"
else:
if xpProm:
xpStr = "This is what I check to handle promotions.\n"
elif xpDem:
xpStr = "This is what I check to handle demotions.\n"
msg = "__***{}'s*** **XP System**__\n\n__What's What:__\n\n".format(serverName)
msg = "{}**XP:** This is the xp you have *earned.*\nIt comes from other users gifting you xp, or if you're lucky enough to `{}gamble` and win.\n".format(msg, ctx.prefix)
if xpStr:
msg = "{}{}".format(msg, xpStr)
hourStr = None
if hourlyXPReal > 0:
hourStr = "Currently, you receive *{} xp* each hour".format(hourlyXPReal)
if onlyOnline:
hourStr = "{} (but *only* if your status is *Online*).".format(hourStr)
else:
hourStr = "{}.".format(hourStr)
if hourStr:
msg = "{}{}\n".format(msg, hourStr)
if xpPerMessage > 0:
msg = "{}Currently, you receive *{} xp* per message.\n".format(msg, xpPerMessage)
msg = "{}This can only be taken away by an *admin*.\n\n".format(msg)
msg = "{}**XP Reserve:** This is the xp you can *gift*, *gamble*, or use to *feed* me.\n".format(msg)
hourStr = None
if hourlyXP > 0:
hourStr = "Currently, you receive *{} xp reserve* each hour".format(hourlyXP)
if onlyOnline:
hourStr = "{} (but *only* if your status is *Online*).".format(hourStr)
else:
hourStr = "{}.".format(hourStr)
if hourStr:
msg = "{}{}\n".format(msg, hourStr)
if xpRPerMessage > 0:
msg = "{}Currently, you receive *{} xp reserve* per message.\n".format(msg, xpRPerMessage)
msg = "{}\n__How Do I Use It?:__\n\nYou can gift other users xp by using the `{}xp [user] [amount]` command.\n".format(msg, ctx.prefix)
msg = "{}This pulls from your *xp reserve*, and adds to their *xp*.\n".format(msg)
msg = "{}It does not change the *xp* you have *earned*.\n\n".format(msg)
msg = "{}You can gamble your *xp reserve* to have a chance to win a percentage back as *xp* for yourself.\n".format(msg)
msg = "{}You do so by using the `{}gamble [amount in multiple of 10]` command.\n".format(msg, ctx.prefix)
msg = "{}This pulls from your *xp reserve* - and if you win, adds to your *xp*.\n\n".format(msg)
msg = "{}You can also *feed* me.\n".format(msg)
msg = "{}This is done with the `{}feed [amount]` command.\n".format(msg, ctx.prefix)
msg = "{}This pulls from your *xp reserve* - and doesn't affect your *xp*.\n\n".format(msg)
msg = "{}You can check your *xp*, *xp reserve*, current role, and next role using the `{}stats` command.\n".format(msg, ctx.prefix)
msg = "{}You can check another user's stats with the `{}stats [user]` command.\n\n".format(msg, ctx.prefix)
# Get the required role for using the xp system
role = self.settings.getServerStat(server, "RequiredXPRole")
if role == None or role == "":
msg = '{}Currently, **Everyone** can *give xp*, *gamble*, and *feed* the bot.\n\n'.format(msg)
else:
# Role is set - let's get its name
found = False
for arole in server.roles:
if str(arole.id) == str(role):
found = True
vowels = "aeiou"
if arole.name[:1].lower() in vowels:
msg = '{}Currently, you need to be an **{}** to *give xp*, *gamble*, or *feed* the bot.\n\n'.format(msg, Nullify.escape_all(arole.name))
else:
msg = '{}Currently, you need to be a **{}** to *give xp*, *gamble*, or *feed* the bot.\n\n'.format(msg, Nullify.escape_all(arole.name))
if not found:
msg = '{}There is no role that matches id: `{}` for using the xp system - consider updating that setting.\n\n'.format(msg, role)
msg = "{}Hopefully that clears things up!".format(msg)
await ctx.message.channel.send(msg)
| mit | -1,479,371,113,077,877,200 | 34.143603 | 194 | 0.642081 | false |
5t111111/markdown-preview.vim | markdownpreview_lib/markdown_preview/markdown_preview.py | 1 | 2621 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import webbrowser
from bs4 import BeautifulSoup
import chardet
import markdown
class MarkdownPreview(object):
_source = None
_template_path = None
_html_path = None
_css_path = None
def __init__(self, source=None, template=None, html=None, css=None):
self._source = source
self._template_path = template
self._css_path = css
self._html_path = html
def _read_css(self):
with open(self._css_path, 'r') as f:
css = ''
uniconv = lambda x: x.decode(chardet.detect(x)['encoding'])
line = f.readline()
while line:
line = uniconv(line)
css = ''.join([css, line])
line = f.readline()
return css.encode('utf_8', errors='replace')
def create_html(self):
src = []
for line in self._source.split('\n'):
line = line.rstrip()
# Do not ignore continuous newlines...
if(line == ''):
src.append(' ')
else:
src.append(line)
src.append('\n')
content = ''.join(src)
uniconv = lambda x: x.decode(chardet.detect(x)['encoding'])
content = uniconv(content)
content = markdown.markdown(content, extensions=['extra', 'codehilite', 'nl2br'])
with open(self._template_path, 'r') as f:
html = f.read()
html = html.replace('{{ CSS }}', self._read_css())
html = html.replace('{{ CONTENT }}', content)
dirty_html = html
try:
soup = BeautifulSoup(dirty_html)
html = soup.prettify()
except:
# Failed to prettify a dirty HTML...
html = dirty_html
with open(self._html_path, 'w') as f:
f.write(html.encode('utf_8', errors='replace'))
if sys.platform[:3] == "win":
webbrowser.open(self._html_path)
else:
webbrowser.open('file://' + self._html_path)
def main():
argvs = sys.argv
src_file = argvs[1]
with open(src_file) as f:
src = f.read()
path_to_this = os.path.dirname(os.path.abspath(__file__))
css = os.path.join(path_to_this, 'preview', 'css', 'markdown.css')
template = os.path.join(path_to_this, 'preview', 'view', 'index.html')
html = os.path.join(path_to_this, 'preview', 'index.html')
mdp = MarkdownPreview(source=src, template=template, html=html, css=css)
mdp.create_html()
if __name__ == '__main__':
main()
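# Example of how this script is typically invoked (illustrative only; the
# preview/ assets referenced above must sit next to this file). The rendered
# HTML is written to preview/index.html and opened in the default browser:
#
#     python markdown_preview.py path/to/notes.md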
| lgpl-2.1 | 157,799,395,344,033,540 | 26.882979 | 89 | 0.533003 | false |
Djiit/err-jenkins | test_jenkinsBot.py | 1 | 4684 | # coding: utf-8
from errbot.backends.test import testbot
import jenkinsBot
class TestJenkinsBot(object):
extra_plugin_dir = '.'
def test_jenkins_build_no_args(self, testbot):
testbot.push_message('!jenkins build')
assert ('What job would you like to build?'
in testbot.pop_message())
def test_jenkins_build_shortcut_no_args(self, testbot):
testbot.push_message('!build')
assert ('What job would you like to build?'
in testbot.pop_message())
def test_jenkins_param_no_args(self, testbot):
testbot.push_message('!jenkins param')
assert ('What Job would you like the parameters for?'
in testbot.pop_message())
def test_jenkins_createjob_no_args(self, testbot):
testbot.push_message('!jenkins createjob')
assert ('Oops, I need a type and a name for your new job.'
in testbot.pop_message())
def test_jenkins_deletejob_no_args(self, testbot):
testbot.push_message('!jenkins deletejob')
assert ('Oops, I need the name of the job you want me to delete.'
in testbot.pop_message())
def test_jenkins_enablejob_no_args(self, testbot):
testbot.push_message('!jenkins enablejob')
assert ('Oops, I need the name of the job you want me to enable.'
in testbot.pop_message())
def test_jenkins_disablejob_no_args(self, testbot):
testbot.push_message('!jenkins disablejob')
assert ('Oops, I need the name of the job you want me to disable.'
in testbot.pop_message())
def test_jenkins_createnode_no_args(self, testbot):
testbot.push_message('!jenkins createnode')
assert ('Oops, I need a name and a working dir for your new node.'
in testbot.pop_message())
def test_jenkins_deletenode_no_args(self, testbot):
testbot.push_message('!jenkins deletenode')
assert ('Oops, I need the name of the node you want me to delete.'
in testbot.pop_message())
def test_jenkins_enablenode_no_args(self, testbot):
testbot.push_message('!jenkins enablenode')
assert ('Oops, I need the name of the node you want me to enable.'
in testbot.pop_message())
def test_jenkins_disablenode_no_args(self, testbot):
testbot.push_message('!jenkins disablenode')
assert ('Oops, I need the name of the node you want me to disable.'
in testbot.pop_message())
class TestJenkinsBotStaticMethods(object):
def test_format_jobs_helper(self):
jobs = [{'name': 'foo',
'fullname': 'foo bar',
'url': 'http://jenkins.example.com/job/foo/'}]
result = jenkinsBot.JenkinsBot.format_jobs(jobs)
assert result == 'foo bar (http://jenkins.example.com/job/foo/)'
def test_format_jobs_helper_no_params(self):
jobs = []
result = jenkinsBot.JenkinsBot.format_jobs(jobs)
assert result == 'No jobs found.'
def test_format_params_helper(self):
params = [{
'defaultParameterValue': {'value': 'bar'},
'description': 'foo bar baz',
'name': 'FOO',
'type': 'StringParameterDefinition'
}]
result = jenkinsBot.JenkinsBot.format_params(params)
assert result == """Type: StringParameterDefinition
Description: foo bar baz
Default Value: bar
Parameter Name: FOO
"""
def test_build_parameters_helper(self):
params = ['FOO:bar', 'BAR:baz']
result = jenkinsBot.JenkinsBot.build_parameters(params)
assert result == {'FOO': 'bar', 'BAR': 'baz'}
def test_build_parameters_helper_no_params(self):
params = []
result = jenkinsBot.JenkinsBot.build_parameters(params)
assert result == {'': ''}
def test_format_notification(self):
body = {
"name": "dummy",
"url": "job/dummy/",
"build": {
"full_url": "http://jenkins.example.com/job/dummy/1/",
"number": 1,
"phase": "COMPLETED",
"status": "SUCCESS",
"url": "job/asgard/1/",
"scm": {
"url": "https://github.com/Djiit/err-jenkins.git",
"branch": "origin/master",
"commit": "0e51ed"
},
}
}
result = jenkinsBot.JenkinsBot.format_notification(body)
assert result == """Build #1 SUCCESS for Job dummy \
(http://jenkins.example.com/job/dummy/1/)
Based on https://github.com/Djiit/err-jenkins.git/commit/0e51ed \
(origin/master)"""
| mit | 1,269,524,832,738,409,500 | 36.472 | 75 | 0.591161 | false |
jpurma/Kataja | kataja/SemanticsItem.py | 1 | 5838 | import math
from PyQt5 import QtCore, QtWidgets, QtGui
from kataja.globals import SMALL_FEATURE
from kataja.singletons import ctrl, qt_prefs
FREE = 0
SENTENCE = 1
NOUN_PHRASE = 2
class SemanticsItem(QtWidgets.QGraphicsSimpleTextItem):
def __init__(self, sm, label, array_id, color_key, x=0, y=0):
QtWidgets.QGraphicsSimpleTextItem.__init__(self, label)
self.label = label
self.setFont(qt_prefs.get_font(SMALL_FEATURE))
self.array_id = array_id
self.color_key = color_key
self.color_key_tr = color_key if color_key.endswith('tr') else color_key + 'tr'
self.members = []
self.setZValue(2)
self.setPos(x, y)
if not sm.visible:
self.hide()
def add_member(self, node):
if node not in self.members:
self.members.append(node)
def update_text(self):
words = [self.label]
for node in self.members:
if node.syntactic_object:
checked_features = getattr(node.syntactic_object, 'checked_features', [])
if checked_features and isinstance(checked_features, tuple):
checked_feat, valuing_feat = checked_features
feat_node = ctrl.forest.get_node(checked_feat)
parents = feat_node.get_parents()
words.append('(' + ' '.join([x.label for x in parents]) + ')')
feat_node = ctrl.forest.get_node(valuing_feat)
parents = feat_node.get_parents()
words.append(' '.join([x.label for x in parents]))
self.setText(' '.join(words))
def boundingRect(self):
base = self.label_rect()
if not self.members:
return base.adjusted(-2, -2, 2, 2)
scene_pos = self.pos()
x = scene_pos.x()
y = scene_pos.y()
left = x + base.left()
up = y + base.top()
right = x + base.right()
down = y + base.bottom()
for member in self.members:
p = member.scenePos()
px = p.x()
py = p.y()
if px < left:
left = px
elif px > right:
right = px
if py < up:
up = py
elif py > down:
down = py
return QtCore.QRectF(left - x, up - y, right - left + 2, down - up + 2)
def label_rect(self):
min_w = 40
if not self.members:
return QtCore.QRectF(-2, -1, min_w, 4)
r = QtWidgets.QGraphicsSimpleTextItem.boundingRect(self).adjusted(-2, -1, 2, 1)
if r.width() < min_w:
r.setWidth(min_w)
return r
def paint(self, painter, *args, **kwargs):
painter.setPen(QtCore.Qt.NoPen)
label_rect = self.label_rect()
if self.members:
painter.setBrush(ctrl.cm.get(self.color_key))
painter.drawRoundedRect(label_rect, 4, 4)
p = QtGui.QPen(ctrl.cm.get(self.color_key_tr), 3)
painter.setPen(p)
scene_pos = self.pos()
x = scene_pos.x()
y = scene_pos.y()
mid_height = label_rect.height() / 2
painter.setBrush(QtCore.Qt.NoBrush)
for member in self.members:
if member.cached_sorted_feature_edges:
max_i = len(member.cached_sorted_feature_edges)
i_shift = math.ceil((max_i - 1) / 2) * -3
else:
i_shift = 0
pos = member.scenePos()
px = pos.x()
py = pos.y()
px += i_shift
if True:
painter.setPen(QtCore.Qt.NoPen)
grad = QtGui.QLinearGradient(0, 0, px - x, 0)
grad.setColorAt(0, ctrl.cm.get(self.color_key))
grad.setColorAt(0.1, ctrl.cm.get(self.color_key_tr))
grad.setColorAt(0.6, ctrl.cm.get(self.color_key_tr))
grad.setColorAt(1, ctrl.cm.get(self.color_key))
painter.setBrush(grad)
# painter.setBrush(ctrl.cm.get(self.color_key_tr))
# p.lineTo(px - x, py - y)
if py < y:
p = QtGui.QPainterPath(QtCore.QPointF(0, mid_height + 2))
p.lineTo((px - x) / 2, mid_height + 2)
p.quadTo(((px - x) / 4) * 3 - 2, mid_height + 2, px - x - 0.5, py - y - 1)
p.lineTo(px - x + 3, py - y - 5)
p.quadTo(((px - x) / 4) * 3 + 2, mid_height - 2, (px - x) / 2, mid_height
- 2)
p.lineTo(0, mid_height - 2)
else:
p = QtGui.QPainterPath(QtCore.QPointF(0, mid_height - 2))
p.lineTo((px - x) / 2, mid_height - 2)
p.quadTo(((px - x) / 4) * 3 - 2, mid_height - 2, px - x - 0.5, py - y - 1)
p.lineTo(px - x + 3, py - y - 5)
p.quadTo(((px - x) / 4) * 3 + 2, mid_height + 2, (px - x) / 2, mid_height
+ 2)
p.lineTo(0, mid_height + 2)
painter.drawPath(p)
# else:
# p = QtGui.QPainterPath(QtCore.QPointF(0, mid_height))
# p.lineTo((px - x) / 2, mid_height)
# p.quadTo(((px - x) / 4) * 3, mid_height, px - x, py - y)
# painter.drawPath(p)
self.setBrush(ctrl.cm.paper())
QtWidgets.QGraphicsSimpleTextItem.paint(self, painter, *args, **kwargs)
else:
painter.setBrush(ctrl.cm.get(self.color_key_tr))
painter.drawRoundedRect(label_rect, 4, 4)
| gpl-3.0 | 6,065,196,672,987,887,000 | 39.825175 | 98 | 0.477561 | false |
MjnMixael/knossos | releng/macos/dmgbuild_cfg.py | 1 | 4501 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import biplist
import os.path
import subprocess
# .. Useful stuff ..............................................................
application = 'dist/Knossos.app'
appname = os.path.basename(application)
def icon_from_app(app_path):
plist_path = os.path.join(app_path, 'Contents', 'Info.plist')
plist = biplist.readPlist(plist_path)
icon_name = plist['CFBundleIconFile']
icon_root,icon_ext = os.path.splitext(icon_name)
if not icon_ext:
icon_ext = '.icns'
icon_name = icon_root + icon_ext
return os.path.join(app_path, 'Contents', 'Resources', icon_name)
# .. Basics ....................................................................
# Uncomment to override the output filename
#filename = 'dist/Knossos.dmg'
# Uncomment to override the output volume name
volume_name = 'Knossos'
# Volume format (see hdiutil create -help)
format = 'UDBZ'
# Volume size (must be large enough for your files)
kn_size = subprocess.check_output(['du', '-sm', 'dist/Knossos.app'])
size = defines.get('size', '%dM' % (int(kn_size.split()[0]) + 2))
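# Note: this module is a dmgbuild settings file, so names like `defines` are
# injected by dmgbuild when it evaluates the file. A sketch of a typical
# invocation (volume name and output path here are illustrative, not taken
# from the release scripts):
#
#     dmgbuild -s releng/macos/dmgbuild_cfg.py -D size=200M "Knossos" dist/Knossos.dmg
#
# Passing -D size=... overrides the size computed from `du` above.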
# Files to include
files = [ application ]
# Symlinks to create
symlinks = { 'Applications': '/Applications' }
# Volume icon
#
# You can either define icon, in which case that icon file will be copied to the
# image, *or* you can define badge_icon, in which case the icon file you specify
# will be used to badge the system's Removable Disk icon
#
badge_icon = icon_from_app(application)
# Where to put the icons
icon_locations = {
appname: (140, 120),
'Applications': (500, 120)
}
# .. Window configuration ......................................................
# Background
#
# This is a STRING containing any of the following:
#
# #3344ff - web-style RGB color
# #34f - web-style RGB color, short form (#34f == #3344ff)
# rgb(1,0,0) - RGB color, each value is between 0 and 1
# hsl(120,1,.5) - HSL (hue saturation lightness) color
# hwb(300,0,0) - HWB (hue whiteness blackness) color
# cmyk(0,1,0,0) - CMYK color
# goldenrod - X11/SVG named color
# builtin-arrow - A simple built-in background with a blue arrow
# /foo/bar/baz.png - The path to an image file
#
# The hue component in hsl() and hwb() may include a unit; it defaults to
# degrees ('deg'), but also supports radians ('rad') and gradians ('grad'
# or 'gon').
#
# Other color components may be expressed either in the range 0 to 1, or
# as percentages (e.g. 60% is equivalent to 0.6).
background = 'builtin-arrow'
show_status_bar = False
show_tab_view = False
show_toolbar = False
show_pathbar = False
show_sidebar = False
sidebar_width = 180
# Window position in ((x, y), (w, h)) format
window_rect = ((100, 100), (640, 280))
# Select the default view; must be one of
#
# 'icon-view'
# 'list-view'
# 'column-view'
# 'coverflow'
#
default_view = 'icon-view'
# General view configuration
show_icon_preview = False
# Set these to True to force inclusion of icon/list view settings (otherwise
# we only include settings for the default view)
include_icon_view_settings = 'auto'
include_list_view_settings = 'auto'
# .. Icon view configuration ...................................................
arrange_by = None
grid_offset = (0, 0)
grid_spacing = 120
scroll_position = (0, 0)
label_pos = 'bottom' # or 'right'
text_size = 16
icon_size = 128
# .. List view configuration ...................................................
# Column names are as follows:
#
# name
# date-modified
# date-created
# date-added
# date-last-opened
# size
# kind
# label
# version
# comments
#
list_icon_size = 16
list_text_size = 12
list_scroll_position = (0, 0)
list_sort_by = 'name'
list_use_relative_dates = True
list_calculate_all_sizes = False
list_columns = ('name', 'date-modified', 'size', 'kind', 'date-added')
list_column_widths = {
'name': 300,
'date-modified': 181,
'date-created': 181,
'date-added': 181,
'date-last-opened': 181,
'size': 97,
'kind': 115,
'label': 100,
'version': 75,
'comments': 300,
}
list_column_sort_directions = {
'name': 'ascending',
'date-modified': 'descending',
'date-created': 'descending',
'date-added': 'descending',
'date-last-opened': 'descending',
'size': 'descending',
'kind': 'ascending',
'label': 'ascending',
'version': 'ascending',
'comments': 'ascending',
} | apache-2.0 | -4,118,771,942,043,023,000 | 26.284848 | 80 | 0.612531 | false |
rscalzo/pyBoloSN | Tests/test_A82.py | 1 | 3804 | from BoloMass.Arnett82 import tau_0, Lambda, A82LC_full, A82LC_gp
from RetroSpect.Plotting import color_ramp
import sys
import numpy as np
import matplotlib.pyplot as pypl

# Shared time grid (days) for the plotting helpers below; chosen to mirror the
# grid built inside test_A82LC_gp. Without a module-level `t`, the first three
# test functions would raise NameError.
t = np.arange(0.0, 120.1, 0.5)
def test_A82_Lambda():
"""Test plots for Lambda"""
y = np.arange(0.7, 1.41, 0.1)
c = color_ramp(len(y))
for yi, ci in zip(y, c):
pypl.semilogy(t, Lambda(t, yi), color=ci)
pypl.show()
def test_A82LC_full_01():
"""Test plots for A82LC_full"""
y, tg, MNi, Eth0 = 1.0, 40.0, 0.6, 0.0e+51
R0 = np.array([0.0, 0.1, 0.3, 1.0, 3.0, 10.0]) * 1e+14
c = color_ramp(len(R0))
for R0i, ci in zip(R0, c):
td = tau_0(R0i, 0.1, 2.8e+33)
L0, w = Eth0/(td * 86400), y*17.6/td
print "R0, tau0, L0, w =", R0i, td, L0, w
pypl.subplot(2, 1, 1)
pypl.plot(t, A82LC_full(t, y, w, tg, MNi, Eth0), color=ci)
pypl.subplot(2, 1, 2)
pypl.semilogy(t, A82LC_full(t, y, w, tg, MNi, Eth0), color=ci)
pypl.show()
def test_A82LC_full_02():
"""More test plots for A82LC_full"""
y, tg, MNi, R0 = 1.0, 40.0, 0.6, 1e+13
Eth0 = np.arange(0.0, 0.51, 0.1) * 1e+51
c = color_ramp(len(Eth0))
for Ethi, ci in zip(Eth0, c):
td = tau_0(R0, 0.1, 2.8e+33)
L0, w = Ethi/(td * 86400), y*17.6/td
print "R0, tau0, L0, w =", R0, td, L0, w
pypl.subplot(2, 1, 1)
pypl.plot(t, A82LC_full(t, y, w, tg, MNi, Ethi), color=ci)
pypl.subplot(2, 1, 2)
pypl.semilogy(t, A82LC_full(t, y, w, tg, MNi, Ethi), color=ci)
pypl.show()
def test_A82LC_gp():
"""Test plots for the Gaussian process stuff"""
# Set up a Gaussian process interpolator
gpint = A82LC_gp("a82lcgp_4d_alt.pkl")
t = np.arange(0.0, 120.1, 0.5)
test_resids = True
def my_plot_set(p, c, l):
res = [ ]
for pi, ci, li in zip(p, c, l):
gpfit = gpint(t, pi)
pypl.semilogy(t, gpfit, color=ci, label=li)
if test_resids:
orig = A82LC_full(t, *pi)
else:
orig = gpfit
pypl.semilogy(t, orig, color=ci, ls='--')
# calculate residuals
res.append((orig - gpfit)/orig)
res = np.array(res).ravel()
res = res[abs(res) < np.inf]
print "nmad, rms, max resids = {0:.4f}, {1:.4f}, {2:.4f};".format(
np.median(np.abs(res)), res.std(), np.abs(res).max()),
nok, ntot = np.sum(np.abs(res.ravel()) > 0.02), len(res.ravel())
fok = nok / (1.0*ntot)
print "fvals(res > 2\%) = {0}/{1} = {2:.2f}\%".format(
nok, ntot, 100.0*fok)
sys.stdout.flush()
pypl.legend()
pypl.show()
# Vary y
y = np.arange(0.7, 1.41, 0.05)
pars = [(yi, 0.0, 40.0, 0.6, 0.0) for yi in y]
colors = color_ramp(len(pars))
labels = ["y = {0:.2f}".format(yi) for yi in y]
print "varying y:",
my_plot_set(pars, colors, labels)
# Vary w with Eth0 = 0
w = np.arange(0.0, 0.26, 0.05)
pars = [(1.0, wi, 40.0, 0.6, 0.0) for wi in w]
colors = color_ramp(len(pars))
labels = ["w = {0:.2f}".format(wi) for wi in w]
print "varying w:",
my_plot_set(pars, colors, labels)
# Vary w with Eth0 = 0.5e+51 erg
w = np.arange(0.0, 0.26, 0.05)
pars = [(1.0, wi, 40.0, 0.6, 0.5e+51) for wi in w]
colors = color_ramp(len(pars))
labels = ["w = {0:.2f}".format(wi) for wi in w]
print "varying w:",
my_plot_set(pars, colors, labels)
# Vary tg
tg = np.arange(20.0, 70.1, 5.0)
pars = [(1.0, 0.0, tgi, 0.6, 0.0) for tgi in tg]
colors = color_ramp(len(pars))
labels = ["t$_\gamma$ = {0:.0f} days".format(tgi) for tgi in tg]
print "varying tg:",
my_plot_set(pars, colors, labels)
# test_A82_Lambda()
# test_A82LC_full_01()
# test_A82LC_full_02()
test_A82LC_gp()
| mit | 4,331,748,857,861,955,600 | 34.222222 | 74 | 0.52918 | false |
evildmp/django-curated-resources | curated_resources/admin.py | 1 | 4333 | from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from django import forms
from django.contrib.admin.widgets import FilteredSelectMultiple
from mptt.forms import TreeNodeMultipleChoiceField
from treeadmin.admin import TreeAdmin
from widgetry.tabs.admin import ModelAdminWithTabs
from widgetry import fk_lookup
# from widgetry.views import search
from arkestra_utilities.admin_mixins import AutocompleteMixin, InputURLMixin
from links import schema
from curated_resources.models import Resource, ResourceType, Audience, Topic, Domain
class ResourceAdminForm(InputURLMixin):
# disabled: https://github.com/django-mptt/django-mptt/issues/255
# domains = TreeNodeMultipleChoiceField(
# queryset=Domain.objects.all(),
# level_indicator=unichr(0x00A0) * 2,
# widget=FilteredSelectMultiple(
# "Domains",
# is_stacked=False,
# )
# )
def __init__(self, *args, **kwargs):
super(ResourceAdminForm, self).__init__(*args, **kwargs)
if self.instance.pk is not None and self.instance.destination_content_type:
destination_content_type = self.instance.destination_content_type.model_class()
else:
destination_content_type = None
# look up the correct widget from the content type
widget = fk_lookup.GenericFkLookup(
'id_%s-destination_content_type' % self.prefix,
destination_content_type,
)
self.fields['destination_object_id'].widget = widget
self.fields['destination_content_type'].widget.choices = schema.content_type_choices()
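# Illustrative note (not part of the original module): the two destination
# fields wired up above follow Django's contenttypes pattern, so a saved
# resource's target object can be resolved roughly along these lines:
#
#     ct = resource.destination_content_type
#     target = ct.get_object_for_this_type(pk=resource.destination_object_id)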
from django.contrib.admin import SimpleListFilter
class ResourceAdmin(ModelAdminWithTabs, AutocompleteMixin):
form = ResourceAdminForm
related_search_fields = ['destination_content_type']
filter_horizontal = (
'related_to',
'suitable_for',
'topics',
'domains',
'curators'
)
list_filter = ('resource_type', 'published')
list_display = ('title', 'published')
prepopulated_fields = {"slug": ("title",)}
tabs = [
('Description', {'fieldsets': [
[None, {'fields': [('title', 'short_title'), ('resource_type', 'published'), 'description',]}],
["Link to the resource",{'fields': [('destination_content_type', 'destination_object_id',)]}],
["Duration and cost",{'fields': [('duration', 'cost',)]}]
]}),
('Audience', {'fieldsets': [[None,{'fields': ['suitable_for',]}]]}),
('Domains', {'fieldsets': [[None,{'fields': ['domains',]}]]}),
('Topics', {'fieldsets': [[None,{'fields': ['topics',]}]]}),
('Related items', {'fieldsets': [[None,{'fields': ['related_to',]}]]}),
('Curators', {'fieldsets': [[None,{'fields': ['curators',]}]]}),
('Advanced options', {'fieldsets': [[None,{'fields': ['slug',]}]]}),
]
class TreeRoots(SimpleListFilter):
title = _('domain scheme')
parameter_name = 'tree'
def lookups(self, request, model_admin):
roots = Domain.objects.filter(parent=None)
t = [(root.tree_id, root.name) for root in roots]
return t
def queryset(self, request, queryset):
if self.value():
return queryset.filter(tree_id = self.value())
class DomainAdmin(TreeAdmin):
enable_object_permissions = False
jquery_use_google_cdn = True
search_fields = ('name',)
list_display = ('name', 'id_code', 'number_of_resources', 'number_of_children')
list_filter = (TreeRoots,)
filter_horizontal = ('curators',)
class TopicAdmin(admin.ModelAdmin):
search_fields = ('name',)
list_display = ('name', 'number_of_resources')
filter_horizontal = ('curators',)
class ResourceTypeAdmin(admin.ModelAdmin):
search_fields = ('resource_type',)
list_display = ('resource_type', 'number_of_resources')
class AudienceAdmin(admin.ModelAdmin):
search_fields = ('name',)
list_display = ('name', 'number_of_resources')
filter_horizontal = ('curators',)
admin.site.register(Resource, ResourceAdmin)
admin.site.register(ResourceType, ResourceTypeAdmin)
admin.site.register(Audience, AudienceAdmin)
admin.site.register(Topic, TopicAdmin)
admin.site.register(Domain, DomainAdmin)
| bsd-2-clause | 5,507,491,974,509,886,000 | 36.353448 | 107 | 0.643203 | false |
SoBeRBot94/TicTacToe-GE | GameEngine/conf.py | 1 | 9904 | # -*- coding: utf-8 -*-
#
# TicTacToe-GE documentation build configuration file, created by
# sphinx-quickstart on Tue Sep 19 23:45:17 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('./Players'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'TicTacToe-GE'
copyright = u'2017, Group K'
author = u'Group K'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'TicTacToe-GE v0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'TicTacToe-GEdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'TicTacToe-GE.tex', u'TicTacToe-GE Documentation',
u'Group K', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'tictactoe-ge', u'TicTacToe-GE Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'TicTacToe-GE', u'TicTacToe-GE Documentation',
author, 'TicTacToe-GE', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
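# A typical local build using this configuration might look like the
# following (output directory is only an example):
#
#     sphinx-build -b html . _build/html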
| gpl-3.0 | -8,068,536,931,307,508,000 | 27.959064 | 80 | 0.69164 | false |
tboyce1/home-assistant | tests/components/test_canary.py | 4 | 2623 | """The tests for the Canary component."""
import unittest
from unittest.mock import patch, MagicMock, PropertyMock
import homeassistant.components.canary as canary
from homeassistant import setup
from tests.common import (
get_test_home_assistant)
def mock_device(device_id, name, is_online=True):
"""Mock Canary Device class."""
device = MagicMock()
type(device).device_id = PropertyMock(return_value=device_id)
type(device).name = PropertyMock(return_value=name)
type(device).is_online = PropertyMock(return_value=is_online)
return device
def mock_location(name, is_celsius=True, devices=None):
"""Mock Canary Location class."""
location = MagicMock()
type(location).name = PropertyMock(return_value=name)
type(location).is_celsius = PropertyMock(return_value=is_celsius)
type(location).devices = PropertyMock(return_value=devices or [])
return location
def mock_reading(sensor_type, sensor_value):
"""Mock Canary Reading class."""
reading = MagicMock()
type(reading).sensor_type = PropertyMock(return_value=sensor_type)
type(reading).value = PropertyMock(return_value=sensor_value)
return reading
class TestCanary(unittest.TestCase):
"""Tests the Canary component."""
def setUp(self):
"""Initialize values for this test case class."""
self.hass = get_test_home_assistant()
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
@patch('homeassistant.components.canary.CanaryData.update')
@patch('canary.api.Api.login')
def test_setup_with_valid_config(self, mock_login, mock_update):
"""Test setup component."""
config = {
"canary": {
"username": "[email protected]",
"password": "bar",
}
}
self.assertTrue(
setup.setup_component(self.hass, canary.DOMAIN, config))
mock_update.assert_called_once_with()
mock_login.assert_called_once_with()
def test_setup_with_missing_password(self):
"""Test setup component."""
config = {
"canary": {
"username": "[email protected]",
}
}
self.assertFalse(
setup.setup_component(self.hass, canary.DOMAIN, config))
def test_setup_with_missing_username(self):
"""Test setup component."""
config = {
"canary": {
"password": "bar",
}
}
self.assertFalse(
setup.setup_component(self.hass, canary.DOMAIN, config))
| apache-2.0 | 2,485,681,822,438,767,000 | 29.858824 | 70 | 0.627526 | false |
palankai/baluster | src/baluster/utils.py | 1 | 1844 | from asyncio import iscoroutinefunction, coroutine
from contextlib import contextmanager
from functools import partial
import re
from .exceptions import MultipleExceptions
class Undefined:
pass
def make_if_none(obj, default):
if obj is not None:
return obj
return default
def dict_partial_copy(source, patterns):
keys = _find_matches(patterns, source.keys())
return dict(filter(lambda i: i[0] in keys, source.items()))
@contextmanager
def capture_exceptions():
exceptions = []
@contextmanager
def capture():
try:
yield
except Exception as ex:
exceptions.append(ex)
try:
yield capture
finally:
if exceptions:
if len(exceptions) == 1:
raise exceptions[0]
raise MultipleExceptions(exceptions)
async def as_async(func, *args, **kwargs):
if iscoroutinefunction(func):
return await func(*args, **kwargs)
return func(*args, **kwargs)
def async_partial(*args, **kwargs):
return coroutine(partial(*args, **kwargs))
def make_caller(what_to_call):
return lambda *a, **k: what_to_call()
def merge_dicts(dicts):
return {k: v for d in dicts for k, v in d.items()}
def get_member_name(own_name, name):
if own_name is None:
return name
return _join_names(own_name, name)
def find_instance(tree, name):
instance = tree
for part in name.split('.')[:-1]:
instance = getattr(instance, part)
return instance
def _join_names(*names):
return '.'.join(names)
def _find_matches(patterns, candidates):
pts = list(map(_compile_regex, patterns))
return list(filter(
lambda c: any(map(lambda p: p.match(c), pts)),
candidates
))
def _compile_regex(name):
return re.compile('^{}(\..*)?$'.format(name))
| mit | 1,445,387,088,957,653,500 | 19.954545 | 63 | 0.632863 | false |
aylward/ITKTubeTK | setup.py | 1 | 1986 | # -*- coding: utf-8 -*-
from __future__ import print_function
from os import sys
import numpy as np
try:
from skbuild import setup
except ImportError:
print('scikit-build is required to build from source.', file=sys.stderr)
print('Please run:', file=sys.stderr)
print('', file=sys.stderr)
print(' python -m pip install scikit-build')
sys.exit(1)
setup(
name='itk-tubetk',
version='0.9.0',
author='Stephen R. Aylward',
author_email='[email protected]',
include_dirs=[np.get_include()],
packages=['itk'],
package_dir={'itk': 'itk'},
download_url=r'https://github.com/InsightSoftwareConsortium/ITKTubeTK',
description=r'An open-source toolkit, led by Kitware, Inc., for the segmentation, registration, and analysis of tubes and surfaces in images.',
long_description='TubeTK is an open-source toolkit for the segmentation, registration, and analysis of tubes and surfaces in images.',
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: C++",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Healthcare Industry",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Medical Science Apps.",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Software Development :: Libraries",
"Operating System :: Android",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Operating System :: Unix",
"Operating System :: MacOS"
],
license='Apache',
keywords='ITK InsightToolkit',
url=r'https://itk.org/',
install_requires=[
r'itk>=5.2.0.post2',
r'itk-minimalpathextraction>=1.2.0'
]
)
| apache-2.0 | 1,078,648,965,849,893,400 | 36.471698 | 147 | 0.639476 | false |
angst7/far | models/models1.py | 1 | 3286 |
# SQLAlchemy, SQLElixir
from sqlalchemy import ForeignKey
from sqlalchemy.orm import relation, backref
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class AttackType:
UNDEFINED = 0
# Melee attacks
HIT = 1
CRUSH = 2
SLASH = 3
PIERCE = 4
CLEAVE = 5
CLAW = 6
KICK = 7
# Elemental attacks
AIR = 10
FIRE = 11
WATER = 12
EARTH = 13
# Magical attacks
GOOD_MAGIC = 20
EVIL_MAGIC = 21
NEUTRAL_MAGIC = 22
# Other attacks
DISEASE = 31
POISON = 32
class MobFlags:
UNDEFINED = 0
AGGRESSIVE = 1
SENTINAL = 2
ISNPC = 4
WIMPY = 8
class AffectFlags:
UNDEFINED = 0
SENSE_LIFE = 1
SENSE_HIDDEN = 2
SEE_INVISIBLE = 4
NIGHT_VISION = 8
FLYING = 16
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
username = Column(String)
password = Column(String)
email = Column(String, nullable=False)
firstname = Column(String)
lastname = Column(String)
def __init__(self, username, password, firstname, lastname):
self.username = username
self.password = password
self.firstname = firstname
self.lastname = lastname
def __repr__(self):
return "<User('%s', '%s', '%s', '%s')>" % (self.username, self.password, self.firstname, self.lastname)
class Attribute(Base):
__tablename__ = 'attributes'
id = Column(Integer, primary_key=True)
name = Column(String)
class AttributeEffects(Base):
__tablename__ = 'attributeeffects'
id = Column(Integer, primary_key=True)
name = Column(String)
attribute = relation(Attribute, backref('attributeeffects', order_by=id))
modifier = Column(Integer)
class Attack(Base):
__tablename__ = 'attack'
id = Column(Integer, primary_key=True)
name = Column(String)
attacktype = Column(Integer) # this will be one of AttackType class attacks
dice = Column(Integer) # d&d styke number of die
sides = Column(Integer) # d&d style die sides
bonus = Column(Integer) # attack bonus over dice roll
use = Column(Integer) # Percent chance to use this attack 0-100
class Skill(Base):
__tablename__ = 'skills'
id = Column(Integer, primary_key=True)
name = Column(String)
attributeeffects = relation(AttributeEffects, backref('skills'), order_by=id))
class ToonClass(Base):
__tablename__ = 'classes'
id = Column(Integer, primary_key=True)
name = Column(String)
attributeeffects = relation(AttributeEffects, backref('classes'), order_by=id))
class ToonLevel(Base):
__tablename__ = 'levels'
id = Column
toonclass = relation(ToonClass, backref=backref('levels', order_by=id))
level = Column(Integer)
class Toon(Base):
__tablename__ = 'toons'
id = Column(Integer, primary_key=True)
name = Column(String)
levels = relation(ToonLevel, backref=backref('toons', order_by=id))
affectflags = Column(Integer)
| mit | -6,676,001,317,496,061,000 | 23.340741 | 111 | 0.590992 | false |
farert/farert | db/scripts/jr_db_reg_old_1.py | 1 | 2553 | #!python3.0.1
# -*- coding: utf-8 -*-
"""
f[^x[Xo^
kC¹ JRkC¹ Ùü Ùw ͱ¾Ä 0 0
"""
import os
import sqlite3
import sys
import re
from collections import defaultdict
if 1 < len(sys.argv):
fn = sys.argv[1]
else:
fn = 'jr.txt'
dbname = 'jr.db'
if os.access(dbname, os.F_OK):
os.unlink(dbname)
con = sqlite3.connect(dbname, isolation_level=None)
###########################################
sql = """
create table t_company (
name char(11) not null primary key
);
"""
con.execute(sql)
###########################################
sql = """
create table t_prefect(
name char(12) not null primary key
);
"""
con.execute(sql)
###########################################
sql = """
create table t_line(
name text not null primary key
);
"""
con.execute(sql)
###########################################
sql = """
create table t_station(
name text not null,
kana text not null,
company_id integer not null references t_company(rowid),
prefect_id integer not null references t_prefect(rowid),
jctflg integer not null,
sameflg integer not null default(0),
cityflg integer not null,
primary key(name, prefect_id)
);
"""
con.execute(sql)
###########################################
sql = """
create table t_lines (
line_id integer not null references t_line(rowid),
station_id integer not null references t_station(rowid),
sales_km integer not null,
calc_km integer not null,
spe_route integer not null default(0),
primary key (line_id, station_id)
);
"""
con.execute(sql)
###########################################
sql = """
create table t_jct (
line_id integer not null references t_line(rowid),
station_id integer not null references t_station(rowid),
primary key (line_id, station_id)
);
"""
con.execute(sql)
###########################################
items = [[], [], []]
h_items = [defaultdict(int), defaultdict(int), defaultdict(int)]
n_lin = 0
for lin in open(fn, 'r'):
n_lin += 1
if n_lin == 1:
continue
linitems = lin.split('\t')
for i in [0, 1, 2]:
key = linitems[i].strip();
h_items[i][key] += 1
if 1 == h_items[i][key]:
items[i].append([key])
con.executemany('insert into t_prefect values(?)', items[0])
print("registerd t_prefect.")
con.executemany('insert into t_company values(?)', items[1])
print("registerd t_company.")
con.executemany('insert into t_line values(?)', items[2])
print("registerd t_line.")
print("complete success.")
| gpl-3.0 | 5,349,268,513,671,626,000 | 19.275 | 64 | 0.550333 | false |
aziele/alfpy | alfpy/utils/seqrecords.py | 1 | 3791 | from . import fasta
class SeqRecords:
"""Object representing an ordered collection of sequence records.
Attributes:
id_list (list) : List of sequence record identifiers
seq_list (list) : List of sequence strings
count (int) : Number of sequence records
"""
def __init__(self, id_list=None, seq_list=None):
"""Create a collection (may be empty) of sequence records.
Example:
>>> ids = ['seq1', 'seq2']
>>> seqs = ['ATGCTG', 'TGCTGATAGTA']
>>> seq_records = SeqRecords(id_list=ids, seq_list=seqs)
>>> print seq_records
SeqRecords (noseqs: 2)
"""
self.count = 0 if not id_list else len(seq_list)
self.id_list = id_list if id_list else []
# Make all sequences uppercased.
self.seq_list = [s.upper() for s in seq_list] if seq_list else []
def add(self, seqid, seq):
"""Add a sequence record to the existing collection.
Args:
id (str) : sequence identifier
seq (str) : sequence string
Example:
>>> seq_record.add("seq3", "TGCTGA")
"""
self.id_list.append(seqid)
self.seq_list.append(seq.upper())
self.count += 1
def fasta(self, wrap=70):
"""Return sequence records as a mutli-FASTA string.
Example:
>>> ids = ['seq1', 'seq2']
>>> seqs = ['ATGCTG', 'TGCTGATAGTA']
>>> seq_records = SeqRecords(id_list=ids, seq_list=seqs)
>>> print seq_records.fasta()
>seq1
ATGCTG
>seq2
TGCTGATAGTA
"""
l = []
for seqid, seq in self:
seq_record = fasta.FastaRecord(seq=seq, seqid=seqid)
l.append(seq_record.format(wrap=wrap))
return "\n".join(l)
@property
def length_list(self):
"""Return a list of the sequences' length_list"""
return [len(seq) for seq in self.seq_list]
def __iter__(self):
"""
Iterate over sequence records in the collection.
Example:
>>> for amino_acid in record:
... print(amino_acid)
seq1
ATGCTG
seq2
TGCTGATAGTA
"""
for i in range(self.count):
seqid = self.id_list[i]
seq = self.seq_list[i]
yield seqid, seq
def __len__(self):
"""
Return the number of sequence records in the collection.
Example:
>>> len(seq_records)
3
"""
return len(self.seq_list)
def __repr__(self):
return "{0} (noseqs: {1})".format(self.__class__.__name__,
self.count)
def read_fasta(handle):
"""Create a SeqRecords object from Fasta file.
Args:
file handle : a file containing Fasta sequences.
"""
id_list = []
seq_list = []
for seq_record in fasta.parse(handle):
id_list.append(seq_record.id)
seq_list.append(seq_record.seq)
return SeqRecords(id_list=id_list, seq_list=seq_list)
def main():
seq_records = SeqRecords()
seq_records.add(
'seq1', 'AACGTACCATTGAACGTACCATTGAACGTACCATTGATGCATGGTAGAT')
seq_records.add('seq2', 'CTAGGGGACTTATCTAGGGGACTTATCTAGGGGACTTAT')
seq_records.add('seq3', 'CTAGGGAAAATTCTAGGGAAAATTCTAGGGAAAATT')
import uuid
import os
outfilename = uuid.uuid4().hex
oh = open(outfilename, 'w')
oh.write(seq_records.fasta())
oh.close()
fh = open(outfilename)
seq_records = read_fasta(fh)
fh.close()
os.remove(outfilename)
return seq_records
if __name__ == '__main__':
seq_records = main()
print(seq_records.fasta())
| mit | -537,299,720,962,794,500 | 26.273381 | 73 | 0.543128 | false |
blukat29/notifyhere | notifyhere/dash/api/gmail.py | 1 | 2935 | from httplib import HTTPSConnection
import json
import imaplib
import re
import base
import tools
import secrets
class GmailApi(base.ApiBase):
list_re = re.compile(r'\((.+)\) "(.+)" "(.+)"')
def __init__(self):
base.ApiBase.__init__(self, "gmail")
self.token = ""
def icon_url(self):
return "https://mail.google.com/favicon.ico"
def oauth_link(self):
url = "https://accounts.google.com/o/oauth2/auth"
args = {
"response_type":"code",
"client_id":secrets.GMAIL_CLIENT_ID,
"redirect_uri":secrets.BASE_REDIRECT_URL + "gmail",
"scope":"https://mail.google.com/ https://www.googleapis.com/auth/userinfo.email",
"approval_prompt":"force",
}
return url + "?" + tools.encode_params(args)
def oauth_callback(self, params):
if 'code' not in params:
return None
conn = HTTPSConnection("accounts.google.com")
body = tools.encode_params({
"grant_type":"authorization_code",
"code":params['code'],
"client_id":secrets.GMAIL_CLIENT_ID,
"client_secret":secrets.GMAIL_CLIENT_SECRET,
"redirect_uri":secrets.BASE_REDIRECT_URL + "gmail",
})
headers = {
"Content-Type":"application/x-www-form-urlencoded",
}
conn.request("POST", "/o/oauth2/token", body, headers)
resp = conn.getresponse()
try:
self.token = json.loads(resp.read())['access_token']
self.is_auth = True
except (KeyError, ValueError):
return None
conn.close()
conn = HTTPSConnection("www.googleapis.com")
conn.request("GET","/oauth2/v1/tokeninfo?alt=json&access_token="+self.token,"",{})
resp = conn.getresponse()
self.username = json.loads(resp.read())['email']
def update(self):
auth = "user=%s\1auth=Bearer %s\1\1" % (self.username, self.token)
m = imaplib.IMAP4_SSL("imap.gmail.com")
m.authenticate("XOAUTH2", lambda x: auth)
status, raw_list = m.list()
boxes = []
for line in raw_list:
attr, root, raw_name = GmailApi.list_re.search(line).groups()
if "Noselect" in attr:
continue
decoded_name = raw_name.replace("&","+").decode("utf-7")
boxes.append((raw_name, decoded_name))
noti = {}
for box in boxes:
raw_name, decoded_name = box
status, result = m.select(raw_name)
total = int(result[0])
status, result = m.search(None, "(UNSEEN)")
unseen = len(result[0].split())
if unseen > 0:
noti[decoded_name] = unseen
m.close()
m.logout()
return noti
def logout(self):
self.is_auth = False
self.token = ""
| mit | 1,942,986,103,054,837,800 | 28.94898 | 94 | 0.539353 | false |
eldarerathis/xpertmud | xpertmud/scripting/python/perl2python.py | 1 | 1233 | #!/usr/bin/python
import sys
import re
line = sys.stdin.readline()
while(line != ""):
line = re.sub(r' {', r':', line)
line = re.sub(r'}\s*', r'', line)
line = re.sub(r'->', r'.', line)
line = re.sub(r'sub', r'def', line)
line = re.sub(r'elsif', r'elif', line)
line = re.sub(r'\$(\d)', r'reS.match.group(\1)', line)
line = re.sub(r'\$(\w+)', r'\1', line)
line = re.sub(r';$', r'', line)
line = re.sub(r'\.=', r'+=', line)
line = re.sub(r'^(\s*print\s+.*)\\n("\s*.*)$', r'\1\2', line)
line = re.sub(r'\s+eq\s+', r' == ', line)
line = re.sub(r'::', r'.', line)
line = re.sub(r'&&', r'and', line)
line = re.sub(r'\|\|', r'or', line)
line = re.sub(r'length\(', r'len(', line)
line = re.sub(r'my ', r'', line)
line = re.sub(r'defined ', r'', line)
line = re.sub(r'foreach', r'for', line)
line = re.sub(r'(\s*)(.*[^\w\d_])([\w\d_]+)\s+=~\s+s/([^/]*)/([^/]*)/(.*)',
r"\1\2reS.perlSub(r'\4', r'\5', \3)\6\n\1\3 = reS.string", line)
line = re.sub(r'/([^/]*)/',
r"reS.perlMatch(r'\1', line)", line)
line = re.sub(r'push\(@(.*), ', r'\1.append(', line)
sys.stdout.write(line)
line = sys.stdin.readline()
| gpl-2.0 | -8,560,738,911,464,285,000 | 35.264706 | 82 | 0.453366 | false |
kashefy/nideep | nideep/iow/copy_lmdb.py | 1 | 1826 | '''
Created on May 30, 2016
@author: kashefy
'''
import lmdb
from lmdb_utils import IDX_FMT, MAP_SZ
def copy_samples_lmdb(path_lmdb, path_dst, keys, func_data=None):
"""
Copy select samples from an lmdb into another.
Can be used for sampling from an lmdb into another and generating a random shuffle
of lmdb content.
Parameters:
path_lmdb -- source lmdb
path_dst -- destination lmdb
keys -- list of keys or indices to sample from source lmdb
"""
db = lmdb.open(path_dst, map_size=MAP_SZ)
key_dst = 0
with db.begin(write=True) as txn_dst:
with lmdb.open(path_lmdb, readonly=True).begin() as txn_src:
for key_src in keys:
if not isinstance(key_src, basestring):
key_src = IDX_FMT.format(key_src)
if func_data is None:
txn_dst.put(IDX_FMT.format(key_dst), txn_src.get(key_src))
else:
txn_dst.put(IDX_FMT.format(key_dst), func_data(txn_src.get(key_src)))
key_dst += 1
db.close()
def concatenate_lmdb(paths_lmdb, path_dst):
"""
Copy select samples from an lmdb into another.
Can be used for sampling from an lmdb into another and generating a random shuffle
of lmdb content.
Parameters:
paths_lmdb -- list of lmdbs to conatenate
path_dst -- destination lmdb
keys -- list of keys or indices to sample from source lmdb
"""
db = lmdb.open(path_dst, map_size=MAP_SZ)
key_dst = 0
with db.begin(write=True) as txn_dst:
for p in paths_lmdb:
with lmdb.open(p, readonly=True).begin() as txn_src:
for _, value in txn_src.cursor():
txn_dst.put(IDX_FMT.format(key_dst), value)
key_dst += 1
db.close()
| bsd-2-clause | -6,608,046,657,129,439,000 | 30.482759 | 89 | 0.594743 | false |
leductan-nguyen/RaionPi | src/octoprint/plugins/softwareupdate/scripts/update-octoprint.py | 1 | 6006 | #!/bin/env python
from __future__ import absolute_import
__author__ = "Gina Haeussge <[email protected]>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The RaionPi Project - Released under terms of the AGPLv3 License"
import errno
import subprocess
import sys
def _get_git_executables():
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
return GITS
def _git(args, cwd, hide_stderr=False, verbose=False, git_executable=None):
if git_executable is not None:
commands = [git_executable]
else:
commands = _get_git_executables()
for c in commands:
try:
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % args[0])
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % args[0])
return p.returncode, stdout
def _python(args, cwd, python_executable, sudo=False):
command = [python_executable] + args
if sudo:
command = ["sudo"] + command
try:
p = subprocess.Popen(command, cwd=cwd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except:
return None, None
stdout = p.communicate()[0].strip()
if sys.version >= "3":
stdout = stdout.decode()
return p.returncode, stdout
def update_source(git_executable, folder, target, force=False):
print(">>> Running: git diff --shortstat")
returncode, stdout = _git(["diff", "--shortstat"], folder, git_executable=git_executable)
if returncode != 0:
raise RuntimeError("Could not update, \"git diff\" failed with returncode %d: %s" % (returncode, stdout))
if stdout and stdout.strip():
# we got changes in the working tree, maybe from the user, so we'll now rescue those into a patch
import time
import os
timestamp = time.strftime("%Y%m%d%H%M")
patch = os.path.join(folder, "%s-preupdate.patch" % timestamp)
print(">>> Running: git diff and saving output to %s" % timestamp)
returncode, stdout = _git(["diff"], folder, git_executable=git_executable)
if returncode != 0:
raise RuntimeError("Could not update, installation directory was dirty and state could not be persisted as a patch to %s" % patch)
with open(patch, "wb") as f:
f.write(stdout)
print(">>> Running: git reset --hard")
returncode, stdout = _git(["reset", "--hard"], folder, git_executable=git_executable)
if returncode != 0:
raise RuntimeError("Could not update, \"git reset --hard\" failed with returncode %d: %s" % (returncode, stdout))
print(">>> Running: git pull")
returncode, stdout = _git(["pull"], folder, git_executable=git_executable)
if returncode != 0:
raise RuntimeError("Could not update, \"git pull\" failed with returncode %d: %s" % (returncode, stdout))
print(stdout)
if force:
reset_command = ["reset"]
reset_command += [target]
print(">>> Running: git %s" % " ".join(reset_command))
returncode, stdout = _git(reset_command, folder, git_executable=git_executable)
if returncode != 0:
raise RuntimeError("Error while updating, \"git %s\" failed with returncode %d: %s" % (" ".join(reset_command), returncode, stdout))
print(stdout)
def install_source(python_executable, folder, user=False, sudo=False):
print(">>> Running: python setup.py clean")
returncode, stdout = _python(["setup.py", "clean"], folder, python_executable)
if returncode != 0:
print("\"python setup.py clean\" failed with returncode %d: %s" % (returncode, stdout))
print("Continuing anyways")
print(stdout)
print(">>> Running: python setup.py install")
args = ["setup.py", "install"]
if user:
args.append("--user")
returncode, stdout = _python(args, folder, python_executable, sudo=sudo)
if returncode != 0:
raise RuntimeError("Could not update, \"python setup.py install\" failed with returncode %d: %s" % (returncode, stdout))
print(stdout)
def parse_arguments():
import argparse
parser = argparse.ArgumentParser(prog="update-octoprint.py")
parser.add_argument("--git", action="store", type=str, dest="git_executable",
help="Specify git executable to use")
parser.add_argument("--python", action="store", type=str, dest="python_executable",
help="Specify python executable to use")
parser.add_argument("--force", action="store_true", dest="force",
help="Set this to force the update to only the specified version (nothing newer)")
parser.add_argument("--sudo", action="store_true", dest="sudo",
help="Install with sudo")
parser.add_argument("--user", action="store_true", dest="user",
help="Install to the user site directory instead of the general site directory")
parser.add_argument("folder", type=str,
help="Specify the base folder of the RaionPi installation to update")
parser.add_argument("target", type=str,
help="Specify the commit or tag to which to update")
args = parser.parse_args()
return args
def main():
args = parse_arguments()
git_executable = None
if args.git_executable:
git_executable = args.git_executable
python_executable = sys.executable
if args.python_executable:
python_executable = args.python_executable
folder = args.folder
target = args.target
import os
if not os.access(folder, os.W_OK):
raise RuntimeError("Could not update, base folder is not writable")
update_source(git_executable, folder, target, force=args.force)
install_source(python_executable, folder, user=args.user, sudo=args.sudo)
if __name__ == "__main__":
main()
| agpl-3.0 | 8,245,839,758,198,759,000 | 32.366667 | 135 | 0.670663 | false |
TopherGopher/aws-infra.jenkins-scripts | gluster.py | 1 | 7395 | #!/usr/bin/python
import click
import aws_instance
import jenkins
import jenkinspoll
import subprocess
import os
import time
aws_key='/var/jenkins_home/.ssh/aws.pem'
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.group(context_settings=CONTEXT_SETTINGS)
@click.version_option(version='1.0.0')
def siteman():
pass
def build_and_run_command(user, host, command):
ssh_cmd = ['ssh', '-p22', '-i', aws_key]
ssh_cmd += ['-o', 'StrictHostKeyChecking=no']
ssh_cmd += ["{user}@{host}".format(user=user,host=host)]
ssh_cmd += 'sudo {command}'.format(command=command).split(" ")
try:
return subprocess.check_output(ssh_cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
# Gracefully handle previously stopped procs
if "stop: Unknown instance" in e.output:
return ""
elif "no process found" in e.output or "no process killed" in e.output:
return ""
elif "command not found" in e.output:
return "command not found"
elif "/etc/init.d/glusterfs-server: No such file or directory" in e.output:
return "command not found"
exit(e.output)
@siteman.command()
@click.option('--user')
@click.option('--host')
@click.option('--peer')
def peer_disconnect(user, host, peer):
peer_disconnect_fnc(user, host, peer)
def peer_disconnect_fnc(user, host, peer):
"""
gluster peer detach gluster02.newmediadenver.com
"""
command="gluster peer detach {peer}".format(peer=peer)
print build_and_run_command(user, host, command)
@siteman.command()
@click.option('--user')
@click.option('--host')
@click.option('--peer')
def peer_connect(user, host, peer):
peer_connect_fnc(user, host, peer)
def peer_connect_fnc(user, host, peer):
"""
gluster peer probe gluster02.newmediadenver.com
"""
command="gluster peer probe {peer}".format(peer=peer)
print build_and_run_command(user, host, command)
@siteman.command()
@click.option('--user')
@click.option('--host')
def kill_gluster(user, host):
kill_gluster_fnc(user, host)
def kill_gluster_fnc(user, host):
print "Killing gluster..."
out = build_and_run_command(user, host, "/etc/init.d/glusterfs-server stop")
if out == "command not found":
print build_and_run_command(user, host, "service glusterd stop")
print build_and_run_command(user, host, "service glusterfsd stop")
print build_and_run_command(user, host, "killall glusterfsd")
print build_and_run_command(user, host, "killall glusterfs")
@siteman.command()
@click.option('--user')
@click.option('--host')
def start_gluster(user, host):
start_gluster_fnc(user, host)
def start_gluster_fnc(user, host):
out = build_and_run_command(user, host, "/etc/init.d/glusterfs-server start")
if out == "command not found":
print build_and_run_command(user, host, "service glusterd start")
print build_and_run_command(user, host, "service glusterfsd start")
@siteman.command()
@click.option('--user')
@click.option('--host')
def gluster_status(user, host):
gluster_status_fnc(user, host)
def gluster_status_fnc(user, host):
"""
gluster volume status
"""
command="gluster volume status"
print build_and_run_command(user, host, command)
@siteman.command()
@click.option('--user')
@click.option('--host')
def gluster_heal(user, host):
gluster_heal_fnc(user, host)
def gluster_heal_fnc(user, host):
"""
gluster volume heal nmd
"""
command="gluster volume heal nmd"
print build_and_run_command(user, host, command)
@siteman.command()
@click.option('--user')
@click.option('--host')
def gluster_heal_info(user, host):
gluster_heal_info_fnc(user, host)
def gluster_heal_info_fnc(user, host):
"""
gluster volume heal nmd info
"""
command="gluster volume heal nmd info"
print build_and_run_command(user, host, command)
@siteman.command()
@click.option('--user')
@click.option('--host')
def configure_new_gluster_instance(user, host):
configure_new_gluster_instance_fnc(user, host)
def configure_new_gluster_instance_fnc(user, host):
"""
Kick off jenkins-playbook to make sure the software is installed
"""
J = jenkins.Jenkins('https://leroy.nmdev.us', username=os.environ.get('JENKINS_SERVICE_USERNAME'), password=os.environ.get('JENKINS_SERVICE_PASSWORD'))
# Set build parameters, kick off a new build, and block until complete.
environment = "staging" if "nmdev.us" in host else "production"
# Override - any new gluster instance should be user=ubuntu
user="ubuntu"
# Run the ansible installer on the gluster box
print "Running jenkins-playbook with install options on the gluster box"
params = {"TARGET_HOST": "gluster", "AWS_ENVIRONMENT": environment, "AWS_SSH_USER": user, "ANSIBLE_TAGS": 'aws,provision,packages'}
J.build_job("jenkins-playbook", params)
jenkinspoll.wait_for_job_to_finish("jenkins-playbook", jenkins_connection=J)
# Run just the configuration components of the Jenkins playbook
print "Running jenkins-playbook with the 'configuration' tag on the gluster box"
params['ANSIBLE_TAGS'] = "configuration"
J.build_job("jenkins-playbook", params)
jenkinspoll.wait_for_job_to_finish("jenkins-playbook", jenkins_connection=J)
@siteman.command()
@click.option('--old-host', prompt='old_host', help='old_host')
@click.option('--old-mount-point', prompt='old_mount_point', help='old_mount_point')
@click.option('--new-host', prompt='new_host', help='new_host')
@click.option('--new-user', prompt='new_user', help='new_user')
@click.option('--new-mount-point', prompt='new_mount_point', help='new_mount_point')
@click.option('--gluster-user', prompt='gluster_user')
@click.option('--gluster-host', prompt='gluster_host')
def replace_brick(old_host, old_mount_point, new_host, new_user, new_mount_point, gluster_user, gluster_host):
replace_brick_fnc(old_host, old_user, old_mount_point, new_host, new_user, new_mount_point)
def replace_brick_fnc(old_host, old_mount_point, new_host, new_user, new_mount_point, gluster_user, gluster_host):
"""
gluster volume replace-brick nmd gluster02.newmediadenver.com:/srv/sdb1/nmd gluster06.newmediadenver.com:/srv/sdg1/nmd commit force
"""
command="gluster volume replace-brick nmd {old_host}:{old_mount_point}/nmd {new_host}:{new_mount_point}/nmd commit force".format(old_user=old_user, old_host=old_host, old_mount_point=old_mount_point, new_user=new_user, new_host=new_host, new_mount_point=new_mount_point)
print build_and_run_command(gluster_user, gluster_host, command)
@siteman.command()
@click.option('--user')
@click.option('--host')
@click.option('--device')
def format_brick_to_ext4(user, host, device):
format_brick_to_ext4_fnc(user,host,device)
def format_brick_to_ext4(user, host, device):
command="mkfs.ext4 {device}".format(device=device)
print build_and_run_command(user, host, command)
def add_gluster_repo(user, host):
command="add-apt-repository ppa:gluster/glusterfs-3.7"
print build_and_run_command(user, host, command)
def ping_server(host):
try:
out = subprocess.check_output("ping -c 1 {host}".format(host=host).split(" "), stderr=subprocess.STDOUT)
return True
except subprocess.CalledProcessError as e:
if e.returncode != 0:
return False
def poll_server_with_ping(host):
print "Polling {host}. Will continue polling until host responds...".format(host=host)
while ping_server(host) == False:
time.sleep(5)
if __name__ == '__main__':
siteman()
| mit | -5,279,975,518,175,508,000 | 34.552885 | 272 | 0.710074 | false |
sassoftware/pymaven | pymaven/artifact.py | 1 | 4769 | #
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The artifact module provides objects and functions for working with artifacts
in a maven repository
"""
import functools
import re
import sys
import six
from .errors import ArtifactParseError
from .versioning import VersionRange
if sys.version_info > (2,):
from .utils import cmp
MAVEN_COORDINATE_RE = re.compile(
r'(?P<group_id>[^:]+)'
r':(?P<artifact_id>[^:]+)'
r'(:(?P<type>[^:]+)(:(?P<classifier>[^:]+))?)?'
r':(?P<version>[^:])'
)
@functools.total_ordering
class Artifact(object):
"""Represents an artifact within a maven repository."""
__slots__ = ("group_id", "artifact_id", "version", "type", "classifier",
"contents")
def __init__(self, coordinate):
self.version = None
self.type = "jar"
self.classifier = None
self.contents = None
parts = coordinate.split(':')
length = len(parts)
if length < 2 or length > 5:
raise ArtifactParseError(
"Too many items in coordinate: '%s'" % coordinate)
self.group_id, self.artifact_id = parts[:2]
if length == 3:
self.version = parts[2]
elif length == 4:
self.type = parts[2]
self.version = parts[3]
elif length == 5:
self.type = parts[2]
self.classifier = parts[3]
self.version = parts[4]
if self.version:
self.version = VersionRange(self.version)
def __cmp__(self, other):
if self is other:
return 0
if not isinstance(other, Artifact):
if isinstance(other, six.string_types):
try:
return cmp(self, Artifact(other))
except ArtifactParseError:
pass
return 1
result = cmp(self.group_id, other.group_id)
if result == 0:
result = cmp(self.artifact_id, other.artifact_id)
if result == 0:
result = cmp(self.type, other.type)
if result == 0:
if self.classifier is None:
if other.classifier is not None:
result = 1
else:
if other.classifier is None:
result = -1
else:
result = cmp(self.classifier, other.classifier)
if result == 0:
result = cmp(self.version.version,
other.version.version)
return result
def __eq__(self, other):
return self.__cmp__(other) == 0
def __lt__(self, other):
return self.__cmp__(other) < 0
def __ne__(self, other):
return self.__cmp__(other) != 0
def __hash__(self):
return hash((self.group_id, self.artifact_id, self.version, self.type,
self.classifier))
def __str__(self):
s = ':'.join((self.group_id, self.artifact_id))
if self.version:
s += ':' + self.type
if self.classifier:
s += ':' + self.classifier
s += ':' + str(self.version.version if self.version.version
else self.vserion)
return s
def __repr__(self):
return "<pymaven.Artifact(%r)" % self.coordinate
@property
def coordinate(self):
coordinate = "%s:%s" % (self.group_id, self.artifact_id)
if self.type != "jar":
coordinate += ":%s" % self.type
if self.classifier is not None:
coordinate += ":%s" % self.classifier
if self.version is not None:
coordinate += ":%s" % self.version
return coordinate
@property
def path(self):
path = "%s/%s" % (self.group_id.replace('.', '/'), self.artifact_id)
if self.version and self.version.version:
version = self.version.version
path += "/%s/%s-%s" % (version, self.artifact_id, version)
if self.classifier:
path += "-%s" % self.classifier
path += ".%s" % self.type
return path
| apache-2.0 | 7,652,122,665,989,008,000 | 30.375 | 78 | 0.534284 | false |
krzychb/rtd-test-bed | components/partition_table/test_gen_esp32part_host/gen_esp32part_tests.py | 1 | 16713 | #!/usr/bin/env python
from __future__ import print_function, division
import unittest
import struct
import csv
import sys
import subprocess
import tempfile
import os
import io
import re
try:
import gen_esp32part
except ImportError:
sys.path.append("..")
import gen_esp32part
SIMPLE_CSV = """
# Name,Type,SubType,Offset,Size,Flags
factory,0,2,65536,1048576,
"""
LONGER_BINARY_TABLE = b""
# type 0x00, subtype 0x00,
# offset 64KB, size 1MB
LONGER_BINARY_TABLE += b"\xAA\x50\x00\x00" + \
b"\x00\x00\x01\x00" + \
b"\x00\x00\x10\x00" + \
b"factory\0" + (b"\0" * 8) + \
b"\x00\x00\x00\x00"
# type 0x01, subtype 0x20,
# offset 0x110000, size 128KB
LONGER_BINARY_TABLE += b"\xAA\x50\x01\x20" + \
b"\x00\x00\x11\x00" + \
b"\x00\x02\x00\x00" + \
b"data" + (b"\0" * 12) + \
b"\x00\x00\x00\x00"
# type 0x10, subtype 0x00,
# offset 0x150000, size 1MB
LONGER_BINARY_TABLE += b"\xAA\x50\x10\x00" + \
b"\x00\x00\x15\x00" + \
b"\x00\x10\x00\x00" + \
b"second" + (b"\0" * 10) + \
b"\x00\x00\x00\x00"
# MD5 checksum
LONGER_BINARY_TABLE += b"\xEB\xEB" + b"\xFF" * 14
LONGER_BINARY_TABLE += b'\xf9\xbd\x06\x1b\x45\x68\x6f\x86\x57\x1a\x2c\xd5\x2a\x1d\xa6\x5b'
# empty partition
LONGER_BINARY_TABLE += b"\xFF" * 32
def _strip_trailing_ffs(binary_table):
"""
Strip all FFs down to the last 32 bytes (terminating entry)
"""
while binary_table.endswith(b"\xFF" * 64):
binary_table = binary_table[0:len(binary_table) - 32]
return binary_table
class Py23TestCase(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(Py23TestCase, self).__init__(*args, **kwargs)
try:
self.assertRaisesRegex
except AttributeError:
# assertRaisesRegexp is deprecated in Python3 but assertRaisesRegex doesn't exist in Python2
# This fix is used in order to avoid using the alias from the six library
self.assertRaisesRegex = self.assertRaisesRegexp
class CSVParserTests(Py23TestCase):
def test_simple_partition(self):
table = gen_esp32part.PartitionTable.from_csv(SIMPLE_CSV)
self.assertEqual(len(table), 1)
self.assertEqual(table[0].name, "factory")
self.assertEqual(table[0].type, 0)
self.assertEqual(table[0].subtype, 2)
self.assertEqual(table[0].offset, 65536)
self.assertEqual(table[0].size, 1048576)
def test_require_type(self):
csv = """
# Name,Type, SubType,Offset,Size
ihavenotype,
"""
with self.assertRaisesRegex(gen_esp32part.InputError, "type"):
gen_esp32part.PartitionTable.from_csv(csv)
def test_type_subtype_names(self):
csv_magicnumbers = """
# Name, Type, SubType, Offset, Size
myapp, 0, 0,, 0x100000
myota_0, 0, 0x10,, 0x100000
myota_1, 0, 0x11,, 0x100000
myota_15, 0, 0x1f,, 0x100000
mytest, 0, 0x20,, 0x100000
myota_status, 1, 0,, 0x100000
"""
csv_nomagicnumbers = """
# Name, Type, SubType, Offset, Size
myapp, app, factory,, 0x100000
myota_0, app, ota_0,, 0x100000
myota_1, app, ota_1,, 0x100000
myota_15, app, ota_15,, 0x100000
mytest, app, test,, 0x100000
myota_status, data, ota,, 0x100000
"""
# make two equivalent partition tables, one using
# magic numbers and one using shortcuts. Ensure they match
magic = gen_esp32part.PartitionTable.from_csv(csv_magicnumbers)
magic.verify()
nomagic = gen_esp32part.PartitionTable.from_csv(csv_nomagicnumbers)
nomagic.verify()
self.assertEqual(nomagic["myapp"].type, 0)
self.assertEqual(nomagic["myapp"].subtype, 0)
self.assertEqual(nomagic["myapp"], magic["myapp"])
self.assertEqual(nomagic["myota_0"].type, 0)
self.assertEqual(nomagic["myota_0"].subtype, 0x10)
self.assertEqual(nomagic["myota_0"], magic["myota_0"])
self.assertEqual(nomagic["myota_15"], magic["myota_15"])
self.assertEqual(nomagic["mytest"], magic["mytest"])
self.assertEqual(nomagic["myota_status"], magic["myota_status"])
# self.assertEqual(nomagic.to_binary(), magic.to_binary())
def test_unit_suffixes(self):
csv = """
# Name, Type, Subtype, Offset, Size
one_megabyte, app, factory, 64k, 1M
"""
t = gen_esp32part.PartitionTable.from_csv(csv)
t.verify()
self.assertEqual(t[0].offset, 64 * 1024)
self.assertEqual(t[0].size, 1 * 1024 * 1024)
def test_default_offsets(self):
csv = """
# Name, Type, Subtype, Offset, Size
first, app, factory,, 1M
second, data, 0x15,, 1M
minidata, data, 0x40,, 32K
otherapp, app, factory,, 1M
"""
t = gen_esp32part.PartitionTable.from_csv(csv)
# 'first'
self.assertEqual(t[0].offset, 0x010000) # 64KB boundary as it's an app image
self.assertEqual(t[0].size, 0x100000) # Size specified in CSV
# 'second'
self.assertEqual(t[1].offset, 0x110000) # prev offset+size
self.assertEqual(t[1].size, 0x100000) # Size specified in CSV
# 'minidata'
self.assertEqual(t[2].offset, 0x210000)
# 'otherapp'
self.assertEqual(t[3].offset, 0x220000) # 64KB boundary as it's an app image
def test_negative_size_to_offset(self):
csv = """
# Name, Type, Subtype, Offset, Size
first, app, factory, 0x10000, -2M
second, data, 0x15, , 1M
"""
t = gen_esp32part.PartitionTable.from_csv(csv)
t.verify()
# 'first'
self.assertEqual(t[0].offset, 0x10000) # in CSV
self.assertEqual(t[0].size, 0x200000 - t[0].offset) # Up to 2M
# 'second'
self.assertEqual(t[1].offset, 0x200000) # prev offset+size
def test_overlapping_offsets_fail(self):
csv = """
first, app, factory, 0x100000, 2M
second, app, ota_0, 0x200000, 1M
"""
with self.assertRaisesRegex(gen_esp32part.InputError, "overlap"):
t = gen_esp32part.PartitionTable.from_csv(csv)
t.verify()
def test_unique_name_fail(self):
csv = """
first, app, factory, 0x100000, 1M
first, app, ota_0, 0x200000, 1M
"""
with self.assertRaisesRegex(gen_esp32part.InputError, "Partition names must be unique"):
t = gen_esp32part.PartitionTable.from_csv(csv)
t.verify()
class BinaryOutputTests(Py23TestCase):
def test_binary_entry(self):
csv = """
first, 0x30, 0xEE, 0x100400, 0x300000
"""
t = gen_esp32part.PartitionTable.from_csv(csv)
tb = _strip_trailing_ffs(t.to_binary())
self.assertEqual(len(tb), 64 + 32)
self.assertEqual(b'\xAA\x50', tb[0:2]) # magic
self.assertEqual(b'\x30\xee', tb[2:4]) # type, subtype
eo, es = struct.unpack("<LL", tb[4:12])
self.assertEqual(eo, 0x100400) # offset
self.assertEqual(es, 0x300000) # size
self.assertEqual(b"\xEB\xEB" + b"\xFF" * 14, tb[32:48])
self.assertEqual(b'\x43\x03\x3f\x33\x40\x87\x57\x51\x69\x83\x9b\x40\x61\xb1\x27\x26', tb[48:64])
def test_multiple_entries(self):
csv = """
first, 0x30, 0xEE, 0x100400, 0x300000
second,0x31, 0xEF, , 0x100000
"""
t = gen_esp32part.PartitionTable.from_csv(csv)
tb = _strip_trailing_ffs(t.to_binary())
self.assertEqual(len(tb), 96 + 32)
self.assertEqual(b'\xAA\x50', tb[0:2])
self.assertEqual(b'\xAA\x50', tb[32:34])
def test_encrypted_flag(self):
csv = """
# Name, Type, Subtype, Offset, Size, Flags
first, app, factory,, 1M, encrypted
"""
t = gen_esp32part.PartitionTable.from_csv(csv)
self.assertTrue(t[0].encrypted)
tb = _strip_trailing_ffs(t.to_binary())
tr = gen_esp32part.PartitionTable.from_binary(tb)
self.assertTrue(tr[0].encrypted)
class BinaryParserTests(Py23TestCase):
def test_parse_one_entry(self):
# type 0x30, subtype 0xee,
# offset 1MB, size 2MB
entry = b"\xAA\x50\x30\xee" + \
b"\x00\x00\x10\x00" + \
b"\x00\x00\x20\x00" + \
b"0123456789abc\0\0\0" + \
b"\x00\x00\x00\x00" + \
b"\xFF" * 32
# verify that parsing 32 bytes as a table
# or as a single Definition are the same thing
t = gen_esp32part.PartitionTable.from_binary(entry)
self.assertEqual(len(t), 1)
t[0].verify()
e = gen_esp32part.PartitionDefinition.from_binary(entry[:32])
self.assertEqual(t[0], e)
e.verify()
self.assertEqual(e.type, 0x30)
self.assertEqual(e.subtype, 0xEE)
self.assertEqual(e.offset, 0x100000)
self.assertEqual(e.size, 0x200000)
self.assertEqual(e.name, "0123456789abc")
def test_multiple_entries(self):
t = gen_esp32part.PartitionTable.from_binary(LONGER_BINARY_TABLE)
t.verify()
self.assertEqual(3, len(t))
self.assertEqual(t[0].type, gen_esp32part.APP_TYPE)
self.assertEqual(t[0].name, "factory")
self.assertEqual(t[1].type, gen_esp32part.DATA_TYPE)
self.assertEqual(t[1].name, "data")
self.assertEqual(t[2].type, 0x10)
self.assertEqual(t[2].name, "second")
round_trip = _strip_trailing_ffs(t.to_binary())
self.assertEqual(round_trip, LONGER_BINARY_TABLE)
def test_bad_magic(self):
bad_magic = b"OHAI" + \
b"\x00\x00\x10\x00" + \
b"\x00\x00\x20\x00" + \
b"0123456789abc\0\0\0" + \
b"\x00\x00\x00\x00"
with self.assertRaisesRegex(gen_esp32part.InputError, "Invalid magic bytes"):
gen_esp32part.PartitionTable.from_binary(bad_magic)
def test_bad_length(self):
bad_length = b"OHAI" + \
b"\x00\x00\x10\x00" + \
b"\x00\x00\x20\x00" + \
b"0123456789"
with self.assertRaisesRegex(gen_esp32part.InputError, "32 bytes"):
gen_esp32part.PartitionTable.from_binary(bad_length)
class CSVOutputTests(Py23TestCase):
def _readcsv(self, source_str):
return list(csv.reader(source_str.split("\n")))
def test_output_simple_formatting(self):
table = gen_esp32part.PartitionTable.from_csv(SIMPLE_CSV)
as_csv = table.to_csv(True)
c = self._readcsv(as_csv)
# first two lines should start with comments
self.assertEqual(c[0][0][0], "#")
self.assertEqual(c[1][0][0], "#")
row = c[2]
self.assertEqual(row[0], "factory")
self.assertEqual(row[1], "0")
self.assertEqual(row[2], "2")
self.assertEqual(row[3], "0x10000") # reformatted as hex
self.assertEqual(row[4], "0x100000") # also hex
# round trip back to a PartitionTable and check is identical
roundtrip = gen_esp32part.PartitionTable.from_csv(as_csv)
self.assertEqual(roundtrip, table)
def test_output_smart_formatting(self):
table = gen_esp32part.PartitionTable.from_csv(SIMPLE_CSV)
as_csv = table.to_csv(False)
c = self._readcsv(as_csv)
# first two lines should start with comments
self.assertEqual(c[0][0][0], "#")
self.assertEqual(c[1][0][0], "#")
row = c[2]
self.assertEqual(row[0], "factory")
self.assertEqual(row[1], "app")
self.assertEqual(row[2], "2")
self.assertEqual(row[3], "0x10000")
self.assertEqual(row[4], "1M")
# round trip back to a PartitionTable and check is identical
roundtrip = gen_esp32part.PartitionTable.from_csv(as_csv)
self.assertEqual(roundtrip, table)
class CommandLineTests(Py23TestCase):
def test_basic_cmdline(self):
try:
binpath = tempfile.mktemp()
csvpath = tempfile.mktemp()
# copy binary contents to temp file
with open(binpath, 'wb') as f:
f.write(LONGER_BINARY_TABLE)
# run gen_esp32part.py to convert binary file to CSV
output = subprocess.check_output([sys.executable, "../gen_esp32part.py",
binpath, csvpath], stderr=subprocess.STDOUT)
# reopen the CSV and check the generated binary is identical
self.assertNotIn(b"WARNING", output)
with open(csvpath, 'r') as f:
from_csv = gen_esp32part.PartitionTable.from_csv(f.read())
self.assertEqual(_strip_trailing_ffs(from_csv.to_binary()), LONGER_BINARY_TABLE)
# run gen_esp32part.py to conver the CSV to binary again
output = subprocess.check_output([sys.executable, "../gen_esp32part.py",
csvpath, binpath], stderr=subprocess.STDOUT)
self.assertNotIn(b"WARNING", output)
# assert that file reads back as identical
with open(binpath, 'rb') as f:
binary_readback = f.read()
binary_readback = _strip_trailing_ffs(binary_readback)
self.assertEqual(binary_readback, LONGER_BINARY_TABLE)
finally:
for path in binpath, csvpath:
try:
os.remove(path)
except OSError:
pass
class VerificationTests(Py23TestCase):
def test_bad_alignment(self):
csv = """
# Name,Type, SubType,Offset,Size
app,app, factory, 32K, 1M
"""
with self.assertRaisesRegex(gen_esp32part.ValidationError, r"Offset.+not aligned"):
t = gen_esp32part.PartitionTable.from_csv(csv)
t.verify()
def test_warnings(self):
try:
sys.stderr = io.StringIO() # capture stderr
csv_1 = "app, 1, 2, 32K, 1M\n"
gen_esp32part.PartitionTable.from_csv(csv_1).verify()
self.assertIn("WARNING", sys.stderr.getvalue())
self.assertIn("partition type", sys.stderr.getvalue())
sys.stderr = io.StringIO()
csv_2 = "ota_0, app, ota_1, , 1M\n"
gen_esp32part.PartitionTable.from_csv(csv_2).verify()
self.assertIn("WARNING", sys.stderr.getvalue())
self.assertIn("partition subtype", sys.stderr.getvalue())
finally:
sys.stderr = sys.__stderr__
class PartToolTests(Py23TestCase):
def _run_parttool(self, csvcontents, args, info):
csvpath = tempfile.mktemp()
with open(csvpath, "w") as f:
f.write(csvcontents)
try:
output = subprocess.check_output([sys.executable, "../parttool.py"] + args.split(" ")
+ ["--partition-table-file", csvpath, "get_partition_info", "--info", info],
stderr=subprocess.STDOUT)
self.assertNotIn(b"WARNING", output)
m = re.search(b"0x[0-9a-fA-F]+", output)
return m.group(0) if m else ""
finally:
os.remove(csvpath)
def test_find_basic(self):
csv = """
nvs, data, nvs, 0x9000, 0x4000
otadata, data, ota, 0xd000, 0x2000
phy_init, data, phy, 0xf000, 0x1000
factory, app, factory, 0x10000, 1M
"""
def rpt(args, info):
return self._run_parttool(csv, args, info)
self.assertEqual(
rpt("--partition-type=data --partition-subtype=nvs -q", "offset"), b"0x9000")
self.assertEqual(
rpt("--partition-type=data --partition-subtype=nvs -q", "size"), b"0x4000")
self.assertEqual(
rpt("--partition-name=otadata -q", "offset"), b"0xd000")
self.assertEqual(
rpt("--partition-boot-default -q", "offset"), b"0x10000")
def test_fallback(self):
csv = """
nvs, data, nvs, 0x9000, 0x4000
otadata, data, ota, 0xd000, 0x2000
phy_init, data, phy, 0xf000, 0x1000
ota_0, app, ota_0, 0x30000, 1M
ota_1, app, ota_1, , 1M
"""
def rpt(args, info):
return self._run_parttool(csv, args, info)
self.assertEqual(
rpt("--partition-type=app --partition-subtype=ota_1 -q", "offset"), b"0x130000")
self.assertEqual(
rpt("--partition-boot-default -q", "offset"), b"0x30000") # ota_0
csv_mod = csv.replace("ota_0", "ota_2")
self.assertEqual(
self._run_parttool(csv_mod, "--partition-boot-default -q", "offset"),
b"0x130000") # now default is ota_1
if __name__ == "__main__":
unittest.main()
| apache-2.0 | 5,730,980,972,176,826,000 | 35.097192 | 121 | 0.585891 | false |
moxgreen/partial_corr.py | partial_corr.py | 1 | 3301 | #!/usr/bin/env python
from sys import stdin, stderr
from optparse import OptionParser
import numpy as np
from scipy import stats, linalg
"""
Partial Correlation in Python (clone of Matlab's partialcorr)
This uses the linear regression approach to compute the partial
correlation (might be slow for a huge number of variables). The
algorithm is detailed here:
http://en.wikipedia.org/wiki/Partial_correlation#Using_linear_regression
Taking X and Y two variables of interest and Z the matrix with all the variable minus {X, Y},
the algorithm can be summarized as
1) perform a normal linear least-squares regression with X as the target and Z as the predictor
2) calculate the residuals in Step #1
3) perform a normal linear least-squares regression with Y as the target and Z as the predictor
4) calculate the residuals in Step #3
5) calculate the correlation coefficient between the residuals from Steps #2 and #4;
The result is the partial correlation between X and Y while controlling for the effect of Z
Date: Nov 2014
Author: Fabian Pedregosa-Izquierdo, [email protected]
Testing: Valentina Borghesani, [email protected]
Date: March 2015:
Modified by: Ivan Molineris, [email protected]
"""
def partial_corr(C):
"""
Returns the sample linear partial correlation coefficients between pairs of variables in C, controlling
for the remaining variables in C.
Parameters
----------
C : array-like, shape (n, p)
Array with the different variables. Each column of C is taken as a variable
Returns
-------
P : array-like, shape (p, p)
P[i, j] contains the partial correlation of C[:, i] and C[:, j] controlling
for the remaining variables in C.
"""
C = np.asarray(C)
p = C.shape[1]
P_corr = np.zeros((p, p), dtype=np.float)
for i in range(p):
P_corr[i, i] = 1
for j in range(i+1, p):
idx = np.ones(p, dtype=np.bool)
idx[i] = False
idx[j] = False
beta_i = linalg.lstsq(C[:, idx], C[:, j])[0]
beta_j = linalg.lstsq(C[:, idx], C[:, i])[0]
res_j = C[:, j] - C[:, idx].dot( beta_i)
res_i = C[:, i] - C[:, idx].dot(beta_j)
corr = stats.pearsonr(res_i, res_j)[0]
P_corr[i, j] = corr
P_corr[j, i] = corr
return P_corr
def main():
usage = '''%prog < STDIN
Returns the sample linear partial correlation coefficients between pairs of rows in the STDIN, controlling
for the remaining variables in STDIN.
The first column of each row of the input matrix is intended as row_id
'''
parser = OptionParser(usage=usage)
options, args = parser.parse_args()
if len(args) != 0:
exit('Unexpected argument number.')
cols_len=None
matrix=[]
row_ids=[]
for line in stdin:
cols = line.rstrip().split('\t')
row_ids.append(cols.pop(0))
cols = [float(c) for c in cols]
if cols_len is None:
cols_len = len(cols)
assert cols_len == len(cols)
matrix.append(cols)
matrix = np.asarray(matrix)
matrix = matrix.T
C=partial_corr(matrix)
for i,k in enumerate(row_ids):
for j,l in enumerate(row_ids):
if j>i:
print row_ids[i], row_ids[j], C[i,j]
if __name__ == '__main__':
main()
| agpl-3.0 | 8,374,982,293,936,116,000 | 27.704348 | 108 | 0.646471 | false |
htcondor/htcondor | src/condor_contrib/condor_pigeon/src/condor_pigeon_client/skype_linux_tools/Skype4Py/utils.py | 1 | 17863 | '''Utility functions and classes used internally by Skype4Py.
'''
import sys
import weakref
import threading
from new import instancemethod
def chop(s, n=1, d=None):
'''Chops initial words from a string and returns a list of them and the rest of the string.
@param s: String to chop from.
@type s: str or unicode
@param n: Number of words to chop.
@type n: int
    @param d: Optional delimiter. Any whitespace character by default.
@type d: str or unicode
    @return: A list of the first n words from the string followed by the rest of the string
(C{[w1, w2, ..., wn, rest_of_string]}).
@rtype: list of str or unicode
'''
spl = s.split(d, n)
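    # appending s[:0] (an empty slice of s) keeps the padding element the same
    # string type (str or unicode) as the input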
if len(spl) == n:
spl.append(s[:0])
if len(spl) != n + 1:
raise ValueError('chop: Could not chop %d words from \'%s\'' % (n, s))
return spl
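# Illustrative usage of chop() (example added here; not part of the original module):
#
#     >>> chop('set foo bar baz', 2)
#     ['set', 'foo', 'bar baz']
#     >>> chop('key=value', 1, '=')
#     ['key', 'value']
#     >>> chop('single', 1)
#     ['single', '']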
def args2dict(s):
'''Converts a string in 'ARG="value", ARG2="value2"' format into a dictionary.
@param s: Input string with comma-separated 'ARG="value"' strings.
@type s: str or unicode
@return: C{{'ARG': 'value'}} dictionary.
@rtype: dict
'''
d = {}
while s:
t, s = chop(s, 1, '=')
if s.startswith('"'):
i = 0
while True:
i = s.find('"', i+1)
# XXX How are the double-quotes escaped? The code below implements VisualBasic technique.
try:
if s[i+1] != '"':
break
else:
i += 1
except IndexError:
break
if i > 0:
d[t] = s[1:i]
                # skip the ', ' separator that follows a quoted value so the
                # next key is not prefixed with it
                if s[i+1:i+3] == ', ':
                    i += 2
                s = s[i+1:]
else:
d[t] = s
break
else:
i = s.find(', ')
if i >= 0:
d[t] = s[:i]
s = s[i+2:]
else:
d[t] = s
break
return d
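# Illustrative usage of args2dict() (example added here; not part of the original
# module). The input mimics the documented 'ARG="value", ARG2="value2"' notation;
# the actual strings come from the Skype API:
#
#     >>> d = args2dict('NAME="Echo / Sound Test Service", STATUS=ONLINE')
#     >>> d['NAME']
#     'Echo / Sound Test Service'
#     >>> d['STATUS']
#     'ONLINE'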
def quote(s, always=False):
'''Adds double-quotes to string if needed.
@param s: String to add double-quotes to.
@type s: str or unicode
@param always: If True, adds quotes even if the input string contains no spaces.
@type always: bool
@return: If the given string contains spaces or always=True, returns the string enclosed
in double-quotes (if it contained quotes too, they are preceded with a backslash).
    Otherwise returns the string unchanged.
@rtype: str or unicode
'''
if always or ' ' in s:
return '"%s"' % s.replace('"', '\\"')
return s
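# Illustrative usage of quote() (example added here; not part of the original module):
#
#     >>> quote('echo123')
#     'echo123'
#     >>> quote('Echo / Sound Test Service')
#     '"Echo / Sound Test Service"'
#     >>> quote('echo123', always=True)
#     '"echo123"'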
def esplit(s, d=None):
'''Splits a string into words.
@param s: String to split.
@type s: str or unicode
    @param d: Optional delimiter. Any whitespace character by default.
@type d: str or unicode
@return: A list of words or C{[]} if the string was empty.
@rtype: list of str or unicode
@note: This function works like C{s.split(d)} except that it always returns an
empty list instead of C{['']} for empty strings.
'''
if s:
return s.split(d)
return []
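# Illustrative usage of esplit() (example added here; not part of the original module):
#
#     >>> esplit('alice bob carol')
#     ['alice', 'bob', 'carol']
#     >>> esplit('')
#     []
#     >>> esplit('alice, bob', ', ')
#     ['alice', 'bob']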
def cndexp(condition, truevalue, falsevalue):
'''Simulates a conditional expression known from C or Python 2.5+.
@param condition: Boolean value telling what should be returned.
@type condition: bool, see note
@param truevalue: Value returned if condition was True.
@type truevalue: any
@param falsevalue: Value returned if condition was False.
@type falsevalue: any
@return: Either truevalue or falsevalue depending on condition.
@rtype: same as type of truevalue or falsevalue
@note: The type of condition parameter can be anything as long as
C{bool(condition)} returns a bool value.
'''
if condition:
return truevalue
return falsevalue
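# Illustrative usage of cndexp() (example added here; not part of the original
# module); it behaves like the Python 2.5+ expression
# 'truevalue if condition else falsevalue':
#
#     >>> cndexp(3 > 1, 'big', 'small')
#     'big'
#     >>> cndexp([], 'non-empty', 'empty')
#     'empty'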
class _WeakMethod(object):
'''Helper class for WeakCallableRef function (see below).
Don't use directly.
'''
def __init__(self, method, callback=None):
'''__init__.
@param method: Method to be referenced.
@type method: method
@param callback: Callback to be called when the method is collected.
@type callback: callable
'''
self.im_func = method.im_func
try:
self.weak_im_self = weakref.ref(method.im_self, self._dies)
except TypeError:
self.weak_im_self = None
self.im_class = method.im_class
self.callback = callback
def __call__(self):
if self.weak_im_self:
im_self = self.weak_im_self()
if im_self is None:
return None
else:
im_self = None
return instancemethod(self.im_func, im_self, self.im_class)
def __repr__(self):
obj = self()
objrepr = repr(obj)
if obj is None:
objrepr = 'dead'
return '<weakref at 0x%x; %s>' % (id(self), objrepr)
def _dies(self, ref):
# weakref to im_self died
self.im_func = self.im_class = None
if self.callback is not None:
self.callback(self)
def WeakCallableRef(c, callback=None):
'''Creates and returns a new weak reference to a callable object.
    In contrast to weakref.ref(), it works on all kinds of callables, including
    bound methods. Usage is the same as for weakref.ref().
@param c: A callable that the weak reference should point at.
@type c: callable
@param callback: Callback called when the callable is collected (freed).
@type callback: callable
@return: A weak callable reference.
@rtype: weakref
'''
try:
return _WeakMethod(c, callback)
except AttributeError:
return weakref.ref(c, callback)
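# Illustrative usage of WeakCallableRef() (sketch added here; not part of the
# original module). 'Receiver' is a hypothetical class. Unlike weakref.ref(),
# bound methods can be referenced; the reference dies with the method's instance:
#
#     class Receiver(object):
#         def handler(self):
#             pass
#
#     r = Receiver()
#     ref = WeakCallableRef(r.handler)
#     ref()     # -> a rebound 'handler' method as long as 'r' is alive
#     del r
#     ref()     # -> None once the instance has been garbage collected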
class _EventHandlingThread(threading.Thread):
def __init__(self, name=None):
'''__init__.
        @param name: Name of the event whose handlers this thread will call; used to label the thread.
@type name: unicode
'''
threading.Thread.__init__(self, name='%s event handler' % name)
self.setDaemon(False)
self.lock = threading.Lock()
self.queue = []
def enqueue(self, target, args, kwargs):
'''enqueue.
@param target: Callable to be called.
@type target: callable
@param args: Positional arguments for the callable.
@type args: tuple
@param kwargs: Keyword arguments for the callable.
@type kwargs: dict
'''
self.queue.append((target, args, kwargs))
def run(self):
'''Executes all enqueued targets.
'''
while True:
try:
try:
self.lock.acquire()
h = self.queue[0]
del self.queue[0]
except IndexError:
break
finally:
self.lock.release()
h[0](*h[1], **h[2])
class EventHandlingBase(object):
'''This class is used as a base by all classes implementing event handlers.
Look at known subclasses (above in epydoc) to see which classes will allow you to
    attach your own callables (event handlers) to certain events occurring in them.
Read the respective classes documentations to learn what events are provided by them. The
events are always defined in a class whose name consist of the name of the class it provides
events for followed by C{Events}). For example class L{ISkype} provides events defined in
L{ISkypeEvents}. The events class is always defined in the same submodule as the main class.
The events class is just informative. It tells you what events you can assign your event
handlers to, when do they occur and what arguments lists should your event handlers
accept.
There are three ways of attaching an event handler to an event.
1. C{Events} object.
Use this method if you need to attach many event handlers to many events.
Write your event handlers as methods of a class. The superclass of your class
    doesn't matter; Skype4Py will just look for methods with appropriate names.
The names of the methods and their arguments lists can be found in respective
events classes (see above).
Pass an instance of this class as the C{Events} argument to the constructor of
a class whose events you are interested in. For example::
import Skype4Py
class MySkypeEvents:
def UserStatus(self, Status):
print 'The status of the user changed'
skype = Skype4Py.Skype(Events=MySkypeEvents())
The C{UserStatus} method will be called when the status of the user currently logged
into skype is changed.
2. C{On...} properties.
This method lets you use any callables as event handlers. Simply assign them to C{On...}
properties (where "C{...}" is the name of the event) of the object whose events you are
interested in. For example::
import Skype4Py
def user_status(Status):
print 'The status of the user changed'
skype = Skype4Py.Skype()
skype.OnUserStatus = user_status
The C{user_status} function will be called when the status of the user currently logged
into skype is changed.
The names of the events and their arguments lists should be taken from respective events
classes (see above). Note that there is no C{self} argument (which can be seen in the events
classes) simply because our event handler is a function, not a method.
3. C{RegisterEventHandler} / C{UnregisterEventHandler} methods.
    This method, like the second one, also lets you use any callables as event handlers. However,
    it additionally lets you assign many event handlers to a single event.
In this case, you use L{RegisterEventHandler} and L{UnregisterEventHandler} methods
of the object whose events you are interested in. For example::
import Skype4Py
def user_status(Status):
print 'The status of the user changed'
skype = Skype4Py.Skype()
skype.RegisterEventHandler('UserStatus', user_status)
The C{user_status} function will be called when the status of the user currently logged
into skype is changed.
The names of the events and their arguments lists should be taken from respective events
classes (see above). Note that there is no C{self} argument (which can be seen in the events
classes) simply because our event handler is a function, not a method.
B{Important notes!}
The event handlers are always called on a separate thread. At any given time, there is at most
one handling thread per event type. This means that when a lot of events of the same type are
generated at once, handling of an event will start only after the previous one is handled.
Handling of events of different types may happen simultaneously.
In case of second and third method, only weak references to the event handlers are stored. This
means that you must make sure that Skype4Py is not the only one having a reference to the callable
or else it will be garbage collected and silently removed from Skype4Py's handlers list. On the
other hand, it frees you from worrying about cyclic references.
'''
_EventNames = []
def __init__(self):
'''Initializes the object.
'''
self._EventHandlerObj = None
self._DefaultEventHandlers = {}
self._EventHandlers = {}
self._EventThreads = {}
for event in self._EventNames:
self._EventHandlers[event] = []
def _CallEventHandler(self, Event, *args, **kwargs):
        '''Calls all event handlers defined for the given Event (str). Additional parameters
        are passed unchanged to the event handlers. All event handlers are fired on
        separate threads.
'''
# get list of relevant handlers
handlers = dict([(x, x()) for x in self._EventHandlers[Event]])
if None in handlers.values():
# cleanup
self._EventHandlers[Event] = list([x[0] for x in handlers.items() if x[1] is not None])
handlers = filter(None, handlers.values())
# try the On... handlers
try:
h = self._DefaultEventHandlers[Event]()
if h:
handlers.append(h)
except KeyError:
pass
# try the object handlers
try:
handlers.append(getattr(self._EventHandlerObj, Event))
except AttributeError:
pass
# if no handlers, leave
if not handlers:
return
# initialize event handling thread if needed
if Event in self._EventThreads:
t = self._EventThreads[Event]
t.lock.acquire()
if not self._EventThreads[Event].isAlive():
t = self._EventThreads[Event] = _EventHandlingThread(Event)
else:
t = self._EventThreads[Event] = _EventHandlingThread(Event)
# enqueue handlers in thread
for h in handlers:
t.enqueue(h, args, kwargs)
# start serial event processing
try:
t.lock.release()
except:
t.start()
def RegisterEventHandler(self, Event, Target):
'''Registers any callable as an event handler.
@param Event: Name of the event. For event names, see the respective C{...Events} class.
@type Event: str
@param Target: Callable to register as the event handler.
@type Target: callable
        @return: True if the callable was successfully registered, False if it was already registered.
@rtype: bool
@see: L{EventHandlingBase}
'''
if not callable(Target):
raise TypeError('%s is not callable' % repr(Target))
if Event not in self._EventHandlers:
raise ValueError('%s is not a valid %s event name' % (Event, self.__class__.__name__))
# get list of relevant handlers
handlers = dict([(x, x()) for x in self._EventHandlers[Event]])
if None in handlers.values():
# cleanup
self._EventHandlers[Event] = list([x[0] for x in handlers.items() if x[1] is not None])
if Target in handlers.values():
return False
self._EventHandlers[Event].append(WeakCallableRef(Target))
return True
def UnregisterEventHandler(self, Event, Target):
'''Unregisters a previously registered event handler (a callable).
@param Event: Name of the event. For event names, see the respective C{...Events} class.
@type Event: str
@param Target: Callable to unregister.
@type Target: callable
@return: True if callable was successfully unregistered, False if it wasn't registered first.
@rtype: bool
@see: L{EventHandlingBase}
'''
if not callable(Target):
raise TypeError('%s is not callable' % repr(Target))
if Event not in self._EventHandlers:
raise ValueError('%s is not a valid %s event name' % (Event, self.__class__.__name__))
# get list of relevant handlers
handlers = dict([(x, x()) for x in self._EventHandlers[Event]])
if None in handlers.values():
# cleanup
self._EventHandlers[Event] = list([x[0] for x in handlers.items() if x[1] is not None])
for wref, trg in handlers.items():
if trg == Target:
self._EventHandlers[Event].remove(wref)
return True
return False
def _SetDefaultEventHandler(self, Event, Target):
if Target:
if not callable(Target):
raise TypeError('%s is not callable' % repr(Target))
self._DefaultEventHandlers[Event] = WeakCallableRef(Target)
else:
try:
del self._DefaultEventHandlers[Event]
except KeyError:
pass
def _GetDefaultEventHandler(self, Event):
try:
return self._DefaultEventHandlers[Event]()
except KeyError:
pass
def _SetEventHandlerObj(self, Obj):
        '''Registers an object (Obj) as the event handler. The object should contain methods with
        names corresponding to event names; only one such object is allowed at a time.
'''
self._EventHandlerObj = Obj
@staticmethod
def __AddEvents_make_event(Event):
# TODO: rework to make compatible with cython
return property(lambda self: self._GetDefaultEventHandler(Event),
lambda self, value: self._SetDefaultEventHandler(Event, value))
@classmethod
def _AddEvents(cls, klass):
'''Adds events to class based on 'klass' attributes.'''
for event in dir(klass):
if not event.startswith('_'):
setattr(cls, 'On%s' % event, cls.__AddEvents_make_event(event))
cls._EventNames.append(event)
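# Hedged usage sketch (added for illustration): how a subclass would wire its
# events through _AddEvents. The names _ExampleEvents, _Example and Tick are
# made up here and are not part of Skype4Py's public API.
class _ExampleEvents(object):
    def Tick(self, Count):
        '''Fired by the hypothetical _Example class on every tick.'''
class _Example(EventHandlingBase):
    _EventNames = []    # each event-emitting class keeps its own name list
_Example._AddEvents(_ExampleEvents)
# _Example now exposes an OnTick property and accepts
# RegisterEventHandler('Tick', some_callable); the class itself would fire the
# event internally via self._CallEventHandler('Tick', count).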
class Cached(object):
'''Base class for all cached objects.
Every object is identified by an Id specified as first parameter of the constructor.
Trying to create two objects with same Id yields the same object. Uses weak references
to allow the objects to be deleted normally.
@warning: C{__init__()} is always called, don't use it to prevent initializing an already
initialized object. Use C{_Init()} instead, it is called only once.
'''
_cache_ = weakref.WeakValueDictionary()
def __new__(cls, Id, *args, **kwargs):
h = cls, Id
try:
return cls._cache_[h]
except KeyError:
o = object.__new__(cls)
cls._cache_[h] = o
if hasattr(o, '_Init'):
o._Init(Id, *args, **kwargs)
return o
def __copy__(self):
return self
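# Hedged usage sketch (added for illustration): constructing two objects with
# the same Id yields the very same instance; _Init runs only on the first one.
def _cached_example():
    class _Thing(Cached):
        def _Init(self, Id):
            self.Id = Id
    a = _Thing(42)
    b = _Thing(42)
    assert a is b
    assert _Thing(7) is not a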
| apache-2.0 | -8,032,516,272,118,471,000 | 34.869478 | 105 | 0.60477 | false |
bhrzslm/uncertainty-reasoning | my_engine/others/pbnt/examples/ExampleModels.py | 1 | 3873 | # PBNT: Python Bayes Network Toolbox
#
# Copyright (c) 2005, Elliot Cohen
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * The name "Elliot Cohen" may not be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#!/usr/bin/env python
import sys
from numarray import *
sys.path.append('../lib')
import numarray.objects as obj
from pbnt.Graph import *
from pbnt.Distribution import *
from pbnt.Node import *
def water():
""" This is an example of how to implement the basic water network (4 nodes, cloudy, sprinkler, rain, and wetgrass. sprinkler and rain are children of cloudy, and wetgrass is a child of both sprinkler and rain).
"""
#testing basic bayes net class implementation
numberOfNodes = 4
#name the nodes
cloudy = 0
sprinkler = 1
rain = 2
wetgrass = 3
cNode = BayesNode(0, 2, name="cloudy")
sNode = BayesNode(1, 2, name="sprinkler")
rNode = BayesNode(2, 2, name="rain")
wNode = BayesNode(3, 2, name="wetgrass")
#cloudy
cNode.add_child(sNode)
cNode.add_child(rNode)
#sprinkler
sNode.add_parent(cNode)
sNode.add_child(wNode)
#rain
rNode.add_parent(cNode)
rNode.add_child(wNode)
#wetgrass
wNode.add_parent(sNode)
wNode.add_parent(rNode)
nodes = [cNode, sNode, rNode, wNode]
#create distributions
#cloudy distribution
cDistribution = DiscreteDistribution(cNode)
index = cDistribution.generate_index([],[])
cDistribution[index] = 0.5
cNode.set_dist(cDistribution)
#sprinkler
dist = zeros([cNode.size(),sNode.size()], type=Float32)
dist[0,] = 0.5
dist[1,] = [0.9,0.1]
sDistribution = ConditionalDiscreteDistribution(nodes=[cNode, sNode], table=dist)
sNode.set_dist(sDistribution)
#rain
dist = zeros([cNode.size(), rNode.size()], type=Float32)
dist[0,] = [0.8,0.2]
dist[1,] = [0.2,0.8]
rDistribution = ConditionalDiscreteDistribution(nodes=[cNode, rNode], table=dist)
rNode.set_dist(rDistribution)
#wetgrass
dist = zeros([sNode.size(), rNode.size(), wNode.size()], type=Float32)
dist[0,0,] = [1.0,0.0]
dist[1,0,] = [0.1,0.9]
dist[0,1,] = [0.1,0.9]
dist[1,1,] = [0.01,0.99]
wgDistribution = ConditionalDiscreteDistribution(nodes=[sNode, rNode, wNode], table=dist)
wNode.set_dist(wgDistribution)
#create bayes net
bnet = BayesNet(nodes)
return bnet
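if __name__ == '__main__':
    # Hedged usage sketch (added for illustration): build the example network.
    # Running queries against it would additionally need an inference engine
    # from pbnt, which this file does not import, so only construction is shown.
    bnet = water()
    print "water() built:", bnet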
| mit | 3,967,815,240,085,659,600 | 32.580357 | 216 | 0.67338 | false |
ychab/mymoney-server | mymoney/core/tests/test_views.py | 1 | 2328 | from django.test import override_settings
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase
from mymoney.transactions.models import Transaction
from ..factories import UserFactory
class ConfigAPITestCase(APITestCase):
@classmethod
def setUpTestData(cls):
cls.user = UserFactory()
cls.url = reverse('config')
def test_access_anonymous(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 401)
def test_access_authenticated(self):
self.client.force_authenticate(self.user)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
def test_currencies(self):
self.client.force_authenticate(self.user)
response = self.client.get(self.url)
self.assertIn('EUR', response.data['currencies'])
self.assertIn('USD', response.data['currencies'])
@override_settings(LANGUAGE_CODE='fr-fr')
def test_currencies_localize(self):
self.client.force_authenticate(self.user)
response = self.client.get(self.url)
self.assertIn(response.data['currencies']['EUR'], 'Euro')
self.assertIn(response.data['currencies']['USD'], 'US Dollar')
def test_payment_methods(self):
self.client.force_authenticate(self.user)
response = self.client.get(self.url)
self.assertIn(
Transaction.PAYMENT_METHOD_CASH,
response.data['payment_methods'],
)
@override_settings(LANGUAGE_CODE='fr-fr')
def test_payment_methods_localize(self):
self.client.force_authenticate(self.user)
response = self.client.get(self.url)
self.assertEqual(
response.data['payment_methods'][Transaction.PAYMENT_METHOD_CASH],
'Espèce',
)
def test_statuses(self):
self.client.force_authenticate(self.user)
response = self.client.get(self.url)
self.assertIn(Transaction.STATUS_IGNORED, response.data['statuses'])
@override_settings(LANGUAGE_CODE='fr-fr')
def test_statuses_localize(self):
self.client.force_authenticate(self.user)
response = self.client.get(self.url)
self.assertEqual(
response.data['statuses'][Transaction.STATUS_IGNORED], 'Ignoré')
| bsd-3-clause | -8,764,923,689,607,414,000 | 33.716418 | 78 | 0.66896 | false |
fungos/gemuo | src/gemuo/data.py | 1 | 8601 | #
# GemUO
#
# (c) 2005-2012 Max Kellermann <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# Loader for the UO client data files.
import os
import struct
FLAG_IMPASSABLE = 0x40
FLAG_SURFACE = 0x200
class TileData:
def __init__(self, path):
f = file(path)
# detect file format
f.seek(36)
x = f.read(20).rstrip('\0')
f.seek(0)
if x.find('\0') == -1:
# old file format
read_flags = lambda f: struct.unpack('<I', f.read(4))[0]
item_count = 0x200
else:
# new file format (>= 7.0)
read_flags = lambda f: struct.unpack('<Q', f.read(8))[0]
item_count = 0x400
self.land_flags = []
for a in range(0x200):
f.seek(4, 1)
for b in range(0x20):
self.land_flags.append(read_flags(f))
f.seek(22, 1) # skip texture and name
assert len(self.land_flags) == 0x4000
self.item_flags = []
for a in range(item_count):
f.seek(4, 1)
for b in range(0x20):
self.item_flags.append(read_flags(f))
f.seek(33, 1)
assert len(self.item_flags) == item_count * 0x20
def land_passable(self, id):
assert id >= 0 and id < len(self.land_flags)
return (self.land_flags[id] & FLAG_IMPASSABLE) == 0
def item_passable(self, id):
assert id >= 0 and id < len(self.item_flags)
return (self.item_flags[id] & FLAG_IMPASSABLE) == 0
def item_surface(self, id):
assert id >= 0 and id < len(self.item_flags)
return (self.item_flags[id] & FLAG_SURFACE) == 0
class LandBlock:
def __init__(self, data):
assert len(data) == 192
self.data = data
def get_id(self, x, y):
assert x >= 0 and x < 8
assert y >= 0 and y < 8
i = (y * 8 + x) * 3
return struct.unpack_from('<H', self.data, i)[0]
def get_height(self, x, y):
assert x >= 0 and x < 8
assert y >= 0 and y < 8
i = (y * 8 + x) * 3
return ord(self.data[i + 2])
class LandLoader:
def __init__(self, path, width, height):
self.file = file(path)
self.width = width
self.height = height
def load_block(self, x, y):
assert x >= 0 and x < self.width
assert y >= 0 and y < self.height
self.file.seek(((x * self.height) + y) * 196 + 4)
return LandBlock(self.file.read(192))
class IndexLoader:
def __init__(self, path, width, height):
self.file = file(path)
self.width = width
self.height = height
def load_block(self, x, y):
assert x >= 0 and x < self.width
assert y >= 0 and y < self.height
self.file.seek(((x * self.height) + y) * 12)
data = self.file.read(8)
offset, length = struct.unpack('<ii', data)
if offset < 0 or length <= 0:
return None, 0
return offset, length
class Static:
def __init__(self, id, x, y, z, hue=None):
self.id = id
self.x = x
self.y = y
self.z = z
self.hue = hue
class StaticsList:
def __init__(self, data):
self.data = data
self.passable = None # bit field, see _build_passable()
self.surface = None
def __iter__(self):
i = 0
while i < len(self.data):
id, x, y, z, hue = struct.unpack_from('<HBBbH', self.data, i)
yield id, x, y, z, hue
i += 7
def iter_at(self, x, y):
for id, ix, iy, z, hue in self:
if ix == x and iy == y:
yield id, z, hue
def _build_passable(self, tile_data):
# each of the 64 bits tells whether the position is passable
passable = 0xffffffffffffffffL
for id, x, y, z, hue in self:
if not tile_data.item_passable(id):
bit = x * 8 + y
passable &= ~(1 << bit)
self.passable = passable
def is_passable(self, tile_data, x, y, z):
if self.passable is None:
self._build_passable(tile_data)
bit = x * 8 + y
return (self.passable & (1 << bit)) != 0
def _build_surface(self, tile_data):
# each of the 64 bits tells whether the position is surface
surface = 0L
for id, x, y, z, hue in self:
if not tile_data.item_surface(id):
bit = x * 8 + y
surface |= 1 << bit
self.surface = surface
def is_surface(self, tile_data, x, y):
if self.surface is None:
self._build_surface(tile_data)
bit = x * 8 + y
return (self.surface & (1 << bit)) != 0
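def _block_bit_example():
    # Illustrative sketch (added): StaticsList packs one flag per cell of an
    # 8x8 block into a 64-bit integer, indexed by bit = x * 8 + y.
    passable = 0xffffffffffffffffL
    bit = 3 * 8 + 5                       # cell (x=3, y=5) -> bit 29
    passable &= ~(1 << bit)               # mark that cell impassable
    return (passable & (1 << bit)) != 0   # False: the cell is no longer passable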
class StaticsLoader:
def __init__(self, path):
self.file = file(path)
def load_block(self, offset, length):
self.file.seek(offset)
return StaticsList(self.file.read(length))
class StaticsGlue:
def __init__(self, index, static):
self.index = index
self.static = static
def load_block(self, x, y):
offset, length = self.index.load_block(x, y)
if length == 0: return None
return self.static.load_block(offset, length)
class MapGlue:
def __init__(self, tile_data, map_path, index_path, statics_path, width, height):
self.tile_data = tile_data
self.land = LandLoader(map_path, width, height)
self.statics = StaticsGlue(IndexLoader(index_path, width, height),
StaticsLoader(statics_path))
def land_tile_id(self, x, y):
block = self.land.load_block(x / 8, y / 8)
return block.get_id(x % 8, y % 8)
def land_tile_flags(self, x, y):
return self.tile_data.land_flags[self.land_tile_id(x, y)]
def land_tile_height(self, x, y):
block = self.land.load_block(x / 8, y / 8)
return block.get_height(x % 8, y % 8)
def statics_at(self, x, y):
block = self.statics.load_block(x / 8, y / 8)
if block is None: return iter(())
        return block.iter_at(x % 8, y % 8)
def is_passable(self, x, y, z):
statics = self.statics.load_block(x / 8, y / 8)
if statics is not None and not statics.is_passable(self.tile_data, x % 8, y % 8, z):
return False
# even if land is impassable, there may be statics that build
# a "surface" to walk on
block = self.land.load_block(x / 8, y / 8)
if not self.tile_data.land_passable(block.get_id(x % 8, y % 8)) and \
(statics is None or not statics.is_surface(self.tile_data, x % 8, y % 8)):
return False
#bz = block.get_height(x % 8, y % 8)
#if bz > z: return False
return True
def surface_at(self, x, y):
for id, z, hue in self.statics_at(x, y):
if self.tile_data.item_surface(id):
return Static(id, x, y, z, hue)
return None
def flush_cache(self):
# not implemented in this base class
pass
class BlockCache:
def __init__(self, loader):
self._loader = loader
self._cache = dict()
def load_block(self, x, y):
i = x * 65536 + y
try:
return self._cache[i]
except KeyError:
b = self._loader.load_block(x, y)
self._cache[i] = b
return b
class CachedMapGlue(MapGlue):
def __init__(self, *args, **keywords):
MapGlue.__init__(self, *args, **keywords)
self.land = BlockCache(self.land)
self.statics = BlockCache(self.statics)
class TileCache:
def __init__(self, path):
self._path = path
self._tile_data = TileData(os.path.join(self._path, 'tiledata.mul'))
self._maps = {}
def get_map(self, i):
if i in self._maps:
return self._maps[i]
m = CachedMapGlue(self._tile_data,
os.path.join(self._path, 'map%u.mul' % i),
os.path.join(self._path, 'staidx%u.mul' % i),
os.path.join(self._path, 'statics%u.mul' % i),
768, 512)
self._maps[i] = m
return m
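if __name__ == '__main__':
    # Hedged usage sketch (added for illustration): the data path below is an
    # assumption -- point it at a directory containing the UO *.mul files.
    cache = TileCache('/path/to/uo-client-files')
    felucca = cache.get_map(0)
    print felucca.land_tile_id(1000, 1000)
    print felucca.is_passable(1000, 1000, 0)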
| gpl-2.0 | -6,442,318,921,544,303,000 | 29.938849 | 92 | 0.537379 | false |
gonicus/gosa | backend/src/gosa/backend/objects/filter/strings.py | 1 | 10619 | # This file is part of the GOsa framework.
#
# http://gosa-project.org
#
# Copyright:
# (C) 2016 GONICUS GmbH, Germany, http://www.gonicus.de
#
# See the LICENSE file in the project's top-level directory for details.
import json
import logging
import re
from gosa.backend.objects.filter import ElementFilter
import datetime
from gosa.common.gjson import loads, dumps
class SplitString(ElementFilter):
"""
    Splits a string by the given separator.
=========== ===========================
Key Description
=========== ===========================
glue The separator string
=========== ===========================
e.g.:
>>> <FilterEntry>
>>> <Filter>
>>> <Name>SplitString</Name>
>>> <Param>,</Param>
>>> </Filter>
>>> </FilterEntry>
>>> ...
"""
def __init__(self, obj):
super(SplitString, self).__init__(obj)
def process(self, obj, key, valDict, glue=", "):
        if valDict[key]['value'] is not None and len(valDict[key]['value']):
tmp = valDict[key]['value'][0].split(glue)
new_val = [n for n in tmp if n != ""]
valDict[key]['value'] = new_val
return key, valDict
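# Hedged usage sketch (added for illustration): filters receive a value
# dictionary in which each attribute carries its current values as a list.
# The attribute name 'member' and the None passed for the object arguments
# are assumptions made only for this example.
def _split_string_example():
    valDict = {'member': {'value': ['a,b,,c']}}
    key, valDict = SplitString(None).process(None, 'member', valDict, glue=',')
    return valDict['member']['value']   # ['a', 'b', 'c'] -- empty chunks dropped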
class JoinArray(ElementFilter):
"""
Joins an array into a single string using the given separator
=========== ===========================
Key Description
=========== ===========================
glue The joining string
=========== ===========================
e.g.:
>>> <FilterEntry>
>>> <Filter>
>>> <Name>JoinArray</Name>
>>> <Param>,</Param>
>>> </Filter>
>>> </FilterEntry>
>>> ...
"""
def __init__(self, obj):
super(JoinArray, self).__init__(obj)
def process(self, obj, key, valDict, glue=", "):
        if valDict[key]['value'] is not None:
new_val = glue.join(valDict[key]['value'])
if not new_val:
valDict[key]['value'] = []
else:
valDict[key]['value'] = [new_val]
return key, valDict
class ConcatString(ElementFilter):
"""
Concatenate a string to the current value.
=========== ===========================
Key Description
=========== ===========================
appstr The string to concatenate
    position The side ('left' or 'right') on which to concatenate the string
=========== ===========================
e.g.:
>>> <FilterEntry>
>>> <Filter>
>>> <Name>ConcatString</Name>
>>> <Param>Hello Mr. </Param>
>>> <Param>left</Param>
>>> </Filter>
>>> </FilterEntry>
>>> ...
"""
def __init__(self, obj):
super(ConcatString, self).__init__(obj)
def process(self, obj, key, valDict, appstr, position):
        if valDict[key]['value'] is not None:
if position == "right":
new_val = list(map(lambda x: x + appstr, valDict[key]['value']))
else:
new_val = list(map(lambda x: appstr + x, valDict[key]['value']))
valDict[key]['value'] = new_val
return key, valDict
class Replace(ElementFilter):
"""
    Perform a replacement using a regular expression.
=========== ===========================
Key Description
=========== ===========================
regex The regular expression to use
replacement The replacement string
=========== ===========================
e.g.:
>>> <FilterEntry>
>>> <Filter>
>>> <Name>Replace</Name>
>>> <Param>^{([^}]*)}.*$</Param>
>>> <Param>Result: \1</Param>
>>> </Filter>
>>> </FilterEntry>
>>> ...
"""
def __init__(self, obj):
super(Replace, self).__init__(obj)
def process(self, obj, key, valDict, regex, replacement):
        if valDict[key]['value'] is not None:
valDict[key]['value'] = list(map(lambda x: re.sub(regex, str(replacement), x), valDict[key]['value']))
return key, valDict
class DateToString(ElementFilter):
"""
Converts a datetime object into a string.
=========== ===========================
Key Description
=========== ===========================
fmt The outgoing format string. E.g. '%Y%m%d%H%M%SZ'
=========== ===========================
e.g.:
>>> <FilterEntry>
>>> <Filter>
>>> <Name>DateToString</Name>
>>> <Param>%Y-%m-%d</Param>
>>> </Filter>
>>> </FilterEntry>
>>> ...
"""
def __init__(self, obj):
super(DateToString, self).__init__(obj)
def process(self, obj, key, valDict, fmt="%Y%m%d%H%M%SZ"):
        if valDict[key]['value'] is not None:
valDict[key]['value'] = list(map(lambda x: x.strftime(fmt), valDict[key]['value']))
return key, valDict
class TimeToString(DateToString):
"""
Converts a datetime object into a string.
=========== ===========================
Key Description
=========== ===========================
fmt The outgoing format string. E.g. '%Y%m%d%H%M%SZ'
=========== ===========================
e.g.:
>>> <FilterEntry>
>>> <Filter>
    >>> <Name>TimeToString</Name>
>>> <Param>%Y-%m-%d</Param>
>>> </Filter>
>>> </FilterEntry>
>>> ...
"""
def __init__(self, obj):
super(TimeToString, self).__init__(obj)
class StringToDate(ElementFilter):
"""
    Converts a string object into a datetime.date object.
=========== ===========================
Key Description
=========== ===========================
fmt The format string. E.g. '%Y%m%d%H%M%SZ'
=========== ===========================
e.g.:
>>> <FilterEntry>
>>> <Filter>
>>> <Name>StringToDate</Name>
>>> <Param>%Y-%m-%d</Param>
>>> </Filter>
>>> </FilterEntry>
>>> ...
"""
def __init__(self, obj):
super(StringToDate, self).__init__(obj)
def process(self, obj, key, valDict, fmt="%Y%m%d%H%M%SZ"):
        if valDict[key]['value'] is not None:
valDict[key]['value'] = list(map(lambda x: datetime.datetime.strptime(x, fmt).date(), valDict[key]['value']))
return key, valDict
class StringToTime(ElementFilter):
"""
    Converts a string object into a datetime.datetime object.
=========== ===========================
Key Description
=========== ===========================
fmt The format string. E.g. '%Y%m%d%H%M%SZ'
=========== ===========================
e.g.:
>>> <FilterEntry>
>>> <Filter>
>>> <Name>StringToTime</Name>
>>> <Param>%Y%m%d%H%M%SZ</Param>
>>> </Filter>
>>> </FilterEntry>
>>> ...
"""
def __init__(self, obj):
super(StringToTime, self).__init__(obj)
def process(self, obj, key, valDict, fmt="%Y%m%d%H%M%SZ"):
        if valDict[key]['value'] is not None:
valDict[key]['value'] = list(map(lambda x: datetime.datetime.strptime(x, fmt), valDict[key]['value']))
return key, valDict
class IdnaToUnicode(ElementFilter):
"""
    Converts an idna object into a unicode object.
e.g.:
>>> <FilterEntry>
>>> <Filter>
>>> <Name>IdnaToUnicode</Name>
>>> </Filter>
>>> </FilterEntry>
>>> ...
"""
def __init__(self, obj):
super(IdnaToUnicode, self).__init__(obj)
def process(self, obj, key, valDict):
valDict[key]['value'] = list(map(lambda x: x.encode('ascii').decode('idna'), valDict[key]['value']))
valDict[key]['backend_type'] = 'UnicodeString'
return key, valDict
class UnicodeToIdna(ElementFilter):
"""
    Converts a unicode object into an idna value.
e.g.:
>>> <FilterEntry>
>>> <Filter>
>>> <Name>UnicodeToIdna</Name>
>>> </Filter>
>>> </FilterEntry>
>>> ...
"""
def __init__(self, obj):
super(UnicodeToIdna, self).__init__(obj)
def process(self, obj, key, valDict):
valDict[key]['value'] = list(map(lambda x: x.encode('idna'), valDict[key]['value']))
valDict[key]['backend_type'] = 'String'
return key, valDict
class StringToJson(ElementFilter):
"""
Parses a string with the json parser.
e.g.:
>>> <FilterEntry>
>>> <Filter>
>>> <Name>StringToJson</Name>
>>> </Filter>
>>> </FilterEntry>
>>> ...
"""
def __init__(self, obj):
super(StringToJson, self).__init__(obj)
self.log = logging.getLogger(__name__)
def process(self, obj, key, valDict):
        if valDict[key]['value'] is not None:
try:
valDict[key]['value'] = list(map(lambda x: loads(x), valDict[key]['value']))
except json.decoder.JSONDecodeError as e:
self.log.error("invalid JSON value property %s [DN=%s]: %s" % (key, obj.dn if obj is not None else '', valDict[key]['value']))
return key, valDict
class JsonToString(ElementFilter):
"""
Serializes an object to a json string.
e.g.:
>>> <FilterEntry>
>>> <Filter>
>>> <Name>JsonToString</Name>
>>> </Filter>
>>> </FilterEntry>
>>> ...
"""
def __init__(self, obj):
super(JsonToString, self).__init__(obj)
def process(self, obj, key, valDict):
        if valDict[key]['value'] is not None:
valDict[key]['value'] = list(map(lambda x: dumps(x), valDict[key]['value']))
return key, valDict
class IntegerToString(ElementFilter):
"""
Converts a integer into a string.
e.g.:
>>> <FilterEntry>
>>> <Filter>
>>> <Name>IntegerToString</Name>
>>> </Filter>
>>> </FilterEntry>
>>> ...
"""
def __init__(self, obj):
super(IntegerToString, self).__init__(obj)
def process(self, obj, key, valDict):
        if valDict[key]['value'] is not None:
valDict[key]['value'] = [str(i) for i in valDict[key]['value']]
return key, valDict
class StringToInteger(ElementFilter):
"""
Converts a string into an integer.
e.g.:
>>> <FilterEntry>
>>> <Filter>
>>> <Name>StringToInteger</Name>
>>> </Filter>
>>> </FilterEntry>
>>> ...
"""
def __init__(self, obj):
super(StringToInteger, self).__init__(obj)
def process(self, obj, key, valDict):
        if valDict[key]['value'] is not None:
valDict[key]['value'] = [int(i) for i in valDict[key]['value']]
return key, valDict | lgpl-2.1 | -5,871,137,458,832,482,000 | 26.371134 | 142 | 0.486957 | false |