repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
nathanbjenx/cairis | cairis/test/test_UseCaseAPI.py | 1 | 14168 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import sys
if (sys.version_info > (3,)):
from urllib.parse import quote
else:
from urllib import quote
import jsonpickle
from cairis.core.UseCase import UseCase
from cairis.core.Trace import Trace
from cairis.tools.JsonConverter import json_deserialize
from cairis.core.UseCaseEnvironmentProperties import UseCaseEnvironmentProperties
from cairis.test.CairisDaemonTestCase import CairisDaemonTestCase
import os
from cairis.mio.ModelImport import importModelFile, importRequirementsFile
from cairis.tools.PseudoClasses import StepsAttributes, StepAttributes,ExceptionAttributes
from cairis.tools.ModelDefinitions import UseCaseContributionModel
__author__ = 'Shamal Faily'
class UseCaseAPITests(CairisDaemonTestCase):
@classmethod
def setUpClass(cls):
importModelFile(os.environ['CAIRIS_SRC'] + '/../examples/exemplars/NeuroGrid/NeuroGrid.xml',1,'test')
importRequirementsFile(os.environ['CAIRIS_SRC'] + '/test/testusecase.xml','test')
def setUp(self):
# region Class fields
self.logger = logging.getLogger(__name__)
self.existing_usecase_name = 'Test use case'
self.existing_environment_name = 'Psychosis'
self.existing_author = 'Shamal Faily'
self.existing_code = 'TUC-1'
self.existing_description = 'A test description'
self.existing_actors = ['Researcher']
self.existing_precond = 'Test preconditions'
self.existing_steps = []
anException = ExceptionAttributes('anException','requirement','Anonymisation guidelines','Confidentiality Threat','anException description')
self.existing_steps.append(StepAttributes('Researcher does something','','','',[anException]))
self.existing_steps.append(StepAttributes('System does something','','','',[]))
self.existing_postcond = 'Test postconditions'
usecase_class = UseCase.__module__+'.'+UseCase.__name__
# endregion
def test_get_all(self):
method = 'test_get_all'
rv = self.app.get('/api/usecases?session_id=test')
if (sys.version_info > (3,)):
responseData = rv.data.decode('utf-8')
else:
responseData = rv.data
usecases = jsonpickle.decode(responseData)
self.assertIsNotNone(usecases, 'No results after deserialization')
self.assertIsInstance(usecases, dict, 'The result is not a dictionary as expected')
self.assertGreater(len(usecases), 0, 'No usecases in the dictionary')
self.logger.info('[%s] Use Cases found: %d', method, len(usecases))
usecase = list(usecases.values())[0]
self.logger.info('[%s] First usecase: %s\n', method, usecase['theName'])
def test_get_all_summary(self):
method = 'test_get_all_summary'
rv = self.app.get('/api/usecases/summary?session_id=test')
if (sys.version_info > (3,)):
ucs = json_deserialize(rv.data.decode('utf-8'))
else:
ucs = json_deserialize(rv.data)
self.assertIsNotNone(ucs, 'No results after deserialization')
self.assertGreater(len(ucs), 0, 'No goal summaries')
self.logger.info('[%s] Use Cases found: %d', method, len(ucs))
self.logger.info('[%s] First use case summary: %s \n', method, ucs[0]['theName'])
def test_get_by_name(self):
method = 'test_get_by_name'
url = '/api/usecases/name/%s?session_id=test' % quote(self.existing_usecase_name)
rv = self.app.get(url)
self.assertIsNotNone(rv.data, 'No response')
if (sys.version_info > (3,)):
responseData = rv.data.decode('utf-8')
else:
responseData = rv.data
self.logger.debug('[%s] Response data: %s', method, responseData)
usecase = jsonpickle.decode(responseData)
self.assertIsNotNone(usecase, 'No results after deserialization')
self.logger.info('[%s] UseCase: %s\n', method, usecase['theName'])
def test_get_usecase_requirements(self):
new_tr = Trace(
fObjt = 'requirement',
fName = 'Dataset policy',
tObjt = 'usecase',
tName = 'Test use case')
new_tr_dict = {
'session_id' : 'test',
'object': new_tr
}
rv = self.app.post('/api/traces', content_type='application/json', data=jsonpickle.encode(new_tr_dict))
method = 'test_get_requirements_by_usecase_name'
url = '/api/usecases/name/%s/requirements?session_id=test' % quote(self.existing_usecase_name)
rv = self.app.get(url)
self.assertIsNotNone(rv.data, 'No response')
if (sys.version_info > (3,)):
responseData = rv.data.decode('utf-8')
else:
responseData = rv.data
self.logger.debug('[%s] Response data: %s', method, responseData)
reqs = jsonpickle.decode(responseData)
self.assertIsNotNone(reqs, 'No results after deserialization')
self.assertEqual(new_tr.theFromName,reqs[0]);
def test_generate_obstacle_from_exception(self):
method = 'test_generate_obstacle_from_exception'
url = '/api/usecases/name/%s?session_id=test' % quote(self.existing_usecase_name)
rv = self.app.get(url)
if (sys.version_info > (3,)):
responseData = rv.data.decode('utf-8')
else:
responseData = rv.data
uc = jsonpickle.decode(responseData)
url = '/api/usecases/environment/Psychosis/step/' + quote('Researcher does something') + '/exception/anException/generate_obstacle?session_id=test'
existing_uc_dict = {
'session_id': 'test',
'object': uc
}
rv = self.app.post(url, content_type='application/json', data=jsonpickle.encode(existing_uc_dict))
self.assertIsNotNone(rv.data, 'No response')
if (sys.version_info > (3,)):
responseData = rv.data.decode('utf-8')
else:
responseData = rv.data
json_resp = jsonpickle.decode(responseData)
self.assertIsNotNone(json_resp)
self.assertIsInstance(json_resp, dict)
message = json_resp.get('message', None)
self.assertIsNotNone(message, 'No message in response')
self.logger.info('[%s] Message: %s', method, message)
self.assertGreater(message.find('generated from exception'), -1, 'The obstacle was not generated')
def test_delete(self):
method = 'test_delete'
rv = self.app.get('/api/persona_characteristics/name/Managers%20delegate%20security%20decisions?session_id=test')
if (sys.version_info > (3,)):
responseData = rv.data.decode('utf-8')
else:
responseData = rv.data
pc = jsonpickle.decode(responseData)
pc['theCharacteristicSynopsis'] = {"theActor" : "Claire", "theActorType" : "persona", "theSynopsis" : "Security delegated", "theDimension" : "goal"}
pcDict = {'session_id' : 'test','object' : pc}
rv = self.app.put('/api/persona_characteristics/name/Managers%20delegate%20security%20decisions?session_id=test', content_type='application/json', data=jsonpickle.encode(pcDict))
url = '/api/usecases/name/%s?session_id=test' % quote(self.prepare_new_usecase().name())
new_usecase_body = self.prepare_json()
self.app.delete(url)
self.logger.info('[%s] Object to delete: %s', method, new_usecase_body)
self.app.post('/api/usecases', content_type='application/json', data=new_usecase_body)
self.logger.info('[%s] URL: %s', method, url)
rv = self.app.delete(url)
if (sys.version_info > (3,)):
responseData = rv.data.decode('utf-8')
else:
responseData = rv.data
self.logger.info('[%s] Response data: %s', method, responseData)
self.assertIsNotNone(responseData, 'No response')
json_resp = jsonpickle.decode(responseData)
self.assertIsInstance(json_resp, dict, 'The response cannot be converted to a dictionary')
message = json_resp.get('message', None)
self.assertIsNotNone(message, 'No message in response')
self.logger.info('[%s] Message: %s\n', method, message)
def test_post(self):
method = 'test_post'
rv = self.app.get('/api/persona_characteristics/name/Managers%20delegate%20security%20decisions?session_id=test')
if (sys.version_info > (3,)):
responseData = rv.data.decode('utf-8')
else:
responseData = rv.data
pc = jsonpickle.decode(responseData)
pc['theCharacteristicSynopsis'] = {"theActor" : "Claire", "theActorType" : "persona", "theSynopsis" : "Security delegated", "theDimension" : "goal"}
pcDict = {'session_id' : 'test','object' : pc}
rv = self.app.put('/api/persona_characteristics/name/Managers%20delegate%20security%20decisions?session_id=test', content_type='application/json', data=jsonpickle.encode(pcDict))
if (sys.version_info > (3,)):
responseData = rv.data.decode('utf-8')
else:
responseData = rv.data
self.logger.debug('[%s] Response data: %s', method, responseData)
json_resp = json_deserialize(responseData)
self.assertIsNotNone(json_resp, 'No results after deserialization')
ackMsg = json_resp.get('message', None)
self.assertEqual(ackMsg, 'Persona Characteristic successfully updated')
url = '/api/usecases'
self.logger.info('[%s] URL: %s', method, url)
new_usecase_body = self.prepare_json()
self.app.delete('/api/usecases/name/%s?session_id=test' % quote(self.prepare_new_usecase().name()))
rv = self.app.post(url, content_type='application/json', data=new_usecase_body)
if (sys.version_info > (3,)):
responseData = rv.data.decode('utf-8')
else:
responseData = rv.data
self.logger.debug('[%s] Response data: %s', method, responseData)
json_resp = jsonpickle.decode(responseData)
self.assertIsNotNone(json_resp, 'No results after deserialization')
msg = json_resp.get('message', None)
self.assertIsNotNone(msg, 'No message returned')
self.logger.info('[%s] Message: %s\n', method, msg)
rv = self.app.delete('/api/usecases/name/%s?session_id=test' % quote(self.prepare_new_usecase().name()))
def test_put(self):
method = 'test_put'
url = '/api/usecases'
self.logger.info('[%s] URL: %s', method, url)
new_usecase_body = self.prepare_json()
rv = self.app.delete('/api/usecases/name/%s?session_id=test' % quote(self.prepare_new_usecase().name()))
rv = self.app.post(url, content_type='application/json', data=new_usecase_body)
if (sys.version_info > (3,)):
responseData = rv.data.decode('utf-8')
else:
responseData = rv.data
self.logger.debug('[%s] Response data: %s', method, responseData)
json_resp = jsonpickle.decode(responseData)
self.assertIsNotNone(json_resp, 'No results after deserialization')
msg = json_resp.get('message', None)
self.assertIsNotNone(msg, 'No message returned')
self.logger.info('[%s] Message: %s', method, msg)
usecase_to_update = self.prepare_new_usecase()
usecase_to_update.theName = 'Edited test usecase'
upd_env_body = self.prepare_json(usecase=usecase_to_update)
rv = self.app.put('/api/usecases/name/%s?session_id=test' % quote(self.prepare_new_usecase().name()), data=upd_env_body, content_type='application/json')
self.assertIsNotNone(rv.data, 'No response')
if (sys.version_info > (3,)):
responseData = rv.data.decode('utf-8')
else:
responseData = rv.data
json_resp = jsonpickle.decode(responseData)
self.assertIsNotNone(json_resp)
self.assertIsInstance(json_resp, dict)
message = json_resp.get('message', None)
self.assertIsNotNone(message, 'No message in response')
self.logger.info('[%s] Message: %s', method, message)
self.assertGreater(message.find('successfully updated'), -1, 'The usecase was not successfully updated')
rv = self.app.get('/api/usecases/name/%s?session_id=test' % quote(usecase_to_update.name()))
if (sys.version_info > (3,)):
responseData = rv.data.decode('utf-8')
else:
responseData = rv.data
upd_usecase = jsonpickle.decode(responseData)
self.assertIsNotNone(upd_usecase, 'Unable to decode JSON data')
self.logger.debug('[%s] Response data: %s', method, responseData)
self.logger.info('[%s] UseCase: %s\n', method, upd_usecase['theName'])
rv = self.app.delete('/api/usecases/name/%s?session_id=test' % quote(usecase_to_update.theName))
def prepare_new_usecase(self):
new_usecase_props = [
UseCaseEnvironmentProperties(
environmentName=self.existing_environment_name,
preCond=self.existing_precond,
steps=self.existing_steps,
postCond=self.existing_postcond
)
]
new_usecase = UseCase(
ucId=-1,
ucName='New usecase',
ucAuth='NU',
ucCode='New objective',
ucActors=['Researcher'],
ucDesc='New Author',
tags=[],
cProps=[]
)
new_usecase.theReferenceContributions = [UseCaseContributionModel('Security delegated',{'theMeansEnd':'means','theContribution':'SomePositive'})]
new_usecase.theEnvironmentProperties = new_usecase_props
new_usecase.theEnvironmentDictionary = {}
delattr(new_usecase, 'theEnvironmentDictionary')
return new_usecase
def prepare_dict(self, usecase=None):
if usecase is None:
usecase = self.prepare_new_usecase()
else:
assert isinstance(usecase, UseCase)
return {
'session_id': 'test',
'object': usecase,
}
def prepare_json(self, data_dict=None, usecase=None):
if data_dict is None:
data_dict = self.prepare_dict(usecase=usecase)
else:
assert isinstance(data_dict, dict)
new_usecase_body = jsonpickle.encode(data_dict, unpicklable=False)
self.logger.info('JSON data: %s', new_usecase_body)
return new_usecase_body
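# Illustrative note (not part of the original test module): these tests are normally
# collected by the CAIRIS test runner, but a direct invocation might look like the
# following, assuming a configured CAIRIS checkout with CAIRIS_SRC exported:
#
#   export CAIRIS_SRC=/path/to/cairis/cairis
#   python -m unittest cairis.test.test_UseCaseAPI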
| apache-2.0 | 3,877,249,079,924,481,500 | 42.593846 | 182 | 0.687959 | false |
kupuguy/mg | SampleStatusHistory/SampleStatusHistory/tests/SampleStatushistoryTests.py | 1 | 5985 | # Copyright (c) 2014, Duncan Booth
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from SampleStatusHistory import do_report
from datetime import date, timedelta
class SampleStatusHistoryTest_test1(unittest.TestCase):
    def test_null(self):
        """to_date before from_date returns empty list"""
        from_date = date(2014, 8, 2)
        to_date = date(2014, 8, 1)
        result = do_report(from_date, to_date, [])
        self.assertEqual(result, [])

    def test_no_events(self):
        """No events, but the resulting list should still have an entry for each date"""
        from_date = date(2014, 8, 2)
        to_date = from_date + timedelta(2)
        result = do_report(from_date, to_date, [])
        self.assertEqual(result, [{}, {}, {}, ])

    def test_single_event(self):
        """A single event is reflected in that day's entry"""
        from_date = date(2014, 8, 1)
        to_date = from_date
        events = [
            (date(2014, 8, 1), 1, "DISPATCH"),
            ]
        result = do_report(from_date, to_date, events)
        self.assertEqual(result,
                         [{"DISPATCH": 1}])

    def test_multi_events_one_day(self):
        """Still on one day so we don't have to aggregate days, but multiple events."""
        from_date = date(2014, 8, 1)
        to_date = from_date
        events = [
            (date(2014, 8, 1), 1, "DISPATCH"),
            (date(2014, 8, 1), 1, "DISPATCH"),
            (date(2014, 8, 1), 1, "DISPATCH"),
            (date(2014, 8, 1), -1, "DISPATCH"),
            (date(2014, 8, 1), 1, "WITH_CUSTOMER"),
            ]
        result = do_report(from_date, to_date, events)
        self.assertEqual(result,
                         [{"DISPATCH": 2, "WITH_CUSTOMER": 1}])

    def test_multi_events_two_days(self):
        """Aggregate results from previous day"""
        from_date = date(2014, 8, 1)
        to_date = from_date + timedelta(1)
        events = [
            (date(2014, 8, 2), -1, "DISPATCH"),
            (date(2014, 8, 1), 1, "DISPATCH"),
            (date(2014, 8, 1), 1, "DISPATCH"),
            (date(2014, 8, 1), 1, "DISPATCH"),
            (date(2014, 8, 1), -1, "DISPATCH"),
            (date(2014, 8, 1), 1, "WITH_CUSTOMER"),
            (date(2014, 8, 2), 1, "WITH_CUSTOMER"),
            ]
        result = do_report(from_date, to_date, events)
        self.assertEqual(result,
                         [
                             {"DISPATCH": 2, "WITH_CUSTOMER": 1},
                             {"DISPATCH": 1, "WITH_CUSTOMER": 2},
                         ])

    def test_data_out_of_range(self):
        """Include data before start, ignore data after end"""
        from_date = date(2014, 8, 1)
        to_date = from_date + timedelta(1)
        events = [
            (date(2014, 8, 2), -1, "DISPATCH"),
            (date(2014, 8, 1), 1, "DISPATCH"),
            (date(2014, 7, 31), 1, "DISPATCH"),
            (date(2014, 8, 3), 1, "DISPATCH"),
            (date(2014, 8, 1), 1, "DISPATCH"),
            (date(2014, 8, 1), -1, "DISPATCH"),
            (date(2014, 8, 1), 1, "WITH_CUSTOMER"),
            (date(2014, 8, 2), 1, "WITH_CUSTOMER"),
            ]
        result = do_report(from_date, to_date, events)
        self.assertEqual(result,
                         [
                             {"DISPATCH": 2, "WITH_CUSTOMER": 1},
                             {"DISPATCH": 1, "WITH_CUSTOMER": 2},
                         ])

    def test_all_flags_exist(self):
        """Every status flag seen in the events appears in each day's entry, even when zero"""
        from_date = date(2014, 8, 1)
        to_date = from_date + timedelta(1)
        events = [
            (date(2014, 8, 2), -1, "DISPATCH"),
            (date(2014, 8, 1), 1, "DISPATCH"),
            (date(2014, 8, 1), 1, "DISPATCH"),
            (date(2014, 8, 1), 1, "DISPATCH"),
            (date(2014, 8, 1), -1, "DISPATCH"),
            (date(2014, 8, 1), 1, "WITH_CUSTOMER"),
            (date(2014, 8, 2), 1, "WITH_CUSTOMER"),
            (date(2014, 8, 2), 1, "EXTRACT"),
            (date(2014, 7, 2), 1, "RECEIPT_EMAIL"),
            (date(2014, 7, 2), -1, "RECEIPT_EMAIL"),
            ]
        result = do_report(from_date, to_date, events)
        self.assertEqual(result,
                         [
                             {"DISPATCH": 2, "WITH_CUSTOMER": 1, "EXTRACT": 0, "RECEIPT_EMAIL": 0},
                             {"DISPATCH": 1, "WITH_CUSTOMER": 2, "EXTRACT": 1, "RECEIPT_EMAIL": 0},
                         ])


if __name__ == '__main__':
    unittest.main()
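# Illustrative sketch (not part of the original test module): the behaviour exercised
# above can be summarised in a single call. The event tuples mirror the tests;
# do_report itself comes from SampleStatusHistory.
#
#   from datetime import date
#   from SampleStatusHistory import do_report
#
#   events = [
#       (date(2014, 8, 1), 1, "DISPATCH"),
#       (date(2014, 8, 1), 1, "WITH_CUSTOMER"),
#       (date(2014, 8, 2), -1, "DISPATCH"),
#   ]
#   # One dict per day from 2014-08-01 to 2014-08-02, carrying running totals:
#   # [{'DISPATCH': 1, 'WITH_CUSTOMER': 1}, {'DISPATCH': 0, 'WITH_CUSTOMER': 1}]
#   print(do_report(date(2014, 8, 1), date(2014, 8, 2), events))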
| bsd-3-clause | -3,036,616,602,388,525,600 | 40.853147 | 90 | 0.558229 | false |
seslab/MIPVC | HarmGen/HarmGen.py | 1 | 1610 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- coding: utf-850 -*-
#Title        :HarmGen.py
#Description  :Library for generating vectors of signals with harmonics.
#Author       :Javier Campos Rojas
#Date         :June-2017
#Version      :1.0
#Notes        :
#==============================================================================
import matplotlib.pyplot as plt
import numpy as np


class HarmGen:
    def __init__(self, amplitud, freq, nHarm):  # initialize the generator
        self.tau = 1.0/(2*freq)
        self.t0 = self.tau/2.0
        self.a = float(amplitud)
        self.f = float(freq)
        self.y = nHarm
        Tp = 1.0/self.f
        self.Tp = Tp
        #Tm=0.0002
        Tm = 0.0005
        self.Tm = Tm
        self.w0 = 2*np.pi*self.f

    def fourier_ak(self, k):
        self.k = k
        self.Tp = 1/self.f
        Y = (2/self.Tp)*np.sinc(self.k*self.w0*self.tau/2)*np.cos(self.w0*k*(self.t0+(self.tau/2)))
        return Y

    def fourier_bk(self, k):
        self.k = k
        self.Tp = 1/self.f
        Y = (2/self.Tp)*np.sinc(self.k*self.w0*self.tau/2)*np.sin(self.w0*k*(self.t0+(self.tau/2)))
        return Y

    def Harm(self):
        a0 = 2/self.Tp
        t = np.arange(0, 2*self.Tp, self.Tm)
        Y = a0/2
        if len(self.y) > 1:
            for k in self.y:
                a = self.fourier_ak(k)
                b = self.fourier_bk(k)
                Y = Y + a*np.cos(self.w0*k*t) + b*np.sin(self.w0*k*t)
        elif len(self.y) == 1:
            for k in range(1, self.y[0]+1):
                a = self.fourier_ak(k)
                b = self.fourier_bk(k)
                Y = Y + a*np.cos(self.w0*k*t) + b*np.sin(self.w0*k*t)
        m1 = max(Y)
        m2 = min(Y)
        m = m2 + (m1-m2)/2
        A = (m1-m2)/2
        Y = (Y-m)*(self.a/A)
        #plt.plot(t,Y)
        #plt.show(block=False)
        #print(str(len(Y)))
        #print(Y)
        return Y
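

# Illustrative usage sketch (not part of the original library): build a 5 V, 60 Hz
# waveform from the first five harmonics and plot it. The amplitude and frequency
# values are arbitrary examples.
if __name__ == '__main__':
    gen = HarmGen(5, 60, [5])       # nHarm=[5] -> sum harmonics 1..5
    samples = gen.Harm()            # NumPy array covering one 2*Tp window
    t = np.arange(0, 2*gen.Tp, gen.Tm)
    plt.plot(t, samples)
    plt.xlabel('time [s]')
    plt.ylabel('amplitude [V]')
    plt.show()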
| gpl-3.0 | -899,932,597,048,497,400 | 23.692308 | 92 | 0.565109 | false |
smurfix/HomEvenT | homevent/base.py | 1 | 3807 | # -*- coding: utf-8 -*-
##
## Copyright © 2007-2012, Matthias Urlichs <[email protected]>
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License (included; see the file LICENSE)
## for more details.
##
"""\
This module holds a few random constants and stuff.
"""
SYS_PRIO = -10
MIN_PRIO = 0
MAX_PRIO = 100
# This construct represents a type as-is
class singleNameMeta(type):
    def __repr__(cls):
        return cls.__name__


class singleName(object):
    __metaclass__ = singleNameMeta


class Name(tuple):
    """A class that knows how to print itself the "right" way"""
    delim = u"¦"
    prefix = ""
    suffix = ""

    def __new__(cls, *data):
        if len(data) == 1 and isinstance(data[0], tuple):
            data = data[0]
        if len(data) > 0 and not isinstance(data[0], (basestring, int, float)):
            raise RuntimeError("Name:" + repr(data))
        return super(Name, cls).__new__(cls, data)

    def __str__(self):
        return unicode(self).encode("utf-8")

    def __unicode__(self):
        return self.prefix + self.delim.join((unicode(x) for x in self)) + self.suffix

    def __repr__(self):
        return self.__class__.__name__ + super(Name, self).__repr__()

    def apply(self, ctx=None, drop=0):
        """\
        Copy a name, applying substitutions.
        This code dies with an AttributeError if there are no
        matching substitutes. This is intentional.
        """
        if ctx is None:
            if drop:
                return self.__class__(*self[drop:])
            else:
                return self
        res = []
        for n in self[drop:]:
            if hasattr(n, "startswith") and n.startswith('$'):
                n = getattr(ctx, n[1:])
            res.append(n)
        return self.__class__(*res)

# The following are rich comparison and hashing methods, intended so
# that one-element names compare identically to the corresponding strings
for s in "hash".split():  ## id
    s = "__" + s + "__"

    def gen_id(s):
        def f(self):
            if len(self) == 1:
                return getattr(unicode(self), s)()
            return getattr(super(Name, self), s)()
        f.__name__ = s
        return f
    setattr(Name, s, gen_id(s))

for s in "le lt ge gt eq ne".split():  ## cmp
    s = "__" + s + "__"

    def gen_cmp(s):
        def f(self, other):
            if isinstance(other, basestring):
                return getattr(unicode(self), s)(other)
            return getattr(super(Name, self), s)(other)
        f.__name__ = s
        return f
    setattr(Name, s, gen_cmp(s))


def SName(data, attr="name"):
    """An alternate Name constructor that accepts a single argument"""
    if isinstance(data, Name):
        return data
    n = getattr(data, attr, None)
    if isinstance(n, Name):
        return n
    if isinstance(data, basestring):
        data = data.split(" ")
    return Name(*data)


class RaisedError(RuntimeError):
    """An error that has been explicitly raised by a script"""
    no_backtrace = True

    def __init__(self, *params):
        self.params = params

    def __repr__(self):
        return u"‹%s: %s›" % (self.__class__.__name__, repr(self.params))

    def __str__(self):
        return u"%s: %s" % (self.__class__.__name__, " ".join(str(x) for x in self.params))

    def __unicode__(self):
        return u"%s: %s" % (self.__class__.__name__, " ".join(unicode(x) for x in self.params))


def flatten(out, s, p=""):
    if hasattr(s, "list") and callable(s.list):
        for ss in s.list():
            flatten(out, ss, p)
        return
    s = list(s)
    t = s.pop()
    if p != "":
        s.insert(0, p)
    p = u" ".join((str(ss) for ss in s))
    if hasattr(t, "list") and callable(t.list):
        t = t.list()
    if hasattr(t, "next"):
        pp = " " * len(p)
        for tt in t:
            flatten(out, tt, p)
            p = pp
    else:
        out.put((p, t))
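

# Illustrative usage sketch (not part of the original module; Python 2 syntax to match
# the code above). Name behaves like a tuple but renders with the broken-bar delimiter,
# and one-element names compare equal to the corresponding plain strings:
#
#   >>> n = Name("wago", "port", "1")
#   >>> unicode(n)
#   u'wago\xa6port\xa61'
#   >>> SName("wago port 1") == n
#   True
#   >>> Name("on") == "on"
#   True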
| gpl-3.0 | 2,617,710,084,144,590,300 | 25.767606 | 89 | 0.642463 | false |
ployground/ploy_virtualbox | setup.py | 1 | 1216 | from setuptools import setup
import os
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
HISTORY = open(os.path.join(here, 'HISTORY.rst')).read()
version = "1.2.1.dev0"
setup(
version=version,
description="Plugin for ploy to provision virtual machines using VirtualBox.",
long_description=README + "\n\n" + HISTORY,
name="ploy_virtualbox",
author='Florian Schulze',
author_email='[email protected]',
license="BSD 3-Clause License",
url='http://github.com/ployground/ploy_virtualbox',
classifiers=[
'Environment :: Console',
'Intended Audience :: System Administrators',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Systems Administration'],
include_package_data=True,
zip_safe=False,
packages=['ploy_virtualbox'],
install_requires=[
'setuptools',
'ploy >= 1.2.0, < 2dev',
'lazy'],
entry_points="""
[ploy.plugins]
virtualbox = ploy_virtualbox:plugin
""")
| bsd-3-clause | 6,762,444,998,633,661,000 | 29.4 | 82 | 0.623355 | false |
katrid/keops | keops/api/views/rpc.py | 1 | 2990 | import json
from django.shortcuts import render
from django.conf import settings
from django.http import JsonResponse
from django.db import models
from django.core import exceptions
from django.db.utils import IntegrityError
from django.contrib.auth.decorators import login_required
from keops.api.services import ViewService, ModelService
from keops.api.registry import site
@login_required
def rpc(request, service, method_name):
    svc = site.services[service]
    if issubclass(svc, ViewService):
        svc = svc(request)
    meth = getattr(svc, method_name)
    status = 200
    if getattr(meth, 'exposed', None):
        if request.body:
            data = json.loads(request.body.decode('utf-8'))
        else:
            data = {}
        try:
            if 'args' in data:
                args = data['args']
            elif 'args' in request.GET:
                args = request.GET.getlist('args')
            else:
                args = ()
            if 'kwargs' in data:
                kwargs = data['kwargs']
            else:
                kwargs = {}
            res = meth(*args, **kwargs)
        except models.ObjectDoesNotExist as e:
            status = 404
            res = {'status': 'not found', 'ok': False, 'fail': True, 'result': None, 'message': str(e)}
        except exceptions.PermissionDenied as e:
            status = 403
            res = {'status': 'denied', 'ok': False, 'fail': True, 'result': None, 'message': str(e)}
        except exceptions.ValidationError as e:
            res = {'status': 'fail', 'ok': False, 'fail': True, 'result': None, 'messages': e.message_dict}
        except IntegrityError as e:
            raise
            res = {'status': 'fail', 'ok': False, 'fail': True, 'result': None, 'message': str(e)}
        except Exception as e:
            raise
            status = 500
            res = {'status': 'fail', 'ok': False, 'fail': True, 'result': None, 'message': str(e)}
        else:
            if isinstance(res, dict):
                if 'status' not in res and 'result' not in res:
                    res = {'status': 'ok', 'ok': True, 'result': res}
            elif isinstance(res, models.Model) and isinstance(svc, ModelService):
                res = {
                    'status': 'ok',
                    'ok': True,
                    'result': {
                        'data': [svc.serialize(res, view_type='form')],
                    }
                }
            elif isinstance(res, models.QuerySet) and isinstance(svc, ModelService):
                res = {
                    'status': 'ok',
                    'ok': True,
                    'result': {
                        'data': [svc.serialize(obj, view_type='search') for obj in res],
                        'count': getattr(res, '_count', None),
                    }
                }
            else:
                res = {'result': res, 'status': 'ok', 'ok': True}
        return JsonResponse(res, status=status)
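

# Illustrative client sketch (not part of the original view; the URL prefix, service
# name and method name below are hypothetical and depend on this project's urlconf and
# registered services). The view expects a JSON body with optional "args"/"kwargs":
#
#   import json, requests
#   payload = {'args': [], 'kwargs': {}}
#   r = requests.post('http://localhost:8000/api/rpc/some.service/some_method/',
#                     data=json.dumps(payload),
#                     headers={'Content-Type': 'application/json'},
#                     cookies={'sessionid': '...'})  # login_required: needs a session
#   print(r.json())  # e.g. {'status': 'ok', 'ok': True, 'result': ...}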
| bsd-3-clause | 4,315,749,456,939,318,300 | 38.342105 | 107 | 0.508027 | false |
sara-nl/opennaas-dc-gui | opennaas_manager.py | 1 | 13590 | #!/usr/bin/python
# Copyright (c) 2014, Erik Ruiter, SURFsara BV, Amsterdam, The Netherlands
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions
# and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
# and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
import requests
from collections import defaultdict
import xml.etree.ElementTree as ET
import json, urllib, re
import settings
def etree_to_dict(t):
"""Function which converts a ElementTree XML object to a python Dictionary."""
d = {t.tag: {} if t.attrib else None}
children = list(t)
if children:
dd = defaultdict(list)
for dc in map(etree_to_dict, children):
for k, v in dc.iteritems():
dd[k].append(v)
d = {t.tag: {k:v[0] if len(v) == 1 else v for k, v in dd.iteritems()}}
if t.attrib:
d[t.tag].update(('@' + k, v) for k, v in t.attrib.iteritems())
if t.text:
text = t.text.strip()
if children or t.attrib:
if text:
d[t.tag]['#text'] = text
else:
d[t.tag] = text
return d
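# Illustrative sketch (not part of the original module): etree_to_dict flattens an
# ElementTree element into nested dicts, which is how the REST responses below are
# parsed. The element built here is a made-up example, not real OpenNaaS output, and
# dict key order may differ.
#
#   >>> sample = ET.Element('resource', {'id': '42'})
#   >>> ET.SubElement(sample, 'name').text = 'router1'
#   >>> etree_to_dict(sample)
#   {'resource': {'name': 'router1', '@id': '42'}}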
def getResourceName(resource_id, url, opennaas_user, opennaas_pwd):
"""Returns the name of a resource, using the resource ID."""
r = requests.get("%sresources/%s/name" % (url, resource_id), auth=(opennaas_user, opennaas_pwd))
return r.text
def getResourceID(resource_name, url, opennaas_user, opennaas_pwd):
"""Returns the ID of a resource, using the resource name."""
r = requests.get("%sresources/type/router/name/%s" % (url, resource_name), auth=(opennaas_user, opennaas_pwd))
return r.text
def getQueue(resource_name, url, opennaas_user, opennaas_pwd):
"""Returns the Queue of a resource, in JSON format."""
r = requests.get("%srouter/%s/queue/getActionsId" % (url, resource_name), auth=(opennaas_user, opennaas_pwd))
queue = r.text[1:-1].replace(" ","").split(",")
return json.dumps(queue)
def executeQueue(resources, url, opennaas_user, opennaas_pwd):
"""Executes the queues of all resources in a list of resourcenames."""
if isinstance(resources, list):
for resource in resources:
r = requests.post("%srouter/%s/queue/execute" % (url, resource), auth=(opennaas_user, opennaas_pwd))
else:
r = requests.post("%srouter/%s/queue/execute" % (url, resources), auth=(opennaas_user, opennaas_pwd))
return json.dumps(r.text)
def clearQueue(resources, url, opennaas_user, opennaas_pwd):
"""Clears the queues of all resources in a list of resourcenames."""
if isinstance(resources, list):
for resource in resources:
r = requests.post("%srouter/%s/queue/clear" % (url, resource), auth=(opennaas_user, opennaas_pwd))
else:
r = requests.post("%srouter/%s/queue/clear" % (url, resources), auth=(opennaas_user, opennaas_pwd))
return json.dumps(r.text)
def getContext(resource_name, url, opennaas_user, opennaas_pwd):
context = dict()
r = requests.get("%srouter/%s/protocolSessionManager/context" % (url, resource_name), auth=(opennaas_user, opennaas_pwd))
data = etree_to_dict( ET.fromstring(r.text) )
data = data['protocolSessionContexts']['protocolSessionContext']['sessionParameters']['entry']
for i in data:
context.update( {i.get('key'): i.get('value').get('#text') })
return json.dumps(context)
def removeQueueItems(resource_name, resources, url,opennaas_user,opennaas_pwd):
"""
<?xml version="1.0" encoding="UTF-8"?>
<modifyParams>
<posAction>0</posAction>
<queueOper>REMOVE</queueOper>
</modifyParams>
"""
return "ok"
def getResource(resource_id, url, opennaas_user, opennaas_pwd):
if re.match('[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}',resource_id) == None:
resource_id = getResourceID(resource_id, url, opennaas_user, opennaas_pwd)
r = requests.get("%sresources/%s" % (url, resource_id), auth=(opennaas_user, opennaas_pwd))
data = etree_to_dict( ET.fromstring(r.text) )
queue = getQueue(data['resourceInfo'].get('name'), url, opennaas_user, opennaas_pwd)
data['resourceInfo'].update({'queue' : json.loads(queue)})
return json.dumps(data['resourceInfo'])
def getResources(url, opennaas_user, opennaas_pwd):
resources = list()
r = requests.get(url + "resources/", auth=(opennaas_user, opennaas_pwd))
data = etree_to_dict( ET.fromstring(r.text) )
for i in data['resources']['resource']:
resources.append(json.loads(getResource(i, url, opennaas_user, opennaas_pwd)))
return json.dumps(resources)
def updateResources(action, resources, url,opennaas_user,opennaas_pwd):
for resource in resources:
r = requests.put("%sresources/%s/status/%s" % ( url, resource, action), auth=(opennaas_user, opennaas_pwd))
def getResourceStatus(resourcename, url, opennaas_user, opennaas_pwd):
id = getResourceID(resourcename , url, opennaas_user, opennaas_pwd)
r = requests.get("%sresources/%s/status" % (url, id), auth=(opennaas_user, opennaas_pwd))
return r.text
def getRouterInterfaces(resourcename, url , opennaas_user, opennaas_pwd):
interfaces = list()
# Get list of interfaces and convert to dict
r = requests.get("%srouter/%s/chassis/interfaces" % (url, resourcename), auth=(opennaas_user, opennaas_pwd))
data = etree_to_dict( ET.fromstring(r.text) )
data = data['{opennaas.api}interfaces']['interface']
# Get list of aggregates
aggregates = json.loads(getRouterAggregates(resourcename, url , opennaas_user, opennaas_pwd))
for i in data:
#Get detailed interface info per interface
ri = requests.get("%srouter/%s/chassis/interfaces/info?interfaceName=%s" % (url, resourcename, i), auth=(opennaas_user, opennaas_pwd))
ifInfo = etree_to_dict( ET.fromstring(ri.text) )
# Remove prepending '{opennaas.api}interfaceInfo' key
ifInfo = ifInfo['{opennaas.api}interfaceInfo']
# If there are aggregates, add the info
if aggregates['{opennaas.api}interfaces'] != None:
ifInfo.update( {'isAggr' : ifInfo['name'][:-2] in aggregates['{opennaas.api}interfaces']['interface'] })
rip = requests.get("%srouter/%s/ip/interfaces/addresses?interface=%s" % (url, resourcename, i), auth=(opennaas_user, opennaas_pwd))
ipInfo = etree_to_dict( ET.fromstring(rip.text) )
if ipInfo['{opennaas.api}ipAddresses'] != None : ifInfo.update( ipInfo['{opennaas.api}ipAddresses'] )
else: ifInfo.update( {'ipAddress' : ""} )
if (ifInfo.get('description') == None): ifInfo['description'] = ''
interfaces.append(ifInfo)
return json.dumps(interfaces)
def getRouterAggregates(resourcename, url , opennaas_user, opennaas_pwd):
deviceInfo = json.loads(getResource(resourcename, url, opennaas_user, opennaas_pwd))
if 'linkaggregation' not in deviceInfo['capabilities']['capability']:
return json.dumps({'{opennaas.api}interfaces': None})
r = requests.get("%srouter/%s/linkaggregation/" % (url, resourcename), auth=(opennaas_user, opennaas_pwd))
tree = ET.fromstring(r.text)
data = etree_to_dict(tree)
return json.dumps(data)
def getRouterAggregate(resourcename, interfacename, url , opennaas_user, opennaas_pwd):
r = requests.get("%srouter/%s/linkaggregation/%s" % (url, resourcename,interfacename), auth=(opennaas_user, opennaas_pwd))
tree = ET.fromstring(r.text)
data = etree_to_dict(tree)
data = data['{opennaas.api}aggregatedInterface']
return json.dumps(data)
def getRouterVLANs(resourcename, url , opennaas_user, opennaas_pwd):
interfaces = defaultdict(dict)
vlanBridges = defaultdict(dict)
vlaninterfaces = []
# Build dictionary of all vlans with their attached interfaces
r = requests.get("%srouter/%s/vlanbridge" % (url, resourcename), auth=(opennaas_user, opennaas_pwd))
vlans = etree_to_dict( ET.fromstring(r.text) )
vlans = vlans['{opennaas.api}bridgeDomains']['domainName']
for i in vlans :
rv = requests.get("%srouter/%s/vlanbridge/%s" % (url, resourcename, i), auth=(opennaas_user, opennaas_pwd))
vlanDetails = etree_to_dict( ET.fromstring(rv.text) )
vlanBridges.update({ i : vlanDetails['{opennaas.api}bridgeDomain']})
r = requests.get("%srouter/%s/chassis/interfaces" % (url, resourcename), auth=(opennaas_user, opennaas_pwd))
ifdata = etree_to_dict( ET.fromstring(r.text) )
ifdata = ifdata['{opennaas.api}interfaces']['interface']
for i in ifdata:
rv = requests.get("%srouter/%s/vlanbridge/vlanoptions?iface=%s" % (url, resourcename, i), auth=(opennaas_user, opennaas_pwd))
vlanOptions = etree_to_dict( ET.fromstring(rv.text) )
print vlanOptions
"""rv_tree = ET.fromstring(rv.text)
for rv_elem in rv_tree.iter():
print rv_elem.text
if rv_elem.text == "native-vlan-id":
print "bla"
index = index + 1
"""
def getNetworkOverview(url , opennaas_user, opennaas_pwd):
domains = defaultdict();
routers = json.loads(getResources(url, opennaas_user, opennaas_pwd))
for router in routers:
if router['type'] == 'router':
r = requests.get("%srouter/%s/vlanbridge" % (url, router['name']), auth=(opennaas_user, opennaas_pwd))
vlans = etree_to_dict( ET.fromstring(r.text) )
vlans = vlans['{opennaas.api}bridgeDomains']['domainName']
for vlan in vlans:
r = requests.get("%srouter/%s/vlanbridge/%s" % (url, router['name'],vlan), auth=(opennaas_user, opennaas_pwd))
domain = etree_to_dict( ET.fromstring(r.text) )
domain = domain['{opennaas.api}bridgeDomain']
domain.update( { 'resources' : [router['name']]})
if 'description' not in domain.keys(): domain.update({ 'description' : "" })
if vlan not in domains.keys():
domains.update( { vlan : domain })
else:
resources = domains[vlan]['resources']
resources.append(router['name'])
domains[vlan].update({ 'resources' : resources })
return json.dumps(domains)
def buildTopology(resourcename, url, opennaas_user, opennaas_pwd):
routers = json.loads(getResources(url, opennaas_user, opennaas_pwd))
for router in routers:
if (router['type'] == 'router') and (router['state'] == 'ACTIVE') and ('topologydiscovery' in router['capabilities']['capability']):
r = requests.post("%snetwork/%s/topology/resource" % (url, resourcename), data = str(router['resourceId']) , auth=(opennaas_user, opennaas_pwd), headers = {'Content-Type': 'application/xml','Accept' : 'application/xml'})
def getTopology(resourcename , url, opennaas_user, opennaas_pwd):
r = requests.get("%snetwork/%s/topology" % (url, resourcename), auth=(opennaas_user, opennaas_pwd))
topo = etree_to_dict( ET.fromstring(r.text) )
topo = topo['{opennaas.api}network-topology']
resources = json.loads(getResources(url, opennaas_user, opennaas_pwd))
topo.update({'resources' : resources})
return json.dumps(topo)
def createLinkAggregation(resourcename, interface, type, value, url,opennaas_user,opennaas_pwd):
payload = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<ns2:aggregatedInterface xmlns:ns2="opennaas.api">
<id>ae2</id>
<interfaces>
<interface>ge-0/0/3</interface>
<interface>ge-0/0/4</interface>
</interfaces>
<aggregationOptions>
<entry>
<key>link-speed</key>
<value>1g</value>
</entry>
<entry>
<key>minimum-links</key>
<value>1</value>
</entry>
</aggregationOptions>
</ns2:aggregatedInterface>"""
r = requests.post("%srouter/%s/linkaggregation" % (url, resourcename), headers = {'Content-Type': 'application/xml'} , data = payload, auth=(opennaas_user, opennaas_pwd))
print r.status_code
print r.headers
print r.text
return vlanBridges
def addToQueue(resourcename, interface, type, value, url,opennaas_user,opennaas_pwd):
if type == "description": url = "%srouter/%s/ip/interfaces/description?interface=%s" % (url, resourcename, interface)
if type == "ipv4address": url = "%srouter/%s/ip/interfaces/addresses/ipv4?interface=%s" % (url, resourcename, interface)
if type == "ipv6address": url = "%srouter/%s/ip/interfaces/addresses/ipv6?interface=%s" % (url, resourcename, interface)
if type == "status": url = "%srouter/%s/chassis/interfaces/status/%s?ifaceName=%s" % (url, resourcename, value, interface[:-2])
if type == "status":
r = requests.put(url, auth=(opennaas_user, opennaas_pwd), headers = {"content-type":"application/xml"})
else:
r = requests.post(url, data = value, auth=(opennaas_user, opennaas_pwd), headers = {"content-type":"application/xml"})
return json.dumps(r.text)
def main():
#print getRouterInterfaces('switch1',settings.opennaas_url,settings.opennaas_user, settings.opennaas_pwd)
print getTopology("net1",settings.opennaas_url,settings.opennaas_user, settings.opennaas_pwd)
#print getRouterAggregate("switch1","ae0",settings.opennaas_url,settings.opennaas_user, settings.opennaas_pwd)
print ""
if __name__ == "__main__":
main()
| apache-2.0 | 6,549,476,206,768,126,000 | 43.703947 | 223 | 0.709934 | false |
ulif/pulp | server/pulp/plugins/util/metadata_writer.py | 4 | 16961 | from gettext import gettext as _
import glob
import gzip
import logging
import os
import shutil
import traceback
from xml.sax.saxutils import XMLGenerator
from pulp.common import error_codes
from pulp.server.exceptions import PulpCodedValidationException, PulpCodedException
from pulp.server.util import CHECKSUM_FUNCTIONS
_LOG = logging.getLogger(__name__)
BUFFER_SIZE = 1024
class MetadataFileContext(object):
"""
Context manager class for metadata file generation.
"""
def __init__(self, metadata_file_path, checksum_type=None):
"""
:param metadata_file_path: full path to metadata file to be generated
:type metadata_file_path: str
:param checksum_type: checksum type to be used to generate and prepend checksum
to the file names of files. If checksum_type is None,
no checksum is added to the filename
:type checksum_type: str or None
"""
self.metadata_file_path = metadata_file_path
self.metadata_file_handle = None
self.checksum_type = checksum_type
self.checksum = None
if self.checksum_type is not None:
checksum_function = CHECKSUM_FUNCTIONS.get(checksum_type)
if not checksum_function:
raise PulpCodedValidationException(
[PulpCodedException(error_codes.PLP1005, checksum_type=checksum_type)])
self.checksum_constructor = checksum_function
def __enter__(self):
self.initialize()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if None not in (exc_type, exc_val, exc_tb):
err_msg = '\n'.join(traceback.format_exception(exc_type, exc_val, exc_tb))
log_msg = _('Exception occurred while writing [%(m)s]\n%(e)s')
# any errors here should have already been caught and logged
_LOG.debug(log_msg % {'m': self.metadata_file_path, 'e': err_msg})
self.finalize()
return True
def initialize(self):
"""
Create the new metadata file and write the header.
"""
if self.metadata_file_handle is not None:
# initialize has already, at least partially, been run
return
self._open_metadata_file_handle()
self._write_file_header()
def finalize(self):
"""
Write the footer into the metadata file and close it.
"""
if self._is_closed(self.metadata_file_handle):
# finalize has already been run or initialize has not been run
return
try:
self._write_file_footer()
except Exception, e:
_LOG.exception(e)
try:
self._close_metadata_file_handle()
except Exception, e:
_LOG.exception(e)
# Add calculated checksum to the filename
file_name = os.path.basename(self.metadata_file_path)
if self.checksum_type is not None:
with open(self.metadata_file_path, 'rb') as file_handle:
content = file_handle.read()
checksum = self.checksum_constructor(content).hexdigest()
self.checksum = checksum
file_name_with_checksum = checksum + '-' + file_name
new_file_path = os.path.join(os.path.dirname(self.metadata_file_path),
file_name_with_checksum)
os.rename(self.metadata_file_path, new_file_path)
self.metadata_file_path = new_file_path
# Set the metadata_file_handle to None so we don't double call finalize
self.metadata_file_handle = None
def _open_metadata_file_handle(self):
"""
Open the metadata file handle, creating any missing parent directories.
If the file already exists, this will overwrite it.
"""
assert self.metadata_file_handle is None
_LOG.debug('Opening metadata file: %s' % self.metadata_file_path)
if not os.path.exists(self.metadata_file_path):
parent_dir = os.path.dirname(self.metadata_file_path)
if not os.path.exists(parent_dir):
os.makedirs(parent_dir, mode=0770)
elif not os.access(parent_dir, os.R_OK | os.W_OK | os.X_OK):
msg = _('Insufficient permissions to write metadata file in directory [%(d)s]')
raise RuntimeError(msg % {'d': parent_dir})
else:
msg = _('Overwriting existing metadata file [%(p)s]')
_LOG.warn(msg % {'p': self.metadata_file_path})
if not os.access(self.metadata_file_path, os.R_OK | os.W_OK):
msg = _('Insufficient permissions to overwrite [%(p)s]')
raise RuntimeError(msg % {'p': self.metadata_file_path})
msg = _('Opening metadata file handle for [%(p)s]')
_LOG.debug(msg % {'p': self.metadata_file_path})
if self.metadata_file_path.endswith('.gz'):
self.metadata_file_handle = gzip.open(self.metadata_file_path, 'w')
else:
self.metadata_file_handle = open(self.metadata_file_path, 'w')
def _write_file_header(self):
"""
Write any headers for the metadata file
"""
pass
def _write_file_footer(self):
"""
Write any file footers for the metadata file.
"""
pass
def _close_metadata_file_handle(self):
"""
Flush any cached writes to the metadata file handle and close it.
"""
_LOG.debug('Closing metadata file: %s' % self.metadata_file_path)
if not self._is_closed(self.metadata_file_handle):
self.metadata_file_handle.flush()
self.metadata_file_handle.close()
@staticmethod
def _is_closed(file_object):
"""
Determine if the file object has been closed. If it is None, it is assumed to be closed.
:param file_object: a file object
:type file_object: file
:return: True if the file object is closed or is None, otherwise False
:rtype: bool
"""
if file_object is None:
# finalize has already been run or initialize has not been run
return True
try:
return file_object.closed
except AttributeError:
# python 2.6 doesn't have a "closed" attribute on a GzipFile,
# so we must look deeper.
if isinstance(file_object, gzip.GzipFile):
return file_object.myfileobj is None or file_object.myfileobj.closed
else:
raise
class JSONArrayFileContext(MetadataFileContext):
"""
Context manager for writing out units as a json array.
"""
def __init__(self, *args, **kwargs):
"""
:param args: any positional arguments to be passed to the superclass
:type args: list
:param kwargs: any keyword arguments to be passed to the superclass
:type kwargs: dict
"""
super(JSONArrayFileContext, self).__init__(*args, **kwargs)
self.units_added = False
def _write_file_header(self):
"""
Write out the beginning of the json file
"""
self.metadata_file_handle.write('[')
def _write_file_footer(self):
"""
Write out the end of the json file
"""
self.metadata_file_handle.write(']')
def add_unit_metadata(self, unit):
"""
Add the specific metadata for this unit
"""
if self.units_added:
self.metadata_file_handle.write(',')
else:
self.units_added = True
class XmlFileContext(MetadataFileContext):
"""
Context manager for writing out units as xml
"""
def __init__(self, metadata_file_path, root_tag, root_attributes=None, *args, **kwargs):
"""
:param metadata_file_path: The file path for the file to write
:type metadata_file_path: str
:param root_tag: The root tag for the xml tree
:type root_tag: str
:param root_attributes: Any attributes to populate on the root xml tag
:type root_attributes: dict of str
:param args: any positional arguments to be passed to the superclass
:type args: list
:param kwargs: any keyword arguments to be passed to the superclass
:type kwargs: dict
"""
super(XmlFileContext, self).__init__(metadata_file_path, *args, **kwargs)
self.root_tag = root_tag
if not root_attributes:
root_attributes = {}
self.root_attributes = root_attributes
def _open_metadata_file_handle(self):
"""
Open the metadata file handle, creating any missing parent directories.
If the file already exists, this will overwrite it.
"""
super(XmlFileContext, self)._open_metadata_file_handle()
self.xml_generator = XMLGenerator(self.metadata_file_handle, 'UTF-8')
def _write_file_header(self):
"""
Write out the beginning of the json file
"""
self.xml_generator.startDocument()
self.xml_generator.startElement(self.root_tag, self.root_attributes)
def _write_file_footer(self):
"""
Write out the end of the json file
"""
self.xml_generator.endElement(self.root_tag)
self.xml_generator.endDocument()
class FastForwardXmlFileContext(XmlFileContext):
"""
Context manager for reopening an existing XML file context to insert more data.
"""
def __init__(self, metadata_file_path, root_tag, search_tag, root_attributes=None,
*args, **kwargs):
"""
:param metadata_file_path: The file path for the file to write
:type metadata_file_path: str
:param root_tag: The root tag for the xml tree
:type root_tag: str
:param search_tag: The tag that denotes the beginning of content to copy.
If None, no content will be copied.
:param root_attributes: Any attributes to populate on the root xml tag
:type root_attributes: dict of str, str
:param args: any positional arguments to be passed to the superclass
:type args: list
:param kwargs: any keyword arguments to be passed to the superclass
:type kwargs: dict
"""
super(FastForwardXmlFileContext, self).__init__(metadata_file_path, root_tag,
root_attributes, *args, **kwargs)
self.fast_forward = False
self.search_tag = search_tag
self.existing_file = None
self.xml_generator = None
def _open_metadata_file_handle(self):
"""
Open the metadata file handle, creating any missing parent directories.
If the file already exists, this will copy it to a new name and open it as an input
for filtering/modification.
"""
# Figure out if we are fast forwarding a file
# find the primary file
working_dir, file_name = os.path.split(self.metadata_file_path)
if self.checksum_type:
# Look for a file matching the checksum-filename pattern
expression = '[0-9a-zA-Z]*-%s' % file_name
expression = os.path.join(working_dir, expression)
file_list = glob.glob(expression)
if file_list:
# We only want to work on the latest one
stat_files = ((os.stat(path).st_mtime, path) for path in file_list)
sorted_files = sorted(stat_files, reverse=True)
working_dir, existing_file_name = os.path.split(sorted_files[0][1])
self.existing_file = existing_file_name
self.fast_forward = True
elif not self.checksum_type and os.path.exists(self.metadata_file_path):
self.existing_file = file_name
self.fast_forward = True
if self.fast_forward:
# move the file so that we can still process it if the name is the same
if self.existing_file:
new_file_name = 'original.%s' % self.existing_file
shutil.move(os.path.join(working_dir, self.existing_file),
os.path.join(working_dir, new_file_name))
self.existing_file = new_file_name
self.existing_file = os.path.join(working_dir, self.existing_file)
# Open the file, unzip if necessary so that seek operations can be performed
self.original_file_handle = None
if self.existing_file.endswith('.gz'):
non_compressed_file = self.existing_file[:self.existing_file.rfind('.gz')]
with open(os.path.join(working_dir, non_compressed_file), 'wb') as plain_handle:
gzip_handle = gzip.open(os.path.join(working_dir, self.existing_file), 'rb')
try:
content = gzip_handle.read(BUFFER_SIZE)
while content:
plain_handle.write(content)
content = gzip_handle.read(BUFFER_SIZE)
finally:
if gzip_handle:
gzip_handle.close()
# clean up the zipped file
os.unlink(self.existing_file)
self.existing_file = non_compressed_file
self.original_file_handle = open(os.path.join(working_dir, self.existing_file), 'r')
super(FastForwardXmlFileContext, self)._open_metadata_file_handle()
def _write_file_header(self):
"""
Write out the beginning of the file only if we are not in fast forward mode.
No fast forward will happen if search_tag attribute is None or not found.
"""
super(FastForwardXmlFileContext, self)._write_file_header()
if self.fast_forward and self.search_tag is not None:
start_tag = '<%s' % self.search_tag
end_tag = '</%s' % self.root_tag
# Find the start offset
content = ''
index = -1
while index < 0:
content_buffer = self.original_file_handle.read(BUFFER_SIZE)
if not content_buffer:
# The search tag was never found, This is an empty file where no FF is necessary
msg = _('When attempting to fast forward the file %(file)s, the search tag '
'%(tag)s was not found so the assumption is that no fast forward is to '
'take place.')
_LOG.debug(msg, {'file': self.metadata_file_path, 'tag': start_tag})
return
content += content_buffer
index = content.find(start_tag)
start_offset = index
# Find the end offset
content = ''
index = -1
self.original_file_handle.seek(0, os.SEEK_END)
while index < 0:
amount_to_read = min(BUFFER_SIZE, self.original_file_handle.tell())
self.original_file_handle.seek(-amount_to_read, os.SEEK_CUR)
content_buffer = self.original_file_handle.read(amount_to_read)
if not content_buffer:
raise Exception(_('Error: %(tag)s not found in the xml file.')
% {'tag': end_tag})
bytes_read = len(content_buffer)
self.original_file_handle.seek(-bytes_read, os.SEEK_CUR)
content = content_buffer + content
index = content.rfind(end_tag)
end_offset = self.original_file_handle.tell() + index
# stream out the content
self.original_file_handle.seek(start_offset)
bytes_to_read = end_offset - start_offset
content_buffer = self.original_file_handle.read(BUFFER_SIZE)
while bytes_to_read > 0:
buffer_size = len(content_buffer)
if buffer_size > bytes_to_read:
content_buffer = content_buffer[:bytes_to_read]
self.metadata_file_handle.write(content_buffer)
bytes_to_read -= buffer_size
content_buffer = self.original_file_handle.read(BUFFER_SIZE)
def _close_metadata_file_handle(self):
"""
Close any open file handles and remove the original file if a new one
was generated
"""
super(FastForwardXmlFileContext, self)._close_metadata_file_handle()
# Close & remove the existing file that was copied
if self.fast_forward:
if not self._is_closed(self.original_file_handle):
self.original_file_handle.close()
# We will always have renamed the original file so remove it
os.unlink(self.existing_file)
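

# Illustrative usage sketch (not part of the original module): the contexts above are
# meant to be driven through `with`, which calls initialize()/finalize() automatically.
# The path is arbitrary, 'sha256' is assumed to be registered in CHECKSUM_FUNCTIONS,
# and JSONArrayFileContext is normally subclassed so add_unit_metadata() writes real data.
#
#   with JSONArrayFileContext('/tmp/units.json', checksum_type='sha256') as context:
#       for unit in units:                  # iterate over real units here
#           context.add_unit_metadata(unit)
#   # finalize() renames the file to a checksum-prefixed name, e.g. 'abc123...-units.json',
#   # and stores the digest in context.checksum.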
| gpl-2.0 | -1,971,907,027,029,194,800 | 37.635535 | 100 | 0.583515 | false |
nditech/elections | apollo/participants/utils.py | 1 | 1081 | from apollo import services
def update_participant_completion_rating(participant):
    forms = services.forms.find(form_type='CHECKLIST')
    submissions = services.submissions.find(
        contributor=participant,
        form__in=forms,
        submission_type='O'
    )
    numerator = 0
    denominator = 0
    completion_map = {
        'Missing': 0,
        'Partial': 1,
        'Complete': 2,
        'Conflict': 2  # TODO: find a better way to compute the ratings
    }
    if submissions.count() == 0:
        participant.completion_rating = 1
    else:
        for submission in submissions:
            completion_values = [
                completion_map[i] for i in
                submission.completion.values()
            ]
            denominator += len(submission.form.groups) * 2.0
            numerator += sum(completion_values)
        try:
            participant.completion_rating = (numerator / denominator)
        except ZeroDivisionError:
            # this should never happen
            participant.completion_rating = 1
    participant.save()
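

# Illustrative sketch (not part of the original module): the rating reduces to
# sum(per-group scores) / (2.0 * number of groups), accumulated over all of a
# participant's checklist submissions. The values below are made up.
#
#   completion_map = {'Missing': 0, 'Partial': 1, 'Complete': 2, 'Conflict': 2}
#   one_submission = {'group A': 'Complete', 'group B': 'Partial'}
#   rating = sum(completion_map[v] for v in one_submission.values()) / (len(one_submission) * 2.0)
#   assert rating == 0.75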
| gpl-3.0 | -7,611,908,437,965,738,000 | 28.216216 | 71 | 0.590194 | false |
orbitinstasis/pifun | continuous_recordings_tester.py | 1 | 2870 | #!/usr/bin/env python3
# call with python3
# test [1] times; stream from [2]; play on [3]
import os
import time
import sys
import threading
import saleae
# from gtts import gTTS
# TEST_SECONDS = 10
TEST_LOOPS = int(sys.argv[1])
TTY_SOURCE = "/dev/" + str(sys.argv[2])
TTY_DESTINATION = "/dev/" + str(sys.argv[3])
TTY_KILL_OMX = "/dev/" + str(sys.argv[4])
folder = time.strftime('%Y-%m-%d--%H-%M-%S')
os.mkdir(folder)
def _sendToTty(_input, _recipient):
    os.system("ttyecho -n " + _recipient + " " + _input)
    return


def _killBackend():
    # _sendToTty("echo \"Debug\"", TTY_SOURCE)
    _sendToTty("./bBENSSHIT_2.sh", "/dev/ttys004")
    return


def _startBackend():
    # _sendToTty("echo \"Debug\"", TTY_SOURCE)
    _sendToTty("./bBENSSHIT.sh", TTY_SOURCE)  # NEED THIS TO BE THE MASSIVE STREAM START COMMAND
    return


def _probe():
    s = saleae.Saleae()
    s.set_capture_seconds(5)  # POSSIBLY CHANGE
    s.set_trigger_one_channel(2, saleae.Trigger.Posedge)
    path = os.path.abspath(os.path.join(folder, "Test " + str(i) + "; " + folder))
    s.capture_to_file(path)
    return


def _testInfernoSide():
    time.sleep(4)  # POSSIBLY CHANGE i have it so that the scope is nice and ready before playing
    _startBackend()
    time.sleep(2)
    _sayShit("Test " + str(i))
    time.sleep(23)  # POSSIBLY CHANGE we want this to be quite long now with streamer
    _killBackend()
    return


def _startWaitKillOMXplayer():
    time.sleep(9)  # POSSIBLY CHANGE
    _sendToTty("omxplayer --live udp://239.0.0.1:1234", TTY_DESTINATION)
    # _sendToTty("echo \"Debug\"", TTY_SOURCE) # POSSIBLY CHANGE
    time.sleep(17)
    _sendToTty("killall omxplayer.bin", TTY_KILL_OMX)
    return


def _startProbeThread():
    try:
        threading.Thread(target=_probe).start()
    except:
        print("Error: unable to start thread")
    return


def _startInfernoThread():
    try:
        threading.Thread(target=_testInfernoSide).start()
    except:
        print("Error: unable to start thread")
    return


def _startOMXThread():
    try:
        threading.Thread(target=_startWaitKillOMXplayer).start()
    except:
        print("Error: unable to start thread")
    return


def _sayShit(message):
    # tts = gTTS(text=message, lang='en')
    # tts.save("Audio.mp3")
    # os.system("mpg321 -q Audio.mp3")
    return
# _sayShit("Sup' my main nigga! Lets start, I Hope shit works!")
print("\n\nTest folder " + folder + "\n")
for i in range(TEST_LOOPS):
print("Test: " + str(i) + "\n")
_sendToTty("echo \"Test number: " + str(i) + "\"", TTY_SOURCE)
_sendToTty("echo \"Test number: " + str(i) + "\"", TTY_DESTINATION)
_sendToTty("echo \"Test number: " + str(i) + "\"", TTY_KILL_OMX)
_startProbeThread()
_startInfernoThread()
_startOMXThread()
time.sleep(36) # POSSIBLY CHANGE
# os.system("rm *.mp3")
sys.exit()
# TEST THIS need to change the play durations in arecord apay ffmpeg raspivid etc
# change the contents of killBackend and backend if necessary
| gpl-3.0 | 3,837,845,235,144,074,000 | 25.574074 | 100 | 0.669686 | false |
mrjimenez/JTAG | extras/python/Uploader.py | 1 | 4930 | #!/usr/bin/python3
# coding: utf-8
import os
import string
import sys
import time
try:
import serial
except ImportError:
print("Error importing pyserial. Please check if it is installed.")
sys.exit(1)
class Uploader(object):
"""Uploads a XSVF file to the arduino board.
"""
# Create a translation array of printable characters
_printable_chars = string.printable
_translate_str = ''.join(
[(chr(x) in _printable_chars) and chr(x) or '.' for x in range(256)])
@staticmethod
def add_arguments(p):
"""Adds the necessary arguments to the parser."""
p.add_argument(
'-p', '--port',
default='/dev/ttyACM0',
help='Serial port device name'
' (default=%(default)s)')
p.add_argument(
'-b', '--baud',
default=115200,
type=int,
help='BAUD rate'
' (type %(type)s, default=%(default)s)')
def __init__(self, args):
self._args = args
self._serial = serial.Serial(port=args.port, baudrate=args.baud)
# Help printing new lines
self._need_lf = False
#
self._file_size = 0
# Hashes
self._sum = 0
# To compute the elapsed time
self._start_time = 0
# Error code
self._error_code = 0
@property
def error_code(self):
return self._error_code
@error_code.setter
def error_code(self, value):
self._error_code = value
def reset_arduino(self):
"""Resets the arduino and clear any garbage on the serial port."""
self._serial.setDTR(False)
time.sleep(1)
self._serial.flushInput()
self._serial.flushOutput()
self._serial.setDTR(True)
self._start_time = 0
def print_lf(self):
if self._need_lf:
self._need_lf = False
            print()
def initialize_hashes(self):
self._sum = 0
def update_hashes(self, s):
for c in s:
self._sum += ord(c)
def print_hashes(self):
cksum = (-self._sum) & 0xFF
if self._args.debug > 1:
print(' Expected checksum: 0x%02X/%lu.' %
(cksum, self._file_size))
print(' Expected sum: 0x%08lX/%lu.' %
(self._sum, self._file_size))
if self._start_time > 0:
print('Elapsed time: %.02f seconds.' %
(time.time() - self._start_time))
def upload_one_file(self, fd):
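        # Protocol note (as implemented by the handlers below): the Arduino
        # sketch drives the transfer with single-letter commands read back
        # over the serial port:
        #   'S<n>'          - send the next <n> bytes of XSVF data
        #   'R'             - ready; reset hashes and start the clock
        #   'Q<code>,<msg>' - quit with an error code and message
        #   'D<text>'       - device description
        #   '!<text>'       - important message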
self.reset_arduino()
self._file_size = os.fstat(fd.fileno()).st_size
bytes_written = 0
while True:
line = self._serial.readline().strip()
if not line:
continue
command = line[0]
argument = line[1:]
if command == 'S':
num_bytes = int(argument)
xsvf_data = fd.read(num_bytes)
bytes_written += len(xsvf_data)
self.update_hashes(xsvf_data)
xsvf_data += chr(0xff) * (num_bytes - len(xsvf_data))
self._serial.write(xsvf_data)
if self._args.debug > 1:
print('\rSent: %8d bytes, %8d remaining' %
(bytes_written, self._file_size - bytes_written), end='')
sys.stdout.flush()
self._need_lf = True
elif command == 'R':
self.initialize_hashes()
if self._args.debug > 1:
print('File: %s' % os.path.realpath(fd.name))
print('Ready to send %d bytes.' % self._file_size)
self._start_time = time.time()
elif command == 'Q':
self.print_lf()
# Split the argument. The first field is the error code,
# the next field is the error message.
args = argument.split(',')
self.error_code = int(args[0])
if self._args.debug > 1:
print('Quit: {1:s} ({0:d}).'.format(
self.error_code, args[1]))
self.print_hashes()
return self.error_code == 0
elif command == 'D':
if self._args.debug > 0:
self.print_lf()
print('Device:', argument)
elif command == '!':
if self._args.debug > 0:
self.print_lf()
print('IMPORTANT:', argument)
else:
self.print_lf()
print('Unrecognized line:',
line.translate(Uploader._translate_str))
def upload_all_files(self, fd_list):
ok = True
for fd in fd_list:
with fd:
ok = self.upload_one_file(fd)
if not ok:
break
return ok
| bsd-2-clause | 7,011,194,952,468,164,000 | 31.222222 | 83 | 0.486207 | false |
ymap/aioredis | tests/pool_test.py | 1 | 15568 | import asyncio
import pytest
import async_timeout
from unittest.mock import patch
from aioredis import (
ReplyError,
PoolClosedError,
ConnectionClosedError,
ConnectionsPool,
MaxClientsError,
)
def _assert_defaults(pool):
assert isinstance(pool, ConnectionsPool)
assert pool.minsize == 1
assert pool.maxsize == 10
assert pool.size == 1
assert pool.freesize == 1
assert pool._close_waiter is None
def test_connect(pool):
_assert_defaults(pool)
def test_global_loop(create_pool, loop, server):
asyncio.set_event_loop(loop)
pool = loop.run_until_complete(create_pool(
server.tcp_address))
_assert_defaults(pool)
@pytest.mark.run_loop
async def test_clear(pool):
_assert_defaults(pool)
await pool.clear()
assert pool.freesize == 0
@pytest.mark.run_loop
@pytest.mark.parametrize('minsize', [None, -100, 0.0, 100])
async def test_minsize(minsize, create_pool, loop, server):
with pytest.raises(AssertionError):
await create_pool(
server.tcp_address,
minsize=minsize, maxsize=10, loop=loop)
@pytest.mark.run_loop
@pytest.mark.parametrize('maxsize', [None, -100, 0.0, 1])
async def test_maxsize(maxsize, create_pool, loop, server):
with pytest.raises(AssertionError):
await create_pool(
server.tcp_address,
minsize=2, maxsize=maxsize, loop=loop)
@pytest.mark.run_loop
async def test_create_connection_timeout(create_pool, loop, server):
with patch.object(loop, 'create_connection') as\
open_conn_mock:
open_conn_mock.side_effect = lambda *a, **kw: asyncio.sleep(0.2,
loop=loop)
with pytest.raises(asyncio.TimeoutError):
await create_pool(
server.tcp_address, loop=loop,
create_connection_timeout=0.1)
def test_no_yield_from(pool):
with pytest.raises(RuntimeError):
with pool:
pass # pragma: no cover
@pytest.mark.run_loop
async def test_simple_command(create_pool, loop, server):
pool = await create_pool(
server.tcp_address,
minsize=10, loop=loop)
with (await pool) as conn:
msg = await conn.execute('echo', 'hello')
assert msg == b'hello'
assert pool.size == 10
assert pool.freesize == 9
assert pool.size == 10
assert pool.freesize == 10
@pytest.mark.run_loop
async def test_create_new(create_pool, loop, server):
pool = await create_pool(
server.tcp_address,
minsize=1, loop=loop)
assert pool.size == 1
assert pool.freesize == 1
with (await pool):
assert pool.size == 1
assert pool.freesize == 0
with (await pool):
assert pool.size == 2
assert pool.freesize == 0
assert pool.size == 2
assert pool.freesize == 2
@pytest.mark.run_loop
async def test_create_constraints(create_pool, loop, server):
pool = await create_pool(
server.tcp_address,
minsize=1, maxsize=1, loop=loop)
assert pool.size == 1
assert pool.freesize == 1
with (await pool):
assert pool.size == 1
assert pool.freesize == 0
with pytest.raises(asyncio.TimeoutError):
await asyncio.wait_for(pool.acquire(),
timeout=0.2,
loop=loop)
@pytest.mark.run_loop
async def test_create_no_minsize(create_pool, loop, server):
pool = await create_pool(
server.tcp_address,
minsize=0, maxsize=1, loop=loop)
assert pool.size == 0
assert pool.freesize == 0
with (await pool):
assert pool.size == 1
assert pool.freesize == 0
with pytest.raises(asyncio.TimeoutError):
await asyncio.wait_for(pool.acquire(),
timeout=0.2,
loop=loop)
assert pool.size == 1
assert pool.freesize == 1
@pytest.mark.run_loop
async def test_create_pool_cls(create_pool, loop, server):
class MyPool(ConnectionsPool):
pass
pool = await create_pool(
server.tcp_address,
loop=loop,
pool_cls=MyPool)
assert isinstance(pool, MyPool)
@pytest.mark.run_loop
async def test_create_pool_cls_invalid(create_pool, loop, server):
with pytest.raises(AssertionError):
await create_pool(
server.tcp_address,
loop=loop,
pool_cls=type)
@pytest.mark.run_loop
async def test_release_closed(create_pool, loop, server):
pool = await create_pool(
server.tcp_address,
minsize=1, loop=loop)
assert pool.size == 1
assert pool.freesize == 1
with (await pool) as conn:
conn.close()
await conn.wait_closed()
assert pool.size == 0
assert pool.freesize == 0
@pytest.mark.run_loop
async def test_release_pending(create_pool, loop, server):
pool = await create_pool(
server.tcp_address,
minsize=1, loop=loop)
assert pool.size == 1
assert pool.freesize == 1
with pytest.logs('aioredis', 'WARNING') as cm:
with (await pool) as conn:
try:
await asyncio.wait_for(
conn.execute(
b'blpop',
b'somekey:not:exists',
b'0'),
0.1,
loop=loop)
except asyncio.TimeoutError:
pass
assert pool.size == 0
assert pool.freesize == 0
assert cm.output == [
'WARNING:aioredis:Connection <RedisConnection [db:0]>'
' has pending commands, closing it.'
]
@pytest.mark.run_loop
async def test_release_bad_connection(create_pool, create_redis, loop, server):
pool = await create_pool(
server.tcp_address,
loop=loop)
conn = await pool.acquire()
assert conn.address[0] in ('127.0.0.1', '::1')
assert conn.address[1] == server.tcp_address.port
other_conn = await create_redis(
server.tcp_address,
loop=loop)
with pytest.raises(AssertionError):
pool.release(other_conn)
pool.release(conn)
other_conn.close()
await other_conn.wait_closed()
@pytest.mark.run_loop
async def test_select_db(create_pool, loop, server):
pool = await create_pool(
server.tcp_address,
loop=loop)
await pool.select(1)
with (await pool) as conn:
assert conn.db == 1
@pytest.mark.run_loop
async def test_change_db(create_pool, loop, server):
pool = await create_pool(
server.tcp_address,
minsize=1, db=0,
loop=loop)
assert pool.size == 1
assert pool.freesize == 1
with (await pool) as conn:
await conn.select(1)
assert pool.size == 0
assert pool.freesize == 0
with (await pool):
assert pool.size == 1
assert pool.freesize == 0
await pool.select(1)
assert pool.db == 1
assert pool.size == 1
assert pool.freesize == 0
assert pool.size == 0
assert pool.freesize == 0
assert pool.db == 1
@pytest.mark.run_loop
async def test_change_db_errors(create_pool, loop, server):
pool = await create_pool(
server.tcp_address,
minsize=1, db=0,
loop=loop)
with pytest.raises(TypeError):
await pool.select(None)
assert pool.db == 0
with (await pool):
pass
assert pool.size == 1
assert pool.freesize == 1
with pytest.raises(TypeError):
await pool.select(None)
assert pool.db == 0
with pytest.raises(ValueError):
await pool.select(-1)
assert pool.db == 0
with pytest.raises(ReplyError):
await pool.select(100000)
assert pool.db == 0
@pytest.mark.xfail(reason="Need to refactor this test")
@pytest.mark.run_loop
async def test_select_and_create(create_pool, loop, server):
# trying to model situation when select and acquire
# called simultaneously
# but acquire freezes on _wait_select and
# then continues with propper db
# TODO: refactor this test as there's no _wait_select any more.
with async_timeout.timeout(10, loop=loop):
pool = await create_pool(
server.tcp_address,
minsize=1, db=0,
loop=loop)
db = 0
while True:
db = (db + 1) & 1
_, conn = await asyncio.gather(pool.select(db),
pool.acquire(),
loop=loop)
assert pool.db == db
pool.release(conn)
if conn.db == db:
break
# await asyncio.wait_for(test(), 3, loop=loop)
@pytest.mark.run_loop
async def test_response_decoding(create_pool, loop, server):
pool = await create_pool(
server.tcp_address,
encoding='utf-8', loop=loop)
assert pool.encoding == 'utf-8'
with (await pool) as conn:
await conn.execute('set', 'key', 'value')
with (await pool) as conn:
res = await conn.execute('get', 'key')
assert res == 'value'
@pytest.mark.run_loop
async def test_hgetall_response_decoding(create_pool, loop, server):
pool = await create_pool(
server.tcp_address,
encoding='utf-8', loop=loop)
assert pool.encoding == 'utf-8'
with (await pool) as conn:
await conn.execute('del', 'key1')
await conn.execute('hmset', 'key1', 'foo', 'bar')
await conn.execute('hmset', 'key1', 'baz', 'zap')
with (await pool) as conn:
res = await conn.execute('hgetall', 'key1')
assert res == ['foo', 'bar', 'baz', 'zap']
@pytest.mark.run_loop
async def test_crappy_multiexec(create_pool, loop, server):
pool = await create_pool(
server.tcp_address,
encoding='utf-8', loop=loop,
minsize=1, maxsize=1)
with (await pool) as conn:
await conn.execute('set', 'abc', 'def')
await conn.execute('multi')
await conn.execute('set', 'abc', 'fgh')
assert conn.closed is True
with (await pool) as conn:
value = await conn.execute('get', 'abc')
assert value == 'def'
@pytest.mark.run_loop
async def test_pool_size_growth(create_pool, server, loop):
pool = await create_pool(
server.tcp_address,
loop=loop,
minsize=1, maxsize=1)
done = set()
tasks = []
async def task1(i):
with (await pool):
assert pool.size <= pool.maxsize
assert pool.freesize == 0
await asyncio.sleep(0.2, loop=loop)
done.add(i)
async def task2():
with (await pool):
assert pool.size <= pool.maxsize
assert pool.freesize >= 0
assert done == {0, 1}
for _ in range(2):
tasks.append(asyncio.ensure_future(task1(_), loop=loop))
tasks.append(asyncio.ensure_future(task2(), loop=loop))
await asyncio.gather(*tasks, loop=loop)
@pytest.mark.run_loop
async def test_pool_with_closed_connections(create_pool, server, loop):
pool = await create_pool(
server.tcp_address,
loop=loop,
minsize=1, maxsize=2)
assert 1 == pool.freesize
conn1 = pool._pool[0]
conn1.close()
assert conn1.closed is True
assert 1 == pool.freesize
with (await pool) as conn2:
assert conn2.closed is False
assert conn1 is not conn2
@pytest.mark.run_loop
async def test_pool_close(create_pool, server, loop):
pool = await create_pool(
server.tcp_address, loop=loop)
assert pool.closed is False
with (await pool) as conn:
assert (await conn.execute('ping')) == b'PONG'
pool.close()
await pool.wait_closed()
assert pool.closed is True
with pytest.raises(PoolClosedError):
with (await pool) as conn:
assert (await conn.execute('ping')) == b'PONG'
@pytest.mark.run_loop
async def test_pool_close__used(create_pool, server, loop):
pool = await create_pool(
server.tcp_address, loop=loop)
assert pool.closed is False
with (await pool) as conn:
pool.close()
await pool.wait_closed()
assert pool.closed is True
with pytest.raises(ConnectionClosedError):
await conn.execute('ping')
@pytest.mark.run_loop
@pytest.redis_version(2, 8, 0, reason="maxclients config setting")
async def test_pool_check_closed_when_exception(
create_pool, create_redis, start_server, loop):
server = start_server('server-small')
redis = await create_redis(server.tcp_address, loop=loop)
await redis.config_set('maxclients', 2)
errors = (MaxClientsError, ConnectionClosedError, ConnectionError)
with pytest.logs('aioredis', 'DEBUG') as cm:
with pytest.raises(errors):
await create_pool(address=tuple(server.tcp_address),
minsize=3, loop=loop)
assert len(cm.output) >= 3
connect_msg = (
"DEBUG:aioredis:Creating tcp connection"
" to ('localhost', {})".format(server.tcp_address.port))
assert cm.output[:2] == [connect_msg, connect_msg]
assert cm.output[-1] == "DEBUG:aioredis:Closed 1 connection(s)"
@pytest.mark.run_loop
async def test_pool_get_connection(create_pool, server, loop):
pool = await create_pool(server.tcp_address, minsize=1, maxsize=2,
loop=loop)
res = await pool.execute("set", "key", "val")
assert res == b'OK'
res = await pool.execute_pubsub("subscribe", "channel:1")
assert res == [[b"subscribe", b"channel:1", 1]]
res = await pool.execute("getset", "key", "value")
assert res == b'val'
res = await pool.execute_pubsub("subscribe", "channel:2")
assert res == [[b"subscribe", b"channel:2", 2]]
res = await pool.execute("get", "key")
assert res == b'value'
@pytest.mark.run_loop
async def test_pool_get_connection_with_pipelining(create_pool, server, loop):
pool = await create_pool(server.tcp_address, minsize=1, maxsize=2,
loop=loop)
fut1 = pool.execute('set', 'key', 'val')
fut2 = pool.execute_pubsub("subscribe", "channel:1")
fut3 = pool.execute('getset', 'key', 'next')
fut4 = pool.execute_pubsub("subscribe", "channel:2")
fut5 = pool.execute('get', 'key')
res = await fut1
assert res == b'OK'
res = await fut2
assert res == [[b"subscribe", b"channel:1", 1]]
res = await fut3
assert res == b'val'
res = await fut4
assert res == [[b"subscribe", b"channel:2", 2]]
res = await fut5
assert res == b'next'
@pytest.mark.run_loop
async def test_pool_idle_close(create_pool, start_server, loop):
server = start_server('idle')
conn = await create_pool(server.tcp_address, minsize=2, loop=loop)
ok = await conn.execute("config", "set", "timeout", 1)
assert ok == b'OK'
await asyncio.sleep(2, loop=loop)
assert (await conn.execute('ping')) == b'PONG'
@pytest.mark.run_loop
async def test_await(create_pool, server, loop):
pool = await create_pool(
server.tcp_address,
minsize=10, loop=loop)
with await pool as conn:
msg = await conn.execute('echo', 'hello')
assert msg == b'hello'
@pytest.mark.run_loop
async def test_async_with(create_pool, server, loop):
pool = await create_pool(
server.tcp_address,
minsize=10, loop=loop)
async with pool.get() as conn:
msg = await conn.execute('echo', 'hello')
assert msg == b'hello'
| mit | -4,112,878,076,831,393,000 | 27.101083 | 79 | 0.601747 | false |
oVirt/ovirt-hosted-engine-setup | src/plugins/gr-he-common/network/gateway.py | 1 | 3548 | #
# ovirt-hosted-engine-setup -- ovirt hosted engine setup
# Copyright (C) 2013-2019 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
"""
gateway configuration plugin.
"""
import gettext
import os
import socket
import struct
from otopi import plugin
from otopi import util
from ovirt_hosted_engine_setup import constants as ohostedcons
def _(m):
return gettext.dgettext(message=m, domain='ovirt-hosted-engine-setup')
@util.export
class Plugin(plugin.PluginBase):
"""
gateway configuration plugin.
"""
ROUTE_DESTINATION = 1
ROUTE_GW = 2
def __init__(self, context):
super(Plugin, self).__init__(context=context)
self._enabled = True
def _get_default_gw(self):
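        # /proc/net/route reports each route's gateway as a 32-bit value in
        # the host's native byte order, printed as 8 hex digits; for example
        # a GW field of '0102A8C0' on a little-endian host decodes to
        # '192.168.2.1' (illustrative value only).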
gateway = ''
with open('/proc/net/route', 'r') as f:
lines = f.read().splitlines()
for line in lines:
data = line.split()
if data[self.ROUTE_DESTINATION] == '00000000':
gateway = socket.inet_ntoa(
struct.pack(
'I',
int(data[self.ROUTE_GW], 16)
)
)
break
return gateway
@plugin.event(
stage=plugin.Stages.STAGE_INIT,
)
def _init(self):
self.environment.setdefault(
ohostedcons.NetworkEnv.GATEWAY,
None
)
@plugin.event(
stage=plugin.Stages.STAGE_SETUP,
)
def _setup(self):
if (
os.environ.get('proxy') or
os.environ.get('http_proxy') or
os.environ.get('https_proxy')
):
self.logger.warning(_(
'It seems that this host is configured to use a proxy, '
'please ensure that this host will be able to reach the '
                'engine VM through that proxy or add a specific exception.'
))
@plugin.event(
stage=plugin.Stages.STAGE_CUSTOMIZATION,
after=(
ohostedcons.Stages.DIALOG_TITLES_S_NETWORK,
),
before=(
ohostedcons.Stages.DIALOG_TITLES_E_NETWORK,
),
name=ohostedcons.Stages.CONFIG_GATEWAY,
)
def _customization(self):
interactive = self.environment[
ohostedcons.NetworkEnv.GATEWAY
] is None
if interactive:
self.environment[
ohostedcons.NetworkEnv.GATEWAY
] = self.dialog.queryString(
name='OVEHOSTED_GATEWAY',
note=_(
'Please indicate the gateway IP address '
'[@DEFAULT@]: '
),
prompt=True,
caseSensitive=True,
default=self._get_default_gw(),
)
# vim: expandtab tabstop=4 shiftwidth=4
| lgpl-2.1 | -8,138,350,650,244,593,000 | 27.845528 | 78 | 0.578072 | false |
mdasifhasan/Experiments_HTN_Planner | PyHop/pyhop.py | 1 | 9847 | """
Pyhop, version 1.2.2 -- a simple SHOP-like planner written in Python.
Author: Dana S. Nau, 2013.05.31
Copyright 2013 Dana S. Nau - http://www.cs.umd.edu/~nau
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Pyhop should work correctly in both Python 2.7 and Python 3.2.
For examples of how to use it, see the example files that come with Pyhop.
Pyhop provides the following classes and functions:
- foo = State('foo') tells Pyhop to create an empty state object named 'foo'.
To put variables and values into it, you should do assignments such as
foo.var1 = val1
- bar = Goal('bar') tells Pyhop to create an empty goal object named 'bar'.
To put variables and values into it, you should do assignments such as
bar.var1 = val1
- print_state(foo) will print the variables and values in the state foo.
- print_goal(foo) will print the variables and values in the goal foo.
- declare_operators(o1, o2, ..., ok) tells Pyhop that o1, o2, ..., ok
are all of the planning operators; this supersedes any previous call
to declare_operators.
- print_operators() will print out the list of available operators.
- declare_methods('foo', m1, m2, ..., mk) tells Pyhop that m1, m2, ..., mk
are all of the methods for tasks having 'foo' as their taskname; this
supersedes any previous call to declare_methods('foo', ...).
- print_methods() will print out a list of all declared methods.
- pyhop(state1,tasklist) tells Pyhop to find a plan for accomplishing tasklist
(a list of tasks), starting from an initial state state1, using whatever
methods and operators you declared previously.
- In the above call to pyhop, you can add an optional 3rd argument called
'verbose' that tells pyhop how much debugging printout it should provide:
- if verbose = 0 (the default), pyhop returns the solution but prints nothing;
- if verbose = 1, it prints the initial parameters and the answer;
- if verbose = 2, it also prints a message on each recursive call;
- if verbose = 3, it also prints info about what it's computing.
"""
# Pyhop's planning algorithm is very similar to the one in SHOP and JSHOP
# (see http://www.cs.umd.edu/projects/shop). Like SHOP and JSHOP, Pyhop uses
# HTN methods to decompose tasks into smaller and smaller subtasks, until it
# finds tasks that correspond directly to actions. But Pyhop differs from
# SHOP and JSHOP in several ways that should make it easier to use Pyhop
# as part of other programs:
#
# (1) In Pyhop, one writes methods and operators as ordinary Python functions
# (rather than using a special-purpose language, as in SHOP and JSHOP).
#
# (2) Instead of representing states as collections of logical assertions,
# Pyhop uses state-variable representation: a state is a Python object
# that contains variable bindings. For example, to define a state in
# which box b is located in room r1, you might write something like this:
# s = State()
# s.loc['b'] = 'r1'
#
# (3) You also can define goals as Python objects. For example, to specify
# that a goal of having box b in room r2, you might write this:
# g = Goal()
# g.loc['b'] = 'r2'
# Like most HTN planners, Pyhop will ignore g unless you explicitly
# tell it what to do with g. You can do that by referring to g in
# your methods and operators, and passing g to them as an argument.
# In the same fashion, you could tell Pyhop to achieve any one of
# several different goals, or to achieve them in some desired sequence.
#
# (4) Unlike SHOP and JSHOP, Pyhop doesn't include a Horn-clause inference
# engine for evaluating preconditions of operators and methods. So far,
# I've seen no need for it; I've found it easier to write precondition
# evaluations directly in Python. But I could consider adding such a
# feature if someone convinces me that it's really necessary.
#
# Accompanying this file are several files that give examples of how to use
# Pyhop. To run them, launch python and type "import blocks_world_examples"
# or "import simple_travel_example".
from __future__ import print_function
import copy,sys, pprint
############################################################
# States and goals
class State():
"""A state is just a collection of variable bindings."""
def __init__(self,name):
self.__name__ = name
class Goal():
"""A goal is just a collection of variable bindings."""
def __init__(self,name):
self.__name__ = name
### print_state and print_goal are identical except for the name
def print_state(state,indent=4):
"""Print each variable in state, indented by indent spaces."""
if state != False:
for (name,val) in vars(state).items():
if isinstance(val, State):
# sys.stdout.write(state.__name__ + '.' + name + "\n")
print_state(val)
continue
if name != '__name__':
for x in range(indent): sys.stdout.write(' ')
sys.stdout.write(state.__name__ + '.' + name)
print(' =', val)
else: print('False')
def print_goal(goal,indent=4):
"""Print each variable in goal, indented by indent spaces."""
if goal != False:
for (name,val) in vars(goal).items():
if name != '__name__':
for x in range(indent): sys.stdout.write(' ')
sys.stdout.write(goal.__name__ + '.' + name)
print(' =', val)
else: print('False')
############################################################
# Helper functions that may be useful in domain models
def forall(seq,cond):
"""True if cond(x) holds for all x in seq, otherwise False."""
for x in seq:
if not cond(x): return False
return True
def find_if(cond,seq):
"""
Return the first x in seq such that cond(x) holds, if there is one.
Otherwise return None.
"""
for x in seq:
if cond(x): return x
return None
############################################################
# Commands to tell Pyhop what the operators and methods are
operators = {}
methods = {}
def declare_operators(*op_list):
"""
Call this after defining the operators, to tell Pyhop what they are.
op_list must be a list of functions, not strings.
"""
operators.update({op.__name__:op for op in op_list})
return operators
def declare_methods(task_name,*method_list):
"""
Call this once for each task, to tell Pyhop what the methods are.
task_name must be a string.
method_list must be a list of functions, not strings.
"""
methods.update({task_name:list(method_list)})
return methods[task_name]
############################################################
# Commands to find out what the operators and methods are
def print_operators(olist=operators):
"""Print out the names of the operators"""
print('OPERATORS:', ', '.join(olist))
def print_methods(mlist=methods):
"""Print out a table of what the methods are for each task"""
print('{:<14}{}'.format('TASK:','METHODS:'))
for task in mlist:
print('{:<14}'.format(task) + ', '.join([f.__name__ for f in mlist[task]]))
############################################################
# The actual planner
def pyhop(state,tasks,verbose=0):
"""
Try to find a plan that accomplishes tasks in state.
If successful, return the plan. Otherwise return False.
"""
if verbose>0: print('** pyhop, verbose={}: **\n state = {}\n tasks = {}'.format(verbose, state.__name__, tasks))
result = seek_plan(state,tasks,[],0,verbose)
if verbose>0: print('** result =',result,'\n')
return result
def seek_plan(state,tasks,plan,depth,verbose=0):
"""
Workhorse for pyhop. state and tasks are as in pyhop.
- plan is the current partial plan.
- depth is the recursion depth, for use in debugging
- verbose is whether to print debugging messages
"""
if verbose>1: print('depth {} tasks {}'.format(depth,tasks))
if tasks == []:
if verbose>2: print('depth {} returns plan {}'.format(depth,plan))
return plan
task1 = tasks[0]
if verbose > 2: print("ah subtasks:", task1[0])
if task1[0] in operators:
if verbose>2: print('depth {} action {}'.format(depth,task1))
operator = operators[task1[0]]
newstate = operator(copy.deepcopy(state),*task1[1:])
if verbose>2:
print('depth {} new state:'.format(depth))
print_state(newstate)
if newstate:
solution = seek_plan(newstate,tasks[1:],plan+[task1],depth+1,verbose)
if solution != False:
return solution
if task1[0] in methods:
if verbose>2: print('depth {} method instance {}'.format(depth,task1))
relevant = methods[task1[0]]
for method in relevant:
subtasks = method(state,*task1[1:])
# Can't just say "if subtasks:", because that's wrong if subtasks == []
if verbose>2:
print('depth {} new tasks: {}'.format(depth,subtasks))
if subtasks != False:
solution = seek_plan(state,subtasks+tasks[1:],plan,depth+1,verbose)
if solution != False:
return solution
if verbose>2: print('depth {} returns failure'.format(depth))
return False
| gpl-3.0 | 8,083,552,663,818,885,000 | 39.191837 | 120 | 0.639078 | false |
YuanYuLin/iopcbs | BuildRoot/BuildRootCommandHandler_Prepare.py | 1 | 4713 | import os
import shutil
from DefconfigParser import DefconfigParser
from BuildRootCommandHandler import BuildRootCommandHandler
from ScriptBuilder import ScriptBuilder
class BuildRootCommandHandler_Prepare(BuildRootCommandHandler):
def __init__(self, next_command_handler):
BuildRootCommandHandler.__init__(self, next_command_handler)
def action(self, config_obj):
print 'BuildRootCommandHandler_Prepare'
self._generate_buildroot_defconfig(config_obj)
self._create_link_and_output(config_obj)
self._create_rootfs_skeleton(config_obj)
self._set_os_environ(config_obj)
self._create_build_script(config_obj)
self.do_next_command_handler(config_obj)
def _generate_buildroot_defconfig(self, config_obj):
project_buildroot_cfg = config_obj.get_project_top() + os.sep + config_obj.get_default_config_buildroot()
parser = DefconfigParser(config_obj.get_default_config_buildroot())
parser.parse()
parser.set_config("BR2_DEFCONFIG", config_obj.get_default_config_buildroot())
parser.set_config("BR2_DL_DIR", config_obj.get_download_top())
if config_obj.is_default_rootfs_override():
print "----------------------"
parser.set_config("BR2_ROOTFS_SKELETON_CUSTOM_PATH", config_obj.get_rootfs_skeleton())
parser.set_config("BR2_ROOTFS_OVERLAY", "")
else:
print "====================="
parser.set_config("BR2_ROOTFS_SKELETON_CUSTOM_PATH", "")
parser.set_config("BR2_ROOTFS_OVERLAY", config_obj.get_rootfs_skeleton())
parser.set_config("BR2_LINUX_KERNEL_CUSTOM_CONFIG_FILE", config_obj.get_default_config_linux())
parser.set_config("BR2_PACKAGE_BUSYBOX_CONFIG", config_obj.get_default_config_busybox())
parser.set_config("BR2_TARGET_GENERIC_ISSUE", "")
parser.set_config("BR2_TARGET_GENERIC_HOSTNAME", "")
#parser.show_configs()
parser.save_configs(config_obj.get_default_config_buildroot())
def _create_rootfs_skeleton(self, config_obj):
rootfs_top = config_obj.get_rootfs_skeleton()
if not os.path.exists(rootfs_top):
print 'Create ' + rootfs_top
os.makedirs(rootfs_top)
rootfs_list = config_obj.get_default_rootfs()
for rootfs_name in sorted(rootfs_list.keys()):
rootfs = rootfs_list[rootfs_name]
print 'Copy ' + rootfs
self._copy_tree(rootfs, rootfs_top)
def _copy_tree(self, src, dst):
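        # Local variant of shutil.copytree(): unlike the stdlib helper it
        # tolerates an already-existing destination tree (several default
        # rootfs skeletons are layered into the same target directory above)
        # and collects per-file errors instead of raising on the first one.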
names = os.listdir(src)
errors = []
for name in names:
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if os.path.islink(srcname):
linkto = os.readlink(srcname)
os.symlink(linkto, dstname)
elif os.path.isdir(srcname):
if not os.path.exists(dstname):
os.makedirs(dstname)
self._copy_tree(srcname, dstname)
else:
shutil.copy2(srcname, dstname)
# XXX What about devices, sockets etc.?
except (IOError, os.error) as why:
errors.append((srcname, dstname, str(why)))
try:
shutil.copystat(src, dst)
except OSError as why:
errors.extend((src, dst, str(why)))
def _create_link_and_output(self, config_obj):
output_top = config_obj.get_output_top()
output_link = config_obj.get_output_link()
if (not os.path.exists(output_top)) and (not os.path.exists(output_link)):
os.mkdir(output_top)
os.symlink(output_top, output_link)
print "create output and link"
elif not os.path.exists(output_top):
print "output is not exist ", output_top
elif not os.path.exists(output_link):
print "link is not exist ", output_link
else:
print "link and output are exist "
buildroot_defconfig = output_top + os.sep + ".config"
if not os.path.exists(buildroot_defconfig):
os.symlink(config_obj.get_default_config_buildroot(), buildroot_defconfig)
return 0
def _set_os_environ(self, config_obj):
os.environ["BR2_DEFCONFIG"] = config_obj.get_default_config_buildroot()
def _create_build_script(self, config_obj):
output_top = config_obj.get_output_top()
output_link = config_obj.get_output_link()
build_script_path = output_top + os.sep + "BuildScript"
builder = ScriptBuilder()
builder.generate_buildscript(config_obj, build_script_path)
| gpl-2.0 | -8,869,341,640,128,807,000 | 38.940678 | 113 | 0.610651 | false |
petermlm/ProbPy | tests/test_base.py | 1 | 2268 | from ProbPy import RandVar, Factor
class TestBase:
def __init__(self):
# Scalars
self.scalar = 10
self.scalarf = Factor([], [10])
# Binary variables
self.X = RandVar("X", ["T", "F"])
self.Y = RandVar("Y", ["T", "F"])
self.Z = RandVar("Z", ["T", "F"])
self.W = RandVar("W", ["T", "F"])
self.K = RandVar("K", ["T", "F"])
self.T = RandVar("T", ["T", "F"])
# Domains
self.X_domain = list(range(1, 3))
self.Y_domain = list(range(3, 5))
self.Z_domain = list(range(5, 7))
self.XY_domain = list(range(1, 5))
self.XZ_domain = list(range(5, 9))
self.ZW_domain = list(range(9, 13))
self.XYZ_domain = list(range(1, 9))
self.XYW_domain = list(range(9, 17))
self.XKW_domain = list(range(17, 25))
self.TKW_domain = list(range(25, 33))
# Factors
self.X_factor = Factor(self.X, self.X_domain)
self.Y_factor = Factor(self.Y, self.Y_domain)
self.Z_factor = Factor(self.Z, self.Z_domain)
self.XY_factor = Factor([self.X, self.Y], self.XY_domain)
self.XZ_factor = Factor([self.X, self.Z], self.XZ_domain)
self.ZW_factor = Factor([self.Z, self.W], self.ZW_domain)
self.XYZ_factor = Factor([self.X, self.Y, self.Z], self.XYZ_domain)
self.XYW_factor = Factor([self.X, self.Y, self.W], self.XYW_domain)
self.XKW_factor = Factor([self.X, self.K, self.W], self.XKW_domain)
self.TKW_factor = Factor([self.T, self.K, self.W], self.TKW_domain)
# Factors for normalization
self.X_factor_n = Factor(self.X, [1, 2])
self.XY_factor_n = Factor([self.X, self.Y], [1, 1, 2, 2])
self.XYZ_factor_n = Factor([self.X, self.Y, self.Z],
[1, 1, 2, 2, 3, 3, 4, 4])
# Distributions for expected value
self.X_dist = Factor(self.X, [0.8, 0.2])
self.Y_dist = Factor(self.Y, [0.1, 0.9])
self.XY_dist = Factor([self.X, self.Y], [0.1, 0.2, 0.3, 0.4])
# Function for expected values f(X)
self.x_ev = Factor(self.X, [10, 20])
self.y_ev = Factor(self.Y, [15, 25])
self.xy_ev = Factor([self.X, self.Y], [25, 35, 35, 45])
| mit | 3,184,444,580,707,223,600 | 36.8 | 75 | 0.525573 | false |
simgunz/anki | qt/aqt/progress.py | 1 | 7938 | # Copyright: Ankitects Pty Ltd and contributors
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
from __future__ import annotations
import time
from typing import Callable, Optional
import aqt.forms
from aqt.qt import *
from aqt.utils import TR, disable_help_button, tr
# Progress info
##########################################################################
class ProgressManager:
def __init__(self, mw: aqt.AnkiQt) -> None:
self.mw = mw
self.app = QApplication.instance()
self.inDB = False
self.blockUpdates = False
self._show_timer: Optional[QTimer] = None
self._win: Optional[ProgressDialog] = None
self._levels = 0
# Safer timers
##########################################################################
# A custom timer which avoids firing while a progress dialog is active
# (likely due to some long-running DB operation)
def timer(
self, ms: int, func: Callable, repeat: bool, requiresCollection: bool = True
) -> QTimer:
"""Create and start a standard Anki timer.
If the timer fires while a progress window is shown:
- if it is a repeating timer, it will wait the same delay again
- if it is non-repeating, it will try again in 100ms
If requiresCollection is True, the timer will not fire if the
collection has been unloaded. Setting it to False will allow the
timer to fire even when there is no collection, but will still
only fire when there is no current progress dialog."""
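        # Usage sketch (assuming `mw` is the AnkiQt main window, as seen from
        # an add-on, and `on_timeout` is some callable): fire once after 1s,
        # retrying 100ms later if a progress dialog happens to be open:
        #   mw.progress.timer(1000, on_timeout, repeat=False)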
def handler() -> None:
if requiresCollection and not self.mw.col:
# no current collection; timer is no longer valid
print(f"Ignored progress func as collection unloaded: {repr(func)}")
return
if not self._levels:
# no current progress; safe to fire
func()
else:
if repeat:
# skip this time; we'll fire again
pass
else:
# retry in 100ms
self.timer(100, func, False, requiresCollection)
t = QTimer(self.mw)
if not repeat:
t.setSingleShot(True)
qconnect(t.timeout, handler)
t.start(ms)
return t
# Creating progress dialogs
##########################################################################
def start(
self,
max: int = 0,
min: int = 0,
label: Optional[str] = None,
parent: Optional[QDialog] = None,
immediate: bool = False,
) -> Optional[ProgressDialog]:
self._levels += 1
if self._levels > 1:
return None
# setup window
parent = parent or self.app.activeWindow()
if not parent and self.mw.isVisible():
parent = self.mw
label = label or tr(TR.QT_MISC_PROCESSING)
self._win = ProgressDialog(parent)
self._win.form.progressBar.setMinimum(min)
self._win.form.progressBar.setMaximum(max)
self._win.form.progressBar.setTextVisible(False)
self._win.form.label.setText(label)
self._win.setWindowTitle("Anki")
self._win.setWindowModality(Qt.ApplicationModal)
self._win.setMinimumWidth(300)
self._setBusy()
self._shown: float = 0
self._counter = min
self._min = min
self._max = max
self._firstTime = time.time()
self._lastUpdate = time.time()
self._updating = False
self._show_timer = QTimer(self.mw)
self._show_timer.setSingleShot(True)
self._show_timer.start(immediate and 100 or 600)
qconnect(self._show_timer.timeout, self._on_show_timer)
return self._win
def update(
self,
label: Optional[str] = None,
value: Optional[int] = None,
process: bool = True,
maybeShow: bool = True,
max: Optional[int] = None,
) -> None:
# print self._min, self._counter, self._max, label, time.time() - self._lastTime
if not self.mw.inMainThread():
print("progress.update() called on wrong thread")
return
if self._updating:
return
if maybeShow:
self._maybeShow()
if not self._shown:
return
elapsed = time.time() - self._lastUpdate
if label:
self._win.form.label.setText(label)
self._max = max or 0
self._win.form.progressBar.setMaximum(self._max)
if self._max:
self._counter = value or (self._counter + 1)
self._win.form.progressBar.setValue(self._counter)
if process and elapsed >= 0.2:
self._updating = True
self.app.processEvents() # type: ignore #possibly related to https://github.com/python/mypy/issues/6910
self._updating = False
self._lastUpdate = time.time()
def finish(self) -> None:
self._levels -= 1
self._levels = max(0, self._levels)
if self._levels == 0:
if self._win:
self._closeWin()
self._unsetBusy()
if self._show_timer:
self._show_timer.stop()
self._show_timer = None
def clear(self) -> None:
"Restore the interface after an error."
if self._levels:
self._levels = 1
self.finish()
def _maybeShow(self) -> None:
if not self._levels:
return
if self._shown:
return
delta = time.time() - self._firstTime
if delta > 0.5:
self._showWin()
def _showWin(self) -> None:
self._shown = time.time()
self._win.show()
def _closeWin(self) -> None:
if self._shown:
while True:
# give the window system a second to present
# window before we close it again - fixes
# progress window getting stuck, especially
# on ubuntu 16.10+
elap = time.time() - self._shown
if elap >= 0.5:
break
self.app.processEvents(QEventLoop.ExcludeUserInputEvents) # type: ignore #possibly related to https://github.com/python/mypy/issues/6910
self._win.cancel()
self._win = None
self._shown = 0
def _setBusy(self) -> None:
self.mw.app.setOverrideCursor(QCursor(Qt.WaitCursor))
def _unsetBusy(self) -> None:
self.app.restoreOverrideCursor()
def busy(self) -> int:
"True if processing."
return self._levels
def _on_show_timer(self) -> None:
self._show_timer = None
self._showWin()
def want_cancel(self) -> bool:
win = self._win
if win:
return win.wantCancel
else:
return False
def set_title(self, title: str) -> None:
win = self._win
if win:
win.setWindowTitle(title)
class ProgressDialog(QDialog):
def __init__(self, parent: QWidget) -> None:
QDialog.__init__(self, parent)
disable_help_button(self)
self.form = aqt.forms.progress.Ui_Dialog()
self.form.setupUi(self)
self._closingDown = False
self.wantCancel = False
# required for smooth progress bars
self.form.progressBar.setStyleSheet("QProgressBar::chunk { width: 1px; }")
def cancel(self) -> None:
self._closingDown = True
self.hide()
def closeEvent(self, evt: QCloseEvent) -> None:
if self._closingDown:
evt.accept()
else:
self.wantCancel = True
evt.ignore()
def keyPressEvent(self, evt: QKeyEvent) -> None:
if evt.key() == Qt.Key_Escape:
evt.ignore()
self.wantCancel = True
| agpl-3.0 | -4,262,308,446,230,237,000 | 31.801653 | 153 | 0.546485 | false |
wackerly/faucet | faucet/faucet_dot1x.py | 1 | 2989 | """802.1x implementation for FAUCET."""
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2017 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import eventlet
eventlet.monkey_patch()
from ryu.lib import hub # pylint: disable=wrong-import-position
from chewie.chewie import Chewie # pylint: disable=wrong-import-position
from chewie.mac_address import MacAddress # pylint: disable=wrong-import-position
class FaucetDot1x(object):
"""Wrapper for experimental Chewie 802.1x authenticator."""
# TODO: support other credentials.
CREDENTIALS = {
'gary': 'microphone',
}
def __init__(self, logger, metrics, send_flow_msgs):
self.logger = logger
self.metrics = metrics
self._send_flow_msgs = send_flow_msgs
self._valve = None
self.dot1x_speaker = None
self.dot1x_intf = None
self.dot1x_port = None
def _create_dot1x_speaker(self):
chewie = Chewie(
self.dot1x_intf, self.CREDENTIALS,
self.logger, self.auth_handler,
MacAddress.from_string('00:00:00:00:00:01'))
hub.spawn(chewie.run)
return chewie
def auth_handler(self, address, _group_address):
"""Callback for when a successful auth happens."""
self.logger.info(
'Successful auth from MAC %s on %s' % (
str(address), self.dot1x_port))
flowmods = self._valve.add_authed_mac(
self.dot1x_port.number, str(address))
if flowmods:
self._send_flow_msgs(self._valve, flowmods)
def reset(self, valves):
"""Set up a dot1x speaker."""
# TODO: support multiple Valves and ports.
if self.dot1x_speaker is None:
for valve in list(valves.values()):
if valve.dp.dot1x and valve.dp.dot1x_ports():
self._valve = valve
self.dot1x_intf = self._valve.dp.dot1x['nfv_intf']
self.dot1x_port = self._valve.dp.dot1x_ports()[0]
self.dot1x_speaker = self._create_dot1x_speaker()
self.logger.info(
'dot1x enabled on %s %s, NFV interface %s' % (
self._valve.dp, self.dot1x_port, self.dot1x_intf))
break
| apache-2.0 | -77,410,599,712,950,910 | 37.818182 | 81 | 0.632653 | false |
pombreda/https-gitorious.org-appstream-software-center | softwarecenter/backend/channel.py | 1 | 12578 | # Copyright (C) 2010 Canonical
#
# Authors:
# Gary Lasker
# Michael Vogt
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import logging
import xapian
from gettext import gettext as _
from softwarecenter.distro import get_distro
from softwarecenter.enums import (SortMethods,
Icons,
ViewPages,
)
LOG = logging.getLogger(__name__)
class ChannelsManager(object):
def __init__(self, db, **kwargs):
self.distro = get_distro()
self.db = db
# public
@property
def channels(self):
return self._get_channels_from_db()
@property
def channels_installed_only(self):
return self._get_channels_from_db(True)
@classmethod
def channel_available(kls, channelname):
pass
# private
def _get_channels_from_db(self, installed_only=False):
"""
(internal) implements 'channels()' and 'channels_installed_only()'
properties
"""
distro_channel_origin = self.distro.get_distro_channel_name()
# gather the set of software channels and order them
other_channel_list = []
cached_origins = []
for channel_iter in self.db.xapiandb.allterms("XOL"):
if len(channel_iter.term) == 3:
continue
channel_name = channel_iter.term[3:]
channel_origin = ""
# get origin information for this channel
m = self.db.xapiandb.postlist_begin(channel_iter.term)
doc = self.db.xapiandb.get_document(m.get_docid())
for term_iter in doc.termlist():
if (term_iter.term.startswith("XOO") and
len(term_iter.term) > 3):
channel_origin = term_iter.term[3:]
break
LOG.debug("channel_name: %s" % channel_name)
LOG.debug("channel_origin: %s" % channel_origin)
if channel_origin not in cached_origins:
other_channel_list.append((channel_name, channel_origin))
cached_origins.append(channel_origin)
dist_channel = None
other_channels = []
unknown_channel = []
local_channel = None
for (channel_name, channel_origin) in other_channel_list:
if not channel_name:
unknown_channel.append(SoftwareChannel(
channel_name,
channel_origin,
None,
installed_only=installed_only))
elif channel_origin == distro_channel_origin:
dist_channel = (SoftwareChannel(
channel_name,
channel_origin,
None,
installed_only=installed_only))
elif channel_name == "notdownloadable":
if installed_only:
local_channel = SoftwareChannel(
channel_name,
None,
None,
installed_only=installed_only)
else:
other_channels.append(SoftwareChannel(
channel_name,
channel_origin,
None,
installed_only=installed_only))
# set them in order
channels = []
if dist_channel is not None:
channels.append(dist_channel)
channels.extend(other_channels)
channels.extend(unknown_channel)
if local_channel is not None:
channels.append(local_channel)
for channel in channels:
if installed_only:
channel._channel_view_id = ViewPages.INSTALLED
else:
channel._channel_view_id = ViewPages.AVAILABLE
return channels
class SoftwareChannel(object):
"""
class to represent a software channel
"""
ICON_SIZE = 24
def __init__(self, channel_name, channel_origin, channel_component,
source_entry=None, installed_only=False,
channel_icon=None, channel_query=None,
channel_sort_mode=SortMethods.BY_ALPHABET):
"""
configure the software channel object based on channel name,
origin, and component (the latter for detecting the partner
channel)
"""
self._channel_name = channel_name
self._channel_origin = channel_origin
self._channel_component = channel_component
self._channel_color = None
self._channel_view_id = None
self.installed_only = installed_only
self._channel_sort_mode = channel_sort_mode
# distro specific stuff
self.distro = get_distro()
# configure the channel
self._channel_display_name = self._get_display_name_for_channel(
channel_name, channel_origin, channel_component)
if channel_icon is None:
self._channel_icon = self._get_icon_for_channel(
channel_name, channel_origin, channel_component)
else:
self._channel_icon = channel_icon
if channel_query is None:
self._channel_query = self._get_channel_query_for_channel(
channel_name, channel_origin, channel_component)
else:
self._channel_query = channel_query
# a sources.list entry attached to the channel (this is currently
# only used for not-yet-enabled channels)
self._source_entry = source_entry
# when the channel needs to be added to the systems sources.list
self.needs_adding = False
@property
def name(self):
"""
return the channel name as represented in the xapian database
"""
return self._channel_name
@property
def origin(self):
"""
return the channel origin as represented in the xapian database
"""
return self._channel_origin
@property
def component(self):
"""
return the channel component as represented in the xapian database
"""
return self._channel_component
@property
def display_name(self):
"""
return the display name for the corresponding channel for use in the UI
"""
return self._channel_display_name
@property
def icon(self):
"""
return the icon that corresponds to each channel based
on the channel name, its origin string or its component
"""
return self._channel_icon
@property
def query(self):
"""
return the xapian query to be used with this software channel
"""
return self._channel_query
@property
def sort_mode(self):
"""
return the sort mode for this software channel
"""
return self._channel_sort_mode
# TODO: implement __cmp__ so that sort for channels is encapsulated
# here as well
def _get_display_name_for_channel(self, channel_name, channel_origin,
channel_component):
if channel_component == "partner":
channel_display_name = _("Canonical Partners")
elif not channel_origin:
channel_display_name = _("Unknown")
elif channel_origin == self.distro.get_distro_channel_name():
channel_display_name = self.distro.get_distro_channel_description()
elif channel_name == "For Purchase":
channel_display_name = _("For Purchase")
elif channel_name == "Application Review Board PPA":
channel_display_name = _("Independent")
elif channel_name == "notdownloadable":
channel_display_name = _("Other")
else:
return channel_name
return channel_display_name
def _get_icon_for_channel(self, channel_name, channel_origin,
channel_component):
if channel_component == "partner":
channel_icon = "partner"
elif not channel_name:
channel_icon = "unknown-channel"
elif channel_origin == self.distro.get_distro_channel_name():
channel_icon = "distributor-logo"
elif channel_name == "Application Review Board PPA":
channel_icon = "system-users"
elif channel_name == "For Purchase":
channel_icon = "emblem-money"
elif channel_origin and channel_origin.startswith("LP-PPA"):
channel_icon = "ppa"
elif channel_name == "notdownloadable":
channel_icon = "application-default-icon"
# TODO: add check for generic repository source (e.g., Google, Inc.)
# channel_icon = "generic-repository"
else:
channel_icon = "unknown-channel"
return channel_icon
def _get_channel_query_for_channel(self, channel_name, channel_origin,
channel_component):
if channel_component == "partner":
q1 = xapian.Query("XOCpartner")
q2 = xapian.Query("AH%s-partner" % self.distro.get_codename())
channel_query = xapian.Query(xapian.Query.OP_OR, q1, q2)
# show only apps when displaying the new apps archive
elif channel_name == "Application Review Board PPA":
channel_query = xapian.Query(xapian.Query.OP_AND,
xapian.Query("XOL" + channel_name),
xapian.Query("ATapplication"))
elif channel_origin:
channel_query = xapian.Query("XOO" + channel_origin)
else:
channel_query = xapian.Query("XOL" + channel_name)
return channel_query
def __str__(self):
details = []
details.append("* SoftwareChannel")
details.append(" name: %s" % self.name)
details.append(" origin: %s" % self.origin)
details.append(" component: %s" % self.component)
details.append(" display_name: %s" % self.display_name)
details.append(" iconname: %s" % self.icon)
details.append(" query: %s" % self.query)
details.append(" sort_mode: %s" % self.sort_mode)
details.append(" installed_only: %s" % self.installed_only)
return unicode('\n'.join(details), 'utf8').encode('utf8')
class AllChannel(SoftwareChannel):
def __init__(self, channel_name, installed_only):
SoftwareChannel.__init__(
self, channel_name, "all", None,
installed_only=installed_only,
channel_icon=Icons.FALLBACK)
# overrides
def _get_display_name_for_channel(self, channel_name, channel_origin,
channel_component):
return channel_name
def _get_channel_query_for_channel(self, *args):
pass
class AllAvailableChannel(AllChannel):
def __init__(self):
AllChannel.__init__(self, _("All Software"), False)
class AllInstalledChannel(AllChannel):
def __init__(self):
AllChannel.__init__(self, _("All Installed"), True)
# singleton
channels_manager = None
def get_channels_manager(db):
global channels_manager
if channels_manager is None:
from softwarecenter.enums import USE_PACKAGEKIT_BACKEND
if not USE_PACKAGEKIT_BACKEND:
from softwarecenter.backend.channel_impl.aptchannels import (
AptChannelsManager)
channels_manager = AptChannelsManager(db)
else:
channels_manager = ChannelsManager(db)
return channels_manager
def is_channel_available(channelname):
from softwarecenter.backend.channel_impl.aptchannels import (
AptChannelsManager)
return AptChannelsManager.channel_available(channelname)
if __name__ == "__main__":
distro = get_distro()
channel = SoftwareChannel(distro.get_distro_channel_name(),
None, None)
print(channel)
channel = SoftwareChannel(distro.get_distro_channel_name(), None,
"partner")
print(channel)
| gpl-3.0 | 6,576,376,662,612,633,000 | 33.938889 | 79 | 0.591827 | false |
mteule/StationMeteo | messy-doc/StationMeteo.Diagram/StationMeteo.Diagram.py | 1 | 1485 | import logging
import serial
import sqlalchemy
import sqlalchemy.orm
class Sensor :
'''
https://www.google.fr/#q=NVARCHAR+encodage+mysql
https://stackoverflow.com/questions/612430/when-must-we-use-nvarchar-nchar-instead-of-varchar-char-in-sql-servers
    Nvarchar is only relevant for MS-SQL users. '''
def __init__(self) :
pass
class Station :
'''(NULL)'''
def __init__(self) :
self.logger = logging.getLogger(__name__) #
self.ser = serial.Serial() #
self.datab = DatabManager() #
self.raw_received_meterings = "" # str
self.metering_quantity = 0 # int
self.last_meterings_list = list() #
        self.sensor_dict = {'id': None, 'name': None} # placeholder values; left blank in the original diagram
pass
def _get_meterings_raw_data (self) :
# returns
pass
def _parse_raw_data (self) :
# returns
pass
def _store_meterings (self) :
# returns
pass
def setup (self) :
# returns
pass
def loop (self) :
# returns
pass
class DatabManager :
'''
http://docs.sqlalchemy.org/en/rel_0_8/orm/tutorial.html#adding-new-objects'''
def __init__(self) :
self.logger = logging.getLogger(__name__) #
self.engine_url = 'sqlite:///:memory:' # str
        self.engine = sqlalchemy.create_engine(self.engine_url, echo=True) #
        self.Session = sqlalchemy.orm.sessionmaker(bind=self.engine) #
        self.session = self.Session() #
pass
class Metering :
'''
http://docs.sqlalchemy.org/en/rel_0_8/orm/tutorial.html#declare-a-mapping
>>> from sqlalchemy.ext.declarative import declarative_base
>>> declarative_base()
<class 'sqlalchemy.ext.declarative.Base'>
>>>
'''
def __init__(self) :
pass
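# A possible declarative mapping for Metering, following the SQLAlchemy
# tutorial linked in the docstring (column names below are guesses, not the
# project's real schema):
#
#   from sqlalchemy.ext.declarative import declarative_base
#   Base = declarative_base()
#
#   class Metering(Base):
#       __tablename__ = 'metering'
#       id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
#       sensor_id = sqlalchemy.Column(sqlalchemy.Integer)
#       value = sqlalchemy.Column(sqlalchemy.Float)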
| mit | -8,164,009,308,065,374,000 | 26 | 113 | 0.665993 | false |
ppwwyyxx/tensorflow | tensorflow/python/ops/array_ops.py | 1 | 192961 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Tests for this file live in python/kernel_tests/array_ops_test.py
"""Support for manipulating tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
# 'Constant' gets imported in the module 'array_ops'.
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_array_ops import *
from tensorflow.python.ops.gen_array_ops import reverse_v2 as reverse # pylint: disable=unused-import
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# pylint: enable=wildcard-import
# Used for slicing to specify a new 1 size dimension
newaxis = None
tf_export("newaxis").export_constant(__name__, "newaxis")
# We override the 'slice' for the "slice" op, so we keep python's
# existing 'slice' for later use in this module.
_BaseSlice = slice
# LINT.IfChange
matrix_diag_v3_forward_compat_date = (2019, 12, 6)
# LINT.ThenChange(
# //tensorflow/compiler/tests/matrix_diag_ops_test.py,
# //tensorflow/python/kernel_tests/diag_op_test.py,
# //tensorflow/python/ops/parallel_for/array_test.py
# )
@tf_export("reshape", v1=["reshape", "manip.reshape"])
def reshape(tensor, shape, name=None): # pylint: disable=redefined-outer-name
r"""Reshapes a tensor.
Given `tensor`, this operation returns a new `tf.Tensor` that has the same
values as `tensor` in the same order, except with a new shape given by
`shape`.
>>> t1 = [[1, 2, 3],
... [4, 5, 6]]
>>> print(tf.shape(t1).numpy())
[2 3]
>>> t2 = tf.reshape(t1, [6])
>>> t2
<tf.Tensor: shape=(6,), dtype=int32,
numpy=array([1, 2, 3, 4, 5, 6], dtype=int32)>
>>> tf.reshape(t2, [3, 2])
<tf.Tensor: shape=(3, 2), dtype=int32, numpy=
array([[1, 2],
[3, 4],
[5, 6]], dtype=int32)>
The `tf.reshape` does not change the order of or the total number of elements
in the tensor, and so it can reuse the underlying data buffer. This makes it
a fast operation independent of how big of a tensor it is operating on.
>>> tf.reshape([1, 2, 3], [2, 2])
Traceback (most recent call last):
...
InvalidArgumentError: Input to reshape is a tensor with 3 values, but the
requested shape has 4
To instead reorder the data to rearrange the dimensions of a tensor, see
`tf.transpose`.
>>> t = [[1, 2, 3],
... [4, 5, 6]]
>>> tf.reshape(t, [3, 2]).numpy()
array([[1, 2],
[3, 4],
[5, 6]], dtype=int32)
>>> tf.transpose(t, perm=[1, 0]).numpy()
array([[1, 4],
[2, 5],
[3, 6]], dtype=int32)
If one component of `shape` is the special value -1, the size of that
dimension is computed so that the total size remains constant. In particular,
a `shape` of `[-1]` flattens into 1-D. At most one component of `shape` can
be -1.
>>> t = [[1, 2, 3],
... [4, 5, 6]]
>>> tf.reshape(t, [-1])
<tf.Tensor: shape=(6,), dtype=int32,
numpy=array([1, 2, 3, 4, 5, 6], dtype=int32)>
>>> tf.reshape(t, [3, -1])
<tf.Tensor: shape=(3, 2), dtype=int32, numpy=
array([[1, 2],
[3, 4],
[5, 6]], dtype=int32)>
>>> tf.reshape(t, [-1, 2])
<tf.Tensor: shape=(3, 2), dtype=int32, numpy=
array([[1, 2],
[3, 4],
[5, 6]], dtype=int32)>
`tf.reshape(t, [])` reshapes a tensor `t` with one element to a scalar.
>>> tf.reshape([7], []).numpy()
7
More examples:
>>> t = [1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> print(tf.shape(t).numpy())
[9]
>>> tf.reshape(t, [3, 3])
<tf.Tensor: shape=(3, 3), dtype=int32, numpy=
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]], dtype=int32)>
>>> t = [[[1, 1], [2, 2]],
... [[3, 3], [4, 4]]]
>>> print(tf.shape(t).numpy())
[2 2 2]
>>> tf.reshape(t, [2, 4])
<tf.Tensor: shape=(2, 4), dtype=int32, numpy=
array([[1, 1, 2, 2],
[3, 3, 4, 4]], dtype=int32)>
>>> t = [[[1, 1, 1],
... [2, 2, 2]],
... [[3, 3, 3],
... [4, 4, 4]],
... [[5, 5, 5],
... [6, 6, 6]]]
>>> print(tf.shape(t).numpy())
[3 2 3]
>>> # Pass '[-1]' to flatten 't'.
>>> tf.reshape(t, [-1])
<tf.Tensor: shape=(18,), dtype=int32,
numpy=array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6],
dtype=int32)>
>>> # -- Using -1 to infer the shape --
>>> # Here -1 is inferred to be 9:
>>> tf.reshape(t, [2, -1])
<tf.Tensor: shape=(2, 9), dtype=int32, numpy=
array([[1, 1, 1, 2, 2, 2, 3, 3, 3],
[4, 4, 4, 5, 5, 5, 6, 6, 6]], dtype=int32)>
>>> # -1 is inferred to be 2:
>>> tf.reshape(t, [-1, 9])
<tf.Tensor: shape=(2, 9), dtype=int32, numpy=
array([[1, 1, 1, 2, 2, 2, 3, 3, 3],
[4, 4, 4, 5, 5, 5, 6, 6, 6]], dtype=int32)>
>>> # -1 is inferred to be 3:
>>> tf.reshape(t, [ 2, -1, 3])
<tf.Tensor: shape=(2, 3, 3), dtype=int32, numpy=
array([[[1, 1, 1],
[2, 2, 2],
[3, 3, 3]],
[[4, 4, 4],
[5, 5, 5],
[6, 6, 6]]], dtype=int32)>
Args:
tensor: A `Tensor`.
shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
Defines the shape of the output tensor.
name: Optional string. A name for the operation.
Returns:
A `Tensor`. Has the same type as `tensor`.
"""
result = gen_array_ops.reshape(tensor, shape, name)
tensor_util.maybe_set_static_shape(result, shape)
return result
@tf_export("fill")
def fill(dims, value, name=None):
r"""Creates a tensor filled with a scalar value.
This operation creates a tensor of shape `dims` and fills it with `value`.
For example:
```
# Output tensor has shape [2, 3].
fill([2, 3], 9) ==> [[9, 9, 9]
[9, 9, 9]]
```
`tf.fill` differs from `tf.constant` in a few ways:
* `tf.fill` only supports scalar contents, whereas `tf.constant` supports
Tensor values.
  * `tf.fill` creates an Op in the computation graph that constructs the actual
    Tensor value at runtime. This is in contrast to `tf.constant` which embeds
the entire Tensor into the graph with a `Const` node.
* Because `tf.fill` evaluates at graph runtime, it supports dynamic shapes
based on other runtime Tensors, unlike `tf.constant`.
Args:
dims: A `Tensor`. Must be one of the following types: `int32`, `int64`. 1-D.
Represents the shape of the output tensor.
value: A `Tensor`. 0-D (scalar). Value to fill the returned tensor.
@compatibility(numpy) Equivalent to np.full @end_compatibility
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `value`.
"""
result = gen_array_ops.fill(dims, value, name=name)
tensor_util.maybe_set_static_shape(result, dims)
return result
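# --- Editor's illustrative sketch (not part of the original module) ---
# A minimal demonstration of the "dynamic shapes" point in the docstring above: the
# `dims` argument can itself be computed at runtime, which `tf.constant` cannot do.
# The helper name is an editor-introduced placeholder; assumes eager execution.
def _editor_example_fill():  # pragma: no cover - illustrative only
  base = constant([[1, 2, 3], [4, 5, 6]])
  # A tensor of nines whose shape is taken from `base` at runtime.
  return fill(gen_array_ops.shape(base), 9)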
@tf_export("identity")
@dispatch.add_dispatch_support
def identity(input, name=None): # pylint: disable=redefined-builtin
r"""Return a tensor with the same shape and contents as input.
For example:
```python
import tensorflow as tf
val0 = tf.ones((1,), dtype=tf.float32)
a = tf.atan2(val0, val0)
a_identity = tf.identity(a)
print(a.numpy()) #[0.7853982]
print(a_identity.numpy()) #[0.7853982]
```
Args:
input: A `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
if isinstance(input, composite_tensor.CompositeTensor):
return nest.map_structure(identity, input, expand_composites=True)
if context.executing_eagerly() and not hasattr(input, "graph"):
# Make sure we get an input with handle data attached from resource
# variables. Variables have correct handle data when graph building.
input = ops.convert_to_tensor(input)
ret = gen_array_ops.identity(input, name=name)
# Propagate handle data for happier shape inference for resource variables.
if hasattr(input, "_handle_data"):
ret._handle_data = input._handle_data # pylint: disable=protected-access
return ret
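# --- Editor's illustrative sketch (not part of the original module) ---
# `identity` yields a tensor with the same shape and contents, produced by a distinct
# Identity op. The helper name is an editor-introduced placeholder; assumes eager mode.
def _editor_example_identity():  # pragma: no cover - illustrative only
  x = constant([1.0, 2.0])
  y = identity(x)
  return x, y  # equal values; `y` comes from its own Identity op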
# pylint: disable=redefined-builtin,protected-access
@tf_export(v1=["expand_dims"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Use the `axis` argument instead", "dim")
def expand_dims(input, axis=None, name=None, dim=None):
"""Inserts a dimension of 1 into a tensor's shape.
Given a tensor `input`, this operation inserts a dimension of 1 at the
dimension index `axis` of `input`'s shape. The dimension index `axis` starts
at zero; if you specify a negative number for `axis` it is counted backward
from the end.
This operation is useful if you want to add a batch dimension to a single
element. For example, if you have a single image of shape `[height, width,
channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
which will make the shape `[1, height, width, channels]`.
Other examples:
```python
# 't' is a tensor of shape [2]
tf.shape(tf.expand_dims(t, 0)) # [1, 2]
tf.shape(tf.expand_dims(t, 1)) # [2, 1]
tf.shape(tf.expand_dims(t, -1)) # [2, 1]
# 't2' is a tensor of shape [2, 3, 5]
tf.shape(tf.expand_dims(t2, 0)) # [1, 2, 3, 5]
tf.shape(tf.expand_dims(t2, 2)) # [2, 3, 1, 5]
tf.shape(tf.expand_dims(t2, 3)) # [2, 3, 5, 1]
```
This operation requires that:
`-1-input.dims() <= dim <= input.dims()`
This operation is related to `squeeze()`, which removes dimensions of
size 1.
Args:
input: A `Tensor`.
axis: 0-D (scalar). Specifies the dimension index at which to expand the
shape of `input`. Must be in the range `[-rank(input) - 1, rank(input)]`.
name: The name of the output `Tensor` (optional).
dim: 0-D (scalar). Equivalent to `axis`, to be deprecated.
Returns:
A `Tensor` with the same data as `input`, but its shape has an additional
dimension of size 1 added.
Raises:
ValueError: if either both or neither of `dim` and `axis` are specified.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim)
if axis is None:
raise ValueError("Must specify an axis argument to tf.expand_dims()")
return expand_dims_v2(input, axis, name)
@tf_export("expand_dims", v1=[])
@dispatch.add_dispatch_support
def expand_dims_v2(input, axis, name=None):
"""Returns a tensor with an additional dimension inserted at index `axis`.
Given a tensor `input`, this operation inserts a dimension of size 1 at the
dimension index `axis` of `input`'s shape. The dimension index `axis` starts
at zero; if you specify a negative number for `axis` it is counted backward
from the end.
This operation is useful if you want to add a batch dimension to a single
element. For example, if you have a single image of shape `[height, width,
channels]`, you can make it a batch of one image with `expand_dims(image, 0)`,
which will make the shape `[1, height, width, channels]`.
Examples:
>>> t = [[1, 2, 3],[4, 5, 6]] # shape [2, 3]
>>> tf.expand_dims(t, 0)
<tf.Tensor: shape=(1, 2, 3), dtype=int32, numpy=
array([[[1, 2, 3],
[4, 5, 6]]], dtype=int32)>
>>> tf.expand_dims(t, 1)
<tf.Tensor: shape=(2, 1, 3), dtype=int32, numpy=
array([[[1, 2, 3]],
[[4, 5, 6]]], dtype=int32)>
>>> tf.expand_dims(t, 2)
<tf.Tensor: shape=(2, 3, 1), dtype=int32, numpy=
array([[[1],
[2],
[3]],
[[4],
[5],
[6]]], dtype=int32)>
>>> tf.expand_dims(t, -1) # Last dimension index. In this case, same as 2.
<tf.Tensor: shape=(2, 3, 1), dtype=int32, numpy=
array([[[1],
[2],
[3]],
[[4],
[5],
[6]]], dtype=int32)>
This operation is related to:
* `tf.squeeze`, which removes dimensions of size 1.
* `tf.reshape`, which provides more flexible reshaping capability
Args:
input: A `Tensor`.
axis: Integer specifying the dimension index at which to expand the
shape of `input`. Given an input of D dimensions, `axis` must be in range
`[-(D+1), D]` (inclusive).
name: Optional string. The name of the output `Tensor`.
Returns:
A tensor with the same data as `input`, with an additional dimension
inserted at the index specified by `axis`.
Raises:
ValueError: If `axis` is not specified.
InvalidArgumentError: If `axis` is out of range `[-(D+1), D]`.
"""
return gen_array_ops.expand_dims(input, axis, name)
# pylint: enable=redefined-builtin,protected-access
# Aliases for some automatically-generated names.
# pylint: disable=protected-access
@deprecation.deprecated("2016-11-30",
"This op will be removed after the deprecation date. "
"Please switch to tf.setdiff1d().")
def listdiff(x, y, out_idx=None, name=None):
return gen_array_ops.list_diff(x, y, out_idx, name)
listdiff.__doc__ = gen_array_ops.list_diff.__doc__ + "\n" + listdiff.__doc__
# pylint: enable=protected-access
# pylint: disable=undefined-variable
@deprecation.deprecated("2018-11-30",
"This op will be removed after the deprecation date. "
"Please switch to tf.sets.difference().")
@tf_export(v1=["setdiff1d"])
def setdiff1d(x, y, index_dtype=dtypes.int32, name=None):
return gen_array_ops.list_diff(x, y, index_dtype, name)
setdiff1d.__doc__ = gen_array_ops.list_diff.__doc__
@tf_export("broadcast_dynamic_shape")
def broadcast_dynamic_shape(shape_x, shape_y):
"""Computes the shape of a broadcast given symbolic shapes.
When shape_x and shape_y are Tensors representing shapes (i.e. the result of
calling tf.shape on another Tensor) this computes a Tensor which is the shape
of the result of a broadcasting op applied in tensors of shapes shape_x and
shape_y.
For example, if shape_x is [1, 2, 3] and shape_y is [5, 1, 3], the result is a
Tensor whose value is [5, 2, 3].
This is useful when validating the result of a broadcasting operation when the
tensors do not have statically known shapes.
Args:
shape_x: A rank 1 integer `Tensor`, representing the shape of x.
shape_y: A rank 1 integer `Tensor`, representing the shape of y.
Returns:
A rank 1 integer `Tensor` representing the broadcasted shape.
"""
return gen_array_ops.broadcast_args(shape_x, shape_y)
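# --- Editor's illustrative sketch (not part of the original module) ---
# Broadcasting shape [1, 2, 3] against [5, 1, 3] gives [5, 2, 3], as described above.
# The helper name is an editor-introduced placeholder; assumes eager execution.
def _editor_example_broadcast_dynamic_shape():  # pragma: no cover - illustrative only
  shape_x = constant([1, 2, 3])
  shape_y = constant([5, 1, 3])
  return broadcast_dynamic_shape(shape_x, shape_y)  # -> [5, 2, 3]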
@tf_export("broadcast_static_shape")
def broadcast_static_shape(shape_x, shape_y):
"""Computes the shape of a broadcast given known shapes.
When shape_x and shape_y are fully known TensorShapes this computes a
TensorShape which is the shape of the result of a broadcasting op applied in
tensors of shapes shape_x and shape_y.
For example, if shape_x is [1, 2, 3] and shape_y is [5, 1, 3], the result is a
TensorShape whose value is [5, 2, 3].
This is useful when validating the result of a broadcasting operation when the
tensors have statically known shapes.
Args:
shape_x: A `TensorShape`
shape_y: A `TensorShape`
Returns:
A `TensorShape` representing the broadcasted shape.
Raises:
ValueError: If the two shapes can not be broadcasted.
"""
return common_shapes.broadcast_shape(shape_x, shape_y)
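# --- Editor's illustrative sketch (not part of the original module) ---
# The static-shape variant of the example above, operating on `TensorShape` values.
# The helper name is an editor-introduced placeholder.
def _editor_example_broadcast_static_shape():  # pragma: no cover - illustrative only
  shape_x = tensor_shape.TensorShape([1, 2, 3])
  shape_y = tensor_shape.TensorShape([5, 1, 3])
  return broadcast_static_shape(shape_x, shape_y)  # -> TensorShape([5, 2, 3])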
@tf_export("shape", v1=[])
def shape_v2(input, out_type=dtypes.int32, name=None):
# pylint: disable=redefined-builtin
"""Returns the shape of a tensor.
This operation returns a 1-D integer tensor representing the shape of `input`.
For example:
>>> t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
>>> tf.shape(t)
<tf.Tensor: shape=(3,), dtype=int32, numpy=array([2, 2, 3], dtype=int32)>
>>> tf.shape(t).numpy()
array([2, 2, 3], dtype=int32)
Note: When using symbolic tensors, such as when using the Keras functional
API, tf.shape() will return the shape of the symbolic tensor.
>>> a = tf.keras.layers.Input((None, 10))
>>> tf.shape(a)
<tf.Tensor ... shape=(3,) dtype=int32>
In these cases, using `tf.Tensor.shape` will return more informative results.
>>> a.shape
TensorShape([None, None, 10])
Args:
input: A `Tensor` or `SparseTensor`.
out_type: (Optional) The specified output type of the operation (`int32` or
`int64`). Defaults to `tf.int32`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `out_type`.
"""
return shape(input, name, out_type)
@tf_export(v1=["shape"])
def shape(input, name=None, out_type=dtypes.int32):
# pylint: disable=redefined-builtin
"""Returns the shape of a tensor.
This operation returns a 1-D integer tensor representing the shape of `input`.
For example:
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.shape(t) # [2, 2, 3]
```
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
out_type: (Optional) The specified output type of the operation (`int32` or
`int64`). Defaults to `tf.int32`.
Returns:
A `Tensor` of type `out_type`.
"""
return shape_internal(input, name, optimize=True, out_type=out_type)
def shape_internal(input, name=None, optimize=True, out_type=dtypes.int32):
# pylint: disable=redefined-builtin
"""Returns the shape of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the shape as a constant when possible.
out_type: (Optional) The specified output type of the operation (`int32` or
`int64`). Defaults to tf.int32.
Returns:
A `Tensor` of type `out_type`.
"""
with ops.name_scope(name, "Shape", [input]) as name:
if isinstance(
input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
return gen_math_ops.cast(input.dense_shape, out_type)
else:
if not context.executing_eagerly():
input = ops.convert_to_tensor(input)
input_shape = input.get_shape()
if optimize and input_shape.is_fully_defined():
return constant(input_shape.as_list(), out_type, name=name)
return gen_array_ops.shape(input, name=name, out_type=out_type)
@tf_export("shape_n")
def shape_n(input, out_type=dtypes.int32, name=None):
# pylint: disable=redefined-builtin
"""Returns shape of tensors.
Args:
input: A list of at least 1 `Tensor` object with the same type.
out_type: The specified output type of the operation (`int32` or `int64`).
Defaults to `tf.int32`(optional).
name: A name for the operation (optional).
Returns:
A list with the same length as `input` of `Tensor` objects with
type `out_type`.
"""
return gen_array_ops.shape_n(input, out_type=out_type, name=name)
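# --- Editor's illustrative sketch (not part of the original module) ---
# `shape_n` returns one shape tensor per input in a single op. The helper name is an
# editor-introduced placeholder; assumes eager execution.
def _editor_example_shape_n():  # pragma: no cover - illustrative only
  a = constant([[1, 2, 3], [4, 5, 6]])
  b = constant([1, 2, 3, 4])
  return shape_n([a, b])  # -> [[2, 3], [4]] as two int32 tensors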
@tf_export("size", v1=[])
@dispatch.add_dispatch_support
def size_v2(input, out_type=dtypes.int32, name=None):
# pylint: disable=redefined-builtin
"""Returns the size of a tensor.
Returns a 0-D `Tensor` representing the number of elements in `input`
of type `out_type`. Defaults to tf.int32.
For example:
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.size(t) # 12
```
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
out_type: (Optional) The specified non-quantized numeric output type of the
operation. Defaults to `tf.int32`.
Returns:
A `Tensor` of type `out_type`. Defaults to `tf.int32`.
@compatibility(numpy)
Equivalent to np.size()
@end_compatibility
"""
return size(input, name, out_type)
@tf_export(v1=["size"])
@dispatch.add_dispatch_support
def size(input, name=None, out_type=dtypes.int32):
# pylint: disable=redefined-builtin
"""Returns the size of a tensor.
Returns a 0-D `Tensor` representing the number of elements in `input`
of type `out_type`. Defaults to tf.int32.
For example:
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.size(t) # 12
```
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
out_type: (Optional) The specified non-quantized numeric output type of the
operation. Defaults to `tf.int32`.
Returns:
A `Tensor` of type `out_type`. Defaults to `tf.int32`.
@compatibility(numpy)
Equivalent to np.size()
@end_compatibility
"""
return size_internal(input, name, optimize=True, out_type=out_type)
def size_internal(input, name=None, optimize=True, out_type=dtypes.int32):
# pylint: disable=redefined-builtin,protected-access
"""Returns the size of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the size as a constant when possible.
out_type: (Optional) The specified non-quantized numeric output type of the
operation. Defaults to `tf.int32`.
Returns:
A `Tensor` of type `out_type`. Defaults to `tf.int32`.
"""
if (context.executing_eagerly() and not hasattr(input, "graph") and
not isinstance(
input,
(sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue))):
input = ops.convert_to_tensor(input)
np_out_type = out_type.as_numpy_dtype
num_elements = np.prod(input._shape_tuple(), dtype=np_out_type) # pylint: disable=protected-access
return ops.convert_to_tensor(num_elements, dtype=out_type)
with ops.name_scope(name, "Size", [input]) as name:
if isinstance(
input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
return gen_math_ops.prod(
gen_math_ops.cast(input.dense_shape, out_type), 0, name=name)
else:
input = ops.convert_to_tensor(input)
input_shape = input.get_shape()
if optimize:
if input_shape.is_fully_defined():
return constant(input_shape.num_elements(), out_type, name=name)
if input_shape.dims and any(dim == 0 for dim in input_shape.dims):
return constant(0, out_type, name=name)
return gen_array_ops.size(input, name=name, out_type=out_type)
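# --- Editor's illustrative sketch (not part of the original module) ---
# `size` counts all elements, here 2 * 2 * 3 = 12. The helper name is an
# editor-introduced placeholder; assumes eager execution.
def _editor_example_size():  # pragma: no cover - illustrative only
  t = constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
  return size(t)  # -> 12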
@tf_export("rank")
@dispatch.add_dispatch_support
def rank(input, name=None):
# pylint: disable=redefined-builtin
"""Returns the rank of a tensor.
Returns a 0-D `int32` `Tensor` representing the rank of `input`.
For example:
```python
# shape of tensor 't' is [2, 2, 3]
t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.rank(t) # 3
```
**Note**: The rank of a tensor is not the same as the rank of a matrix. The
rank of a tensor is the number of indices required to uniquely select each
element of the tensor. Rank is also known as "order", "degree", or "ndims."
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
@compatibility(numpy)
Equivalent to np.ndim
@end_compatibility
"""
return rank_internal(input, name, optimize=True)
def rank_internal(input, name=None, optimize=True):
# pylint: disable=redefined-builtin
"""Returns the rank of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the rank as a constant when possible.
Returns:
A `Tensor` of type `int32`.
"""
with ops.name_scope(name, "Rank", [input]) as name:
if isinstance(
input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
return gen_array_ops.size(input.dense_shape, name=name)
else:
input = ops.convert_to_tensor(input)
input_shape = input.get_shape()
if optimize and input_shape.ndims is not None:
return constant(input_shape.ndims, dtypes.int32, name=name)
return gen_array_ops.rank(input, name=name)
_SLICE_TYPE_ERROR = (
"Only integers, slices (`:`), ellipsis (`...`), "
"tf.newaxis (`None`) and scalar tf.int32/tf.int64 tensors are valid "
"indices")
_SUPPORTED_SLICE_DTYPES = (dtypes.int32, dtypes.int32_ref, dtypes.int64,
dtypes.int64_ref)
def _check_index(idx):
"""Check if a given value is a valid index into a tensor."""
if isinstance(idx, (six.integer_types, tensor_shape.Dimension)):
return
# Optimistic check. Assumptions:
# * any object with a dtype is supported
# * any object with a dtype has a sizeable shape attribute.
dtype = getattr(idx, "dtype", None)
if (dtype is None or dtypes.as_dtype(dtype) not in _SUPPORTED_SLICE_DTYPES or
idx.shape and len(idx.shape) == 1):
# TODO(slebedev): IndexError seems more appropriate here, but it
# will break `_slice_helper` contract.
raise TypeError(_SLICE_TYPE_ERROR + ", got {!r}".format(idx))
def _is_undefined_dimension(d):
return isinstance(d, tensor_shape.Dimension) and d.value is None
def _slice_helper(tensor, slice_spec, var=None):
"""Overload for Tensor.__getitem__.
This operation extracts the specified region from the tensor.
The notation is similar to NumPy with the restriction that
currently only support basic indexing. That means that
using a non-scalar tensor as input is not currently allowed.
Some useful examples:
```python
# Strip leading and trailing 2 elements
foo = tf.constant([1,2,3,4,5,6])
print(foo[2:-2].eval()) # => [3,4]
# Skip every other row and reverse the order of the columns
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[::2,::-1].eval()) # => [[3,2,1], [9,8,7]]
# Use scalar tensors as indices on both dimensions
print(foo[tf.constant(0), tf.constant(2)].eval()) # => 3
# Insert another dimension
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[tf.newaxis, :, :].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
print(foo[:, tf.newaxis, :].eval()) # => [[[1,2,3]], [[4,5,6]], [[7,8,9]]]
print(foo[:, :, tf.newaxis].eval()) # => [[[1],[2],[3]], [[4],[5],[6]],
[[7],[8],[9]]]
# Ellipses (3 equivalent operations)
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[tf.newaxis, :, :].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
print(foo[tf.newaxis, ...].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
print(foo[tf.newaxis].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
# Masks
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[foo > 2].eval()) # => [3, 4, 5, 6, 7, 8, 9]
```
Notes:
- `tf.newaxis` is `None` as in NumPy.
- An implicit ellipsis is placed at the end of the `slice_spec`
- NumPy advanced indexing is currently not supported.
Args:
tensor: An ops.Tensor object.
slice_spec: The arguments to Tensor.__getitem__.
var: In the case of variable slice assignment, the Variable object to slice
(i.e. tensor is the read-only view of this variable).
Returns:
The appropriate slice of "tensor", based on "slice_spec".
Raises:
ValueError: If a slice range is negative size.
TypeError: If the slice indices aren't int, slice, ellipsis,
tf.newaxis or scalar int32/int64 tensors.
"""
if isinstance(slice_spec, bool) or \
(isinstance(slice_spec, ops.Tensor) and slice_spec.dtype == dtypes.bool) or \
(isinstance(slice_spec, np.ndarray) and slice_spec.dtype == bool):
return boolean_mask(tensor=tensor, mask=slice_spec)
if not isinstance(slice_spec, (list, tuple)):
slice_spec = [slice_spec]
begin, end, strides = [], [], []
index = 0
new_axis_mask, shrink_axis_mask = 0, 0
begin_mask, end_mask = 0, 0
ellipsis_mask = 0
for s in slice_spec:
if isinstance(s, _BaseSlice):
if s.start is not None and not _is_undefined_dimension(s.start):
_check_index(s.start)
begin.append(s.start)
else:
begin.append(0)
begin_mask |= (1 << index)
if s.stop is not None and not _is_undefined_dimension(s.stop):
_check_index(s.stop)
end.append(s.stop)
else:
end.append(0)
end_mask |= (1 << index)
if s.step is not None and not _is_undefined_dimension(s.step):
_check_index(s.step)
strides.append(s.step)
else:
strides.append(1)
elif s is Ellipsis:
begin.append(0)
end.append(0)
strides.append(1)
ellipsis_mask |= (1 << index)
elif s is newaxis:
begin.append(0)
end.append(0)
strides.append(1)
new_axis_mask |= (1 << index)
else:
_check_index(s)
begin.append(s)
end.append(s + 1)
strides.append(1)
shrink_axis_mask |= (1 << index)
index += 1
  # stack possibly involves no tensors, so we must use op_scope to pick the correct graph.
with ops.name_scope(
None,
"strided_slice", [tensor] + begin + end + strides,
skip_on_eager=False) as name:
if begin:
packed_begin, packed_end, packed_strides = (stack(begin), stack(end),
stack(strides))
if (packed_begin.dtype == dtypes.int64 or
packed_end.dtype == dtypes.int64 or
packed_strides.dtype == dtypes.int64):
if packed_begin.dtype != dtypes.int64:
packed_begin = gen_math_ops.cast(packed_begin, dtypes.int64)
if packed_end.dtype != dtypes.int64:
packed_end = gen_math_ops.cast(packed_end, dtypes.int64)
if packed_strides.dtype != dtypes.int64:
packed_strides = gen_math_ops.cast(packed_strides, dtypes.int64)
else:
var_empty = constant([], dtype=dtypes.int32)
packed_begin = packed_end = packed_strides = var_empty
return strided_slice(
tensor,
packed_begin,
packed_end,
packed_strides,
begin_mask=begin_mask,
end_mask=end_mask,
shrink_axis_mask=shrink_axis_mask,
new_axis_mask=new_axis_mask,
ellipsis_mask=ellipsis_mask,
var=var,
name=name)
# pylint: disable=undefined-variable,protected-access,redefined-outer-name
@tf_export("slice")
def slice(input_, begin, size, name=None):
# pylint: disable=redefined-builtin
"""Extracts a slice from a tensor.
This operation extracts a slice of size `size` from a tensor `input_` starting
at the location specified by `begin`. The slice `size` is represented as a
tensor shape, where `size[i]` is the number of elements of the 'i'th dimension
of `input_` that you want to slice. The starting location (`begin`) for the
slice is represented as an offset in each dimension of `input_`. In other
words, `begin[i]` is the offset into the i'th dimension of `input_` that you
want to slice from.
Note that `tf.Tensor.__getitem__` is typically a more pythonic way to
perform slices, as it allows you to write `foo[3:7, :-2]` instead of
`tf.slice(foo, [3, 0], [4, foo.get_shape()[1]-2])`.
`begin` is zero-based; `size` is one-based. If `size[i]` is -1,
all remaining elements in dimension i are included in the
slice. In other words, this is equivalent to setting:
`size[i] = input_.dim_size(i) - begin[i]`
This operation requires that:
`0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n]`
For example:
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]],
[[3, 3, 3], [4, 4, 4]],
[[5, 5, 5], [6, 6, 6]]])
tf.slice(t, [1, 0, 0], [1, 1, 3]) # [[[3, 3, 3]]]
tf.slice(t, [1, 0, 0], [1, 2, 3]) # [[[3, 3, 3],
# [4, 4, 4]]]
tf.slice(t, [1, 0, 0], [2, 1, 3]) # [[[3, 3, 3]],
# [[5, 5, 5]]]
```
Args:
input_: A `Tensor`.
begin: An `int32` or `int64` `Tensor`.
size: An `int32` or `int64` `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` the same type as `input_`.
"""
return gen_array_ops._slice(input_, begin, size, name=name)
# pylint: disable=invalid-name
@tf_export("strided_slice")
def strided_slice(input_,
begin,
end,
strides=None,
begin_mask=0,
end_mask=0,
ellipsis_mask=0,
new_axis_mask=0,
shrink_axis_mask=0,
var=None,
name=None):
"""Extracts a strided slice of a tensor (generalized python array indexing).
**Instead of calling this op directly most users will want to use the
NumPy-style slicing syntax (e.g. `tensor[..., 3:4:-1, tf.newaxis, 3]`), which
is supported via `tf.Tensor.__getitem__` and `tf.Variable.__getitem__`.**
The interface of this op is a low-level encoding of the slicing syntax.
Roughly speaking, this op extracts a slice of size `(end-begin)/stride`
from the given `input_` tensor. Starting at the location specified by `begin`
the slice continues by adding `stride` to the index until all dimensions are
not less than `end`.
Note that a stride can be negative, which causes a reverse slice.
Given a Python slice `input[spec0, spec1, ..., specn]`,
this function will be called as follows.
`begin`, `end`, and `strides` will be vectors of length n.
n in general is not equal to the rank of the `input_` tensor.
In each mask field (`begin_mask`, `end_mask`, `ellipsis_mask`,
`new_axis_mask`, `shrink_axis_mask`) the ith bit will correspond to
the ith spec.
If the ith bit of `begin_mask` is set, `begin[i]` is ignored and
the fullest possible range in that dimension is used instead.
`end_mask` works analogously, except with the end range.
`foo[5:,:,:3]` on a 7x8x9 tensor is equivalent to `foo[5:7,0:8,0:3]`.
`foo[::-1]` reverses a tensor with shape 8.
If the ith bit of `ellipsis_mask` is set, as many unspecified dimensions
as needed will be inserted between other dimensions. Only one
non-zero bit is allowed in `ellipsis_mask`.
For example `foo[3:5,...,4:5]` on a shape 10x3x3x10 tensor is
equivalent to `foo[3:5,:,:,4:5]` and
`foo[3:5,...]` is equivalent to `foo[3:5,:,:,:]`.
If the ith bit of `new_axis_mask` is set, then `begin`,
`end`, and `stride` are ignored and a new length 1 dimension is
added at this point in the output tensor.
For example,
`foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor.
If the ith bit of `shrink_axis_mask` is set, it implies that the ith
specification shrinks the dimensionality by 1, taking on the value at index
`begin[i]`. `end[i]` and `strides[i]` are ignored in this case. For example in
Python one might do `foo[:, 3, :]` which would result in `shrink_axis_mask`
equal to 2.
NOTE: `begin` and `end` are zero-indexed.
`strides` entries must be non-zero.
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]],
[[3, 3, 3], [4, 4, 4]],
[[5, 5, 5], [6, 6, 6]]])
tf.strided_slice(t, [1, 0, 0], [2, 1, 3], [1, 1, 1]) # [[[3, 3, 3]]]
tf.strided_slice(t, [1, 0, 0], [2, 2, 3], [1, 1, 1]) # [[[3, 3, 3],
# [4, 4, 4]]]
tf.strided_slice(t, [1, -1, 0], [2, -3, 3], [1, -1, 1]) # [[[4, 4, 4],
# [3, 3, 3]]]
```
Args:
input_: A `Tensor`.
begin: An `int32` or `int64` `Tensor`.
end: An `int32` or `int64` `Tensor`.
strides: An `int32` or `int64` `Tensor`.
begin_mask: An `int32` mask.
end_mask: An `int32` mask.
ellipsis_mask: An `int32` mask.
new_axis_mask: An `int32` mask.
shrink_axis_mask: An `int32` mask.
var: The variable corresponding to `input_` or None
name: A name for the operation (optional).
Returns:
A `Tensor` the same type as `input`.
"""
if strides is None:
strides = ones_like(begin)
op = gen_array_ops.strided_slice(
input=input_,
begin=begin,
end=end,
strides=strides,
name=name,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask)
parent_name = name
if not (var is None and isinstance(op, ops.EagerTensor)):
def assign(val, name=None):
"""Closure that holds all the arguments to create an assignment."""
if var is None:
raise ValueError("Sliced assignment is only supported for variables")
else:
if name is None:
name = parent_name + "_assign"
return var._strided_slice_assign(
begin=begin,
end=end,
strides=strides,
value=val,
name=name,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask)
op.assign = assign
return op
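# --- Editor's illustrative sketch (not part of the original module) ---
# How the bit-field masks encode a Python slice: the call below is the low-level
# equivalent of `t[1:, :, :2]`. The helper name is an editor-introduced placeholder;
# assumes eager execution.
def _editor_example_strided_slice():  # pragma: no cover - illustrative only
  t = constant([[[1, 1, 1], [2, 2, 2]],
                [[3, 3, 3], [4, 4, 4]],
                [[5, 5, 5], [6, 6, 6]]])
  # spec 0 (`1:`) leaves `end` open   -> bit 0 of end_mask
  # spec 1 (`:`)  leaves both open    -> bit 1 of begin_mask and end_mask
  # spec 2 (`:2`) leaves `begin` open -> bit 2 of begin_mask
  return strided_slice(t, [1, 0, 0], [0, 0, 2], [1, 1, 1],
                       begin_mask=0b110, end_mask=0b011)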
def _SliceHelperVar(var, slice_spec):
"""Creates a slice helper object given a variable.
This allows creating a sub-tensor from part of the current contents
of a variable. See `tf.Tensor.__getitem__` for detailed examples
of slicing.
This function in addition also allows assignment to a sliced range.
This is similar to `__setitem__` functionality in Python. However,
the syntax is different so that the user can capture the assignment
operation for grouping or passing to `sess.run()`.
For example,
```python
import tensorflow as tf
A = tf.Variable([[1,2,3], [4,5,6], [7,8,9]], dtype=tf.float32)
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
print(sess.run(A[:2, :2])) # => [[1,2], [4,5]]
op = A[:2,:2].assign(22. * tf.ones((2, 2)))
print(sess.run(op)) # => [[22, 22, 3], [22, 22, 6], [7,8,9]]
```
Note that assignments currently do not support NumPy broadcasting
semantics.
Args:
var: An `ops.Variable` object.
slice_spec: The arguments to `Tensor.__getitem__`.
Returns:
    The appropriate slice of "tensor", based on "slice_spec", as an operator.
    The operator also has an `assign()` method that can be used to generate
    an assignment operation.
Raises:
ValueError: If a slice range is negative size.
    TypeError: If the slice indices aren't int, slice, ellipsis,
      tf.newaxis or int32/int64 tensors.
"""
return _slice_helper(var.value(), slice_spec, var)
ops.Tensor._override_operator("__getitem__", _slice_helper)
@tf_export("parallel_stack")
def parallel_stack(values, name="parallel_stack"):
"""Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor in parallel.
Requires that the shape of inputs be known at graph construction time.
Packs the list of tensors in `values` into a tensor with rank one higher than
each tensor in `values`, by packing them along the first dimension.
Given a list of length `N` of tensors of shape `(A, B, C)`; the `output`
tensor will have the shape `(N, A, B, C)`.
For example:
```python
x = tf.constant([1, 4])
y = tf.constant([2, 5])
z = tf.constant([3, 6])
tf.parallel_stack([x, y, z]) # [[1, 4], [2, 5], [3, 6]]
```
The difference between `stack` and `parallel_stack` is that `stack` requires
all the inputs be computed before the operation will begin but doesn't require
that the input shapes be known during graph construction.
`parallel_stack` will copy pieces of the input into the output as they become
  available; in some situations this can provide a performance benefit.
Unlike `stack`, `parallel_stack` does NOT support backpropagation.
This is the opposite of unstack. The numpy equivalent is
tf.parallel_stack([x, y, z]) = np.asarray([x, y, z])
Args:
values: A list of `Tensor` objects with the same shape and type.
name: A name for this operation (optional).
Returns:
output: A stacked `Tensor` with the same type as `values`.
"""
with ops.name_scope(name):
value_t = ops.convert_to_tensor(values[0])
value_shape = ops.convert_to_tensor(value_t).get_shape()
output_shape = tensor_shape.TensorShape([len(values)])
output_shape = output_shape.concatenate(value_shape)
# expand_dims converts concat to stack.
return gen_array_ops.parallel_concat(
[expand_dims(value, 0) for value in values], shape=output_shape)
@tf_export("stack")
@dispatch.add_dispatch_support
def stack(values, axis=0, name="stack"):
"""Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor.
Packs the list of tensors in `values` into a tensor with rank one higher than
each tensor in `values`, by packing them along the `axis` dimension.
Given a list of length `N` of tensors of shape `(A, B, C)`;
if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
Etc.
For example:
>>> x = tf.constant([1, 4])
>>> y = tf.constant([2, 5])
>>> z = tf.constant([3, 6])
>>> tf.stack([x, y, z])
<tf.Tensor: shape=(3, 2), dtype=int32, numpy=
array([[1, 4],
[2, 5],
[3, 6]], dtype=int32)>
  >>> tf.stack([x, y, z], axis=1)
<tf.Tensor: shape=(2, 3), dtype=int32, numpy=
array([[1, 2, 3],
[4, 5, 6]], dtype=int32)>
This is the opposite of unstack. The numpy equivalent is `np.stack`
>>> np.array_equal(np.stack([x, y, z]), tf.stack([x, y, z]))
True
Args:
values: A list of `Tensor` objects with the same shape and type.
axis: An `int`. The axis to stack along. Defaults to the first dimension.
Negative values wrap around, so the valid range is `[-(R+1), R+1)`.
name: A name for this operation (optional).
Returns:
output: A stacked `Tensor` with the same type as `values`.
Raises:
ValueError: If `axis` is out of the range [-(R+1), R+1).
"""
if axis == 0:
try:
# If the input is a constant list, it can be converted to a constant op
return ops.convert_to_tensor(values, name=name)
except (TypeError, ValueError):
pass # Input list contains non-constant tensors
value_shape = ops.convert_to_tensor(values[0], name=name)._shape_tuple() # pylint: disable=protected-access
if value_shape is not None:
expanded_num_dims = len(value_shape) + 1
if axis < -expanded_num_dims or axis >= expanded_num_dims:
raise ValueError("axis = %d not in [%d, %d)" %
(axis, -expanded_num_dims, expanded_num_dims))
return gen_array_ops.pack(values, axis=axis, name=name)
# pylint: disable=invalid-name
def _autopacking_helper(list_or_tuple, dtype, name):
"""Converts the given list or tuple to a tensor by packing.
Args:
list_or_tuple: A (possibly nested) list or tuple containing a tensor.
dtype: The element type of the returned tensor.
name: A name for the returned tensor.
Returns:
A `tf.Tensor` with value equivalent to `list_or_tuple`.
"""
if context.executing_eagerly():
# NOTE: Fast path when all the items are tensors, this doesn't do any type
# checking.
if all(ops.is_dense_tensor_like(elem) for elem in list_or_tuple):
return gen_array_ops.pack(list_or_tuple, name=name)
must_pack = False
converted_elems = []
with ops.name_scope(name) as scope:
for i, elem in enumerate(list_or_tuple):
if ops.is_dense_tensor_like(elem):
if dtype is not None and elem.dtype.base_dtype != dtype:
raise TypeError("Cannot convert a list containing a tensor of dtype "
"%s to %s (Tensor is: %r)" %
(elem.dtype, dtype, elem))
converted_elems.append(elem)
must_pack = True
elif isinstance(elem, (list, tuple)):
converted_elem = _autopacking_helper(elem, dtype, str(i))
if ops.is_dense_tensor_like(converted_elem):
must_pack = True
converted_elems.append(converted_elem)
else:
converted_elems.append(elem)
if must_pack:
elems_as_tensors = []
for i, elem in enumerate(converted_elems):
if ops.is_dense_tensor_like(elem):
elems_as_tensors.append(elem)
else:
# NOTE(mrry): This is inefficient, but it enables us to
# handle the case where the list arguments are other
# convertible-to-tensor types, such as numpy arrays.
elems_as_tensors.append(
constant_op.constant(elem, dtype=dtype, name=str(i)))
return gen_array_ops.pack(elems_as_tensors, name=scope)
else:
return converted_elems
def _get_dtype_from_nested_lists(list_or_tuple):
"""Returns the dtype of any tensor-like object in `list_or_tuple`, if found.
Args:
list_or_tuple: A list or tuple representing an object that can be converted
to a `tf.Tensor`.
Returns:
The dtype of any tensor-like object in `list_or_tuple`, or `None` if no
such object exists.
"""
for elem in list_or_tuple:
if ops.is_dense_tensor_like(elem):
return elem.dtype.base_dtype
elif isinstance(elem, (list, tuple)):
maybe_dtype = _get_dtype_from_nested_lists(elem)
if maybe_dtype is not None:
return maybe_dtype
return None
def _cast_nested_seqs_to_dtype(dtype):
def _maybe_cast(elem):
if ops.is_dense_tensor_like(elem):
if dtype != elem.dtype.base_dtype:
elem = gen_math_ops.cast(elem, dtype)
return elem
return _maybe_cast
_NON_AUTOPACKABLE_TYPES = set(np.core.numerictypes.ScalarType)
_NON_AUTOPACKABLE_TYPES.add(np.ndarray)
def _should_not_autopack(v):
# The condition we really want is
# ops.is_dense_tensor_like(...)
# but it is >5x slower due to abc.ABCMeta.__instancecheck__.
# pylint: disable=unidiomatic-typecheck
# TODO(slebedev): add nest.all?
return all(type(elem) in _NON_AUTOPACKABLE_TYPES for elem in nest.flatten(v))
# pylint: enable=unidiomatic-typecheck
def _autopacking_conversion_function(v, dtype=None, name=None, as_ref=False):
"""Tensor conversion function that automatically packs arguments."""
if as_ref or _should_not_autopack(v):
return NotImplemented
inferred_dtype = _get_dtype_from_nested_lists(v)
if inferred_dtype is None:
# We did not find any tensor-like objects in the nested lists, so defer to
# other conversion functions.
return NotImplemented
if dtype is None:
dtype = inferred_dtype
elif dtype != inferred_dtype:
v = nest.map_structure(_cast_nested_seqs_to_dtype(dtype), v)
return _autopacking_helper(v, dtype, name or "packed")
# pylint: enable=invalid-name
# NOTE: Register this conversion function to run *before* one that
# assumes every element is a value.
ops.register_tensor_conversion_function((list, tuple),
_autopacking_conversion_function, 99)
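# --- Editor's illustrative sketch (not part of the original module) ---
# With the conversion function registered above, converting a nested list that contains
# a tensor packs the whole structure via `pack`. The helper name is an editor-introduced
# placeholder; assumes eager execution.
def _editor_example_autopacking():  # pragma: no cover - illustrative only
  row = [constant(1), 2, 3]  # mixed tensor / Python ints
  return ops.convert_to_tensor([row, [4, 5, 6]])  # packed into a 2x3 int32 tensor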
@tf_export("unstack")
def unstack(value, num=None, axis=0, name="unstack"):
"""Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors.
Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
If `num` is not specified (the default), it is inferred from `value`'s shape.
If `value.shape[axis]` is not known, `ValueError` is raised.
For example, given a tensor of shape `(A, B, C, D)`;
If `axis == 0` then the i'th tensor in `output` is the slice
`value[i, :, :, :]` and each tensor in `output` will have shape `(B, C, D)`.
(Note that the dimension unpacked along is gone, unlike `split`).
If `axis == 1` then the i'th tensor in `output` is the slice
`value[:, i, :, :]` and each tensor in `output` will have shape `(A, C, D)`.
Etc.
This is the opposite of stack.
Args:
value: A rank `R > 0` `Tensor` to be unstacked.
num: An `int`. The length of the dimension `axis`. Automatically inferred if
`None` (the default).
axis: An `int`. The axis to unstack along. Defaults to the first dimension.
Negative values wrap around, so the valid range is `[-R, R)`.
name: A name for the operation (optional).
Returns:
The list of `Tensor` objects unstacked from `value`.
Raises:
ValueError: If `num` is unspecified and cannot be inferred.
ValueError: If `axis` is out of the range [-R, R).
"""
if num is None:
value = ops.convert_to_tensor(value)
value_shape = value.get_shape()
if value_shape.ndims is not None:
if axis < -value_shape.ndims or axis >= value_shape.ndims:
raise ValueError("axis = %d not in [%d, %d)" %
(axis, -value_shape.ndims, value_shape.ndims))
num = value_shape.dims[axis].value
if num is None:
raise ValueError("Cannot infer num from shape %s" % value_shape)
return gen_array_ops.unpack(value, num=num, axis=axis, name=name)
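# --- Editor's illustrative sketch (not part of the original module) ---
# `unstack` chips a tensor along one axis into rank-(R-1) pieces. The helper name is an
# editor-introduced placeholder; assumes eager execution.
def _editor_example_unstack():  # pragma: no cover - illustrative only
  t = constant([[1, 4], [2, 5], [3, 6]])  # shape (3, 2)
  rows = unstack(t)                       # three tensors of shape (2,)
  cols = unstack(t, axis=1)               # two tensors of shape (3,)
  return rows, cols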
@tf_export("concat")
@dispatch.add_dispatch_support
def concat(values, axis, name="concat"):
"""Concatenates tensors along one dimension.
Concatenates the list of tensors `values` along dimension `axis`. If
`values[i].shape = [D0, D1, ... Daxis(i), ...Dn]`, the concatenated
result has shape
[D0, D1, ... Raxis, ...Dn]
where
Raxis = sum(Daxis(i))
That is, the data from the input tensors is joined along the `axis`
dimension.
The number of dimensions of the input tensors must match, and all dimensions
except `axis` must be equal.
For example:
>>> t1 = [[1, 2, 3], [4, 5, 6]]
>>> t2 = [[7, 8, 9], [10, 11, 12]]
>>> concat([t1, t2], 0)
<tf.Tensor: shape=(4, 3), dtype=int32, numpy=
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 7, 8, 9],
[10, 11, 12]], dtype=int32)>
>>> concat([t1, t2], 1)
<tf.Tensor: shape=(2, 6), dtype=int32, numpy=
array([[ 1, 2, 3, 7, 8, 9],
[ 4, 5, 6, 10, 11, 12]], dtype=int32)>
As in Python, the `axis` could also be negative numbers. Negative `axis`
are interpreted as counting from the end of the rank, i.e.,
`axis + rank(values)`-th dimension.
For example:
>>> t1 = [[[1, 2], [2, 3]], [[4, 4], [5, 3]]]
>>> t2 = [[[7, 4], [8, 4]], [[2, 10], [15, 11]]]
>>> tf.concat([t1, t2], -1)
<tf.Tensor: shape=(2, 2, 4), dtype=int32, numpy=
array([[[ 1, 2, 7, 4],
[ 2, 3, 8, 4]],
[[ 4, 4, 2, 10],
[ 5, 3, 15, 11]]], dtype=int32)>
Note: If you are concatenating along a new axis consider using stack.
E.g.
```python
tf.concat([tf.expand_dims(t, axis) for t in tensors], axis)
```
can be rewritten as
```python
tf.stack(tensors, axis=axis)
```
Args:
values: A list of `Tensor` objects or a single `Tensor`.
axis: 0-D `int32` `Tensor`. Dimension along which to concatenate. Must be
in the range `[-rank(values), rank(values))`. As in Python, indexing for
      axis is 0-based. Positive axis in the range of `[0, rank(values))` refers
to `axis`-th dimension. And negative axis refers to `axis +
rank(values)`-th dimension.
name: A name for the operation (optional).
Returns:
A `Tensor` resulting from concatenation of the input tensors.
"""
if not isinstance(values, (list, tuple)):
values = [values]
# TODO(mrry): Change to return values?
if len(values) == 1: # Degenerate case of one tensor.
# Make a throwaway call to convert_to_tensor to make sure
# that axis is of the correct type, and make sure that
# the returned tensor is a scalar.
# TODO(keveman): Implement a standalone type and shape checker.
with ops.name_scope(name) as scope:
ops.convert_to_tensor(
axis, name="concat_dim",
dtype=dtypes.int32).get_shape().assert_has_rank(0)
return identity(values[0], name=name)
return gen_array_ops.concat_v2(values=values, axis=axis, name=name)
@tf_export(v1=["boolean_mask"])
def boolean_mask(tensor, mask, name="boolean_mask", axis=None):
"""Apply boolean mask to tensor.
Numpy equivalent is `tensor[mask]`.
```python
# 1-D example
tensor = [0, 1, 2, 3]
mask = np.array([True, False, True, False])
boolean_mask(tensor, mask) # [0, 2]
```
In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match
the first K dimensions of `tensor`'s shape. We then have:
`boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`
where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).
The `axis` could be used with `mask` to indicate the axis to mask from.
In that case, `axis + dim(mask) <= dim(tensor)` and `mask`'s shape must match
the first `axis + dim(mask)` dimensions of `tensor`'s shape.
See also: `tf.ragged.boolean_mask`, which can be applied to both dense and
ragged tensors, and can be used if you need to preserve the masked dimensions
of `tensor` (rather than flattening them, as `tf.boolean_mask` does).
Args:
tensor: N-D tensor.
mask: K-D boolean tensor, K <= N and K must be known statically.
name: A name for this operation (optional).
axis: A 0-D int Tensor representing the axis in `tensor` to mask from. By
default, axis is 0 which will mask from the first dimension. Otherwise K +
axis <= N.
Returns:
(N-K+1)-dimensional tensor populated by entries in `tensor` corresponding
to `True` values in `mask`.
Raises:
ValueError: If shapes do not conform.
Examples:
```python
# 2-D example
tensor = [[1, 2], [3, 4], [5, 6]]
mask = np.array([True, False, True])
boolean_mask(tensor, mask) # [[1, 2], [5, 6]]
```
"""
def _apply_mask_1d(reshaped_tensor, mask, axis=None):
"""Mask tensor along dimension 0 with a 1-D mask."""
indices = squeeze(where_v2(mask), axis=[1])
return gather(reshaped_tensor, indices, axis=axis)
with ops.name_scope(name, values=[tensor, mask]):
tensor = ops.convert_to_tensor(tensor, name="tensor")
mask = ops.convert_to_tensor(mask, name="mask")
shape_mask = mask.get_shape()
ndims_mask = shape_mask.ndims
shape_tensor = tensor.get_shape()
if ndims_mask == 0:
raise ValueError("mask cannot be scalar.")
if ndims_mask is None:
raise ValueError(
"Number of mask dimensions must be specified, even if some dimensions"
" are None. E.g. shape=[None] is ok, but shape=None is not.")
axis = 0 if axis is None else axis
shape_tensor[axis:axis + ndims_mask].assert_is_compatible_with(shape_mask)
leading_size = gen_math_ops.prod(shape(tensor)[axis:axis + ndims_mask], [0])
tensor = reshape(
tensor,
concat([
shape(tensor)[:axis], [leading_size],
shape(tensor)[axis + ndims_mask:]
], 0))
first_dim = shape_tensor[axis:axis + ndims_mask].num_elements()
tensor.set_shape(
tensor_shape.as_shape(shape_tensor[:axis]).concatenate(
[first_dim]).concatenate(shape_tensor[axis + ndims_mask:]))
mask = reshape(mask, [-1])
return _apply_mask_1d(tensor, mask, axis)
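# --- Editor's illustrative sketch (not part of the original module) ---
# The `axis` argument shifts where the mask is applied; here a 1-D mask selects columns
# instead of rows. The helper name is an editor-introduced placeholder; assumes eager
# execution.
def _editor_example_boolean_mask_axis():  # pragma: no cover - illustrative only
  t = constant([[1, 2], [3, 4], [5, 6]])  # shape (3, 2)
  col_mask = np.array([True, False])      # mask along axis 1
  return boolean_mask(t, col_mask, axis=1)  # -> [[1], [3], [5]]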
@tf_export("boolean_mask", v1=[])
@dispatch.add_dispatch_support
def boolean_mask_v2(tensor, mask, axis=None, name="boolean_mask"):
"""Apply boolean mask to tensor.
Numpy equivalent is `tensor[mask]`.
```python
# 1-D example
tensor = [0, 1, 2, 3]
mask = np.array([True, False, True, False])
boolean_mask(tensor, mask) # [0, 2]
```
In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match
the first K dimensions of `tensor`'s shape. We then have:
`boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`
where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).
The `axis` could be used with `mask` to indicate the axis to mask from.
In that case, `axis + dim(mask) <= dim(tensor)` and `mask`'s shape must match
the first `axis + dim(mask)` dimensions of `tensor`'s shape.
See also: `tf.ragged.boolean_mask`, which can be applied to both dense and
ragged tensors, and can be used if you need to preserve the masked dimensions
of `tensor` (rather than flattening them, as `tf.boolean_mask` does).
Args:
tensor: N-D tensor.
mask: K-D boolean tensor, K <= N and K must be known statically.
axis: A 0-D int Tensor representing the axis in `tensor` to mask from. By
default, axis is 0 which will mask from the first dimension. Otherwise K +
axis <= N.
name: A name for this operation (optional).
Returns:
(N-K+1)-dimensional tensor populated by entries in `tensor` corresponding
to `True` values in `mask`.
Raises:
ValueError: If shapes do not conform.
Examples:
```python
# 2-D example
tensor = [[1, 2], [3, 4], [5, 6]]
mask = np.array([True, False, True])
boolean_mask(tensor, mask) # [[1, 2], [5, 6]]
```
"""
return boolean_mask(tensor, mask, name, axis)
@tf_export("sparse.mask", v1=["sparse.mask", "sparse_mask"])
@deprecation.deprecated_endpoints("sparse_mask")
def sparse_mask(a, mask_indices, name=None):
"""Masks elements of `IndexedSlices`.
Given an `IndexedSlices` instance `a`, returns another `IndexedSlices` that
contains a subset of the slices of `a`. Only the slices at indices not
specified in `mask_indices` are returned.
This is useful when you need to extract a subset of slices in an
`IndexedSlices` object.
For example:
```python
# `a` contains slices at indices [12, 26, 37, 45] from a large tensor
# with shape [1000, 10]
a.indices # [12, 26, 37, 45]
tf.shape(a.values) # [4, 10]
# `b` will be the subset of `a` slices at its second and third indices, so
# we want to mask its first and last indices (which are at absolute
# indices 12, 45)
b = tf.sparse.mask(a, [12, 45])
b.indices # [26, 37]
tf.shape(b.values) # [2, 10]
```
Args:
a: An `IndexedSlices` instance.
mask_indices: Indices of elements to mask.
name: A name for the operation (optional).
Returns:
The masked `IndexedSlices` instance.
"""
with ops.name_scope(name, "sparse_mask", [a, mask_indices]) as name:
indices = a.indices
out_indices, to_gather = gen_array_ops.list_diff(indices, mask_indices)
out_values = gather(a.values, to_gather, name=name)
return ops.IndexedSlices(out_values, out_indices, a.dense_shape)
@tf_export("unique")
def unique(x, out_idx=dtypes.int32, name=None):
# TODO(yongtang): switch to v2 once API deprecation
# period (3 weeks) pass.
# TODO(yongtang): The documentation should also
# be updated when switch to v2.
return gen_array_ops.unique(x, out_idx, name)
unique.__doc__ = gen_array_ops.unique.__doc__
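# --- Editor's illustrative sketch (not part of the original module) ---
# `unique` returns the distinct values and, for every input element, the index of its
# value in that distinct list. The helper name is an editor-introduced placeholder;
# assumes eager execution.
def _editor_example_unique():  # pragma: no cover - illustrative only
  x = constant([1, 1, 2, 4, 4, 4, 7, 8, 8])
  y, idx = unique(x)  # y -> [1, 2, 4, 7, 8]; idx -> [0, 0, 1, 2, 2, 2, 3, 4, 4]
  return y, idx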
@tf_export("unique_with_counts")
def unique_with_counts(x, out_idx=dtypes.int32, name=None):
# TODO(yongtang): switch to v2 once API deprecation
# period (3 weeks) pass.
# TODO(yongtang): The documentation should also
# be updated when switch to v2.
return gen_array_ops.unique_with_counts(x, out_idx, name)
unique_with_counts.__doc__ = gen_array_ops.unique_with_counts.__doc__
@tf_export("split")
def split(value, num_or_size_splits, axis=0, num=None, name="split"):
"""Splits a tensor into sub tensors.
If `num_or_size_splits` is an integer, then `value` is split along dimension
`axis` into `num_split` smaller tensors. This requires that `num_split` evenly
divides `value.shape[axis]`.
If `num_or_size_splits` is a 1-D Tensor (or list), we call it `size_splits`
and `value` is split into `len(size_splits)` elements. The shape of the `i`-th
element has the same size as the `value` except along dimension `axis` where
the size is `size_splits[i]`.
For example:
```python
# 'value' is a tensor with shape [5, 30]
# Split 'value' into 3 tensors with sizes [4, 15, 11] along dimension 1
split0, split1, split2 = tf.split(value, [4, 15, 11], 1)
tf.shape(split0) # [5, 4]
tf.shape(split1) # [5, 15]
tf.shape(split2) # [5, 11]
# Split 'value' into 3 tensors along dimension 1
split0, split1, split2 = tf.split(value, num_or_size_splits=3, axis=1)
tf.shape(split0) # [5, 10]
```
Args:
value: The `Tensor` to split.
num_or_size_splits: Either an integer indicating the number of splits along
`axis` or a 1-D integer `Tensor` or Python list containing the sizes of
each output tensor along `axis`. If a scalar, then it must evenly divide
`value.shape[axis]`; otherwise the sum of sizes along the split axis
must match that of the `value`.
axis: An integer or scalar `int32` `Tensor`. The dimension along which to
split. Must be in the range `[-rank(value), rank(value))`. Defaults to 0.
num: Optional, used to specify the number of outputs when it cannot be
inferred from the shape of `size_splits`.
name: A name for the operation (optional).
Returns:
if `num_or_size_splits` is a scalar returns `num_or_size_splits` `Tensor`
objects; if `num_or_size_splits` is a 1-D Tensor returns
`num_or_size_splits.get_shape[0]` `Tensor` objects resulting from splitting
`value`.
Raises:
ValueError: If `num` is unspecified and cannot be inferred.
"""
size_splits = ops.convert_to_tensor(num_or_size_splits)
if isinstance(num_or_size_splits,
six.integer_types + (tensor_shape.Dimension,)):
return gen_array_ops.split(
axis=axis, num_split=num_or_size_splits, value=value, name=name)
if size_splits._rank() == 0:
raise ValueError(
"Rank-0 tensors are not supported as the num_or_size_splits argument "
"to split. Argument provided: %s" % (num_or_size_splits,))
if num is None:
size_splits_shape = size_splits._shape_tuple()
if size_splits_shape:
num = size_splits_shape[0]
if num is None:
raise ValueError("Cannot infer num from shape %s" % num_or_size_splits)
return gen_array_ops.split_v(
value=value, size_splits=size_splits, axis=axis, num_split=num, name=name)
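# --- Editor's illustrative sketch (not part of the original module) ---
# The two forms of `num_or_size_splits` from the docstring above, on a [5, 30] tensor.
# The helper name is an editor-introduced placeholder; assumes eager execution.
def _editor_example_split():  # pragma: no cover - illustrative only
  value = fill([5, 30], 1)
  a, b, c = split(value, [4, 15, 11], axis=1)           # shapes (5, 4), (5, 15), (5, 11)
  d, e, f = split(value, num_or_size_splits=3, axis=1)  # three (5, 10) pieces
  return a, b, c, d, e, f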
@tf_export("transpose", v1=[])
def transpose_v2(a, perm=None, conjugate=False, name="transpose"):
"""Transposes `a`.
Permutes the dimensions according to `perm`.
The returned tensor's dimension i will correspond to the input dimension
`perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
the rank of the input tensor. Hence by default, this operation performs a
regular matrix transpose on 2-D input Tensors. If conjugate is True and
`a.dtype` is either `complex64` or `complex128` then the values of `a`
are conjugated and transposed.
@compatibility(numpy)
In `numpy` transposes are memory-efficient constant time operations as they
simply return a new view of the same data with adjusted `strides`.
TensorFlow does not support strides, so `transpose` returns a new tensor with
the items permuted.
@end_compatibility
For example:
```python
x = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.transpose(x) # [[1, 4]
# [2, 5]
# [3, 6]]
# Equivalently
tf.transpose(x, perm=[1, 0]) # [[1, 4]
# [2, 5]
# [3, 6]]
# If x is complex, setting conjugate=True gives the conjugate transpose
x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
[4 + 4j, 5 + 5j, 6 + 6j]])
tf.transpose(x, conjugate=True) # [[1 - 1j, 4 - 4j],
# [2 - 2j, 5 - 5j],
# [3 - 3j, 6 - 6j]]
# 'perm' is more useful for n-dimensional tensors, for n > 2
x = tf.constant([[[ 1, 2, 3],
[ 4, 5, 6]],
[[ 7, 8, 9],
[10, 11, 12]]])
# Take the transpose of the matrices in dimension-0
# (this common operation has a shorthand `linalg.matrix_transpose`)
tf.transpose(x, perm=[0, 2, 1]) # [[[1, 4],
# [2, 5],
# [3, 6]],
# [[7, 10],
# [8, 11],
# [9, 12]]]
```
Args:
a: A `Tensor`.
perm: A permutation of the dimensions of `a`.
conjugate: Optional bool. Setting it to `True` is mathematically equivalent
to tf.math.conj(tf.transpose(input)).
name: A name for the operation (optional).
Returns:
A transposed `Tensor`.
"""
return transpose(a=a, perm=perm, name=name, conjugate=conjugate)
@tf_export(v1=["transpose"])
def transpose(a, perm=None, name="transpose", conjugate=False):
"""Transposes `a`.
Permutes the dimensions according to `perm`.
The returned tensor's dimension i will correspond to the input dimension
`perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
the rank of the input tensor. Hence by default, this operation performs a
regular matrix transpose on 2-D input Tensors. If conjugate is True and
`a.dtype` is either `complex64` or `complex128` then the values of `a`
are conjugated and transposed.
@compatibility(numpy)
In `numpy` transposes are memory-efficient constant time operations as they
simply return a new view of the same data with adjusted `strides`.
TensorFlow does not support strides, so `transpose` returns a new tensor with
the items permuted.
@end_compatibility
For example:
```python
x = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.transpose(x) # [[1, 4]
# [2, 5]
# [3, 6]]
# Equivalently
tf.transpose(x, perm=[1, 0]) # [[1, 4]
# [2, 5]
# [3, 6]]
# If x is complex, setting conjugate=True gives the conjugate transpose
x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
[4 + 4j, 5 + 5j, 6 + 6j]])
tf.transpose(x, conjugate=True) # [[1 - 1j, 4 - 4j],
# [2 - 2j, 5 - 5j],
# [3 - 3j, 6 - 6j]]
# 'perm' is more useful for n-dimensional tensors, for n > 2
x = tf.constant([[[ 1, 2, 3],
[ 4, 5, 6]],
[[ 7, 8, 9],
[10, 11, 12]]])
# Take the transpose of the matrices in dimension-0
# (this common operation has a shorthand `linalg.matrix_transpose`)
tf.transpose(x, perm=[0, 2, 1]) # [[[1, 4],
# [2, 5],
# [3, 6]],
# [[7, 10],
# [8, 11],
# [9, 12]]]
```
Args:
a: A `Tensor`.
perm: A permutation of the dimensions of `a`.
name: A name for the operation (optional).
conjugate: Optional bool. Setting it to `True` is mathematically equivalent
to tf.math.conj(tf.transpose(input)).
Returns:
A transposed `Tensor`.
"""
with ops.name_scope(name, "transpose", [a]) as name:
if not tensor_util.is_tensor(a):
a = ops.convert_to_tensor(a, name="a")
if conjugate and a.dtype.is_complex:
transpose_fn = gen_array_ops.conjugate_transpose
else:
transpose_fn = gen_array_ops.transpose
if perm is not None:
return transpose_fn(a, perm, name=name)
rank = a.shape.rank
if rank is None:
perm = gen_math_ops._range(gen_array_ops.rank(a) - 1, -1, -1)
else:
perm = np.arange(rank - 1, -1, -1, dtype=np.int32)
return transpose_fn(a, perm, name=name)
# pylint: disable=invalid-name
@tf_export(
"linalg.matrix_transpose",
v1=["linalg.transpose", "linalg.matrix_transpose", "matrix_transpose"])
@deprecation.deprecated_endpoints("matrix_transpose", "linalg.transpose")
def matrix_transpose(a, name="matrix_transpose", conjugate=False):
"""Transposes last two dimensions of tensor `a`.
For example:
```python
x = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.linalg.matrix_transpose(x) # [[1, 4],
# [2, 5],
# [3, 6]]
x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
[4 + 4j, 5 + 5j, 6 + 6j]])
tf.linalg.matrix_transpose(x, conjugate=True) # [[1 - 1j, 4 - 4j],
# [2 - 2j, 5 - 5j],
# [3 - 3j, 6 - 6j]]
# Matrix with two batch dimensions.
# x.shape is [1, 2, 3, 4]
# tf.linalg.matrix_transpose(x) is shape [1, 2, 4, 3]
```
Note that `tf.matmul` provides kwargs allowing for transpose of arguments.
This is done with minimal cost, and is preferable to using this function. E.g.
```python
# Good! Transpose is taken at minimal additional cost.
tf.matmul(matrix, b, transpose_b=True)
# Inefficient!
tf.matmul(matrix, tf.linalg.matrix_transpose(b))
```
@compatibility(numpy)
In `numpy` transposes are memory-efficient constant time operations as they
simply return a new view of the same data with adjusted `strides`.
TensorFlow does not support strides, `linalg.matrix_transpose` returns a new
tensor with the items permuted.
@end_compatibility
Args:
a: A `Tensor` with `rank >= 2`.
name: A name for the operation (optional).
conjugate: Optional bool. Setting it to `True` is mathematically equivalent
to tf.math.conj(tf.linalg.matrix_transpose(input)).
Returns:
A transposed batch matrix `Tensor`.
Raises:
ValueError: If `a` is determined statically to have `rank < 2`.
"""
with ops.name_scope(name, values=[a]):
a = ops.convert_to_tensor(a, name="a")
# If we know the number of dimensions (statically), we can do two things:
# 1. Check that `a` is a (batch) matrix.
# 2. Use a python list for perm. This preserves static shape information
# and avoids extra computations.
a_shape = a.get_shape()
ndims = a_shape.ndims
if ndims is not None:
if ndims < 2:
raise ValueError(
"Argument 'a' should be a (batch) matrix, with rank >= 2. Found: "
"%s" % a_shape)
perm = list(range(ndims - 2)) + [ndims - 1] + [ndims - 2]
else:
a_rank = rank(a)
perm = concat(
(gen_math_ops._range(0, a_rank - 2, 1), [a_rank - 1, a_rank - 2]), 0)
return transpose(a, perm=perm, conjugate=conjugate)
@tf_export("linalg.diag", v1=["linalg.diag", "matrix_diag"])
@deprecation.deprecated_endpoints("matrix_diag")
def matrix_diag(diagonal,
name="diag",
k=0,
num_rows=-1,
num_cols=-1,
padding_value=0,
align="RIGHT_LEFT"):
"""Returns a batched diagonal tensor with given batched diagonal values.
Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th
diagonals of a matrix, with everything else padded with `padding`. `num_rows`
and `num_cols` specify the dimension of the innermost matrix of the output. If
both are not specified, the op assumes the innermost matrix is square and
infers its size from `k` and the innermost dimension of `diagonal`. If only
one of them is specified, the op assumes the unspecified value is the smallest
possible based on other criteria.
Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor
has rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only
one diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has
rank `r` with shape `[I, J, ..., L, num_rows, num_cols]`.
The second innermost dimension of `diagonal` has double meaning. When `k` is
scalar or `k[0] == k[1]`, `M` is part of the batch size [I, J, ..., M], and
the output tensor is:
```
output[i, j, ..., l, m, n]
= diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper
padding_value ; otherwise
```
Otherwise, `M` is treated as the number of diagonals for the matrix in the
same batch (`M = k[1]-k[0]+1`), and the output tensor is:
```
output[i, j, ..., l, m, n]
= diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
padding_value ; otherwise
```
where `d = n - m`, `diag_index = k[1] - d`, and
`index_in_diag = n - max(d, 0) + offset`.
`offset` is zero except when the alignment of the diagonal is to the right.
```
offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
and `d >= 0`) or
(`align` in {LEFT_RIGHT, RIGHT_RIGHT}
and `d <= 0`)
0 ; otherwise
```
where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.
For example:
```
# The main diagonal.
diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4)
[5, 6, 7, 8]])
tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4)
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]],
[[5, 0, 0, 0],
[0, 6, 0, 0],
[0, 0, 7, 0],
[0, 0, 0, 8]]]
# A superdiagonal (per batch).
diagonal = np.array([[1, 2, 3], # Input shape: (2, 3)
[4, 5, 6]])
tf.matrix_diag(diagonal, k = 1)
==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4)
[0, 0, 2, 0],
[0, 0, 0, 3],
[0, 0, 0, 0]],
[[0, 4, 0, 0],
[0, 0, 5, 0],
[0, 0, 0, 6],
[0, 0, 0, 0]]]
# A tridiagonal band (per batch).
diagonals = np.array([[[8, 9, 0], # Input shape: (2, 2, 3)
[1, 2, 3],
[0, 4, 5]],
[[2, 3, 0],
[6, 7, 9],
[0, 9, 1]]])
tf.matrix_diag(diagonals, k = (-1, 1))
==> [[[1, 8, 0], # Output shape: (2, 3, 3)
[4, 2, 9],
[0, 5, 3]],
[[6, 2, 0],
[9, 7, 3],
[0, 1, 9]]]
# RIGHT_LEFT alignment.
diagonals = np.array([[[0, 8, 9], # Input shape: (2, 2, 3)
[1, 2, 3],
[4, 5, 0]],
[[0, 2, 3],
[6, 7, 9],
[9, 1, 0]]])
tf.matrix_diag(diagonals, k = (-1, 1), align="RIGHT_LEFT")
==> [[[1, 8, 0], # Output shape: (2, 3, 3)
[4, 2, 9],
[0, 5, 3]],
[[6, 2, 0],
[9, 7, 3],
[0, 1, 9]]]
# Rectangular matrix.
diagonal = np.array([1, 2]) # Input shape: (2)
tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4)
==> [[0, 0, 0, 0], # Output shape: (3, 4)
[1, 0, 0, 0],
[0, 2, 0, 0]]
# Rectangular matrix with inferred num_cols and padding_value = 9.
tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9)
==> [[9, 9], # Output shape: (3, 2)
[1, 9],
[9, 2]]
```
Args:
diagonal: A `Tensor` with `rank k >= 1`.
name: A name for the operation (optional).
k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the
main diagonal, and negative value means subdiagonals. `k` can be a single
integer (for a single diagonal) or a pair of integers specifying the low
and high ends of a matrix band. `k[0]` must not be larger than `k[1]`.
num_rows: The number of rows of the output matrix. If it is not provided,
the op assumes the output matrix is a square matrix and infers the matrix
size from `d_lower`, `d_upper`, and the innermost dimension of `diagonal`.
num_cols: The number of columns of the output matrix. If it is not provided,
the op assumes the output matrix is a square matrix and infers the matrix
size from `d_lower`, `d_upper`, and the innermost dimension of `diagonal`.
padding_value: The value to fill the area outside the specified diagonal
band with. Default is 0.
align: Some diagonals are shorter than `max_diag_len` and need to be padded.
`align` is a string specifying how superdiagonals and subdiagonals should
be aligned, respectively. There are four possible alignments: "RIGHT_LEFT"
(default), "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT"
aligns superdiagonals to the right (left-pads the row) and subdiagonals to
the left (right-pads the row). It is the packing format LAPACK uses.
cuSPARSE uses "LEFT_RIGHT", which is the opposite alignment.
Returns:
A Tensor. Has the same type as `diagonal`.
"""
if compat.forward_compatible(*matrix_diag_v3_forward_compat_date):
# Special case to sidestep the tf.constant conversion error:
# TypeError: Expected bool, got 0 of type 'int' instead.
if hasattr(diagonal, "dtype") and diagonal.dtype == "bool":
padding_value = bool(padding_value)
return gen_array_ops.matrix_diag_v3(
diagonal=diagonal,
k=k,
num_rows=num_rows,
num_cols=num_cols,
padding_value=padding_value,
align=align,
name=name)
# Call v1 to maintain forward compatibility.
# (We skip v2 because its alignment conflicts with v3's default alignment.)
return gen_array_ops.matrix_diag(diagonal=diagonal, name=name)
@tf_export("linalg.diag_part", v1=["linalg.diag_part", "matrix_diag_part"])
@deprecation.deprecated_endpoints("matrix_diag_part")
@dispatch.add_dispatch_support
def matrix_diag_part(
input, # pylint:disable=redefined-builtin
name="diag_part",
k=0,
padding_value=0,
align="RIGHT_LEFT"):
"""Returns the batched diagonal part of a batched tensor.
Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched
`input`.
Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`.
Let `max_diag_len` be the maximum length among all diagonals to be extracted,
`max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
Let `num_diags` be the number of diagonals to extract,
`num_diags = k[1] - k[0] + 1`.
If `num_diags == 1`, the output tensor is of rank `r - 1` with shape
`[I, J, ..., L, max_diag_len]` and values:
```
diagonal[i, j, ..., l, n]
= input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,
padding_value ; otherwise.
```
where `y = max(-k[1], 0)`, `x = max(k[1], 0)`.
Otherwise, the output tensor has rank `r` with dimensions
`[I, J, ..., L, num_diags, max_diag_len]` with values:
```
diagonal[i, j, ..., l, m, n]
= input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,
padding_value ; otherwise.
```
where `d = k[1] - m`, `y = max(-d, 0) - offset`, and `x = max(d, 0) - offset`.
`offset` is zero except when the alignment of the diagonal is to the right.
```
offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
and `d >= 0`) or
(`align` in {LEFT_RIGHT, RIGHT_RIGHT}
and `d <= 0`)
0 ; otherwise
```
where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.
The input must be at least a matrix.
For example:
```
input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4)
[5, 6, 7, 8],
[9, 8, 7, 6]],
[[5, 4, 3, 2],
[1, 2, 3, 4],
[5, 6, 7, 8]]])
# A main diagonal from each batch.
tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3)
[5, 2, 7]]
# A superdiagonal from each batch.
tf.matrix_diag_part(input, k = 1)
==> [[2, 7, 6], # Output shape: (2, 3)
[4, 3, 8]]
# A band from each batch.
tf.matrix_diag_part(input, k = (-1, 2))
==> [[[3, 8, 0], # Output shape: (2, 4, 3)
[2, 7, 6],
[1, 6, 7],
[0, 5, 8]],
[[3, 4, 0],
[4, 3, 8],
[5, 2, 7],
[0, 1, 6]]]
# RIGHT_LEFT alignment.
tf.matrix_diag_part(input, k = (-1, 2), align="RIGHT_LEFT")
==> [[[0, 3, 8], # Output shape: (2, 4, 3)
[2, 7, 6],
[1, 6, 7],
[5, 8, 0]],
[[0, 3, 4],
[4, 3, 8],
[5, 2, 7],
[1, 6, 0]]]
# max_diag_len can be shorter than the main diagonal.
tf.matrix_diag_part(input, k = (-2, -1))
==> [[[5, 8],
[0, 9]],
[[1, 6],
[0, 5]]]
# padding_value = 9
tf.matrix_diag_part(input, k = (1, 3), padding_value = 9)
==> [[[4, 9, 9], # Output shape: (2, 3, 3)
[3, 8, 9],
[2, 7, 6]],
[[2, 9, 9],
[3, 4, 9],
[4, 3, 8]]]
```
Args:
input: A `Tensor` with `rank k >= 2`.
name: A name for the operation (optional).
k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the
main diagonal, and negative value means subdiagonals. `k` can be a single
integer (for a single diagonal) or a pair of integers specifying the low
and high ends of a matrix band. `k[0]` must not be larger than `k[1]`.
padding_value: The value to fill the area outside the specified diagonal
band with. Default is 0.
align: Some diagonals are shorter than `max_diag_len` and need to be padded.
`align` is a string specifying how superdiagonals and subdiagonals should
be aligned, respectively. There are four possible alignments: "RIGHT_LEFT"
(default), "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT"
aligns superdiagonals to the right (left-pads the row) and subdiagonals to
the left (right-pads the row). It is the packing format LAPACK uses.
cuSPARSE uses "LEFT_RIGHT", which is the opposite alignment.
Returns:
A Tensor containing diagonals of `input`. Has the same type as `input`.
"""
if compat.forward_compatible(*matrix_diag_v3_forward_compat_date):
# Special case to sidestep the tf.constant conversion error:
# TypeError: Expected bool, got 0 of type 'int' instead.
if hasattr(input, "dtype") and input.dtype == "bool":
padding_value = bool(padding_value)
return gen_array_ops.matrix_diag_part_v3(
input=input, k=k, padding_value=padding_value, align=align, name=name)
# Call v1 to maintain forward compatibility.
# (We skip v2 because its alignment conflicts with v3's default alignment.)
return gen_array_ops.matrix_diag_part(input=input, name=name)
@tf_export("linalg.set_diag", v1=["linalg.set_diag", "matrix_set_diag"])
@deprecation.deprecated_endpoints("matrix_set_diag")
def matrix_set_diag(
input, # pylint:disable=redefined-builtin
diagonal,
name="set_diag",
k=0,
align="RIGHT_LEFT"):
"""Returns a batched matrix tensor with new batched diagonal values.
Given `input` and `diagonal`, this operation returns a tensor with the
same shape and values as `input`, except for the specified diagonals of the
innermost matrices. These will be overwritten by the values in `diagonal`.
`input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or
`k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`.
Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`.
`num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`.
`max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`,
`max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
The output is a tensor of rank `r+1` with dimensions `[I, J, ..., L, M, N]`.
If `k` is scalar or `k[0] == k[1]`:
```
output[i, j, ..., l, m, n]
= diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1]
input[i, j, ..., l, m, n] ; otherwise
```
Otherwise,
```
output[i, j, ..., l, m, n]
= diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
input[i, j, ..., l, m, n] ; otherwise
```
where `d = n - m`, `diag_index = k[1] - d`, and
`index_in_diag = n - max(d, 0) + offset`.
`offset` is zero except when the alignment of the diagonal is to the right.
```
offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
and `d >= 0`) or
(`align` in {LEFT_RIGHT, RIGHT_RIGHT}
and `d <= 0`)
0 ; otherwise
```
where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.
For example:
```
# The main diagonal.
input = np.array([[[7, 7, 7, 7], # Input shape: (2, 3, 4)
[7, 7, 7, 7],
[7, 7, 7, 7]],
[[7, 7, 7, 7],
[7, 7, 7, 7],
[7, 7, 7, 7]]])
diagonal = np.array([[1, 2, 3], # Diagonal shape: (2, 3)
[4, 5, 6]])
tf.matrix_set_diag(input, diagonal)
==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4)
[7, 2, 7, 7],
[7, 7, 3, 7]],
[[4, 7, 7, 7],
[7, 5, 7, 7],
[7, 7, 6, 7]]]
# A superdiagonal (per batch).
tf.matrix_set_diag(input, diagonal, k = 1)
==> [[[7, 1, 7, 7], # Output shape: (2, 3, 4)
[7, 7, 2, 7],
[7, 7, 7, 3]],
[[7, 4, 7, 7],
[7, 7, 5, 7],
[7, 7, 7, 6]]]
# A band of diagonals.
diagonals = np.array([[[9, 1, 0], # Diagonal shape: (2, 4, 3)
[6, 5, 8],
[1, 2, 3],
[0, 4, 5]],
[[1, 2, 0],
[5, 6, 4],
[6, 1, 2],
[0, 3, 4]]])
tf.matrix_set_diag(input, diagonals, k = (-1, 2))
==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4)
[4, 2, 5, 1],
[7, 5, 3, 8]],
[[6, 5, 1, 7],
[3, 1, 6, 2],
[7, 4, 2, 4]]]
# RIGHT_LEFT alignment.
diagonals = np.array([[[0, 9, 1], # Diagonal shape: (2, 4, 3)
[6, 5, 8],
[1, 2, 3],
[4, 5, 0]],
[[0, 1, 2],
[5, 6, 4],
[6, 1, 2],
[3, 4, 0]]])
tf.matrix_set_diag(input, diagonals, k = (-1, 2), align="RIGHT_LEFT")
==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4)
[4, 2, 5, 1],
[7, 5, 3, 8]],
[[6, 5, 1, 7],
[3, 1, 6, 2],
[7, 4, 2, 4]]]
```
Args:
input: A `Tensor` with rank `k + 1`, where `k >= 1`.
diagonal: A `Tensor` with rank `k`, when `d_lower == d_upper`, or `k + 1`,
otherwise. `k >= 1`.
name: A name for the operation (optional).
k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the
main diagonal, and negative value means subdiagonals. `k` can be a single
integer (for a single diagonal) or a pair of integers specifying the low
and high ends of a matrix band. `k[0]` must not be larger than `k[1]`.
align: Some diagonals are shorter than `max_diag_len` and need to be padded.
`align` is a string specifying how superdiagonals and subdiagonals should
be aligned, respectively. There are four possible alignments: "RIGHT_LEFT"
(default), "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT"
aligns superdiagonals to the right (left-pads the row) and subdiagonals to
the left (right-pads the row). It is the packing format LAPACK uses.
cuSPARSE uses "LEFT_RIGHT", which is the opposite alignment.
"""
if compat.forward_compatible(*matrix_diag_v3_forward_compat_date):
return gen_array_ops.matrix_set_diag_v3(
input=input, diagonal=diagonal, k=k, align=align, name=name)
# Call v1 to maintain forward compatibility.
# (We skip v2 because its alignment conflicts with v3's default alignment.)
return gen_array_ops.matrix_set_diag(
input=input, diagonal=diagonal, name=name)
# pylint: enable=invalid-name
def _constant_if_small(value, shape, dtype, name):
try:
if np.prod(shape) < 1000:
return constant(value, shape=shape, dtype=dtype, name=name)
except TypeError:
# Happens when shape is a Tensor, list with Tensor elements, etc.
pass
return None
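# Sizing sketch (threshold as coded above, example shapes assumed): a request
# like zeros([10, 10]) has np.prod(shape) == 100 < 1000 and is materialized as
# a constant, while zeros([4096, 4096]) returns None here so the caller falls
# back to a fill op and the GraphDef does not embed ~16M literal zeros.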
@tf_export("zeros")
def zeros(shape, dtype=dtypes.float32, name=None):
"""Creates a tensor with all elements set to zero.
This operation returns a tensor of type `dtype` with shape `shape` and
all elements set to zero.
>>> tf.zeros([3, 4], tf.int32)
<tf.Tensor: shape=(3, 4), dtype=int32, numpy=
array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype=int32)>
Args:
shape: A `list` of integers, a `tuple` of integers, or
a 1-D `Tensor` of type `int32`.
dtype: The DType of an element in the resulting `Tensor`.
name: Optional string. A name for the operation.
Returns:
A `Tensor` with all elements set to zero.
"""
dtype = dtypes.as_dtype(dtype).base_dtype
with ops.name_scope(name, "zeros", [shape]) as name:
if dtype == dtypes.bool:
zero = False
elif dtype == dtypes.string:
zero = ""
else:
zero = 0
if not isinstance(shape, ops.Tensor):
try:
if not context.executing_eagerly():
# Create a constant if it won't be very big. Otherwise create a fill
# op to prevent serialized GraphDefs from becoming too large.
output = _constant_if_small(zero, shape, dtype, name)
if output is not None:
return output
# Go through tensor shapes to get int64-if-needed semantics
shape = constant_op._tensor_shape_tensor_conversion_function(
tensor_shape.TensorShape(shape))
except (TypeError, ValueError):
# Happens when shape is a list with tensor elements
shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)
if not shape._shape_tuple():
shape = reshape(shape, [-1]) # Ensure it's a vector
output = fill(shape, constant(zero, dtype=dtype), name=name)
assert output.dtype.base_dtype == dtype
return output
@tf_export(v1=["zeros_like"])
@dispatch.add_dispatch_support
def zeros_like(tensor, dtype=None, name=None, optimize=True):
"""Creates a tensor with all elements set to zero.
Given a single tensor (`tensor`), this operation returns a tensor of the
same type and shape as `tensor` with all elements set to zero. Optionally,
you can use `dtype` to specify a new type for the returned tensor.
For example:
```python
tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.zeros_like(tensor) # [[0, 0, 0], [0, 0, 0]]
```
Args:
tensor: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float16`, `float32`,
`float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,
`complex64`, `complex128`, `bool` or `string`.
name: A name for the operation (optional).
optimize: if true, attempt to statically determine the shape of 'tensor' and
encode it as a constant.
Returns:
A `Tensor` with all elements set to zero.
"""
return zeros_like_impl(tensor, dtype, name, optimize)
@tf_export("zeros_like", v1=[])
@dispatch.add_dispatch_support
def zeros_like_v2(
input, # pylint: disable=redefined-builtin
dtype=None,
name=None):
"""Creates a tensor with all elements set to zero.
Given a single tensor (`tensor`), this operation returns a tensor of the
same type and shape as `tensor` with all elements set to zero. Optionally,
you can use `dtype` to specify a new type for the returned tensor.
For example:
```python
tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.zeros_like(tensor) # [[0, 0, 0], [0, 0, 0]] with dtype=int32
# If the dtype of the input `tensor` is `float32`, the output is also `float32`.
tensor = tf.constant([[1.0, 2.0, 3.0], [4, 5, 6]])
tf.zeros_like(tensor)  # [[0., 0., 0.], [0., 0., 0.]] with dtype=float32
# To get a different output dtype, pass it explicitly to the op.
tf.zeros_like(tensor, dtype=tf.int32)  # [[0, 0, 0], [0, 0, 0]] with dtype=int32
```
Args:
input: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float16`, `float32`,
`float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,
`complex64`, `complex128`, `bool` or `string`.
name: A name for the operation (optional).
Returns:
A `Tensor` with all elements set to zero.
"""
return zeros_like_impl(input, dtype, name, optimize=True)
def zeros_like_impl(tensor, dtype, name, optimize=True):
"""Internal implementation for the v1/v2 zeros_like API calls."""
with ops.name_scope(name, "zeros_like", [tensor]) as name:
if not tensor_util.is_tensor(tensor):
tensor = ops.convert_to_tensor(tensor, name="tensor")
tensor_shape = tensor.shape
tensor_dtype = tensor.dtype
if context.executing_eagerly():
if dtype is not None and dtype != tensor_dtype:
return zeros(
shape_internal(tensor, optimize=optimize), dtype=dtype, name=name)
return gen_array_ops.zeros_like(tensor, name=name)
# For now, variant types must be created via zeros_like; as we need to
# pass the input variant object to the proper zeros callback.
if (optimize and tensor_shape.is_fully_defined() and
tensor_dtype != dtypes.variant):
# We can produce a zeros tensor independent of the value of 'tensor',
# since the shape is known statically.
return zeros(tensor_shape, dtype=dtype or tensor_dtype, name=name)
if dtype is not None and dtype != tensor_dtype and dtype != dtypes.variant:
return zeros(
shape_internal(tensor, optimize=optimize), dtype=dtype, name=name)
else:
return gen_array_ops.zeros_like(tensor, name=name)
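# Branch sketch (assumed inputs): in graph mode, a statically shaped float32
# [2, 3] tensor hits the fully-defined-shape branch above and emits
# zeros([2, 3]) directly; an unknown-shape tensor with a dtype override reads
# its shape at runtime via shape_internal; otherwise the gen_array_ops
# zeros_like kernel is used, and eager execution short-circuits at the top.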
@tf_export(v1=["ones_like"])
@dispatch.add_dispatch_support
def ones_like(tensor, dtype=None, name=None, optimize=True):
"""Creates a tensor with all elements set to 1.
Given a single tensor (`tensor`), this operation returns a tensor of the same
type and shape as `tensor` with all elements set to 1. Optionally, you can
specify a new type (`dtype`) for the returned tensor.
For example:
```python
tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.ones_like(tensor) # [[1, 1, 1], [1, 1, 1]]
```
Args:
tensor: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
`int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`, `complex64`,
`complex128` or `bool`.
name: A name for the operation (optional).
optimize: if true, attempt to statically determine the shape of 'tensor' and
encode it as a constant.
Returns:
A `Tensor` with all elements set to 1.
"""
return ones_like_impl(tensor, dtype, name, optimize)
@tf_export("ones_like", v1=[])
@dispatch.add_dispatch_support
def ones_like_v2(
input, # pylint: disable=redefined-builtin
dtype=None,
name=None):
"""Creates a tensor with all elements set to one.
Given a single tensor (`tensor`), this operation returns a tensor of the
same type and shape as `tensor` with all elements set to 1. Optionally,
you can use `dtype` to specify a new type for the returned tensor.
For example:
```python
tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.ones_like(tensor) # [[1, 1, 1], [1, 1, 1]]
```
Args:
input: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float16`, `float32`,
`float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,
`complex64`, `complex128`, `bool` or `string`.
name: A name for the operation (optional).
Returns:
A `Tensor` with all elements set to one.
"""
return ones_like_impl(input, dtype, name, optimize=True)
def ones_like_impl(tensor, dtype, name, optimize=True):
"""Internal implementation for the v1/v2 ones_like API calls."""
with ops.name_scope(name, "ones_like", [tensor]) as name:
tensor = ops.convert_to_tensor(tensor, name="tensor")
ones_shape = shape_internal(tensor, optimize=optimize)
if dtype is None:
dtype = tensor.dtype
ret = ones(ones_shape, dtype=dtype, name=name)
if not context.executing_eagerly():
ret.set_shape(tensor.get_shape())
return ret
@tf_export("ones")
def ones(shape, dtype=dtypes.float32, name=None):
"""Creates a tensor with all elements set to one (1).
This operation returns a tensor of type `dtype` with shape `shape` and
all elements set to one.
>>> tf.ones([3, 4], tf.int32)
<tf.Tensor: shape=(3, 4), dtype=int32, numpy=
array([[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]], dtype=int32)>
Args:
shape: A `list` of integers, a `tuple` of integers, or
a 1-D `Tensor` of type `int32`.
dtype: Optional DType of an element in the resulting `Tensor`. Default is
`tf.float32`.
name: Optional string. A name for the operation.
Returns:
A `Tensor` with all elements set to one (1).
"""
dtype = dtypes.as_dtype(dtype).base_dtype
with ops.name_scope(name, "ones", [shape]) as name:
one = True if dtype == dtypes.bool else 1
if not isinstance(shape, ops.Tensor):
try:
if not context.executing_eagerly():
# Create a constant if it won't be very big. Otherwise create a fill
# op to prevent serialized GraphDefs from becoming too large.
output = _constant_if_small(one, shape, dtype, name)
if output is not None:
return output
# Go through tensor shapes to get int64-if-needed semantics
shape = constant_op._tensor_shape_tensor_conversion_function(
tensor_shape.TensorShape(shape))
except (TypeError, ValueError):
# Happens when shape is a list with tensor elements
shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)
if not shape._shape_tuple():
shape = reshape(shape, [-1]) # Ensure it's a vector
output = fill(shape, constant(one, dtype=dtype), name=name)
assert output.dtype.base_dtype == dtype
return output
@tf_export(v1=["placeholder"])
def placeholder(dtype, shape=None, name=None):
"""Inserts a placeholder for a tensor that will be always fed.
**Important**: This tensor will produce an error if evaluated. Its value must
be fed using the `feed_dict` optional argument to `Session.run()`,
`Tensor.eval()`, or `Operation.run()`.
For example:
```python
x = tf.compat.v1.placeholder(tf.float32, shape=(1024, 1024))
y = tf.matmul(x, x)
with tf.compat.v1.Session() as sess:
print(sess.run(y)) # ERROR: will fail because x was not fed.
rand_array = np.random.rand(1024, 1024)
print(sess.run(y, feed_dict={x: rand_array})) # Will succeed.
```
@compatibility(eager)
Placeholders are not compatible with eager execution.
@end_compatibility
Args:
dtype: The type of elements in the tensor to be fed.
shape: The shape of the tensor to be fed (optional). If the shape is not
specified, you can feed a tensor of any shape.
name: A name for the operation (optional).
Returns:
A `Tensor` that may be used as a handle for feeding a value, but not
evaluated directly.
Raises:
RuntimeError: if eager execution is enabled
"""
if context.executing_eagerly():
raise RuntimeError("tf.placeholder() is not compatible with "
"eager execution.")
return gen_array_ops.placeholder(dtype=dtype, shape=shape, name=name)
@tf_export(v1=["placeholder_with_default"])
def placeholder_with_default(input, shape, name=None): # pylint: disable=redefined-builtin
"""A placeholder op that passes through `input` when its output is not fed.
Args:
input: A `Tensor`. The default value to produce when output is not fed.
shape: A `tf.TensorShape` or list of `int`s. The (possibly partial) shape of
the tensor.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
return gen_array_ops.placeholder_with_default(input, shape, name)
# pylint: disable=redefined-outer-name
def _normalize_sparse_shape(shape, name):
"""Returns a tuple of (Tensor or None, rank or None)."""
if shape is None:
return (None, None)
rank = shape.get_shape()[0] if isinstance(shape, ops.Tensor) else len(shape)
if not isinstance(shape, ops.Tensor) and None in shape:
return (None, rank)
return (ops.convert_to_tensor(shape, dtype=dtypes.int64, name=name), rank)
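# Behaviour sketch (assumed inputs): a fully known shape such as [3, 4] is
# returned as (an int64 constant Tensor [3, 4], 2); a partially known shape
# such as [None, 4] yields (None, 2); and shape=None yields (None, None).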
@tf_export(v1=["sparse.placeholder", "sparse_placeholder"])
@deprecation.deprecated_endpoints("sparse_placeholder")
def sparse_placeholder(dtype, shape=None, name=None):
"""Inserts a placeholder for a sparse tensor that will be always fed.
**Important**: This sparse tensor will produce an error if evaluated.
Its value must be fed using the `feed_dict` optional argument to
`Session.run()`, `Tensor.eval()`, or `Operation.run()`.
For example:
```python
x = tf.compat.v1.sparse.placeholder(tf.float32)
y = tf.sparse.reduce_sum(x)
with tf.compat.v1.Session() as sess:
print(sess.run(y)) # ERROR: will fail because x was not fed.
indices = np.array([[3, 2, 0], [4, 5, 1]], dtype=np.int64)
values = np.array([1.0, 2.0], dtype=np.float32)
shape = np.array([7, 9, 2], dtype=np.int64)
print(sess.run(y, feed_dict={
x: tf.compat.v1.SparseTensorValue(indices, values, shape)}))  # Will succeed.
print(sess.run(y, feed_dict={
x: (indices, values, shape)})) # Will succeed.
sp = tf.SparseTensor(indices=indices, values=values, dense_shape=shape)
sp_value = sp.eval(session=sess)
print(sess.run(y, feed_dict={x: sp_value})) # Will succeed.
```
@compatibility(eager)
Placeholders are not compatible with eager execution.
@end_compatibility
Args:
dtype: The type of `values` elements in the tensor to be fed.
shape: The shape of the tensor to be fed (optional). If the shape is not
specified, you can feed a sparse tensor of any shape.
name: A name for prefixing the operations (optional).
Returns:
A `SparseTensor` that may be used as a handle for feeding a value, but not
evaluated directly.
Raises:
RuntimeError: if eager execution is enabled
"""
if context.executing_eagerly():
raise RuntimeError("tf.placeholder() is not compatible with "
"eager execution.")
shape_name = (name + "/shape") if name is not None else None
shape, rank = _normalize_sparse_shape(shape, shape_name)
if shape is None:
shape = placeholder(dtypes.int64, shape=[rank], name=shape_name)
return sparse_tensor.SparseTensor(
values=placeholder(
dtype,
shape=[None],
name=(name + "/values") if name is not None else None),
indices=placeholder(
dtypes.int64,
shape=[None, rank],
name=(name + "/indices") if name is not None else None),
dense_shape=shape)
# pylint: enable=redefined-outer-name
@tf_export("pad", v1=[])
def pad_v2(tensor, paddings, mode="CONSTANT", constant_values=0, name=None):
"""Pads a tensor.
This operation pads a `tensor` according to the `paddings` you specify.
`paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of
`tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how
many values to add before the contents of `tensor` in that dimension, and
`paddings[D, 1]` indicates how many values to add after the contents of
`tensor` in that dimension. If `mode` is "REFLECT" then both `paddings[D, 0]`
and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. If
`mode` is "SYMMETRIC" then both `paddings[D, 0]` and `paddings[D, 1]` must be
no greater than `tensor.dim_size(D)`.
The padded size of each dimension D of the output is:
`paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`
For example:
```python
t = tf.constant([[1, 2, 3], [4, 5, 6]])
paddings = tf.constant([[1, 1,], [2, 2]])
# 'constant_values' is 0.
# rank of 't' is 2.
tf.pad(t, paddings, "CONSTANT") # [[0, 0, 0, 0, 0, 0, 0],
# [0, 0, 1, 2, 3, 0, 0],
# [0, 0, 4, 5, 6, 0, 0],
# [0, 0, 0, 0, 0, 0, 0]]
tf.pad(t, paddings, "REFLECT") # [[6, 5, 4, 5, 6, 5, 4],
# [3, 2, 1, 2, 3, 2, 1],
# [6, 5, 4, 5, 6, 5, 4],
# [3, 2, 1, 2, 3, 2, 1]]
tf.pad(t, paddings, "SYMMETRIC") # [[2, 1, 1, 2, 3, 3, 2],
# [2, 1, 1, 2, 3, 3, 2],
# [5, 4, 4, 5, 6, 6, 5],
# [5, 4, 4, 5, 6, 6, 5]]
```
Args:
tensor: A `Tensor`.
paddings: A `Tensor` of type `int32`.
mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive)
constant_values: In "CONSTANT" mode, the scalar pad value to use. Must be
same type as `tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `tensor`.
Raises:
ValueError: When mode is not one of "CONSTANT", "REFLECT", or "SYMMETRIC".
"""
return pad(tensor, paddings, mode, name, constant_values)
@tf_export(v1=["pad"])
def pad(tensor, paddings, mode="CONSTANT", name=None, constant_values=0): # pylint: disable=invalid-name
"""Pads a tensor.
This operation pads a `tensor` according to the `paddings` you specify.
`paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of
`tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how
many values to add before the contents of `tensor` in that dimension, and
`paddings[D, 1]` indicates how many values to add after the contents of
`tensor` in that dimension. If `mode` is "REFLECT" then both `paddings[D, 0]`
and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. If
`mode` is "SYMMETRIC" then both `paddings[D, 0]` and `paddings[D, 1]` must be
no greater than `tensor.dim_size(D)`.
The padded size of each dimension D of the output is:
`paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`
For example:
```python
t = tf.constant([[1, 2, 3], [4, 5, 6]])
paddings = tf.constant([[1, 1,], [2, 2]])
# 'constant_values' is 0.
# rank of 't' is 2.
tf.pad(t, paddings, "CONSTANT") # [[0, 0, 0, 0, 0, 0, 0],
# [0, 0, 1, 2, 3, 0, 0],
# [0, 0, 4, 5, 6, 0, 0],
# [0, 0, 0, 0, 0, 0, 0]]
tf.pad(t, paddings, "REFLECT") # [[6, 5, 4, 5, 6, 5, 4],
# [3, 2, 1, 2, 3, 2, 1],
# [6, 5, 4, 5, 6, 5, 4],
# [3, 2, 1, 2, 3, 2, 1]]
tf.pad(t, paddings, "SYMMETRIC") # [[2, 1, 1, 2, 3, 3, 2],
# [2, 1, 1, 2, 3, 3, 2],
# [5, 4, 4, 5, 6, 6, 5],
# [5, 4, 4, 5, 6, 6, 5]]
```
Args:
tensor: A `Tensor`.
paddings: A `Tensor` of type `int32`.
mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive)
name: A name for the operation (optional).
constant_values: In "CONSTANT" mode, the scalar pad value to use. Must be
same type as `tensor`.
Returns:
A `Tensor`. Has the same type as `tensor`.
Raises:
ValueError: When mode is not one of "CONSTANT", "REFLECT", or "SYMMETRIC".
"""
# Convert lower/mixed case to upper for NumPy compatibility
# NumPy uses all lower-case modes.
mode = mode.upper()
if mode == "CONSTANT":
# TODO(rjryan): Once the forward compatibility period (3 weeks) has passed,
# remove the "Pad" fallback here.
if not tensor_util.is_tensor(constant_values) and constant_values == 0:
result = gen_array_ops.pad(tensor, paddings, name=name)
else:
result = gen_array_ops.pad_v2(
tensor, paddings, constant_values, name=name)
elif mode == "REFLECT":
result = gen_array_ops.mirror_pad(
tensor, paddings, mode="REFLECT", name=name)
elif mode == "SYMMETRIC":
result = gen_array_ops.mirror_pad(
tensor, paddings, mode="SYMMETRIC", name=name)
else:
raise ValueError("Unknown padding mode: %s" % mode)
# Restore shape information where possible.
if not context.executing_eagerly():
paddings_constant = _get_paddings_constant(paddings)
input_shape = (
tensor_shape.TensorShape(tensor.shape)
if isinstance(tensor, ops.Tensor) else result.op.inputs[0].shape)
if (input_shape.ndims is not None and
not result.shape.is_fully_defined() and paddings_constant is not None):
new_shape = []
for padding, dim in zip(paddings_constant, input_shape.as_list()):
if padding is None or dim is None or any((x is None for x in padding)):
new_shape.append(None)
else:
new_shape.append(sum(padding) + dim)
result.set_shape(new_shape)
return result
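# Static-shape sketch (assumed values): for a [2, 3] input with constant
# paddings [[1, 1], [2, 2]], the loop above recovers the output shape
# [1 + 2 + 1, 2 + 3 + 2] = [4, 7] without running the op; any None padding or
# dimension leaves the corresponding output dimension unknown.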
def _get_paddings_constant(paddings):
"""Helper to get the constant values of the paddings arg to pad().
Used under V1 graph mode to facilitate computation of the shape of the output
tensor of `pad()`.
Args:
paddings: The same paddings arg as passed to pad(). Can be a Tensor, or
a nested list or tuple of Tensor and/or numbers.
Returns:
A nested list of numbers or `None` values, in which `None` indicates an
unknown padding size.
"""
if isinstance(paddings, ops.Tensor):
return tensor_util.constant_value(paddings, partial=True)
elif isinstance(paddings, (list, tuple)):
return [_get_paddings_constant(x) for x in paddings]
else:
return paddings
@tf_export("meshgrid")
def meshgrid(*args, **kwargs):
"""Broadcasts parameters for evaluation on an N-D grid.
Given N one-dimensional coordinate arrays `*args`, returns a list `outputs`
of N-D coordinate arrays for evaluating expressions on an N-D grid.
Notes:
`meshgrid` supports cartesian ('xy') and matrix ('ij') indexing conventions.
When the `indexing` argument is set to 'xy' (the default), the broadcasting
instructions for the first two dimensions are swapped.
Examples:
Calling `X, Y = meshgrid(x, y)` with the tensors
```python
x = [1, 2, 3]
y = [4, 5, 6]
X, Y = tf.meshgrid(x, y)
# X = [[1, 2, 3],
# [1, 2, 3],
# [1, 2, 3]]
# Y = [[4, 4, 4],
# [5, 5, 5],
# [6, 6, 6]]
```
Args:
*args: `Tensor`s with rank 1.
**kwargs:
- indexing: Either 'xy' or 'ij' (optional, default: 'xy').
- name: A name for the operation (optional).
Returns:
outputs: A list of N `Tensor`s with rank N.
Raises:
TypeError: When no keyword arguments (kwargs) are passed.
ValueError: When indexing keyword argument is not one of `xy` or `ij`.
"""
indexing = kwargs.pop("indexing", "xy")
name = kwargs.pop("name", "meshgrid")
if kwargs:
key = list(kwargs.keys())[0]
raise TypeError("'{}' is an invalid keyword argument "
"for this function".format(key))
if indexing not in ("xy", "ij"):
raise ValueError("indexing parameter must be either 'xy' or 'ij'")
with ops.name_scope(name, "meshgrid", args) as name:
ndim = len(args)
s0 = (1,) * ndim
# Prepare reshape by inserting dimensions with size 1 where needed
output = []
for i, x in enumerate(args):
output.append(reshape(stack(x), (s0[:i] + (-1,) + s0[i + 1::])))
# Create parameters for broadcasting each tensor to the full size
shapes = [size(x) for x in args]
output_dtype = ops.convert_to_tensor(args[0]).dtype.base_dtype
if indexing == "xy" and ndim > 1:
output[0] = reshape(output[0], (1, -1) + (1,) * (ndim - 2))
output[1] = reshape(output[1], (-1, 1) + (1,) * (ndim - 2))
shapes[0], shapes[1] = shapes[1], shapes[0]
# TODO(nolivia): improve performance with a broadcast
mult_fact = ones(shapes, output_dtype)
return [x * mult_fact for x in output]
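# Indexing sketch (assumed inputs): tf.meshgrid([1, 2, 3], [4, 5]) with the
# default indexing='xy' returns X and Y of shape [2, 3], whereas
# indexing='ij' skips the swap above and returns them with shape [3, 2],
# matching numpy.meshgrid's conventions.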
NEW_AXIS = -1
SHRINK_AXIS = -2
# PEP-8 naming
# pylint: disable=invalid-name,redefined-outer-name
def _compute_size_of_strided_dim(shrink, spec, size):
"""Computes the size of a single strided slice dimension."""
unknown = None  # None here means the dimension size cannot be determined.
use_full_range = None  # None in a slice spec means "use the full valid range".
# if this is a shrink axis (i.e. a non-range index)
# it either will produce an error or return 1
if shrink:
return 1
if size is unknown or size.value is unknown:
return unknown
size = size.value
stride = spec.step
if stride is not unknown:
if stride == 0:
return unknown
valid_range = [0, size] if stride > 0 else [-1, size - 1]
# PEP-8 naming
# pylint: disable=invalid-name
def canonical(x, c):
if x is use_full_range:
return valid_range[c] if stride > 0 else valid_range[(c + 1) & 1]
else:
x_fwd = size + x if x < 0 else x # make negative indices positive
return max(valid_range[0], min(valid_range[1], x_fwd))
begin = canonical(spec.start, 0)
end = canonical(spec.stop, 1)
interval_length = end - begin
if interval_length == 0 or ((interval_length < 0) != (stride < 0)):
return 0
else:
remainder = 1 if interval_length % stride != 0 else 0
return interval_length // stride + remainder
else:
return unknown # unknown because stride is unknown
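# Worked sketch (assumed spec): for a dimension of size 10 and a slice spec
# equivalent to [1:7:2], the helper above canonicalizes begin=1 and end=7,
# computes interval_length=6, and returns 6 // 2 = 3 (the elements 1, 3, 5).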
def _TileGradShape(op):
"""Shape function for the TileGrad op."""
multiples_shape = op.inputs[1].get_shape().with_rank(1)
input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0])
# NOTE(mrry): Represent `multiples` as a `TensorShape` because (i)
# it is a vector of non-negative integers, and (ii) doing so allows
# us to handle partially-known multiples.
multiples = tensor_util.constant_value_as_shape(op.inputs[1]).with_rank(
input_shape.ndims)
if multiples.ndims is None:
return [tensor_shape.unknown_shape()]
else:
output_dims = []
for dim, multiple in zip(input_shape.dims, multiples.dims):
output_dims.append(dim // multiple)
return [tensor_shape.TensorShape(output_dims)]
@tf_export("edit_distance")
def edit_distance(hypothesis, truth, normalize=True, name="edit_distance"):
"""Computes the Levenshtein distance between sequences.
This operation takes variable-length sequences (`hypothesis` and `truth`),
each provided as a `SparseTensor`, and computes the Levenshtein distance.
You can normalize the edit distance by length of `truth` by setting
`normalize` to true.
For example, given the following input:
```python
# 'hypothesis' is a tensor of shape `[2, 1]` with variable-length values:
# (0,0) = ["a"]
# (1,0) = ["b"]
hypothesis = tf.SparseTensor(
[[0, 0, 0],
[1, 0, 0]],
["a", "b"],
(2, 1, 1))
# 'truth' is a tensor of shape `[2, 2]` with variable-length values:
# (0,0) = []
# (0,1) = ["a"]
# (1,0) = ["b", "c"]
# (1,1) = ["a"]
truth = tf.SparseTensor(
[[0, 1, 0],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0]],
["a", "b", "c", "a"],
(2, 2, 2))
normalize = True
```
This operation would return the following:
```python
# 'output' is a tensor of shape `[2, 2]` with edit distances normalized
# by 'truth' lengths.
output ==> [[inf, 1.0], # (0,0): no truth, (0,1): no hypothesis
[0.5, 1.0]] # (1,0): addition, (1,1): no hypothesis
```
Args:
hypothesis: A `SparseTensor` containing hypothesis sequences.
truth: A `SparseTensor` containing truth sequences.
normalize: A `bool`. If `True`, normalizes the Levenshtein distance by
length of `truth.`
name: A name for the operation (optional).
Returns:
A dense `Tensor` with rank `R - 1`, where R is the rank of the
`SparseTensor` inputs `hypothesis` and `truth`.
Raises:
TypeError: If either `hypothesis` or `truth` are not a `SparseTensor`.
"""
if not isinstance(
hypothesis,
(sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
raise TypeError("Hypothesis must be a SparseTensor.")
if not isinstance(
truth, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
raise TypeError("Truth must be a SparseTensor.")
return gen_array_ops.edit_distance(
hypothesis.indices,
hypothesis.values,
hypothesis.dense_shape,
truth.indices,
truth.values,
truth.dense_shape,
normalize=normalize,
name=name)
@ops.RegisterGradient("FakeQuantWithMinMaxArgs")
def _FakeQuantWithMinMaxArgsGradient(op, grad):
"""Gradient for FakeQuantWithMinMaxArgs op."""
return fake_quant_with_min_max_args_gradient(
grad,
op.inputs[0],
min=op.get_attr("min"),
max=op.get_attr("max"),
num_bits=op.get_attr("num_bits"),
narrow_range=op.get_attr("narrow_range"))
@ops.RegisterGradient("FakeQuantWithMinMaxVars")
def _FakeQuantWithMinMaxVarsGradient(op, grad):
"""Gradient for FakeQuantWithMinMaxVars op."""
return fake_quant_with_min_max_vars_gradient(
grad,
op.inputs[0],
op.inputs[1],
op.inputs[2],
num_bits=op.get_attr("num_bits"),
narrow_range=op.get_attr("narrow_range"))
@ops.RegisterGradient("FakeQuantWithMinMaxVarsPerChannel")
def _FakeQuantWithMinMaxVarsPerChannelGradient(op, grad):
"""Gradient for FakeQuantWithMinMaxVarsPerChannel op."""
return fake_quant_with_min_max_vars_per_channel_gradient(
grad,
op.inputs[0],
op.inputs[1],
op.inputs[2],
num_bits=op.get_attr("num_bits"),
narrow_range=op.get_attr("narrow_range"))
@tf_export("required_space_to_batch_paddings")
def required_space_to_batch_paddings(input_shape,
block_shape,
base_paddings=None,
name=None):
"""Calculate padding required to make block_shape divide input_shape.
This function can be used to calculate a suitable paddings argument for use
with space_to_batch_nd and batch_to_space_nd.
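For example (an illustrative sketch with assumed shapes):
```python
paddings, crops = tf.required_space_to_batch_paddings(
input_shape=[5, 7], block_shape=[3, 2])
# paddings -> [[0, 1], [0, 1]]  (5 + 1 is divisible by 3, 7 + 1 by 2)
# crops    -> [[0, 1], [0, 1]]
```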
Args:
input_shape: int32 Tensor of shape [N].
block_shape: int32 Tensor of shape [N].
base_paddings: Optional int32 Tensor of shape [N, 2]. Specifies the minimum
amount of padding to use. All elements must be >= 0. If not specified,
defaults to 0.
name: string. Optional name prefix.
Returns:
(paddings, crops), where:
`paddings` and `crops` are int32 Tensors of rank 2 and shape [N, 2]
satisfying:
paddings[i, 0] = base_paddings[i, 0].
0 <= paddings[i, 1] - base_paddings[i, 1] < block_shape[i]
(input_shape[i] + paddings[i, 0] + paddings[i, 1]) % block_shape[i] == 0
crops[i, 0] = 0
crops[i, 1] = paddings[i, 1] - base_paddings[i, 1]
Raises:
ValueError: If called with incompatible shapes.
"""
with ops.name_scope(name, "required_space_to_batch_paddings",
[input_shape, block_shape]):
input_shape = ops.convert_to_tensor(
input_shape, dtype=dtypes.int32, name="input_shape")
block_shape = ops.convert_to_tensor(
block_shape, dtype=dtypes.int32, name="block_shape")
block_shape.get_shape().assert_is_fully_defined()
block_shape.get_shape().assert_has_rank(1)
num_block_dims = block_shape.get_shape().dims[0].value
if num_block_dims == 0:
return zeros([0, 2], dtypes.int32), zeros([0, 2], dtypes.int32)
input_shape.get_shape().assert_is_compatible_with([num_block_dims])
if base_paddings is not None:
base_paddings = ops.convert_to_tensor(
base_paddings, dtype=dtypes.int32, name="base_paddings")
base_paddings.get_shape().assert_is_compatible_with([num_block_dims, 2])
else:
base_paddings = zeros([num_block_dims, 2], dtypes.int32)
const_block_shape = tensor_util.constant_value(block_shape)
const_input_shape = tensor_util.constant_value(input_shape)
const_base_paddings = tensor_util.constant_value(base_paddings)
if (const_block_shape is not None and const_input_shape is not None and
const_base_paddings is not None):
block_shape = const_block_shape
input_shape = const_input_shape
base_paddings = const_base_paddings
# Use same expression for both constant and non-constant case.
pad_start = base_paddings[:, 0]
orig_pad_end = base_paddings[:, 1]
full_input_shape = input_shape + pad_start + orig_pad_end
pad_end_extra = (block_shape - full_input_shape % block_shape) % block_shape
pad_end = orig_pad_end + pad_end_extra
result_paddings = stack(
[[pad_start[i], pad_end[i]] for i in range(num_block_dims)],
name="paddings")
result_crops = stack([[0, pad_end_extra[i]] for i in range(num_block_dims)],
name="crops")
return result_paddings, result_crops
@tf_export(v1=["nn.space_to_batch", "space_to_batch"])
@deprecation.deprecated_endpoints("space_to_batch")
def space_to_batch( # pylint: disable=missing-docstring
input, # pylint: disable=redefined-builtin
paddings,
block_size=None,
name=None,
block_shape=None): # pylint: disable=redefined-builtin
block_size = deprecation.deprecated_argument_lookup("block_shape",
block_shape, "block_size",
block_size)
result = space_to_batch_nd(
input,
paddings=paddings,
block_shape=np.array([block_size, block_size], dtype=np.int64),
name=name)
result.set_shape(result.get_shape().with_rank(4))
return result
space_to_batch.__doc__ = gen_array_ops.space_to_batch.__doc__
@tf_export("space_to_batch", "nn.space_to_batch", v1=[])
def space_to_batch_v2(input, block_shape, paddings, name=None): # pylint: disable=redefined-builtin
return space_to_batch_nd(input, block_shape, paddings, name)
space_to_batch_v2.__doc__ = gen_array_ops.space_to_batch_nd.__doc__
@tf_export(v1=["nn.space_to_depth", "space_to_depth"])
@deprecation.deprecated_endpoints("space_to_depth")
def space_to_depth(input, block_size, name=None, data_format="NHWC"): # pylint: disable=redefined-builtin
return gen_array_ops.space_to_depth(input, block_size, data_format, name=name)
space_to_depth.__doc__ = gen_array_ops.space_to_depth.__doc__
@tf_export("nn.space_to_depth", v1=[])
def space_to_depth_v2(input, block_size, data_format="NHWC", name=None): # pylint: disable=redefined-builtin
return gen_array_ops.space_to_depth(input, block_size, data_format, name=name)
space_to_depth_v2.__doc__ = gen_array_ops.space_to_depth.__doc__
@tf_export(v1=["nn.depth_to_space", "depth_to_space"])
@deprecation.deprecated_endpoints("depth_to_space")
def depth_to_space(input, block_size, name=None, data_format="NHWC"): # pylint: disable=redefined-builtin
return gen_array_ops.depth_to_space(input, block_size, data_format, name=name)
depth_to_space.__doc__ = gen_array_ops.depth_to_space.__doc__
@tf_export("nn.depth_to_space", v1=[])
def depth_to_space_v2(input, block_size, data_format="NHWC", name=None): # pylint: disable=redefined-builtin
return gen_array_ops.depth_to_space(input, block_size, data_format, name=name)
depth_to_space_v2.__doc__ = gen_array_ops.depth_to_space.__doc__
@tf_export(v1=["batch_to_space"])
def batch_to_space(input, crops, block_size, name=None, block_shape=None): # pylint: disable=redefined-builtin,missing-docstring
block_size = deprecation.deprecated_argument_lookup("block_shape",
block_shape, "block_size",
block_size)
result = batch_to_space_nd(
input,
crops=crops,
block_shape=np.array([block_size, block_size], dtype=np.int64),
name=name)
result.set_shape(result.get_shape().with_rank(4))
return result
batch_to_space.__doc__ = gen_array_ops.batch_to_space.__doc__
@tf_export("batch_to_space", v1=[])
def batch_to_space_v2(input, block_shape, crops, name=None): # pylint: disable=redefined-builtin
"""BatchToSpace for N-D tensors of type T.
This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of
shape `block_shape + [batch]`, interleaves these blocks back into the grid
defined by the spatial dimensions `[1, ..., M]`, to obtain a result with the
same rank as the input. The spatial dimensions of this intermediate result
are then optionally cropped according to `crops` to produce the output. This
is the reverse of SpaceToBatch (see `tf.space_to_batch`).
Args:
input: A N-D `Tensor` with shape `input_shape = [batch] + spatial_shape +
remaining_shape`, where `spatial_shape` has M dimensions.
block_shape: A 1-D `Tensor` with shape [M]. Must be one of the following
types: `int32`, `int64`. All values must be >= 1. For backwards
compatibility with TF 1.0, this parameter may be an int, in which case it is
converted to `numpy.array([block_shape, block_shape], dtype=numpy.int64)`.
crops: A 2-D `Tensor` with shape `[M, 2]`. Must be one of the
following types: `int32`, `int64`. All values must be >= 0.
`crops[i] = [crop_start, crop_end]` specifies the amount to crop from
input dimension `i + 1`, which corresponds to spatial dimension `i`.
It is required that
`crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.
This operation is equivalent to the following steps:
1. Reshape `input` to `reshaped` of shape: [block_shape[0], ...,
block_shape[M-1], batch / prod(block_shape), input_shape[1], ...,
input_shape[N-1]]
2. Permute dimensions of `reshaped` to produce `permuted` of shape
[batch / prod(block_shape), input_shape[1], block_shape[0], ...,
input_shape[M], block_shape[M-1], input_shape[M+1],
..., input_shape[N-1]]
3. Reshape `permuted` to produce `reshaped_permuted` of shape
[batch / prod(block_shape), input_shape[1] * block_shape[0], ...,
input_shape[M] * block_shape[M-1], input_shape[M+1], ...,
input_shape[N-1]]
4. Crop the start and end of dimensions `[1, ..., M]` of
`reshaped_permuted` according to `crops` to produce the output
of shape:
[batch / prod(block_shape), input_shape[1] *
block_shape[0] - crops[0,0] - crops[0,1], ..., input_shape[M] *
block_shape[M-1] - crops[M-1,0] - crops[M-1,1], input_shape[M+1],
..., input_shape[N-1]]
Some Examples:
(1) For the following input of shape `[4, 1, 1, 1]`,
`block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:
```python
[[[[1]]],
[[[2]]],
[[[3]]],
[[[4]]]]
```
The output tensor has shape `[1, 2, 2, 1]` and value:
```python
x = [[[[1], [2]],
[[3], [4]]]]
```
(2) For the following input of shape `[4, 1, 1, 3]`,
`block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:
```python
[[[1, 2, 3]],
[[4, 5, 6]],
[[7, 8, 9]],
[[10, 11, 12]]]
```
The output tensor has shape `[1, 2, 2, 3]` and value:
```python
x = [[[[1, 2, 3], [4, 5, 6 ]],
[[7, 8, 9], [10, 11, 12]]]]
```
(3) For the following
input of shape `[4, 2, 2, 1]`,
`block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:
```python
x = [[[[1], [3]], [[ 9], [11]]],
[[[2], [4]], [[10], [12]]],
[[[5], [7]], [[13], [15]]],
[[[6], [8]], [[14], [16]]]]
```
The output tensor has shape `[1, 4, 4, 1]` and value:
```python
x = [[[1], [2], [ 3], [ 4]],
[[5], [6], [ 7], [ 8]],
[[9], [10], [11], [12]],
[[13], [14], [15], [16]]]
```
(4) For the following input of shape
`[8, 1, 3, 1]`,
`block_shape = [2, 2]`, and `crops = [[0, 0], [2, 0]]`:
```python
x = [[[[0], [ 1], [ 3]]],
[[[0], [ 9], [11]]],
[[[0], [ 2], [ 4]]],
[[[0], [10], [12]]],
[[[0], [ 5], [ 7]]],
[[[0], [13], [15]]],
[[[0], [ 6], [ 8]]],
[[[0], [14], [16]]]]
```
The output tensor has shape `[2, 2, 4, 1]` and value:
```python
x = [[[[ 1], [ 2], [ 3], [ 4]],
[[ 5], [ 6], [ 7], [ 8]]],
[[[ 9], [10], [11], [12]],
[[13], [14], [15], [16]]]]
```
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
if isinstance(block_shape, int):
block_shape = np.array([block_shape, block_shape], dtype=np.int64)
return batch_to_space_nd(
input=input, block_shape=block_shape, crops=crops, name=name)
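# Compatibility sketch (tensor `x` and crops assumed): tf.batch_to_space(x, 2,
# crops) behaves like tf.batch_to_space(x, [2, 2], crops), because an int
# block_shape is expanded above to np.array([2, 2], dtype=np.int64) for
# backwards compatibility with TF 1.0.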
@tf_export("one_hot")
@dispatch.add_dispatch_support
def one_hot(indices,
depth,
on_value=None,
off_value=None,
axis=None,
dtype=None,
name=None):
"""Returns a one-hot tensor.
The locations represented by indices in `indices` take value `on_value`,
while all other locations take value `off_value`.
`on_value` and `off_value` must have matching data types. If `dtype` is also
provided, they must be the same data type as specified by `dtype`.
If `on_value` is not provided, it will default to the value `1` with type
`dtype`.
If `off_value` is not provided, it will default to the value `0` with type
`dtype`.
If the input `indices` is rank `N`, the output will have rank `N+1`. The
new axis is created at dimension `axis` (default: the new axis is appended
at the end).
If `indices` is a scalar, the output shape will be a vector of length `depth`.
If `indices` is a vector of length `features`, the output shape will be:
```
features x depth if axis == -1
depth x features if axis == 0
```
If `indices` is a matrix (batch) with shape `[batch, features]`, the output
shape will be:
```
batch x features x depth if axis == -1
batch x depth x features if axis == 1
depth x batch x features if axis == 0
```
If `indices` is a RaggedTensor, the 'axis' argument must be positive and refer
to a non-ragged axis. The output will be equivalent to applying 'one_hot' on
the values of the RaggedTensor, and creating a new RaggedTensor from the
result.
If `dtype` is not provided, it will attempt to assume the data type of
`on_value` or `off_value`, if one or both are passed in. If none of
`on_value`, `off_value`, or `dtype` are provided, `dtype` will default to the
value `tf.float32`.
Note: If a non-numeric data type output is desired (`tf.string`, `tf.bool`,
etc.), both `on_value` and `off_value` _must_ be provided to `one_hot`.
For example:
```python
indices = [0, 1, 2]
depth = 3
tf.one_hot(indices, depth) # output: [3 x 3]
# [[1., 0., 0.],
# [0., 1., 0.],
# [0., 0., 1.]]
indices = [0, 2, -1, 1]
depth = 3
tf.one_hot(indices, depth,
on_value=5.0, off_value=0.0,
axis=-1) # output: [4 x 3]
# [[5.0, 0.0, 0.0], # one_hot(0)
# [0.0, 0.0, 5.0], # one_hot(2)
# [0.0, 0.0, 0.0], # one_hot(-1)
# [0.0, 5.0, 0.0]] # one_hot(1)
indices = [[0, 2], [1, -1]]
depth = 3
tf.one_hot(indices, depth,
on_value=1.0, off_value=0.0,
axis=-1) # output: [2 x 2 x 3]
# [[[1.0, 0.0, 0.0], # one_hot(0)
# [0.0, 0.0, 1.0]], # one_hot(2)
# [[0.0, 1.0, 0.0], # one_hot(1)
# [0.0, 0.0, 0.0]]] # one_hot(-1)
indices = tf.ragged.constant([[0, 1], [2]])
depth = 3
tf.one_hot(indices, depth) # output: [2 x None x 3]
# [[[1., 0., 0.],
# [0., 1., 0.]],
# [[0., 0., 1.]]]
```
Args:
indices: A `Tensor` of indices.
depth: A scalar defining the depth of the one hot dimension.
on_value: A scalar defining the value to fill in output when `indices[j]
= i`. (default: 1)
off_value: A scalar defining the value to fill in output when `indices[j]
!= i`. (default: 0)
axis: The axis to fill (default: -1, a new inner-most axis).
dtype: The data type of the output tensor.
name: A name for the operation (optional).
Returns:
output: The one-hot tensor.
Raises:
    TypeError: If the dtype of either `on_value` or `off_value` doesn't match
      `dtype`.
    TypeError: If the dtypes of `on_value` and `off_value` don't match one
      another.
"""
with ops.name_scope(
name, "one_hot",
[indices, depth, on_value, off_value, axis, dtype]) as name:
on_exists = on_value is not None
off_exists = off_value is not None
on_dtype = (
ops.convert_to_tensor(on_value).dtype.base_dtype if on_exists else None)
off_dtype = (
ops.convert_to_tensor(off_value).dtype.base_dtype
if off_exists else None)
if on_exists or off_exists:
if dtype is not None:
# Ensure provided on_value and/or off_value match dtype
if on_exists and on_dtype != dtype:
raise TypeError("dtype {0} of on_value does not match "
"dtype parameter {1}".format(on_dtype, dtype))
if off_exists and off_dtype != dtype:
raise TypeError("dtype {0} of off_value does not match "
"dtype parameter {1}".format(off_dtype, dtype))
else:
# dtype not provided: automatically assign it
dtype = on_dtype if on_exists else off_dtype
elif dtype is None:
# None of on_value, off_value, or dtype provided. Default dtype to float32
dtype = dtypes.float32
if not on_exists:
# on_value not provided: assign to value 1 of type dtype
on_value = ops.convert_to_tensor(1, dtype, name="on_value")
on_dtype = dtype
if not off_exists:
# off_value not provided: assign to value 0 of type dtype
off_value = ops.convert_to_tensor(0, dtype, name="off_value")
off_dtype = dtype
if on_dtype != off_dtype:
raise TypeError("dtype {0} of on_value does not match "
"dtype {1} of off_value".format(on_dtype, off_dtype))
return gen_array_ops.one_hot(indices, depth, on_value, off_value, axis,
name)
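# --- Editor's illustrative sketch (not part of the original TensorFlow source). ---
# A small, hedged example of the one_hot op documented above; assumes a
# TensorFlow 2.x install and is never invoked at import time.
def _example_one_hot():  # hypothetical, not called anywhere in this module
  import tensorflow as tf
  # Three class labels expanded along a new inner axis -> a 3x3 identity matrix.
  return tf.one_hot(indices=[0, 1, 2], depth=3, on_value=1.0, off_value=0.0)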
def _all_dimensions(x):
"""Returns a 1D-tensor listing all dimensions in x."""
# Fast path: avoid creating Rank and Range ops if ndims is known.
if isinstance(x, ops.Tensor) and x.get_shape().ndims is not None:
return constant_op.constant(
np.arange(x.get_shape().ndims), dtype=dtypes.int32)
if (isinstance(x, sparse_tensor.SparseTensor) and
x.dense_shape.get_shape().is_fully_defined()):
r = x.dense_shape.get_shape().dims[0].value # sparse.dense_shape is 1-D.
return constant_op.constant(np.arange(r), dtype=dtypes.int32)
# Otherwise, we rely on `range` and `rank` to do the right thing at runtime.
return gen_math_ops._range(0, rank(x), 1)
@tf_export("sequence_mask")
def sequence_mask(lengths, maxlen=None, dtype=dtypes.bool, name=None):
"""Returns a mask tensor representing the first N positions of each cell.
If `lengths` has shape `[d_1, d_2, ..., d_n]` the resulting tensor `mask` has
dtype `dtype` and shape `[d_1, d_2, ..., d_n, maxlen]`, with
```
mask[i_1, i_2, ..., i_n, j] = (j < lengths[i_1, i_2, ..., i_n])
```
Examples:
```python
tf.sequence_mask([1, 3, 2], 5) # [[True, False, False, False, False],
# [True, True, True, False, False],
# [True, True, False, False, False]]
tf.sequence_mask([[1, 3],[2,0]]) # [[[True, False, False],
# [True, True, True]],
# [[True, True, False],
# [False, False, False]]]
```
Args:
lengths: integer tensor, all its values <= maxlen.
maxlen: scalar integer tensor, size of last dimension of returned tensor.
Default is the maximum value in `lengths`.
dtype: output type of the resulting tensor.
name: name of the op.
Returns:
A mask tensor of shape `lengths.shape + (maxlen,)`, cast to specified dtype.
Raises:
ValueError: if `maxlen` is not a scalar.
"""
with ops.name_scope(name, "SequenceMask", [lengths, maxlen]):
lengths = ops.convert_to_tensor(lengths)
if maxlen is None:
maxlen = gen_math_ops._max(lengths, _all_dimensions(lengths))
maxlen = gen_math_ops.maximum(constant(0, maxlen.dtype), maxlen)
else:
maxlen = ops.convert_to_tensor(maxlen)
if maxlen.get_shape().ndims is not None and maxlen.get_shape().ndims != 0:
raise ValueError("maxlen must be scalar for sequence_mask")
# The basic idea is to compare a range row vector of size maxlen:
# [0, 1, 2, 3, 4]
# to length as a matrix with 1 column: [[1], [3], [2]].
# Because of broadcasting on both arguments this comparison results
# in a matrix of size (len(lengths), maxlen)
row_vector = gen_math_ops._range(
constant(0, maxlen.dtype), maxlen, constant(1, maxlen.dtype))
    # Since maxlen >= max(lengths), it is safe to use maxlen's dtype as the
    # authoritative cast type: whenever maxlen fits into tf.int32, so do the
    # lengths.
matrix = gen_math_ops.cast(expand_dims(lengths, -1), maxlen.dtype)
result = row_vector < matrix
if dtype is None or result.dtype.base_dtype == dtype.base_dtype:
return result
else:
return gen_math_ops.cast(result, dtype)
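# --- Editor's illustrative sketch (not part of the original TensorFlow source). ---
# A hedged example of sequence_mask; assumes TensorFlow 2.x and is never invoked
# at import time.
def _example_sequence_mask():  # hypothetical, not called anywhere in this module
  import tensorflow as tf
  # Lengths [1, 3, 2] against maxlen=5 -> a [3, 5] boolean mask whose rows have
  # 1, 3 and 2 leading True values respectively.
  return tf.sequence_mask([1, 3, 2], maxlen=5)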
@tf_export(v1=["squeeze"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Use the `axis` argument instead",
"squeeze_dims")
def squeeze(input, axis=None, name=None, squeeze_dims=None):
# pylint: disable=redefined-builtin
"""Removes dimensions of size 1 from the shape of a tensor.
Given a tensor `input`, this operation returns a tensor of the same type with
all dimensions of size 1 removed. If you don't want to remove all size 1
dimensions, you can remove specific size 1 dimensions by specifying
`axis`.
For example:
>>> # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
>>> t = tf.ones([1, 2, 1, 3, 1, 1])
>>> print(tf.shape(tf.squeeze(t)).numpy())
[2 3]
Or, to remove specific size 1 dimensions:
>>> # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
>>> t = tf.ones([1, 2, 1, 3, 1, 1])
>>> print(tf.shape(tf.squeeze(t, [2, 4])).numpy())
[1 2 3 1]
Note: if `input` is a `tf.RaggedTensor`, then this operation takes `O(N)`
time, where `N` is the number of elements in the squeezed dimensions.
Args:
input: A `Tensor`. The `input` to squeeze.
axis: An optional list of `ints`. Defaults to `[]`. If specified, only
squeezes the dimensions listed. The dimension index starts at 0. It is an
error to squeeze a dimension that is not 1. Must be in the range
`[-rank(input), rank(input))`. Must be specified if `input` is a
`RaggedTensor`.
name: A name for the operation (optional).
squeeze_dims: Deprecated keyword argument that is now axis.
Returns:
A `Tensor`. Has the same type as `input`.
Contains the same data as `input`, but has one or more dimensions of
size 1 removed.
Raises:
ValueError: When both `squeeze_dims` and `axis` are specified.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis, "squeeze_dims",
squeeze_dims)
if np.isscalar(axis):
axis = [axis]
return gen_array_ops.squeeze(input, axis, name)
@tf_export("squeeze", v1=[])
@dispatch.add_dispatch_support
def squeeze_v2(input, axis=None, name=None):
"""Removes dimensions of size 1 from the shape of a tensor.
Given a tensor `input`, this operation returns a tensor of the same type with
all dimensions of size 1 removed. If you don't want to remove all size 1
dimensions, you can remove specific size 1 dimensions by specifying
`axis`.
For example:
```python
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
tf.shape(tf.squeeze(t)) # [2, 3]
```
Or, to remove specific size 1 dimensions:
```python
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
tf.shape(tf.squeeze(t, [2, 4])) # [1, 2, 3, 1]
```
Unlike the older op `tf.compat.v1.squeeze`, this op does not accept a
deprecated `squeeze_dims` argument.
Note: if `input` is a `tf.RaggedTensor`, then this operation takes `O(N)`
time, where `N` is the number of elements in the squeezed dimensions.
Args:
input: A `Tensor`. The `input` to squeeze.
axis: An optional list of `ints`. Defaults to `[]`. If specified, only
squeezes the dimensions listed. The dimension index starts at 0. It is an
error to squeeze a dimension that is not 1. Must be in the range
`[-rank(input), rank(input))`. Must be specified if `input` is a
`RaggedTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
Contains the same data as `input`, but has one or more dimensions of
size 1 removed.
Raises:
ValueError: The input cannot be converted to a tensor, or the specified
axis cannot be squeezed.
"""
# pylint: disable=redefined-builtin
return squeeze(input, axis, name)
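# --- Editor's illustrative sketch (not part of the original TensorFlow source). ---
# A hedged example of squeeze_v2 with an explicit `axis`; assumes TensorFlow 2.x
# and is never invoked at import time.
def _example_squeeze():  # hypothetical, not called anywhere in this module
  import tensorflow as tf
  t = tf.ones([1, 2, 1, 3, 1, 1])
  # Only axes 2 and 4 are squeezed, so the result has shape [1, 2, 3, 1].
  return tf.squeeze(t, axis=[2, 4])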
@tf_export(v1=["where"])
@dispatch.add_dispatch_support
def where(condition, x=None, y=None, name=None):
"""Return the elements, either from `x` or `y`, depending on the `condition`.
If both `x` and `y` are None, then this operation returns the coordinates of
true elements of `condition`. The coordinates are returned in a 2-D tensor
where the first dimension (rows) represents the number of true elements, and
the second dimension (columns) represents the coordinates of the true
elements. Keep in mind, the shape of the output tensor can vary depending on
how many true values there are in input. Indices are output in row-major
order.
If both non-None, `x` and `y` must have the same shape.
The `condition` tensor must be a scalar if `x` and `y` are scalar.
If `x` and `y` are tensors of higher rank, then `condition` must be either a
vector with size matching the first dimension of `x`, or must have the same
shape as `x`.
The `condition` tensor acts as a mask that chooses, based on the value at each
element, whether the corresponding element / row in the output should be taken
from `x` (if true) or `y` (if false).
If `condition` is a vector and `x` and `y` are higher rank matrices, then it
chooses which row (outer dimension) to copy from `x` and `y`. If `condition`
has the same shape as `x` and `y`, then it chooses which element to copy from
`x` and `y`.
Args:
condition: A `Tensor` of type `bool`
x: A Tensor which may have the same shape as `condition`. If `condition` is
rank 1, `x` may have higher rank, but its first dimension must match the
size of `condition`.
y: A `tensor` with the same shape and type as `x`.
name: A name of the operation (optional)
Returns:
A `Tensor` with the same type and shape as `x`, `y` if they are non-None.
Otherwise, a `Tensor` with shape `(num_true, rank(condition))`.
Raises:
ValueError: When exactly one of `x` or `y` is non-None.
"""
if x is None and y is None:
with ops.name_scope(name, "Where", [condition]) as name:
condition = ops.convert_to_tensor(
condition, preferred_dtype=dtypes.bool, name="condition")
return gen_array_ops.where(condition=condition, name=name)
elif x is not None and y is not None:
return gen_math_ops.select(condition=condition, x=x, y=y, name=name)
else:
raise ValueError("x and y must both be non-None or both be None.")
@tf_export("where", v1=["where_v2"])
def where_v2(condition, x=None, y=None, name=None):
"""Return the elements, either from `x` or `y`, depending on the `condition`.
If both `x` and `y` are None, then this operation returns the coordinates of
true elements of `condition`. The coordinates are returned in a 2-D tensor
where the first dimension (rows) represents the number of true elements, and
the second dimension (columns) represents the coordinates of the true
elements. Keep in mind, the shape of the output tensor can vary depending on
how many true values there are in input. Indices are output in row-major
order.
If both non-None, `condition`, `x` and `y` must be broadcastable to the same
shape.
The `condition` tensor acts as a mask that chooses, based on the value at each
element, whether the corresponding element / row in the output should be taken
from `x` (if true) or `y` (if false).
Args:
condition: A `Tensor` of type `bool`
x: A Tensor which is of the same type as `y`, and may be broadcastable with
`condition` and `y`.
y: A Tensor which is of the same type as `x`, and may be broadcastable with
`condition` and `x`.
name: A name of the operation (optional).
Returns:
A `Tensor` with the same type as `x` and `y`, and shape that
is broadcast from `condition`, `x`, and `y`, if `x`, `y` are non-None.
Otherwise, a `Tensor` with shape `(num_true, dim_size(condition))`.
Raises:
ValueError: When exactly one of `x` or `y` is non-None.
"""
if x is None and y is None:
with ops.name_scope(name, "Where", [condition]) as name:
condition = ops.convert_to_tensor(
condition, preferred_dtype=dtypes.bool, name="condition")
return gen_array_ops.where(condition=condition, name=name)
elif x is not None and y is not None:
return gen_math_ops.select_v2(condition=condition, t=x, e=y, name=name)
else:
raise ValueError("x and y must both be non-None or both be None.")
# pylint: disable=redefined-builtin
@tf_export(v1=["reverse_sequence"])
@deprecation.deprecated_args(None,
"seq_dim is deprecated, use seq_axis instead",
"seq_dim")
@deprecation.deprecated_args(None,
"batch_dim is deprecated, use batch_axis instead",
"batch_dim")
def reverse_sequence(input,
seq_lengths,
seq_axis=None,
batch_axis=None,
name=None,
seq_dim=None,
batch_dim=None):
"""Reverses variable length slices.
This op first slices `input` along the dimension `batch_axis`, and for
each slice `i`, reverses the first `seq_lengths[i]` elements along the
dimension `seq_axis`.
The elements of `seq_lengths` must obey `seq_lengths[i] <=
input.dims[seq_dim]`, and `seq_lengths` must be a vector of length
`input.dims[batch_dim]`.
The output slice `i` along dimension `batch_axis` is then given by
input slice `i`, with the first `seq_lengths[i]` slices along
dimension `seq_axis` reversed.
Example usage:
>>> seq_lengths = [7, 2, 3, 5]
>>> input = [[1, 2, 3, 4, 5, 0, 0, 0], [1, 2, 0, 0, 0, 0, 0, 0],
... [1, 2, 3, 4, 0, 0, 0, 0], [1, 2, 3, 4, 5, 6, 7, 8]]
>>> output = tf.reverse_sequence(input, seq_lengths, seq_axis=1, batch_axis=0)
>>> output
<tf.Tensor: shape=(4, 8), dtype=int32, numpy=
array([[0, 0, 5, 4, 3, 2, 1, 0],
[2, 1, 0, 0, 0, 0, 0, 0],
[3, 2, 1, 4, 0, 0, 0, 0],
[5, 4, 3, 2, 1, 6, 7, 8]], dtype=int32)>
Args:
    input: A `Tensor`. The input to reverse.
    seq_lengths: A `Tensor`. Must be one of the following types: `int32`,
      `int64`. 1-D with length `input.dims(batch_dim)` and `max(seq_lengths) <=
      input.dims(seq_dim)`.
    seq_axis: An `int`. The dimension which is partially reversed.
    batch_axis: An optional `int`. Defaults to `0`. The dimension along which
      reversal is performed.
    name: A name for the operation (optional).
Returns:
A Tensor. Has the same type as input.
"""
seq_axis = deprecation.deprecated_argument_lookup("seq_axis", seq_axis,
"seq_dim", seq_dim)
batch_axis = deprecation.deprecated_argument_lookup("batch_axis", batch_axis,
"batch_dim", batch_dim)
return gen_array_ops.reverse_sequence(
input=input,
seq_lengths=seq_lengths,
seq_dim=seq_axis,
batch_dim=batch_axis,
name=name)
@tf_export("reverse_sequence", v1=[])
def reverse_sequence_v2(input,
seq_lengths,
seq_axis=None,
batch_axis=None,
name=None):
return gen_array_ops.reverse_sequence(
input=input,
seq_lengths=seq_lengths,
seq_dim=seq_axis,
batch_dim=batch_axis,
name=name)
reverse_sequence_v2.__doc__ = reverse_sequence.__doc__
# pylint: enable=redefined-builtin
@tf_export(v1=["gather"])
@dispatch.add_dispatch_support
def gather(params,
indices,
validate_indices=None,
name=None,
axis=None,
batch_dims=0): # pylint: disable=g-doc-args
r"""Gather slices from params axis `axis` according to indices.
Gather slices from params axis `axis` according to `indices`. `indices` must
be an integer tensor of any dimension (usually 0-D or 1-D).
For 0-D (scalar) `indices`:
$$\begin{align*}
output[p_0, ..., p_{axis-1}, && &&& p_{axis + 1}, ..., p_{N-1}] = \\
params[p_0, ..., p_{axis-1}, && indices, &&& p_{axis + 1}, ..., p_{N-1}]
\end{align*}$$
Where *N* = `ndims(params)`.
For 1-D (vector) `indices` with `batch_dims=0`:
$$\begin{align*}
output[p_0, ..., p_{axis-1}, && &i, &&p_{axis + 1}, ..., p_{N-1}] =\\
params[p_0, ..., p_{axis-1}, && indices[&i], &&p_{axis + 1}, ..., p_{N-1}]
\end{align*}$$
In the general case, produces an output tensor where:
$$\begin{align*}
output[p_0, &..., p_{axis-1}, &
&i_{B}, ..., i_{M-1}, &
p_{axis + 1}, &..., p_{N-1}] = \\
params[p_0, &..., p_{axis-1}, &
indices[p_0, ..., p_{B-1}, &i_{B}, ..., i_{M-1}], &
p_{axis + 1}, &..., p_{N-1}]
\end{align*}$$
Where *N* = `ndims(params)`, *M* = `ndims(indices)`, and *B* = `batch_dims`.
Note that `params.shape[:batch_dims]` must be identical to
`indices.shape[:batch_dims]`.
The shape of the output tensor is:
> `output.shape = params.shape[:axis] + indices.shape[batch_dims:] +
> params.shape[axis + 1:]`.
Note that on CPU, if an out of bound index is found, an error is returned.
On GPU, if an out of bound index is found, a 0 is stored in the corresponding
output value.
See also `tf.gather_nd`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="https://www.tensorflow.org/images/Gather.png"
alt>
</div>
Args:
params: The `Tensor` from which to gather values. Must be at least rank
`axis + 1`.
indices: The index `Tensor`. Must be one of the following types: `int32`,
`int64`. Must be in range `[0, params.shape[axis])`.
validate_indices: Deprecated, does nothing.
axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. The
`axis` in `params` to gather `indices` from. Must be greater than or equal
to `batch_dims`. Defaults to the first non-batch dimension. Supports
negative indexes.
batch_dims: An `integer`. The number of batch dimensions. Must be less
than `rank(indices)`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `params`.
"""
del validate_indices
if axis is None:
axis = batch_dims
if tensor_util.constant_value(axis) != 0:
return gen_array_ops.gather_v2(
params, indices, axis, batch_dims=batch_dims, name=name)
try:
# TODO(apassos) find a less bad way of detecting resource variables
# without introducing a circular dependency.
return params.sparse_read(indices, name=name)
except AttributeError:
return gen_array_ops.gather_v2(params, indices, axis, name=name)
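# --- Editor's illustrative sketch (not part of the original TensorFlow source). ---
# A hedged example of gather along a non-default axis; assumes TensorFlow 2.x and
# is never invoked at import time.
def _example_gather():  # hypothetical, not called anywhere in this module
  import tensorflow as tf
  params = tf.constant([[0., 1., 2.], [10., 11., 12.]])
  # Gathers columns 2 and 0 along axis=1 -> [[2., 0.], [12., 10.]].
  return tf.gather(params, indices=[2, 0], axis=1)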
@tf_export("gather", v1=[])
@dispatch.add_dispatch_support
def gather_v2(params,
indices,
validate_indices=None,
axis=None,
batch_dims=0,
name=None):
return gather(
params,
indices,
validate_indices=validate_indices,
name=name,
axis=axis,
batch_dims=batch_dims)
gather_v2.__doc__ = gather.__doc__
@tf_export(v1=["batch_gather"])
@dispatch.add_dispatch_support
@deprecation.deprecated(
"2017-10-25", "`tf.batch_gather` is deprecated, please use `tf.gather` "
"with `batch_dims=-1` instead.") # pylint: disable=missing-docstring
def batch_gather(params, indices, name=None):
"""Gather slices from params according to indices with leading batch dims."""
with ops.name_scope(name, "BatchGather", [params, indices]):
indices = ops.convert_to_tensor(indices, name="indices")
params = ops.convert_to_tensor(params, name="params")
if indices.shape.ndims is None:
raise ValueError(
"batch_gather does not allow indices with unknown shape.")
return _batch_gather(params, indices, batch_dims=indices.shape.ndims - 1)
def _batch_gather(params, indices, batch_dims, axis=None):
r"""Gather slices from params according to indices with leading batch dims.
This operation assumes that the leading `batch_dims` dimensions of `indices`
and `params` are batch dimensions; and performs a `tf.gather` operation within
each batch. (If `batch_dims` is not specified, then it defaults to
`rank(indices)-1`.) In the case in which `batch_dims==0`, this operation
is equivalent to `tf.gather`.
Args:
params: A Tensor. The tensor from which to gather values.
indices: A Tensor. Must be one of the following types: int32, int64. Index
tensor. Must be in range `[0, params.shape[batch_dims]]`.
batch_dims: An integer or none. The number of batch dimensions. Must be
less than `rank(indices)`. Defaults to `rank(indices) - 1` if None.
axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. The
`axis` in `params` to gather `indices` from. Must be greater than or equal
to `batch_dims`. Defaults to the first non-batch dimension. Supports
negative indexes.
Returns:
A Tensor. Has the same type as `params`.
Raises:
ValueError: if `indices` has an unknown shape.
"""
if batch_dims is not None and not isinstance(batch_dims, int):
raise TypeError("batch_dims must be an int; got %r" % (batch_dims,))
indices = ops.convert_to_tensor(indices, name="indices")
params = ops.convert_to_tensor(params, name="params")
indices_ndims = indices.shape.ndims
if indices_ndims is None:
raise ValueError("tf.gather does not allow indices with unknown "
"rank when batch_dims is specified.")
if batch_dims is None:
batch_dims = indices_ndims - 1
if batch_dims < 0:
batch_dims += indices_ndims
if batch_dims < 0 or batch_dims >= indices_ndims:
raise ValueError("batch_dims = %d must be less than rank(indices) = %d" %
(batch_dims, indices_ndims))
if params.shape.ndims is not None and batch_dims >= params.shape.ndims:
raise ValueError("batch_dims = %d must be less than rank(params) = %d" %
(batch_dims, params.shape.ndims))
# Handle axis by transposing the axis dimension to be the first non-batch
# dimension, recursively calling batch_gather with axis=0, and then
# transposing the result to put the pre-axis dimensions before the indices
# dimensions.
if axis is not None and axis != batch_dims:
# Adjust axis to be positive.
    if not isinstance(axis, int):
      # `tf` and `array_ops` are not importable names inside this module; use
      # the module-local `where` and `rank` ops instead.
      axis = where(axis < 0, axis + rank(params), axis)
    elif axis < 0 and params.shape.ndims is None:
      axis = axis + rank(params)
else:
if (axis < -params.shape.ndims) or (axis >= params.shape.ndims):
raise ValueError("axis (%d) out of range [%d, %d)" %
(axis, -params.shape.ndims, params.shape.ndims))
if axis < 0:
axis += params.shape.ndims
if axis < batch_dims:
raise ValueError("batch_dims = %d must be less than or equal to "
"axis = %d" % (batch_dims, axis))
# Move params[axis] up to params[batch_dims].
perm = [
list(range(batch_dims)), [axis],
gen_math_ops._range(batch_dims, axis, 1),
gen_math_ops._range(axis + 1, rank(params), 1)
]
params = transpose(params, concat(perm, axis=0))
result = _batch_gather(params, indices, batch_dims=batch_dims)
# Move the result dimensions corresponding to params[batch_dims:axis]
# to just before the dimensions corresponding to indices[batch_dims:].
params_start = indices_ndims + axis - batch_dims
perm = [
list(range(batch_dims)),
gen_math_ops._range(indices_ndims, params_start, 1),
list(range(batch_dims, indices_ndims)),
gen_math_ops._range(params_start, rank(result), 1)
]
return transpose(result, perm=concat(perm, axis=0))
indices_shape = shape(indices)
params_shape = shape(params)
batch_indices = indices
indices_dtype = indices.dtype.base_dtype
accum_dim_value = ones((), dtype=indices_dtype)
# Use correct type for offset index computation
casted_params_shape = gen_math_ops.cast(params_shape, indices_dtype)
for dim in range(batch_dims, 0, -1):
dim_value = casted_params_shape[dim - 1]
accum_dim_value *= casted_params_shape[dim]
start = zeros((), dtype=indices_dtype)
step = ones((), dtype=indices_dtype)
dim_indices = gen_math_ops._range(start, dim_value, step)
dim_indices *= accum_dim_value
dim_shape = stack(
[1] * (dim - 1) + [dim_value] + [1] * (indices_ndims - dim), axis=0)
batch_indices += reshape(dim_indices, dim_shape)
flat_indices = reshape(batch_indices, [-1])
outer_shape = params_shape[batch_dims + 1:]
flat_inner_shape = gen_math_ops.prod(params_shape[:batch_dims + 1], [0],
False)
flat_params = reshape(params, concat([[flat_inner_shape], outer_shape],
axis=0))
flat_result = gather(flat_params, flat_indices)
result = reshape(flat_result, concat([indices_shape, outer_shape], axis=0))
final_shape = indices.get_shape()[:batch_dims].merge_with(
params.get_shape()[:batch_dims])
final_shape = final_shape.concatenate(indices.get_shape().dims[batch_dims:])
final_shape = final_shape.concatenate(params.get_shape()[batch_dims + 1:])
result.set_shape(final_shape)
return result
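# --- Editor's illustrative sketch (not part of the original TensorFlow source). ---
# A hedged example of the batched-gather behaviour that _batch_gather implements,
# expressed through the public tf.gather(..., batch_dims=1) API; assumes
# TensorFlow 2.x and is never invoked at import time.
def _example_batched_gather():  # hypothetical, not called anywhere
  import tensorflow as tf
  params = tf.constant([[1, 2], [3, 4]])
  indices = tf.constant([[0], [1]])
  # Each batch row gathers from its own params row -> [[1], [4]].
  return tf.gather(params, indices, batch_dims=1)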
@tf_export(v1=["gather_nd", "manip.gather_nd"])
@dispatch.add_dispatch_support
@deprecated_endpoints("manip.gather_nd")
def gather_nd(params, indices, name=None, batch_dims=0):
r"""Gather slices from `params` into a Tensor with shape specified by `indices`.
`indices` is an K-dimensional integer tensor, best thought of as a
(K-1)-dimensional tensor of indices into `params`, where each element defines
a slice of `params`:
output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]]
Whereas in `tf.gather` `indices` defines slices into the first
dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the
first `N` dimensions of `params`, where `N = indices.shape[-1]`.
The last dimension of `indices` can be at most the rank of
`params`:
indices.shape[-1] <= params.rank
The last dimension of `indices` corresponds to elements
(if `indices.shape[-1] == params.rank`) or slices
(if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]`
of `params`. The output tensor has shape
indices.shape[:-1] + params.shape[indices.shape[-1]:]
Additionally both 'params' and 'indices' can have M leading batch
dimensions that exactly match. In this case 'batch_dims' must be M.
Note that on CPU, if an out of bound index is found, an error is returned.
On GPU, if an out of bound index is found, a 0 is stored in the
corresponding output value.
Some examples below.
Simple indexing into a matrix:
```python
indices = [[0, 0], [1, 1]]
params = [['a', 'b'], ['c', 'd']]
output = ['a', 'd']
```
Slice indexing into a matrix:
```python
indices = [[1], [0]]
params = [['a', 'b'], ['c', 'd']]
output = [['c', 'd'], ['a', 'b']]
```
Indexing into a 3-tensor:
```python
indices = [[1]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [[['a1', 'b1'], ['c1', 'd1']]]
indices = [[0, 1], [1, 0]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [['c0', 'd0'], ['a1', 'b1']]
indices = [[0, 0, 1], [1, 0, 1]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = ['b0', 'b1']
```
The examples below are for the case when only indices have leading extra
dimensions. If both 'params' and 'indices' have leading batch dimensions, use
the 'batch_dims' parameter to run gather_nd in batch mode.
Batched indexing into a matrix:
```python
indices = [[[0, 0]], [[0, 1]]]
params = [['a', 'b'], ['c', 'd']]
output = [['a'], ['b']]
```
Batched slice indexing into a matrix:
```python
indices = [[[1]], [[0]]]
params = [['a', 'b'], ['c', 'd']]
output = [[['c', 'd']], [['a', 'b']]]
```
Batched indexing into a 3-tensor:
```python
indices = [[[1]], [[0]]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [[[['a1', 'b1'], ['c1', 'd1']]],
[[['a0', 'b0'], ['c0', 'd0']]]]
indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [[['c0', 'd0'], ['a1', 'b1']],
[['a0', 'b0'], ['c1', 'd1']]]
indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [['b0', 'b1'], ['d0', 'c1']]
```
Examples with batched 'params' and 'indices':
```python
batch_dims = 1
indices = [[1], [0]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [['c0', 'd0'], ['a1', 'b1']]
batch_dims = 1
indices = [[[1]], [[0]]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [[['c0', 'd0']], [['a1', 'b1']]]
batch_dims = 1
indices = [[[1, 0]], [[0, 1]]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [['c0'], ['b1']]
```
See also `tf.gather`.
Args:
params: A `Tensor`. The tensor from which to gather values.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
Index tensor.
name: A name for the operation (optional).
batch_dims: An integer or a scalar 'Tensor'. The number of batch dimensions.
Returns:
A `Tensor`. Has the same type as `params`.
"""
batch_dims_ = tensor_util.constant_value(batch_dims)
if batch_dims_ is not None:
batch_dims = int(batch_dims_)
if batch_dims == 0:
try:
# TODO(apassos) find a less bad way of detecting resource variables
# without introducing a circular dependency.
return params.gather_nd(indices, name=name)
except AttributeError:
return gen_array_ops.gather_nd(params, indices, name=name)
else:
return batch_gather_nd(params, indices, batch_dims=batch_dims, name=name)
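# --- Editor's illustrative sketch (not part of the original TensorFlow source). ---
# A hedged example of gather_nd with leading batch dimensions, reusing the
# batch_dims=1 example from the docstring above; assumes TensorFlow 2.x and is
# never invoked at import time.
def _example_gather_nd_batched():  # hypothetical, not called anywhere
  import tensorflow as tf
  params = tf.constant([[['a0', 'b0'], ['c0', 'd0']],
                        [['a1', 'b1'], ['c1', 'd1']]])
  indices = tf.constant([[1], [0]])
  # Per-batch slice indexing -> [['c0', 'd0'], ['a1', 'b1']].
  return tf.gather_nd(params, indices, batch_dims=1)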
@tf_export("gather_nd", v1=[])
@dispatch.add_dispatch_support
def gather_nd_v2(params, indices, batch_dims=0, name=None):
return gather_nd(params, indices, name=name, batch_dims=batch_dims)
gather_nd_v2.__doc__ = gather_nd.__doc__
def batch_gather_nd(params, indices, batch_dims, name=None):
"""gather_nd implementation with batch support."""
with ops.name_scope(name, "BatchGatherND", [params, indices]):
indices = ops.convert_to_tensor(indices, name="indices")
params = ops.convert_to_tensor(params, name="params")
if not isinstance(batch_dims, int):
raise TypeError("batch_dims must be an int; got %r" % (batch_dims,))
if batch_dims < 0:
raise ValueError("tf.gather_nd does not allow negative batch_dims.")
params_ndims = params.shape.ndims
indices_ndims = indices.shape.ndims
if indices_ndims is not None and batch_dims >= indices_ndims:
raise ValueError("batch_dims = %d must be less than rank(indices) = %d" %
(batch_dims, indices_ndims))
if params_ndims is not None and batch_dims >= params_ndims:
raise ValueError("batch_dims = %d must be less than rank(params) = %d" %
(batch_dims, params_ndims))
expand = batch_dims == 0
if expand:
# Normally gather_nd will be called when batch_dims == 0.
# But if this function is called with batch_dims = 0, e.g. for testing
# purposes, this adds a dummy batch dimension to make batch_dims = 1.
params = expand_dims(params, axis=0)
indices = expand_dims(indices, axis=0)
batch_dims = 1
params_shape = shape(params)
indices_shape = shape(indices)
batch_shape = params_shape[:batch_dims]
batch_size = gen_math_ops.prod(batch_shape, [0])
index_internal_ndims = rank(indices) - batch_dims - 1
indices_internal_shape = indices_shape[batch_dims:-1]
# Assuming a 'params' with shape [b1, ..., bM, g1, ..., gN] and an 'indices'
# with shape [b1, ..., bM, i1, ..., iK, C], where C <= N, we need to modify
# 'indices' s.t. it has shape [i1, ..., iK, D], where D <= M + N and slices
# to the entire 'params' tensor.
# Assuming we have a batch of shape [B1, B2], we use meshgrid to create a
# grid of size B1 x B2.
batch_dim_list = unstack(batch_shape, axis=0)
dim_ranges = [
gen_math_ops.cast(gen_math_ops._range(0, x, 1), indices.dtype)
for x in batch_dim_list
]
mesh_list = meshgrid(*dim_ranges, indexing="ij") if dim_ranges else []
# Then we flatten and stack the tensors to form a (B1.B2) by 2 matrix.
flat_list = [reshape(x, shape=(-1,)) for x in mesh_list]
index_grid = transpose(stack(flat_list, axis=0))
# We need to concatenate these batch coordinates with the internal indices.
# concat -> index_grid [B1.B2, 2] with indices [i1, ..., iK, C]
# So we reshape them both to [(B1.B2), i1, ..., iK, *]
index_grid_shape = shape(index_grid)
index_grid = reshape(
index_grid,
concat([
index_grid_shape[:1],
ones(index_internal_ndims, dtype=dtypes.int32), index_grid_shape[1:]
],
axis=0))
tile_shape = concat(((1,), indices_internal_shape, (1,)), axis=0)
index_grid = tile(index_grid, multiples=tile_shape)
# index_grid now has shape [(B1.B2), i1, ..., iK, 2]
flat_shape = concat(([batch_size], indices_shape[batch_dims:]), axis=0)
flat_indices = reshape(indices, shape=flat_shape)
# flat_indices now has shape [(B1.B2), i1, ..., iK, C]
indices = concat((index_grid, flat_indices), axis=-1)
# indices has shape [(B1.B2), i1, ..., iK, 2+C]
out = gen_array_ops.gather_nd(params, indices)
# out has shape [(B1.B2), i1, ..., iK, N-C]. Now we reshape batch to
# its original form.
out_shape = shape(out)
out = reshape(out, shape=concat((batch_shape, out_shape[1:]), axis=0))
if expand:
out = squeeze(out, axis=0)
return out
# Define quantize_v2 here in order to make name the second-to-last attribute,
# because round_mode was added later.
# (And also now because of 'axis' processing).
@tf_export(v1=["quantize_v2"])
@deprecation.deprecated(
"2017-10-25",
"`tf.quantize_v2` is deprecated, please use `tf.quantization.quantize` "
"instead.") # pylint: disable=missing-docstring
def quantize_v2(
input, # pylint: disable=redefined-builtin
min_range,
max_range,
T,
mode="MIN_COMBINED",
name=None,
round_mode="HALF_AWAY_FROM_ZERO",
narrow_range=False,
axis=None,
ensure_minimum_range=0.01):
if axis is None:
axis = -1
elif axis < 0:
if input.shape.ndims is None:
raise ValueError("input should have known rank to use negative axis.")
axis %= input.shape.ndims
if compat.forward_compatible(2019, 11, 13) or ensure_minimum_range != 0.01:
return gen_array_ops.quantize_v2(
input,
min_range,
max_range,
T=T,
mode=mode,
name=name,
round_mode=round_mode,
narrow_range=narrow_range,
axis=axis,
ensure_minimum_range=ensure_minimum_range)
return gen_array_ops.quantize_v2(
input,
min_range,
max_range,
T=T,
mode=mode,
name=name,
round_mode=round_mode,
narrow_range=narrow_range,
axis=axis)
quantize_v2.__doc__ = """Please use `tf.quantization.quantize` instead."""
# We want to expose tf.quantization.quantize instead of the top-level
# tf.quantize; tf.quantize can be deprecated in a later version of TensorFlow.
@tf_export("quantization.quantize", v1=["quantization.quantize", "quantize"])
@deprecation.deprecated_endpoints("quantize")
def quantize(
input, # pylint: disable=redefined-builtin
min_range,
max_range,
T,
mode="MIN_COMBINED",
round_mode="HALF_AWAY_FROM_ZERO",
name=None,
narrow_range=False,
axis=None,
ensure_minimum_range=0.01):
"""Quantize the input tensor."""
if compat.forward_compatible(2019, 11, 13) or ensure_minimum_range != 0.01:
return quantize_v2(
input,
min_range,
max_range,
T,
mode=mode,
round_mode=round_mode,
name=name,
narrow_range=narrow_range,
axis=axis,
ensure_minimum_range=ensure_minimum_range)
return quantize_v2(
input,
min_range,
max_range,
T,
mode=mode,
round_mode=round_mode,
name=name,
narrow_range=narrow_range,
axis=axis)
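# --- Editor's illustrative sketch (not part of the original TensorFlow source). ---
# A hedged example of tf.quantization.quantize; assumes TensorFlow 2.x, and the
# exact output values depend on the op's rounding behaviour. Never invoked at
# import time.
def _example_quantize():  # hypothetical, not called anywhere in this module
  import tensorflow as tf
  x = tf.constant([-1.0, 0.0, 1.0, 2.0])
  # MIN_COMBINED quantization of floats in [-1, 2] to 8-bit values; the op also
  # returns the actual min/max of the quantized output range.
  return tf.quantization.quantize(x, min_range=-1.0, max_range=2.0, T=tf.quint8)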
@tf_export("quantization.dequantize", v1=["quantization.dequantize",
"dequantize"])
@deprecation.deprecated_endpoints("dequantize")
def dequantize( # pylint: disable=missing-docstring
input, # pylint: disable=redefined-builtin
min_range,
max_range,
mode="MIN_COMBINED",
name=None,
axis=None,
narrow_range=False):
if axis is None:
axis = -1
elif axis < 0:
if input.shape.ndims is None:
raise ValueError("input should have known rank to use negative axis.")
axis %= input.shape.ndims
if compat.forward_compatible(2019, 10, 22) or axis >= 0 or narrow_range:
return gen_array_ops.dequantize(
input, min_range, max_range, mode=mode, name=name,
narrow_range=narrow_range, axis=axis)
return gen_array_ops.dequantize(
input, min_range, max_range, mode=mode, name=name)
dequantize.__doc__ = gen_array_ops.dequantize.__doc__
@tf_export("quantization.quantize_and_dequantize")
def quantize_and_dequantize(
input, # pylint: disable=redefined-builtin
input_min,
input_max,
signed_input=True,
num_bits=8,
range_given=False,
round_mode="HALF_TO_EVEN",
name=None,
narrow_range=False,
axis=None):
"""Quantizes then dequantizes a tensor.
Args:
input: A `Tensor` to quantize and dequantize.
input_min: If range_given=True, the minimum input value, that needs to be
represented in the quantized representation. If axis is specified, this
should be a vector of minimum values for each slice along axis.
input_max: If range_given=True, the maximum input value that needs to be
represented in the quantized representation. If axis is specified, this
should be a vector of maximum values for each slice along axis.
    signed_input: Whether the quantization is signed (True) or unsigned (False).
num_bits: The bitwidth of the quantization.
range_given: If true use `input_min` and `input_max` for the range of the
input, otherwise determine min and max from the input `Tensor`.
round_mode: Rounding mode when rounding from float values to quantized ones.
one of ['HALF_TO_EVEN', 'HALF_UP']
name: Optional name for the operation.
narrow_range: If true, then the absolute value of the quantized minimum
value is the same as the quantized maximum value, instead of 1 greater.
i.e. for 8 bit quantization, the minimum value is -127 instead of -128.
axis: Integer. If specified, refers to a dimension of the input tensor, such
that quantization will be per slice along that dimension.
Returns:
A `Tensor`. Each element is the result of quantizing and dequantizing the
corresponding element of `input`.
"""
if axis is None:
axis = -1
elif axis < 0:
if input.shape.ndims is None:
raise ValueError("input should have known rank to use negative axis.")
axis %= input.shape.ndims
return gen_array_ops.quantize_and_dequantize_v2(
input,
input_min=input_min,
input_max=input_max,
signed_input=signed_input,
num_bits=num_bits,
range_given=range_given,
round_mode=round_mode,
narrow_range=narrow_range,
axis=axis,
name=name)
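# --- Editor's illustrative sketch (not part of the original TensorFlow source). ---
# A hedged example of quantize_and_dequantize used to simulate 8-bit quantization
# error while keeping float outputs; assumes TensorFlow 2.x and is never invoked
# at import time.
def _example_quantize_and_dequantize():  # hypothetical, not called anywhere
  import tensorflow as tf
  x = tf.constant([-0.9, -0.1, 0.4, 0.8])
  return tf.quantization.quantize_and_dequantize(
      x, input_min=-1.0, input_max=1.0, num_bits=8, range_given=True)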
@tf_export("searchsorted")
def searchsorted(sorted_sequence,
values,
side="left",
out_type=dtypes.int32,
name=None):
"""Searches input tensor for values on the innermost dimension.
A 2-D example:
```
sorted_sequence = [[0, 3, 9, 9, 10],
[1, 2, 3, 4, 5]]
values = [[2, 4, 9],
[0, 2, 6]]
result = searchsorted(sorted_sequence, values, side="left")
result == [[1, 2, 2],
[0, 1, 5]]
result = searchsorted(sorted_sequence, values, side="right")
result == [[1, 2, 4],
[0, 2, 5]]
```
Args:
sorted_sequence: N-D `Tensor` containing a sorted sequence.
values: N-D `Tensor` containing the search values.
side: 'left' or 'right'; 'left' corresponds to lower_bound and 'right' to
upper_bound.
out_type: The output type (`int32` or `int64`). Default is `tf.int32`.
name: Optional name for the operation.
Returns:
An N-D `Tensor` the size of values containing the result of applying either
lower_bound or upper_bound (depending on side) to each value. The result
is not a global index to the entire `Tensor`, but the index in the last
dimension.
Raises:
    ValueError: If the last dimension of `sorted_sequence` has `>= 2^31 - 1`
      elements, if the total size of `values` exceeds `2^31 - 1` elements, or
      if the first `N-1` dimensions of the two tensors don't match.
"""
sequence_size = shape_internal(sorted_sequence)[-1]
values_size = shape_internal(values)[-1]
sorted_sequence_2d = reshape(sorted_sequence, [-1, sequence_size])
values_2d = reshape(values, [-1, values_size])
if side == "right":
output = gen_array_ops.upper_bound(sorted_sequence_2d, values_2d, out_type,
name)
elif side == "left":
output = gen_array_ops.lower_bound(sorted_sequence_2d, values_2d, out_type,
name)
else:
raise ValueError("side must be either 'right' or 'left'. Saw: %s." % side)
return reshape(output, shape_internal(values))
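# --- Editor's illustrative sketch (not part of the original TensorFlow source). ---
# A hedged example of searchsorted, reusing the 2-D case from the docstring
# above; assumes TensorFlow 2.x and is never invoked at import time.
def _example_searchsorted():  # hypothetical, not called anywhere in this module
  import tensorflow as tf
  sorted_sequence = tf.constant([[0, 3, 9, 9, 10], [1, 2, 3, 4, 5]])
  values = tf.constant([[2, 4, 9], [0, 2, 6]])
  # Left-bound insertion points per row -> [[1, 2, 2], [0, 1, 5]].
  return tf.searchsorted(sorted_sequence, values, side="left")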
quantize.__doc__ = gen_array_ops.quantize_v2.__doc__
@tf_export("image.extract_patches")
def extract_image_patches_v2(images, sizes, strides, rates, padding, name=None):
r"""Extract `patches` from `images`.
This op collects patches from the input image, as if applying a
convolution. All extracted patches are stacked in the depth (last) dimension
of the output.
Specifically, the op extracts patches of shape `sizes` which are `strides`
apart in the input image. The output is subsampled using the `rates` argument,
in the same manner as "atrous" or "dilated" convolutions.
The result is a 4D tensor which is indexed by batch, row, and column.
`output[i, x, y]` contains a flattened patch of size `sizes[1], sizes[2]`
which is taken from the input starting at
`images[i, x*strides[1], y*strides[2]]`.
Each output patch can be reshaped to `sizes[1], sizes[2], depth`, where
`depth` is `images.shape[3]`.
The output elements are taken from the input at intervals given by the `rate`
argument, as in dilated convolutions.
The `padding` argument has no effect on the size of each patch, it determines
how many patches are extracted. If `VALID`, only patches which are fully
contained in the input image are included. If `SAME`, all patches whose
starting point is inside the input are included, and areas outside the input
default to zero.
Example:
```
n = 10
# images is a 1 x 10 x 10 x 1 array that contains the numbers 1 through 100
images = [[[[x * n + y + 1] for y in range(n)] for x in range(n)]]
# We generate two outputs as follows:
# 1. 3x3 patches with stride length 5
# 2. Same as above, but the rate is increased to 2
   tf.image.extract_patches(images=images,
                            sizes=[1, 3, 3, 1],
                            strides=[1, 5, 5, 1],
                            rates=[1, 1, 1, 1],
                            padding='VALID')
# Yields:
[[[[ 1 2 3 11 12 13 21 22 23]
[ 6 7 8 16 17 18 26 27 28]]
[[51 52 53 61 62 63 71 72 73]
[56 57 58 66 67 68 76 77 78]]]]
```
If we mark the pixels in the input image which are taken for the output with
`*`, we see the pattern:
```
* * * 4 5 * * * 9 10
* * * 14 15 * * * 19 20
* * * 24 25 * * * 29 30
31 32 33 34 35 36 37 38 39 40
41 42 43 44 45 46 47 48 49 50
* * * 54 55 * * * 59 60
* * * 64 65 * * * 69 70
* * * 74 75 * * * 79 80
81 82 83 84 85 86 87 88 89 90
91 92 93 94 95 96 97 98 99 100
```
```
   tf.image.extract_patches(images=images,
                            sizes=[1, 3, 3, 1],
                            strides=[1, 5, 5, 1],
                            rates=[1, 2, 2, 1],
                            padding='VALID')
# Yields:
[[[[ 1 3 5 21 23 25 41 43 45]
[ 6 8 10 26 28 30 46 48 50]]
[[ 51 53 55 71 73 75 91 93 95]
[ 56 58 60 76 78 80 96 98 100]]]]
```
We can again draw the effect, this time using the symbols `*`, `x`, `+` and
`o` to distinguish the patches:
```
* 2 * 4 * x 7 x 9 x
11 12 13 14 15 16 17 18 19 20
* 22 * 24 * x 27 x 29 x
31 32 33 34 35 36 37 38 39 40
* 42 * 44 * x 47 x 49 x
+ 52 + 54 + o 57 o 59 o
61 62 63 64 65 66 67 68 69 70
+ 72 + 74 + o 77 o 79 o
81 82 83 84 85 86 87 88 89 90
+ 92 + 94 + o 97 o 99 o
```
Args:
    images: A 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.
sizes: The size of the extracted patches. Must be [1, size_rows, size_cols,
1].
strides: A 1-D Tensor of length 4. How far the centers of two consecutive
patches are in the images. Must be: `[1, stride_rows, stride_cols, 1]`.
rates: A 1-D Tensor of length 4. Must be: `[1, rate_rows, rate_cols, 1]`.
This is the input stride, specifying how far two consecutive patch samples
are in the input. Equivalent to extracting patches with `patch_sizes_eff =
patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by subsampling
them spatially by a factor of `rates`. This is equivalent to `rate` in
dilated (a.k.a. Atrous) convolutions.
padding: The type of padding algorithm to use.
name: A name for the operation (optional).
Returns:
A 4-D Tensor of the same type as the input.
"""
return gen_array_ops.extract_image_patches(images, sizes, strides, rates,
padding, name)
@tf_export(v1=["image.extract_image_patches", "extract_image_patches"])
@deprecation.deprecated_args(None, "ksizes is deprecated, use sizes instead",
"ksizes")
def extract_image_patches( # pylint: disable=missing-docstring
images,
ksizes=None,
strides=None,
rates=None,
padding=None,
name=None,
sizes=None):
ksizes = deprecation.deprecated_argument_lookup("sizes", sizes, "ksizes",
ksizes)
return gen_array_ops.extract_image_patches(images, ksizes, strides, rates,
padding, name)
extract_image_patches.__doc__ = gen_array_ops.extract_image_patches.__doc__
@tf_export("fingerprint")
def fingerprint(data, method="farmhash64", name=None):
r"""Generates fingerprint values.
Generates fingerprint values of `data`.
Fingerprint op considers the first dimension of `data` as the batch dimension,
and `output[i]` contains the fingerprint value generated from contents in
`data[i, ...]` for all `i`.
Fingerprint op writes fingerprint values as byte arrays. For example, the
default method `farmhash64` generates a 64-bit fingerprint value at a time.
  This 8-byte value is written out as a `tf.uint8` array of size 8, in
little-endian order.
For example, suppose that `data` has data type `tf.int32` and shape (2, 3, 4),
and that the fingerprint method is `farmhash64`. In this case, the output
shape is (2, 8), where 2 is the batch dimension size of `data`, and 8 is the
size of each fingerprint value in bytes. `output[0, :]` is generated from
12 integers in `data[0, :, :]` and similarly `output[1, :]` is generated from
other 12 integers in `data[1, :, :]`.
Note that this op fingerprints the raw underlying buffer, and it does not
fingerprint Tensor's metadata such as data type and/or shape. For example, the
fingerprint values are invariant under reshapes and bitcasts as long as the
batch dimension remain the same:
```python
tf.fingerprint(data) == tf.fingerprint(tf.reshape(data, ...))
tf.fingerprint(data) == tf.fingerprint(tf.bitcast(data, ...))
```
  For string data, one should expect `tf.fingerprint(data) !=
  tf.fingerprint(tf.strings.reduce_join(data))` in general.
Args:
data: A `Tensor`. Must have rank 1 or higher.
method: A `Tensor` of type `tf.string`. Fingerprint method used by this op.
Currently available method is `farmhash64`.
name: A name for the operation (optional).
Returns:
A two-dimensional `Tensor` of type `tf.uint8`. The first dimension equals to
`data`'s first dimension, and the second dimension size depends on the
fingerprint algorithm.
"""
return gen_array_ops.fingerprint(data, method, name)
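# --- Editor's illustrative sketch (not part of the original TensorFlow source). ---
# A hedged example of fingerprint; assumes TensorFlow 2.x and is never invoked at
# import time.
def _example_fingerprint():  # hypothetical, not called anywhere in this module
  import tensorflow as tf
  data = tf.constant([[1, 2, 3], [4, 5, 6]])
  # farmhash64 yields one 8-byte fingerprint per batch element -> shape (2, 8);
  # the values are unchanged by reshapes that preserve the batch dimension.
  return tf.fingerprint(data)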
def convert_to_int_tensor(tensor, name, dtype=dtypes.int32):
"""Converts the given value to an integer Tensor."""
tensor = ops.convert_to_tensor(tensor, name=name, preferred_dtype=dtype)
if tensor.dtype.is_integer:
tensor = gen_math_ops.cast(tensor, dtype)
else:
raise TypeError("%s must be an integer tensor; dtype=%s" %
(name, tensor.dtype))
return tensor
def get_positive_axis(axis, ndims):
"""Validate an `axis` parameter, and normalize it to be positive.
If `ndims` is known (i.e., not `None`), then check that `axis` is in the
range `-ndims <= axis < ndims`, and return `axis` (if `axis >= 0`) or
`axis + ndims` (otherwise).
If `ndims` is not known, and `axis` is positive, then return it as-is.
If `ndims` is not known, and `axis` is negative, then report an error.
Args:
axis: An integer constant
ndims: An integer constant, or `None`
Returns:
The normalized `axis` value.
Raises:
ValueError: If `axis` is out-of-bounds, or if `axis` is negative and
`ndims is None`.
"""
if not isinstance(axis, int):
raise TypeError("axis must be an int; got %s" % type(axis).__name__)
if ndims is not None:
if 0 <= axis < ndims:
return axis
elif -ndims <= axis < 0:
return axis + ndims
else:
raise ValueError("axis=%s out of bounds: expected %s<=axis<%s" %
(axis, -ndims, ndims))
elif axis < 0:
raise ValueError("axis may only be negative if ndims is statically known.")
return axis
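# --- Editor's illustrative sketch (not part of the original TensorFlow source). ---
# A tiny, pure-Python check of get_positive_axis' normalization rules; the helper
# is hypothetical and never invoked at import time.
def _example_get_positive_axis():  # hypothetical, not called anywhere
  # With a known rank of 4, axis -1 normalizes to 3, while an in-range positive
  # axis is returned unchanged.
  assert get_positive_axis(-1, 4) == 3
  assert get_positive_axis(2, 4) == 2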
# This op is intended to exactly match the semantics of numpy.repeat, with
# one exception: numpy.repeat has special (and somewhat non-intuitive) behavior
# when axis is not specified. Rather than implement that special behavior, we
# simply make `axis` be a required argument.
#
# External (OSS) `tf.repeat` feature request:
# https://github.com/tensorflow/tensorflow/issues/8246
def repeat_with_axis(data, repeats, axis, name=None):
"""Repeats elements of `data`.
Args:
data: An `N`-dimensional tensor.
repeats: A 1-D integer tensor specifying how many times each element in
`axis` should be repeated. `len(repeats)` must equal `data.shape[axis]`.
Supports broadcasting from a scalar value.
axis: `int`. The axis along which to repeat values. Must be less than
`max(N, 1)`.
name: A name for the operation.
Returns:
A tensor with `max(N, 1)` dimensions. Has the same shape as `data`,
except that dimension `axis` has size `sum(repeats)`.
Example usage:
>>> repeat(['a', 'b', 'c'], repeats=[3, 0, 2], axis=0)
<tf.Tensor: shape=(5,), dtype=string,
numpy=array([b'a', b'a', b'a', b'c', b'c'], dtype=object)>
>>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=0)
<tf.Tensor: shape=(5, 2), dtype=int32, numpy=
array([[1, 2],
[1, 2],
[3, 4],
[3, 4],
[3, 4]], dtype=int32)>
>>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=1)
<tf.Tensor: shape=(2, 5), dtype=int32, numpy=
array([[1, 1, 2, 2, 2],
[3, 3, 4, 4, 4]], dtype=int32)>
"""
if not isinstance(axis, int):
raise TypeError("axis must be an int; got %s" % type(axis).__name__)
with ops.name_scope(name, "Repeat", [data, repeats]):
data = ops.convert_to_tensor(data, name="data")
repeats = convert_to_int_tensor(repeats, name="repeats")
repeats.shape.with_rank_at_most(1)
# If `data` is a scalar, then upgrade it to a vector.
data = _with_nonzero_rank(data)
data_shape = shape(data)
# If `axis` is negative, then convert it to a positive value.
axis = get_positive_axis(axis, data.shape.ndims)
# Check data Tensor shapes.
if repeats.shape.ndims == 1:
data.shape.dims[axis].assert_is_compatible_with(repeats.shape[0])
# If we know that `repeats` is a scalar, then we can just tile & reshape.
if repeats.shape.ndims == 0:
expanded = expand_dims(data, axis + 1)
tiled = tile_one_dimension(expanded, axis + 1, repeats)
result_shape = concat([data_shape[:axis], [-1], data_shape[axis + 1:]],
axis=0)
return reshape(tiled, result_shape)
# Broadcast the `repeats` tensor so rank(repeats) == axis + 1.
if repeats.shape.ndims != axis + 1:
repeats_shape = shape(repeats)
repeats_ndims = rank(repeats)
broadcast_shape = concat(
[data_shape[:axis + 1 - repeats_ndims], repeats_shape], axis=0)
repeats = broadcast_to(repeats, broadcast_shape)
repeats.set_shape([None] * (axis + 1))
# Create a "sequence mask" based on `repeats`, where slices across `axis`
# contain one `True` value for each repetition. E.g., if
# `repeats = [3, 1, 2]`, then `mask = [[1, 1, 1], [1, 0, 0], [1, 1, 0]]`.
max_repeat = gen_math_ops.maximum(
0, gen_math_ops._max(repeats, _all_dimensions(repeats)))
mask = sequence_mask(repeats, max_repeat)
# Add a new dimension around each value that needs to be repeated, and
# then tile that new dimension to match the maximum number of repetitions.
expanded = expand_dims(data, axis + 1)
tiled = tile_one_dimension(expanded, axis + 1, max_repeat)
# Use `boolean_mask` to discard the extra repeated values. This also
# flattens all dimensions up through `axis`.
masked = boolean_mask(tiled, mask)
# Reshape the output tensor to add the outer dimensions back.
if axis == 0:
result = masked
else:
result_shape = concat([data_shape[:axis], [-1], data_shape[axis + 1:]],
axis=0)
result = reshape(masked, result_shape)
# Preserve shape information.
if data.shape.ndims is not None:
new_axis_size = 0 if repeats.shape[0] == 0 else None
result.set_shape(data.shape[:axis].concatenate(
[new_axis_size]).concatenate(data.shape[axis + 1:]))
return result
def tile_one_dimension(data, axis, multiple):
"""Tiles a single dimension of a tensor."""
# Assumes axis is a nonnegative int.
if data.shape.ndims is not None:
multiples = [1] * data.shape.ndims
multiples[axis] = multiple
else:
ones_value = ones(rank(data), dtypes.int32)
multiples = concat([ones_value[:axis], [multiple], ones_value[axis + 1:]],
axis=0)
return tile(data, multiples)
def _with_nonzero_rank(data):
"""If `data` is scalar, then add a dimension; otherwise return as-is."""
if data.shape.ndims is not None:
if data.shape.ndims == 0:
return stack([data])
else:
return data
else:
data_shape = shape(data)
data_ndims = rank(data)
return reshape(data, concat([[1], data_shape], axis=0)[-data_ndims:])
@tf_export("repeat")
def repeat(input, repeats, axis=None, name=None): # pylint: disable=redefined-builtin
"""Repeat elements of `input`.
Args:
input: An `N`-dimensional Tensor.
repeats: An 1-D `int` Tensor. The number of repetitions for each element.
repeats is broadcasted to fit the shape of the given axis. `len(repeats)`
must equal `input.shape[axis]` if axis is not None.
axis: An int. The axis along which to repeat values. By default (axis=None),
use the flattened input array, and return a flat output array.
name: A name for the operation.
Returns:
A Tensor which has the same shape as `input`, except along the given axis.
If axis is None then the output array is flattened to match the flattened
input array.
Example usage:
>>> repeat(['a', 'b', 'c'], repeats=[3, 0, 2], axis=0)
<tf.Tensor: shape=(5,), dtype=string,
numpy=array([b'a', b'a', b'a', b'c', b'c'], dtype=object)>
>>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=0)
<tf.Tensor: shape=(5, 2), dtype=int32, numpy=
array([[1, 2],
[1, 2],
[3, 4],
[3, 4],
[3, 4]], dtype=int32)>
>>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=1)
<tf.Tensor: shape=(2, 5), dtype=int32, numpy=
array([[1, 1, 2, 2, 2],
[3, 3, 4, 4, 4]], dtype=int32)>
>>> repeat(3, repeats=4)
<tf.Tensor: shape=(4,), dtype=int32, numpy=array([3, 3, 3, 3], dtype=int32)>
>>> repeat([[1,2], [3,4]], repeats=2)
<tf.Tensor: shape=(8,), dtype=int32,
numpy=array([1, 1, 2, 2, 3, 3, 4, 4], dtype=int32)>
"""
if axis is None:
input = reshape(input, [-1])
axis = 0
return repeat_with_axis(input, repeats, axis, name)
| apache-2.0 | 5,979,389,566,144,273,000 | 34.926457 | 129 | 0.608781 | false |
FKlama/hycud | REMO.py | 1 | 2344 | # HYCUD
# Copyright (C) 2014 Klama, Nina Alexandra and Rezaei-Ghaleh, Nasrollah
#
# This file is part of HYCUD.
#
# HYCUD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HYCUD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HYCUD. If not, see <http://www.gnu.org/licenses/>.
import os
import tempfile
import subprocess
import gc
from os import path
from HelperFunctions import waitTillFileExists, ANSI_ESC, flush
from multiprocessing import Pool
esc = ANSI_ESC()
class runREMO:
"""Class returning a function to run REMO for one model"""
def __init__(self, opt, count):
self.opt = opt
self.size = count
def __call__(self, model):
tmpDir = ""
if self.opt.keepTemp:
tmpDir = path.join(self.opt.tmpPath, ("REMO_%09i" % model.num))
os.makedirs(tmpDir)
else:
tmpDir = tempfile.mkdtemp(prefix="REMO_", suffix="", dir=self.opt.tmpPath)
(PDB_dir, PDB_file) = path.split(model.getPDB())
tmpPDB = path.join(tmpDir, PDB_file)
remoExe = path.join(self.opt.REMOPath, "REMO.pl")
subprocess.check_call(['ln', '-s', model.getPDB(), tmpPDB])
waitTillFileExists(tmpPDB)
os.chdir(tmpDir)
if self.opt.verbose > 1:
print("nice -n", str(self.opt.nice), "perl" , remoExe, "0", PDB_file)
elif self.opt.verbose > 0:
print("{0}2K{0}1GCalculating REMO {1:6n}/{2:n}".format(
esc, model.num, self.size), end='')
flush()
subprocess.check_output(['nice', '-n', str(self.opt.nice), 'perl', remoExe, "0", PDB_file])
waitTillFileExists(tmpPDB + ".h")
subprocess.check_call(['mv', (tmpPDB + '.h'), model.getSprouted()])
def sproutModels(opt, models):
"""Function sprouts full sidechains for a given set of protein models"""
sprout_pool = Pool(processes=opt.threads)
task = runREMO(opt, models.size())
sprout_pool.map(task, models.models)
if opt.verbose > 0:
print("")
gc.collect()
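# --- Editor's illustrative sketch (not part of the original HYCUD source). ---
# A hedged outline of how sproutModels() is typically driven. `Options` and
# `ModelSet` below are hypothetical stand-ins for the HYCUD objects that expose
# .threads, .nice, .verbose, .tmpPath, .REMOPath, .keepTemp and the
# .models / .size() / .getPDB() / .getSprouted() interface assumed by runREMO.
#
#   opt = Options(threads=4, nice=19, verbose=1, keepTemp=False,
#                 tmpPath="/tmp/hycud", REMOPath="/opt/REMO")
#   models = ModelSet.fromDirectory("split_pdbs/")
#   sproutModels(opt, models)   # rebuilds full side chains for every model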
| gpl-3.0 | -9,100,186,542,631,282,000 | 32.971014 | 95 | 0.680461 | false |
rwth-ti/gr-ofdm | python/ofdm/qa_freqshift.py | 1 | 13032 | #!/usr/bin/env python
#
# Copyright 2014 Institute for Theoretical Information Technology,
# RWTH Aachen University
# www.ti.rwth-aachen.de
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, eng_notation
import ofdm as ofdm
import os
import sys, numpy, random, math, cmath
from numpy import concatenate
import numpy
class qa_ofdm (gr_unittest.TestCase):
def setUp (self):
self.fg = gr.top_block ("test_block")
def tearDown (self):
self.fg = None
# no shift
def test_001 (self):
vlen = 128
syms = 4
freq_shift = ofdm.frequency_shift_vcc(vlen, 1.0/vlen)
fft = gr.fft_vcc(vlen, True, [], True) # natural order, dc = vlen / 2
ifft = gr.fft_vcc(vlen, False, [], True)
fft_scale = gr.multiply_const_vcc([1.0/vlen]*vlen)
vec = numpy.array(numpy.zeros(vlen), numpy.complex)
vec[vlen/2-vlen/4] = 1.0
vec = concatenate([vec]*syms)
src = gr.vector_source_c(vec)
dst = gr.vector_sink_c()
s2v = gr.stream_to_vector(gr.sizeof_gr_complex, vlen)
v2s = gr.vector_to_stream(gr.sizeof_gr_complex, vlen)
eps = gr.vector_source_f([0.0]*syms)
trig = gr.vector_source_b([1]*syms)
self.fg.connect(src, s2v, ifft, (freq_shift,0))
self.fg.connect(eps, (freq_shift,1))
self.fg.connect(trig, (freq_shift,2))
self.fg.connect(freq_shift, fft, fft_scale, v2s, dst)
self.fg.run()
self.assertComplexTuplesAlmostEqual(vec, numpy.array(dst.data()))
# simple shift by -1.0, one frequency bin
def test_002 (self):
vlen = 128
syms = 4
vec = numpy.array(numpy.zeros(vlen), numpy.complex)
vec[vlen/2-vlen/4] = 1.0
vec = concatenate([vec]*syms)
epsilon = [-1]
frame_trigger = numpy.concatenate([[1],[0]*(syms-1)])
expec = numpy.array(numpy.zeros(vlen*syms), numpy.complex)
for i in range(syms):
expec[vlen/2-vlen/4+i*vlen+epsilon[0]] = 1.0
freq_shift = ofdm.frequency_shift_vcc(vlen, 1.0/vlen)
fft = gr.fft_vcc(vlen, True, [], True) # natural order, dc = vlen / 2
ifft = gr.fft_vcc(vlen, False, [], True)
fft_scale = gr.multiply_const_vcc([1.0/vlen]*vlen)
src = gr.vector_source_c(vec)
dst = gr.vector_sink_c()
s2v = gr.stream_to_vector(gr.sizeof_gr_complex, vlen)
v2s = gr.vector_to_stream(gr.sizeof_gr_complex, vlen)
eps = gr.vector_source_f(epsilon)
trig = gr.vector_source_b(frame_trigger.tolist())
self.fg.connect(src, s2v, ifft, (freq_shift,0))
self.fg.connect(eps, (freq_shift,1))
self.fg.connect(trig, (freq_shift,2))
self.fg.connect(freq_shift, fft, fft_scale, v2s, dst)
self.fg.run()
self.assertComplexTuplesAlmostEqual2(expec, dst.data(), 1e-6)
# simple shift by -1.0, two frequency bins, asymmetric
def test_003 (self):
vlen = 128
syms = 4
bin1 = vlen/2-vlen/4
bin2 = vlen/2+vlen/3
vec = numpy.array(numpy.zeros(vlen), numpy.complex)
vec[bin1] = 1.0
vec[bin2] = -1.0
vec = concatenate([vec]*syms)
epsilon = [-1]*syms
frame_trigger = [1]*syms
expec = numpy.array(numpy.zeros(vlen*syms), numpy.complex)
for i in range(syms):
expec[bin1+i*vlen+epsilon[i]] = 1.0
expec[bin2+i*vlen+epsilon[i]] = -1.0
freq_shift = ofdm.frequency_shift_vcc(vlen, 1.0/vlen)
fft = gr.fft_vcc(vlen, True, [], True) # natural order, dc = vlen / 2
ifft = gr.fft_vcc(vlen, False, [], True)
fft_scale = gr.multiply_const_vcc([1.0/vlen]*vlen)
src = gr.vector_source_c(vec)
dst = gr.vector_sink_c()
s2v = gr.stream_to_vector(gr.sizeof_gr_complex, vlen)
v2s = gr.vector_to_stream(gr.sizeof_gr_complex, vlen)
eps = gr.vector_source_f(epsilon)
trig = gr.vector_source_b(frame_trigger)
self.fg.connect(src, s2v, ifft, (freq_shift,0))
self.fg.connect(eps, (freq_shift,1))
self.fg.connect(trig, (freq_shift,2))
self.fg.connect(freq_shift, fft, fft_scale, v2s, dst)
self.fg.run()
self.assertComplexTuplesAlmostEqual2(expec, dst.data(), 1e-6)
# simple shift by -1.0, two frequency bins, _symmetric_
def test_004 (self):
vlen = 128
syms = 4
bin1 = vlen/2-vlen/4
bin2 = vlen/2+vlen/4
vec = numpy.array(numpy.zeros(vlen), numpy.complex)
vec[bin1] = 1.0
vec[bin2] = -1.0
vec = concatenate([vec]*syms)
epsilon = [-1]*syms
frame_trigger = [1]*syms
expec = numpy.array(numpy.zeros(vlen*syms), numpy.complex)
for i in range(syms):
expec[bin1+i*vlen+epsilon[i]] = 1.0
expec[bin2+i*vlen+epsilon[i]] = -1.0
freq_shift = ofdm.frequency_shift_vcc(vlen, 1.0/vlen)
fft = gr.fft_vcc(vlen, True, [], True) # natural order, dc = vlen / 2
ifft = gr.fft_vcc(vlen, False, [], True)
fft_scale = gr.multiply_const_vcc([1.0/vlen]*vlen)
src = gr.vector_source_c(vec)
dst = gr.vector_sink_c()
s2v = gr.stream_to_vector(gr.sizeof_gr_complex, vlen)
v2s = gr.vector_to_stream(gr.sizeof_gr_complex, vlen)
eps = gr.vector_source_f(epsilon)
trig = gr.vector_source_b(frame_trigger)
self.fg.connect(src, s2v, ifft, (freq_shift,0))
self.fg.connect(eps, (freq_shift,1))
self.fg.connect(trig, (freq_shift,2))
self.fg.connect(freq_shift, fft, fft_scale, v2s, dst)
self.fg.run()
self.assertComplexTuplesAlmostEqual2(expec, dst.data(), 1e-6)
# simple shift by +10.0, two frequency bins, asymmetric
def test_005 (self):
vlen = 128
syms = 4
bin1 = vlen/2-vlen/4
bin2 = vlen/2+vlen/3
bin1_val = 1.0
bin2_val = -1.0j
vec = numpy.array(numpy.zeros(vlen), numpy.complex)
vec[bin1] = bin1_val
vec[bin2] = bin2_val
vec = concatenate([vec]*syms)
epsilon = [+10]*syms
frame_trigger = [1]*syms
expec = numpy.array(numpy.zeros(vlen*syms), numpy.complex)
for i in range(syms):
expec[bin1+i*vlen+epsilon[i]] = bin1_val
expec[bin2+i*vlen+epsilon[i]] = bin2_val
freq_shift = ofdm.frequency_shift_vcc(vlen, 1.0/vlen)
fft = gr.fft_vcc(vlen, True, [], True) # natural order, dc = vlen / 2
ifft = gr.fft_vcc(vlen, False, [], True)
fft_scale = gr.multiply_const_vcc([1.0/vlen]*vlen)
src = gr.vector_source_c(vec)
dst = gr.vector_sink_c()
s2v = gr.stream_to_vector(gr.sizeof_gr_complex, vlen)
v2s = gr.vector_to_stream(gr.sizeof_gr_complex, vlen)
eps = gr.vector_source_f(epsilon)
trig = gr.vector_source_b(frame_trigger)
self.fg.connect(src, s2v, ifft, (freq_shift,0))
self.fg.connect(eps, (freq_shift,1))
self.fg.connect(trig, (freq_shift,2))
self.fg.connect(freq_shift, fft, fft_scale, v2s, dst)
self.fg.run()
self.assertComplexTuplesAlmostEqual2(expec, dst.data(), 1e-5)
# different shifts per symbol, two frequency bins, asymmetric
def test_006 (self):
vlen = 128
syms = 4
bin1 = vlen/2-vlen/4
bin2 = vlen/2+vlen/3
bin1_val = 1.0j
bin2_val = -1.0
vec = numpy.array(numpy.zeros(vlen), numpy.complex)
vec[bin1] = bin1_val
vec[bin2] = bin2_val
vec = concatenate([vec]*syms)
epsilon = [1,-4,5,2]
frame_trigger = [1]*syms
expec = numpy.array(numpy.zeros(vlen*syms), numpy.complex)
for i in range(syms):
expec[bin1+i*vlen+epsilon[i]] = bin1_val
expec[bin2+i*vlen+epsilon[i]] = bin2_val
freq_shift = ofdm.frequency_shift_vcc(vlen, 1.0/vlen)
fft = gr.fft_vcc(vlen, True, [], True) # natural order, dc = vlen / 2
ifft = gr.fft_vcc(vlen, False, [], True)
fft_scale = gr.multiply_const_vcc([1.0/vlen]*vlen)
src = gr.vector_source_c(vec)
dst = gr.vector_sink_c()
s2v = gr.stream_to_vector(gr.sizeof_gr_complex, vlen)
v2s = gr.vector_to_stream(gr.sizeof_gr_complex, vlen)
eps = gr.vector_source_f(epsilon)
trig = gr.vector_source_b(frame_trigger)
self.fg.connect(src, s2v, ifft, (freq_shift,0))
self.fg.connect(eps, (freq_shift,1))
self.fg.connect(trig, (freq_shift,2))
self.fg.connect(freq_shift, fft, fft_scale, v2s, dst)
self.fg.run()
self.assertComplexTuplesAlmostEqual2(expec, dst.data(), 1e-5)
# one signal at frequency 1.5 / vlen, shifted to 2.0+vlen/2 bin
# initial phase offset remains constant through all symbols in one frame
def test_007 (self):
vlen = 128
syms = 4
bin1 = vlen/2 + 2
bin1_val = 1.0
expec = numpy.array(numpy.zeros(vlen), numpy.complex)
expec[bin1] = bin1_val
expec = concatenate([expec]*syms)
epsilon = [0.5]
frame_trigger = numpy.concatenate([[1],[0]*(syms-1)])
freq_shift = ofdm.frequency_shift_vcc(vlen, 1.0/vlen)
fft = gr.fft_vcc(vlen, True, [], True) # natural order, dc = vlen / 2
fft_scale = gr.multiply_const_vcc([1.0/vlen]*vlen)
src = gr.sig_source_c(vlen, gr.GR_COS_WAVE, 1.5, 1.0, 0.0)
# bin vlen/2 + 1.5
dst = gr.vector_sink_c()
s2v = gr.stream_to_vector(gr.sizeof_gr_complex, vlen)
v2s = gr.vector_to_stream(gr.sizeof_gr_complex, vlen)
eps = gr.vector_source_f(epsilon)
trig = gr.vector_source_b(frame_trigger.tolist())
self.fg.connect(src, s2v, (freq_shift,0))
self.fg.connect(eps, (freq_shift,1))
self.fg.connect(trig, (freq_shift,2))
self.fg.connect(freq_shift, fft, fft_scale, v2s, dst)
self.fg.run()
self.assertComplexTuplesAlmostEqual2(expec, dst.data(), 1e-5, 1e-5)
# one signal at frequency 4.5 / vlen, shifted to 4.0+vlen/2 bin
# tests phase correction for cyclic prefix
def test_008 (self):
vlen = 128
syms = 4
bin1 = vlen/2 + 4
bin1_val = 1.0
cp_length = vlen/4
expec = numpy.array(numpy.zeros(vlen), numpy.complex)
expec[bin1] = bin1_val
expec = concatenate([expec]*syms)
epsilon = [-0.5]
frame_trigger = numpy.concatenate([[1],[0]*(syms-1)])
freq_shift = ofdm.frequency_shift_vcc(vlen, 1.0/vlen, cp_length)
fft = gr.fft_vcc(vlen, True, [], True) # natural order, dc = vlen / 2
fft_scale = gr.multiply_const_vcc([1.0/vlen]*vlen)
sampler = ofdm.vector_sampler(gr.sizeof_gr_complex,vlen)
trigger_vec = concatenate([[0]*(vlen+cp_length-1),[1]])
trigger_vec = concatenate([trigger_vec]*syms)
trigger = gr.vector_source_b(trigger_vec.tolist())
src = gr.sig_source_c(vlen, gr.GR_COS_WAVE, 4.5, 1.0, 0.0) # bin vlen/2 + 4.5
dst = gr.vector_sink_c()
v2s = gr.vector_to_stream(gr.sizeof_gr_complex, vlen)
eps = gr.vector_source_f(epsilon)
trig = gr.vector_source_b(frame_trigger.tolist())
self.fg.connect(src, (sampler,0))
self.fg.connect(trigger, (sampler,1))
self.fg.connect(sampler, (freq_shift,0))
self.fg.connect(eps, (freq_shift,1))
self.fg.connect(trig, (freq_shift,2))
self.fg.connect(freq_shift, fft, fft_scale, v2s, dst)
self.fg.run()
self.assertComplexTuplesAlmostEqual2(expec, dst.data(), 1e-5, 1e-5)
def test_100(self):
vlen = 256
cp_len = 12
M = 10
N = int(3e6)
uut = ofdm.frequency_shift_vcc( vlen, 1.0/vlen, cp_len )
trig = [0]*M
trig[0] = 1
eps = [1.]*M
src1 = gr.vector_source_c( [1.]*(M*vlen), True, vlen )
src2 = gr.vector_source_f( eps, True )
src3 = gr.vector_source_b( trig, True )
dst = gr.null_sink( gr.sizeof_gr_complex * vlen )
limit3 = gr.head( gr.sizeof_char, N )
self.fg.connect( src1, ( uut, 0 ) )
self.fg.connect( src2, ( uut, 1 ) )
self.fg.connect( src3, limit3, ( uut, 2 ) )
self.fg.connect( uut, dst )
r = time_it( self.fg )
print "Rate %s" % \
( eng_notation.num_to_str( float( ( vlen + cp_len ) * N ) / r ) )
def time_it(tb):
start = os.times()
tb.run()
stop = os.times()
delta = map((lambda a, b: a-b), stop, start)
user, sys, childrens_user, childrens_sys, real = delta
total_user = user + childrens_user
total_sys = sys + childrens_sys
print "real %7.3f" % (real,)
print "user %7.3f" % (total_user,)
print "sys %7.3f" % (total_sys,)
return real
if __name__ == '__main__':
gr_unittest.main()
| gpl-3.0 | 5,002,455,819,893,132,000 | 30.708029 | 81 | 0.607812 | false |
keishi/chromium | chrome/test/pyautolib/remote_inspector_client.py | 1 | 39894 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chrome remote inspector utility for pyauto tests.
This script provides a python interface that acts as a front-end for Chrome's
remote inspector module, communicating via sockets to interact with Chrome in
the same way that the Developer Tools does. This -- in theory -- should allow
a pyauto test to do anything that Chrome's Developer Tools does, as long as the
appropriate communication with the remote inspector is implemented in this
script.
This script assumes that Chrome is already running on the local machine with
flag '--remote-debugging-port=9222' to enable remote debugging on port 9222.
To use this module, first create an instance of class RemoteInspectorClient;
doing this sets up a connection to Chrome's remote inspector. Then call the
appropriate functions on that object to perform the desired actions with the
remote inspector. When done, call Stop() on the RemoteInspectorClient object
to stop communication with the remote inspector.
For example, to take v8 heap snapshots from a pyauto test:
import remote_inspector_client
my_client = remote_inspector_client.RemoteInspectorClient()
snapshot_info = my_client.HeapSnapshot(include_summary=True)
// Do some stuff...
new_snapshot_info = my_client.HeapSnapshot(include_summary=True)
my_client.Stop()
It is expected that a test will only use one instance of RemoteInspectorClient
at a time. If a second instance is instantiated, a RuntimeError will be raised.
RemoteInspectorClient could be made into a singleton in the future if the need
for it arises.
"""
import asyncore
import datetime
import logging
import optparse
import pprint
import simplejson
import socket
import sys
import threading
import time
import urllib2
import urlparse
class _DevToolsSocketRequest(object):
"""A representation of a single DevToolsSocket request.
A DevToolsSocket request is used for communication with a remote Chrome
instance when interacting with the renderer process of a given webpage.
Requests and results are passed as specially-formatted JSON messages,
according to a communication protocol defined in WebKit. The string
representation of this request will be a JSON message that is properly
formatted according to the communication protocol.
Public Attributes:
method: The string method name associated with this request.
id: A unique integer id associated with this request.
params: A dictionary of input parameters associated with this request.
results: A dictionary of relevant results obtained from the remote Chrome
instance that are associated with this request.
is_fulfilled: A boolean indicating whether or not this request has been sent
and all relevant results for it have been obtained (i.e., this value is
True only if all results for this request are known).
is_fulfilled_condition: A threading.Condition for waiting for the request to
be fulfilled.
"""
def __init__(self, method, params, message_id):
"""Initialize.
Args:
method: The string method name for this request.
message_id: An integer id for this request, which is assumed to be unique
from among all requests.
"""
self.method = method
self.id = message_id
self.params = params
self.results = {}
self.is_fulfilled = False
self.is_fulfilled_condition = threading.Condition()
def __repr__(self):
json_dict = {}
json_dict['method'] = self.method
json_dict['id'] = self.id
if self.params:
json_dict['params'] = self.params
return simplejson.dumps(json_dict, separators=(',', ':'))
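# Illustrative example (not part of the original module): a request such as
#   _DevToolsSocketRequest('Profiler.takeHeapSnapshot', {'detailed': True}, 3)
# serializes via str()/__repr__ to a compact JSON message along the lines of
#   {"method":"Profiler.takeHeapSnapshot","id":3,"params":{"detailed":true}}
# (key order may vary, since it follows the dictionary's iteration order).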
class _DevToolsSocketClient(asyncore.dispatcher):
"""Client that communicates with a remote Chrome instance via sockets.
This class works in conjunction with the _RemoteInspectorThread class to
communicate with a remote Chrome instance following the remote debugging
communication protocol in WebKit. This class performs the lower-level work
of socket communication.
Public Attributes:
handshake_done: A boolean indicating whether or not the client has completed
the required protocol handshake with the remote Chrome instance.
inspector_thread: An instance of the _RemoteInspectorThread class that is
working together with this class to communicate with a remote Chrome
instance.
"""
def __init__(self, verbose, show_socket_messages, hostname, port, path):
"""Initialize.
Args:
verbose: A boolean indicating whether or not to use verbose logging.
show_socket_messages: A boolean indicating whether or not to show the
socket messages sent/received when communicating with the remote
Chrome instance.
hostname: The string hostname of the DevToolsSocket to which to connect.
port: The integer port number of the DevToolsSocket to which to connect.
path: The string path of the DevToolsSocket to which to connect.
"""
asyncore.dispatcher.__init__(self)
self._logger = logging.getLogger('_DevToolsSocketClient')
self._logger.setLevel([logging.WARNING, logging.DEBUG][verbose])
self._show_socket_messages = show_socket_messages
self._read_buffer = ''
self._write_buffer = ''
self._socket_buffer_lock = threading.Lock()
self.handshake_done = False
self.inspector_thread = None
# Connect to the remote Chrome instance and initiate the protocol handshake.
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect((hostname, port))
fields = [
'Upgrade: WebSocket',
'Connection: Upgrade',
'Host: %s:%d' % (hostname, port),
'Origin: http://%s:%d' % (hostname, port),
'Sec-WebSocket-Key1: 4k0L66E ZU 8 5 <18 <TK 7 7',
'Sec-WebSocket-Key2: s2 20 `# 4| 3 9 U_ 1299',
]
handshake_msg = ('GET %s HTTP/1.1\r\n%s\r\n\r\n\x47\x30\x22\x2D\x5A\x3F'
'\x47\x58' % (path, '\r\n'.join(fields)))
self._Write(handshake_msg.encode('utf-8'))
def SendMessage(self, msg):
"""Causes a request message to be sent to the remote Chrome instance.
Args:
msg: A string message to be sent; assumed to be a JSON message in proper
format according to the remote debugging protocol in WebKit.
"""
# According to the communication protocol, each request message sent over
# the wire must begin with '\x00' and end with '\xff'.
self._Write('\x00' + msg.encode('utf-8') + '\xff')
def _Write(self, msg):
"""Causes a raw message to be sent to the remote Chrome instance.
Args:
msg: A raw string message to be sent.
"""
self._write_buffer += msg
self.handle_write()
def handle_write(self):
"""Called if a writable socket can be written; overridden from asyncore."""
self._socket_buffer_lock.acquire()
if self._write_buffer:
sent = self.send(self._write_buffer)
if self._show_socket_messages:
msg_type = ['Handshake', 'Message'][self._write_buffer[0] == '\x00' and
self._write_buffer[-1] == '\xff']
msg = ('========================\n'
'Sent %s:\n'
'========================\n'
'%s\n'
'========================') % (msg_type,
self._write_buffer[:sent-1])
print msg
self._write_buffer = self._write_buffer[sent:]
self._socket_buffer_lock.release()
def handle_read(self):
"""Called when a socket can be read; overridden from asyncore."""
self._socket_buffer_lock.acquire()
if self.handshake_done:
# Process a message reply from the remote Chrome instance.
self._read_buffer += self.recv(4096)
pos = self._read_buffer.find('\xff')
while pos >= 0:
pos += len('\xff')
data = self._read_buffer[:pos-len('\xff')]
pos2 = data.find('\x00')
if pos2 >= 0:
data = data[pos2 + 1:]
self._read_buffer = self._read_buffer[pos:]
if self._show_socket_messages:
msg = ('========================\n'
'Received Message:\n'
'========================\n'
'%s\n'
'========================') % data
print msg
if self.inspector_thread:
self.inspector_thread.NotifyReply(data)
pos = self._read_buffer.find('\xff')
else:
# Process a handshake reply from the remote Chrome instance.
self._read_buffer += self.recv(4096)
pos = self._read_buffer.find('\r\n\r\n')
if pos >= 0:
pos += len('\r\n\r\n')
data = self._read_buffer[:pos]
self._read_buffer = self._read_buffer[pos:]
self.handshake_done = True
if self._show_socket_messages:
msg = ('=========================\n'
'Received Handshake Reply:\n'
'=========================\n'
'%s\n'
'=========================') % data
print msg
self._socket_buffer_lock.release()
def handle_close(self):
"""Called when the socket is closed; overridden from asyncore."""
self.close()
def writable(self):
"""Determines if writes can occur for this socket; overridden from asyncore.
Returns:
True, if there is something to write to the socket, or
False, otherwise.
"""
return len(self._write_buffer) > 0
def handle_expt(self):
"""Called when out-of-band data exists; overridden from asyncore."""
self.handle_error()
def handle_error(self):
"""Called when an exception is raised; overridden from asyncore."""
self.close()
self.inspector_thread.ClientSocketExceptionOccurred()
asyncore.dispatcher.handle_error(self)
class _RemoteInspectorThread(threading.Thread):
"""Manages communication using Chrome's remote inspector protocol.
This class works in conjunction with the _DevToolsSocketClient class to
communicate with a remote Chrome instance following the remote inspector
communication protocol in WebKit. This class performs the higher-level work
of managing request and reply messages, whereas _DevToolsSocketClient handles
the lower-level work of socket communication.
"""
def __init__(self, tab_index, verbose, show_socket_messages):
"""Initialize.
Args:
tab_index: The integer index of the tab in the remote Chrome instance to
use for snapshotting.
verbose: A boolean indicating whether or not to use verbose logging.
show_socket_messages: A boolean indicating whether or not to show the
socket messages sent/received when communicating with the remote
Chrome instance.
"""
threading.Thread.__init__(self)
self._logger = logging.getLogger('_RemoteInspectorThread')
self._logger.setLevel([logging.WARNING, logging.DEBUG][verbose])
self._killed = False
self._requests = []
self._action_queue = []
self._action_queue_condition = threading.Condition()
self._action_specific_callback = None # Callback only for current action.
self._action_specific_callback_lock = threading.Lock()
self._general_callbacks = [] # General callbacks that can be long-lived.
self._general_callbacks_lock = threading.Lock()
self._condition_to_wait = None
# Create a DevToolsSocket client and wait for it to complete the remote
# debugging protocol handshake with the remote Chrome instance.
result = self._IdentifyDevToolsSocketConnectionInfo(tab_index)
self._client = _DevToolsSocketClient(
verbose, show_socket_messages, result['host'], result['port'],
result['path'])
self._client.inspector_thread = self
while asyncore.socket_map:
if self._client.handshake_done or self._killed:
break
asyncore.loop(timeout=1, count=1, use_poll=True)
def ClientSocketExceptionOccurred(self):
"""Notifies that the _DevToolsSocketClient encountered an exception."""
self.Kill()
def NotifyReply(self, msg):
"""Notifies of a reply message received from the remote Chrome instance.
Args:
msg: A string reply message received from the remote Chrome instance;
assumed to be a JSON message formatted according to the remote
debugging communication protocol in WebKit.
"""
reply_dict = simplejson.loads(msg)
# Notify callbacks of this message received from the remote inspector.
self._action_specific_callback_lock.acquire()
if self._action_specific_callback:
self._action_specific_callback(reply_dict)
self._action_specific_callback_lock.release()
self._general_callbacks_lock.acquire()
if self._general_callbacks:
for callback in self._general_callbacks:
callback(reply_dict)
self._general_callbacks_lock.release()
if 'result' in reply_dict:
# This is the result message associated with a previously-sent request.
request = self.GetRequestWithId(reply_dict['id'])
if request:
request.is_fulfilled_condition.acquire()
request.is_fulfilled_condition.notify()
request.is_fulfilled_condition.release()
def run(self):
"""Start this thread; overridden from threading.Thread."""
while not self._killed:
self._action_queue_condition.acquire()
if self._action_queue:
# There's a request to the remote inspector that needs to be processed.
messages, callback = self._action_queue.pop(0)
self._action_specific_callback_lock.acquire()
self._action_specific_callback = callback
self._action_specific_callback_lock.release()
# Prepare the request list.
for message_id, message in enumerate(messages):
self._requests.append(
_DevToolsSocketRequest(message[0], message[1], message_id))
# Send out each request. Wait until each request is complete before
# sending the next request.
for request in self._requests:
self._FillInParams(request)
self._client.SendMessage(str(request))
request.is_fulfilled_condition.acquire()
          self._condition_to_wait = request.is_fulfilled_condition
request.is_fulfilled_condition.wait()
request.is_fulfilled_condition.release()
if self._killed:
self._client.close()
return
# Clean up so things are ready for the next request.
self._requests = []
self._action_specific_callback_lock.acquire()
self._action_specific_callback = None
self._action_specific_callback_lock.release()
# Wait until there is something to process.
self._condition_to_wait = self._action_queue_condition
self._action_queue_condition.wait()
self._action_queue_condition.release()
self._client.close()
def Kill(self):
"""Notify this thread that it should stop executing."""
self._killed = True
# The thread might be waiting on a condition.
if self._condition_to_wait:
self._condition_to_wait.acquire()
self._condition_to_wait.notify()
self._condition_to_wait.release()
def PerformAction(self, request_messages, reply_message_callback):
"""Notify this thread of an action to perform using the remote inspector.
Args:
request_messages: A list of strings representing the requests to make
using the remote inspector.
reply_message_callback: A callable to be invoked any time a message is
received from the remote inspector while the current action is
being performed. The callable should accept a single argument,
which is a dictionary representing a message received.
"""
self._action_queue_condition.acquire()
self._action_queue.append((request_messages, reply_message_callback))
self._action_queue_condition.notify()
self._action_queue_condition.release()
def AddMessageCallback(self, callback):
"""Add a callback to invoke for messages received from the remote inspector.
Args:
callback: A callable to be invoked any time a message is received from the
remote inspector. The callable should accept a single argument, which
is a dictionary representing a message received.
"""
self._general_callbacks_lock.acquire()
self._general_callbacks.append(callback)
self._general_callbacks_lock.release()
def RemoveMessageCallback(self, callback):
"""Remove a callback from the set of those to invoke for messages received.
Args:
callback: A callable to remove from consideration.
"""
self._general_callbacks_lock.acquire()
self._general_callbacks.remove(callback)
self._general_callbacks_lock.release()
def GetRequestWithId(self, request_id):
"""Identifies the request with the specified id.
Args:
request_id: An integer request id; should be unique for each request.
Returns:
A request object associated with the given id if found, or
None otherwise.
"""
found_request = [x for x in self._requests if x.id == request_id]
if found_request:
return found_request[0]
return None
def GetFirstUnfulfilledRequest(self, method):
"""Identifies the first unfulfilled request with the given method name.
An unfulfilled request is one for which all relevant reply messages have
not yet been received from the remote inspector.
Args:
method: The string method name of the request for which to search.
Returns:
The first request object in the request list that is not yet fulfilled
and is also associated with the given method name, or
None if no such request object can be found.
"""
for request in self._requests:
if not request.is_fulfilled and request.method == method:
return request
return None
def _GetLatestRequestOfType(self, ref_req, method):
"""Identifies the latest specified request before a reference request.
This function finds the latest request with the specified method that
occurs before the given reference request.
Args:
ref_req: A reference request from which to start looking.
method: The string method name of the request for which to search.
Returns:
The latest _DevToolsSocketRequest object with the specified method,
if found, or None otherwise.
"""
start_looking = False
for request in self._requests[::-1]:
if request.id == ref_req.id:
start_looking = True
elif start_looking:
if request.method == method:
return request
return None
def _FillInParams(self, request):
"""Fills in parameters for requests as necessary before the request is sent.
Args:
request: The _DevToolsSocketRequest object associated with a request
message that is about to be sent.
"""
if request.method == 'Profiler.takeHeapSnapshot':
# We always want detailed v8 heap snapshot information.
request.params = {'detailed': True}
elif request.method == 'Profiler.getProfile':
# To actually request the snapshot data from a previously-taken snapshot,
# we need to specify the unique uid of the snapshot we want.
# The relevant uid should be contained in the last
# 'Profiler.takeHeapSnapshot' request object.
last_req = self._GetLatestRequestOfType(request,
'Profiler.takeHeapSnapshot')
if last_req and 'uid' in last_req.results:
request.params = {'type': 'HEAP', 'uid': last_req.results['uid']}
@staticmethod
def _IdentifyDevToolsSocketConnectionInfo(tab_index):
"""Identifies DevToolsSocket connection info from a remote Chrome instance.
Args:
tab_index: The integer index of the tab in the remote Chrome instance to
which to connect.
Returns:
A dictionary containing the DevToolsSocket connection info:
{
'host': string,
'port': integer,
'path': string,
}
Raises:
RuntimeError: When DevToolsSocket connection info cannot be identified.
"""
try:
# TODO(dennisjeffrey): Do not assume port 9222. The port should be passed
# as input to this function.
f = urllib2.urlopen('http://localhost:9222/json')
      result = f.read()
result = simplejson.loads(result)
except urllib2.URLError, e:
raise RuntimeError(
'Error accessing Chrome instance debugging port: ' + str(e))
if tab_index >= len(result):
raise RuntimeError(
'Specified tab index %d doesn\'t exist (%d tabs found)' %
(tab_index, len(result)))
if 'webSocketDebuggerUrl' not in result[tab_index]:
raise RuntimeError('No socket URL exists for the specified tab.')
socket_url = result[tab_index]['webSocketDebuggerUrl']
parsed = urlparse.urlparse(socket_url)
# On ChromeOS, the "ws://" scheme may not be recognized, leading to an
# incorrect netloc (and empty hostname and port attributes) in |parsed|.
# Change the scheme to "http://" to fix this.
if not parsed.hostname or not parsed.port:
socket_url = 'http' + socket_url[socket_url.find(':'):]
parsed = urlparse.urlparse(socket_url)
# Warning: |parsed.scheme| is incorrect after this point.
return ({'host': parsed.hostname,
'port': parsed.port,
'path': parsed.path})
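# Illustrative note (not part of the original module): with Chrome started
# using --remote-debugging-port=9222, http://localhost:9222/json returns one
# entry per tab, roughly of the form
#   [{..., "webSocketDebuggerUrl": "ws://localhost:9222/devtools/page/1"}]
# which the method above reduces to connection info such as
#   {'host': 'localhost', 'port': 9222, 'path': '/devtools/page/1'}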
class _RemoteInspectorDriverThread(threading.Thread):
"""Drives the communication service with the remote inspector."""
def __init__(self):
"""Initialize."""
threading.Thread.__init__(self)
def run(self):
"""Drives the communication service with the remote inspector."""
try:
while asyncore.socket_map:
asyncore.loop(timeout=1, count=1, use_poll=True)
except KeyboardInterrupt:
pass
class _V8HeapSnapshotParser(object):
"""Parses v8 heap snapshot data."""
_CHILD_TYPES = ['context', 'element', 'property', 'internal', 'hidden',
'shortcut', 'weak']
_NODE_TYPES = ['hidden', 'array', 'string', 'object', 'code', 'closure',
'regexp', 'number', 'native', 'synthetic']
@staticmethod
def ParseSnapshotData(raw_data):
"""Parses raw v8 heap snapshot data and returns the summarized results.
The raw heap snapshot data is represented as a JSON object with the
following keys: 'snapshot', 'nodes', and 'strings'.
The 'snapshot' value provides the 'title' and 'uid' attributes for the
snapshot. For example:
{ u'title': u'org.webkit.profiles.user-initiated.1', u'uid': 1}
The 'nodes' value is a list of node information from the v8 heap, with a
special first element that describes the node serialization layout (see
HeapSnapshotJSONSerializer::SerializeNodes). All other list elements
contain information about nodes in the v8 heap, according to the
serialization layout.
The 'strings' value is a list of strings, indexed by values in the 'nodes'
list to associate nodes with strings.
Args:
raw_data: A string representing the raw v8 heap snapshot data.
Returns:
A dictionary containing the summarized v8 heap snapshot data:
{
'total_v8_node_count': integer, # Total number of nodes in the v8 heap.
'total_shallow_size': integer, # Total heap size, in bytes.
}
"""
total_node_count = 0
total_shallow_size = 0
constructors = {}
# TODO(dennisjeffrey): The following line might be slow, especially on
# ChromeOS. Investigate faster alternatives.
heap = simplejson.loads(raw_data)
index = 1 # Bypass the special first node list item.
node_list = heap['nodes']
while index < len(node_list):
node_type = node_list[index]
node_name = node_list[index + 1]
node_id = node_list[index + 2]
node_self_size = node_list[index + 3]
node_retained_size = node_list[index + 4]
node_dominator = node_list[index + 5]
node_children_count = node_list[index + 6]
index += 7
node_children = []
for i in xrange(node_children_count):
child_type = node_list[index]
child_type_string = _V8HeapSnapshotParser._CHILD_TYPES[int(child_type)]
child_name_index = node_list[index + 1]
child_to_node = node_list[index + 2]
index += 3
child_info = {
'type': child_type_string,
'name_or_index': child_name_index,
'to_node': child_to_node,
}
node_children.append(child_info)
# Get the constructor string for this node so nodes can be grouped by
# constructor.
# See HeapSnapshot.js: WebInspector.HeapSnapshotNode.prototype.
type_string = _V8HeapSnapshotParser._NODE_TYPES[int(node_type)]
constructor_name = None
if type_string == 'hidden':
constructor_name = '(system)'
elif type_string == 'object':
constructor_name = heap['strings'][int(node_name)]
elif type_string == 'native':
pos = heap['strings'][int(node_name)].find('/')
if pos >= 0:
constructor_name = heap['strings'][int(node_name)][:pos].rstrip()
else:
constructor_name = heap['strings'][int(node_name)]
elif type_string == 'code':
constructor_name = '(compiled code)'
else:
constructor_name = '(' + type_string + ')'
node_obj = {
'type': type_string,
'name': heap['strings'][int(node_name)],
'id': node_id,
'self_size': node_self_size,
'retained_size': node_retained_size,
'dominator': node_dominator,
'children_count': node_children_count,
'children': node_children,
}
if constructor_name not in constructors:
constructors[constructor_name] = []
constructors[constructor_name].append(node_obj)
total_node_count += 1
total_shallow_size += node_self_size
# TODO(dennisjeffrey): Have this function also return more detailed v8
# heap snapshot data when a need for it arises (e.g., using |constructors|).
result = {}
result['total_v8_node_count'] = total_node_count
result['total_shallow_size'] = total_shallow_size
return result
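# Rough usage sketch (illustrative only; `raw_data` is assumed to be the
# concatenation of the Profiler.addHeapSnapshotChunk chunks for one snapshot,
# as collected in RemoteInspectorClient.HeapSnapshot below):
#   summary = _V8HeapSnapshotParser.ParseSnapshotData(raw_data)
#   print summary['total_v8_node_count'], summary['total_shallow_size']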
# TODO(dennisjeffrey): The "verbose" option used in this file should re-use
# pyauto's verbose flag.
class RemoteInspectorClient(object):
"""Main class for interacting with Chrome's remote inspector.
Upon initialization, a socket connection to Chrome's remote inspector will
be established. Users of this class should call Stop() to close the
connection when it's no longer needed.
Public Methods:
Stop: Close the connection to the remote inspector. Should be called when
a user is done using this module.
HeapSnapshot: Takes a v8 heap snapshot and returns the summarized data.
GetMemoryObjectCounts: Retrieves memory object count information.
CollectGarbage: Forces a garbage collection.
StartTimelineEventMonitoring: Starts monitoring for timeline events.
StopTimelineEventMonitoring: Stops monitoring for timeline events.
"""
# TODO(dennisjeffrey): Allow a user to specify a window index too (not just a
# tab index), when running through PyAuto.
def __init__(self, tab_index=0, verbose=False, show_socket_messages=False):
"""Initialize.
Args:
tab_index: The integer index of the tab in the remote Chrome instance to
which to connect. Defaults to 0 (the first tab).
verbose: A boolean indicating whether or not to use verbose logging.
show_socket_messages: A boolean indicating whether or not to show the
socket messages sent/received when communicating
with the remote Chrome instance.
"""
self._tab_index = tab_index
self._verbose = verbose
self._show_socket_messages = show_socket_messages
self._timeline_started = False
logging.basicConfig()
self._logger = logging.getLogger('RemoteInspectorClient')
self._logger.setLevel([logging.WARNING, logging.DEBUG][verbose])
# Creating _RemoteInspectorThread might raise an exception. This prevents an
# AttributeError in the destructor.
self._remote_inspector_thread = None
self._remote_inspector_driver_thread = None
# Start up a thread for long-term communication with the remote inspector.
self._remote_inspector_thread = _RemoteInspectorThread(
tab_index, verbose, show_socket_messages)
self._remote_inspector_thread.start()
# At this point, a connection has already been made to the remote inspector.
# This thread calls asyncore.loop, which activates the channel service.
self._remote_inspector_driver_thread = _RemoteInspectorDriverThread()
self._remote_inspector_driver_thread.start()
def __del__(self):
"""Called on destruction of this object."""
self.Stop()
def Stop(self):
"""Stop/close communication with the remote inspector."""
if self._remote_inspector_thread:
self._remote_inspector_thread.Kill()
self._remote_inspector_thread.join()
self._remote_inspector_thread = None
if self._remote_inspector_driver_thread:
self._remote_inspector_driver_thread.join()
self._remote_inspector_driver_thread = None
def HeapSnapshot(self, include_summary=False):
"""Takes a v8 heap snapshot.
Returns:
A dictionary containing information for a single v8 heap
snapshot that was taken.
{
'url': string, # URL of the webpage that was snapshotted.
'raw_data': string, # The raw data as JSON string.
'total_v8_node_count': integer, # Total number of nodes in the v8 heap.
# Only if |include_summary| is True.
'total_heap_size': integer, # Total v8 heap size (number of bytes).
# Only if |include_summary| is True.
}
"""
HEAP_SNAPSHOT_MESSAGES = [
('Page.getResourceTree', {}),
('Debugger.enable', {}),
('Profiler.clearProfiles', {}),
('Profiler.takeHeapSnapshot', {}),
('Profiler.getProfile', {}),
]
self._current_heap_snapshot = []
self._url = ''
self._collected_heap_snapshot_data = {}
done_condition = threading.Condition()
def HandleReply(reply_dict):
"""Processes a reply message received from the remote Chrome instance.
Args:
reply_dict: A dictionary object representing the reply message received
from the remote inspector.
"""
if 'result' in reply_dict:
# This is the result message associated with a previously-sent request.
request = self._remote_inspector_thread.GetRequestWithId(
reply_dict['id'])
if 'frameTree' in reply_dict['result']:
self._url = reply_dict['result']['frameTree']['frame']['url']
elif 'method' in reply_dict:
# This is an auxiliary message sent from the remote Chrome instance.
if reply_dict['method'] == 'Profiler.addProfileHeader':
snapshot_req = (
self._remote_inspector_thread.GetFirstUnfulfilledRequest(
'Profiler.takeHeapSnapshot'))
if snapshot_req:
snapshot_req.results['uid'] = reply_dict['params']['header']['uid']
elif reply_dict['method'] == 'Profiler.addHeapSnapshotChunk':
self._current_heap_snapshot.append(reply_dict['params']['chunk'])
elif reply_dict['method'] == 'Profiler.finishHeapSnapshot':
# A heap snapshot has been completed. Analyze and output the data.
self._logger.debug('Heap snapshot taken: %s', self._url)
# TODO(dennisjeffrey): Parse the heap snapshot on-the-fly as the data
# is coming in over the wire, so we can avoid storing the entire
# snapshot string in memory.
raw_snapshot_data = ''.join(self._current_heap_snapshot)
self._collected_heap_snapshot_data = {
'url': self._url,
'raw_data': raw_snapshot_data}
if include_summary:
self._logger.debug('Now analyzing heap snapshot...')
parser = _V8HeapSnapshotParser()
time_start = time.time()
self._logger.debug('Raw snapshot data size: %.2f MB',
len(raw_snapshot_data) / (1024.0 * 1024.0))
result = parser.ParseSnapshotData(raw_snapshot_data)
self._logger.debug('Time to parse data: %.2f sec',
time.time() - time_start)
count = result['total_v8_node_count']
self._collected_heap_snapshot_data['total_v8_node_count'] = count
total_size = result['total_shallow_size']
self._collected_heap_snapshot_data['total_heap_size'] = total_size
done_condition.acquire()
done_condition.notify()
done_condition.release()
# Tell the remote inspector to take a v8 heap snapshot, then wait until
# the snapshot information is available to return.
self._remote_inspector_thread.PerformAction(HEAP_SNAPSHOT_MESSAGES,
HandleReply)
done_condition.acquire()
done_condition.wait()
done_condition.release()
return self._collected_heap_snapshot_data
def EvaluateJavaScript(self, expression):
"""Evaluates a JavaScript expression and returns the result.
Sends a message containing the expression to the remote Chrome instance we
are connected to, and evaluates it in the context of the tab we are
connected to. Blocks until the result is available and returns it.
Returns:
A dictionary representing the result.
"""
EVALUATE_MESSAGES = [
('Runtime.evaluate', { 'expression': expression,
'objectGroup': 'group',
'returnByValue': True }),
('Runtime.releaseObjectGroup', { 'objectGroup': 'group' })
]
self._result = None
done_condition = threading.Condition()
def HandleReply(reply_dict):
"""Processes a reply message received from the remote Chrome instance.
Args:
reply_dict: A dictionary object representing the reply message received
from the remote Chrome instance.
"""
if 'result' in reply_dict and 'result' in reply_dict['result']:
self._result = reply_dict['result']['result']['value']
done_condition.acquire()
done_condition.notify()
done_condition.release()
# Tell the remote inspector to evaluate the given expression, then wait
# until that information is available to return.
self._remote_inspector_thread.PerformAction(EVALUATE_MESSAGES,
HandleReply)
done_condition.acquire()
done_condition.wait()
done_condition.release()
return self._result
def GetMemoryObjectCounts(self):
"""Retrieves memory object count information.
Returns:
A dictionary containing the memory object count information:
{
'DOMNodeCount': integer, # Total number of DOM nodes.
'EventListenerCount': integer, # Total number of event listeners.
}
"""
MEMORY_COUNT_MESSAGES = [
('Memory.getDOMNodeCount', {})
]
self._event_listener_count = None
self._dom_node_count = None
done_condition = threading.Condition()
def HandleReply(reply_dict):
"""Processes a reply message received from the remote Chrome instance.
Args:
reply_dict: A dictionary object representing the reply message received
from the remote Chrome instance.
"""
if 'result' in reply_dict and 'domGroups' in reply_dict['result']:
event_listener_count = 0
dom_node_count = 0
dom_group_list = reply_dict['result']['domGroups']
for dom_group in dom_group_list:
listener_array = dom_group['listenerCount']
for listener in listener_array:
event_listener_count += listener['count']
dom_node_array = dom_group['nodeCount']
for dom_element in dom_node_array:
dom_node_count += dom_element['count']
self._event_listener_count = event_listener_count
self._dom_node_count = dom_node_count
done_condition.acquire()
done_condition.notify()
done_condition.release()
# Tell the remote inspector to collect memory count info, then wait until
# that information is available to return.
self._remote_inspector_thread.PerformAction(MEMORY_COUNT_MESSAGES,
HandleReply)
done_condition.acquire()
done_condition.wait()
done_condition.release()
return {
'DOMNodeCount': self._dom_node_count,
'EventListenerCount': self._event_listener_count,
}
def CollectGarbage(self):
"""Forces a garbage collection."""
COLLECT_GARBAGE_MESSAGES = [
('Profiler.collectGarbage', {})
]
# Tell the remote inspector to do a garbage collect. We can return
# immediately, since there is no result for which to wait.
self._remote_inspector_thread.PerformAction(COLLECT_GARBAGE_MESSAGES, None)
def StartTimelineEventMonitoring(self, event_callback):
"""Starts timeline event monitoring.
Args:
event_callback: A callable to invoke whenever a timeline event is observed
from the remote inspector. The callable should take a single input,
which is a dictionary containing the detailed information of a
timeline event.
"""
if self._timeline_started:
self._logger.warning('Timeline monitoring already started.')
return
TIMELINE_MESSAGES = [
('Timeline.start', {})
]
self._event_callback = event_callback
def HandleReply(reply_dict):
"""Processes a reply message received from the remote Chrome instance.
Args:
reply_dict: A dictionary object representing the reply message received
from the remote Chrome instance.
"""
if reply_dict.get('method') == 'Timeline.eventRecorded':
self._event_callback(reply_dict['params']['record'])
# Tell the remote inspector to start the timeline. We can return
# immediately, since there is no result for which to wait.
self._timeline_callback = HandleReply
self._remote_inspector_thread.AddMessageCallback(self._timeline_callback)
self._remote_inspector_thread.PerformAction(TIMELINE_MESSAGES, None)
self._timeline_started = True
def StopTimelineEventMonitoring(self):
"""Stops timeline event monitoring."""
if not self._timeline_started:
self._logger.warning('Timeline monitoring already stopped.')
return
TIMELINE_MESSAGES = [
('Timeline.stop', {})
]
# Tell the remote inspector to stop the timeline. We can return
# immediately, since there is no result for which to wait.
self._remote_inspector_thread.RemoveMessageCallback(self._timeline_callback)
self._remote_inspector_thread.PerformAction(TIMELINE_MESSAGES, None)
self._timeline_started = False
def _ConvertByteCountToHumanReadableString(self, num_bytes):
"""Converts an integer number of bytes into a human-readable string.
Args:
num_bytes: An integer number of bytes.
Returns:
A human-readable string representation of the given number of bytes.
"""
if num_bytes < 1024:
return '%d B' % num_bytes
elif num_bytes < 1048576:
return '%.2f KB' % (num_bytes / 1024.0)
else:
return '%.2f MB' % (num_bytes / 1048576.0)
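if __name__ == '__main__':
  # Minimal demo sketch (not part of the original module). It assumes a local
  # Chrome instance was started with --remote-debugging-port=9222 and that tab
  # 0 is the page of interest, as described in the module docstring above.
  client = RemoteInspectorClient(tab_index=0, verbose=False)
  snapshot = client.HeapSnapshot(include_summary=True)
  print 'Snapshotted URL: %s' % snapshot['url']
  print 'Total v8 node count: %d' % snapshot['total_v8_node_count']
  print 'Total v8 heap size: %d bytes' % snapshot['total_heap_size']
  client.Stop()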
| bsd-3-clause | 7,801,864,265,520,846,000 | 37.694471 | 80 | 0.657643 | false |
stackforge/cloudkitty | cloudkitty/backend/__init__.py | 1 | 1671 | # -*- coding: utf-8 -*-
# Copyright 2014 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class BaseIOBackend(object):
def __init__(self, path):
self.open(path)
@abc.abstractmethod
def open(self, path):
"""Open the connection/file on the backend.
"""
@abc.abstractmethod
def tell(self):
"""Current position on the backend.
"""
@abc.abstractmethod
def seek(self, offset, from_what=0):
        # from_what: 0 = beginning of the file, 1 = current position, 2 = end
"""Change position in the backend.
"""
@abc.abstractmethod
def flush(self):
"""Force write informations on the backend.
"""
@abc.abstractmethod
def write(self, data):
"""Writer data on the backend.
:param data: Data to be written on the backend.
"""
@abc.abstractmethod
def read(self):
"""Read data from the backend.
:return str: Data read from the backend.
"""
@abc.abstractmethod
def close(self):
"""Close the connection/file on the backend.
"""
| apache-2.0 | 6,410,223,103,262,482,000 | 23.217391 | 78 | 0.625374 | false |
patelrajnath/rnn4nlp | rnn/LSTM.py | 1 | 38726 | import theano
import numpy
import os
from theano import tensor as T
from collections import OrderedDict
from utils.tools import numpy_floatX
class LSTM(object):
def __init__(self, nh, nc, de, cs, ne_char=None, de_char=None, ne_src=None,
ne_tgt=None, emb_src=None, emb_tgt=None, max_char=10):
'''
nh :: dimension of the hidden layer
nc :: number of classes
ne :: number of word embeddings in the vocabulary
de :: dimension of the word embeddings
cs :: word window context size
'''
# parameters of the model
self.emb = theano.shared(0.2 * numpy.random.uniform(-1.0, 1.0,\
(ne_tgt + 1, de)).astype(theano.config.floatX)) # add one for PADDING at the end
        #Input layer weights
self.Wx = theano.shared(0.2 * numpy.random.uniform(-1.0, 1.0,\
(de * cs, nh)).astype(theano.config.floatX))
        #Recurrent weight or g(t) in note
self.Wh = theano.shared(0.2 * numpy.random.uniform(-1.0, 1.0,\
(nh, nh)).astype(theano.config.floatX))
self.bh = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
#LSTM Gate
#X(t) weights
self.Wi = theano.shared(0.2 * numpy.random.uniform(-1.0, 1.0,\
(de * cs, nh)).astype(theano.config.floatX))
self.Wo = theano.shared(0.2 * numpy.random.uniform(-1.0, 1.0,\
(de * cs, nh)).astype(theano.config.floatX))
self.Wf = theano.shared(0.2 * numpy.random.uniform(-1.0, 1.0,\
(de * cs, nh)).astype(theano.config.floatX))
#H(t-1) weights
self.Ui = theano.shared(0.2 * numpy.random.uniform(-1.0, 1.0,\
(nh, nh)).astype(theano.config.floatX))
self.Uo = theano.shared(0.2 * numpy.random.uniform(-1.0, 1.0,\
(nh, nh)).astype(theano.config.floatX))
self.Uf = theano.shared(0.2 * numpy.random.uniform(-1.0, 1.0,\
(nh, nh)).astype(theano.config.floatX))
#Bias
self.bi = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
self.bo = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
self.bf = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
#output weights and biases
self.W = theano.shared(0.2 * numpy.random.uniform(-1.0, 1.0,\
(nh, nc)).astype(theano.config.floatX))
self.b = theano.shared(numpy.zeros(nc, dtype=theano.config.floatX))
        #Recurrent memory or h(t-1)
self.h0 = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
self.c0 = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
# bundle
self.params = [ self.emb, self.Wx, self.Wh, self.Wi, self.Wo, self.Wf, self.W, self.Ui,
self.Uo, self.Uf, self.bh, self.bi, self.bo, self.bf, self.b, self.h0, self.c0]
self.names = ['emb', 'Wx', 'Wh', 'Wi', 'Wo', 'Wf', 'W', 'Ui', 'Uo', 'Uf', 'bh', 'bi',
'bo', 'bf', 'b', 'h0', 'c0']
idxs = T.imatrix() # as many columns as context window size/lines as words in the sentence
x = self.emb[idxs].reshape((idxs.shape[0], de*cs))
y = T.ivector('y') # label
def recurrence(x_t, h_tm1, c_):
#Gates
g_i = T.nnet.sigmoid(T.dot(x_t, self.Wi) + T.dot(h_tm1, self.Ui) + self.bi)
g_o = T.nnet.sigmoid(T.dot(x_t, self.Wo) + T.dot(h_tm1, self.Uo) + self.bo)
g_f = T.nnet.sigmoid(T.dot(x_t, self.Wf) + T.dot(h_tm1, self.Uf) + self.bf)
g_t = T.tanh(T.dot(x_t, self.Wx) + T.dot(h_tm1, self.Wh) + self.bh)
c = g_f*c_ + g_i*g_t
h = g_o*T.tanh(c)
s_t = T.nnet.softmax(T.dot(h, self.W) + self.b)
return [h, c, s_t]
[h, c, s_t],_ = theano.scan(fn=recurrence, \
sequences=x, outputs_info=[self.h0, self.c0, None], \
n_steps=x.shape[0])
p_y_given_x_lastword = s_t[-1,0,:]
p_y_given_x_sentence = s_t[:,0,:]
#p_y_given_x_lastword = s[-1]
#p_y_given_x_sentence = s
y_pred = T.argmax(p_y_given_x_sentence, axis=1)
# cost and gradients and learning rate
lr = T.scalar('lr')
nll = -T.log(p_y_given_x_lastword)[y].sum()
gradients = T.grad( nll, self.params )
updates = OrderedDict(( p, p-lr*g ) for p, g in zip( self.params , gradients))
# theano functions
self.classify = theano.function(inputs=[idxs], outputs=y_pred)
self.train = theano.function( inputs = [idxs, y, lr],
outputs = nll,
updates = updates, on_unused_input='ignore',
name='train_LSTM', allow_input_downcast=True)
self.normalize = theano.function( inputs = [],
updates = {self.emb:\
self.emb/T.sqrt((self.emb**2).sum(axis=1)).dimshuffle(0,'x')})
def save(self, folder):
for param, name in zip(self.params, self.names):
numpy.save(os.path.join(folder, name + '.npy'), param.get_value())
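# Rough usage sketch (illustrative only; `vocab_size`, `word_window_idxs` and
# `labels` are assumptions, not defined in this module):
#   rnn = LSTM(nh=100, nc=10, de=50, cs=5, ne_tgt=vocab_size)
#   cost = rnn.train(word_window_idxs, labels, 0.01)  # idxs: (n_words, cs) int matrix
#   rnn.normalize()                                   # rescale embeddings to unit norm
#   predictions = rnn.classify(word_window_idxs)      # one predicted class per word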
class LSTM_pretrain(object):
def __init__(self, nh, nc, de, cs, ne_char=None, de_char=None, ne_src=None,
ne_tgt=None, emb_src=None, emb_tgt=None, max_char=10):
'''
nh :: dimension of the hidden layer
nc :: number of classes
ne :: number of word embeddings in the vocabulary
de :: dimension of the word embeddings
cs :: word window context size
'''
# parameters of the model
self.emb = theano.shared(numpy.asarray(emb_tgt).astype(theano.config.floatX))
        #Input layer weights
self.Wx = theano.shared(0.2 * numpy.random.uniform(-1.0, 1.0,\
(de * cs, nh)).astype(theano.config.floatX))
        #Recurrent weight or g(t) in note
self.Wh = theano.shared(0.2 * numpy.random.uniform(-1.0, 1.0,\
(nh, nh)).astype(theano.config.floatX))
self.bh = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
#LSTM Gate
#X(t) weights
self.Wi = theano.shared(0.2 * numpy.random.uniform(-1.0, 1.0,\
(de * cs, nh)).astype(theano.config.floatX))
self.Wo = theano.shared(0.2 * numpy.random.uniform(-1.0, 1.0,\
(de * cs, nh)).astype(theano.config.floatX))
self.Wf = theano.shared(0.2 * numpy.random.uniform(-1.0, 1.0,\
(de * cs, nh)).astype(theano.config.floatX))
#H(t-1) weights
self.Ui = theano.shared(0.2 * numpy.random.uniform(-1.0, 1.0,\
(nh, nh)).astype(theano.config.floatX))
self.Uo = theano.shared(0.2 * numpy.random.uniform(-1.0, 1.0,\
(nh, nh)).astype(theano.config.floatX))
self.Uf = theano.shared(0.2 * numpy.random.uniform(-1.0, 1.0,\
(nh, nh)).astype(theano.config.floatX))
#Bias
self.bi = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
self.bo = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
self.bf = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
#output weights and biases
self.W = theano.shared(0.2 * numpy.random.uniform(-1.0, 1.0,\
(nh, nc)).astype(theano.config.floatX))
self.b = theano.shared(numpy.zeros(nc, dtype=theano.config.floatX))
        #Recurrent memory or h(t-1)
self.h0 = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
self.c0 = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
# bundle
self.params = [ self.emb, self.Wx, self.Wh, self.Wi, self.Wo, self.Wf, self.W, self.Ui,
self.Uo, self.Uf, self.bh, self.bi, self.bo, self.bf, self.b, self.h0, self.c0]
self.names = ['emb', 'Wx', 'Wh', 'Wi', 'Wo', 'Wf', 'W', 'Ui', 'Uo', 'Uf', 'bh', 'bi',
'bo', 'bf', 'b', 'h0', 'c0']
idxs = T.imatrix() # as many columns as context window size/lines as words in the sentence
x = self.emb[idxs].reshape((idxs.shape[0], de*cs))
y = T.ivector('y') # label
def recurrence(x_t, h_tm1, c_):
#Gates
g_i = T.nnet.sigmoid(T.dot(x_t, self.Wi) + T.dot(h_tm1, self.Ui) + self.bi)
g_o = T.nnet.sigmoid(T.dot(x_t, self.Wo) + T.dot(h_tm1, self.Uo) + self.bo)
g_f = T.nnet.sigmoid(T.dot(x_t, self.Wf) + T.dot(h_tm1, self.Uf) + self.bf)
g_t = T.tanh(T.dot(x_t, self.Wx) + T.dot(h_tm1, self.Wh) + self.bh)
c = g_f*c_ + g_i*g_t
h = g_o*T.tanh(c)
s_t = T.nnet.softmax(T.dot(h, self.W) + self.b)
return [h, c, s_t]
[h, c, s_t],_ = theano.scan(fn=recurrence, \
sequences=x, outputs_info=[self.h0, self.c0, None], \
n_steps=x.shape[0])
p_y_given_x_lastword = s_t[-1,0,:]
p_y_given_x_sentence = s_t[:,0,:]
#p_y_given_x_lastword = s[-1]
#p_y_given_x_sentence = s
y_pred = T.argmax(p_y_given_x_sentence, axis=1)
# cost and gradients and learning rate
lr = T.scalar('lr')
nll = -T.log(p_y_given_x_lastword)[y].sum()
gradients = T.grad( nll, self.params )
updates = OrderedDict(( p, p-lr*g ) for p, g in zip( self.params , gradients))
# theano functions
self.classify = theano.function(inputs=[idxs], outputs=y_pred)
self.train = theano.function( inputs = [idxs, y, lr],
outputs = nll,
updates = updates, on_unused_input='ignore',
name='train_LSTM', allow_input_downcast=True)
self.normalize = theano.function( inputs = [],
updates = {self.emb:\
self.emb/T.sqrt((self.emb**2).sum(axis=1)).dimshuffle(0,'x')})
def save(self, folder):
for param, name in zip(self.params, self.names):
numpy.save(os.path.join(folder, name + '.npy'), param.get_value())
class LSTM_adadelta(object):
def __init__(self, nh, nc, de, cs, ne_char=None, de_char=None, ne_src=None,
ne_tgt=None, emb_src=None, emb_tgt=None, max_char=10):
'''
nh :: dimension of the hidden layer
nc :: number of classes
ne :: number of word embeddings in the vocabulary
de :: dimension of the word embeddings
cs :: word window context size
'''
# parameters of the model
self.emb = theano.shared(numpy.random.normal(0.0, 1.0,\
(ne_tgt + 1, de)).astype(theano.config.floatX)) # add one for PADDING at the end
        #Input layer weights
self.Wx = theano.shared(numpy.random.normal(0.0, 1.0,\
(de * cs, nh)).astype(theano.config.floatX))
        #Recurrent weight or g(t) in note
self.Wh = theano.shared(numpy.linalg.svd(numpy.random.randn(\
nh, nh))[0].astype(theano.config.floatX))
self.bh = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
#LSTM Gate
#X(t) weights
self.Wi = theano.shared(numpy.random.normal(0.0, 1.0,\
(de * cs, nh)).astype(theano.config.floatX))
self.Wo = theano.shared(numpy.random.normal(0.0, 1.0,\
(de * cs, nh)).astype(theano.config.floatX))
self.Wf = theano.shared(numpy.random.normal(0.0, 1.0,\
(de * cs, nh)).astype(theano.config.floatX))
#H(t-1) weights
self.Ui = theano.shared(numpy.linalg.svd(numpy.random.randn(\
nh, nh))[0].astype(theano.config.floatX))
self.Uo = theano.shared(numpy.linalg.svd(numpy.random.randn(\
nh, nh))[0].astype(theano.config.floatX))
self.Uf = theano.shared(numpy.linalg.svd(numpy.random.randn(\
nh, nh))[0].astype(theano.config.floatX))
#Bias
self.bi = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
self.bo = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
self.bf = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
#output weights and biases
self.W = theano.shared(numpy.random.normal(0.0, 1.0,\
(nh, nc)).astype(theano.config.floatX))
self.b = theano.shared(numpy.zeros(nc, dtype=theano.config.floatX))
#Recurrent memory or h(t-1)
self.h0 = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
self.c0 = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
# bundle
self.params = [ self.emb, self.Wx, self.Wh, self.Wi, self.Wo, self.Wf, self.W, self.Ui, self.Uo, self.Uf, self.bh, self.bi, self.bo, self.bf, self.b, self.h0, self.c0]
self.names = ['emb', 'Wx', 'Wh', 'Wi', 'Wo', 'Wf', 'W', 'Ui', 'Uo', 'Uf', 'bh', 'bi', 'bo', 'bf', 'b', 'h0', 'c0']
idxs = T.imatrix() # as many columns as context window size/lines as words in the sentence
x = self.emb[idxs].reshape((idxs.shape[0], de*cs))
y = T.ivector('y') # label
def recurrence(x_t, h_tm1, c_):
#Gates
#g_i = T.nnet.relu(T.dot(x_t, self.Wi) + T.dot(h_tm1, self.Ui) + self.bi)
#g_o = T.nnet.relu(T.dot(x_t, self.Wo) + T.dot(h_tm1, self.Uo) + self.bo)
#g_f = T.nnet.relu(T.dot(x_t, self.Wf) + T.dot(h_tm1, self.Uf) + self.bf)
g_i = T.nnet.sigmoid(T.dot(x_t, self.Wi) + T.dot(h_tm1, self.Ui) + self.bi)
g_o = T.nnet.sigmoid(T.dot(x_t, self.Wo) + T.dot(h_tm1, self.Uo) + self.bo)
g_f = T.nnet.sigmoid(T.dot(x_t, self.Wf) + T.dot(h_tm1, self.Uf) + self.bf)
g_t = T.tanh(T.dot(x_t, self.Wx) + T.dot(h_tm1, self.Wh) + self.bh)
c = g_f*c_ + g_i*g_t
h = g_o*T.tanh(c)
s_t = T.nnet.softmax(T.dot(h, self.W) + self.b)
return [h, c, s_t]
[h, c, s_t],_ = theano.scan(fn=recurrence, \
sequences=x, outputs_info=[self.h0, self.c0, None], \
n_steps=x.shape[0],\
truncate_gradient=7)
#p_y_given_x_lastword = s_t[-1,0,:]
p_y_given_x_sentence = s_t[:,0,:]
y_pred = T.argmax(p_y_given_x_sentence, axis=1)
# cost and gradients and learning rate
lr = T.scalar('lr')
nll = -(T.log(s_t[:,0,:])[T.arange(y.shape[0]), y]).sum()
grads = T.grad( nll, self.params )
#updates = OrderedDict(( p, p-lr*g ) for p, g in zip( self.params , gradients))
'''
Adadelta update
'''
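# zipped_grads caches the current gradient of each parameter, running_grads2 keeps
# the decaying average E[g^2] (decay 0.95) and running_up2 the decaying average
# E[dx^2] of past updates; the step is dx = -sqrt(E[dx^2]+eps)/sqrt(E[g^2]+eps) * g
# (Zeiler's ADADELTA), split into two phases: train_grad_shared accumulates the
# gradient statistics and train_update applies the parameter update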
tparams = OrderedDict((p,q) for p,q in zip(self.names, self.params))
zipped_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_grad' % k)
for k, p in tparams.items()]
running_up2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rup2' % k)
for k, p in tparams.items()]
running_grads2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rgrad2' % k)
for k, p in tparams.items()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
for rg2, g in zip(running_grads2, grads)]
updir = [-T.sqrt(ru2 + 1e-6) / T.sqrt(rg2 + 1e-6) * zg
for zg, ru2, rg2 in zip(zipped_grads,
running_up2,
running_grads2)]
ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2))
for ru2, ud in zip(running_up2, updir)]
param_up = [(p, p + ud) for p, ud in zip(tparams.values(), updir)]
# theano functions
self.train_grad_shared = theano.function(inputs=[idxs, y, lr], outputs=nll, updates=zgup + rg2up,
on_unused_input='ignore', name='adadelta_train_grad_shared', allow_input_downcast=True)
self.train_update = theano.function([lr], [], updates=ru2up + param_up,
on_unused_input='ignore',
name='adadelta_train_update')
self.classify = theano.function(inputs=[idxs], outputs=y_pred)
self.normalize = theano.function( inputs = [],
updates = {self.emb:\
self.emb/T.sqrt((self.emb**2).sum(axis=1)).dimshuffle(0,'x')})
def save(self, folder):
for param, name in zip(self.params, self.names):
numpy.save(os.path.join(folder, name + '.npy'), param.get_value())
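# Example usage of LSTM_adadelta (illustrative sketch only; the sizes and variable
# names below are hypothetical and not taken from this repository):
#   rnn = LSTM_adadelta(nh=100, nc=20, de=50, cs=5, ne_tgt=10000)
#   cost = rnn.train_grad_shared(context_windows, labels, 1.0)  # int32 (n_words, cs) and (n_words,)
#   rnn.train_update(1.0)  # lr is accepted but unused by the adadelta step
#   rnn.normalize()        # rescale embedding rows to unit L2 norm
#   predicted_labels = rnn.classify(context_windows)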
class LSTM_adadelta_pretrain(object):
def __init__(self, nh, nc, de, cs, ne_char=None, de_char=None, ne_src=None,
ne_tgt=None, emb_src=None, emb_tgt=None, max_char=10):
'''
nh :: dimension of the hidden layer
nc :: number of classes
emb_tgt :: pre-trained (target) embedding matrix whose rows are word vectors of
dimension de; the remaining keyword arguments are accepted but unused here
de :: dimension of the word embeddings
cs :: word window context size
'''
# parameters of the model
self.emb = theano.shared(numpy.asarray(emb_tgt).astype(theano.config.floatX))
#Input layer weights
self.Wx = theano.shared(numpy.random.normal(0.0, 1.0,\
(de * cs, nh)).astype(theano.config.floatX))
#Recurrent weight or g(t) in note
self.Wh = theano.shared(numpy.linalg.svd(numpy.random.randn(\
nh, nh))[0].astype(theano.config.floatX))
self.bh = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
#LSTM Gate
#X(t) weights
self.Wi = theano.shared(numpy.random.normal(0.0, 1.0,\
(de * cs, nh)).astype(theano.config.floatX))
self.Wo = theano.shared(numpy.random.normal(0.0, 1.0,\
(de * cs, nh)).astype(theano.config.floatX))
self.Wf = theano.shared(numpy.random.normal(0.0, 1.0,\
(de * cs, nh)).astype(theano.config.floatX))
#H(t-1) weights
self.Ui = theano.shared(numpy.linalg.svd(numpy.random.randn(\
nh, nh))[0].astype(theano.config.floatX))
self.Uo = theano.shared(numpy.linalg.svd(numpy.random.randn(\
nh, nh))[0].astype(theano.config.floatX))
self.Uf = theano.shared(numpy.linalg.svd(numpy.random.randn(\
nh, nh))[0].astype(theano.config.floatX))
#Bias
self.bi = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
self.bo = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
self.bf = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
#output weights and biases
self.W = theano.shared(numpy.random.normal(0.0, 1.0,\
(nh, nc)).astype(theano.config.floatX))
self.b = theano.shared(numpy.zeros(nc, dtype=theano.config.floatX))
#Recurrent memory or h(t-1)
self.h0 = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
self.c0 = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
# bundle
self.params = [ self.emb, self.Wx, self.Wh, self.Wi, self.Wo, self.Wf, self.W, self.Ui, self.Uo, self.Uf, self.bh, self.bi, self.bo, self.bf, self.b, self.h0, self.c0]
self.names = ['emb', 'Wx', 'Wh', 'Wi', 'Wo', 'Wf', 'W', 'Ui', 'Uo', 'Uf', 'bh', 'bi', 'bo', 'bf', 'b', 'h0', 'c0']
idxs = T.imatrix() # as many columns as context window size/lines as words in the sentence
x = self.emb[idxs].reshape((idxs.shape[0], de*cs))
y = T.ivector('y') # label
def recurrence(x_t, h_tm1, c_):
#Gates
#g_i = T.nnet.relu(T.dot(x_t, self.Wi) + T.dot(h_tm1, self.Ui) + self.bi)
#g_o = T.nnet.relu(T.dot(x_t, self.Wo) + T.dot(h_tm1, self.Uo) + self.bo)
#g_f = T.nnet.relu(T.dot(x_t, self.Wf) + T.dot(h_tm1, self.Uf) + self.bf)
g_i = T.nnet.sigmoid(T.dot(x_t, self.Wi) + T.dot(h_tm1, self.Ui) + self.bi)
g_o = T.nnet.sigmoid(T.dot(x_t, self.Wo) + T.dot(h_tm1, self.Uo) + self.bo)
g_f = T.nnet.sigmoid(T.dot(x_t, self.Wf) + T.dot(h_tm1, self.Uf) + self.bf)
g_t = T.tanh(T.dot(x_t, self.Wx) + T.dot(h_tm1, self.Wh) + self.bh)
c = g_f*c_ + g_i*g_t
#c = m_[:, None] * c + (1. - m_)[:, None] * c_
h = g_o*T.tanh(c)
#h = m_[:, None] * h + (1. - m_)[:, None] * h_tm1
s_t = T.nnet.softmax(T.dot(h, self.W) + self.b)
return [h, c, s_t]
[h, c, s_t],_ = theano.scan(fn=recurrence, \
sequences=x, outputs_info=[self.h0, self.c0, None], \
n_steps=x.shape[0],\
truncate_gradient=7)
#p_y_given_x_lastword = s_t[-1,0,:]
p_y_given_x_sentence = s_t[:,0,:]
y_pred = T.argmax(p_y_given_x_sentence, axis=1)
# cost and gradients and learning rate
lr = T.scalar('lr')
nll = -(T.log(s_t[:,0,:])[T.arange(y.shape[0]), y]).sum()
grads = T.grad( nll, self.params )
#updates = OrderedDict(( p, p-lr*g ) for p, g in zip( self.params , gradients))
'''
Adadelta update
'''
tparams = OrderedDict((p,q) for p,q in zip(self.names, self.params))
zipped_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_grad' % k)
for k, p in tparams.items()]
running_up2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rup2' % k)
for k, p in tparams.items()]
running_grads2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rgrad2' % k)
for k, p in tparams.items()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
for rg2, g in zip(running_grads2, grads)]
updir = [-T.sqrt(ru2 + 1e-6) / T.sqrt(rg2 + 1e-6) * zg
for zg, ru2, rg2 in zip(zipped_grads,
running_up2,
running_grads2)]
ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2))
for ru2, ud in zip(running_up2, updir)]
param_up = [(p, p + ud) for p, ud in zip(tparams.values(), updir)]
# theano functions
self.train_grad_shared = theano.function(inputs=[idxs, y, lr], outputs=nll, updates=zgup + rg2up,
on_unused_input='ignore', name='adadelta_train_grad_shared', allow_input_downcast=True)
self.train_update = theano.function([lr], [], updates=ru2up + param_up,
on_unused_input='ignore',
name='adadelta_train_update')
self.classify = theano.function(inputs=[idxs], outputs=y_pred)
self.normalize = theano.function( inputs = [],
updates = {self.emb:\
self.emb/T.sqrt((self.emb**2).sum(axis=1)).dimshuffle(0,'x')})
def save(self, folder):
for param, name in zip(self.params, self.names):
numpy.save(os.path.join(folder, name + '.npy'), param.get_value())
class LSTM_adadelta_bilingual(object):
def __init__(self, nh, nc, de, cs, ne_char=None, de_char=None, ne_src=None,
ne_tgt=None, emb_src=None, emb_tgt=None, max_char=10):
'''
nh :: dimension of the hidden layer
nc :: number of classes
ne_src, ne_tgt :: number of word embeddings in the source and target vocabularies;
the remaining keyword arguments are accepted for interface compatibility but unused here
de :: dimension of the word embeddings
cs :: word window context size
'''
# parameters of the model
self.emb_src = theano.shared(numpy.random.normal(0.0, 0.01,\
(ne_src + 1, de)).astype(theano.config.floatX)) # add one for PADDING at the end
self.emb_tgt = theano.shared(numpy.random.normal(0.0, 0.01,\
(ne_tgt + 1, de)).astype(theano.config.floatX)) # add one for PADDING at the end
normalization_list = [self.emb_src, self.emb_tgt]
#Input layer weights
self.Wx = theano.shared(numpy.random.normal(0.0, 0.01,\
(2 * de * cs, nh)).astype(theano.config.floatX))
#Recurrent weight or g(t) in note
self.Wh = theano.shared(numpy.linalg.svd(numpy.random.randn(\
nh, nh))[0].astype(theano.config.floatX))
self.bh = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
#LSTM Gate
#X(t) weights
self.Wi = theano.shared(numpy.random.normal(0.0, 0.01,\
(2 * de * cs, nh)).astype(theano.config.floatX))
self.Wo = theano.shared(numpy.random.normal(0.0, 0.01,\
(2 * de * cs, nh)).astype(theano.config.floatX))
self.Wf = theano.shared(numpy.random.normal(0.0, 0.01,\
(2 * de * cs, nh)).astype(theano.config.floatX))
#H(t-1) weights
self.Ui = theano.shared(numpy.linalg.svd(numpy.random.randn(\
nh, nh))[0].astype(theano.config.floatX))
self.Uo = theano.shared(numpy.linalg.svd(numpy.random.randn(\
nh, nh))[0].astype(theano.config.floatX))
self.Uf = theano.shared(numpy.linalg.svd(numpy.random.randn(\
nh, nh))[0].astype(theano.config.floatX))
#Bias
self.bi = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
self.bo = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
self.bf = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
#output weights and biases
self.W = theano.shared(numpy.random.normal(0.0, 0.01,\
(nh, nc)).astype(theano.config.floatX))
self.b = theano.shared(numpy.zeros(nc, dtype=theano.config.floatX))
#Recurrent memory or h(t-1)
self.h0 = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
self.c0 = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
# bundle
self.params = [ self.emb_src, self.emb_tgt, self.Wx, self.Wh, self.Wi, self.Wo, self.Wf, self.W,self.Ui, self.Uo, self.Uf, self.bh, self.bi, self.bo, self.bf, self.b, self.h0, self.c0]
self.names = ['emb_src', 'emb_tgt', 'Wx', 'Wh', 'Wi', 'Wo', 'Wf', 'W', 'Ui', 'Uo', 'Uf', 'bh', 'bi', 'bo', 'bf', 'b', 'h0', 'c0']
idxs_src = T.imatrix() # as many columns as context window size/lines as words in the sentence
x_src = self.emb_src[idxs_src].reshape((idxs_src.shape[0], de*cs))
idxs_tgt = T.imatrix() # as many columns as context window size/lines as words in the sentence
x_tgt = self.emb_tgt[idxs_tgt].reshape((idxs_tgt.shape[0], de*cs))
y = T.ivector('y') # label
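# the bilingual variant scans aligned source and target context windows in parallel;
# each step concatenates the two embedded windows into one input before the gates
# (hence the 2 * de * cs input dimension of Wx/Wi/Wo/Wf above)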
def recurrence(x1, x2, h_tm1, c_):
#Gates
#g_i = T.nnet.relu(T.dot(x_t, self.Wi) + T.dot(h_tm1, self.Ui) + self.bi)
#g_o = T.nnet.relu(T.dot(x_t, self.Wo) + T.dot(h_tm1, self.Uo) + self.bo)
#g_f = T.nnet.relu(T.dot(x_t, self.Wf) + T.dot(h_tm1, self.Uf) + self.bf)
x_t = T.concatenate([x1, x2])
g_i = T.nnet.sigmoid(T.dot(x_t, self.Wi) + T.dot(h_tm1, self.Ui) + self.bi)
g_o = T.nnet.sigmoid(T.dot(x_t, self.Wo) + T.dot(h_tm1, self.Uo) + self.bo)
g_f = T.nnet.sigmoid(T.dot(x_t, self.Wf) + T.dot(h_tm1, self.Uf) + self.bf)
g_t = T.tanh(T.dot(x_t, self.Wx) + T.dot(h_tm1, self.Wh) + self.bh)
c = g_f*c_ + g_i*g_t
#c = m_[:, None] * c + (1. - m_)[:, None] * c_
h = g_o*T.tanh(c)
#h = m_[:, None] * h + (1. - m_)[:, None] * h_tm1
s_t = T.nnet.softmax(T.dot(h, self.W) + self.b)
return [h, c, s_t]
[h, c, s_t],_ = theano.scan(fn=recurrence, \
sequences=[x_src, x_tgt], outputs_info=[self.h0, self.c0, None], \
n_steps=x_src.shape[0],\
truncate_gradient=7)
#p_y_given_x_lastword = s_t[-1,0,:]
p_y_given_x_sentence = s_t[:,0,:]
y_pred = T.argmax(p_y_given_x_sentence, axis=1)
# cost and gradients and learning rate
lr = T.scalar('lr')
nll = -(T.log(s_t[:,0,:])[T.arange(y.shape[0]), y]).sum()
grads = T.grad( nll, self.params )
#updates = OrderedDict(( p, p-lr*g ) for p, g in zip( self.params , gradients))
'''
Adadelta update
'''
tparams = OrderedDict((p,q) for p,q in zip(self.names, self.params))
zipped_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_grad' % k)
for k, p in tparams.items()]
running_up2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rup2' % k)
for k, p in tparams.items()]
running_grads2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rgrad2' % k)
for k, p in tparams.items()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
for rg2, g in zip(running_grads2, grads)]
updir = [-T.sqrt(ru2 + 1e-6) / T.sqrt(rg2 + 1e-6) * zg
for zg, ru2, rg2 in zip(zipped_grads,
running_up2,
running_grads2)]
ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2))
for ru2, ud in zip(running_up2, updir)]
param_up = [(p, p + ud) for p, ud in zip(tparams.values(), updir)]
# theano functions
self.train_grad_shared = theano.function(inputs=[idxs_src, idxs_tgt, y, lr], outputs=nll, updates=zgup + rg2up,
on_unused_input='ignore', name='adadelta_train_grad_shared', allow_input_downcast=True)
self.train_update = theano.function([lr], [], updates=ru2up + param_up,
on_unused_input='ignore',
name='adadelta_train_update')
self.classify = theano.function(inputs=[idxs_src, idxs_tgt], outputs=y_pred)
self.normalize = theano.function( inputs = [],
updates = OrderedDict((emb, emb/T.sqrt((emb**2).sum(axis=1)).dimshuffle(0,'x')) for emb in normalization_list))
def save(self, folder):
for param, name in zip(self.params, self.names):
numpy.save(os.path.join(folder, name + '.npy'), param.get_value())
class LSTM_adadelta_bilingual_pretrain(object):
def __init__(self, nh, nc, de, cs, ne_char=None, de_char=None, ne_src=None,
ne_tgt=None, emb_src=None, emb_tgt=None, max_char=10):
'''
nh :: dimension of the hidden layer
nc :: number of classes
emb_src, emb_tgt :: pre-trained source/target embedding matrices whose rows are
word vectors of dimension de; the remaining keyword arguments are unused here
de :: dimension of the word embeddings
cs :: word window context size
'''
# parameters of the model
self.emb_src = theano.shared(numpy.asarray(emb_src).astype(theano.config.floatX))
# add one for PADDING at the end
self.emb_tgt = theano.shared(numpy.asarray(emb_tgt).astype(theano.config.floatX))
# add one for PADDING at the end
normalization_list = [self.emb_src, self.emb_tgt]
#Input layer weights
self.Wx = theano.shared(numpy.random.normal(0.0, 0.01,\
(2 * de * cs, nh)).astype(theano.config.floatX))
#Recurrent weight or g(t) in note
self.Wh = theano.shared(numpy.linalg.svd(numpy.random.randn(\
nh, nh))[0].astype(theano.config.floatX))
self.bh = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
#LSTM Gate
#X(t) weights
self.Wi = theano.shared(numpy.random.normal(0.0, 0.01,\
(2 * de * cs, nh)).astype(theano.config.floatX))
self.Wo = theano.shared(numpy.random.normal(0.0, 0.01,\
(2 * de * cs, nh)).astype(theano.config.floatX))
self.Wf = theano.shared(numpy.random.normal(0.0, 0.01,\
(2 * de * cs, nh)).astype(theano.config.floatX))
#H(t-1) weights
self.Ui = theano.shared(numpy.linalg.svd(numpy.random.randn(\
nh, nh))[0].astype(theano.config.floatX))
self.Uo = theano.shared(numpy.linalg.svd(numpy.random.randn(\
nh, nh))[0].astype(theano.config.floatX))
self.Uf = theano.shared(numpy.linalg.svd(numpy.random.randn(\
nh, nh))[0].astype(theano.config.floatX))
#Bias
self.bi = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
self.bo = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
self.bf = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
#output weights and biases
self.W = theano.shared(numpy.random.normal(0.0, 0.01,\
(nh, nc)).astype(theano.config.floatX))
self.b = theano.shared(numpy.zeros(nc, dtype=theano.config.floatX))
#Recurrent memory or h(t-1)
self.h0 = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
self.c0 = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
# bundle
self.params = [ self.emb_src, self.emb_tgt, self.Wx, self.Wh, self.Wi, self.Wo, self.Wf, self.W, self.Ui, self.Uo, self.Uf, self.bh, self.bi, self.bo, self.bf, self.b, self.h0, self.c0]
self.names = ['emb_src', 'emb_tgt', 'Wx', 'Wh', 'Wi', 'Wo', 'Wf', 'W', 'Ui', 'Uo', 'Uf', 'bh', 'bi', 'bo', 'bf', 'b', 'h0', 'c0']
idxs_src = T.imatrix() # as many columns as context window size/lines as words in the sentence
x_src = self.emb_src[idxs_src].reshape((idxs_src.shape[0], de*cs))
idxs_tgt = T.imatrix() # as many columns as context window size/lines as words in the sentence
x_tgt = self.emb_tgt[idxs_tgt].reshape((idxs_tgt.shape[0], de*cs))
y = T.ivector('y') # label
def recurrence(x1, x2, h_tm1, c_):
#Gates
#g_i = T.nnet.relu(T.dot(x_t, self.Wi) + T.dot(h_tm1, self.Ui) + self.bi)
#g_o = T.nnet.relu(T.dot(x_t, self.Wo) + T.dot(h_tm1, self.Uo) + self.bo)
#g_f = T.nnet.relu(T.dot(x_t, self.Wf) + T.dot(h_tm1, self.Uf) + self.bf)
x_t = T.concatenate([x1, x2])
g_i = T.nnet.sigmoid(T.dot(x_t, self.Wi) + T.dot(h_tm1, self.Ui) + self.bi)
g_o = T.nnet.sigmoid(T.dot(x_t, self.Wo) + T.dot(h_tm1, self.Uo) + self.bo)
g_f = T.nnet.sigmoid(T.dot(x_t, self.Wf) + T.dot(h_tm1, self.Uf) + self.bf)
g_t = T.tanh(T.dot(x_t, self.Wx) + T.dot(h_tm1, self.Wh) + self.bh)
c = g_f*c_ + g_i*g_t
#c = m_[:, None] * c + (1. - m_)[:, None] * c_
h = g_o*T.tanh(c)
#h = m_[:, None] * h + (1. - m_)[:, None] * h_tm1
s_t = T.nnet.softmax(T.dot(h, self.W) + self.b)
return [h, c, s_t]
[h, c, s_t],_ = theano.scan(fn=recurrence,
sequences=[x_src, x_tgt], outputs_info=[self.h0, self.c0, None],
n_steps=x_src.shape[0],
truncate_gradient=7)
#p_y_given_x_lastword = s_t[-1,0,:]
p_y_given_x_sentence = s_t[:,0,:]
y_pred = T.argmax(p_y_given_x_sentence, axis=1)
# cost and gradients and learning rate
lr = T.scalar('lr')
nll = -(T.log(s_t[:,0,:])[T.arange(y.shape[0]), y]).sum()
grads = T.grad( nll, self.params )
#updates = OrderedDict(( p, p-lr*g ) for p, g in zip( self.params , gradients))
'''
Adadelta update
'''
tparams = OrderedDict((p,q) for p,q in zip(self.names, self.params))
zipped_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_grad' % k)
for k, p in tparams.items()]
running_up2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rup2' % k)
for k, p in tparams.items()]
running_grads2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rgrad2' % k)
for k, p in tparams.items()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
for rg2, g in zip(running_grads2, grads)]
updir = [-T.sqrt(ru2 + 1e-6) / T.sqrt(rg2 + 1e-6) * zg
for zg, ru2, rg2 in zip(zipped_grads,
running_up2,
running_grads2)]
ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2))
for ru2, ud in zip(running_up2, updir)]
param_up = [(p, p + ud) for p, ud in zip(tparams.values(), updir)]
# theano functions
self.train_grad_shared = theano.function(inputs=[idxs_src, idxs_tgt, y, lr],
outputs=nll, updates=zgup + rg2up,
on_unused_input='ignore', name='adadelta_train_grad_shared', allow_input_downcast=True)
self.train_update = theano.function([lr], [], updates=ru2up + param_up,
on_unused_input='ignore',
name='adadelta_train_update')
self.classify = theano.function(inputs=[idxs_src, idxs_tgt], outputs=y_pred)
self.normalize = theano.function( inputs = [],
updates = OrderedDict((emb, emb/T.sqrt((emb**2).sum(axis=1)).dimshuffle(0,'x')) for emb in normalization_list))
def save(self, folder):
for param, name in zip(self.params, self.names):
numpy.save(os.path.join(folder, name + '.npy'), param.get_value())
| gpl-3.0 | 3,313,607,363,037,820,000 | 45.940606 | 193 | 0.522827 | false |
jskinn/robot-vision-experiment-framework | batch_analysis/tasks/tests/test_compare_trials_task.py | 1 | 2374 | # Copyright (c) 2017, John Skinner
import unittest
import numpy as np
import bson
import database.tests.test_entity
import util.dict_utils as du
import batch_analysis.task
import batch_analysis.tasks.compare_trials_task as task
class TestCompareTrialTask(database.tests.test_entity.EntityContract, unittest.TestCase):
def get_class(self):
return task.CompareTrialTask
def make_instance(self, *args, **kwargs):
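# fill in any parameters the caller did not supply with randomised but well-formed values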
kwargs = du.defaults(kwargs, {
'trial_result1_id': bson.ObjectId(),
'trial_result2_id': bson.ObjectId(),
'comparison_id': bson.ObjectId(),
'state': batch_analysis.task.JobState.RUNNING,
'num_cpus': np.random.randint(0, 1000),
'num_gpus': np.random.randint(0, 1000),
'memory_requirements': '{}MB'.format(np.random.randint(0, 50000)),
'expected_duration': '{0}:{1}:{2}'.format(np.random.randint(1000), np.random.randint(60),
np.random.randint(60)),
'node_id': 'node-{}'.format(np.random.randint(10000)),
'job_id': np.random.randint(1000)
})
return task.CompareTrialTask(*args, **kwargs)
def assert_models_equal(self, task1, task2):
"""
Helper to assert that two tasks are equal
We're going to violate encapsulation for a bit
:param task1:
:param task2:
:return:
"""
if (not isinstance(task1, task.CompareTrialTask) or
not isinstance(task2, task.CompareTrialTask)):
self.fail('object was not a CompareTrialTask')
self.assertEqual(task1.identifier, task2.identifier)
self.assertEqual(task1.trial_result1, task2.trial_result1)
self.assertEqual(task1.trial_result2, task2.trial_result2)
self.assertEqual(task1.comparison, task2.comparison)
self.assertEqual(task1._state, task2._state)
self.assertEqual(task1.node_id, task2.node_id)
self.assertEqual(task1.job_id, task2.job_id)
self.assertEqual(task1.result, task2.result)
self.assertEqual(task1.num_cpus, task2.num_cpus)
self.assertEqual(task1.num_gpus, task2.num_gpus)
self.assertEqual(task1.memory_requirements, task2.memory_requirements)
self.assertEqual(task1.expected_duration, task2.expected_duration)
| bsd-2-clause | 6,094,099,744,451,289,000 | 42.962963 | 101 | 0.641112 | false |
aurelieladier/openturns | python/test/t_TriangularFactory_std.py | 4 | 1499 | #! /usr/bin/env python
from __future__ import print_function
from openturns import *
TESTPREAMBLE()
RandomGenerator.SetSeed(0)
try:
distribution = Triangular(1.0, 2.5, 4.0)
size = 10000
sample = distribution.getSample(size)
factory = TriangularFactory()
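# the factory estimates Triangular distribution parameters from the sample; the
# build() overloads with no arguments and with an explicit parameter vector are
# exercised below as well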
estimatedDistribution = factory.build(sample)
print("distribution=", repr(distribution))
print("Estimated distribution=", repr(estimatedDistribution))
estimatedDistribution = factory.build()
print("Default distribution=", estimatedDistribution)
estimatedDistribution = factory.build(
distribution.getParameter())
print("Distribution from parameters=", estimatedDistribution)
estimatedTriangular = factory.buildAsTriangular(sample)
print("Triangular =", distribution)
print("Estimated triangular=", estimatedTriangular)
estimatedTriangular = factory.buildAsTriangular()
print("Default triangular=", estimatedTriangular)
estimatedTriangular = factory.buildAsTriangular(
distribution.getParameter())
print("Triangular from parameters=", estimatedTriangular)
sample = [[0.0]] * size
estimatedDistribution = factory.build(sample)
print("Estimated distribution=", repr(estimatedDistribution))
sample = [[1.0]] * size
estimatedDistribution = factory.build(sample)
print("Estimated distribution=", repr(estimatedDistribution))
except:
import sys
print("t_TriangularFactory_std.py", sys.exc_info()[0], sys.exc_info()[1])
| lgpl-3.0 | 2,667,248,631,232,486,000 | 37.435897 | 77 | 0.727151 | false |
jgehring/Laudio | laudio/src/song/formats/mp3.py | 1 | 2091 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
Laudio - A webbased musicplayer
Copyright (C) 2010 Bernhard Posselt, [email protected]
Laudio is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
Laudio is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Laudio. If not, see <http://www.gnu.org/licenses/>.
"""
import os
from time import time
from mutagen.mp3 import MP3
from mutagen.easyid3 import EasyID3
from mutagen.id3 import ID3NoHeaderError
from laudio.src.song.song import Song
class MP3Song (Song):
def __init__(self, path):
""" Read metainformation from an ogg file
The multiple KeyErrors check if tags are not Null
Keyword arguments:
path -- the full path to the song
"""
self.codec = "mp3"
self.path = path
self.song = MP3(self.path)
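# mutagen's MP3 object exposes stream info (bitrate, length) while EasyID3 maps
# common ID3 frames to simple keys such as 'title', 'artist' and 'tracknumber'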
try:
self.id3 = EasyID3(self.path)
for key in ('title', 'artist', 'album', 'genre', 'date', 'tracknumber'):
attr = self.id3.get(key, ('',))[0]
setattr(self, key, attr.encode("utf-8") )
self.bitrate = int(self.song.info.bitrate) / 1000
self.length = int(self.song.info.length)
# check if tracknumber is numeric
if not self.tracknumber.isdigit():
self.tracknumber = 0
# except no id3 tags
except (ID3NoHeaderError, AttributeError):
for key in ('title', 'artist', 'album', 'genre', 'date'):
setattr(self, key, "")
self.tracknumber = 0
self.bitrate = 0
self.length = 0
self.title = os.path.basename(self.path)
| gpl-3.0 | 3,334,994,844,228,916,000 | 34.440678 | 84 | 0.634146 | false |
Wopple/fimbulvetr | src/client/mapmode_c.py | 1 | 3743 | import os
import sys
import pygame
from pygame.locals import *
from common import mvc
from common.constants import *
from client.constants import *
class Controller(mvc.Controller):
def __init__(self, model=None, screen=None):
super(Controller, self).__init__()
self.shift = [False, False]
def update(self):
if self.model.initialCount == 0:
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
self.model.leftClick()
elif event.button == 3:
self.model.rightClick(True in self.shift)
elif event.button == 4:
self.model.scrollIn()
elif event.button == 5:
self.model.scrollOut()
elif event.type == pygame.KEYDOWN:
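# forward key presses to the model: ESC exits, SPACE calls pausePressed(), the
# number keys call numberKey(1-9), F1/F2 call testKey(1/2), and the arrow/page
# keys call model.key(index, pressed) with indices 0-5 for up/down/left/right/
# page-up/page-down; the shift keys are tracked locally for right-click handling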
if event.key == K_ESCAPE:
sys.exit(0)
elif event.key == K_SPACE:
self.model.pausePressed()
elif event.key == K_1:
self.model.numberKey(1)
elif event.key == K_2:
self.model.numberKey(2)
elif event.key == K_3:
self.model.numberKey(3)
elif event.key == K_4:
self.model.numberKey(4)
elif event.key == K_5:
self.model.numberKey(5)
elif event.key == K_6:
self.model.numberKey(6)
elif event.key == K_7:
self.model.numberKey(7)
elif event.key == K_8:
self.model.numberKey(8)
elif event.key == K_9:
self.model.numberKey(9)
elif event.key == K_UP:
self.model.key(0, True)
elif event.key == K_DOWN:
self.model.key(1, True)
elif event.key == K_LEFT:
self.model.key(2, True)
elif event.key == K_RIGHT:
self.model.key(3, True)
elif event.key == K_PAGEUP:
self.model.key(4, True)
elif event.key == K_PAGEDOWN:
self.model.key(5, True)
elif event.key == K_F1:
self.model.testKey(1)
elif event.key == K_F2:
self.model.testKey(2)
elif event.key == K_LSHIFT:
self.shift[0] = True
elif event.key == K_RSHIFT:
self.shift[1] = True
elif event.type == pygame.KEYUP:
if event.key == K_UP:
self.model.key(0, False)
elif event.key == K_DOWN:
self.model.key(1, False)
elif event.key == K_LEFT:
self.model.key(2, False)
elif event.key == K_RIGHT:
self.model.key(3, False)
elif event.key == K_PAGEUP:
self.model.key(4, False)
elif event.key == K_PAGEDOWN:
self.model.key(5, False)
elif event.key == K_LSHIFT:
self.shift[0] = False
elif event.key == K_RSHIFT:
self.shift[1] = False
elif event.type == pygame.QUIT:
sys.exit(0)
| bsd-3-clause | 4,532,720,471,375,591,000 | 40.131868 | 65 | 0.40716 | false |
asenovm/book-search-server | crawler/chitanka_crawler_get_books_list.py | 1 | 3790 | import requests
import time
import os
import sqlite3
def init_make_request():
global conn
global last_hour
global last_minute
global queries_for_last_minute
global queries_for_last_hour
last_hour = time.time()
last_minute = time.time()
queries_for_last_minute = 0
queries_for_last_hour = 0
conn = sqlite3.connect('chitanka.db')
def make_request(req):
global last_minute
global last_hour
global queries_for_last_minute
global queries_for_last_hour
time.sleep(2)
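# crude client-side throttling: at most ~175 requests per hour and ~18 per minute,
# with a two second pause between requests; when a limit is reached the function
# sleeps until the corresponding window has elapsed and then resets its counter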
while queries_for_last_hour > 175:
delta = time.time() - last_hour
if delta < 3600:
print "queries limit for hour reached, %d minutes remaining" % int(60-delta/60)
time.sleep(60)
else:
last_hour = time.time()
queries_for_last_hour = 0
while queries_for_last_minute > 18:
delta = time.time() - last_minute
if delta < 60:
print "queries limit for minute reached, %d seconds remaining" % int(60-delta)
time.sleep(10)
else:
last_minute = time.time()
queries_for_last_minute = 0
queries_for_last_hour += 1
queries_for_last_minute += 1
proxy = {'http': 'http://93.123.45.23:8008'}
#r = requests.get(req, proxies = proxy)
r = requests.get(req)
return r
def find_books_in_text(text):
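# scan the listing HTML for the epub download marker and take the first quoted
# string after it as the book identifier; identifiers not yet present in the
# books table are inserted with a "not downloaded yet" flag of 0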
global conn
#print text
c = conn.cursor()
ind = 0
ind = text.find('<span>epub</span></a></li>', ind)
while ind != -1:
ind = ind + 26
ind = text.find('"', ind)
ind = ind + 1
book_name = text[ind:text.find('"', ind)]
#print book_name
c.execute('select * from books where name="%s"' % book_name)
if len(c.fetchall()) == 0:
c.execute('insert into books values ("%s", 0)' % book_name)
conn.commit()
print 'new book found: %s' % book_name
ind = text.find('<span>epub</span></a></li>', ind)
c.close()
def main():
global conn
c = conn.cursor()
c.execute('select * from categories')
cats = c.fetchall()
flag = True
for category in cats:
print 'getting books in %s' % str(category[0])
if str(category[0]) == 'savremenni-romani-i-povesti':
flag = False
if flag:
continue
tries = 5
while tries:
try:
tries -= 1
r = make_request('http://www.chitanka.info/books/category/'+category[0])
break
except:
print "exception"
time.sleep(30)
find_books_in_text(r.text)
pagination = r.text.find('<ul class="pagination">')
if pagination != -1:
ind = r.text.find('<li class="next">')
while r.text[ind] != '"':
ind = ind - 1
ind = ind + 2
second_ind = ind + 1
while r.text[second_ind] != '<':
second_ind = second_ind + 1
pages_count = int(r.text[ind:second_ind])
for i in range(1, pages_count):
print 'category page %d' % (i+1)
tries = 5
while tries:
try:
tries -= 1
r = make_request('http://www.chitanka.info/books/category/'+category[0]+'.html/'+str(i+1))
break
except:
print "except"
time.sleep(30)
find_books_in_text(r.text)
c.close()
if __name__ == '__main__':
init_make_request()
main() | mit | -2,583,254,922,830,167,000 | 27.719697 | 114 | 0.492348 | false |
davidsminor/cortex | test/IECore/LinkedSceneTest.py | 1 | 38697 | ##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import gc
import sys
import os
import math
import unittest
import IECore
class LinkedSceneTest( unittest.TestCase ) :
@staticmethod
def compareBBox( box1, box2 ):
errorTolerance = IECore.V3d(1e-5, 1e-5, 1e-5)
boxTmp = IECore.Box3d( box1.min - errorTolerance, box1.max + errorTolerance )
if not boxTmp.contains( box2 ):
return False
boxTmp = IECore.Box3d( box2.min - errorTolerance, box2.max + errorTolerance )
if not boxTmp.contains( box1 ):
return False
return True
def testSupportedExtension( self ) :
self.assertTrue( "lscc" in IECore.SceneInterface.supportedExtensions() )
self.assertTrue( "lscc" in IECore.SceneInterface.supportedExtensions( IECore.IndexedIO.OpenMode.Read ) )
self.assertTrue( "lscc" in IECore.SceneInterface.supportedExtensions( IECore.IndexedIO.OpenMode.Write ) )
self.assertTrue( "lscc" in IECore.SceneInterface.supportedExtensions( IECore.IndexedIO.OpenMode.Write + IECore.IndexedIO.OpenMode.Read ) )
self.assertFalse( "lscc" in IECore.SceneInterface.supportedExtensions( IECore.IndexedIO.OpenMode.Append ) )
def testFactoryFunction( self ):
# test Write factory function
m = IECore.SceneInterface.create( "/tmp/test.lscc", IECore.IndexedIO.OpenMode.Write )
self.assertTrue( isinstance( m, IECore.LinkedScene ) )
self.assertEqual( m.fileName(), "/tmp/test.lscc" )
self.assertRaises( RuntimeError, m.readBound, 0.0 )
del m
# test Read factory function
m = IECore.SceneInterface.create( "/tmp/test.lscc", IECore.IndexedIO.OpenMode.Read )
self.assertTrue( isinstance( m, IECore.LinkedScene ) )
self.assertEqual( m.fileName(), "/tmp/test.lscc" )
m.readBound( 0.0 )
def testConstructors( self ):
# test Read from a previously opened scene.
m = IECore.SceneCache( "test/IECore/data/sccFiles/animatedSpheres.scc", IECore.IndexedIO.OpenMode.Read )
l = IECore.LinkedScene( m )
# test Write mode
m = IECore.LinkedScene( "/tmp/test.lscc", IECore.IndexedIO.OpenMode.Write )
self.assertTrue( isinstance( m, IECore.LinkedScene ) )
self.assertEqual( m.fileName(), "/tmp/test.lscc" )
self.assertRaises( RuntimeError, m.readBound, 0.0 )
del m
# test Read mode
m = IECore.LinkedScene( "/tmp/test.lscc", IECore.IndexedIO.OpenMode.Read )
self.assertTrue( isinstance( m, IECore.LinkedScene ) )
self.assertEqual( m.fileName(), "/tmp/test.lscc" )
m.readBound( 0.0 )
def testAppendRaises( self ) :
self.assertRaises( RuntimeError, IECore.SceneInterface.create, "/tmp/test.lscc", IECore.IndexedIO.OpenMode.Append )
self.assertRaises( RuntimeError, IECore.LinkedScene, "/tmp/test.lscc", IECore.IndexedIO.OpenMode.Append )
def testReadNonExistentRaises( self ) :
self.assertRaises( RuntimeError, IECore.LinkedScene, "iDontExist.lscc", IECore.IndexedIO.OpenMode.Read )
def testLinkAttribute( self ):
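# linkAttributeData() builds the CompoundData stored under the "sceneInterface:link"
# attribute: the linked file name, the root path within that file and, when given,
# the remapped time sample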
self.assertEqual( IECore.LinkedScene.linkAttribute, "sceneInterface:link" )
m = IECore.SceneCache( "test/IECore/data/sccFiles/animatedSpheres.scc", IECore.IndexedIO.OpenMode.Read )
attr = IECore.LinkedScene.linkAttributeData( m )
expectedAttr = IECore.CompoundData(
{
"fileName": IECore.StringData("test/IECore/data/sccFiles/animatedSpheres.scc"),
"root": IECore.InternedStringVectorData( [] )
}
)
self.assertEqual( attr, expectedAttr )
A = m.child("A")
attr = IECore.LinkedScene.linkAttributeData( A )
expectedAttr = IECore.CompoundData(
{
"fileName": IECore.StringData("test/IECore/data/sccFiles/animatedSpheres.scc"),
"root": IECore.InternedStringVectorData( [ 'A' ] )
}
)
self.assertEqual( attr, expectedAttr )
A = m.child("A")
attr = IECore.LinkedScene.linkAttributeData( A, 10.0 )
expectedAttr['time'] = IECore.DoubleData(10.0)
self.assertEqual( attr, expectedAttr )
def testWriting( self ):
generateTestFiles = False # change this to True to recreate the LinkedScene files for other tests.
testFilesSuffix = "_newTags"
if generateTestFiles :
outputPath = "test/IECore/data/sccFiles"
else :
outputPath = "/tmp"
m = IECore.SceneCache( "test/IECore/data/sccFiles/animatedSpheres.scc", IECore.IndexedIO.OpenMode.Read )
A = m.child("A")
l = IECore.LinkedScene( os.path.join(outputPath,"instancedSpheres%s.lscc"%testFilesSuffix), IECore.IndexedIO.OpenMode.Write )
i0 = l.createChild("instance0")
i0.writeLink( m )
i1 = l.createChild("instance1")
i1.writeLink( m )
i1.writeAttribute( "testAttr", IECore.StringData("test"), 0 )
i1.writeTransform( IECore.M44dData( IECore.M44d.createTranslated( IECore.V3d( 1, 0, 0 ) ) ), 0.0 )
i2 = l.createChild("instance2")
i2.writeLink( A )
i2.writeTransform( IECore.M44dData( IECore.M44d.createTranslated( IECore.V3d( 2, 0, 0 ) ) ), 0.0 )
self.assertRaises( RuntimeError, i2.createChild, "cannotHaveChildrenAtLinks" )
i2.writeTags( ["canHaveTagsAtLinks"] )
self.assertRaises( RuntimeError, i2.writeObject, IECore.SpherePrimitive( 1 ), 0.0 ) # cannot save objects at link locations.
b1 = l.createChild("branch1")
b1.writeObject( IECore.SpherePrimitive( 1 ), 0.0 )
self.assertRaises( RuntimeError, b1.writeLink, A )
b2 = l.createChild("branch2")
c2 = b2.createChild("child2")
self.assertRaises( RuntimeError, b2.writeLink, A )
del i0, i1, i2, l, b1, b2, c2
l = IECore.LinkedScene( os.path.join(outputPath,"instancedSpheres%s.lscc"%testFilesSuffix), IECore.IndexedIO.OpenMode.Read )
self.assertEqual( l.numBoundSamples(), 4 )
self.assertEqual( set(l.childNames()), set(['instance0','instance1','instance2','branch1','branch2']) )
i0 = l.child("instance0")
self.assertEqual( i0.numBoundSamples(), 4 )
self.failUnless( LinkedSceneTest.compareBBox( i0.readBoundAtSample(0), IECore.Box3d( IECore.V3d( -1,-1,-1 ), IECore.V3d( 2,2,1 ) ) ) )
self.failUnless( LinkedSceneTest.compareBBox( i0.readBoundAtSample(1), IECore.Box3d( IECore.V3d( -1,-1,-1 ), IECore.V3d( 3,3,1 ) ) ) )
self.failUnless( LinkedSceneTest.compareBBox( i0.readBoundAtSample(2), IECore.Box3d( IECore.V3d( -2,-1,-2 ), IECore.V3d( 4,5,2 ) ) ) )
self.failUnless( LinkedSceneTest.compareBBox( i0.readBoundAtSample(3), IECore.Box3d( IECore.V3d( -3,-1,-3 ), IECore.V3d( 4,6,3 ) ) ) )
self.failUnless( LinkedSceneTest.compareBBox( i0.readBound(0), IECore.Box3d( IECore.V3d( -1,-1,-1 ), IECore.V3d( 2,2,1 ) ) ) )
A = i0.child("A")
self.failUnless( LinkedSceneTest.compareBBox( A.readBoundAtSample(0), IECore.Box3d(IECore.V3d( -1,-1,-1 ), IECore.V3d( 1,1,1 ) ) ) )
self.failUnless( LinkedSceneTest.compareBBox( A.readBoundAtSample(1), IECore.Box3d(IECore.V3d( -1,-1,-1 ), IECore.V3d( 1,1,1 ) ) ) )
self.failUnless( LinkedSceneTest.compareBBox( A.readBoundAtSample(2), IECore.Box3d(IECore.V3d( 0,-1,-1 ), IECore.V3d( 2,1,1 ) ) ) )
self.assertEqual( i0.readTransform( 0 ), IECore.M44dData( IECore.M44d() ) )
i1 = l.child("instance1")
self.assertEqual( i1.numBoundSamples(), 4 )
self.failUnless( LinkedSceneTest.compareBBox( i1.readBoundAtSample(0), IECore.Box3d( IECore.V3d( -1,-1,-1 ), IECore.V3d( 2,2,1 ) ) ) )
self.failUnless( LinkedSceneTest.compareBBox( i1.readBoundAtSample(2), IECore.Box3d( IECore.V3d( -2,-1,-2 ), IECore.V3d( 4,5,2 ) ) ) )
self.failUnless( LinkedSceneTest.compareBBox( i1.readBoundAtSample(3), IECore.Box3d( IECore.V3d( -3,-1,-3 ), IECore.V3d( 4,6,3 ) ) ) )
self.failUnless( LinkedSceneTest.compareBBox( i1.readBound(0), IECore.Box3d( IECore.V3d( -1,-1,-1 ), IECore.V3d( 2,2,1 ) ) ) )
self.assertEqual( i1.readTransform( 0 ), IECore.M44dData( IECore.M44d.createTranslated( IECore.V3d( 1, 0, 0 ) ) ) )
self.assertEqual( i1.readAttribute( "testAttr", 0 ), IECore.StringData("test") )
i2 = l.child("instance2")
self.assertEqual( i2.numBoundSamples(), 3 )
self.failUnless( LinkedSceneTest.compareBBox( i2.readBoundAtSample(0), IECore.Box3d(IECore.V3d( -1,-1,-1 ), IECore.V3d( 1,1,1 ) ) ) )
self.failUnless( LinkedSceneTest.compareBBox( i2.readBoundAtSample(1), IECore.Box3d(IECore.V3d( -1,-1,-1 ), IECore.V3d( 1,1,1 ) ) ) )
self.failUnless( LinkedSceneTest.compareBBox( i2.readBoundAtSample(2), IECore.Box3d(IECore.V3d( 0,-1,-1 ), IECore.V3d( 2,1,1 ) ) ) )
self.assertEqual( i2.readTransform( 0 ), IECore.M44dData( IECore.M44d.createTranslated( IECore.V3d( 2, 0, 0 ) ) ) )
self.assertTrue( i2.hasTag( "canHaveTagsAtLinks", IECore.SceneInterface.TagFilter.EveryTag ) )
self.assertTrue( l.hasTag( "canHaveTagsAtLinks", IECore.SceneInterface.TagFilter.EveryTag ) ) # tags propagate up
self.assertTrue( i2.child("a").hasTag( "canHaveTagsAtLinks", IECore.SceneInterface.TagFilter.EveryTag ) ) # tags at link locations propagate down as well
self.assertEqual( l.scene( [ 'instance0' ] ).path(), [ 'instance0' ] )
self.assertEqual( l.scene( [ 'instance0', 'A' ] ).path(), [ 'instance0', 'A' ] )
self.assertEqual( i0.path(), [ 'instance0' ] )
# test saving a two level LinkedScene
l2 = IECore.LinkedScene( os.path.join(outputPath,"environment%s.lscc"%testFilesSuffix), IECore.IndexedIO.OpenMode.Write )
base = l2.createChild("base")
t1 = base.createChild("test1")
t1.writeLink( l )
t2 = base.createChild("test2")
t2.writeLink( i0 )
t3 = base.createChild("test3")
t3.writeLink( i1 )
t4 = base.createChild("test4")
t4.writeLink( i2 )
t5 = base.createChild("test5")
t5.writeLink( A )
del l2, t1, t2, t3, t4, t5
def testWriteLinkAnimatedTransform( self ):
messageHandler = IECore.CapturingMessageHandler()
with messageHandler :
m = IECore.SceneCache( "test/IECore/data/sccFiles/animatedSpheres.scc", IECore.IndexedIO.OpenMode.Read )
l = IECore.LinkedScene( "/tmp/test.lscc", IECore.IndexedIO.OpenMode.Write )
i0 = l.createChild("instance0")
i0.writeLink( m )
# this was causing a problem upon deleting l, as the first transform sample doesn't coincide with the
# first bound sample in the link
i0.writeTransform( IECore.M44dData( IECore.M44d() ), 5.0 )
i0.writeTransform( IECore.M44dData( IECore.M44d() ), 6.0 )
del i0, l, m
for messageInfo in messageHandler.messages:
if not messageInfo.message.startswith( "Detected ancestor tags" ) :
self.fail( messageInfo.message )
def testTimeRemapping( self ):
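# writing the link attribute at times that differ from the linked scene's own
# sampling retimes the linked animation; the mapping is exposed through the
# "sceneInterface:link.time" attribute and is checked below for double-speed,
# identity and half-speed remappings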
m = IECore.SceneCache( "test/IECore/data/sccFiles/animatedSpheres.scc", IECore.IndexedIO.OpenMode.Read )
l = IECore.LinkedScene( "/tmp/test.lscc", IECore.IndexedIO.OpenMode.Write )
# save animated spheres with double the speed and with offset, using fewer samples (time remapping)
i0 = l.createChild("instance0")
i0.writeAttribute( IECore.LinkedScene.linkAttribute, IECore.LinkedScene.linkAttributeData( m, 0.0 ), 1.0 )
i0.writeAttribute( IECore.LinkedScene.linkAttribute, IECore.LinkedScene.linkAttributeData( m, 3.0 ), 2.0 )
# save animated spheres with same speed and with offset, same samples (time remapping is identity)
i1 = l.createChild("instance1")
i1.writeAttribute( IECore.LinkedScene.linkAttribute, IECore.LinkedScene.linkAttributeData( m, 0.0 ), 1.0 )
i1.writeAttribute( IECore.LinkedScene.linkAttribute, IECore.LinkedScene.linkAttributeData( m, 1.0 ), 2.0 )
i1.writeAttribute( IECore.LinkedScene.linkAttribute, IECore.LinkedScene.linkAttributeData( m, 2.0 ), 3.0 )
i1.writeAttribute( IECore.LinkedScene.linkAttribute, IECore.LinkedScene.linkAttributeData( m, 3.0 ), 4.0 )
# save animated spheres with half the speed, adding more samples to a range of the original (time remapping)
i2 = l.createChild("instance2")
i2.writeAttribute( IECore.LinkedScene.linkAttribute, IECore.LinkedScene.linkAttributeData( m, 0.0 ), 0.0 )
i2.writeAttribute( IECore.LinkedScene.linkAttribute, IECore.LinkedScene.linkAttributeData( m, 0.5 ), 1.0 )
i2.writeAttribute( IECore.LinkedScene.linkAttribute, IECore.LinkedScene.linkAttributeData( m, 1.0 ), 2.0 )
del i0, i1, i2, l
l = IECore.LinkedScene( "/tmp/test.lscc", IECore.IndexedIO.OpenMode.Read )
self.assertEqual( l.numBoundSamples(), 5 )
self.assertEqual( l.hasAttribute( "sceneInterface:link.time" ), False )
i0 = l.child("instance0")
self.assertEqual( i0.hasAttribute( "sceneInterface:link.time" ), True )
self.assertEqual( i0.readAttribute( "sceneInterface:link.time", 1 ).value, 0 )
self.assertEqual( i0.readAttribute( "sceneInterface:link.time", 2 ).value, 3 )
self.assertEqual( i0.numBoundSamples(), 2 )
self.assertEqual( i0.numTransformSamples(), 1 )
self.assertEqual( i0.readTransformAtSample(0), IECore.M44dData() )
A0 = i0.child("A")
self.assertEqual( A0.hasAttribute( "sceneInterface:link.time" ), False )
self.assertEqual( A0.numBoundSamples(), 2 )
self.assertEqual( A0.numTransformSamples(), 2 )
self.failUnless( LinkedSceneTest.compareBBox( A0.readBoundAtSample(0), IECore.Box3d(IECore.V3d( -1,-1,-1 ), IECore.V3d( 1,1,1 ) ) ) )
self.failUnless( LinkedSceneTest.compareBBox( A0.readBoundAtSample(1), IECore.Box3d(IECore.V3d( 0,-1,-1 ), IECore.V3d( 2,1,1 ) ) ) )
self.assertEqual( A0.readTransformAtSample(0), IECore.M44dData( IECore.M44d.createTranslated( IECore.V3d( 1, 0, 0 ) ) ) )
self.assertEqual( A0.readTransformAtSample(1), IECore.M44dData( IECore.M44d.createTranslated( IECore.V3d( 2, 0, 0 ) ) ) )
i1 = l.child("instance1")
self.assertEqual( i1.hasAttribute( "sceneInterface:link.time" ), True )
self.assertEqual( i1.readAttribute( "sceneInterface:link.time", 1 ).value, 0 )
self.assertEqual( i1.readAttribute( "sceneInterface:link.time", 2 ).value, 1 )
self.assertEqual( i1.readAttribute( "sceneInterface:link.time", 3 ).value, 2 )
self.assertEqual( i1.readAttribute( "sceneInterface:link.time", 4 ).value, 3 )
self.assertEqual( i1.numBoundSamples(), 4 )
self.assertEqual( i1.numTransformSamples(), 1 )
A1 = i1.child("A")
self.assertEqual( A1.numTransformSamples(), 4 )
self.assertEqual( A1.readTransformAtSample(0), IECore.M44dData( IECore.M44d.createTranslated( IECore.V3d( 1, 0, 0 ) ) ) )
self.assertEqual( A1.readTransformAtSample(1), IECore.M44dData( IECore.M44d.createTranslated( IECore.V3d( 2, 0, 0 ) ) ) )
self.assertEqual( A1.readTransformAtSample(2), IECore.M44dData( IECore.M44d.createTranslated( IECore.V3d( 2, 0, 0 ) ) ) )
self.assertEqual( A1.readTransformAtSample(3), IECore.M44dData( IECore.M44d.createTranslated( IECore.V3d( 2, 0, 0 ) ) ) )
self.assertEqual( A1.hasAttribute( "sceneInterface:link.time" ), False )
i2 = l.child("instance2")
self.assertEqual( i2.hasAttribute( "sceneInterface:link.time" ), True )
self.assertEqual( i2.readAttribute( "sceneInterface:link.time", 0 ).value, 0 )
self.assertEqual( i2.readAttribute( "sceneInterface:link.time", 1 ).value, 0.5 )
self.assertEqual( i2.readAttribute( "sceneInterface:link.time", 2 ).value, 1 )
self.assertEqual( i2.numBoundSamples(), 3 )
self.assertEqual( i2.numTransformSamples(), 1 )
A2 = i2.child("A")
self.assertEqual( A2.numBoundSamples(), 3 )
self.assertEqual( A2.numTransformSamples(), 3 )
self.assertEqual( A2.readTransform(1.0), IECore.M44dData( IECore.M44d.createTranslated( IECore.V3d( 1.5, 0, 0 ) ) ) )
self.assertEqual( A2.readTransformAtSample(0), IECore.M44dData( IECore.M44d.createTranslated( IECore.V3d( 1, 0, 0 ) ) ) )
self.assertEqual( A2.readTransformAtSample(1), IECore.M44dData( IECore.M44d.createTranslated( IECore.V3d( 1.5, 0, 0 ) ) ) )
self.assertEqual( A2.readTransformAtSample(2), IECore.M44dData( IECore.M44d.createTranslated( IECore.V3d( 2, 0, 0 ) ) ) )
self.assertEqual( A2.hasAttribute( "sceneInterface:link.time" ), False )
def testNestedTimeRemapping( self ):
m = IECore.SceneCache( "test/IECore/data/sccFiles/animatedSpheres.scc", IECore.IndexedIO.OpenMode.Read )
A = m.child("A")
l2 = IECore.LinkedScene( "/tmp/test3.lscc", IECore.IndexedIO.OpenMode.Write )
t2 = l2.createChild("transform2")
i2 = t2.createChild("instance2")
i2.writeAttribute( IECore.LinkedScene.linkAttribute, IECore.LinkedScene.linkAttributeData( m, 0.0 ), 0.0 )
i2.writeAttribute( IECore.LinkedScene.linkAttribute, IECore.LinkedScene.linkAttributeData( m, 2.0 ), 1.0 )
del l2, i2, t2
l2 = IECore.LinkedScene( "/tmp/test3.lscc", IECore.IndexedIO.OpenMode.Read )
l1 = IECore.LinkedScene( "/tmp/test2.lscc", IECore.IndexedIO.OpenMode.Write )
t1 = l1.createChild("transform1")
i1 = t1.createChild("instance1")
i1.writeAttribute( IECore.LinkedScene.linkAttribute, IECore.LinkedScene.linkAttributeData( l2, 0.0 ), 0.0 )
i1.writeAttribute( IECore.LinkedScene.linkAttribute, IECore.LinkedScene.linkAttributeData( l2, 2.0 ), 1.0 )
del l1, i1, t1
l1 = IECore.LinkedScene( "/tmp/test2.lscc", IECore.IndexedIO.OpenMode.Read )
l0 = IECore.LinkedScene( "/tmp/test.lscc", IECore.IndexedIO.OpenMode.Write )
t0 = l0.createChild("transform0")
i0 = t0.createChild("instance0")
i0.writeAttribute( IECore.LinkedScene.linkAttribute, IECore.LinkedScene.linkAttributeData( l1, 0.0 ), 0.0 )
i0.writeAttribute( IECore.LinkedScene.linkAttribute, IECore.LinkedScene.linkAttributeData( l1, 2.0 ), 1.0 )
del l0, i0, t0
l0 = IECore.LinkedScene( "/tmp/test.lscc", IECore.IndexedIO.OpenMode.Read )
l = IECore.LinkedScene( "/tmp/testTop.lscc", IECore.IndexedIO.OpenMode.Write )
t = l.createChild("transform")
i = t.createChild("instance")
i.writeLink( l0 )
del l, i, t
del m, l0, l1, l2
l = IECore.LinkedScene( "/tmp/testTop.lscc", IECore.IndexedIO.OpenMode.Read )
t = l.child("transform")
i = t.child("instance")
t0 = i.child("transform0")
i0 = t0.child("instance0")
t1 = i0.child("transform1")
i1 = t1.child("instance1")
t2 = i1.child("transform2")
i2 = t2.child("instance2")
A = i2.child("A")
# this location shouldn't be retimed:
self.assertEqual( i.hasAttribute( "sceneInterface:link.time" ), True )
self.assertEqual( i.readAttribute( "sceneInterface:link.time", 0.25 ).value, 0.25 )
# this location should be sped up by a factor of 2:
self.assertEqual( i0.hasAttribute( "sceneInterface:link.time" ), True )
self.assertEqual( i0.readAttribute( "sceneInterface:link.time", 0.25 ).value, 0.5 )
# this one is remapped twice, so it's sped up by a factor of 4:
self.assertEqual( i1.hasAttribute( "sceneInterface:link.time" ), True )
self.assertEqual( i1.readAttribute( "sceneInterface:link.time", 0.25 ).value, 1 )
# and this one is remapped three times, so it's sped up by a factor of 8:
self.assertEqual( i2.hasAttribute( "sceneInterface:link.time" ), True )
self.assertEqual( i2.readAttribute( "sceneInterface:link.time", 0.25 ).value, 2 )
# sanity check:
self.assertEqual( i.readAttribute( "sceneInterface:link.time", 0 ).value, 0 )
self.assertEqual( i0.readAttribute( "sceneInterface:link.time", 0 ).value, 0 )
self.assertEqual( i1.readAttribute( "sceneInterface:link.time", 0 ).value, 0 )
self.assertEqual( i2.readAttribute( "sceneInterface:link.time", 0 ).value, 0 )
# test multiple retiming of the transform:
m = IECore.SceneCache( "test/IECore/data/sccFiles/animatedSpheres.scc", IECore.IndexedIO.OpenMode.Read )
Aa = m.child("A")
self.assertEqual( Aa.readTransformAsMatrix( 0.1 ), A.readTransformAsMatrix( 0.1 / 8 ) )
self.assertEqual( Aa.readTransformAsMatrix( 0.2 ), A.readTransformAsMatrix( 0.2 / 8 ) )
self.assertEqual( Aa.readTransformAsMatrix( 0.3 ), A.readTransformAsMatrix( 0.3 / 8 ) )
self.assertEqual( Aa.readTransformAsMatrix( 0.4 ), A.readTransformAsMatrix( 0.4 / 8 ) )
self.assertEqual( Aa.readTransformAsMatrix( 0.5 ), A.readTransformAsMatrix( 0.5 / 8 ) )
self.assertEqual( Aa.readTransformAsMatrix( 0.6 ), A.readTransformAsMatrix( 0.6 / 8 ) )
self.assertEqual( Aa.readTransformAsMatrix( 0.7 ), A.readTransformAsMatrix( 0.7 / 8 ) )
self.assertEqual( Aa.readTransformAsMatrix( 0.8 ), A.readTransformAsMatrix( 0.8 / 8 ) )
self.assertEqual( Aa.readTransformAsMatrix( 0.9 ), A.readTransformAsMatrix( 0.9 / 8 ) )
def readSavedScenes( self, fileVersion ):
def recurseCompare( basePath, virtualScene, realScene, atLink = True ) :
self.assertEqual( basePath, virtualScene.path() )
if atLink :
self.assertEqual( set(virtualScene.readTags(IECore.SceneInterface.TagFilter.DescendantTag)), set(realScene.readTags(IECore.SceneInterface.TagFilter.DescendantTag)) )
else: # attributes and transforms at link location are not loaded.
self.assertEqual( set(virtualScene.attributeNames()), set(realScene.attributeNames()) )
for attr in realScene.attributeNames() :
self.assertTrue( virtualScene.hasAttribute( attr ) )
self.assertEqual( virtualScene.numAttributeSamples(attr), realScene.numAttributeSamples(attr) )
for s in xrange(0,virtualScene.numAttributeSamples(attr)) :
self.assertEqual( virtualScene.readAttributeAtSample(attr, s), realScene.readAttributeAtSample(attr, s) )
self.assertEqual( virtualScene.numTransformSamples(), realScene.numTransformSamples() )
for s in xrange(0,virtualScene.numTransformSamples()) :
self.assertEqual( virtualScene.readTransformAtSample(s), realScene.readTransformAtSample(s) )
self.assertEqual( set(virtualScene.readTags()), set(realScene.readTags()) )
self.assertEqual( set(virtualScene.readTags(IECore.SceneInterface.TagFilter.LocalTag|IECore.SceneInterface.TagFilter.DescendantTag)), set(realScene.readTags(IECore.SceneInterface.TagFilter.LocalTag|IECore.SceneInterface.TagFilter.DescendantTag)) )
self.assertEqual( virtualScene.numBoundSamples(), realScene.numBoundSamples() )
for s in xrange(0,virtualScene.numBoundSamples()) :
self.assertEqual( virtualScene.readBoundAtSample(s), realScene.readBoundAtSample(s) )
self.assertEqual( virtualScene.hasObject(), realScene.hasObject() )
if virtualScene.hasObject() :
self.assertEqual( virtualScene.numObjectSamples(), realScene.numObjectSamples() )
for s in xrange(0,virtualScene.numObjectSamples()) :
self.assertEqual( virtualScene.readObjectAtSample(s), realScene.readObjectAtSample(s) )
self.assertEqual( set(virtualScene.childNames()), set(realScene.childNames()) )
for c in virtualScene.childNames() :
self.assertTrue( virtualScene.hasChild(c) )
recurseCompare( basePath + [ str(c) ], virtualScene.child(c), realScene.child(c), False )
env = IECore.LinkedScene( "test/IECore/data/sccFiles/environment%s.lscc" % fileVersion, IECore.IndexedIO.OpenMode.Read ) # created by testWriting() when generateTestFiles=True and testFilesSuffix is defined.
l = IECore.LinkedScene( "test/IECore/data/sccFiles/instancedSpheres%s.lscc" % fileVersion, IECore.IndexedIO.OpenMode.Read ) # created by testWriting() when generateTestFiles=True and testFilesSuffix is defined.
m = IECore.SceneCache( "test/IECore/data/sccFiles/animatedSpheres.scc", IECore.IndexedIO.OpenMode.Read )
base = env.child('base')
self.assertEqual( set(base.childNames()), set(['test1','test2','test3','test4','test5']) )
test1 = base.child('test1')
self.assertEqual( test1.path(), [ "base", "test1" ] )
recurseCompare( test1.path(), test1, l )
test2 = base.child('test2')
self.assertEqual( test2.path(), [ "base", "test2" ] )
recurseCompare( test2.path(), test2, l.child('instance0') )
test3 = base.child('test3')
self.assertEqual( test3.path(), [ "base", "test3" ] )
recurseCompare( test3.path(), test3, l.child('instance1') )
test4 = base.child('test4')
self.assertEqual( test4.path(), [ "base", "test4" ] )
recurseCompare( test4.path(), test4, l.child('instance2') )
test5 = base.child('test5')
self.assertEqual( test5.path(), [ "base", "test5" ] )
recurseCompare( test5.path(), test5, l.child('instance1').child('A') )
# attributes like sceneInterface:link.root, sceneInterface:link.fileName, and sceneInterface:link.time shouldn't show up at links, although they might be there...
self.assertEqual( test1.child('instance0').attributeNames(), [] )
self.assertEqual( test1.child('instance1').attributeNames(), [ 'testAttr' ] )
self.assertEqual( test1.child('instance2').attributeNames(), [] )
# hasAttribute should tell the truth though...
self.assertEqual( test1.child('instance0').hasAttribute( "sceneInterface:link.fileName" ), True )
self.assertEqual( test1.child('instance0').hasAttribute( "sceneInterface:link.root" ), True )
self.assertEqual( test1.child('instance1').hasAttribute( "sceneInterface:link.fileName" ), True )
self.assertEqual( test1.child('instance1').hasAttribute( "sceneInterface:link.root" ), True )
self.assertEqual( test1.child('instance2').hasAttribute( "sceneInterface:link.fileName" ), True )
self.assertEqual( test1.child('instance2').hasAttribute( "sceneInterface:link.root" ), True )
self.assertEqual( test1.child('instance0').path(), [ "base", "test1", "instance0" ] )
recurseCompare( test1.child('instance0').path(), test1.child('instance0'), m )
recurseCompare( test2.path(), test2, m )
recurseCompare( test3.path(), test3, m )
recurseCompare( test4.path(), test4, m.child('A') )
recurseCompare( test5.path(), test5, m.child('A') )
recurseCompare( test1.path(), env.scene( [ 'base', 'test1' ] ), l )
recurseCompare( test1.path(), env.scene( [ 'base' ] ).child( 'test1' ), l )
def testReadingFormats( self ):
self.readSavedScenes( "" ) # tests first LinkedScene file format, with tags represented under the entry "tags"
self.readSavedScenes( "_newTags" ) # tests second LinkedScene file format, with tags represented in separated entries: "localTags", "descendentTags" and "ancestorTags".
def testTags( self ) :
def testSet( values ):
return set( map( lambda s: IECore.InternedString(s), values ) )
# create a base scene
l = IECore.LinkedScene( "/tmp/test.lscc", IECore.IndexedIO.OpenMode.Write )
a = l.createChild('a')
a.writeTags( [ "testA" ] )
b = l.createChild('b')
b.writeTags( [ "testB" ] )
l.writeTags( [ "tags" ] )
del a, b, l
# now create a linked scene that should inherit the tags from the base one, plus add other ones
l = IECore.LinkedScene( "/tmp/test.lscc", IECore.IndexedIO.OpenMode.Read )
a = l.child('a')
b = l.child('b')
self.assertEqual( set(l.readTags(IECore.SceneInterface.TagFilter.EveryTag)), testSet(["testA", "testB", "tags"]) )
self.assertEqual( set(l.readTags(IECore.SceneInterface.TagFilter.LocalTag)), testSet(["tags"]) )
self.assertEqual( set(a.readTags(IECore.SceneInterface.TagFilter.AncestorTag)), testSet(["tags"]) )
self.assertEqual( set(a.readTags(IECore.SceneInterface.TagFilter.DescendantTag)), set() )
self.assertEqual( set(a.readTags(IECore.SceneInterface.TagFilter.LocalTag)), testSet(["testA"]) )
self.assertEqual( set(b.readTags(IECore.SceneInterface.TagFilter.LocalTag)), testSet(["testB"]) )
self.assertEqual( set(b.readTags(IECore.SceneInterface.TagFilter.LocalTag)), testSet(["testB"]) )
self.assertTrue( l.hasTag("testA", IECore.SceneInterface.TagFilter.EveryTag) )
self.assertTrue( l.hasTag("testB", IECore.SceneInterface.TagFilter.EveryTag) )
self.assertFalse( l.hasTag("testA", IECore.SceneInterface.TagFilter.LocalTag) )
self.assertFalse( l.hasTag("testB", IECore.SceneInterface.TagFilter.LocalTag) )
self.assertTrue( a.hasTag("testA", IECore.SceneInterface.TagFilter.EveryTag) )
self.assertFalse( a.hasTag("testB", IECore.SceneInterface.TagFilter.EveryTag) )
self.assertTrue( b.hasTag("testB", IECore.SceneInterface.TagFilter.EveryTag) )
self.assertFalse( b.hasTag("testA", IECore.SceneInterface.TagFilter.EveryTag) )
l2 = IECore.LinkedScene( "/tmp/test2.lscc", IECore.IndexedIO.OpenMode.Write )
A = l2.createChild('A')
A.writeLink( l )
A.writeTags( ['linkedA'] ) # creating tag after link
B = l2.createChild('B')
# creating a link to a branch of an external file.
messageHandler = IECore.CapturingMessageHandler()
with messageHandler :
# will have warnings as the branch inherits ancestor tags...
B.writeLink( a )
if not len( messageHandler.messages ):
self.fail( "Was expecting a warning message when linking to a location that had ancestor tags!" )
else :
for messageInfo in messageHandler.messages:
if not messageInfo.message.startswith( "Detected ancestor tags" ) :
					self.fail( messageInfo.message )
C = l2.createChild('C')
c = C.createChild('c')
c.writeLink( l )
C.writeTags( [ 'C' ] )
D = l2.createChild('D')
D.writeTags( [ 'D' ] )
D.writeLink( a ) # creating link after tag
del l, a, b, l2, A, B, C, c, D
l2 = IECore.LinkedScene( "/tmp/test2.lscc", IECore.IndexedIO.OpenMode.Read )
A = l2.child("A")
Aa = A.child("a")
B = l2.child("B")
C = l2.child("C")
c = C.child("c")
ca = c.child("a")
D = l2.child("D")
self.assertTrue( l2.hasTag("testA", IECore.SceneInterface.TagFilter.EveryTag) )
self.assertTrue( l2.hasTag("testB", IECore.SceneInterface.TagFilter.EveryTag) )
self.assertFalse( l2.hasTag("t", IECore.SceneInterface.TagFilter.EveryTag) )
self.assertEqual( set(l2.readTags(IECore.SceneInterface.TagFilter.LocalTag)), testSet([]) )
self.assertEqual( set(l2.readTags(IECore.SceneInterface.TagFilter.DescendantTag)), testSet(["testA", "testB","tags", "C", "D","linkedA"]) )
self.assertEqual( set(l2.readTags(IECore.SceneInterface.TagFilter.LocalTag|IECore.SceneInterface.TagFilter.DescendantTag)), testSet(["testA", "testB","tags", "C", "D","linkedA"]) )
self.assertEqual( set(l2.readTags(IECore.SceneInterface.TagFilter.AncestorTag)), testSet([]) )
self.assertEqual( set(A.readTags(IECore.SceneInterface.TagFilter.EveryTag)), testSet(["testA","testB", "tags","linkedA"]) )
self.assertTrue( A.hasTag( "linkedA", IECore.SceneInterface.TagFilter.EveryTag ) )
self.assertTrue( A.hasTag( "tags", IECore.SceneInterface.TagFilter.EveryTag ) )
self.assertTrue( A.hasTag( "testA", IECore.SceneInterface.TagFilter.EveryTag ) )
self.assertTrue( A.hasTag( "testB", IECore.SceneInterface.TagFilter.EveryTag ) )
self.assertFalse( A.hasTag("C", IECore.SceneInterface.TagFilter.EveryTag) )
self.assertEqual( set(A.readTags(IECore.SceneInterface.TagFilter.LocalTag)), testSet(["tags","linkedA"]) )
self.assertEqual( set(Aa.readTags(IECore.SceneInterface.TagFilter.EveryTag)), testSet(["tags","testA", "linkedA"]) )
self.assertEqual( set(Aa.readTags(IECore.SceneInterface.TagFilter.AncestorTag)), testSet(["tags", "linkedA"]) )
self.assertEqual( set(Aa.readTags(IECore.SceneInterface.TagFilter.LocalTag)), testSet(["testA"]) )
self.assertTrue( Aa.hasTag("testA", IECore.SceneInterface.TagFilter.EveryTag) )
self.assertFalse( Aa.hasTag("testB", IECore.SceneInterface.TagFilter.EveryTag) )
self.assertEqual( set(B.readTags(IECore.SceneInterface.TagFilter.EveryTag)), testSet(["testA", "tags"]) ) # should not list "linkedA" as the link pointed to a child location.
self.assertEqual( set(C.readTags(IECore.SceneInterface.TagFilter.EveryTag)), testSet(["testA","testB","tags","C"]) )
self.assertEqual( set(C.readTags(IECore.SceneInterface.TagFilter.LocalTag)), testSet(["C"]) )
self.assertEqual( set(c.readTags(IECore.SceneInterface.TagFilter.EveryTag)), testSet(["C", "testA", "testB","tags"]) )
self.assertEqual( set(c.readTags(IECore.SceneInterface.TagFilter.LocalTag)), testSet(["tags"]) )
self.assertEqual( set(c.readTags(IECore.SceneInterface.TagFilter.DescendantTag)), testSet([ "testA", "testB" ]) )
self.assertEqual( set(ca.readTags(IECore.SceneInterface.TagFilter.LocalTag)), testSet(["testA"]) )
self.assertEqual( set(ca.readTags(IECore.SceneInterface.TagFilter.AncestorTag)), testSet(["C", "tags"]) )
self.assertTrue( ca.hasTag("testA", IECore.SceneInterface.TagFilter.EveryTag) )
self.assertFalse( ca.hasTag("testB", IECore.SceneInterface.TagFilter.EveryTag) )
self.assertEqual( set(C.readTags(IECore.SceneInterface.TagFilter.LocalTag)), testSet(["C"]) )
self.assertEqual( set(D.readTags(IECore.SceneInterface.TagFilter.EveryTag)), testSet(["tags", "D", "testA"]) )
def testMissingLinkedScene( self ) :
import shutil
shutil.copyfile( "test/IECore/data/sccFiles/animatedSpheres.scc", "/tmp/toBeRemoved.scc" )
m = IECore.SceneCache( "/tmp/toBeRemoved.scc", IECore.IndexedIO.OpenMode.Read )
A = m.child("A")
l = IECore.LinkedScene( "/tmp/test.lscc", IECore.IndexedIO.OpenMode.Write )
i0 = l.createChild("instance0")
i0.writeLink( m )
i1 = l.createChild("instance1")
i1.writeLink( m )
i1.writeTransform( IECore.M44dData( IECore.M44d.createTranslated( IECore.V3d( 1, 0, 0 ) ) ), 0.0 )
i2 = l.createChild("instance2")
i2.writeLink( A )
i2.writeTransform( IECore.M44dData( IECore.M44d.createTranslated( IECore.V3d( 2, 0, 0 ) ) ), 0.0 )
del i0, i1, i2, l, m, A
l = IECore.LinkedScene( "/tmp/test.lscc", IECore.IndexedIO.OpenMode.Read )
self.assertEqual( sorted(l.childNames()), [ "instance0", "instance1", "instance2" ] )
i0 = l.child( "instance0" )
self.assertEqual( sorted(i0.childNames()), [ "A", "B" ] )
i1 = l.child( "instance1" )
self.assertEqual( sorted(i1.childNames()), [ "A", "B" ] )
i2 = l.child( "instance2" )
self.assertEqual( i2.childNames(), [ "a" ] )
del l, i0, i1, i2
os.remove( "/tmp/toBeRemoved.scc" )
IECore.SharedSceneInterfaces.clear()
l = IECore.LinkedScene( "/tmp/test.lscc", IECore.IndexedIO.OpenMode.Read )
self.assertEqual( sorted(l.childNames()), [ "instance0", "instance1", "instance2" ] )
i0 = l.child( "instance0" )
self.assertEqual( i0.childNames(), [] )
i1 = l.child( "instance1" )
self.assertEqual( i1.childNames(), [] )
i2 = l.child( "instance2" )
self.assertEqual( i2.childNames(), [] )
def testLinkBoundTransformMismatch( self ) :
scene = IECore.SceneCache( "/tmp/test.scc", IECore.IndexedIO.OpenMode.Write )
child = scene.createChild( "child" )
mesh = IECore.MeshPrimitive.createBox( IECore.Box3f( IECore.V3f( 0 ), IECore.V3f( 1 ) ) )
child.writeObject( mesh, 0 )
del scene, child
child = IECore.SceneCache( "/tmp/test.scc", IECore.IndexedIO.OpenMode.Read ).child( "child" )
linked = IECore.LinkedScene( "/tmp/test.lscc", IECore.IndexedIO.OpenMode.Write )
parent = linked.createChild( "parent" )
transform = IECore.M44dData( IECore.M44d.createTranslated( IECore.V3d( 1, 0, 0 ) ) )
parent.writeTransform( transform, 1.0 )
parent.writeObject( mesh, 1.0 )
childLink = parent.createChild( "childLink" )
childLink.writeTransform( transform, 1.0 )
childLink.writeAttribute( IECore.LinkedScene.linkAttribute, IECore.LinkedScene.linkAttributeData( child ), 1.0 )
del linked, parent, child, childLink
linked = IECore.LinkedScene( "/tmp/test.lscc", IECore.IndexedIO.OpenMode.Read )
parent = linked.child( "parent" )
childLink = parent.child( "childLink" )
# there are 2 bound samples, because the link has bounds at time 0, but a transform at time 1
self.assertEqual( linked.numBoundSamples(), 2 )
self.assertEqual( parent.numBoundSamples(), 2 )
self.assertEqual( childLink.numBoundSamples(), 1 )
self.assertEqual( linked.numTransformSamples(), 1 )
self.assertEqual( parent.numTransformSamples(), 1 )
self.assertEqual( childLink.numTransformSamples(), 1 )
# object at the origin
self.failUnless( LinkedSceneTest.compareBBox( childLink.readBoundAtSample( 0 ), IECore.Box3d( IECore.V3d( 0 ), IECore.V3d( 1 ) ) ) )
# transformed the childLink by ( 1, 0, 0 ) and added an object at the origin
self.assertEqual( childLink.readTransformAtSample( 0 ), transform )
self.failUnless( LinkedSceneTest.compareBBox( parent.readBoundAtSample( 0 ), IECore.Box3d( IECore.V3d( 0 ), IECore.V3d( 2, 1, 1 ) ) ) )
self.failUnless( LinkedSceneTest.compareBBox( parent.readBoundAtSample( 0 ), parent.readBoundAtSample( 1 ) ) )
# transformed the parent by ( 1, 0, 0 )
self.assertEqual( parent.readTransformAtSample( 0 ), transform )
self.failUnless( LinkedSceneTest.compareBBox( linked.readBoundAtSample( 0 ), IECore.Box3d( IECore.V3d( 1, 0, 0 ), IECore.V3d( 3, 1, 1 ) ) ) )
self.failUnless( LinkedSceneTest.compareBBox( linked.readBoundAtSample( 0 ), linked.readBoundAtSample( 1 ) ) )
def testMemoryIndexedIOReadWrite( self ) :
		# create initial file structure in memory:
mio = IECore.MemoryIndexedIO( IECore.CharVectorData(), IECore.IndexedIO.OpenMode.Write )
# write to the actual linkedscene:
scc = IECore.SceneCache( mio )
l = IECore.LinkedScene( scc )
c0 = l.createChild("child0")
c1 = l.createChild("child1")
c0.writeAttribute( "testAttr", IECore.StringData("test0"), 0 )
c1.writeAttribute( "testAttr", IECore.StringData("test1"), 0 )
# write the "file" to memory
del l, scc, c0, c1
# can we read it back again?
mioData = mio.buffer()
mio = IECore.MemoryIndexedIO( mioData, IECore.IndexedIO.OpenMode.Read )
scc = IECore.SceneCache( mio )
l = IECore.LinkedScene( scc )
self.assertEqual( set( l.childNames() ), set( ["child0", "child1"] ) )
# no write access!
self.assertRaises( RuntimeError, l.createChild, "child2" )
c0 = l.child("child0")
c1 = l.child("child1")
self.assertEqual( c0.readAttribute( "testAttr", 0 ), IECore.StringData( "test0" ) )
self.assertEqual( c1.readAttribute( "testAttr", 0 ), IECore.StringData( "test1" ) )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | 6,922,035,895,185,708,000 | 51.222672 | 251 | 0.716671 | false |
aashish24/dataset | dataset/persistence/table.py | 1 | 13492 | import logging
from itertools import count
from sqlalchemy.sql import and_, expression
from sqlalchemy.schema import Column, Index
from dataset.persistence.util import guess_type
from dataset.persistence.util import ResultIter
from dataset.util import DatasetException
log = logging.getLogger(__name__)
class Table(object):
def __init__(self, database, table):
self.indexes = dict([(i.name, i) for i in table.indexes])
self.database = database
self.table = table
self._is_dropped = False
@property
def columns(self):
"""
Get a listing of all columns that exist in the table.
>>> print 'age' in table.columns
True
"""
return set(self.table.columns.keys())
def drop(self):
"""
Drop the table from the database, deleting both the schema
and all the contents within it.
Note: the object will raise an Exception if you use it after
dropping the table. If you want to re-create the table, make
sure to get a fresh instance from the :py:class:`Database <dataset.Database>`.
"""
self.database._acquire()
self._is_dropped = True
self.database._tables.pop(self.table.name, None)
self.table.drop(self.database.engine)
def _check_dropped(self):
if self._is_dropped:
raise DatasetException('the table has been dropped. this object should not be used again.')
def insert(self, row, ensure=True, types={}):
"""
Add a row (type: dict) by inserting it into the table.
        If ``ensure`` is set and any of the keys of the row are not
        table columns, they will be created automatically.
During column creation, ``types`` will be checked for a key
matching the name of a column to be created, and the given
SQLAlchemy column type will be used. Otherwise, the type is
guessed from the row value, defaulting to a simple unicode
field.
::
data = dict(title='I am a banana!')
table.insert(data)
"""
self._check_dropped()
if ensure:
self._ensure_columns(row, types=types)
res = self.database.executable.execute(self.table.insert(row))
return res.lastrowid
def insert_many(self, rows, chunk_size=1000, ensure=True, types={}):
"""
Add many rows at a time, which is significantly faster than adding
        them one by one. By default the rows are processed in chunks of
1000 per commit, unless you specify a different ``chunk_size``.
See :py:meth:`insert() <dataset.Table.insert>` for details on
the other parameters.
::
rows = [dict(name='Dolly')] * 10000
table.insert_many(rows)
"""
def _process_chunk(chunk):
if ensure:
for row in chunk:
self._ensure_columns(row, types=types)
self.table.insert().execute(chunk)
self._check_dropped()
chunk = []
i = 0
for row in rows:
chunk.append(row)
i += 1
if i == chunk_size:
_process_chunk(chunk)
chunk = []
i = 0
if i > 0:
_process_chunk(chunk)
def update(self, row, keys, ensure=True, types={}):
"""
Update a row in the table. The update is managed via
the set of column names stated in ``keys``: they will be
used as filters for the data to be updated, using the values
in ``row``.
::
# update all entries with id matching 10, setting their title columns
data = dict(id=10, title='I am a banana!')
table.update(data, ['id'])
If keys in ``row`` update columns not present in the table,
they will be created based on the settings of ``ensure`` and
``types``, matching the behavior of :py:meth:`insert() <dataset.Table.insert>`.
"""
# check whether keys arg is a string and format as a list
if isinstance(keys, basestring):
keys = [keys]
self._check_dropped()
if not len(keys) or len(keys)==len(row):
return False
clause = [(u, row.get(u)) for u in keys]
"""
Don't update the key itself, so remove any keys from the row dict
"""
for key in keys:
if key in row.keys():
del row[key]
if ensure:
self._ensure_columns(row, types=types)
try:
filters = self._args_to_clause(dict(clause))
stmt = self.table.update(filters, row)
rp = self.database.executable.execute(stmt)
return rp.rowcount > 0
except KeyError:
return False
def upsert(self, row, keys, ensure=True, types={}):
"""
An UPSERT is a smart combination of insert and update. If rows with matching ``keys`` exist
they will be updated, otherwise a new row is inserted in the table.
::
data = dict(id=10, title='I am a banana!')
table.upsert(data, ['id'])
"""
# check whether keys arg is a string and format as a list
if isinstance(keys, basestring):
keys = [keys]
self._check_dropped()
if ensure:
self.create_index(keys)
filters = {}
for key in keys:
filters[key] = row.get(key)
if self.find_one(**filters) is not None:
self.update(row, keys, ensure=ensure, types=types)
else:
self.insert(row, ensure=ensure, types=types)
def delete(self, **_filter):
""" Delete rows from the table. Keyword arguments can be used
to add column-based filters. The filter criterion will always
be equality:
.. code-block:: python
table.delete(place='Berlin')
If no arguments are given, all records are deleted.
"""
self._check_dropped()
if len(_filter) > 0:
q = self._args_to_clause(_filter)
stmt = self.table.delete(q)
else:
stmt = self.table.delete()
self.database.executable.execute(stmt)
def _ensure_columns(self, row, types={}):
for column in set(row.keys()) - set(self.table.columns.keys()):
if column in types:
_type = types[column]
else:
_type = guess_type(row[column])
log.debug("Creating column: %s (%s) on %r" % (column,
_type, self.table.name))
self.create_column(column, _type)
def _args_to_clause(self, args):
self._ensure_columns(args)
clauses = []
for k, v in args.items():
if isinstance(v, list) or isinstance(v, tuple):
clauses.append(self.table.c[k].in_(v))
else:
clauses.append(self.table.c[k] == v)
return and_(*clauses)
def create_column(self, name, type):
"""
        Explicitly create a new column ``name`` of a specified type.
``type`` must be a `SQLAlchemy column type <http://docs.sqlalchemy.org/en/rel_0_8/core/types.html>`_.
::
table.create_column('created_at', sqlalchemy.DateTime)
"""
self._check_dropped()
self.database._acquire()
try:
if name not in self.table.columns.keys():
col = Column(name, type)
col.create(self.table,
connection=self.database.executable)
finally:
self.database._release()
def create_index(self, columns, name=None):
"""
Create an index to speed up queries on a table. If no ``name`` is given a random name is created.
::
table.create_index(['name', 'country'])
"""
self._check_dropped()
if not name:
sig = abs(hash('||'.join(columns)))
name = 'ix_%s_%s' % (self.table.name, sig)
if name in self.indexes:
return self.indexes[name]
try:
self.database._acquire()
columns = [self.table.c[c] for c in columns]
idx = Index(name, *columns)
idx.create(self.database.engine)
except:
idx = None
finally:
self.database._release()
self.indexes[name] = idx
return idx
def find_one(self, **_filter):
"""
Works just like :py:meth:`find() <dataset.Table.find>` but returns one result, or None.
::
row = table.find_one(country='United States')
"""
self._check_dropped()
args = self._args_to_clause(_filter)
query = self.table.select(whereclause=args, limit=1)
rp = self.database.executable.execute(query)
return rp.fetchone()
def _args_to_order_by(self, order_by):
if order_by[0] == '-':
return self.table.c[order_by[1:]].desc()
else:
return self.table.c[order_by].asc()
def find(self, _limit=None, _offset=0, _step=5000,
order_by='id', **_filter):
"""
Performs a simple search on the table. Simply pass keyword arguments as ``filter``.
::
results = table.find(country='France')
results = table.find(country='France', year=1980)
Using ``_limit``::
# just return the first 10 rows
results = table.find(country='France', _limit=10)
You can sort the results by single or multiple columns. Append a minus sign
to the column name for descending order::
# sort results by a column 'year'
results = table.find(country='France', order_by='year')
# return all rows sorted by multiple columns (by year in descending order)
results = table.find(order_by=['country', '-year'])
By default :py:meth:`find() <dataset.Table.find>` will break the
query into chunks of ``_step`` rows to prevent huge tables
from being loaded into memory at once.
For more complex queries, please use :py:meth:`db.query() <dataset.Database.query>`
instead."""
self._check_dropped()
if isinstance(order_by, (str, unicode)):
order_by = [order_by]
order_by = filter(lambda o: o in self.table.columns, order_by)
order_by = [self._args_to_order_by(o) for o in order_by]
args = self._args_to_clause(_filter)
# query total number of rows first
count_query = self.table.count(whereclause=args, limit=_limit, offset=_offset)
rp = self.database.executable.execute(count_query)
total_row_count = rp.fetchone()[0]
if _step is None or _step is False or _step == 0:
_step = total_row_count
if total_row_count > _step and len(order_by) == 0:
_step = total_row_count
log.warn("query cannot be broken into smaller sections because it is unordered")
queries = []
for i in count():
qoffset = _offset + (_step * i)
qlimit = _step
if _limit is not None:
qlimit = min(_limit - (_step * i), _step)
if qlimit <= 0:
break
if qoffset > total_row_count:
break
queries.append(self.table.select(whereclause=args, limit=qlimit,
offset=qoffset, order_by=order_by))
return ResultIter((self.database.executable.execute(q) for q in queries))
def __len__(self):
"""
Returns the number of rows in the table.
"""
d = self.database.query(self.table.count()).next()
return d.values().pop()
def distinct(self, *columns, **_filter):
"""
        Returns all rows of a table, but removes rows with duplicate values in ``columns``.
        Internally this creates a `DISTINCT statement <http://www.w3schools.com/sql/sql_distinct.asp>`_.
::
# returns only one row per year, ignoring the rest
table.distinct('year')
# works with multiple columns, too
table.distinct('year', 'country')
# you can also combine this with a filter
table.distinct('year', country='China')
"""
self._check_dropped()
qargs = []
try:
columns = [self.table.c[c] for c in columns]
for col, val in _filter.items():
qargs.append(self.table.c[col] == val)
except KeyError:
return []
q = expression.select(columns, distinct=True,
whereclause=and_(*qargs),
order_by=[c.asc() for c in columns])
return self.database.query(q)
def all(self):
"""
Returns all rows of the table as simple dictionaries. This is simply a shortcut
to *find()* called with no arguments.
::
rows = table.all()"""
return self.find()
def __iter__(self):
"""
        Allows for iterating over all rows in the table without explicitly
calling :py:meth:`all() <dataset.Table.all>`.
::
for row in table:
print row
"""
return self.all()
| mit | 4,836,438,356,623,720,000 | 33.863049 | 109 | 0.552698 | false |
alisonbnt/watchtower | tower/elements/water.py | 1 | 1249 | exports = {
"name": "Water",
"aspects": {
"amulets": [
{
"item": "sailor",
"effect": "health",
"description": "By knowing the tides, your way home will be "
"easier and faster."
},
{
"item": "wizard",
"effect": "hit",
"description": "Water runs in it's own course. Water seeks "
"peace, but can cause harm when needed. "
"More accurate attacks"
}
],
"potions": [
{
"item": "youth",
"effect": "health",
"description": "Water carries away the meaning "
"of time and space. Water can restore life, or "
"destroy it quickly. Less health for opponents"
},
{
"item": "purity",
"effect": "hit",
"description": "Water cleans not only the body but doubt and "
"fear. Raised hit ratio"
}
]
},
"traces": ['gemini', 'libra', 'aquarius']
}
| mit | -1,760,138,044,276,598,500 | 33.694444 | 79 | 0.377102 | false |
wooga/airflow | airflow/providers/apache/hive/sensors/hive_partition.py | 1 | 3014 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.providers.apache.hive.hooks.hive import HiveMetastoreHook
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class HivePartitionSensor(BaseSensorOperator):
"""
Waits for a partition to show up in Hive.
Note: Because ``partition`` supports general logical operators, it
can be inefficient. Consider using NamedHivePartitionSensor instead if
you don't need the full flexibility of HivePartitionSensor.
:param table: The name of the table to wait for, supports the dot
notation (my_database.my_table)
:type table: str
:param partition: The partition clause to wait for. This is passed as
is to the metastore Thrift client ``get_partitions_by_filter`` method,
and apparently supports SQL like notation as in ``ds='2015-01-01'
AND type='value'`` and comparison operators as in ``"ds>=2015-01-01"``
:type partition: str
:param metastore_conn_id: reference to the metastore thrift service
connection id
:type metastore_conn_id: str
"""
template_fields = ('schema', 'table', 'partition',)
ui_color = '#C5CAE9'
@apply_defaults
def __init__(self,
table, partition="ds='{{ ds }}'",
metastore_conn_id='metastore_default',
schema='default',
poke_interval=60 * 3,
*args,
**kwargs):
super().__init__(
poke_interval=poke_interval, *args, **kwargs)
if not partition:
partition = "ds='{{ ds }}'"
self.metastore_conn_id = metastore_conn_id
self.table = table
self.partition = partition
self.schema = schema
def poke(self, context):
if '.' in self.table:
self.schema, self.table = self.table.split('.')
self.log.info(
'Poking for table %s.%s, partition %s', self.schema, self.table, self.partition
)
if not hasattr(self, 'hook'):
hook = HiveMetastoreHook(
metastore_conn_id=self.metastore_conn_id)
return hook.check_for_partition(
self.schema, self.table, self.partition)
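
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the provider itself). The DAG id,
# start date and schedule below are placeholders; only HivePartitionSensor and
# its arguments come from this module.
def _example_hive_partition_dag():
    from datetime import datetime

    from airflow import DAG

    with DAG(dag_id="example_wait_for_partition",
             start_date=datetime(2020, 1, 1),
             schedule_interval="@daily") as dag:
        HivePartitionSensor(
            task_id="wait_for_partition",
            table="my_database.my_table",
            partition="ds='{{ ds }}' AND type='value'",
            metastore_conn_id="metastore_default",
            poke_interval=60 * 3,
        )
    return dag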
| apache-2.0 | 333,490,660,544,972,600 | 39.72973 | 91 | 0.661911 | false |
Heufneutje/PyHeufyBot | heufybot/bot.py | 1 | 4502 | from twisted.internet import reactor
from twisted.internet.task import LoopingCall
from heufybot.config import Config
from heufybot.factory import HeufyBotFactory
from heufybot.modulehandler import ModuleHandler
from heufybot.utils.timeutils import now
import os, shelve, sys
# Try to enable SSL support
try:
from twisted.internet import ssl
except ImportError:
ssl = None
class HeufyBot(object):
def __init__(self, configFile):
self.config = Config(configFile)
self.connectionFactory = HeufyBotFactory(self)
self.log = None
self.moduleHandler = ModuleHandler(self)
self.servers = {}
self.storage = None
self.storageSync = None
self.startTime = now()
def startup(self):
if ssl is None:
self.log.warn("The PyOpenSSL package was not found. You will not be able to connect to servers using SSL.")
self.log.info("Loading configuration file...")
self.config.loadConfig()
self.log.info("Loading storage...")
self.storage = shelve.open(self.config.itemWithDefault("storage_path", "heufybot.db"))
self.storageSync = LoopingCall(self.storage.sync)
self.storageSync.start(self.config.itemWithDefault("storage_sync_interval", 5), now=False)
self.log.info("Loading modules...")
self.moduleHandler.loadAllModules()
self.log.info("Initiating connections...")
self._initiateConnections()
self.log.info("Starting reactor...")
reactor.run()
def _initiateConnections(self):
for server in self.config["servers"].iterkeys():
self.connectServer(server)
def connectServer(self, host):
if host in self.servers:
error = "A connection to {} was requested, but already exists.".format(host)
self.log.warn(error)
return error
if host not in self.config["servers"]:
error = "A connection to {} was requested, but there is no config data for this server.".format(host)
self.log.warn(error)
return error
port = int(self.config.serverItemWithDefault(host, "port", 6667))
if self.config.serverItemWithDefault(host, "ssl", False):
self.log.info("Attempting secure connection to {host}/{port}...", host=host, port=port)
if ssl is not None:
reactor.connectSSL(host, port, self.connectionFactory, ssl.ClientContextFactory())
else:
self.log.error("Can't connect to {host}/{port}; PyOpenSSL is required to allow secure connections.",
host=host, port=port)
else:
self.log.info("Attempting connection to {host}/{port}...", host=host, port=port)
reactor.connectTCP(host, port, self.connectionFactory)
def disconnectServer(self, host, quitMessage = "Quitting..."):
if host not in self.servers:
error = "A disconnect from {} was requested, but this connection doesn't exist.".format(host)
self.log.warn(error)
return error
self.servers[host].disconnect(quitMessage, True)
def reconnectServer(self, host, quitMessage = "Reconnecting..."):
if host not in self.servers:
error = "A reconnect to {} was requested, but this connection doesn't exist.".format(host)
self.log.warn(error)
return error
self.servers[host].disconnect(quitMessage)
def shutdown(self, quitMessage = "Shutting down..."):
serversCopy = self.servers.copy()
for server in serversCopy.itervalues():
server.disconnect(quitMessage, True)
def restart(self, quitMessage = "Restarting..."):
reactor.addSystemEventTrigger("after", "shutdown", lambda: os.execl(sys.executable, sys.executable, *sys.argv))
self.shutdown(quitMessage)
def countConnections(self):
if len(self.servers) == 0:
self.log.info("No more connections alive, shutting down...")
# If we have any connections that have been started but never finished, stop trying
self.connectionFactory.stopTrying()
self.log.info("Closing storage...")
if self.storageSync.running:
self.storageSync.stop()
self.storage.close()
self.log.info("Unloading modules...")
self.moduleHandler.unloadAllModules()
self.log.info("Stopping reactor...")
reactor.stop()
| mit | -3,110,336,336,444,037,000 | 42.708738 | 119 | 0.637272 | false |
carpedm20/between | between/client.py | 1 | 15097 | # -*- coding: UTF-8 -*-
"""
between.client
~~~~~~~~~~~~~~
This module contains the client for Between.
"""
import json
import requests
import websocket
from uuid import uuid1
from datetime import datetime
from mimetypes import MimeTypes
from random import random, choice
from .utils import make_url
from .models import Person, Message, Image
from .preloads import sticker_tokens
from .exceptions import LoginError, AuthenticateError, MessageError
class Client(object):
"""A client for the Between.
See http://github.com/carpedm20/between for complete
documentation for the API.
"""
def __init__(self, email, password, debug=True, user_agent=None):
"""A client for the Between
:param email: Between account `email`
:param password: Between account password
import between
client = between.Client(email, password)
"""
self.email = email
self.headers = {'User-Agent': 'python-between/1.0.0'}
self.uuid = str(uuid1())
self.me = None
self.lover = None
self._session = requests.Session()
self._request_id = 0
self.login(email, password)
self.start()
def start(self):
self.get_status()
self.set_device()
self.get_endpoints()
self.authenticate()
def get(self, url, payload=None, is_json=True):
r = self._session.get(make_url(url), params=payload, headers=self.headers)
if is_json:
return json.loads(r.text)
else:
return r.text
def post(self, url, files=None, payload=None, is_json=True):
r = self._session.post(make_url(url), data=payload, headers=self.headers, files=files)
if is_json:
return json.loads(r.text)
else:
return r.text
def delete(self, url, files=None, payload=None, is_json=True):
r = self._session.delete(make_url(url), data=payload, headers=self.headers, files=files)
if is_json:
return json.loads(r.text)
else:
return r.text
def login(self, email, password):
"""Login to Between server
:param email: Between account `email`
:param password: Between account password
"""
payload = {
"email" : email,
"password" : password,
"session[type]" : "S_WINDOWS",
"session[name]" : "carpedm20",
}
j = self.get("/authentication/getAccessTokenV2", payload)
if j.has_key("error"):
raise LoginError(j["error"]["message"])
self.access_token = j["access_token"]
self.account_id = j["account_id"]
self.expires_at = j["expires_at"]
self.relationship_id = j["relationship_id"]
self.session_id = j["session_id"] # account_id + "xxx"
self.user_id = j["user_id"]
self.headers["x-between-authorization"] = self.access_token
def authenticate(self):
payload = {
"name" : "basicAuthenticate",
"body" : {
"access_token" : self.access_token,
"user_agent" : "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Between-PC/0.3.1 Safari/537.36",
"device_uuid" : self.uuid
}
}
j = self._send("/authentication", payload)
if not j["m"]["body"]["success"]:
raise AuthenticateError(j)
payload = {"name" : "get"}
j = self._send("/%s/threads" % self.user_id, payload)
data = j["m"]["body"]["data"][0]
self.thread_id = data["id"]
self.chatroom = data["chatroom"]
self.chatroom_id = data["chatroom_id"]
payload = {
"name" : "batch",
"body" : {
"requests":[
{
"objectName" : "SUBSCRIPTIONS",
"path" : "/subscriptions",
"subscriptionsReq":{
"methodName" : "ADD_V4",
"addV4Req":{
"subscriptions":[
{
"path" : "/%s" % self.thread_id,
"recursive":True
},
{
"path" : "/%s" % self.chatroom_id,
"recursive":True
},
{
"path" : "/%s/push" % self.account_id,
"recursive":True,
"push_priority" : "HIGH"
}
]
}
}
},
{
"objectName" : "CHAT_ROOM",
"path" : "/%s" % self.chatroom_id,
"chatRoomReq":{
"methodName" : "GET"
}
}
]
}
}
j = self._send("/batch", payload)
if not j["m"]["body"]["data"][0]["success"]:
raise AuthenticateError(j)
def send(self, content):
"""Send a message
:param content: message content to send
"""
try:
content = content.decode('utf-8')
except:
pass
payload = {
"name" : "batch",
"body" : {
"requests":[
{
"objectName" : "MESSAGES",
"path" : "/%s/messages" % self.thread_id,
"messagesReq" : {
"methodName" : "ADD",
"addReq" : {
"message" : {
"content" : content
}
}
}
},
{
"objectName" : "CHAT_MEMBER_STATE",
"path" : "/chatMemberState",
"chatMemberStateReq" : {
"methodName" : "EDIT",
"editReq" : {
"state_param" : {
"state" : "ST_ACTIVE"
}
}
}
}
]
}
}
j = self._send("/batch", payload)
#if not j["m"]["body"]["data"][0]["success"]:
# raise MessageError(j)
def send_sticker(self, sticker_id=None):
"""Send a sticker
        :param sticker_id: id of the sticker to send (chosen at random if omitted)
"""
if not sticker_id:
sticker_id = choice(sticker_tokens.keys())
try:
token = sticker_tokens[sticker_id]
except:
raise MessageError("Don't have sticker token information of %s" % sticker_id)
payload = {
"name" : "batch",
"body" : {
"requests":[
{
"objectName" : "MESSAGES",
"path" : "/%s/messages" % self.thread_id,
"messagesReq" : {
"methodName" : "ADD",
"addReq" : {
"message" : {
"attachments" : [
{
"attachment_type" : "T_STICKER_V2",
"sticker" : {
"sticker_id" : str(sticker_id),
"sticker_token" : token
}
}
]
}
}
}
},
{
"objectName" : "CHAT_MEMBER_STATE",
"path" : "/chatMemberState",
"chatMemberStateReq" : {
"methodName" : "EDIT",
"editReq" : {
"state_param" : {
"state" : "ST_ACTIVE"
}
}
}
}
]
}
}
j = self._send("/batch", payload)
#if not j["m"]["body"]["data"][0]["success"]:
# raise MessageError(j)
def send_image(self, path=None, image_id=None):
"""Send an image
        :param path: path of the image to upload
        :param image_id: id of an already uploaded image (used instead of path)
"""
if not path and not image_id:
raise MessageError("path or image_id should be passed")
if not image_id:
image_id = self.upload_image(path)._id
payload = {
"name" : "batch",
"body" : {
"requests":[
{
"objectName" : "MESSAGES",
"path" : "/%s/messages" % self.thread_id,
"messagesReq" : {
"methodName" : "ADD",
"addReq" : {
"message" : {
"attachments" : [
{
"attachment_type" : "T_IMAGE",
"reference" : image_id
}
]
}
}
}
},
{
"objectName" : "CHAT_MEMBER_STATE",
"path" : "/chatMemberState",
"chatMemberStateReq" : {
"methodName" : "EDIT",
"editReq" : {
"state_param" : {
"state" : "ST_ACTIVE"
}
}
}
}
]
}
}
j = self._send("/batch", payload)
#if not j["m"]["body"]["data"][0]["success"]:
# raise MessageError(j)
def get_images(self, limit=64):
"""Get uploaded images
:param limit: the maximum number of images
"""
payload = {
"range[limit]" : limit,
"file_types[]" : "FT_IMAGE",
"file_types[]" : "FT_VOUCHER"
}
#j = self.get("/%s/messages/byFileType" % self.thread_id, payload)
url = "/%s/messages/byFileType?range[limit]=%s&file_types[]=FT_IMAGE&file_types[]=FT_VOUCHER" % (self.thread_id, limit)
j = self.get(url)
if j["status"] == "ERROR":
raise MessageError(j)
return j
def get_recent_messages(self, limit=32):
"""Get recent messages
:param limit: the maximum number of messages
"""
payload = {
"name" : "getV4",
"body" : {
"range" : {
"limit" : limit
},
"glimpse" : True
}
}
j = self._send("/%s/messages" % self.thread_id, payload)
recent_messages = []
for message in j["m"]["body"]["data"]:
recent_messages.append(Message(message))
return recent_messages
def mark_read_message(self, message_id):
"""Mark a message as be read
:param message_id: message_id to mark to be read
"""
payload = {
"name" : "readMessages",
"body" : {
"message_id" : message_id
}
}
return self._send("/%s/messages" % self.thread_id, payload)
def _send(self, path, message, c=1, v=1):
"""Send websocket message
:param path: command to execute
:param message: message to send
:param c: (optional) ?
:param v: (optional) ?
"""
message["type"] = "CALL"
payload = {
"c" : c,
"v" : v,
"#" : self._request_id,
"p" : path,
"m" : message
}
msg = str(payload).replace("u'","'").replace("'",'"').replace("True","true")
try:
self._websocket.send(msg)
except:
self.start()
self._websocket.send(msg)
self._request_id += 1
result = self._websocket.recv()
return json.loads(result)
def upload_image(self, path):
"""Upload an image to Between server
:param path: path of file to upload
"""
mime_type = MimeTypes().guess_type(path)[0]
files = {
'file_body': open(path)
}
j = self.post("/%s/files/uploadPhoto" % self.user_id, files=files)
image = Image(j['image'], _id=j['id'])
return image
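    # Illustrative combination of the two image helpers above (assumes an
    # already authenticated ``client``; the file path is a placeholder):
    #
    #     image = client.upload_image("/path/to/photo.jpg")
    #     client.send_image(image_id=image._id)
    #
    # upload_image() only stores the file on the server; send_image() posts
    # it into the message thread.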
def get_status(self):
j = self.get("/%s/views/status" % self.relationship_id)
for user in j['users']:
if user['email'] == self.email:
self.me = Person(user)
else:
self.lover = Person(user)
return j
def get_endpoints(self):
j = self.get("/info/endpoints")
self.message_endpoints = j['message']
self._websocket_url = "%s&access_token=%s" % (j['websocket'][0], self.access_token)
self._websocket = websocket.create_connection(self._websocket_url)
return j
def run_forever(self, on_message, on_error=None, on_close=None):
"""Long polling method
        :param on_message: method that will be executed when a message arrives.
"""
self._websocket_app = websocket.WebSocketApp(self._websocket_url,
on_message = on_message,
on_error = on_error,
on_close = on_close)
self.run_forever_mode = True
self._websocket_app.run_forever()
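    # Illustrative long-polling loop (assumes an authenticated ``client``).
    # The callback signature follows the websocket-client WebSocketApp
    # convention used above:
    #
    #     def on_message(ws, message):
    #         print message
    #
    #     client.run_forever(on_message)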
def set_device(self, os_type="D_WINDOWS"):
payload = {
"type" : os_type
}
j = self.get("/%s/device" % self.session_id, payload)
return j
def delete_session(self):
j = self.delete('/%s/' % self.session_id)
return j
def __del__(self):
j = self.get_status()
j = self.delete_session()
return j['value']
| bsd-3-clause | -5,605,430,828,047,814,000 | 30.783158 | 139 | 0.400411 | false |
nektor211/imgaug | tests/check_noise.py | 1 | 2519 | from __future__ import print_function, division
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from scipy import misc
import numpy as np
from skimage import data
import cv2
def main():
nb_rows = 8
nb_cols = 8
h, w = (128, 128)
sample_size = 128
noise_gens = [
iap.SimplexNoise(),
iap.FrequencyNoise(exponent=-4, size_px_max=sample_size, upscale_method="cubic"),
iap.FrequencyNoise(exponent=-2, size_px_max=sample_size, upscale_method="cubic"),
iap.FrequencyNoise(exponent=0, size_px_max=sample_size, upscale_method="cubic"),
iap.FrequencyNoise(exponent=2, size_px_max=sample_size, upscale_method="cubic"),
iap.FrequencyNoise(exponent=4, size_px_max=sample_size, upscale_method="cubic"),
iap.FrequencyNoise(exponent=(-4, 4), size_px_max=(4, sample_size), upscale_method=["nearest", "linear", "cubic"]),
iap.IterativeNoiseAggregator(
other_param=iap.FrequencyNoise(exponent=(-4, 4), size_px_max=(4, sample_size), upscale_method=["nearest", "linear", "cubic"]),
iterations=(1, 3),
aggregation_method=["max", "avg"]
),
iap.IterativeNoiseAggregator(
other_param=iap.Sigmoid(
iap.FrequencyNoise(exponent=(-4, 4), size_px_max=(4, sample_size), upscale_method=["nearest", "linear", "cubic"]),
threshold=(-10, 10),
activated=0.33,
mul=20,
add=-10
),
iterations=(1, 3),
aggregation_method=["max", "avg"]
)
]
samples = [[] for _ in range(len(noise_gens))]
for _ in range(nb_rows * nb_cols):
for i, noise_gen in enumerate(noise_gens):
samples[i].append(noise_gen.draw_samples((h, w)))
rows = [np.hstack(row) for row in samples]
grid = np.vstack(rows)
misc.imshow((grid*255).astype(np.uint8))
images = [ia.quokka_square(size=(128, 128)) for _ in range(16)]
seqs = [
iaa.SimplexNoiseAlpha(first=iaa.EdgeDetect(1.0)),
iaa.SimplexNoiseAlpha(first=iaa.EdgeDetect(1.0), per_channel=True),
iaa.FrequencyNoiseAlpha(first=iaa.EdgeDetect(1.0)),
iaa.FrequencyNoiseAlpha(first=iaa.EdgeDetect(1.0), per_channel=True)
]
images_aug = []
for seq in seqs:
images_aug.append(np.hstack(seq.augment_images(images)))
images_aug = np.vstack(images_aug)
misc.imshow(images_aug)
if __name__ == "__main__":
main()
| mit | 3,232,499,235,980,668,000 | 37.166667 | 138 | 0.611354 | false |
gusnaughton/orbit | orbit/pyencoding.py | 1 | 4918 | # thrown when there isn't enough data
from _codecs import utf_8_decode
import struct
big_endian, little_endian = range(2)
packEndians = {
big_endian: '>',
little_endian: '<'
}
class ReadException(Exception):
pass
# endian is here for automatic struct.packing purposes
def packByte(b, endian=big_endian):
return struct.pack('%sB' % packEndians[endian], b)
def packSByte(b, endian=big_endian):
    return struct.pack('%sb' % packEndians[endian], b)
def packShort(s, endian=big_endian):
    return struct.pack('%sh' % packEndians[endian], s)
def packUShort(s, endian=big_endian):
return struct.pack('%sH' % packEndians[endian], s)
def packInt(i, endian=big_endian):
return struct.pack('%si' % packEndians[endian], i)
def packUInt(i, endian=big_endian):
return struct.pack('%sI' % packEndians[endian], i)
def packLong(i, endian=big_endian):
return struct.pack('%sq' % packEndians[endian], i)
def packULong(i, endian=big_endian):
return struct.pack('%sQ' % packEndians[endian], i)
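
# Minimal, self-contained sanity check for the pack helpers above. The values
# are arbitrary examples and the function is never called by the module itself.
def _pack_example():
    assert packByte(255) == '\xff'
    assert packUShort(0x1234) == '\x12\x34'  # big-endian by default
    assert packUShort(0x1234, little_endian) == '\x34\x12'
    return packUInt(1) + packULong(1)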
# All of the names of the compiled structs on the reader object
# This makes dealing with endians faster
compiled_struct_names = [
('struct_sbyte', '%sb'),
('struct_short', '%sh'),
('struct_ushort', '%sH'),
('struct_int', '%si'),
('struct_uint', '%sI'),
('struct_long', '%sq'),
('struct_ulong', '%sQ'),
('struct_float', '%sf'),
('struct_double', '%sd'),
('struct_bool', '%s?'),
]
class Reader(object):
def __init__(self, data=None, endian=big_endian):
self.index = 0
self.endian = packEndians[endian]
for (struct_name, struct_prototype) in compiled_struct_names:
setattr(self, struct_name, struct.Struct(struct_prototype % self.endian))
if data is None:
self.data = ''
else:
self.data = data
def addData(self, data):
self.data += data
def has(self, c):
return len(self.data) - self.index >= c
def advance(self, c):
self.index += c
def revert(self):
self.index = 0
def commit(self):
self.data = self.data[self.index:]
self.index = 0
def empty(self):
self.data = ''
self.index = 0
def peekByte(self):
if not self.has(1):
raise ReadException()
return ord(self.data[self.index])
def readByte(self):
if not self.has(1):
raise ReadException()
self.advance(1)
return ord(self.data[self.index - 1])
def readSByte(self):
if not self.has(1):
raise ReadException()
self.advance(1)
return self.struct_sbyte.unpack_from(self.data, self.index - 1)[0]
def readChars(self, count):
if not self.has(count):
raise ReadException()
self.advance(count)
return self.data[self.index - count:self.index]
def readBytes(self, count):
if not self.has(count):
raise ReadException()
self.advance(count)
return [ord(x) for x in list(self.data[self.index - count:self.index])]
def readChar(self):
if not self.has(1):
raise ReadException()
self.advance(1)
        return self.data[self.index - 1]  # data is a str, so this is already a one-character string
def readShort(self):
if not self.has(2):
raise ReadException()
self.advance(2)
return self.struct_short.unpack_from(self.data, self.index - 2)[0]
def readUShort(self):
if not self.has(2):
raise ReadException()
self.advance(2)
return self.struct_ushort.unpack_from(self.data, self.index - 2)[0]
def readInt(self):
if not self.has(4):
raise ReadException()
self.advance(4)
return self.struct_int.unpack_from(self.data, self.index - 4)[0]
def readUInt(self):
if not self.has(4):
raise ReadException()
self.advance(4)
return self.struct_uint.unpack_from(self.data, self.index - 4)[0]
def readLong(self):
if not self.has(8):
raise ReadException()
self.advance(8)
return self.struct_long.unpack_from(self.data, self.index - 8)[0]
def readULong(self):
if not self.has(8):
raise ReadException()
self.advance(8)
return self.struct_ulong.unpack_from(self.data, self.index - 8)[0]
def readFloat(self):
if not self.has(4):
raise ReadException()
self.advance(4)
return self.struct_float.unpack_from(self.data, self.index - 4)[0]
def readDouble(self):
if not self.has(8):
raise ReadException()
self.advance(8)
return self.struct_double.unpack_from(self.data, self.index - 8)[0]
def readBool(self):
if not self.has(1):
raise ReadException()
self.advance(1)
return self.struct_bool.unpack_from(self.data, self.index - 1)[0]
def readCharArray(self, len_func):
if hasattr(len_func, '__call__'):
l = len_func()
else:
l = len_func
return self.readChars(l)
def readArray(self, len_func, data_func, data_len=None):
if hasattr(len_func, '__call__'):
l = len_func()
else:
l = len_func
if data_len is not None and not self.has(l * data_len):
raise ReadException()
ret = []
for i in range(l):
ret.append(data_func())
return ret
def readUTF8(self):
l = self.readUShort()
if not self.has(l):
raise ReadException()
ret = utf_8_decode(self.data[self.index:self.index + l])[0]
self.advance(l)
return ret
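
# Illustrative round trip through Reader using the pack helpers above. The
# payload layout (ushort length + raw chars + uint) is an arbitrary example,
# not a protocol defined by this module.
def _reader_example():
    payload = packUShort(5) + 'hello' + packUInt(42)
    r = Reader(payload)
    length = r.readUShort()      # 5
    text = r.readChars(length)   # 'hello'
    number = r.readUInt()        # 42
    r.commit()                   # drop the consumed bytes
    return length, text, number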
| mit | -5,072,918,801,631,369,000 | 21.153153 | 76 | 0.672834 | false |
TobiasLohner/proSoar | prosoar/task/json_writer.py | 1 | 1481 | import json
def write_json_task(task):
database = {}
database['type'] = task.type
database['distance'] = task.distance
database['aat_min_time'] = task.aat_min_time
database['start_max_speed'] = task.start_max_speed
database['start_max_height'] = task.start_max_height
database['start_max_height_ref'] = task.start_max_height_ref
database['finish_min_height'] = task.finish_min_height
database['finish_min_height_ref'] = task.finish_min_height_ref
database['fai_finish'] = task.fai_finish
database['min_points'] = task.min_points
database['max_points'] = task.max_points
database['homogeneous_tps'] = task.homogeneous_tps
database['is_closed'] = task.is_closed
database['task_scored'] = task.task_scored
for key, turnpoint in enumerate(task):
database[key] = {'lon': turnpoint.lon,
'lat': turnpoint.lat,
'name': turnpoint.name,
'id': turnpoint.id,
'comment': turnpoint.comment,
'altitude': turnpoint.altitude,
'type': turnpoint.sector.type,
'radius': turnpoint.sector.radius,
'inner_radius': turnpoint.sector.inner_radius,
'start_radial': turnpoint.sector.start_radial,
'end_radial': turnpoint.sector.end_radial}
return json.dumps(database, indent=1)
| gpl-2.0 | -5,785,154,506,445,901,000 | 41.314286 | 71 | 0.579338 | false |
andymckay/zamboni | mkt/constants/comm.py | 1 | 5367 | from tower import ugettext_lazy as _
# To add a note type:
# - assign it an incremented number (MY_NOTE_TYPE = 42)
# - give it a translation in NOTE_TYPES
# - if adding from amo/log.py, add it to ACTION_MAP
# - add the translation to Commbadge settings
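# Illustrative example of the steps above (MY_NOTE_TYPE and the value 42 are
# placeholders, not real note types):
#   MY_NOTE_TYPE = 42
#   NOTE_TYPES[MY_NOTE_TYPE] = _('My note type')
#   # in amo/log.py: map the action id, e.g. amo.LOG.MY_ACTION.id -> MY_NOTE_TYPE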
# Faith of the seven.
NO_ACTION = 0
APPROVAL = 1
REJECTION = 2
DISABLED = 3
MORE_INFO_REQUIRED = 4
ESCALATION = 5
REVIEWER_COMMENT = 6
RESUBMISSION = 7
APPROVE_VERSION_PRIVATE = 8
ESCALATION_HIGH_ABUSE = 9
ESCALATION_HIGH_REFUNDS = 10
ESCALATION_CLEARED = 11
REREVIEW_CLEARED = 12
SUBMISSION = 13
DEVELOPER_COMMENT = 14
REVIEW_DEVICE_OVERRIDE = 15
REVIEW_FEATURES_OVERRIDE = 16
REREVIEW_MANIFEST_CHANGE = 17
REREVIEW_MANIFEST_URL_CHANGE = 18
REREVIEW_PREMIUM_TYPE_UPGRADE = 19
REREVIEW_DEVICES_ADDED = 20
REREVIEW_FEATURES_CHANGED = 21
REREVIEW_CONTENT_RATING_ADULT = 22
ESCALATION_VIP_APP = 23
ESCALATION_PRERELEASE_APP = 24
PRIORITY_REVIEW_REQUESTED = 25
ADDITIONAL_REVIEW = 26
NOTE_TYPES = {
NO_ACTION: _('No action'),
APPROVAL: _('Approved'),
REJECTION: _('Rejected'),
DISABLED: _('Banned'),
MORE_INFO_REQUIRED: _('More information requested'),
ESCALATION: _('Escalated'),
REVIEWER_COMMENT: _('Comment'),
RESUBMISSION: _('App resubmission'),
APPROVE_VERSION_PRIVATE: _('Approved but private'),
ESCALATION_CLEARED: _('Escalation cleared'),
ESCALATION_HIGH_ABUSE: _('Escalated due to High Abuse Reports'),
ESCALATION_HIGH_REFUNDS: _('Escalated due to High Refund Requests'),
REREVIEW_CLEARED: _('Re-review cleared'),
SUBMISSION: _('App submission notes'),
DEVELOPER_COMMENT: _('Developer comment'),
REVIEW_DEVICE_OVERRIDE: _('Device(s) changed by reviewer'),
REVIEW_FEATURES_OVERRIDE: _('Requirement(s) changed by reviewer'),
REREVIEW_MANIFEST_CHANGE: _('Rereview due to Manifest Change'),
REREVIEW_MANIFEST_URL_CHANGE: _('Rereview due to Manifest URL Change'),
    REREVIEW_PREMIUM_TYPE_UPGRADE: _('Rereview due to Premium Type Upgrade'),
REREVIEW_DEVICES_ADDED: _('Rereview due to Devices Added'),
REREVIEW_FEATURES_CHANGED: _('Rereview due to Requirements Change'),
REREVIEW_CONTENT_RATING_ADULT: _('Rereview due to Adult Content Rating'),
ESCALATION_VIP_APP: _('Escalation due to VIP App'),
    ESCALATION_PRERELEASE_APP: _('Escalation due to Prerelease App'),
PRIORITY_REVIEW_REQUESTED: _('Priority review requested'),
ADDITIONAL_REVIEW: _('Additional review completed'),
}
# Note types only visible by reviewers and not developers.
REVIEWER_NOTE_TYPES = (
ESCALATION,
REVIEWER_COMMENT,
ESCALATION_HIGH_ABUSE,
ESCALATION_HIGH_REFUNDS,
ESCALATION_CLEARED,
REREVIEW_MANIFEST_CHANGE,
REREVIEW_MANIFEST_URL_CHANGE,
REREVIEW_PREMIUM_TYPE_UPGRADE,
REREVIEW_DEVICES_ADDED,
REREVIEW_FEATURES_CHANGED,
REREVIEW_CONTENT_RATING_ADULT,
ESCALATION_VIP_APP,
ESCALATION_PRERELEASE_APP,
PRIORITY_REVIEW_REQUESTED
)
# Note types that can be created through the API view.
API_NOTE_TYPE_WHITELIST = (
NO_ACTION,
REVIEWER_COMMENT,
DEVELOPER_COMMENT,
)
def U_NOTE_TYPES():
return dict((key, unicode(value)) for (key, value) in
NOTE_TYPES.iteritems())
def ACTION_MAP(activity_action):
"""Maps ActivityLog action ids to Commbadge note types."""
import amo
if isinstance(activity_action, amo._LOG):
activity_action = activity_action.id
return {
amo.LOG.APPROVE_VERSION.id: APPROVAL,
amo.LOG.APPROVE_VERSION_PRIVATE.id: APPROVE_VERSION_PRIVATE,
amo.LOG.APP_DISABLED.id: DISABLED,
amo.LOG.ESCALATE_MANUAL.id: ESCALATION,
amo.LOG.ESCALATE_VERSION.id: ESCALATION,
amo.LOG.ESCALATION_VIP_APP.id: ESCALATION,
amo.LOG.ESCALATED_HIGH_ABUSE.id: ESCALATION_HIGH_ABUSE,
amo.LOG.ESCALATED_HIGH_REFUNDS.id: ESCALATION_HIGH_REFUNDS,
amo.LOG.ESCALATION_CLEARED.id: ESCALATION_CLEARED,
amo.LOG.REQUEST_INFORMATION.id: MORE_INFO_REQUIRED,
amo.LOG.REJECT_VERSION.id: REJECTION,
amo.LOG.REREVIEW_CLEARED.id: REREVIEW_CLEARED,
amo.LOG.WEBAPP_RESUBMIT.id: RESUBMISSION,
amo.LOG.COMMENT_VERSION.id: REVIEWER_COMMENT,
amo.LOG.REVIEW_FEATURES_OVERRIDE.id: REVIEW_FEATURES_OVERRIDE,
amo.LOG.REVIEW_DEVICE_OVERRIDE.id: REVIEW_DEVICE_OVERRIDE,
amo.LOG.REREVIEW_MANIFEST_CHANGE.id: REREVIEW_MANIFEST_CHANGE,
amo.LOG.REREVIEW_MANIFEST_URL_CHANGE.id: REREVIEW_MANIFEST_URL_CHANGE,
amo.LOG.REREVIEW_PREMIUM_TYPE_UPGRADE.id:
REREVIEW_PREMIUM_TYPE_UPGRADE,
amo.LOG.REREVIEW_DEVICES_ADDED.id: REREVIEW_DEVICES_ADDED,
amo.LOG.REREVIEW_FEATURES_CHANGED.id: REREVIEW_FEATURES_CHANGED,
amo.LOG.CONTENT_RATING_TO_ADULT.id:
REREVIEW_CONTENT_RATING_ADULT,
amo.LOG.ESCALATION_VIP_APP.id: ESCALATION_VIP_APP,
amo.LOG.ESCALATION_PRERELEASE_APP.id: ESCALATION_PRERELEASE_APP,
amo.LOG.PRIORITY_REVIEW_REQUESTED.id: PRIORITY_REVIEW_REQUESTED,
amo.LOG.PASS_ADDITIONAL_REVIEW.id: ADDITIONAL_REVIEW,
amo.LOG.FAIL_ADDITIONAL_REVIEW.id: ADDITIONAL_REVIEW,
}.get(activity_action, NO_ACTION)
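# Example (illustrative): ACTION_MAP(amo.LOG.APPROVE_VERSION) and
# ACTION_MAP(amo.LOG.APPROVE_VERSION.id) both resolve to APPROVAL; any action
# without a mapping falls back to NO_ACTION.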
# Number of days a token is valid for.
THREAD_TOKEN_EXPIRY = 30
# Number of times a token can be used.
MAX_TOKEN_USE_COUNT = 5
MAX_ATTACH = 10
# Prefix of the reply to address in comm emails.
REPLY_TO_PREFIX = 'commreply+'
| bsd-3-clause | -4,813,002,732,634,742,000 | 35.263514 | 78 | 0.703931 | false |
yudingding6197/fin_script | debug/bs_record.py | 1 | 1577 | #!/usr/bin/env python
# -*- coding:gbk -*-
import sys
import os
import pandas as pd
from openpyxl import Workbook
from openpyxl.reader.excel import load_workbook
# Summarize the trade records under buy_sell; transaction_sum.py in the parent
# directory is recommended instead, since it reports more statistics.
# Main
pindex = len(sys.argv)
if pindex<2:
sys.stderr.write("Usage: " +os.path.basename(sys.argv[0])+ " ´úÂë\n")
exit(0)
code = sys.argv[1]
path = "../buy_sell/"
file_list = []
filter_item = "A1:I1"
for f in os.listdir(path):
if os.path.isfile(path + f) is False:
continue
file_list.append(f)
c_list = ['date','time','code','name','op','vol','price','amount']
df = pd.DataFrame()
st_date = file_list[0][6:12]
ed_date = file_list[-1][6:12]
print st_date,ed_date
for file in file_list:
dt_str = file[6:12]
if dt_str.isdigit() is False:
print "Invalid file(%s) or date(%s)" % (file, dt_str)
continue
sheet_st = 'table'
wb = load_workbook(path+file)
ws = wb.get_sheet_by_name(sheet_st)
for rx in range(2, ws.max_row+1):
w1 = ws.cell(row = rx, column = 1).value
w2 = ws.cell(row = rx, column = 2).value
w3 = ws.cell(row = rx, column = 3).value
w4 = ws.cell(row = rx, column = 4).value
w5 = ws.cell(row = rx, column = 5).value
w6 = ws.cell(row = rx, column = 6).value
w7 = ws.cell(row = rx, column = 7).value
w2 = "%06d" % (w2)
if w2!=code:
continue
temp_list = [int(dt_str),w1,w2,w3,w4,w5,w6,w7]
df1 = pd.DataFrame([temp_list], columns=c_list)
df = df.append(df1)
#print temp_list
if len(df)>0:
filename = "%s%s_S%s-%s_%s.xlsx" %(path, "trade/", code, st_date, ed_date)
df.to_excel(filename)
| gpl-2.0 | -564,167,224,171,468,740 | 26.189655 | 75 | 0.636652 | false |
n3wb13/OpenNfrGui-5.0-1 | lib/python/Plugins/Extensions/Infopanel/Softcam.py | 1 | 4929 | from Components.Console import Console
from os import mkdir, path, remove
from glob import glob
from Components.config import config, ConfigSubsection, ConfigInteger, ConfigText, getConfigListEntry, ConfigSelection, ConfigIP, ConfigYesNo, ConfigSequence, ConfigNumber, NoSave, ConfigEnableDisable, configfile
import os
config.NFRSoftcam.camdir = ConfigText(default = "/usr/emu", fixed_size=False)
config.NFRSoftcam.camconfig = ConfigText(default = "/usr/keys", fixed_size=False)
def getcamcmd(cam):
camname = cam.lower()
xcamname = cam
if getcamscript(camname):
return config.NFRSoftcam.camdir.value + "/" + cam + " start"
elif ".x" in camname:
if "mgcamd" in camname:
return config.NFRSoftcam.camdir.value + "/" + cam
else:
emus=[]
i = 0
for fAdd in glob ('/etc/*.emu'):
searchfile = open(fAdd, "r")
emustart=[]
cam_name = xcamname.strip(".x")
for line in searchfile:
if "binname" in line:
emus.append(line[10:])
if cam_name in emus[i]:
searchemu = open(fAdd, "r")
for line in searchemu:
if "startcam" in line:
emustart.append(line[11:])
emustart = emustart[0].strip()
cam_count_test = emustart.count(" ")
start_emu = emustart.split(" ", 1 )
if (cam_count_test == 0):
return config.NFRSoftcam.camdir.value + "/" + cam
else:
return config.NFRSoftcam.camdir.value + "/" + cam + " " + start_emu[1]
i = i + 1
searchfile.close()
else:
if "oscam" in camname:
return config.NFRSoftcam.camdir.value + "/" + cam + " -bc " + \
config.NFRSoftcam.camconfig.value + "/"
if "doscam" in camname:
return config.NFRSoftcam.camdir.value + "/" + cam + " -bc " + \
config.NFRSoftcam.camconfig.value + "/"
elif "wicard" in camname:
return "ulimit -s 512; " + config.NFRSoftcam.camdir.value + \
"/" + cam + " -d -c " + config.NFRSoftcam.camconfig.value + \
"/wicardd.conf"
elif "camd3" in camname:
return config.NFRSoftcam.camdir.value + "/" + cam + " " + \
config.NFRSoftcam.camconfig.value + "/camd3.config"
elif "mbox" in camname:
return config.NFRSoftcam.camdir.value + "/" + cam + " " + \
config.NFRSoftcam.camconfig.value + "/mbox.cfg"
elif "cccam" in camname:
return config.NFRSoftcam.camdir.value + "/" + cam + " -C " + \
config.NFRSoftcam.camconfig.value + "/CCcam.cfg"
elif "mgcamd" in camname:
os.system("rm /dev/dvb/adapter0/ca1")
os.system("ln -sf 'ca0' '/dev/dvb/adapter0/ca1'")
return config.NFRSoftcam.camdir.value + "/" + cam + " -bc " + \
config.NFRSoftcam.camconfig.value + "/"
elif "mpcs" in camname:
return config.NFRSoftcam.camdir.value + "/" + cam + " -c " + \
config.NFRSoftcam.camconfig.value
elif "newcs" in camname:
return config.NFRSoftcam.camdir.value + "/" + cam + " -C " + \
config.NFRSoftcam.camconfig.value + "/newcs.conf"
elif "vizcam" in camname:
return config.NFRSoftcam.camdir.value + "/" + cam + " -b -c " + \
config.NFRSoftcam.camconfig.value + "/"
elif "rucam" in camname:
return config.NFRSoftcam.camdir.value + "/" + cam + " -b"
elif "scam" in camname and not "oscam" in camname:
return config.NFRSoftcam.camdir.value + "/" + cam + " -s " + \
config.NFRSoftcam.camconfig.value + "/"
else:
return config.NFRSoftcam.camdir.value + "/" + cam
def getcamscript(cam):
cam = cam.lower()
if cam.endswith('.sh') or cam.startswith('softcam') or \
cam.startswith('cardserver'):
return True
else:
return False
def stopcam(cam):
if getcamscript(cam):
cmd = config.NFRSoftcam.camdir.value + "/" + cam + " stop"
else:
cmd = "killall -15 " + cam
Console().ePopen(cmd)
print "[NFR-SoftCam Manager] stopping", cam
try:
remove("/tmp/ecm.info")
except:
pass
def __createdir(list):
dir = ""
for line in list[1:].split("/"):
dir += "/" + line
if not path.exists(dir):
try:
mkdir(dir)
except:
print "[NFR-SoftCam Manager] Failed to mkdir", dir
def checkconfigdir():
if not path.exists(config.NFRSoftcam.camconfig.value):
__createdir("/usr/keys")
config.NFRSoftcam.camconfig.value = "/usr/keys"
config.NFRSoftcam.camconfig.save()
if not path.exists(config.NFRSoftcam.camdir.value):
if path.exists("/usr/emu"):
config.NFRSoftcam.camdir.value = "/usr/emu"
else:
__createdir("/usr/emu")
config.NFRSoftcam.camdir.value = "/usr/emu"
config.NFRSoftcam.camdir.save()
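# Illustrative sketch (added note, not part of the original plugin): the
# expected call pattern from the UI code would be roughly
#
#   checkconfigdir()
#   cmd = getcamcmd("oscam")     # -> "/usr/emu/oscam -bc /usr/keys/" with defaults
#   Console().ePopen(cmd)
#   ...
#   stopcam("oscam")             # -> "killall -15 oscam"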
| gpl-2.0 | -4,713,683,582,130,165,000 | 38.75 | 213 | 0.577602 | false |
simsong/grr-insider | gui/plugins/inspect_view_test.py | 1 | 2579 | #!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""Test the inspect interface."""
from grr.gui import runtests_test
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import test_lib
class TestInspectView(test_lib.GRRSeleniumTest):
"""Test the inspect interface."""
def testInspect(self):
"""Test the inspect UI."""
with self.ACLChecksDisabled():
self.GrantClientApproval("C.0000000000000001")
self.Open("/")
self.Type("client_query", "0001")
self.Click("client_query_submit")
self.WaitUntilEqual(u"C.0000000000000001",
self.GetText, "css=span[type=subject]")
# Choose client 1
self.Click("css=td:contains('0001')")
self.Click("css=a[grrtarget=LaunchFlows]")
self.Click("css=#_Administrative ins")
self.Click("css=a:contains(Interrogate)")
self.Click("css=button.Launch")
# Open the "Advanced" dropdown.
self.Click("css=a[href='#HostAdvanced']")
# Click on the "Debug client requests".
self.Click("css=a[grrtarget=InspectView]")
self.WaitUntil(self.IsElementPresent, "css=td:contains(GetPlatformInfo)")
# Check that the we can see the requests in the table.
for request in "GetPlatformInfo GetConfig EnumerateInterfaces".split():
self.assertTrue(self.IsElementPresent(
"css=td:contains(%s)" % request))
self.Click("css=td:contains(GetPlatformInfo)")
# Check that the proto is rendered inside the tab.
self.WaitUntil(self.IsElementPresent,
"css=.tab-content td.proto_value:contains(GetPlatformInfo)")
# Check that the request tab is currently selected.
self.assertTrue(
self.IsElementPresent("css=li.active:contains(Request)"))
    # Here we emulate a mock client with no actions (None); this should
    # produce an error.
with self.ACLChecksDisabled():
mock = test_lib.MockClient(rdfvalue.ClientURN("C.0000000000000001"),
None, token=self.token)
while mock.Next():
pass
# Now select the Responses tab:
self.Click("css=li a:contains(Responses)")
self.WaitUntil(self.IsElementPresent, "css=td:contains('flow:response:')")
self.assertTrue(self.IsElementPresent(
"css=.tab-content td.proto_value:contains(GENERIC_ERROR)"))
self.assertTrue(self.IsElementPresent(
"css=.tab-content td.proto_value:contains(STATUS)"))
def main(argv):
# Run the full test suite
runtests_test.SeleniumTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
| apache-2.0 | 6,400,125,217,869,577,000 | 28.306818 | 79 | 0.67119 | false |
weblyzard/ewrt | src/eWRT/ws/geonames/gazetteer/exception.py | 1 | 1588 | #!/usr/bin/env python
"""
@package eWRT.ws.geonames.gazetteer.exception
exceptions related to the gazetteer class
"""
from __future__ import print_function
# (C)opyrights 2009 by Heinz Lang <[email protected]>
# Albert Weichselbraun <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class GazetteerEntryNotFound(Exception):
""" @class GazetteerEntryNotFound
Base class for gazetteer lookup errors
"""
def __init__(self, id, query):
self.id = id
self.query = query
print(id, query)
def __str__(self):
return "Gazetteer lookup for entity-id '%s' failed." % (self.id)
class GazetteerNameNotFound(Exception):
""" @class GazetteerNameNotFound
This exception is thrown if a lookup name has not been found in the gazetteer
"""
def __init__(self, name):
self.name = name
def __str__(self):
return "Gazetteer lookup of name '%s' failed." % (self.name)
| gpl-3.0 | -1,609,358,482,326,160,400 | 31.408163 | 85 | 0.687657 | false |
JeffDestroyerOfWorlds/hydro_examples | advection/advection.py | 1 | 11152 | """
2nd-order accurate finite-volume implementation of linear advection with
piecewise linear slope reconstruction.
We are solving a_t + u a_x = 0
This script defines two classes:
-- the Grid1d class that manages a cell-centered grid and holds the
data that lives on that grid
-- the Simulation class that is built on a Grid1d object and defines
everything needed to do a advection.
Options for several different slope limiters are provided.
M. Zingale
"""
import numpy
import pylab
import math
# helper functions for the limiting
def minmod(a, b):
if (abs(a) < abs(b) and a*b > 0.0):
return a
elif (abs(b) < abs(a) and a*b > 0.0):
return b
else:
return 0.0
def maxmod(a, b):
if (abs(a) > abs(b) and a*b > 0.0):
return a
elif (abs(b) > abs(a) and a*b > 0.0):
return b
else:
return 0.0
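# Worked example (added note, not in the original file): for slopes of the same
# sign minmod returns the one of smaller magnitude and maxmod the larger one;
# slopes of opposite sign give zero:
#
#   minmod(1.0, 2.0)  -> 1.0      maxmod(1.0, 2.0)  -> 2.0
#   minmod(-1.0, 2.0) -> 0.0      maxmod(-1.0, 2.0) -> 0.0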
class Grid1d:
def __init__(self, nx, ng, xmin=0.0, xmax=1.0):
self.ng = ng
self.nx = nx
self.xmin = xmin
self.xmax = xmax
        # python is zero-based. Make easy integers to know where the
        # real data lives
self.ilo = ng
self.ihi = ng+nx-1
# physical coords -- cell-centered, left and right edges
self.dx = (xmax - xmin)/(nx)
self.x = xmin + (numpy.arange(nx+2*ng)-ng+0.5)*self.dx
# storage for the solution
self.a = numpy.zeros((nx+2*ng), dtype=numpy.float64)
def scratch_array(self):
""" return a scratch array dimensioned for our grid """
return numpy.zeros((self.nx+2*self.ng), dtype=numpy.float64)
def fill_BCs(self):
""" fill all single ghostcell with periodic boundary conditions """
n = 0
while n < self.ng:
# left boundary
self.a[self.ilo-1-n] = self.a[self.ihi-n]
# right boundary
self.a[self.ihi+1+n] = self.a[self.ilo+n]
n += 1
def norm(self, e):
""" return the norm of quantity e which lives on the grid """
if not len(e) == (2*self.ng + self.nx):
return None
return numpy.sqrt(self.dx*numpy.sum(e[self.ilo:self.ihi+1]**2))
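# Illustrative sketch (added note, not in the original file): building a small
# grid and exercising the helpers above; the sizes are arbitrary.
#
#   g = Grid1d(nx=8, ng=2)           # cell-centered grid on [0, 1], g.dx = 0.125
#   g.a[:] = 1.0
#   g.fill_BCs()                     # periodic fill of the ng ghostcells per side
#   print len(g.scratch_array())     # 12 = nx + 2*ng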
class Simulation:
def __init__(self, grid, u, C=0.8, slope_type="centered"):
self.grid = grid
self.t = 0.0 # simulation time
self.u = u # the constant advective velocity
self.C = C # CFL number
self.slope_type = slope_type
def init_cond(self, type="tophat"):
""" initialize the data """
if type == "tophat":
self.grid.a[:] = 0.0
self.grid.a[numpy.logical_and(self.grid.x >= 0.333,
self.grid.x <= 0.666)] = 1.0
elif type == "sine":
self.grid.a[:] = numpy.sin(2.0*math.pi*self.grid.x/(self.grid.xmax-self.grid.xmin))
elif type == "gaussian":
self.grid.a[:] = 1.0 + numpy.exp(-60.0*(self.grid.x - 0.5)**2)
def timestep(self):
""" return the advective timestep """
return self.C*self.grid.dx/self.u
def period(self):
""" return the period for advection with velocity u """
return (self.grid.xmax - self.grid.xmin)/self.u
def states(self, dt):
""" compute the left and right interface states """
# compute the piecewise linear slopes
g = self.grid
slope = g.scratch_array()
if self.slope_type == "godunov":
# piecewise constant = 0 slopes
slope[:] = 0.0
elif self.slope_type == "centered":
# unlimited centered difference slopes
i = g.ilo-1
while i <= g.ihi+1:
slope[i] = 0.5*(g.a[i+1] - g.a[i-1])/g.dx
i += 1
elif self.slope_type == "minmod":
# minmod limited slope
i = g.ilo-1
while i <= g.ihi+1:
slope[i] = minmod( (g.a[i] - g.a[i-1])/g.dx,
(g.a[i+1] - g.a[i])/g.dx )
i += 1
elif self.slope_type == "MC":
# MC limiter
i = g.ilo-1
while i <= g.ihi+1:
slope[i] = minmod(minmod( 2.0*(g.a[i] - g.a[i-1])/g.dx,
2.0*(g.a[i+1] - g.a[i])/g.dx ),
0.5*(g.a[i+1] - g.a[i-1])/g.dx)
i += 1
elif self.slope_type == "superbee":
# superbee limiter
i = g.ilo-1
while i <= g.ihi+1:
A = minmod( (g.a[i+1] - g.a[i])/g.dx,
2.0*(g.a[i] - g.a[i-1])/g.dx )
B = minmod( (g.a[i] - g.a[i-1])/g.dx,
2.0*(g.a[i+1] - g.a[i])/g.dx )
slope[i] = maxmod(A, B)
i += 1
        # loop over all the interfaces. Here, i refers to the left
        # interface of the zone. Note that there is one more interface
        # than there are zones
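        # (added note, not in the original) For u > 0 the left state is the
        # upwind one: extrapolate from the center of zone i-1 to the interface
        # and advance half a timestep,
        #   a_{i-1/2,L} = a_{i-1} + 0.5*dx*(1 - u*dt/dx)*slope_{i-1}
        # while the right state comes from zone i,
        #   a_{i-1/2,R} = a_{i} - 0.5*dx*(1 + u*dt/dx)*slope_{i}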
al = g.scratch_array()
ar = g.scratch_array()
i = g.ilo
while i <= g.ihi+1:
            # left state on the current interface comes from zone i-1
            al[i] = g.a[i-1] + 0.5*g.dx*(1.0 - self.u*dt/g.dx)*slope[i-1]
            # right state on the current interface comes from zone i
            ar[i] = g.a[i] - 0.5*g.dx*(1.0 + self.u*dt/g.dx)*slope[i]
            i += 1
return al, ar
def riemann(self, al, ar):
"""
Riemann problem for advection -- this is simply upwinding,
but we return the flux
"""
if self.u > 0.0:
return self.u*al
else:
return self.u*ar
def update(self, dt, flux):
""" conservative update """
g = self.grid
anew = g.scratch_array()
anew[g.ilo:g.ihi+1] = g.a[g.ilo:g.ihi+1] + \
dt/g.dx * (flux[g.ilo:g.ihi+1] - flux[g.ilo+1:g.ihi+2])
return anew
def evolve(self, num_periods=1):
""" evolve the linear advection equation """
self.t = 0.0
g = self.grid
tmax = num_periods*self.period()
# main evolution loop
while (self.t < tmax):
# fill the boundary conditions
g.fill_BCs()
# get the timestep
dt = self.timestep()
if (self.t + dt > tmax):
dt = tmax - self.t
# get the interface states
al, ar = self.states(dt)
# solve the Riemann problem at all interfaces
flux = self.riemann(al, ar)
# do the conservative update
anew = self.update(dt, flux)
g.a[:] = anew[:]
self.t += dt
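# Added note (not in the original file): with smooth initial data the piecewise
# linear reconstruction makes the unlimited "centered" scheme second-order
# accurate, so in the convergence test below halving dx should shrink the L2
# norm of (a_final - a_init) by roughly a factor of four.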
if __name__ == "__main__":
#-------------------------------------------------------------------------
# compare limiting and no-limiting
xmin = 0.0
xmax = 1.0
nx = 64
ng = 2
g = Grid1d(nx, ng, xmin=xmin, xmax=xmax)
u = 1.0
s = Simulation(g, u, C=0.7, slope_type="centered")
s.init_cond("tophat")
ainit = s.grid.a.copy()
s.evolve(num_periods=5)
pylab.plot(g.x[g.ilo:g.ihi+1], g.a[g.ilo:g.ihi+1],
color="r", label="unlimited")
s = Simulation(g, u, C=0.7, slope_type="minmod")
s.init_cond("tophat")
s.evolve(num_periods=5)
pylab.plot(g.x[g.ilo:g.ihi+1], g.a[g.ilo:g.ihi+1],
color="b", label="minmod limiter")
pylab.plot(g.x[g.ilo:g.ihi+1], ainit[g.ilo:g.ihi+1],
ls=":", color="0.5", label="exact")
pylab.legend(frameon=False, loc="best")
pylab.xlabel(r"$x$")
pylab.ylabel(r"$a$")
pylab.savefig("fv-advect.eps")
#-------------------------------------------------------------------------
# convergence test
problem = "gaussian"
xmin = 0.0
xmax = 1.0
ng = 2
N = [32, 64, 128, 256, 512]
err = []
for nx in N:
g = Grid1d(nx, ng, xmin=xmin, xmax=xmax)
u = 1.0
s = Simulation(g, u, C=0.8, slope_type="centered")
s.init_cond("gaussian")
ainit = s.grid.a.copy()
s.evolve(num_periods=5)
# compute the error
err.append(g.norm(g.a - ainit))
print g.dx, nx, err[-1]
pylab.clf()
N = numpy.array(N, dtype=numpy.float64)
err = numpy.array(err)
pylab.scatter(N, err, color="r")
pylab.plot(N, err[len(N)-1]*(N[len(N)-1]/N)**2,
color="k", label=r"$\mathcal{O}(\Delta x^2)$")
ax = pylab.gca()
ax.set_xscale('log')
ax.set_yscale('log')
pylab.xlabel("N")
pylab.ylabel(r"$\|\| a^\mathrm{final} - a^\mathrm{init} \|\|_2$",
fontsize=16)
pylab.legend(frameon=False)
pylab.savefig("plm-converge.png")
#-------------------------------------------------------------------------
# different limiters: run both the Gaussian and tophat
xmin = 0.0
xmax = 1.0
nx = 128
ng = 2
u = 1.0
    g = Grid1d(nx, ng, xmin=xmin, xmax=xmax)
for p in ["gaussian", "tophat"]:
pylab.clf()
s = Simulation(g, u, C=0.8, slope_type="godunov")
s.init_cond(p)
ainit = s.grid.a.copy()
s.evolve(num_periods=5)
pylab.subplot(231)
pylab.plot(g.x[g.ilo:g.ihi+1], g.a[g.ilo:g.ihi+1], color="r")
pylab.plot(g.x[g.ilo:g.ihi+1], ainit[g.ilo:g.ihi+1], ls=":", color="0.5")
pylab.title("piecewise constant")
s = Simulation(g, u, C=0.8, slope_type="centered")
s.init_cond(p)
ainit = s.grid.a.copy()
s.evolve(num_periods=5)
pylab.subplot(232)
pylab.plot(g.x[g.ilo:g.ihi+1], g.a[g.ilo:g.ihi+1], color="r")
pylab.plot(g.x[g.ilo:g.ihi+1], ainit[g.ilo:g.ihi+1], ls=":", color="0.5")
pylab.title("centered (unlimited)")
s = Simulation(g, u, C=0.8, slope_type="minmod")
s.init_cond(p)
ainit = s.grid.a.copy()
s.evolve(num_periods=5)
pylab.subplot(233)
pylab.plot(g.x[g.ilo:g.ihi+1], g.a[g.ilo:g.ihi+1], color="r")
pylab.plot(g.x[g.ilo:g.ihi+1], ainit[g.ilo:g.ihi+1], ls=":", color="0.5")
pylab.title("minmod limiter")
s = Simulation(g, u, C=0.8, slope_type="MC")
s.init_cond(p)
ainit = s.grid.a.copy()
s.evolve(num_periods=5)
pylab.subplot(234)
pylab.plot(g.x[g.ilo:g.ihi+1], g.a[g.ilo:g.ihi+1], color="r")
pylab.plot(g.x[g.ilo:g.ihi+1], ainit[g.ilo:g.ihi+1], ls=":", color="0.5")
pylab.title("MC limiter")
s = Simulation(g, u, C=0.8, slope_type="superbee")
s.init_cond(p)
ainit = s.grid.a.copy()
s.evolve(num_periods=5)
pylab.subplot(235)
pylab.plot(g.x[g.ilo:g.ihi+1], g.a[g.ilo:g.ihi+1], color="r")
pylab.plot(g.x[g.ilo:g.ihi+1], ainit[g.ilo:g.ihi+1], ls=":", color="0.5")
pylab.title("superbee limiter")
f = pylab.gcf()
f.set_size_inches(10.0,7.0)
pylab.tight_layout()
pylab.savefig("fv-{}-limiters.png".format(p), bbox_inches="tight")
| bsd-3-clause | -5,731,016,496,385,529,000 | 24.060674 | 95 | 0.492019 | false |
nprapps/elections14 | app.py | 1 | 9458 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from copy import copy
import json
import argparse
from flask import Flask, render_template
import app_config
import app_utils
from app_utils import get_last_updated
from render_utils import make_context, smarty_filter, urlencode_filter
import slides
import static_app
import static_theme
app = Flask(__name__)
app.jinja_env.filters['smarty'] = smarty_filter
app.jinja_env.filters['urlencode'] = urlencode_filter
@app.template_filter()
def format_board_time(dt):
"""
Format a time for the big board
"""
if not dt:
return ''
return '{d:%l}:{d.minute:02}'.format(d=dt) + ' EST'
@app.template_filter()
def format_percent(num):
"""
Format a percentage
"""
return int(round(num))
@app.template_filter()
def format_precincts_percent(num):
"""
Format a percentage for precincts reporting
"""
if num > 0 and num < 1:
return '<1'
if num > 99 and num < 100:
return '>99'
else:
return int(round(num))
@app.template_filter()
def signed(num):
"""
Add sign to number (e.g. +1, -1)
"""
return '{0:+d}'.format(num)
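# Worked examples for the filters above (added note, not in the original file):
#
#   format_precincts_percent(0.4)   -> '<1'
#   format_precincts_percent(99.5)  -> '>99'
#   format_precincts_percent(54.3)  -> 54
#   signed(3)                       -> '+3'
#   signed(-2)                      -> '-2'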
@app.route('/')
def index():
"""
Example view demonstrating rendering a simple HTML page.
"""
from models import Race
context = make_context()
with open('data/featured.json') as f:
context['featured'] = json.load(f)
context['races'] = Race.select()
"""
Balance of Power data
"""
races = Race.select().where(Race.office_name == 'U.S. Senate').order_by(Race.state_postal)
context['not_called'] = app_utils.calculate_seats_left(races)
if app_config.DEPLOY_PROMO:
template_file = 'promo.html'
else:
template_file = 'index.html'
return render_template(template_file, **context), 200,
@app.route('/promo/')
def promo():
"""
Test promo template.
"""
return render_template('promo.html', **make_context())
@app.route('/board/<slug>/')
def _big_board(slug):
"""
Preview a slide outside of the stack.
"""
context = make_context()
context['body'] = _slide(slug).data
if slug == 'senate-big-board':
title = 'U.S. Senate'
elif slug == 'house-big-board-one':
title = 'U.S. House 1'
elif slug == 'house-big-board-two':
title = 'U.S. House 2'
elif slug == 'governor-big-board':
title = 'Governors'
elif slug == 'ballot-measures-big-board':
title = 'Ballot measures'
context['title'] = title
return render_template('_big_board_wrapper.html', **context)
@app.route('/bop.html')
@app_utils.cors
def _bop():
"""
Serve the most recent bop data
"""
from models import Race
context = make_context()
races = Race.select().where(Race.office_name == 'U.S. Senate').order_by(Race.state_postal)
context['bop'] = app_utils.calculate_bop(races, app_utils.SENATE_INITIAL_BOP)
context['not_called'] = app_utils.calculate_seats_left(races)
return render_template('bop.html', **context)
@app.route('/live-data/stack.json')
@app_utils.cors
def _stack_json():
"""
Serve up the current slide stack.
"""
from models import SlideSequence
data = SlideSequence.stack()
# There is one state slug to manipulate in the stack, but the client
# should see two
for i, d in enumerate(data):
if d['slug'] == 'state-house-results':
one = copy(d)
one['slug'] = 'state-house-results-1'
two = copy(d)
two['slug'] = 'state-house-results-2'
data[i:i + 1] = [
one,
two
]
break
js = json.dumps(data)
return js, 200, { 'Content-Type': 'application/javascript' }
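# Added note (not in the original file): a sequence entry with the slug
# 'state-house-results' is emitted twice by the view above, e.g. (fields
# abbreviated)
#
#   [..., {"slug": "state-house-results-1", ...},
#         {"slug": "state-house-results-2", ...}, ...]
#
# so clients cycle through both house pages even though only one slide row
# exists in the sequence.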
@app.route('/preview/state-house-results/index.html')
@app.route('/preview/state-senate-results/index.html')
def _state_picker_preview():
"""
Preview a state slide outside of the stack.
"""
context = make_context()
return render_template('_state_picker_preview.html', **context)
@app.route('/preview/state-house-results-<string:slug>-<int:page>/index.html')
@app_utils.cors
def _state_house_slide_preview(slug, page):
"""
Preview a state slide outside of the stack.
"""
context = make_context()
context['body'] = _state_house_slide(slug, page).data
return render_template('slide_preview.html', **context)
@app.route('/preview/state-senate-results-<slug>/index.html')
@app_utils.cors
def _state_senate_slide_preview(slug):
"""
Preview a state slide outside of the stack.
"""
context = make_context()
resp = _state_senate_slide(slug)
if resp.status_code == 200:
context['body'] = resp.data
return render_template('slide_preview.html', **context)
else:
return "404", 404
@app.route('/preview/<slug>/index.html')
@app_utils.cors
def _slide_preview(slug):
"""
Preview a slide outside of the stack.
"""
from models import SlideSequence
context = make_context()
sequence = SlideSequence.select()
for slide in sequence:
if slide.slide.slug == slug:
context['in_sequence'] = True
previous_slide_order = slide.order - 1
next_slide_order = slide.order + 1
break
try:
context['previous_slide'] = SlideSequence.get(SlideSequence.order == previous_slide_order).slide.slug
except:
pass
try:
context['next_slide'] = SlideSequence.get(SlideSequence.order == next_slide_order).slide.slug
except:
pass
context['body'] = _slide(slug).data.decode('utf-8')
context['slug'] = slug
return render_template('slide_preview.html', **context)
@app.route('/slides/state-house-results-<string:slug>-<int:page>.html')
@app_utils.cors
def _state_house_slide(slug, page):
"""
Serve a state slide.
"""
from models import Race, Slide
slide = Slide.get(Slide.slug == 'state-house-results')
slug = slug.upper()
races = Race.select().where(
(Race.office_name == 'U.S. House') &
(Race.state_postal == slug)
).order_by(Race.seat_number)
timestamp = get_last_updated(races)
context = make_context(timestamp=timestamp)
context['slide_class'] = 'state-house'
context['state_postal'] = slug
context['state_name'] = app_config.STATES.get(slug)
# Calculate BOP using all races
context.update(app_utils.calculate_state_bop(races))
# Filter to display races
races = races.where(Race.featured_race == True)
if slug in app_config.PAGINATED_STATES:
race_count = races.count()
page_size = race_count / 2
if page == 1:
races = races.limit(page_size)
elif page == 2:
races = races.offset(page_size)
context['page'] = page
if races.count():
context['time_on_screen'] = slide.time_on_screen
context['races'] = [race for race in races]
context['body'] = render_template('slides/state_house.html', **context)
return render_template('_slide.html', **context)
else:
return "no races", 404
@app.route('/slides/state-senate-results-<slug>.html')
@app_utils.cors
def _state_senate_slide(slug):
"""
Serve a state slide.
"""
from models import Race, Slide
slide = Slide.get(Slide.slug == 'state-senate-results')
slug = slug.upper()
senate_races = Race.select().where(
(Race.office_name == 'U.S. Senate') &
(Race.state_postal == slug)
).order_by(Race.seat_number)
governor_races = Race.select().where(
(Race.office_name == 'Governor') &
(Race.state_postal == slug)
)
if senate_races.count() == 0 and governor_races.count() == 0:
return "404", 404
senate_updated = get_last_updated(senate_races)
governor_updated = get_last_updated(governor_races)
if senate_updated > governor_updated:
timestamp = senate_updated
else:
timestamp = governor_updated
context = make_context(timestamp=timestamp)
context['state_postal'] = slug
context['state_name'] = app_config.STATES.get(slug)
context['slide_class'] = 'state-senate'
context['senate'] = senate_races
context['governor'] = governor_races
context['time_on_screen'] = slide.time_on_screen
context['body'] = render_template('slides/state_senate.html', **context)
return render_template('_slide.html', **context)
@app.route('/slides/<slug>.html')
@app_utils.cors
def _slide(slug):
"""
Serve up slide html fragment
"""
from models import Slide
context = make_context()
slide = Slide.get(Slide.slug == slug)
view_name = slide.view_name
if slide.data:
context['body'] = slides.__dict__[view_name](slide.data)
else:
context['body'] = slides.__dict__[view_name]()
context['slide_class'] = view_name.replace('_', '-')
context['time_on_screen'] = slide.time_on_screen
return render_template('_slide.html', **context)
app.register_blueprint(static_app.static_app)
app.register_blueprint(static_theme.theme)
# Boilerplate
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--port')
args = parser.parse_args()
server_port = 8000
if args.port:
server_port = int(args.port)
app.run(host='0.0.0.0', port=server_port, debug=app_config.DEBUG)
| mit | 3,996,981,375,998,131,000 | 24.771117 | 109 | 0.617467 | false |
Azure/azure-sdk-for-python | sdk/securityinsight/azure-mgmt-securityinsight/azure/mgmt/securityinsight/aio/operations/_incident_comments_operations.py | 1 | 14785 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class IncidentCommentsOperations:
"""IncidentCommentsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.securityinsight.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_incident(
self,
resource_group_name: str,
workspace_name: str,
incident_id: str,
filter: Optional[str] = None,
orderby: Optional[str] = None,
top: Optional[int] = None,
skip_token: Optional[str] = None,
**kwargs
) -> AsyncIterable["models.IncidentCommentList"]:
"""Gets all incident comments.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param incident_id: Incident ID.
:type incident_id: str
:param filter: Filters the results, based on a Boolean condition. Optional.
:type filter: str
:param orderby: Sorts the results. Optional.
:type orderby: str
:param top: Returns only the first n results. Optional.
:type top: int
:param skip_token: Skiptoken is only used if a previous operation returned a partial result. If
a previous response contains a nextLink element, the value of the nextLink element will include
a skiptoken parameter that specifies a starting point to use for subsequent calls. Optional.
:type skip_token: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IncidentCommentList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.securityinsight.models.IncidentCommentList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.IncidentCommentList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-01-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_incident.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=90, min_length=1),
'incidentId': self._serialize.url("incident_id", incident_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
if skip_token is not None:
query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('IncidentCommentList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_incident.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/incidents/{incidentId}/comments'} # type: ignore
async def get(
self,
resource_group_name: str,
workspace_name: str,
incident_id: str,
incident_comment_id: str,
**kwargs
) -> "models.IncidentComment":
"""Gets an incident comment.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param incident_id: Incident ID.
:type incident_id: str
:param incident_comment_id: Incident comment ID.
:type incident_comment_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IncidentComment, or the result of cls(response)
:rtype: ~azure.mgmt.securityinsight.models.IncidentComment
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.IncidentComment"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-01-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=90, min_length=1),
'incidentId': self._serialize.url("incident_id", incident_id, 'str'),
'incidentCommentId': self._serialize.url("incident_comment_id", incident_comment_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('IncidentComment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/incidents/{incidentId}/comments/{incidentCommentId}'} # type: ignore
async def create_comment(
self,
resource_group_name: str,
workspace_name: str,
incident_id: str,
incident_comment_id: str,
message: Optional[str] = None,
**kwargs
) -> "models.IncidentComment":
"""Creates the incident comment.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param incident_id: Incident ID.
:type incident_id: str
:param incident_comment_id: Incident comment ID.
:type incident_comment_id: str
:param message: The comment message.
:type message: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IncidentComment, or the result of cls(response)
:rtype: ~azure.mgmt.securityinsight.models.IncidentComment
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.IncidentComment"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_incident_comment = models.IncidentComment(message=message)
api_version = "2020-01-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_comment.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=90, min_length=1),
'incidentId': self._serialize.url("incident_id", incident_id, 'str'),
'incidentCommentId': self._serialize.url("incident_comment_id", incident_comment_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(_incident_comment, 'IncidentComment')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('IncidentComment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_comment.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/incidents/{incidentId}/comments/{incidentCommentId}'} # type: ignore
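    # Illustrative sketch (added note, not part of the generated code): these
    # operations are normally reached through an attribute on the service
    # client; the attribute name below is an assumption.
    #
    #   async for comment in client.incident_comments.list_by_incident(
    #           resource_group_name="rg", workspace_name="ws", incident_id="id"):
    #       print(comment.message)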
| mit | 862,310,406,738,324,700 | 50.515679 | 279 | 0.642611 | false |
rancavil/python-oauth2 | oauth2/__init__.py | 1 | 29076 | """
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import base64
import urllib
import time
import random
import urlparse
import hmac
import binascii
import httplib2
try:
from urlparse import parse_qs
parse_qs # placate pyflakes
except ImportError:
# fall back for Python 2.5
from cgi import parse_qs
try:
from hashlib import sha1
sha = sha1
except ImportError:
# hashlib was added in Python 2.5
import sha
import _version
__version__ = _version.__version__
OAUTH_VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class Error(RuntimeError):
"""Generic exception class."""
def __init__(self, message='OAuth error occurred.'):
self._message = message
@property
def message(self):
"""A hack to get around the deprecation errors in 2.6."""
return self._message
def __str__(self):
return self._message
class MissingSignature(Error):
pass
def build_authenticate_header(realm=''):
"""Optional WWW-Authenticate header (401 error)"""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def build_xoauth_string(url, consumer, token=None):
"""Build an XOAUTH string for use in SMTP/IMPA authentication."""
request = Request.from_consumer_and_token(consumer, token,
"GET", url)
signing_method = SignatureMethod_HMAC_SHA1()
request.sign_request(signing_method, consumer, token)
params = []
for k, v in sorted(request.iteritems()):
if v is not None:
params.append('%s="%s"' % (k, escape(v)))
return "%s %s %s" % ("GET", url, ','.join(params))
def to_unicode(s):
""" Convert to unicode, raise exception with instructive error
message if s is not unicode, ascii, or utf-8. """
if not isinstance(s, unicode):
if not isinstance(s, str):
raise TypeError('You are required to pass either unicode or string here, not: %r (%s)' % (type(s), s))
try:
s = s.decode('utf-8')
except UnicodeDecodeError, le:
raise TypeError('You are required to pass either a unicode object or a utf-8 string here. You passed a Python string object which contained non-utf-8: %r. The UnicodeDecodeError that resulted from attempting to interpret it as utf-8 was: %s' % (s, le,))
return s
def to_utf8(s):
return to_unicode(s).encode('utf-8')
def to_unicode_if_string(s):
if isinstance(s, basestring):
return to_unicode(s)
else:
return s
def to_utf8_if_string(s):
if isinstance(s, basestring):
return to_utf8(s)
else:
return s
def to_unicode_optional_iterator(x):
"""
Raise TypeError if x is a str containing non-utf8 bytes or if x is
an iterable which contains such a str.
"""
if isinstance(x, basestring):
return to_unicode(x)
try:
l = list(x)
except TypeError, e:
assert 'is not iterable' in str(e)
return x
else:
return [ to_unicode(e) for e in l ]
def to_utf8_optional_iterator(x):
"""
Raise TypeError if x is a str or if x is an iterable which
contains a str.
"""
if isinstance(x, basestring):
return to_utf8(x)
try:
l = list(x)
except TypeError, e:
assert 'is not iterable' in str(e)
return x
else:
return [ to_utf8_if_string(e) for e in l ]
def escape(s):
"""Escape a URL including any /."""
return urllib.quote(s.encode('utf-8'), safe='~')
def generate_timestamp():
"""Get seconds since epoch (UTC)."""
return int(time.time())
def generate_nonce(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
def generate_verifier(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
class Consumer(object):
"""A consumer of OAuth-protected services.
The OAuth consumer is a "third-party" service that wants to access
protected resources from an OAuth service provider on behalf of an end
user. It's kind of the OAuth client.
Usually a consumer must be registered with the service provider by the
developer of the consumer software. As part of that process, the service
provider gives the consumer a *key* and a *secret* with which the consumer
software can identify itself to the service. The consumer will include its
key in each request to identify itself, but will use its secret only when
signing requests, to prove that the request is from that particular
registered consumer.
Once registered, the consumer can then use its consumer credentials to ask
the service provider for a request token, kicking off the OAuth
authorization process.
"""
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def __str__(self):
data = {'oauth_consumer_key': self.key,
'oauth_consumer_secret': self.secret}
return urllib.urlencode(data)
class Token(object):
"""An OAuth credential used to request authorization or a protected
resource.
Tokens in OAuth comprise a *key* and a *secret*. The key is included in
requests to identify the token being used, but the secret is used only in
the signature, to prove that the requester is who the server gave the
token to.
When first negotiating the authorization, the consumer asks for a *request
token* that the live user authorizes with the service provider. The
consumer then exchanges the request token for an *access token* that can
be used to access protected resources.
"""
key = None
secret = None
callback = None
callback_confirmed = None
verifier = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def set_callback(self, callback):
self.callback = callback
self.callback_confirmed = 'true'
def set_verifier(self, verifier=None):
if verifier is not None:
self.verifier = verifier
else:
self.verifier = generate_verifier()
def get_callback_url(self):
if self.callback and self.verifier:
# Append the oauth_verifier.
parts = urlparse.urlparse(self.callback)
scheme, netloc, path, params, query, fragment = parts[:6]
if query:
query = '%s&oauth_verifier=%s' % (query, self.verifier)
else:
query = 'oauth_verifier=%s' % self.verifier
return urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
return self.callback
def to_string(self):
"""Returns this token as a plain string, suitable for storage.
The resulting string includes the token's secret, so you should never
send or store this string where a third party can read it.
"""
data = {
'oauth_token': self.key,
'oauth_token_secret': self.secret,
}
if self.callback_confirmed is not None:
data['oauth_callback_confirmed'] = self.callback_confirmed
return urllib.urlencode(data)
@staticmethod
def from_string(s):
"""Deserializes a token from a string like one returned by
`to_string()`."""
if not len(s):
raise ValueError("Invalid parameter string.")
params = parse_qs(s, keep_blank_values=False)
if not len(params):
raise ValueError("Invalid parameter string.")
try:
key = params['oauth_token'][0]
except Exception:
raise ValueError("'oauth_token' not found in OAuth request.")
try:
secret = params['oauth_token_secret'][0]
except Exception:
raise ValueError("'oauth_token_secret' not found in "
"OAuth request.")
token = Token(key, secret)
try:
token.callback_confirmed = params['oauth_callback_confirmed'][0]
except KeyError:
pass # 1.0, no callback confirmed.
return token
def __str__(self):
return self.to_string()
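# Illustrative sketch (added note, not part of the original module): a token
# survives a to_string()/from_string() round trip.
#
#   tok = Token(key="request-key", secret="request-secret")
#   s = tok.to_string()            # urlencoded oauth_token/oauth_token_secret
#   tok2 = Token.from_string(s)
#   assert tok2.key == tok.key and tok2.secret == tok.secret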
def setter(attr):
name = attr.__name__
def getter(self):
try:
return self.__dict__[name]
except KeyError:
raise AttributeError(name)
def deleter(self):
del self.__dict__[name]
return property(getter, attr, deleter)
class Request(dict):
"""The parameters and information for an HTTP request, suitable for
authorizing with OAuth credentials.
When a consumer wants to access a service's protected resources, it does
so using a signed HTTP request identifying itself (the consumer) with its
key, and providing an access token authorized by the end user to access
those resources.
"""
version = OAUTH_VERSION
def __init__(self, method=HTTP_METHOD, url=None, parameters=None,
body='', is_form_encoded=False):
if url is not None:
self.url = to_unicode(url)
self.method = method
if parameters is not None:
for k, v in parameters.iteritems():
k = to_unicode(k)
v = to_unicode_optional_iterator(v)
self[k] = v
self.body = body
self.is_form_encoded = is_form_encoded
@setter
def url(self, value):
self.__dict__['url'] = value
if value is not None:
scheme, netloc, path, params, query, fragment = urlparse.urlparse(value)
# Exclude default port numbers.
if scheme == 'http' and netloc[-3:] == ':80':
netloc = netloc[:-3]
elif scheme == 'https' and netloc[-4:] == ':443':
netloc = netloc[:-4]
if scheme not in ('http', 'https'):
raise ValueError("Unsupported URL %s (%s)." % (value, scheme))
# Normalized URL excludes params, query, and fragment.
self.normalized_url = urlparse.urlunparse((scheme, netloc, path, None, None, None))
else:
self.normalized_url = None
self.__dict__['url'] = None
@setter
def method(self, value):
self.__dict__['method'] = value.upper()
def _get_timestamp_nonce(self):
return self['oauth_timestamp'], self['oauth_nonce']
def get_nonoauth_parameters(self):
"""Get any non-OAuth parameters."""
return dict([(k, v) for k, v in self.iteritems()
if not k.startswith('oauth_')])
def to_header(self, realm=''):
"""Serialize as a header for an HTTPAuth request."""
oauth_params = ((k, v) for k, v in self.items()
if k.startswith('oauth_'))
stringy_params = ((k, escape(str(v))) for k, v in oauth_params)
header_params = ('%s="%s"' % (k, v) for k, v in stringy_params)
params_header = ', '.join(header_params)
auth_header = 'OAuth realm="%s"' % realm
if params_header:
auth_header = "%s, %s" % (auth_header, params_header)
return {'Authorization': auth_header}
def to_postdata(self):
"""Serialize as post data for a POST request."""
d = {}
for k, v in self.iteritems():
d[k.encode('utf-8')] = to_utf8_optional_iterator(v)
# tell urlencode to deal with sequence values and map them correctly
# to resulting querystring. for example self["k"] = ["v1", "v2"] will
# result in 'k=v1&k=v2' and not k=%5B%27v1%27%2C+%27v2%27%5D
return urllib.urlencode(d, True).replace('+', '%20')
def to_url(self):
"""Serialize as a URL for a GET request."""
base_url = urlparse.urlparse(self.url.encode('utf-8'))
try:
query = base_url.query
except AttributeError:
# must be python <2.5
query = base_url[4]
query = parse_qs(query)
for k, v in self.items():
query.setdefault(k.encode('utf-8'), []).append(v)
try:
scheme = base_url.scheme
netloc = base_url.netloc
path = base_url.path
params = base_url.params
fragment = base_url.fragment
except AttributeError:
# must be python <2.5
scheme = base_url[0]
netloc = base_url[1]
path = base_url[2]
params = base_url[3]
fragment = base_url[5]
url = (scheme, netloc, path, params,
urllib.urlencode(query, True), fragment)
return urlparse.urlunparse(url)
def get_parameter(self, parameter):
ret = self.get(parameter)
if ret is None:
raise Error('Parameter not found: %s' % parameter)
return ret
def get_normalized_parameters(self):
"""Return a string that contains the parameters that must be signed."""
items = []
for key, value in self.iteritems():
if key == 'oauth_signature':
continue
# 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
# so we unpack sequence values into multiple items for sorting.
if isinstance(value, basestring):
items.append((to_utf8_if_string(key), to_utf8(value)))
else:
try:
value = list(value)
except TypeError, e:
assert 'is not iterable' in str(e)
items.append((to_utf8_if_string(key), to_utf8_if_string(value)))
else:
items.extend((to_utf8_if_string(key), to_utf8_if_string(item)) for item in value)
# Include any query string parameters from the provided URL
query = urlparse.urlparse(self.url)[4]
url_items = self._split_url_string(query).items()
url_items = [(to_utf8(k), to_utf8(v)) for k, v in url_items if k != 'oauth_signature' ]
items.extend(url_items)
items.sort()
encoded_str = urllib.urlencode(items)
# Encode signature parameters per Oauth Core 1.0 protocol
# spec draft 7, section 3.6
# (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6)
# Spaces must be encoded with "%20" instead of "+"
return encoded_str.replace('+', '%20').replace('%7E', '~')
def sign_request(self, signature_method, consumer, token):
"""Set the signature parameter to the result of sign."""
if not self.is_form_encoded:
# according to
# http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html
# section 4.1.1 "OAuth Consumers MUST NOT include an
# oauth_body_hash parameter on requests with form-encoded
# request bodies."
self['oauth_body_hash'] = base64.b64encode(sha(self.body).digest())
if 'oauth_consumer_key' not in self:
self['oauth_consumer_key'] = consumer.key
if token and 'oauth_token' not in self:
self['oauth_token'] = token.key
self['oauth_signature_method'] = signature_method.name
self['oauth_signature'] = signature_method.sign(self, consumer, token)
@classmethod
def make_timestamp(cls):
"""Get seconds since epoch (UTC)."""
return str(int(time.time()))
@classmethod
def make_nonce(cls):
"""Generate pseudorandom number."""
return str(random.randint(0, 100000000))
@classmethod
def from_request(cls, http_method, http_url, headers=None, parameters=None,
query_string=None):
"""Combines multiple parameter sources."""
if parameters is None:
parameters = {}
# Headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# Check that the authorization header is OAuth.
if auth_header[:6] == 'OAuth ':
auth_header = auth_header[6:]
try:
# Get the parameters from the header.
header_params = cls._split_header(auth_header)
parameters.update(header_params)
except:
raise Error('Unable to parse OAuth parameters from '
'Authorization header.')
# GET or POST query string.
if query_string:
query_params = cls._split_url_string(query_string)
parameters.update(query_params)
# URL parameters.
param_str = urlparse.urlparse(http_url)[4] # query
url_params = cls._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return cls(http_method, http_url, parameters)
return None
@classmethod
def from_consumer_and_token(cls, consumer, token=None,
http_method=HTTP_METHOD, http_url=None, parameters=None,
body='', is_form_encoded=False):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': consumer.key,
'oauth_timestamp': cls.make_timestamp(),
'oauth_nonce': cls.make_nonce(),
'oauth_version': cls.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
if token.verifier:
parameters['oauth_verifier'] = token.verifier
return Request(http_method, http_url, parameters, body=body,
is_form_encoded=is_form_encoded)
@classmethod
def from_token_and_callback(cls, token, callback=None,
http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = callback
return cls(http_method, http_url, parameters)
@staticmethod
def _split_header(header):
"""Turn Authorization: header into parameters."""
params = {}
parts = header.split(',')
for param in parts:
# Ignore realm parameter.
if param.find('realm') > -1:
continue
# Remove whitespace.
param = param.strip()
# Split key-value.
param_parts = param.split('=', 1)
# Remove quotes and unescape the value.
params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
return params
@staticmethod
def _split_url_string(param_str):
"""Turn URL string into parameters."""
parameters = parse_qs(param_str.encode('utf-8'), keep_blank_values=True)
for k, v in parameters.iteritems():
parameters[k] = urllib.unquote(v[0])
return parameters
class Client(httplib2.Http):
"""OAuthClient is a worker to attempt to execute a request."""
def __init__(self, consumer, token=None, cache=None, timeout=None,
proxy_info=None):
if consumer is not None and not isinstance(consumer, Consumer):
raise ValueError("Invalid consumer.")
if token is not None and not isinstance(token, Token):
raise ValueError("Invalid token.")
self.consumer = consumer
self.token = token
self.method = SignatureMethod_HMAC_SHA1()
httplib2.Http.__init__(self, cache=cache, timeout=timeout, proxy_info=proxy_info)
def set_signature_method(self, method):
if not isinstance(method, SignatureMethod):
raise ValueError("Invalid signature method.")
self.method = method
def request(self, uri, method="GET", body='', headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None):
DEFAULT_POST_CONTENT_TYPE = 'application/x-www-form-urlencoded'
if not isinstance(headers, dict):
headers = {}
if method == "POST":
headers['Content-Type'] = headers.get('Content-Type',
DEFAULT_POST_CONTENT_TYPE)
is_form_encoded = \
headers.get('Content-Type') == 'application/x-www-form-urlencoded'
if is_form_encoded and body:
parameters = parse_qs(body)
else:
parameters = None
req = Request.from_consumer_and_token(self.consumer,
token=self.token, http_method=method, http_url=uri,
parameters=parameters, body=body, is_form_encoded=is_form_encoded)
req.sign_request(self.method, self.consumer, self.token)
schema, rest = urllib.splittype(uri)
if rest.startswith('//'):
hierpart = '//'
else:
hierpart = ''
host, rest = urllib.splithost(rest)
realm = schema + ':' + hierpart + host
if is_form_encoded:
body = req.to_postdata()
elif method == "GET":
uri = req.to_url()
else:
headers.update(req.to_header(realm=realm))
return httplib2.Http.request(self, uri, method=method, body=body,
headers=headers, redirections=redirections,
connection_type=connection_type)
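# Illustrative sketch (added note, not part of the original module): a minimal
# two-legged request with the Client above; the key, secret and URL are
# placeholders.
#
#   consumer = Consumer(key="my-app-key", secret="my-app-secret")
#   client = Client(consumer)                      # no token needed
#   resp, content = client.request("https://api.example.com/resource", "GET")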
class Server(object):
"""A skeletal implementation of a service provider, providing protected
resources to requests from authorized consumers.
This class implements the logic to check requests for authorization. You
can use it with your web server or web framework to protect certain
resources with OAuth.
"""
timestamp_threshold = 300 # In seconds, five minutes.
version = OAUTH_VERSION
signature_methods = None
def __init__(self, signature_methods=None):
self.signature_methods = signature_methods or {}
def add_signature_method(self, signature_method):
self.signature_methods[signature_method.name] = signature_method
return self.signature_methods
def verify_request(self, request, consumer, token):
"""Verifies an api call and checks all the parameters."""
self._check_version(request)
self._check_signature(request, consumer, token)
parameters = request.get_nonoauth_parameters()
return parameters
def build_authenticate_header(self, realm=''):
"""Optional support for the authenticate header."""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def _check_version(self, request):
"""Verify the correct version of the request for this server."""
version = self._get_version(request)
if version and version != self.version:
raise Error('OAuth version %s not supported.' % str(version))
def _get_version(self, request):
"""Return the version of the request for this server."""
try:
version = request.get_parameter('oauth_version')
except:
version = OAUTH_VERSION
return version
def _get_signature_method(self, request):
"""Figure out the signature with some defaults."""
try:
signature_method = request.get_parameter('oauth_signature_method')
except:
signature_method = SIGNATURE_METHOD
try:
# Get the signature method object.
signature_method = self.signature_methods[signature_method]
except:
signature_method_names = ', '.join(self.signature_methods.keys())
raise Error('Signature method %s not supported try one of the following: %s' % (signature_method, signature_method_names))
return signature_method
def _get_verifier(self, request):
return request.get_parameter('oauth_verifier')
def _check_signature(self, request, consumer, token):
timestamp, nonce = request._get_timestamp_nonce()
self._check_timestamp(timestamp)
signature_method = self._get_signature_method(request)
try:
signature = request.get_parameter('oauth_signature')
except:
raise MissingSignature('Missing oauth_signature.')
# Validate the signature.
valid = signature_method.check(request, consumer, token, signature)
if not valid:
key, base = signature_method.signing_base(request, consumer, token)
raise Error('Invalid signature. Expected signature base '
'string: %s' % base)
def _check_timestamp(self, timestamp):
"""Verify that timestamp is recentish."""
timestamp = int(timestamp)
now = int(time.time())
lapsed = now - timestamp
if lapsed > self.timestamp_threshold:
raise Error('Expired timestamp: given %d and now %s has a '
'greater difference than threshold %d' % (timestamp, now,
self.timestamp_threshold))
class SignatureMethod(object):
"""A way of signing requests.
The OAuth protocol lets consumers and service providers pick a way to sign
requests. This interface shows the methods expected by the other `oauth`
modules for signing requests. Subclass it and implement its methods to
provide a new way to sign requests.
"""
def signing_base(self, request, consumer, token):
"""Calculates the string that needs to be signed.
This method returns a 2-tuple containing the starting key for the
signing and the message to be signed. The latter may be used in error
messages to help clients debug their software.
"""
raise NotImplementedError
def sign(self, request, consumer, token):
"""Returns the signature for the given request, based on the consumer
and token also provided.
You should use your implementation of `signing_base()` to build the
message to sign. Otherwise it may be less useful for debugging.
"""
raise NotImplementedError
def check(self, request, consumer, token, signature):
"""Returns whether the given signature is the correct signature for
the given consumer and token signing the given request."""
built = self.sign(request, consumer, token)
return built == signature
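# Illustrative sketch; not part of the original library. The smallest
# possible SignatureMethod subclass, following the contract described in the
# docstring above. It keys on the consumer (and token) secret and "signs"
# with the request method only, so it is useless for real security and only
# shows the expected shape of signing_base()/sign().
class _ExampleSignatureMethod(SignatureMethod):
    name = 'EXAMPLE-NOOP'
    def signing_base(self, request, consumer, token):
        key = '%s&' % escape(consumer.secret)
        if token:
            key += escape(token.secret)
        raw = escape(request.method)
        return key, raw
    def sign(self, request, consumer, token):
        key, raw = self.signing_base(request, consumer, token)
        return '%s%s' % (key, raw)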
class SignatureMethod_HMAC_SHA1(SignatureMethod):
name = 'HMAC-SHA1'
def signing_base(self, request, consumer, token):
if not hasattr(request, 'normalized_url') or request.normalized_url is None:
raise ValueError("Base URL for request is not set.")
sig = (
escape(request.method),
escape(request.normalized_url),
escape(request.get_normalized_parameters()),
)
key = '%s&' % escape(consumer.secret)
if token:
key += escape(token.secret)
raw = '&'.join(sig)
return key, raw
def sign(self, request, consumer, token):
"""Builds the base signature string."""
key, raw = self.signing_base(request, consumer, token)
hashed = hmac.new(key, raw, sha)
# Calculate the digest base 64.
return binascii.b2a_base64(hashed.digest())[:-1]
class SignatureMethod_PLAINTEXT(SignatureMethod):
name = 'PLAINTEXT'
def signing_base(self, request, consumer, token):
"""Concatenates the consumer key and secret with the token's
secret."""
sig = '%s&' % escape(consumer.secret)
if token:
sig = sig + escape(token.secret)
return sig, sig
def sign(self, request, consumer, token):
key, raw = self.signing_base(request, consumer, token)
return raw
| mit | 3,934,429,948,027,602,400 | 32.809302 | 265 | 0.609575 | false |
auready/django | django/db/models/query.py | 1 | 69244 | """
The main QuerySet implementation. This provides the public API for the ORM.
"""
import copy
import sys
import warnings
from collections import OrderedDict, deque
from django.conf import settings
from django.core import exceptions
from django.db import (
DJANGO_VERSION_PICKLE_KEY, IntegrityError, connections, router,
transaction,
)
from django.db.models import DateField, DateTimeField, sql
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.db.models.expressions import F
from django.db.models.fields import AutoField
from django.db.models.functions import Trunc
from django.db.models.query_utils import InvalidQuery, Q
from django.db.models.sql.constants import CURSOR
from django.utils import timezone
from django.utils.functional import cached_property, partition
from django.utils.version import get_version
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
# Pull into this namespace for backwards compatibility.
EmptyResultSet = sql.EmptyResultSet
class BaseIterable:
def __init__(self, queryset, chunked_fetch=False):
self.queryset = queryset
self.chunked_fetch = chunked_fetch
class ModelIterable(BaseIterable):
"""
Iterable that yields a model instance for each row.
"""
def __iter__(self):
queryset = self.queryset
db = queryset.db
compiler = queryset.query.get_compiler(using=db)
# Execute the query. This will also fill compiler.select, klass_info,
# and annotations.
results = compiler.execute_sql(chunked_fetch=self.chunked_fetch)
select, klass_info, annotation_col_map = (compiler.select, compiler.klass_info,
compiler.annotation_col_map)
model_cls = klass_info['model']
select_fields = klass_info['select_fields']
model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1
init_list = [f[0].target.attname
for f in select[model_fields_start:model_fields_end]]
related_populators = get_related_populators(klass_info, select, db)
for row in compiler.results_iter(results):
obj = model_cls.from_db(db, init_list, row[model_fields_start:model_fields_end])
if related_populators:
for rel_populator in related_populators:
rel_populator.populate(row, obj)
if annotation_col_map:
for attr_name, col_pos in annotation_col_map.items():
setattr(obj, attr_name, row[col_pos])
# Add the known related objects to the model, if there are any
if queryset._known_related_objects:
for field, rel_objs in queryset._known_related_objects.items():
# Avoid overwriting objects loaded e.g. by select_related
if hasattr(obj, field.get_cache_name()):
continue
pk = getattr(obj, field.get_attname())
try:
rel_obj = rel_objs[pk]
except KeyError:
pass # may happen in qs1 | qs2 scenarios
else:
setattr(obj, field.name, rel_obj)
yield obj
class ValuesIterable(BaseIterable):
"""
Iterable returned by QuerySet.values() that yields a dict
for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
field_names = list(query.values_select)
extra_names = list(query.extra_select)
annotation_names = list(query.annotation_select)
# extra(select=...) cols are always at the start of the row.
names = extra_names + field_names + annotation_names
for row in compiler.results_iter():
yield dict(zip(names, row))
class ValuesListIterable(BaseIterable):
"""
Iterable returned by QuerySet.values_list(flat=False)
that yields a tuple for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
if not query.extra_select and not query.annotation_select:
for row in compiler.results_iter():
yield tuple(row)
else:
field_names = list(query.values_select)
extra_names = list(query.extra_select)
annotation_names = list(query.annotation_select)
# extra(select=...) cols are always at the start of the row.
names = extra_names + field_names + annotation_names
if queryset._fields:
# Reorder according to fields.
fields = list(queryset._fields) + [f for f in annotation_names if f not in queryset._fields]
else:
fields = names
for row in compiler.results_iter():
data = dict(zip(names, row))
yield tuple(data[f] for f in fields)
class FlatValuesListIterable(BaseIterable):
"""
Iterable returned by QuerySet.values_list(flat=True) that
yields single values.
"""
def __iter__(self):
queryset = self.queryset
compiler = queryset.query.get_compiler(queryset.db)
for row in compiler.results_iter():
yield row[0]
class QuerySet:
"""
Represents a lazy database lookup for a set of objects.
"""
def __init__(self, model=None, query=None, using=None, hints=None):
self.model = model
self._db = using
self._hints = hints or {}
self.query = query or sql.Query(self.model)
self._result_cache = None
self._sticky_filter = False
self._for_write = False
self._prefetch_related_lookups = ()
self._prefetch_done = False
self._known_related_objects = {} # {rel_field: {pk: rel_obj}}
self._iterable_class = ModelIterable
self._fields = None
def as_manager(cls):
# Address the circular dependency between `Queryset` and `Manager`.
from django.db.models.manager import Manager
manager = Manager.from_queryset(cls)()
manager._built_with_as_manager = True
return manager
as_manager.queryset_only = True
as_manager = classmethod(as_manager)
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""
Deep copy of a QuerySet doesn't populate the cache
"""
obj = self.__class__()
for k, v in self.__dict__.items():
if k == '_result_cache':
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
def __getstate__(self):
# Force the cache to be fully populated.
self._fetch_all()
obj_dict = self.__dict__.copy()
obj_dict[DJANGO_VERSION_PICKLE_KEY] = get_version()
return obj_dict
def __setstate__(self, state):
msg = None
pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
if pickled_version:
current_version = get_version()
if current_version != pickled_version:
msg = (
"Pickled queryset instance's Django version %s does not "
"match the current version %s." % (pickled_version, current_version)
)
else:
msg = "Pickled queryset instance's Django version is not specified."
if msg:
warnings.warn(msg, RuntimeWarning, stacklevel=2)
self.__dict__.update(state)
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return '<%s %r>' % (self.__class__.__name__, data)
def __len__(self):
self._fetch_all()
return len(self._result_cache)
def __iter__(self):
"""
The queryset iterator protocol uses three nested iterators in the
default case:
1. sql.compiler:execute_sql()
           - Returns 100 rows at a time (constants.GET_ITERATOR_CHUNK_SIZE)
using cursor.fetchmany(). This part is responsible for
doing some column masking, and returning the rows in chunks.
2. sql/compiler.results_iter()
           - Returns one row at a time. At this point the rows are still just
tuples. In some cases the return values are converted to
Python values at this location.
3. self.iterator()
- Responsible for turning the rows into model objects.
"""
self._fetch_all()
return iter(self._result_cache)
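    # Illustrative usage (comment-only sketch; `Author` is a placeholder
    # model, not something defined here):
    #     qs = Author.objects.filter(name__startswith='a')
    #     for author in qs:       # first evaluation runs the SQL once
    #         print(author.name)
    #     len(qs); bool(qs)       # served from _result_cache, no new queries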
def __bool__(self):
self._fetch_all()
return bool(self._result_cache)
def __getitem__(self, k):
"""
Retrieves an item or slice from the set of results.
"""
if not isinstance(k, (int, slice)):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0)) or
(isinstance(k, slice) and (k.start is None or k.start >= 0) and
(k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if self._result_cache is not None:
return self._result_cache[k]
if isinstance(k, slice):
qs = self._clone()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return list(qs)[::k.step] if k.step else qs
qs = self._clone()
qs.query.set_limits(k, k + 1)
return list(qs)[0]
def __and__(self, other):
self._merge_sanity_check(other)
if isinstance(other, EmptyQuerySet):
return other
if isinstance(self, EmptyQuerySet):
return self
combined = self._clone()
combined._merge_known_related_objects(other)
combined.query.combine(other.query, sql.AND)
return combined
def __or__(self, other):
self._merge_sanity_check(other)
if isinstance(self, EmptyQuerySet):
return other
if isinstance(other, EmptyQuerySet):
return self
combined = self._clone()
combined._merge_known_related_objects(other)
combined.query.combine(other.query, sql.OR)
return combined
####################################
# METHODS THAT DO DATABASE QUERIES #
####################################
def iterator(self):
"""
An iterator over the results from applying this QuerySet to the
database.
"""
return iter(self._iterable_class(self, chunked_fetch=True))
def aggregate(self, *args, **kwargs):
"""
Returns a dictionary containing the calculations (aggregation)
over the current queryset
        If args are present, each expression is passed as a kwarg using
        the Aggregate object's default alias.
"""
if self.query.distinct_fields:
raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
for arg in args:
# The default_alias property may raise a TypeError, so we use
# a try/except construct rather than hasattr in order to remain
# consistent between PY2 and PY3 (hasattr would swallow
# the TypeError on PY2).
try:
arg.default_alias
except (AttributeError, TypeError):
raise TypeError("Complex aggregates require an alias")
kwargs[arg.default_alias] = arg
query = self.query.clone()
for (alias, aggregate_expr) in kwargs.items():
query.add_annotation(aggregate_expr, alias, is_summary=True)
if not query.annotations[alias].contains_aggregate:
raise TypeError("%s is not an aggregate expression" % alias)
return query.get_aggregation(self.db, kwargs.keys())
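    # Illustrative usage (comment-only sketch; `Book` is a placeholder model
    # and Avg comes from django.db.models):
    #     Book.objects.aggregate(Avg('price'))            -> {'price__avg': ...}
    #     Book.objects.aggregate(avg_price=Avg('price'))  -> {'avg_price': ...}
    # The first form relies on the expression's default_alias, as described
    # in the docstring above.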
def count(self):
"""
Performs a SELECT COUNT() and returns the number of records as an
integer.
If the QuerySet is already fully cached this simply returns the length
of the cached results set to avoid multiple SELECT COUNT(*) calls.
"""
if self._result_cache is not None:
return len(self._result_cache)
return self.query.get_count(using=self.db)
def get(self, *args, **kwargs):
"""
Performs the query and returns a single object matching the given
keyword arguments.
"""
clone = self.filter(*args, **kwargs)
if self.query.can_filter() and not self.query.distinct_fields:
clone = clone.order_by()
num = len(clone)
if num == 1:
return clone._result_cache[0]
if not num:
raise self.model.DoesNotExist(
"%s matching query does not exist." %
self.model._meta.object_name
)
raise self.model.MultipleObjectsReturned(
"get() returned more than one %s -- it returned %s!" %
(self.model._meta.object_name, num)
)
def create(self, **kwargs):
"""
Creates a new object with the given kwargs, saving it to the database
and returning the created object.
"""
obj = self.model(**kwargs)
self._for_write = True
obj.save(force_insert=True, using=self.db)
return obj
def _populate_pk_values(self, objs):
for obj in objs:
if obj.pk is None:
obj.pk = obj._meta.pk.get_pk_value_on_save(obj)
def bulk_create(self, objs, batch_size=None):
"""
Inserts each of the instances into the database. This does *not* call
save() on each of the instances, does not send any pre/post save
signals, and does not set the primary key attribute if it is an
autoincrement field (except if features.can_return_ids_from_bulk_insert=True).
Multi-table models are not supported.
"""
# When you bulk insert you don't get the primary keys back (if it's an
# autoincrement, except if can_return_ids_from_bulk_insert=True), so
# you can't insert into the child tables which references this. There
# are two workarounds:
# 1) This could be implemented if you didn't have an autoincrement pk
# 2) You could do it by doing O(n) normal inserts into the parent
# tables to get the primary keys back and then doing a single bulk
# insert into the childmost table.
# We currently set the primary keys on the objects when using
# PostgreSQL via the RETURNING ID clause. It should be possible for
# Oracle as well, but the semantics for extracting the primary keys is
# trickier so it's not done yet.
assert batch_size is None or batch_size > 0
        # Check that the parents share the same concrete model with our
        # model to detect the inheritance pattern ConcreteGrandParent ->
# MultiTableParent -> ProxyChild. Simply checking self.model._meta.proxy
# would not identify that case as involving multiple tables.
for parent in self.model._meta.get_parent_list():
if parent._meta.concrete_model is not self.model._meta.concrete_model:
raise ValueError("Can't bulk create a multi-table inherited model")
if not objs:
return objs
self._for_write = True
connection = connections[self.db]
fields = self.model._meta.concrete_fields
objs = list(objs)
self._populate_pk_values(objs)
with transaction.atomic(using=self.db, savepoint=False):
objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
if objs_with_pk:
self._batched_insert(objs_with_pk, fields, batch_size)
if objs_without_pk:
fields = [f for f in fields if not isinstance(f, AutoField)]
ids = self._batched_insert(objs_without_pk, fields, batch_size)
if connection.features.can_return_ids_from_bulk_insert:
assert len(ids) == len(objs_without_pk)
for obj_without_pk, pk in zip(objs_without_pk, ids):
obj_without_pk.pk = pk
obj_without_pk._state.adding = False
obj_without_pk._state.db = self.db
return objs
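    # Illustrative usage (comment-only sketch; `Entry` is a placeholder
    # model):
    #     Entry.objects.bulk_create(
    #         [Entry(headline='a'), Entry(headline='b')], batch_size=100)
    # No save() calls or pre/post_save signals are issued, and primary keys
    # are only filled in when the backend supports
    # can_return_ids_from_bulk_insert (e.g. PostgreSQL).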
def get_or_create(self, defaults=None, **kwargs):
"""
Looks up an object with the given kwargs, creating one if necessary.
Returns a tuple of (object, created), where created is a boolean
specifying whether an object was created.
"""
lookup, params = self._extract_model_params(defaults, **kwargs)
# The get() needs to be targeted at the write database in order
# to avoid potential transaction consistency problems.
self._for_write = True
try:
return self.get(**lookup), False
except self.model.DoesNotExist:
return self._create_object_from_params(lookup, params)
def update_or_create(self, defaults=None, **kwargs):
"""
Looks up an object with the given kwargs, updating one with defaults
if it exists, otherwise creates a new one.
Returns a tuple (object, created), where created is a boolean
specifying whether an object was created.
"""
defaults = defaults or {}
lookup, params = self._extract_model_params(defaults, **kwargs)
self._for_write = True
with transaction.atomic(using=self.db):
try:
obj = self.select_for_update().get(**lookup)
except self.model.DoesNotExist:
obj, created = self._create_object_from_params(lookup, params)
if created:
return obj, created
for k, v in defaults.items():
setattr(obj, k, v() if callable(v) else v)
obj.save(using=self.db)
return obj, False
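    # Illustrative usage (comment-only sketch; `Person` is a placeholder
    # model and date comes from datetime):
    #     person, created = Person.objects.get_or_create(
    #         first_name='John', last_name='Lennon',
    #         defaults={'birthday': date(1940, 10, 9)})
    #     person, created = Person.objects.update_or_create(
    #         first_name='John', last_name='Lennon',
    #         defaults={'first_name': 'Bob'})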
def _create_object_from_params(self, lookup, params):
"""
Tries to create an object using passed params.
Used by get_or_create and update_or_create
"""
try:
with transaction.atomic(using=self.db):
params = {k: v() if callable(v) else v for k, v in params.items()}
obj = self.create(**params)
return obj, True
except IntegrityError:
exc_info = sys.exc_info()
try:
return self.get(**lookup), False
except self.model.DoesNotExist:
pass
raise exc_info[0](exc_info[1]).with_traceback(exc_info[2])
def _extract_model_params(self, defaults, **kwargs):
"""
Prepares `lookup` (kwargs that are valid model attributes), `params`
(for creating a model instance) based on given kwargs; for use by
get_or_create and update_or_create.
"""
defaults = defaults or {}
lookup = kwargs.copy()
for f in self.model._meta.fields:
if f.attname in lookup:
lookup[f.name] = lookup.pop(f.attname)
params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k}
params.update(defaults)
invalid_params = []
for param in params:
try:
self.model._meta.get_field(param)
except exceptions.FieldDoesNotExist:
if param != 'pk': # It's okay to use a model's pk property.
invalid_params.append(param)
if invalid_params:
raise exceptions.FieldError(
"Invalid field name(s) for model %s: '%s'." % (
self.model._meta.object_name,
"', '".join(sorted(invalid_params)),
))
return lookup, params
def _earliest_or_latest(self, field_name=None, direction="-"):
"""
Returns the latest object, according to the model's
'get_latest_by' option or optional given field_name.
"""
order_by = field_name or getattr(self.model._meta, 'get_latest_by')
assert bool(order_by), "earliest() and latest() require either a "\
"field_name parameter or 'get_latest_by' in the model"
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken."
obj = self._clone()
obj.query.set_limits(high=1)
obj.query.clear_ordering(force_empty=True)
obj.query.add_ordering('%s%s' % (direction, order_by))
return obj.get()
def earliest(self, field_name=None):
return self._earliest_or_latest(field_name=field_name, direction="")
def latest(self, field_name=None):
return self._earliest_or_latest(field_name=field_name, direction="-")
def first(self):
"""
        Returns the first object of a query, or None if no match is found.
"""
objects = list((self if self.ordered else self.order_by('pk'))[:1])
if objects:
return objects[0]
return None
def last(self):
"""
        Returns the last object of a query, or None if no match is found.
"""
objects = list((self.reverse() if self.ordered else self.order_by('-pk'))[:1])
if objects:
return objects[0]
return None
def in_bulk(self, id_list=None):
"""
Returns a dictionary mapping each of the given IDs to the object with
that ID. If `id_list` isn't provided, the entire QuerySet is evaluated.
"""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with in_bulk"
if id_list is not None:
if not id_list:
return {}
qs = self.filter(pk__in=id_list).order_by()
else:
qs = self._clone()
return {obj._get_pk_val(): obj for obj in qs}
def delete(self):
"""
Deletes the records in the current QuerySet.
"""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with delete."
if self._fields is not None:
raise TypeError("Cannot call delete() after .values() or .values_list()")
del_query = self._clone()
# The delete is actually 2 queries - one to find related objects,
# and one to delete. Make sure that the discovery of related
# objects is performed on the same database as the deletion.
del_query._for_write = True
# Disable non-supported fields.
del_query.query.select_for_update = False
del_query.query.select_related = False
del_query.query.clear_ordering(force_empty=True)
collector = Collector(using=del_query.db)
collector.collect(del_query)
deleted, _rows_count = collector.delete()
# Clear the result cache, in case this QuerySet gets reused.
self._result_cache = None
return deleted, _rows_count
delete.alters_data = True
delete.queryset_only = True
def _raw_delete(self, using):
"""
Deletes objects found from the given queryset in single direct SQL
query. No signals are sent, and there is no protection for cascades.
"""
return sql.DeleteQuery(self.model).delete_qs(self, using)
_raw_delete.alters_data = True
def update(self, **kwargs):
"""
Updates all elements in the current QuerySet, setting all the given
fields to the appropriate values.
"""
assert self.query.can_filter(), \
"Cannot update a query once a slice has been taken."
self._for_write = True
query = self.query.clone(sql.UpdateQuery)
query.add_update_values(kwargs)
# Clear any annotations so that they won't be present in subqueries.
query._annotations = None
with transaction.atomic(using=self.db, savepoint=False):
rows = query.get_compiler(self.db).execute_sql(CURSOR)
self._result_cache = None
return rows
update.alters_data = True
def _update(self, values):
"""
A version of update that accepts field objects instead of field names.
Used primarily for model saving and not intended for use by general
code (it requires too much poking around at model internals to be
useful at that level).
"""
assert self.query.can_filter(), \
"Cannot update a query once a slice has been taken."
query = self.query.clone(sql.UpdateQuery)
query.add_update_fields(values)
self._result_cache = None
return query.get_compiler(self.db).execute_sql(CURSOR)
_update.alters_data = True
_update.queryset_only = False
def exists(self):
if self._result_cache is None:
return self.query.has_results(using=self.db)
return bool(self._result_cache)
def _prefetch_related_objects(self):
# This method can only be called once the result cache has been filled.
prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
self._prefetch_done = True
##################################################
# PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
##################################################
def raw(self, raw_query, params=None, translations=None, using=None):
if using is None:
using = self.db
return RawQuerySet(raw_query, model=self.model, params=params, translations=translations, using=using)
def _values(self, *fields, **expressions):
clone = self._clone()
if expressions:
clone = clone.annotate(**expressions)
clone._fields = fields
clone.query.set_values(fields)
return clone
def values(self, *fields, **expressions):
fields += tuple(expressions)
clone = self._values(*fields, **expressions)
clone._iterable_class = ValuesIterable
return clone
def values_list(self, *fields, flat=False):
if flat and len(fields) > 1:
raise TypeError("'flat' is not valid when values_list is called with more than one field.")
_fields = []
expressions = {}
for field in fields:
if hasattr(field, 'resolve_expression'):
field_id = str(id(field))
expressions[field_id] = field
_fields.append(field_id)
else:
_fields.append(field)
clone = self._values(*_fields, **expressions)
clone._iterable_class = FlatValuesListIterable if flat else ValuesListIterable
return clone
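    # Illustrative usage (comment-only sketch; `Entry` is a placeholder
    # model):
    #     Entry.objects.values('id', 'headline')
    #         -> dicts such as {'id': 1, 'headline': '...'}
    #     Entry.objects.values_list('id', flat=True)
    #         -> single values such as 1, 2, 3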
def dates(self, field_name, kind, order='ASC'):
"""
Returns a list of date objects representing all available dates for
the given field_name, scoped to 'kind'.
"""
assert kind in ("year", "month", "day"), \
"'kind' must be one of 'year', 'month' or 'day'."
assert order in ('ASC', 'DESC'), \
"'order' must be either 'ASC' or 'DESC'."
return self.annotate(
datefield=Trunc(field_name, kind, output_field=DateField()),
plain_field=F(field_name)
).values_list(
'datefield', flat=True
).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datefield')
def datetimes(self, field_name, kind, order='ASC', tzinfo=None):
"""
Returns a list of datetime objects representing all available
datetimes for the given field_name, scoped to 'kind'.
"""
assert kind in ("year", "month", "day", "hour", "minute", "second"), \
"'kind' must be one of 'year', 'month', 'day', 'hour', 'minute' or 'second'."
assert order in ('ASC', 'DESC'), \
"'order' must be either 'ASC' or 'DESC'."
if settings.USE_TZ:
if tzinfo is None:
tzinfo = timezone.get_current_timezone()
else:
tzinfo = None
return self.annotate(
datetimefield=Trunc(field_name, kind, output_field=DateTimeField(), tzinfo=tzinfo),
plain_field=F(field_name)
).values_list(
'datetimefield', flat=True
).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datetimefield')
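    # Illustrative usage (comment-only sketch; `Entry` is a placeholder
    # model):
    #     Entry.objects.dates('pub_date', 'month')
    #     Entry.objects.datetimes('created', 'hour', order='DESC')
    # Both build on Trunc() as shown above and yield distinct, truncated
    # date/datetime objects via a flat values_list.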
def none(self):
"""
Returns an empty QuerySet.
"""
clone = self._clone()
clone.query.set_empty()
return clone
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
def all(self):
"""
Returns a new QuerySet that is a copy of the current one. This allows a
QuerySet to proxy for a model manager in some cases.
"""
return self._clone()
def filter(self, *args, **kwargs):
"""
Returns a new QuerySet instance with the args ANDed to the existing
set.
"""
return self._filter_or_exclude(False, *args, **kwargs)
def exclude(self, *args, **kwargs):
"""
Returns a new QuerySet instance with NOT (args) ANDed to the existing
set.
"""
return self._filter_or_exclude(True, *args, **kwargs)
def _filter_or_exclude(self, negate, *args, **kwargs):
if args or kwargs:
assert self.query.can_filter(), \
"Cannot filter a query once a slice has been taken."
clone = self._clone()
if negate:
clone.query.add_q(~Q(*args, **kwargs))
else:
clone.query.add_q(Q(*args, **kwargs))
return clone
def complex_filter(self, filter_obj):
"""
Returns a new QuerySet instance with filter_obj added to the filters.
filter_obj can be a Q object (or anything with an add_to_query()
method) or a dictionary of keyword lookup arguments.
This exists to support framework features such as 'limit_choices_to',
and usually it will be more natural to use other methods.
"""
if isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query'):
clone = self._clone()
clone.query.add_q(filter_obj)
return clone
else:
return self._filter_or_exclude(None, **filter_obj)
def _combinator_query(self, combinator, *other_qs, all=False):
# Clone the query to inherit the select list and everything
clone = self._clone()
# Clear limits and ordering so they can be reapplied
clone.query.clear_ordering(True)
clone.query.clear_limits()
clone.query.combined_queries = (self.query,) + tuple(qs.query for qs in other_qs)
clone.query.combinator = combinator
clone.query.combinator_all = all
return clone
def union(self, *other_qs, all=False):
return self._combinator_query('union', *other_qs, all=all)
def intersection(self, *other_qs):
return self._combinator_query('intersection', *other_qs)
def difference(self, *other_qs):
return self._combinator_query('difference', *other_qs)
def select_for_update(self, nowait=False, skip_locked=False):
"""
Returns a new QuerySet instance that will select objects with a
FOR UPDATE lock.
"""
if nowait and skip_locked:
raise ValueError('The nowait option cannot be used with skip_locked.')
obj = self._clone()
obj._for_write = True
obj.query.select_for_update = True
obj.query.select_for_update_nowait = nowait
obj.query.select_for_update_skip_locked = skip_locked
return obj
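    # Illustrative usage (comment-only sketch; `Account` is a placeholder
    # model and the row lock is only held while inside a transaction):
    #     with transaction.atomic():
    #         account = Account.objects.select_for_update().get(pk=some_pk)
    #         account.balance -= amount
    #         account.save()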
def select_related(self, *fields):
"""
Returns a new QuerySet instance that will select related objects.
If fields are specified, they must be ForeignKey fields and only those
related objects are included in the selection.
If select_related(None) is called, the list is cleared.
"""
if self._fields is not None:
raise TypeError("Cannot call select_related() after .values() or .values_list()")
obj = self._clone()
if fields == (None,):
obj.query.select_related = False
elif fields:
obj.query.add_select_related(fields)
else:
obj.query.select_related = True
return obj
def prefetch_related(self, *lookups):
"""
Returns a new QuerySet instance that will prefetch the specified
Many-To-One and Many-To-Many related objects when the QuerySet is
evaluated.
When prefetch_related() is called more than once, the list of lookups to
prefetch is appended to. If prefetch_related(None) is called, the list
is cleared.
"""
clone = self._clone()
if lookups == (None,):
clone._prefetch_related_lookups = ()
else:
clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups
return clone
def annotate(self, *args, **kwargs):
"""
Return a query set in which the returned objects have been annotated
with extra data or aggregations.
"""
annotations = OrderedDict() # To preserve ordering of args
for arg in args:
# The default_alias property may raise a TypeError, so we use
# a try/except construct rather than hasattr in order to remain
# consistent between PY2 and PY3 (hasattr would swallow
# the TypeError on PY2).
try:
if arg.default_alias in kwargs:
raise ValueError("The named annotation '%s' conflicts with the "
"default name for another annotation."
% arg.default_alias)
except (AttributeError, TypeError):
raise TypeError("Complex annotations require an alias")
annotations[arg.default_alias] = arg
annotations.update(kwargs)
clone = self._clone()
names = self._fields
if names is None:
names = {f.name for f in self.model._meta.get_fields()}
for alias, annotation in annotations.items():
if alias in names:
raise ValueError("The annotation '%s' conflicts with a field on "
"the model." % alias)
clone.query.add_annotation(annotation, alias, is_summary=False)
for alias, annotation in clone.query.annotations.items():
if alias in annotations and annotation.contains_aggregate:
if clone._fields is None:
clone.query.group_by = True
else:
clone.query.set_group_by()
break
return clone
def order_by(self, *field_names):
"""
Returns a new QuerySet instance with the ordering changed.
"""
assert self.query.can_filter(), \
"Cannot reorder a query once a slice has been taken."
obj = self._clone()
obj.query.clear_ordering(force_empty=False)
obj.query.add_ordering(*field_names)
return obj
def distinct(self, *field_names):
"""
Returns a new QuerySet instance that will select only distinct results.
"""
assert self.query.can_filter(), \
"Cannot create distinct fields once a slice has been taken."
obj = self._clone()
obj.query.add_distinct_fields(*field_names)
return obj
def extra(self, select=None, where=None, params=None, tables=None,
order_by=None, select_params=None):
"""
Adds extra SQL fragments to the query.
"""
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken"
clone = self._clone()
clone.query.add_extra(select, select_params, where, params, tables, order_by)
return clone
def reverse(self):
"""
Reverses the ordering of the QuerySet.
"""
clone = self._clone()
clone.query.standard_ordering = not clone.query.standard_ordering
return clone
def defer(self, *fields):
"""
Defers the loading of data for certain fields until they are accessed.
The set of fields to defer is added to any existing set of deferred
fields. The only exception to this is if None is passed in as the only
parameter, in which case all deferrals are removed (None acts as a
reset option).
"""
if self._fields is not None:
raise TypeError("Cannot call defer() after .values() or .values_list()")
clone = self._clone()
if fields == (None,):
clone.query.clear_deferred_loading()
else:
clone.query.add_deferred_loading(fields)
return clone
def only(self, *fields):
"""
Essentially, the opposite of defer. Only the fields passed into this
method and that are not already specified as deferred are loaded
immediately when the queryset is evaluated.
"""
if self._fields is not None:
raise TypeError("Cannot call only() after .values() or .values_list()")
if fields == (None,):
# Can only pass None to defer(), not only(), as the rest option.
# That won't stop people trying to do this, so let's be explicit.
raise TypeError("Cannot pass None as an argument to only().")
clone = self._clone()
clone.query.add_immediate_loading(fields)
return clone
def using(self, alias):
"""
Selects which database this QuerySet should execute its query against.
"""
clone = self._clone()
clone._db = alias
return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
@property
def ordered(self):
"""
Returns True if the QuerySet is ordered -- i.e. has an order_by()
clause or a default ordering on the model.
"""
if self.query.extra_order_by or self.query.order_by:
return True
elif self.query.default_ordering and self.query.get_meta().ordering:
return True
else:
return False
@property
def db(self):
"Return the database that will be used if this query is executed now"
if self._for_write:
return self._db or router.db_for_write(self.model, **self._hints)
return self._db or router.db_for_read(self.model, **self._hints)
###################
# PRIVATE METHODS #
###################
def _insert(self, objs, fields, return_id=False, raw=False, using=None):
"""
Inserts a new record for the given model. This provides an interface to
the InsertQuery class and is how Model.save() is implemented.
"""
self._for_write = True
if using is None:
using = self.db
query = sql.InsertQuery(self.model)
query.insert_values(fields, objs, raw=raw)
return query.get_compiler(using=using).execute_sql(return_id)
_insert.alters_data = True
_insert.queryset_only = False
def _batched_insert(self, objs, fields, batch_size):
"""
        Helper method for bulk_create() to insert objs one batch at a time.
        Splits objs into batches of at most batch_size and inserts each
        batch with a single query, returning any ids the backend reports.
"""
if not objs:
return
ops = connections[self.db].ops
batch_size = (batch_size or max(ops.bulk_batch_size(fields, objs), 1))
inserted_ids = []
for item in [objs[i:i + batch_size] for i in range(0, len(objs), batch_size)]:
if connections[self.db].features.can_return_ids_from_bulk_insert:
inserted_id = self._insert(item, fields=fields, using=self.db, return_id=True)
if isinstance(inserted_id, list):
inserted_ids.extend(inserted_id)
else:
inserted_ids.append(inserted_id)
else:
self._insert(item, fields=fields, using=self.db)
return inserted_ids
def _clone(self, **kwargs):
query = self.query.clone()
if self._sticky_filter:
query.filter_is_sticky = True
clone = self.__class__(model=self.model, query=query, using=self._db, hints=self._hints)
clone._for_write = self._for_write
clone._prefetch_related_lookups = self._prefetch_related_lookups
clone._known_related_objects = self._known_related_objects
clone._iterable_class = self._iterable_class
clone._fields = self._fields
clone.__dict__.update(kwargs)
return clone
def _fetch_all(self):
if self._result_cache is None:
self._result_cache = list(self._iterable_class(self))
if self._prefetch_related_lookups and not self._prefetch_done:
self._prefetch_related_objects()
def _next_is_sticky(self):
"""
Indicates that the next filter call and the one following that should
be treated as a single filter. This is only important when it comes to
determining when to reuse tables for many-to-many filters. Required so
that we can filter naturally on the results of related managers.
This doesn't return a clone of the current QuerySet (it returns
"self"). The method is only used internally and should be immediately
followed by a filter() that does create a clone.
"""
self._sticky_filter = True
return self
def _merge_sanity_check(self, other):
"""
Checks that we are merging two comparable QuerySet classes.
"""
if self._fields is not None and (
set(self.query.values_select) != set(other.query.values_select) or
set(self.query.extra_select) != set(other.query.extra_select) or
set(self.query.annotation_select) != set(other.query.annotation_select)):
raise TypeError(
"Merging '%s' classes must involve the same values in each case."
% self.__class__.__name__
)
def _merge_known_related_objects(self, other):
"""
Keep track of all known related objects from either QuerySet instance.
"""
for field, objects in other._known_related_objects.items():
self._known_related_objects.setdefault(field, {}).update(objects)
def _prepare_as_filter_value(self):
if self._fields is None:
queryset = self.values('pk')
queryset.query._forced_pk = True
else:
# values() queryset can only be used as nested queries
# if they are set up to select only a single field.
if len(self._fields) > 1:
raise TypeError('Cannot use multi-field values as a filter value.')
queryset = self._clone()
return queryset.query.as_subquery_filter(queryset._db)
def _add_hints(self, **hints):
"""
Update hinting information for later use by Routers
"""
# If there is any hinting information, add it to what we already know.
# If we have a new hint for an existing key, overwrite with the new value.
self._hints.update(hints)
def _has_filters(self):
"""
Checks if this QuerySet has any filtering going on. Note that this
        isn't equivalent to checking if all objects are present in results,
for example qs[1:]._has_filters() -> False.
"""
return self.query.has_filters()
class InstanceCheckMeta(type):
def __instancecheck__(self, instance):
return isinstance(instance, QuerySet) and instance.query.is_empty()
class EmptyQuerySet(metaclass=InstanceCheckMeta):
"""
Marker class usable for checking if a queryset is empty by .none():
isinstance(qs.none(), EmptyQuerySet) -> True
"""
def __init__(self, *args, **kwargs):
raise TypeError("EmptyQuerySet can't be instantiated")
class RawQuerySet:
"""
Provides an iterator which converts the results of raw SQL queries into
annotated model instances.
"""
def __init__(self, raw_query, model=None, query=None, params=None,
translations=None, using=None, hints=None):
self.raw_query = raw_query
self.model = model
self._db = using
self._hints = hints or {}
self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
self.params = params or ()
self.translations = translations or {}
def resolve_model_init_order(self):
"""
Resolve the init field names and value positions
"""
model_init_fields = [f for f in self.model._meta.fields if f.column in self.columns]
annotation_fields = [(column, pos) for pos, column in enumerate(self.columns)
if column not in self.model_fields]
model_init_order = [self.columns.index(f.column) for f in model_init_fields]
model_init_names = [f.attname for f in model_init_fields]
return model_init_names, model_init_order, annotation_fields
def __iter__(self):
# Cache some things for performance reasons outside the loop.
db = self.db
compiler = connections[db].ops.compiler('SQLCompiler')(
self.query, connections[db], db
)
query = iter(self.query)
try:
model_init_names, model_init_pos, annotation_fields = self.resolve_model_init_order()
# Find out which model's fields are not present in the query.
skip = set()
for field in self.model._meta.fields:
if field.attname not in model_init_names:
skip.add(field.attname)
if skip:
if self.model._meta.pk.attname in skip:
raise InvalidQuery('Raw query must include the primary key')
model_cls = self.model
fields = [self.model_fields.get(c) for c in self.columns]
converters = compiler.get_converters([
f.get_col(f.model._meta.db_table) if f else None for f in fields
])
for values in query:
if converters:
values = compiler.apply_converters(values, converters)
# Associate fields to values
model_init_values = [values[pos] for pos in model_init_pos]
instance = model_cls.from_db(db, model_init_names, model_init_values)
if annotation_fields:
for column, pos in annotation_fields:
setattr(instance, column, values[pos])
yield instance
finally:
# Done iterating the Query. If it has its own cursor, close it.
if hasattr(self.query, 'cursor') and self.query.cursor:
self.query.cursor.close()
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.query)
def __getitem__(self, k):
return list(self)[k]
@property
def db(self):
"Return the database that will be used if this query is executed now"
return self._db or router.db_for_read(self.model, **self._hints)
def using(self, alias):
"""
Selects which database this Raw QuerySet should execute its query against.
"""
return RawQuerySet(
self.raw_query, model=self.model,
query=self.query.clone(using=alias),
params=self.params, translations=self.translations,
using=alias,
)
@cached_property
def columns(self):
"""
A list of model field names in the order they'll appear in the
query results.
"""
columns = self.query.get_columns()
# Adjust any column names which don't match field names
for (query_name, model_name) in self.translations.items():
try:
index = columns.index(query_name)
columns[index] = model_name
except ValueError:
# Ignore translations for non-existent column names
pass
return columns
@cached_property
def model_fields(self):
"""
A dict mapping column names to model field names.
"""
converter = connections[self.db].introspection.table_name_converter
model_fields = {}
for field in self.model._meta.fields:
name, column = field.get_attname_column()
model_fields[converter(column)] = field
return model_fields
class Prefetch:
def __init__(self, lookup, queryset=None, to_attr=None):
# `prefetch_through` is the path we traverse to perform the prefetch.
self.prefetch_through = lookup
# `prefetch_to` is the path to the attribute that stores the result.
self.prefetch_to = lookup
if queryset is not None and queryset._iterable_class is not ModelIterable:
raise ValueError('Prefetch querysets cannot use values().')
if to_attr:
self.prefetch_to = LOOKUP_SEP.join(lookup.split(LOOKUP_SEP)[:-1] + [to_attr])
self.queryset = queryset
self.to_attr = to_attr
def __getstate__(self):
obj_dict = self.__dict__.copy()
if self.queryset is not None:
# Prevent the QuerySet from being evaluated
obj_dict['queryset'] = self.queryset._clone(
_result_cache=[],
_prefetch_done=True,
)
return obj_dict
def add_prefix(self, prefix):
self.prefetch_through = LOOKUP_SEP.join([prefix, self.prefetch_through])
self.prefetch_to = LOOKUP_SEP.join([prefix, self.prefetch_to])
def get_current_prefetch_to(self, level):
return LOOKUP_SEP.join(self.prefetch_to.split(LOOKUP_SEP)[:level + 1])
def get_current_to_attr(self, level):
parts = self.prefetch_to.split(LOOKUP_SEP)
to_attr = parts[level]
as_attr = self.to_attr and level == len(parts) - 1
return to_attr, as_attr
def get_current_queryset(self, level):
if self.get_current_prefetch_to(level) == self.prefetch_to:
return self.queryset
return None
def __eq__(self, other):
if isinstance(other, Prefetch):
return self.prefetch_to == other.prefetch_to
return False
def __hash__(self):
return hash(self.__class__) ^ hash(self.prefetch_to)
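# Illustrative usage of Prefetch (comment-only sketch; `Question` and
# `Choice` are placeholder models):
#     Question.objects.prefetch_related(
#         Prefetch('choice_set',
#                  queryset=Choice.objects.filter(votes__gt=0),
#                  to_attr='popular_choices'))
# Each Question instance then carries `popular_choices` as a plain list,
# while the default `choice_set` manager cache is left untouched.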
def normalize_prefetch_lookups(lookups, prefix=None):
"""
    Helper function that normalizes lookups into Prefetch objects.
"""
ret = []
for lookup in lookups:
if not isinstance(lookup, Prefetch):
lookup = Prefetch(lookup)
if prefix:
lookup.add_prefix(prefix)
ret.append(lookup)
return ret
def prefetch_related_objects(model_instances, *related_lookups):
"""
Populate prefetched object caches for a list of model instances based on
the lookups/Prefetch instances given.
"""
if len(model_instances) == 0:
return # nothing to do
related_lookups = normalize_prefetch_lookups(related_lookups)
# We need to be able to dynamically add to the list of prefetch_related
    # lookups that we look up (see below). So we need some bookkeeping to
# ensure we don't do duplicate work.
done_queries = {} # dictionary of things like 'foo__bar': [results]
auto_lookups = set() # we add to this as we go through.
followed_descriptors = set() # recursion protection
all_lookups = deque(related_lookups)
while all_lookups:
lookup = all_lookups.popleft()
if lookup.prefetch_to in done_queries:
if lookup.queryset:
raise ValueError("'%s' lookup was already seen with a different queryset. "
"You may need to adjust the ordering of your lookups." % lookup.prefetch_to)
continue
# Top level, the list of objects to decorate is the result cache
# from the primary QuerySet. It won't be for deeper levels.
obj_list = model_instances
through_attrs = lookup.prefetch_through.split(LOOKUP_SEP)
for level, through_attr in enumerate(through_attrs):
# Prepare main instances
if len(obj_list) == 0:
break
prefetch_to = lookup.get_current_prefetch_to(level)
if prefetch_to in done_queries:
# Skip any prefetching, and any object preparation
obj_list = done_queries[prefetch_to]
continue
# Prepare objects:
good_objects = True
for obj in obj_list:
# Since prefetching can re-use instances, it is possible to have
# the same instance multiple times in obj_list, so obj might
# already be prepared.
if not hasattr(obj, '_prefetched_objects_cache'):
try:
obj._prefetched_objects_cache = {}
except (AttributeError, TypeError):
# Must be an immutable object from
# values_list(flat=True), for example (TypeError) or
# a QuerySet subclass that isn't returning Model
# instances (AttributeError), either in Django or a 3rd
# party. prefetch_related() doesn't make sense, so quit.
good_objects = False
break
if not good_objects:
break
# Descend down tree
# We assume that objects retrieved are homogeneous (which is the premise
# of prefetch_related), so what applies to first object applies to all.
first_obj = obj_list[0]
to_attr = lookup.get_current_to_attr(level)[0]
prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(first_obj, through_attr, to_attr)
if not attr_found:
raise AttributeError("Cannot find '%s' on %s object, '%s' is an invalid "
"parameter to prefetch_related()" %
(through_attr, first_obj.__class__.__name__, lookup.prefetch_through))
if level == len(through_attrs) - 1 and prefetcher is None:
# Last one, this *must* resolve to something that supports
# prefetching, otherwise there is no point adding it and the
# developer asking for it has made a mistake.
raise ValueError("'%s' does not resolve to an item that supports "
"prefetching - this is an invalid parameter to "
"prefetch_related()." % lookup.prefetch_through)
if prefetcher is not None and not is_fetched:
obj_list, additional_lookups = prefetch_one_level(obj_list, prefetcher, lookup, level)
# We need to ensure we don't keep adding lookups from the
# same relationships to stop infinite recursion. So, if we
# are already on an automatically added lookup, don't add
# the new lookups from relationships we've seen already.
if not (lookup in auto_lookups and descriptor in followed_descriptors):
done_queries[prefetch_to] = obj_list
new_lookups = normalize_prefetch_lookups(additional_lookups, prefetch_to)
auto_lookups.update(new_lookups)
all_lookups.extendleft(new_lookups)
followed_descriptors.add(descriptor)
else:
# Either a singly related object that has already been fetched
# (e.g. via select_related), or hopefully some other property
# that doesn't support prefetching but needs to be traversed.
# We replace the current list of parent objects with the list
# of related objects, filtering out empty or missing values so
# that we can continue with nullable or reverse relations.
new_obj_list = []
for obj in obj_list:
try:
new_obj = getattr(obj, through_attr)
except exceptions.ObjectDoesNotExist:
continue
if new_obj is None:
continue
# We special-case `list` rather than something more generic
# like `Iterable` because we don't want to accidentally match
# user models that define __iter__.
if isinstance(new_obj, list):
new_obj_list.extend(new_obj)
else:
new_obj_list.append(new_obj)
obj_list = new_obj_list
def get_prefetcher(instance, through_attr, to_attr):
"""
For the attribute 'through_attr' on the given instance, finds
an object that has a get_prefetch_queryset().
Returns a 4 tuple containing:
(the object with get_prefetch_queryset (or None),
the descriptor object representing this relationship (or None),
a boolean that is False if the attribute was not found at all,
a boolean that is True if the attribute has already been fetched)
"""
prefetcher = None
is_fetched = False
# For singly related objects, we have to avoid getting the attribute
# from the object, as this will trigger the query. So we first try
# on the class, in order to get the descriptor object.
rel_obj_descriptor = getattr(instance.__class__, through_attr, None)
if rel_obj_descriptor is None:
attr_found = hasattr(instance, through_attr)
else:
attr_found = True
if rel_obj_descriptor:
# singly related object, descriptor object has the
# get_prefetch_queryset() method.
if hasattr(rel_obj_descriptor, 'get_prefetch_queryset'):
prefetcher = rel_obj_descriptor
if rel_obj_descriptor.is_cached(instance):
is_fetched = True
else:
# descriptor doesn't support prefetching, so we go ahead and get
# the attribute on the instance rather than the class to
# support many related managers
rel_obj = getattr(instance, through_attr)
if hasattr(rel_obj, 'get_prefetch_queryset'):
prefetcher = rel_obj
if through_attr != to_attr:
# Special case cached_property instances because hasattr
# triggers attribute computation and assignment.
if isinstance(getattr(instance.__class__, to_attr, None), cached_property):
is_fetched = to_attr in instance.__dict__
else:
is_fetched = hasattr(instance, to_attr)
else:
is_fetched = through_attr in instance._prefetched_objects_cache
return prefetcher, rel_obj_descriptor, attr_found, is_fetched
def prefetch_one_level(instances, prefetcher, lookup, level):
"""
Helper function for prefetch_related_objects
Runs prefetches on all instances using the prefetcher object,
assigning results to relevant caches in instance.
The prefetched objects are returned, along with any additional
prefetches that must be done due to prefetch_related lookups
found from default managers.
"""
# prefetcher must have a method get_prefetch_queryset() which takes a list
# of instances, and returns a tuple:
# (queryset of instances of self.model that are related to passed in instances,
# callable that gets value to be matched for returned instances,
# callable that gets value to be matched for passed in instances,
# boolean that is True for singly related objects,
# cache name to assign to).
# The 'values to be matched' must be hashable as they will be used
# in a dictionary.
rel_qs, rel_obj_attr, instance_attr, single, cache_name = (
prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level)))
# We have to handle the possibility that the QuerySet we just got back
# contains some prefetch_related lookups. We don't want to trigger the
# prefetch_related functionality by evaluating the query. Rather, we need
# to merge in the prefetch_related lookups.
# Copy the lookups in case it is a Prefetch object which could be reused
# later (happens in nested prefetch_related).
additional_lookups = [
copy.copy(additional_lookup) for additional_lookup
in getattr(rel_qs, '_prefetch_related_lookups', ())
]
if additional_lookups:
# Don't need to clone because the manager should have given us a fresh
# instance, so we access an internal instead of using public interface
# for performance reasons.
rel_qs._prefetch_related_lookups = ()
all_related_objects = list(rel_qs)
rel_obj_cache = {}
for rel_obj in all_related_objects:
rel_attr_val = rel_obj_attr(rel_obj)
rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj)
to_attr, as_attr = lookup.get_current_to_attr(level)
# Make sure `to_attr` does not conflict with a field.
if as_attr and instances:
# We assume that objects retrieved are homogeneous (which is the premise
# of prefetch_related), so what applies to first object applies to all.
model = instances[0].__class__
try:
model._meta.get_field(to_attr)
except exceptions.FieldDoesNotExist:
pass
else:
msg = 'to_attr={} conflicts with a field on the {} model.'
raise ValueError(msg.format(to_attr, model.__name__))
# Whether or not we're prefetching the last part of the lookup.
leaf = len(lookup.prefetch_through.split(LOOKUP_SEP)) - 1 == level
for obj in instances:
instance_attr_val = instance_attr(obj)
vals = rel_obj_cache.get(instance_attr_val, [])
if single:
val = vals[0] if vals else None
to_attr = to_attr if as_attr else cache_name
setattr(obj, to_attr, val)
else:
if as_attr:
setattr(obj, to_attr, vals)
else:
manager = getattr(obj, to_attr)
if leaf and lookup.queryset is not None:
qs = manager._apply_rel_filters(lookup.queryset)
else:
qs = manager.get_queryset()
qs._result_cache = vals
# We don't want the individual qs doing prefetch_related now,
# since we have merged this into the current work.
qs._prefetch_done = True
obj._prefetched_objects_cache[cache_name] = qs
return all_related_objects, additional_lookups
class RelatedPopulator:
"""
RelatedPopulator is used for select_related() object instantiation.
The idea is that each select_related() model will be populated by a
different RelatedPopulator instance. The RelatedPopulator instances get
klass_info and select (computed in SQLCompiler) plus the used db as
input for initialization. That data is used to compute which columns
to use, how to instantiate the model, and how to populate the links
between the objects.
The actual creation of the objects is done in populate() method. This
method gets row and from_obj as input and populates the select_related()
model instance.
"""
def __init__(self, klass_info, select, db):
self.db = db
# Pre-compute needed attributes. The attributes are:
# - model_cls: the possibly deferred model class to instantiate
# - either:
# - cols_start, cols_end: usually the columns in the row are
# in the same order model_cls.__init__ expects them, so we
# can instantiate by model_cls(*row[cols_start:cols_end])
# - reorder_for_init: When select_related descends to a child
# class, then we want to reuse the already selected parent
# data. However, in this case the parent data isn't necessarily
# in the same order that Model.__init__ expects it to be, so
# we have to reorder the parent data. The reorder_for_init
# attribute contains a function used to reorder the field data
# in the order __init__ expects it.
# - pk_idx: the index of the primary key field in the reordered
# model data. Used to check if a related object exists at all.
# - init_list: the field attnames fetched from the database. For
# deferred models this isn't the same as all attnames of the
# model's fields.
# - related_populators: a list of RelatedPopulator instances if
# select_related() descends to related models from this model.
# - cache_name, reverse_cache_name: the names to use for setattr
# when assigning the fetched object to the from_obj. If the
# reverse_cache_name is set, then we also set the reverse link.
select_fields = klass_info['select_fields']
from_parent = klass_info['from_parent']
if not from_parent:
self.cols_start = select_fields[0]
self.cols_end = select_fields[-1] + 1
self.init_list = [
f[0].target.attname for f in select[self.cols_start:self.cols_end]
]
self.reorder_for_init = None
else:
model_init_attnames = [
f.attname for f in klass_info['model']._meta.concrete_fields
]
reorder_map = []
for idx in select_fields:
field = select[idx][0].target
init_pos = model_init_attnames.index(field.attname)
reorder_map.append((init_pos, field.attname, idx))
reorder_map.sort()
self.init_list = [v[1] for v in reorder_map]
pos_list = [row_pos for _, _, row_pos in reorder_map]
def reorder_for_init(row):
return [row[row_pos] for row_pos in pos_list]
self.reorder_for_init = reorder_for_init
self.model_cls = klass_info['model']
self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname)
self.related_populators = get_related_populators(klass_info, select, self.db)
field = klass_info['field']
reverse = klass_info['reverse']
self.reverse_cache_name = None
if reverse:
self.cache_name = field.remote_field.get_cache_name()
self.reverse_cache_name = field.get_cache_name()
else:
self.cache_name = field.get_cache_name()
if field.unique:
self.reverse_cache_name = field.remote_field.get_cache_name()
def populate(self, row, from_obj):
if self.reorder_for_init:
obj_data = self.reorder_for_init(row)
else:
obj_data = row[self.cols_start:self.cols_end]
if obj_data[self.pk_idx] is None:
obj = None
else:
obj = self.model_cls.from_db(self.db, self.init_list, obj_data)
if obj and self.related_populators:
for rel_iter in self.related_populators:
rel_iter.populate(row, obj)
setattr(from_obj, self.cache_name, obj)
if obj and self.reverse_cache_name:
setattr(obj, self.reverse_cache_name, from_obj)
def get_related_populators(klass_info, select, db):
iterators = []
related_klass_infos = klass_info.get('related_klass_infos', [])
for rel_klass_info in related_klass_infos:
rel_cls = RelatedPopulator(rel_klass_info, select, db)
iterators.append(rel_cls)
return iterators
| bsd-3-clause | 2,804,857,369,781,657,600 | 39.37551 | 115 | 0.589784 | false |
VerTiGoEtrex/spideTor | spideTor/Metafile.py | 1 | 7762 | # coding: utf-8
'''
Created on Jul 24, 2014
@author: Noah
'''
import bencode
import logging
import pprint
import os.path
log = logging.getLogger(__name__)
pp = pprint.PrettyPrinter(indent = 1, width = 80)
HASHLEN = 20
class Metafile:
'''
    Decodes the metadata stored in a .torrent metafile and presents a standard interface to access it.
    Note on the bencode library: everything is returned as UTF-8 byte strings, so you must .decode() them to get unicode back.
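
    Illustrative usage (the path is hypothetical):
        metafile = Metafile("example.torrent")
        name, pieces = metafile.getName(), metafile.getPieces()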
'''
def __init__(self, metafilepath):
self.metafilepath = metafilepath
with open(metafilepath, 'rb') as metafile:
encoded = metafile.read()
log.debug("Read metafile successfully")
log.debug("decoding bencoded data")
self.decoded = bencode.bdecode(encoded)
log.debug("decoded as {}".format(pp.pformat(self.decoded)))
if self.isSingleFileTorrent():
log.debug("metafile appears to be a single file metafile")
else:
log.debug("metafile appears to contain many files")
self.files = None
def __unicode__(self):
return self.metafilepath
def __str__(self):
return self.__unicode__().encode("utf-8")
def getMetafileFiles(self):
        if self.files is not None:
return self.files
self.files = []
if self.isSingleFileTorrent():
self.files.append(MetafileFile(self.getName(), self.decoded['info']['length']))
else:
for metadata in self.decoded['info']['files']:
self.files.append(MetafileFile(os.path.join(*(path.decode("utf-8") for path in metadata['path'])), metadata['length']))
return self.files
def getPieces(self):
hashes = self.getHashes()
log.debug("Number of pieces: {}".format(len(hashes)))
pieceLength = self.decoded['info']['piece length']
log.debug("Piece length: {}".format(pieceLength))
pieces = []
# Populate all of the constant-length pieces
metafileFiles = self.getMetafileFiles()
fileIterator = iter(metafileFiles)
currentFile = fileIterator.next()
currentPiecePosition = 0
currentFileReadPosition = 0
prevPiece = None
for pieceNumber in xrange(0, len(hashes)):
# Get files in this piece (similar to a merge)
# This is a list, because ordering matters
filesInPiece = []
# If this file ends inside this piece, then advance to the next one and add it too
#Piece ------XXXXX-----
#File --XXXXXX++++----
#AND ALSO
#Piece ------XXXXX-----
#File --XXXXXXXXX+++--
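            # Illustrative walk-through (hypothetical sizes): with 16 KiB pieces
            # and files A (10 KiB) and B (20 KiB), piece 0 takes all of A plus
            # the first 6 KiB of B; piece 1 then resumes B at offset 6 KiB for
            # its remaining 14 KiB.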
bytesReadInPiece = 0
while currentPiecePosition + currentFile.getSize() <= (pieceNumber + 1) * pieceLength:
currentPiecePosition += currentFile.getSize()
bytesRemainingInFile = currentFile.getSize() - currentFileReadPosition
filesInPiece.append(MetafileFileWithOffset(currentFile, currentFileReadPosition, bytesRemainingInFile, (currentFileReadPosition == 0)))
bytesReadInPiece += bytesRemainingInFile
currentFileReadPosition = 0
try:
currentFile = fileIterator.next()
except StopIteration, e:
# That was the last file. This should be the last piece, which is asserted later.
currentFile = None
break
if currentFile != None:
bytesToRead = min(pieceLength - bytesReadInPiece, currentFile.getSize() - currentFileReadPosition)
filesInPiece.append(MetafileFileWithOffset(currentFile, currentFileReadPosition, bytesToRead, False))
currentFileReadPosition += bytesToRead
elif not pieceNumber == len(hashes)-1 or len(filesInPiece) == 0: #Assert that this is the last piece
log.error("Ran out of files on piece {} / {}".format(pieceNumber, len(hashes)-1))
return
log.debug("Piece [{}/{}]: {} files".format(pieceNumber, len(hashes)-1, len(filesInPiece)))
pieceToInsert = Piece(pieceNumber, hashes[pieceNumber], pieceLength, filesInPiece)
# Setup linked list (for heapq updating)
pieceToInsert.setPrevPiece(prevPiece)
if prevPiece != None:
prevPiece.setNextPiece(pieceToInsert)
pieces.append(pieceToInsert)
prevPiece = pieceToInsert
return pieces
def getHashes(self):
allHashes = self.decoded['info']['pieces']
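        # 'pieces' is a flat byte string of concatenated 20-byte SHA-1 digests,
        # e.g. a torrent with 3 pieces yields a 60-byte string that the slice
        # below splits into 3 hashes of HASHLEN bytes each.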
return [allHashes[window:window+HASHLEN] for window in xrange(0, len(allHashes), HASHLEN)]
def getName(self):
return self.decoded['info']['name'].decode("utf-8")
def isSingleFileTorrent(self):
return 'length' in self.decoded['info']
def getMetafilePath(self):
return self.metafilepath
class Piece:
'''
Holds information about a "piece" in the metafile
'''
def __init__(self, pieceNumber, pieceHash, pieceLength, fileWithOffsetsInPiece):
self.pieceNumber = pieceNumber
self.pieceHash = pieceHash
self.pieceLength = pieceLength
self.fileWithOffsetsInPiece = fileWithOffsetsInPiece
self.nextPiece = None
self.prevPiece = None
def getPieceNumber(self):
return self.pieceNumber
def getFileWithOffsetsInPiece(self):
return self.fileWithOffsetsInPiece
def getHash(self):
return self.pieceHash
def getPieceLength(self):
return self.pieceLength
def oneFileInPiece(self):
return len(self.fileWithOffsetsInPiece) == 1
def getOneFileInPiece(self):
if self.oneFileInPiece():
return next(iter(self.fileWithOffsetsInPiece))
def setPrevPiece(self, prevPiece):
self.prevPiece = prevPiece
def setNextPiece(self, nextPiece):
self.nextPiece = nextPiece
def getPrevPiece(self):
return self.prevPiece
def getNextPiece(self):
return self.nextPiece
class MetafileFile:
'''
Holds more detailed information about a file within a metafile
'''
def __init__(self, file_path, size):
'''
Constructs a new MetafileFile object
'''
self.file_path = file_path
self.size = size
def __unicode__(self):
return self.getPath()
def __str__(self):
return self.__unicode__().encode("utf-8")
def getPath(self):
return self.file_path
def getSize(self):
return self.size
class MetafileFileWithOffset:
'''
Holds some additional information about a file as it relates to a piece
'''
def __init__(self, metafileFile, startOffset, readLength, entirelyInPiece):
self.metafileFile = metafileFile
self.startOffset = startOffset
self.readLength = readLength
self.entirelyInPiece = entirelyInPiece
def __str__(self):
return self.__unicode__().encode("utf-8")
def __unicode__(self):
return unicode(self.metafileFile)
def __repr__(self):
return "MFWO|" + self.__str__()
def getMetafileFile(self):
return self.metafileFile
def getStartOffset(self):
return self.startOffset
def getReadLength(self):
return self.readLength
def fileEntirelyInPiece(self):
return self.entirelyInPiece | apache-2.0 | -5,599,650,462,333,740,000 | 32.606061 | 151 | 0.598171 | false |
onepercentclub/onepercentclub-site | apps/projects/management/commands/cron_status_realised.py | 1 | 3907 | from datetime import datetime
from django.core.management.base import BaseCommand, CommandError
from django.utils.timezone import now
from django.utils.translation import ugettext as _
class Command(BaseCommand):
args = 'No arguments required'
help = 'Sets projects to "Done Incomplete" and task status to "Realised" when the deadline is passed'
def handle(self, *args, **options):
from apps.projects.models import Project
from bluebottle.bb_projects.models import ProjectPhase
from apps.tasks.models import Task
"""
Projects which have expired but have been funded will already have their status
set to done-complete so these can be ignored. We only need to update projects which
haven't been funded but have expired, or they have been overfunded and have expired.
"""
try:
done_incomplete_phase = ProjectPhase.objects.get(slug='done-incomplete')
self.stdout.write("Found ProjectPhase model with name 'Done Incomplete'")
except ProjectPhase.DoesNotExist:
raise CommandError("A ProjectPhase with name 'Done Incomplete' does not exist")
try:
done_complete_phase = ProjectPhase.objects.get(slug='done-complete')
self.stdout.write("Found ProjectPhase model with name 'Done Complete'")
except ProjectPhase.DoesNotExist:
raise CommandError("A ProjectPhase with name 'Done Complete' does not exist")
try:
campaign_phase = ProjectPhase.objects.get(slug='campaign')
self.stdout.write("Found ProjectPhase model with name 'Campaign'")
except ProjectPhase.DoesNotExist:
raise CommandError("A ProjectPhase with name 'Campaign' does not exist")
"""
Projects which have at least the funds asked, are still in campaign phase and have not expired
need the campaign funded date set to now.
FIXME: this action should be moved into the code where 'amount_needed' is calculated => when
the value is lte 0 then set campaign_funded.
"""
self.stdout.write("Checking Project funded and still running...")
Project.objects.filter(amount_needed__lte=0, status=campaign_phase, deadline__gt=now()).update(campaign_funded=now())
"""
Projects which have at least the funds asked, are still in campaign phase but have expired
need to be set to 'done complete' and the campaign ended date set to now.
Iterate over projects and save them one by one so the receivers get a signal
"""
self.stdout.write("Checking Project overfunded deadlines...")
for project in Project.objects.filter(amount_needed__lt=0, status=campaign_phase, deadline__lte=now()).all():
project.status = done_complete_phase
project.campaign_ended = now()
project.save()
"""
Projects which don't have the funds asked, are still in campaign phase but have expired
need to be set to 'done incomplete' and the campaign ended date set to now.
Iterate over projects and save them one by one so the receivers get a signal
"""
self.stdout.write("Checking Project unfunded deadlines...")
for project in Project.objects.filter(status=campaign_phase, deadline__lt=now()).all():
project.status = done_incomplete_phase
project.campaign_ended = now()
project.save()
"""
Iterate over tasks and save them one by one so the receivers get a signal
"""
self.stdout.write("Checking Task deadlines...\n\n")
for task in Task.objects.filter(status='in progress', deadline__lt=now()).all():
task.status = 'realized'
task.save()
self.stdout.write("Successfully updated the status of expired Project and Task models.\n\n") | bsd-3-clause | 7,871,289,710,018,549,000 | 49.102564 | 125 | 0.666752 | false |
Blazemeter/taurus | bzt/jmx/base.py | 1 | 55715 | """
Module holds base stuff regarding JMX format
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import os
import traceback
from cssselect import GenericTranslator
from lxml import etree
from urllib import parse
from bzt import TaurusInternalException, TaurusConfigError
from bzt.engine import Scenario
from bzt.utils import BetterDict, iteritems, numeric_types
from bzt.requests_model import has_variable_pattern
LOG = logging.getLogger("")
def try_convert(val, func=int, default=None):
if val is None:
res = val
    elif has_variable_pattern(val):  # it's a property...
if default is not None:
val = get_prop_default(val) or default
res = func(val)
else:
res = val
else:
res = func(val)
return res
def get_prop_default(val):
comma_ind = val.find(",")
comma_found = comma_ind > -1
is_property = val.startswith("${__property(") or val.startswith("${__P(")
if has_variable_pattern(val) and is_property and comma_found:
return val[comma_ind + 1: -2]
else:
return None
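# Illustrative examples (property names and default values are arbitrary):
#   get_prop_default("${__P(users,5)}")     -> "5"
#   try_convert("${__P(users,5)}", int, 1)  -> 5
#   try_convert("${__P(users)}", int, 1)    -> 1  (no inline default, fall back)
#   try_convert("7", int)                   -> 7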
def cond_int(val):
if isinstance(val, float):
return int(val)
return val
def cond_float(val, rounding=None):
if isinstance(val, numeric_types):
return round(float(val), rounding) if rounding is not None else float(val)
return val
class JMX(object):
"""
A class to manipulate and generate JMX test plans for JMeter
:param original: path to existing JMX to load. If it is None, then creates
empty test plan
"""
TEST_PLAN_SEL = "jmeterTestPlan>hashTree>hashTree"
THR_GROUP_SEL = TEST_PLAN_SEL + ">hashTree[type=tg]"
THR_TIMER = "kg.apc.jmeter.timers.VariableThroughputTimer"
SET_VAR_ACTION = "kg.apc.jmeter.control.sampler.SetVariablesAction"
def __init__(self, original=None, test_plan_name="BZT Generated Test Plan"):
self.log = logging.getLogger(self.__class__.__name__)
if original:
self.load(original)
else:
root = etree.Element("jmeterTestPlan")
self.tree = etree.ElementTree(root)
test_plan = etree.Element("TestPlan", guiclass="TestPlanGui",
testname=test_plan_name,
testclass="TestPlan", enabled="true")
htree = etree.Element("hashTree")
htree.append(test_plan)
htree.append(etree.Element("hashTree"))
self.append("jmeterTestPlan", htree)
element_prop = self._get_arguments_panel("TestPlan.user_defined_variables")
self.append("jmeterTestPlan>hashTree>TestPlan", element_prop)
def load(self, original):
"""
Load existing JMX file
:param original: JMX file path
:raise TaurusInternalException: in case of XML parsing error
"""
try:
self.tree = etree.ElementTree()
self.tree.parse(original)
except BaseException as exc:
msg = "XML parsing failed for file %s: %s"
raise TaurusInternalException(msg % (original, exc))
def get(self, selector):
"""
Returns tree elements by CSS selector
:type selector: str
:return:
"""
expression = GenericTranslator().css_to_xpath(selector)
nodes = self.tree.xpath(expression)
return nodes
def append(self, selector, node):
"""
Add node to container specified by selector. If multiple nodes will
match the selector, first of them will be used as container.
:param selector: CSS selector for container
:param node: Element instance to add
:raise TaurusInternalException: if container was not found
"""
container = self.get(selector)
if not len(container):
            msg = "Failed to find container element by selector: %s"
raise TaurusInternalException(msg % selector)
container[0].append(node)
def save(self, filename):
"""
Save JMX into file
:param filename:
"""
self.log.debug("Saving JMX to: %s", filename)
with open(filename, "wb") as fhd:
self.tree.write(fhd, pretty_print=True, encoding="UTF-8", xml_declaration=True)
@staticmethod
def _flag(flag_name, bool_value):
"""
Generates element for JMX flag node
:param flag_name:
:param bool_value:
:return:
"""
elm = etree.Element(flag_name)
elm.text = "true" if bool_value else "false"
return elm
@staticmethod
def __jtl_writer(filename, label, flags):
"""
Generates JTL writer
:param filename:
:return:
"""
jtl = etree.Element("stringProp", {"name": "filename"})
jtl.text = filename
name = etree.Element("name")
name.text = "saveConfig"
value = etree.Element("value")
value.set("class", "SampleSaveConfiguration")
for key, val in iteritems(flags):
value.append(JMX._flag(key, val))
obj_prop = etree.Element("objProp")
obj_prop.append(name)
obj_prop.append(value)
listener = etree.Element("ResultCollector",
testname=label,
testclass="ResultCollector",
guiclass="SimpleDataWriter")
listener.append(jtl)
listener.append(obj_prop)
return listener
@staticmethod
def new_kpi_listener(filename, flag_overrides=None):
"""
Generates listener for writing basic KPI data in CSV format
:param filename:
:return:
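
        Illustrative example (override values are arbitrary):
            JMX.new_kpi_listener("kpi.jtl", {"url": True})
        merges the override on top of the default flags defined below.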
"""
defaults = {
"xml": False,
"fieldNames": True,
"time": True,
"timestamp": True,
"latency": True,
"connectTime": True,
"success": True,
"label": True,
"code": True,
"message": True,
"threadName": True,
"dataType": False,
"encoding": False,
"assertions": False,
"subresults": False,
"responseData": False,
"samplerData": False,
"responseHeaders": False,
"requestHeaders": False,
"responseDataOnError": False,
"saveAssertionResultsFailureMessage": False,
"bytes": True,
"hostname": True,
"threadCounts": True,
"url": False
}
flags = BetterDict.from_dict(defaults)
if flag_overrides:
flags.merge(flag_overrides)
return JMX.__jtl_writer(filename, "KPI Writer", flags)
@staticmethod
def new_xml_listener(filename, is_full, user_flags):
"""
:param is_full: bool
:param filename: str
:param user_flags: BetterDict
:return:
"""
default_flags = {
"xml": True,
"fieldNames": True,
"time": True,
"timestamp": True,
"latency": True,
"success": True,
"label": True,
"code": True,
"message": True,
"threadName": True,
"dataType": True,
"encoding": True,
"assertions": True,
"subresults": True,
"responseData": False,
"samplerData": False,
"responseHeaders": True,
"requestHeaders": True,
"responseDataOnError": True,
"saveAssertionResultsFailureMessage": True,
"bytes": True,
"threadCounts": True,
"url": True
}
flags = BetterDict.from_dict(default_flags)
flags.merge(user_flags)
if is_full:
writer = JMX.__jtl_writer(filename, "Trace Writer", flags)
else:
writer = JMX.__jtl_writer(filename, "Errors Writer", flags)
writer.append(JMX._bool_prop("ResultCollector.error_logging", True))
return writer
@staticmethod
def _get_arguments_panel(name):
"""
Generates ArgumentsPanel node
:param name:
:return:
"""
return etree.Element("elementProp", name=name, elementType="Arguments",
guiclass="ArgumentsPanel", testclass="Arguments")
@staticmethod
def get_auth_manager(authorizations, clear_flag):
mgr = etree.Element("AuthManager", guiclass="AuthPanel", testclass="AuthManager",
testname="HTTP Authorization Manager")
if clear_flag:
mgr.append(JMX._bool_prop("AuthManager.clearEachIteration", True))
auth_coll = JMX._collection_prop("AuthManager.auth_list")
mgr.append(auth_coll)
for authorization in authorizations:
auth_element = JMX._element_prop(name="", element_type="Authorization")
conf_url = authorization.get("url", "")
conf_name = authorization.get("name", "")
conf_pass = authorization.get("password", "")
conf_domain = authorization.get("domain", "")
conf_realm = authorization.get("realm", "")
conf_mech = authorization.get("mechanism", "").upper()
if not (conf_name and conf_pass and (conf_url or conf_domain)):
LOG.warning("Wrong authorization: %s" % authorization)
continue
auth_element.append(JMX._string_prop("Authorization.url", conf_url))
auth_element.append(JMX._string_prop("Authorization.username", conf_name))
auth_element.append(JMX._string_prop("Authorization.password", conf_pass))
auth_element.append(JMX._string_prop("Authorization.domain", conf_domain))
auth_element.append(JMX._string_prop("Authorization.realm", conf_realm))
if conf_mech == "KERBEROS": # optional prop
auth_element.append(JMX._string_prop("Authorization.mechanism", "KERBEROS"))
auth_coll.append(auth_element)
return mgr
@staticmethod
def _get_http_request(url, label, method, timeout, body, keepalive, files=(), encoding=None, follow_redirects=True,
use_random_host_ip=False, host_ips=()):
"""
Generates HTTP request
:type method: str
:type label: str
:type url: str
:rtype: lxml.etree.Element
"""
proxy = etree.Element("HTTPSamplerProxy", guiclass="HttpTestSampleGui", testclass="HTTPSamplerProxy")
proxy.set("testname", label)
args = JMX._get_arguments_panel("HTTPsampler.Arguments")
if isinstance(body, str):
JMX.__add_body_from_string(args, body, proxy)
elif isinstance(body, dict):
JMX.__add_body_from_script(args, body, proxy)
elif body:
msg = "Cannot handle 'body' option of type %s: %s"
raise TaurusInternalException(msg % (type(body), body))
parsed_url = parse.urlparse(url)
JMX.__add_hostnameport_2sampler(parsed_url, proxy, url)
path = parsed_url.path
if parsed_url.params:
path += ";" + parsed_url.params
if parsed_url.query:
path += "?" + parsed_url.query
proxy.append(JMX._string_prop("HTTPSampler.path", path))
proxy.append(JMX._string_prop("HTTPSampler.method", method))
proxy.append(JMX._bool_prop("HTTPSampler.use_keepalive", keepalive))
proxy.append(JMX._bool_prop("HTTPSampler.follow_redirects", follow_redirects))
proxy.append(JMX._bool_prop("HTTPSampler.auto_redirects", False))
if timeout is not None:
proxy.append(JMX._string_prop("HTTPSampler.connect_timeout", timeout))
proxy.append(JMX._string_prop("HTTPSampler.response_timeout", timeout))
if encoding is not None:
proxy.append(JMX._string_prop("HTTPSampler.contentEncoding", encoding))
proxy.extend(JMX.get_files_elements(files))
if use_random_host_ip and host_ips:
if len(host_ips) > 1:
expr = "${__chooseRandom(%s,randomAddr)}" % ",".join(host_ips)
else:
expr = host_ips[0]
proxy.append(JMX._string_prop("HTTPSampler.ipSource", expr))
return proxy
@staticmethod
def get_files_elements(files):
elements = []
if files:
files_prop = JMX._element_prop("HTTPsampler.Files", "HTTPFileArgs")
elements.append(files_prop)
files_coll = JMX._collection_prop("HTTPFileArgs.files")
for file_dict in files:
file_elem = JMX._element_prop(file_dict.get("path", ""), "HTTPFileArg")
file_elem.append(JMX._string_prop("File.path", file_dict.get("path", "")))
file_elem.append(JMX._string_prop("File.paramname", file_dict.get("param", "")))
file_elem.append(JMX._string_prop("File.mimetype", file_dict.get("mime-type", "")))
files_coll.append(file_elem)
files_prop.append(files_coll)
return elements
@staticmethod
def get_keystore_config_elements(variable_name, start_index, end_index, preload):
elements = []
if variable_name:
elements = etree.Element("KeystoreConfig", guiclass="TestBeanGUI", testclass="KeystoreConfig",
testname="Taurus-Keystore-Configuration")
elements.append(JMX._string_prop("clientCertAliasVarName", variable_name))
elements.append(JMX._string_prop("startIndex", start_index))
elements.append(JMX._string_prop("endIndex", end_index))
elements.append(JMX._string_prop("preload", preload))
return elements
@staticmethod
def __add_body_from_string(args, body, proxy):
proxy.append(JMX._bool_prop("HTTPSampler.postBodyRaw", True))
coll_prop = JMX._collection_prop("Arguments.arguments")
header = JMX._element_prop("elementProp", "HTTPArgument")
try:
header.append(JMX._string_prop("Argument.value", body))
except ValueError:
LOG.warning("Failed to set body: %s", traceback.format_exc())
header.append(JMX._string_prop("Argument.value", "BINARY-STUB"))
coll_prop.append(header)
args.append(coll_prop)
proxy.append(args)
@staticmethod
def __add_body_from_script(args, body, proxy):
http_args_coll_prop = JMX._collection_prop("Arguments.arguments")
for arg_name, arg_value in body.items():
try:
http_element_prop = JMX._element_prop(arg_name, "HTTPArgument")
except ValueError:
LOG.warning("Failed to get element property: %s", traceback.format_exc())
http_element_prop = JMX._element_prop('BINARY-STUB', "HTTPArgument")
try:
http_element_prop.append(JMX._string_prop("Argument.name", arg_name))
except ValueError:
LOG.warning("Failed to set arg name: %s", traceback.format_exc())
http_element_prop.append(JMX._string_prop("Argument.name", "BINARY-STUB"))
try:
http_element_prop.append(
JMX._string_prop("Argument.value", arg_value if arg_value is not None else ''))
except ValueError:
LOG.warning("Failed to set arg name: %s", traceback.format_exc())
http_element_prop.append(JMX._string_prop("Argument.value", "BINARY-STUB"))
http_element_prop.append(JMX._bool_prop("HTTPArgument.always_encode", True))
use_equals = arg_value is not None
http_element_prop.append(JMX._bool_prop("HTTPArgument.use_equals", arg_value is not None))
http_element_prop.append(JMX._string_prop("Argument.metadata", '=' if use_equals else ''))
http_args_coll_prop.append(http_element_prop)
args.append(http_args_coll_prop)
proxy.append(args)
@staticmethod
def __add_hostnameport_2sampler(parsed_url, proxy, url):
if parsed_url.scheme:
proxy.append(JMX._string_prop("HTTPSampler.protocol", parsed_url.scheme))
if parsed_url.netloc:
netloc_parts = parsed_url.netloc.split(':')
if netloc_parts[0]:
proxy.append(JMX._string_prop("HTTPSampler.domain", netloc_parts[0]))
if len(netloc_parts) > 1 and netloc_parts[1]:
proxy.append(JMX._string_prop("HTTPSampler.port", netloc_parts[1]))
else:
try:
if parsed_url.port:
proxy.append(JMX._string_prop("HTTPSampler.port", parsed_url.port))
else:
proxy.append(JMX._string_prop("HTTPSampler.port", ""))
except ValueError:
LOG.debug("Non-parsable port: %s", url)
proxy.append(JMX._string_prop("HTTPSampler.port", ""))
@staticmethod
def _element_prop(name, element_type):
"""
Generates element property node
:param name:
:param element_type:
:return:
"""
res = etree.Element("elementProp", name=name, elementType=element_type)
return res
@staticmethod
def _collection_prop(name):
"""
Adds Collection prop
:param name:
:return:
"""
res = etree.Element("collectionProp", name=name)
return res
@staticmethod
def _string_prop(name, value):
"""
Generates string property node
:param name:
:param value:
:return:
"""
res = etree.Element("stringProp", name=name)
res.text = str(value)
return res
@staticmethod
def _long_prop(name, value):
"""
Generates long property node
:param name:
:param value:
:return:
"""
res = etree.Element("longProp", name=name)
res.text = str(value)
return res
@staticmethod
def _bool_prop(name, value):
"""
Generates boolean property
:param name:
:param value:
:return:
"""
res = etree.Element("boolProp", name=name)
res.text = 'true' if value else 'false'
return res
@staticmethod
def int_prop(name, value):
"""
JMX int property
:param name:
:param value:
:return:
"""
res = etree.Element("intProp", name=name)
res.text = str(value)
return res
@staticmethod
def get_thread_group(concurrency=None, rampup=0, hold=0, iterations=None,
testname="ThreadGroup", on_error="continue", thread_delay=False, scheduler_delay=None):
"""
Generates ThreadGroup
Expected values (by JMeter):
ThreadGroup.num_threads (concurrency): int
ThreadGroup.ramp_time (rampup): int
ThreadGroup.scheduler (need to hold): boolean
ThreadGroup.duration (rampup + hold): int
LoopController.loops (iterations): int
ThreadGroup.delayedStart: boolean
:return: etree element, ThreadGroup
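
        Illustrative example (argument values are arbitrary):
            tg = JMX.get_thread_group(concurrency=10, rampup=60, hold=300)
        yields a ThreadGroup with duration 360, scheduler enabled and
        LoopController.loops set to -1 (loop until the duration elapses).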
"""
rampup = cond_int(rampup or 0)
hold = cond_int(hold or 0)
if concurrency is None:
concurrency = 1
if isinstance(concurrency, numeric_types) and concurrency <= 0:
enabled = "false"
else:
enabled = "true"
if not hold:
duration = rampup
elif not rampup:
duration = hold
elif isinstance(rampup, numeric_types) and isinstance(hold, numeric_types):
duration = hold + rampup
else:
duration = "${__intSum(%s,%s)}" % (rampup, hold)
trg = etree.Element("ThreadGroup", guiclass="ThreadGroupGui",
testclass="ThreadGroup", testname=testname, enabled=enabled)
if not iterations:
if duration:
iterations = -1
else:
iterations = 1
scheduler = False
if hold or (rampup and (iterations == -1)):
scheduler = True
if on_error is not None:
trg.append(JMX._string_prop("ThreadGroup.on_sample_error", on_error))
loop = etree.Element("elementProp",
name="ThreadGroup.main_controller",
elementType="LoopController",
guiclass="LoopControlPanel",
testclass="LoopController")
# 'true' causes endless execution of TG in non-gui mode
loop.append(JMX._bool_prop("LoopController.continue_forever", False))
loop.append(JMX._string_prop("LoopController.loops", iterations))
trg.append(loop)
trg.append(JMX._string_prop("ThreadGroup.num_threads", concurrency))
trg.append(JMX._string_prop("ThreadGroup.ramp_time", rampup))
trg.append(JMX._string_prop("ThreadGroup.start_time", ""))
trg.append(JMX._string_prop("ThreadGroup.end_time", ""))
trg.append(JMX._bool_prop("ThreadGroup.scheduler", scheduler))
trg.append(JMX._string_prop("ThreadGroup.duration", duration))
if scheduler_delay:
trg.append(JMX._string_prop("ThreadGroup.delay", scheduler_delay))
if thread_delay:
trg.append(JMX._bool_prop("ThreadGroup.delayedStart", thread_delay))
return trg
def get_rps_shaper(self):
"""
:return: etree.Element
"""
throughput_timer_element = etree.Element(self.THR_TIMER,
guiclass=self.THR_TIMER + "Gui",
testclass=self.THR_TIMER,
testname="Throughput_Limiter",
enabled="true")
shaper_load_prof = self._collection_prop("load_profile")
throughput_timer_element.append(shaper_load_prof)
return throughput_timer_element
def add_rps_shaper_schedule(self, shaper_etree, start_rps, end_rps, duration):
"""
Adds schedule to rps shaper
Expected values (by JMeter):
<first> ('start_rps'): float
<second> ('end_rps'): float
<third> ('duration'): int
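
        Illustrative example (values are arbitrary):
            jmx = JMX()
            shaper = jmx.get_rps_shaper()
            jmx.add_rps_shaper_schedule(shaper, 1, 10, 60)
        appends a (1.0, 10.0, 60) row to the shaper's load_profile collection.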
"""
shaper_collection = shaper_etree.find(".//collectionProp[@name='load_profile']")
coll_prop = self._collection_prop("")
start_rps_prop = self._string_prop("", cond_float(start_rps, 3))
end_rps_prop = self._string_prop("", cond_float(end_rps, 3))
duration_prop = self._string_prop("", cond_int(duration))
coll_prop.append(start_rps_prop)
coll_prop.append(end_rps_prop)
coll_prop.append(duration_prop)
shaper_collection.append(coll_prop)
@staticmethod
def get_set_var_action(udv_dict, testname="Variables from Taurus"):
"""
:type testname: str
:type udv_dict: dict[str,str]
:rtype: etree.Element
"""
udv_element = etree.Element(JMX.SET_VAR_ACTION, guiclass=JMX.SET_VAR_ACTION + "Gui",
testclass=JMX.SET_VAR_ACTION, testname=testname)
arg_element = etree.Element("elementProp", name="SetVariablesAction", guiclass="ArgumentsPanel",
testclass="Arguments", testname="User Defined Variables", elementType="Arguments")
udv_element.append(arg_element)
udv_collection_prop = JMX._collection_prop("Arguments.arguments")
arg_element.append(udv_collection_prop)
for var_name in sorted(udv_dict.keys(), key=str):
udv_element_prop = JMX._element_prop(name=str(var_name), element_type="Argument")
udv_collection_prop.append(udv_element_prop)
udv_arg_name_prop = JMX._string_prop("Argument.name", var_name)
udv_arg_value_prop = JMX._string_prop("Argument.value", udv_dict[var_name])
udv_arg_meta_prop = JMX._string_prop("Argument.metadata", "=")
udv_element_prop.append(udv_arg_name_prop)
udv_element_prop.append(udv_arg_value_prop)
udv_element_prop.append(udv_arg_meta_prop)
return udv_element
@staticmethod
def add_user_def_vars_elements(udv_dict, testname="Variables from Taurus"):
"""
:type testname: str
:type udv_dict: dict[str,str]
:rtype: etree.Element
"""
udv_element = etree.Element("Arguments", guiclass="ArgumentsPanel", testclass="Arguments",
testname=testname)
udv_collection_prop = JMX._collection_prop("Arguments.arguments")
for var_name in sorted(udv_dict.keys(), key=str):
udv_element_prop = JMX._element_prop(str(var_name), "Argument")
udv_arg_name_prop = JMX._string_prop("Argument.name", var_name)
udv_arg_value_prop = JMX._string_prop("Argument.value", udv_dict[var_name])
udv_arg_desc_prop = JMX._string_prop("Argument.desc", "")
udv_arg_meta_prop = JMX._string_prop("Argument.metadata", "=")
udv_element_prop.append(udv_arg_name_prop)
udv_element_prop.append(udv_arg_value_prop)
udv_element_prop.append(udv_arg_desc_prop)
udv_element_prop.append(udv_arg_meta_prop)
udv_collection_prop.append(udv_element_prop)
udv_element.append(udv_collection_prop)
return udv_element
@staticmethod
def get_concurrency_thread_group(concurrency=None, rampup=0, hold=0, steps=None, on_error="continue",
testname="ConcurrencyThreadGroup", iterations=""):
"""
Generates ConcurrencyThreadGroup
Expected values (by JMeter):
Targetlevel (concurrency): int
RampUp (rampup): float
            Steps (steps): int
Hold (hold): float
:return: etree element, Concurrency Thread Group
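
        Illustrative example (argument values are arbitrary):
            ctg = JMX.get_concurrency_thread_group(concurrency=50, rampup=60, hold=600, steps=10)
        yields TargetLevel "50", RampUp "60", Steps "10" and Hold "600",
        with the time unit set to seconds ("S").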
"""
if not rampup:
rampup = 0
if concurrency is None:
concurrency = 1
if isinstance(concurrency, numeric_types) and concurrency <= 0:
enabled = "false"
else:
enabled = "true"
if steps is None: # zero means infinity of steps
steps = 0
name = 'com.blazemeter.jmeter.threads.concurrency.ConcurrencyThreadGroup'
concurrency_thread_group = etree.Element(
name, guiclass=name + "Gui", testclass=name, testname=testname, enabled=enabled)
virtual_user_controller = etree.Element(
"elementProp",
name="ThreadGroup.main_controller",
elementType="com.blazemeter.jmeter.control.VirtualUserController")
concurrency_thread_group.append(virtual_user_controller)
concurrency_thread_group.append(JMX._string_prop("ThreadGroup.on_sample_error", on_error))
concurrency_thread_group.append(JMX._string_prop("TargetLevel", str(concurrency)))
concurrency_thread_group.append(JMX._string_prop("RampUp", str(cond_int(rampup))))
concurrency_thread_group.append(JMX._string_prop("Steps", steps))
concurrency_thread_group.append(JMX._string_prop("Hold", str(cond_int(hold))))
concurrency_thread_group.append(JMX._string_prop("LogFilename", ""))
concurrency_thread_group.append(JMX._string_prop("Iterations", iterations or ""))
concurrency_thread_group.append(JMX._string_prop("Unit", "S"))
return concurrency_thread_group
@staticmethod
def get_dns_cache_mgr():
"""
Adds dns cache element with defaults parameters
:return:
"""
dns_element = etree.Element("DNSCacheManager", guiclass="DNSCachePanel", testclass="DNSCacheManager",
testname="DNS Cache Manager")
dns_element.append(JMX._collection_prop("DNSCacheManager.servers"))
dns_element.append(JMX._bool_prop("DNSCacheManager.clearEachIteration", False))
dns_element.append(JMX._bool_prop("DNSCacheManager.isCustomResolver", False))
return dns_element
@staticmethod
def _get_header_mgr(hdict):
"""
:type hdict: dict[str,str]
:rtype: lxml.etree.Element
"""
mgr = etree.Element("HeaderManager", guiclass="HeaderPanel", testclass="HeaderManager", testname="Headers")
coll_prop = etree.Element("collectionProp", name="HeaderManager.headers")
for hname, hval in iteritems(hdict):
header = etree.Element("elementProp", name="", elementType="Header")
header.append(JMX._string_prop("Header.name", hname))
header.append(JMX._string_prop("Header.value", hval))
coll_prop.append(header)
mgr.append(coll_prop)
return mgr
@staticmethod
def _get_cache_mgr():
"""
:rtype: lxml.etree.Element
"""
mgr = etree.Element("CacheManager", guiclass="CacheManagerGui", testclass="CacheManager", testname="Cache")
mgr.append(JMX._bool_prop("clearEachIteration", True))
mgr.append(JMX._bool_prop("useExpires", True))
return mgr
@staticmethod
def _get_cookie_mgr(scenario=None):
"""
:rtype: lxml.etree.Element
"""
mgr = etree.Element("CookieManager", guiclass="CookiePanel", testclass="CookieManager", testname="Cookies")
mgr.append(JMX._bool_prop("CookieManager.clearEachIteration", False))
mgr.append(JMX._string_prop("CookieManager.implementation",
"org.apache.jmeter.protocol.http.control.HC4CookieHandler"))
if scenario:
cookies = scenario.get(Scenario.COOKIES)
if cookies:
cookies_coll = JMX._collection_prop("CookieManager.cookies")
mgr.append(cookies_coll)
for cookie in cookies:
if not isinstance(cookie, dict):
raise TaurusConfigError("Cookie must be dictionary: %s" % cookie)
c_name = cookie.get("name", TaurusConfigError("Name of cookie isn't found: %s" % cookie))
c_value = cookie.get("value", TaurusConfigError("Value of cookie isn't found: %s" % cookie))
c_domain = cookie.get("domain", TaurusConfigError("Domain of cookie isn't found: %s" % cookie))
c_path = cookie.get("path", "")
c_secure = cookie.get("secure", False)
# follow params are hardcoded in JMeter
c_expires = 0
c_path_specified = True
c_domain_specified = True
c_elem = etree.Element("elementProp", name=c_name, elementType="Cookie", testname=c_name)
c_elem.append(JMX._string_prop("Cookie.value", c_value))
c_elem.append(JMX._string_prop("Cookie.domain", c_domain))
c_elem.append(JMX._string_prop("Cookie.path", c_path))
c_elem.append(JMX._bool_prop("Cookie.secure", c_secure))
c_elem.append(JMX._long_prop("Cookie.expires", c_expires))
c_elem.append(JMX._bool_prop("Cookie.path_specified", c_path_specified))
c_elem.append(JMX._bool_prop("Cookie.domain_specified", c_domain_specified))
cookies_coll.append(c_elem)
return mgr
@staticmethod
def _get_http_defaults(default_address=None, timeout=None, retrieve_resources=None, concurrent_pool_size=4,
content_encoding=None, resources_regex=None):
"""
:rtype: lxml.etree.Element
"""
cfg = etree.Element("ConfigTestElement", guiclass="HttpDefaultsGui",
testclass="ConfigTestElement", testname="Defaults")
if retrieve_resources:
cfg.append(JMX._bool_prop("HTTPSampler.image_parser", True))
cfg.append(JMX._bool_prop("HTTPSampler.concurrentDwn", True))
if concurrent_pool_size:
cfg.append(JMX._string_prop("HTTPSampler.concurrentPool", concurrent_pool_size))
params = etree.Element("elementProp",
name="HTTPsampler.Arguments",
elementType="Arguments",
guiclass="HTTPArgumentsPanel",
testclass="Arguments", testname="user_defined")
cfg.append(params)
if default_address:
parsed_url = parse.urlsplit(default_address)
if parsed_url.scheme:
cfg.append(JMX._string_prop("HTTPSampler.protocol", parsed_url.scheme))
if parsed_url.netloc:
netloc = parsed_url.netloc
if ':' in netloc:
index = netloc.rfind(':')
cfg.append(JMX._string_prop("HTTPSampler.port", netloc[index + 1:]))
netloc = netloc[:index]
cfg.append(JMX._string_prop("HTTPSampler.domain", netloc))
if timeout:
cfg.append(JMX._string_prop("HTTPSampler.connect_timeout", timeout))
cfg.append(JMX._string_prop("HTTPSampler.response_timeout", timeout))
if content_encoding:
cfg.append(JMX._string_prop("HTTPSampler.contentEncoding", content_encoding))
if resources_regex:
cfg.append(JMX._string_prop("HTTPSampler.embedded_url_re", resources_regex))
return cfg
@staticmethod
def _get_dur_assertion(timeout):
"""
:type timeout: int
:return:
"""
element = etree.Element("DurationAssertion", guiclass="DurationAssertionGui",
testclass="DurationAssertion", testname="Timeout Check")
element.append(JMX._string_prop("DurationAssertion.duration", timeout))
return element
@staticmethod
def get_constant_timer(delay):
timer_type = "ConstantTimer"
element = etree.Element(timer_type, guiclass="%sGui" % timer_type, testclass=timer_type, testname="Think-Time")
element.append(JMX._string_prop("%s.delay" % timer_type, delay))
return [element, etree.Element("hashTree")]
@staticmethod
def get_uniform_timer(maximum, offset):
timer_type = "UniformRandomTimer"
element = etree.Element(timer_type, guiclass="%sGui" % timer_type, testclass=timer_type, testname="Think-Time")
element.append(JMX._string_prop("ConstantTimer.delay", offset))
element.append(JMX._string_prop("RandomTimer.range", maximum))
return [element, etree.Element("hashTree")]
@staticmethod
def get_gaussian_timer(dev, offset):
timer_type = "GaussianRandomTimer"
element = etree.Element(timer_type, guiclass="%sGui" % timer_type, testclass=timer_type, testname="Think-Time")
element.append(JMX._string_prop("ConstantTimer.delay", offset))
element.append(JMX._string_prop("RandomTimer.range", dev))
return [element, etree.Element("hashTree")]
@staticmethod
def get_poisson_timer(lam, delay):
timer_type = "PoissonRandomTimer"
element = etree.Element(timer_type, guiclass="%sGui" % timer_type, testclass=timer_type, testname="Think-Time")
element.append(JMX._string_prop("ConstantTimer.delay", delay))
element.append(JMX._string_prop("RandomTimer.range", lam))
return [element, etree.Element("hashTree")]
@staticmethod
def _get_extractor(varname, headers, regexp, template, match_no, default='NOT_FOUND', scope='', from_var=''):
"""
        :type varname: str
        :type headers: str
:type regexp: str
:type template: str|int
:type match_no: int
:type default: str
:type scope: str
:type from_var: str
:rtype: lxml.etree.Element
"""
if isinstance(template, int):
template = '$%s$' % template
if headers.lower() == 'headers':
headers = 'true'
elif headers.lower() == 'http-code':
headers = 'code'
elif headers.lower() == 'url':
headers = 'URL'
else:
headers = 'body'
element = etree.Element("RegexExtractor", guiclass="RegexExtractorGui",
testclass="RegexExtractor", testname="Get %s" % varname, enabled="true")
element.append(JMX._string_prop("RegexExtractor.useHeaders", headers))
element.append(JMX._string_prop("RegexExtractor.refname", varname))
element.append(JMX._string_prop("RegexExtractor.regex", regexp))
element.append(JMX._string_prop("RegexExtractor.template", template))
element.append(JMX._string_prop("RegexExtractor.default", default))
element.append(JMX._string_prop("RegexExtractor.match_number", match_no))
element.extend(JMX.get_scope_props(scope, from_var))
return element
@staticmethod
def _get_boundary_extractor(varname, subject, left, right, match_no, defvalue='NOT_FOUND', scope='', from_var=''):
"""
        :type varname: str
        :type subject: str
        :type left: str
        :type right: str
        :type match_no: int
        :type defvalue: str
        :type scope: str
        :type from_var: str
        :rtype: lxml.etree.Element
"""
subjects = {
'body': 'false',
'body-unescaped': 'unescaped',
'body-as-document': 'as_document',
'response-headers': 'true',
'request-headers': 'request_headers',
'url': 'URL',
'code': 'code',
'message': 'message',
}
subject = subjects.get(subject)
element = etree.Element("BoundaryExtractor", guiclass="BoundaryExtractorGui",
testclass="BoundaryExtractor", testname="Get %s" % varname, enabled="true")
element.append(JMX._string_prop("BoundaryExtractor.useHeaders", subject))
element.append(JMX._string_prop("BoundaryExtractor.refname", varname))
element.append(JMX._string_prop("BoundaryExtractor.lboundary", left)) # TODO: html-escape boundaries?
element.append(JMX._string_prop("BoundaryExtractor.rboundary", right))
element.append(JMX._string_prop("RegexExtractor.default", defvalue))
element.append(JMX._string_prop("RegexExtractor.match_number", match_no))
element.extend(JMX.get_scope_props(scope, from_var))
return element
@staticmethod
def _get_jquerycss_extractor(varname, selector, attribute, match_no, default="NOT_FOUND", scope='', from_var=''):
"""
:type varname: str
        :type selector: str
        :type attribute: str
:type match_no: int
:type default: str
:type scope: str
:type from_var: str
:rtype: lxml.etree.Element
"""
element = etree.Element("HtmlExtractor", guiclass="HtmlExtractorGui", testclass="HtmlExtractor",
testname="Get %s" % varname)
element.append(JMX._string_prop("HtmlExtractor.refname", varname))
element.append(JMX._string_prop("HtmlExtractor.expr", selector))
element.append(JMX._string_prop("HtmlExtractor.attribute", attribute))
element.append(JMX._string_prop("HtmlExtractor.match_number", match_no))
element.append(JMX._string_prop("HtmlExtractor.default", default))
element.extend(JMX.get_scope_props(scope, from_var))
return element
@staticmethod
def _get_json_extractor(varname, jsonpath, default='NOT_FOUND', from_variable=None):
"""
:type varname: str
:type default: str
:rtype: lxml.etree.Element
"""
package = "com.atlantbh.jmeter.plugins.jsonutils.jsonpathextractor"
element = etree.Element("%s.JSONPathExtractor" % package,
guiclass="%s.gui.JSONPathExtractorGui" % package,
testclass="%s.JSONPathExtractor" % package,
testname="Get %s" % varname)
element.append(JMX._string_prop("VAR", varname))
element.append(JMX._string_prop("JSONPATH", jsonpath))
element.append(JMX._string_prop("DEFAULT", default))
if from_variable:
element.append(JMX._string_prop("VARIABLE", from_variable))
element.append(JMX._string_prop("SUBJECT", "VAR"))
return element
@staticmethod
def get_scope_props(scope, from_variable):
props = []
if scope:
props.append(JMX._string_prop("Sample.scope", scope))
if scope == "variable":
props.append(JMX._string_prop("Scope.variable", from_variable))
return props
@staticmethod
def _get_internal_json_extractor(varname, jsonpath, default, scope, from_variable, match_no, concat):
"""
:type varname: str
:type default: str
:rtype: lxml.etree.Element
"""
package = "JSONPostProcessor"
element = etree.Element(package,
guiclass="%sGui" % package,
testclass="%s" % package,
testname="Get %s" % varname)
element.append(JMX._string_prop("JSONPostProcessor.referenceNames", varname))
element.append(JMX._string_prop("JSONPostProcessor.jsonPathExprs", jsonpath))
element.append(JMX._string_prop("JSONPostProcessor.match_numbers", match_no))
if default:
element.append(JMX._string_prop("JSONPostProcessor.defaultValues", default))
element.extend(JMX.get_scope_props(scope, from_variable))
if concat:
element.append(JMX._bool_prop("JSONPostProcessor.compute_concat", True))
return element
@staticmethod
def _get_json_path_assertion(jsonpath, expected_value, json_validation, expect_null, invert, regexp=True):
"""
:type jsonpath: str
:type expected_value: str
:type json_validation: bool
:type expect_null: bool
:type invert: bool
:type regexp: bool
:return: lxml.etree.Element
"""
package = "com.atlantbh.jmeter.plugins.jsonutils.jsonpathassertion"
element = etree.Element("%s.JSONPathAssertion" % package,
guiclass="%s.gui.JSONPathAssertionGui" % package,
testclass="%s.JSONPathAssertion" % package,
testname="JSon path assertion")
element.append(JMX._string_prop("JSON_PATH", jsonpath))
element.append(JMX._string_prop("EXPECTED_VALUE", expected_value))
element.append(JMX._bool_prop("JSONVALIDATION", json_validation))
element.append(JMX._bool_prop("EXPECT_NULL", expect_null))
element.append(JMX._bool_prop("INVERT", invert))
element.append(JMX._bool_prop("ISREGEX", regexp))
return element
@staticmethod
def _get_xpath_extractor(varname, xpath, default, validate_xml, ignore_whitespace, match_no, use_namespaces,
use_tolerant_parser, scope, from_var):
"""
:type varname: str
:type xpath: str
:type default: str
:type validate_xml: bool
        :type ignore_whitespace: bool
        :type match_no: int
        :type use_namespaces: bool
        :type use_tolerant_parser: bool
:type scope: str
:type from_var: str
:rtype: lxml.etree.Element
"""
element = etree.Element("XPathExtractor",
guiclass="XPathExtractorGui",
testclass="XPathExtractor",
testname="Get %s" % varname)
element.append(JMX._string_prop("XPathExtractor.refname", varname))
element.append(JMX._string_prop("XPathExtractor.xpathQuery", xpath))
element.append(JMX._string_prop("XPathExtractor.default", default))
element.append(JMX._bool_prop("XPathExtractor.validate", validate_xml))
element.append(JMX._bool_prop("XPathExtractor.whitespace", ignore_whitespace))
element.append(JMX._string_prop("XPathExtractor.matchNumber", match_no))
element.append(JMX._bool_prop("XPathExtractor.namespace", use_namespaces))
element.append(JMX._bool_prop("XPathExtractor.tolerant", use_tolerant_parser))
element.extend(JMX.get_scope_props(scope, from_var))
return element
@staticmethod
def _get_xpath_assertion(xpath, validate_xml, ignore_whitespace, use_tolerant_parser, invert):
"""
:type xpath: str
:type validate_xml: bool
:type ignore_whitespace: bool
:type use_tolerant_parser: bool
:return: lxml.etree.Element
"""
element = etree.Element("XPathAssertion",
guiclass="XPathAssertionGui",
testclass="XPathAssertion",
testname="XPath Assertion")
element.append(JMX._string_prop("XPath.xpath", xpath))
element.append(JMX._bool_prop("XPath.validate", validate_xml))
element.append(JMX._bool_prop("XPath.whitespace", ignore_whitespace))
element.append(JMX._bool_prop("XPath.tolerant", use_tolerant_parser))
element.append(JMX._bool_prop("XPath.negate", invert))
return element
@staticmethod
def _get_resp_assertion(field, contains, is_regexp, is_invert, assume_success=False):
"""
:type field: str
:type contains: list[str]
:type is_regexp: bool
:type is_invert: bool
:rtype: lxml.etree.Element
"""
tname = "Assert %s %s" % ("hasn't" if is_invert else "has",
"[" + ", ".join('"' + str(x) + '"' for x in contains) + "]")
element = etree.Element("ResponseAssertion", guiclass="AssertionGui",
testclass="ResponseAssertion", testname=tname)
if field == Scenario.FIELD_HEADERS:
fld = "Assertion.response_headers"
elif field == Scenario.FIELD_RESP_CODE:
fld = "Assertion.response_code"
else:
fld = "Assertion.response_data"
if is_regexp:
if is_invert:
mtype = 6 # not contains
else:
mtype = 2 # contains
else:
if is_invert:
mtype = 20 # not substring
else:
mtype = 16 # substring
element.append(JMX._string_prop("Assertion.test_field", fld))
element.append(JMX._string_prop("Assertion.test_type", mtype))
element.append(JMX._bool_prop("Assertion.assume_success", assume_success))
coll_prop = etree.Element("collectionProp", name="Asserion.test_strings")
for string in contains:
coll_prop.append(JMX._string_prop("", string))
element.append(coll_prop)
return element
@staticmethod
def _get_jsr223_element(language, script_file, parameters, execute, script_text=None, cache_key='true'):
if execute == "before":
proc = "JSR223PreProcessor"
else:
proc = "JSR223PostProcessor"
element = etree.Element(proc, guiclass="TestBeanGUI", testclass=proc, testname=proc)
element.append(JMX._string_prop("filename", script_file if script_file else ''))
element.append(JMX._string_prop("script", script_text if script_text else ''))
element.append(JMX._string_prop("parameters", parameters))
element.append(JMX._string_prop("scriptLanguage", language))
element.append(JMX._string_prop("cacheKey", cache_key))
return element
@staticmethod
def _get_csv_config(path, delimiter, loop, variable_names, is_quoted):
"""
:type path: str
:type delimiter: str
:type is_quoted: bool
:type loop: bool
:type variable_names: string
:return:
"""
element = etree.Element("CSVDataSet", guiclass="TestBeanGUI",
testclass="CSVDataSet", testname="CSV %s" % os.path.basename(path))
element.append(JMX._string_prop("filename", path))
element.append(JMX._string_prop("delimiter", delimiter))
element.append(JMX._bool_prop("quotedData", is_quoted))
element.append(JMX._bool_prop("recycle", loop))
element.append(JMX._bool_prop("stopThread", not loop))
element.append(JMX._string_prop("variableNames", variable_names))
return element
@staticmethod
def _get_csv_config_random(path, delimiter, loop, variable_names):
"""
:type path: str
:type delimiter: str
:type loop: bool
:type variable_names: string
:return:
"""
element = etree.Element("com.blazemeter.jmeter.RandomCSVDataSetConfig",
guiclass="com.blazemeter.jmeter.RandomCSVDataSetConfigGui",
testclass="com.blazemeter.jmeter.RandomCSVDataSetConfig",
testname="bzm - Random CSV Data Set Config")
element.append(JMX._string_prop("filename", path))
element.append(JMX._string_prop("fileEncoding", "UTF-8"))
element.append(JMX._string_prop("delimiter", delimiter))
element.append(JMX._string_prop("variableNames", variable_names))
element.append(JMX._bool_prop("randomOrder", True))
element.append(JMX._bool_prop("ignoreFirstLine", False if variable_names else True))
element.append(JMX._bool_prop("rewindOnTheEndOfList", loop))
element.append(JMX._bool_prop("independentListPerThread", False))
return element
def set_enabled(self, sel, state):
"""
Toggle items by selector
:type sel: str
:type state: bool
"""
items = self.get(sel)
self.log.debug("Enable %s elements %s: %s", state, sel, items)
for item in items:
item.set("enabled", 'true' if state else 'false')
def set_text(self, sel, text):
"""
Set text value
:type sel: str
:type text: str
"""
items = self.get(sel)
res = 0
for item in items:
item.text = str(text)
res += 1
return res
@staticmethod
def _get_simple_controller(name):
return etree.Element("GenericController", guiclass="LogicControllerGui", testclass="GenericController",
testname=name)
def _add_results_tree(self):
dbg_tree = etree.Element("ResultCollector",
testname="View Results Tree",
testclass="ResultCollector",
guiclass="ViewResultsFullVisualizer")
self.append(self.TEST_PLAN_SEL, dbg_tree)
self.append(self.TEST_PLAN_SEL, etree.Element("hashTree"))
@staticmethod
def _get_results_tree():
dbg_tree = etree.Element("ResultCollector",
testname="View Results Tree",
testclass="ResultCollector",
guiclass="ViewResultsFullVisualizer")
return dbg_tree
@staticmethod
def _get_if_controller(condition):
controller = etree.Element("IfController", guiclass="IfControllerPanel", testclass="IfController",
testname="If Controller")
controller.append(JMX._string_prop("IfController.condition", condition))
return controller
@staticmethod
def _get_once_controller():
"""
Generates Once Only Controller
:return: etree element, OnceOnlyController
"""
controller = etree.Element("OnceOnlyController", guiclass="OnceOnlyControllerGui",
testclass="OnceOnlyController", testname="Once Only Controller")
return controller
@staticmethod
def _get_loop_controller(loops):
"""
Generates Loop Controller
Expected values(by JMeter):
LoopController.loops(iterations): int
LoopController.continue_forever: boolean
:return: etree element, LoopController
"""
if loops == 'forever':
iterations = -1
else:
iterations = loops
controller = etree.Element("LoopController", guiclass="LoopControlPanel", testclass="LoopController",
testname="Loop Controller")
# 'false' means controller can be called only one time (by parent)
controller.append(JMX._bool_prop("LoopController.continue_forever", True))
controller.append(JMX._string_prop("LoopController.loops", str(iterations)))
return controller
@staticmethod
def _get_foreach_controller(input_var, loop_var):
# TODO: useSeparator option
controller = etree.Element("ForeachController", guiclass="ForeachControlPanel", testclass="ForeachController",
testname="ForEach Controller")
controller.append(JMX._string_prop("ForeachController.inputVal", input_var))
controller.append(JMX._string_prop("ForeachController.returnVal", loop_var))
controller.append(JMX._bool_prop("ForeachController.useSeparator", True))
return controller
@staticmethod
def _get_while_controller(condition):
controller = etree.Element("WhileController", guiclass="WhileControllerGui", testclass="WhileController",
testname="While Controller")
controller.append(JMX._string_prop("WhileController.condition", condition))
return controller
@staticmethod
def _get_transaction_controller(transaction_name, force_parent_sample=False, include_timers=False):
controller = etree.Element("TransactionController", guiclass="TransactionControllerGui",
testclass="TransactionController", testname=transaction_name)
controller.append(JMX._bool_prop("TransactionController.parent", force_parent_sample))
controller.append(JMX._bool_prop("TransactionController.includeTimers", include_timers))
return controller
@staticmethod
def _get_functional_mode_prop(enabled):
return JMX._bool_prop("TestPlan.functional_mode", enabled)
@staticmethod
def _get_action_block(action_index, target_index, duration_ms):
action = etree.Element("TestAction", guiclass="TestActionGui", testclass="TestAction", testname="Test Action")
action.append(JMX.int_prop("ActionProcessor.action", action_index))
action.append(JMX.int_prop("ActionProcessor.target", target_index))
action.append(JMX._string_prop("ActionProcessor.duration", str(duration_ms)))
return action
| apache-2.0 | -383,183,640,151,168,500 | 38.542229 | 119 | 0.590631 | false |
magenta/magenta | magenta/models/onsets_frames_transcription/create_dataset_lib_test.py | 1 | 3341 | # Copyright 2021 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for create_dataset_lib."""
import itertools
from magenta.models.onsets_frames_transcription import create_dataset_lib
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
class CreateDatasetLibTest(tf.test.TestCase):
def test_generate_unique_mixes(self):
sourceid_to_exids = [('source1', 'a'), ('source1', 'b'),
('source2', 'c'), ('source2', 'd')]
exid_to_mixids = create_dataset_lib.generate_mixes(
val='unused', num_mixes=100, sourceid_to_exids=sourceid_to_exids)
mix_ids = set(itertools.chain(*list(exid_to_mixids.values())))
# Requested 100, but there are only 4 unique mixes, so that's how many
# we should end up with.
self.assertEqual(4, len(mix_ids))
def test_generate_num_mixes(self):
sourceid_to_exids = [('source1', 'a'), ('source1', 'b'), ('source1', 'c'),
('source2', 'd'), ('source2', 'e'), ('source2', 'f')]
exid_to_mixids = create_dataset_lib.generate_mixes(
val='unused', num_mixes=4, sourceid_to_exids=sourceid_to_exids)
mix_ids = set(itertools.chain(*list(exid_to_mixids.values())))
# Ensure we get the number of mixes we requested even when more unique mixes
# would be possible.
self.assertEqual(4, len(mix_ids))
def test_unique_mixes_duplicate_sources(self):
sourceid_to_exids = [('source1', 'a'), ('source1', 'b'), ('source1', 'c'),
('source2', 'a'), ('source2', 'b'), ('source2', 'c'),
('source3', 'a'), ('source3', 'b'), ('source3', 'c')]
exid_to_mixids = create_dataset_lib.generate_mixes(
val='unused', num_mixes=100, sourceid_to_exids=sourceid_to_exids)
mix_ids = set(itertools.chain(*list(exid_to_mixids.values())))
    # There are only 3 unique ids, but we're requesting mixes of 3 items, so only
# 1 unique mix is possible.
self.assertEqual(1, len(mix_ids))
def test_generate_mixes_using_all_examples(self):
sourceid_to_exids = {
'source1': ['a', 'b', 'c', 'd'],
'source2': ['a', 'b', 'c', 'd'],
}
rs = np.random.RandomState(seed=0)
mixes = create_dataset_lib.generate_mixes_using_all_examples(
sourceid_to_exids, rs)
self.assertEqual(set(['a', 'b', 'c', 'd']), set(itertools.chain(*mixes)))
def test_generate_mixes_using_all_examples_plus_random_examples(self):
sourceid_to_exids = (
[('source1', i) for i in range(100)] +
[('source2', i) for i in range(100)])
exid_to_mixids = create_dataset_lib.generate_mixes(
val='unused', num_mixes=200, sourceid_to_exids=sourceid_to_exids)
self.assertEqual(set(range(100)), exid_to_mixids.keys())
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | 1,089,513,554,171,817,200 | 40.7625 | 80 | 0.640826 | false |
titilambert/harbour-squilla | squilla/lib/friend.py | 1 | 8123 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# friend.py
#
# This file is part of Squilla
#
# Copyright (C) 2014 Thibault Cohen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import select
import random
import socket
from time import sleep
import subprocess
from socket import inet_aton
from threading import Thread
import pyotherside
from mdns.zeroconf import ServiceInfo
import dbus
from squilla.lib.logger import logger
from squilla.lib.config import is_favorite, get_favorites
from squilla.lib import get_presence_auth_user, friend_list
from squilla.lib.presence_browser import zeroconf
from squilla.lib.utils import get_interface_address
from squilla.lib.config import get_interface_name
port = 5299
# Resolve the IP address of the configured network interface
ip_address = get_interface_address(get_interface_name())
class Friend(Thread):
def __init__(self, fullname, number, auth_user, parent=None):
Thread.__init__(self)
global ip_address
self.fullname = fullname
self.number = number
self.port = port
        if ip_address is None:
ip_address = get_interface_address(get_interface_name())
self.ip_address = ip_address
self.auth_user = auth_user
self.node = fullname + "@jolla"
self.favorite = False
self.parent = parent
self.client = None # Bonjour socket client
self.is_ready = False
self.id = 0
def set_data(self, value, attr):
""" For view
"""
if attr == "favorite":
value, bool = value.toInt()
if bool:
if value == QtCore.Qt.Checked:
setattr(self, "favorite", QtCore.Qt.Checked)
else:
setattr(self, "favorite", QtCore.Qt.Unchecked)
return True
return False
def run(self):
# Register on bonjour chat
self.id = random.randint(0, 10000000)
# Prepare properties
txt = {}
        txt['1st'] = self.fullname.encode('utf-8')
txt['last'] = ""
txt['status'] = 'avail'
txt['port.p2pj'] = 5299
txt['nick'] = self.fullname.encode('utf-8')
txt['node'] = self.node.encode('utf-8')
txt['jid'] = self.node.encode('utf-8')
txt['email'] = self.node.encode('utf-8')
txt['version'] = 1
txt['txtvers'] = 1
name = self.node + '._presence._tcp.local.'
reg_type = '_presence._tcp.local.'
# Prepare service informations
self.info = ServiceInfo(reg_type, name, inet_aton(self.ip_address), self.port, properties=txt)
# Register service
zeroconf.register_service(self.info)
self.is_ready = True
# Join thread
zeroconf.engine.join()
def unregister(self):
""" Unregister service """
zeroconf.unregister_service(self.info)
def send_sms(self, message):
logger.debug("Sending sms using 'dbus'")
bus = dbus.SystemBus()
smsobject = bus.get_object('org.ofono',
'/ril_0')
smsiface = dbus.Interface(smsobject, 'org.ofono.MessageManager')
message = message.encode('utf-8')
smsiface.SendMessage(self.number, message)
logger.debug("Sms send: %s" % message)
logger.debug("to: %s " % self.number)
def sms_to_bonjour(self, msg):
logger.debug("Forward sms to bonjour")
msg = msg.replace("<", "<")
msg = msg.replace(">", ">")
# Waiting self is bonjour registered
        while not self.is_ready:
logger.debug("Waiting bonjour contact "
"registered: %s" % self.fullname)
sleep(1)
# Connect to bonjour server
self.auth_user = get_presence_auth_user()
if self.auth_user is None:
logger.debug("Authentication user not set")
return False
#logger.debug(self.auth_user)
#logger.debug(self.auth_user.values())
#logger.debug(list(self.auth_user.values()))
host = self.auth_user['host']
port = self.auth_user['port']
so = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
logger.debug("Connecting to %s:%s" % (host, port))
try:
so.connect((host, port))
        except (socket.error, TypeError) as e:
logger.debug("Connection error: %s" % str(e))
return False
# Dont need this !?
so.setblocking(1)
so.settimeout(2)
# Prepare variables
username = self.auth_user['name']
dic = {"to": username,
"from": self.node,
"msg": msg,
"id": self.id}
# Hand check
# Send data
xml = (u"""<?xml version='1.0' encoding='UTF-8'?><stream:stream """
u"""xmlns='jabber:client' """
u"""xmlns:stream='http://etherx.jabber.org/streams' """
u"""to="%(to)s" from="%(from)s" version="1.0">""" % dic)
logger.debug(xml)
so.send(xml.encode('utf-8'))
# Read data
try:
data = so.recv(1024)
except socket.timeout:
logger.debug("socket.timeout1")
except Exception as e:
logger.debug(e)
# Send data
so.send("""<stream:features/>""".encode('utf-8'))
# Read data
try:
data = so.recv(1024)
except socket.timeout:
logger.debug("socket.timeout2")
# Send data
xml = ("""<message from="%(from)s" to="%(to)s" type="chat" """
"""id="%(id)s"><body>%(msg)s</body></message>""" % dic)
logger.debug(xml)
logger.debug("Send message")
so.send(xml.encode('utf-8'))
try:
data = so.recv(1024)
except socket.timeout:
logger.debug("socket.timeout3")
# Close connection
logger.debug("End foward sms to bonjour")
so.close()
return True
def delete_friend(number):
global friend_list
for friend in friend_list:
if friend.number == number:
logger.debug("Friend %s deleted" % friend.fullname)
index = friend_list.index(friend)
friend_list.remove(friend)
friend.unregister()
del(friend)
return index
return None
def add_friend(fullname, number):
global friend_list
number_list = [friend.number for friend in friend_list]
if not number in number_list:
# Create a new friend
logger.debug("This is a new friend: %s" % number)
# Save it !
logger.debug("PRESENCE_AUTH: " + str(get_presence_auth_user()))
auth_user = get_presence_auth_user()
new_friend = Friend(fullname, number, auth_user)
# append to friend list
friend_list.append(new_friend)
# Register it on bonjour
new_friend.start()
tmp_dict = {'name': new_friend.fullname,
'favorite': is_favorite(number),
'number': new_friend.number}
# Add friend in listmodel
pyotherside.send('add_friend_list', tmp_dict)
def load_favorite_friends():
favorites = get_favorites()
for number, name in favorites:
if number and name:
add_friend(name, number)
| gpl-3.0 | 8,010,937,772,864,076,000 | 31.62249 | 102 | 0.577004 | false |
narurien/ganeti-ceph | lib/objects.py | 1 | 66234 | #
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Transportable objects for Ganeti.
This module provides small, mostly data-only objects which are safe to
pass to and from external parties.
"""
# pylint: disable=E0203,W0201,R0902
# E0203: Access to member %r before its definition, since we use
# objects.py which doesn't explicitly initialise its members
# W0201: Attribute '%s' defined outside __init__
# R0902: Allow instances of these objects to have more than 20 attributes
import ConfigParser
import re
import copy
import logging
import time
from cStringIO import StringIO
from ganeti import errors
from ganeti import constants
from ganeti import netutils
from ganeti import outils
from ganeti import utils
from socket import AF_INET
__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
"OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network"]
_TIMESTAMPS = ["ctime", "mtime"]
_UUID = ["uuid"]
def FillDict(defaults_dict, custom_dict, skip_keys=None):
"""Basic function to apply settings on top a default dict.
@type defaults_dict: dict
@param defaults_dict: dictionary holding the default values
@type custom_dict: dict
@param custom_dict: dictionary holding customized value
@type skip_keys: list
@param skip_keys: which keys not to fill
@rtype: dict
@return: dict with the 'full' values
"""
ret_dict = copy.deepcopy(defaults_dict)
ret_dict.update(custom_dict)
if skip_keys:
for k in skip_keys:
try:
del ret_dict[k]
except KeyError:
pass
return ret_dict
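# Illustrative sketch (not part of the original module): how FillDict layers
# customized values over defaults. The keys and values below are made-up
# examples, not real Ganeti parameters.
#
#   FillDict({"a": 1, "b": 2}, {"b": 3})                  == {"a": 1, "b": 3}
#   FillDict({"a": 1, "b": 2}, {"b": 3}, skip_keys=["a"]) == {"b": 3}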
def FillIPolicy(default_ipolicy, custom_ipolicy):
"""Fills an instance policy with defaults.
"""
assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
ret_dict = copy.deepcopy(custom_ipolicy)
for key in default_ipolicy:
if key not in ret_dict:
ret_dict[key] = copy.deepcopy(default_ipolicy[key])
elif key == constants.ISPECS_STD:
ret_dict[key] = FillDict(default_ipolicy[key], ret_dict[key])
return ret_dict
def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
"""Fills the disk parameter defaults.
@see: L{FillDict} for parameters and return value
"""
assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES
return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}),
skip_keys=skip_keys))
for dt in constants.DISK_TEMPLATES)
def UpgradeGroupedParams(target, defaults):
"""Update all groups for the target parameter.
@type target: dict of dicts
@param target: {group: {parameter: value}}
@type defaults: dict
@param defaults: default parameter values
"""
if target is None:
target = {constants.PP_DEFAULT: defaults}
else:
for group in target:
target[group] = FillDict(defaults, target[group])
return target
def UpgradeBeParams(target):
"""Update the be parameters dict to the new format.
@type target: dict
@param target: "be" parameters dict
"""
if constants.BE_MEMORY in target:
memory = target[constants.BE_MEMORY]
target[constants.BE_MAXMEM] = memory
target[constants.BE_MINMEM] = memory
del target[constants.BE_MEMORY]
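# Illustrative sketch (not part of the original module): UpgradeBeParams
# mutates its argument in place, e.g. (made-up value):
#
#   beparams = {constants.BE_MEMORY: 512}
#   UpgradeBeParams(beparams)
#   # beparams is now {constants.BE_MAXMEM: 512, constants.BE_MINMEM: 512}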
def UpgradeDiskParams(diskparams):
"""Upgrade the disk parameters.
@type diskparams: dict
@param diskparams: disk parameters to upgrade
@rtype: dict
@return: the upgraded disk parameters dict
"""
if not diskparams:
result = {}
else:
result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)
return result
def UpgradeNDParams(ndparams):
"""Upgrade ndparams structure.
@type ndparams: dict
@param ndparams: disk parameters to upgrade
@rtype: dict
@return: the upgraded node parameters dict
"""
if ndparams is None:
ndparams = {}
if (constants.ND_OOB_PROGRAM in ndparams and
ndparams[constants.ND_OOB_PROGRAM] is None):
# will be reset by the line below
del ndparams[constants.ND_OOB_PROGRAM]
return FillDict(constants.NDC_DEFAULTS, ndparams)
def MakeEmptyIPolicy():
"""Create empty IPolicy dictionary.
"""
return {}
class ConfigObject(outils.ValidatedSlots):
"""A generic config object.
It has the following properties:
- provides somewhat safe recursive unpickling and pickling for its classes
- unset attributes which are defined in slots are always returned
as None instead of raising an error
Classes derived from this must always declare __slots__ (we use many
config objects and the memory reduction is useful)
"""
__slots__ = []
def __getattr__(self, name):
if name not in self.GetAllSlots():
raise AttributeError("Invalid object attribute %s.%s" %
(type(self).__name__, name))
return None
def __setstate__(self, state):
slots = self.GetAllSlots()
for name in state:
if name in slots:
setattr(self, name, state[name])
def Validate(self):
"""Validates the slots.
"""
def ToDict(self):
"""Convert to a dict holding only standard python types.
The generic routine just dumps all of this object's attributes in
a dict. It does not work if the class has children who are
ConfigObjects themselves (e.g. the nics list in an Instance), in
which case the object should subclass the function in order to
make sure all objects returned are only standard python types.
"""
result = {}
for name in self.GetAllSlots():
value = getattr(self, name, None)
if value is not None:
result[name] = value
return result
__getstate__ = ToDict
@classmethod
def FromDict(cls, val):
"""Create an object from a dictionary.
This generic routine takes a dict, instantiates a new instance of
the given class, and sets attributes based on the dict content.
As for `ToDict`, this does not work if the class has children
who are ConfigObjects themselves (e.g. the nics list in an
Instance), in which case the object should subclass the function
and alter the objects.
"""
if not isinstance(val, dict):
raise errors.ConfigurationError("Invalid object passed to FromDict:"
" expected dict, got %s" % type(val))
val_str = dict([(str(k), v) for k, v in val.iteritems()])
obj = cls(**val_str) # pylint: disable=W0142
return obj
def Copy(self):
"""Makes a deep copy of the current object and its children.
"""
dict_form = self.ToDict()
clone_obj = self.__class__.FromDict(dict_form)
return clone_obj
def __repr__(self):
"""Implement __repr__ for ConfigObjects."""
return repr(self.ToDict())
def UpgradeConfig(self):
"""Fill defaults for missing configuration values.
This method will be called at configuration load time, and its
implementation will be object dependent.
"""
pass
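# Illustrative sketch (not part of the original module): the ToDict/FromDict
# round-trip on a simple subclass such as NIC (defined further below); the
# MAC address is a made-up example.
#
#   nic = NIC(mac="aa:00:00:35:ac:01", ip=None, nicparams={})
#   nic.ToDict() == {"mac": "aa:00:00:35:ac:01", "nicparams": {}}
#   (attributes that are unset or None are omitted from the dict)
#   NIC.FromDict(nic.ToDict()).mac == "aa:00:00:35:ac:01"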
class TaggableObject(ConfigObject):
"""An generic class supporting tags.
"""
__slots__ = ["tags"]
  VALID_TAG_RE = re.compile(r"^[\w.+*/:@-]+$")
@classmethod
def ValidateTag(cls, tag):
"""Check if a tag is valid.
If the tag is invalid, an errors.TagError will be raised. The
function has no return value.
"""
if not isinstance(tag, basestring):
raise errors.TagError("Invalid tag type (not a string)")
if len(tag) > constants.MAX_TAG_LEN:
raise errors.TagError("Tag too long (>%d characters)" %
constants.MAX_TAG_LEN)
if not tag:
raise errors.TagError("Tags cannot be empty")
if not cls.VALID_TAG_RE.match(tag):
raise errors.TagError("Tag contains invalid characters")
def GetTags(self):
"""Return the tags list.
"""
tags = getattr(self, "tags", None)
if tags is None:
tags = self.tags = set()
return tags
def AddTag(self, tag):
"""Add a new tag.
"""
self.ValidateTag(tag)
tags = self.GetTags()
if len(tags) >= constants.MAX_TAGS_PER_OBJ:
raise errors.TagError("Too many tags")
self.GetTags().add(tag)
def RemoveTag(self, tag):
"""Remove a tag.
"""
self.ValidateTag(tag)
tags = self.GetTags()
try:
tags.remove(tag)
except KeyError:
raise errors.TagError("Tag not found")
def ToDict(self):
"""Taggable-object-specific conversion to standard python types.
This replaces the tags set with a list.
"""
bo = super(TaggableObject, self).ToDict()
tags = bo.get("tags", None)
if isinstance(tags, set):
bo["tags"] = list(tags)
return bo
@classmethod
def FromDict(cls, val):
"""Custom function for instances.
"""
obj = super(TaggableObject, cls).FromDict(val)
if hasattr(obj, "tags") and isinstance(obj.tags, list):
obj.tags = set(obj.tags)
return obj
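# Illustrative sketch (not part of the original module): tag handling on a
# TaggableObject subclass such as Node (defined further below); the tag and
# node name are made-up examples.
#
#   node = Node(name="node1.example.com")
#   node.AddTag("staging")
#   node.GetTags()        == set(["staging"])
#   node.ToDict()["tags"] == ["staging"]     # sets become lists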
class MasterNetworkParameters(ConfigObject):
"""Network configuration parameters for the master
  @ivar uuid: master node's UUID
@ivar ip: master IP
@ivar netmask: master netmask
@ivar netdev: master network device
@ivar ip_family: master IP family
"""
__slots__ = [
"uuid",
"ip",
"netmask",
"netdev",
"ip_family",
]
class ConfigData(ConfigObject):
"""Top-level config object."""
__slots__ = [
"version",
"cluster",
"nodes",
"nodegroups",
"instances",
"networks",
"serial_no",
] + _TIMESTAMPS
def ToDict(self):
"""Custom function for top-level config data.
This just replaces the list of instances, nodes and the cluster
with standard python types.
"""
mydict = super(ConfigData, self).ToDict()
mydict["cluster"] = mydict["cluster"].ToDict()
for key in "nodes", "instances", "nodegroups", "networks":
mydict[key] = outils.ContainerToDicts(mydict[key])
return mydict
@classmethod
def FromDict(cls, val):
"""Custom function for top-level config data
"""
obj = super(ConfigData, cls).FromDict(val)
obj.cluster = Cluster.FromDict(obj.cluster)
obj.nodes = outils.ContainerFromDicts(obj.nodes, dict, Node)
obj.instances = \
outils.ContainerFromDicts(obj.instances, dict, Instance)
obj.nodegroups = \
outils.ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
obj.networks = outils.ContainerFromDicts(obj.networks, dict, Network)
return obj
def HasAnyDiskOfType(self, dev_type):
"""Check if in there is at disk of the given type in the configuration.
@type dev_type: L{constants.LDS_BLOCK}
@param dev_type: the type to look for
@rtype: boolean
@return: boolean indicating if a disk of the given type was found or not
"""
for instance in self.instances.values():
for disk in instance.disks:
if disk.IsBasedOnDiskType(dev_type):
return True
return False
def UpgradeConfig(self):
"""Fill defaults for missing configuration values.
"""
self.cluster.UpgradeConfig()
for node in self.nodes.values():
node.UpgradeConfig()
for instance in self.instances.values():
instance.UpgradeConfig()
if self.nodegroups is None:
self.nodegroups = {}
for nodegroup in self.nodegroups.values():
nodegroup.UpgradeConfig()
if self.cluster.drbd_usermode_helper is None:
      # To decide if we set a helper, let's check if at least one instance has
# a DRBD disk. This does not cover all the possible scenarios but it
# gives a good approximation.
if self.HasAnyDiskOfType(constants.LD_DRBD8):
self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
if self.networks is None:
self.networks = {}
for network in self.networks.values():
network.UpgradeConfig()
self._UpgradeEnabledDiskTemplates()
def _UpgradeEnabledDiskTemplates(self):
"""Upgrade the cluster's enabled disk templates by inspecting the currently
enabled and/or used disk templates.
"""
# enabled_disk_templates in the cluster config were introduced in 2.8.
# Remove this code once upgrading from earlier versions is deprecated.
if not self.cluster.enabled_disk_templates:
template_set = \
set([inst.disk_template for inst in self.instances.values()])
# Add drbd and plain, if lvm is enabled (by specifying a volume group)
if self.cluster.volume_group_name:
template_set.add(constants.DT_DRBD8)
template_set.add(constants.DT_PLAIN)
# FIXME: Adapt this when dis/enabling at configure time is removed.
# Enable 'sharedfile', if they are enabled, even though they might
# currently not be used.
if constants.ENABLE_SHARED_FILE_STORAGE:
template_set.add(constants.DT_SHARED_FILE)
# Set enabled_disk_templates to the inferred disk templates. Order them
# according to a preference list that is based on Ganeti's history of
# supported disk templates.
self.cluster.enabled_disk_templates = []
for preferred_template in constants.DISK_TEMPLATE_PREFERENCE:
if preferred_template in template_set:
self.cluster.enabled_disk_templates.append(preferred_template)
template_set.remove(preferred_template)
self.cluster.enabled_disk_templates.extend(list(template_set))
class NIC(ConfigObject):
"""Config object representing a network card."""
__slots__ = ["name", "mac", "ip", "network", "nicparams", "netinfo"] + _UUID
@classmethod
def CheckParameterSyntax(cls, nicparams):
"""Check the given parameters for validity.
@type nicparams: dict
@param nicparams: dictionary with parameter names/value
@raise errors.ConfigurationError: when a parameter is not valid
"""
mode = nicparams[constants.NIC_MODE]
if (mode not in constants.NIC_VALID_MODES and
mode != constants.VALUE_AUTO):
raise errors.ConfigurationError("Invalid NIC mode '%s'" % mode)
if (mode == constants.NIC_MODE_BRIDGED and
not nicparams[constants.NIC_LINK]):
raise errors.ConfigurationError("Missing bridged NIC link")
class Disk(ConfigObject):
"""Config object representing a block device."""
__slots__ = (["name", "dev_type", "logical_id", "physical_id",
"children", "iv_name", "size", "mode", "params", "spindles"] +
_UUID)
def CreateOnSecondary(self):
"""Test if this device needs to be created on a secondary node."""
return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
def AssembleOnSecondary(self):
"""Test if this device needs to be assembled on a secondary node."""
return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
def OpenOnSecondary(self):
"""Test if this device needs to be opened on a secondary node."""
return self.dev_type in (constants.LD_LV,)
def StaticDevPath(self):
"""Return the device path if this device type has a static one.
Some devices (LVM for example) live always at the same /dev/ path,
irrespective of their status. For such devices, we return this
path, for others we return None.
@warning: The path returned is not a normalized pathname; callers
should check that it is a valid path.
"""
if self.dev_type == constants.LD_LV:
return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
elif self.dev_type == constants.LD_BLOCKDEV:
return self.logical_id[1]
elif self.dev_type == constants.LD_RBD:
return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
return None
def ChildrenNeeded(self):
"""Compute the needed number of children for activation.
This method will return either -1 (all children) or a positive
number denoting the minimum number of children needed for
activation (only mirrored devices will usually return >=0).
Currently, only DRBD8 supports diskless activation (therefore we
    return 0); for all others we keep the previous semantics and return
-1.
"""
if self.dev_type == constants.LD_DRBD8:
return 0
return -1
def IsBasedOnDiskType(self, dev_type):
"""Check if the disk or its children are based on the given type.
@type dev_type: L{constants.LDS_BLOCK}
@param dev_type: the type to look for
@rtype: boolean
@return: boolean indicating if a device of the given type was found or not
"""
if self.children:
for child in self.children:
if child.IsBasedOnDiskType(dev_type):
return True
return self.dev_type == dev_type
def GetNodes(self, node_uuid):
"""This function returns the nodes this device lives on.
    Given the node on which the parent of the device lives (or, in the
    case of a top-level device, the primary node of the device's
    instance), this function will return a list of nodes on which this
    device needs to (or can) be assembled.
"""
if self.dev_type in [constants.LD_LV, constants.LD_FILE,
constants.LD_BLOCKDEV, constants.LD_RBD,
constants.LD_EXT]:
result = [node_uuid]
elif self.dev_type in constants.LDS_DRBD:
result = [self.logical_id[0], self.logical_id[1]]
if node_uuid not in result:
raise errors.ConfigurationError("DRBD device passed unknown node")
else:
raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
return result
def ComputeNodeTree(self, parent_node_uuid):
"""Compute the node/disk tree for this disk and its children.
This method, given the node on which the parent disk lives, will
return the list of all (node UUID, disk) pairs which describe the disk
tree in the most compact way. For example, a drbd/lvm stack
will be returned as (primary_node, drbd) and (secondary_node, drbd)
which represents all the top-level devices on the nodes.
"""
my_nodes = self.GetNodes(parent_node_uuid)
result = [(node, self) for node in my_nodes]
if not self.children:
# leaf device
return result
for node in my_nodes:
for child in self.children:
child_result = child.ComputeNodeTree(node)
if len(child_result) == 1:
# child (and all its descendants) is simple, doesn't split
# over multiple hosts, so we don't need to describe it, our
# own entry for this node describes it completely
continue
else:
# check if child nodes differ from my nodes; note that
# subdisk can differ from the child itself, and be instead
# one of its descendants
for subnode, subdisk in child_result:
if subnode not in my_nodes:
result.append((subnode, subdisk))
# otherwise child is under our own node, so we ignore this
# entry (but probably the other results in the list will
# be different)
return result
def ComputeGrowth(self, amount):
"""Compute the per-VG growth requirements.
This only works for VG-based disks.
@type amount: integer
@param amount: the desired increase in (user-visible) disk space
@rtype: dict
@return: a dictionary of volume-groups and the required size
"""
if self.dev_type == constants.LD_LV:
return {self.logical_id[0]: amount}
elif self.dev_type == constants.LD_DRBD8:
if self.children:
return self.children[0].ComputeGrowth(amount)
else:
return {}
else:
# Other disk types do not require VG space
return {}
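  # Illustrative sketch (not part of the original module): for a plain LV
  # disk the growth is attributed to its volume group; names are made up.
  #
  #   disk = Disk(dev_type=constants.LD_LV,
  #               logical_id=("xenvg", "inst1-disk0"), size=1024)
  #   disk.ComputeGrowth(512) == {"xenvg": 512}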
def RecordGrow(self, amount):
"""Update the size of this disk after growth.
    This method recurses over the disk's children and updates their
    size correspondingly. The method needs to be kept in sync with the
actual algorithms from bdev.
"""
if self.dev_type in (constants.LD_LV, constants.LD_FILE,
constants.LD_RBD, constants.LD_EXT):
self.size += amount
elif self.dev_type == constants.LD_DRBD8:
if self.children:
self.children[0].RecordGrow(amount)
self.size += amount
else:
raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
" disk type %s" % self.dev_type)
def Update(self, size=None, mode=None, spindles=None):
"""Apply changes to size, spindles and mode.
"""
if self.dev_type == constants.LD_DRBD8:
if self.children:
self.children[0].Update(size=size, mode=mode)
else:
assert not self.children
if size is not None:
self.size = size
if mode is not None:
self.mode = mode
if spindles is not None:
self.spindles = spindles
def UnsetSize(self):
"""Sets recursively the size to zero for the disk and its children.
"""
if self.children:
for child in self.children:
child.UnsetSize()
self.size = 0
def SetPhysicalID(self, target_node_uuid, nodes_ip):
"""Convert the logical ID to the physical ID.
This is used only for drbd, which needs ip/port configuration.
    The routine descends into and updates its children as well, because
    this helps when only the top device is passed to the remote
node.
Arguments:
- target_node_uuid: the node UUID we wish to configure for
      - nodes_ip: a mapping of node UUID to IP
    The target_node_uuid must exist in nodes_ip, and must be one of the
nodes in the logical ID for each of the DRBD devices encountered
in the disk tree.
"""
if self.children:
for child in self.children:
child.SetPhysicalID(target_node_uuid, nodes_ip)
if self.logical_id is None and self.physical_id is not None:
return
if self.dev_type in constants.LDS_DRBD:
pnode_uuid, snode_uuid, port, pminor, sminor, secret = self.logical_id
if target_node_uuid not in (pnode_uuid, snode_uuid):
raise errors.ConfigurationError("DRBD device not knowing node %s" %
target_node_uuid)
pnode_ip = nodes_ip.get(pnode_uuid, None)
snode_ip = nodes_ip.get(snode_uuid, None)
if pnode_ip is None or snode_ip is None:
raise errors.ConfigurationError("Can't find primary or secondary node"
" for %s" % str(self))
p_data = (pnode_ip, port)
s_data = (snode_ip, port)
if pnode_uuid == target_node_uuid:
self.physical_id = p_data + s_data + (pminor, secret)
else: # it must be secondary, we tested above
self.physical_id = s_data + p_data + (sminor, secret)
else:
self.physical_id = self.logical_id
return
def ToDict(self):
"""Disk-specific conversion to standard python types.
This replaces the children lists of objects with lists of
standard python types.
"""
bo = super(Disk, self).ToDict()
for attr in ("children",):
alist = bo.get(attr, None)
if alist:
bo[attr] = outils.ContainerToDicts(alist)
return bo
@classmethod
def FromDict(cls, val):
"""Custom function for Disks
"""
obj = super(Disk, cls).FromDict(val)
if obj.children:
obj.children = outils.ContainerFromDicts(obj.children, list, Disk)
if obj.logical_id and isinstance(obj.logical_id, list):
obj.logical_id = tuple(obj.logical_id)
if obj.physical_id and isinstance(obj.physical_id, list):
obj.physical_id = tuple(obj.physical_id)
if obj.dev_type in constants.LDS_DRBD:
# we need a tuple of length six here
if len(obj.logical_id) < 6:
obj.logical_id += (None,) * (6 - len(obj.logical_id))
return obj
def __str__(self):
"""Custom str() formatter for disks.
"""
if self.dev_type == constants.LD_LV:
val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
elif self.dev_type in constants.LDS_DRBD:
node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
val = "<DRBD8("
if self.physical_id is None:
phy = "unconfigured"
else:
phy = ("configured as %s:%s %s:%s" %
(self.physical_id[0], self.physical_id[1],
self.physical_id[2], self.physical_id[3]))
val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
(node_a, minor_a, node_b, minor_b, port, phy))
if self.children and self.children.count(None) == 0:
val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
else:
val += "no local storage"
else:
val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
(self.dev_type, self.logical_id, self.physical_id, self.children))
if self.iv_name is None:
val += ", not visible"
else:
val += ", visible as /dev/%s" % self.iv_name
if self.spindles is not None:
val += ", spindles=%s" % self.spindles
if isinstance(self.size, int):
val += ", size=%dm)>" % self.size
else:
val += ", size='%s')>" % (self.size,)
return val
def Verify(self):
"""Checks that this disk is correctly configured.
"""
all_errors = []
if self.mode not in constants.DISK_ACCESS_SET:
all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
return all_errors
def UpgradeConfig(self):
"""Fill defaults for missing configuration values.
"""
if self.children:
for child in self.children:
child.UpgradeConfig()
# FIXME: Make this configurable in Ganeti 2.7
self.params = {}
# add here config upgrade for this disk
@staticmethod
def ComputeLDParams(disk_template, disk_params):
"""Computes Logical Disk parameters from Disk Template parameters.
@type disk_template: string
@param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
@type disk_params: dict
@param disk_params: disk template parameters;
      dict(template_name -> parameters)
@rtype: list(dict)
@return: a list of dicts, one for each node of the disk hierarchy. Each dict
contains the LD parameters of the node. The tree is flattened in-order.
"""
if disk_template not in constants.DISK_TEMPLATES:
raise errors.ProgrammerError("Unknown disk template %s" % disk_template)
assert disk_template in disk_params
result = list()
dt_params = disk_params[disk_template]
if disk_template == constants.DT_DRBD8:
result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_DRBD8], {
constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
constants.LDP_PROTOCOL: dt_params[constants.DRBD_PROTOCOL],
constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
}))
# data LV
result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
}))
# metadata LV
result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
}))
elif disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
result.append(constants.DISK_LD_DEFAULTS[constants.LD_FILE])
elif disk_template == constants.DT_PLAIN:
result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
constants.LDP_STRIPES: dt_params[constants.LV_STRIPES],
}))
elif disk_template == constants.DT_BLOCK:
result.append(constants.DISK_LD_DEFAULTS[constants.LD_BLOCKDEV])
elif disk_template == constants.DT_RBD:
result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_RBD], {
constants.LDP_POOL: dt_params[constants.RBD_POOL],
constants.LDP_ACCESS: dt_params[constants.RBD_ACCESS],
}))
elif disk_template == constants.DT_EXT:
result.append(constants.DISK_LD_DEFAULTS[constants.LD_EXT])
return result
class InstancePolicy(ConfigObject):
"""Config object representing instance policy limits dictionary.
  Note that this object is not actually used in the config; it's just
  used as a placeholder for a few functions.
"""
@classmethod
def CheckParameterSyntax(cls, ipolicy, check_std):
""" Check the instance policy for validity.
@type ipolicy: dict
@param ipolicy: dictionary with min/max/std specs and policies
@type check_std: bool
@param check_std: Whether to check std value or just assume compliance
@raise errors.ConfigurationError: when the policy is not legal
"""
InstancePolicy.CheckISpecSyntax(ipolicy, check_std)
if constants.IPOLICY_DTS in ipolicy:
InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
for key in constants.IPOLICY_PARAMETERS:
if key in ipolicy:
InstancePolicy.CheckParameter(key, ipolicy[key])
wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
if wrong_keys:
raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
utils.CommaJoin(wrong_keys))
@classmethod
def _CheckIncompleteSpec(cls, spec, keyname):
missing_params = constants.ISPECS_PARAMETERS - frozenset(spec.keys())
if missing_params:
msg = ("Missing instance specs parameters for %s: %s" %
(keyname, utils.CommaJoin(missing_params)))
raise errors.ConfigurationError(msg)
@classmethod
def CheckISpecSyntax(cls, ipolicy, check_std):
"""Check the instance policy specs for validity.
@type ipolicy: dict
@param ipolicy: dictionary with min/max/std specs
@type check_std: bool
@param check_std: Whether to check std value or just assume compliance
@raise errors.ConfigurationError: when specs are not valid
"""
if constants.ISPECS_MINMAX not in ipolicy:
# Nothing to check
return
if check_std and constants.ISPECS_STD not in ipolicy:
msg = "Missing key in ipolicy: %s" % constants.ISPECS_STD
raise errors.ConfigurationError(msg)
stdspec = ipolicy.get(constants.ISPECS_STD)
if check_std:
InstancePolicy._CheckIncompleteSpec(stdspec, constants.ISPECS_STD)
if not ipolicy[constants.ISPECS_MINMAX]:
raise errors.ConfigurationError("Empty minmax specifications")
std_is_good = False
for minmaxspecs in ipolicy[constants.ISPECS_MINMAX]:
missing = constants.ISPECS_MINMAX_KEYS - frozenset(minmaxspecs.keys())
if missing:
msg = "Missing instance specification: %s" % utils.CommaJoin(missing)
raise errors.ConfigurationError(msg)
for (key, spec) in minmaxspecs.items():
InstancePolicy._CheckIncompleteSpec(spec, key)
spec_std_ok = True
for param in constants.ISPECS_PARAMETERS:
par_std_ok = InstancePolicy._CheckISpecParamSyntax(minmaxspecs, stdspec,
param, check_std)
spec_std_ok = spec_std_ok and par_std_ok
std_is_good = std_is_good or spec_std_ok
if not std_is_good:
raise errors.ConfigurationError("Invalid std specifications")
@classmethod
def _CheckISpecParamSyntax(cls, minmaxspecs, stdspec, name, check_std):
"""Check the instance policy specs for validity on a given key.
    We check if the instance specs make sense for a given key, that is
    if minmaxspecs[min][name] <= stdspec[name] <= minmaxspecs[max][name].
@type minmaxspecs: dict
@param minmaxspecs: dictionary with min and max instance spec
@type stdspec: dict
@param stdspec: dictionary with standard instance spec
@type name: string
@param name: what are the limits for
@type check_std: bool
@param check_std: Whether to check std value or just assume compliance
@rtype: bool
@return: C{True} when specs are valid, C{False} when standard spec for the
given name is not valid
@raise errors.ConfigurationError: when min/max specs for the given name
are not valid
"""
minspec = minmaxspecs[constants.ISPECS_MIN]
maxspec = minmaxspecs[constants.ISPECS_MAX]
min_v = minspec[name]
max_v = maxspec[name]
if min_v > max_v:
err = ("Invalid specification of min/max values for %s: %s/%s" %
(name, min_v, max_v))
raise errors.ConfigurationError(err)
elif check_std:
std_v = stdspec.get(name, min_v)
return std_v >= min_v and std_v <= max_v
else:
return True
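  # Illustrative sketch (not part of the original module): for a given spec
  # name (values below are made up), with min=512 and max=2048:
  #   std=1024 -> True  (512 <= 1024 <= 2048)
  #   std=4096 -> False (CheckISpecSyntax then flags the std spec as invalid)
  #   min=2048, max=512 -> errors.ConfigurationError (min > max)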
@classmethod
def CheckDiskTemplates(cls, disk_templates):
"""Checks the disk templates for validity.
"""
if not disk_templates:
raise errors.ConfigurationError("Instance policy must contain" +
" at least one disk template")
wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
if wrong:
raise errors.ConfigurationError("Invalid disk template(s) %s" %
utils.CommaJoin(wrong))
@classmethod
def CheckParameter(cls, key, value):
"""Checks a parameter.
Currently we expect all parameters to be float values.
"""
try:
float(value)
except (TypeError, ValueError), err:
raise errors.ConfigurationError("Invalid value for key" " '%s':"
" '%s', error: %s" % (key, value, err))
class Instance(TaggableObject):
"""Config object representing an instance."""
__slots__ = [
"name",
"primary_node",
"os",
"hypervisor",
"hvparams",
"beparams",
"osparams",
"admin_state",
"nics",
"disks",
"disk_template",
"disks_active",
"network_port",
"serial_no",
] + _TIMESTAMPS + _UUID
def _ComputeSecondaryNodes(self):
"""Compute the list of secondary nodes.
This is a simple wrapper over _ComputeAllNodes.
"""
all_nodes = set(self._ComputeAllNodes())
all_nodes.discard(self.primary_node)
return tuple(all_nodes)
secondary_nodes = property(_ComputeSecondaryNodes, None, None,
"List of names of secondary nodes")
def _ComputeAllNodes(self):
"""Compute the list of all nodes.
Since the data is already there (in the drbd disks), keeping it as
    a separate normal attribute is redundant and, if not properly
    synchronised, can cause problems. Thus it's better to compute it
dynamically.
"""
def _Helper(nodes, device):
"""Recursively computes nodes given a top device."""
if device.dev_type in constants.LDS_DRBD:
nodea, nodeb = device.logical_id[:2]
nodes.add(nodea)
nodes.add(nodeb)
if device.children:
for child in device.children:
_Helper(nodes, child)
all_nodes = set()
all_nodes.add(self.primary_node)
for device in self.disks:
_Helper(all_nodes, device)
return tuple(all_nodes)
all_nodes = property(_ComputeAllNodes, None, None,
"List of names of all the nodes of the instance")
def MapLVsByNode(self, lvmap=None, devs=None, node_uuid=None):
"""Provide a mapping of nodes to LVs this instance owns.
This function figures out what logical volumes should belong on
which nodes, recursing through a device tree.
@type lvmap: dict
@param lvmap: optional dictionary to receive the
'node' : ['lv', ...] data.
@type devs: list of L{Disk}
@param devs: disks to get the LV name for. If None, all disk of this
instance are used.
@type node_uuid: string
@param node_uuid: UUID of the node to get the LV names for. If None, the
primary node of this instance is used.
@return: None if lvmap arg is given, otherwise, a dictionary of
the form { 'node_uuid' : ['volume1', 'volume2', ...], ... };
volumeN is of the form "vg_name/lv_name", compatible with
GetVolumeList()
"""
if node_uuid is None:
node_uuid = self.primary_node
if lvmap is None:
lvmap = {
node_uuid: [],
}
ret = lvmap
else:
if not node_uuid in lvmap:
lvmap[node_uuid] = []
ret = None
if not devs:
devs = self.disks
for dev in devs:
if dev.dev_type == constants.LD_LV:
lvmap[node_uuid].append(dev.logical_id[0] + "/" + dev.logical_id[1])
elif dev.dev_type in constants.LDS_DRBD:
if dev.children:
self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
elif dev.children:
self.MapLVsByNode(lvmap, dev.children, node_uuid)
return ret
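  # Illustrative sketch (not part of the original module): for an instance
  # with a single plain LV disk (all names below are made up):
  #
  #   inst = Instance(name="inst1", primary_node="node1-uuid",
  #                   disks=[Disk(dev_type=constants.LD_LV, size=1024,
  #                               logical_id=("xenvg", "lv-data"))])
  #   inst.MapLVsByNode() == {"node1-uuid": ["xenvg/lv-data"]}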
def FindDisk(self, idx):
"""Find a disk given having a specified index.
This is just a wrapper that does validation of the index.
@type idx: int
@param idx: the disk index
@rtype: L{Disk}
@return: the corresponding disk
@raise errors.OpPrereqError: when the given index is not valid
"""
try:
idx = int(idx)
return self.disks[idx]
except (TypeError, ValueError), err:
raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
errors.ECODE_INVAL)
except IndexError:
raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
" 0 to %d" % (idx, len(self.disks) - 1),
errors.ECODE_INVAL)
def ToDict(self):
"""Instance-specific conversion to standard python types.
This replaces the children lists of objects with lists of standard
python types.
"""
bo = super(Instance, self).ToDict()
for attr in "nics", "disks":
alist = bo.get(attr, None)
if alist:
nlist = outils.ContainerToDicts(alist)
else:
nlist = []
bo[attr] = nlist
return bo
@classmethod
def FromDict(cls, val):
"""Custom function for instances.
"""
if "admin_state" not in val:
if val.get("admin_up", False):
val["admin_state"] = constants.ADMINST_UP
else:
val["admin_state"] = constants.ADMINST_DOWN
if "admin_up" in val:
del val["admin_up"]
obj = super(Instance, cls).FromDict(val)
obj.nics = outils.ContainerFromDicts(obj.nics, list, NIC)
obj.disks = outils.ContainerFromDicts(obj.disks, list, Disk)
return obj
def UpgradeConfig(self):
"""Fill defaults for missing configuration values.
"""
for nic in self.nics:
nic.UpgradeConfig()
for disk in self.disks:
disk.UpgradeConfig()
if self.hvparams:
for key in constants.HVC_GLOBALS:
try:
del self.hvparams[key]
except KeyError:
pass
if self.osparams is None:
self.osparams = {}
UpgradeBeParams(self.beparams)
if self.disks_active is None:
self.disks_active = self.admin_state == constants.ADMINST_UP
class OS(ConfigObject):
"""Config object representing an operating system.
@type supported_parameters: list
  @ivar supported_parameters: a list of (name, description) tuples
      describing the parameters supported by this OS
@type VARIANT_DELIM: string
@cvar VARIANT_DELIM: the variant delimiter
"""
__slots__ = [
"name",
"path",
"api_versions",
"create_script",
"export_script",
"import_script",
"rename_script",
"verify_script",
"supported_variants",
"supported_parameters",
]
VARIANT_DELIM = "+"
@classmethod
def SplitNameVariant(cls, name):
"""Splits the name into the proper name and variant.
@param name: the OS (unprocessed) name
@rtype: list
@return: a list of two elements; if the original name didn't
        contain a variant, the variant is returned as an empty string
"""
nv = name.split(cls.VARIANT_DELIM, 1)
if len(nv) == 1:
nv.append("")
return nv
@classmethod
def GetName(cls, name):
"""Returns the proper name of the os (without the variant).
@param name: the OS (unprocessed) name
"""
return cls.SplitNameVariant(name)[0]
@classmethod
def GetVariant(cls, name):
"""Returns the variant the os (without the base name).
@param name: the OS (unprocessed) name
"""
return cls.SplitNameVariant(name)[1]
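  # Illustrative sketch (not part of the original module): splitting OS names
  # into base name and variant ("debootstrap+default" is a made-up example).
  #
  #   OS.SplitNameVariant("debootstrap+default") == ["debootstrap", "default"]
  #   OS.GetName("debootstrap+default")          == "debootstrap"
  #   OS.GetVariant("debootstrap")               == ""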
class ExtStorage(ConfigObject):
"""Config object representing an External Storage Provider.
"""
__slots__ = [
"name",
"path",
"create_script",
"remove_script",
"grow_script",
"attach_script",
"detach_script",
"setinfo_script",
"verify_script",
"supported_parameters",
]
class NodeHvState(ConfigObject):
"""Hypvervisor state on a node.
@ivar mem_total: Total amount of memory
@ivar mem_node: Memory used by, or reserved for, the node itself (not always
available)
@ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
rounding
@ivar mem_inst: Memory used by instances living on node
@ivar cpu_total: Total node CPU core count
@ivar cpu_node: Number of CPU cores reserved for the node itself
"""
__slots__ = [
"mem_total",
"mem_node",
"mem_hv",
"mem_inst",
"cpu_total",
"cpu_node",
] + _TIMESTAMPS
class NodeDiskState(ConfigObject):
"""Disk state on a node.
"""
__slots__ = [
"total",
"reserved",
"overhead",
] + _TIMESTAMPS
class Node(TaggableObject):
"""Config object representing a node.
@ivar hv_state: Hypervisor state (e.g. number of CPUs)
  @ivar hv_state_static: Hypervisor state overridden by user
  @ivar disk_state: Disk state (e.g. free space)
  @ivar disk_state_static: Disk state overridden by user
"""
__slots__ = [
"name",
"primary_ip",
"secondary_ip",
"serial_no",
"master_candidate",
"offline",
"drained",
"group",
"master_capable",
"vm_capable",
"ndparams",
"powered",
"hv_state",
"hv_state_static",
"disk_state",
"disk_state_static",
] + _TIMESTAMPS + _UUID
def UpgradeConfig(self):
"""Fill defaults for missing configuration values.
"""
# pylint: disable=E0203
# because these are "defined" via slots, not manually
if self.master_capable is None:
self.master_capable = True
if self.vm_capable is None:
self.vm_capable = True
if self.ndparams is None:
self.ndparams = {}
# And remove any global parameter
for key in constants.NDC_GLOBALS:
if key in self.ndparams:
logging.warning("Ignoring %s node parameter for node %s",
key, self.name)
del self.ndparams[key]
if self.powered is None:
self.powered = True
def ToDict(self):
"""Custom function for serializing.
"""
data = super(Node, self).ToDict()
hv_state = data.get("hv_state", None)
if hv_state is not None:
data["hv_state"] = outils.ContainerToDicts(hv_state)
disk_state = data.get("disk_state", None)
if disk_state is not None:
data["disk_state"] = \
dict((key, outils.ContainerToDicts(value))
for (key, value) in disk_state.items())
return data
@classmethod
def FromDict(cls, val):
"""Custom function for deserializing.
"""
obj = super(Node, cls).FromDict(val)
if obj.hv_state is not None:
obj.hv_state = \
outils.ContainerFromDicts(obj.hv_state, dict, NodeHvState)
if obj.disk_state is not None:
obj.disk_state = \
dict((key, outils.ContainerFromDicts(value, dict, NodeDiskState))
for (key, value) in obj.disk_state.items())
return obj
class NodeGroup(TaggableObject):
"""Config object representing a node group."""
__slots__ = [
"name",
"members",
"ndparams",
"diskparams",
"ipolicy",
"serial_no",
"hv_state_static",
"disk_state_static",
"alloc_policy",
"networks",
] + _TIMESTAMPS + _UUID
def ToDict(self):
"""Custom function for nodegroup.
This discards the members object, which gets recalculated and is only kept
in memory.
"""
mydict = super(NodeGroup, self).ToDict()
del mydict["members"]
return mydict
@classmethod
def FromDict(cls, val):
"""Custom function for nodegroup.
    The members slot is initialized to an empty list upon deserialization.
"""
obj = super(NodeGroup, cls).FromDict(val)
obj.members = []
return obj
def UpgradeConfig(self):
"""Fill defaults for missing configuration values.
"""
if self.ndparams is None:
self.ndparams = {}
if self.serial_no is None:
self.serial_no = 1
if self.alloc_policy is None:
self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
# We only update mtime, and not ctime, since we would not be able
# to provide a correct value for creation time.
if self.mtime is None:
self.mtime = time.time()
if self.diskparams is None:
self.diskparams = {}
if self.ipolicy is None:
self.ipolicy = MakeEmptyIPolicy()
if self.networks is None:
self.networks = {}
def FillND(self, node):
"""Return filled out ndparams for L{objects.Node}
@type node: L{objects.Node}
@param node: A Node object to fill
    @return: a copy of the node's ndparams with defaults filled
"""
return self.SimpleFillND(node.ndparams)
def SimpleFillND(self, ndparams):
"""Fill a given ndparams dict with defaults.
@type ndparams: dict
@param ndparams: the dict to fill
@rtype: dict
@return: a copy of the passed in ndparams with missing keys filled
from the node group defaults
"""
return FillDict(self.ndparams, ndparams)
class Cluster(TaggableObject):
"""Config object representing the cluster."""
__slots__ = [
"serial_no",
"rsahostkeypub",
"highest_used_port",
"tcpudp_port_pool",
"mac_prefix",
"volume_group_name",
"reserved_lvs",
"drbd_usermode_helper",
"default_bridge",
"default_hypervisor",
"master_node",
"master_ip",
"master_netdev",
"master_netmask",
"use_external_mip_script",
"cluster_name",
"file_storage_dir",
"shared_file_storage_dir",
"enabled_hypervisors",
"hvparams",
"ipolicy",
"os_hvp",
"beparams",
"osparams",
"nicparams",
"ndparams",
"diskparams",
"candidate_pool_size",
"modify_etc_hosts",
"modify_ssh_setup",
"maintain_node_health",
"uid_pool",
"default_iallocator",
"hidden_os",
"blacklisted_os",
"primary_ip_family",
"prealloc_wipe_disks",
"hv_state_static",
"disk_state_static",
"enabled_disk_templates",
] + _TIMESTAMPS + _UUID
def UpgradeConfig(self):
"""Fill defaults for missing configuration values.
"""
# pylint: disable=E0203
# because these are "defined" via slots, not manually
if self.hvparams is None:
self.hvparams = constants.HVC_DEFAULTS
else:
for hypervisor in self.hvparams:
self.hvparams[hypervisor] = FillDict(
constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])
if self.os_hvp is None:
self.os_hvp = {}
# osparams added before 2.2
if self.osparams is None:
self.osparams = {}
self.ndparams = UpgradeNDParams(self.ndparams)
self.beparams = UpgradeGroupedParams(self.beparams,
constants.BEC_DEFAULTS)
for beparams_group in self.beparams:
UpgradeBeParams(self.beparams[beparams_group])
migrate_default_bridge = not self.nicparams
self.nicparams = UpgradeGroupedParams(self.nicparams,
constants.NICC_DEFAULTS)
if migrate_default_bridge:
self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
self.default_bridge
if self.modify_etc_hosts is None:
self.modify_etc_hosts = True
if self.modify_ssh_setup is None:
self.modify_ssh_setup = True
# default_bridge is no longer used in 2.1. The slot is left there to
# support auto-upgrading. It can be removed once we decide to deprecate
# upgrading straight from 2.0.
if self.default_bridge is not None:
self.default_bridge = None
# default_hypervisor is just the first enabled one in 2.1. This slot and
# code can be removed once upgrading straight from 2.0 is deprecated.
if self.default_hypervisor is not None:
self.enabled_hypervisors = ([self.default_hypervisor] +
[hvname for hvname in self.enabled_hypervisors
if hvname != self.default_hypervisor])
self.default_hypervisor = None
# maintain_node_health added after 2.1.1
if self.maintain_node_health is None:
self.maintain_node_health = False
if self.uid_pool is None:
self.uid_pool = []
if self.default_iallocator is None:
self.default_iallocator = ""
# reserved_lvs added before 2.2
if self.reserved_lvs is None:
self.reserved_lvs = []
# hidden and blacklisted operating systems added before 2.2.1
if self.hidden_os is None:
self.hidden_os = []
if self.blacklisted_os is None:
self.blacklisted_os = []
# primary_ip_family added before 2.3
if self.primary_ip_family is None:
self.primary_ip_family = AF_INET
if self.master_netmask is None:
ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
self.master_netmask = ipcls.iplen
if self.prealloc_wipe_disks is None:
self.prealloc_wipe_disks = False
# shared_file_storage_dir added before 2.5
if self.shared_file_storage_dir is None:
self.shared_file_storage_dir = ""
if self.use_external_mip_script is None:
self.use_external_mip_script = False
if self.diskparams:
self.diskparams = UpgradeDiskParams(self.diskparams)
else:
self.diskparams = constants.DISK_DT_DEFAULTS.copy()
# instance policy added before 2.6
if self.ipolicy is None:
self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
else:
# we can either make sure to upgrade the ipolicy always, or only
# do it in some corner cases (e.g. missing keys); note that this
# will break any removal of keys from the ipolicy dict
wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
if wrongkeys:
# These keys would be silently removed by FillIPolicy()
msg = ("Cluster instance policy contains spurious keys: %s" %
utils.CommaJoin(wrongkeys))
raise errors.ConfigurationError(msg)
self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)
@property
def primary_hypervisor(self):
"""The first hypervisor is the primary.
Useful, for example, for L{Node}'s hv/disk state.
"""
return self.enabled_hypervisors[0]
def ToDict(self):
"""Custom function for cluster.
"""
mydict = super(Cluster, self).ToDict()
if self.tcpudp_port_pool is None:
tcpudp_port_pool = []
else:
tcpudp_port_pool = list(self.tcpudp_port_pool)
mydict["tcpudp_port_pool"] = tcpudp_port_pool
return mydict
@classmethod
def FromDict(cls, val):
"""Custom function for cluster.
"""
obj = super(Cluster, cls).FromDict(val)
if obj.tcpudp_port_pool is None:
obj.tcpudp_port_pool = set()
elif not isinstance(obj.tcpudp_port_pool, set):
obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
return obj
def SimpleFillDP(self, diskparams):
"""Fill a given diskparams dict with cluster defaults.
@param diskparams: The diskparams
@return: The defaults dict
"""
return FillDiskParams(self.diskparams, diskparams)
def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
"""Get the default hypervisor parameters for the cluster.
@param hypervisor: the hypervisor name
@param os_name: if specified, we'll also update the defaults for this OS
@param skip_keys: if passed, list of keys not to use
@return: the defaults dict
"""
if skip_keys is None:
skip_keys = []
fill_stack = [self.hvparams.get(hypervisor, {})]
if os_name is not None:
os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
fill_stack.append(os_hvp)
ret_dict = {}
for o_dict in fill_stack:
ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)
return ret_dict
def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
"""Fill a given hvparams dict with cluster defaults.
@type hv_name: string
@param hv_name: the hypervisor to use
@type os_name: string
@param os_name: the OS to use for overriding the hypervisor defaults
@type skip_globals: boolean
@param skip_globals: if True, the global hypervisor parameters will
not be filled
@rtype: dict
@return: a copy of the given hvparams with missing keys filled from
the cluster defaults
"""
if skip_globals:
skip_keys = constants.HVC_GLOBALS
else:
skip_keys = []
def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
return FillDict(def_dict, hvparams, skip_keys=skip_keys)
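  # Illustrative sketch (not part of the original module): hvparams are
  # layered, lowest to highest precedence, as cluster hvparams[hv_name],
  # then os_hvp[os_name][hv_name], then the per-instance hvparams argument.
  # Hypervisor/OS names and keys below are made-up examples.
  #
  #   self.hvparams["kvm"]  contains e.g. {"acpi": True, ...}
  #   self.os_hvp["debian"] == {"kvm": {"acpi": False}}
  #   self.SimpleFillHV("kvm", "debian", {"boot_order": "cdrom"})
  #     -> cluster defaults overridden with acpi=False and boot_order="cdrom"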
def FillHV(self, instance, skip_globals=False):
"""Fill an instance's hvparams dict with cluster defaults.
@type instance: L{objects.Instance}
@param instance: the instance parameter to fill
@type skip_globals: boolean
@param skip_globals: if True, the global hypervisor parameters will
not be filled
@rtype: dict
@return: a copy of the instance's hvparams with missing keys filled from
the cluster defaults
"""
return self.SimpleFillHV(instance.hypervisor, instance.os,
instance.hvparams, skip_globals)
def SimpleFillBE(self, beparams):
"""Fill a given beparams dict with cluster defaults.
@type beparams: dict
@param beparams: the dict to fill
@rtype: dict
@return: a copy of the passed in beparams with missing keys filled
from the cluster defaults
"""
return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)
def FillBE(self, instance):
"""Fill an instance's beparams dict with cluster defaults.
@type instance: L{objects.Instance}
@param instance: the instance parameter to fill
@rtype: dict
@return: a copy of the instance's beparams with missing keys filled from
the cluster defaults
"""
return self.SimpleFillBE(instance.beparams)
def SimpleFillNIC(self, nicparams):
"""Fill a given nicparams dict with cluster defaults.
@type nicparams: dict
@param nicparams: the dict to fill
@rtype: dict
@return: a copy of the passed in nicparams with missing keys filled
from the cluster defaults
"""
return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)
def SimpleFillOS(self, os_name, os_params):
"""Fill an instance's osparams dict with cluster defaults.
@type os_name: string
@param os_name: the OS name to use
@type os_params: dict
@param os_params: the dict to fill with default values
@rtype: dict
@return: a copy of the instance's osparams with missing keys filled from
the cluster defaults
"""
name_only = os_name.split("+", 1)[0]
# base OS
result = self.osparams.get(name_only, {})
# OS with variant
result = FillDict(result, self.osparams.get(os_name, {}))
# specified params
return FillDict(result, os_params)
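  # Illustrative sketch (not part of the original module): osparams for the
  # base OS are overridden by the variant entry, which in turn is overridden
  # by the explicitly passed parameters (names/values below are made up).
  #
  #   self.osparams == {"debian": {"mirror": "http://deb.example.com"},
  #                     "debian+minimal": {"dhcp": "no"}}
  #   self.SimpleFillOS("debian+minimal", {"mirror": "http://local-mirror"})
  #     == {"mirror": "http://local-mirror", "dhcp": "no"}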
@staticmethod
def SimpleFillHvState(hv_state):
"""Fill an hv_state sub dict with cluster defaults.
"""
return FillDict(constants.HVST_DEFAULTS, hv_state)
@staticmethod
def SimpleFillDiskState(disk_state):
"""Fill an disk_state sub dict with cluster defaults.
"""
return FillDict(constants.DS_DEFAULTS, disk_state)
def FillND(self, node, nodegroup):
"""Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}
@type node: L{objects.Node}
@param node: A Node object to fill
@type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node's ndparams with defaults filled
"""
return self.SimpleFillND(nodegroup.FillND(node))
def SimpleFillND(self, ndparams):
"""Fill a given ndparams dict with defaults.
@type ndparams: dict
@param ndparams: the dict to fill
@rtype: dict
@return: a copy of the passed in ndparams with missing keys filled
from the cluster defaults
"""
return FillDict(self.ndparams, ndparams)
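  # Illustrative sketch (not part of the original module): the full ndparams
  # chain via FillND is cluster defaults, then node group overrides, then the
  # node's own overrides (parameter names below are made up).
  #
  #   cluster.ndparams == {"p1": "a", "p2": "b"}
  #   group.ndparams   == {"p2": "c"}
  #   node.ndparams    == {"p1": "d"}
  #   cluster.FillND(node, group) == {"p1": "d", "p2": "c"}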
def SimpleFillIPolicy(self, ipolicy):
""" Fill instance policy dict with defaults.
@type ipolicy: dict
@param ipolicy: the dict to fill
@rtype: dict
@return: a copy of passed ipolicy with missing keys filled from
the cluster defaults
"""
return FillIPolicy(self.ipolicy, ipolicy)
def IsDiskTemplateEnabled(self, disk_template):
"""Checks if a particular disk template is enabled.
"""
return utils.storage.IsDiskTemplateEnabled(
disk_template, self.enabled_disk_templates)
def IsFileStorageEnabled(self):
"""Checks if file storage is enabled.
"""
return utils.storage.IsFileStorageEnabled(self.enabled_disk_templates)
def IsSharedFileStorageEnabled(self):
"""Checks if shared file storage is enabled.
"""
return utils.storage.IsSharedFileStorageEnabled(
self.enabled_disk_templates)
class BlockDevStatus(ConfigObject):
"""Config object representing the status of a block device."""
__slots__ = [
"dev_path",
"major",
"minor",
"sync_percent",
"estimated_time",
"is_degraded",
"ldisk_status",
]
class ImportExportStatus(ConfigObject):
"""Config object representing the status of an import or export."""
__slots__ = [
"recent_output",
"listen_port",
"connected",
"progress_mbytes",
"progress_throughput",
"progress_eta",
"progress_percent",
"exit_status",
"error_message",
] + _TIMESTAMPS
class ImportExportOptions(ConfigObject):
"""Options for import/export daemon
@ivar key_name: X509 key name (None for cluster certificate)
@ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
@ivar compress: Compression method (one of L{constants.IEC_ALL})
@ivar magic: Used to ensure the connection goes to the right disk
@ivar ipv6: Whether to use IPv6
@ivar connect_timeout: Number of seconds for establishing connection
"""
__slots__ = [
"key_name",
"ca_pem",
"compress",
"magic",
"ipv6",
"connect_timeout",
]
class ConfdRequest(ConfigObject):
"""Object holding a confd request.
@ivar protocol: confd protocol version
@ivar type: confd query type
@ivar query: query request
@ivar rsalt: requested reply salt
"""
__slots__ = [
"protocol",
"type",
"query",
"rsalt",
]
class ConfdReply(ConfigObject):
"""Object holding a confd reply.
@ivar protocol: confd protocol version
@ivar status: reply status code (ok, error)
@ivar answer: confd query reply
@ivar serial: configuration serial number
"""
__slots__ = [
"protocol",
"status",
"answer",
"serial",
]
class QueryFieldDefinition(ConfigObject):
"""Object holding a query field definition.
@ivar name: Field name
@ivar title: Human-readable title
@ivar kind: Field type
@ivar doc: Human-readable description
"""
__slots__ = [
"name",
"title",
"kind",
"doc",
]
class _QueryResponseBase(ConfigObject):
__slots__ = [
"fields",
]
def ToDict(self):
"""Custom function for serializing.
"""
mydict = super(_QueryResponseBase, self).ToDict()
mydict["fields"] = outils.ContainerToDicts(mydict["fields"])
return mydict
@classmethod
def FromDict(cls, val):
"""Custom function for de-serializing.
"""
obj = super(_QueryResponseBase, cls).FromDict(val)
obj.fields = \
outils.ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
return obj
class QueryResponse(_QueryResponseBase):
"""Object holding the response to a query.
@ivar fields: List of L{QueryFieldDefinition} objects
@ivar data: Requested data
"""
__slots__ = [
"data",
]
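# Illustrative round trip (added comment, not part of the original file; the
# field values are hypothetical):
#   resp = QueryResponse(fields=[QueryFieldDefinition(name="name")], data=[])
#   d = resp.ToDict()      # "fields" becomes a list of plain dicts
#   QueryResponse.FromDict(d).fields[0].name == "name"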
class QueryFieldsRequest(ConfigObject):
"""Object holding a request for querying available fields.
"""
__slots__ = [
"what",
"fields",
]
class QueryFieldsResponse(_QueryResponseBase):
"""Object holding the response to a query for fields.
@ivar fields: List of L{QueryFieldDefinition} objects
"""
__slots__ = []
class MigrationStatus(ConfigObject):
"""Object holding the status of a migration.
"""
__slots__ = [
"status",
"transferred_ram",
"total_ram",
]
class InstanceConsole(ConfigObject):
"""Object describing how to access the console of an instance.
"""
__slots__ = [
"instance",
"kind",
"message",
"host",
"port",
"user",
"command",
"display",
]
def Validate(self):
"""Validates contents of this object.
"""
assert self.kind in constants.CONS_ALL, "Unknown console type"
assert self.instance, "Missing instance name"
assert self.message or self.kind in [constants.CONS_SSH,
constants.CONS_SPICE,
constants.CONS_VNC]
assert self.host or self.kind == constants.CONS_MESSAGE
assert self.port or self.kind in [constants.CONS_MESSAGE,
constants.CONS_SSH]
assert self.user or self.kind in [constants.CONS_MESSAGE,
constants.CONS_SPICE,
constants.CONS_VNC]
assert self.command or self.kind in [constants.CONS_MESSAGE,
constants.CONS_SPICE,
constants.CONS_VNC]
assert self.display or self.kind in [constants.CONS_MESSAGE,
constants.CONS_SPICE,
constants.CONS_SSH]
return True
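# Example of what the assertions above require (added comment, not part of the
# original file; the values are hypothetical): an SSH console must carry an
# instance name, a host, a user and a command, e.g.
#   InstanceConsole(instance="inst1.example.com", kind=constants.CONS_SSH,
#                   host="node1.example.com", user="root",
#                   command=["ssh", "-t", "node1.example.com"])
# while a "message" console needs only the instance name and the message text.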
class Network(TaggableObject):
"""Object representing a network definition for ganeti.
"""
__slots__ = [
"name",
"serial_no",
"mac_prefix",
"network",
"network6",
"gateway",
"gateway6",
"reservations",
"ext_reservations",
] + _TIMESTAMPS + _UUID
def HooksDict(self, prefix=""):
"""Export a dictionary used by hooks with a network's information.
@type prefix: String
@param prefix: Prefix to prepend to the dict entries
"""
result = {
"%sNETWORK_NAME" % prefix: self.name,
"%sNETWORK_UUID" % prefix: self.uuid,
"%sNETWORK_TAGS" % prefix: " ".join(self.GetTags()),
}
if self.network:
result["%sNETWORK_SUBNET" % prefix] = self.network
if self.gateway:
result["%sNETWORK_GATEWAY" % prefix] = self.gateway
if self.network6:
result["%sNETWORK_SUBNET6" % prefix] = self.network6
if self.gateway6:
result["%sNETWORK_GATEWAY6" % prefix] = self.gateway6
if self.mac_prefix:
result["%sNETWORK_MAC_PREFIX" % prefix] = self.mac_prefix
return result
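  # Illustrative output (added comment, not part of the original file; the
  # addresses are hypothetical): for a network named "net1" with
  # network="10.0.0.0/24" and gateway="10.0.0.1", HooksDict(prefix="NEW_")
  # returns
  #   {"NEW_NETWORK_NAME": "net1", "NEW_NETWORK_UUID": "...",
  #    "NEW_NETWORK_TAGS": "", "NEW_NETWORK_SUBNET": "10.0.0.0/24",
  #    "NEW_NETWORK_GATEWAY": "10.0.0.1"}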
@classmethod
def FromDict(cls, val):
"""Custom function for networks.
Remove deprecated network_type and family.
"""
if "network_type" in val:
del val["network_type"]
if "family" in val:
del val["family"]
obj = super(Network, cls).FromDict(val)
return obj
class SerializableConfigParser(ConfigParser.SafeConfigParser):
"""Simple wrapper over ConfigParse that allows serialization.
This class is basically ConfigParser.SafeConfigParser with two
additional methods that allow it to serialize/unserialize to/from a
buffer.
"""
def Dumps(self):
"""Dump this instance and return the string representation."""
buf = StringIO()
self.write(buf)
return buf.getvalue()
@classmethod
def Loads(cls, data):
"""Load data from a string."""
buf = StringIO(data)
cfp = cls()
cfp.readfp(buf)
return cfp
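# Illustrative round trip (added comment, not part of the original file):
#   cfg = SerializableConfigParser()
#   cfg.add_section("node")
#   cfg.set("node", "name", "node1")
#   data = cfg.Dumps()                        # INI-style text in a string
#   SerializableConfigParser.Loads(data).get("node", "name") == "node1"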
class LvmPvInfo(ConfigObject):
"""Information about an LVM physical volume (PV).
@type name: string
@ivar name: name of the PV
@type vg_name: string
@ivar vg_name: name of the volume group containing the PV
@type size: float
@ivar size: size of the PV in MiB
@type free: float
@ivar free: free space in the PV, in MiB
@type attributes: string
@ivar attributes: PV attributes
@type lv_list: list of strings
@ivar lv_list: names of the LVs hosted on the PV
"""
__slots__ = [
"name",
"vg_name",
"size",
"free",
"attributes",
"lv_list"
]
def IsEmpty(self):
"""Is this PV empty?
"""
return self.size <= (self.free + 1)
def IsAllocatable(self):
"""Is this PV allocatable?
"""
return ("a" in self.attributes)
| gpl-2.0 | 8,113,824,897,905,943,000 | 28.555556 | 80 | 0.650119 | false |
qingshuimonk/STA663 | vae/Vanilla_GAN.py | 1 | 3540 | import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import matplotlib
matplotlib.use('PS')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
from time import gmtime, strftime
def xavier_init(size):
in_dim = size[0]
xavier_stddev = 1. / tf.sqrt(in_dim / 2.)
return tf.random_normal(shape=size, stddev=xavier_stddev)
X = tf.placeholder(tf.float32, shape=[None, 784])
D_W1 = tf.Variable(xavier_init([784, 128]))
D_b1 = tf.Variable(tf.zeros(shape=[128]))
D_W2 = tf.Variable(xavier_init([128, 1]))
D_b2 = tf.Variable(tf.zeros(shape=[1]))
theta_D = [D_W1, D_W2, D_b1, D_b2]
Z = tf.placeholder(tf.float32, shape=[None, 100])
G_W1 = tf.Variable(xavier_init([100, 128]))
G_b1 = tf.Variable(tf.zeros(shape=[128]))
G_W2 = tf.Variable(xavier_init([128, 784]))
G_b2 = tf.Variable(tf.zeros(shape=[784]))
theta_G = [G_W1, G_W2, G_b1, G_b2]
def sample_Z(m, n):
return np.random.uniform(-1., 1., size=[m, n])
def generator(z):
G_h1 = tf.nn.relu(tf.matmul(z, G_W1) + G_b1)
G_log_prob = tf.matmul(G_h1, G_W2) + G_b2
G_prob = tf.nn.sigmoid(G_log_prob)
return G_prob
def discriminator(x):
D_h1 = tf.nn.relu(tf.matmul(x, D_W1) + D_b1)
D_logit = tf.matmul(D_h1, D_W2) + D_b2
D_prob = tf.nn.sigmoid(D_logit)
return D_prob, D_logit
def plot(samples):
fig = plt.figure(figsize=(8, 2))
gs = gridspec.GridSpec(2, 8)
gs.update(wspace=0.05, hspace=0.05)
for i, sample in enumerate(samples):
ax = plt.subplot(gs[i])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
plt.imshow(sample.reshape(28, 28), cmap='Greys_r')
return fig
G_sample = generator(Z)
D_real, D_logit_real = discriminator(X)
D_fake, D_logit_fake = discriminator(G_sample)
# D_loss = -tf.reduce_mean(tf.log(D_real) + tf.log(1. - D_fake))
# G_loss = -tf.reduce_mean(tf.log(D_fake))
# Alternative losses:
# -------------------
D_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_real, labels=tf.ones_like(D_logit_real)))
D_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.zeros_like(D_logit_fake)))
D_loss = D_loss_real + D_loss_fake
G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.ones_like(D_logit_fake)))
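# Note (added comment): the two cross-entropy losses above are the numerically
# stable form of the commented-out -log() losses. TensorFlow's
# sigmoid_cross_entropy_with_logits(logits=x, labels=z) computes
#   max(x, 0) - x*z + log(1 + exp(-|x|))
# which for labels of all ones equals -log(sigmoid(x)) and for labels of all
# zeros equals -log(1 - sigmoid(x)), so D_loss and G_loss match the original
# minimax formulation without taking log() of a saturated sigmoid.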
D_solver = tf.train.AdamOptimizer().minimize(D_loss, var_list=theta_D)
G_solver = tf.train.AdamOptimizer().minimize(G_loss, var_list=theta_G)
mb_size = 128
Z_dim = 100
mnist = input_data.read_data_sets('/docs/MNIST_data', one_hot=True)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
if not os.path.exists('/]data/GAN_pics/'):
os.makedirs('/]data/GAN_pics/')
i = 0
for it in range(100000):
if it == 99999:
samples = sess.run(G_sample, feed_dict={Z: sample_Z(16, Z_dim)})
fig = plot(samples)
plt.savefig('/]data/GAN_pics/{}.png'.format(strftime("%m-%d_%H:%M:%S", gmtime())), bbox_inches='tight')
i += 1
plt.close(fig)
X_mb, _ = mnist.train.next_batch(mb_size)
_, D_loss_curr = sess.run([D_solver, D_loss], feed_dict={X: X_mb, Z: sample_Z(mb_size, Z_dim)})
_, G_loss_curr = sess.run([G_solver, G_loss], feed_dict={Z: sample_Z(mb_size, Z_dim)})
if it % 1000 == 0:
print('Iter: {}'.format(it))
print('D loss: {:.4}'. format(D_loss_curr))
print('G_loss: {:.4}'.format(G_loss_curr))
print() | mit | -5,731,070,445,213,390,000 | 27.556452 | 126 | 0.632768 | false |
pytorch/fairseq | fairseq/models/nat/nat_crf_transformer.py | 1 | 4378 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.models import register_model, register_model_architecture
from fairseq.models.nat import NATransformerModel, base_architecture
from fairseq.modules import DynamicCRF
@register_model("nacrf_transformer")
class NACRFTransformerModel(NATransformerModel):
def __init__(self, args, encoder, decoder):
super().__init__(args, encoder, decoder)
self.crf_layer = DynamicCRF(
num_embedding=len(self.tgt_dict),
low_rank=args.crf_lowrank_approx,
beam_size=args.crf_beam_approx,
)
@property
def allow_ensemble(self):
return False
@staticmethod
def add_args(parser):
NATransformerModel.add_args(parser)
parser.add_argument(
"--crf-lowrank-approx",
type=int,
help="the dimension of low-rank approximation of transition",
)
parser.add_argument(
"--crf-beam-approx",
type=int,
help="the beam size for apporixmating the normalizing factor",
)
parser.add_argument(
"--word-ins-loss-factor",
type=float,
help="weights on NAT loss used to co-training with CRF loss.",
)
def forward(
self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs
):
# encoding
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
# length prediction
length_out = self.decoder.forward_length(
normalize=False, encoder_out=encoder_out
)
length_tgt = self.decoder.forward_length_prediction(
length_out, encoder_out, tgt_tokens
)
# decoding
word_ins_out = self.decoder(
normalize=False,
prev_output_tokens=prev_output_tokens,
encoder_out=encoder_out,
)
word_ins_tgt, word_ins_mask = tgt_tokens, tgt_tokens.ne(self.pad)
# compute the log-likelihood of CRF
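        # (added comment) the CRF layer returns a per-sequence log-likelihood;
        # negating it gives the NLL, which is then normalised by each target's
        # length and averaged over the batch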
crf_nll = -self.crf_layer(word_ins_out, word_ins_tgt, word_ins_mask)
crf_nll = (crf_nll / word_ins_mask.type_as(crf_nll).sum(-1)).mean()
return {
"word_ins": {
"out": word_ins_out,
"tgt": word_ins_tgt,
"mask": word_ins_mask,
"ls": self.args.label_smoothing,
"nll_loss": True,
"factor": self.args.word_ins_loss_factor,
},
"word_crf": {"loss": crf_nll},
"length": {
"out": length_out,
"tgt": length_tgt,
"factor": self.decoder.length_loss_factor,
},
}
def forward_decoder(self, decoder_out, encoder_out, decoding_format=None, **kwargs):
output_tokens = decoder_out.output_tokens
output_scores = decoder_out.output_scores
history = decoder_out.history
# execute the decoder and get emission scores
output_masks = output_tokens.ne(self.pad)
word_ins_out = self.decoder(
normalize=False, prev_output_tokens=output_tokens, encoder_out=encoder_out
)
# run viterbi decoding through CRF
_scores, _tokens = self.crf_layer.forward_decoder(word_ins_out, output_masks)
output_tokens.masked_scatter_(output_masks, _tokens[output_masks])
output_scores.masked_scatter_(output_masks, _scores[output_masks])
if history is not None:
history.append(output_tokens.clone())
return decoder_out._replace(
output_tokens=output_tokens,
output_scores=output_scores,
attn=None,
history=history,
)
@register_model_architecture("nacrf_transformer", "nacrf_transformer")
def nacrf_base_architecture(args):
args.crf_lowrank_approx = getattr(args, "crf_lowrank_approx", 32)
args.crf_beam_approx = getattr(args, "crf_beam_approx", 64)
args.word_ins_loss_factor = getattr(args, "word_ins_loss_factor", 0.5)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
base_architecture(args)
| mit | 8,179,743,690,239,201,000 | 35.181818 | 88 | 0.604386 | false |
NicWayand/xray | xarray/core/combine.py | 1 | 15635 | import warnings
import pandas as pd
from . import utils
from .alignment import align
from .merge import merge
from .pycompat import iteritems, OrderedDict, basestring
from .variable import Variable, as_variable, Coordinate, concat as concat_vars
def concat(objs, dim=None, data_vars='all', coords='different',
compat='equals', positions=None, indexers=None, mode=None,
concat_over=None):
"""Concatenate xarray objects along a new or existing dimension.
Parameters
----------
objs : sequence of Dataset and DataArray objects
xarray objects to concatenate together. Each object is expected to
consist of variables and coordinates with matching shapes except for
along the concatenated dimension.
dim : str or DataArray or pandas.Index
Name of the dimension to concatenate along. This can either be a new
dimension name, in which case it is added along axis=0, or an existing
dimension name, in which case the location of the dimension is
unchanged. If dimension is provided as a DataArray or Index, its name
is used as the dimension to concatenate along and the values are added
as a coordinate.
data_vars : {'minimal', 'different', 'all' or list of str}, optional
These data variables will be concatenated together:
* 'minimal': Only data variables in which the dimension already
appears are included.
* 'different': Data variables which are not equal (ignoring
attributes) across all datasets are also concatenated (as well as
all for which dimension already appears). Beware: this option may
load the data payload of data variables into memory if they are not
already loaded.
* 'all': All data variables will be concatenated.
* list of str: The listed data variables will be concatenated, in
addition to the 'minimal' data variables.
If objects are DataArrays, data_vars must be 'all'.
    coords : {'minimal', 'different', 'all' or list of str}, optional
These coordinate variables will be concatenated together:
* 'minimal': Only coordinates in which the dimension already appears
are included.
* 'different': Coordinates which are not equal (ignoring attributes)
across all datasets are also concatenated (as well as all for which
dimension already appears). Beware: this option may load the data
payload of coordinate variables into memory if they are not already
loaded.
* 'all': All coordinate variables will be concatenated, except
those corresponding to other dimensions.
* list of str: The listed coordinate variables will be concatenated,
            in addition to the 'minimal' coordinates.
compat : {'equals', 'identical'}, optional
String indicating how to compare non-concatenated variables and
dataset global attributes for potential conflicts. 'equals' means
that all variable values and dimensions must be the same;
'identical' means that variable attributes and global attributes
must also be equal.
positions : None or list of integer arrays, optional
List of integer arrays which specifies the integer positions to which
to assign each dataset along the concatenated dimension. If not
supplied, objects are concatenated in the provided order.
indexers, mode, concat_over : deprecated
Returns
-------
concatenated : type of objs
See also
--------
merge
auto_combine
"""
# TODO: add join and ignore_index arguments copied from pandas.concat
# TODO: support concatenating scalar coordinates even if the concatenated
# dimension already exists
from .dataset import Dataset
from .dataarray import DataArray
try:
first_obj, objs = utils.peek_at(objs)
except StopIteration:
raise ValueError('must supply at least one object to concatenate')
if dim is None:
warnings.warn('the `dim` argument to `concat` will be required '
'in a future version of xarray; for now, setting it to '
"the old default of 'concat_dim'",
FutureWarning, stacklevel=2)
        dim = 'concat_dim'
if indexers is not None: # pragma: nocover
warnings.warn('indexers has been renamed to positions; the alias '
'will be removed in a future version of xarray',
FutureWarning, stacklevel=2)
positions = indexers
if mode is not None:
raise ValueError('`mode` is no longer a valid argument to '
'xarray.concat; it has been split into the `data_vars` '
'and `coords` arguments')
if concat_over is not None:
raise ValueError('`concat_over` is no longer a valid argument to '
'xarray.concat; it has been split into the `data_vars` '
'and `coords` arguments')
if isinstance(first_obj, DataArray):
f = _dataarray_concat
elif isinstance(first_obj, Dataset):
f = _dataset_concat
else:
raise TypeError('can only concatenate xarray Dataset and DataArray '
'objects, got %s' % type(first_obj))
return f(objs, dim, data_vars, coords, compat, positions)
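# Illustrative usage of the public API implemented above (added sketch, not
# part of the original module; `ds1`/`ds2` are hypothetical datasets):
#
#   >>> import xarray as xr
#   >>> ds1 = xr.Dataset({'temp': ('time', [1, 2])}, coords={'time': [0, 1]})
#   >>> ds2 = xr.Dataset({'temp': ('time', [3, 4])}, coords={'time': [2, 3]})
#   >>> combined = xr.concat([ds1, ds2], dim='time')
#
# `combined` is a single Dataset whose 'time' dimension has length 4.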
def _calc_concat_dim_coord(dim):
"""
Infer the dimension name and 1d coordinate variable (if appropriate)
for concatenating along the new dimension.
"""
if isinstance(dim, basestring):
coord = None
elif not hasattr(dim, 'dims'):
# dim is not a DataArray or Coordinate
dim_name = getattr(dim, 'name', None)
if dim_name is None:
dim_name = 'concat_dim'
coord = Coordinate(dim_name, dim)
dim = dim_name
elif not hasattr(dim, 'name'):
coord = as_variable(dim).to_coord()
dim, = coord.dims
else:
coord = dim
dim, = coord.dims
return dim, coord
def _calc_concat_over(datasets, dim, data_vars, coords):
"""
Determine which dataset variables need to be concatenated in the result,
and which can simply be taken from the first dataset.
"""
def process_subset_opt(opt, subset):
if subset == 'coords':
subset_long_name = 'coordinates'
else:
subset_long_name = 'data variables'
if isinstance(opt, basestring):
if opt == 'different':
def differs(vname):
# simple helper function which compares a variable
# across all datasets and indicates whether that
# variable differs or not.
v = datasets[0].variables[vname]
return any(not ds.variables[vname].equals(v)
for ds in datasets[1:])
# all nonindexes that are not the same in each dataset
concat_new = set(k for k in getattr(datasets[0], subset)
if k not in concat_over and differs(k))
elif opt == 'all':
concat_new = (set(getattr(datasets[0], subset)) -
set(datasets[0].dims))
elif opt == 'minimal':
concat_new = set()
else:
raise ValueError("unexpected value for concat_%s: %s"
% (subset, opt))
else:
invalid_vars = [k for k in opt
if k not in getattr(datasets[0], subset)]
if invalid_vars:
raise ValueError('some variables in %s are not '
'%s on the first dataset: %s'
% (subset, subset_long_name, invalid_vars))
concat_new = set(opt)
return concat_new
concat_over = set()
for ds in datasets:
concat_over.update(k for k, v in ds.variables.items()
if dim in v.dims)
concat_over.update(process_subset_opt(data_vars, 'data_vars'))
concat_over.update(process_subset_opt(coords, 'coords'))
if dim in datasets[0]:
concat_over.add(dim)
return concat_over
def _dataset_concat(datasets, dim, data_vars, coords, compat, positions):
"""
Concatenate a sequence of datasets along a new or existing dimension
"""
from .dataset import Dataset, as_dataset
if compat not in ['equals', 'identical']:
raise ValueError("compat=%r invalid: must be 'equals' "
"or 'identical'" % compat)
dim, coord = _calc_concat_dim_coord(dim)
datasets = [as_dataset(ds) for ds in datasets]
datasets = align(*datasets, join='outer', copy=False, exclude=[dim])
concat_over = _calc_concat_over(datasets, dim, data_vars, coords)
def insert_result_variable(k, v):
assert isinstance(v, Variable)
if k in datasets[0].coords:
result_coord_names.add(k)
result_vars[k] = v
# create the new dataset and add constant variables
result_vars = OrderedDict()
result_coord_names = set(datasets[0].coords)
result_attrs = datasets[0].attrs
for k, v in datasets[0].variables.items():
if k not in concat_over:
insert_result_variable(k, v)
# check that global attributes and non-concatenated variables are fixed
# across all datasets
for ds in datasets[1:]:
if (compat == 'identical' and
not utils.dict_equiv(ds.attrs, result_attrs)):
raise ValueError('dataset global attributes not equal')
for k, v in iteritems(ds.variables):
if k not in result_vars and k not in concat_over:
raise ValueError('encountered unexpected variable %r' % k)
elif (k in result_coord_names) != (k in ds.coords):
raise ValueError('%r is a coordinate in some datasets but not '
'others' % k)
elif (k in result_vars and k != dim and
not getattr(v, compat)(result_vars[k])):
verb = 'equal' if compat == 'equals' else compat
raise ValueError(
'variable %r not %s across datasets' % (k, verb))
# we've already verified everything is consistent; now, calculate
# shared dimension sizes so we can expand the necessary variables
dim_lengths = [ds.dims.get(dim, 1) for ds in datasets]
non_concat_dims = {}
for ds in datasets:
non_concat_dims.update(ds.dims)
non_concat_dims.pop(dim, None)
def ensure_common_dims(vars):
# ensure each variable with the given name shares the same
# dimensions and the same shape for all of them except along the
# concat dimension
common_dims = tuple(pd.unique([d for v in vars for d in v.dims]))
if dim not in common_dims:
common_dims = (dim,) + common_dims
for var, dim_len in zip(vars, dim_lengths):
if var.dims != common_dims:
common_shape = tuple(non_concat_dims.get(d, dim_len)
for d in common_dims)
var = var.expand_dims(common_dims, common_shape)
yield var
# stack up each variable to fill-out the dataset
for k in concat_over:
vars = ensure_common_dims([ds.variables[k] for ds in datasets])
combined = concat_vars(vars, dim, positions)
insert_result_variable(k, combined)
result = Dataset(result_vars, attrs=result_attrs)
result = result.set_coords(result_coord_names)
if coord is not None:
# add concat dimension last to ensure that its in the final Dataset
result[coord.name] = coord
return result
def _dataarray_concat(arrays, dim, data_vars, coords, compat,
positions):
arrays = list(arrays)
if data_vars != 'all':
raise ValueError('data_vars is not a valid argument when '
'concatenating DataArray objects')
datasets = []
for n, arr in enumerate(arrays):
if n == 0:
name = arr.name
elif name != arr.name:
if compat == 'identical':
raise ValueError('array names not identical')
else:
arr = arr.rename(name)
datasets.append(arr._to_temp_dataset())
ds = _dataset_concat(datasets, dim, data_vars, coords, compat,
positions)
return arrays[0]._from_temp_dataset(ds, name)
def _auto_concat(datasets, dim=None):
if len(datasets) == 1:
return datasets[0]
else:
if dim is None:
ds0 = datasets[0]
ds1 = datasets[1]
concat_dims = set(ds0.dims)
if ds0.dims != ds1.dims:
dim_tuples = set(ds0.dims.items()) - set(ds1.dims.items())
concat_dims = set(i for i, _ in dim_tuples)
if len(concat_dims) > 1:
concat_dims = set(d for d in concat_dims
if not ds0[d].equals(ds1[d]))
if len(concat_dims) > 1:
raise ValueError('too many different dimensions to '
'concatenate: %s' % concat_dims)
elif len(concat_dims) == 0:
raise ValueError('cannot infer dimension to concatenate: '
'supply the ``concat_dim`` argument '
'explicitly')
dim, = concat_dims
return concat(datasets, dim=dim)
def auto_combine(datasets, concat_dim=None):
"""Attempt to auto-magically combine the given datasets into one.
This method attempts to combine a list of datasets into a single entity by
inspecting metadata and using a combination of concat and merge.
It does not concatenate along more than one dimension or align or sort data
under any circumstances. It will fail in complex cases, for which you
should use ``concat`` and ``merge`` explicitly.
When ``auto_combine`` may succeed:
    * You have N years of data and M data variables. Each combination of a
      distinct time period and set of data variables is saved as its own dataset.
Examples of when ``auto_combine`` fails:
* In the above scenario, one file is missing, containing the data for one
year's data for one variable.
* In the most recent year, there is an additional data variable.
* Your data includes "time" and "station" dimensions, and each year's data
has a different set of stations.
Parameters
----------
datasets : sequence of xarray.Dataset
Dataset objects to merge.
concat_dim : str or DataArray or Index, optional
Dimension along which to concatenate variables, as used by
:py:func:`xarray.concat`. You only need to provide this argument if the
dimension along which you want to concatenate is not a dimension in
the original datasets, e.g., if you want to stack a collection of
2D arrays along a third dimension.
Returns
-------
combined : xarray.Dataset
See also
--------
concat
Dataset.merge
"""
from toolz import itertoolz
grouped = itertoolz.groupby(lambda ds: tuple(sorted(ds.data_vars)),
datasets).values()
concatenated = [_auto_concat(ds, dim=concat_dim) for ds in grouped]
merged = merge(concatenated)
return merged
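# Illustrative usage (added sketch, not part of the original module; `datasets`
# is a hypothetical list of Dataset objects covering different time periods):
#
#   >>> import xarray as xr
#   >>> combined = xr.auto_combine(datasets, concat_dim='time')
#
# Datasets sharing the same set of data variables are concatenated along
# 'time', and the resulting groups are then merged into one Dataset.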
| apache-2.0 | -1,998,947,403,007,847,000 | 40.144737 | 81 | 0.6055 | false |
register-v1/PyBot | API/wisdom.py | 1 | 6947 | #!/usr/bin/python3
import random as r
def pick_random(value):
return value[r.randint(0, len(value)-1)]
abbreviation = ["TCP", "HTTP", "SDD", "RAM", "GB",
"CSS", "SSL", "AGP", "SQL", "FTP",
"PCI", "AI", "ADP", "RSS", "XML",
"EXE", "COM", "HDD", "THX", "SMTP",
"SMS", "USB", "PNG", "XSS", "SFTP",
"MITM"]
adjective = ["auxiliary", "primary", "back-end", "digital",
"open-source", "virtual", "cross-platform",
"redundant", "online", "haptic","multi-byte",
"bluetooth", "wireless", "1080p", "neural",
"optical", "solid state", "mobile"]
noun = ["driver", "protocol", "bandwidth", "panel", "microchip",
"program", "port", "card", "array", "interface", "system",
"sensor", "firewall", "hard drive", "pixel", "alarm",
"feed", "monitor", "application", "transmitter", "bus",
"circuit", "capacitor", "matrix", "socket", "database"]
verb = ["back up", "bypass", "hack", "override", "compress", "copy",
"navigate", "index", "connect", "generate", "quantify",
"calculate", "synthesize", "input", "transmit", "program",
"reboot", "parse", "analyze"]
ingverb = ["backing up", "bypassing", "hacking", "overriding",
"compressing", "copying", "navigating", "indexing",
"connecting", "generating", "quantifying", "calculating",
"synthesizing", "transmitting", "programming", "parsing",
"DDoSing", "scamming", "pwning", "rooting", "pigning",
"lurking"]
sentences = [
"If we {} the {}, we can get to the {} {} throught the {} {} {}!"
.format(
pick_random(verb),
pick_random(noun),
pick_random(abbreviation),
pick_random(noun),
pick_random(adjective),
pick_random(abbreviation),
pick_random(noun)
),
"We need to {} the {} {} {}!"
.format(
pick_random(verb),
pick_random(adjective),
pick_random(abbreviation),
pick_random(noun),
),
"Try to {} the {} {}, maybe it will {} the {} {}!"
.format(
pick_random(verb),
pick_random(abbreviation),
pick_random(noun),
pick_random(verb),
pick_random(adjective),
pick_random(noun),
),
"You can't {} the {} without {} the {} {} {}!"
.format(
pick_random(verb),
pick_random(noun),
pick_random(ingverb),
pick_random(adjective),
pick_random(abbreviation),
pick_random(noun),
)]
wise_sentences = [
"Practice makes perect!",
"Rome was not built in a day!",
"Shoot for the moon! Even if you miss, you'll land amongst the stars!",
"There is no such thing as a hacker that never made a mistake - Anon",
"Learning to code is like growing a tree, takes time - Anon",
"If you work for Microsoft or Apple, get a life - Anon",
"It is easier to build good habits than break bad ones - Forgotton",
"Education makes man unfit for a slave - Frederick Douglas",
"Life as a script kiddie is not a life worth living - Anon",
"A person who never made a mistake, never tried anything new - Einstein",
"If you're not willing to learn code, you don't deserve to know how to code - v1",
"Well being worth a god damn comes with an ability to not be a complete and total retard all the time ~ mickers"
]
urls = [
"https://www.youtube.com/watch?v=ZzfHjytDceU - Topics of Interest: Asyncio",
"https://www.youtube.com/watch?v=lyDLAutA88s - David Beazley: Builtin Superheros!",
"https://www.youtube.com/watch?v=E-1Y4kSsAFc - Fear and awaiting in Async",
"https://www.youtube.com/watch?v=OSGv2VnC0go - Idiomatic, Pythonic code",
"https://www.youtube.com/watch?v=N4mEzFDjqtA - Python in one video : Derek Banas",
"https://www.youtube.com/watch?v=XXmzYY03t64 - Basic SysAdmin's Guide to Python",
"https://www.youtube.com/watch?v=s1SkCYMnfbY - MulitProcessing with Python",
"https://www.youtube.com/watch?v=l_HBRhcgeuQ - Global Interpreter Lock",
"https://www.youtube.com/watch?v=ciNHn38EyRc - SQL Injections with exmaples",
"https://www.youtube.com/watch?v=GMGbOkKfZRo - Beginner SysAdmin with Python",
"https://www.youtube.com/watch?v=yHO8hdqzKw8 - Basic Python for the OS",
"https://www.youtube.com/watch?v=Thd8yoBou7k - SQL for Python Developers",
"https://www.youtube.com/watch?v=T1QEs3mdJoc - Cookie Grabbing Basics",
"https://www.youtube.com/watch?v=Pi9NpxAvYSs - Python Epiphanies"
]
courses = [
"AI! - https://www.youtube.com/watch?v=OGxgnH8y2NM&list=PLQVvvaa0QuDfKTOs3Keq_kaG2P55YRn5v"
]
topics = [
]
sciences = [
"https://www.youtube.com/watch?v=9Cd36WJ79z4 : Poetry of Reality",
"https://www.youtube.com/watch?v=1PT90dAA49Q : Wave of Reason",
"https://www.youtube.com/watch?v=zSgiXGELjbc : Glorious Dawn - Carl Sagan",
"https://www.youtube.com/watch?v=vioZf4TjoUI : The Cosmic Perspective",
"https://www.youtube.com/watch?v=hOLAGYmUQV0 : The Unbroken Thread"
]
music = [
"https://www.youtube.com/watch?v=X6t3CVafuec : YTCracker - Bazaar",
"https://www.youtube.com/watch?v=ieDBrlKnaAM : YTCracker - Starship",
"https://www.youtube.com/watch?v=2tRKH_BSsk0 : YTCracker - Social Engineering",
"https://www.youtube.com/watch?v=lIuEuJvKos4 : Astrix - Jungle Walk",
"https://www.youtube.com/watch?v=FoUWHfh733Y : Dual Core - All the things",
"https://www.youtube.com/watch?v=zeIjmvZZ_SQ : Zearle - Hackers and Crackers",
"https://www.youtube.com/watch?v=v1BXfMNfjFo : Deep Space House 061",
"https://www.youtube.com/watch?v=scPU1tTIg7Y : VOICIANS - Stranger",
"https://www.youtube.com/watch?v=8fIjqPqJYhA : VOICIANS - Wolves",
"https://www.youtube.com/watch?v=8EQzx-OzQmU : Wavve - 9 is God",
"https://www.youtube.com/watch?v=2GLGZQ4Y8SM : YTCracker - Crack",
"https://www.youtube.com/watch?v=YEP7rhDuWVE : YTCracker - Untouchable",
"https://www.youtube.com/watch?v=Sr8ILq1a_yw : Dual Core - 0x0A Commandments",
"https://www.youtube.com/watch?v=yc7_NHx6oHw : YTCracker - Packets",
"https://www.youtube.com/watch?v=YrRa6dEkzmk : Beat Hackers - Experience",
"https://www.youtube.com/watch?v=f04pC0_U5-I : Talamasca - Psychedelic Trance"
]
noob_quotes = [
"So if I want to write a maleware i must use a github? ~ EniGmis7",
"Windows wouldn't be popular if microsoft didn't know what they were doing ~ leaxyz",
"how hax facebook? ~ Virtually every noob ever.",
"I'm a hacker. Can someone help me reverse an md5? ~ MoHD"
]
# r.randint is inclusive at both ends, so index with len(...) - 1 to avoid IndexError
def noob():
    data = noob_quotes[r.randint(0, len(noob_quotes)-1)]
    return data
def troll():
    troll = sentences[r.randint(0, len(sentences)-1)]
    return troll
def science(): return sciences[r.randint(0, len(sciences)-1)]
def science_song(number): data = sciences[number] ; return data
def wisdom(): return wise_sentences[r.randint(0, len(wise_sentences)-1)]
def urlpls(): return urls[r.randint(0, len(urls)-1)]
def url(num): data = urls[num] ; return data
def song(number): data = music[number] ; return data
def quote(number): data = wise_sentences[number] ; return data
def songpls(): return music[r.randint(0, len(music)-1)]
def randomtopic(): return topics[r.randint(0, len(topics)-1)]
def randomcourse(): return courses[r.randint(0, len(courses)-1)]
| gpl-3.0 | -2,541,187,709,382,089,700 | 37.17033 | 112 | 0.673816 | false |
ismailsunni/healthsites | django_project/localities/tests/test_model_LocalityArchive.py | 2 | 1292 | # -*- coding: utf-8 -*-
from django.test import TestCase
from django.contrib.gis.geos import Point
from .model_factories import LocalityF, DomainF
from ..models import LocalityArchive
class TestModelLocalityArchive(TestCase):
def test_LocalityArchive_fields(self):
self.assertListEqual(
[fld.name for fld in LocalityArchive._meta.fields], [
u'id', 'changeset', 'version', 'content_type', 'object_id',
'domain_id', 'uuid', 'upstream_id', 'geom'
]
)
def test_archiving_locality(self):
domain = DomainF(id=1, name='A domain')
locality = LocalityF.create(domain=domain)
locality.geom = Point(1, 1)
locality.save()
# test save with no changes, should not trigger model archival
locality.save()
self.assertEqual(LocalityArchive.objects.count(), 2)
self.assertListEqual(
[loc.geom.ewkt for loc in LocalityArchive.objects.all()], [
u'SRID=4326;POINT (0.0000000000000000 0.0000000000000000)',
u'SRID=4326;POINT (1.0000000000000000 1.0000000000000000)'
]
)
self.assertListEqual(
[loc.version for loc in LocalityArchive.objects.all()],
[1, 2]
)
| bsd-2-clause | 4,821,071,091,769,891,000 | 30.512195 | 75 | 0.607585 | false |
minicole/elpolitico | elpolitico/elpolitico/MyState.py | 1 | 2660 | __author__ = 'Nicole'
import json
import random
import time
GREEN = 'green'
CONSERVATIVE = 'conservative'
LIBERAL = 'liberal'
LIBERTARIAN = 'libertarian'
MAX_CACHED_POINTS = 400
STATES = [GREEN, CONSERVATIVE, LIBERAL, LIBERTARIAN]
class MyStates:
def __init__(self):
self.currentStates = [CurrentStateOfParty(GREEN), CurrentStateOfParty(CONSERVATIVE), CurrentStateOfParty(LIBERAL), CurrentStateOfParty(LIBERTARIAN)]
self.newPoints = list()
self.existingPoints = list()
self.totalPoints = 0
def passStateToFrontEnd(self):
pointsToPass = list()
for point in self.newPoints:
self.existingPoints.append(point)
# serialize points:
pointsToPass.append(json.dumps(point.newPoint.exportToFrontEnd()))
# empty the old new points:
self.newPoints = list()
return {'newPoints': pointsToPass, 'timestamp': time.time()}
def addNewPoint(self, point):
self.newPoints.append(point)
state = self.getState(point.party)
self.totalPoints += 1
state.percentTotal = state.totalPoints / self.totalPoints
if self.totalPoints >= MAX_CACHED_POINTS:
self.existingPoints.pop(1)
self.totalPoints -= 1
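    # The original code calls self.getState() in addNewPoint() but never defines
    # it; this is a minimal sketch of the presumably intended lookup.
    def getState(self, party):
        for state in self.currentStates:
            if state.party == party:
                return state
        return None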
class CurrentStateOfParty:
def __init__(self, party):
self.party = party
self.percentTotal = 0
self.certainty = 0
self.positivity = 0
self.totalPoints = 0
def addNewPoint(self, point):
self.certainty = (self.certainty * self.totalPoints + point.newPoint.tendency) / (self.totalPoints + 1)
self.positivity = (self.positivity * self.totalPoints + point.positivity) / (self.totalPoints + 1)
self.totalPoints += 1
def exportToFrontEnd(self):
return {'party': self.party, 'percentTotal': self.percentTotal, 'certainty': self.certainty, 'positivity': self.positivity}
def exportRandomness(self):
return {'party': "conservative", 'percentTotal': random.randint(-60,60), 'certainty': random.randint(-60,60), 'positivity': random.randint(-60,60)}
class StateOfPoint:
def __init__(self):
self.newPoint = NewPoint()
self.positivity = 0
class NewPoint:
def __init__(self):
self.tendency = 0
self.lat = 0
self.long = 0
self.party = None
def exportToFrontEnd(self):
return {"lat": self.lat, "long": self.long, "tendency": self.tendency, "party": self.party}
def exportRandomness(self):
return {"lat": random.randint(-60,60), "long": random.randint(-60,60), "tendency": random.randint(-60,60), "party": random.randint(-60,60)} | mit | -5,317,939,568,438,419,000 | 32.2625 | 156 | 0.645865 | false |
fullmooninu/messy | testIfCanCheat.py | 1 | 1065 | import random
n = 10
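# (Comment added for clarity; the intent is inferred from the code below.)
# Two distinct numbers are drawn from 1..n and the player guesses which is
# larger. Strategy 1 picks blindly (~50% success). Strategy 2 compares the
# number it "looks at" against a third random draw used as a threshold before
# deciding, which should win slightly more often than chance.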
print("strategy 1")
s1_points = 0
for i in range(100000):
num1 = random.randint(1,n)
num2 = num1
while num2==num1:
num2 = random.randint(1,n)
choice = random.randint(1,2)
if choice == 1:
if num1 > num2:
s1_points = s1_points +1
if choice == 2:
if num2 > num1:
s1_points = s1_points +1
print("points",s1_points)
print("strategy 2")
s2_points = 0
for i in range(100000):
num1 = random.randint(1,n)
num2 = num1
while num2==num1:
num2 = random.randint(1,n)
num3 = random.randint(1,n)
choice = random.randint(1,2)
if choice == 1:
if num3 > num1:
if num2 > num1:
s2_points = s2_points + 1
elif num1 > num2:
s2_points = s2_points +1
if choice == 2:
if num3 > num2:
if num1 > num2:
s2_points = s2_points + 1
elif num2 > num1:
s2_points = s2_points + 1
print("points",s2_points)
| unlicense | 6,161,339,017,714,504,000 | 22.666667 | 41 | 0.501408 | false |
gsnbng/erpnext | erpnext/hr/doctype/salary_slip/test_salary_slip.py | 1 | 26910 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
import erpnext
import calendar
import random
from erpnext.accounts.utils import get_fiscal_year
from frappe.utils.make_random import get_random
from frappe.utils import getdate, nowdate, add_days, add_months, flt, get_first_day, get_last_day
from erpnext.hr.doctype.salary_structure.salary_structure import make_salary_slip
from erpnext.hr.doctype.payroll_entry.payroll_entry import get_month_details
from erpnext.hr.doctype.employee.test_employee import make_employee
from erpnext.hr.doctype.employee_tax_exemption_declaration.test_employee_tax_exemption_declaration \
import create_payroll_period, create_exemption_category
class TestSalarySlip(unittest.TestCase):
def setUp(self):
make_earning_salary_component(setup=True, company_list=["_Test Company"])
make_deduction_salary_component(setup=True, company_list=["_Test Company"])
for dt in ["Leave Application", "Leave Allocation", "Salary Slip", "Attendance"]:
frappe.db.sql("delete from `tab%s`" % dt)
self.make_holiday_list()
frappe.db.set_value("Company", erpnext.get_default_company(), "default_holiday_list", "Salary Slip Test Holiday List")
frappe.db.set_value("HR Settings", None, "email_salary_slip_to_employee", 0)
frappe.db.set_value('HR Settings', None, 'leave_status_notification_template', None)
frappe.db.set_value('HR Settings', None, 'leave_approval_notification_template', None)
def tearDown(self):
frappe.db.set_value("HR Settings", None, "include_holidays_in_total_working_days", 0)
frappe.set_user("Administrator")
def test_payment_days_based_on_attendance(self):
from erpnext.hr.doctype.attendance.attendance import mark_attendance
no_of_days = self.get_no_of_days()
# Payroll based on attendance
frappe.db.set_value("HR Settings", None, "payroll_based_on", "Attendance")
frappe.db.set_value("HR Settings", None, "daily_wages_fraction_for_half_day", 0.75)
emp_id = make_employee("[email protected]")
frappe.db.set_value("Employee", emp_id, {"relieving_date": None, "status": "Active"})
frappe.db.set_value("Leave Type", "Leave Without Pay", "include_holiday", 0)
month_start_date = get_first_day(nowdate())
month_end_date = get_last_day(nowdate())
first_sunday = frappe.db.sql("""
select holiday_date from `tabHoliday`
where parent = 'Salary Slip Test Holiday List'
and holiday_date between %s and %s
order by holiday_date
""", (month_start_date, month_end_date))[0][0]
mark_attendance(emp_id, first_sunday, 'Absent', ignore_validate=True) # invalid lwp
mark_attendance(emp_id, add_days(first_sunday, 1), 'Absent', ignore_validate=True) # valid lwp
mark_attendance(emp_id, add_days(first_sunday, 2), 'Half Day', leave_type='Leave Without Pay', ignore_validate=True) # valid 0.75 lwp
mark_attendance(emp_id, add_days(first_sunday, 3), 'On Leave', leave_type='Leave Without Pay', ignore_validate=True) # valid lwp
mark_attendance(emp_id, add_days(first_sunday, 4), 'On Leave', leave_type='Casual Leave', ignore_validate=True) # invalid lwp
mark_attendance(emp_id, add_days(first_sunday, 7), 'On Leave', leave_type='Leave Without Pay', ignore_validate=True) # invalid lwp
ss = make_employee_salary_slip("[email protected]", "Monthly")
self.assertEqual(ss.leave_without_pay, 2.25)
days_in_month = no_of_days[0]
no_of_holidays = no_of_days[1]
self.assertEqual(ss.payment_days, days_in_month - no_of_holidays - 2.25)
#Gross pay calculation based on attendances
gross_pay = 78000 - ((78000 / (days_in_month - no_of_holidays)) * flt(ss.leave_without_pay))
self.assertEqual(ss.gross_pay, gross_pay)
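		# e.g. for a 31-day month with 4 Sundays (27 payment-basis days):
		# 78000 - (78000 / 27) * 2.25 = 78000 - 6500 = 71500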
frappe.db.set_value("HR Settings", None, "payroll_based_on", "Leave")
def test_payment_days_based_on_leave_application(self):
no_of_days = self.get_no_of_days()
# Payroll based on attendance
frappe.db.set_value("HR Settings", None, "payroll_based_on", "Leave")
emp_id = make_employee("[email protected]")
frappe.db.set_value("Employee", emp_id, {"relieving_date": None, "status": "Active"})
frappe.db.set_value("Leave Type", "Leave Without Pay", "include_holiday", 0)
month_start_date = get_first_day(nowdate())
month_end_date = get_last_day(nowdate())
first_sunday = frappe.db.sql("""
select holiday_date from `tabHoliday`
where parent = 'Salary Slip Test Holiday List'
and holiday_date between %s and %s
order by holiday_date
""", (month_start_date, month_end_date))[0][0]
make_leave_application(emp_id, first_sunday, add_days(first_sunday, 3), "Leave Without Pay")
ss = make_employee_salary_slip("[email protected]", "Monthly")
self.assertEqual(ss.leave_without_pay, 3)
days_in_month = no_of_days[0]
no_of_holidays = no_of_days[1]
self.assertEqual(ss.payment_days, days_in_month - no_of_holidays - 3)
#Gross pay calculation based on attendances
gross_pay = 78000 - ((78000 / (days_in_month - no_of_holidays)) * flt(ss.leave_without_pay))
self.assertEqual(ss.gross_pay, gross_pay)
frappe.db.set_value("HR Settings", None, "payroll_based_on", "Leave")
def test_salary_slip_with_holidays_included(self):
no_of_days = self.get_no_of_days()
frappe.db.set_value("HR Settings", None, "include_holidays_in_total_working_days", 1)
make_employee("[email protected]")
frappe.db.set_value("Employee", frappe.get_value("Employee",
{"employee_name":"[email protected]"}, "name"), "relieving_date", None)
frappe.db.set_value("Employee", frappe.get_value("Employee",
{"employee_name":"[email protected]"}, "name"), "status", "Active")
ss = make_employee_salary_slip("[email protected]", "Monthly")
self.assertEqual(ss.total_working_days, no_of_days[0])
self.assertEqual(ss.payment_days, no_of_days[0])
self.assertEqual(ss.earnings[0].amount, 50000)
self.assertEqual(ss.earnings[1].amount, 3000)
self.assertEqual(ss.gross_pay, 78000)
def test_salary_slip_with_holidays_excluded(self):
no_of_days = self.get_no_of_days()
frappe.db.set_value("HR Settings", None, "include_holidays_in_total_working_days", 0)
make_employee("[email protected]")
frappe.db.set_value("Employee", frappe.get_value("Employee",
{"employee_name":"[email protected]"}, "name"), "relieving_date", None)
frappe.db.set_value("Employee", frappe.get_value("Employee",
{"employee_name":"[email protected]"}, "name"), "status", "Active")
ss = make_employee_salary_slip("[email protected]", "Monthly")
self.assertEqual(ss.total_working_days, no_of_days[0] - no_of_days[1])
self.assertEqual(ss.payment_days, no_of_days[0] - no_of_days[1])
self.assertEqual(ss.earnings[0].amount, 50000)
self.assertEqual(ss.earnings[0].default_amount, 50000)
self.assertEqual(ss.earnings[1].amount, 3000)
self.assertEqual(ss.gross_pay, 78000)
def test_payment_days(self):
no_of_days = self.get_no_of_days()
# Holidays not included in working days
frappe.db.set_value("HR Settings", None, "include_holidays_in_total_working_days", 1)
# set joinng date in the same month
make_employee("[email protected]")
if getdate(nowdate()).day >= 15:
relieving_date = getdate(add_days(nowdate(),-10))
date_of_joining = getdate(add_days(nowdate(),-10))
elif getdate(nowdate()).day < 15 and getdate(nowdate()).day >= 5:
date_of_joining = getdate(add_days(nowdate(),-3))
relieving_date = getdate(add_days(nowdate(),-3))
elif getdate(nowdate()).day < 5 and not getdate(nowdate()).day == 1:
date_of_joining = getdate(add_days(nowdate(),-1))
relieving_date = getdate(add_days(nowdate(),-1))
elif getdate(nowdate()).day == 1:
date_of_joining = getdate(nowdate())
relieving_date = getdate(nowdate())
frappe.db.set_value("Employee", frappe.get_value("Employee",
{"employee_name":"[email protected]"}, "name"), "date_of_joining", date_of_joining)
frappe.db.set_value("Employee", frappe.get_value("Employee",
{"employee_name":"[email protected]"}, "name"), "relieving_date", None)
frappe.db.set_value("Employee", frappe.get_value("Employee",
{"employee_name":"[email protected]"}, "name"), "status", "Active")
ss = make_employee_salary_slip("[email protected]", "Monthly")
self.assertEqual(ss.total_working_days, no_of_days[0])
self.assertEqual(ss.payment_days, (no_of_days[0] - getdate(date_of_joining).day + 1))
# set relieving date in the same month
frappe.db.set_value("Employee",frappe.get_value("Employee",
{"employee_name":"[email protected]"}, "name"), "date_of_joining", (add_days(nowdate(),-60)))
frappe.db.set_value("Employee", frappe.get_value("Employee",
{"employee_name":"[email protected]"}, "name"), "relieving_date", relieving_date)
frappe.db.set_value("Employee", frappe.get_value("Employee",
{"employee_name":"[email protected]"}, "name"), "status", "Left")
ss.save()
self.assertEqual(ss.total_working_days, no_of_days[0])
self.assertEqual(ss.payment_days, getdate(relieving_date).day)
frappe.db.set_value("Employee", frappe.get_value("Employee",
{"employee_name":"[email protected]"}, "name"), "relieving_date", None)
frappe.db.set_value("Employee", frappe.get_value("Employee",
{"employee_name":"[email protected]"}, "name"), "status", "Active")
def test_employee_salary_slip_read_permission(self):
make_employee("[email protected]")
salary_slip_test_employee = make_employee_salary_slip("[email protected]", "Monthly")
frappe.set_user("[email protected]")
self.assertTrue(salary_slip_test_employee.has_permission("read"))
def test_email_salary_slip(self):
frappe.db.sql("delete from `tabEmail Queue`")
frappe.db.set_value("HR Settings", None, "email_salary_slip_to_employee", 1)
make_employee("[email protected]")
ss = make_employee_salary_slip("[email protected]", "Monthly")
ss.company = "_Test Company"
ss.save()
ss.submit()
email_queue = frappe.db.sql("""select name from `tabEmail Queue`""")
self.assertTrue(email_queue)
def test_loan_repayment_salary_slip(self):
from erpnext.loan_management.doctype.loan.test_loan import create_loan_type, create_loan, make_loan_disbursement_entry, create_loan_accounts
from erpnext.loan_management.doctype.process_loan_interest_accrual.process_loan_interest_accrual import process_loan_interest_accrual_for_term_loans
applicant = make_employee("[email protected]", company="_Test Company")
create_loan_accounts()
create_loan_type("Car Loan", 500000, 8.4,
is_term_loan=1,
mode_of_payment='Cash',
payment_account='Payment Account - _TC',
loan_account='Loan Account - _TC',
interest_income_account='Interest Income Account - _TC',
penalty_income_account='Penalty Income Account - _TC')
loan = create_loan(applicant, "Car Loan", 11000, "Repay Over Number of Periods", 20, posting_date=add_months(nowdate(), -1))
loan.repay_from_salary = 1
loan.submit()
make_loan_disbursement_entry(loan.name, loan.loan_amount, disbursement_date=add_months(nowdate(), -1))
process_loan_interest_accrual_for_term_loans(posting_date=nowdate())
ss = make_employee_salary_slip("[email protected]", "Monthly")
ss.submit()
self.assertEqual(ss.total_loan_repayment, 592)
self.assertEqual(ss.net_pay, (flt(ss.gross_pay) - (flt(ss.total_deduction) + flt(ss.total_loan_repayment))))
def test_payroll_frequency(self):
fiscal_year = get_fiscal_year(nowdate(), company=erpnext.get_default_company())[0]
month = "%02d" % getdate(nowdate()).month
m = get_month_details(fiscal_year, month)
for payroll_frequency in ["Monthly", "Bimonthly", "Fortnightly", "Weekly", "Daily"]:
make_employee(payroll_frequency + "[email protected]")
ss = make_employee_salary_slip(payroll_frequency + "[email protected]", payroll_frequency)
if payroll_frequency == "Monthly":
self.assertEqual(ss.end_date, m['month_end_date'])
elif payroll_frequency == "Bimonthly":
if getdate(ss.start_date).day <= 15:
self.assertEqual(ss.end_date, m['month_mid_end_date'])
else:
self.assertEqual(ss.end_date, m['month_end_date'])
elif payroll_frequency == "Fortnightly":
self.assertEqual(ss.end_date, add_days(nowdate(),13))
elif payroll_frequency == "Weekly":
self.assertEqual(ss.end_date, add_days(nowdate(),6))
elif payroll_frequency == "Daily":
self.assertEqual(ss.end_date, nowdate())
def test_tax_for_payroll_period(self):
data = {}
# test the impact of tax exemption declaration, tax exemption proof submission
# and deduct check boxes in annual tax calculation
# as per assigned salary structure 40500 in monthly salary so 236000*5/100/12
frappe.db.sql("""delete from `tabPayroll Period`""")
frappe.db.sql("""delete from `tabSalary Component`""")
payroll_period = create_payroll_period()
create_tax_slab(payroll_period, allow_tax_exemption=True)
employee = make_employee("[email protected]")
delete_docs = [
"Salary Slip",
"Additional Salary",
"Employee Tax Exemption Declaration",
"Employee Tax Exemption Proof Submission",
"Employee Benefit Claim",
"Salary Structure Assignment"
]
for doc in delete_docs:
frappe.db.sql("delete from `tab%s` where employee='%s'" % (doc, employee))
from erpnext.hr.doctype.salary_structure.test_salary_structure import \
make_salary_structure, create_salary_structure_assignment
salary_structure = make_salary_structure("Stucture to test tax", "Monthly",
other_details={"max_benefits": 100000}, test_tax=True)
create_salary_structure_assignment(employee, salary_structure.name,
payroll_period.start_date)
# create salary slip for whole period deducting tax only on last period
# to find the total tax amount paid
create_salary_slips_for_payroll_period(employee, salary_structure.name,
payroll_period, deduct_random=False)
tax_paid = get_tax_paid_in_period(employee)
annual_tax = 113589.0
try:
self.assertEqual(tax_paid, annual_tax)
except AssertionError:
print("\nSalary Slip - Annual tax calculation failed\n")
raise
frappe.db.sql("""delete from `tabSalary Slip` where employee=%s""", (employee))
# create exemption declaration so the tax amount varies
create_exemption_declaration(employee, payroll_period.name)
# create for payroll deducting in random months
data["deducted_dates"] = create_salary_slips_for_payroll_period(employee,
salary_structure.name, payroll_period)
tax_paid = get_tax_paid_in_period(employee)
# No proof, benefit claim sumitted, total tax paid, should not change
try:
self.assertEqual(tax_paid, annual_tax)
except AssertionError:
print("\nSalary Slip - Tax calculation failed on following case\n", data, "\n")
raise
# Submit proof for total 120000
data["proof"] = create_proof_submission(employee, payroll_period, 120000)
# Submit benefit claim for total 50000
data["benefit-1"] = create_benefit_claim(employee, payroll_period, 15000, "Medical Allowance")
data["benefit-2"] = create_benefit_claim(employee, payroll_period, 35000, "Leave Travel Allowance")
frappe.db.sql("""delete from `tabSalary Slip` where employee=%s""", (employee))
data["deducted_dates"] = create_salary_slips_for_payroll_period(employee,
salary_structure.name, payroll_period)
tax_paid = get_tax_paid_in_period(employee)
# total taxable income 416000, 166000 @ 5% ie. 8300
try:
self.assertEqual(tax_paid, 82389.0)
except AssertionError:
print("\nSalary Slip - Tax calculation failed on following case\n", data, "\n")
raise
# create additional salary of 150000
frappe.db.sql("""delete from `tabSalary Slip` where employee=%s""", (employee))
data["additional-1"] = create_additional_salary(employee, payroll_period, 50000)
data["additional-2"] = create_additional_salary(employee, payroll_period, 100000)
data["deducted_dates"] = create_salary_slips_for_payroll_period(employee,
salary_structure.name, payroll_period)
# total taxable income 566000, 250000 @ 5%, 66000 @ 20%, 12500 + 13200
tax_paid = get_tax_paid_in_period(employee)
try:
self.assertEqual(tax_paid, annual_tax)
except AssertionError:
print("\nSalary Slip - Tax calculation failed on following case\n", data, "\n")
raise
frappe.db.sql("""delete from `tabAdditional Salary` where employee=%s""", (employee))
# undelete fixture data
frappe.db.rollback()
def make_holiday_list(self):
fiscal_year = get_fiscal_year(nowdate(), company=erpnext.get_default_company())
if not frappe.db.get_value("Holiday List", "Salary Slip Test Holiday List"):
holiday_list = frappe.get_doc({
"doctype": "Holiday List",
"holiday_list_name": "Salary Slip Test Holiday List",
"from_date": fiscal_year[1],
"to_date": fiscal_year[2],
"weekly_off": "Sunday"
}).insert()
holiday_list.get_weekly_off_dates()
holiday_list.save()
def make_activity_for_employee(self):
activity_type = frappe.get_doc("Activity Type", "_Test Activity Type")
activity_type.billing_rate = 50
activity_type.costing_rate = 20
activity_type.wage_rate = 25
activity_type.save()
def get_no_of_days(self):
no_of_days_in_month = calendar.monthrange(getdate(nowdate()).year,
getdate(nowdate()).month)
no_of_holidays_in_month = len([1 for i in calendar.monthcalendar(getdate(nowdate()).year,
getdate(nowdate()).month) if i[6] != 0])
return [no_of_days_in_month[1], no_of_holidays_in_month]
def make_employee_salary_slip(user, payroll_frequency, salary_structure=None):
from erpnext.hr.doctype.salary_structure.test_salary_structure import make_salary_structure
if not salary_structure:
salary_structure = payroll_frequency + " Salary Structure Test for Salary Slip"
employee = frappe.db.get_value("Employee", {"user_id": user})
salary_structure_doc = make_salary_structure(salary_structure, payroll_frequency, employee)
salary_slip = frappe.db.get_value("Salary Slip", {"employee": frappe.db.get_value("Employee", {"user_id": user})})
if not salary_slip:
salary_slip = make_salary_slip(salary_structure_doc.name, employee = employee)
salary_slip.employee_name = frappe.get_value("Employee",
{"name":frappe.db.get_value("Employee", {"user_id": user})}, "employee_name")
salary_slip.payroll_frequency = payroll_frequency
salary_slip.posting_date = nowdate()
salary_slip.insert()
return salary_slip
def make_salary_component(salary_components, test_tax, company_list=None):
for salary_component in salary_components:
if not frappe.db.exists('Salary Component', salary_component["salary_component"]):
if test_tax:
if salary_component["type"] == "Earning":
salary_component["is_tax_applicable"] = 1
elif salary_component["salary_component"] == "TDS":
salary_component["variable_based_on_taxable_salary"] = 1
salary_component["amount_based_on_formula"] = 0
salary_component["amount"] = 0
salary_component["formula"] = ""
salary_component["condition"] = ""
salary_component["doctype"] = "Salary Component"
salary_component["salary_component_abbr"] = salary_component["abbr"]
frappe.get_doc(salary_component).insert()
get_salary_component_account(salary_component["salary_component"], company_list)
def get_salary_component_account(sal_comp, company_list=None):
company = erpnext.get_default_company()
if company_list and company not in company_list:
company_list.append(company)
sal_comp = frappe.get_doc("Salary Component", sal_comp)
if not sal_comp.get("accounts"):
for d in company_list:
sal_comp.append("accounts", {
"company": d,
"default_account": create_account(d)
})
sal_comp.save()
def create_account(company):
salary_account = frappe.db.get_value("Account", "Salary - " + frappe.get_cached_value('Company', company, 'abbr'))
if not salary_account:
frappe.get_doc({
"doctype": "Account",
"account_name": "Salary",
"parent_account": "Indirect Expenses - " + frappe.get_cached_value('Company', company, 'abbr'),
"company": company
}).insert()
return salary_account
def make_earning_salary_component(setup=False, test_tax=False, company_list=None):
data = [
{
"salary_component": 'Basic Salary',
"abbr":'BS',
"condition": 'base > 10000',
"formula": 'base',
"type": "Earning",
"amount_based_on_formula": 1
},
{
"salary_component": 'HRA',
"abbr":'H',
"amount": 3000,
"type": "Earning"
},
{
"salary_component": 'Special Allowance',
"abbr":'SA',
"condition": 'H < 10000',
"formula": 'BS*.5',
"type": "Earning",
"amount_based_on_formula": 1
},
{
"salary_component": "Leave Encashment",
"abbr": 'LE',
"type": "Earning"
}
]
if test_tax:
data.extend([
{
"salary_component": "Leave Travel Allowance",
"abbr": 'B',
"is_flexible_benefit": 1,
"type": "Earning",
"pay_against_benefit_claim": 1,
"max_benefit_amount": 100000,
"depends_on_payment_days": 0
},
{
"salary_component": "Medical Allowance",
"abbr": 'B',
"is_flexible_benefit": 1,
"pay_against_benefit_claim": 0,
"type": "Earning",
"max_benefit_amount": 15000
},
{
"salary_component": "Performance Bonus",
"abbr": 'B',
"type": "Earning"
}
])
if setup or test_tax:
make_salary_component(data, test_tax, company_list)
data.append({
"salary_component": 'Basic Salary',
"abbr":'BS',
"condition": 'base < 10000',
"formula": 'base*.2',
"type": "Earning",
"amount_based_on_formula": 1
})
return data
def make_deduction_salary_component(setup=False, test_tax=False, company_list=None):
data = [
{
"salary_component": 'Professional Tax',
"abbr":'PT',
"type": "Deduction",
"amount": 200,
"exempted_from_income_tax": 1
},
{
"salary_component": 'TDS',
"abbr":'T',
"type": "Deduction",
"depends_on_payment_days": 0,
"variable_based_on_taxable_salary": 1,
"round_to_the_nearest_integer": 1
}
]
if not test_tax:
data.append({
"salary_component": 'TDS',
"abbr":'T',
"condition": 'employment_type=="Intern"',
"type": "Deduction",
"round_to_the_nearest_integer": 1
})
if setup or test_tax:
make_salary_component(data, test_tax, company_list)
return data
def get_tax_paid_in_period(employee):
tax_paid_amount = frappe.db.sql("""select sum(sd.amount) from `tabSalary Detail`
sd join `tabSalary Slip` ss where ss.name=sd.parent and ss.employee=%s
and ss.docstatus=1 and sd.salary_component='TDS'""", (employee))
return tax_paid_amount[0][0]
def create_exemption_declaration(employee, payroll_period):
create_exemption_category()
declaration = frappe.get_doc({
"doctype": "Employee Tax Exemption Declaration",
"employee": employee,
"payroll_period": payroll_period,
"company": erpnext.get_default_company()
})
declaration.append("declarations", {
"exemption_sub_category": "_Test Sub Category",
"exemption_category": "_Test Category",
"amount": 100000
})
declaration.submit()
def create_proof_submission(employee, payroll_period, amount):
submission_date = add_months(payroll_period.start_date, random.randint(0, 11))
proof_submission = frappe.get_doc({
"doctype": "Employee Tax Exemption Proof Submission",
"employee": employee,
"payroll_period": payroll_period.name,
"submission_date": submission_date
})
proof_submission.append("tax_exemption_proofs", {
"exemption_sub_category": "_Test Sub Category",
"exemption_category": "_Test Category",
"type_of_proof": "Test", "amount": amount
})
proof_submission.submit()
return submission_date
def create_benefit_claim(employee, payroll_period, amount, component):
claim_date = add_months(payroll_period.start_date, random.randint(0, 11))
frappe.get_doc({
"doctype": "Employee Benefit Claim",
"employee": employee,
"claimed_amount": amount,
"claim_date": claim_date,
"earning_component": component
}).submit()
return claim_date
def create_tax_slab(payroll_period, effective_date = None, allow_tax_exemption = False, dont_submit = False):
if frappe.db.exists("Income Tax Slab", "Tax Slab: " + payroll_period.name):
return
slabs = [
{
"from_amount": 250000,
"to_amount": 500000,
"percent_deduction": 5,
"condition": "annual_taxable_earning > 500000"
},
{
"from_amount": 500001,
"to_amount": 1000000,
"percent_deduction": 20
},
{
"from_amount": 1000001,
"percent_deduction": 30
}
]
income_tax_slab = frappe.new_doc("Income Tax Slab")
income_tax_slab.name = "Tax Slab: " + payroll_period.name
income_tax_slab.effective_from = effective_date or add_days(payroll_period.start_date, -2)
if allow_tax_exemption:
income_tax_slab.allow_tax_exemption = 1
income_tax_slab.standard_tax_exemption_amount = 50000
for item in slabs:
income_tax_slab.append("slabs", item)
income_tax_slab.append("other_taxes_and_charges", {
"description": "cess",
"percent": 4
})
income_tax_slab.save()
if not dont_submit:
income_tax_slab.submit()
def create_salary_slips_for_payroll_period(employee, salary_structure, payroll_period, deduct_random=True):
deducted_dates = []
i = 0
while i < 12:
slip = frappe.get_doc({"doctype": "Salary Slip", "employee": employee,
"salary_structure": salary_structure, "frequency": "Monthly"})
if i == 0:
posting_date = add_days(payroll_period.start_date, 25)
else:
posting_date = add_months(posting_date, 1)
if i == 11:
slip.deduct_tax_for_unsubmitted_tax_exemption_proof = 1
slip.deduct_tax_for_unclaimed_employee_benefits = 1
if deduct_random and not random.randint(0, 2):
slip.deduct_tax_for_unsubmitted_tax_exemption_proof = 1
deducted_dates.append(posting_date)
slip.posting_date = posting_date
slip.start_date = get_first_day(posting_date)
slip.end_date = get_last_day(posting_date)
doc = make_salary_slip(salary_structure, slip, employee)
doc.submit()
i += 1
return deducted_dates
def create_additional_salary(employee, payroll_period, amount):
salary_date = add_months(payroll_period.start_date, random.randint(0, 11))
frappe.get_doc({
"doctype": "Additional Salary",
"employee": employee,
"company": erpnext.get_default_company(),
"salary_component": "Performance Bonus",
"payroll_date": salary_date,
"amount": amount,
"type": "Earning"
}).submit()
return salary_date
def make_leave_application(employee, from_date, to_date, leave_type, company=None):
leave_application = frappe.get_doc(dict(
doctype = 'Leave Application',
employee = employee,
leave_type = leave_type,
from_date = from_date,
to_date = to_date,
company = company or erpnext.get_default_company() or "_Test Company",
docstatus = 1,
status = "Approved",
leave_approver = '[email protected]'
))
leave_application.submit() | agpl-3.0 | 7,116,116,027,504,919,000 | 37.171631 | 150 | 0.70301 | false |
Florianjw/NiceCharts | nicechart.py | 1 | 22515 | #!/usr/bin/env python
# nicechart.py
#
# Copyright 2011
#
# Christoph Sterz
# Florian Weber
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# These two lines are only needed if you don't put the script directly into
# the installation directory
import sys
#sys.path.append('/usr/share/inkscape/extensions')
# We will use the inkex module with the predefined Effect base class.
import inkex
# The simplestyle module provides functions for style parsing.
from simplestyle import *
import math, re, nicechart_colors as nc_colors
class NiceChart(inkex.Effect):
"""
Inkscape effect extension for drawing charts.
Creates a new layer containing a bar, stacked-bar or pie chart built from the given values.
"""
def __init__(self):
"""
Constructor.
Defines the command-line options of the script: chart values ("--what"), chart type, colors, fonts and geometry.
"""
# Call the base class constructor.
inkex.Effect.__init__(self)
# Define string option "--what" with "-w" shortcut and default chart values.
self.OptionParser.add_option('-w', '--what', action = 'store',
type = 'string', dest = 'what', default = '22,11,67',
help = 'Chart Values')
# Define string option "--type" with "-t" shortcut.
self.OptionParser.add_option("-t", "--type", action="store",
type="string", dest="type", default='',
help="Chart Type")
# Define bool option "--blur" with "-b" shortcut.
self.OptionParser.add_option("-b", "--blur", action="store",
type="inkbool", dest="blur", default='True',
help="Blur Type")
# Define string option "--file" with "-f" shortcut.
self.OptionParser.add_option("-f", "--filename", action="store",
type="string", dest="filename", default='',
help="Name of File")
# Define string option "--input_type" with "-i" shortcut.
self.OptionParser.add_option("-i", "--input_type", action="store",
type="string", dest="input_type", default='file',
help="Chart Type")
# Define string option "--delimiter" with "-d" shortcut.
self.OptionParser.add_option("-d", "--delimiter", action="store",
type="string", dest="csv_delimiter", default=';',
help="delimiter")
# Define string option "--colors" with "-c" shortcut.
self.OptionParser.add_option("-c", "--colors", action="store",
type="string", dest="colors", default='default',
help="color-scheme")
# Define string option "--colors_override"
self.OptionParser.add_option("", "--colors_override", action="store",
type="string", dest="colors_override", default='',
help="color-scheme-override")
self.OptionParser.add_option("", "--reverse_colors", action="store",
type="inkbool", dest="reverse_colors", default='False',
help="reverse color-scheme")
self.OptionParser.add_option("-k", "--col_key", action="store",
type="int", dest="col_key", default='0',
help="column that contains the keys")
self.OptionParser.add_option("-v", "--col_val", action="store",
type="int", dest="col_val", default='1',
help="column that contains the values")
self.OptionParser.add_option("-r", "--rotate", action="store",
type="inkbool", dest="rotate", default='False',
help="Draw barchart horizontally")
self.OptionParser.add_option("-W", "--bar-width", action="store",
type="int", dest="bar_width", default='10',
help="width of bars")
self.OptionParser.add_option("-p", "--pie-radius", action="store",
type="int", dest="pie_radius", default='100',
help="radius of pie-charts")
self.OptionParser.add_option("-H", "--bar-height", action="store",
type="int", dest="bar_height", default='100',
help="height of bars")
self.OptionParser.add_option("-O", "--bar-offset", action="store",
type="int", dest="bar_offset", default='5',
help="distance between bars")
self.OptionParser.add_option("", "--stroke-width", action="store",
type="int", dest="stroke_width", default='2')
self.OptionParser.add_option("-o", "--text-offset", action="store",
type="int", dest="text_offset", default='5',
help="distance between bar and descriptions")
self.OptionParser.add_option("-F", "--font", action="store",
type="string", dest="font", default='sans-serif',
help="font of description")
self.OptionParser.add_option("-S", "--font-size", action="store",
type="int", dest="font_size", default='10',
help="font size of description")
self.OptionParser.add_option("-C", "--font-color", action="store",
type="string", dest="font_color", default='black',
help="font color of description")
#Dummy:
self.OptionParser.add_option("","--input_sections")
self.OptionParser.add_option("-V", "--show_values", action="store",
type="inkbool", dest="show_values", default='False',
help="Show values in chart")
def effect(self):
"""
Effect behaviour.
Overrides base class' method and inserts a nice looking chart into SVG document.
"""
# Get script's "--what" option value and parse the input data (I concede the parsing below is a little bit of magic)
what = self.options.what
keys=[]
values=[]
orig_values=[]
keys_present=True
pie_abs=False
cnt=0
csv_file_name=self.options.filename
csv_delimiter=self.options.csv_delimiter
input_type=self.options.input_type
col_key=self.options.col_key
col_val=self.options.col_val
show_values=self.options.show_values
if(input_type=="\"file\""):
csv_file=open(csv_file_name,"r")
for line in csv_file:
value=line.split(csv_delimiter)
if(len(value)>=1): #make sure that there is at least one value (someone may want to use it as description)
keys.append(value[col_key])
values.append(float(value[col_val]))
csv_file.close()
elif(input_type=="\"direct_input\""):
what=re.findall("([A-Z|a-z|0-9]+:[0-9]+\.?[0-9]*)",what)
for value in what:
value=value.split(":")
keys.append(value[0])
values.append(float(value[1]))
# Get script's "--type" option value.
charttype=self.options.type
if(charttype=="pie_abs"):
pie_abs=True
charttype="pie"
# Get access to main SVG document element and get its dimensions.
svg = self.document.getroot()
# Get the page attributes:
width = self.getUnittouu(svg.get('width'))
height = self.getUnittouu(svg.attrib['height'])
# Create a new layer.
layer = inkex.etree.SubElement(svg, 'g')
layer.set(inkex.addNS('label', 'inkscape'), 'Chart-Layer: %s' % (what))
layer.set(inkex.addNS('groupmode', 'inkscape'), 'layer')
# Check if Blur should be drawn:
draw_blur=self.options.blur
#draw_blur=False
# Set Default Colors
self.options.colors_override.strip()
if (len(self.options.colors_override)>0):
Colors=self.options.colors_override
else:
Colors=self.options.colors
if(Colors[0].isalpha()):
Colors=nc_colors.get_color_scheme(Colors)
else:
Colors=re.findall("(#[0-9a-fA-F]{6})",Colors)
#to be sure we create a fallback:
if(len(Colors)==0):
Colors=nc_colors.get_color_scheme()
color_count=len(Colors)
if(self.options.reverse_colors):
Colors.reverse()
#Those values should be self-explaining:
bar_height=self.options.bar_height
bar_width=self.options.bar_width
bar_offset=self.options.bar_offset
#offset of the description in stacked-bar-charts:
#stacked_bar_text_offset=self.options.stacked_bar_text_offset
text_offset=self.options.text_offset
#get font
font=self.options.font
font_size=self.options.font_size
font_color=self.options.font_color
#get rotation
rotate = self.options.rotate
pie_radius=self.options.pie_radius
stroke_width=self.options.stroke_width
if(charttype=="bar"):
#########
###BAR###
#########
#iterate all values, use offset to draw the bars in different places
offset=0
color=0
# Normalize the bars to the largest value
try:
value_max=max(values)
except ValueError:
value_max=0.0
for x in range(len(values)):
orig_values.append(values[x])
values[x]=(values[x]/value_max)*bar_height
# Get defs of Document
defs = self.xpathSingle('/svg:svg//svg:defs')
if defs == None:
defs = inkex.etree.SubElement(self.document.getroot(),inkex.addNS('defs','svg'))
# Create new Filter
filt = inkex.etree.SubElement(defs,inkex.addNS('filter','svg'))
filtId = self.uniqueId('filter')
self.filtId = 'filter:url(#%s);' % filtId
for k, v in [('id', filtId), ('height', "3"),
('width', "3"),
('x', '-0.5'), ('y', '-0.5')]:
filt.set(k, v)
# Append Gaussian Blur to that Filter
fe = inkex.etree.SubElement(filt,inkex.addNS('feGaussianBlur','svg'))
fe.set('stdDeviation', "1.1")
# Draw Single bars with their shadows
for value in values:
#draw blur, if it is wanted
if(draw_blur):
# Create shadow element
shadow = inkex.etree.Element(inkex.addNS("rect","svg"))
# Set chart position to center of document. Make it horizontal or vertical
if(not rotate):
shadow.set('x', str(width / 2 + offset +1))
shadow.set('y', str(height / 2 - int(value)+1))
else:
shadow.set('y', str(width / 2 + offset +1))
shadow.set('x', str(height / 2 +1))
# Set shadow properties
if(not rotate):
shadow.set("width", str(bar_width))
shadow.set("height", str(int(value)))
else:
shadow.set("height", str(bar_width))
shadow.set("width", str(int(value)))
# Set shadow blur (connect to filter object in xml path)
shadow.set("style","filter:url(#filter)")
# Create rectangle element
#shadow = inkex.etree.Element(inkex.addNS("rect","svg"))
rect = inkex.etree.Element(inkex.addNS('rect','svg'))
# Set chart position to center of document.
if(not rotate):
rect.set('x', str(width/2+offset))
rect.set('y', str(height/2-int(value)))
else:
rect.set('y', str(width/2+offset))
rect.set('x', str(height/2))
# Set rectangle properties
if(not rotate):
rect.set("width", str(bar_width))
rect.set("height", str(int(value)))
else:
rect.set("height", str(bar_width))
rect.set("width", str(int(value)))
rect.set("style","fill:"+Colors[color%color_count])
# Set shadow blur (connect to filter object in xml path)
if(draw_blur):
shadow.set("style","filter:url(#filter)")
# If keys are given create text elements
if(keys_present):
text = inkex.etree.Element(inkex.addNS('text','svg'))
if(not rotate): #=vertical
text.set("transform","matrix(0,-1,1,0,0,0)")
#y after rotation:
text.set("x", "-"+str(height/2+text_offset))
#x after rotation:
text.set("y", str(width/2+offset+bar_width/2+font_size/3))
else: #=horizontal
text.set("y", str(width/2+offset+bar_width/2+font_size/3))
text.set("x", str(height/2-text_offset))
text.set("style","font-size:"+str(font_size)\
+"px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:"\
+font+";-inkscape-font-specification:Bitstream Charter;text-align:end;text-anchor:end;fill:"\
+font_color)
text.text=keys[cnt]
#cnt=cnt+1
# Increase Offset and Color
#offset=offset+bar_width+bar_offset
color=(color+1)%8
# Connect elements together.
if(draw_blur):
layer.append(shadow)
layer.append(rect)
if(keys_present):
layer.append(text)
if(show_values):
vtext = inkex.etree.Element(inkex.addNS('text','svg'))
if(not rotate): #=vertical
vtext.set("transform","matrix(0,-1,1,0,0,0)")
#y after rotation:
vtext.set("x", "-"+str(height/2+text_offset-value-text_offset-text_offset))
#x after rotation:
vtext.set("y", str(width/2+offset+bar_width/2+font_size/3))
else: #=horizontal
vtext.set("y", str(width/2+offset+bar_width/2+font_size/3))
vtext.set("x", str(height/2-text_offset+value+text_offset+text_offset))
vtext.set("style","font-size:"+str(font_size)\
+"px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:"\
+font+";-inkscape-font-specification:Bitstream Charter;text-align:start;text-anchor:start;fill:"\
+font_color)
vtext.text=str(int(orig_values[cnt]))
layer.append(vtext)
cnt=cnt+1
offset=offset+bar_width+bar_offset
elif(charttype=="pie"):
#########
###PIE###
#########
# Iterate all values to draw the different slices
color=0
# Set Default Colors
# Get defs of Document
defs = self.xpathSingle('/svg:svg//svg:defs')
if defs == None:
defs = inkex.etree.SubElement(self.document.getroot(),inkex.addNS('defs','svg'))
# Create new Filter
filt = inkex.etree.SubElement(defs,inkex.addNS('filter','svg'))
filtId = self.uniqueId('filter')
self.filtId = 'filter:url(#%s);' % filtId
for k, v in [('id', filtId), ('height', "3"),
('width', "3"),
('x', '-0.5'), ('y', '-0.5')]:
filt.set(k, v)
# Append Gaussian Blur to that Filter
fe = inkex.etree.SubElement(filt,inkex.addNS('feGaussianBlur','svg'))
fe.set('stdDeviation', "1.1")
# Add a grey background circle
background=inkex.etree.Element(inkex.addNS("circle","svg"))
background.set("cx", str(width/2))
background.set("cy", str(height/2))
background.set("r", str(pie_radius))
if pie_abs:
background.set("style","stroke:#ececec;fill:#f9f9f9")
else:
background.set("style","fill:#aaaaaa;stroke:none")
layer.append(background)
#create value sum in order to divide the slices
try:
valuesum=sum(values)
except ValueError:
valuesum=0
if pie_abs:
valuesum=100
# Set an offsetangle
offset=0
# Draw single slices with their shadow
for value in values:
# Calculate the PI-angles for start and end
angle=(2*3.141592)/valuesum*float(value)
# Create the shadow first (if it should be created):
if(draw_blur):
shadow=inkex.etree.Element(inkex.addNS("path","svg"))
shadow.set(inkex.addNS('type', 'sodipodi'), 'arc')
shadow.set(inkex.addNS('cx', 'sodipodi'), str(width/2))
shadow.set(inkex.addNS('cy', 'sodipodi'), str(height/2))
shadow.set(inkex.addNS('rx', 'sodipodi'), str(pie_radius))
shadow.set(inkex.addNS('ry', 'sodipodi'), str(pie_radius))
shadow.set(inkex.addNS('start', 'sodipodi'), str(offset))
shadow.set(inkex.addNS('end', 'sodipodi'), str(offset+angle))
shadow.set("style","filter:url(#filter);fill:#000000")
#then add the slice
pieslice=inkex.etree.Element(inkex.addNS("path","svg"))
pieslice.set(inkex.addNS('type', 'sodipodi'), 'arc')
pieslice.set(inkex.addNS('cx', 'sodipodi'), str(width/2))
pieslice.set(inkex.addNS('cy', 'sodipodi'), str(height/2))
pieslice.set(inkex.addNS('rx', 'sodipodi'), str(pie_radius))
pieslice.set(inkex.addNS('ry', 'sodipodi'), str(pie_radius))
pieslice.set(inkex.addNS('start', 'sodipodi'), str(offset))
pieslice.set(inkex.addNS('end', 'sodipodi'), str(offset+angle))
pieslice.set("style","fill:"+Colors[color%color_count]+";stroke:none;fill-opacity:1")
#If text is given, draw short paths and add the text
if(keys_present):
path=inkex.etree.Element(inkex.addNS("path","svg"))
path.set("d","m "+str((width/2)+pie_radius*math.cos(angle/2+offset))+","+str((height/2)+pie_radius*math.sin(angle/2+offset))+" "+str((text_offset-2)*math.cos(angle/2+offset))+","+str((text_offset-2)*math.sin(angle/2+offset)))
path.set("style","fill:none;stroke:"+font_color+";stroke-width:"+str(stroke_width)+"px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1")
layer.append(path)
text = inkex.etree.Element(inkex.addNS('text','svg'))
text.set("x", str((width/2)+(pie_radius+text_offset)*math.cos(angle/2+offset)))
text.set("y", str((height/2)+(pie_radius+text_offset)*math.sin(angle/2+offset)+font_size/3))
textstyle="font-size:"+str(font_size)+"px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:"+font+";-inkscape-font-specification:Bitstream Charter;fill:"+font_color
#check if it is right or left of the Pie
if(math.cos(angle/2+offset)>0):
text.set("style",textstyle)
else:
text.set("style",textstyle+";text-align:end;text-anchor:end")
text.text=keys[cnt]
if show_values:
text.text=text.text+"("+str(values[cnt])
if pie_abs:
text.text=text.text+" %"
text.text=text.text+")"
cnt=cnt+1
layer.append(text)
#increase the rotation-offset and the colorcycle-position
offset=offset+angle
color=(color+1)%8
#append the objects to the extension-layer
if(draw_blur):
layer.append(shadow)
layer.append(pieslice)
elif(charttype=="stbar"):
#################
###STACKED BAR###
#################
# Iterate all values to draw the different slices
color=0
# Get defs of Document
defs = self.xpathSingle('/svg:svg//svg:defs')
if defs == None:
defs = inkex.etree.SubElement(self.document.getroot(),inkex.addNS('defs','svg'))
# Create new Filter
filt = inkex.etree.SubElement(defs,inkex.addNS('filter','svg'))
filtId = self.uniqueId('filter')
self.filtId = 'filter:url(#%s);' % filtId
for k, v in [('id', filtId), ('height', "3"),
('width', "3"),
('x', '-0.5'), ('y', '-0.5')]:
filt.set(k, v)
# Append Gaussian Blur to that Filter
fe = inkex.etree.SubElement(filt,inkex.addNS('feGaussianBlur','svg'))
fe.set('stdDeviation', "1.1")
#create value sum in order to divide the bars
try:
valuesum=sum(values)
except ValueError:
valuesum=0.0
# Init offset
offset=0
i=len(values)-1 #loopcounter
# Draw Single bars with their shadows
for value in values:
# Calculate the individual heights normalized on 100units
normedvalue=(bar_height/valuesum)*float(value)
if(draw_blur):
# Create rectangle element
shadow = inkex.etree.Element(inkex.addNS("rect","svg"))
# Set chart position to center of document.
if(not rotate):
shadow.set('x', str(width / 2 + 1))
shadow.set('y', str(height / 2 - offset - (normedvalue)+1))
else:
shadow.set('x', str(width / 2 + 1 + offset))
shadow.set('y', str(height / 2 +1))
# Set rectangle properties
if(not rotate):
shadow.set("width",str(bar_width))
shadow.set("height", str((normedvalue)))
else:
shadow.set("width",str((normedvalue)))
shadow.set("height", str(bar_width))
# Set shadow blur (connect to filter object in xml path)
shadow.set("style","filter:url(#filter)")
# Create rectangle element
rect = inkex.etree.Element(inkex.addNS('rect','svg'))
# Set chart position to center of document.
if( not rotate ):
rect.set('x', str(width / 2 ))
rect.set('y', str(height / 2 - offset - (normedvalue)))
else:
rect.set('x', str(width / 2 + offset ))
rect.set('y', str(height / 2 ))
# Set rectangle properties
if( not rotate ):
rect.set("width", str(bar_width))
rect.set("height", str((normedvalue)))
else:
rect.set("height", str(bar_width))
rect.set("width", str((normedvalue)))
rect.set("style","fill:"+Colors[color%color_count])
#If text is given, draw short paths and add the text
if(keys_present):
if(not rotate):
path=inkex.etree.Element(inkex.addNS("path","svg"))
path.set("d","m "+str((width+bar_width)/2)+","+str(height / 2 - offset - (normedvalue / 2))+" "+str(bar_width/2+text_offset)+",0")
path.set("style","fill:none;stroke:"+font_color+";stroke-width:"+str(stroke_width)+"px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1")
layer.append(path)
text = inkex.etree.Element(inkex.addNS('text','svg'))
text.set("x", str(width/2+bar_width+text_offset+1))
text.set("y", str(height / 2 - offset + font_size/3 - (normedvalue / 2)))
text.set("style","font-size:"+str(font_size)+"px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:"+font+";-inkscape-font-specification:Bitstream Charter;fill:"+font_color)
text.text=keys[cnt]
cnt=cnt+1
layer.append(text)
else:
path=inkex.etree.Element(inkex.addNS("path","svg"))
path.set("d","m "+str((width)/2+offset+normedvalue/2)+","
+str(height / 2 + bar_width/2)
+" 0,"+str(bar_width/2+(font_size*i)+text_offset)) #line
path.set("style","fill:none;stroke:"+font_color+";stroke-width:"+str(stroke_width)+"px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1")
layer.append(path)
text = inkex.etree.Element(inkex.addNS('text','svg'))
text.set("x", str((width)/2+offset+normedvalue/2-font_size/3))
text.set("y", str((height/2)+bar_width+(font_size*(i+1))+text_offset ))
text.set("style","font-size:"+str(font_size)+"px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:"+font+";-inkscape-font-specification:Bitstream Charter;fill:"+font_color)
text.text=keys[color]
layer.append(text)
# Increase Offset and Color
offset=offset+normedvalue
color=(color+1)%8
# Connect elements together.
if(draw_blur):
layer.append(shadow)
layer.append(rect)
i-=1 #loopcounter
def getUnittouu(self, param):
#compatibility wrapper
try:
return inkex.unittouu(param)
except AttributeError:
return self.unittouu(param)
# Create effect instance and apply it.
effect = NiceChart()
effect.affect()
| gpl-3.0 | 8,903,890,283,883,620,000 | 33.852941 | 230 | 0.639129 | false |
janusnic/shoop-wintergear-demo | shoop_demo/settings/base_settings.py | 1 | 4291 | # This file is part of Shoop Wintergear Demo.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from shoop.addons import add_enabled_addons
import os
BASE_DIR = os.path.realpath(
os.path.join(os.path.dirname(__file__), "..", ".."))
SECRET_KEY = "Shhhhh"
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
MEDIA_ROOT = os.path.join(BASE_DIR, "var", "media")
STATIC_ROOT = os.path.join(BASE_DIR, "var", "static")
MEDIA_URL = "/media/"
SHOOP_ENABLED_ADDONS_FILE = os.path.join(BASE_DIR, "var", "enabled_addons")
INSTALLED_APPS = add_enabled_addons(SHOOP_ENABLED_ADDONS_FILE, (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
####
'shoop_demo',
'wintergear_theme',
'wintergear_demo_content',
####
'django_jinja',
'filer',
'easy_thumbnails',
'shoop.core',
'shoop.simple_pricing',
'shoop.simple_supplier',
'shoop.default_tax',
'shoop.front',
'shoop.front.apps.registration',
'shoop.front.apps.auth',
'shoop.front.apps.customer_information',
'shoop.front.apps.personal_order_history',
'shoop.front.apps.simple_order_notification',
'shoop.front.apps.simple_search',
'shoop.admin',
'shoop.addons',
'shoop.testing',
'bootstrap3',
'shoop.notify',
'shoop.simple_cms',
'shoop.stripe',
'registration',
))
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'shoop.front.middleware.ProblemMiddleware',
'shoop.front.middleware.ShoopFrontMiddleware',
)
ROOT_URLCONF = 'shoop_demo.urls'
WSGI_APPLICATION = 'shoop_demo.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
'TEST': {
'NAME': os.path.join(BASE_DIR, 'db_test.sqlite3'),
}
}
}
LANGUAGE_CODE = 'en'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
LOGIN_REDIRECT_URL = '/'
SOUTH_TESTS_MIGRATE = False # Makes tests that much faster.
LANGUAGES = [
('en', 'English'),
('fi', 'Finnish'),
]
PARLER_DEFAULT_LANGUAGE_CODE = "en"
PARLER_LANGUAGES = {
None: [{"code": c, "name": n} for (c, n) in LANGUAGES],
'default': {
'hide_untranslated': False,
}
}
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.request",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages"
)
TEMPLATES = [
{
"BACKEND": "django_jinja.backend.Jinja2",
"APP_DIRS": True,
"OPTIONS": {
"match_extension": ".jinja",
"context_processors": TEMPLATE_CONTEXT_PROCESSORS,
"newstyle_gettext": True,
},
"NAME": "jinja2",
},
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": TEMPLATE_CONTEXT_PROCESSORS,
}
},
]
SESSION_SERIALIZER = "django.contrib.sessions.serializers.PickleSerializer"
SHOOP_PAYMENT_MODULE_IMPLEMENTATIONS = {
"pseudo": "shoop.testing.pseudo_payment.PseudoPaymentMethodModule",
}
SHOOP_BASKET_COMMAND_DISPATCHER_SPEC = (
"shoop_demo.basket_command_dispatcher:WintergearBasketCommandDispatcher")
SHOOP_BASKET_VIEW_SPEC = "shoop_demo.views.basket:WintergearBasketView"
DEMO_CREDENTIALS = "admin / admin"
def configure(setup):
setup.commit(globals())
| agpl-3.0 | 4,649,339,829,990,463,000 | 26.863636 | 77 | 0.662783 | false |
nirs/hpy | hpy/htokenize.py | 1 | 12383 | """Tokenization help for Python programs.
This is tokenize module from Python 2.4.3 with minor modification needed to
support Hebrew tokens.
generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens. It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF). It generates
5-tuples with these members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators
Older entry points
tokenize_loop(readline, tokeneater)
tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found.
@license: Python license.
"""
__author__ = 'Ka-Ping Yee <[email protected]>'
__credits__ = \
'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'
import string, re
from token import *
import token
__all__ = [x for x in dir(token) if x[0] != '_'] + ["COMMENT", "tokenize",
"generate_tokens", "NL"]
del x
del token
from hpy import hebrew
COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
NL = N_TOKENS + 1
tok_name[NL] = 'NL'
N_TOKENS += 2
def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = ur'[a-zA-Z_%s]\w*' % ''.join(hebrew.alpha)
Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
Octnumber = r'0[0-7]*[lL]?'
Decnumber = r'[1-9]\d*[lL]?'
Intnumber = group(Hexnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+'
Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
Expfloat = r'\d+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[uU]?[rR]?'''", '[uU]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
r"//=?",
r"[+\-*/%&|^=<>]=?",
r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group("'", r'\\\r?\n'),
r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
pseudoprog = re.compile(PseudoToken, re.U)
tokenprog = re.compile(Token, re.U)
single3prog = re.compile(Single3, re.U)
double3prog = re.compile(Double3, re.U)
endprogs = {"'": re.compile(Single, re.U), '"': re.compile(Double, re.U),
"'''": single3prog, '"""': double3prog,
"r'''": single3prog, 'r"""': double3prog,
"u'''": single3prog, 'u"""': double3prog,
"ur'''": single3prog, 'ur"""': double3prog,
"R'''": single3prog, 'R"""': double3prog,
"U'''": single3prog, 'U"""': double3prog,
"uR'''": single3prog, 'uR"""': double3prog,
"Ur'''": single3prog, 'Ur"""': double3prog,
"UR'''": single3prog, 'UR"""': double3prog,
'r': None, 'R': None, 'u': None, 'U': None}
triple_quoted = {}
for t in ("'''", '"""',
"r'''", 'r"""', "R'''", 'R"""',
"u'''", 'u"""', "U'''", 'U"""',
"ur'''", 'ur"""', "Ur'''", 'Ur"""',
"uR'''", 'uR"""', "UR'''", 'UR"""'):
triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
"r'", 'r"', "R'", 'R"',
"u'", 'u"', "U'", 'U"',
"ur'", 'ur"', "Ur'", 'Ur"',
"uR'", 'uR"', "UR'", 'UR"' ):
single_quoted[t] = t
tabsize = 8
class TokenError(Exception): pass
class StopTokenizing(Exception): pass
def printtoken(type, token, (srow, scol), (erow, ecol), line): # for testing
print "%d,%d-%d,%d:\t%s\t%s" % \
(srow, scol, erow, ecol, tok_name[type], repr(token))
def tokenize(readline, tokeneater=printtoken):
"""
The tokenize() function accepts two parameters: one representing the
input stream, and one providing an output mechanism for tokenize().
The first parameter, readline, must be a callable object which provides
the same interface as the readline() method of built-in file objects.
Each call to the function should return one line of input as a string.
The second parameter, tokeneater, must also be a callable object. It is
called once for each token, with five arguments, corresponding to the
tuples generated by generate_tokens().
"""
try:
tokenize_loop(readline, tokeneater)
except StopTokenizing:
pass
# backwards compatible interface
def tokenize_loop(readline, tokeneater):
for token_info in generate_tokens(readline):
tokeneater(*token_info)
def generate_tokens(readline):
"""
The generate_tokens() generator requires one argment, readline, which
must be a callable object which provides the same interface as the
readline() method of built-in file objects. Each call to the function
should return one line of input as a string.
The generator produces 5-tuples with these members: the token type; the
token string; a 2-tuple (srow, scol) of ints specifying the row and
column where the token begins in the source; a 2-tuple (erow, ecol) of
ints specifying the row and column where the token ends in the source;
and the line on which the token was found. The line passed is the
logical line; continuation lines are included.
"""
lnum = parenlev = continued = 0
namechars = string.ascii_letters + '_' + ''.join(hebrew.alpha)
numchars = '0123456789'
contstr, needcont = '', 0
contline = None
indents = [0]
while 1: # loop over lines in stream
line = readline()
lnum = lnum + 1
pos, max = 0, len(line)
if contstr: # continued string
if not line:
raise TokenError, ("EOF in multi-line string", strstart)
endmatch = endprog.match(line)
if endmatch:
pos = end = endmatch.end(0)
yield (STRING, contstr + line[:end],
strstart, (lnum, end), contline + line)
contstr, needcont = '', 0
contline = None
elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
yield (ERRORTOKEN, contstr + line,
strstart, (lnum, len(line)), contline)
contstr = ''
contline = None
continue
else:
contstr = contstr + line
contline = contline + line
continue
elif parenlev == 0 and not continued: # new statement
if not line: break
column = 0
while pos < max: # measure leading whitespace
if line[pos] == ' ': column = column + 1
elif line[pos] == '\t': column = (column/tabsize + 1)*tabsize
elif line[pos] == '\f': column = 0
else: break
pos = pos + 1
if pos == max: break
if line[pos] in '#\r\n': # skip comments or blank lines
yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
(lnum, pos), (lnum, len(line)), line)
continue
if column > indents[-1]: # count indents or dedents
indents.append(column)
yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
while column < indents[-1]:
if column not in indents:
raise IndentationError(
"unindent does not match any outer indentation level")
indents = indents[:-1]
yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
else: # continued statement
if not line:
raise TokenError, ("EOF in multi-line statement", (lnum, 0))
continued = 0
while pos < max:
pseudomatch = pseudoprog.match(line, pos)
if pseudomatch: # scan for tokens
start, end = pseudomatch.span(1)
spos, epos, pos = (lnum, start), (lnum, end), end
token, initial = line[start:end], line[start]
if initial in numchars or \
(initial == '.' and token != '.'): # ordinary number
yield (NUMBER, token, spos, epos, line)
elif initial in '\r\n':
yield (parenlev > 0 and NL or NEWLINE,
token, spos, epos, line)
elif initial == '#':
yield (COMMENT, token, spos, epos, line)
elif token in triple_quoted:
endprog = endprogs[token]
endmatch = endprog.match(line, pos)
if endmatch: # all on one line
pos = endmatch.end(0)
token = line[start:pos]
yield (STRING, token, spos, (lnum, pos), line)
else:
strstart = (lnum, start) # multiple lines
contstr = line[start:]
contline = line
break
elif initial in single_quoted or \
token[:2] in single_quoted or \
token[:3] in single_quoted:
if token[-1] == '\n': # continued string
strstart = (lnum, start)
endprog = (endprogs[initial] or endprogs[token[1]] or
endprogs[token[2]])
contstr, needcont = line[start:], 1
contline = line
break
else: # ordinary string
yield (STRING, token, spos, epos, line)
elif initial in namechars: # ordinary name
yield (NAME, token, spos, epos, line)
elif initial == '\\': # continued stmt
continued = 1
else:
if initial in '([{': parenlev = parenlev + 1
elif initial in ')]}': parenlev = parenlev - 1
yield (OP, token, spos, epos, line)
else:
yield (ERRORTOKEN, line[pos],
(lnum, pos), (lnum, pos+1), line)
pos = pos + 1
for indent in indents[1:]: # pop remaining indent levels
yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
if __name__ == '__main__': # testing
import sys
if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
else: tokenize(sys.stdin.readline)
| gpl-2.0 | -847,647,740,305,693,600 | 39.074434 | 78 | 0.515465 | false |
thp/backuppurge | lib/backuppurge/__init__.py | 1 | 7956 | # -*- coding: utf-8 -*-
#
# backuppurge: Selectively purge daily full backups
#
# Copyright (c) 2013, 2015 Thomas Perl <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
Selectively purge daily full backups
Lists files in that should be purged in a backup strategy where daily backups
are kept for *DAYS* days, monthly backups for *MONTHS* months and yearly backups
for *YEARS* years. Monthly and yearly backups are always the oldest possible
daily backup (e.g. first of month and first of year that is available).
Files are expected to have their date embedded as ``YYYY-MM-DD`` somewhere in
the filename, e.g. ``homedir-2013-03-31.tgz``
For monthly and yearly backups, the first day available will be kept (e.g.
January 1st for yearly, but if that is not available, January 2nd will be
kept, etc..).
This program can be used together with xargs(1) from GNU findutils::
backuppurge --print0 /var/backups/ | xargs -r -0 rm
Only files directly in the specified **DIRECTORY** will be searched (in the
above example, ``/var/backups/homedir-2013-03-31.tgz`` will be considered,
but not ``/var/backups/etc/etc-2013-03-31.tgz``). This prevents accidental
deletion of files. If --include-directories (-D) is used, directories directly
below the path will be included in the search (e.g. the directory
``/var/backups/etc-2015-07-24/`` will be included in the purge search).
This script assumes daily backups are FULL backups, not incremental. For
example, a full daily backup of your ``/etc`` can be created by adding
(``crontab -e``) a command like the following to your crontab(5) file::
tar czf /var/backups/etc/etc-$(date +%F).tgz /etc
"""
from __future__ import print_function
import logging
import warnings
import datetime
import re
import os
__author__ = 'Thomas Perl <[email protected]>'
__license__ = 'Simplified BSD License'
__url__ = 'http://thp.io/2013/backuppurge/'
__version__ = '1.0.4'
class MixedFilenames(BaseException):
"""
Raised when the filenames passed to PurgeList do not all share the
same prefix and postfix (before/after the date).
"""
pass
class NoBackupsFound(Warning):
"""
Warning raised when no backup files (with a date) are found.
"""
pass
warnings.simplefilter('always', NoBackupsFound)
def find_backups(directory, include_directories):
"""
Find backup files in directory
"""
return filter(lambda f: (include_directories and os.path.isdir(f)) or os.path.isfile(f),
map(lambda filename: os.path.join(directory, filename), os.listdir(directory)))
class PurgeList:
def __init__(self, filenames, today, prefix):
self.logger = logging.getLogger(self.__class__.__name__)
self.filenames = filenames
self.today = today
self.prefix = prefix
# Check prefix of files (before date), bail out if not all equal
self.check_file_list()
# By default, purge everything
self.purge = set(self.filenames)
def check_file_list(self):
regex = re.compile(r'^(.*)(\d{4}-\d{2}-\d{2})(.*)$')
# Remove all filenames without a date string in them
self.filenames = list(filter(regex.match, self.filenames))
if self.prefix is not None:
self.filenames = [filename for filename in self.filenames
if regex.match(filename).group(1) == self.prefix]
if len(self.filenames) == 0:
warnings.warn('File list is empty', NoBackupsFound)
return
prefixes, _, postfixes = map(set, zip(*[regex.match(filename).groups()
for filename in self.filenames]))
if len(prefixes) != 1:
raise MixedFilenames('Non-unique prefixes: {0}'.format(prefixes))
if len(postfixes) != 1:
raise MixedFilenames('Non-unique postfixes: {0}'.format(postfixes))
def keep(self, filename, kind):
"""Mark filename to be kept"""
if filename is None:
return
if filename in self.purge:
self.logger.info('Keeping file for %s: %s', kind, filename)
self.purge.remove(filename)
else:
self.logger.debug('File for %s already kept: %s', kind, filename)
def get_all(self, year, month=None, day=None):
"""Get all backups for a specific year"""
month_re = r'{0:02d}'.format(month) if month else r'\d{2}'
day_re = r'{0:02d}'.format(day) if day else r'\d{2}'
regex = re.compile(r'{0:04d}-{1:s}-{2:s}'.format(year, month_re, day_re))
return sorted(filter(regex.search, self.filenames))
def get_first(self, year, month=None, day=None):
"""Get first backup for a specific year, month or day
get_first(2013) -> First available backup in 2013
get_first(2013, 3) -> First available backup in March 2013
get_first(2013, 3, 31) -> First available backup for March 31st 2013
"""
matches = self.get_all(year, month, day)
if matches:
return matches[0]
return None
def recent_days(self, count):
day = self.today
while count > 0:
yield (day.year, day.month, day.day)
day -= datetime.timedelta(days=1)
count -= 1
def recent_months(self, count):
month = (self.today.year, self.today.month)
while count > 0:
yield month
if month[1] == 1:
month = (month[0]-1, 12)
else:
month = (month[0], month[1]-1)
count -= 1
def recent_years(self, count):
year = self.today.year
while count > 0:
yield year
year -= 1
count -= 1
def keep_daily(self, days):
for year, month, day in self.recent_days(days):
self.keep(self.get_first(year, month, day), 'daily')
def keep_monthly(self, months):
for year, month in self.recent_months(months):
self.keep(self.get_first(year, month),
'monthly ({0}-{1:02d})'.format(year, month))
def keep_yearly(self, years):
for year in self.recent_years(years):
self.keep(self.get_first(year), 'yearly ({0})'.format(year))
def get_filenames(self):
return self.purge
def main(directory, days, months, years, separator, include_directories, prefix):
today = datetime.date.today()
filenames = find_backups(directory, include_directories)
purge_list = PurgeList(filenames, today, prefix)
purge_list.keep_daily(days)
purge_list.keep_monthly(months)
purge_list.keep_yearly(years)
purge_files = purge_list.get_filenames()
if purge_files:
print(separator.join(purge_files), end=separator)
| bsd-2-clause | -5,817,171,671,719,729,000 | 33.894737 | 97 | 0.653343 | false |
personalrobotics/prpy | src/prpy/planning/snap.py | 1 | 6317 | #!/usr/bin/env python
# Copyright (c) 2013, Carnegie Mellon University
# All rights reserved.
# Authors: Michael Koval <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of Carnegie Mellon University nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy
import openravepy
from openravepy import Robot
from prpy.util import SetTrajectoryTags
from prpy.planning.base import (
Planner,
PlanningError,
LockedPlanningMethod,
Tags
)
from ..collision import DefaultRobotCollisionCheckerFactory
class SnapPlanner(Planner):
"""Planner that checks the straight-line trajectory to the goal.
SnapPlanner is a utility planner class that collision checks the
straight-line trajectory to the goal. If that trajectory is invalid,
e.g. due to an environment or self collision, the planner
immediately returns failure by raising a PlanningError.
SnapPlanner is intended to be used only as a "short circuit" to
speed up planning between nearby configurations. This planner is
most commonly used as the first item in a Sequence meta-planner to
avoid calling a motion planner when the trivial solution is valid.
"""
def __init__(self, robot_checker_factory=None):
super(SnapPlanner, self).__init__()
if robot_checker_factory is None:
robot_checker_factory = DefaultRobotCollisionCheckerFactory
self.robot_checker_factory = robot_checker_factory
def __str__(self):
return 'SnapPlanner'
@LockedPlanningMethod
def PlanToConfiguration(self, robot, goal, **kw_args):
"""
Attempt to plan a straight line trajectory from the robot's
current configuration to the goal configuration. This will
fail if the straight-line path is not collision free.
@param robot
@param goal desired configuration
@return traj
"""
return self._Snap(robot, goal, **kw_args)
def _Snap(self, robot, goal, **kw_args):
from prpy.util import CheckJointLimits
from prpy.util import GetLinearCollisionCheckPts
from prpy.planning.exceptions import CollisionPlanningError
from prpy.planning.exceptions import SelfCollisionPlanningError
# Create a two-point trajectory between the
# current configuration and the goal.
# (a straight line in joint space)
env = robot.GetEnv()
traj = openravepy.RaveCreateTrajectory(env, '')
cspec = robot.GetActiveConfigurationSpecification('linear')
active_indices = robot.GetActiveDOFIndices()
# Check the start position is within joint limits,
# this can throw a JointLimitError
start = robot.GetActiveDOFValues()
CheckJointLimits(robot, start, deterministic=True)
# Add the start waypoint
start_waypoint = numpy.zeros(cspec.GetDOF())
cspec.InsertJointValues(start_waypoint, start, robot,
active_indices, False)
traj.Init(cspec)
traj.Insert(0, start_waypoint.ravel())
# Make the trajectory end at the goal configuration, as
# long as it is not in collision and is not identical to
# the start configuration.
CheckJointLimits(robot, goal, deterministic=True)
if not numpy.allclose(start, goal):
goal_waypoint = numpy.zeros(cspec.GetDOF())
cspec.InsertJointValues(goal_waypoint, goal, robot,
active_indices, False)
traj.Insert(1, goal_waypoint.ravel())
# Get joint configurations to check
# Note: this returns a python generator, and if the
# trajectory only has one waypoint then only the
# start configuration will be collisioned checked.
#
# Sampling function:
# 'linear'
# from prpy.util import SampleTimeGenerator
# linear = SampleTimeGenerator
# 'Van der Corput'
from prpy.util import VanDerCorputSampleGenerator
vdc = VanDerCorputSampleGenerator
checks = GetLinearCollisionCheckPts(robot, traj,
norm_order=2,
sampling_func=vdc)
with self.robot_checker_factory(robot) as robot_checker, \
robot.CreateRobotStateSaver(Robot.SaveParameters.LinkTransformation):
# Run constraint checks at DOF resolution:
for t, q in checks:
# Set the joint positions
# Note: the planner is using a cloned 'robot' object
robot.SetActiveDOFValues(q)
# Check collision (throws an exception on collision)
robot_checker.VerifyCollisionFree()
SetTrajectoryTags(traj, {
Tags.SMOOTH: True,
Tags.DETERMINISTIC_TRAJECTORY: True,
Tags.DETERMINISTIC_ENDPOINT: True,
}, append=True)
return traj
| bsd-3-clause | -6,315,070,951,300,979,000 | 41.113333 | 81 | 0.684186 | false |
goblinhack/MundusMeus | python/things/weapon.py | 1 | 13288 | import tp
import mm
def thing_init(t):
return
def weapon_init(name, short_name, long_name, damage, is_double_handed=False):
x = tp.Tp(name)
x.set_long_name(long_name)
x.set_short_name(short_name)
x.set_is_weapon(True)
x.set_z_depth(mm.Z_DEPTH_TREASURE)
x.set_damage(damage)
x.set_is_double_handed(is_double_handed)
x.set_tile(tile=name)
x.thing_init = thing_init
def init():
weapon_init(name="axe1.1",
short_name="Hand Axe",
long_name="Very handy axe. Useful for all axeing occasions.",
damage="1d4",
is_double_handed=True
)
weapon_init(name="axe1.2",
short_name="Battle Axe",
long_name="Dont battle without this axe.",
damage="1d6"
)
weapon_init(name="axe1.3",
short_name="Greataxe",
long_name="This axe is great indeed. " +
"Not the greatest, but still pretty great.",
damage="1d8+1",
is_double_handed=True
)
weapon_init(name="axe1.4",
short_name="Even Greater Axe",
long_name="The greatest of great great axes.",
damage="1d10+2",
is_double_handed=True
)
weapon_init(name="axe1.5",
short_name="Masterwork Axe",
long_name="Finest craftwork axe. Definately not made by orcs.",
damage="1d12"
)
weapon_init(name="axe1.6",
short_name="Diamond Axe",
long_name="Diamond encrusted bladed axe. " +
"Glistens in the dark.",
damage="1d14"
)
weapon_init(name="axe1.7",
short_name="Blood Axe",
long_name="This axe yearns to be whetted with blood. "
"Hopefully not your own.",
damage="2d6+2"
)
weapon_init(name="axe1.9",
short_name="Cleaver Axe",
long_name="An edge so sharp, " +
"you might lose your head over it.",
damage="1d10",
is_double_handed=True
)
weapon_init(name="ball_chain1.1",
short_name="Flail",
long_name="Don't flail around with this flail.",
damage="1d4"
)
weapon_init(name="ball_chain1.2",
short_name="Masterwork Flail",
long_name="If you need to flail, this is the weapon for you.",
damage="1d6"
)
weapon_init(name="ball_chain1.3",
short_name="Diamond Flail",
long_name="Flailing with a sharp edge.",
damage="1d12"
)
weapon_init(name="bow1.1",
short_name="Bow",
long_name="Standard issue bow. Wooden. " +
"Bowish. What more can be said?",
damage="1d6",
is_double_handed=True
)
weapon_init(name="bow1.2",
short_name="Longishbow",
long_name="Not quite a long bow, but long enough.",
damage="1d8",
is_double_handed=True
)
weapon_init(name="bow1.3",
short_name="Metal Longbow",
long_name="A tough bow for a tough individual.",
damage="1d10",
is_double_handed=True
)
weapon_init(name="bow1.4",
short_name="Bowmaster",
long_name="The bow of masters. The bow master.",
damage="1d10",
is_double_handed=True
)
weapon_init(name="bow1.5",
short_name="Masterwork Bow",
long_name="Beautiful oaken bow with inlaid markings " +
"and a silver handle. Probably fires well too.",
damage="1d12",
is_double_handed=True
)
weapon_init(name="bow1.6",
short_name="Crossbow",
long_name="If your angry and have targets, " +
"this is the bow for you. " +
"No archery training required.",
damage="1d6",
is_double_handed=True
)
weapon_init(name="bow1.7",
short_name="Metal cross",
long_name="Resounding thuds will come from " +
"this device. And screams.",
damage="1d8",
is_double_handed=True
)
weapon_init(name="bow1.8",
short_name="Masterwork cross",
long_name="It's a weapon of pointy death, " +
"but it's beautifully made. Shiny.",
damage="1d6",
is_double_handed=True
)
weapon_init(name="mace1.1",
short_name="Mace",
long_name="No powder here, this is a serious mace, " +
"made for resounding head impacts.",
damage="1d8"
)
weapon_init(name="mace1.2",
short_name="War Mace",
long_name="If you need to go to war, you need this mace.",
damage="1d10"
)
weapon_init(name="quiver1.1",
short_name="Arrows",
long_name="Standard issue ACME arrows.",
damage="1d6"
)
weapon_init(name="quiver1.2",
short_name="Flame Arrows",
long_name="Arrows that will ingite on use. No returns.",
damage="1d6"
)
weapon_init(name="quiver1.3",
short_name="Energy Arrows",
long_name="Arrows that transform into beams of " +
"energy on use. No kidding.",
damage="1d6"
)
weapon_init(name="quiver1.4",
short_name="Acid Arrows",
long_name="Don't touch the end of these arrows. " +
"And don't try and taste them either.",
damage="1d6"
)
weapon_init(name="stick1.1",
short_name="Just a stick",
long_name="Sticky the stick.",
damage="1d4"
)
weapon_init(name="stick1.2",
short_name="Flame Stick",
long_name="Sticky the stick, burning version.",
damage="1d4"
)
weapon_init(name="stick1.3",
short_name="Magic Stick",
long_name="It's a magically enhanced stick... " +
"Who would believe that?",
damage="1d4+1"
)
weapon_init(name="stick2.1",
short_name="Stick V2",
long_name="Sticky the stick, mildly improved version.",
damage="1d4+2"
)
weapon_init(name="stick2.2",
short_name="Hooked Stick",
long_name="Great stick for inflicting a bit of extra " +
"damage than your common stick.",
damage="1d4+3"
)
weapon_init(name="stick2.3",
short_name="Gnarly Stick",
long_name="An oaken stick with gnarly stuff on " +
"the business end. Good for hitting things with.",
damage="1d4+4"
)
weapon_init(name="stick2.4",
short_name="Battle Stick",
long_name="The stick of the professional peasant.",
damage="1d6"
)
weapon_init(name="sword1.1",
short_name="Shortest Sword",
long_name="The shortest of short swords.",
damage="1d4"
)
weapon_init(name="sword1.2",
short_name="Short Sword",
long_name="The second shortest of short swords.",
damage="1d6"
)
weapon_init(name="sword1.3",
short_name="Needle Sword",
long_name="A sword with a point so fine it will " +
"pierce you to the heart.",
damage="1d4+2"
)
weapon_init(name="sword1.4",
short_name="Meat Cleaver",
long_name="Not exactly a skillful weapon, but it does " +
"the job. The job of a lunatic.",
damage="1d6"
)
weapon_init(name="sword1.5",
short_name="Ice Shortsword",
long_name="It's short, blue and icy.",
damage="1d6+1"
)
weapon_init(name="sword1.6",
short_name="Platinum Shortsword",
long_name="Of short swords, this is one of the best. " +
"Durable, short and shiny.",
damage="1d8"
)
weapon_init(name="sword1.7",
short_name="Flaming Shortsword",
long_name="Mesmerizing blade. Flame ripples along its edges.",
damage="1d6+2"
)
weapon_init(name="sword1.8",
short_name="Gladius",
long_name="Wide bladed Roman style sword. " +
"Great for leaving big wounds.",
damage="1d4+3"
)
weapon_init(name="sword1.9",
short_name="Dao",
long_name="Wicked curved blade.",
damage="1d6+2"
)
weapon_init(name="sword1.10",
short_name="Khopesh",
long_name="The oriental blade of the professional.",
damage="1d6+4"
)
weapon_init(name="sword1.11",
short_name="Long Sword",
long_name="It's long. And a sword.",
damage="1d8",
is_double_handed=True
)
weapon_init(name="sword1.12",
short_name="Claymore",
long_name="The sword of the Highlander. This sword " +
"will give you your freedom. Or someone elses.",
damage="1d8+2",
is_double_handed=True
)
weapon_init(name="sword1.13",
short_name="Greatsword",
long_name="It's a sword and it's great.",
damage="1d10",
is_double_handed=True
)
weapon_init(name="sword1.14",
short_name="Masterwork Greatsword",
long_name="Don't mess around. Get this great sword.",
damage="1d10+2",
is_double_handed=True
)
weapon_init(name="sword1.15",
short_name="Platinum Greatsword",
long_name="They don't come much tougher than this.",
damage="1d12+5",
is_double_handed=True
)
weapon_init(name="sword1.16",
short_name="Flaiming Greatsword",
long_name="Dismember and cook your enemies.",
damage="1d10+3",
is_double_handed=True
)
weapon_init(name="sword1.17",
short_name="Serrated Sword",
long_name="Slice and dice with greatness.",
damage="1d6+4"
)
weapon_init(name="sword1.18",
short_name="Ulfbehrt",
long_name="Quality hybrid of Viking and Knightly sword",
damage="1d8+2",
is_double_handed=True
)
weapon_init(name="sword1.19",
short_name="Khanda",
long_name="Double edged straight sword",
damage="1d10"
)
weapon_init(name="sword1.20",
short_name="Ice Sword",
long_name="Ice ice sword.",
damage="1d10+3"
)
weapon_init(name="sword1.22",
short_name="Zweihander",
long_name="Massive two handed ultra great sword.",
damage="1d12+6",
is_double_handed=True
)
weapon_init(name="sword_wooden1.1",
short_name="Wooden Sword aka Stick",
long_name="It's a stick",
damage="1d4"
)
weapon_init(name="warhammer1.1",
short_name="Maul",
long_name="Long handled warhammer with metal head",
damage="1d8"
)
weapon_init(name="warhammer1.2",
short_name="Warhammer",
long_name="It's a hammer. For war.",
damage="1d10+1",
is_double_handed=True
)
weapon_init(name="warhammer1.3",
short_name="Masterwork Warhammer",
long_name="A war hammer of distinction.",
damage="1d12+2",
is_double_handed=True
)
init()
| lgpl-3.0 | -4,042,375,541,105,230,000 | 36.643059 | 79 | 0.45748 | false |
GeoMop/GeoMop | testing/Analysis/test_pipeline_module_import.py | 1 | 1187 | import Analysis.pipeline as p
def test_classes():
try:
p.Int()
p.Bool()
p.Float()
p.String()
p.Struct()
p.Ensemble(p.Int())
p.And(p.Bool(True), p.Bool(False))
p.Or(p.Bool(True), p.Bool(False))
p.Input()
p.Connector()
p.RangeGenerator()
p.VariableGenerator()
p.Flow123dAction()
p.Convertor(p.Input(0))
p.Predicate(p.Input(0))
p.KeyConvertor(p.Input(0))
p.Adapter(p.Input(0))
p.Pipeline()
p.Workflow()
p.ForEach()
except:
assert False, "All public classes are not vissible"
for types in (
"DTT", "BaseDTT", " CompositeDTT", "CompositiIter", "SortableDTT",
"PredicatePoint", "TT", "GDTT", "GDTTFunc",
"Bridge", " ActionType", "ActionStateType", "BaseActionType",
"ConnectorActionType", " GeneratorActionType", "ParametrizedActionType",
"WrapperActionType", "WorkflowActionType"
):
try:
test = eval("p."+types+"()")
except:
continue
assert test is None , "Private module class {0} is vissible".format(types)
| gpl-3.0 | -4,680,865,433,653,834,000 | 29.435897 | 82 | 0.544229 | false |
DreamerBear/awesome-py3-webapp | www/core/common/apis.py | 1 | 2526 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date : 2017/10/18 16:34
# @Author : xxc727xxc ([email protected])
# @Version : 1.0.0
'''
JSON API definition
'''
class Page(object):
'''
Page object for display pages.
'''
def __init__(self, item_count, page_index=1, page_size=10):
'''
Init Pagination by item_count, page_index and page_size.
>>> p1 = Page(100, 1)
>>> p1.page_count
10
>>> p1.offset
0
>>> p1.limit
10
>>> p2 = Page(90, 9, 10)
>>> p2.page_count
9
>>> p2.offset
80
>>> p2.limit
10
>>> p3 = Page(91, 10, 10)
>>> p3.page_count
10
>>> p3.offset
90
>>> p3.limit
10
'''
self.item_count = item_count
self.page_size = page_size
self.page_count = item_count // page_size + (1 if item_count % page_size > 0 else 0)
if (item_count == 0) or (page_index > self.page_count):
self.offset = 0
self.limit = 0
self.page_index = 1
else:
self.page_index = page_index
self.offset = self.page_size * (page_index - 1)
self.limit = self.page_size
self.has_next = self.page_index < self.page_count
self.has_previous = self.page_index > 1
def __str__(self):
return 'item_count: %s, page_count: %s, page_index: %s, page_size: %s, offset: %s, limit: %s' % (
self.item_count, self.page_count, self.page_index, self.page_size, self.offset, self.limit)
__repr__ = __str__
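# Illustrative sketch only (not used elsewhere in this module): how a request
# handler would typically drive Page to slice a query. The `fetch_items`
# callable and its offset/limit keyword arguments are hypothetical
# placeholders, not part of this project.
def _example_page_usage(item_count, page_index, fetch_items):
    p = Page(item_count, page_index, page_size=10)
    items = fetch_items(offset=p.offset, limit=p.limit)
    return items, p.page_index, p.page_count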
class APIError(Exception):
def __init__(self, error, data='', message=''):
super().__init__(message)
self.error = error
self.data = data
self.message = message
class APIValueError(APIError):
'''
Indicate the input value has error or invalid. The data specifies the error field of input form.
'''
def __init__(self, field, message=''):
super().__init__('value:invalid', field, message)
class APIResourceNotFoundError(APIError):
'''
Indicate the resource was not found. The data specifies the resource name.
'''
def __init__(self, field, message=''):
super().__init__('value:notfound', field, message)
class APIPermissionError(APIError):
'''
Indicate the api has no permission.
'''
def __init__(self, message=''):
super().__init__('permission:forbidden', 'permission', message)
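# Illustrative sketch only (not referenced by this module): the intended
# pattern is to raise the APIError subclasses above from handlers and turn
# them into a JSON-friendly payload in one place; this helper is hypothetical.
def _example_api_error_payload(e):
    assert isinstance(e, APIError)
    return dict(error=e.error, data=e.data, message=e.message)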
| gpl-3.0 | -5,158,730,437,111,365,000 | 25.589474 | 105 | 0.538797 | false |
trbarrettjr/research-apps | projected.py | 1 | 1091 | #!/usr/bin/env python
import re
import sys
def getData(data):
name = re.search(r'\w\s\w\s[\w]+|\w\s{3}\w+', data)
employee = name.group()
# Headers are as follows below:
# Event, Date, Time, Train, OD Date, OD Time
tuples = re.findall(r'(\w+|\w+\s\w+|\w+\s\w+\s\w+|[\w-]+)\s+(\d+)\s(\d+)\s(\w+)\s(\d+)\s(\d+)', data)
#print employee
#print tuples
outFileName = employee + '.csv'
employeeName = employee + '\n'
header = 'Event Type,Date Checked,Time Checked,Train Projected,Date Projected,Time Projected\n'
outfile = open(outFileName, 'w')
outfile.write(employeeName)
outfile.write(header)
for projected in tuples:
(type, checkDate, checkTime, train, odDate, odTime) = projected
outdata = type + ",'" + checkDate + ",'" + checkTime + "," + train + ",'" + odDate + ",'" + odTime + "\n"
outfile.write(outdata)
def main():
if len(sys.argv) >= 2:
filename = sys.argv[1]
else:
print 'Missing: filename'
sys.exit(1)
f = open(filename, 'r')
text = f.read()
f.close()
getData(text)
if __name__ == '__main__':
main()
| cc0-1.0 | 3,289,933,708,136,195,600 | 23.244444 | 109 | 0.594867 | false |
GNOME/chronojump-server | chronojumpserver/views.py | 1 | 9513 | # -*- coding: utf-8 -*-
"""Chronojump Server views controller."""
from chronojumpserver import app
from flask import render_template, request, redirect, url_for, abort, flash
from urlparse import urlparse, urljoin
from flask_wtf.file import FileField
from chronojumpserver.models import Person, Station, RFIDHistory, User, Group, GroupCoach, GroupPerson, Coach
from chronojumpserver.forms import PersonForm, LoginForm
from flask_login import login_required, login_user, logout_user, current_user
from chronojumpserver.database import db_session
import os
from time import time
def is_safe_url(target):
"""
    Snippet to check if the url is safe, especially when coming
from login action.
"""
ref_url = urlparse(request.host_url)
test_url = urlparse(urljoin(request.host_url, target))
return test_url.scheme in ('http', 'https') and \
ref_url.netloc == test_url.netloc
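# Note (descriptive only): is_safe_url() is applied to the `next` parameter in
# the login() view further down, so that only same-host http/https targets are
# followed after authentication; anything else triggers a 400.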
@app.route('/')
@login_required
def index():
"""Chronojump Server Home page."""
return render_template('index.html')
@app.route('/home')
def airport():
"""Airport mode."""
#stations = [ station.serialize for station in Station.query.filter(Station.type != 'S')]
stations = [ station.serialize for station in Station.query.all()]
players = [ player.serialize for player in Person.query.all()]
return render_template('airport.html', stations=stations, players=players)
@app.route('/results')
@login_required
def show_results():
user_id = current_user.id
coach = Coach.query.filter(Coach.user_id == user_id).first()
groups = []
groups_by_coach = [ g.id for g in GroupCoach.query.filter(GroupCoach.coach_id == coach.id)]
for g in Group.query.filter(Group.id.in_(groups_by_coach)):
groups.append({
'id': g.id,
'name': g.name.decode('utf-8')
})
return render_template('results.html', groups=groups, coach_id=coach.id, org_id=2)
@app.route('/sprints')
@login_required
def show_sprints():
"""Show sprints view."""
return render_template('sprints.html')
@app.route('/player_list')
def show_players():
"""Show players view."""
stations = []
for station in Station.query.filter(Station.exercises is not None ):
stations.append({
'id': station.id,
'name': station.name.decode('utf-8'),
'type': station.type
})
return render_template('player_list.html', stations=stations)
@app.route('/stations')
@login_required
def show_stations():
"""Show Stations and Exercises."""
stations = []
for station in Station.query.all():
stations.append({
'id': station.id,
'name': station.name.decode('utf-8'),
'type': station.type
})
return render_template('station_list.html', stations=stations)
def _update_player_photo(player_id, photo, previous_imageName):
"""Update the photo of the player, and return the path."""
# First remove the previous photo
if previous_imageName:
previous_path = os.path.join('chronojumpserver',
app.config['UPLOAD_FOLDER'],
previous_imageName)
# Remove if exists
if os.path.exists(previous_path):
os.unlink(previous_path)
# Set the new photo filename
new_photo = 'player_' + str(player_id) + '_' + str(int(time()))
full_path = os.path.join('chronojumpserver',
app.config['UPLOAD_FOLDER'],
new_photo)
# save the photo in the disk
photo.save(full_path)
# Update the photo in the database
db_session.query(Person).filter_by(id=player_id).update({
"imageName": new_photo
})
# Commit the changes
db_session.commit()
@app.route('/player/<player_id>', methods=['GET', 'POST'])
@login_required
def player_detail(player_id):
"""Show players detail."""
has_errors = False
msg = None
# Get the player id passed by argument
player = Person.query.filter(Person.id == player_id).first()
form = PersonForm()
if request.method == "GET":
form.fullname.data = player.name.decode('utf-8')
form.height.data = player.height
form.weight.data = player.weight
form.rfid.data = player.rfid
elif request.method == "POST":
# Save the image in photos folder
if form.validate_on_submit():
"""Form Valid. Update the player."""
# Update the player
db_session.query(Person).filter_by(id=player_id).update({
"name": form.fullname.data,
"height": form.height.data,
"weight": form.weight.data,
"rfid": form.rfid.data
})
# Commit the changes
db_session.commit()
# If a new photo has passed, update it too
# Check if a photo has been passed too
if form.photo.data:
_update_player_photo(player_id, form.photo.data, player.imageName)
# If rfid is new, add the new rfid into history table
r = RFIDHistory.query.filter(RFIDHistory.rfid == form.rfid.data).first()
if not r:
# Add this new rfid into rfidHistory table
r = RFIDHistory(rfid=form.rfid.data,
person_id=player_id)
db_session.add(r)
db_session.commit()
# Update done
msg = "Les dades del jugador %s s'han guardat correctament." % form.fullname.data
else:
# There are some errors in the form
msg = 'Hi han hagut errors, revisa el formulari.'
has_errors = True
form.photo.data = player.imageName
return render_template('player_detail.html', form=form, msg=msg,
has_errors=has_errors)
@app.route('/player/add', methods=['GET', 'POST'])
@login_required
def add_player():
"""Show form to add a new player."""
has_errors = False
msg = None
form = PersonForm()
if request.method == "POST":
if form.validate_on_submit():
"""Form is valid, add the new player."""
player = Person(
name=form.fullname.data,
height=form.height.data,
weight=form.weight.data,
rfid=form.rfid.data
)
db_session.add(player)
# Commit the changes
db_session.commit()
# If a photo has given, update after person creation
if form.photo.data:
_update_player_photo(player.id, form.photo.data, None)
# Add the rfid into rfidHistory table
r = RFIDHistory(rfid=form.rfid.data,
person_id=player.id)
db_session.add(r)
db_session.commit()
msg = "Ej jugador %s s'ha creat correctament." % (form.fullname.data,)
return redirect('/player_list')
else:
# There are some errors in the form
msg = 'Hi han hagut errors, revisa el formulari.'
has_errors = True
else:
"""To remove None default values in the form."""
form.fullname.data = ""
form.rfid.data = ""
return render_template('player_detail.html', form=form, msg=msg,
has_errors=has_errors)
@app.route('/login', methods=['GET', 'POST'])
def login():
# Here we use a class of some kind to represent and validate our
# client-side form data. For example, WTForms is a library that will
# handle this for us, and we use a custom LoginForm to validate.
form = LoginForm()
if request.method == "GET":
form.organization.data = ""
form.coach.data = ""
form.password.data = ""
if form.validate_on_submit():
import md5
username = form.coach.data
password = md5.md5(form.password.data).hexdigest()
print password
user = User.query.filter(User.username == username).first()
if user:
print "DEBUG: User %s found" % user.username
print user.password
if password == user.password:
print "DEBUG: Passwords match. Allow login"
# Login and validate the user.
# user should be an instance of your `User` class
login_user(user)
flash('Logged in successfully.')
next = request.args.get('next')
# is_safe_url should check if the url is safe for redirects.
# See http://flask.pocoo.org/snippets/62/ for an example.
if not is_safe_url(next):
return abort(400)
return redirect(next or url_for('index'))
else:
# Invalid password
error_msg = u"Contrasenya invàlida"
return render_template('login.html', form=form, error_msg=error_msg)
else:
# Invalid user
error_msg = u"El usuari %s no existeix!" % username
return render_template('login.html', form=form, error_msg=error_msg)
return render_template('login.html', form=form)
@app.route("/logout")
@login_required
def logout():
logout_user()
return redirect(url_for('login'))
# Networks changes
@app.route("/group-players")
@login_required
def show_groups_and_players():
return render_template('groups_and_players.html')
| agpl-3.0 | -1,233,406,085,584,981,500 | 33.32491 | 109 | 0.589924 | false |
f41c0r/Cipher-Frequency-Analyzer | lettercounter.py | 1 | 2928 | #!/usr/bin/env python2
#performs letter-frequency analysis on a ciphertext file and prints comparisons with English letter frequencies
import sys
import re
myFile = open(sys.argv[1],"r")
line = myFile.readline()
myDict = {}
myDict2 = {}
myDict3 = {}
totalLength = 0
freqDict = {"a" : 8.167, "b" : 1.49, "c" : 2.78, "d" : 4.253, "e" : 12.702, "f" : 2.228, "g" : 2.015, "h" : 6.094, "i" : 6.966, "j" : 0.153, "k" : 0.772, "l" : 4.025, "m" : 2.406, "n" : 6.749, "o" : 7.507, "p" : 1.929, "q" : 0.095, "r" : 5.987, "s" : 6.327, "t" : 9.056, "u" : 2.758, "v" : 0.978, "w" : 2.360, "x" : 0.150, "y" : 1.974, "z" : 0.07}
firstletterlength = 0
while line:
for i, letter in enumerate(line):
#comment out the next time if the " " is also encrypted
if letter is not " ":
if i == 0:
myDict3[letter] = 1
firstletterlength +=1
if i !=0 and line[i-1] == " ":
if letter in myDict3:
myDict3[letter] += 1
firstletterlength +=1
else:
myDict3[letter] = 1
firstletterlength +=1
if letter in myDict:
myDict[letter] += 1
else:
myDict[letter] = 1
# comment out if " " is also encrypted
totalLength += len(re.sub(" ","",line))
# comment out if " " is NOT encrypted
# totalLength += len(line)
line = myFile.readline()
myFile.close()
print
print "PERCENTAGES OF FIRST LETTERS"
print
for k in sorted(myDict3, key=lambda k: myDict3[k], reverse=True):
print (k,':', str(100 * float(float(myDict3[k]) / float(firstletterlength))))
#
print
print "TOTAL FREQUENCIES PER LETTER IN CIPHERTEXT:"
print
for letter in myDict:
print (letter,':',str(myDict[letter]))
print
print "FREQUENCIES IN CIPHERTEXT IN ALPHABETICAL ORDER WITH PERCENTAGES"
print
for letter in myDict:
myDict2[letter] = float(float(myDict[letter]) / float(totalLength)) * 100
print (letter,':',str(myDict2[letter]))
reverseFreqDict = {}
listFreq = []
print
print "FREQUENCES IN THE ENGLISH LANGUAGE IN ALPHABETICAL ORDER WITH PERCENTAGES"
print
for letter in freqDict:
print (letter, ":",str(freqDict[letter]))
reverseFreqDict[freqDict[letter]] = letter
listFreq.append(freqDict[letter])
print
print "LETTERS IN THE ENGLISH LANGUAGE IN ORDER OF FREQUENCY:"
print
listFreq = sorted(listFreq,reverse=True)
for number in listFreq:
print(reverseFreqDict[number], ":", str(number))
print
print "LETTERS IN CIPHERTEXT IN ORDER OF FREQUENCY"
print
reverseFreqDict = {}
listFreq = []
for letter in myDict2:
if myDict2[letter] not in reverseFreqDict:
reverseFreqDict[myDict2[letter]] = letter
else:
reverseFreqDict[myDict2[letter]] = reverseFreqDict[myDict2[letter]] + "," + letter
listFreq.append(myDict2[letter])
listFreq = sorted(listFreq,reverse=True)
for number in listFreq:
print(reverseFreqDict[number], ":", str(number))
| gpl-3.0 | -4,992,788,253,731,573,000 | 30.483871 | 347 | 0.614754 | false |
mhrivnak/crane | tests/test_app.py | 2 | 3074 | import logging
from flask import Flask
import mock
import unittest2
from crane import app, config, app_util, exceptions, search
from crane.search import GSA
from crane.views import v1
from . import demo_data
@mock.patch('os.environ.get', spec_set=True, return_value=demo_data.demo_config_path)
class TestCreateApp(unittest2.TestCase):
def setUp(self):
super(TestCreateApp, self).setUp()
with mock.patch('crane.app.init_logging') as mock_init_logging:
self.app = app.create_app()
# hold this so one of the tests can inspect it
self.mock_init_logging = mock_init_logging
def test_returns_app(self, mock_environ_get):
self.assertIsInstance(self.app, Flask)
def test_loads_config(self, mock_environ_get):
self.assertTrue(config.KEY_DATA_DIR in self.app.config)
def test_blueprints_loaded(self, mock_environ_get):
self.assertTrue(v1.section.name in self.app.blueprints)
def test_handlers_added(self, mock_environ_get):
handlers = self.app.error_handler_spec[None][None]
self.assertEquals(handlers[0], (exceptions.HTTPError,
app_util.http_error_handler))
def test_calls_init_logging(self, mock_environ_get):
self.mock_init_logging.assert_called_once_with()
def test_calls_search(self, mock_environ_get):
# reset to the default state
search.backend = search.SearchBackend()
# run the "create_app", which because of the mock_environ_get, will load
# our demo config. That config has GSA info.
with mock.patch('crane.app.init_logging'):
app.create_app()
# this will only be true if the search config was parsed
self.assertIsInstance(search.backend, GSA)
@mock.patch('logging.Logger.addHandler', spec_set=True)
class TestInitLogging(unittest2.TestCase):
def test_adds_handler(self, mock_add_handler):
app.create_app()
# make sure it was called
self.assertEqual(mock_add_handler.call_count, 1)
# make sure the first argument is the right type
self.assertIsInstance(mock_add_handler.call_args[0][0], logging.Handler)
# make sure the first argument was the only argument
mock_add_handler.assert_called_once_with(mock_add_handler.call_args[0][0])
@mock.patch('logging.Logger.setLevel', spec_set=True)
class TestSetLogLevel(unittest2.TestCase):
def setUp(self):
super(TestSetLogLevel, self).setUp()
with mock.patch('crane.app.init_logging') as mock_init_logging:
self.app = app.create_app()
def test_debug(self, mock_set_level):
self.app.config['DEBUG'] = True
app.set_log_level(self.app)
# make sure it set the level to debug
mock_set_level.assert_called_once_with(logging.DEBUG)
def test_not_debug(self, mock_set_level):
self.app.config['DEBUG'] = False
app.set_log_level(self.app)
# make sure it did not change the log level
self.assertEqual(mock_set_level.call_count, 0)
| gpl-2.0 | -1,201,378,466,537,246,000 | 35.164706 | 85 | 0.670787 | false |
guydavis/lane-detect | older/lane_detect.v2.py | 1 | 6587 | #
# Attempting to replicate lane detection results described in this tutorial by Naoki Shibuya:
# https://medium.com/towards-data-science/finding-lane-lines-on-the-road-30cf016a1165
# For more see: https://github.com/naokishibuya/car-finding-lane-lines
#
# This 2nd version does a much better job of processing images.
#
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import math
import sys
import subprocess
import os
import shutil
def convert_hls(image):
return cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
def select_white_yellow(image):
converted = convert_hls(image)
lower = np.uint8([ 0, 200, 0])
upper = np.uint8([255, 255, 255])
white_mask = cv2.inRange(converted, lower, upper)
lower = np.uint8([ 10, 0, 100])
upper = np.uint8([ 40, 255, 255])
yellow_mask = cv2.inRange(converted, lower, upper)
mask = cv2.bitwise_or(white_mask, yellow_mask)
return cv2.bitwise_and(image, image, mask = mask)
def convert_gray_scale(image):
return cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
def apply_smoothing(image, kernel_size=15):
return cv2.GaussianBlur(image, (kernel_size, kernel_size), 0)
def detect_edges(image, low_threshold=50, high_threshold=150):
return cv2.Canny(image, low_threshold, high_threshold)
def filter_region(image, vertices):
mask = np.zeros_like(image)
if len(mask.shape)==2:
cv2.fillPoly(mask, vertices, 255)
else:
cv2.fillPoly(mask, vertices, (255,)*mask.shape[2])
return cv2.bitwise_and(image, mask)
def select_region(image):
rows, cols = image.shape[:2]
bottom_left = [cols*0.1, rows*0.95]
top_left = [cols*0.4, rows*0.6]
bottom_right = [cols*0.9, rows*0.95]
top_right = [cols*0.6, rows*0.6]
vertices = np.array([[bottom_left, top_left, top_right, bottom_right]], dtype=np.int32)
return filter_region(image, vertices)
def hough_lines(image):
return cv2.HoughLinesP(image, rho=1, theta=np.pi/180, threshold=20, minLineLength=20, maxLineGap=300)
def average_slope_intercept(lines):
    # Split the Hough segments into left (negative slope) and right (positive
    # slope) groups, then average each group's (slope, intercept) weighted by
    # segment length so longer segments dominate the fitted lane line.
    left_lines = []
    left_weights = []
    right_lines = []
    right_weights = []
for line in lines:
for x1, y1, x2, y2 in line:
if x2==x1:
continue
slope = (y2-y1)/(x2-x1)
intercept = y1 - slope*x1
length = np.sqrt((y2-y1)**2+(x2-x1)**2)
if slope < 0:
left_lines.append((slope, intercept))
left_weights.append((length))
else:
right_lines.append((slope, intercept))
right_weights.append((length))
left_lane = np.dot(left_weights, left_lines) /np.sum(left_weights) if len(left_weights) >0 else None
right_lane = np.dot(right_weights, right_lines)/np.sum(right_weights) if len(right_weights)>0 else None
return left_lane, right_lane
def make_line_points(y1, y2, line):
if line is None:
return None
slope, intercept = line
x1 = int((y1 - intercept)/slope)
x2 = int((y2 - intercept)/slope)
y1 = int(y1)
y2 = int(y2)
return ((x1, y1), (x2, y2))
def lane_lines(image, lines):
left_lane, right_lane = average_slope_intercept(lines)
y1 = image.shape[0]
y2 = y1*0.6
left_line = make_line_points(y1, y2, left_lane)
right_line = make_line_points(y1, y2, right_lane)
return left_line, right_line
def draw_lane_lines(image, lines, color=[255, 0, 0], thickness=20):
line_image = np.zeros_like(image)
for line in lines:
if line is not None:
cv2.line(line_image, *line, color, thickness)
return cv2.addWeighted(image, 1.0, line_image, 0.95, 0.0)
def mark_failed(image):
font = cv2.FONT_HERSHEY_SIMPLEX
text = "DETECT FAILED!"
textsize = cv2.getTextSize(text, font, 2, 5)[0]
textX = int((image.shape[1] - textsize[0]) / 2)
textY = int((image.shape[0] + textsize[1]) / 2)
cv2.putText(image, text, (textX, textY), font, 2, (255, 0, 0), 5)
return image
def process_image(dirpath, image_file):
if not os.path.exists('tmp'):
os.mkdir('tmp')
if not os.path.exists('output'):
os.makedirs('output')
image_name = os.path.splitext(image_file)[0]
# First load and show the sample image
image = mpimg.imread("{0}/{1}".format(dirpath, image_file))
im = plt.imshow(image)
plt.savefig('tmp/1.png')
# Now select the white and yellow lines
white_yellow = select_white_yellow(image)
im = plt.imshow(white_yellow, cmap='gray')
plt.savefig('tmp/2.png')
# Now convert to grayscale
gray_scale = convert_gray_scale(white_yellow)
im = plt.imshow(gray_scale, cmap='gray')
plt.savefig('tmp/3.png')
# Then apply a Gaussian blur
blurred_image = apply_smoothing(gray_scale)
im = plt.imshow(blurred_image, cmap='gray')
plt.savefig('tmp/4.png')
# Detect line edges
edged_image = detect_edges(blurred_image)
im = plt.imshow(edged_image, cmap='gray')
plt.savefig('tmp/5.png')
# Now ignore all but the area of interest
masked_image = select_region(edged_image)
im = plt.imshow(masked_image, cmap='gray')
plt.savefig('tmp/6.png')
# Apply Houghed lines algorithm
houghed_lines = hough_lines(masked_image)
if houghed_lines is not None:
houghed_image = draw_lane_lines(image, lane_lines(image, houghed_lines))
im = plt.imshow(houghed_image, cmap='gray')
output_name = "output/{0}_passed.gif".format(image_name)
print("Detected lanes in '{0}/{1}'. See result in '{2}'.".format(dirpath, image_file, output_name))
else:
im = plt.imshow(mark_failed(image), cmap='gray')
output_name = "output/{0}_failed.gif".format(image_name)
print("Failed detection in '{0}/{1}'. See result in '{2}'.".format(dirpath, image_file, output_name))
plt.savefig('tmp/7.png')
# Repeat last image in the loop a couple of times.
plt.savefig('tmp/8.png')
plt.savefig('tmp/9.png')
# Now generate an animated gif of the image stages
subprocess.call( ['convert', '-delay', '100', '-loop', '0', 'tmp/*.png', output_name] )
shutil.rmtree('tmp')
if __name__ == "__main__":
if len(sys.argv) == 1:
print("Usage: python3 ./lane_detect.py images/*")
else:
for arg in sys.argv[1:]:
if not os.path.isfile(arg):
print("Not a file: {0}".format(arg))
else:
dirpath,filename = os.path.split(arg)
process_image(dirpath, filename) | mit | 2,390,023,151,074,185,000 | 34.610811 | 109 | 0.627752 | false |
hrantzsch/signature-verification | tools/mkdata_background.py | 1 | 3585 | """
This script is used to create a training database based on GPDSSynth signatures.
Images are scaled to 192x96;
Paper-like backgrounds are added
"""
import argparse
import numpy as np
from scipy.misc import imread, imresize, imshow, imsave
from PIL import Image
import os
from skimage.transform import rotate
import time
import prepimage
def load_backgrounds(folder):
"""read image file and convert to grayscale"""
return [imresize(
np.dot(imread(os.path.join(folder, bg_file))[..., :3],
               [0.299, 0.587, 0.114]),  # ITU-R BT.601 luma coefficients
0.5)
for bg_file in os.listdir(folder)
if '.jpg' in bg_file or '.png' in bg_file]
def get_background(img, size):
"""crop a random piece of desired size from the given image"""
y = np.random.randint(0, img.shape[0]-size[0])
x = np.random.randint(0, img.shape[1]-size[1])
return imresize(img[y:y+size[0], x:x+size[1]], (size[0], size[1]))
def get_signatures(data_dir, no_forgeries=False):
for (path, _, files) in os.walk(data_dir):
for f in files:
if '.png' in f and not (no_forgeries and 'cf' in f):
yield os.path.join(path, f)
def get_signatures_(data_dir, no_forgeries=False):
for f in os.listdir(data_dir):
if '.png' in f and not (no_forgeries and 'cf' in f):
yield os.path.join(data_dir, f)
def get_roi(image, pad=20):
roix, roiy = prepimage.min_max(prepimage.binarize(image))
roix = (max(0, roix[0] - pad), min(roix[1] + pad, image.shape[1]))
roiy = (max(0, roiy[0] - pad), min(roiy[1] + pad, image.shape[0]))
return roiy, roix
def process_signature(sig_path):
sig = imread(sig_path).astype(np.float32) / 255.0
sig = rotate(sig, np.random.randint(-25, 25), cval=1.0, resize=True)
roiy, roix = get_roi(sig)
shape = (roiy[1] - roiy[0], roix[1] - roix[0])
bg = get_background(np.random.choice(backgrounds), shape).astype(np.float32) / 255.0
img = bg + sig[roiy[0]:roiy[1], roix[0]:roix[1]]
img = imresize(img, target_size, mode='L').astype(np.float32)
img *= 1.0/img.max()
# return np.minimum(img, 1.0)
return img
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('signatures',
help='Path to extracted GPDS data')
parser.add_argument('backgrounds',
help='Path to background files (jpg or png)')
parser.add_argument('--out', '-o', default='images',
help='Path to save output images')
parser.add_argument('--start', '-s', default=1, type=int,
help='User to start with (for resumes)')
args = parser.parse_args()
target_size = (384, 768)
# signatures = list(get_signatures(args.signatures))
backgrounds = load_backgrounds(args.backgrounds)
print("Loaded {} backgrounds".format(len(backgrounds)))
for user in range(args.start, 20):
user_str = "{}".format(user)
print("processing user " + user_str)
os.makedirs(os.path.join(args.out, user_str), exist_ok=True)
count = 0
start = time.clock()
for sig in get_signatures_(os.path.join(args.signatures, user_str)):
fname, _ = os.path.splitext(os.path.basename(sig))
for i in range(1, 21):
outname = os.path.join(args.out, user_str, "{}-{:02d}.png".format(fname, i))
imsave(outname, process_signature(sig), 'png')
count += 1
print("{} images in {:3f} sec".format(count, time.clock() - start))
| gpl-3.0 | 3,424,798,210,652,326,400 | 35.212121 | 92 | 0.600558 | false |
runt18/nupic | examples/prediction/experiments/confidenceTest/base/description.py | 1 | 13918 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import imp
from nupic.encoders import (LogEncoder,
DateEncoder,
MultiEncoder,
CategoryEncoder,
SDRCategoryEncoder,
ScalarEncoder)
from nupic.data.file_record_stream import FileRecordStream
from nupic.frameworks.prediction.callbacks import (printSPCoincidences,
printTPCells,
printTPTiming,
displaySPCoincidences,
setAttribute,
sensorRewind,
sensorOpen)
from nupic.frameworks.prediction.helpers import updateConfigFromSubConfig
# ----------------------------------------------------------------------
# Define this experiment's base configuration, and adjust for any modifications
# if imported from a sub-experiment.
config = dict(
sensorVerbosity = 0,
spVerbosity = 0,
tpVerbosity = 0,
ppVerbosity = 0,
dataSetPackage = None, # This can be specified in place of the next 6:
filenameTrain = 'confidence/confidence1.csv',
filenameTest = 'confidence/confidence1.csv',
filenameCategory = None,
dataGenScript = None,
dataDesc = None,
dataGenNumCategories = None,
dataGenNumTraining = None,
dataGenNumTesting = None,
noiseAmts = [],
iterationCountTrain = None,
iterationCountTest = None,
evalTrainingSetNumIterations = 10000, # Set to 0 to disable completely
trainSP = True,
trainTP = True,
trainTPRepeats = 1,
computeTopDown = 1,
# Encoder
overlappingPatterns = 0,
# SP params
disableSpatial = 1,
spPrintPeriodicStats = 0, # An integer N: print stats every N iterations
spCoincCount = 200,
spNumActivePerInhArea = 3,
# TP params
tpNCellsPerCol = 20,
tpInitialPerm = 0.6,
tpPermanenceInc = 0.1,
tpPermanenceDec = 0.000,
tpGlobalDecay = 0.0,
tpPAMLength = 1,
tpMaxSeqLength = 0,
tpMaxAge = 1,
tpTimingEvery = 0,
temporalImp = 'cpp',
)
updateConfigFromSubConfig(config)
# ==========================================================================
# Was a complete dataset package specified? This is an alternate way to
# specify a bunch of dataset related config parameters at once. They are
# especially helpful when running permutations - it keeps the permutations
# directory names shorter.
if config['dataSetPackage'] is not None:
assert (config['filenameTrain'] == 'confidence/confidence1.csv')
assert (config['filenameTest'] == 'confidence/confidence1.csv')
assert (config['filenameCategory'] is None)
assert (config['dataGenScript'] is None)
assert (config['dataDesc'] is None)
assert (config['dataGenNumCategories'] is None)
assert (config['dataGenNumTraining'] is None)
assert (config['dataGenNumTesting'] is None)
if config['dataSetPackage'] == 'firstOrder':
config['filenameTrain'] = 'extra/firstOrder/fo_1000_10_train_resets.csv'
config['filenameTest'] = 'extra/firstOrder/fo_10000_10_test_resets.csv'
config['filenameCategory'] = 'extra/firstOrder/categories.txt'
elif config['dataSetPackage'] == 'secondOrder0':
config['filenameTrain'] = None
config['filenameTest'] = None
config['filenameCategory'] = None
config['dataGenScript'] = 'extra/secondOrder/makeDataset.py'
config['dataDesc'] = 'model0'
config['dataGenNumCategories'] = 20
config['dataGenNumTraining'] = 5000
config['dataGenNumTesting'] = 1000
elif config['dataSetPackage'] == 'secondOrder1':
config['filenameTrain'] = None
config['filenameTest'] = None
config['filenameCategory'] = None
config['dataGenScript'] = 'extra/secondOrder/makeDataset.py'
config['dataDesc'] = 'model1'
config['dataGenNumCategories'] = 25
config['dataGenNumTraining'] = 5000
config['dataGenNumTesting'] = 1000
elif config['dataSetPackage'] == 'secondOrder2':
config['filenameTrain'] = None
config['filenameTest'] = None
config['filenameCategory'] = None
config['dataGenScript'] = 'extra/secondOrder/makeDataset.py'
config['dataDesc'] = 'model2'
config['dataGenNumCategories'] = 5
config['dataGenNumTraining'] = 5000
config['dataGenNumTesting'] = 1000
else:
assert False
def getBaseDatasets():
datasets = dict()
for name in ['filenameTrain', 'filenameTest', 'filenameCategory',
'dataGenScript']:
if config[name] is not None:
datasets[name] = config[name]
return datasets
def getDatasets(baseDatasets, generate=False):
# nothing to generate if no script
if not 'dataGenScript' in baseDatasets:
return baseDatasets
# -------------------------------------------------------------------
# Form the path to each dataset
datasets = dict(baseDatasets)
dataPath = os.path.dirname(baseDatasets['dataGenScript'])
# At some point, this prefix will be modified to be unique for each
# possible variation of parameters into the data generation script.
prefix = '{0!s}'.format((config['dataDesc']))
datasets['filenameTrain'] = os.path.join(dataPath,
'{0!s}_train.csv'.format(prefix))
datasets['filenameTest'] = os.path.join(dataPath,
'{0!s}_test.csv'.format(prefix))
datasets['filenameCategory'] = os.path.join(dataPath,
'{0!s}_categories.txt'.format(prefix))
if not generate:
return datasets
# -------------------------------------------------------------------
# Generate our data
makeDataset = imp.load_source('makeDataset', baseDatasets['dataGenScript'])
makeDataset.generate(model = config['dataDesc'],
filenameTrain = datasets['filenameTrain'],
filenameTest = datasets['filenameTest'],
filenameCategory = datasets['filenameCategory'],
numCategories=config['dataGenNumCategories'],
numTrainingRecords=config['dataGenNumTraining'],
numTestingRecords=config['dataGenNumTesting'],
numNoise=0, resetsEvery=None)
return datasets
def getDescription(datasets):
# ========================================================================
# Network definition
# Encoder for the sensor
encoder = MultiEncoder()
if 'filenameCategory' in datasets:
categories = [x.strip() for x in
open(datasets['filenameCategory']).xreadlines()]
else:
categories = [chr(x+ord('a')) for x in range(26)]
if config['overlappingPatterns']:
encoder.addEncoder("name", SDRCategoryEncoder(n=200,
w=config['spNumActivePerInhArea'], categoryList=categories, name="name"))
else:
encoder.addEncoder("name", CategoryEncoder(w=config['spNumActivePerInhArea'],
categoryList=categories, name="name"))
# ------------------------------------------------------------------
# Node params
# The inputs are long, horizontal vectors
inputDimensions = (1, encoder.getWidth())
# Layout the coincidences vertically stacked on top of each other, each
# looking at the entire input field.
columnDimensions = (config['spCoincCount'], 1)
# If we have disableSpatial, then set the number of "coincidences" to be the
# same as the encoder width
if config['disableSpatial']:
columnDimensions = (encoder.getWidth(), 1)
config['trainSP'] = 0
sensorParams = dict(
# encoder/datasource are not parameters so don't include here
verbosity=config['sensorVerbosity']
)
CLAParams = dict(
# SP params
disableSpatial = config['disableSpatial'],
inputDimensions = inputDimensions,
columnDimensions = columnDimensions,
potentialRadius = inputDimensions[1]/2,
potentialPct = 1.00,
gaussianDist = 0,
commonDistributions = 0, # should be False if possibly not training
localAreaDensity = -1, #0.05,
numActiveColumnsPerInhArea = config['spNumActivePerInhArea'],
dutyCyclePeriod = 1000,
stimulusThreshold = 1,
synPermInactiveDec=0.11,
synPermActiveInc=0.11,
synPermActiveSharedDec=0.0,
synPermOrphanDec = 0.0,
minPctDutyCycleBeforeInh = 0.001,
minPctDutyCycleAfterInh = 0.001,
spVerbosity = config['spVerbosity'],
spSeed = 1,
printPeriodicStats = int(config['spPrintPeriodicStats']),
# TP params
tpSeed = 1,
disableTemporal = 0 if config['trainTP'] else 1,
temporalImp = config['temporalImp'],
nCellsPerCol = config['tpNCellsPerCol'] if config['trainTP'] else 1,
collectStats = 1,
burnIn = 2,
verbosity = config['tpVerbosity'],
newSynapseCount = config['spNumActivePerInhArea'],
minThreshold = config['spNumActivePerInhArea'],
activationThreshold = config['spNumActivePerInhArea'],
initialPerm = config['tpInitialPerm'],
connectedPerm = 0.5,
permanenceInc = config['tpPermanenceInc'],
permanenceDec = config['tpPermanenceDec'], # perhaps tune this
globalDecay = config['tpGlobalDecay'],
pamLength = config['tpPAMLength'],
maxSeqLength = config['tpMaxSeqLength'],
maxAge = config['tpMaxAge'],
# General params
computeTopDown = config['computeTopDown'],
trainingStep = 'spatial',
)
dataSource = FileRecordStream(datasets['filenameTrain'])
description = dict(
options = dict(
logOutputsDuringInference = False,
),
network = dict(
sensorDataSource = dataSource,
sensorEncoder = encoder,
sensorParams = sensorParams,
CLAType = 'py.CLARegion',
CLAParams = CLAParams,
classifierType = None,
classifierParams = None),
)
if config['trainSP']:
description['spTrain'] = dict(
iterationCount=config['iterationCountTrain'],
#iter=displaySPCoincidences(50),
#finish=printSPCoincidences()
),
else:
description['spTrain'] = dict(
# need to train with one iteration just to initialize data structures
iterationCount=1)
if config['trainTP']:
description['tpTrain'] = []
for i in xrange(config['trainTPRepeats']):
stepDict = dict(name='step_{0:d}'.format((i)),
setup=sensorRewind,
iterationCount=config['iterationCountTrain'],
)
if config['tpTimingEvery'] > 0:
stepDict['iter'] = printTPTiming(config['tpTimingEvery'])
stepDict['finish'] = [printTPTiming(), printTPCells]
description['tpTrain'].append(stepDict)
# ----------------------------------------------------------------------------
# Inference tests
inferSteps = []
if config['evalTrainingSetNumIterations'] > 0:
# The training set. Used to train the n-grams.
inferSteps.append(
dict(name = 'confidenceTrain_baseline',
iterationCount = min(config['evalTrainingSetNumIterations'],
config['iterationCountTrain']),
ppOptions = dict(verbosity=config['ppVerbosity'],
printLearnedCoincidences=True,
nGrams='train',
#ipsDetailsFor = "name,None,2",
),
#finish=printTPCells,
)
)
# Testing the training set on both the TP and n-grams.
inferSteps.append(
dict(name = 'confidenceTrain_nonoise',
iterationCount = min(config['evalTrainingSetNumIterations'],
config['iterationCountTrain']),
setup = [sensorOpen(datasets['filenameTrain'])],
ppOptions = dict(verbosity=config['ppVerbosity'],
printLearnedCoincidences=False,
nGrams='test',
burnIns = [1,2,3,4],
#ipsDetailsFor = "name,None,2",
#ipsAt = [1,2,3,4],
),
)
)
# The test set
if True:
if datasets['filenameTest'] != datasets['filenameTrain']:
inferSteps.append(
dict(name = 'confidenceTest_baseline',
iterationCount = config['iterationCountTest'],
setup = [sensorOpen(datasets['filenameTest'])],
ppOptions = dict(verbosity=config['ppVerbosity'],
printLearnedCoincidences=False,
nGrams='test',
burnIns = [1,2,3,4],
#ipsAt = [1,2,3,4],
ipsDetailsFor = "name,None,2",
),
)
)
description['infer'] = inferSteps
return description
| agpl-3.0 | -5,982,191,251,469,909,000 | 33.969849 | 82 | 0.596709 | false |
crDDI/dbgap | tests/test_file_downloader.py | 1 | 2787 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Mayo Clinic
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the <ORGANIZATION> nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import unittest
import shutil
from dbgap.file_downloader import FileDownloader
single_file_template = 'dbgap/studies/%(study)s/%(fullname)s/GapExchange_%(fullname)s.xml'
directory_template = '/dbgap/studies/%(study)s/%(fullname)s/pheno_variable_summaries'
class FileDownloaderTestCase(unittest.TestCase):
def test_dowload_single_file(self):
study = 'phs001007'
fullname = study + ".v1.p1"
dld = FileDownloader('ftp.ncbi.nlm.nih.gov')
self.assertEqual(open(os.path.join('data', 'phs001007.xml')).read(),
dld.download_file(single_file_template % dict(study=study, fullname=fullname)))
def test_dir_download(self):
test_dir = os.path.join('data', 'dltest')
shutil.rmtree(test_dir, ignore_errors=True)
os.makedirs(test_dir)
study = 'phs000722'
fullname = study + ".v1.p1"
dld = FileDownloader('ftp.ncbi.nlm.nih.gov')
self.assertEqual(4, dld.download_dir(directory_template % dict(study=study, fullname=fullname), test_dir,
name_map=lambda s: s.replace('.xml', '.tst'), file_filtr=lambda s: 'data_dict' in s))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 7,975,316,140,981,146,000 | 47.051724 | 113 | 0.717259 | false |
perclasson/trail | trail/main.py | 1 | 4059 | """Implements a wrapper script for executing a Python program from the
command line.
The wrapper script works by adding a special directory into the
'PYTHONPATH' environment variable, describing additional Python module
search directories, which contains a custom 'sitecustomize' module. When
the Python interpreter is started that custom 'sitecustomize' module
will be automatically loaded. This allows the custom 'sitecustomize'
file to then load any original 'sitecustomize' file which may have been
hidden and then bootstrap the registration of the post import hook
callback functions."""
import sys
import os
import time
_debug = os.environ.get(
'TRAIL_DEBUG', 'off').lower() in ('on', 'true', '1')
def log_message(text, *args):
if _debug:
text = text % args
timestamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
print('TRAIL: %s (%d) - %s' % (timestamp, os.getpid(), text))
def run_program(args):
log_message('trail - wrapper (%s)', __file__)
log_message('working_directory = %r', os.getcwd())
log_message('current_command = %r', sys.argv)
log_message('sys.prefix = %r', os.path.normpath(sys.prefix))
try:
log_message('sys.real_prefix = %r', sys.real_prefix)
except AttributeError:
pass
log_message('sys.version_info = %r', sys.version_info)
log_message('sys.executable = %r', sys.executable)
log_message('sys.flags = %r', sys.flags)
log_message('sys.path = %r', sys.path)
# Determine the location of the special bootstrap directory. Add
# this into the 'PYTHONPATH' environment variable, preserving any
# existing value the 'PYTHONPATH' environment variable may have.
root_directory = os.path.dirname(__file__)
boot_directory = os.path.join(root_directory, '__startup__')
log_message('root_directory = %r', root_directory)
log_message('boot_directory = %r', boot_directory)
python_path = boot_directory
if 'PYTHONPATH' in os.environ:
path = os.environ['PYTHONPATH'].split(os.path.pathsep)
if boot_directory not in path:
python_path = "%s%s%s" % (
boot_directory,
os.path.pathsep,
os.environ['PYTHONPATH']
)
os.environ['PYTHONPATH'] = python_path
# Set special environment variables which record the location of the
# Python installation or virtual environment being used as well as
# the Python version. The values of these are compared in the
# 'sitecustomize' module with the values for the Python interpreter
# which is later executed by the wrapper. If they don't match then
# nothing will be done. This check is made as using the wrapper
# script from one Python installation around 'python' executing from
# a different installation can cause problems.
os.environ['TRAIL_PYTHON_PREFIX'] = os.path.realpath(
os.path.normpath(sys.prefix))
os.environ['TRAIL_PYTHON_VERSION'] = '.'.join(
map(str, sys.version_info[:2]))
# Now launch the wrapped program. If the program to run was not an
# absolute or relative path then we need to search the directories
# specified in the 'PATH' environment variable to try and work out
# where it is actually located.
program_exe_path = args[0]
if not os.path.dirname(program_exe_path):
program_search_path = os.environ.get(
'PATH', '').split(os.path.pathsep)
for path in program_search_path:
path = os.path.join(path, program_exe_path)
if os.path.exists(path) and os.access(path, os.X_OK):
program_exe_path = path
break
log_message('program_exe_path = %r', program_exe_path)
log_message('execl_arguments = %r', [program_exe_path]+args)
os.execl(program_exe_path, *args)
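# A minimal sketch (illustrative only, never called by this wrapper) of the
# PYTHONPATH manipulation described in the module docstring: prepend the
# bootstrap directory so its custom 'sitecustomize' is found first, while
# preserving any entries that were already present.
def _example_prepend_python_path(boot_directory, environ):
    existing = environ.get('PYTHONPATH', '')
    entries = existing.split(os.path.pathsep) if existing else []
    if boot_directory not in entries:
        environ['PYTHONPATH'] = os.path.pathsep.join([boot_directory] + entries)
    return environ.get('PYTHONPATH')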
def main():
if len(sys.argv) <= 1:
sys.exit('Usage: %s program [options]' % os.path.basename(
sys.argv[0]))
run_program(sys.argv[1:])
if __name__ == '__main__':
main()
| bsd-2-clause | -460,797,675,587,299,260 | 34.605263 | 72 | 0.654348 | false |
Einsteinish/PyTune3 | apps/reader/models.py | 1 | 71869 | import datetime
import time
import re
import redis
from collections import defaultdict
from operator import itemgetter
from pprint import pprint
from utils import log as logging
from utils import json_functions as json
from django.db import models, IntegrityError
from django.db.models import Q, F
from django.db.models import Count
from django.conf import settings
from django.contrib.auth.models import User
from django.core.cache import cache
from django.template.defaultfilters import slugify
from mongoengine.queryset import OperationError
from mongoengine.queryset import NotUniqueError
from apps.reader.managers import UserSubscriptionManager
from apps.rss_feeds.models import Feed, MStory, DuplicateFeed
from apps.rss_feeds.tasks import NewFeeds
from apps.analyzer.models import MClassifierFeed, MClassifierAuthor, MClassifierTag, MClassifierTitle
from apps.analyzer.models import apply_classifier_titles, apply_classifier_feeds, apply_classifier_authors, apply_classifier_tags
from apps.analyzer.tfidf import tfidf
from utils.feed_functions import add_object_to_folder, chunks
class UserSubscription(models.Model):
"""
A feed which a user has subscribed to. Carries all of the cached information
about the subscription, including unread counts of the three primary scores.
Also has a dirty flag (needs_unread_recalc) which means that the unread counts
are not accurate and need to be calculated with `self.calculate_feed_scores()`.
"""
UNREAD_CUTOFF = datetime.datetime.utcnow() - datetime.timedelta(days=settings.DAYS_OF_UNREAD)
user = models.ForeignKey(User, related_name='subscriptions')
feed = models.ForeignKey(Feed, related_name='subscribers')
user_title = models.CharField(max_length=255, null=True, blank=True)
active = models.BooleanField(default=False)
last_read_date = models.DateTimeField(default=UNREAD_CUTOFF)
mark_read_date = models.DateTimeField(default=UNREAD_CUTOFF)
unread_count_neutral = models.IntegerField(default=0)
unread_count_positive = models.IntegerField(default=0)
unread_count_negative = models.IntegerField(default=0)
unread_count_updated = models.DateTimeField(default=datetime.datetime.now)
oldest_unread_story_date = models.DateTimeField(default=datetime.datetime.now)
needs_unread_recalc = models.BooleanField(default=False)
feed_opens = models.IntegerField(default=0)
is_trained = models.BooleanField(default=False)
objects = UserSubscriptionManager()
def __unicode__(self):
return '[%s (%s): %s (%s)] ' % (self.user.username, self.user.pk,
self.feed.feed_title, self.feed.pk)
class Meta:
unique_together = ("user", "feed")
def canonical(self, full=False, include_favicon=True, classifiers=None):
feed = self.feed.canonical(full=full, include_favicon=include_favicon)
feed['feed_title'] = self.user_title or feed['feed_title']
feed['ps'] = self.unread_count_positive
feed['nt'] = self.unread_count_neutral
feed['ng'] = self.unread_count_negative
feed['active'] = self.active
feed['feed_opens'] = self.feed_opens
feed['subscribed'] = True
if classifiers:
feed['classifiers'] = classifiers
return feed
def save(self, *args, **kwargs):
user_title_max = self._meta.get_field('user_title').max_length
if self.user_title and len(self.user_title) > user_title_max:
self.user_title = self.user_title[:user_title_max]
try:
super(UserSubscription, self).save(*args, **kwargs)
except IntegrityError:
duplicate_feeds = DuplicateFeed.objects.filter(duplicate_feed_id=self.feed_id)
for duplicate_feed in duplicate_feeds:
already_subscribed = UserSubscription.objects.filter(user=self.user, feed=duplicate_feed.feed)
if not already_subscribed:
self.feed = duplicate_feed.feed
super(UserSubscription, self).save(*args, **kwargs)
break
else:
if self: self.delete()
@classmethod
def subs_for_feeds(cls, user_id, feed_ids=None, read_filter="unread"):
usersubs = cls.objects
if read_filter == "unread":
usersubs = usersubs.filter(Q(unread_count_neutral__gt=0) |
Q(unread_count_positive__gt=0))
if not feed_ids:
usersubs = usersubs.filter(user=user_id,
active=True).only('feed', 'mark_read_date', 'is_trained')
else:
usersubs = usersubs.filter(user=user_id,
active=True,
feed__in=feed_ids).only('feed', 'mark_read_date', 'is_trained')
return usersubs
@classmethod
def story_hashes(cls, user_id, feed_ids=None, usersubs=None, read_filter="unread", order="newest",
include_timestamps=False, group_by_feed=True, cutoff_date=None,
across_all_feeds=True):
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
pipeline = r.pipeline()
story_hashes = {} if group_by_feed else []
if not feed_ids and not across_all_feeds:
return story_hashes
if not usersubs:
usersubs = cls.subs_for_feeds(user_id, feed_ids=feed_ids, read_filter=read_filter)
feed_ids = [sub.feed_id for sub in usersubs]
if not feed_ids:
return story_hashes
current_time = int(time.time() + 60*60*24)
if not cutoff_date:
cutoff_date = datetime.datetime.now() - datetime.timedelta(days=settings.DAYS_OF_STORY_HASHES)
unread_timestamp = int(time.mktime(cutoff_date.timetuple()))-1000
feed_counter = 0
read_dates = dict()
for us in usersubs:
read_dates[us.feed_id] = int(max(us.mark_read_date, cutoff_date).strftime('%s'))
for feed_id_group in chunks(feed_ids, 20):
pipeline = r.pipeline()
for feed_id in feed_id_group:
stories_key = 'F:%s' % feed_id
sorted_stories_key = 'zF:%s' % feed_id
read_stories_key = 'RS:%s:%s' % (user_id, feed_id)
unread_stories_key = 'U:%s:%s' % (user_id, feed_id)
unread_ranked_stories_key = 'zU:%s:%s' % (user_id, feed_id)
expire_unread_stories_key = False
max_score = current_time
if read_filter == 'unread':
# +1 for the intersection b/w zF and F, which carries an implicit score of 1.
min_score = read_dates[feed_id] + 1
pipeline.sdiffstore(unread_stories_key, stories_key, read_stories_key)
expire_unread_stories_key = True
else:
min_score = 0
unread_stories_key = stories_key
if order == 'oldest':
byscorefunc = pipeline.zrangebyscore
else:
byscorefunc = pipeline.zrevrangebyscore
min_score, max_score = max_score, min_score
pipeline.zinterstore(unread_ranked_stories_key, [sorted_stories_key, unread_stories_key])
byscorefunc(unread_ranked_stories_key, min_score, max_score, withscores=include_timestamps)
pipeline.delete(unread_ranked_stories_key)
if expire_unread_stories_key:
pipeline.delete(unread_stories_key)
results = pipeline.execute()
for hashes in results:
if not isinstance(hashes, list): continue
if group_by_feed:
story_hashes[feed_ids[feed_counter]] = hashes
feed_counter += 1
else:
story_hashes.extend(hashes)
return story_hashes
def get_stories(self, offset=0, limit=6, order='newest', read_filter='all', withscores=False,
hashes_only=False, cutoff_date=None, default_cutoff_date=None):
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
rt = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_TEMP_POOL)
ignore_user_stories = False
stories_key = 'F:%s' % (self.feed_id)
read_stories_key = 'RS:%s:%s' % (self.user_id, self.feed_id)
unread_stories_key = 'U:%s:%s' % (self.user_id, self.feed_id)
unread_ranked_stories_key = 'z%sU:%s:%s' % ('h' if hashes_only else '',
self.user_id, self.feed_id)
if withscores or not offset or not rt.exists(unread_ranked_stories_key):
rt.delete(unread_ranked_stories_key)
if not r.exists(stories_key):
# print " ---> No stories on feed: %s" % self
return []
elif read_filter == 'all' or not r.exists(read_stories_key):
ignore_user_stories = True
unread_stories_key = stories_key
else:
r.sdiffstore(unread_stories_key, stories_key, read_stories_key)
sorted_stories_key = 'zF:%s' % (self.feed_id)
r.zinterstore(unread_ranked_stories_key, [sorted_stories_key, unread_stories_key])
if not ignore_user_stories:
r.delete(unread_stories_key)
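# Copy the ranked unread set into the temp Redis pool with a one-hour TTL so later pages
# (offset > 0) can reuse it without recomputing.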
dump = r.dump(unread_ranked_stories_key)
if dump:
pipeline = rt.pipeline()
pipeline.delete(unread_ranked_stories_key)
pipeline.restore(unread_ranked_stories_key, 1*60*60*1000, dump)
pipeline.execute()
r.delete(unread_ranked_stories_key)
current_time = int(time.time() + 60*60*24)
if not cutoff_date:
cutoff_date = datetime.datetime.now() - datetime.timedelta(days=settings.DAYS_OF_UNREAD)
if read_filter == "unread":
cutoff_date = max(cutoff_date, self.mark_read_date)
elif default_cutoff_date:
cutoff_date = default_cutoff_date
if order == 'oldest':
byscorefunc = rt.zrangebyscore
if read_filter == 'unread':
min_score = int(time.mktime(cutoff_date.timetuple())) + 1
else:
min_score = int(time.mktime(cutoff_date.timetuple())) - 1000
max_score = current_time
else:
byscorefunc = rt.zrevrangebyscore
min_score = current_time
if read_filter == 'unread':
# +1 for the intersection b/w zF and F, which carries an implicit score of 1.
max_score = int(time.mktime(cutoff_date.timetuple())) + 1
else:
max_score = 0
if settings.DEBUG and False:
debug_stories = rt.zrevrange(unread_ranked_stories_key, 0, -1, withscores=True)
print " ---> Unread all stories (%s - %s) %s stories: %s" % (
min_score,
max_score,
len(debug_stories),
debug_stories)
story_ids = byscorefunc(unread_ranked_stories_key, min_score,
max_score, start=offset, num=500,
withscores=withscores)[:limit]
if withscores:
story_ids = [(s[0], int(s[1])) for s in story_ids]
if withscores or hashes_only:
return story_ids
elif story_ids:
story_date_order = "%sstory_date" % ('' if order == 'oldest' else '-')
mstories = MStory.objects(story_hash__in=story_ids).order_by(story_date_order)
stories = Feed.format_stories(mstories)
return stories
else:
return []
@classmethod
def feed_stories(cls, user_id, feed_ids=None, offset=0, limit=6,
order='newest', read_filter='all', usersubs=None, cutoff_date=None,
all_feed_ids=None, cache_prefix=""):
rt = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_TEMP_POOL)
across_all_feeds = False
if order == 'oldest':
range_func = rt.zrange
else:
range_func = rt.zrevrange
if feed_ids is None:
across_all_feeds = True
feed_ids = []
if not all_feed_ids:
all_feed_ids = [f for f in feed_ids]
# feeds_string = ""
feeds_string = ','.join(str(f) for f in sorted(all_feed_ids))[:30]
ranked_stories_keys = '%szU:%s:feeds:%s' % (cache_prefix, user_id, feeds_string)
unread_ranked_stories_keys = '%szhU:%s:feeds:%s' % (cache_prefix, user_id, feeds_string)
stories_cached = rt.exists(ranked_stories_keys)
unreads_cached = True if read_filter == "unread" else rt.exists(unread_ranked_stories_keys)
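# A continuation page with both cached sorted sets still present can be served straight from the temp pool.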
if offset and stories_cached and unreads_cached:
story_hashes = range_func(ranked_stories_keys, offset, limit)
if read_filter == "unread":
unread_story_hashes = story_hashes
else:
unread_story_hashes = range_func(unread_ranked_stories_keys, 0, offset+limit)
return story_hashes, unread_story_hashes
else:
rt.delete(ranked_stories_keys)
rt.delete(unread_ranked_stories_keys)
story_hashes = cls.story_hashes(user_id, feed_ids=feed_ids,
read_filter=read_filter, order=order,
include_timestamps=True,
group_by_feed=False,
usersubs=usersubs,
cutoff_date=cutoff_date,
across_all_feeds=across_all_feeds)
if not story_hashes:
return [], []
pipeline = rt.pipeline()
for story_hash_group in chunks(story_hashes, 100):
pipeline.zadd(ranked_stories_keys, **dict(story_hash_group))
pipeline.execute()
story_hashes = range_func(ranked_stories_keys, offset, limit)
if read_filter == "unread":
unread_feed_story_hashes = story_hashes
rt.zunionstore(unread_ranked_stories_keys, [ranked_stories_keys])
else:
unread_story_hashes = cls.story_hashes(user_id, feed_ids=feed_ids,
read_filter="unread", order=order,
include_timestamps=True,
group_by_feed=False,
cutoff_date=cutoff_date)
if unread_story_hashes:
for unread_story_hash_group in chunks(unread_story_hashes, 100):
rt.zadd(unread_ranked_stories_keys, **dict(unread_story_hash_group))
unread_feed_story_hashes = range_func(unread_ranked_stories_keys, offset, limit)
rt.expire(ranked_stories_keys, 60*60)
rt.expire(unread_ranked_stories_keys, 60*60)
return story_hashes, unread_feed_story_hashes
@classmethod
def add_subscription(cls, user, feed_address, folder=None, bookmarklet=False, auto_active=True,
skip_fetch=False):
feed = None
us = None
logging.user(user, "~FRAdding URL: ~SB%s (in %s) %s" % (feed_address, folder,
"~FCAUTO-ADD" if not auto_active else ""))
feed = Feed.get_feed_from_url(feed_address)
if not feed:
code = -1
if bookmarklet:
message = "This site does not have an RSS feed. Nothing is linked to from this page."
else:
message = "This address does not point to an RSS feed or a website with an RSS feed."
else:
us, subscription_created = cls.objects.get_or_create(
feed=feed,
user=user,
defaults={
'needs_unread_recalc': True,
'active': auto_active,
}
)
code = 1
message = ""
if us:
user_sub_folders_object, created = UserSubscriptionFolders.objects.get_or_create(
user=user,
defaults={'folders': '[]'}
)
if created:
user_sub_folders = []
else:
user_sub_folders = json.decode(user_sub_folders_object.folders)
user_sub_folders = add_object_to_folder(feed.pk, folder, user_sub_folders)
user_sub_folders_object.folders = json.encode(user_sub_folders)
user_sub_folders_object.save()
if auto_active or user.profile.is_premium:
us.active = True
us.save()
if not skip_fetch and feed.last_update < datetime.datetime.utcnow() - datetime.timedelta(days=1):
feed = feed.update()
from apps.social.models import MActivity
MActivity.new_feed_subscription(user_id=user.pk, feed_id=feed.pk, feed_title=feed.title)
feed.setup_feed_for_premium_subscribers()
return code, message, us
@classmethod
def feeds_with_updated_counts(cls, user, feed_ids=None, check_fetch_status=False, force=False):
feeds = {}
# Get subscriptions for user
user_subs = cls.objects.select_related('feed').filter(user=user, active=True)
feed_ids = [f for f in (feed_ids or []) if f and not f.startswith('river')]
if feed_ids:
user_subs = user_subs.filter(feed__in=feed_ids)
for i, sub in enumerate(user_subs):
# Count unreads if subscription is stale.
if (force or
sub.needs_unread_recalc or
sub.unread_count_updated < user.profile.unread_cutoff or
sub.oldest_unread_story_date < user.profile.unread_cutoff):
sub = sub.calculate_feed_scores(silent=True, force=force)
if not sub: continue # TODO: Figure out the correct sub and give it a new feed_id
feed_id = sub.feed_id
feeds[feed_id] = {
'ps': sub.unread_count_positive,
'nt': sub.unread_count_neutral,
'ng': sub.unread_count_negative,
'id': feed_id,
}
if not sub.feed.fetched_once or check_fetch_status:
feeds[feed_id]['fetched_once'] = sub.feed.fetched_once
feeds[feed_id]['not_yet_fetched'] = not sub.feed.fetched_once # Legacy. Dammit.
if sub.feed.favicon_fetching:
feeds[feed_id]['favicon_fetching'] = True
if sub.feed.has_feed_exception or sub.feed.has_page_exception:
feeds[feed_id]['has_exception'] = True
feeds[feed_id]['exception_type'] = 'feed' if sub.feed.has_feed_exception else 'page'
feeds[feed_id]['feed_address'] = sub.feed.feed_address
feeds[feed_id]['exception_code'] = sub.feed.exception_code
return feeds
@classmethod
def queue_new_feeds(cls, user, new_feeds=None):
if not isinstance(user, User):
user = User.objects.get(pk=user)
if not new_feeds:
new_feeds = cls.objects.filter(user=user,
feed__fetched_once=False,
active=True).values('feed_id')
new_feeds = list(set([f['feed_id'] for f in new_feeds]))
if not new_feeds:
return
logging.user(user, "~BB~FW~SBQueueing NewFeeds: ~FC(%s) %s" % (len(new_feeds), new_feeds))
size = 4
for t in (new_feeds[pos:pos + size] for pos in xrange(0, len(new_feeds), size)):
NewFeeds.apply_async(args=(t,), queue="new_feeds")
@classmethod
def refresh_stale_feeds(cls, user, exclude_new=False):
if not isinstance(user, User):
user = User.objects.get(pk=user)
stale_cutoff = datetime.datetime.now() - datetime.timedelta(days=settings.SUBSCRIBER_EXPIRE)
# TODO: Refactor below using last_update from REDIS_FEED_UPDATE_POOL
stale_feeds = UserSubscription.objects.filter(user=user, active=True, feed__last_update__lte=stale_cutoff)
if exclude_new:
stale_feeds = stale_feeds.filter(feed__fetched_once=True)
all_feeds = UserSubscription.objects.filter(user=user, active=True)
logging.user(user, "~FG~BBRefreshing stale feeds: ~SB%s/%s" % (
stale_feeds.count(), all_feeds.count()))
for sub in stale_feeds:
sub.feed.fetched_once = False
sub.feed.save()
if stale_feeds:
stale_feeds = list(set([f.feed_id for f in stale_feeds]))
cls.queue_new_feeds(user, new_feeds=stale_feeds)
@classmethod
def identify_deleted_feed_users(cls, old_feed_id):
users = UserSubscriptionFolders.objects.filter(folders__contains=old_feed_id).only('user')
user_ids = [usf.user_id for usf in users]
f = open('utils/backups/users.txt', 'w')
f.write('\n'.join([str(u) for u in user_ids]))
return user_ids
@classmethod
def recreate_deleted_feed(cls, new_feed_id, old_feed_id=None, skip=0):
user_ids = sorted([int(u) for u in open('utils/backups/users.txt').read().split('\n') if u])
count = len(user_ids)
for i, user_id in enumerate(user_ids):
if i < skip: continue
if i % 1000 == 0:
print "\n\n ------------------------------------------------"
print "\n ---> %s/%s (%s%%)" % (i, count, round(float(i)/count))
print "\n ------------------------------------------------\n"
try:
user = User.objects.get(pk=user_id)
except User.DoesNotExist:
print " ***> %s has no account" % user_id
continue
us, created = UserSubscription.objects.get_or_create(user_id=user_id, feed_id=new_feed_id, defaults={
'needs_unread_recalc': True,
'active': True,
'is_trained': True
})
if not created:
print " ***> %s already subscribed" % user.username
try:
usf = UserSubscriptionFolders.objects.get(user_id=user_id)
usf.add_missing_feeds()
except UserSubscriptionFolders.DoesNotExist:
print " ***> %s has no USF" % user.username
# Move classifiers
if old_feed_id:
classifier_count = 0
for classifier_type in (MClassifierAuthor, MClassifierFeed, MClassifierTag, MClassifierTitle):
classifiers = classifier_type.objects.filter(user_id=user_id, feed_id=old_feed_id)
classifier_count += classifiers.count()
for classifier in classifiers:
classifier.feed_id = new_feed_id
try:
classifier.save()
except NotUniqueError:
continue
if classifier_count:
print " Moved %s classifiers for %s" % (classifier_count, user.username)
def trim_read_stories(self, r=None):
if not r:
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
read_stories_key = "RS:%s:%s" % (self.user_id, self.feed_id)
stale_story_hashes = r.sdiff(read_stories_key, "F:%s" % self.feed_id)
if not stale_story_hashes:
return
logging.user(self.user, "~FBTrimming ~FR%s~FB read stories (~SB%s~SN)..." % (len(stale_story_hashes), self.feed_id))
r.srem(read_stories_key, *stale_story_hashes)
r.srem("RS:%s" % self.feed_id, *stale_story_hashes)
@classmethod
def trim_user_read_stories(self, user_id):
user = User.objects.get(pk=user_id)
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
subs = UserSubscription.objects.filter(user_id=user_id).only('feed')
if not subs: return
key = "RS:%s" % user_id
feeds = [f.feed_id for f in subs]
old_rs = r.smembers(key)
old_count = len(old_rs)
if not old_count:
logging.user(user, "~FBTrimming all read stories, ~SBnone found~SN.")
return
# r.sunionstore("%s:backup" % key, key)
# r.expire("%s:backup" % key, 60*60*24)
r.sunionstore(key, *["%s:%s" % (key, f) for f in feeds])
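# The sunionstore above rebuilt RS:<user> from currently subscribed feeds only; scan the old
# membership and re-add hashes from unsubscribed feeds so that read history is not lost.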
new_rs = r.smembers(key)
missing_rs = []
missing_count = 0
feed_re = re.compile(r'(\d+):.*?')
for i, rs in enumerate(old_rs):
if i and i % 1000 == 0:
if missing_rs:
r.sadd(key, *missing_rs)
missing_count += len(missing_rs)
missing_rs = []
found = feed_re.search(rs)
if not found:
print " ---> Not found: %s" % rs
continue
rs_feed_id = found.groups()[0]
if int(rs_feed_id) not in feeds:
missing_rs.append(rs)
if missing_rs:
r.sadd(key, *missing_rs)
missing_count += len(missing_rs)
new_count = len(new_rs)
new_total = new_count + missing_count
logging.user(user, "~FBTrimming ~FR%s~FB/%s (~SB%s sub'ed ~SN+ ~SB%s unsub'ed~SN saved)" %
(old_count - new_total, old_count, new_count, missing_count))
def mark_feed_read(self, cutoff_date=None):
if (self.unread_count_negative == 0
and self.unread_count_neutral == 0
and self.unread_count_positive == 0
and not self.needs_unread_recalc):
return
recount = True
# Use the latest story to get last read time.
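# With no explicit cutoff, mark everything read up to one second past the newest story; an empty
# feed just zeroes the counts without a recount.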
if cutoff_date:
cutoff_date = cutoff_date + datetime.timedelta(seconds=1)
else:
latest_story = MStory.objects(story_feed_id=self.feed.pk)\
.order_by('-story_date').only('story_date').limit(1)
if latest_story and len(latest_story) >= 1:
cutoff_date = (latest_story[0]['story_date']
+ datetime.timedelta(seconds=1))
else:
cutoff_date = datetime.datetime.utcnow()
recount = False
if cutoff_date > self.mark_read_date or cutoff_date > self.oldest_unread_story_date:
self.last_read_date = cutoff_date
self.mark_read_date = cutoff_date
self.oldest_unread_story_date = cutoff_date
else:
logging.user(self.user, "Not marking %s as read: %s > %s/%s" %
(self, cutoff_date, self.mark_read_date, self.oldest_unread_story_date))
if not recount:
self.unread_count_negative = 0
self.unread_count_positive = 0
self.unread_count_neutral = 0
self.unread_count_updated = datetime.datetime.utcnow()
self.needs_unread_recalc = False
else:
self.needs_unread_recalc = True
self.save()
return True
def mark_newer_stories_read(self, cutoff_date):
if (self.unread_count_negative == 0
and self.unread_count_neutral == 0
and self.unread_count_positive == 0
and not self.needs_unread_recalc):
return
cutoff_date = cutoff_date - datetime.timedelta(seconds=1)
story_hashes = self.get_stories(limit=500, order="newest", cutoff_date=cutoff_date,
read_filter="unread", hashes_only=True)
data = self.mark_story_ids_as_read(story_hashes, aggregated=True)
return data
def mark_story_ids_as_read(self, story_hashes, request=None, aggregated=False):
data = dict(code=0, payload=story_hashes)
if not request:
request = self.user
if not self.needs_unread_recalc:
self.needs_unread_recalc = True
self.save()
if len(story_hashes) > 1:
logging.user(request, "~FYRead %s stories in feed: %s" % (len(story_hashes), self.feed))
else:
logging.user(request, "~FYRead story in feed: %s" % (self.feed))
RUserStory.aggregate_mark_read(self.feed_id)
for story_hash in set(story_hashes):
RUserStory.mark_read(self.user_id, self.feed_id, story_hash, aggregated=aggregated)
return data
def invert_read_stories_after_unread_story(self, story, request=None):
data = dict(code=1)
if story.story_date > self.mark_read_date:
return data
# Story is outside the mark as read range, so invert all stories before.
newer_stories = MStory.objects(story_feed_id=story.story_feed_id,
story_date__gte=story.story_date,
story_date__lte=self.mark_read_date
).only('story_hash')
newer_stories = [s.story_hash for s in newer_stories]
self.mark_read_date = story.story_date - datetime.timedelta(minutes=1)
self.needs_unread_recalc = True
self.save()
# Mark stories as read only after the mark_read_date has been moved, otherwise
# these would be ignored.
data = self.mark_story_ids_as_read(newer_stories, request=request, aggregated=True)
return data
def calculate_feed_scores(self, silent=False, stories=None, force=False):
# now = datetime.datetime.strptime("2009-07-06 22:30:03", "%Y-%m-%d %H:%M:%S")
now = datetime.datetime.now()
oldest_unread_story_date = now
if self.user.profile.last_seen_on < self.user.profile.unread_cutoff and not force:
# if not silent:
# logging.info(' ---> [%s] SKIPPING Computing scores: %s (1 week+)' % (self.user, self.feed))
return self
ong = self.unread_count_negative
ont = self.unread_count_neutral
ops = self.unread_count_positive
oousd = self.oldest_unread_story_date
ucu = self.unread_count_updated
onur = self.needs_unread_recalc
oit = self.is_trained
# if not self.feed.fetched_once:
# if not silent:
# logging.info(' ---> [%s] NOT Computing scores: %s' % (self.user, self.feed))
# self.needs_unread_recalc = False
# self.save()
# return
feed_scores = dict(negative=0, neutral=0, positive=0)
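# With a trained classifier, each unread story is scored positive/negative/neutral from the user's
# feed/author/tag/title classifiers; untrained subscriptions just count every unread story as neutral.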
# Two weeks in age. If mark_read_date is older, mark old stories as read.
date_delta = self.user.profile.unread_cutoff
if date_delta < self.mark_read_date:
date_delta = self.mark_read_date
else:
self.mark_read_date = date_delta
if self.is_trained:
if not stories:
stories = cache.get('S:%s' % self.feed_id)
unread_story_hashes = self.story_hashes(user_id=self.user_id, feed_ids=[self.feed_id],
usersubs=[self],
read_filter='unread', group_by_feed=False,
cutoff_date=self.user.profile.unread_cutoff)
if not stories:
stories_db = MStory.objects(story_hash__in=unread_story_hashes)
stories = Feed.format_stories(stories_db, self.feed_id)
unread_stories = []
for story in stories:
if story['story_date'] < date_delta:
continue
if story['story_hash'] in unread_story_hashes:
unread_stories.append(story)
if story['story_date'] < oldest_unread_story_date:
oldest_unread_story_date = story['story_date']
# if not silent:
# logging.info(' ---> [%s] Format stories: %s' % (self.user, datetime.datetime.now() - now))
classifier_feeds = list(MClassifierFeed.objects(user_id=self.user_id, feed_id=self.feed_id, social_user_id=0))
classifier_authors = list(MClassifierAuthor.objects(user_id=self.user_id, feed_id=self.feed_id))
classifier_titles = list(MClassifierTitle.objects(user_id=self.user_id, feed_id=self.feed_id))
classifier_tags = list(MClassifierTag.objects(user_id=self.user_id, feed_id=self.feed_id))
if (not len(classifier_feeds) and
not len(classifier_authors) and
not len(classifier_titles) and
not len(classifier_tags)):
self.is_trained = False
# if not silent:
# logging.info(' ---> [%s] Classifiers: %s (%s)' % (self.user, datetime.datetime.now() - now, classifier_feeds.count() + classifier_authors.count() + classifier_tags.count() + classifier_titles.count()))
scores = {
'feed': apply_classifier_feeds(classifier_feeds, self.feed),
}
for story in unread_stories:
scores.update({
'author' : apply_classifier_authors(classifier_authors, story),
'tags' : apply_classifier_tags(classifier_tags, story),
'title' : apply_classifier_titles(classifier_titles, story),
})
max_score = max(scores['author'], scores['tags'], scores['title'])
min_score = min(scores['author'], scores['tags'], scores['title'])
if max_score > 0:
feed_scores['positive'] += 1
elif min_score < 0:
feed_scores['negative'] += 1
else:
if scores['feed'] > 0:
feed_scores['positive'] += 1
elif scores['feed'] < 0:
feed_scores['negative'] += 1
else:
feed_scores['neutral'] += 1
else:
unread_story_hashes = self.story_hashes(user_id=self.user_id, feed_ids=[self.feed_id],
usersubs=[self],
read_filter='unread', group_by_feed=False,
include_timestamps=True,
cutoff_date=date_delta)
feed_scores['neutral'] = len(unread_story_hashes)
if feed_scores['neutral']:
oldest_unread_story_date = datetime.datetime.fromtimestamp(unread_story_hashes[-1][1])
if not silent or settings.DEBUG:
logging.user(self.user, '~FBUnread count (~SB%s~SN%s): ~SN(~FC%s~FB/~FC%s~FB/~FC%s~FB) ~SBto~SN (~FC%s~FB/~FC%s~FB/~FC%s~FB)' % (self.feed_id, '/~FMtrained~FB' if self.is_trained else '', ong, ont, ops, feed_scores['negative'], feed_scores['neutral'], feed_scores['positive']))
self.unread_count_positive = feed_scores['positive']
self.unread_count_neutral = feed_scores['neutral']
self.unread_count_negative = feed_scores['negative']
self.unread_count_updated = datetime.datetime.now()
self.oldest_unread_story_date = oldest_unread_story_date
self.needs_unread_recalc = False
update_fields = []
if self.unread_count_positive != ops: update_fields.append('unread_count_positive')
if self.unread_count_neutral != ont: update_fields.append('unread_count_neutral')
if self.unread_count_negative != ong: update_fields.append('unread_count_negative')
if self.unread_count_updated != ucu: update_fields.append('unread_count_updated')
if self.oldest_unread_story_date != oousd: update_fields.append('oldest_unread_story_date')
if self.needs_unread_recalc != onur: update_fields.append('needs_unread_recalc')
if self.is_trained != oit: update_fields.append('is_trained')
if len(update_fields):
self.save(update_fields=update_fields)
if (self.unread_count_positive == 0 and
self.unread_count_neutral == 0):
self.mark_feed_read()
if not silent:
logging.user(self.user, '~FC~SNComputing scores: %s (~SB%s~SN/~SB%s~SN/~SB%s~SN)' % (self.feed, feed_scores['negative'], feed_scores['neutral'], feed_scores['positive']))
self.trim_read_stories()
return self
@staticmethod
def score_story(scores):
max_score = max(scores['author'], scores['tags'], scores['title'])
min_score = min(scores['author'], scores['tags'], scores['title'])
if max_score > 0:
return 1
elif min_score < 0:
return -1
return scores['feed']
def switch_feed(self, new_feed, old_feed):
# Rewrite feed in subscription folders
try:
user_sub_folders = UserSubscriptionFolders.objects.get(user=self.user)
except Exception, e:
logging.info(" *** ---> UserSubscriptionFolders error: %s" % e)
return
logging.info(" ===> %s " % self.user)
# Switch read stories
RUserStory.switch_feed(user_id=self.user_id, old_feed_id=old_feed.pk,
new_feed_id=new_feed.pk)
def switch_feed_for_classifier(model):
duplicates = model.objects(feed_id=old_feed.pk, user_id=self.user_id)
if duplicates.count():
logging.info(" ---> Switching %s %s" % (duplicates.count(), model))
for duplicate in duplicates:
duplicate.feed_id = new_feed.pk
if duplicate.social_user_id is None:
duplicate.social_user_id = 0
try:
duplicate.save()
pass
except (IntegrityError, OperationError):
logging.info(" !!!!> %s already exists" % duplicate)
duplicate.delete()
switch_feed_for_classifier(MClassifierTitle)
switch_feed_for_classifier(MClassifierAuthor)
switch_feed_for_classifier(MClassifierFeed)
switch_feed_for_classifier(MClassifierTag)
# Switch to original feed for the user subscription
self.feed = new_feed
self.needs_unread_recalc = True
try:
UserSubscription.objects.get(user=self.user, feed=new_feed)
except UserSubscription.DoesNotExist:
self.save()
user_sub_folders.rewrite_feed(new_feed, old_feed)
else:
# except (IntegrityError, OperationError):
logging.info(" !!!!> %s already subscribed" % self.user)
self.delete()
return
@classmethod
def collect_orphan_feeds(cls, user):
us = cls.objects.filter(user=user)
try:
usf = UserSubscriptionFolders.objects.get(user=user)
except UserSubscriptionFolders.DoesNotExist:
return
us_feed_ids = set([sub.feed_id for sub in us])
folders = json.decode(usf.folders)
def collect_ids(folders, found_ids):
for item in folders:
# print ' --> %s' % item
if isinstance(item, int):
# print ' --> Adding feed: %s' % item
found_ids.add(item)
elif isinstance(item, dict):
# print ' --> Descending folder dict: %s' % item.values()
found_ids.update(collect_ids(item.values(), found_ids))
elif isinstance(item, list):
# print ' --> Descending folder list: %s' % len(item)
found_ids.update(collect_ids(item, found_ids))
# print ' --> Returning: %s' % found_ids
return found_ids
found_ids = collect_ids(folders, set())
diff = len(us_feed_ids) - len(found_ids)
if diff > 0:
logging.info(" ---> Collecting orphans on %s. %s feeds with %s orphans" % (user.username, len(us_feed_ids), diff))
orphan_ids = us_feed_ids - found_ids
folders.extend(list(orphan_ids))
usf.folders = json.encode(folders)
usf.save()
@classmethod
def verify_feeds_scheduled(cls, user_id):
r = redis.Redis(connection_pool=settings.REDIS_FEED_UPDATE_POOL)
user = User.objects.get(pk=user_id)
subs = cls.objects.filter(user=user)
feed_ids = [sub.feed.pk for sub in subs]
p = r.pipeline()
for feed_id in feed_ids:
p.zscore('scheduled_updates', feed_id)
p.zscore('error_feeds', feed_id)
results = p.execute()
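# results holds two interleaved scores per feed: scheduled_updates at index f*2 and error_feeds at f*2+1.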
p = r.pipeline()
for feed_id in feed_ids:
p.zscore('queued_feeds', feed_id)
try:
results_queued = p.execute()
except:
results_queued = map(lambda x: False, range(len(feed_ids)))
safety_net = []
for f, feed_id in enumerate(feed_ids):
scheduled_updates = results[f*2]
error_feeds = results[f*2+1]
queued_feeds = results_queued[f]
if not scheduled_updates and not queued_feeds and not error_feeds:
safety_net.append(feed_id)
if not safety_net: return
logging.user(user, "~FBFound ~FR%s unscheduled feeds~FB, scheduling..." % len(safety_net))
for feed_id in safety_net:
feed = Feed.get_by_id(feed_id)
feed.set_next_scheduled_update()
@classmethod
def count_subscribers_to_other_subscriptions(cls, feed_id):
# feeds = defaultdict(int)
subscribing_users = cls.objects.filter(feed=feed_id).values('user', 'feed_opens').order_by('-feed_opens')[:25]
print "Got subscribing users"
subscribing_user_ids = [sub['user'] for sub in subscribing_users]
print "Got subscribing user ids"
cofeeds = cls.objects.filter(user__in=subscribing_user_ids).values('feed').annotate(
user_count=Count('user')).order_by('-user_count')[:200]
print "Got cofeeds: %s" % len(cofeeds)
# feed_subscribers = Feed.objects.filter(pk__in=[f['feed'] for f in cofeeds]).values('pk', 'num_subscribers')
# max_local_subscribers = float(max([f['user_count'] for f in cofeeds]))
# max_total_subscribers = float(max([f['num_subscribers'] for f in feed_subscribers]))
# feed_subscribers = dict([(s['pk'], float(s['num_subscribers'])) for s in feed_subscribers])
# pctfeeds = [(f['feed'],
# f['user_count'],
# feed_subscribers[f['feed']],
# f['user_count']/max_total_subscribers,
# f['user_count']/max_local_subscribers,
# max_local_subscribers,
# max_total_subscribers) for f in cofeeds]
# print pctfeeds[:5]
# orderedpctfeeds = sorted(pctfeeds, key=lambda f: .5*f[3]+.5*f[4], reverse=True)[:8]
# pprint([(Feed.get_by_id(o[0]), o[1], o[2], o[3], o[4]) for o in orderedpctfeeds])
users_by_feeds = {}
for feed in [f['feed'] for f in cofeeds]:
users_by_feeds[feed] = [u['user'] for u in cls.objects.filter(feed=feed, user__in=subscribing_user_ids).values('user')]
print "Got users_by_feeds"
table = tfidf()
for feed in users_by_feeds.keys():
table.addDocument(feed, users_by_feeds[feed])
print "Got table"
sorted_table = sorted(table.similarities(subscribing_user_ids), key=itemgetter(1), reverse=True)[:8]
pprint([(Feed.get_by_id(o[0]), o[1]) for o in sorted_table])
return table
# return cofeeds
class RUserStory:
@classmethod
def mark_story_hashes_read(cls, user_id, story_hashes, r=None, s=None):
if not r:
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
if not s:
s = redis.Redis(connection_pool=settings.REDIS_POOL)
# if not r2:
# r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
p = r.pipeline()
# p2 = r2.pipeline()
feed_ids = set()
friend_ids = set()
if not isinstance(story_hashes, list):
story_hashes = [story_hashes]
single_story = len(story_hashes) == 1
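# Mark each hash read for this user, and also under the per-blurblog read keys of any friends
# who shared the same story.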
for story_hash in story_hashes:
feed_id, _ = MStory.split_story_hash(story_hash)
feed_ids.add(feed_id)
if single_story:
cls.aggregate_mark_read(feed_id)
# Find other social feeds with this story to update their counts
friend_key = "F:%s:F" % (user_id)
share_key = "S:%s" % (story_hash)
friends_with_shares = [int(f) for f in s.sinter(share_key, friend_key)]
friend_ids.update(friends_with_shares)
cls.mark_read(user_id, feed_id, story_hash, social_user_ids=friends_with_shares, r=p)
p.execute()
# p2.execute()
return list(feed_ids), list(friend_ids)
@classmethod
def mark_story_hash_unread(cls, user_id, story_hash, r=None, s=None):
if not r:
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
if not s:
s = redis.Redis(connection_pool=settings.REDIS_POOL)
# if not r2:
# r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
friend_ids = set()
feed_id, _ = MStory.split_story_hash(story_hash)
# Find other social feeds with this story to update their counts
friend_key = "F:%s:F" % (user_id)
share_key = "S:%s" % (story_hash)
friends_with_shares = [int(f) for f in s.sinter(share_key, friend_key)]
friend_ids.update(friends_with_shares)
cls.mark_unread(user_id, feed_id, story_hash, social_user_ids=friends_with_shares, r=r)
return feed_id, list(friend_ids)
@classmethod
def aggregate_mark_read(cls, feed_id):
if not feed_id:
logging.debug(" ***> ~BR~FWNo feed_id on aggregate mark read. Ignoring.")
return
r = redis.Redis(connection_pool=settings.REDIS_FEED_READ_POOL)
week_of_year = datetime.datetime.now().strftime('%Y-%U')
feed_read_key = "fR:%s:%s" % (feed_id, week_of_year)
r.incr(feed_read_key)
r.expire(feed_read_key, 2*settings.DAYS_OF_STORY_HASHES*24*60*60)
@classmethod
def mark_read(cls, user_id, story_feed_id, story_hash, social_user_ids=None,
aggregated=False, r=None):
if not r:
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
# if not r2:
# r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
story_hash = MStory.ensure_story_hash(story_hash, story_feed_id=story_feed_id)
if not story_hash: return
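# redis_commands adds the hash to a key and refreshes its expiry; it is applied to the user-wide
# set (RS:<user>), the per-feed set (RS:<user>:<feed>), and per-blurblog sets for sharing friends.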
def redis_commands(key):
r.sadd(key, story_hash)
# r2.sadd(key, story_hash)
r.expire(key, settings.DAYS_OF_STORY_HASHES*24*60*60)
# r2.expire(key, settings.DAYS_OF_STORY_HASHES*24*60*60)
all_read_stories_key = 'RS:%s' % (user_id)
redis_commands(all_read_stories_key)
read_story_key = 'RS:%s:%s' % (user_id, story_feed_id)
redis_commands(read_story_key)
if social_user_ids:
for social_user_id in social_user_ids:
social_read_story_key = 'RS:%s:B:%s' % (user_id, social_user_id)
redis_commands(social_read_story_key)
if not aggregated:
key = 'lRS:%s' % user_id
r.lpush(key, story_hash)
r.ltrim(key, 0, 1000)
r.expire(key, settings.DAYS_OF_STORY_HASHES*24*60*60)
@staticmethod
def story_can_be_marked_read_by_user(story, user):
message = None
if story.story_date < user.profile.unread_cutoff:
if user.profile.is_premium:
message = "Story is more than %s days old, cannot mark as unread." % (
settings.DAYS_OF_UNREAD)
elif story.story_date > user.profile.unread_cutoff_premium:
message = "Story is more than %s days old. Premiums can mark unread up to 30 days." % (
settings.DAYS_OF_UNREAD_FREE)
else:
message = "Story is more than %s days old, cannot mark as unread." % (
settings.DAYS_OF_UNREAD_FREE)
return message
@staticmethod
def mark_unread(user_id, story_feed_id, story_hash, social_user_ids=None, r=None):
if not r:
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
# r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
story_hash = MStory.ensure_story_hash(story_hash, story_feed_id=story_feed_id)
if not story_hash: return
def redis_commands(key):
r.srem(key, story_hash)
# r2.srem(key, story_hash)
r.expire(key, settings.DAYS_OF_STORY_HASHES*24*60*60)
# r2.expire(key, settings.DAYS_OF_STORY_HASHES*24*60*60)
all_read_stories_key = 'RS:%s' % (user_id)
redis_commands(all_read_stories_key)
read_story_key = 'RS:%s:%s' % (user_id, story_feed_id)
redis_commands(read_story_key)
read_stories_list_key = 'lRS:%s' % user_id
r.lrem(read_stories_list_key, story_hash)
if social_user_ids:
for social_user_id in social_user_ids:
social_read_story_key = 'RS:%s:B:%s' % (user_id, social_user_id)
redis_commands(social_read_story_key)
@staticmethod
def get_stories(user_id, feed_id, r=None):
if not r:
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
story_hashes = r.smembers("RS:%s:%s" % (user_id, feed_id))
return story_hashes
@staticmethod
def get_read_stories(user_id, offset=0, limit=12, order="newest"):
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
key = "lRS:%s" % user_id
if order == "oldest":
count = r.llen(key)
if offset >= count: return []
offset = max(0, count - (offset+limit))
story_hashes = r.lrange(key, offset, offset+limit)
elif order == "newest":
story_hashes = r.lrange(key, offset, offset+limit)
return story_hashes
@classmethod
def switch_feed(cls, user_id, old_feed_id, new_feed_id):
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
# r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
p = r.pipeline()
# p2 = r2.pipeline()
story_hashes = cls.get_stories(user_id, old_feed_id, r=r)
for story_hash in story_hashes:
_, hash_story = MStory.split_story_hash(story_hash)
new_story_hash = "%s:%s" % (new_feed_id, hash_story)
read_feed_key = "RS:%s:%s" % (user_id, new_feed_id)
p.sadd(read_feed_key, new_story_hash)
# p2.sadd(read_feed_key, new_story_hash)
p.expire(read_feed_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
# p2.expire(read_feed_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
read_user_key = "RS:%s" % (user_id)
p.sadd(read_user_key, new_story_hash)
# p2.sadd(read_user_key, new_story_hash)
p.expire(read_user_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
# p2.expire(read_user_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
p.execute()
# p2.execute()
if len(story_hashes) > 0:
logging.info(" ---> %s read stories" % len(story_hashes))
@classmethod
def switch_hash(cls, feed_id, old_hash, new_hash):
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
# r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
p = r.pipeline()
# p2 = r2.pipeline()
UNREAD_CUTOFF = datetime.datetime.now() - datetime.timedelta(days=settings.DAYS_OF_STORY_HASHES)
usersubs = UserSubscription.objects.filter(feed_id=feed_id, last_read_date__gte=UNREAD_CUTOFF)
logging.info(" ---> ~SB%s usersubs~SN to switch read story hashes..." % len(usersubs))
for sub in usersubs:
rs_key = "RS:%s:%s" % (sub.user.pk, feed_id)
read = r.sismember(rs_key, old_hash)
if read:
p.sadd(rs_key, new_hash)
# p2.sadd(rs_key, new_hash)
p.expire(rs_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
# p2.expire(rs_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
read_user_key = "RS:%s" % sub.user.pk
p.sadd(read_user_key, new_hash)
# p2.sadd(read_user_key, new_hash)
p.expire(read_user_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
# p2.expire(read_user_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
p.execute()
# p2.execute()
@classmethod
def read_story_count(cls, user_id):
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
key = "RS:%s" % user_id
count = r.scard(key)
return count
class UserSubscriptionFolders(models.Model):
"""
A JSON list of the folders and feeds a user has subscribed to. The list
is a recursive nesting of feeds and folders within folders. Used to lay out
the feeds and folders in the Reader's feed navigation pane.
"""
user = models.ForeignKey(User, unique=True)
folders = models.TextField(default="[]")
def __unicode__(self):
return "[%s]: %s" % (self.user, len(self.folders),)
class Meta:
verbose_name_plural = "folders"
verbose_name = "folder"
def compact(self):
folders = json.decode(self.folders)
def _compact(folder):
new_folder = []
for item in folder:
if isinstance(item, int) and item not in new_folder:
new_folder.append(item)
elif isinstance(item, dict):
for f_k, f_v in item.items():
new_folder.append({f_k: _compact(f_v)})
return new_folder
new_folders = _compact(folders)
logging.info(" ---> Compacting from %s to %s" % (folders, new_folders))
new_folders = json.encode(new_folders)
logging.info(" ---> Compacting from %s to %s" % (len(self.folders), len(new_folders)))
self.folders = new_folders
self.save()
def add_folder(self, parent_folder, folder):
if self.folders:
user_sub_folders = json.decode(self.folders)
else:
user_sub_folders = []
obj = {folder: []}
user_sub_folders = add_object_to_folder(obj, parent_folder, user_sub_folders)
self.folders = json.encode(user_sub_folders)
self.save()
def arranged_folders(self):
user_sub_folders = json.decode(self.folders)
def _arrange_folder(folder):
folder_feeds = []
folder_folders = []
for item in folder:
if isinstance(item, int):
folder_feeds.append(item)
elif isinstance(item, dict):
for f_k, f_v in item.items():
arranged_folder = _arrange_folder(f_v)
folder_folders.append({f_k: arranged_folder})
arranged_folder = folder_feeds + folder_folders
return arranged_folder
return _arrange_folder(user_sub_folders)
def flatten_folders(self, feeds=None, inactive_feeds=None):
folders = json.decode(self.folders)
flat_folders = {" ": []}
if feeds and not inactive_feeds:
inactive_feeds = []
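# Build a flat {folder path: [feed ids]} mapping; nested folder names are joined with " - " and
# top-level feeds land under the " " key.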
def _flatten_folders(items, parent_folder="", depth=0):
for item in items:
if (isinstance(item, int) and
(not feeds or
(item in feeds or item in inactive_feeds))):
if not parent_folder:
parent_folder = ' '
if parent_folder in flat_folders:
flat_folders[parent_folder].append(item)
else:
flat_folders[parent_folder] = [item]
elif isinstance(item, dict):
for folder_name in item:
folder = item[folder_name]
flat_folder_name = "%s%s%s" % (
parent_folder if parent_folder and parent_folder != ' ' else "",
" - " if parent_folder and parent_folder != ' ' else "",
folder_name
)
flat_folders[flat_folder_name] = []
_flatten_folders(folder, flat_folder_name, depth+1)
_flatten_folders(folders)
return flat_folders
def delete_feed(self, feed_id, in_folder, commit_delete=True):
feed_id = int(feed_id)
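# Walk the folder tree: drop the feed from the requested folder, note whether copies exist in other
# folders, and only delete the UserSubscription when this was the last copy.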
def _find_feed_in_folders(old_folders, folder_name='', multiples_found=False, deleted=False):
new_folders = []
for k, folder in enumerate(old_folders):
if isinstance(folder, int):
if (folder == feed_id and in_folder is not None and (
(folder_name != in_folder) or
(folder_name == in_folder and deleted))):
multiples_found = True
logging.user(self.user, "~FB~SBDeleting feed, and a multiple has been found in '%s' / '%s' %s" % (folder_name, in_folder, '(deleted)' if deleted else ''))
if (folder == feed_id and
(folder_name == in_folder or in_folder is None) and
not deleted):
logging.user(self.user, "~FBDelete feed: %s'th item: %s folders/feeds" % (
k, len(old_folders)
))
deleted = True
else:
new_folders.append(folder)
elif isinstance(folder, dict):
for f_k, f_v in folder.items():
nf, multiples_found, deleted = _find_feed_in_folders(f_v, f_k, multiples_found, deleted)
new_folders.append({f_k: nf})
return new_folders, multiples_found, deleted
user_sub_folders = self.arranged_folders()
user_sub_folders, multiples_found, deleted = _find_feed_in_folders(user_sub_folders)
self.folders = json.encode(user_sub_folders)
self.save()
if not multiples_found and deleted and commit_delete:
try:
user_sub = UserSubscription.objects.get(user=self.user, feed=feed_id)
except UserSubscription.DoesNotExist:
duplicate_feed = DuplicateFeed.objects.filter(duplicate_feed_id=feed_id)
if duplicate_feed:
try:
user_sub = UserSubscription.objects.get(user=self.user,
feed=duplicate_feed[0].feed)
except UserSubscription.DoesNotExist:
return
if user_sub:
user_sub.delete()
def delete_folder(self, folder_to_delete, in_folder, feed_ids_in_folder, commit_delete=True):
def _find_folder_in_folders(old_folders, folder_name, feeds_to_delete, deleted_folder=None):
new_folders = []
for k, folder in enumerate(old_folders):
if isinstance(folder, int):
new_folders.append(folder)
if folder in feeds_to_delete:
feeds_to_delete.remove(folder)
elif isinstance(folder, dict):
for f_k, f_v in folder.items():
if f_k == folder_to_delete and (folder_name == in_folder or in_folder is None):
logging.user(self.user, "~FBDeleting folder '~SB%s~SN' in '%s': %s" % (f_k, folder_name, folder))
deleted_folder = folder
else:
nf, feeds_to_delete, deleted_folder = _find_folder_in_folders(f_v, f_k, feeds_to_delete, deleted_folder)
new_folders.append({f_k: nf})
return new_folders, feeds_to_delete, deleted_folder
user_sub_folders = json.decode(self.folders)
user_sub_folders, feeds_to_delete, deleted_folder = _find_folder_in_folders(user_sub_folders, '', feed_ids_in_folder)
self.folders = json.encode(user_sub_folders)
self.save()
if commit_delete:
UserSubscription.objects.filter(user=self.user, feed__in=feeds_to_delete).delete()
return deleted_folder
def delete_feeds_by_folder(self, feeds_by_folder):
logging.user(self.user, "~FBDeleting ~FR~SB%s~SN feeds~FB: ~SB%s" % (
len(feeds_by_folder), feeds_by_folder))
for feed_id, in_folder in feeds_by_folder:
self.delete_feed(feed_id, in_folder)
return self
def rename_folder(self, folder_to_rename, new_folder_name, in_folder):
def _find_folder_in_folders(old_folders, folder_name):
new_folders = []
for k, folder in enumerate(old_folders):
if isinstance(folder, int):
new_folders.append(folder)
elif isinstance(folder, dict):
for f_k, f_v in folder.items():
nf = _find_folder_in_folders(f_v, f_k)
if f_k == folder_to_rename and folder_name == in_folder:
logging.user(self.user, "~FBRenaming folder '~SB%s~SN' in '%s' to: ~SB%s" % (
f_k, folder_name, new_folder_name))
f_k = new_folder_name
new_folders.append({f_k: nf})
return new_folders
user_sub_folders = json.decode(self.folders)
user_sub_folders = _find_folder_in_folders(user_sub_folders, '')
self.folders = json.encode(user_sub_folders)
self.save()
def move_feed_to_folders(self, feed_id, in_folders=None, to_folders=None):
logging.user(self.user, "~FBMoving feed '~SB%s~SN' in '%s' to: ~SB%s" % (
feed_id, in_folders, to_folders))
user_sub_folders = json.decode(self.folders)
for in_folder in in_folders:
self.delete_feed(feed_id, in_folder, commit_delete=False)
user_sub_folders = json.decode(self.folders)
for to_folder in to_folders:
user_sub_folders = add_object_to_folder(int(feed_id), to_folder, user_sub_folders)
self.folders = json.encode(user_sub_folders)
self.save()
return self
def move_feed_to_folder(self, feed_id, in_folder=None, to_folder=None):
logging.user(self.user, "~FBMoving feed '~SB%s~SN' in '%s' to: ~SB%s" % (
feed_id, in_folder, to_folder))
user_sub_folders = json.decode(self.folders)
self.delete_feed(feed_id, in_folder, commit_delete=False)
user_sub_folders = json.decode(self.folders)
user_sub_folders = add_object_to_folder(int(feed_id), to_folder, user_sub_folders)
self.folders = json.encode(user_sub_folders)
self.save()
return self
def move_folder_to_folder(self, folder_name, in_folder=None, to_folder=None):
logging.user(self.user, "~FBMoving folder '~SB%s~SN' in '%s' to: ~SB%s" % (
folder_name, in_folder, to_folder))
user_sub_folders = json.decode(self.folders)
deleted_folder = self.delete_folder(folder_name, in_folder, [], commit_delete=False)
user_sub_folders = json.decode(self.folders)
user_sub_folders = add_object_to_folder(deleted_folder, to_folder, user_sub_folders)
self.folders = json.encode(user_sub_folders)
self.save()
return self
def move_feeds_by_folder_to_folder(self, feeds_by_folder, to_folder):
logging.user(self.user, "~FBMoving ~SB%s~SN feeds to folder: ~SB%s" % (
len(feeds_by_folder), to_folder))
for feed_id, in_folder in feeds_by_folder:
feed_id = int(feed_id)
self.move_feed_to_folder(feed_id, in_folder, to_folder)
return self
def rewrite_feed(self, original_feed, duplicate_feed):
def rewrite_folders(folders, original_feed, duplicate_feed):
new_folders = []
for k, folder in enumerate(folders):
if isinstance(folder, int):
if folder == duplicate_feed.pk:
# logging.info(" ===> Rewrote %s'th item: %s" % (k+1, folders))
new_folders.append(original_feed.pk)
else:
new_folders.append(folder)
elif isinstance(folder, dict):
for f_k, f_v in folder.items():
new_folders.append({f_k: rewrite_folders(f_v, original_feed, duplicate_feed)})
return new_folders
folders = json.decode(self.folders)
folders = rewrite_folders(folders, original_feed, duplicate_feed)
self.folders = json.encode(folders)
self.save()
def flat(self):
folders = json.decode(self.folders)
def _flat(folder, feeds=None):
if not feeds:
feeds = []
for item in folder:
if isinstance(item, int) and item not in feeds:
feeds.append(item)
elif isinstance(item, dict):
for f_k, f_v in item.items():
feeds.extend(_flat(f_v))
return feeds
return _flat(folders)
def feed_ids_under_folder_slug(self, slug):
folders = json.decode(self.folders)
def _feeds(folder, found=False, folder_title=None):
feeds = []
local_found = False
for item in folder:
if isinstance(item, int) and item not in feeds and found:
feeds.append(item)
elif isinstance(item, dict):
for f_k, f_v in item.items():
if slugify(f_k) == slug:
found = True
local_found = True
folder_title = f_k
found_feeds, folder_title = _feeds(f_v, found, folder_title)
feeds.extend(found_feeds)
if local_found:
found = False
local_found = False
return feeds, folder_title
return _feeds(folders)
@classmethod
def add_all_missing_feeds(cls):
usf = cls.objects.all().order_by('pk')
total = usf.count()
for i, f in enumerate(usf):
print "%s/%s: %s" % (i, total, f)
f.add_missing_feeds()
def add_missing_feeds(self):
all_feeds = self.flat()
subs = [us.feed_id for us in
UserSubscription.objects.filter(user=self.user).only('feed')]
missing_subs = set(all_feeds) - set(subs)
if missing_subs:
logging.debug(" ---> %s is missing %s subs. Adding %s..." % (
self.user, len(missing_subs), missing_subs))
for feed_id in missing_subs:
feed = Feed.get_by_id(feed_id)
if feed:
us, _ = UserSubscription.objects.get_or_create(user=self.user, feed=feed, defaults={
'needs_unread_recalc': True
})
if not us.needs_unread_recalc:
us.needs_unread_recalc = True
us.save()
missing_folder_feeds = set(subs) - set(all_feeds)
if missing_folder_feeds:
user_sub_folders = json.decode(self.folders)
logging.debug(" ---> %s is missing %s folder feeds. Adding %s..." % (
self.user, len(missing_folder_feeds), missing_folder_feeds))
for feed_id in missing_folder_feeds:
feed = Feed.get_by_id(feed_id)
if feed and feed.pk == feed_id:
user_sub_folders = add_object_to_folder(feed_id, "", user_sub_folders)
self.folders = json.encode(user_sub_folders)
self.save()
def auto_activate(self):
if self.user.profile.is_premium: return
active_count = UserSubscription.objects.filter(user=self.user, active=True).count()
if active_count: return
all_feeds = self.flat()
if not all_feeds: return
for feed in all_feeds[:64]:
try:
sub = UserSubscription.objects.get(user=self.user, feed=feed)
except UserSubscription.DoesNotExist:
continue
sub.active = True
sub.save()
if sub.feed.active_subscribers <= 0:
sub.feed.count_subscribers()
class Feature(models.Model):
"""
Simple blog-like feature board shown to all users on the home page.
"""
description = models.TextField(default="")
date = models.DateTimeField(default=datetime.datetime.now)
def __unicode__(self):
return "[%s] %s" % (self.date, self.description[:50])
class Meta:
ordering = ["-date"]
| mit | -6,377,939,261,452,766,000 | 43.556107 | 289 | 0.542445 | false |
dhwang99/statistics_introduction | learning/lasso_linear.py | 1 | 5562 | #encoding: utf8
import pdb
import numpy as np
import matplotlib.pyplot as plt
from data_loader import load_data
from cv_common import one_std_error_rule, gen_CV_samples_by_K_folds
'''
When features are somewhat linearly correlated (non-zero covariance), one large coefficient can keep
growing while others turn negative or shrink toward 0, which leads to high variance.
L1 regularization shrinks the coefficients, reducing the influence of correlated features; some
coefficients can be driven exactly to 0.
f(x) = seta * x
J(X;seta) = 1/2n*(f(X) - Y)**2 + lambda * np.linalg.norm(seta, 1)
1. Coordinate descent solution
psj = partial J/partial seta_j
= 1/n*sum_i[(f(X_i) - Y_i)*X_ij] + r_l1
= 1/n*sum_i[(seta * X_i - Y_i) * X_ij] + r_l1
= 1/n*sum_i[sum_k(seta_k * X_ik<k!=j>)*X_ij + seta_j * X_ij**2 - Y_i*X_ij] + r_l1
or
= 1/n*sum_i[(seta * X_i - Y_i) * X_ij - seta_j * X_ij**2 + seta_j*X_ij**2] + r_l1
let:
p_j = 1/n*sum_i[(Y_i - seta * X_i) * X_ij + seta_j * X_ij**2]
z_j = 1/n*sum_i(X_ij**2)
ps = -p_j + seta_j*z_j + r_l1
r_l1 = lambda, seta_j > 0
[-lambda, lambda], seta_j = 0
-lambda, seta_j < 0
seta_j = (p_j - lambd)/z_j, if p_j > lambd
= (p_j + lambd)/z_j, if p_j < -lambd
= 0, else
2. Least angle regression (LARS) solution
   not implemented yet

The degrees of freedom / lambda are selected with k-fold cross-validation.

Notes:
1. Without a bias/intercept term, Y must be centered (its mean subtracted).
2. numpy's svd returns the transpose of V; transpose it again before using it.
3. When computing the standard error of an estimate (a mean), remember to divide by sqrt(n).
4. When standardizing data, prefer statistics from the whole data set and divide by the standard
   deviation, i.e. xi = (xi - mean_hat)/sigma_hat.
   This example only centers the data and does not divide by the standard deviation (dividing is
   arguably better), whereas the best-subset-selection example does fully standardize under CV.
   Standardized results may be tried here later as well.
'''
def lasso_cd(X, Y, lamb):
it_count = 5000
epilson = 1e-6
n,m = X.shape
seta = np.ones(m)
mse = 1e10
mse_new = 1e10
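# Cycle through the coordinates, applying the soft-thresholding update derived in the module
# docstring, until the training MSE stops improving.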
for it_i in xrange(it_count):
for j in xrange(m):
Xj2Xj = np.dot(X[:,j], X[:,j])
p_j = 1./n * (np.dot(Y-np.dot(X, seta), X[:,j]) + seta[j]*Xj2Xj)
z_j = 1./n * Xj2Xj
if p_j > lamb:
seta[j] = (p_j - lamb)/z_j
elif p_j < -lamb:
seta[j] = (p_j + lamb)/z_j
else:
seta[j] = 0.
err1 = np.dot(X, seta) - Y
mse_new = np.dot(err1, err1) / n
if np.abs(mse_new - mse) < epilson:
break
mse = mse_new
return seta, mse_new
def lasso_leasq_cd_CV():
'''
Note: in real training the standardization statistics should be computed from train_X_CV only;
test_X_CV must not be included, so the folds stay independent. For convenience this experiment
includes it anyway.
'''
train_X, train_Y, test_X, test_Y, dt_stand_fun = load_data(type=1, need_bias=0, y_standard=1)
K = 10
lamb_lst = np.logspace(-3, 0, 100)
train_mid_rst = []
cv_samples = gen_CV_samples_by_K_folds(train_X, train_Y, K)
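# For every candidate lambda, fit on each of the K training folds and record the held-out MSE per fold.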
for lid in xrange(len(lamb_lst)):
lamb = lamb_lst[lid]
test_mses = np.zeros(K)
ki_rst = []
for ki in range(K):
X_CV, Y_CV, X_t_CV, Y_t_CV = cv_samples[ki]
# wait for coding
seta, train_mse = lasso_cd(X_CV, Y_CV, lamb)
y_hat_err = np.dot(X_t_CV, seta) - Y_t_CV
test_mse = np.dot(y_hat_err, y_hat_err) / len(Y_t_CV)
df = len(np.where(np.abs(seta) < 1e-5)[0])
ki_rst.append((lamb, seta, df, train_mse, test_mse))
train_mid_rst.append(ki_rst)
# Compute the error mean and standard deviation for each lambda
dfs = np.zeros(len(lamb_lst))
mse_means = np.zeros(len(lamb_lst))
mse_mean_stds = np.zeros(len(lamb_lst))
for lid in range(len(lamb_lst)):
# Mean and standard deviation of the error under K-fold CV
test_msees = np.array(map(lambda i:train_mid_rst[lid][i][4], range(0,K)))
train_msees = np.array(map(lambda i:train_mid_rst[lid][i][3], range(0,K)))
mse_means[lid] = test_msees.mean()
#!!!!! NOTE: this is the standard error of the estimate (the mean over the K folds), hence the division by sqrt(K)
mse_mean_stds[lid] = test_msees.std()/np.sqrt(K)
dfs[lid] = np.mean([fold_rst[2] for fold_rst in train_mid_rst[lid]])
print "lasso CD for lambda: %.4f, CV train mse: %.4f, test mse: %.4f, std: %.4f" % \
(lamb_lst[lid], train_msees.mean(), mse_means[lid], mse_mean_stds[lid])
'''
# one-standard-error rule
'''
best_lamb_id, minid = one_std_error_rule(mse_means, mse_mean_stds)
best_lamb = lamb_lst[best_lamb_id]
print "Best lambid: %d, lambda: %.4f, degree of free: %.4f" % (best_lamb_id, best_lamb, dfs[best_lamb_id])
one_std_val = mse_means[minid] + mse_mean_stds[minid]
plt.plot((dfs[0],dfs[-1]), (one_std_val, one_std_val), 'r-')
plt.errorbar(dfs, mse_means, yerr=mse_mean_stds, fmt='-o')
plt.savefig('images/lasso_mse_errorbar.png', format='png')
# Fit and predict with the best lambda selected by K-fold CV
'''
# alternative: plain minimum (no one-standard-error rule)
best_lamb_id = np.argmin(mse_means)
best_lamb = lamb_lst[best_lamb_id]
'''
seta, train_mse = lasso_cd(train_X, train_Y, best_lamb)
y_hat_err = np.dot(test_X, seta) - test_Y
test_mse = np.dot(y_hat_err, y_hat_err) / len(test_Y)
print "Test error: train mse: %.4f, test mse: %.4f" % (train_mse, test_mse)
print "seta: %s" % seta
if __name__ == '__main__':
print "lasso leasq by corr descent:"
lasso_leasq_cd_CV()
print ""
| gpl-3.0 | -1,762,243,759,084,949,000 | 27.461538 | 111 | 0.54158 | false |
okdshin/Yender | tutorial/tutorial.py | 1 | 3955 | import os, random, time
import yender
import numpy as np
import collections
block_set = collections.OrderedDict()
block_set["."] = yender.Block(char=".", name="air", visible=False)
block_set["#"] = yender.Block(char="#", name="stone", color=(127, 127, 127), movable=False)
block_set["R"] = yender.Block(char="R", name="red_tile", block_type="tile", color=(255, 0, 0))
block_set["B"] = yender.Block(char="B", name="blue_tile", block_type="tile", color=(0, 0, 255))
block_set["Y"] = yender.Block(char="Y", name="yellow_tile", block_type="tile", color=(255, 255, 0))
block_set["G"] = yender.Block(char="G", name="green_tile", block_type="tile", color=(0, 255, 0))
block_id_dict = {}
for i, block in enumerate(block_set.values()):
block_id_dict[block.name] = i
def make_i_maze_map():
map_source = [
"#######",
"#2...3#",
"###.###",
"###.###",
"###.###",
"#..0.1#",
"#######",
]
map_, place_holders = yender.load_map(block_set, map_source)
# place goal tiles and an indicator tile
start_pos = place_holders["0"]
indicator_pos = place_holders["1"]
blue_pos = place_holders["2"]
red_pos = place_holders["3"]
indicator_color = random.choice(("G", "Y"))
map_.set_block(start_pos, block_set["."])
map_.set_block(indicator_pos, block_set[indicator_color])
map_.set_block(blue_pos, block_set["B"])
map_.set_block(red_pos, block_set["R"])
return map_, indicator_color, start_pos, blue_pos, red_pos
class I_MazeEnv:
max_step = 50
def __init__(self):
self.rogue_env = yender.RogueEnv()
def get_ob(self):
block_ob = yender.map_to_block_ob(self.map_,
direction=self.rogue_env.agent_direction,
pos=self.rogue_env.agent_position,
block_id_dict=block_id_dict,
default_block=block_set["#"])
ob = yender.block_ob_to_hot_vectors(block_ob, len(block_id_dict))
return ob
def reset(self):
self.t = 0
self.total_reward = 0.0
self.map_, self.indicator, start_pos, self.blue_pos, self.red_pos = make_i_maze_map()
start_direction = random.choice(list(self.rogue_env.DIRECTION_SET.values()))
self.rogue_env.reset(self.map_, start_direction, start_pos)
ob = self.get_ob()
return ob
def step(self, action):
self.rogue_env.step(action)
# reward and done check
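# +1 for reaching the goal arm that matches the indicator (yellow -> red tile, green -> blue tile),
# -1 for the wrong arm, and a small per-step penalty otherwise.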
if self.rogue_env.map_.get_block(self.rogue_env.agent_position).name == "red_tile":
done = True
reward = 1.0 if self.indicator == "Y" else -1.0
elif self.rogue_env.map_.get_block(self.rogue_env.agent_position).name == "blue_tile":
done = True
reward = 1.0 if self.indicator == "G" else -1.0
elif self.t == self.max_step:
done = True
reward = -0.04
else:
done = False
reward = -0.04
# get observation
ob = self.get_ob()
self.t += 1
self.total_reward += reward
return ob, reward, done, self.rogue_env
def render(self):
self.rogue_env.print_map()
print("total_reward {0:0.2f}".format(self.total_reward))
max_episode = 20
max_step = 50
def render(env, episode, t, ob, sleep_time, message=""):
os.system("clear")
print("episode", episode)
print("step", t)
env.render()
print("ob", ob)
time.sleep(sleep_time)
print(message)
def main():
env = I_MazeEnv()
for episode in range(max_episode):
ob = env.reset()
for t in range(max_step):
render(env, episode, t, ob, 0.1)
action = random.choice(range(4)) # random agent
ob, reward, done, info = env.step(action)
if done:
render(env, episode, t, ob, 10, "Episode finished after {} timesteps".format(t+1))
break
if __name__ == "__main__":
main()
| mit | 2,953,751,811,628,154,000 | 31.418033 | 99 | 0.565613 | false |
kata198/AdvancedHTMLParser | AdvancedHTMLParser/xpath/_body.py | 1 | 84083 | '''
Copyright (c) 2019 Timothy Savannah under terms of LGPLv3. All Rights Reserved.
See LICENSE (https://gnu.org/licenses/lgpl-3.0.txt) for more information.
See: https://github.com/kata198/AdvancedHTMLParser for full information
==INTERNAL==
xpath._body.py - Internal module for dealing with items within the "body" of a filter expression on a tag
'''
# vim: set ts=4 sw=4 st=4 expandtab :
import copy
import re
from ..Tags import TagCollection
from ..compat import STRING_TYPES
from ..utils import tostr
from .exceptions import XPathNotImplementedError, XPathRuntimeError, XPathParseError
from ._filters import _mk_xpath_op_filter_tag_is_nth_child_index
from .null import Null
# __all__ is currently set to what "parsing" imports
__all__ = ('parseBodyStringIntoBodyElements', 'BodyElement', 'BodyElementOperation', 'BodyElementValue', 'BodyElementValueGenerator', 'BodyLevel_Top')
class BodyElement(object):
'''
BodyElement - Base class of body elements.
Every distinct "unit" within a body, be it a static value or a function call, or otherwise,
are subclassed from this type.
'''
@classmethod
def createFromMatch(cls, curBodyStr, matchObj):
'''
createFromMatch - Create this BodyElement from a given match object, and return the element and remainder for parsing
@param curBodyStr <str> - The current body string (matchObj should have matched at the head of this)
@param matchObj <re.match> - The match object
@return tuple( createdElement<BodyElement>, remainingBodyStr<str> ) - A tuple of the created element and the remaining portion to parse
'''
groupDict = matchObj.groupdict()
thisElement = cls( **groupDict )
curBodyStr = curBodyStr[ matchObj.span()[1] : ]
return ( thisElement, curBodyStr )
# XXX: This is a container for BodyElements, but itself can be treated as a BodyElement.
# Should give same parent class, or keep separate?
class BodyLevel(BodyElement):
'''
BodyLevel - A single "level" of a body
'''
VALIDATE_ONLY_BOOLEAN_OR_STR = False
def __init__(self):
'''
__init__ - Create this object
'''
self.bodyElements = []
def __repr__(self):
'''
__repr__ - Get a string representation of this object as codeish
@return <str> - String repr
'''
return "%s( bodyElements = %s )" %( self.__class__.__name__, repr(self.bodyElements))
# TODO: Give these a better name, as they could contain BodyElement or BodyLevels
def appendBodyElement(self, bodyElement):
'''
appendBodyElement - Add a body element to the current tail of this level
@param bodyElement <BodyElement> - The body element to add
'''
self.bodyElements.append(bodyElement)
def appendBodyElements(self, bodyElements):
'''
addBodyElements - Add a list of body elements to the current tail of this level
@param bodyElements list<BodyElement> - A list of BodyElements to add
'''
self.bodyElements += bodyElements
def __len__(self):
'''
__len__ - Get number of elements in this group
@return <int> - Number of BodyElements in this group (just this level)
'''
return len(self.bodyElements)
def getBodyElements(self):
'''
getBodyElements - Get the body elements associated with this level
@return list<BodyElement> - List of BodyElements associated with this level
'''
return self.bodyElements
def __iter__(self):
'''
__iter__ - Iterate over this object
'''
for bodyElement in self.bodyElements:
yield bodyElement
return
def evaluateLevelForTag(self, currentTag):
'''
evaluateLevelForTag - Shorthand version of "evaluateLevelForTags" but for one tag
@param currentTag <AdvancedTag> - A single tag
@return <BodyElementValue> - Resulting value for running this level against given tag
@see evaluateLevelForTags
'''
# TODO: Clean up this function
return self.evaluateLevelForTags( [currentTag] )[0]
def evaluateLevelForTags(self, currentTags):
'''
            evaluateLevelForTags - Evaluate this level against each tag, and return the final value for each tag.
                @param currentTags list/TagCollection < AdvancedTag > - The current set of tags to process
                @return list< BodyElementValue > - The resulting BodyElementValues, in a list with 1:1 correspondence (same order and size) to #currentTags
'''
# thisLevelElements - local reference to our elements
thisLevelElements = self.bodyElements
# resultPerTag - This list contains the values to be returned for each tag, in same order as #currentTags
resultPerTag = []
if len(thisLevelElements) == 0:
# This is an empty [], so just return the same
return resultPerTag
# TODO: Optimize this function, further
## These next two arrays provide the common and ordered interface to iterate through all various types which
# need evaluation.
# They are tuples, ( Class, Lambda to Evaluate ). All lambdas within the same set follow same signature
# ORDERED_BE_TYPES_TO_PROCESS_TAGS - The ordered types to process which generate values from the tag itself
ORDERED_BE_TYPES_TO_PROCESS_TAGS = [
(BodyLevel, lambda _bl, _curTag : _bl.evaluateLevelForTag(_curTag) ),
(BodyElementValueGenerator, lambda _bevg, _curTag : _bevg.resolveValueFromTag(_curTag) ),
]
# ORDERED_BE_TYPES_TO_PROCESS_VALUES - The ordered types to process which generate values from left side and right side
ORDERED_BE_TYPES_TO_PROCESS_VALUES = [
(BodyElementOperation, lambda _beo, _leftSide, _rightSide : _beo.performOperation(_leftSide, _rightSide) ),
(BodyElementComparison, lambda _bec, _leftSide, _rightSide : _bec.doComparison(_leftSide, _rightSide) ),
(BodyElementBooleanOps, lambda _bebo, _leftSide, _rightSide : _bebo.doBooleanOp(_leftSide, _rightSide) ),
]
# Iterate over all tags
for thisTag in currentTags:
# curElements - The current set of elements for this tag, as we unroll, this will change.
# Initial value will be reference to the original set of elements
curElements = thisLevelElements
# Run through the tag-processing (value generators, sublevels) ones first
for typeToProcess, processFunction in ORDERED_BE_TYPES_TO_PROCESS_TAGS:
curElements = [ (issubclass( curElement.__class__, typeToProcess ) and processFunction( curElement, thisTag )) or curElement for curElement in curElements ]
# # nextElements - We will assemble into this list the next iteration of #curElements
# nextElements = []
#
# for curElement in curElements:
#
# curElementClass = curElement.__class__
#
# if not issubclass(curElementClass, typeToProcess):
# # Not processing this type, just put back on the list
# nextElements.append( curElement )
#
# else:
# # Processing type, get new value
# generatedValue = processFunction( curElement, thisTag )
# nextElements.append( generatedValue )
#
# # Update #curElements
# curElements = nextElements
# Great, now we have to start keeping track of left/right and process the rest
for typeToProcess, processFunction in ORDERED_BE_TYPES_TO_PROCESS_VALUES:
# nextElements - We will assemble into this list the next iteration of #curElements
nextElements = []
# leftSide - this will be the left side value
leftSide = None
numElements = len(curElements)
i = 0
while i < numElements:
curElement = curElements[i]
curElementClass = curElement.__class__
if not issubclass(curElementClass, typeToProcess ):
# We aren't processing this type, just add it back
nextElements.append( curElement )
# Update previous value and increment counter
leftSide = curElement
i += 1
# Loop back
continue
else:
# Validate that we are not at the end (need to gather a right)
if (i + 1) >= numElements:
# TODO: Better error message?
raise XPathParseError('XPath expression ends in an operation, no right-side to operation.')
# Validate left is right type
if not issubclass(leftSide.__class__, BodyElementValue):
# TODO: Better error message?
raise XPathParseError('XPath expression contains two consecutive operations (left side)')
# Grab and validate right is right type
rightSide = curElements[i + 1]
if not issubclass(rightSide.__class__, BodyElementValue):
# TODO: Better error message?
raise XPathParseError('XPath expression contains two consecutive operations (right side)')
# Resolve a new value feeding left, right into the function
resolvedValue = processFunction( curElement, leftSide, rightSide)
# TODO: Remove this check?
if not issubclass(resolvedValue.__class__, BodyElementValue):
# Not a value? Error for now, may add back looping later if necessary for some ops
raise XPathRuntimeError('XPath expression for op "%s" did not return a BodyElementValue, as expected. Got: <%s> %s' % ( \
repr(curElement),
resolvedValue.__class__.__name__,
repr(resolvedValue),
)
)
# Pop the last value (left side), drop the operation, load the resolved value in place.
nextElements = nextElements[ : -1 ] + [resolvedValue]
# Update new left to this generated value
leftSide = resolvedValue
# Move past right side
i += 2
# Update #curElements
curElements = nextElements
# END: for typeToProcess, processFunction in ORDERED_BE_TYPES_TO_PROCESS_VALUES:
# At this point, should be only one value left. Zero was already handled at start
numElementsRemaining = len(curElements)
if numElementsRemaining != 1:
raise XPathRuntimeError('Got unexpected current number of elements at the end. Expected 1, got %d. Repr: %s' % ( \
numElementsRemaining,
repr(curElements),
)
)
finalElement = curElements[0]
finalElementClass = finalElement.__class__
# TODO: Remove this check?
try:
finalElementValueType = finalElement.VALUE_TYPE
except AttributeError:
# Missing this class attribute implicitly also checks the type,
# as no other types provide such a name.
# TODO: Do a better repr, maybe with string of the xpath?
raise XPathRuntimeError('Final Value resolved from level """%s""" was not a BodyElementValue, as was expected.\nIt is a: %s \nrepr: %s' % ( \
repr(self),
finalElementClass.__name__,
repr(finalElement),
)
)
if self.VALIDATE_ONLY_BOOLEAN_OR_STR and finalElementValueType not in (BODY_VALUE_TYPE_BOOLEAN, BODY_VALUE_TYPE_NUMBER):
                raise XPathRuntimeError('Final value resolved from level """%s""" was not a number or a boolean, cannot proceed.\nVALUE_TYPE is %s.\nClass: %s\nRepr: %s' % ( \
repr(self),
_bodyValueTypeToDebugStr(finalElementValueType),
finalElementClass.__name__,
repr(finalElement),
)
)
# Validated and processed this tag on this level, append to the result array
resultPerTag.append(finalElement)
# END for thisTag in currentTags
return resultPerTag
# TODO: Need to refactor this a bit maybe, to support levels as designed
class BodyLevel_Top(BodyLevel):
'''
BodyLevel_Top - The topmost level of a body. This is the final evaluation before passing onto the next tag filter
'''
VALIDATE_ONLY_BOOLEAN_OR_STR = True
def filterTagsByBody(self, currentTags):
'''
            filterTagsByBody - Evaluate the topmost level (and all sub levels), and return the tags that match.
            For the topmost level, we run all components left-to-right and evaluate the result.
            If a number remains, we treat it as a 1-origin index and retain the tag only if it is the Nth same-named child of its parent.
            If a boolean remains, True retains the tag and False discards it.
@param currentTags TagCollection/list<AdvancedTag> - Current set of tags to validate
@return TagCollection - The tags which passed validation
'''
retTags = []
if not currentTags:
return retTags
# Process this level and all subs, get the final value per tag for processing
# validation to retain or discard
finalResultPerTag = self.evaluateLevelForTags(currentTags)
numTags = len(currentTags)
for i in range(numTags):
currentTag = currentTags[i]
finalValue = finalResultPerTag[i]
#finalValueClass = finalValue.__class__
# TODO: We should be able to optimize this loop as all results will have either
# a number, or a boolean
if finalValue.VALUE_TYPE == BODY_VALUE_TYPE_BOOLEAN:
shouldRetainTag = finalValue.getValue()
if shouldRetainTag is True:
retTags.append( currentTag )
#elif finalValue.VALUE_TYPE == BODY_VALUE_TYPE_NUMBER:
else:
# This should have already been validated
theValue = finalValue.getValue()
innerNum = int( theValue )
if float(innerNum) != theValue:
# Float value, not integer, return nothing.
continue
# TODO: Better.
testFunc = _mk_xpath_op_filter_tag_is_nth_child_index(currentTag.tagName, innerNum)
retTags += testFunc( currentTag )
#else:
# raise XPathRuntimeError('Error, unexpected value type %s on value: %s' %( _bodyValueTypeToDebugStr(finalValue.VALUE_TYPE), repr(finalValue) ) )
return TagCollection(retTags)
# applyFunction - follow this interface, for now.
applyFunction = filterTagsByBody
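# Illustrative sketch of how the top level is applied (assuming "bodyLevelTop" was produced by parsing
#  a body string such as '@id = "main"', and "tags" is a TagCollection of candidate tags):
#
#       matchedTags = bodyLevelTop.filterTagsByBody(tags)
#
#  A per-tag boolean result retains or discards that tag; a per-tag numeric result (e.g. from the body
#  string '3') retains the tag only when it is the 3rd same-named child of its parent.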
#############################
## Values ##
#############################
## Values are calculated (returned from a BodyElementValueGenerator or otherwise),
# or static (provided explicitly in body string).
# These are given separate bases, and are all subclasses of BodyElement.
# Values are associated with a type (cls.VALUE_TYPE), defined as one of the types below.
# Values are wrapped within the associated BodyElementValue subclasses rather than as native python types
##### #####
### BodyElementValue types ###
##### #####
# NOTE: Use enum type? Requires additional package under python2
# An enumeration of the possible types a BodyElementValue subclass may hold
BODY_VALUE_TYPE_UNKNOWN = 0
BODY_VALUE_TYPE_NUMBER = 1
# Leave a gap for 2 should we split float/int
BODY_VALUE_TYPE_STRING = 3
BODY_VALUE_TYPE_BOOLEAN = 4
# List - Unimplemented
BODY_VALUE_TYPE_LIST = 5
BODY_VALUE_TYPE_NULL = 6
# BODY_VALUE_TYPE_TO_STR - The value type integer to a string representation.
BODY_VALUE_TYPE_TO_STR = {
BODY_VALUE_TYPE_UNKNOWN : "unknown",
BODY_VALUE_TYPE_NUMBER : "number",
BODY_VALUE_TYPE_STRING : "string",
BODY_VALUE_TYPE_BOOLEAN : "boolean",
BODY_VALUE_TYPE_LIST : "list",
BODY_VALUE_TYPE_NULL : "null",
}
def _bodyValueTypeToDebugStr(bodyValue):
return "<%d>%s" %(bodyValue, BODY_VALUE_TYPE_TO_STR[bodyValue])
class BodyElementValue(BodyElement):
'''
BodyElementValue - Base class of BodyElements which represent a static or resolved value.
These wrap the native python representation of the values.
A class-level varible, VALUE_TYPE, defines the type associated with the value.
'''
# VALUE_TYPE - The type of this value. Should be set by subclass
VALUE_TYPE = BODY_VALUE_TYPE_UNKNOWN
def __init__(self, value):
'''
__init__ - Create this element as a wrapper around an already-calculated value
@param value <...> - The python-native value to be held by this element.
This will be passed into self.setValue for processing/validation
'''
self.value = None
self.setValue(value)
def getValue(self):
'''
            getValue - Get the value associated with this object
@return <...> - The python-native value wrapped by this object
'''
return self.value
def setValue(self, newValue):
'''
setValue - Sets the value associated with this object
This will be called on all value sets, including __init__ (and from regex)
@param newValue <???> - The new value for this object
'''
self.value = newValue
def __repr__(self):
'''
__repr__ - Get a string representation of this value, with code information
'''
className = self.__class__.__name__
valueType = self.VALUE_TYPE
valueTypeStr = BODY_VALUE_TYPE_TO_STR[ valueType ]
valueRepr = repr( self.getValue() )
return "%s<VALUE_TYPE=%d[%s]>(value=%s)" %( className, valueType, valueTypeStr, valueRepr )
class BodyElementValue_Boolean(BodyElementValue):
'''
BodyElementValue_Boolean - A True/False BodyElementValue, like returned by a comparison operation
'''
VALUE_TYPE = BODY_VALUE_TYPE_BOOLEAN
def setValue(self, newValue):
'''
setValue - Set a boolean value
@param newValue <bool> - Boolean value
@see BodyElementValue.setValue
'''
if not isinstance(newValue, bool):
raise XPathRuntimeError('BodyElementValue_Boolean tried to setValue as a non-boolean type. Was: %s . Repr: %s' %( newValue.__class__.__name__, repr(newValue) ))
self.value = newValue
class BodyElementValue_String(BodyElementValue):
'''
BodyElementValue_String - A string BodyElementValue
'''
VALUE_TYPE = BODY_VALUE_TYPE_STRING
def setValue(self, newValue):
'''
setValue - Set a string value
@param newValue <str> - String value
@see BodyElementValue.setValue
'''
# TODO: Check type of newValue against str (or str/unicode for py2) ?
self.value = tostr(newValue)
class BodyElementValue_Null(BodyElementValue):
'''
BodyElementValue_Null - A null BodyElementValue
'''
VALUE_TYPE = BODY_VALUE_TYPE_NULL
def __init__(self, value=Null):
'''
__init__ - Create this object. Override default to allow passing no value (there is only one)
'''
BodyElementValue.__init__(self, value)
def setValue(self, newValue=Null):
'''
setValue - Set a null value
                @param newValue <Null> - The Null value (None is also accepted, and is converted to Null)
@see BodyElementValue.setValue
'''
# TODO: Do we want this? None == Null?
if newValue is None:
newValue = Null
if newValue != Null:
raise XPathRuntimeError('BodyElementValue_Null tried to set a value but was not Null. Was: %s . Repr: %s' %( newValue.__class__.__name__, repr(newValue)))
self.value = newValue
class BodyElementValue_Number(BodyElementValue):
'''
BodyElementValue_Number - A numeric BodyElementValue
'''
VALUE_TYPE = BODY_VALUE_TYPE_NUMBER
def setValue(self, newValue):
'''
setValue - Sets the inner value to a float, or raises exception on failure to convert.
@param newValue <str/float> - A number (positive or negative, integer or float)
@raises XPathRuntimeError - Type passed is not convertable to float
                @see BodyElementValue.setValue
'''
try:
self.value = float(newValue)
except Exception as fe:
            raise XPathRuntimeError('Runtime Type Error: BodyElementValue_Number was passed a value, <%s> %s -- but could not convert to float. %s %s' %( \
type(newValue).__name__,
repr(newValue),
fe.__class__.__name__,
str(fe),
)
)
class BodyElementValue_List(BodyElementValue):
'''
BodyElementValue_List - A BodyElementValue which is a list of other values.
All elements within this list will be other BodyElementValues, rather than raw values.
'''
VALUE_TYPE = BODY_VALUE_TYPE_LIST
def __init__(self, initialValues=None):
'''
__init__ - Create this object
@param initialValues <None/list> Initial values to load into the internal list.
'''
if not initialValues:
initialValues = []
BodyElementValue.__init__(self, initialValues)
def setValue(self, newValues):
'''
setValue - Replace the previous lists with new list
@param newValues list<...> - A new list from which to create the internal list.
All items must have a related BodyElementValue type, or already be one.
'''
updatedList = [ ( issubclass(thisVal.__class__, BodyElementValue) and thisVal ) or _pythonValueToBodyElementValue(thisVal) for thisVal in newValues ]
self.value = updatedList
# PYTHON_TYPE_NAME_TO_BODY_VALUE_CLASS - The __name__ of the type(val), to the associated BEV container
PYTHON_TYPE_NAME_TO_BODY_VALUE_CLASS = {
'int' : BodyElementValue_Number,
'float' : BodyElementValue_Number,
'str' : BodyElementValue_String,
'unicode' : BodyElementValue_String,
'bool' : BodyElementValue_Boolean,
'NoneType' : BodyElementValue_Null,
'list' : BodyElementValue_List,
'tuple' : BodyElementValue_List,
'set' : BodyElementValue_List,
}
def _pythonValueToBodyElementValue(pythonValue):
'''
_pythonValueToBodyElementValue - Convert a native/raw python value to
its respective BodyElementValue subclassed container.
@param pythonValue <???> - The python "raw" value (such as an int or a string)
@return <BodyElementValue subclass> - A created container body element value wrapping provided value
'''
pythonValueTypeName = type(pythonValue).__name__
try:
bodyElementValueClass = PYTHON_TYPE_NAME_TO_BODY_VALUE_CLASS[ pythonValueTypeName ]
except KeyError:
# XXX: Exception or just use an "unknown" base BodyElementValue?
# Maybe better to just shut it down early rather than introduce questionable things on down the line
raise XPathRuntimeError('Failed to find a matching BodyElementValue type from python type "%s" ! Repr: %s' %( pythonValueTypeName, repr(pythonValue) ) )
return bodyElementValueClass( pythonValue )
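# Example (illustrative) of the python-to-BodyElementValue wrapping performed above:
#
#       _pythonValueToBodyElementValue(5)       # -> BodyElementValue_Number, .getValue() == 5.0
#       _pythonValueToBodyElementValue("abc")   # -> BodyElementValue_String, .getValue() == "abc"
#       _pythonValueToBodyElementValue(None)    # -> BodyElementValue_Null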
#############################
## Static Values ##
#############################
# STATIC_VALUES_RES - A list of tuples, which will be iterated upon parsing a body to create the BodyElementValue_StaticValue types
# Tuples are in format: ( re.compile'd expression, BodyElementValue_StaticValue child class implementing related )
#
# Where all of the named groups within the compiled regular expression are passed to __init__ of the related class.
STATIC_VALUES_RES = []
class BodyElementValue_StaticValue(BodyElementValue):
'''
BodyElementValue_StaticValue - Base class of static values ( appear in the body string directly, e.x. "hello" or 12 )
'''
pass
class BodyElementValue_StaticValue_String(BodyElementValue_StaticValue):
'''
BodyElementValue_StaticValue_String - A StaticValue which represents a string
'''
VALUE_TYPE = BODY_VALUE_TYPE_STRING
## String will have two expressions to generate -- one for single quotes, one for double quotes. Both extract the inner string
# Can combine into one, but this is more clear.
# Double quoted string
#BEV_SV_STRING_DOUBLE_QUOTE_RE = re.compile(r'''^([ \t]*[\"](?P<value>[^"]*)[\"][ \t]*)''')
BEV_SV_STRING_DOUBLE_QUOTE_RE = re.compile(r'''^([ \t]*[\"](?P<value>([\\]["]|[^"])*)[\"][ \t]*)''')
STATIC_VALUES_RES.append( (BEV_SV_STRING_DOUBLE_QUOTE_RE, BodyElementValue_StaticValue_String) )
# Single quoted string
#BEV_SV_STRING_SINGLE_QUOTE_RE = re.compile(r"""^([ \t]*[\'](?P<value>[^']*)[\'][ \t]*)""")
BEV_SV_STRING_SINGLE_QUOTE_RE = re.compile(r"""^([ \t]*[\'](?P<value>([\\][']|[^'])*)[\'][ \t]*)""")
STATIC_VALUES_RES.append( (BEV_SV_STRING_SINGLE_QUOTE_RE, BodyElementValue_StaticValue_String) )
class BodyElementValue_StaticValue_Number(BodyElementValue_StaticValue):
'''
BodyElementValue_StaticValue_Number - StaticValue to represent a number
'''
VALUE_TYPE = BODY_VALUE_TYPE_NUMBER
def setValue(self, newValue):
'''
setValue - Sets the inner value to a float, or raises exception on failure to convert.
@param newValue <str/float> - A number (positive or negative, integer or float)
@raises XPathRuntimeError - Type passed is not convertable to float
@see BodyElementValue_StaticValue.setValue
'''
try:
self.value = float(newValue)
except Exception as fe:
raise XPathRuntimeError('Runtime Type Error: BodyElementValue_StaticValue_Number was passed a value, <%s> %s -- but could not convert to float. %s %s' %( \
type(newValue).__name__,
repr(newValue),
fe.__class__.__name__,
str(fe),
)
)
# NOTE: Look into spaces after negative sign
BEV_SV_NUMBER_RE = re.compile(r'''^([ \t]*(?P<value>([-]){0,1}(([\d]*[\.][\d]+)|([\d]+)))[ \t]*)''')
STATIC_VALUES_RES.append( (BEV_SV_NUMBER_RE, BodyElementValue_StaticValue_Number) )
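# Example (illustrative) of how a static number is consumed from the head of a body string during parsing:
#
#       matchObj = BEV_SV_NUMBER_RE.match('3.5 = @data-count')
#       (numElement, remainder) = BodyElementValue_StaticValue_Number.createFromMatch('3.5 = @data-count', matchObj)
#       # numElement.getValue() == 3.5  ;  remainder == '= @data-count'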
#############################
## Value Generators ##
#############################
# VALUE_GENERATOR_RES - A list of tuples, which will be iterated upon parsing a body to create the ValueGenerator types
# Tuples are in format: ( re.compile'd expression, BodyElementValueGenerator child class implementing related )
#
# Where all of the named groups within the compiled regular expression are passed to __init__ of the related class.
VALUE_GENERATOR_RES = []
class BodyElementValueGenerator(BodyElement):
'''
BodyElementValueGenerator - Base class of BodyElements which resolve to a BodyValue after execution with context of a tag
'''
def resolveValueFromTag(self, thisTag):
'''
resolveValueFromTag - Process "thisTag" to obtain a BodyElementValue relative to this tag and the extending class's implementation
@param thisTag <Tags.AdvancedTag> - The tag of relevance
@return <BodyElementValue> - The resulting value
'''
raise NotImplementedError('BodyElementValueGenerator.resolveValueFromTag is not implemented in type %s! Must use a class extending BodyElementValueGenerator' % ( \
self.__class__.__name__,
)
)
class BodyElementValueGenerator_FetchAttribute(BodyElementValueGenerator):
def __init__(self, attributeName):
'''
__init__ - Create this Value Generator to fetch the value of an attribute
on a tag.
@param attributeName <str> - The name of the attribute to fetch
'''
BodyElementValueGenerator.__init__(self)
self.attributeName = attributeName
def resolveValueFromTag(self, thisTag):
'''
resolveValueFromTag - Fetch the value of a given attribute from a tag, and return the value.
@param thisTag <Tags.AdvancedTag> - An instance of a tag on which to work
@return <BodyElementValue> - The value of the attribute, or Null, wrapped in a BodyElementValue container
'''
attributeName = self.attributeName
if attributeName == '*' or '*' in attributeName:
raise XPathNotImplementedError('Wildcard attributes are not yet supported!')
# TODO: Can just use getAttribute with a default?
if not thisTag.hasAttribute( attributeName ):
# No attribute present, return Null
return BodyElementValue_Null()
val = '%s' %( thisTag.getAttribute(attributeName), )
return BodyElementValue_String(val)
def __repr__(self):
'''
__repr__ - Get string representation of this object
'''
return """%s( attributeName = "%s" )""" %( self.__class__.__name__, self.attributeName)
BEVG_FETCH_ATTRIBUTE_RE = re.compile(r'^[ \t]*[@](?P<attributeName>([*]|[a-zA-Z_][a-zA-Z0-9_\-]*))[ \t]*')
VALUE_GENERATOR_RES.append( (BEVG_FETCH_ATTRIBUTE_RE, BodyElementValueGenerator_FetchAttribute) )
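# Example (illustrative): the '@attributeName' syntax in a body resolves through this generator.
#  Assuming "tag" is an AdvancedTag with id="main" and no "title" attribute:
#
#       BodyElementValueGenerator_FetchAttribute('id').resolveValueFromTag(tag)     # -> BodyElementValue_String wrapping "main"
#       BodyElementValueGenerator_FetchAttribute('title').resolveValueFromTag(tag)  # -> BodyElementValue_Null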
class BodyElementValueGenerator_Text(BodyElementValueGenerator):
'''
BodyElementValueGenerator_Text - Implement the 'text()' function
'''
def __init__(self, functionInner=None):
BodyElementValueGenerator.__init__(self)
def resolveValueFromTag(self, thisTag):
return BodyElementValue_String( thisTag.innerText )
BEVG_TEXT_RE = re.compile(r'^([ \t]*[tT][eE][xX][tT][ \t]*[\(][ \t]*[\)][ \t]*)')
VALUE_GENERATOR_RES.append( (BEVG_TEXT_RE, BodyElementValueGenerator_Text) )
class BodyElementValueGenerator_Last(BodyElementValueGenerator):
'''
        BodyElementValueGenerator_Last - Implement the 'last()' function
'''
def __init__(self, functionInner=None):
BodyElementValueGenerator.__init__(self)
def resolveValueFromTag(self, thisTag):
parentElement = thisTag.parentElement
if parentElement is None:
# No parent, last() must be 1
            return BodyElementValue_Number(1)
thisTagName = thisTag.tagName
childrenOfRelevance = [ childEm for childEm in parentElement.children if childEm.tagName == thisTagName ]
return BodyElementValue_Number( len( childrenOfRelevance ) )
BEVG_LAST_RE = re.compile(r'''^([ \t]*[lL][aA][sS][tT][ \t]*[\(][ \t]*[\)][ \t]*)''')
VALUE_GENERATOR_RES.append( (BEVG_LAST_RE, BodyElementValueGenerator_Last) )
class BodyElementValueGenerator_Position(BodyElementValueGenerator):
'''
BodyElementValueGenerator_Position - Implement the 'position()' function
'''
def __init__(self, functionInner=None):
BodyElementValueGenerator.__init__(self)
def resolveValueFromTag(self, thisTag):
parentElement = thisTag.parentElement
if parentElement is None:
# No parent, position() must be 1
            return BodyElementValue_Number(1)
thisTagName = thisTag.tagName
childrenOfRelevance = [ childEm for childEm in parentElement.children if childEm.tagName == thisTagName ]
return BodyElementValue_Number( childrenOfRelevance.index( thisTag ) + 1 )
BEVG_POSITION_RE = re.compile(r'^([ \t]*[pP][oO][sS][iI][tT][iI][oO][nN][ \t]*[\(][ \t]*[\)][ \t]*)')
VALUE_GENERATOR_RES.append( (BEVG_POSITION_RE, BodyElementValueGenerator_Position) )
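# Example (illustrative): assuming "secondLi" is the 2nd of 3 <li> children of the same parent:
#
#       BodyElementValueGenerator_Position().resolveValueFromTag(secondLi)  # -> BodyElementValue_Number wrapping 2.0
#       BodyElementValueGenerator_Last().resolveValueFromTag(secondLi)      # -> BodyElementValue_Number wrapping 3.0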
##############################
# ValueGenerator Functions #
##############################
# TODO: Create a separate list for REs that associate with functions, rather than sharing with single-level BodyElementValueGenerators?
class BodyElementValueGenerator_Function(BodyElementValueGenerator):
'''
BodyElementValueGenerator_Function - Base class for BodyElementValueGenerator's which are functions (and can take nested levels)
'''
# FUNCTION_MIN_ARGS - Class attribute for the minimum number of args lest there be a parsing error
FUNCTION_MIN_ARGS = 0
# FUNCTION_NAME_STR - Name of the function
FUNCTION_NAME_STR = 'unknown'
@classmethod
def createFromMatch(cls, curBodyStr, matchObj):
'''
createFromMatch - Create this BodyElement from a given match object, and return the element and remainder for parsing
@param curBodyStr <str> - The current body string (matchObj should have matched at the head of this)
@param matchObj <re.match> - The match object
@return tuple( createdElement<BodyElement>, remainingBodyStr<str> ) - A tuple of the created element and the remaining portion to parse
'''
groupDict = matchObj.groupdict()
restOfBody = groupDict['restOfBody']
( fnArgElements, remainingStr ) = _parseFunctionArgsToBodyElements(restOfBody)
if len(fnArgElements) < cls.FUNCTION_MIN_ARGS:
raise XPathParseError('"%s" function takes at least %d arguments, but found only %d.\nError at: %s' % ( \
cls.FUNCTION_NAME_STR,
cls.FUNCTION_MIN_ARGS,
len(fnArgElements),
repr(curBodyStr),
)
)
thisElement = cls( fnArgElements )
return ( thisElement, remainingStr )
def __init__(self, fnArgElements=None):
'''
__init__ - Create this object
'''
if fnArgElements is None:
# TODO: Error?
fnArgElements = []
if len(fnArgElements) < self.FUNCTION_MIN_ARGS:
# TODO: More context? Should be raised in #createFromMatch but do here as well for completeness...
raise XPathParseError('"%s" function takes at least %d arguments, but found only %d.' %( self.FUNCTION_NAME_STR, self.FUNCTION_MIN_ARGS, len(fnArgElements) ) )
self.fnArgElements = fnArgElements
def resolveValueFromTag(self, thisTag):
'''
resolveValueFromTag - Return the BodyElementValue produced by executing this function in the context of a given tag
@param thisTag <AdvancedTag> - The tag of interest
@return <BodyElementValue> - The calculated value derived by executing this function
'''
raise NotImplementedError('BodyElement type "%s" (function "%s" ) must implement "BodyElementValueGenerator_Function.resolveValueFromTag" but does not!' % ( \
self.__class__.__name__,
self.FUNCTION_NAME_STR,
)
)
def __repr__(self):
'''
__repr__ - String repr of this class
'''
return """BodyElementValueGenerator_Function<functionName = "%s"> ( fnArgElements = %s )""" %(self.FUNCTION_NAME_STR, repr(self.fnArgElements) )
class BodyElementValueGenerator_Function_Concat(BodyElementValueGenerator_Function):
'''
BodyElementValueGenerator_Function_Concat - BodyElementValueGenerator class implementing concat function
'''
# FUNCTION_MIN_ARGS - Class attribute for the minimum number of args lest there be a parsing error
FUNCTION_MIN_ARGS = 2
# FUNCTION_NAME_STR - Name of the function
FUNCTION_NAME_STR = 'concat'
@classmethod
def createFromMatch(cls, curBodyStr, matchObj):
'''
createFromMatch - Create this BodyElement from a given match object, and return the element and remainder for parsing
@param curBodyStr <str> - The current body string (matchObj should have matched at the head of this)
@param matchObj <re.match> - The match object
@return tuple( createdElement<BodyElement>, remainingBodyStr<str> ) - A tuple of the created element and the remaining portion to parse
'''
# NOTE: The first part is copied for now due to inheritence
#
# We are looking to see if we can optimize this function call to a static value, if resolveable at run time
# Generate the base levels for all the args
groupDict = matchObj.groupdict()
restOfBody = groupDict['restOfBody']
( fnArgElements, remainingStr ) = _parseFunctionArgsToBodyElements(restOfBody)
if len(fnArgElements) < cls.FUNCTION_MIN_ARGS:
raise XPathParseError('"%s" function takes at least %d arguments, but found only %d.\nError at: %s' % ( \
cls.FUNCTION_NAME_STR,
cls.FUNCTION_MIN_ARGS,
len(fnArgElements),
repr(curBodyStr),
)
)
thisElement = cls( fnArgElements )
# Check if we can optimize this whole thing to a static value
staticValueParts = []
isStillStatic = True
for fnArgElement in thisElement.fnArgElements:
fnArgElementClass = fnArgElement.__class__
if issubclass(fnArgElementClass, BodyElementValue):
# Already a value, throw it on the heap
thisPartValue = fnArgElement.getValue()
# TODO: Handle Null -> '' ?
staticValueParts.append(thisPartValue)
continue
elif issubclass(fnArgElementClass, BodyLevel):
# A level, iterate over it.
# Don't bother with recursive, if more than one level deep we won't optimize
for sublevelBodyElement in fnArgElement:
if issubclass(sublevelBodyElement.__class__, BodyElementValue):
sublevelPartValue = sublevelBodyElement.getValue()
staticValueParts.append(sublevelPartValue)
continue
# Not a value already, abort optimization attempt
isStillStatic = False
break
else:
# Not a value already, abort optimization attempt
isStillStatic = False
break
if isStillStatic is False:
# Leave the loop if not static
break
if isStillStatic is True:
# Huzzah! We have unrolled everything and retained a static value!
newElementValue = BodyElementValue_String( ''.join( staticValueParts ) )
#print ( "\nOptimized!\nFrom: %s\nTo: %s\n" %( repr(thisElement), repr(newElementValue) ) )
return (newElementValue, remainingStr)
#else:
#print ( "\nFAILED TO OPTIMIZE!\nFrom: %s\n" %( repr(thisElement), ))
# Failed to optimize, return the concat instance with levels
return ( thisElement, remainingStr )
def resolveValueFromTag(self, thisTag):
'''
resolveValueFromTag - Return the concatenated string
@param thisTag <AdvancedTag> - The tag of interest
@return <BodyElementValue_String> - The concatenated string as a body element value
@see BodyElementValueGenerator_Function.resolveValueFromTag
'''
valParts = []
for fnArgElement in self.fnArgElements:
valPartElement = fnArgElement.evaluateLevelForTag(thisTag)
valPartElementValue = valPartElement.getValue()
if valPartElementValue == Null:
# If we got a null, treat it as an empty string for concatenation purposes
valPartElementValue = ''
valParts.append(valPartElementValue)
val = ''.join(valParts)
return BodyElementValue_String(val)
#BEVG_CONCAT_FUNCTION_RE = re.compile(r'''^([ \t]*[cC][oO][nN][cC][aA][tT][ \t]*[\(][ \t]*(?P<fnArgsStr>[^\)]+)[ \t]*[\)][ \t]*)''')
BEVG_FUNCTION_CONCAT_RE = re.compile(r'''^([ \t]*[cC][oO][nN][cC][aA][tT][ \t]*[\(][ \t]*(?P<restOfBody>.+))$''')
VALUE_GENERATOR_RES.append( (BEVG_FUNCTION_CONCAT_RE, BodyElementValueGenerator_Function_Concat) )
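# Example (illustrative): when every argument to concat() is already static, the call is collapsed at parse
#  time -- e.g. the body fragment 'concat("foo", "bar")' parses directly into a BodyElementValue_String
#  holding "foobar". With a dynamic argument, e.g. 'concat(@id, "-suffix")', the function element is kept
#  and resolved per tag via resolveValueFromTag.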
class BodyElementValueGenerator_Function_Contains(BodyElementValueGenerator_Function):
'''
BodyElementValueGenerator_Function_Contains - BodyElementValueGenerator class implementing contains function
'''
# FUNCTION_MIN_ARGS - Class attribute for the minimum number of args lest there be a parsing error
FUNCTION_MIN_ARGS = 2
# FUNCTION_NAME_STR - Name of the function
FUNCTION_NAME_STR = 'contains'
def __init__(self, fnArgElements=None):
'''
__init__ - Create this object
'''
BodyElementValueGenerator_Function.__init__(self, fnArgElements)
# Ensure we are given exactly two arguments
fnArgElements = self.fnArgElements
if len(fnArgElements) != 2:
raise XPathParseError('"contains" function takes exactly two arguments, but got %d. Args were: %s' % ( \
len(fnArgElements),
repr(fnArgElements),
)
)
self.string1Arg = fnArgElements[0]
self.string2Arg = fnArgElements[1]
def resolveValueFromTag(self, thisTag):
'''
resolveValueFromTag - Test if one string occurs within the other, and return the boolean result
@param thisTag <AdvancedTag> - The tag of interest
@return <BodyElementValue_Boolean> - True if string1 contains string2, otherwise False
@see BodyElementValueGenerator_Function.resolveValueFromTag
'''
string1ValueElement = self.string1Arg.evaluateLevelForTag(thisTag)
string2ValueElement = self.string2Arg.evaluateLevelForTag(thisTag)
try:
string1Value = str( string1ValueElement.getValue() )
except Exception as e1:
raise XPathRuntimeError('Error in contains() - cannot convert first argument to a string! It is %s' %( repr(string1ValueElement.getValue()), ))
try:
string2Value = str( string2ValueElement.getValue() )
except Exception as e2:
raise XPathRuntimeError('Error in contains() - cannot convert second argument to a string! It is %s' %( repr(string2ValueElement.getValue()), ))
containsResult = bool( string2Value in string1Value )
return BodyElementValue_Boolean(containsResult)
BEVG_FUNCTION_CONTAINS_RE = re.compile(r'''^([ \t]*[cC][oO][nN][tT][aA][iI][nN][sS][ \t]*[\(][ \t]*(?P<restOfBody>.+))$''')
VALUE_GENERATOR_RES.append( (BEVG_FUNCTION_CONTAINS_RE, BodyElementValueGenerator_Function_Contains) )
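# Example (illustrative): the body fragment 'contains(@class, "button")' resolves per tag to a
#  BodyElementValue_Boolean -- True when the tag's class attribute string contains the substring "button".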
class BodyElementValueGenerator_Function_NormalizeSpace(BodyElementValueGenerator_Function):
'''
BodyElementValueGenerator_NormalizeSpace - Implement the 'normalize-space()' function
'''
# FUNCTION_MIN_ARGS - Class attribute for the minimum number of args lest there be a parsing error
FUNCTION_MIN_ARGS = 0
# FUNCTION_NAME_STR - Name of the function
FUNCTION_NAME_STR = 'normalize-space'
def __init__(self, fnArgElements=None):
'''
__init__ - Create this object
'''
BodyElementValueGenerator_Function.__init__(self, fnArgElements)
        # Ensure we are given at most one argument
fnArgElements = self.fnArgElements
numArguments = len(fnArgElements)
if numArguments > 1:
raise XPathParseError('normalize-space function called with too many arguments (0 or 1 supported)')
if numArguments == 1:
self.getString = lambda _thisTag : self._getStringFromArgumentAndTag(0, _thisTag)
else:
self.getString = lambda _thisTag : _thisTag.innerText
def _getStringFromArgumentAndTag(self, argumentNum, thisTag):
'''
            _getStringFromArgumentAndTag - Get the string for the given argument index and tag
@param argumentNum <int> - The argument index
@param thisTag <AdvancedTag> - The tag of reference
@return <str> - The string held by that value
'''
        valueEm = self.fnArgElements[argumentNum].evaluateLevelForTag(thisTag)
        if not issubclass(valueEm.__class__, (BodyElementValue_String, BodyElementValue_Null) ):
            raise XPathRuntimeError('Got a value returned from within argument to normalize-space which was not a string! It was: %s' %( valueEm.VALUE_TYPE, ))
value = str(valueEm.getValue())
return value
    def resolveValueFromTag(self, thisTag):
        '''
            resolveValueFromTag - Normalize whitespace on the selected string (the argument's value, or the tag's inner text when no argument was given) and return it
                @param thisTag <AdvancedTag> - The tag of interest
                @return <BodyElementValue_String> - The string with leading/trailing whitespace stripped and internal runs of whitespace collapsed to a single space
                @see BodyElementValueGenerator_Function.resolveValueFromTag
        '''
        stringValue = self.getString(thisTag)
        # Strip leading/trailing whitespace, and collapse internal runs of whitespace to a single space
        normalizedValue = re.sub(r'[ \t\r\n]+', ' ', stringValue.strip())
        return BodyElementValue_String(normalizedValue)
BEVG_FUNCTION_NORMALIZE_SPACE_RE = re.compile(r'''^([ \t]*[nN][oO][rR][mM][aA][lL][iI][zZ][eE][\-][sS][pP][aA][cC][eE][ \t]*[\(][ \t]*(?P<restOfBody>.+))$''')
VALUE_GENERATOR_RES.append( (BEVG_FUNCTION_NORMALIZE_SPACE_RE, BodyElementValueGenerator_Function_NormalizeSpace) )
#############################
## Operations ##
#############################
# OPERATION_RES - A list of tuples, which will be iterated upon parsing a body to create the Operation types
# Tuples are in format: ( re.compile'd expression, BodyElementOperation child class implementing related )
#
# Where all of the named groups within the compiled regular expression are passed to __init__ of the related class.
OPERATION_RES = []
class BodyElementOperation(BodyElement):
'''
BodyElementOperation - Base class of BodyElements which perform some operation against the other body elements
'''
def performOperation(self, leftSide, rightSide):
raise NotImplementedError('BodyElementOperation.performOperation is not implemented in type %s! Must use a class extending BodyElementOperation' % ( \
self.__class__.__name__,
)
)
pass
class BodyElementOperation_Concat(BodyElementOperation):
'''
BodyElementOperation_Concat - Operation to handle the concat operator, "||"
'''
def performOperation(self, leftSide, rightSide):
'''
performOperation - Concatenate two strings
@param leftSide <str/BodyElementValue_String> - The left side string (will be the prefix)
@param rightSide <str/BodyElementValue_String> - The right side string (will be the suffix)
@return <BodyElementValue_String> - The concatenated string of leftSide + rightSide
'''
if issubclass(leftSide.__class__, BodyElementValue):
leftSideValue = leftSide.getValue()
else:
leftSideValue = leftSide
if issubclass(rightSide.__class__, BodyElementValue):
rightSideValue = rightSide.getValue()
else:
rightSideValue = rightSide
if not issubclass(leftSideValue.__class__, STRING_TYPES):
raise XPathRuntimeError('Concat operator tried to concatenate, but left side is not a string type! It is a %s . repr: %s' % ( \
type(leftSideValue).__name__,
repr(leftSideValue),
)
)
if not issubclass(rightSideValue.__class__, STRING_TYPES):
raise XPathRuntimeError('Concat operator tried to concatenate, but right side is not a string type! It is a %s . repr: %s' % ( \
type(rightSideValue).__name__,
repr(rightSideValue),
)
)
#print ( "Left: %s\nRight: %s\n" %(repr(leftSideValue), repr(rightSideValue)) )
val = leftSideValue + rightSideValue
return BodyElementValue_String(val)
BEO_CONCAT_RE = re.compile(r'''^([ \t]*[\|][\|][ \t]*)''')
OPERATION_RES.append( (BEO_CONCAT_RE, BodyElementOperation_Concat) )
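# Example (illustrative) of the '||' operator once both sides have resolved to string values:
#
#       BodyElementOperation_Concat().performOperation( BodyElementValue_String("foo"), BodyElementValue_String("bar") )
#       # -> BodyElementValue_String, .getValue() == "foobar"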
class BodyElementOperation_Math(BodyElementOperation):
'''
BodyElementOperation_Math - Base class for math operators
'''
# MATH_OPERATOR_STR - Override with the math operator (e.x. "+")
MATH_OPERATOR_STR = 'unknown'
def _prepareValuesForOperation(self, leftSide, rightSide):
'''
_prepareValuesForOperation - Prepare values for a numeric operation
@param leftSide <str/BodyElementValue/int/float> - The left side of the operation
@param rightSide <str/BodyElementValue/int/float> - The right side of the operation
@return tuple( leftSideValue<float>, rightSideValue<float> )
'''
if issubclass(leftSide.__class__, BodyElementValue):
leftSideValue = leftSide.getValue()
else:
leftSideValue = leftSide
if issubclass(rightSide.__class__, BodyElementValue):
rightSideValue = rightSide.getValue()
else:
rightSideValue = rightSide
try:
return ( float(leftSideValue), float(rightSideValue) )
except:
raise XPathRuntimeError('Math operation "%s" attempted, but could not convert body sides to numbers!\nLeft side: <%s> %s\nRight side: <%s> %s' % ( \
self.MATH_OPERATOR_STR,
type(leftSideValue).__name__,
repr(leftSideValue),
type(rightSideValue).__name__,
repr(rightSideValue),
)
)
def performOperation(self, leftSide, rightSide):
'''
performOperation - Perform a math operation (see type for details)
@param leftSide <...> - The left side (must be convertable to float)
@param rightSide <...> - The right side (must be convertable to float)
@return <BodyElementValue_Number> - The calculated value
'''
(leftSideValue, rightSideValue) = self._prepareValuesForOperation(leftSide, rightSide)
return self.doCalculation(leftSideValue, rightSideValue)
def doCalculation(self, leftSideValue, rightSideValue):
'''
            doCalculation - Perform the math operation implemented by this subclass.
Subclass must override this method.
@param leftSideValue <float> - Left side value
@param rightSideValue <float> - Right side value
@return <BodyElementValue_Number> - The result of the operation
'''
raise NotImplementedError('BodyElementOperation_Math class "%s" must implement doCalculation function!' %( self.__class__.__name__, ))
class BodyElementOperation_Math_Plus(BodyElementOperation_Math):
'''
BodyElementOperation_Math_Plus - BodyElementOperation that implements the Math operation "plus" / "addition" / "+"
'''
MATH_OPERATOR_STR = '+'
def doCalculation(self, leftSideValue, rightSideValue):
'''
doCalculation - Add two values, return the result.
@param leftSideValue <float> - Left side value
@param rightSideValue <float> - Right side value
@return <BodyElementValue_Number> - The result of the operation
'''
result = leftSideValue + rightSideValue
return BodyElementValue_Number(result)
BEO_MATH_PLUS_RE = re.compile(r'''^([ \t]*[+][ \t]*)''')
OPERATION_RES.append( (BEO_MATH_PLUS_RE, BodyElementOperation_Math_Plus) )
class BodyElementOperation_Math_Minus(BodyElementOperation_Math):
'''
BodyElementOperation_Math_Minus - BodyElementOperation that implements the Math operation "minus" / "subtraction" / "-"
'''
MATH_OPERATOR_STR = '-'
def doCalculation(self, leftSideValue, rightSideValue):
'''
doCalculation - Subtract two values, return the result.
@param leftSideValue <float> - Left side value
@param rightSideValue <float> - Right side value
@return <BodyElementValue_Number> - The result of the operation
'''
result = leftSideValue - rightSideValue
return BodyElementValue_Number(result)
BEO_MATH_MINUS_RE = re.compile(r'''^([ \t]*[-][ \t]*)''')
OPERATION_RES.append( (BEO_MATH_MINUS_RE, BodyElementOperation_Math_Minus) )
class BodyElementOperation_Math_Multiply(BodyElementOperation_Math):
'''
BodyElementOperation_Math_Multiply - BodyElementOperation that implements the Math operation "multiply" / "multiplication" / "*"
'''
MATH_OPERATOR_STR = '*'
def doCalculation(self, leftSideValue, rightSideValue):
'''
doCalculation - Multiply two values, return the result.
@param leftSideValue <float> - Left side value
@param rightSideValue <float> - Right side value
@return <BodyElementValue_Number> - The result of the operation
'''
result = leftSideValue * rightSideValue
return BodyElementValue_Number(result)
BEO_MATH_MULTIPLY_RE = re.compile(r'''^([ \t]*[\*][ \t]*)''')
OPERATION_RES.append( (BEO_MATH_MULTIPLY_RE, BodyElementOperation_Math_Multiply) )
class BodyElementOperation_Math_Divide(BodyElementOperation_Math):
'''
BodyElementOperation_Math_Divide - BodyElementOperation that implements the Math operation "divide" / "division" / "div"
'''
MATH_OPERATOR_STR = 'div'
def doCalculation(self, leftSideValue, rightSideValue):
'''
doCalculation - Divide two values, return the result.
@param leftSideValue <float> - Left side value
@param rightSideValue <float> - Right side value
@return <BodyElementValue_Number> - The result of the operation
'''
result = leftSideValue / rightSideValue
return BodyElementValue_Number(result)
BEO_MATH_DIVIDE_RE = re.compile(r'''^([ \t]*[dD][iI][vV][ \t]*)''')
OPERATION_RES.append( (BEO_MATH_DIVIDE_RE, BodyElementOperation_Math_Divide) )
class BodyElementOperation_Math_Modulus(BodyElementOperation_Math):
'''
BodyElementOperation_Math_Modulus - BodyElementOperation that implements the Math operation "modulus" / "%" / "mod"
'''
MATH_OPERATOR_STR = 'mod'
def doCalculation(self, leftSideValue, rightSideValue):
'''
doCalculation - Divide two values, return the remainder.
@param leftSideValue <float> - Left side value
@param rightSideValue <float> - Right side value
@return <BodyElementValue_Number> - The result of the operation
'''
result = leftSideValue % rightSideValue
return BodyElementValue_Number(result)
BEO_MATH_MODULUS_RE = re.compile(r'''^([ \t]*[mM][oO][dD][ \t]*)''')
OPERATION_RES.append( (BEO_MATH_MODULUS_RE, BodyElementOperation_Math_Modulus) )
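# Example (illustrative) of the math operations above, applied to already-resolved values:
#
#       BodyElementOperation_Math_Plus().performOperation( BodyElementValue_Number(2), BodyElementValue_Number(3) )
#       # -> BodyElementValue_Number, .getValue() == 5.0
#       BodyElementOperation_Math_Modulus().performOperation( BodyElementValue_Number(7), BodyElementValue_Number(4) )
#       # -> BodyElementValue_Number, .getValue() == 3.0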
#############################
## Comparisons ##
#############################
# COMPARISON_RES - A list of tuples, which will be iterated upon parsing a body to create the Comparison types
# Tuples are in format: ( re.compile'd expression, BodyElementComparison child class implementing related )
#
# Where all of the named groups within the compiled regular expression are passed to __init__ of the related class.
COMPARISON_RES = []
class BodyElementComparison(BodyElement):
'''
BodyElementComparison - Base class of Comparison operations (such as equals, not equals, greater than, etc.)
'''
    # NUMERIC_ONLY - If True, both values must be representable as a float (Number), or an error is raised.
# If False, other values (e.x. string) are supported.
NUMERIC_ONLY = False
# COMPARISON_OPERATOR_STR - This should be set to the operator associated with the comparison (e.x. "!=" or "<")
COMPARISON_OPERATOR_STR = 'UNKNOWN'
def doComparison(self, leftSide, rightSide):
'''
doComparison - Do the comparison associated with the subclass of BodyElementComparison
and return the result.
                @param leftSide <BodyElementValue/str/float/other?> - Left side of comparison operator
                @param rightSide <BodyElementValue/str/float/other?> - Right side of comparison operator
                @return <BodyElementValue_Boolean> - The result of the comparison operation
'''
        (leftSideValue, rightSideValue) = self._resolveTypesForComparison(leftSide, rightSide)
return self._doComparison(leftSideValue, rightSideValue)
def _doComparison(self, leftSideValue, rightSideValue):
'''
_doComparison - TYPE INTERNAL. Do the comparison associated with the subclass of BodyElementComparison
and return the result.
This should be implemented by each comparison type, rather than doComparison directly (which prepares arguments)
@param leftSideValue <str/float/other?> - Left side of comparison operator's value (unrolled from its BodyElementValue wrapper)
@param rightSideValue <str/float/other?> - Right side of comparison operator's value (unrolled from its BodyElementValue wrapper)
@return <BodyElementValue_Boolean> - The result of the comparison operation
'''
raise NotImplementedError('BodyElementComparison._doComparison must be implemented by extending subclass, but %s does not implement!' % ( \
self.__class__.__name__,
)
)
@classmethod
def _resolveTypesForComparison(cls, leftSide, rightSide):
'''
_resolveTypesForComparison - Resolve the given leftSide and rightSide dynamic types for comparison
@param leftSide <BodyElementValue/...> - A value, either wrapped in a BodyElementValue or direct.
Represents the left side of the operator
@param rightSide <BodyElementValue/...> - A value, either wrapped in a BodyElementValue or direct.
Represents the right side of the operator
@return tuple(left, right) of either <float, float> if castable, or the original raw pythonic types instead (pulled out of BodyElementValue if provided in one)
@notes - If cls.NUMERIC_ONLY is True, will throw an exception if cannot cast both sides to float. See raises section, below.
@raises XPathRuntimeError - If NUMERIC_ONLY is True, and cannot cast both sides to a float.
'''
if issubclass(leftSide.__class__, BodyElementValue):
leftSideValue = leftSide.getValue()
else:
leftSideValue = leftSide
if issubclass(rightSide.__class__, BodyElementValue):
rightSideValue = rightSide.getValue()
else:
rightSideValue = rightSide
# Try to represent both sides as floats (Number), if possible
try:
return ( float(leftSideValue), float(rightSideValue) )
except:
# If we failed to convert both sides to number (e.x. strings), then check if this is a NUMERIC_ONLY type,
# in which case we will throw an error.
# Otherwise, return the raw python types
if cls.NUMERIC_ONLY is False:
return ( leftSideValue, rightSideValue )
else:
# TODO: Say explicitly which side won't convert?
raise XPathRuntimeError('XPath Runtime Error: Numeric-only comparison attempted with non-numeric values! Comparison "%s" only supports both sides being numeric, and cannot convert. Left side is <%s> ( %s ) and Right side is <%s> ( %s )' % ( \
cls.COMPARISON_OPERATOR_STR,
type(leftSideValue).__name__, repr(leftSideValue),
type(rightSideValue).__name__, repr(rightSideValue),
)
)
class BodyElementComparison_Equal(BodyElementComparison):
'''
BodyElementComparison_Equal - A BodyElementComparison which represents the "equals" operation, "="
'''
COMPARISON_OPERATOR_STR = "="
def _doComparison(self, leftSideValue, rightSideValue):
return BodyElementValue_Boolean( leftSideValue == rightSideValue )
BEC_EQUAL_RE = re.compile(r'^([ \t]*[=][ \t]*)')
COMPARISON_RES.append( (BEC_EQUAL_RE, BodyElementComparison_Equal) )
class BodyElementComparison_NotEqual(BodyElementComparison):
'''
BodyElementComparison_NotEqual - A BodyElementComparison which represents the "not equals" operation, "!="
'''
COMPARISON_OPERATOR_STR = "!="
def _doComparison(self, leftSideValue, rightSideValue):
return BodyElementValue_Boolean( leftSideValue != rightSideValue )
BEC_NOT_EQUAL_RE = re.compile(r'^([ \t]*[!][=][ \t]*)')
COMPARISON_RES.append( (BEC_NOT_EQUAL_RE, BodyElementComparison_NotEqual) )
# TODO: Other types of comparison (greater than, less than or equal, etc.)
class BodyElementComparison_LessThan(BodyElementComparison):
'''
BodyElementComparison_LessThan - A BodyElementComparison which represents the "less than" operation, "<"
This is a "NUMERIC_ONLY" comparison operation.
'''
NUMERIC_ONLY = True
COMPARISON_OPERATOR_STR = '<'
def _doComparison(self, leftSideValue, rightSideValue):
return BodyElementValue_Boolean( leftSideValue < rightSideValue )
BEC_LESS_THAN_RE = re.compile(r'^([ \t]*[<][ \t]*)')
COMPARISON_RES.append( (BEC_LESS_THAN_RE, BodyElementComparison_LessThan) )
class BodyElementComparison_LessThanOrEqual(BodyElementComparison):
'''
BodyElementComparison_LessThanOrEqual - A BodyElementComparison which represents the "less than or equal" operation, "<="
This is a "NUMERIC_ONLY" comparison operation.
'''
NUMERIC_ONLY = True
COMPARISON_OPERATOR_STR = '<='
def _doComparison(self, leftSideValue, rightSideValue):
return BodyElementValue_Boolean( leftSideValue <= rightSideValue )
BEC_LESS_THAN_OR_EQUAL_RE = re.compile(r'^([ \t]*[<][=][ \t]*)')
COMPARISON_RES.append( (BEC_LESS_THAN_OR_EQUAL_RE, BodyElementComparison_LessThanOrEqual) )
class BodyElementComparison_GreaterThan(BodyElementComparison):
'''
BodyElementComparison_GreaterThan - A BodyElementComparison which represents the "greater than" operation, ">"
This is a "NUMERIC_ONLY" comparison operation.
'''
NUMERIC_ONLY = True
COMPARISON_OPERATOR_STR = '>'
def _doComparison(self, leftSideValue, rightSideValue):
return BodyElementValue_Boolean( leftSideValue > rightSideValue )
BEC_GREATER_THAN_RE = re.compile(r'^([ \t]*[>][ \t]*)')
COMPARISON_RES.append( (BEC_GREATER_THAN_RE, BodyElementComparison_GreaterThan) )
class BodyElementComparison_GreaterThanOrEqual(BodyElementComparison):
'''
BodyElementComparison_GreaterThanOrEqual - A BodyElementComparison which represents the "greater than or equal" operation, ">="
This is a "NUMERIC_ONLY" comparison operation.
'''
NUMERIC_ONLY = True
COMPARISON_OPERATOR_STR = '>='
def _doComparison(self, leftSideValue, rightSideValue):
        return BodyElementValue_Boolean( leftSideValue >= rightSideValue )
BEC_GREATER_THAN_OR_EQUAL_RE = re.compile(r'^([ \t]*[>][=][ \t]*)')
COMPARISON_RES.append( (BEC_GREATER_THAN_OR_EQUAL_RE, BodyElementComparison_GreaterThanOrEqual) )
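# Example (illustrative): both sides are coerced to float when possible before comparing, so a numeric
#  string compares numerically against a number:
#
#       BodyElementComparison_Equal().doComparison( BodyElementValue_String("5"), BodyElementValue_Number(5) )
#       # -> BodyElementValue_Boolean, .getValue() == True
#
#  NUMERIC_ONLY comparisons (e.g. '<') require both sides to be coercible to a number.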
#############################
## Boolean Ops ##
#############################
# BOOLEAN_OPS_RES - A list of tuples, which will be iterated upon parsing a body to create the BooleanOps types
# Tuples are in format: ( re.compile'd expression, BodyElementBooleanOps child class implementing related )
#
# Where all of the named groups within the compiled regular expression are passed to __init__ of the related class.
BOOLEAN_OPS_RES = []
class BodyElementBooleanOps(BodyElement):
'''
BodyElementBooleanOps - Base comparison class for boolean comparison operations (e.x. "and" , "or" )
'''
# BOOLEAN_OP_STR - The boolean operation being implemented, should be set by the subclass.
BOOLEAN_OP_STR = 'unknown'
def doBooleanOp(self, leftSide, rightSide):
'''
doBooleanOp - Do the comparison associated with the subclass of BodyElementBooleanOps
and return the result.
                @param leftSide <BodyElementValue/bool> - Left side of the boolean operator
                @param rightSide <BodyElementValue/bool> - Right side of the boolean operator
                @return <BodyElementValue_Boolean> - The result of the boolean operation
'''
        (leftSideValue, rightSideValue) = self._resolveTypesForBooleanOp(leftSide, rightSide)
return self._doBooleanOp(leftSideValue, rightSideValue)
def _doBooleanOp(self, leftSideValue, rightSideValue):
'''
_doBooleanOp - TYPE INTERNAL. Do the comparison associated with the subclass of BodyElementBooleanOp
and return the result.
This should be implemented by each comparison type, rather than doBooleanOp directly (which prepares arguments)
                @param leftSideValue <bool> - Left side boolean value (unwrapped from its BodyElementValue)
                @param rightSideValue <bool> - Right side boolean value (unwrapped from its BodyElementValue)
                @return <BodyElementValue_Boolean> - The result of the boolean operation
'''
raise NotImplementedError('BodyElementBooleanOps._doBooleanOp must be implemented by extending subclass, but %s does not implement!' % ( \
self.__class__.__name__,
)
)
@classmethod
def _resolveTypesForBooleanOp(cls, leftSide, rightSide):
'''
_resolveTypesForBooleanOp - Resolve the given leftSide and rightSide dynamic types for comparison
Boolean type overrides the comparison base in order to only accept booleans (instead of numeric / strings)
@param leftSide <BodyElementValue/...> - A value, either wrapped in a BodyElementValue or direct.
Represents the left side of the operator.
Must be or resolve to a boolean
@param rightSide <BodyElementValue/...> - A value, either wrapped in a BodyElementValue or direct.
Represents the right side of the operator
Must be or resolve to a boolean
@return tuple(left<bool>, right<bool>)
@raises XPathRuntimeError - If either side is not a boolean, or a boolean-wrapped BodyElementValue
'''
if issubclass(leftSide.__class__, BodyElementValue):
leftSideValue = leftSide.getValue()
else:
leftSideValue = leftSide
if issubclass(rightSide.__class__, BodyElementValue):
rightSideValue = rightSide.getValue()
else:
rightSideValue = rightSide
# TODO: Provide better context here of where this operation was in the xpath string?
if not isinstance(leftSideValue, bool):
# Should this be a parse error? Their expression caused it....
raise XPathRuntimeError('XPath Runtime Error: Boolean comparison attempted ( "%s" operator ) but left side was not a boolean! Was: %s . Repr: %s' % ( \
cls.BOOLEAN_OP_STR,
type(leftSideValue).__name__,
repr(leftSideValue),
)
)
if not isinstance(rightSideValue, bool):
raise XPathRuntimeError('XPath Runtime Error: Boolean comparison attempted ( "%s" operator ) but right side was not a boolean! Was: %s . Repr: %s' % ( \
cls.BOOLEAN_OP_STR,
type(rightSideValue).__name__,
repr(rightSideValue),
)
)
return ( leftSideValue, rightSideValue )
class BodyElementBooleanOps_And(BodyElementBooleanOps):
'''
BodyElementBooleanOps_And - A BodyElementBooleanOps which represents the "and" operation -
will check that both the left and right side are True
'''
BOOLEAN_OP_STR = 'and'
def _doBooleanOp(self, leftSideValue, rightSideValue):
return BodyElementValue_Boolean( leftSideValue and rightSideValue )
# NOTE: these require whitespace after them, unlike other operators.
BEBO_AND_RE = re.compile(r'^([ \t]*[aA][nN][dD][ \t]+)')
BOOLEAN_OPS_RES.append( (BEBO_AND_RE, BodyElementBooleanOps_And) )
class BodyElementBooleanOps_Or(BodyElementBooleanOps):
'''
BodyElementBooleanOps_Or - A BodyElementBooleanOps which represents the "or" operation -
will check that either the left and right side are True
'''
BOOLEAN_OP_STR = 'or'
def _doBooleanOp(self, leftSideValue, rightSideValue):
return BodyElementValue_Boolean( leftSideValue or rightSideValue )
BEBO_OR_RE = re.compile(r'^([ \t]*[oO][rR][ \t]+)')
BOOLEAN_OPS_RES.append( (BEBO_OR_RE, BodyElementBooleanOps_Or) )
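# Example (illustrative): boolean ops require both sides to already be boolean values (e.g. the results
#  of two comparisons):
#
#       BodyElementBooleanOps_And().doBooleanOp( BodyElementValue_Boolean(True), BodyElementValue_Boolean(False) )
#       # -> BodyElementValue_Boolean, .getValue() == False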
# ALL_BODY_ELEMENT_RES - All regular expressions used in parsing out a body into individual operations
ALL_BODY_ELEMENT_RES = VALUE_GENERATOR_RES + STATIC_VALUES_RES + COMPARISON_RES + OPERATION_RES + BOOLEAN_OPS_RES
# NOTE: Static values should come before operations, so negative values match as a static value and not a subtraction operation
class BodyLevel_Group(BodyLevel):
'''
BodyLevel_Group - A group of elements
'''
def __init__(self, groupMembers=None):
'''
__init__ - Create this element
@param groupMembers list<BodyElement> - Members of this group
'''
BodyLevel.__init__(self)
if not groupMembers:
groupMembers = []
self.appendBodyElements(groupMembers)
# BODY_ELEMENT_GROUP_OPEN_RE - The opening of a parenthesis group
BODY_ELEMENT_GROUP_OPEN_RE = re.compile(r'^([ \t]*[\(](?P<restOfBody>.+)[ \t]*)$')
# BODY_ELEMENT_GROUP_CLOSE_RE - The closing of a parenthesis group
BODY_ELEMENT_GROUP_CLOSE_RE = re.compile(r'^(?P<endOfGroup>[ \t]*[\)][ \t]*)')
def _parseBodyLevelGroup(restOfBody):
'''
_parseBodyLevelGroup - Parse a group, within parenthesis
@param restOfBody <str> - The remainder of the body string to parse
@return tuple< <BodyLevel_Group>, remainderStr<str> > - The group parsed, and the unused portion of the str on which to continue parsing at parent level
'''
allBodyElementREs = ALL_BODY_ELEMENT_RES
bodyElementGroupOpenRE = BODY_ELEMENT_GROUP_OPEN_RE
bodyElementGroupCloseRE = BODY_ELEMENT_GROUP_CLOSE_RE
curString = restOfBody[:].strip()
ret = []
foundCloseParen = False
while curString:
gotMatch = False
groupCloseMatch = bodyElementGroupCloseRE.match(curString)
if groupCloseMatch:
# We are at the end of this group, return the rest of the string back upward
gotMatch = True
newCurString = curString[ groupCloseMatch.span()[1] : ]
curString = newCurString
foundCloseParen = True
break
groupOpenMatch = bodyElementGroupOpenRE.match(curString)
if groupOpenMatch:
gotMatch = True
(subLevel, newCurString) = _parseBodyLevelGroup( groupOpenMatch.groupdict()['restOfBody'] )
ret.append(subLevel)
curString = newCurString
continue
else:
for ( bodyElementRE, bodyElementClass ) in allBodyElementREs:
matchObj = bodyElementRE.match(curString)
if matchObj is None:
continue
gotMatch = True
break
if gotMatch is False:
raise XPathParseError('Failed to parse body string into usable part, at: "%s"' %(curString, ))
(thisElement, newCurString) = bodyElementClass.createFromMatch(curString, matchObj)
ret.append(thisElement)
curString = newCurString
if foundCloseParen is False:
raise XPathParseError('Missing close parenthesis for section: "%s"' %(restOfBody, ))
# Optimization: Before returning, run through and perform any operations against static values possible
#newRet = _optimizeStaticValueCalculations(ret)
ret = _optimizeStaticValueCalculations(ret)
#print ( "\nPrevious BodyElements(%2d): %s\n\n New BodyElements(%2d): %s\n" %( len(ret), repr(ret), len(newRet), repr(newRet)) )
#return newRet
return ( BodyLevel_Group(ret), curString )
# BODY_ELEMENT_GROUP_FUNCTION_NEXT_ARG_RE - The next argument
BODY_ELEMENT_GROUP_FUNCTION_NEXT_ARG_RE = re.compile(r'^([ \t]*[,][ \t]*)')
def _parseFunctionArgsToBodyElements(restOfBody):
'''
_parseFunctionArgsToBodyElements - Parse function arguments into BodyElements
@param restOfBody <str> - The remainder of the body string to parse
@return tuple< list<BodyLevel_Group>, remainderStr<str> > - The groups parsed (one per arg), and the unused portion of the str on which to continue parsing at parent level
'''
allBodyElementREs = ALL_BODY_ELEMENT_RES
bodyElementGroupOpenRE = BODY_ELEMENT_GROUP_OPEN_RE
bodyElementGroupCloseRE = BODY_ELEMENT_GROUP_CLOSE_RE
bodyElementGroupFunctionNextArgRE = BODY_ELEMENT_GROUP_FUNCTION_NEXT_ARG_RE
curString = restOfBody[:].strip()
fnArgs = []
curGroupElements = []
foundCloseParen = False
while curString:
gotMatch = False
groupCloseMatch = bodyElementGroupCloseRE.match(curString)
if groupCloseMatch:
# We are at the end of this group, return the rest of the string back upward
gotMatch = True
newCurString = curString[ groupCloseMatch.span()[1] : ]
curString = newCurString
foundCloseParen = True
break
nextArgMatch = bodyElementGroupFunctionNextArgRE.match(curString)
if nextArgMatch:
# We hit a comma, should move onto the next arg
gotMatch = True
if len(curGroupElements) == 0:
# TODO: More information here?
raise XPathParseError('Function call has empty argument, at: %s' %(curString, ))
# Append the current group and begin the next
# Optimize the group elements
curGroupElements = _optimizeStaticValueCalculations(curGroupElements)
if False and len(curGroupElements) == 1:
# TODO: Support this optimization -- will require a bit of interface massaging so common interface
# We have optimized down to a single element, so add that instead of the level
fnArgs.append( curGroupElements[0] )
else:
# More than one, create a group and append it
curGroup = BodyLevel_Group( curGroupElements )
fnArgs.append( curGroup )
# TODO: Validate we don't just have trailing comma
# Create a new list for future elements
curGroupElements = []
newCurString = curString[ nextArgMatch.span()[1] : ]
curString = newCurString
continue
groupOpenMatch = bodyElementGroupOpenRE.match(curString)
if groupOpenMatch:
gotMatch = True
(subLevel, newCurString) = _parseBodyLevelGroup( groupOpenMatch.groupdict()['restOfBody'] )
curGroupElements.append( subLevel )
curString = newCurString
continue
else:
for ( bodyElementRE, bodyElementClass ) in allBodyElementREs:
matchObj = bodyElementRE.match(curString)
if matchObj is None:
continue
gotMatch = True
break
if gotMatch is False:
raise XPathParseError('Failed to parse body string into usable part, at: "%s"' %(curString, ))
(thisElement, newCurString) = bodyElementClass.createFromMatch(curString, matchObj)
curGroupElements.append( thisElement )
curString = newCurString
if foundCloseParen is False:
raise XPathParseError('Missing close parenthesis for section: "%s"' %(restOfBody, ))
if len(curGroupElements) > 0:
# Optimize the group elements
curGroupElements = _optimizeStaticValueCalculations(curGroupElements)
if False and len(curGroupElements) == 1:
# We have optimized down to a single element, so add that instead of the level
fnArgs.append( curGroupElements[0] )
else:
# More than one, create a group and append it
curGroup = BodyLevel_Group( curGroupElements )
fnArgs.append( curGroup )
# TODO: Optimize the args, can pull out of levels if only one arg
return ( fnArgs, curString )
def parseBodyStringIntoBodyElements(bodyString):
'''
parseBodyStringIntoBodyElements - Parses the body string of a tag filter expression (between square brackets)
into individual body elements.
@param bodyString <str> - A body string of an XPath expression
@return list<BodyElement> - A list of matched BodyElement items, in order of appearance.
@raises XPathParseError - Failure to parse
'''
allBodyElementREs = ALL_BODY_ELEMENT_RES
bodyElementGroupOpenRE = BODY_ELEMENT_GROUP_OPEN_RE
curString = bodyString[:].strip()
ret = []
while curString:
gotMatch = False
groupOpenMatch = bodyElementGroupOpenRE.match(curString)
if groupOpenMatch:
gotMatch = True
(subLevel, newCurString) = _parseBodyLevelGroup( groupOpenMatch.groupdict()['restOfBody'] )
ret.append(subLevel)
curString = newCurString
continue
else:
for ( bodyElementRE, bodyElementClass ) in allBodyElementREs:
matchObj = bodyElementRE.match(curString)
if matchObj is None:
continue
gotMatch = True
break
if gotMatch is False:
raise XPathParseError('Failed to parse body string into usable part, at: "%s"' %(curString, ))
(thisElement, newCurString) = bodyElementClass.createFromMatch(curString, matchObj)
ret.append(thisElement)
curString = newCurString
# Optimization: Before returning, run through and perform any operations against static values possible
#newRet = _optimizeStaticValueCalculations(ret)
ret = _optimizeStaticValueCalculations(ret)
#print ( "\nPrevious BodyElements(%2d): %s\n\n New BodyElements(%2d): %s\n" %( len(ret), repr(ret), len(newRet), repr(newRet)) )
#return newRet
return ret
def _optimizeStaticValueCalculations(bodyElements):
'''
_optimizeStaticValueCalculations - Optimize element portions that can be pre-calculated
@param bodyElements - list<BodyElement> - List of BodyElements following parsing of XPath string
@return list<BodyElement> - Optimized list of BodyElements, where pre-calculated operations are ran once at parse-time
instead of per tag at run-time.
'''
numOrigElements = len(bodyElements)
if numOrigElements <= 2:
# Nothing to do
return bodyElements
# We are already going to hit __class__ on every object, so do it ahead of time
# in a quicker list comprehension, which we will reference later
bodyElementClasses = [bodyElement.__class__ for bodyElement in bodyElements]
# No benefit in checking if we have any BodyElementOperation (or future optimizations) first,
# as we will already iterate over everything. The only thing saved when none would be recreating the list,
# at the expense of O(n) vs O(2n) for the check in the event we can optimize.
ret = []
prevElement = bodyElements[0]
prevElementClass = bodyElementClasses[0]
ret.append(prevElement)
i = 1
while i < numOrigElements:
curElement = bodyElements[i]
curElementClass = bodyElementClasses[i]
if issubclass(curElementClass, (BodyElementOperation, BodyElementComparison)):
# If we have an operation to optimize, check if left and right are already values.
# If so, we can run it.
if (i+1) < numOrigElements and issubclass(prevElementClass, BodyElementValue):
# We are not on the last element, and the previous was a value.
# If next is value, run the operation.
nextElement = bodyElements[i + 1]
nextElementClass = bodyElementClasses[i + 1]
if issubclass(nextElementClass, BodyElementValue):
# Score! We can optimize!
if issubclass(curElementClass, BodyElementOperation):
calculatedValue = curElement.performOperation(prevElement, nextElement)
#elif issubclass(curElementClass, BodyElementComparison):
else:
# Only Comparison left
calculatedValue = curElement.doComparison(prevElement, nextElement)
# Strip off the previous value, and replace this operation and next value with calculated
ret = ret[ : -1 ] + [calculatedValue]
# Set previous value to this value
prevElement = calculatedValue
prevElementClass = prevElement.__class__
# And increment past the next element
i += 2
continue
# No optimization available, add the element as-is
ret.append(curElement)
# Update previous element to this element for next round
prevElement = curElement
prevElementClass = curElementClass
# Increment to next element
i += 1
return ret
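# Illustrative example of the optimization above (hypothetical element reprs, not
# part of the original module): a parsed body such as
#   [ Value(3), Operation('+'), Value(4), Comparison('='), ValueGenerator('@attr') ]
# collapses at parse time to
#   [ Value(7), Comparison('='), ValueGenerator('@attr') ]
# so the addition runs once instead of once per tag at run time.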
# vim: set ts=4 sw=4 st=4 expandtab :
| lgpl-3.0 | -9,045,549,128,452,777,000 | 33.874741 | 258 | 0.621279 | false |
dcos/dcos | packages/adminrouter/extra/src/test-harness/modules/mocker/common.py | 1 | 7971 | # Copyright (C) Mesosphere, Inc. See LICENSE file for details.
"""
Shared management code for DC/OS mocks used by AR instances, both EE and Open.
"""
import concurrent.futures
import logging
from mocker.endpoints.marathon import MarathonEndpoint
from mocker.endpoints.mesos import MesosEndpoint
from mocker.endpoints.mesos_dns import MesosDnsEndpoint
from mocker.endpoints.reflectors import (
ReflectingTcpIpEndpoint,
ReflectingUnixSocketEndpoint,
)
log = logging.getLogger(__name__)
class MockerBase:
"""This class represents mocking behaviour shared between both EE and Open
repositories.
It should not be instantiated directly but instead inheriting classes should
    override/extend its methods.
"""
_endpoints = None
def _register_endpoints(self, endpoints):
"""Register given endpoints list with the mock
This method registers all the endpoints that are going to be managed
by this Mocker instance.
Args:
endpoints (object: [EndpointA, EndpointB,...]): list of endpoints
that should be registered
"""
self._endpoints = {}
for endpoint in endpoints:
log.info("Registering endpoint `%s`", endpoint.id)
assert endpoint.id not in self._endpoints
self._endpoints[endpoint.id] = endpoint
@staticmethod
def _create_common_endpoints():
"""Helper function that takes care of creating/instantiating all the
endpoints that are common for both EE and Open repositories"""
res = []
# pkgpanda endpoint
res.append(ReflectingUnixSocketEndpoint('/run/dcos/pkgpanda-api.sock'))
# exhibitor
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=8181))
# Mesos masters
res.append(MesosEndpoint(ip='127.0.0.2', port=5050))
res.append(MesosEndpoint(ip='127.0.0.3', port=5050))
# Marathon instances running on the masters
res.append(MarathonEndpoint(ip='127.0.0.1', port=8080))
res.append(MarathonEndpoint(ip='127.0.0.2', port=8080))
# cosmos
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=7070))
# dcos-net
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=62080))
# Mesos agents:
# - plain/without TLS
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.2', port=15001))
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.3', port=15002))
# - TLS version. It's used for testing e.g. DEFAULT_SCHEME variable
# where AR is connecting to the upstream Mesos Agent using TLS.
# 127.0.0.1 address stems from certificate names matching.
res.append(ReflectingTcpIpEndpoint(
ip='127.0.0.1',
port=15401,
certfile='/run/dcos/pki/tls/certs/adminrouter-ec.crt',
keyfile='/run/dcos/pki/tls/private/adminrouter-ec.key'))
# Agent3
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.4', port=15003))
# Agent AR 1
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.2', port=61001))
# Agent AR 2
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.3', port=61001))
# task /scheduler-alwaysthere
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=16000))
# task /nest1/scheduler-alwaysthere
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=17000))
# task /nest2/nest1/scheduler-alwaysthere
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=18000))
# task /nest2/nest1/scheduler-onlymarathon
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=18001))
# task /nest2/nest1/scheduler-onlymesos
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=18002))
# task /nest2/nest1/scheduler-onlymesosdns
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=18003))
# task /scheduler-alwaysthere but with different ip+port, used i.e. in
# `/service` endpoint tests
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.15', port=16001))
        # catch-all for /scheduler-alwaysthere task. Its role is to respond to all
        # the requests which e.g. used the second entry of the mesos_dns SRV reply.
        # Successful tests will never use it.
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=16002))
# other Admin Router Masters, used i.e. during Marathon leader testing
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.2', port=80))
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.3', port=80))
res.append(ReflectingTcpIpEndpoint(
ip='127.0.0.4',
port=443,
certfile='/run/dcos/pki/tls/certs/adminrouter-ec.crt',
keyfile='/run/dcos/pki/tls/private/adminrouter-ec.key'))
# metrics endpoint
res.append(ReflectingUnixSocketEndpoint('/run/dcos/telegraf-dcos-metrics.sock'))
# log endpoint
res.append(ReflectingUnixSocketEndpoint('/run/dcos/dcos-log.sock'))
# Mesos DNS
res.append(MesosDnsEndpoint(ip='127.0.0.1', port=8123))
# DDDT, two variants:
# TODO (prozlach): cleanup DDDT sockets
res.append(
ReflectingTcpIpEndpoint(ip='127.0.0.1', port=1050))
res.append(
ReflectingUnixSocketEndpoint('/run/dcos/dcos-diagnostics.sock'))
# DC/OS Metronome
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=9000))
# Checks API
res.append(
ReflectingUnixSocketEndpoint('/run/dcos/dcos-checks-api.sock'))
# TODO - other endpoints common for all flavours go here...
return res
def __init__(self, extra_endpoints=None):
"""Initialize new MockerBase instance
Args:
extra_endpoints (obj: [EndpointA, EndpointB,...]): list of endpoints
that are unique to the inheriting class/represent specific behaviour
of given flavour
"""
common_endpoints = self._create_common_endpoints()
endpoints = common_endpoints + extra_endpoints
self._register_endpoints(endpoints)
def start(self):
"""Start all endpoints registered with this Mocker instance"""
with concurrent.futures.ThreadPoolExecutor() as executor:
for endpoint in self._endpoints.values():
executor.submit(endpoint.start)
def stop(self):
"""Stop all endpoints registered with this Mocker instance.
Usually called right before object destruction
"""
with concurrent.futures.ThreadPoolExecutor() as executor:
for endpoint in self._endpoints.values():
executor.submit(endpoint.stop)
def reset(self):
"""Reset all the endpoints to their initial state
Used to make sure that all the tests start with fresh state/are not
interfering with each other through Mocker
"""
for endpoint in self._endpoints.values():
endpoint.reset()
    def send_command(self, endpoint_id, func_name, aux_data=None):
        """Reconfigure an endpoint managed by Mocker
This method reconfigures endpoint previously started by Mocker. The
reconfiguration is basically calling method `func_name` belonging to
endpoint `endpoint_id` with data `aux_data`
Args:
endpoint_id (str): id of the endpoint to reconfigure
func_name (str): name of the endpoint's function to call
            aux_data (str): auxiliary data to pass to function
Returns:
Depends on the endpoint - it returns anything that endpoint returns.
Raises:
            KeyError: endpoint with given id does not exist
            AttributeError: endpoint does not define function `func_name`
"""
endpoint = self._endpoints[endpoint_id]
f = getattr(endpoint, func_name)
return f(aux_data)
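# Example usage (illustrative only; `Mocker` stands for a concrete MockerBase
# subclass, and the endpoint id / function name below are hypothetical):
#   mocker = Mocker(extra_endpoints=[])
#   mocker.start()
#   mocker.send_command('http://127.0.0.1:8080', 'always_bork', aux_data=True)
#   mocker.reset()
#   mocker.stop()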
| apache-2.0 | -4,933,649,222,516,970,000 | 40.952632 | 88 | 0.648476 | false |
gnachman/iTerm2 | api/library/python/iterm2/iterm2/variables.py | 1 | 3098 | """
Provides support for iTerm2 variables, which hold information associated
with various objects such as sessions, tabs, and windows.
"""
import asyncio
import enum
import json
import typing
import iterm2.connection
import iterm2.notifications
class VariableScopes(enum.Enum):
"""Describes the scope in which a variable can be evaluated."""
SESSION = iterm2.api_pb2.VariableScope.Value("SESSION") #: Session scope
TAB = iterm2.api_pb2.VariableScope.Value("TAB") #: Tab scope
WINDOW = iterm2.api_pb2.VariableScope.Value("WINDOW") #: Window scope
APP = iterm2.api_pb2.VariableScope.Value("APP") #: Whole-app scope
class VariableMonitor:
"""
Watches for changes to a variable.
`VariableMonitor` is a context manager that helps observe changes in iTerm2
Variables.
:param connection: The connection to iTerm2.
:param scope: The scope in which the variable should be evaluated.
:param name: The variable name.
:param identifier: A tab, window, or session identifier. Must correspond to
the passed-in scope. If the scope is `APP` this should be None. If the
scope is `SESSION` or `WINDOW` the identifier may be "all" or "active".
.. seealso::
* Example ":ref:`colorhost_example`"
* Example ":ref:`theme_example`"
Example:
.. code-block:: python
async with iterm2.VariableMonitor(
connection,
iterm2.VariableScopes.SESSION,
"jobName",
my_session.session_id) as mon:
while True:
new_value = await mon.async_get()
DoSomething(new_value)
"""
def __init__(
self,
connection: iterm2.connection.Connection,
scope: VariableScopes,
name: str,
identifier: typing.Optional[str]):
self.__connection = connection
self.__scope = scope
self.__name = name
self.__identifier = identifier
self.__token = None
self.__queue: asyncio.Queue = asyncio.Queue(
loop=asyncio.get_event_loop())
async def __aenter__(self):
async def callback(_connection, message):
"""Called when a variable changes."""
await self.__queue.put(message)
self.__token = await (
iterm2.notifications.
async_subscribe_to_variable_change_notification(
self.__connection,
callback,
self.__scope.value,
self.__name,
self.__identifier))
return self
async def async_get(self) -> typing.Any:
"""Returns the new value of the variable."""
result = await self.__queue.get()
json_new_value = result.json_new_value
return json.loads(json_new_value)
async def __aexit__(self, exc_type, exc, _tb):
try:
await iterm2.notifications.async_unsubscribe(
self.__connection, self.__token)
except iterm2.notifications.SubscriptionException:
pass
| gpl-2.0 | -8,994,132,710,832,068,000 | 31.957447 | 79 | 0.603938 | false |
breuderink/psychic | psychic/nodes/filter.py | 1 | 1820 | import numpy as np
from scipy import signal
from golem import DataSet
from golem.nodes import BaseNode
from psychic.utils import get_samplerate
class Filter(BaseNode):
def __init__(self, filt_design_func):
'''
Forward-backward filtering node. filt_design_func is a function that takes
the sample rate as an argument, and returns the filter coefficients (b, a).
'''
BaseNode.__init__(self)
self.filt_design_func = filt_design_func
def train_(self, d):
fs = get_samplerate(d)
self.log.info('Detected sample rate of %d Hz' % fs)
self.filter = self.filt_design_func(fs)
def apply_(self, d):
b, a = self.filter
xs = np.hstack([signal.filtfilt(b, a, d.xs[:, i]).reshape(-1, 1)
for i in range(d.nfeatures)])
return DataSet(xs=xs, default=d)
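# Example (illustrative, not part of the original module): a band-pass Filter whose
# coefficients are designed from the detected sample rate using scipy:
#   bandpass = Filter(lambda fs: signal.butter(4, [8. / (fs / 2.), 30. / (fs / 2.)], btype='band'))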
class OnlineFilter(Filter):
def __init__(self, filt_design_func):
Filter.__init__(self, filt_design_func)
self.zi = []
def apply_(self, d):
b, a = self.filter
if self.zi == []:
self.zi = [signal.lfiltic(b, a, np.zeros(b.size)) for fi in
range(d.nfeatures)]
new_zi = []
xs = []
for i in range(d.nfeatures):
xi, zii = signal.lfilter(b, a, d.xs[:, i], zi=self.zi[i])
xs.append(xi.reshape(-1, 1))
new_zi.append(zii)
self.zi = new_zi
return DataSet(xs=np.hstack(xs), default=d)
class Winsorize(BaseNode):
def __init__(self, cutoff=[.05, .95]):
self.cutoff = np.atleast_1d(cutoff)
assert self.cutoff.size == 2
BaseNode.__init__(self)
def train_(self, d):
assert len(d.feat_shape) == 1
self.lims = np.apply_along_axis(lambda x: np.interp(self.cutoff,
np.linspace(0, 1, d.ninstances), np.sort(x)), 0, d.xs)
def apply_(self, d):
return DataSet(xs=np.clip(d.xs, self.lims[0,:], self.lims[1:]),
default=d)
| bsd-3-clause | 3,304,547,324,791,819,000 | 28.836066 | 79 | 0.621978 | false |
Ferjapolis/Indexador-MongoDB | sv03ToMongo.py | 1 | 2461 | # -*- coding: cp1252 -*-
from pymongo import MongoClient
from pymongo.errors import ConnectionFailure
import os
import sys
from PyPDF2 import PdfFileReader
enc = sys.stdin.encoding
c = MongoClient("localhost",27017)
print ("Conectado")
dbh = c["sv03"]
errores = dbh["errores"]
def carga(tabla,carpeta):
collections = dbh[tabla]
listaB= [""]
rootdir = r"\\sv-03\QA12\GENERAL\PROYECTOS\\"+str(carpeta)
for path, dirs, files in os.walk(rootdir):
for fil in files:
ruta = str(path)+"\\"+str(fil)
search = collections.find_one({"path":ruta})
if search is None:
try:
datos = {}
statinfo = os.stat(ruta)
datos["size"] = statinfo.st_size
datos["path"] = ruta
ext = fil.lower().rsplit('.', 1)[-1]
extension = "."+ext
datos["name"] = fil.replace(extension,"")
datos["format"] = ext
if ext =="pdf":
try:
Formatos = []
Point = 0.0353
pdf = PdfFileReader(open(ruta,'rb'))
num_pages = int(pdf.getNumPages())
datos["pag"]=num_pages
for i in range(0, num_pages):
hoja = pdf.getPage(i).mediaBox.upperRight
ancho = int(int(hoja[0])*Point)
largo = int(int(hoja[1])*Point)
formato = str(ancho)+"x"+str(largo)
Formatos.append(formato)
except:
datos["pag"]="Error"
hojas = {}
for elemn in Formatos:
if not elemn in hojas:
hojas[elemn] = 1
if elemn in hojas:
hojas[elemn] = 1 + hojas[elemn]
datos["pagF"] = hojas
if not datos["name"] in listaB:
collections.insert_one(datos)
except:
falla = {"path":ruta}
errores.insert_one(falla)
else:
"error"
print("cargado")
| unlicense | 5,193,583,938,490,873,000 | 37.453125 | 76 | 0.407152 | false |
thelabnyc/django-oscar-wfrs | src/wellsfargo/tests/connector/test_client.py | 1 | 2934 | from urllib.parse import parse_qs
from django.core.cache import cache
from django.test import TestCase
from wellsfargo.connector.client import WFRSGatewayAPIClient
import requests_mock
class WFRSGatewayAPIClientTest(TestCase):
def setUp(self):
super().setUp()
cache.clear()
@requests_mock.Mocker()
def test_get_api_key(self, rmock):
call_count = {
"i": 0,
}
# Setup mock for generating a token
def match_request(request):
# Check auth header
self.assertTrue(request.headers["Authorization"].startswith("Basic "))
# Check data in body
data = parse_qs(request.body)
self.assertEqual(
data,
{
"grant_type": [
"client_credentials",
],
"scope": [
" ".join(
[
"PLCCA-Prequalifications",
"PLCCA-Applications",
"PLCCA-Payment-Calculations",
"PLCCA-Transactions-Authorization",
"PLCCA-Transactions-Charge",
"PLCCA-Transactions-Authorization-Charge",
"PLCCA-Transactions-Return",
"PLCCA-Transactions-Cancel-Authorization",
"PLCCA-Transactions-Void-Return",
"PLCCA-Transactions-Void-Sale",
"PLCCA-Transactions-Timeout-Authorization-Charge",
"PLCCA-Transactions-Timeout-Return",
"PLCCA-Account-Details",
]
),
],
},
)
# Increment call count
call_count["i"] += 1
return True
# Register request mock
rmock.post(
"https://api-sandbox.wellsfargo.com/token",
additional_matcher=match_request,
json={
"access_token": "16a05f65dd41569af67dbdca7ea4da4d",
"scope": "",
"token_type": "Bearer",
"expires_in": 79900,
},
)
self.assertEqual(call_count["i"], 0)
# Get a token
token = WFRSGatewayAPIClient().get_api_key()
self.assertEqual(token.api_key, "16a05f65dd41569af67dbdca7ea4da4d")
self.assertEqual(token.is_expired, False)
self.assertEqual(call_count["i"], 1)
# Get token again
token = WFRSGatewayAPIClient().get_api_key()
self.assertEqual(token.api_key, "16a05f65dd41569af67dbdca7ea4da4d")
self.assertEqual(token.is_expired, False)
self.assertEqual(call_count["i"], 1)
| isc | 9,074,675,246,136,069,000 | 35.675 | 82 | 0.480913 | false |
ivannz/study_notes | data_study/facebook2012/base.py | 1 | 2045 | # -*- coding: UTF-8 -*-
## Base modules
import scipy.sparse as sp
import numpy as np
from collections import deque
import pandas as pd
## Read a sparse adjacency matrix from a two-column CSV file
def __csr_from_csv( file_name, **kwargs ) :
return __csr_from_pandas( pd.read_csv( file_name, **kwargs ) )
## Creates a sparse matrix from a two-column source-destination dataframe
def __csr_from_pandas( df, symmetrize = False ) :
return __csr_from_endpoints( df.values[ :, 0 ],
df.values[ :, 1 ], symmetrize = symmetrize )
def __csr_from_endpoints( u, v, symmetrize = False ) :
assert( len( u ) == len( v ) )
## Convert to a COO matrix
if not symmetrize :
adj = sp.coo_matrix( ( np.ones( len( u ), dtype = np.float ), ( u, v ) ) )
else :
adj = sp.coo_matrix( ( np.ones( len( u ) + len( v ), dtype = np.float ),
( np.concatenate( ( u, v ) ), np.concatenate( ( v, u ) )) ) )
## Convert to CSR and remove duplicates
adj = adj.tocsr( ) ; adj.data[ : ] = 1
return adj
def __sparse_bfs( A, sources, num_nodes = np.inf, max_hops = np.inf ) :
sources = np.asarray( sources, np.int )
## Initialize the hops array
dist = np.full( A.shape[ 0 ], np.inf, np.float )
    ## The source is immediately reachable
dist[ sources ] = 0.0
## Setup the vertex traversal schedule.
Q = deque( sources )
## Setup the counter of visited nodes
num_visited = 0
## If the allotted number of nodes has been exceeded, break the cycle.
while Q :
## Get the current vertex from the top of the FIFO queue
v = Q.popleft( )
## ... find its nerighbours (A is CSR)
N = A[ v, : ].nonzero( )[ 1 ]
## ... keep those that were not visited
N = N[ np.isinf( dist[ N ] ) ]
        ## Add them to the queue
if len( N ) > 0 :
dist[ N ] = 1.0 + dist[ v ]
## Nodes farther than max_hops away from the sources are not traversed.
if 1.0 + dist[ v ] < max_hops :
Q.extend( N )
## Unless the current vertex is the source, increase the number of visited nodes.
if dist[ v ] > 0 :
num_visited += len( N )
if num_visited >= num_nodes :
break
return dist
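## Illustrative example (not part of the original file):
##   adj = __csr_from_endpoints( [ 0, 1 ], [ 1, 2 ], symmetrize = True )
##   __sparse_bfs( adj, [ 0 ] )
## returns array([ 0., 1., 2. ]): node 0 is the source, node 1 is one hop away,
## and node 2 is two hops away.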
| mit | 3,795,678,255,961,516,000 | 32.52459 | 81 | 0.636186 | false |
adamchainz/mysqlclient-python | tests/test_MySQLdb_nonstandard.py | 1 | 3020 | import unittest
import _mysql
import MySQLdb
from MySQLdb.constants import FIELD_TYPE
from configdb import connection_factory
import warnings
warnings.simplefilter("ignore")
class TestDBAPISet(unittest.TestCase):
def test_set_equality(self):
self.assertTrue(MySQLdb.STRING == MySQLdb.STRING)
def test_set_inequality(self):
self.assertTrue(MySQLdb.STRING != MySQLdb.NUMBER)
def test_set_equality_membership(self):
self.assertTrue(FIELD_TYPE.VAR_STRING == MySQLdb.STRING)
def test_set_inequality_membership(self):
self.assertTrue(FIELD_TYPE.DATE != MySQLdb.STRING)
class TestCoreModule(unittest.TestCase):
"""Core _mysql module features."""
def test_NULL(self):
"""Should have a NULL constant."""
self.assertEqual(_mysql.NULL, 'NULL')
def test_version(self):
"""Version information sanity."""
self.assertTrue(isinstance(_mysql.__version__, str))
self.assertTrue(isinstance(_mysql.version_info, tuple))
self.assertEqual(len(_mysql.version_info), 5)
def test_client_info(self):
self.assertTrue(isinstance(_mysql.get_client_info(), str))
def test_thread_safe(self):
self.assertTrue(isinstance(_mysql.thread_safe(), int))
def test_escape_string(self):
self.assertEqual(_mysql.escape_string(b'foo"bar'),
b'foo\\"bar', "escape byte string")
self.assertEqual(_mysql.escape_string(u'foo"bar'),
b'foo\\"bar', "escape unicode string")
class CoreAPI(unittest.TestCase):
"""Test _mysql interaction internals."""
def setUp(self):
self.conn = connection_factory(use_unicode=True)
def tearDown(self):
self.conn.close()
def test_thread_id(self):
tid = self.conn.thread_id()
self.assertTrue(isinstance(tid, int),
"thread_id didn't return an int.")
self.assertRaises(TypeError, self.conn.thread_id, ('evil',),
"thread_id shouldn't accept arguments.")
def test_affected_rows(self):
self.assertEquals(self.conn.affected_rows(), 0,
"Should return 0 before we do anything.")
#def test_debug(self):
## FIXME Only actually tests if you lack SUPER
#self.assertRaises(MySQLdb.OperationalError,
#self.conn.dump_debug_info)
def test_charset_name(self):
self.assertTrue(isinstance(self.conn.character_set_name(), str),
"Should return a string.")
def test_host_info(self):
self.assertTrue(isinstance(self.conn.get_host_info(), str),
"Should return a string.")
def test_proto_info(self):
self.assertTrue(isinstance(self.conn.get_proto_info(), int),
"Should return an int.")
def test_server_info(self):
self.assertTrue(isinstance(self.conn.get_server_info(), str),
"Should return an str.")
| gpl-2.0 | 8,151,237,055,662,304,000 | 31.12766 | 72 | 0.620861 | false |
WilJoey/ckanext-tnext | ckanext/tnext/controllers/MUser.py | 1 | 9024 | import ckan.plugins as p
#from ckan.lib.base import BaseController, config
import ckan.lib.base as base
import ckan.lib.helpers as h
import ckan.model as model
import ckan.logic as logic
import ckan.logic.schema as schema
import ckan.new_authz as new_authz
import ckan.lib.captcha as captcha
import ckan.lib.navl.dictization_functions as dictization_functions
from pylons import config
from ckan.common import _, c, g, request
c = base.c
request = base.request
class MUserController(base.BaseController):
def index (self):
LIMIT = 20
page = int(request.params.get('page', 1))
c.q = request.params.get('q', '')
c.order_by = request.params.get('order_by', 'name')
context = {'return_query': True, 'user': c.user or c.author,
'auth_user_obj': c.userobj}
data_dict = {'q': c.q,
'limit': LIMIT,
'offset': (page - 1) * LIMIT,
'order_by': c.order_by}
try:
logic.check_access('user_list', context, data_dict)
except logic.NotAuthorized:
base.abort(401, _('Not authorized to see this page'))
users_list = logic.get_action('user_list')(context, data_dict)
c.users = users_list
c.page = h.Page(
collection=users_list,
page=page,
url=h.pager_url,
item_count=users_list.count(),
items_per_page=LIMIT
)
return base.render('muser/index.html')
def new (self, data=None, errors=None, error_summary=None):
#q = model.Session.query(model.User).filter(model.User.sysadmin==True)
#c.sysadmins = [a.name for a in q.all()]
'''GET to display a form for registering a new user.
or POST the form data to actually do the user registration.
'''
context = {'model': model, 'session': model.Session,
'user': c.user or c.author,
'auth_user_obj': c.userobj,
'schema': self._new_form_to_db_schema(),
'save': 'save' in request.params}
c.is_sysadmin = new_authz.is_sysadmin(c.user)
if not c.user or not c.is_sysadmin:
return base.render('user/logout_first.html')
try:
logic.check_access('user_create', context)
except logic.NotAuthorized:
base.abort(401, _('Unauthorized to create a user'))
if context['save'] and not data:
return self._save_new(context)
c.data = data or {}
c.errors = errors or {}
c.error_summary = error_summary or {}
#vars = {'data': data, 'errors': errors, 'error_summary': error_summary}
#c.form = render(self.new_user_form, extra_vars=vars)
#return render('user/new.html')
return base.render('muser/new.html')
def _new_form_to_db_schema(self):
return schema.user_new_form_schema()
def _save_new(self, context):
try:
data_dict = logic.clean_dict(dictization_functions.unflatten(
logic.tuplize_dict(logic.parse_params(request.params))))
context['message'] = data_dict.get('log_message', '')
captcha.check_recaptcha(request)
user = logic.get_action('user_create')(context, data_dict)
except logic.NotAuthorized:
base.abort(401, _('Unauthorized to create user %s') % '')
except logic.NotFound, e:
base.abort(404, _('User not found'))
except dictization_functions.DataError:
base.abort(400, _(u'Integrity Error'))
except captcha.CaptchaError:
error_msg = _(u'Bad Captcha. Please try again.')
h.flash_error(error_msg)
return self.new(data_dict)
except logic.ValidationError, e:
c.errors = e.error_dict
c.error_summary = e.error_summary
return self.new(data_dict, c.errors, c.error_summary)
# success
h.flash_success(_('User "%s" is now registered.') % (data_dict['name']))
#return base.render('user/logout_first.html')
return base.render('muser/new.html')
def edit(self, id=None, data=None, errors=None, error_summary=None):
context = {'save': 'save' in request.params,
'schema': self._edit_form_to_db_schema(),
'model': model, 'session': model.Session,
'user': c.user, 'auth_user_obj': c.userobj
}
if id is None:
base.abort(400, _('No user specified'))
if not new_authz.is_sysadmin(c.user):
base.abort(401, _('User %s not authorized to edit %s') % (str(c.user), id))
data_dict = {'id': id}
try:
logic.check_access('user_update', context, data_dict)
except logic.NotAuthorized:
base.abort(401, _('Unauthorized to edit a user.'))
if (context['save']) and not data:
return self._save_edit(id, context)
try:
old_data = logic.get_action('user_show')(context, data_dict)
schema = self._db_to_edit_form_schema()
if schema:
old_data, errors = validate(old_data, schema)
c.display_name = old_data.get('display_name')
c.user_name = old_data.get('name')
data = data or old_data
except logic.NotAuthorized:
base.abort(401, _('Unauthorized to edit user %s') % '')
except logic.NotFound:
base.abort(404, _('User not found'))
user_obj = context.get('user_obj')
errors = errors or {}
vars = {'data': data, 'errors': errors, 'error_summary': error_summary}
self._setup_template_variables({'model': model,
'session': model.Session,
'user': c.user or c.author},
data_dict)
c.is_myself = True
c.show_email_notifications = h.asbool(
config.get('ckan.activity_streams_email_notifications'))
c.form = base.render('muser/edit_user_form.html', extra_vars=vars)
return base.render('muser/edit.html')
def _save_edit(self, id, context):
try:
data_dict = logic.clean_dict(dictization_functions.unflatten(
logic.tuplize_dict(logic.parse_params(request.params))))
context['message'] = data_dict.get('log_message', '')
data_dict['id'] = id
# MOAN: Do I really have to do this here?
if 'activity_streams_email_notifications' not in data_dict:
data_dict['activity_streams_email_notifications'] = False
user = logic.get_action('user_update')(context, data_dict)
h.flash_success(_('Profile updated'))
user_index = h.url_for(controller='ckanext.tnext.controllers.MUser:MUserController', action='index')
h.redirect_to(user_index)
except logic.NotAuthorized:
base.abort(401, _('Unauthorized to edit user %s') % id)
except logic.NotFound, e:
base.abort(404, _('User not found'))
except dictization_functions.DataError:
base.abort(400, _(u'Integrity Error'))
except logic.ValidationError, e:
errors = e.error_dict
error_summary = e.error_summary
return self.edit(id, data_dict, errors, error_summary)
def _setup_template_variables(self, context, data_dict):
c.is_sysadmin = new_authz.is_sysadmin(c.user)
try:
user_dict = logic.get_action('user_show')(context, data_dict)
except logic.NotFound:
base.abort(404, _('User not found'))
except logic.NotAuthorized:
base.abort(401, _('Not authorized to see this page'))
c.user_dict = user_dict
c.is_myself = user_dict['name'] == c.user
c.about_formatted = h.render_markdown(user_dict['about'])
def _db_to_edit_form_schema(self):
'''This is an interface to manipulate data from the database
into a format suitable for the form (optional)'''
def _edit_form_to_db_schema(self):
return schema.user_edit_form_schema()
def delete(self, id):
'''Delete user with id passed as parameter'''
context = {'model': model,
'session': model.Session,
'user': c.user,
'auth_user_obj': c.userobj}
data_dict = {'id': id}
try:
logic.get_action('user_delete')(context, data_dict)
h.flash_success(_('User deleted!'))
user_index = h.url_for(controller='ckanext.tnext.controllers.MUser:MUserController', action='index')
h.redirect_to(user_index)
except logic.NotAuthorized:
msg = _('Unauthorized to delete user with id "{user_id}".')
base.abort(401, msg.format(user_id=id))
| mit | -4,200,688,493,169,835,500 | 36.915966 | 112 | 0.566379 | false |
newtrino/vertigo | tests/test7.py | 1 | 1747 | #!/usr/bin/python
""" This will be a test for rave's two-jet feature """
width=0.0015
length=5.3
from vertigo import RaveVertexFactory, EventFactory, RaveConstantMagneticField, \
RaveVacuumPropagator, LoopSettings, RaveTrackContainer, WeightedRaveTrack, \
ObserverManager_Instance, RaveCovariance3D, RaveEllipsoid3D, RavePoint3D
c=RaveCovariance3D( width**2, 0,0,width**2,0,length**2 )
e=RaveEllipsoid3D( RavePoint3D(),c )
LoopSettings.Instance().setVerbosity(0)
ravefactory=RaveVertexFactory ( RaveConstantMagneticField(0.,0.,4.), RaveVacuumPropagator(), e, "avr-primcut:3.0" )
eventfactory=EventFactory ( "bjets.170.1.txt.gz" )
event=eventfactory.next()
for simvtx in event.simVertices():
print simvtx
print len(event.jets()),"jets in event."
print len(event.tracks()),"tracks in event."
primaries=RaveTrackContainer()
first=True
secondaries=None
for jet in event.jets():
tracks=jet.tracks()
print len(tracks), "tracks in jet."
if not first: # put all tracks but of "first jet" in "primaries"
for track in tracks:
primaries.append ( track )
else:
secondaries=tracks
first=False
vertices=ravefactory.create ( event.tracks(), True )
print len(vertices),"vertices with all tracks"
for vtx in vertices:
print "Vtx Pos: (%.4f, %.4f, %.4f)" % (vtx.position().x(),vtx.position().y(),vtx.position().z() )
print len(primaries),"primary tracks."
vertices=ravefactory.create ( primaries, secondaries, True )
# vertices=ravefactory.create ( primaries, True )
print len(vertices),"vertices with all tracks"
for vtx in vertices:
print "Vtx Pos: (%.4f, %.4f, %.4f)" % (vtx.position().x(),vtx.position().y(),vtx.position().z() )
# obs=ObserverManager_Instance().get("EventPrinter")
# obs.process(event)
| gpl-2.0 | -6,821,613,297,838,847,000 | 31.351852 | 115 | 0.721809 | false |
rubik/poly | tests/test_poly.py | 1 | 9508 | import copy
import unittest
from poly import Poly, monomial
def pass_to(func, convert=(True, True), star=False):
def wrapper(meth):
def inner(self):
for value, expected in meth(self).items():
if convert[0] and not star:
value = Poly(value)
if convert[1]:
if isinstance(expected, tuple):
expected = tuple(map(Poly, expected))
else:
expected = Poly(expected)
val = func(*map(Poly, value)) if star else func(value)
self.assertEqual(val, expected)
return inner
return wrapper
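# Note (comment added for clarity, not in the original file): pass_to turns the
# {input: expected} mapping returned by a test method into assertions. Keys and
# values are converted to Poly according to `convert`; with star=True the key
# tuple is unpacked, e.g. @pass_to(Poly.__add__, star=True) feeds each key as two
# Poly operands to __add__.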
class TestPypolFuncs(unittest.TestCase):
def test_monomial(self):
self.assertEqual(monomial(1, 1), Poly([(1, 1)]))
self.assertEqual(monomial(-1, 0), Poly([(-1, 0)]))
self.assertEqual(monomial(0, 0), Poly([]))
self.assertEqual(monomial(1, 2), Poly([(1, 2)]))
@pass_to(Poly.from_string, (False, True))
def test_parse(self):
return {
'3x - 2': [(3, 1), (-2, 0)],
'x + 1': [(1, 1), (1, 0)],
'4x**2 + x - 1': [(4, 2), (1, 1), (-1, 0)],
'-2x^3 + x**2 -x + 1': [(-2, 3), (1, 2), (-1, 1), (1, 0)],
'- x ^ 3 + 2': [(-1, 3), (2, 0)],
'4 x': [(4, 1)],
'- 5 x ^ 3 + 1 - 4': [(-5, 3), (-3, 0)],
'-x - x^2': [(-1, 2), (-1, 1)],
'x + x - 3x': [(-1, 1)],
}
class TestPypolPoly(unittest.TestCase):
@pass_to(Poly.__repr__, (True, False))
def test_repr(self):
return {
((1, 2), (4, 1), (-2, 0)): '+ x^2 + 4x - 2',
((-3, 4), (-1, 2)): '- 3x^4 - x^2',
((-2, 2), (3, 1)): '- 2x^2 + 3x',
((2, 0),): '+ 2',
((1, 1),): '+ x',
((-1, 10),): '- x^10',
(): '0',
((-1, 0),): '- 1'
}
@pass_to(Poly.degree.fget, (True, False))
def test_degree(self):
return {
((-3, 2), (4, 0)): 2,
((4, 3), (0, 5), (0, 7), (9, 2)): 3,
((-1, 0),): 0,
((3, 2), (4, 1)): 2,
(): 0,
}
@pass_to(Poly.rhs.fget, (True, False))
def test_rhs(self):
return {
((-3, 4), (4, 2)): 0,
((-1, 0),): -1,
((9, 0), (-3, 2), (4, 2), (-5, 1)): 9,
((2, 2), (0, 0)): 0,
}
@pass_to(Poly.append, star=True)
def test_append(self):
return {
(((2, 3), (-3, 4)), ((1, 4), (2, 2))): [(-2, 4), (2, 3), (2, 2)],
(((-2, 3), (1, 2), (1, 1)), ((3, 2),)): [(-2, 3), (4, 2), (1, 1)],
(((3, 1),), ((-5, 1),)): [(-2, 1)],
(((4, 2), (-1, 1)), ()): [(4, 2), (-1, 1)],
}
@pass_to(Poly.is_num, (True, False))
def test_is_num(self):
return {
((-2, 0),): True,
((9, 9), (0, 4)): False,
((1, 1), (1, 0)): False,
((0, 0),): True,
}
@pass_to(Poly.simplify, (False, False))
def test_simplify(self):
return {
((1, 2), (3, 0), (-1, 0)): [(1, 2), (2, 0)],
((-3, 2), (-4, 2), (0, 4), (-2, 1)): [(-7, 2), (-2, 1)],
((0, 2),): [],
((4, 4), (-4, 4)): [],
((2, 1), (-8, 0)): [(2, 1), (-8, 0)]
}
@pass_to(copy.copy)
def test_copy(self):
return {
((1, 4), (-1, 0)): [(1, 4), (-1, 0)],
((-1, 2), (2, 3), (4, 1)): [(2, 3), (-1, 2), (4, 1)],
((3, 2),): [(3, 2)],
}
@pass_to(copy.deepcopy)
def test_deepcopy(self):
return {
((1, 4), (-1, 0)): [(1, 4), (-1, 0)],
((-1, 2), (2, 3), (4, 1)): [(2, 3), (-1, 2), (4, 1)],
((3, 2),): [(3, 2)],
}
def test_getitem(self):
self.assertEqual(Poly([(1, 2), (-1, 0)])[0], Poly([(1, 2)]))
self.assertEqual(Poly([(-3, 0), (4, 4)])[0], Poly([(4, 4)]))
self.assertEqual(Poly([(1, 1), (2, 0), (3, 2)])[1:],
Poly([(1, 1), (2, 0)]))
self.assertEqual(Poly([(-2, 3), (1, 2), (-1, 0)])[2:3],
Poly([(-1, 0)]))
@pass_to(Poly.__nonzero__, (True, False))
def test_nonzero(self):
return {
(): False,
((1, 0),): True,
((0, 0),): False,
((1, 1), (-3, 1), (4, 2)): True,
}
@pass_to(Poly.__bool__, (True, False))
    def test_bool(self):
return {
(): False,
((1, 0),): True,
((0, 0),): False,
((1, 1), (-3, 1), (4, 2)): True,
}
@pass_to(len, (True, False))
def test_len(self):
return {
(): 0,
((0, 0),): 0,
((1, 0),): 1,
((1, 4), (-1, 4), (1, 1)): 1,
((3, 2), (4, 1)): 2,
((1, 4), (-1, 3), (1, 2), (-1, 1), (1, 0)): 5
}
@pass_to(Poly.__eq__, (True, False), True)
def test_eq(self):
return {
(((1, 3), (-1, 2)), ((1, 3), (2, 2), (-3, 2))): True,
(((1, 3), (4, 2)), ((1, 3), (-4, 2))): False,
(((1, 0),), ((1, 0),)): True,
((), ()): True,
}
@pass_to(Poly.__ne__, (True, False), True)
def test_ne(self):
return {
(((1, 3), (-1, 2)), ((1, 3), (2, 2), (-3, 2))): False,
(((1, 3), (4, 2)), ((1, 3), (-4, 2))): True,
(((1, 0),), ((1, 0),)): False,
((), ()): False,
}
@pass_to(Poly.__pos__)
def test_pos(self):
return {
(): [],
((1, 0), (-1, 1)): [(1, 0), (-1, 1)],
((3, 2), (-3, 2), (4, 1)): [(4, 1)],
}
@pass_to(Poly.__neg__)
def test_neg(self):
return {
((1, 1),): [(-1, 1)],
((2, 4), (-3, 5)): [(3, 5), (-2, 4)],
((3, 1), (1, 1)): [(-4, 1)],
((1, 1),): [(-1, 1)],
}
@pass_to(Poly.__add__, star=True)
def test_add(self):
return {
(((3, 2), (4, 1)), ((1, 2), (-1, 1))): [(4, 2), (3, 1)],
(((1, 2), (3, 3)), ((2, 4), (-1, 3))): [(2, 4), (2, 3), (1, 2)],
(((3, 3),), ((-3, 3),)): [],
(((1, 1), (-2, 4)), ((3, 1), (2, 4))): [(4, 1)],
((), ((-3, 2),)): [(-3, 2)],
}
@pass_to(Poly.__sub__, star=True)
def test_sub(self):
return {
(((3, 2), (4, 1)), ((1, 2), (-1, 1))): [(2, 2), (5, 1)],
(((1, 2), (3, 3)), ((2, 4), (3, 3))): [(-2, 4), (1, 2)],
(((3, 3),), ((-3, 3),)): [(6, 3)],
(((1, 1), (-2, 4)), ((3, 1), (2, 4))): [(-4, 4), (-2, 1)],
((), ((-3, 2),)): [(3, 2)],
}
@pass_to(Poly.__mul__, star=True)
def test_mul(self):
return {
(((1, 1), (-1, 0)), ((1, 1), (-1, 0))): [(1, 2), (-2, 1), (1, 0)],
(((1, 0),), ((2, 3), (-1, 4))): [(-1, 4), (2, 3)],
(((-1, 1),), ((2, 3), (-1, 4))): [(1, 5), (-2, 4)]
}
@pass_to(divmod, star=True)
def test_divmod(self):
return {
(((3, 3), (-2, 2), (4, 1), (-3, 0)), ((1, 2), (3, 1), (3, 0))):
([(3, 1), (-11, 0)], [(28, 1), (30, 0)]),
(((1, 3), (-2, 2), (1, 1), (-5, 0)), ((-1, 1), (1, 0))):
([(-1, 2), (1, 1)], [(-5, 0)]),
(((1, 2), (8, 1), (-54, 0)), ((1, 1), (11, 0))):
([(1, 1), (-3, 0)], [(-21, 0)]),
(((6, 0),), ((2, 0),)): ([(3, 0)], []),
(((4, 2), (-2, 1), (2, 0)), ((2, 0),)):
([(2, 2), (-1, 1), (1, 0)], []),
((), ()): ([], []),
}
def test_divmod_value_error(self):
self.assertRaises(ValueError, divmod,
Poly([(1, 2), (-3, 1)]), Poly([(3, 3), (4, 0)]))
@pass_to(Poly.__div__, star=True)
def test_div(self):
return {
(((3, 3), (-2, 2), (4, 1), (-3, 0)), ((1, 2), (3, 1), (3, 0))):
[(3, 1), (-11, 0)],
(((1, 3), (-2, 2), (1, 1), (-5, 0)), ((-1, 1), (1, 0))):
[(-1, 2), (1, 1)],
(((1, 2), (8, 1), (-54, 0)), ((1, 1), (11, 0))):
[(1, 1), (-3, 0)],
(((6, 0),), ((2, 0),)): [(3, 0)],
(((4, 2), (-2, 1), (2, 0)), ((2, 0),)):
[(2, 2), (-1, 1), (1, 0)],
((), ()): [],
}
@pass_to(Poly.__mod__, star=True)
def test_mod(self):
return {
(((3, 3), (-2, 2), (4, 1), (-3, 0)), ((1, 2), (3, 1), (3, 0))):
[(28, 1), (30, 0)],
(((1, 3), (-2, 2), (1, 1), (-5, 0)), ((-1, 1), (1, 0))):
[(-5, 0)],
(((1, 2), (8, 1), (-54, 0)), ((1, 1), (11, 0))):
[(-21, 0)],
(((6, 0),), ((2, 0),)): [],
(((4, 2), (-2, 1), (2, 0)), ((2, 0),)):
[],
((), ()): [],
}
def test_pow(self):
self.assertRaises(TypeError, lambda: Poly([(2, 2), (-1, 1)]) ** -1)
self.assertRaises(TypeError, lambda: Poly([]) ** 0)
self.assertEqual(Poly([(1, 3), (2, 1)]) ** 1, Poly([(1, 3), (2, 1)]))
self.assertEqual(Poly([(1, 1), (-1, 0)]) ** 2,
Poly([(1, 2), (-2, 1), (1, 0)]))
self.assertEqual(Poly([(1, 3), (-1, 2)]) ** 0, Poly([(1, 0)]))
self.assertEqual(Poly([(1, 1)]) ** 3, Poly([(1, 3)]))
self.assertEqual(Poly([(1, 4)]) ** 3, Poly([(1, 12)]))
if __name__ == '__main__':
unittest.main()
| mit | 8,370,560,494,488,886,000 | 32.361404 | 78 | 0.30101 | false |
sandialabs/BioCompoundML | bcml/PubChemUtils/pubchempy_utils.py | 1 | 14579 | from __future__ import print_function
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from collections import Callable, defaultdict
from six.moves import xrange
try:
# For Python 3.0 and later
import urllib.request as urllib2
except ImportError:
# Fall back to Python 2's urllib2
import urllib2
import os
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from bs4 import BeautifulSoup
from time import sleep
_base = 'pubchem.ncbi.nlm.nih.gov'
_pug_rest = '"http://pubchem.ncbi.nlm.nih.gov/pug_rest"'
_dir = os.path.dirname(__file__)
_fp_file = os.path.abspath(os.path.join(_dir, 'fingerprints.txt'))
'''
This module extends the common functionality of the PubChemPy
package
'''
class CompoundDict(OrderedDict):
'''
    The compound dictionary is ordered and contains various levels of
    dictionaries underneath; this is the reason for the complicated structure
'''
def __init__(self, default_factory=defaultdict, *a, **kw):
if (default_factory is not None and
not isinstance(default_factory, Callable)):
raise TypeError('First argument must be callable')
OrderedDict.__init__(self, *a, **kw)
self.default_factory = default_factory
def __getitem__(self, key):
try:
return OrderedDict.__getitem__(self, key)
except KeyError:
return self.__missing__(key)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self):
if self.default_factory is None:
args = tuple()
else:
args = self.default_factory
return type(self), args, None, None, self.items()
def copy(self):
return self.__copy__()
def __copy__(self):
return type(self)(self.default_factory, self)
def __deepcopy__(self, memo):
import copy
return type(self)(self.default_factory,
copy.deepcopy(tuple(self.items())))
def __repr__(self):
return 'CompoundDict(%s, %s)' % (self.default_factory,
OrderedDict.__repr__(self))
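# Example (illustrative, not part of the original module):
#   compounds = CompoundDict()
#   compounds['702']['binhash'] = {}   # a missing key creates an empty defaultdict
#   list(compounds.keys())             # -> ['702'], insertion order is preserved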
def verbose_print(verbose, line):
if verbose:
print(line)
def _url_factory(uri):
'''
Handle the pubchem RESTful interface by passing a url directly
'''
uri = 'http://' + _base + uri
response = urllib2.urlopen(uri)
value = response.read().strip()
return value
def convert_cactvs(cactvs):
'''
This internal function converts 2D fingerprints to a string of 0/1s
The fingerprint is defined here:
ftp.ncbi.nlm.nih.gov/pubchem/specifications/pubchem_fingerprints.txt
The way that this function works is:
1) Pass cactvs
2) Strip the 2 trailing bytes
3) Strip the 2 leading bytes
4) Convert the letters to base64 binary (6-bits)
5) Report bits 32 through (881+32-11), which are the 881 informative
bits.
'''
b64 = {"A": 0, "B": 1, "C": 2, "D": 3, "E": 4, "F": 5, "G": 6,
"H": 7, "I": 8, "J": 9, "K": 10, "L": 11, "M": 12, "N": 13,
"O": 14, "P": 15, "Q": 16, "R": 17, "S": 18, "T": 19,
"U": 20, "V": 21, "W": 22, "X": 23, "Y": 24, "Z": 25,
"a": 26, "b": 27, "c": 28, "d": 29, "e": 30, "f": 31,
"g": 32, "h": 33, "i": 34, "j": 35, "k": 36, "l": 37,
"m": 38, "n": 39, "o": 40, "p": 41, "q": 42, "r": 43,
"s": 44, "t": 45, "u": 46, "v": 47, "w": 48, "x": 49,
"y": 50, "z": 51, "0": 52, "1": 53, "2": 54, "3": 55,
"4": 56, "5": 57, "6": 58, "7": 59, "8": 60, "9": 61,
"+": 62, "/": 63}
c = cactvs[:-2].strip()
binstring = (''.join([str(bin(b64[x]))[2:].zfill(6) for x in c.decode('utf-8')]))
return binstring[32:-11]
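# Worked illustration of the conversion above (example values only): the base64
# character 'B' maps to 1, and str(bin(1))[2:] -> '1' -> zfill(6) -> '000001', so
# every character contributes 6 bits; stripping the 32 leading prefix bits and the
# trailing padding leaves the informative PubChem fingerprint bits.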
def _parse_fingerprint():
'''
Read the NCBI fingerprint spec file and assign features to
each fingerprint
'''
fp_features = {}
with open(_fp_file) as fp:
for line in fp:
(pos, feature) = line.strip().split('\t')
fp_features[int(pos)] = feature
return fp_features
def get_binhash(cactvs):
'''
Convert CACTVS into a dictionary of fingerprint
features
'''
fingerprint = _parse_fingerprint()
binstring = convert_cactvs(cactvs)
binhash = {}
for count, val in enumerate(binstring):
binhash[fingerprint[count]] = val
return binhash
def cactvs_uri(ids):
'''
    This function retrieves the CACTVS uri from PubChem, which is a base64
encoded string, specifying the 881 bits, corresponding to the
fingerprint
'''
_id = str(ids)
uri = '/rest/pug/compound/cid/' + _id + '/property/Fingerprint2D/TXT'
return uri
def smiles_uri(ids):
_id = str(ids)
uri = '/rest/pug/compound/smiles/' + _id + '/cids/TXT'
return uri
def get_smiles(_id):
'''
    This function retrieves the CID for a SMILES from PubChem
'''
uri = smiles_uri(_id)
cid = _url_factory(uri)
return cid
def stream_sdf(ids):
'''
This function allows bulk streaming of SDF into a data structure
'''
concatenated_ids = ','.join(ids)
uri = sdf_uri(concatenated_ids)
sdf_stream = _url_factory(uri).decode().strip('$$$$')
sdfs = ["".join((data.lstrip(), '$$$$')) for data in
            sdf_stream.split('$$$$') if data != ""]
return sdfs
def sdf_uri(ids):
'''
    This function retrieves the SDF URI from PubChem
'''
_id = str(ids)
uri = '/rest/pug/compound/cid/' + _id + '/record/SDF'
return uri
def stream_xml(_id):
'''
This function allows streaming of pubchem XML into a data structure
'''
uri = xml_uri(_id)
xml = _url_factory(uri)
return xml
def xml_uri(_id):
'''
    This function retrieves the XML URI from PubChem
'''
_id = str(_id)
uri = '/rest/pug_view/data/compound/' + _id + '/XML/'
return uri
def extract_pubchem_xml_features(xml):
'''
Extracts primary PubChem Chemical and Physical data.
If multiple values are reported
for a given descriptor, the first is given, since, by
convention, these are the highest quality.
'''
xml_glob = BeautifulSoup(xml, "lxml")
values = {}
def _return_value_list(text, key):
'''Special function for returning list of values'''
return [y.get_text() for y in text.find_next_siblings(key)]
xml_globs = xml_glob.find_all('section')
properties = ''
match_text = 'Chemical and Physical Properties'
for xml_glob in xml_globs:
try:
if xml_glob.find('tocheading').get_text() == match_text:
properties = xml_glob
except:
pass
try:
for x in properties.find_all('name'):
value = None
name = x.get_text()
if name not in values:
if x.find_next_sibling('numvalue'):
value = x.find_next_sibling('numvalue').get_text()
if x.find_next_sibling('stringvalue'):
value = x.find_next_sibling('stringvalue').get_text()
if x.find_next_siblings('stringvaluelist'):
value = _return_value_list(x, 'stringvaluelist')
if x.find_next_siblings('numvaluelist'):
value = _return_value_list(x, 'stringvaluelist')
if value:
values[name] = value
except:
pass
return values
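# Illustrative return value (hypothetical keys/values; the exact names depend on
# the PubChem record being parsed):
#   {'Molecular Weight': '92.14', 'Boiling Point': '110.6 deg C', 'XLogP3': '2.7'}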
class Collect(object):
"""Initialize variables for Collect class"""
def __init__(self, compounds, fingerprint=False,
xml=False, sdf=False, proxy=False, user=False,
id_name='PubChem', chunks=False, try_count=3, verbose=False,
predictors=False, weights=False, smiles=False):
self.id_name = id_name
self.compounds = compounds
self.pubchem_ids = [x[id_name] for x in compounds]
self.compound = CompoundDict()
self.proxy = proxy
self.chunks = chunks
self.verbose = verbose
self.smiles = smiles
if proxy:
self.set_proxy()
if smiles is not False:
id_list = []
for count, _id in enumerate(self.pubchem_ids):
cid = get_smiles(_id)
id_list.append(cid)
self.compounds[count][id_name] = cid
self.pubchem_ids = id_list
if predictors is not False:
for count, _id in enumerate(self.pubchem_ids):
self.compound[_id]['predictor'] = predictors[count]
if weights is not False:
for count, _id in enumerate(self.pubchem_ids):
self.compound[_id]['weight'] = weights[count]
self.user = user
if user:
self.add_user()
self.verbose = verbose
self.fingerprint = fingerprint
if fingerprint:
self.add_fingerprint(fingerprint=True)
self.sdf = sdf
if sdf:
self.add_sdf()
self.xml = xml
if xml:
self.add_xml()
def set_proxy(self, proxy=False):
"""This function sets the proxy for the urllib2 library"""
if self.proxy:
proxy = self.proxy
if proxy is not False:
verbose_print(self.verbose, "Initializing proxy")
result = urlparse(proxy)
assert result.scheme, "Proxy must be a web address"
proxy_support = urllib2.ProxyHandler({
'http': proxy,
'https': proxy
})
opener = urllib2.build_opener(proxy_support)
urllib2.install_opener(opener)
def add_user(self, user=False):
"""This function allows user features to be passed
through the Collect Class"""
if self.user:
user = self.user
if user is True:
verbose_print(self.verbose, "Adding user provided features")
for count, _id in enumerate(self.pubchem_ids):
self.compound[_id]['userhash'] = self.compounds[count]['userhash']
def add_fingerprint(self, fingerprint=False, chunks=False):
"""This function collects fingerprint data from NCBI, currently
PubChemPy collects only ASN.1 data, which is difficult to parse
        into a binary hash of fingerprint values. It also doesn't allow
        bulk collection of the fingerprints. This function allows both."""
if self.fingerprint:
fingerprint = self.fingerprint
if self.chunks:
chunks = self.chunks
if fingerprint is True:
ids = self.pubchem_ids
verbose_print(self.verbose, "Getting fingerprints from NCBI")
fps = []
percent = 0.
length = float(len(self.pubchem_ids))
if length > 100 and chunks is False:
chunks = 100.
if chunks is not False:
for chunk_id in [ids[i:i + chunks] for i in xrange(0, len(ids), chunks)]:
                    '''This loop allows the ids to be chunked into groups of size `chunks`. This is
important for really long lists, which may create problems in trying
to query huge numbers of ids'''
percent = percent + float(chunks) / length
#print_string = '{:2.1%} out of {}'.format(percent, length)
#verbose_print(self.verbose, print_string)
concatenated_ids = ','.join(chunk_id)
uri = cactvs_uri(concatenated_ids)
fps.extend(_url_factory(uri).splitlines())
else:
concatenated_ids = ','.join(ids)
verbose_print(self.verbose, 'Collecting all fingerprints')
uri = cactvs_uri(concatenated_ids)
fps = _url_factory(uri).splitlines()
for i, cactvs in enumerate(fps):
self.compound[ids[i]]['binhash'] = get_binhash(cactvs)
def add_sdf(self, sdf=False, chunks=False):
"""This function collects NCBI sdfs and stores them for use
in cheminformatic tools"""
if self.sdf:
sdf = self.sdf
if self.chunks:
chunks = self.chunks
if sdf is True:
percent = 0.
length = float(len(self.pubchem_ids))
ids = self.pubchem_ids
if length > 100 and chunks is False:
chunks = 100
if chunks is not False:
for chunk_id in [ids[i:i + chunks] for i in xrange(0, len(ids), chunks)]:
                    '''This loop allows the ids to be chunked into groups of size `chunks`. This is
important for really long lists, which may create problems in trying
to query huge numbers of ids'''
percent = percent + chunks / length
#print_string = '{:2.1%} out of {}'.format(percent, length)
#verbose_print(self.verbose, print_string)
concatenated_ids = chunk_id
sdfs = stream_sdf(concatenated_ids)
for i, sdf in enumerate(sdfs):
self.compound[chunk_id[i]]['sdf'] = sdf
else:
sdfs = stream_sdf(ids)
for i, sdf in enumerate(sdfs):
self.compound[ids[i]]['sdf'] = sdf
def add_xml(self, xml=False, try_count=3):
"""This function collects NCBI XML and stores them for later parsing"""
if self.xml:
xml = self.xml
if xml is True:
percent = 0.
length = float(len(self.pubchem_ids))
ids = self.pubchem_ids
verbose_print(self.verbose, 'Collecting all XMLs')
for count, _id in enumerate(ids):
percent = float(count) / float(length)
#print_string = '{:2.1%} out of {}'.format(percent, length)
#verbose_print(self.verbose, print_string)
val = False
count = 0
while (val is False) and (count < try_count):
try:
xml_stream = stream_xml(_id)
self.compound[_id]['xml'] = extract_pubchem_xml_features(xml_stream)
val = True
except:
sleep(5)
count = count + 1
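# Example usage (illustrative only; the CIDs and proxy below are made up):
#   compounds = [{'PubChem': '702'}, {'PubChem': '887'}]
#   collector = Collect(compounds, fingerprint=True, xml=True,
#                       proxy='http://proxy.example.com:8080', verbose=True)
#   collector.compound['702']['binhash']   # per-bit fingerprint features
#   collector.compound['702']['xml']       # parsed physical/chemical properties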
| bsd-3-clause | 4,310,988,585,706,478,600 | 34.13012 | 92 | 0.554016 | false |
novafloss/django-chartjs | demo/demoproject/tests.py | 1 | 5181 | """Unit tests for chartjs api."""
import json
from django.test import TestCase
from django.urls import reverse
from chartjs.util import NULL, value_or_null
from demoproject.models import Meter
class LineChartJSTestCase(TestCase):
def test_line_chartjs(self):
resp = self.client.get(reverse("line_chart"))
self.assertContains(resp, "Chart.min.js")
def test_list_chartjs_json(self):
resp = self.client.get(reverse("line_chart_json"))
try:
data = json.loads(resp.content.decode("utf-8"))
except ValueError:
            self.fail("%r is not valid json" % resp.content)
self.assertIn("datasets", data)
self.assertNotIn("series", data)
class ColorTestCase(TestCase):
def test_colorview(self):
resp = self.client.get(reverse("colors"))
self.assertContains(resp, "100px")
class HighChartJSTestCase(TestCase):
def test_column_chartjs_json(self):
resp = self.client.get(reverse("column_highchart_json"))
try:
data = json.loads(resp.content.decode("utf-8"))
except ValueError:
            self.fail("%r is not valid json" % resp.content)
self.assertIn("title", data)
self.assertIn("text", data["title"])
self.assertEqual(data["title"]["text"], "Column Highchart test")
self.assertIn("credits", data)
credits = data["credits"]
self.assertEqual(credits["enabled"], False)
def test_list_chartjs_json(self):
resp = self.client.get(reverse("line_highchart_json"))
try:
data = json.loads(resp.content.decode("utf-8"))
except ValueError:
self.fail("%r is not valid json" % self.resp.content)
self.assertIn("series", data)
self.assertNotIn("datasets", data)
self.assertIn("credits", data)
credits = data["credits"]
self.assertEqual(credits["enabled"], True)
self.assertEqual(credits["href"], "http://example.com")
self.assertEqual(credits["text"], "Novapost Team")
def test_pie_chartjs_json(self):
resp = self.client.get(reverse("pie_highchart_json"))
try:
json.loads(resp.content.decode("utf-8"))
except ValueError:
self.fail("%r is not valid json" % self.resp.content)
def test_donut_chartjs_json(self):
resp = self.client.get(reverse("donut_highchart_json"))
try:
json.loads(resp.content.decode("utf-8"))
except ValueError:
self.fail("%r is not valid json" % self.resp.content)
class DiscontinuousDataTestCase(TestCase):
def setUp(self):
self.start_date = "2019-05-26"
self.end_date = "2019-06-04"
# water meter readings
Meter.objects.create(date="2019-05-26", name="water", reading=10)
Meter.objects.create(date="2019-05-27", name="water", reading=12)
Meter.objects.create(date="2019-05-28", name="water", reading=13)
Meter.objects.create(date="2019-05-29", name="water", reading=15)
Meter.objects.create(date="2019-06-01", name="water", reading=16)
Meter.objects.create(date="2019-06-02", name="water", reading=18)
Meter.objects.create(date="2019-06-03", name="water", reading=20)
Meter.objects.create(date="2019-06-04", name="water", reading=21)
# gas meter readings
Meter.objects.create(date="2019-05-28", name="gas", reading=15)
Meter.objects.create(date="2019-05-29", name="gas", reading=13)
Meter.objects.create(date="2019-05-30", name="gas", reading=12)
Meter.objects.create(date="2019-05-31", name="gas", reading=14)
Meter.objects.create(date="2019-06-01", name="gas", reading=16)
Meter.objects.create(date="2019-06-02", name="gas", reading=17)
def test_generator_fills_end_values_with_null(self):
queryset = Meter.objects.filter(name="gas")
actual_data = []
for item in value_or_null(
self.start_date, self.end_date, queryset, "date", "reading"
):
actual_data.append(item)
expected_data = [NULL, NULL, 15, 13, 12, 14, 16, 17, NULL, NULL]
self.assertEqual(actual_data, expected_data)
def test_generator_fills_middle_values_with_null(self):
queryset = Meter.objects.filter(name="water")
actual_data = []
for item in value_or_null(
self.start_date, self.end_date, queryset, "date", "reading"
):
actual_data.append(item)
expected_data = [10, 12, 13, 15, NULL, NULL, 16, 18, 20, 21]
self.assertEqual(actual_data, expected_data)
class ChartOptionsTestCase(TestCase):
def test_line_chart_with_options_json(self):
resp = self.client.get(reverse("line_chart_with_options"))
try:
data = json.loads(resp.content.decode("utf-8"))
except ValueError:
self.fail("%r is not valid json" % self.resp.content)
self.assertIn("data", data)
self.assertIn("datasets", data["data"])
self.assertIn("labels", data["data"])
self.assertIn("options", data)
self.assertIn("title", data["options"])
| bsd-3-clause | 4,093,138,191,086,040,600 | 38.549618 | 73 | 0.619958 | false |
averaart/fq_delta | fq_delta/fq_delta.py | 1 | 10373 | __author__ = 'averaart'
"""This module offers a means to store multiple versions of the same fastq file, by only storing the differences between
them and recreating the processed file based on the original file and the differences."""
# Batteries included
import os
import sys
from subprocess import Popen, PIPE
import hashlib
import zipfile
try:
import zlib
compression = zipfile.ZIP_DEFLATED
except ImportError:
compression = zipfile.ZIP_STORED
# 3rd party imports
import diff_match_patch as dmp_module
class InputError(Exception):
pass
class ChecksumError(Exception):
pass
# global variables
dmp = dmp_module.diff_match_patch()
dmp.Diff_Timeout = 0.0005 # default is 1
dmp.Match_Distance = 1000 # default is 1000
dmp.Match_MaxBits = 0 # default is 32, 0 is advised for python
def _open(name):
"""Opens a file, or streams an unquiping archive."""
if name[-3:] == '.qp':
return Popen('unquip -c ' + name, shell=True, stdout=PIPE).stdout
else:
try:
return open(name, 'r')
except IOError:
print "Couldn't find the file..."
def create_delta(original_file=sys.stdin, processed_file=sys.stdin, delta_filename='', output_processed=False):
"""This function creates a delta file based on an original file and a processed file. Either files could come from
standard in."""
if isinstance(processed_file, str):
processed_file = _open(processed_file)
if delta_filename == '':
delta_filename = processed_file.name
delta_file = DeltaFile('w', delta_filename, original_file)
for line in processed_file:
delta_file.write(line)
if output_processed:
print line,
delta_file.close()
def rebuild_fastq(delta_filename, original_file=sys.stdin, out=sys.stdout, to_stdout=False):
"""Recreates the processed file from the original and delta files."""
# Convert file names to files, and open quip-files while we're at it.
if isinstance(original_file, str):
original_file = _open(original_file)
processed_file = DeltaFile('r', delta_filename, original_file)
if isinstance(out, str):
out = open(out, 'w')
if out == sys.stdout:
to_stdout = False
for line in processed_file:
out.write(line + '\n')
if to_stdout:
sys.stdout.write(line + '\n')
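# --- Illustrative sketch (assumption, not part of the original module) ---
# Typical round trip using the two helpers above. The file names are placeholders:
# create_delta writes 'trimmed.delta.zip', and rebuild_fastq later consumes that
# archive together with the untouched original to regenerate the processed file.
def _example_round_trip():
    create_delta('reads.fastq', 'trimmed.fastq', 'trimmed.delta')
    rebuild_fastq('trimmed.delta.zip', 'reads.fastq', 'trimmed_rebuilt.fastq')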
class DeltaFile():
def __init__(self, mode, delta_filename, original_file=sys.stdin, processed_file=sys.stdin, reuse=False):
self.leftover = list()
self.mode = mode
self.reuse = reuse
# Open an existing deltafile to read the processed file
if self.mode == 'r':
self.delta_filename = delta_filename
self.buffer = list()
# Convert file names to files, and open quip-files while we're at it.
if isinstance(original_file, str):
self.original_file = _open(original_file)
else:
self.original_file = original_file
self.md5 = hashlib.md5()
# Extract the checksum and delta files.
# If there is no checksum file in the zipfile, bail out.
# ("I'm not touching that with a 10 foot pole!")
zf = zipfile.ZipFile(delta_filename)
namelist = zf.namelist()
if 'md5_checksum' not in namelist:
raise ChecksumError('No checksum found.')
else:
namelist.pop(namelist.index("md5_checksum"))
self.checksum = zf.open('md5_checksum', "r").read()
# For the delta file, first assume the filename is the same as the archive's name
# minus ".zip". If that fails, find the first file that contains the word "delta".
# Else just extract the first file you can find. Ugly, I know... :D
self.filename = self.delta_filename.rpartition('.')[0]
try:
zf.extract(self.filename)
except KeyError:
delta_names = [s for s in namelist if "delta" in s]
if len(delta_names) > 0:
self.filename = delta_names[0]
else:
self.filename = namelist[0]
zf.extract(self.filename)
self.deltas = open(self.filename, "r")
# Write a new deltafile from the processed data.
elif self.mode == 'w':
# Raise an Exception if both "files" turn out to be sys.stdin.
if original_file == sys.stdin and processed_file == sys.stdin:
raise InputError("Only one of the inputfiles can be STDIN.")
# Convert file names to files, and open quip-files while we're at it.
if isinstance(original_file, str):
self.original_file = _open(original_file)
else:
self.original_file = original_file
if isinstance(processed_file, str):
self.processed_file = _open(processed_file)
else:
self.processed_file = processed_file
self.md5 = hashlib.md5()
if delta_filename == '':
self.delta_filename = processed_file.name
else:
self.delta_filename = delta_filename
# Remove .zip if entered as delta_filename argument.
# It'll be added when the file is zipped.
if self.delta_filename[-4:] == '.zip':
self.delta_filename = self.delta_filename[:-4]
self.delta_file = open(self.delta_filename, 'a')
else:
raise Exception('Illegal mode: ' + str(mode))
def __iter__(self):
return self
def reset(self):
self.deltas.seek(0)
self.original_file.seek(0)
self.leftover = list()
self.md5 = hashlib.md5()
def next(self):
self.check_reading()
if self.original_file.closed or self.deltas.closed:
raise IOError("Trying to iterate over closed files...")
while len(self.buffer) <= 0:
while len(self.buffer) < 4:
delta = ''
t1 = ''
t2 = ''
t1 = self.original_file.readline()
delta = self.deltas.readline().strip()
if delta == '':
# End of File
# Check the checksum...
if not self.md5.digest() == self.checksum:
self.close()
raise ChecksumError("Checksum did not match!")
if self.reuse:
self.reset()
else:
# Clean up the uncompressed delta file
self.deltas.close()
os.remove(self.filename)
# Kill the iterator
raise StopIteration
diff = dmp.diff_fromDelta(t1.strip(), delta.strip())
t2 = dmp.diff_text2(diff)
self.buffer.append(t2)
# Check if the read was removed. If so, clear the buffer so the next four lines are read.
if self.buffer == ['', '', '', '']:
self.buffer = list()
nextline = self.buffer.pop(0)
self.md5.update(nextline)
return nextline
def readline(self):
self.check_reading()
return self.next()
def readlines(self):
self.check_reading()
return [line for line in self]
def writelines(self, lines, output_processed=False, close_file=False):
lines = self.leftover + lines
while len(lines) >= 4:
id1 = self.original_file.readline().strip()
id2 = lines.pop(0).strip()
seq1 = self.original_file.readline().strip()
seq2 = lines.pop(0).strip()
com1 = self.original_file.readline().strip()
com2 = lines.pop(0).strip()
qua1 = self.original_file.readline().strip()
qua2 = lines.pop(0).strip()
if id2 == '':
break
self.md5.update(id2)
self.md5.update(seq2)
self.md5.update(com2)
self.md5.update(qua2)
while id1.partition('\t')[0] != id2.partition('\t')[0]:
self.delta_file.write('-' + str(len(id1.strip())) + '\n')
self.delta_file.write('-' + str(len(seq1.strip())) + '\n')
self.delta_file.write('-' + str(len(com1.strip())) + '\n')
self.delta_file.write('-' + str(len(qua1.strip())) + '\n')
id1 = self.original_file.readline().strip()
seq1 = self.original_file.readline().strip()
com1 = self.original_file.readline().strip()
qua1 = self.original_file.readline().strip()
if id1 == '':
break
for (t1, t2) in ((id1, id2), (seq1, seq2), (com1, com2), (qua1, qua2)):
diff = dmp.diff_main(t1.strip(), t2.strip())
delta = dmp.diff_toDelta(diff) + '\n'
self.delta_file.write(delta)
if output_processed:
print t2
self.leftover = lines
if close_file:
self.close()
def write(self, string, output_processed=False, close_file=False):
lines = string.strip().split('\n')
self.writelines(lines, output_processed, close_file)
def close(self):
        if self.mode == 'r':
if not self.deltas.closed:
self.deltas.close()
try:
os.remove(self.filename)
except OSError:
pass
else:
self.delta_file.close()
# Copy the delta file to a compressed archive, and remove the delta file
self.zf = zipfile.ZipFile(self.delta_filename + '.zip', mode='w')
try:
self.zf.write(self.delta_filename, self.delta_filename.rpartition('/')[2], compress_type=compression)
self.zf.writestr('md5_checksum', self.md5.digest(), compress_type=compression)
os.remove(self.delta_filename)
finally:
self.zf.close()
def check_reading(self):
        if self.mode != 'r':
raise IOError('File not open for reading')
| bsd-3-clause | 63,940,633,616,006,200 | 33.347682 | 120 | 0.550757 | false |
ChinaQuants/blaze | blaze/expr/core.py | 1 | 10471 | from __future__ import absolute_import, division, print_function
import numbers
import toolz
import inspect
from toolz import unique, concat, compose, partial
from pprint import pprint
from ..compatibility import StringIO, _strtypes, builtins
from ..dispatch import dispatch
__all__ = ['Node', 'path', 'common_subexpression', 'eval_str']
base = (numbers.Number,) + _strtypes
def isidentical(a, b):
""" Strict equality testing
Different from x == y -> Eq(x, y)
>>> isidentical(1, 1)
True
>>> from blaze.expr import symbol
>>> x = symbol('x', 'int')
>>> isidentical(x, 1)
False
>>> isidentical(x + 1, x + 1)
True
>>> isidentical(x + 1, x + 2)
False
>>> isidentical((x, x + 1), (x, x + 1))
True
>>> isidentical((x, x + 1), (x, x + 2))
False
"""
if isinstance(a, base) and isinstance(b, base):
return a == b
if type(a) != type(b):
return False
if isinstance(a, Node):
return all(map(isidentical, a._args, b._args))
if isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)):
return len(a) == len(b) and all(map(isidentical, a, b))
return a == b
class Node(object):
""" Node in a tree
This serves as the base class for ``Expr``. This class holds all of the
tree traversal functions that are independent of tabular or array
computation. This is everything that we can do independent of the problem
domain. Note that datashape is not imported.
See Also
--------
blaze.expr.expressions.Expr
"""
__slots__ = ()
__inputs__ = '_child',
def __init__(self, *args, **kwargs):
assert frozenset(kwargs).issubset(self.__slots__)
for slot, arg in zip(self.__slots__[1:], args):
setattr(self, slot, arg)
for key, value in kwargs.items():
setattr(self, key, value)
@property
def _args(self):
return tuple(getattr(self, slot) for slot in self.__slots__[1:])
@property
def _inputs(self):
return tuple(getattr(self, i) for i in self.__inputs__)
def _leaves(self):
""" Leaves of an expression tree
All nodes without inputs. Leaves are returned in order, left to right.
>>> from blaze.expr import symbol, join, by
>>> t = symbol('t', 'var * {id: int32, name: string}')
>>> t._leaves()
[t]
>>> by(t.name, count=t.id.nunique())._leaves()
[t]
>>> v = symbol('v', 'var * {id: int32, city: string}')
>>> join(t, v)._leaves()
[t, v]
"""
if not self._inputs:
return [self]
else:
return list(unique(concat(i._leaves() for i in self._inputs if
isinstance(i, Node))))
isidentical = isidentical
def __hash__(self):
try:
return self._hash
except AttributeError:
self._hash = hash((type(self), self._args))
return self._hash
def __str__(self):
rep = ["%s=%s" % (slot, _str(arg))
for slot, arg in zip(self.__slots__[1:], self._args)]
return "%s(%s)" % (type(self).__name__, ', '.join(rep))
def __repr__(self):
return str(self)
def _traverse(self):
""" Traverse over tree, yielding all subtrees and leaves """
yield self
traversals = (arg._traverse() if isinstance(arg, Node) else [arg]
for arg in self._args)
for trav in traversals:
for item in trav:
yield item
def _subs(self, d):
""" Substitute terms in the tree
>>> from blaze.expr import symbol
>>> t = symbol('t', 'var * {name: string, amount: int, id: int}')
>>> expr = t.amount + 3
>>> expr._subs({3: 4, 'amount': 'id'}).isidentical(t.id + 4)
True
"""
return subs(self, d)
def _resources(self):
return toolz.merge([arg._resources() for arg in self._args
if isinstance(arg, Node)])
def _subterms(self):
return subterms(self)
def __contains__(self, other):
return other in set(self._subterms())
def __getstate__(self):
return self._args
def __setstate__(self, state):
self.__init__(*state)
def __eq__(self, other):
ident = self.isidentical(other)
if ident is True:
return ident
try:
return self._eq(other)
except AttributeError:
# e.g., we can't compare whole tables to other things (yet?)
pass
return False
def __ne__(self, other):
return self._ne(other)
def __lt__(self, other):
return self._lt(other)
def __le__(self, other):
return self._le(other)
def __gt__(self, other):
return self._gt(other)
def __ge__(self, other):
return self._ge(other)
def __add__(self, other):
return self._add(other)
def __radd__(self, other):
return self._radd(other)
def __mul__(self, other):
return self._mul(other)
def __rmul__(self, other):
return self._rmul(other)
def __div__(self, other):
return self._div(other)
def __rdiv__(self, other):
return self._rdiv(other)
__truediv__ = __div__
__rtruediv__ = __rdiv__
def __floordiv__(self, other):
return self._floordiv(other)
def __rfloordiv__(self, other):
return self._rfloordiv(other)
def __sub__(self, other):
return self._sub(other)
def __rsub__(self, other):
return self._rsub(other)
def __pow__(self, other):
return self._pow(other)
def __rpow__(self, other):
return self._rpow(other)
def __mod__(self, other):
return self._mod(other)
def __rmod__(self, other):
return self._rmod(other)
def __or__(self, other):
return self._or(other)
def __ror__(self, other):
return self._ror(other)
def __and__(self, other):
return self._and(other)
def __rand__(self, other):
return self._rand(other)
def __neg__(self):
return self._neg()
def __invert__(self):
return self._invert()
def __abs__(self):
from .math import abs
return abs(self)
def get_callable_name(o):
"""Welcome to str inception. Leave your kittens at home.
"""
# special case partial objects
if isinstance(o, partial):
return 'partial(%s, %s)' % (get_callable_name(o.func),
', '.join(map(str, o.args)))
try:
# python 3 makes builtins look nice
return o.__qualname__
except AttributeError:
try:
# show the module of the object, if we can
return '%s.%s' % (inspect.getmodule(o).__name__, o.__name__)
except AttributeError:
try:
# __self__ tells us the class the method is bound to
return '%s.%s' % (o.__self__.__name__, o.__name__)
except AttributeError:
# exhausted all avenues of printing callables so just print the
# name of the object
return o.__name__
def _str(s):
""" Wrap single quotes around strings """
if isinstance(s, str):
return "'%s'" % s
elif callable(s):
return get_callable_name(s)
elif isinstance(s, Node):
return str(s)
elif isinstance(s, (list, tuple)):
body = ", ".join(_str(x) for x in s)
return "({0})".format(body if len(s) > 1 else (body + ","))
else:
stream = StringIO()
pprint(s, stream=stream)
return stream.getvalue().rstrip()
@dispatch(Node)
def subterms(expr):
return concat([[expr], concat(map(subterms, expr._inputs))])
@dispatch(object)
def subterms(x):
yield x
def subs(o, d):
""" Substitute values within data structure
>>> subs(1, {1: 2})
2
>>> subs([1, 2, 3], {2: 'Hello'})
[1, 'Hello', 3]
"""
d = dict((k, v) for k, v in d.items() if k is not v)
if not d:
return o
try:
if o in d:
d = d.copy()
o = d.pop(o)
except TypeError:
pass
return _subs(o, d)
@dispatch((tuple, list), dict)
def _subs(o, d):
return type(o)([subs(arg, d) for arg in o])
@dispatch(Node, dict)
def _subs(o, d):
"""
>>> from blaze.expr import symbol
>>> t = symbol('t', 'var * {name: string, balance: int}')
>>> subs(t, {'balance': 'amount'}).fields
['name', 'amount']
"""
newargs = [subs(arg, d) for arg in o._args]
return type(o)(*newargs)
@dispatch(object, dict)
def _subs(o, d):
""" Private dispatched version of ``subs``
>>> subs('Hello', {})
'Hello'
"""
return o
def path(a, b):
""" A path of nodes from a to b
>>> from blaze.expr import symbol
>>> t = symbol('t', 'var * {name: string, amount: int, id: int}')
>>> expr = t.amount.sum()
>>> list(path(expr, t))
[sum(t.amount), t.amount, t]
"""
while not a.isidentical(b):
yield a
if not a._inputs:
break
for child in a._inputs:
if any(b.isidentical(node) for node in child._traverse()):
a = child
break
yield a
def common_subexpression(*exprs):
""" Common sub expression between subexpressions
Examples
--------
>>> from blaze.expr import symbol, common_subexpression
>>> t = symbol('t', 'var * {x: int, y: int}')
>>> common_subexpression(t.x, t.y)
t
"""
sets = [set(subterms(t)) for t in exprs]
return builtins.max(set.intersection(*sets),
key=compose(len, str))
def eval_str(expr):
""" String suitable for evaluation
>>> from blaze.expr import symbol, eval_str
>>> x = symbol('x', 'real')
>>> eval_str(2*x + 1)
'(2 * x) + 1'
>>> from datetime import date
>>> eval_str(date(2000, 1, 20))
'datetime.date(2000, 1, 20)'
"""
from datetime import date, datetime
if isinstance(expr, (date, datetime)):
return repr(expr)
return repr(expr) if isinstance(expr, _strtypes) else str(expr)
def parenthesize(s):
"""
>>> parenthesize('1')
'1'
>>> parenthesize('1 + 2')
'(1 + 2)'
"""
if ' ' in s:
return '(%s)' % s
else:
return s
| bsd-3-clause | 5,760,293,578,376,260,000 | 23.464953 | 79 | 0.530895 | false |
rs2/pandas | pandas/conftest.py | 1 | 33159 | """
This file is very long and growing, but it was decided to not split it yet, as
it's still manageable (2020-03-17, ~1.1k LoC). See gh-31989
Instead of splitting it was decided to define sections here:
- Configuration / Settings
- Autouse fixtures
- Common arguments
- Missing values & co.
- Classes
- Indices
- Series'
- DataFrames
- Operators & Operations
- Data sets/files
- Time zones
- Dtypes
- Misc
"""
from collections import abc
from datetime import date, time, timedelta, timezone
from decimal import Decimal
import operator
import os
from dateutil.tz import tzlocal, tzutc
import hypothesis
from hypothesis import strategies as st
import numpy as np
import pytest
from pytz import FixedOffset, utc
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame
import pandas._testing as tm
from pandas.core import ops
from pandas.core.indexes.api import Index, MultiIndex
# ----------------------------------------------------------------
# Configuration / Settings
# ----------------------------------------------------------------
# pytest
def pytest_configure(config):
# Register marks to avoid warnings in pandas.test()
# sync with setup.cfg
config.addinivalue_line("markers", "single: mark a test as single cpu only")
config.addinivalue_line("markers", "slow: mark a test as slow")
config.addinivalue_line("markers", "network: mark a test as network")
config.addinivalue_line(
"markers", "db: tests requiring a database (mysql or postgres)"
)
config.addinivalue_line("markers", "high_memory: mark a test as a high-memory only")
config.addinivalue_line("markers", "clipboard: mark a pd.read_clipboard test")
config.addinivalue_line(
"markers", "arm_slow: mark a test as slow for arm64 architecture"
)
def pytest_addoption(parser):
parser.addoption("--skip-slow", action="store_true", help="skip slow tests")
parser.addoption("--skip-network", action="store_true", help="skip network tests")
parser.addoption("--skip-db", action="store_true", help="skip db tests")
parser.addoption(
"--run-high-memory", action="store_true", help="run high memory tests"
)
parser.addoption("--only-slow", action="store_true", help="run only slow tests")
parser.addoption(
"--strict-data-files",
action="store_true",
help="Fail if a test is skipped for missing data file.",
)
def pytest_runtest_setup(item):
if "slow" in item.keywords and item.config.getoption("--skip-slow"):
pytest.skip("skipping due to --skip-slow")
if "slow" not in item.keywords and item.config.getoption("--only-slow"):
pytest.skip("skipping due to --only-slow")
if "network" in item.keywords and item.config.getoption("--skip-network"):
pytest.skip("skipping due to --skip-network")
if "db" in item.keywords and item.config.getoption("--skip-db"):
pytest.skip("skipping due to --skip-db")
if "high_memory" in item.keywords and not item.config.getoption(
"--run-high-memory"
):
pytest.skip("skipping high memory test since --run-high-memory was not set")
# Hypothesis
hypothesis.settings.register_profile(
"ci",
# Hypothesis timing checks are tuned for scalars by default, so we bump
# them from 200ms to 500ms per test case as the global default. If this
# is too short for a specific test, (a) try to make it faster, and (b)
# if it really is slow add `@settings(deadline=...)` with a working value,
# or `deadline=None` to entirely disable timeouts for that test.
deadline=500,
suppress_health_check=(hypothesis.HealthCheck.too_slow,),
)
hypothesis.settings.load_profile("ci")
# Registering these strategies makes them globally available via st.from_type,
# which is used for offsets in tests/tseries/offsets/test_offsets_properties.py
for name in "MonthBegin MonthEnd BMonthBegin BMonthEnd".split():
cls = getattr(pd.tseries.offsets, name)
st.register_type_strategy(
cls, st.builds(cls, n=st.integers(-99, 99), normalize=st.booleans())
)
for name in "YearBegin YearEnd BYearBegin BYearEnd".split():
cls = getattr(pd.tseries.offsets, name)
st.register_type_strategy(
cls,
st.builds(
cls,
n=st.integers(-5, 5),
normalize=st.booleans(),
month=st.integers(min_value=1, max_value=12),
),
)
for name in "QuarterBegin QuarterEnd BQuarterBegin BQuarterEnd".split():
cls = getattr(pd.tseries.offsets, name)
st.register_type_strategy(
cls,
st.builds(
cls,
n=st.integers(-24, 24),
normalize=st.booleans(),
startingMonth=st.integers(min_value=1, max_value=12),
),
)
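# Illustrative sketch (assumption, not part of the original conftest): once the
# strategies above are registered, Hypothesis can draw offset instances directly
# via st.from_type. The function below is only an example and is not collected
# by pytest because its name does not start with "test_".
@hypothesis.given(offset=st.from_type(pd.tseries.offsets.MonthBegin))
def _example_monthbegin_property(offset):
    # Rebuilding the offset from its own parameters yields an equal offset.
    assert offset == pd.tseries.offsets.MonthBegin(offset.n, normalize=offset.normalize)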
# ----------------------------------------------------------------
# Autouse fixtures
# ----------------------------------------------------------------
@pytest.fixture(autouse=True)
def configure_tests():
"""
Configure settings for all tests and test modules.
"""
pd.set_option("chained_assignment", "raise")
@pytest.fixture(autouse=True)
def add_imports(doctest_namespace):
"""
Make `np` and `pd` names available for doctests.
"""
doctest_namespace["np"] = np
doctest_namespace["pd"] = pd
# ----------------------------------------------------------------
# Common arguments
# ----------------------------------------------------------------
@pytest.fixture(params=[0, 1, "index", "columns"], ids=lambda x: f"axis {repr(x)}")
def axis(request):
"""
Fixture for returning the axis numbers of a DataFrame.
"""
return request.param
axis_frame = axis
@pytest.fixture(params=[0, "index"], ids=lambda x: f"axis {repr(x)}")
def axis_series(request):
"""
Fixture for returning the axis numbers of a Series.
"""
return request.param
@pytest.fixture(params=[True, False, None])
def observed(request):
"""
Pass in the observed keyword to groupby for [True, False]
This indicates whether categoricals should return values for
values which are not in the grouper [False / None], or only values which
appear in the grouper [True]. [None] is supported for future compatibility
if we decide to change the default (and would need to warn if this
parameter is not passed).
"""
return request.param
@pytest.fixture(params=[True, False, None])
def ordered(request):
"""
Boolean 'ordered' parameter for Categorical.
"""
return request.param
@pytest.fixture(params=["first", "last", False])
def keep(request):
"""
Valid values for the 'keep' parameter used in
.duplicated or .drop_duplicates
"""
return request.param
@pytest.fixture(params=["left", "right", "both", "neither"])
def closed(request):
"""
Fixture for trying all interval closed parameters.
"""
return request.param
@pytest.fixture(params=["left", "right", "both", "neither"])
def other_closed(request):
"""
Secondary closed fixture to allow parametrizing over all pairs of closed.
"""
return request.param
@pytest.fixture(params=[None, "gzip", "bz2", "zip", "xz"])
def compression(request):
"""
Fixture for trying common compression types in compression tests.
"""
return request.param
@pytest.fixture(params=["gzip", "bz2", "zip", "xz"])
def compression_only(request):
"""
Fixture for trying common compression types in compression tests excluding
uncompressed case.
"""
return request.param
@pytest.fixture(params=[True, False])
def writable(request):
"""
Fixture that an array is writable.
"""
return request.param
@pytest.fixture(params=["inner", "outer", "left", "right"])
def join_type(request):
"""
Fixture for trying all types of join operations.
"""
return request.param
@pytest.fixture(params=["nlargest", "nsmallest"])
def nselect_method(request):
"""
Fixture for trying all nselect methods.
"""
return request.param
# ----------------------------------------------------------------
# Missing values & co.
# ----------------------------------------------------------------
@pytest.fixture(params=[None, np.nan, pd.NaT, float("nan"), pd.NA], ids=str)
def nulls_fixture(request):
"""
Fixture for each null type in pandas.
"""
return request.param
nulls_fixture2 = nulls_fixture # Generate cartesian product of nulls_fixture
@pytest.fixture(params=[None, np.nan, pd.NaT])
def unique_nulls_fixture(request):
"""
Fixture for each null type in pandas, each null type exactly once.
"""
return request.param
# Generate cartesian product of unique_nulls_fixture:
unique_nulls_fixture2 = unique_nulls_fixture
# ----------------------------------------------------------------
# Classes
# ----------------------------------------------------------------
@pytest.fixture(params=[pd.Index, pd.Series], ids=["index", "series"])
def index_or_series(request):
"""
Fixture to parametrize over Index and Series, made necessary by a mypy
bug, giving an error:
List item 0 has incompatible type "Type[Series]"; expected "Type[PandasObject]"
See GH#29725
"""
return request.param
# Generate cartesian product of index_or_series fixture:
index_or_series2 = index_or_series
@pytest.fixture
def dict_subclass():
"""
Fixture for a dictionary subclass.
"""
class TestSubDict(dict):
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
return TestSubDict
@pytest.fixture
def non_dict_mapping_subclass():
"""
Fixture for a non-mapping dictionary subclass.
"""
class TestNonDictMapping(abc.Mapping):
def __init__(self, underlying_dict):
self._data = underlying_dict
def __getitem__(self, key):
return self._data.__getitem__(key)
def __iter__(self):
return self._data.__iter__()
def __len__(self):
return self._data.__len__()
return TestNonDictMapping
# ----------------------------------------------------------------
# Indices
# ----------------------------------------------------------------
@pytest.fixture
def multiindex_year_month_day_dataframe_random_data():
"""
DataFrame with 3 level MultiIndex (year, month, day) covering
first 100 business days from 2000-01-01 with random data
"""
tdf = tm.makeTimeDataFrame(100)
ymd = tdf.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum()
# use Int64Index, to make sure things work
ymd.index = ymd.index.set_levels([lev.astype("i8") for lev in ymd.index.levels])
ymd.index.set_names(["year", "month", "day"], inplace=True)
return ymd
def _create_multiindex():
"""
MultiIndex used to test the general functionality of this object
"""
# See Also: tests.multi.conftest.idx
major_axis = Index(["foo", "bar", "baz", "qux"])
minor_axis = Index(["one", "two"])
major_codes = np.array([0, 0, 1, 2, 3, 3])
minor_codes = np.array([0, 1, 0, 1, 0, 1])
index_names = ["first", "second"]
mi = MultiIndex(
levels=[major_axis, minor_axis],
codes=[major_codes, minor_codes],
names=index_names,
verify_integrity=False,
)
return mi
def _create_mi_with_dt64tz_level():
"""
MultiIndex with a level that is a tzaware DatetimeIndex.
"""
# GH#8367 round trip with pickle
return MultiIndex.from_product(
[[1, 2], ["a", "b"], pd.date_range("20130101", periods=3, tz="US/Eastern")],
names=["one", "two", "three"],
)
indices_dict = {
"unicode": tm.makeUnicodeIndex(100),
"string": tm.makeStringIndex(100),
"datetime": tm.makeDateIndex(100),
"datetime-tz": tm.makeDateIndex(100, tz="US/Pacific"),
"period": tm.makePeriodIndex(100),
"timedelta": tm.makeTimedeltaIndex(100),
"int": tm.makeIntIndex(100),
"uint": tm.makeUIntIndex(100),
"range": tm.makeRangeIndex(100),
"float": tm.makeFloatIndex(100),
"bool": tm.makeBoolIndex(10),
"categorical": tm.makeCategoricalIndex(100),
"interval": tm.makeIntervalIndex(100),
"empty": Index([]),
"tuples": MultiIndex.from_tuples(zip(["foo", "bar", "baz"], [1, 2, 3])),
"mi-with-dt64tz-level": _create_mi_with_dt64tz_level(),
"multi": _create_multiindex(),
"repeats": Index([0, 0, 1, 1, 2, 2]),
}
@pytest.fixture(params=indices_dict.keys())
def index(request):
"""
Fixture for many "simple" kinds of indices.
These indices are unlikely to cover corner cases, e.g.
- no names
- no NaTs/NaNs
- no values near implementation bounds
- ...
"""
# copy to avoid mutation, e.g. setting .name
return indices_dict[request.param].copy()
# Needed to generate cartesian product of indices
index_fixture2 = index
@pytest.fixture(params=indices_dict.keys())
def index_with_missing(request):
"""
Fixture for indices with missing values
"""
if request.param in ["int", "uint", "range", "empty", "repeats"]:
pytest.xfail("missing values not supported")
    # GH 35538. Use deep copy to avoid elusive bug on np-dev
# Azure pipeline that writes into indices_dict despite copy
ind = indices_dict[request.param].copy(deep=True)
vals = ind.values
if request.param in ["tuples", "mi-with-dt64tz-level", "multi"]:
# For setting missing values in the top level of MultiIndex
vals = ind.tolist()
vals[0] = tuple([None]) + vals[0][1:]
vals[-1] = tuple([None]) + vals[-1][1:]
return MultiIndex.from_tuples(vals)
else:
vals[0] = None
vals[-1] = None
return type(ind)(vals)
# ----------------------------------------------------------------
# Series'
# ----------------------------------------------------------------
@pytest.fixture
def empty_series():
return pd.Series([], index=[], dtype=np.float64)
@pytest.fixture
def string_series():
"""
Fixture for Series of floats with Index of unique strings
"""
s = tm.makeStringSeries()
s.name = "series"
return s
@pytest.fixture
def object_series():
"""
Fixture for Series of dtype object with Index of unique strings
"""
s = tm.makeObjectSeries()
s.name = "objects"
return s
@pytest.fixture
def datetime_series():
"""
Fixture for Series of floats with DatetimeIndex
"""
s = tm.makeTimeSeries()
s.name = "ts"
return s
def _create_series(index):
""" Helper for the _series dict """
size = len(index)
data = np.random.randn(size)
return pd.Series(data, index=index, name="a")
_series = {
f"series-with-{index_id}-index": _create_series(index)
for index_id, index in indices_dict.items()
}
@pytest.fixture
def series_with_simple_index(index):
"""
Fixture for tests on series with changing types of indices.
"""
return _create_series(index)
_narrow_dtypes = [
np.float16,
np.float32,
np.int8,
np.int16,
np.int32,
np.uint8,
np.uint16,
np.uint32,
]
_narrow_series = {
f"{dtype.__name__}-series": tm.makeFloatSeries(name="a").astype(dtype)
for dtype in _narrow_dtypes
}
@pytest.fixture(params=_narrow_series.keys())
def narrow_series(request):
"""
Fixture for Series with low precision data types
"""
# copy to avoid mutation, e.g. setting .name
return _narrow_series[request.param].copy()
_index_or_series_objs = {**indices_dict, **_series, **_narrow_series}
@pytest.fixture(params=_index_or_series_objs.keys())
def index_or_series_obj(request):
"""
Fixture for tests on indexes, series and series with a narrow dtype
copy to avoid mutation, e.g. setting .name
"""
return _index_or_series_objs[request.param].copy(deep=True)
# ----------------------------------------------------------------
# DataFrames
# ----------------------------------------------------------------
@pytest.fixture
def empty_frame():
return DataFrame()
@pytest.fixture
def int_frame():
"""
Fixture for DataFrame of ints with index of unique strings
Columns are ['A', 'B', 'C', 'D']
A B C D
vpBeWjM651 1 0 1 0
5JyxmrP1En -1 0 0 0
qEDaoD49U2 -1 1 0 0
m66TkTfsFe 0 0 0 0
EHPaNzEUFm -1 0 -1 0
fpRJCevQhi 2 0 0 0
OlQvnmfi3Q 0 0 -2 0
... .. .. .. ..
uB1FPlz4uP 0 0 0 1
EcSe6yNzCU 0 0 -1 0
L50VudaiI8 -1 1 -2 0
y3bpw4nwIp 0 -1 0 0
H0RdLLwrCT 1 1 0 0
rY82K0vMwm 0 0 0 0
1OPIUjnkjk 2 0 0 0
[30 rows x 4 columns]
"""
return DataFrame(tm.getSeriesData()).astype("int64")
@pytest.fixture
def datetime_frame():
"""
Fixture for DataFrame of floats with DatetimeIndex
Columns are ['A', 'B', 'C', 'D']
A B C D
2000-01-03 -1.122153 0.468535 0.122226 1.693711
2000-01-04 0.189378 0.486100 0.007864 -1.216052
2000-01-05 0.041401 -0.835752 -0.035279 -0.414357
2000-01-06 0.430050 0.894352 0.090719 0.036939
2000-01-07 -0.620982 -0.668211 -0.706153 1.466335
2000-01-10 -0.752633 0.328434 -0.815325 0.699674
2000-01-11 -2.236969 0.615737 -0.829076 -1.196106
... ... ... ... ...
2000-02-03 1.642618 -0.579288 0.046005 1.385249
2000-02-04 -0.544873 -1.160962 -0.284071 -1.418351
2000-02-07 -2.656149 -0.601387 1.410148 0.444150
2000-02-08 -1.201881 -1.289040 0.772992 -1.445300
2000-02-09 1.377373 0.398619 1.008453 -0.928207
2000-02-10 0.473194 -0.636677 0.984058 0.511519
2000-02-11 -0.965556 0.408313 -1.312844 -0.381948
[30 rows x 4 columns]
"""
return DataFrame(tm.getTimeSeriesData())
@pytest.fixture
def float_frame():
"""
Fixture for DataFrame of floats with index of unique strings
Columns are ['A', 'B', 'C', 'D'].
A B C D
P7GACiRnxd -0.465578 -0.361863 0.886172 -0.053465
qZKh6afn8n -0.466693 -0.373773 0.266873 1.673901
tkp0r6Qble 0.148691 -0.059051 0.174817 1.598433
wP70WOCtv8 0.133045 -0.581994 -0.992240 0.261651
M2AeYQMnCz -1.207959 -0.185775 0.588206 0.563938
QEPzyGDYDo -0.381843 -0.758281 0.502575 -0.565053
r78Jwns6dn -0.653707 0.883127 0.682199 0.206159
... ... ... ... ...
IHEGx9NO0T -0.277360 0.113021 -1.018314 0.196316
lPMj8K27FA -1.313667 -0.604776 -1.305618 -0.863999
qa66YMWQa5 1.110525 0.475310 -0.747865 0.032121
yOa0ATsmcE -0.431457 0.067094 0.096567 -0.264962
65znX3uRNG 1.528446 0.160416 -0.109635 -0.032987
eCOBvKqf3e 0.235281 1.622222 0.781255 0.392871
xSucinXxuV -1.263557 0.252799 -0.552247 0.400426
[30 rows x 4 columns]
"""
return DataFrame(tm.getSeriesData())
# ----------------------------------------------------------------
# Operators & Operations
# ----------------------------------------------------------------
_all_arithmetic_operators = [
"__add__",
"__radd__",
"__sub__",
"__rsub__",
"__mul__",
"__rmul__",
"__floordiv__",
"__rfloordiv__",
"__truediv__",
"__rtruediv__",
"__pow__",
"__rpow__",
"__mod__",
"__rmod__",
]
@pytest.fixture(params=_all_arithmetic_operators)
def all_arithmetic_operators(request):
"""
Fixture for dunder names for common arithmetic operations.
"""
return request.param
@pytest.fixture(
params=[
operator.add,
ops.radd,
operator.sub,
ops.rsub,
operator.mul,
ops.rmul,
operator.truediv,
ops.rtruediv,
operator.floordiv,
ops.rfloordiv,
operator.mod,
ops.rmod,
operator.pow,
ops.rpow,
]
)
def all_arithmetic_functions(request):
"""
Fixture for operator and roperator arithmetic functions.
Notes
-----
    Unlike all_arithmetic_operators, these are the operator / roperator
    functions themselves; divmod and rdivmod are not included.
"""
return request.param
_all_numeric_reductions = [
"sum",
"max",
"min",
"mean",
"prod",
"std",
"var",
"median",
"kurt",
"skew",
]
@pytest.fixture(params=_all_numeric_reductions)
def all_numeric_reductions(request):
"""
Fixture for numeric reduction names.
"""
return request.param
_all_boolean_reductions = ["all", "any"]
@pytest.fixture(params=_all_boolean_reductions)
def all_boolean_reductions(request):
"""
Fixture for boolean reduction names.
"""
return request.param
_all_reductions = _all_numeric_reductions + _all_boolean_reductions
@pytest.fixture(params=_all_reductions)
def all_reductions(request):
"""
Fixture for all (boolean + numeric) reduction names.
"""
return request.param
@pytest.fixture(params=["__eq__", "__ne__", "__le__", "__lt__", "__ge__", "__gt__"])
def all_compare_operators(request):
"""
Fixture for dunder names for common compare operations
* >=
* >
* ==
* !=
* <
* <=
"""
return request.param
@pytest.fixture(params=["__le__", "__lt__", "__ge__", "__gt__"])
def compare_operators_no_eq_ne(request):
"""
Fixture for dunder names for compare operations except == and !=
* >=
* >
* <
* <=
"""
return request.param
@pytest.fixture(
params=["__and__", "__rand__", "__or__", "__ror__", "__xor__", "__rxor__"]
)
def all_logical_operators(request):
"""
Fixture for dunder names for common logical operations
* |
* &
* ^
"""
return request.param
# ----------------------------------------------------------------
# Data sets/files
# ----------------------------------------------------------------
@pytest.fixture
def strict_data_files(pytestconfig):
"""
Returns the configuration for the test setting `--strict-data-files`.
"""
return pytestconfig.getoption("--strict-data-files")
@pytest.fixture
def datapath(strict_data_files):
"""
Get the path to a data file.
Parameters
----------
path : str
Path to the file, relative to ``pandas/tests/``
Returns
-------
path including ``pandas/tests``.
Raises
------
ValueError
If the path doesn't exist and the --strict-data-files option is set.
"""
BASE_PATH = os.path.join(os.path.dirname(__file__), "tests")
def deco(*args):
path = os.path.join(BASE_PATH, *args)
if not os.path.exists(path):
if strict_data_files:
raise ValueError(
f"Could not find file {path} and --strict-data-files is set."
)
else:
pytest.skip(f"Could not find {path}.")
return path
return deco
@pytest.fixture
def iris(datapath):
"""
The iris dataset as a DataFrame.
"""
return pd.read_csv(datapath("io", "data", "csv", "iris.csv"))
# ----------------------------------------------------------------
# Time zones
# ----------------------------------------------------------------
TIMEZONES = [
None,
"UTC",
"US/Eastern",
"Asia/Tokyo",
"dateutil/US/Pacific",
"dateutil/Asia/Singapore",
tzutc(),
tzlocal(),
FixedOffset(300),
FixedOffset(0),
FixedOffset(-300),
timezone.utc,
timezone(timedelta(hours=1)),
timezone(timedelta(hours=-1), name="foo"),
]
TIMEZONE_IDS = [repr(i) for i in TIMEZONES]
@td.parametrize_fixture_doc(str(TIMEZONE_IDS))
@pytest.fixture(params=TIMEZONES, ids=TIMEZONE_IDS)
def tz_naive_fixture(request):
"""
Fixture for trying timezones including default (None): {0}
"""
return request.param
@td.parametrize_fixture_doc(str(TIMEZONE_IDS[1:]))
@pytest.fixture(params=TIMEZONES[1:], ids=TIMEZONE_IDS[1:])
def tz_aware_fixture(request):
"""
Fixture for trying explicit timezones: {0}
"""
return request.param
# Generate cartesian product of tz_aware_fixture:
tz_aware_fixture2 = tz_aware_fixture
@pytest.fixture(scope="module")
def datetime_tz_utc():
"""
Yields the UTC timezone object from the datetime module.
"""
return timezone.utc
@pytest.fixture(params=["utc", "dateutil/UTC", utc, tzutc(), timezone.utc])
def utc_fixture(request):
"""
Fixture to provide variants of UTC timezone strings and tzinfo objects.
"""
return request.param
# ----------------------------------------------------------------
# Dtypes
# ----------------------------------------------------------------
@pytest.fixture(params=tm.STRING_DTYPES)
def string_dtype(request):
"""
Parametrized fixture for string dtypes.
* str
* 'str'
* 'U'
"""
return request.param
@pytest.fixture(params=tm.BYTES_DTYPES)
def bytes_dtype(request):
"""
Parametrized fixture for bytes dtypes.
* bytes
* 'bytes'
"""
return request.param
@pytest.fixture(params=tm.OBJECT_DTYPES)
def object_dtype(request):
"""
Parametrized fixture for object dtypes.
* object
* 'object'
"""
return request.param
@pytest.fixture(params=tm.DATETIME64_DTYPES)
def datetime64_dtype(request):
"""
Parametrized fixture for datetime64 dtypes.
* 'datetime64[ns]'
* 'M8[ns]'
"""
return request.param
@pytest.fixture(params=tm.TIMEDELTA64_DTYPES)
def timedelta64_dtype(request):
"""
Parametrized fixture for timedelta64 dtypes.
* 'timedelta64[ns]'
* 'm8[ns]'
"""
return request.param
@pytest.fixture(params=tm.FLOAT_DTYPES)
def float_dtype(request):
"""
Parameterized fixture for float dtypes.
* float
* 'float32'
* 'float64'
"""
return request.param
@pytest.fixture(params=tm.COMPLEX_DTYPES)
def complex_dtype(request):
"""
Parameterized fixture for complex dtypes.
* complex
* 'complex64'
* 'complex128'
"""
return request.param
@pytest.fixture(params=tm.SIGNED_INT_DTYPES)
def sint_dtype(request):
"""
Parameterized fixture for signed integer dtypes.
* int
* 'int8'
* 'int16'
* 'int32'
* 'int64'
"""
return request.param
@pytest.fixture(params=tm.UNSIGNED_INT_DTYPES)
def uint_dtype(request):
"""
Parameterized fixture for unsigned integer dtypes.
* 'uint8'
* 'uint16'
* 'uint32'
* 'uint64'
"""
return request.param
@pytest.fixture(params=tm.ALL_INT_DTYPES)
def any_int_dtype(request):
"""
Parameterized fixture for any integer dtype.
* int
* 'int8'
* 'uint8'
* 'int16'
* 'uint16'
* 'int32'
* 'uint32'
* 'int64'
* 'uint64'
"""
return request.param
@pytest.fixture(params=tm.ALL_EA_INT_DTYPES)
def any_nullable_int_dtype(request):
"""
Parameterized fixture for any nullable integer dtype.
* 'UInt8'
* 'Int8'
* 'UInt16'
* 'Int16'
* 'UInt32'
* 'Int32'
* 'UInt64'
* 'Int64'
"""
return request.param
@pytest.fixture(params=tm.SIGNED_EA_INT_DTYPES)
def any_signed_nullable_int_dtype(request):
"""
Parameterized fixture for any signed nullable integer dtype.
* 'Int8'
* 'Int16'
* 'Int32'
* 'Int64'
"""
return request.param
@pytest.fixture(params=tm.ALL_REAL_DTYPES)
def any_real_dtype(request):
"""
Parameterized fixture for any (purely) real numeric dtype.
* int
* 'int8'
* 'uint8'
* 'int16'
* 'uint16'
* 'int32'
* 'uint32'
* 'int64'
* 'uint64'
* float
* 'float32'
* 'float64'
"""
return request.param
@pytest.fixture(params=tm.ALL_NUMPY_DTYPES)
def any_numpy_dtype(request):
"""
Parameterized fixture for all numpy dtypes.
* bool
* 'bool'
* int
* 'int8'
* 'uint8'
* 'int16'
* 'uint16'
* 'int32'
* 'uint32'
* 'int64'
* 'uint64'
* float
* 'float32'
* 'float64'
* complex
* 'complex64'
* 'complex128'
* str
* 'str'
* 'U'
* bytes
* 'bytes'
* 'datetime64[ns]'
* 'M8[ns]'
* 'timedelta64[ns]'
* 'm8[ns]'
* object
* 'object'
"""
return request.param
# categoricals are handled separately
_any_skipna_inferred_dtype = [
("string", ["a", np.nan, "c"]),
("string", ["a", pd.NA, "c"]),
("bytes", [b"a", np.nan, b"c"]),
("empty", [np.nan, np.nan, np.nan]),
("empty", []),
("mixed-integer", ["a", np.nan, 2]),
("mixed", ["a", np.nan, 2.0]),
("floating", [1.0, np.nan, 2.0]),
("integer", [1, np.nan, 2]),
("mixed-integer-float", [1, np.nan, 2.0]),
("decimal", [Decimal(1), np.nan, Decimal(2)]),
("boolean", [True, np.nan, False]),
("boolean", [True, pd.NA, False]),
("datetime64", [np.datetime64("2013-01-01"), np.nan, np.datetime64("2018-01-01")]),
("datetime", [pd.Timestamp("20130101"), np.nan, pd.Timestamp("20180101")]),
("date", [date(2013, 1, 1), np.nan, date(2018, 1, 1)]),
# The following two dtypes are commented out due to GH 23554
# ('complex', [1 + 1j, np.nan, 2 + 2j]),
# ('timedelta64', [np.timedelta64(1, 'D'),
# np.nan, np.timedelta64(2, 'D')]),
("timedelta", [timedelta(1), np.nan, timedelta(2)]),
("time", [time(1), np.nan, time(2)]),
("period", [pd.Period(2013), pd.NaT, pd.Period(2018)]),
("interval", [pd.Interval(0, 1), np.nan, pd.Interval(0, 2)]),
]
ids, _ = zip(*_any_skipna_inferred_dtype) # use inferred type as fixture-id
@pytest.fixture(params=_any_skipna_inferred_dtype, ids=ids)
def any_skipna_inferred_dtype(request):
"""
Fixture for all inferred dtypes from _libs.lib.infer_dtype
The covered (inferred) types are:
* 'string'
* 'empty'
* 'bytes'
* 'mixed'
* 'mixed-integer'
* 'mixed-integer-float'
* 'floating'
* 'integer'
* 'decimal'
* 'boolean'
* 'datetime64'
* 'datetime'
* 'date'
* 'timedelta'
* 'time'
* 'period'
* 'interval'
Returns
-------
inferred_dtype : str
The string for the inferred dtype from _libs.lib.infer_dtype
values : np.ndarray
An array of object dtype that will be inferred to have
`inferred_dtype`
Examples
--------
>>> import pandas._libs.lib as lib
>>>
>>> def test_something(any_skipna_inferred_dtype):
... inferred_dtype, values = any_skipna_inferred_dtype
... # will pass
... assert lib.infer_dtype(values, skipna=True) == inferred_dtype
"""
inferred_dtype, values = request.param
values = np.array(values, dtype=object) # object dtype to avoid casting
# correctness of inference tested in tests/dtypes/test_inference.py
return inferred_dtype, values
# ----------------------------------------------------------------
# Misc
# ----------------------------------------------------------------
@pytest.fixture
def ip():
"""
Get an instance of IPython.InteractiveShell.
Will raise a skip if IPython is not installed.
"""
pytest.importorskip("IPython", minversion="6.0.0")
from IPython.core.interactiveshell import InteractiveShell
# GH#35711 make sure sqlite history file handle is not leaked
from traitlets.config import Config # noqa: F401 isort:skip
c = Config()
c.HistoryManager.hist_file = ":memory:"
return InteractiveShell(config=c)
@pytest.fixture(params=["bsr", "coo", "csc", "csr", "dia", "dok", "lil"])
def spmatrix(request):
"""
Yields scipy sparse matrix classes.
"""
from scipy import sparse
return getattr(sparse, request.param + "_matrix")
@pytest.fixture(params=list(tm.cython_table))
def cython_table_items(request):
"""
Yields a tuple of a function and its corresponding name. Correspond to
the list of aggregator "Cython functions" used on selected table items.
"""
return request.param
@pytest.fixture(
params=[
getattr(pd.offsets, o)
for o in pd.offsets.__all__
if issubclass(getattr(pd.offsets, o), pd.offsets.Tick)
]
)
def tick_classes(request):
"""
Fixture for Tick based datetime offsets available for a time series.
"""
return request.param
@pytest.fixture(params=[None, lambda x: x])
def sort_by_key(request):
"""
Simple fixture for testing keys in sorting methods.
Tests None (no key) and the identity key.
"""
return request.param
@pytest.fixture()
def fsspectest():
pytest.importorskip("fsspec")
from fsspec import register_implementation
from fsspec.implementations.memory import MemoryFileSystem
from fsspec.registry import _registry as registry
class TestMemoryFS(MemoryFileSystem):
protocol = "testmem"
test = [None]
def __init__(self, **kwargs):
self.test[0] = kwargs.pop("test", None)
super().__init__(**kwargs)
register_implementation("testmem", TestMemoryFS, clobber=True)
yield TestMemoryFS()
registry.pop("testmem", None)
TestMemoryFS.test[0] = None
TestMemoryFS.store.clear()
| bsd-3-clause | -5,213,168,821,077,794,000 | 24.645012 | 88 | 0.588407 | false |