repo_name (stringlengths 5–100) | ref (stringlengths 12–67) | path (stringlengths 4–244) | copies (stringlengths 1–8) | content (stringlengths 0–1.05M, nullable ⌀)
---|---|---|---|---|
gladsonvm/haystackdemo | refs/heads/master | lib/python2.7/site-packages/django/contrib/gis/sitemaps/georss.py | 314 | from django.core import urlresolvers
from django.contrib.sitemaps import Sitemap
class GeoRSSSitemap(Sitemap):
"""
A minimal hook to produce sitemaps for GeoRSS feeds.
"""
def __init__(self, feed_dict, slug_dict=None):
"""
This sitemap object initializes on a feed dictionary (as would be passed
to `django.contrib.gis.views.feed`) and a slug dictionary.
If the slug dictionary is not defined, then it's assumed the keys provide
the URL parameter to the feed. However, if you have a complex feed (e.g.,
you override `get_object`), then you'll need to provide a slug dictionary.
The slug dictionary should have the same keys as the feed dictionary, but
each value in the slug dictionary should be a sequence of slugs that may
be used for valid feeds. For example, let's say we have a feed that
returns objects for a specific ZIP code in our feed dictionary:
feed_dict = {'zipcode' : ZipFeed}
Then we would use a slug dictionary with a list of the zip code slugs
corresponding to feeds you want listed in the sitemap:
slug_dict = {'zipcode' : ['77002', '77054']}
"""
# Setting up.
self.feed_dict = feed_dict
self.locations = []
if slug_dict is None: slug_dict = {}
# Getting the feed locations.
for section in feed_dict.keys():
if slug_dict.get(section, False):
for slug in slug_dict[section]:
self.locations.append('%s/%s' % (section, slug))
else:
self.locations.append(section)
def get_urls(self, page=1, site=None):
"""
This method is overridden so the appropriate `geo_format` attribute
is placed on each URL element.
"""
urls = Sitemap.get_urls(self, page=page, site=site)
for url in urls: url['geo_format'] = 'georss'
return urls
def items(self):
return self.locations
def location(self, obj):
return urlresolvers.reverse('django.contrib.gis.views.feed', args=(obj,))
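# --- Illustrative usage sketch (appended; not part of the upstream file) ---
# A minimal, hypothetical example of wiring GeoRSSSitemap into a sitemaps dict,
# following the docstring above. `ZipFeed` is an assumed feed class and the
# slugs are placeholders, so the sketch is shown commented out rather than as
# live code:
#
#     feed_dict = {'zipcode': ZipFeed}
#     slug_dict = {'zipcode': ['77002', '77054']}
#     sitemaps = {'georss': GeoRSSSitemap(feed_dict, slug_dict)}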
|
kbrebanov/ansible | refs/heads/devel | test/units/module_utils/basic/test_heuristic_log_sanitize.py | 66 | # -*- coding: utf-8 -*-
# (c) 2015, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
import sys
import syslog
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.module_utils.basic import heuristic_log_sanitize
class TestHeuristicLogSanitize(unittest.TestCase):
def setUp(self):
self.URL_SECRET = 'http://username:pas:word@foo.com/data'
self.SSH_SECRET = 'username:pas:word@foo.com/data'
self.clean_data = repr(self._gen_data(3, True, True, 'no_secret_here'))
self.url_data = repr(self._gen_data(3, True, True, self.URL_SECRET))
self.ssh_data = repr(self._gen_data(3, True, True, self.SSH_SECRET))
def _gen_data(self, records, per_rec, top_level, secret_text):
hostvars = {'hostvars': {}}
for i in range(1, records, 1):
host_facts = {
'host%s' % i: {
'pstack': {
'running': '875.1',
'symlinked': '880.0',
'tars': [],
'versions': ['885.0']
},
}
}
if per_rec:
host_facts['host%s' % i]['secret'] = secret_text
hostvars['hostvars'].update(host_facts)
if top_level:
hostvars['secret'] = secret_text
return hostvars
def test_did_not_hide_too_much(self):
self.assertEquals(heuristic_log_sanitize(self.clean_data), self.clean_data)
def test_hides_url_secrets(self):
url_output = heuristic_log_sanitize(self.url_data)
# Basic functionality: Successfully hid the password
self.assertNotIn('pas:word', url_output)
# Slightly more advanced, we hid all of the password despite the ":"
self.assertNotIn('pas', url_output)
# In this implementation we replace the password with 8 "*" which is
# also the length of our password. The url fields should be able to
# accurately detect where the password ends so the length should be
# the same:
self.assertEqual(len(url_output), len(self.url_data))
def test_hides_ssh_secrets(self):
ssh_output = heuristic_log_sanitize(self.ssh_data)
self.assertNotIn('pas:word', ssh_output)
# Slightly more advanced, we hid all of the password despite the ":"
self.assertNotIn('pas', ssh_output)
# ssh checking is harder as the heuristic is overzealous in many
# cases. Since the input will have at least one ":" present before
# the password we can tell some things about the beginning and end of
# the data, though:
self.assertTrue(ssh_output.startswith("{'"))
self.assertTrue(ssh_output.endswith("}"))
self.assertIn(":********@foo.com/data'", ssh_output)
def test_hides_parameter_secrets(self):
output = heuristic_log_sanitize('token="secret", user="person", token_entry="test=secret"', frozenset(['secret']))
self.assertNotIn('secret', output)
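# --- Illustrative note (appended; not part of the upstream test) ---
# heuristic_log_sanitize() takes the string to scrub plus an optional set of
# known secret values, and masks password-looking fragments with '********'.
# A minimal, hedged example of the behaviour these tests exercise:
#
#     heuristic_log_sanitize('http://user:secret@host/path')
#     # -> 'http://user:********@host/path'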
|
yeming233/horizon | refs/heads/master | openstack_dashboard/dashboards/project/instances/console.py | 2 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import logging
from django.utils.http import urlencode
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from novaclient import exceptions as nova_exception
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
CONSOLES = OrderedDict([('VNC', api.nova.server_vnc_console),
('SPICE', api.nova.server_spice_console),
('RDP', api.nova.server_rdp_console),
('SERIAL', api.nova.server_serial_console)])
def get_console(request, console_type, instance):
"""Get a tuple of console url and console type."""
if console_type == 'AUTO':
check_consoles = CONSOLES
else:
try:
check_consoles = {console_type: CONSOLES[console_type]}
except KeyError:
msg = _('Console type "%s" not supported.') % console_type
raise exceptions.NotAvailable(msg)
# Ugly workaround due novaclient API change from 2.17 to 2.18.
try:
httpnotimplemented = nova_exception.HttpNotImplemented
except AttributeError:
httpnotimplemented = nova_exception.HTTPNotImplemented
for con_type, api_call in check_consoles.items():
try:
console = api_call(request, instance.id)
# If not supported, don't log it to avoid lot of errors in case
# of AUTO.
except httpnotimplemented:
continue
except Exception:
LOG.debug('Console not available', exc_info=True)
continue
if con_type == 'SERIAL':
console_url = console.url
else:
console_url = "%s&%s(%s)" % (
console.url,
urlencode({'title': getattr(instance, "name", "")}),
instance.id)
return (con_type, console_url)
raise exceptions.NotAvailable(_('No available console found.'))
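# --- Illustrative usage sketch (appended; not part of the upstream file) ---
# get_console() is meant to be called from a view with an HttpRequest, a
# console type ('AUTO' probes VNC/SPICE/RDP/SERIAL in order), and a nova
# server object; it returns (console_type, console_url) or raises
# horizon's exceptions.NotAvailable. `request` and `instance` are assumed here:
#
#     con_type, url = get_console(request, 'AUTO', instance)
#     # e.g. redirect the user to `url` or embed it in a console template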
|
brianlorenz/COSMOS_IMACS_Redshifts | refs/heads/master | PlotCodes/Plot_AvStructMassCut_paper.py | 1 | #Creates a BPT diagram for all objects, and a second figure that shows objects for which single lines are low
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import ascii
import sys, os, string
import pandas as pd
from astropy.io import fits
import collections
from astropy.cosmology import WMAP9 as cosmo
from astropy.stats import biweight_midvariance
#Folder to save the figures
figout = '/Users/blorenz/COSMOS/Reports/2018/Images/'
#The location with the file for all of our data
fluxdatapath = '/Users/blorenz/COSMOS/COSMOSData/lineflux_red.txt'
#Location of the equivalent width data
ewdata = '/Users/blorenz/COSMOS/COSMOSData/lineew.txt'
#Read in the ew of the lines
ew_df = ascii.read(ewdata).to_pandas()
#The location to store the scale and its stddev of each line
qualdatapath = '/Users/blorenz/COSMOS/COSMOSData/dataqual.txt'
#Read in the scale of the lines
dataqual = ascii.read(qualdatapath).to_pandas()
d = {'True': True, 'False': False}
#File with the error array
errdatapath = '/Users/blorenz/COSMOS/COSMOSData/errs.txt'
#Read in the scale of the lines
err_df = ascii.read(errdatapath,data_start=1,header_start=0,format='csv').to_pandas()
#Read the datafile:
fluxdata = ascii.read(fluxdatapath).to_pandas()
#File with the structural properties
spropdatapath = '/Users/blorenz/COSMOS/COSMOSData/struct_prop.txt'
#Read in the scale of the lines
sprop_df = ascii.read(spropdatapath).to_pandas()
sprop_df = sprop_df.rename(columns={'id':'OBJID'})
fluxdata = pd.merge(fluxdata,sprop_df)
#Read in the sfr file
sfdata = '/Users/blorenz/COSMOS/COSMOSData/sfrs.txt'
sfr_df = ascii.read(sfdata).to_pandas()
fluxdata = pd.merge(fluxdata,sfr_df,on='fluxfile')
#Fontsizes for plotting
axisfont = 24
ticksize = 18
ticks = 8
titlefont = 24
legendfont = 16
textfont = 16
#Division function
def divz(X,Y):
return X/np.where(Y,Y,Y+1)*np.not_equal(Y,0)
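# Illustrative (assumed inputs): divz(np.array([1., 2.]), np.array([0., 2.]))
# returns array([0., 1.]) -- ordinary element-wise division, but 0 wherever Y == 0.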
lines=['6563_fix','4861']
#Filter the data
goodlines = [dataqual[line+'_good'].map(d) for line in lines]
#Needs to be good in all lines to be good
allgood = np.logical_and.reduce(goodlines)
#Needs to be bad in any line to be bad
badlines = [dataqual[line+'_bad'].map(d) for line in lines]
baddata = np.logical_or.reduce(badlines)
lowlines = [dataqual[line+'_low'].map(d) for line in lines]
#Needs to be low in any line to be low, and also not bad in a line
somelow = np.logical_and(np.logical_or.reduce(lowlines),np.logical_not(baddata))
combinemass = 1
if not combinemass:
fig,axarr = plt.subplots(2,3,figsize=(24,15),sharex=False,sharey=False)
axarr = np.reshape(axarr,6)
else:
fig,axarr = plt.subplots(2,2,figsize=(16,15),sharex=False,sharey=False)
axarr = np.reshape(axarr,4)
#Gets rid of objects with bad ellipticities
filtar = fluxdata['ar']>0
#Plot the data with error bars
#Counter
c = 0
plotdata = 'ar'
ylabel = 'b/a'
savename = 'AxisRatio'
#fluxdata['n']=np.log10(fluxdata['n'])
#fluxdata['SMD']=np.log10(divz(fluxdata['LMASS'],(4*np.pi*fluxdata['re_kpc']**2)))
ms=12
lwbw=2
notbad = np.logical_not(baddata)
#colormap = np.log10(fluxdata['sSFR'])
colormap = fluxdata['re_kpc']
#cut = 2.7
cut = 3.83
colorcut = 1
colorcut1 = 'blue'
colormed1 = 'darkblue'
colorcut2 = 'red'
colormed2 = 'maroon'
propname = 're'
for ax in axarr:
color1='dodgerblue'
color3='darkblue'
color2= 'blue'
color4='black'
if c in [0,1,2]:
massfilt = fluxdata['LMASS']<9.5
else:
massfilt = fluxdata['LMASS']>=9.5
if c in [0,2,3,5]:
col = 'good'
filt = notbad
if combinemass: filt = allgood
color='blue'
elif c in [1,4]:
col = 'low'
filt = notbad
if combinemass: filt = (fluxdata.OBJID < 0)
color='orange'
else:
col = 'bad'
filt = baddata
color='red'
#ax.errorbar(fluxdata[filt][filtar]['av'],fluxdata[filt][filtar]['ar'],xerr=fluxdata[filt][filtar]['dav1'],color=color,marker='o',ms=4,lw=0.5,ls='None')
#ax2.errorbar(fluxdata[filt][filtar]['av'],fluxdata[filt][filtar]['re_kpc'],xerr=fluxdata[filt][filtar]['dav1'],color=color,marker='o',ms=4,lw=0.5,ls='None')
#Titles, axes, legends
acount = 0
filttype = (fluxdata[plotdata]>-98.9)
if c==0:
ax.set_ylabel(ylabel+', LMASS < 9.5',fontsize = axisfont)
if c==3:
ax.set_ylabel(ylabel+', LMASS >= 9.5',fontsize = axisfont)
ax.set_xlabel('Av (mag)',fontsize = axisfont)
ax.tick_params(labelsize = ticksize, size=ticks)
filters = np.logical_and(filt,massfilt)
filters = np.logical_and(filters,filttype)
if c in [0,2,3,5]:
loc1 = np.sin(22.5/180*np.pi)
loc2 = np.sin(45.0/180*np.pi)
loc3 = np.sin(67.5/180*np.pi)
mr1 = (fluxdata[filters]['ar']<loc1)
mr2 = np.logical_and(fluxdata[filters]['ar']>=loc1,fluxdata[filters]['ar']<loc2)
mr3 = np.logical_and(fluxdata[filters]['ar']>=loc2,fluxdata[filters]['ar']<loc3)
mr4 = (fluxdata[filters]['ar']>=loc3)
med1 = np.median(fluxdata[filters][mr1].av)
med2 = np.median(fluxdata[filters][mr2].av)
med3 = np.median(fluxdata[filters][mr3].av)
med4 = np.median(fluxdata[filters][mr4].av)
med751 = np.percentile(fluxdata[filters][mr1].av,75)
med752 = np.percentile(fluxdata[filters][mr2].av,75)
med753 = np.percentile(fluxdata[filters][mr3].av,75)
med754 = np.percentile(fluxdata[filters][mr4].av,75)
emed1 = np.sqrt(biweight_midvariance(fluxdata[filters][mr1].av))/len(fluxdata[filters][mr1])
emed2 = np.sqrt(biweight_midvariance(fluxdata[filters][mr2].av))/len(fluxdata[filters][mr2])
emed3 = np.sqrt(biweight_midvariance(fluxdata[filters][mr3].av))/len(fluxdata[filters][mr3])
emed4 = np.sqrt(biweight_midvariance(fluxdata[filters][mr4].av))/len(fluxdata[filters][mr4])
s1 = np.median(fluxdata[filters][mr1]['ar'])
s2 = np.median(fluxdata[filters][mr2]['ar'])
s3 = np.median(fluxdata[filters][mr3]['ar'])
s4 = np.median(fluxdata[filters][mr4]['ar'])
if c in [0,3]:
ax.errorbar(fluxdata[filters][mr1]['av'],fluxdata[filters][mr1][plotdata],xerr=fluxdata[filters][mr1]['dav1'],color=color1,marker='o',ms=4,lw=0.5,ls='None',label=None)
ax.errorbar(fluxdata[filters][mr2]['av'],fluxdata[filters][mr2][plotdata],xerr=fluxdata[filters][mr2]['dav1'],color=color2,marker='o',ms=4,lw=0.5,ls='None',label=None)
ax.errorbar(fluxdata[filters][mr3]['av'],fluxdata[filters][mr3][plotdata],xerr=fluxdata[filters][mr3]['dav1'],color=color3,marker='o',ms=4,lw=0.5,ls='None',label=None)
ax.errorbar(fluxdata[filters][mr4]['av'],fluxdata[filters][mr4][plotdata],xerr=fluxdata[filters][mr4]['dav1'],color=color4,marker='o',ms=4,lw=0.5,ls='None',label=None)
if colorcut:
#Cut so that we only have SF galaxies
sf = np.log10(fluxdata.sSFR)>-10.5
above = (colormap[filters]>cut)
below = (colormap[filters]<=cut)
above = np.logical_and(above,sf[filters])
below = np.logical_and(below,sf[filters])
ax.errorbar(fluxdata[filters][above]['av'],fluxdata[filters][above][plotdata],xerr=fluxdata[filters][above]['dav1'],color=colorcut1,marker='o',ms=4,lw=0.5,ls='None',label=propname+'>'+str(cut))
ax.errorbar(fluxdata[filters][below]['av'],fluxdata[filters][below][plotdata],xerr=fluxdata[filters][below]['dav1'],color=colorcut2,marker='o',ms=4,lw=0.5,ls='None',label=propname+'<'+str(cut))
medsabove = [np.median(fluxdata[filters][np.logical_and(above,g)].av) for g in [mr1,mr2,mr3,mr4]]
medsbelow = [np.median(fluxdata[filters][np.logical_and(below,g)].av) for g in [mr1,mr2,mr3,mr4]]
#emedsabove = [np.std(fluxdata[filters][np.logical_and(above,g)].av) for g in [mr1,mr2,mr3,mr4]]
emedsabove = 1.49*np.array([np.median(np.abs(fluxdata[filters][np.logical_and(above,g)].av-np.median(fluxdata[filters][np.logical_and(above,g)].av))) for g in [mr1,mr2,mr3,mr4]])
emedsbelow = 1.49*np.array([np.median(np.abs(fluxdata[filters][np.logical_and(below,g)].av-np.median(fluxdata[filters][np.logical_and(below,g)].av))) for g in [mr1,mr2,mr3,mr4]])
#emedsbelow = [np.std(fluxdata[filters][np.logical_and(below,g)].av) for g in [mr1,mr2,mr3,mr4]]
ax.legend(fontsize=axisfont-6,loc=4)
s = 12
ax.errorbar(medsabove,[s1,s2,s3,s4],xerr=emedsabove,label='Median ' + propname + ' > ' + str(cut),ms=s,ls='None',marker='x',zorder=10**10, markerfacecolor='None', markeredgecolor=colormed1,mew=4)
ax.errorbar(medsbelow,[s1,s2,s3,s4],xerr=emedsbelow,label='Median ' + propname + ' < ' + str(cut),ms=s,ls='None',marker='o',zorder=10**10, markerfacecolor='None', markeredgecolor=colormed2,mew=4)
else:
ax.errorbar(med1,s1,xerr=emed1,color='red',marker='o',ms=ms,lw=lwbw,ls='None',label=None)
ax.errorbar(med2,s2,xerr=emed2,color='red',marker='o',ms=ms,lw=lwbw,ls='None',label=None)
ax.errorbar(med3,s3,xerr=emed3,color='red',marker='o',ms=ms,lw=lwbw,ls='None',label=None)
ax.errorbar(med4,s4,xerr=emed4,color='red',marker='o',ms=ms,lw=lwbw,ls='None',label='Median in bin')
ax.text(0.685,0.02,'Median in bin',fontsize = axisfont-2, transform=ax.transAxes,color='red')
#ax.errorbar(med751,loc1/2,xerr=emed1,color='red',marker='o',ms=ms,lw=lwbw,ls='None')
#ax.errorbar(med752,(loc1+loc2)/2,xerr=emed2,color='red',marker='o',ms=ms,lw=lwbw,ls='None')
#ax.errorbar(med753,(loc2+loc3)/2,xerr=emed3,color='red',marker='o',ms=ms,lw=lwbw,ls='None')
#ax.errorbar(med754,(1+loc3)/2,xerr=emed4,color='red',marker='o',ms=ms,lw=lwbw,ls='None')
ax.plot((-100,100),(loc1,loc1),color='black',ls='--',label=None)
ax.plot((-100,100),(loc2,loc2),color='black',ls='--',label=None)
ax.plot((-100,100),(loc3,loc3),color='black',ls='--',label=None)
ydist1 = np.arange(len(fluxdata[filters][mr1]['av']))/float(len(fluxdata[filters][mr1]['av']))
xdist1 = np.sort(fluxdata[filters][mr1]['av'])
ydist2 = np.arange(len(fluxdata[filters][mr2]['av']))/float(len(fluxdata[filters][mr2]['av']))
xdist2 = np.sort(fluxdata[filters][mr2]['av'])
ydist3 = np.arange(len(fluxdata[filters][mr3]['av']))/float(len(fluxdata[filters][mr3]['av']))
xdist3 = np.sort(fluxdata[filters][mr3]['av'])
ydist4 = np.arange(len(fluxdata[filters][mr4]['av']))/float(len(fluxdata[filters][mr4]['av']))
xdist4 = np.sort(fluxdata[filters][mr4]['av'])
if c in [2,5]:
ax.plot(xdist1,ydist1,color=color1)
ax.plot(xdist2,ydist2,color=color2)
ax.plot(xdist3,ydist3,color=color3)
ax.plot(xdist4,ydist4,color=color4)
ax.set_ylabel('Cumulative Distribution',fontsize=axisfont)
ax.set_xlim(-0.1,3)
ax.set_ylim(0,1)
c = c+1
if (combinemass and (c in [1,4])): c = c+1
fig.tight_layout()
if colorcut: fig.savefig(figout + 'Av_'+savename+'_'+propname+'_cut.pdf')
elif combinemass: fig.savefig(figout + 'Av_'+savename+'_combmass.pdf')
else:fig.savefig(figout + 'Av_'+savename+'_mass.pdf')
plt.close(fig)
#Color for BoT > 0.1 or 0.2
#Red for BoT <0.2 and r<2.7...
#Remove bulge galaxies since we only want to look at disks
#think about whether to use stddev or se in the mean (stddev/sqrt(n))
#<Av> vs i, arccos(b/a)
|
pramasoul/micropython | refs/heads/master | tests/extmod/ubinascii_unhexlify.py | 14 | try:
try:
import ubinascii as binascii
except ImportError:
import binascii
except ImportError:
print("SKIP")
raise SystemExit
print(binascii.unhexlify(b"0001020304050607"))
print(binascii.unhexlify(b"08090a0b0c0d0e0f"))
print(binascii.unhexlify(b"7f80ff"))
print(binascii.unhexlify(b"313233344142434461626364"))
try:
a = binascii.unhexlify(b"0") # odd buffer length
except ValueError:
print("ValueError")
try:
a = binascii.unhexlify(b"gg") # digit not hex
except ValueError:
print("ValueError")
|
svohara/pyvision | refs/heads/master | samples/WACV2012_Tutorial/tutorials/TutorialAnnotations.py | 4 | import pyvision as pv
import scipy as sp
if __name__ == '__main__':
im = pv.Image(sp.zeros((128,128)))
pts = [pv.Point(48,55),pv.Point(80,55)]
im.annotatePoints(pts)
elipse = pv.CenteredRect(64,64,96,96)
im.annotateEllipse(elipse)
im.annotateLabel(pv.Point(40,36),"MMM")
im.annotateLabel(pv.Point(72,36),"MMM")
im.annotateLabel(pv.Point(58,64),"db")
im.annotatePolygon([pv.Point(48,90),
pv.Point(80,90),pv.Point(64,100)])
im.show(delay=0) |
thanhpete/selenium | refs/heads/master | py/selenium/webdriver/ie/__init__.py | 2454 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
|
cuongnv23/ansible | refs/heads/devel | lib/ansible/modules/cloud/vmware/vmware_local_user_manager.py | 21 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright IBM Corp. 2016
# Author(s): Andreas Nafpliotis <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmware_local_user_manager
short_description: Manage local users on an ESXi host
description:
- Manage local users on an ESXi host
version_added: "2.2"
author: Andreas Nafpliotis
notes:
- Tested on ESXi 6.0
- Be sure that the ESXi user used for login has the appropriate rights to create / delete / edit users
requirements:
- "python >= 2.6"
- PyVmomi installed
options:
local_user_name:
description:
- The local user name to be changed
required: True
local_user_password:
description:
- The password to be set
required: False
local_user_description:
description:
- Description for the user
required: False
state:
description:
- Indicate desired state of the user. If the user already exists when C(state=present), the user info is updated
choices: ['present', 'absent']
default: present
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
# Example vmware_local_user_manager command from Ansible Playbooks
- name: Add local user to ESXi
local_action:
module: vmware_local_user_manager
hostname: esxi_hostname
username: root
password: vmware
local_user_name: foo
'''
RETURN = '''# '''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import HAS_PYVMOMI, connect_to_api, vmware_argument_spec
class VMwareLocalUserManager(object):
def __init__(self, module):
self.module = module
self.content = connect_to_api(self.module)
self.local_user_name = self.module.params['local_user_name']
self.local_user_password = self.module.params['local_user_password']
self.local_user_description = self.module.params['local_user_description']
self.state = self.module.params['state']
def process_state(self):
try:
local_account_manager_states = {
'absent': {
'present': self.state_remove_user,
'absent': self.state_exit_unchanged,
},
'present': {
'present': self.state_update_user,
'absent': self.state_create_user,
}
}
local_account_manager_states[self.state][self.check_local_user_manager_state()]()
except vmodl.RuntimeFault as runtime_fault:
self.module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
self.module.fail_json(msg=method_fault.msg)
except Exception as e:
self.module.fail_json(msg=str(e))
def check_local_user_manager_state(self):
user_account = self.find_user_account()
if not user_account:
return 'absent'
else:
return 'present'
def find_user_account(self):
searchStr = self.local_user_name
exactMatch = True
findUsers = True
findGroups = False
user_account = self.content.userDirectory.RetrieveUserGroups(None, searchStr, None, None, exactMatch, findUsers, findGroups)
return user_account
def create_account_spec(self):
account_spec = vim.host.LocalAccountManager.AccountSpecification()
account_spec.id = self.local_user_name
account_spec.password = self.local_user_password
account_spec.description = self.local_user_description
return account_spec
def state_create_user(self):
account_spec = self.create_account_spec()
try:
self.content.accountManager.CreateUser(account_spec)
self.module.exit_json(changed=True)
except vmodl.RuntimeFault as runtime_fault:
self.module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
self.module.fail_json(msg=method_fault.msg)
def state_update_user(self):
account_spec = self.create_account_spec()
try:
self.content.accountManager.UpdateUser(account_spec)
self.module.exit_json(changed=True)
except vmodl.RuntimeFault as runtime_fault:
self.module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
self.module.fail_json(msg=method_fault.msg)
def state_remove_user(self):
try:
self.content.accountManager.RemoveUser(self.local_user_name)
self.module.exit_json(changed=True)
except vmodl.RuntimeFault as runtime_fault:
self.module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
self.module.fail_json(msg=method_fault.msg)
def state_exit_unchanged(self):
self.module.exit_json(changed=False)
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(local_user_name=dict(required=True, type='str'),
local_user_password=dict(required=False, type='str', no_log=True),
local_user_description=dict(required=False, type='str'),
state=dict(default='present', choices=['present', 'absent'], type='str')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi is required for this module')
vmware_local_user_manager = VMwareLocalUserManager(module)
vmware_local_user_manager.process_state()
if __name__ == '__main__':
main()
|
NetApp/cinder | refs/heads/master | cinder/tests/unit/test_volume_throttling.py | 13 | # Copyright (c) 2015 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for volume copy throttling helpers."""
import mock
from cinder import test
from cinder import utils
from cinder.volume import throttling
class ThrottleTestCase(test.TestCase):
def test_NoThrottle(self):
with throttling.Throttle().subcommand('volume1', 'volume2') as cmd:
self.assertEqual([], cmd['prefix'])
@mock.patch.object(utils, 'get_blkdev_major_minor')
def test_BlkioCgroup(self, mock_major_minor):
def fake_get_blkdev_major_minor(path):
return {'src_volume1': "253:0", 'dst_volume1': "253:1",
'src_volume2': "253:2", 'dst_volume2': "253:3"}[path]
mock_major_minor.side_effect = fake_get_blkdev_major_minor
self.exec_cnt = 0
def fake_execute(*cmd, **kwargs):
cmd_set = ['cgset', '-r',
'blkio.throttle.%s_bps_device=%s %d', 'fake_group']
set_order = [None,
('read', '253:0', 1024),
('write', '253:1', 1024),
# a nested job starts; bps limit are set to the half
('read', '253:0', 512),
('read', '253:2', 512),
('write', '253:1', 512),
('write', '253:3', 512),
# a nested job ends; bps limit is resumed
('read', '253:0', 1024),
('write', '253:1', 1024)]
if set_order[self.exec_cnt] is None:
self.assertEqual(('cgcreate', '-g', 'blkio:fake_group'), cmd)
else:
cmd_set[2] %= set_order[self.exec_cnt]
self.assertEqual(tuple(cmd_set), cmd)
self.exec_cnt += 1
with mock.patch.object(utils, 'execute', side_effect=fake_execute):
throttle = throttling.BlkioCgroup(1024, 'fake_group')
with throttle.subcommand('src_volume1', 'dst_volume1') as cmd:
self.assertEqual(['cgexec', '-g', 'blkio:fake_group'],
cmd['prefix'])
# a nested job
with throttle.subcommand('src_volume2', 'dst_volume2') as cmd:
self.assertEqual(['cgexec', '-g', 'blkio:fake_group'],
cmd['prefix'])
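# --- Illustrative usage sketch (appended; not part of the upstream test) ---
# How callers are expected to use a throttle: subcommand() yields a dict whose
# 'prefix' list is prepended to the real copy command (empty for the no-op
# Throttle, ['cgexec', '-g', 'blkio:<group>'] for BlkioCgroup). Device paths
# below are placeholders:
#
#     throttle = throttling.Throttle()
#     with throttle.subcommand('/dev/src', '/dev/dst') as cmd:
#         utils.execute(*(cmd['prefix'] + ['dd', 'if=/dev/src', 'of=/dev/dst']))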
|
dvliman/jaikuengine | refs/heads/master | .google_appengine/lib/django-1.4/django/core/management/commands/sql.py | 90 | from optparse import make_option
from django.core.management.base import AppCommand
from django.core.management.sql import sql_create
from django.db import connections, DEFAULT_DB_ALIAS
class Command(AppCommand):
help = "Prints the CREATE TABLE SQL statements for the given app name(s)."
option_list = AppCommand.option_list + (
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to print the '
'SQL for. Defaults to the "default" database.'),
)
output_transaction = True
def handle_app(self, app, **options):
return u'\n'.join(sql_create(app, self.style, connections[options.get('database')])).encode('utf-8')
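# --- Illustrative invocation (appended; `myapp` is a placeholder app label) ---
#     python manage.py sql myapp --database=default
# prints the CREATE TABLE statements for `myapp`; output_transaction=True wraps
# them in a transaction block.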
|
uwosh/UWOshMusicRecruiting | refs/heads/master | content/SchoolFolder.py | 1 | # -*- coding: utf-8 -*-
#
# File: SchoolFolder.py
#
# Copyright (c) 2008 by []
# Generator: ArchGenXML Version 2.0-beta10
# http://plone.org/products/archgenxml
#
# GNU General Public License (GPL)
#
__author__ = """unknown <unknown>"""
__docformat__ = 'plaintext'
from AccessControl import ClassSecurityInfo
from Products.Archetypes.atapi import *
from zope.interface import implements
import interfaces
from Products.CMFDynamicViewFTI.browserdefault import BrowserDefaultMixin
from Products.ATBackRef.BackReferenceField import BackReferenceField, BackReferenceWidget
from Products.UWOshMusicRecruiting.config import *
##code-section module-header #fill in your manual code here
##/code-section module-header
schema = Schema((
),
)
##code-section after-local-schema #fill in your manual code here
##/code-section after-local-schema
SchoolFolder_schema = BaseBTreeFolderSchema.copy() + \
schema.copy()
##code-section after-schema #fill in your manual code here
##/code-section after-schema
class SchoolFolder(BaseBTreeFolder, BrowserDefaultMixin):
"""
"""
security = ClassSecurityInfo()
implements(interfaces.ISchoolFolder)
meta_type = 'SchoolFolder'
_at_rename_after_creation = True
schema = SchoolFolder_schema
##code-section class-header #fill in your manual code here
##/code-section class-header
# Methods
registerType(SchoolFolder, PROJECTNAME)
# end of class SchoolFolder
##code-section module-footer #fill in your manual code here
##/code-section module-footer
|
bkahlert/seqan-research | refs/heads/master | raw/workshop11/workshop2011-data-20110925/trunk/misc/seqan_instrumentation/bin/classes/flushfile.py | 21 | class Flushfile(object):
def __init__(self, fd):
self.fd = fd
def write(self, x):
ret=self.fd.write(x)
self.fd.flush()
return ret
def writelines(self, lines):
ret=self.fd.writelines(lines)
self.fd.flush()
return ret
def flush(self):
return self.fd.flush()
def close(self):
return self.fd.close()
def fileno(self):
return self.fd.fileno()
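# --- Illustrative usage sketch (appended; not part of the upstream file) ---
# Flushfile wraps a file-like object so every write is flushed immediately,
# which is useful for unbuffered progress output. A minimal, hedged example:
if __name__ == "__main__":
    import sys
    unbuffered = Flushfile(sys.stdout)
    unbuffered.write("instrumentation step done\n")  # written and flushed at once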
|
corbt/twitter-geo-visuals | refs/heads/master | get_subset.py | 1 | # Gets N randomly-selected lines without replacement from the input file and saves them to the output file
import os, random, json
in_file = "raw/2013-11-27-05.11.56.569021.json"
out_file = "processed/24h_5k.json"
num_selected = 5000
lines_with_location = []
with open(in_file) as f:
for i,line in enumerate(f):
tweet = json.loads(line)
if tweet['user'] and tweet['user']['location'] and len(tweet['user']['location']) > 0:
lines_with_location.append(i)
random.shuffle(lines_with_location)
print "{0} entries with location discovered".format(len(lines_with_location))
lines_selected = set(lines_with_location[0:num_selected])
with open(in_file) as i_f:
with open(out_file, 'w') as o_f:
for i,line in enumerate(i_f):
if i in lines_selected:
o_f.write(line) |
Tehsmash/nova | refs/heads/master | nova/virt/xenapi/vm_utils.py | 20 | # Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods for operations related to the management of VM records and
their attributes like VDIs, VIFs, as well as their lookup functions.
"""
import contextlib
import os
import time
import urllib
import uuid
from xml.dom import minidom
from xml.parsers import expat
from eventlet import greenthread
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import versionutils
import six
from six.moves import range
import six.moves.urllib.parse as urlparse
from nova.api.metadata import base as instance_metadata
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova import exception
from nova.i18n import _, _LE, _LI, _LW
from nova.network import model as network_model
from nova import utils
from nova.virt import configdrive
from nova.virt import diagnostics
from nova.virt.disk import api as disk
from nova.virt.disk.vfs import localfs as vfsimpl
from nova.virt import hardware
from nova.virt.image import model as imgmodel
from nova.virt import netutils
from nova.virt.xenapi import agent
from nova.virt.xenapi.image import utils as image_utils
LOG = logging.getLogger(__name__)
xenapi_vm_utils_opts = [
cfg.StrOpt('cache_images',
default='all',
choices=('all', 'some', 'none'),
help='Cache glance images locally. `all` will cache all'
' images, `some` will only cache images that have the'
' image_property `cache_in_nova=True`, and `none` turns'
' off caching entirely'),
cfg.IntOpt('image_compression_level',
help='Compression level for images, e.g., 9 for gzip -9.'
' Range is 1-9, 9 being most compressed but most CPU'
' intensive on dom0.'),
cfg.StrOpt('default_os_type',
default='linux',
help='Default OS type'),
cfg.IntOpt('block_device_creation_timeout',
default=10,
help='Time to wait for a block device to be created'),
cfg.IntOpt('max_kernel_ramdisk_size',
default=16 * units.Mi,
help='Maximum size in bytes of kernel or ramdisk images'),
cfg.StrOpt('sr_matching_filter',
default='default-sr:true',
help='Filter for finding the SR to be used to install guest '
'instances on. To use the Local Storage in default '
'XenServer/XCP installations set this flag to '
'other-config:i18n-key=local-storage. To select an SR '
'with a different matching criteria, you could set it to '
'other-config:my_favorite_sr=true. On the other hand, to '
'fall back on the Default SR, as displayed by XenCenter, '
'set this flag to: default-sr:true'),
cfg.BoolOpt('sparse_copy',
default=True,
help='Whether to use sparse_copy for copying data on a '
'resize down (False will use standard dd). This speeds '
'up resizes down considerably since large runs of zeros '
'won\'t have to be rsynced'),
cfg.IntOpt('num_vbd_unplug_retries',
default=10,
help='Maximum number of retries to unplug VBD. if <=0, '
'should try once and no retry'),
cfg.StrOpt('torrent_images',
default='none',
choices=('all', 'some', 'none'),
help='Whether or not to download images via Bit Torrent.'),
cfg.StrOpt('ipxe_network_name',
help='Name of network to use for booting iPXE ISOs'),
cfg.StrOpt('ipxe_boot_menu_url',
help='URL to the iPXE boot menu'),
cfg.StrOpt('ipxe_mkisofs_cmd',
default='mkisofs',
help='Name and optionally path of the tool used for '
'ISO image creation'),
]
CONF = cfg.CONF
CONF.register_opts(xenapi_vm_utils_opts, 'xenserver')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
CONF.import_opt('use_cow_images', 'nova.virt.driver')
CONF.import_opt('use_ipv6', 'nova.netconf')
XENAPI_POWER_STATE = {
'Halted': power_state.SHUTDOWN,
'Running': power_state.RUNNING,
'Paused': power_state.PAUSED,
'Suspended': power_state.SUSPENDED,
'Crashed': power_state.CRASHED}
SECTOR_SIZE = 512
MBR_SIZE_SECTORS = 63
MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE
KERNEL_DIR = '/boot/guest'
MAX_VDI_CHAIN_SIZE = 16
PROGRESS_INTERVAL_SECONDS = 300
# Fudge factor to allow for the VHD chain to be slightly larger than
# the partitioned space. Otherwise, legitimate images near their
# maximum allowed size can fail on build with FlavorDiskTooSmall.
VHD_SIZE_CHECK_FUDGE_FACTOR_GB = 10
class ImageType(object):
"""Enumeration class for distinguishing different image types
| 0 - kernel image (goes on dom0's filesystem)
| 1 - ramdisk image (goes on dom0's filesystem)
| 2 - disk image (local SR, partitioned by objectstore plugin)
| 3 - raw disk image (local SR, NOT partitioned by plugin)
| 4 - vhd disk image (local SR, NOT inspected by XS, PV assumed for
| linux, HVM assumed for Windows)
| 5 - ISO disk image (local SR, NOT partitioned by plugin)
| 6 - config drive
"""
KERNEL = 0
RAMDISK = 1
DISK = 2
DISK_RAW = 3
DISK_VHD = 4
DISK_ISO = 5
DISK_CONFIGDRIVE = 6
_ids = (KERNEL, RAMDISK, DISK, DISK_RAW, DISK_VHD, DISK_ISO,
DISK_CONFIGDRIVE)
KERNEL_STR = "kernel"
RAMDISK_STR = "ramdisk"
DISK_STR = "root"
DISK_RAW_STR = "os_raw"
DISK_VHD_STR = "vhd"
DISK_ISO_STR = "iso"
DISK_CONFIGDRIVE_STR = "configdrive"
_strs = (KERNEL_STR, RAMDISK_STR, DISK_STR, DISK_RAW_STR, DISK_VHD_STR,
DISK_ISO_STR, DISK_CONFIGDRIVE_STR)
@classmethod
def to_string(cls, image_type):
return dict(zip(cls._ids, ImageType._strs)).get(image_type)
@classmethod
def get_role(cls, image_type_id):
"""Get the role played by the image, based on its type."""
return {
cls.KERNEL: 'kernel',
cls.RAMDISK: 'ramdisk',
cls.DISK: 'root',
cls.DISK_RAW: 'root',
cls.DISK_VHD: 'root',
cls.DISK_ISO: 'iso',
cls.DISK_CONFIGDRIVE: 'configdrive'
}.get(image_type_id)
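# Illustrative (derived from the mappings above):
#     ImageType.to_string(ImageType.DISK_VHD)  # -> 'vhd'
#     ImageType.get_role(ImageType.DISK_VHD)   # -> 'root'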
def get_vm_device_id(session, image_properties):
# NOTE: device_id should be 2 for windows VMs which run new xentools
# (>=6.1). Refer to http://support.citrix.com/article/CTX135099 for more
# information.
if image_properties is None:
image_properties = {}
device_id = image_properties.get('xenapi_device_id')
# The device_id is required to be set for hypervisor version 6.1 and above
if device_id:
hypervisor_version = session.product_version
if _hypervisor_supports_device_id(hypervisor_version):
return device_id
else:
msg = _("Device id %(id)s specified is not supported by "
"hypervisor version %(version)s") % {'id': device_id,
'version': hypervisor_version}
raise exception.NovaException(msg)
def _hypervisor_supports_device_id(version):
version_as_string = '.'.join(str(v) for v in version)
return(versionutils.is_compatible('6.1', version_as_string))
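# Illustrative (assumed version tuples), given the '6.1' compatibility check above:
#     _hypervisor_supports_device_id((6, 2, 0))  # -> True
#     _hypervisor_supports_device_id((6, 0, 0))  # -> False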
def create_vm(session, instance, name_label, kernel, ramdisk,
use_pv_kernel=False, device_id=None):
"""Create a VM record. Returns new VM reference.
the use_pv_kernel flag indicates whether the guest is HVM or PV
There are 3 scenarios:
1. Using paravirtualization, kernel passed in
2. Using paravirtualization, kernel within the image
3. Using hardware virtualization
"""
flavor = instance.get_flavor()
mem = str(long(flavor.memory_mb) * units.Mi)
vcpus = str(flavor.vcpus)
vcpu_weight = flavor.vcpu_weight
vcpu_params = {}
if vcpu_weight is not None:
# NOTE(johngarbutt) bug in XenServer 6.1 and 6.2 means
# we need to specify both weight and cap for either to apply
vcpu_params = {"weight": str(vcpu_weight), "cap": "0"}
cpu_mask_list = hardware.get_vcpu_pin_set()
if cpu_mask_list:
cpu_mask = hardware.format_cpu_spec(cpu_mask_list,
allow_ranges=False)
vcpu_params["mask"] = cpu_mask
viridian = 'true' if instance['os_type'] == 'windows' else 'false'
rec = {
'actions_after_crash': 'destroy',
'actions_after_reboot': 'restart',
'actions_after_shutdown': 'destroy',
'affinity': '',
'blocked_operations': {},
'ha_always_run': False,
'ha_restart_priority': '',
'HVM_boot_params': {},
'HVM_boot_policy': '',
'is_a_template': False,
'memory_dynamic_min': mem,
'memory_dynamic_max': mem,
'memory_static_min': '0',
'memory_static_max': mem,
'memory_target': mem,
'name_description': '',
'name_label': name_label,
'other_config': {'nova_uuid': str(instance['uuid'])},
'PCI_bus': '',
'platform': {'acpi': 'true', 'apic': 'true', 'pae': 'true',
'viridian': viridian, 'timeoffset': '0'},
'PV_args': '',
'PV_bootloader': '',
'PV_bootloader_args': '',
'PV_kernel': '',
'PV_legacy_args': '',
'PV_ramdisk': '',
'recommendations': '',
'tags': [],
'user_version': '0',
'VCPUs_at_startup': vcpus,
'VCPUs_max': vcpus,
'VCPUs_params': vcpu_params,
'xenstore_data': {'vm-data/allowvssprovider': 'false'}}
# Complete VM configuration record according to the image type
# non-raw/raw with PV kernel/raw in HVM mode
if use_pv_kernel:
rec['platform']['nx'] = 'false'
if instance['kernel_id']:
# 1. Kernel explicitly passed in, use that
rec['PV_args'] = 'root=/dev/xvda1'
rec['PV_kernel'] = kernel
rec['PV_ramdisk'] = ramdisk
else:
# 2. Use kernel within the image
rec['PV_bootloader'] = 'pygrub'
else:
# 3. Using hardware virtualization
rec['platform']['nx'] = 'true'
rec['HVM_boot_params'] = {'order': 'dc'}
rec['HVM_boot_policy'] = 'BIOS order'
if device_id:
rec['platform']['device_id'] = device_id
vm_ref = session.VM.create(rec)
LOG.debug('Created VM', instance=instance)
return vm_ref
def destroy_vm(session, instance, vm_ref):
"""Destroys a VM record."""
try:
session.VM.destroy(vm_ref)
except session.XenAPI.Failure:
LOG.exception(_LE('Destroy VM failed'))
return
LOG.debug("VM destroyed", instance=instance)
def clean_shutdown_vm(session, instance, vm_ref):
if is_vm_shutdown(session, vm_ref):
LOG.warning(_LW("VM already halted, skipping shutdown..."),
instance=instance)
return True
LOG.debug("Shutting down VM (cleanly)", instance=instance)
try:
session.call_xenapi('VM.clean_shutdown', vm_ref)
except session.XenAPI.Failure:
LOG.exception(_LE('Shutting down VM (cleanly) failed.'))
return False
return True
def hard_shutdown_vm(session, instance, vm_ref):
if is_vm_shutdown(session, vm_ref):
LOG.warning(_LW("VM already halted, skipping shutdown..."),
instance=instance)
return True
LOG.debug("Shutting down VM (hard)", instance=instance)
try:
session.call_xenapi('VM.hard_shutdown', vm_ref)
except session.XenAPI.Failure:
LOG.exception(_LE('Shutting down VM (hard) failed'))
return False
return True
def is_vm_shutdown(session, vm_ref):
state = get_power_state(session, vm_ref)
if state == power_state.SHUTDOWN:
return True
return False
def is_enough_free_mem(session, instance):
flavor = instance.get_flavor()
mem = long(flavor.memory_mb) * units.Mi
host_free_mem = long(session.call_xenapi("host.compute_free_memory",
session.host_ref))
return host_free_mem >= mem
def _should_retry_unplug_vbd(err):
# Retry if unplug failed with DEVICE_DETACH_REJECTED
# For reasons which we don't understand,
# we're seeing the device still in use, even when all processes
# using the device should be dead.
# Since XenServer 6.2, we also need to retry if we get
# INTERNAL_ERROR, as that error goes away when you retry.
return (err == 'DEVICE_DETACH_REJECTED'
or
err == 'INTERNAL_ERROR')
def unplug_vbd(session, vbd_ref, this_vm_ref):
# make sure that perform at least once
max_attempts = max(0, CONF.xenserver.num_vbd_unplug_retries) + 1
for num_attempt in range(1, max_attempts + 1):
try:
if num_attempt > 1:
greenthread.sleep(1)
session.VBD.unplug(vbd_ref, this_vm_ref)
return
except session.XenAPI.Failure as exc:
err = len(exc.details) > 0 and exc.details[0]
if err == 'DEVICE_ALREADY_DETACHED':
LOG.info(_LI('VBD %s already detached'), vbd_ref)
return
elif _should_retry_unplug_vbd(err):
LOG.info(_LI('VBD %(vbd_ref)s unplug failed with "%(err)s", '
'attempt %(num_attempt)d/%(max_attempts)d'),
{'vbd_ref': vbd_ref, 'num_attempt': num_attempt,
'max_attempts': max_attempts, 'err': err})
else:
LOG.exception(_LE('Unable to unplug VBD'))
raise exception.StorageError(
reason=_('Unable to unplug VBD %s') % vbd_ref)
raise exception.StorageError(
reason=_('Reached maximum number of retries '
'trying to unplug VBD %s')
% vbd_ref)
def destroy_vbd(session, vbd_ref):
"""Destroy VBD from host database."""
try:
session.call_xenapi('VBD.destroy', vbd_ref)
except session.XenAPI.Failure:
LOG.exception(_LE('Unable to destroy VBD'))
raise exception.StorageError(
reason=_('Unable to destroy VBD %s') % vbd_ref)
def create_vbd(session, vm_ref, vdi_ref, userdevice, vbd_type='disk',
read_only=False, bootable=False, osvol=False,
empty=False, unpluggable=True):
"""Create a VBD record and returns its reference."""
vbd_rec = {}
vbd_rec['VM'] = vm_ref
if vdi_ref is None:
vdi_ref = 'OpaqueRef:NULL'
vbd_rec['VDI'] = vdi_ref
vbd_rec['userdevice'] = str(userdevice)
vbd_rec['bootable'] = bootable
vbd_rec['mode'] = read_only and 'RO' or 'RW'
vbd_rec['type'] = vbd_type
vbd_rec['unpluggable'] = unpluggable
vbd_rec['empty'] = empty
vbd_rec['other_config'] = {}
vbd_rec['qos_algorithm_type'] = ''
vbd_rec['qos_algorithm_params'] = {}
vbd_rec['qos_supported_algorithms'] = []
LOG.debug('Creating %(vbd_type)s-type VBD for VM %(vm_ref)s,'
' VDI %(vdi_ref)s ... ',
{'vbd_type': vbd_type, 'vm_ref': vm_ref, 'vdi_ref': vdi_ref})
vbd_ref = session.call_xenapi('VBD.create', vbd_rec)
LOG.debug('Created VBD %(vbd_ref)s for VM %(vm_ref)s,'
' VDI %(vdi_ref)s.',
{'vbd_ref': vbd_ref, 'vm_ref': vm_ref, 'vdi_ref': vdi_ref})
if osvol:
# set osvol=True in other-config to indicate this is an
# attached nova (or cinder) volume
session.call_xenapi('VBD.add_to_other_config',
vbd_ref, 'osvol', 'True')
return vbd_ref
def attach_cd(session, vm_ref, vdi_ref, userdevice):
"""Create an empty VBD, then insert the CD."""
vbd_ref = create_vbd(session, vm_ref, None, userdevice,
vbd_type='cd', read_only=True,
bootable=True, empty=True,
unpluggable=False)
session.call_xenapi('VBD.insert', vbd_ref, vdi_ref)
return vbd_ref
def destroy_vdi(session, vdi_ref):
try:
session.call_xenapi('VDI.destroy', vdi_ref)
except session.XenAPI.Failure:
msg = "Unable to destroy VDI %s" % vdi_ref
LOG.debug(msg, exc_info=True)
msg = _("Unable to destroy VDI %s") % vdi_ref
LOG.error(msg)
raise exception.StorageError(reason=msg)
def safe_destroy_vdis(session, vdi_refs):
"""Tries to destroy the requested VDIs, but ignores any errors."""
for vdi_ref in vdi_refs:
try:
destroy_vdi(session, vdi_ref)
except exception.StorageError:
msg = "Ignoring error while destroying VDI: %s" % vdi_ref
LOG.debug(msg)
def create_vdi(session, sr_ref, instance, name_label, disk_type, virtual_size,
read_only=False):
"""Create a VDI record and returns its reference."""
vdi_ref = session.call_xenapi("VDI.create",
{'name_label': name_label,
'name_description': disk_type,
'SR': sr_ref,
'virtual_size': str(virtual_size),
'type': 'User',
'sharable': False,
'read_only': read_only,
'xenstore_data': {},
'other_config': _get_vdi_other_config(disk_type, instance=instance),
'sm_config': {},
'tags': []})
LOG.debug('Created VDI %(vdi_ref)s (%(name_label)s,'
' %(virtual_size)s, %(read_only)s) on %(sr_ref)s.',
{'vdi_ref': vdi_ref, 'name_label': name_label,
'virtual_size': virtual_size, 'read_only': read_only,
'sr_ref': sr_ref})
return vdi_ref
@contextlib.contextmanager
def _dummy_vm(session, instance, vdi_ref):
"""This creates a temporary VM so that we can snapshot a VDI.
VDI's can't be snapshotted directly since the API expects a `vm_ref`. To
work around this, we need to create a temporary VM and then map the VDI to
the VM using a temporary VBD.
"""
name_label = "dummy"
vm_ref = create_vm(session, instance, name_label, None, None)
try:
vbd_ref = create_vbd(session, vm_ref, vdi_ref, 'autodetect',
read_only=True)
try:
yield vm_ref
finally:
try:
destroy_vbd(session, vbd_ref)
except exception.StorageError:
# destroy_vbd() will log error
pass
finally:
destroy_vm(session, instance, vm_ref)
def _safe_copy_vdi(session, sr_ref, instance, vdi_to_copy_ref):
"""Copy a VDI and return the new VDIs reference.
This function differs from the XenAPI `VDI.copy` call in that the copy is
atomic and isolated, meaning we don't see half-downloaded images. It
accomplishes this by copying the VDI's into a temporary directory and then
atomically renaming them into the SR when the copy is completed.
The correct long term solution is to fix `VDI.copy` so that it is atomic
and isolated.
"""
with _dummy_vm(session, instance, vdi_to_copy_ref) as vm_ref:
label = "snapshot"
with snapshot_attached_here(
session, instance, vm_ref, label) as vdi_uuids:
imported_vhds = session.call_plugin_serialized(
'workarounds', 'safe_copy_vdis',
sr_path=get_sr_path(session, sr_ref=sr_ref),
vdi_uuids=vdi_uuids, uuid_stack=_make_uuid_stack())
root_uuid = imported_vhds['root']['uuid']
# rescan to discover new VHDs
scan_default_sr(session)
vdi_ref = session.call_xenapi('VDI.get_by_uuid', root_uuid)
return vdi_ref
def _clone_vdi(session, vdi_to_clone_ref):
"""Clones a VDI and return the new VDIs reference."""
vdi_ref = session.call_xenapi('VDI.clone', vdi_to_clone_ref)
LOG.debug('Cloned VDI %(vdi_ref)s from VDI '
'%(vdi_to_clone_ref)s',
{'vdi_ref': vdi_ref, 'vdi_to_clone_ref': vdi_to_clone_ref})
return vdi_ref
def _get_vdi_other_config(disk_type, instance=None):
"""Return metadata to store in VDI's other_config attribute.
`nova_instance_uuid` is used to associate a VDI with a particular instance
so that, if it becomes orphaned from an unclean shutdown of a
compute-worker, we can safely detach it.
"""
other_config = {'nova_disk_type': disk_type}
# create_vdi may be called simply while creating a volume
# hence information about instance may or may not be present
if instance:
other_config['nova_instance_uuid'] = instance['uuid']
return other_config
def _set_vdi_info(session, vdi_ref, vdi_type, name_label, description,
instance):
existing_other_config = session.call_xenapi('VDI.get_other_config',
vdi_ref)
session.call_xenapi('VDI.set_name_label', vdi_ref, name_label)
session.call_xenapi('VDI.set_name_description', vdi_ref, description)
other_config = _get_vdi_other_config(vdi_type, instance=instance)
for key, value in six.iteritems(other_config):
if key not in existing_other_config:
session.call_xenapi(
"VDI.add_to_other_config", vdi_ref, key, value)
def _vm_get_vbd_refs(session, vm_ref):
return session.call_xenapi("VM.get_VBDs", vm_ref)
def _vbd_get_rec(session, vbd_ref):
return session.call_xenapi("VBD.get_record", vbd_ref)
def _vdi_get_rec(session, vdi_ref):
return session.call_xenapi("VDI.get_record", vdi_ref)
def _vdi_get_uuid(session, vdi_ref):
return session.call_xenapi("VDI.get_uuid", vdi_ref)
def _vdi_snapshot(session, vdi_ref):
return session.call_xenapi("VDI.snapshot", vdi_ref, {})
def get_vdi_for_vm_safely(session, vm_ref, userdevice='0'):
"""Retrieves the primary VDI for a VM."""
vbd_refs = _vm_get_vbd_refs(session, vm_ref)
for vbd_ref in vbd_refs:
vbd_rec = _vbd_get_rec(session, vbd_ref)
# Convention dictates the primary VDI will be userdevice 0
if vbd_rec['userdevice'] == userdevice:
vdi_ref = vbd_rec['VDI']
vdi_rec = _vdi_get_rec(session, vdi_ref)
return vdi_ref, vdi_rec
raise exception.NovaException(_("No primary VDI found for %s") % vm_ref)
def get_all_vdi_uuids_for_vm(session, vm_ref, min_userdevice=0):
vbd_refs = _vm_get_vbd_refs(session, vm_ref)
for vbd_ref in vbd_refs:
vbd_rec = _vbd_get_rec(session, vbd_ref)
if int(vbd_rec['userdevice']) >= min_userdevice:
vdi_ref = vbd_rec['VDI']
yield _vdi_get_uuid(session, vdi_ref)
def _try_strip_base_mirror_from_vdi(session, vdi_ref):
try:
session.call_xenapi("VDI.remove_from_sm_config", vdi_ref,
"base_mirror")
except session.XenAPI.Failure:
LOG.debug("Error while removing sm_config", exc_info=True)
def strip_base_mirror_from_vdis(session, vm_ref):
# NOTE(johngarbutt) part of workaround for XenServer bug CA-98606
vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
for vbd_ref in vbd_refs:
vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
_try_strip_base_mirror_from_vdi(session, vdi_ref)
def _delete_snapshots_in_vdi_chain(session, instance, vdi_uuid_chain, sr_ref):
possible_snapshot_parents = vdi_uuid_chain[1:]
if len(possible_snapshot_parents) == 0:
LOG.debug("No VHD chain.", instance=instance)
return
snapshot_uuids = _child_vhds(session, sr_ref, possible_snapshot_parents,
old_snapshots_only=True)
number_of_snapshots = len(snapshot_uuids)
if number_of_snapshots <= 0:
LOG.debug("No snapshots to remove.", instance=instance)
return
vdi_refs = [session.VDI.get_by_uuid(vdi_uuid)
for vdi_uuid in snapshot_uuids]
safe_destroy_vdis(session, vdi_refs)
# ensure garbage collector has been run
_scan_sr(session, sr_ref)
LOG.info(_LI("Deleted %s snapshots.") % number_of_snapshots,
instance=instance)
def remove_old_snapshots(session, instance, vm_ref):
"""See if there is an snapshot present that should be removed."""
LOG.debug("Starting remove_old_snapshots for VM", instance=instance)
vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref)
chain = _walk_vdi_chain(session, vm_vdi_rec['uuid'])
vdi_uuid_chain = [vdi_rec['uuid'] for vdi_rec in chain]
sr_ref = vm_vdi_rec["SR"]
_delete_snapshots_in_vdi_chain(session, instance, vdi_uuid_chain, sr_ref)
@contextlib.contextmanager
def snapshot_attached_here(session, instance, vm_ref, label, userdevice='0',
post_snapshot_callback=None):
# impl method allow easier patching for tests
return _snapshot_attached_here_impl(session, instance, vm_ref, label,
userdevice, post_snapshot_callback)
def _snapshot_attached_here_impl(session, instance, vm_ref, label, userdevice,
post_snapshot_callback):
"""Snapshot the root disk only. Return a list of uuids for the vhds
in the chain.
"""
LOG.debug("Starting snapshot for VM", instance=instance)
# Memorize the VDI chain so we can poll for coalesce
vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref,
userdevice)
chain = _walk_vdi_chain(session, vm_vdi_rec['uuid'])
vdi_uuid_chain = [vdi_rec['uuid'] for vdi_rec in chain]
sr_ref = vm_vdi_rec["SR"]
# clean up after any interrupted snapshot attempts
_delete_snapshots_in_vdi_chain(session, instance, vdi_uuid_chain, sr_ref)
snapshot_ref = _vdi_snapshot(session, vm_vdi_ref)
if post_snapshot_callback is not None:
post_snapshot_callback(task_state=task_states.IMAGE_PENDING_UPLOAD)
try:
# When the VDI snapshot is taken a new parent is introduced.
# If we have taken a snapshot before, the new parent can be coalesced.
# We need to wait for this to happen before trying to copy the chain.
_wait_for_vhd_coalesce(session, instance, sr_ref, vm_vdi_ref,
vdi_uuid_chain)
snapshot_uuid = _vdi_get_uuid(session, snapshot_ref)
chain = _walk_vdi_chain(session, snapshot_uuid)
vdi_uuids = [vdi_rec['uuid'] for vdi_rec in chain]
yield vdi_uuids
finally:
safe_destroy_vdis(session, [snapshot_ref])
# TODO(johngarbut) we need to check the snapshot has been coalesced
# now its associated VDI has been deleted.
def get_sr_path(session, sr_ref=None):
"""Return the path to our storage repository
This is used when we're dealing with VHDs directly, either by taking
snapshots or by restoring an image in the DISK_VHD format.
"""
if sr_ref is None:
sr_ref = safe_find_sr(session)
pbd_rec = session.call_xenapi("PBD.get_all_records_where",
'field "host"="%s" and '
'field "SR"="%s"' %
(session.host_ref, sr_ref))
# NOTE(bobball): There can only be one PBD for a host/SR pair, but path is
# not always present - older versions of XS do not set it.
pbd_ref = pbd_rec.keys()[0]
device_config = pbd_rec[pbd_ref]['device_config']
if 'path' in device_config:
return device_config['path']
sr_rec = session.call_xenapi("SR.get_record", sr_ref)
sr_uuid = sr_rec["uuid"]
if sr_rec["type"] not in ["ext", "nfs"]:
raise exception.NovaException(
_("Only file-based SRs (ext/NFS) are supported by this feature."
" SR %(uuid)s is of type %(type)s") %
{"uuid": sr_uuid, "type": sr_rec["type"]})
return os.path.join(CONF.xenserver.sr_base_path, sr_uuid)
def destroy_cached_images(session, sr_ref, all_cached=False, dry_run=False):
"""Destroy used or unused cached images.
A cached image that is being used by at least one VM is said to be 'used'.
In the case of an 'unused' image, the cached image will be the only
descendent of the base-copy. So when we delete the cached-image, the
refcount will drop to zero and XenServer will automatically destroy the
base-copy for us.
The default behavior of this function is to destroy only 'unused' cached
images. To destroy all cached images, use the `all_cached=True` kwarg.
"""
cached_images = _find_cached_images(session, sr_ref)
destroyed = set()
def destroy_cached_vdi(vdi_uuid, vdi_ref):
LOG.debug("Destroying cached VDI '%(vdi_uuid)s'")
if not dry_run:
destroy_vdi(session, vdi_ref)
destroyed.add(vdi_uuid)
for vdi_ref in cached_images.values():
vdi_uuid = session.call_xenapi('VDI.get_uuid', vdi_ref)
if all_cached:
destroy_cached_vdi(vdi_uuid, vdi_ref)
continue
# Unused-Only: Search for siblings
# Chain length greater than two implies a VM must be holding a ref to
# the base-copy (otherwise it would have coalesced), so consider this
# cached image used.
chain = list(_walk_vdi_chain(session, vdi_uuid))
if len(chain) > 2:
continue
elif len(chain) == 2:
# Siblings imply cached image is used
root_vdi_rec = chain[-1]
children = _child_vhds(session, sr_ref, [root_vdi_rec['uuid']])
if len(children) > 1:
continue
destroy_cached_vdi(vdi_uuid, vdi_ref)
return destroyed
def _find_cached_images(session, sr_ref):
"""Return a dict(uuid=vdi_ref) representing all cached images."""
cached_images = {}
for vdi_ref, vdi_rec in _get_all_vdis_in_sr(session, sr_ref):
try:
image_id = vdi_rec['other_config']['image-id']
except KeyError:
continue
cached_images[image_id] = vdi_ref
return cached_images
def _find_cached_image(session, image_id, sr_ref):
"""Returns the vdi-ref of the cached image."""
name_label = _get_image_vdi_label(image_id)
recs = session.call_xenapi("VDI.get_all_records_where",
'field "name__label"="%s"'
% name_label)
number_found = len(recs)
if number_found > 0:
if number_found > 1:
LOG.warning(_LW("Multiple base images for image: %s"), image_id)
return recs.keys()[0]
def _get_resize_func_name(session):
brand = session.product_brand
version = session.product_version
# To maintain backwards compatibility. All recent versions
# should use VDI.resize
if version and brand:
xcp = brand == 'XCP'
r1_2_or_above = (version[0] == 1 and version[1] > 1) or version[0] > 1
xenserver = brand == 'XenServer'
r6_or_above = version[0] > 5
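        # Older releases (XCP < 1.2, XenServer < 6.0) are handled via
        # VDI.resize_online; anything newer uses plain VDI.resize.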
if (xcp and not r1_2_or_above) or (xenserver and not r6_or_above):
return 'VDI.resize_online'
return 'VDI.resize'
def _vdi_get_virtual_size(session, vdi_ref):
size = session.call_xenapi('VDI.get_virtual_size', vdi_ref)
return int(size)
def _vdi_resize(session, vdi_ref, new_size):
resize_func_name = _get_resize_func_name(session)
session.call_xenapi(resize_func_name, vdi_ref, str(new_size))
def update_vdi_virtual_size(session, instance, vdi_ref, new_gb):
virtual_size = _vdi_get_virtual_size(session, vdi_ref)
new_disk_size = new_gb * units.Gi
msg = ("Resizing up VDI %(vdi_ref)s from %(virtual_size)d "
"to %(new_disk_size)d")
LOG.debug(msg, {'vdi_ref': vdi_ref, 'virtual_size': virtual_size,
'new_disk_size': new_disk_size},
instance=instance)
if virtual_size < new_disk_size:
# For resize up. Simple VDI resize will do the trick
_vdi_resize(session, vdi_ref, new_disk_size)
elif virtual_size == new_disk_size:
LOG.debug("No need to change vdi virtual size.",
instance=instance)
else:
# NOTE(johngarbutt): we should never get here
# but if we don't raise an exception, a user might be able to use
# more storage than allowed by their chosen instance flavor
msg = _("VDI %(vdi_ref)s is %(virtual_size)d bytes which is larger "
"than flavor size of %(new_disk_size)d bytes.")
msg = msg % {'vdi_ref': vdi_ref, 'virtual_size': virtual_size,
'new_disk_size': new_disk_size}
LOG.debug(msg, instance=instance)
raise exception.ResizeError(reason=msg)
def resize_disk(session, instance, vdi_ref, flavor):
size_gb = flavor.root_gb
if size_gb == 0:
reason = _("Can't resize a disk to 0 GB.")
raise exception.ResizeError(reason=reason)
sr_ref = safe_find_sr(session)
clone_ref = _clone_vdi(session, vdi_ref)
try:
# Resize partition and filesystem down
_auto_configure_disk(session, clone_ref, size_gb)
# Create new VDI
vdi_size = size_gb * units.Gi
# NOTE(johannes): No resizing allowed for rescue instances, so
# using instance['name'] is safe here
new_ref = create_vdi(session, sr_ref, instance, instance['name'],
'root', vdi_size)
new_uuid = session.call_xenapi('VDI.get_uuid', new_ref)
# Manually copy contents over
virtual_size = size_gb * units.Gi
_copy_partition(session, clone_ref, new_ref, 1, virtual_size)
return new_ref, new_uuid
finally:
destroy_vdi(session, clone_ref)
def _auto_configure_disk(session, vdi_ref, new_gb):
"""Partition and resize FS to match the size specified by
flavors.root_gb.
This is a fail-safe to prevent accidentally destroying data on a disk
erroneously marked as auto_disk_config=True.
The criteria for allowing resize are:
1. 'auto_disk_config' must be true for the instance (and image).
(If we've made it here, then auto_disk_config=True.)
2. The disk must have only one partition.
3. The file-system on the one partition must be ext3 or ext4.
"""
if new_gb == 0:
LOG.debug("Skipping auto_config_disk as destination size is 0GB")
return
with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
partitions = _get_partitions(dev)
if len(partitions) != 1:
reason = _('Disk must have only one partition.')
raise exception.CannotResizeDisk(reason=reason)
num, start, old_sectors, fstype, name, flags = partitions[0]
if fstype not in ('ext3', 'ext4'):
reason = _('Disk contains a filesystem '
'we are unable to resize: %s')
raise exception.CannotResizeDisk(reason=(reason % fstype))
if num != 1:
reason = _('The only partition should be partition 1.')
raise exception.CannotResizeDisk(reason=reason)
new_sectors = new_gb * units.Gi / SECTOR_SIZE
_resize_part_and_fs(dev, start, old_sectors, new_sectors, flags)
def try_auto_configure_disk(session, vdi_ref, new_gb):
try:
_auto_configure_disk(session, vdi_ref, new_gb)
except exception.CannotResizeDisk as e:
        msg = _LW('Attempted auto_configure_disk failed because: %s')
        LOG.warning(msg, e)
def _make_partition(session, dev, partition_start, partition_end):
dev_path = utils.make_dev_path(dev)
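    # Write an msdos partition table and a single primary partition
    # covering the requested start/end range.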
# NOTE(bobball) If this runs in Dom0, parted will error trying
# to re-read the partition table and return a generic error
utils.execute('parted', '--script', dev_path,
'mklabel', 'msdos', run_as_root=True,
check_exit_code=not session.is_local_connection)
utils.execute('parted', '--script', dev_path, '--',
'mkpart', 'primary',
partition_start,
partition_end,
run_as_root=True,
check_exit_code=not session.is_local_connection)
partition_path = utils.make_dev_path(dev, partition=1)
if session.is_local_connection:
# Need to refresh the partitions
utils.trycmd('kpartx', '-a', dev_path,
run_as_root=True,
discard_warnings=True)
# Sometimes the partition gets created under /dev/mapper, depending
# on the setup in dom0.
mapper_path = '/dev/mapper/%s' % os.path.basename(partition_path)
if os.path.exists(mapper_path):
return mapper_path
return partition_path
def _generate_disk(session, instance, vm_ref, userdevice, name_label,
disk_type, size_mb, fs_type):
"""Steps to programmatically generate a disk:
1. Create VDI of desired size
2. Attach VDI to compute worker
3. Create partition
4. Create VBD between instance VM and VDI
"""
# 1. Create VDI
sr_ref = safe_find_sr(session)
ONE_MEG = units.Mi
virtual_size = size_mb * ONE_MEG
vdi_ref = create_vdi(session, sr_ref, instance, name_label, disk_type,
virtual_size)
try:
# 2. Attach VDI to compute worker (VBD hotplug)
with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
# 3. Create partition
partition_start = "0"
partition_end = "-0"
partition_path = _make_partition(session, dev,
partition_start, partition_end)
if fs_type == 'linux-swap':
utils.execute('mkswap', partition_path, run_as_root=True)
elif fs_type is not None:
utils.execute('mkfs', '-t', fs_type, partition_path,
run_as_root=True)
# 4. Create VBD between instance VM and VDI
if vm_ref:
create_vbd(session, vm_ref, vdi_ref, userdevice, bootable=False)
except Exception:
with excutils.save_and_reraise_exception():
msg = "Error while generating disk number: %s" % userdevice
LOG.debug(msg, instance=instance, exc_info=True)
safe_destroy_vdis(session, [vdi_ref])
return vdi_ref
def generate_swap(session, instance, vm_ref, userdevice, name_label, swap_mb):
# NOTE(jk0): We use a FAT32 filesystem for the Windows swap
# partition because that is what parted supports.
is_windows = instance['os_type'] == "windows"
fs_type = "vfat" if is_windows else "linux-swap"
_generate_disk(session, instance, vm_ref, userdevice, name_label,
'swap', swap_mb, fs_type)
def get_ephemeral_disk_sizes(total_size_gb):
if not total_size_gb:
return
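    # Carve the requested space into multiple disks, since a single VHD
    # cannot exceed roughly 2TB; when the total divides evenly by 1024GB,
    # 1024GB chunks are used (presumably so every disk ends up the same
    # size). e.g. 2560GB -> 2000GB + 560GB, 2048GB -> 1024GB + 1024GB.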
max_size_gb = 2000
if total_size_gb % 1024 == 0:
max_size_gb = 1024
left_to_allocate = total_size_gb
while left_to_allocate > 0:
size_gb = min(max_size_gb, left_to_allocate)
yield size_gb
left_to_allocate -= size_gb
def generate_single_ephemeral(session, instance, vm_ref, userdevice,
size_gb, instance_name_label=None):
if instance_name_label is None:
instance_name_label = instance["name"]
name_label = "%s ephemeral" % instance_name_label
# TODO(johngarbutt) need to move DEVICE_EPHEMERAL from vmops to use it here
label_number = int(userdevice) - 4
if label_number > 0:
name_label = "%s (%d)" % (name_label, label_number)
return _generate_disk(session, instance, vm_ref, str(userdevice),
name_label, 'ephemeral', size_gb * 1024,
CONF.default_ephemeral_format)
def generate_ephemeral(session, instance, vm_ref, first_userdevice,
instance_name_label, total_size_gb):
# NOTE(johngarbutt): max possible size of a VHD disk is 2043GB
sizes = get_ephemeral_disk_sizes(total_size_gb)
first_userdevice = int(first_userdevice)
vdi_refs = []
try:
for userdevice, size_gb in enumerate(sizes, start=first_userdevice):
ref = generate_single_ephemeral(session, instance, vm_ref,
userdevice, size_gb,
instance_name_label)
vdi_refs.append(ref)
except Exception as exc:
with excutils.save_and_reraise_exception():
LOG.debug("Error when generating ephemeral disk. "
"Device: %(userdevice)s Size GB: %(size_gb)s "
"Error: %(exc)s", {
'userdevice': userdevice,
'size_gb': size_gb,
'exc': exc})
safe_destroy_vdis(session, vdi_refs)
def generate_iso_blank_root_disk(session, instance, vm_ref, userdevice,
name_label, size_gb):
_generate_disk(session, instance, vm_ref, userdevice, name_label,
'user', size_gb * 1024, CONF.default_ephemeral_format)
def generate_configdrive(session, instance, vm_ref, userdevice,
network_info, admin_password=None, files=None):
sr_ref = safe_find_sr(session)
vdi_ref = create_vdi(session, sr_ref, instance, 'config-2',
'configdrive', configdrive.CONFIGDRIVESIZE_BYTES)
try:
with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
extra_md = {}
if admin_password:
extra_md['admin_pass'] = admin_password
inst_md = instance_metadata.InstanceMetadata(instance,
content=files, extra_md=extra_md,
network_info=network_info)
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
with utils.tempdir() as tmp_path:
tmp_file = os.path.join(tmp_path, 'configdrive')
cdb.make_drive(tmp_file)
dev_path = utils.make_dev_path(dev)
utils.execute('dd',
'if=%s' % tmp_file,
'of=%s' % dev_path,
'oflag=direct,sync',
run_as_root=True)
create_vbd(session, vm_ref, vdi_ref, userdevice, bootable=False,
read_only=True)
except Exception:
with excutils.save_and_reraise_exception():
msg = "Error while generating config drive"
LOG.debug(msg, instance=instance, exc_info=True)
safe_destroy_vdis(session, [vdi_ref])
def _create_kernel_image(context, session, instance, name_label, image_id,
image_type):
"""Creates kernel/ramdisk file from the image stored in the cache.
If the image is not present in the cache, it streams it from glance.
Returns: A list of dictionaries that describe VDIs
"""
filename = ""
if CONF.xenserver.cache_images:
args = {}
args['cached-image'] = image_id
args['new-image-uuid'] = str(uuid.uuid4())
filename = session.call_plugin('kernel', 'create_kernel_ramdisk', args)
if filename == "":
return _fetch_disk_image(context, session, instance, name_label,
image_id, image_type)
else:
vdi_type = ImageType.to_string(image_type)
return {vdi_type: dict(uuid=None, file=filename)}
def create_kernel_and_ramdisk(context, session, instance, name_label):
kernel_file = None
ramdisk_file = None
if instance['kernel_id']:
vdis = _create_kernel_image(context, session,
instance, name_label, instance['kernel_id'],
ImageType.KERNEL)
kernel_file = vdis['kernel'].get('file')
if instance['ramdisk_id']:
vdis = _create_kernel_image(context, session,
instance, name_label, instance['ramdisk_id'],
ImageType.RAMDISK)
ramdisk_file = vdis['ramdisk'].get('file')
return kernel_file, ramdisk_file
def destroy_kernel_ramdisk(session, instance, kernel, ramdisk):
args = {}
if kernel:
args['kernel-file'] = kernel
if ramdisk:
args['ramdisk-file'] = ramdisk
if args:
LOG.debug("Removing kernel/ramdisk files from dom0",
instance=instance)
session.call_plugin('kernel', 'remove_kernel_ramdisk', args)
def _get_image_vdi_label(image_id):
return 'Glance Image %s' % image_id
def _create_cached_image(context, session, instance, name_label,
image_id, image_type):
sr_ref = safe_find_sr(session)
sr_type = session.call_xenapi('SR.get_type', sr_ref)
if CONF.use_cow_images and sr_type != "ext":
LOG.warning(_LW("Fast cloning is only supported on default local SR "
"of type ext. SR on this system was found to be of "
"type %s. Ignoring the cow flag."), sr_type)
@utils.synchronized('xenapi-image-cache' + image_id)
def _create_cached_image_impl(context, session, instance, name_label,
image_id, image_type, sr_ref):
cache_vdi_ref = _find_cached_image(session, image_id, sr_ref)
downloaded = False
if cache_vdi_ref is None:
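            # Cache miss: fetch the image from glance, then label the new
            # VDI (name-label and other_config['image-id']) so later
            # requests and the cache cleanup code can recognise it.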
downloaded = True
vdis = _fetch_image(context, session, instance, name_label,
image_id, image_type)
cache_vdi_ref = session.call_xenapi(
'VDI.get_by_uuid', vdis['root']['uuid'])
session.call_xenapi('VDI.set_name_label', cache_vdi_ref,
_get_image_vdi_label(image_id))
session.call_xenapi('VDI.set_name_description', cache_vdi_ref,
'root')
session.call_xenapi('VDI.add_to_other_config',
cache_vdi_ref, 'image-id', str(image_id))
if CONF.use_cow_images:
new_vdi_ref = _clone_vdi(session, cache_vdi_ref)
elif sr_type == 'ext':
new_vdi_ref = _safe_copy_vdi(session, sr_ref, instance,
cache_vdi_ref)
else:
new_vdi_ref = session.call_xenapi("VDI.copy", cache_vdi_ref,
sr_ref)
session.call_xenapi('VDI.set_name_label', new_vdi_ref, '')
session.call_xenapi('VDI.set_name_description', new_vdi_ref, '')
session.call_xenapi('VDI.remove_from_other_config',
new_vdi_ref, 'image-id')
vdi_uuid = session.call_xenapi('VDI.get_uuid', new_vdi_ref)
return downloaded, vdi_uuid
downloaded, vdi_uuid = _create_cached_image_impl(context, session,
instance, name_label,
image_id, image_type,
sr_ref)
vdis = {}
vdi_type = ImageType.get_role(image_type)
vdis[vdi_type] = dict(uuid=vdi_uuid, file=None)
return downloaded, vdis
def create_image(context, session, instance, name_label, image_id,
image_type):
"""Creates VDI from the image stored in the local cache. If the image
is not present in the cache, it streams it from glance.
Returns: A list of dictionaries that describe VDIs
"""
cache_images = CONF.xenserver.cache_images.lower()
# Determine if the image is cacheable
if image_type == ImageType.DISK_ISO:
cache = False
elif cache_images == 'all':
cache = True
elif cache_images == 'some':
sys_meta = utils.instance_sys_meta(instance)
try:
cache = strutils.bool_from_string(sys_meta['image_cache_in_nova'])
except KeyError:
cache = False
elif cache_images == 'none':
cache = False
else:
LOG.warning(_LW("Unrecognized cache_images value '%s', defaulting to"
" True"), CONF.xenserver.cache_images)
cache = True
# Fetch (and cache) the image
start_time = timeutils.utcnow()
if cache:
downloaded, vdis = _create_cached_image(context, session, instance,
name_label, image_id,
image_type)
else:
vdis = _fetch_image(context, session, instance, name_label,
image_id, image_type)
downloaded = True
duration = timeutils.delta_seconds(start_time, timeutils.utcnow())
LOG.info(_LI("Image creation data, cacheable: %(cache)s, "
"downloaded: %(downloaded)s duration: %(duration).2f secs "
"for image %(image_id)s"),
{'image_id': image_id, 'cache': cache, 'downloaded': downloaded,
'duration': duration})
for vdi_type, vdi in six.iteritems(vdis):
vdi_ref = session.call_xenapi('VDI.get_by_uuid', vdi['uuid'])
_set_vdi_info(session, vdi_ref, vdi_type, name_label, vdi_type,
instance)
return vdis
def _fetch_image(context, session, instance, name_label, image_id, image_type):
"""Fetch image from glance based on image type.
Returns: A single filename if image_type is KERNEL or RAMDISK
A list of dictionaries that describe VDIs, otherwise
"""
if image_type == ImageType.DISK_VHD:
vdis = _fetch_vhd_image(context, session, instance, image_id)
else:
vdis = _fetch_disk_image(context, session, instance, name_label,
image_id, image_type)
for vdi_type, vdi in six.iteritems(vdis):
vdi_uuid = vdi['uuid']
LOG.debug("Fetched VDIs of type '%(vdi_type)s' with UUID"
" '%(vdi_uuid)s'",
{'vdi_type': vdi_type, 'vdi_uuid': vdi_uuid},
instance=instance)
return vdis
def _make_uuid_stack():
# NOTE(sirp): The XenAPI plugins run under Python 2.4
# which does not have the `uuid` module. To work around this,
# we generate the uuids here (under Python 2.6+) and
# pass them as arguments
return [str(uuid.uuid4()) for i in range(MAX_VDI_CHAIN_SIZE)]
def _image_uses_bittorrent(context, instance):
bittorrent = False
torrent_images = CONF.xenserver.torrent_images.lower()
if torrent_images == 'all':
bittorrent = True
elif torrent_images == 'some':
sys_meta = utils.instance_sys_meta(instance)
try:
bittorrent = strutils.bool_from_string(
sys_meta['image_bittorrent'])
except KeyError:
pass
elif torrent_images == 'none':
pass
else:
LOG.warning(_LW("Invalid value '%s' for torrent_images"),
torrent_images)
return bittorrent
def _default_download_handler():
# TODO(sirp): This should be configurable like upload_handler
return importutils.import_object(
'nova.virt.xenapi.image.glance.GlanceStore')
def _choose_download_handler(context, instance):
if _image_uses_bittorrent(context, instance):
return importutils.import_object(
'nova.virt.xenapi.image.bittorrent.BittorrentStore')
else:
return _default_download_handler()
def get_compression_level():
level = CONF.xenserver.image_compression_level
if level is not None and (level < 1 or level > 9):
LOG.warning(_LW("Invalid value '%d' for image_compression_level"),
level)
return None
return level
def _fetch_vhd_image(context, session, instance, image_id):
"""Tell glance to download an image and put the VHDs into the SR
Returns: A list of dictionaries that describe VDIs
"""
LOG.debug("Asking xapi to fetch vhd image %s", image_id,
instance=instance)
handler = _choose_download_handler(context, instance)
try:
vdis = handler.download_image(context, session, instance, image_id)
except Exception:
default_handler = _default_download_handler()
# Using type() instead of isinstance() so instance of subclass doesn't
# test as equivalent
if type(handler) == type(default_handler):
raise
LOG.exception(_LE("Download handler '%(handler)s' raised an"
" exception, falling back to default handler"
" '%(default_handler)s'"),
{'handler': handler,
'default_handler': default_handler})
vdis = default_handler.download_image(
context, session, instance, image_id)
    # Ensure we can see the imported VHDs as VDIs
scan_default_sr(session)
vdi_uuid = vdis['root']['uuid']
try:
_check_vdi_size(context, session, instance, vdi_uuid)
except Exception:
with excutils.save_and_reraise_exception():
msg = "Error while checking vdi size"
LOG.debug(msg, instance=instance, exc_info=True)
for vdi in vdis.values():
vdi_uuid = vdi['uuid']
vdi_ref = session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
safe_destroy_vdis(session, [vdi_ref])
return vdis
def _get_vdi_chain_size(session, vdi_uuid):
"""Compute the total size of a VDI chain, starting with the specified
VDI UUID.
    This will walk the VDI chain to the root, adding the size of each VDI
    to the total.
"""
size_bytes = 0
for vdi_rec in _walk_vdi_chain(session, vdi_uuid):
cur_vdi_uuid = vdi_rec['uuid']
vdi_size_bytes = int(vdi_rec['physical_utilisation'])
LOG.debug('vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes='
'%(vdi_size_bytes)d',
{'cur_vdi_uuid': cur_vdi_uuid,
'vdi_size_bytes': vdi_size_bytes})
size_bytes += vdi_size_bytes
return size_bytes
def _check_vdi_size(context, session, instance, vdi_uuid):
flavor = instance.get_flavor()
allowed_size = (flavor.root_gb +
VHD_SIZE_CHECK_FUDGE_FACTOR_GB) * units.Gi
if not flavor.root_gb:
# root_gb=0 indicates that we're disabling size checks
return
size = _get_vdi_chain_size(session, vdi_uuid)
if size > allowed_size:
LOG.error(_LE("Image size %(size)d exceeded flavor "
"allowed size %(allowed_size)d"),
{'size': size, 'allowed_size': allowed_size},
instance=instance)
raise exception.FlavorDiskTooSmall()
def _fetch_disk_image(context, session, instance, name_label, image_id,
image_type):
"""Fetch the image from Glance
NOTE:
Unlike _fetch_vhd_image, this method does not use the Glance
plugin; instead, it streams the disks through domU to the VDI
directly.
Returns: A single filename if image_type is KERNEL_RAMDISK
A list of dictionaries that describe VDIs, otherwise
"""
# FIXME(sirp): Since the Glance plugin seems to be required for the
# VHD disk, it may be worth using the plugin for both VHD and RAW and
# DISK restores
image_type_str = ImageType.to_string(image_type)
LOG.debug("Fetching image %(image_id)s, type %(image_type_str)s",
{'image_id': image_id, 'image_type_str': image_type_str},
instance=instance)
if image_type == ImageType.DISK_ISO:
sr_ref = _safe_find_iso_sr(session)
else:
sr_ref = safe_find_sr(session)
glance_image = image_utils.GlanceImage(context, image_id)
if glance_image.is_raw_tgz():
image = image_utils.RawTGZImage(glance_image)
else:
image = image_utils.RawImage(glance_image)
virtual_size = image.get_size()
vdi_size = virtual_size
LOG.debug("Size for image %(image_id)s: %(virtual_size)d",
{'image_id': image_id, 'virtual_size': virtual_size},
instance=instance)
if image_type == ImageType.DISK:
# Make room for MBR.
vdi_size += MBR_SIZE_BYTES
elif (image_type in (ImageType.KERNEL, ImageType.RAMDISK) and
vdi_size > CONF.xenserver.max_kernel_ramdisk_size):
max_size = CONF.xenserver.max_kernel_ramdisk_size
raise exception.NovaException(
_("Kernel/Ramdisk image is too large: %(vdi_size)d bytes, "
"max %(max_size)d bytes") %
{'vdi_size': vdi_size, 'max_size': max_size})
vdi_ref = create_vdi(session, sr_ref, instance, name_label,
image_type_str, vdi_size)
# From this point we have a VDI on Xen host;
# If anything goes wrong, we need to remember its uuid.
try:
filename = None
vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref)
with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
_stream_disk(
session, image.stream_to, image_type, virtual_size, dev)
if image_type in (ImageType.KERNEL, ImageType.RAMDISK):
# We need to invoke a plugin for copying the
# content of the VDI into the proper path.
LOG.debug("Copying VDI %s to /boot/guest on dom0",
vdi_ref, instance=instance)
args = {}
args['vdi-ref'] = vdi_ref
# Let the plugin copy the correct number of bytes.
args['image-size'] = str(vdi_size)
if CONF.xenserver.cache_images:
args['cached-image'] = image_id
filename = session.call_plugin('kernel', 'copy_vdi', args)
# Remove the VDI as it is not needed anymore.
destroy_vdi(session, vdi_ref)
LOG.debug("Kernel/Ramdisk VDI %s destroyed", vdi_ref,
instance=instance)
vdi_role = ImageType.get_role(image_type)
return {vdi_role: dict(uuid=None, file=filename)}
else:
vdi_role = ImageType.get_role(image_type)
return {vdi_role: dict(uuid=vdi_uuid, file=None)}
except (session.XenAPI.Failure, IOError, OSError) as e:
# We look for XenAPI and OS failures.
LOG.exception(_LE("Failed to fetch glance image"),
instance=instance)
e.args = e.args + ([dict(type=ImageType.to_string(image_type),
uuid=vdi_uuid,
file=filename)],)
raise
def determine_disk_image_type(image_meta):
"""Disk Image Types are used to determine where the kernel will reside
within an image. To figure out which type we're dealing with, we use
the following rules:
1. If we're using Glance, we can use the image_type field to
determine the image_type
2. If we're not using Glance, then we need to deduce this based on
whether a kernel_id is specified.
"""
if not image_meta or 'disk_format' not in image_meta:
return None
disk_format = image_meta['disk_format']
disk_format_map = {
'ami': ImageType.DISK,
'aki': ImageType.KERNEL,
'ari': ImageType.RAMDISK,
'raw': ImageType.DISK_RAW,
'vhd': ImageType.DISK_VHD,
'iso': ImageType.DISK_ISO,
}
try:
image_type = disk_format_map[disk_format]
except KeyError:
raise exception.InvalidDiskFormat(disk_format=disk_format)
image_ref = image_meta.get('id')
params = {
'image_type_str': ImageType.to_string(image_type),
'image_ref': image_ref
}
LOG.debug("Detected %(image_type_str)s format for image %(image_ref)s",
params)
return image_type
def determine_vm_mode(instance, disk_image_type):
current_mode = vm_mode.get_from_instance(instance)
if current_mode == vm_mode.XEN or current_mode == vm_mode.HVM:
return current_mode
os_type = instance['os_type']
if os_type == "linux":
return vm_mode.XEN
if os_type == "windows":
return vm_mode.HVM
# disk_image_type specific default for backwards compatibility
    if disk_image_type in (ImageType.DISK_VHD, ImageType.DISK):
return vm_mode.XEN
# most images run OK as HVM
return vm_mode.HVM
def set_vm_name_label(session, vm_ref, name_label):
session.call_xenapi("VM.set_name_label", vm_ref, name_label)
def list_vms(session):
vms = session.call_xenapi("VM.get_all_records_where",
'field "is_control_domain"="false" and '
'field "is_a_template"="false" and '
'field "resident_on"="%s"' % session.host_ref)
for vm_ref in vms.keys():
yield vm_ref, vms[vm_ref]
def lookup_vm_vdis(session, vm_ref):
"""Look for the VDIs that are attached to the VM."""
# Firstly we get the VBDs, then the VDIs.
# TODO(Armando): do we leave the read-only devices?
vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
vdi_refs = []
if vbd_refs:
for vbd_ref in vbd_refs:
try:
vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
# Test valid VDI
vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref)
LOG.debug('VDI %s is still available', vdi_uuid)
vbd_other_config = session.call_xenapi("VBD.get_other_config",
vbd_ref)
if not vbd_other_config.get('osvol'):
# This is not an attached volume
vdi_refs.append(vdi_ref)
except session.XenAPI.Failure:
LOG.exception(_LE('"Look for the VDIs failed'))
return vdi_refs
def lookup(session, name_label, check_rescue=False):
"""Look the instance up and return it if available.
    :param check_rescue: if True, return the 'name'-rescue vm if it
        exists, instead of just 'name'
"""
if check_rescue:
result = lookup(session, name_label + '-rescue', False)
if result:
return result
vm_refs = session.call_xenapi("VM.get_by_name_label", name_label)
n = len(vm_refs)
if n == 0:
return None
elif n > 1:
raise exception.InstanceExists(name=name_label)
else:
return vm_refs[0]
def preconfigure_instance(session, instance, vdi_ref, network_info):
"""Makes alterations to the image before launching as part of spawn.
"""
key = str(instance['key_data'])
net = netutils.get_injected_network_template(network_info)
metadata = instance['metadata']
    # As mounting the image VDI is expensive, we only want to do it once,
    # if at all, so determine whether it's required first, and then do
    # everything.
mount_required = key or net or metadata
if not mount_required:
return
with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
_mounted_processing(dev, key, net, metadata)
def lookup_kernel_ramdisk(session, vm):
vm_rec = session.call_xenapi("VM.get_record", vm)
if 'PV_kernel' in vm_rec and 'PV_ramdisk' in vm_rec:
return (vm_rec['PV_kernel'], vm_rec['PV_ramdisk'])
else:
return (None, None)
def is_snapshot(session, vm):
vm_rec = session.call_xenapi("VM.get_record", vm)
if 'is_a_template' in vm_rec and 'is_a_snapshot' in vm_rec:
return vm_rec['is_a_template'] and vm_rec['is_a_snapshot']
else:
return False
def get_power_state(session, vm_ref):
xapi_state = session.call_xenapi("VM.get_power_state", vm_ref)
return XENAPI_POWER_STATE[xapi_state]
def compile_info(session, vm_ref):
"""Fill record with VM status information."""
power_state = get_power_state(session, vm_ref)
max_mem = session.call_xenapi("VM.get_memory_static_max", vm_ref)
mem = session.call_xenapi("VM.get_memory_dynamic_max", vm_ref)
num_cpu = session.call_xenapi("VM.get_VCPUs_max", vm_ref)
return hardware.InstanceInfo(state=power_state,
max_mem_kb=long(max_mem) >> 10,
mem_kb=long(mem) >> 10,
num_cpu=num_cpu)
def compile_instance_diagnostics(instance, vm_rec):
vm_power_state_int = XENAPI_POWER_STATE[vm_rec['power_state']]
vm_power_state = power_state.STATE_MAP[vm_power_state_int]
config_drive = configdrive.required_by(instance)
diags = diagnostics.Diagnostics(state=vm_power_state,
driver='xenapi',
config_drive=config_drive)
for cpu_num in range(0, long(vm_rec['VCPUs_max'])):
diags.add_cpu()
for vif in vm_rec['VIFs']:
diags.add_nic()
for vbd in vm_rec['VBDs']:
diags.add_disk()
max_mem_bytes = long(vm_rec['memory_dynamic_max'])
diags.memory_details.maximum = max_mem_bytes / units.Mi
return diags
def compile_diagnostics(vm_rec):
"""Compile VM diagnostics data."""
try:
keys = []
diags = {}
vm_uuid = vm_rec["uuid"]
xml = _get_rrd(_get_rrd_server(), vm_uuid)
if xml:
rrd = minidom.parseString(xml)
for i, node in enumerate(rrd.firstChild.childNodes):
# Provide the last update of the information
if node.localName == 'lastupdate':
diags['last_update'] = node.firstChild.data
# Create a list of the diagnostic keys (in their order)
if node.localName == 'ds':
ref = node.childNodes
# Name and Value
if len(ref) > 6:
keys.append(ref[0].firstChild.data)
# Read the last row of the first RRA to get the latest info
if node.localName == 'rra':
rows = node.childNodes[4].childNodes
last_row = rows[rows.length - 1].childNodes
for j, value in enumerate(last_row):
diags[keys[j]] = value.firstChild.data
break
return diags
except expat.ExpatError as e:
        LOG.exception(_LE('Unable to parse rrd of %s'), vm_uuid)
return {"Unable to retrieve diagnostics": e}
def fetch_bandwidth(session):
bw = session.call_plugin_serialized('bandwidth', 'fetch_all_bandwidth')
return bw
def _scan_sr(session, sr_ref=None, max_attempts=4):
if sr_ref:
# NOTE(johngarbutt) xenapi will collapse any duplicate requests
# for SR.scan if there is already a scan in progress.
# However, we don't want that, because the scan may have started
# before we modified the underlying VHDs on disk through a plugin.
# Using our own mutex will reduce cases where our periodic SR scan
# in host.update_status starts racing the sr.scan after a plugin call.
@utils.synchronized('sr-scan-' + sr_ref)
def do_scan(sr_ref):
LOG.debug("Scanning SR %s", sr_ref)
attempt = 1
while True:
try:
return session.call_xenapi('SR.scan', sr_ref)
except session.XenAPI.Failure as exc:
with excutils.save_and_reraise_exception() as ctxt:
if exc.details[0] == 'SR_BACKEND_FAILURE_40':
if attempt < max_attempts:
ctxt.reraise = False
LOG.warning(_LW("Retry SR scan due to error: "
"%s"), exc)
greenthread.sleep(2 ** attempt)
attempt += 1
do_scan(sr_ref)
def scan_default_sr(session):
"""Looks for the system default SR and triggers a re-scan."""
sr_ref = safe_find_sr(session)
_scan_sr(session, sr_ref)
return sr_ref
def safe_find_sr(session):
"""Same as _find_sr except raises a NotFound exception if SR cannot be
determined
"""
sr_ref = _find_sr(session)
if sr_ref is None:
raise exception.StorageRepositoryNotFound()
return sr_ref
def _find_sr(session):
"""Return the storage repository to hold VM images."""
host = session.host_ref
try:
tokens = CONF.xenserver.sr_matching_filter.split(':')
filter_criteria = tokens[0]
filter_pattern = tokens[1]
except IndexError:
# oops, flag is invalid
LOG.warning(_LW("Flag sr_matching_filter '%s' does not respect "
"formatting convention"),
CONF.xenserver.sr_matching_filter)
return None
if filter_criteria == 'other-config':
key, value = filter_pattern.split('=', 1)
for sr_ref, sr_rec in session.get_all_refs_and_recs('SR'):
if not (key in sr_rec['other_config'] and
sr_rec['other_config'][key] == value):
continue
for pbd_ref in sr_rec['PBDs']:
pbd_rec = session.get_rec('PBD', pbd_ref)
if pbd_rec and pbd_rec['host'] == host:
return sr_ref
elif filter_criteria == 'default-sr' and filter_pattern == 'true':
pool_ref = session.call_xenapi('pool.get_all')[0]
sr_ref = session.call_xenapi('pool.get_default_SR', pool_ref)
if sr_ref:
return sr_ref
# No SR found!
LOG.error(_LE("XenAPI is unable to find a Storage Repository to "
"install guest instances on. Please check your "
"configuration (e.g. set a default SR for the pool) "
"and/or configure the flag 'sr_matching_filter'."))
return None
def _safe_find_iso_sr(session):
"""Same as _find_iso_sr except raises a NotFound exception if SR
cannot be determined
"""
sr_ref = _find_iso_sr(session)
if sr_ref is None:
raise exception.NotFound(_('Cannot find SR of content-type ISO'))
return sr_ref
def _find_iso_sr(session):
"""Return the storage repository to hold ISO images."""
host = session.host_ref
for sr_ref, sr_rec in session.get_all_refs_and_recs('SR'):
LOG.debug("ISO: looking at SR %s", sr_rec)
if not sr_rec['content_type'] == 'iso':
LOG.debug("ISO: not iso content")
continue
if 'i18n-key' not in sr_rec['other_config']:
LOG.debug("ISO: iso content_type, no 'i18n-key' key")
continue
if not sr_rec['other_config']['i18n-key'] == 'local-storage-iso':
LOG.debug("ISO: iso content_type, i18n-key value not "
"'local-storage-iso'")
continue
LOG.debug("ISO: SR MATCHing our criteria")
for pbd_ref in sr_rec['PBDs']:
LOG.debug("ISO: ISO, looking to see if it is host local")
pbd_rec = session.get_rec('PBD', pbd_ref)
if not pbd_rec:
LOG.debug("ISO: PBD %s disappeared", pbd_ref)
continue
pbd_rec_host = pbd_rec['host']
LOG.debug("ISO: PBD matching, want %(pbd_rec)s, have %(host)s",
{'pbd_rec': pbd_rec, 'host': host})
if pbd_rec_host == host:
LOG.debug("ISO: SR with local PBD")
return sr_ref
return None
def _get_rrd_server():
"""Return server's scheme and address to use for retrieving RRD XMLs."""
xs_url = urlparse.urlparse(CONF.xenserver.connection_url)
return [xs_url.scheme, xs_url.netloc]
def _get_rrd(server, vm_uuid):
"""Return the VM RRD XML as a string."""
try:
xml = urllib.urlopen("%s://%s:%s@%s/vm_rrd?uuid=%s" % (
server[0],
CONF.xenserver.connection_username,
CONF.xenserver.connection_password,
server[1],
vm_uuid))
return xml.read()
except IOError:
LOG.exception(_LE('Unable to obtain RRD XML for VM %(vm_uuid)s with '
'server details: %(server)s.'),
{'vm_uuid': vm_uuid, 'server': server})
return None
def _get_all_vdis_in_sr(session, sr_ref):
for vdi_ref in session.call_xenapi('SR.get_VDIs', sr_ref):
vdi_rec = session.get_rec('VDI', vdi_ref)
# Check to make sure the record still exists. It may have
# been deleted between the get_all call and get_rec call
if vdi_rec:
yield vdi_ref, vdi_rec
def get_instance_vdis_for_sr(session, vm_ref, sr_ref):
"""Return opaqueRef for all the vdis which live on sr."""
for vbd_ref in session.call_xenapi('VM.get_VBDs', vm_ref):
try:
vdi_ref = session.call_xenapi('VBD.get_VDI', vbd_ref)
if sr_ref == session.call_xenapi('VDI.get_SR', vdi_ref):
yield vdi_ref
except session.XenAPI.Failure:
continue
def _get_vhd_parent_uuid(session, vdi_ref, vdi_rec=None):
if vdi_rec is None:
vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
if 'vhd-parent' not in vdi_rec['sm_config']:
return None
parent_uuid = vdi_rec['sm_config']['vhd-parent']
vdi_uuid = vdi_rec['uuid']
LOG.debug('VHD %(vdi_uuid)s has parent %(parent_uuid)s',
{'vdi_uuid': vdi_uuid, 'parent_uuid': parent_uuid})
return parent_uuid
def _walk_vdi_chain(session, vdi_uuid):
"""Yield vdi_recs for each element in a VDI chain."""
scan_default_sr(session)
while True:
vdi_ref = session.call_xenapi("VDI.get_by_uuid", vdi_uuid)
vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
yield vdi_rec
parent_uuid = _get_vhd_parent_uuid(session, vdi_ref, vdi_rec)
if not parent_uuid:
break
vdi_uuid = parent_uuid
def _is_vdi_a_snapshot(vdi_rec):
"""Ensure VDI is a snapshot, and not cached image."""
is_a_snapshot = vdi_rec['is_a_snapshot']
image_id = vdi_rec['other_config'].get('image-id')
return is_a_snapshot and not image_id
def _child_vhds(session, sr_ref, vdi_uuid_list, old_snapshots_only=False):
"""Return the immediate children of a given VHD.
This is not recursive, only the immediate children are returned.
"""
children = set()
for ref, rec in _get_all_vdis_in_sr(session, sr_ref):
rec_uuid = rec['uuid']
if rec_uuid in vdi_uuid_list:
continue
parent_uuid = _get_vhd_parent_uuid(session, ref, rec)
if parent_uuid not in vdi_uuid_list:
continue
if old_snapshots_only and not _is_vdi_a_snapshot(rec):
continue
children.add(rec_uuid)
return list(children)
def _count_children(session, parent_vdi_uuid, sr_ref):
# Search for any other vdi which has the same parent as us to work out
# whether we have siblings and therefore if coalesce is possible
children = 0
for _ref, rec in _get_all_vdis_in_sr(session, sr_ref):
if (rec['sm_config'].get('vhd-parent') == parent_vdi_uuid):
children = children + 1
return children
def _wait_for_vhd_coalesce(session, instance, sr_ref, vdi_ref,
vdi_uuid_list):
"""Spin until the parent VHD is coalesced into one of the VDIs in the list
    vdi_uuid_list is a list of acceptable final parent VDIs for vdi_ref; once
    the parent of vdi_ref is in vdi_uuid_list we consider the coalesce over.
    The use case is that any number of VDIs between those in vdi_uuid_list
    and vdi_ref may be coalesced, and any of those in vdi_uuid_list may also
    be coalesced (except the base UUID, which is guaranteed to remain).
"""
# If the base disk was a leaf node, there will be no coalescing
# after a VDI snapshot.
if len(vdi_uuid_list) == 1:
LOG.debug("Old chain is single VHD, coalesce not possible.",
instance=instance)
return
# If the parent of the original disk has other children,
# there will be no coalesce because of the VDI snapshot.
# For example, the first snapshot for an instance that has been
# spawned from a cached image, will not coalesce, because of this rule.
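    # vdi_uuid_list is ordered leaf first, so element 1 is the parent of
    # the original leaf disk.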
parent_vdi_uuid = vdi_uuid_list[1]
if _count_children(session, parent_vdi_uuid, sr_ref) > 1:
LOG.debug("Parent has other children, coalesce is unlikely.",
instance=instance)
return
# When the VDI snapshot is taken, a new parent is created.
# Assuming it is not one of the above cases, that new parent
# can be coalesced, so we need to wait for that to happen.
max_attempts = CONF.xenserver.vhd_coalesce_max_attempts
# Remove the leaf node from list, to get possible good parents
# when the coalesce has completed.
    # It's possible that other coalesce operations happen, so we need
# to consider the full chain, rather than just the most recent parent.
good_parent_uuids = vdi_uuid_list[1:]
for i in range(max_attempts):
# NOTE(sirp): This rescan is necessary to ensure the VM's `sm_config`
# matches the underlying VHDs.
# This can also kick XenServer into performing a pending coalesce.
_scan_sr(session, sr_ref)
parent_uuid = _get_vhd_parent_uuid(session, vdi_ref)
if parent_uuid and (parent_uuid not in good_parent_uuids):
LOG.debug("Parent %(parent_uuid)s not yet in parent list"
" %(good_parent_uuids)s, waiting for coalesce...",
{'parent_uuid': parent_uuid,
'good_parent_uuids': good_parent_uuids},
instance=instance)
else:
LOG.debug("Coalesce detected, because parent is: %s" % parent_uuid,
instance=instance)
return
greenthread.sleep(CONF.xenserver.vhd_coalesce_poll_interval)
msg = (_("VHD coalesce attempts exceeded (%d)"
", giving up...") % max_attempts)
raise exception.NovaException(msg)
def _remap_vbd_dev(dev):
"""Return the appropriate location for a plugged-in VBD device
Ubuntu Maverick moved xvd? -> sd?. This is considered a bug and will be
fixed in future versions:
https://bugs.launchpad.net/ubuntu/+source/linux/+bug/684875
For now, we work around it by just doing a string replace.
"""
# NOTE(sirp): This hack can go away when we pull support for Maverick
should_remap = CONF.xenserver.remap_vbd_dev
if not should_remap:
return dev
old_prefix = 'xvd'
new_prefix = CONF.xenserver.remap_vbd_dev_prefix
remapped_dev = dev.replace(old_prefix, new_prefix)
return remapped_dev
def _wait_for_device(dev):
"""Wait for device node to appear."""
for i in range(0, CONF.xenserver.block_device_creation_timeout):
dev_path = utils.make_dev_path(dev)
if os.path.exists(dev_path):
return
time.sleep(1)
raise exception.StorageError(
reason=_('Timeout waiting for device %s to be created') % dev)
def cleanup_attached_vdis(session):
"""Unplug any instance VDIs left after an unclean restart."""
this_vm_ref = _get_this_vm_ref(session)
vbd_refs = session.call_xenapi('VM.get_VBDs', this_vm_ref)
for vbd_ref in vbd_refs:
try:
vdi_ref = session.call_xenapi('VBD.get_VDI', vbd_ref)
vdi_rec = session.call_xenapi('VDI.get_record', vdi_ref)
except session.XenAPI.Failure as e:
if e.details[0] != 'HANDLE_INVALID':
raise
continue
if 'nova_instance_uuid' in vdi_rec['other_config']:
# Belongs to an instance and probably left over after an
# unclean restart
LOG.info(_LI('Disconnecting stale VDI %s from compute domU'),
vdi_rec['uuid'])
unplug_vbd(session, vbd_ref, this_vm_ref)
destroy_vbd(session, vbd_ref)
@contextlib.contextmanager
def vdi_attached_here(session, vdi_ref, read_only=False):
this_vm_ref = _get_this_vm_ref(session)
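    # Attach the VDI to this (compute) domU via a temporary VBD so its
    # contents can be accessed as a local block device; the VBD is
    # unplugged and destroyed again on exit.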
vbd_ref = create_vbd(session, this_vm_ref, vdi_ref, 'autodetect',
read_only=read_only, bootable=False)
try:
LOG.debug('Plugging VBD %s ... ', vbd_ref)
session.VBD.plug(vbd_ref, this_vm_ref)
try:
LOG.debug('Plugging VBD %s done.', vbd_ref)
orig_dev = session.call_xenapi("VBD.get_device", vbd_ref)
LOG.debug('VBD %(vbd_ref)s plugged as %(orig_dev)s',
{'vbd_ref': vbd_ref, 'orig_dev': orig_dev})
dev = _remap_vbd_dev(orig_dev)
if dev != orig_dev:
LOG.debug('VBD %(vbd_ref)s plugged into wrong dev, '
'remapping to %(dev)s',
{'vbd_ref': vbd_ref, 'dev': dev})
_wait_for_device(dev)
yield dev
finally:
utils.execute('sync', run_as_root=True)
LOG.debug('Destroying VBD for VDI %s ... ', vdi_ref)
unplug_vbd(session, vbd_ref, this_vm_ref)
finally:
try:
destroy_vbd(session, vbd_ref)
except exception.StorageError:
# destroy_vbd() will log error
pass
LOG.debug('Destroying VBD for VDI %s done.', vdi_ref)
def _get_sys_hypervisor_uuid():
with file('/sys/hypervisor/uuid') as f:
return f.readline().strip()
def get_this_vm_uuid(session):
if session and session.is_local_connection:
# UUID is the control domain running on this host
vms = session.call_xenapi("VM.get_all_records_where",
'field "is_control_domain"="true" and '
'field "resident_on"="%s"' %
session.host_ref)
return vms[vms.keys()[0]]['uuid']
try:
return _get_sys_hypervisor_uuid()
except IOError:
# Some guest kernels (without 5c13f8067745efc15f6ad0158b58d57c44104c25)
# cannot read from uuid after a reboot. Fall back to trying xenstore.
# See https://bugs.launchpad.net/ubuntu/+source/xen-api/+bug/1081182
domid, _ = utils.execute('xenstore-read', 'domid', run_as_root=True)
vm_key, _ = utils.execute('xenstore-read',
'/local/domain/%s/vm' % domid.strip(),
run_as_root=True)
return vm_key.strip()[4:]
def _get_this_vm_ref(session):
return session.call_xenapi("VM.get_by_uuid", get_this_vm_uuid(session))
def _get_partitions(dev):
"""Return partition information (num, size, type) for a device."""
dev_path = utils.make_dev_path(dev)
out, _err = utils.execute('parted', '--script', '--machine',
dev_path, 'unit s', 'print',
run_as_root=True)
lines = [line for line in out.split('\n') if line]
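    # The first two lines of `parted --machine` output describe the unit
    # and the device; the remaining rows are partitions in the form
    # num:start:end:size:fstype:name:flags;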
partitions = []
LOG.debug("Partitions:")
for line in lines[2:]:
line = line.rstrip(';')
num, start, end, size, fstype, name, flags = line.split(':')
num = int(num)
start = int(start.rstrip('s'))
end = int(end.rstrip('s'))
size = int(size.rstrip('s'))
LOG.debug(" %(num)s: %(fstype)s %(size)d sectors",
{'num': num, 'fstype': fstype, 'size': size})
partitions.append((num, start, size, fstype, name, flags))
return partitions
def _stream_disk(session, image_service_func, image_type, virtual_size, dev):
offset = 0
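    # For plain DISK images a partition table is written first and the
    # image data is streamed in just after the MBR.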
if image_type == ImageType.DISK:
offset = MBR_SIZE_BYTES
_write_partition(session, virtual_size, dev)
dev_path = utils.make_dev_path(dev)
with utils.temporary_chown(dev_path):
with open(dev_path, 'wb') as f:
f.seek(offset)
image_service_func(f)
def _write_partition(session, virtual_size, dev):
dev_path = utils.make_dev_path(dev)
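    # Reserve room for the MBR, then create one primary partition covering
    # the requested virtual size.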
primary_first = MBR_SIZE_SECTORS
primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1
LOG.debug('Writing partition table %(primary_first)d %(primary_last)d'
' to %(dev_path)s...',
{'primary_first': primary_first, 'primary_last': primary_last,
'dev_path': dev_path})
_make_partition(session, dev, "%ds" % primary_first, "%ds" % primary_last)
LOG.debug('Writing partition table %s done.', dev_path)
def _repair_filesystem(partition_path):
# Exit Code 1 = File system errors corrected
# 2 = File system errors corrected, system needs a reboot
utils.execute('e2fsck', '-f', '-y', partition_path, run_as_root=True,
check_exit_code=[0, 1, 2])
def _resize_part_and_fs(dev, start, old_sectors, new_sectors, flags):
"""Resize partition and fileystem.
This assumes we are dealing with a single primary partition and using
ext3 or ext4.
"""
size = new_sectors - start
end = new_sectors - 1
dev_path = utils.make_dev_path(dev)
partition_path = utils.make_dev_path(dev, partition=1)
# Replay journal if FS wasn't cleanly unmounted
_repair_filesystem(partition_path)
# Remove ext3 journal (making it ext2)
utils.execute('tune2fs', '-O ^has_journal', partition_path,
run_as_root=True)
if new_sectors < old_sectors:
# Resizing down, resize filesystem before partition resize
try:
utils.execute('resize2fs', partition_path, '%ds' % size,
run_as_root=True)
except processutils.ProcessExecutionError as exc:
LOG.error(six.text_type(exc))
reason = _("Shrinking the filesystem down with resize2fs "
"has failed, please check if you have "
"enough free space on your disk.")
raise exception.ResizeError(reason=reason)
utils.execute('parted', '--script', dev_path, 'rm', '1',
run_as_root=True)
utils.execute('parted', '--script', dev_path, 'mkpart',
'primary',
'%ds' % start,
'%ds' % end,
run_as_root=True)
if "boot" in flags.lower():
utils.execute('parted', '--script', dev_path,
'set', '1', 'boot', 'on',
run_as_root=True)
if new_sectors > old_sectors:
# Resizing up, resize filesystem after partition resize
utils.execute('resize2fs', partition_path, run_as_root=True)
# Add back journal
utils.execute('tune2fs', '-j', partition_path, run_as_root=True)
def _log_progress_if_required(left, last_log_time, virtual_size):
if timeutils.is_older_than(last_log_time, PROGRESS_INTERVAL_SECONDS):
last_log_time = timeutils.utcnow()
complete_pct = float(virtual_size - left) / virtual_size * 100
LOG.debug("Sparse copy in progress, "
"%(complete_pct).2f%% complete. "
"%(left)s bytes left to copy",
{"complete_pct": complete_pct, "left": left})
return last_log_time
def _sparse_copy(src_path, dst_path, virtual_size, block_size=4096):
"""Copy data, skipping long runs of zeros to create a sparse file."""
start_time = last_log_time = timeutils.utcnow()
EMPTY_BLOCK = '\0' * block_size
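    # Blocks that are entirely zero are skipped with seek() instead of
    # being written, which keeps the destination file/device sparse.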
bytes_read = 0
skipped_bytes = 0
left = virtual_size
LOG.debug("Starting sparse_copy src=%(src_path)s dst=%(dst_path)s "
"virtual_size=%(virtual_size)d block_size=%(block_size)d",
{'src_path': src_path, 'dst_path': dst_path,
'virtual_size': virtual_size, 'block_size': block_size})
# NOTE(sirp): we need read/write access to the devices; since we don't have
# the luxury of shelling out to a sudo'd command, we temporarily take
# ownership of the devices.
with utils.temporary_chown(src_path):
with utils.temporary_chown(dst_path):
with open(src_path, "r") as src:
with open(dst_path, "w") as dst:
data = src.read(min(block_size, left))
while data:
if data == EMPTY_BLOCK:
dst.seek(block_size, os.SEEK_CUR)
left -= block_size
bytes_read += block_size
skipped_bytes += block_size
else:
dst.write(data)
data_len = len(data)
left -= data_len
bytes_read += data_len
if left <= 0:
break
data = src.read(min(block_size, left))
greenthread.sleep(0)
last_log_time = _log_progress_if_required(
left, last_log_time, virtual_size)
duration = timeutils.delta_seconds(start_time, timeutils.utcnow())
compression_pct = float(skipped_bytes) / bytes_read * 100
LOG.debug("Finished sparse_copy in %(duration).2f secs, "
"%(compression_pct).2f%% reduction in size",
{'duration': duration, 'compression_pct': compression_pct})
def _copy_partition(session, src_ref, dst_ref, partition, virtual_size):
# Part of disk taken up by MBR
virtual_size -= MBR_SIZE_BYTES
with vdi_attached_here(session, src_ref, read_only=True) as src:
src_path = utils.make_dev_path(src, partition=partition)
with vdi_attached_here(session, dst_ref, read_only=False) as dst:
dst_path = utils.make_dev_path(dst, partition=partition)
_write_partition(session, virtual_size, dst)
if CONF.xenserver.sparse_copy:
_sparse_copy(src_path, dst_path, virtual_size)
else:
num_blocks = virtual_size / SECTOR_SIZE
utils.execute('dd',
'if=%s' % src_path,
'of=%s' % dst_path,
'count=%d' % num_blocks,
'iflag=direct,sync',
'oflag=direct,sync',
run_as_root=True)
def _mount_filesystem(dev_path, dir):
"""mounts the device specified by dev_path in dir."""
try:
_out, err = utils.execute('mount',
'-t', 'ext2,ext3,ext4,reiserfs',
dev_path, dir, run_as_root=True)
except processutils.ProcessExecutionError as e:
err = six.text_type(e)
return err
def _mounted_processing(device, key, net, metadata):
"""Callback which runs with the image VDI attached."""
# NB: Partition 1 hardcoded
dev_path = utils.make_dev_path(device, partition=1)
with utils.tempdir() as tmpdir:
# Mount only Linux filesystems, to avoid disturbing NTFS images
err = _mount_filesystem(dev_path, tmpdir)
if not err:
try:
# This try block ensures that the umount occurs
if not agent.find_guest_agent(tmpdir):
# TODO(berrange) passing in a None filename is
# rather dubious. We shouldn't be re-implementing
# the mount/unmount logic here either, when the
# VFSLocalFS impl has direct support for mount
# and unmount handling if it were passed a
# non-None filename
vfs = vfsimpl.VFSLocalFS(
imgmodel.LocalFileImage(None, imgmodel.FORMAT_RAW),
imgdir=tmpdir)
LOG.info(_LI('Manipulating interface files directly'))
# for xenapi, we don't 'inject' admin_password here,
# it's handled at instance startup time, nor do we
# support injecting arbitrary files here.
disk.inject_data_into_fs(vfs,
key, net, metadata, None, None)
finally:
utils.execute('umount', dev_path, run_as_root=True)
else:
LOG.info(_LI('Failed to mount filesystem (expected for '
'non-linux instances): %s'), err)
def ensure_correct_host(session):
"""Ensure we're connected to the host we're running on. This is the
required configuration for anything that uses vdi_attached_here.
"""
this_vm_uuid = get_this_vm_uuid(session)
try:
session.call_xenapi('VM.get_by_uuid', this_vm_uuid)
except session.XenAPI.Failure as exc:
if exc.details[0] != 'UUID_INVALID':
raise
raise Exception(_('This domU must be running on the host '
'specified by connection_url'))
def import_all_migrated_disks(session, instance, import_root=True):
root_vdi = None
if import_root:
root_vdi = _import_migrated_root_disk(session, instance)
eph_vdis = _import_migrate_ephemeral_disks(session, instance)
return {'root': root_vdi, 'ephemerals': eph_vdis}
def _import_migrated_root_disk(session, instance):
chain_label = instance['uuid']
vdi_label = instance['name']
return _import_migrated_vhds(session, instance, chain_label, "root",
vdi_label)
def _import_migrate_ephemeral_disks(session, instance):
ephemeral_vdis = {}
instance_uuid = instance['uuid']
ephemeral_gb = instance["ephemeral_gb"]
disk_sizes = get_ephemeral_disk_sizes(ephemeral_gb)
for chain_number, _size in enumerate(disk_sizes, start=1):
chain_label = instance_uuid + "_ephemeral_%d" % chain_number
vdi_label = "%(name)s ephemeral (%(number)d)" % dict(
name=instance['name'], number=chain_number)
ephemeral_vdi = _import_migrated_vhds(session, instance,
chain_label, "ephemeral",
vdi_label)
userdevice = 3 + chain_number
ephemeral_vdis[str(userdevice)] = ephemeral_vdi
return ephemeral_vdis
def _import_migrated_vhds(session, instance, chain_label, disk_type,
vdi_label):
"""Move and possibly link VHDs via the XAPI plugin."""
# TODO(johngarbutt) tidy up plugin params
imported_vhds = session.call_plugin_serialized(
'migration', 'move_vhds_into_sr', instance_uuid=chain_label,
sr_path=get_sr_path(session), uuid_stack=_make_uuid_stack())
# Now we rescan the SR so we find the VHDs
scan_default_sr(session)
vdi_uuid = imported_vhds['root']['uuid']
vdi_ref = session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
# Set name-label so we can find if we need to clean up a failed migration
_set_vdi_info(session, vdi_ref, disk_type, vdi_label,
disk_type, instance)
return {'uuid': vdi_uuid, 'ref': vdi_ref}
def migrate_vhd(session, instance, vdi_uuid, dest, sr_path, seq_num,
ephemeral_number=0):
LOG.debug("Migrating VHD '%(vdi_uuid)s' with seq_num %(seq_num)d",
{'vdi_uuid': vdi_uuid, 'seq_num': seq_num},
instance=instance)
chain_label = instance['uuid']
if ephemeral_number:
chain_label = instance['uuid'] + "_ephemeral_%d" % ephemeral_number
try:
# TODO(johngarbutt) tidy up plugin params
session.call_plugin_serialized('migration', 'transfer_vhd',
instance_uuid=chain_label, host=dest, vdi_uuid=vdi_uuid,
sr_path=sr_path, seq_num=seq_num)
except session.XenAPI.Failure:
msg = "Failed to transfer vhd to new host"
LOG.debug(msg, instance=instance, exc_info=True)
raise exception.MigrationError(reason=msg)
def vm_ref_or_raise(session, instance_name):
vm_ref = lookup(session, instance_name)
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance_name)
return vm_ref
def handle_ipxe_iso(session, instance, cd_vdi, network_info):
"""iPXE ISOs are a mechanism to allow the customer to roll their own
image.
To use this feature, a service provider needs to configure the
appropriate Nova flags, roll an iPXE ISO, then distribute that image
to customers via Glance.
NOTE: `mkisofs` is not present by default in the Dom0, so the service
provider can either add that package manually to Dom0 or include the
`mkisofs` binary in the image itself.
"""
boot_menu_url = CONF.xenserver.ipxe_boot_menu_url
if not boot_menu_url:
LOG.warning(_LW('ipxe_boot_menu_url not set, user will have to'
' enter URL manually...'), instance=instance)
return
network_name = CONF.xenserver.ipxe_network_name
if not network_name:
LOG.warning(_LW('ipxe_network_name not set, user will have to'
' enter IP manually...'), instance=instance)
return
network = None
for vif in network_info:
if vif['network']['label'] == network_name:
network = vif['network']
break
if not network:
LOG.warning(_LW("Unable to find network matching '%(network_name)s', "
"user will have to enter IP manually..."),
{'network_name': network_name}, instance=instance)
return
sr_path = get_sr_path(session)
# Unpack IPv4 network info
subnet = [sn for sn in network['subnets']
if sn['version'] == 4][0]
ip = subnet['ips'][0]
ip_address = ip['address']
netmask = network_model.get_netmask(ip, subnet)
gateway = subnet['gateway']['address']
dns = subnet['dns'][0]['address']
try:
session.call_plugin_serialized("ipxe", "inject", sr_path,
cd_vdi['uuid'], boot_menu_url, ip_address, netmask,
gateway, dns, CONF.xenserver.ipxe_mkisofs_cmd)
except session.XenAPI.Failure as exc:
_type, _method, error = exc.details[:3]
if error == 'CommandNotFound':
LOG.warning(_LW("ISO creation tool '%s' does not exist."),
CONF.xenserver.ipxe_mkisofs_cmd, instance=instance)
else:
raise
def set_other_config_pci(session, vm_ref, params):
"""Set the pci key of other-config parameter to params."""
other_config = session.call_xenapi("VM.get_other_config", vm_ref)
other_config['pci'] = params
session.call_xenapi("VM.set_other_config", vm_ref, other_config)
|
Danielhiversen/home-assistant | refs/heads/master | tests/components/geo_location/test_geo_json_events.py | 1 | """The tests for the geojson platform."""
import unittest
from unittest import mock
from unittest.mock import patch, MagicMock
import homeassistant
from homeassistant.components import geo_location
from homeassistant.components.geo_location import ATTR_SOURCE
from homeassistant.components.geo_location.geo_json_events import \
SCAN_INTERVAL, ATTR_EXTERNAL_ID
from homeassistant.const import CONF_URL, EVENT_HOMEASSISTANT_START, \
CONF_RADIUS, ATTR_LATITUDE, ATTR_LONGITUDE, ATTR_FRIENDLY_NAME, \
ATTR_UNIT_OF_MEASUREMENT
from homeassistant.setup import setup_component
from tests.common import get_test_home_assistant, assert_setup_component, \
fire_time_changed
import homeassistant.util.dt as dt_util
URL = 'http://geo.json.local/geo_json_events.json'
CONFIG = {
geo_location.DOMAIN: [
{
'platform': 'geo_json_events',
CONF_URL: URL,
CONF_RADIUS: 200
}
]
}
class TestGeoJsonPlatform(unittest.TestCase):
"""Test the geojson platform."""
def setUp(self):
"""Initialize values for this testcase class."""
self.hass = get_test_home_assistant()
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
@staticmethod
def _generate_mock_feed_entry(external_id, title, distance_to_home,
coordinates):
"""Construct a mock feed entry for testing purposes."""
feed_entry = MagicMock()
feed_entry.external_id = external_id
feed_entry.title = title
feed_entry.distance_to_home = distance_to_home
feed_entry.coordinates = coordinates
return feed_entry
@mock.patch('geojson_client.generic_feed.GenericFeed')
def test_setup(self, mock_feed):
"""Test the general setup of the platform."""
# Set up some mock feed entries for this test.
mock_entry_1 = self._generate_mock_feed_entry('1234', 'Title 1', 15.5,
(-31.0, 150.0))
mock_entry_2 = self._generate_mock_feed_entry('2345', 'Title 2', 20.5,
(-31.1, 150.1))
mock_entry_3 = self._generate_mock_feed_entry('3456', 'Title 3', 25.5,
(-31.2, 150.2))
mock_entry_4 = self._generate_mock_feed_entry('4567', 'Title 4', 12.5,
(-31.3, 150.3))
mock_feed.return_value.update.return_value = 'OK', [mock_entry_1,
mock_entry_2,
mock_entry_3]
utcnow = dt_util.utcnow()
# Patching 'utcnow' to gain more control over the timed update.
with patch('homeassistant.util.dt.utcnow', return_value=utcnow):
with assert_setup_component(1, geo_location.DOMAIN):
self.assertTrue(setup_component(self.hass, geo_location.DOMAIN,
CONFIG))
# Artificially trigger update.
self.hass.bus.fire(EVENT_HOMEASSISTANT_START)
# Collect events.
self.hass.block_till_done()
all_states = self.hass.states.all()
assert len(all_states) == 3
state = self.hass.states.get("geo_location.title_1")
self.assertIsNotNone(state)
assert state.name == "Title 1"
assert state.attributes == {
ATTR_EXTERNAL_ID: "1234", ATTR_LATITUDE: -31.0,
ATTR_LONGITUDE: 150.0, ATTR_FRIENDLY_NAME: "Title 1",
ATTR_UNIT_OF_MEASUREMENT: "km",
ATTR_SOURCE: 'geo_json_events'}
self.assertAlmostEqual(float(state.state), 15.5)
state = self.hass.states.get("geo_location.title_2")
self.assertIsNotNone(state)
assert state.name == "Title 2"
assert state.attributes == {
ATTR_EXTERNAL_ID: "2345", ATTR_LATITUDE: -31.1,
ATTR_LONGITUDE: 150.1, ATTR_FRIENDLY_NAME: "Title 2",
ATTR_UNIT_OF_MEASUREMENT: "km",
ATTR_SOURCE: 'geo_json_events'}
self.assertAlmostEqual(float(state.state), 20.5)
state = self.hass.states.get("geo_location.title_3")
self.assertIsNotNone(state)
assert state.name == "Title 3"
assert state.attributes == {
ATTR_EXTERNAL_ID: "3456", ATTR_LATITUDE: -31.2,
ATTR_LONGITUDE: 150.2, ATTR_FRIENDLY_NAME: "Title 3",
ATTR_UNIT_OF_MEASUREMENT: "km",
ATTR_SOURCE: 'geo_json_events'}
self.assertAlmostEqual(float(state.state), 25.5)
# Simulate an update - one existing, one new entry,
# one outdated entry
mock_feed.return_value.update.return_value = 'OK', [
mock_entry_1, mock_entry_4, mock_entry_3]
fire_time_changed(self.hass, utcnow + SCAN_INTERVAL)
self.hass.block_till_done()
all_states = self.hass.states.all()
assert len(all_states) == 3
# Simulate an update - empty data, but successful update,
# so no changes to entities.
mock_feed.return_value.update.return_value = 'OK_NO_DATA', None
# mock_restdata.return_value.data = None
fire_time_changed(self.hass, utcnow +
2 * SCAN_INTERVAL)
self.hass.block_till_done()
all_states = self.hass.states.all()
assert len(all_states) == 3
                # Simulate an update - error status, removes all entities
mock_feed.return_value.update.return_value = 'ERROR', None
fire_time_changed(self.hass, utcnow +
2 * SCAN_INTERVAL)
self.hass.block_till_done()
all_states = self.hass.states.all()
assert len(all_states) == 0
@mock.patch('geojson_client.generic_feed.GenericFeed')
def test_setup_race_condition(self, mock_feed):
"""Test a particular race condition experienced."""
# 1. Feed returns 1 entry -> Feed manager creates 1 entity.
# 2. Feed returns error -> Feed manager removes 1 entity.
# However, this stayed on and kept listening for dispatcher signals.
# 3. Feed returns 1 entry -> Feed manager creates 1 entity.
# 4. Feed returns 1 entry -> Feed manager updates 1 entity.
# Internally, the previous entity is updating itself, too.
# 5. Feed returns error -> Feed manager removes 1 entity.
# There are now 2 entities trying to remove themselves from HA, but
# the second attempt fails of course.
# Set up some mock feed entries for this test.
mock_entry_1 = self._generate_mock_feed_entry('1234', 'Title 1', 15.5,
(-31.0, 150.0))
mock_feed.return_value.update.return_value = 'OK', [mock_entry_1]
utcnow = dt_util.utcnow()
# Patching 'utcnow' to gain more control over the timed update.
with patch('homeassistant.util.dt.utcnow', return_value=utcnow):
with assert_setup_component(1, geo_location.DOMAIN):
self.assertTrue(setup_component(self.hass, geo_location.DOMAIN,
CONFIG))
# This gives us the ability to assert the '_delete_callback'
# has been called while still executing it.
original_delete_callback = homeassistant.components\
.geo_location.geo_json_events.GeoJsonLocationEvent\
._delete_callback
def mock_delete_callback(entity):
original_delete_callback(entity)
with patch('homeassistant.components.geo_location'
'.geo_json_events.GeoJsonLocationEvent'
'._delete_callback',
side_effect=mock_delete_callback,
autospec=True) as mocked_delete_callback:
# Artificially trigger update.
self.hass.bus.fire(EVENT_HOMEASSISTANT_START)
# Collect events.
self.hass.block_till_done()
all_states = self.hass.states.all()
assert len(all_states) == 1
                    # Simulate an update - error status, removes all entities
mock_feed.return_value.update.return_value = 'ERROR', None
fire_time_changed(self.hass, utcnow + SCAN_INTERVAL)
self.hass.block_till_done()
assert mocked_delete_callback.call_count == 1
all_states = self.hass.states.all()
assert len(all_states) == 0
# Simulate an update - 1 entry
mock_feed.return_value.update.return_value = 'OK', [
mock_entry_1]
fire_time_changed(self.hass, utcnow + 2 * SCAN_INTERVAL)
self.hass.block_till_done()
all_states = self.hass.states.all()
assert len(all_states) == 1
# Simulate an update - 1 entry
mock_feed.return_value.update.return_value = 'OK', [
mock_entry_1]
fire_time_changed(self.hass, utcnow + 3 * SCAN_INTERVAL)
self.hass.block_till_done()
all_states = self.hass.states.all()
assert len(all_states) == 1
# Reset mocked method for the next test.
mocked_delete_callback.reset_mock()
                    # Simulate an update - error status, removes all entities
mock_feed.return_value.update.return_value = 'ERROR', None
fire_time_changed(self.hass, utcnow + 4 * SCAN_INTERVAL)
self.hass.block_till_done()
assert mocked_delete_callback.call_count == 1
all_states = self.hass.states.all()
assert len(all_states) == 0
|
jamesroutley/formation | refs/heads/master | setup.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
setup(
name="formation",
version="0.1.0",
author="James Routley",
author_email="[email protected]",
license="Apache2",
packages=[
"formation"
],
package_dir={
"formation": "formation"
},
setup_requires=["pytest-runner"],
tests_require=["pytest"],
test_suite="test"
)
|
hornn/interviews | refs/heads/pairing_sizeunit_AT120115 | tools/bin/gppylib/test/regress/test_regress_gpexpand.py | 30 | #!/usr/bin/env python
import unittest2 as unittest
import os, socket
from gppylib.commands.base import Command, ExecutionError
from gppylib.commands.gp import GpStart
from gppylib.db import dbconn
class GpExpandTestCase(unittest.TestCase):
EXPANSION_INPUT_FILE = 'test_expand.input'
GP_COMMAND_FAULT_POINT = 'GP_COMMAND_FAULT_POINT'
GPMGMT_FAULT_POINT = 'GPMGMT_FAULT_POINT'
MASTER_DATA_DIRECTORY = os.environ['MASTER_DATA_DIRECTORY']
SEGMENTS = 1
TEST_DB = 'testdb'
NUM_TABLES = 10
primary_host_name = None
mirror_host_name = None
primary_host_address = None
mirror_host_address = None
def setUp(self):
self._create_test_db()
self._create_expansion_input_file()
def tearDown(self):
os.remove(self.EXPANSION_INPUT_FILE)
if self.GP_COMMAND_FAULT_POINT in os.environ:
del os.environ[self.GP_COMMAND_FAULT_POINT]
def _create_expansion_input_file(self):
"""This code has been taken from system_management utilities
test suite.
creates a expansion input file"""
with dbconn.connect(dbconn.DbURL()) as conn:
next_dbid = dbconn.execSQLForSingletonRow(conn,
"select max(dbid)+1 \
from pg_catalog.gp_segment_configuration")[0]
next_content = dbconn.execSQL(conn,
"select max(content)+1 \
from pg_catalog.gp_segment_configuration").fetchall()[0][0]
next_pri_port = dbconn.execSQL(conn,
"select max(port)+1 \
from pg_catalog.gp_segment_configuration \
where role='p'").fetchall()[0][0]
self.primary_host_name = dbconn.execSQL(conn,
"select distinct hostname \
from gp_segment_configuration \
where content >= 0 and preferred_role = 'p'").fetchall()[0][0]
next_mir_port = dbconn.execSQL(conn,
"select max(port)+1 \
from pg_catalog.gp_segment_configuration \
where role='m'").fetchall()[0][0]
            if next_mir_port is None or next_mir_port == ' ' or next_mir_port == 0:
mirroring_on = False
else:
mirroring_on = True
next_pri_replication_port = dbconn.execSQL(conn,
"select max(replication_port)+1 \
from pg_catalog.gp_segment_configuration \
where role='p'").fetchall()[0][0]
next_mir_replication_port = dbconn.execSQL(conn,
"select max(replication_port)+1 \
from pg_catalog.gp_segment_configuration \
where role='m'").fetchall()[0][0]
select_mirror = "select distinct hostname \
from gp_segment_configuration \
where content >= 0 and preferred_role = 'm' and hostname != '%s'" % self.primary_host_name
mirror_name_row = dbconn.execSQL(conn, select_mirror).fetchall()
if mirror_name_row == None or len(mirror_name_row) == 0:
self.mirror_host_name = self.primary_host_name
else:
self.mirror_host_name = mirror_name_row[0][0]
self.primary_host_address = socket.getaddrinfo(self.primary_host_name, None)[0][4][0]
self.mirror_host_address = socket.getaddrinfo(self.mirror_host_name, None)[0][4][0]
with open(self.EXPANSION_INPUT_FILE, 'w') as outfile:
for i in range(self.SEGMENTS):
pri_datadir = os.path.join(os.getcwd(), 'new_pri_seg%d' % i)
mir_datadir = os.path.join(os.getcwd(), 'new_mir_seg%d' % i)
temp_str = "%s:%s:%d:%s:%d:%d:%s" % (self.primary_host_name, self.primary_host_address, next_pri_port, pri_datadir, next_dbid, next_content, 'p')
if mirroring_on:
temp_str = temp_str + ":" + str(next_pri_replication_port)
temp_str = temp_str + "\n"
outfile.write(temp_str)
                if mirroring_on:  # The mirror's content number is the same as the primary segment's
next_dbid += 1
outfile.write("%s:%s:%d:%s:%d:%d:%s:%s\n" % (self.mirror_host_name, self.mirror_host_address, next_mir_port, mir_datadir, next_dbid, next_content, 'm', str(next_mir_replication_port)))
next_mir_port += 1
next_pri_replication_port += 1
next_mir_replication_port += 1
next_pri_port += 1
next_dbid += 1
next_content += 1
def _create_test_db(self):
testdb_exists = True
with dbconn.connect(dbconn.DbURL()) as conn:
row = dbconn.execSQLForSingletonRow(conn, "select count(*) from pg_database where datname='%s'" % self.TEST_DB)
if row[0] == 0:
testdb_exists = False
if not testdb_exists:
Command('create a test database', 'createdb %s' % self.TEST_DB).run(validateAfter=True)
def _create_tables(self):
with dbconn.connect(dbconn.DbURL()) as conn:
for i in range(self.NUM_TABLES):
dbconn.execSQL(conn, 'create table tab%d(i integer)' % i)
conn.commit()
def _drop_tables(self):
with dbconn.connect(dbconn.DbURL()) as conn:
for i in range(self.NUM_TABLES):
dbconn.execSQL(conn, 'drop table tab%d' % i)
conn.commit()
def _get_dist_policies(self):
policies = []
with dbconn.connect(dbconn.DbURL()) as conn:
cursor = dbconn.execSQL(conn, 'select * from gp_distribution_policy;').fetchall()
for row in cursor:
policies.append(row)
return policies
def test00_pg_hba_conf_file(self):
os.environ[self.GP_COMMAND_FAULT_POINT] = 'gpexpand tar segment template'
cmd = Command(name='run gpexpand', cmdStr='gpexpand -D %s -i %s' % (self.TEST_DB, self.EXPANSION_INPUT_FILE))
with self.assertRaisesRegexp(ExecutionError, 'Fault Injection'):
cmd.run(validateAfter=True)
        # Read from the pg_hba.conf file and ensure that the address
        # of the new hosts is present.
cmd = Command(name='get the temp pg_hba.conf file',
cmdStr="ls %s" % os.path.join(os.path.dirname(self.MASTER_DATA_DIRECTORY),
'gpexpand*',
'pg_hba.conf'))
cmd.run(validateAfter=True)
results = cmd.get_results()
temp_pg_hba_conf = results.stdout.strip()
actual_values = set()
expected_values = set([self.primary_host_address, self.mirror_host_address])
with open(temp_pg_hba_conf) as f:
for line in f:
if line.strip() == '# %s' % self.primary_host_name or\
line.strip() == '# %s' % self.mirror_host_name:
address = f.next().strip().split()[3]
address = address[:address.rfind('/')]
actual_values.add(address)
self.assertEqual(actual_values, expected_values)
GpStart(name='start the database in master only mode', masterOnly=True).run(validateAfter=True)
Command(name='rollback the expansion', cmdStr='gpexpand -r -D %s' % self.TEST_DB).run(validateAfter=True)
GpStart(name='start the database').run(validateAfter=True)
def test01_distribution_policy(self):
self._create_tables()
try:
os.environ[self.GPMGMT_FAULT_POINT] = 'gpexpand MPP-14620 fault injection'
original_dist_policies = self._get_dist_policies()
cmd = Command(name='run gpexpand', cmdStr='gpexpand -D %s -i %s' % (self.TEST_DB, self.EXPANSION_INPUT_FILE))
with self.assertRaisesRegexp(ExecutionError, 'Fault Injection'):
cmd.run(validateAfter=True)
rollback = Command(name='rollback expansion', cmdStr='gpexpand -r -D %s' % self.TEST_DB)
rollback.run(validateAfter=True)
dist_policies = self._get_dist_policies()
self.assertEqual(original_dist_policies, dist_policies)
finally:
self._drop_tables()
|
germs-lab/RefSoil | refs/heads/master | script_google_api/scripts/getting-soil-genomes/parse-the-giant-excel.py | 3 | import sys
# Skip the two header lines, then print the strain name (field index 9)
# from every tab-delimited row that has all 36 expected columns.
for n, line in enumerate(open(sys.argv[1], 'rU')):
if n < 2:
continue
else:
dat = line.rstrip().split('\t')
if len(dat) == 36:
strain = dat[9]
strain = strain.replace('"', '')
print strain
|
LohithBlaze/scikit-learn | refs/heads/master | doc/datasets/mldata_fixture.py | 367 | """Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
# Create a temporary folder for the data fetcher
global custom_data_home
custom_data_home = tempfile.mkdtemp()
makedirs(join(custom_data_home, 'mldata'))
globs['custom_data_home'] = custom_data_home
return globs
def setup_module():
# setup mock urllib2 module to avoid downloading from mldata.org
install_mldata_mock({
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
})
def teardown_module():
uninstall_mldata_mock()
shutil.rmtree(custom_data_home)
|
jxw1102/Projet-merou | refs/heads/master | ModelChecker/make-package.py | 1 | import sys
from subprocess import call
import os
import shutil
"""Prints the syntax and options to the user"""
def print_usage():
print("\nThis command packs the relevant binary files in a jar. The output jar is stored in the release folder.")
print("\nSyntax : make-package release-name [options]","\n\nrelease-name can be any of the following :")
print("\t. -ctl-only : packs ctl-related classes only\n\t. -ast-only : packs ast-related classes only\n\t. -full : packs the whole application")
print("\n-----------------\nOptions\n-----------------\n\n. -src : adds the corresponding source code to the archive")
"""Packs together the packages passed as a parameter"""
def make_jar(folder,build,packages):
root = os.getcwd()
os.chdir(folder)
call([ "jar","-cf","../release/tmp/" + build[1:] + "-" + folder + ".jar" ] + packages)
os.chdir(root)
def main():
if len(sys.argv) < 2:
print_usage()
return
build = sys.argv[1]
if build == "-ctl-only":
packages = [ "ctl","graph" ]
elif build == "-ast-only":
packages = [ "ast","graph" ]
elif build == "-full":
packages = [ "ast","ctl","cfg","graph" ]
else:
print_usage()
return
# create a temporary folder to contain the sub-jars
if not(os.path.exists("release/tmp")):
os.mkdir("release/tmp")
make_jar("bin",build,packages)
if "-src" in sys.argv:
make_jar("src",build,packages)
# license files
shutil.copyfile("../LICENSE.txt","release/tmp/LICENSE.txt")
shutil.copyfile("../THIRD_PARTY.txt","release/tmp/THIRD_PARTY.txt")
shutil.copyfile("../NOTICE.txt","release/tmp/NOTICE.txt")
# package the sub-jars together
os.chdir("release")
call([ "jar","-cf",build[1:] + ".jar","-C","tmp","." ])
shutil.rmtree("tmp")
if __name__ == "__main__":
main()
|
Alphadelta14/ansible | refs/heads/devel | lib/ansible/plugins/action/script.py | 67 | # (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible import constants as C
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
TRANSFERS_FILES = True
def run(self, tmp=None, task_vars=None):
''' handler for file transfer operations '''
if self._play_context.check_mode:
return dict(skipped=True, msg='check mode not supported for this module')
if not tmp:
tmp = self._make_tmp_path()
creates = self._task.args.get('creates')
if creates:
# do not run the command if the line contains creates=filename
# and the filename already exists. This allows idempotence
# of command executions.
result = self._execute_module(module_name='stat', module_args=dict(path=creates), task_vars=task_vars, tmp=tmp, persist_files=True)
stat = result.get('stat', None)
if stat and stat.get('exists', False):
return dict(skipped=True, msg=("skipped, since %s exists" % creates))
removes = self._task.args.get('removes')
if removes:
# do not run the command if the line contains removes=filename
# and the filename does not exist. This allows idempotence
# of command executions.
result = self._execute_module(module_name='stat', module_args=dict(path=removes), task_vars=task_vars, tmp=tmp, persist_files=True)
stat = result.get('stat', None)
if stat and not stat.get('exists', False):
return dict(skipped=True, msg=("skipped, since %s does not exist" % removes))
# the script name is the first item in the raw params, so we split it
# out now so we know the file name we need to transfer to the remote,
# and everything else is an argument to the script which we need later
# to append to the remote command
parts = self._task.args.get('_raw_params', '').strip().split()
source = parts[0]
args = ' '.join(parts[1:])
if self._task._role is not None:
source = self._loader.path_dwim_relative(self._task._role._role_path, 'files', source)
else:
source = self._loader.path_dwim_relative(self._loader.get_basedir(), 'files', source)
# transfer the file to a remote tmp location
tmp_src = self._connection._shell.join_path(tmp, os.path.basename(source))
self._connection.put_file(source, tmp_src)
sudoable = True
# set file permissions, more permissive when the copy is done as a different user
if self._play_context.become and self._play_context.become_user != 'root':
chmod_mode = 'a+rx'
sudoable = False
else:
chmod_mode = '+rx'
self._remote_chmod(tmp, chmod_mode, tmp_src, sudoable=sudoable)
# add preparation steps to one ssh roundtrip executing the script
env_string = self._compute_environment_string()
script_cmd = ' '.join([env_string, tmp_src, args])
result = self._low_level_execute_command(cmd=script_cmd, tmp=None, sudoable=True)
# clean up after
if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES:
self._remove_tmp_path(tmp)
result['changed'] = True
return result
|
libyal/libqcow | refs/heads/main | tests/pyqcow_test_support.py | 1 | #!/usr/bin/env python
#
# Python-bindings support functions test script
#
# Copyright (C) 2010-2021, Joachim Metz <[email protected]>
#
# Refer to AUTHORS for acknowledgements.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import argparse
import os
import sys
import unittest
import pyqcow
class SupportFunctionsTests(unittest.TestCase):
"""Tests the support functions."""
def test_get_version(self):
"""Tests the get_version function."""
version = pyqcow.get_version()
self.assertIsNotNone(version)
def test_check_file_signature(self):
"""Tests the check_file_signature function."""
test_source = unittest.source
if not test_source:
raise unittest.SkipTest("missing source")
result = pyqcow.check_file_signature(test_source)
self.assertTrue(result)
def test_check_file_signature_file_object(self):
"""Tests the check_file_signature_file_object function."""
test_source = unittest.source
if not test_source:
raise unittest.SkipTest("missing source")
with open(test_source, "rb") as file_object:
result = pyqcow.check_file_signature_file_object(file_object)
self.assertTrue(result)
def test_open(self):
"""Tests the open function."""
test_source = unittest.source
if not test_source:
raise unittest.SkipTest("missing source")
qcow_file = pyqcow.open(test_source)
self.assertIsNotNone(qcow_file)
qcow_file.close()
with self.assertRaises(TypeError):
pyqcow.open(None)
with self.assertRaises(ValueError):
pyqcow.open(test_source, mode="w")
def test_open_file_object(self):
"""Tests the open_file_object function."""
test_source = unittest.source
if not test_source:
raise unittest.SkipTest("missing source")
if not os.path.isfile(test_source):
raise unittest.SkipTest("source not a regular file")
with open(test_source, "rb") as file_object:
qcow_file = pyqcow.open_file_object(file_object)
self.assertIsNotNone(qcow_file)
qcow_file.close()
with self.assertRaises(TypeError):
pyqcow.open_file_object(None)
with self.assertRaises(ValueError):
pyqcow.open_file_object(file_object, mode="w")
if __name__ == "__main__":
argument_parser = argparse.ArgumentParser()
argument_parser.add_argument(
"source", nargs="?", action="store", metavar="PATH",
default=None, help="path of the source file.")
options, unknown_options = argument_parser.parse_known_args()
unknown_options.insert(0, sys.argv[0])
setattr(unittest, "source", options.source)
unittest.main(argv=unknown_options, verbosity=2)
|
aisworld/mailmanclient | refs/heads/master | src/mailmanclient/testing/documentation.py | 1 | # Copyright (C) 2007-2015 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""Harness for testing Mailman's documentation.
Note that doctest extraction does not currently work for zip file
distributions. doctest discovery currently requires file system traversal.
"""
from __future__ import absolute_import, print_function, unicode_literals
from inspect import isfunction, ismethod
__metaclass__ = type
__all__ = [
'setup',
'teardown'
]
def stop():
"""Call into pdb.set_trace()"""
# Do the import here so that you get the wacky special hacked pdb instead
# of Python's normal pdb.
import pdb
pdb.set_trace()
def dump(results):
if results is None:
print(None)
return
for key in sorted(results):
if key == 'entries':
for i, entry in enumerate(results[key]):
# entry is a dictionary.
print('entry %d:' % i)
for entry_key in sorted(entry):
print(' {0}: {1}'.format(entry_key, entry[entry_key]))
else:
print('{0}: {1}'.format(key, results[key]))
def setup(testobj):
"""Test setup."""
# Make sure future statements in our doctests are the same as everywhere
# else.
testobj.globs['absolute_import'] = absolute_import
testobj.globs['print_function'] = print_function
testobj.globs['unicode_literals'] = unicode_literals
# In general, I don't like adding convenience functions, since I think
# doctests should do the imports themselves. It makes for better
# documentation that way. However, a few are really useful, or help to
# hide some icky test implementation details.
testobj.globs['stop'] = stop
testobj.globs['dump'] = dump
# Add this so that cleanups can be automatically added by the doctest.
testobj.globs['cleanups'] = []
def teardown(testobj):
for cleanup in testobj.globs['cleanups']:
if isfunction(cleanup) or ismethod(cleanup):
cleanup()
else:
cleanup[0](*cleanup[1:])
|
takis/django | refs/heads/master | tests/forms_tests/widget_tests/base.py | 192 | from django.test import SimpleTestCase
class WidgetTest(SimpleTestCase):
beatles = (('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))
def check_html(self, widget, name, value, html='', attrs=None, **kwargs):
output = widget.render(name, value, attrs=attrs, **kwargs)
self.assertHTMLEqual(output, html)
|
BCGamer/CheckIn-Server | refs/heads/master | registration/migrations/0005_auto_20140913_0515.py | 1 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('registration', '0004_registereduser_reg_erros'),
]
operations = [
migrations.RenameField(
model_name='registereduser',
old_name='reg_erros',
new_name='reg_errors',
),
]
|
cchurch/ansible-modules-core | refs/heads/devel | network/netvisor/pn_vrouter.py | 19 | #!/usr/bin/python
""" PN CLI vrouter-create/vrouter-delete/vrouter-modify """
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import shlex
DOCUMENTATION = """
---
module: pn_vrouter
author: "Pluribus Networks (@amitsi)"
version_added: "2.2"
version: 1
short_description: CLI command to create/delete/modify a vrouter.
description:
- Execute vrouter-create, vrouter-delete, vrouter-modify command.
- Each fabric, cluster, standalone switch, or virtual network (VNET) can
provide its tenants with a virtual router (vRouter) service that forwards
traffic between networks and implements Layer 3 protocols.
- C(vrouter-create) creates a new vRouter service.
- C(vrouter-delete) deletes a vRouter service.
- C(vrouter-modify) modifies a vRouter service.
options:
pn_cliusername:
description:
- Provide login username if user is not root.
required: False
pn_clipassword:
description:
- Provide login password if user is not root.
required: False
pn_cliswitch:
description:
- Target switch(es) to run the CLI on.
required: False
state:
description:
- State the action to perform. Use 'present' to create vrouter,
'absent' to delete vrouter and 'update' to modify vrouter.
required: True
choices: ['present', 'absent', 'update']
pn_name:
description:
- Specify the name of the vRouter.
required: true
pn_vnet:
description:
- Specify the name of the VNET.
- Required for vrouter-create.
pn_service_type:
description:
- Specify if the vRouter is a dedicated or shared VNET service.
choices: ['dedicated', 'shared']
pn_service_state:
description:
- Specify to enable or disable vRouter service.
choices: ['enable', 'disable']
pn_router_type:
description:
- Specify if the vRouter uses software or hardware.
- Note that if you specify hardware as router type, you cannot assign IP
addresses using DHCP. You must specify a static IP address.
choices: ['hardware', 'software']
pn_hw_vrrp_id:
description:
- Specifies the VRRP ID for a hardware vrouter.
pn_router_id:
description:
- Specify the vRouter IP address.
pn_bgp_as:
description:
- Specify the Autonomous System Number(ASN) if the vRouter runs Border
Gateway Protocol(BGP).
pn_bgp_redistribute:
description:
- Specify how BGP routes are redistributed.
choices: ['static', 'connected', 'rip', 'ospf']
pn_bgp_max_paths:
description:
- Specify the maximum number of paths for BGP. This is a number between
1 and 255 or 0 to unset.
pn_bgp_options:
description:
      - Specify other BGP options as a whitespace-separated string within
        single quotes ''.
pn_rip_redistribute:
description:
- Specify how RIP routes are redistributed.
choices: ['static', 'connected', 'ospf', 'bgp']
pn_ospf_redistribute:
description:
- Specify how OSPF routes are redistributed.
choices: ['static', 'connected', 'bgp', 'rip']
pn_ospf_options:
description:
      - Specify other OSPF options as a whitespace-separated string within
        single quotes ''.
"""
EXAMPLES = """
- name: create vrouter
pn_vrouter:
state: 'present'
pn_name: 'ansible-vrouter'
pn_vnet: 'ansible-fab-global'
pn_router_id: 208.74.182.1
- name: delete vrouter
pn_vrouter:
state: 'absent'
pn_name: 'ansible-vrouter'
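# Illustrative sketch only, not part of the original module documentation:
# modifying an existing vRouter with state 'update'; the BGP AS number is a
# placeholder value.
- name: modify vrouter
  pn_vrouter:
    state: 'update'
    pn_name: 'ansible-vrouter'
    pn_bgp_as: 65000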
"""
RETURN = """
command:
description: The CLI command run on the target node(s).
stdout:
description: The set of responses from the vrouter command.
returned: always
type: list
stderr:
description: The set of error responses from the vrouter command.
returned: on error
type: list
changed:
description: Indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
VROUTER_EXISTS = None
VROUTER_NAME_EXISTS = None
def pn_cli(module):
"""
This method is to generate the cli portion to launch the Netvisor cli.
It parses the username, password, switch parameters from module.
:param module: The Ansible module to fetch username, password and switch
:return: returns the cli string for further processing
"""
username = module.params['pn_cliusername']
password = module.params['pn_clipassword']
cliswitch = module.params['pn_cliswitch']
if username and password:
cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
else:
cli = '/usr/bin/cli --quiet '
if cliswitch == 'local':
cli += ' switch-local '
else:
cli += ' switch ' + cliswitch
return cli
def check_cli(module, cli):
"""
    This method checks for idempotency using the vrouter-show command.
A switch can have only one vRouter configuration.
If a vRouter already exists on the given switch, return VROUTER_EXISTS as
True else False.
If a vRouter with the given name exists(on a different switch), return
VROUTER_NAME_EXISTS as True else False.
:param module: The Ansible module to fetch input parameters
:param cli: The CLI string
:return Global Booleans: VROUTER_EXISTS, VROUTER_NAME_EXISTS
"""
name = module.params['pn_name']
# Global flags
global VROUTER_EXISTS, VROUTER_NAME_EXISTS
# Get the name of the local switch
location = cli + ' switch-setup-show format switch-name'
location = shlex.split(location)
out = module.run_command(location)[1]
location = out.split()[1]
# Check for any vRouters on the switch
check_vrouter = cli + ' vrouter-show location %s ' % location
check_vrouter += 'format name no-show-headers'
check_vrouter = shlex.split(check_vrouter)
out = module.run_command(check_vrouter)[1]
if out:
VROUTER_EXISTS = True
else:
VROUTER_EXISTS = False
# Check for any vRouters with the given name
show = cli + ' vrouter-show format name no-show-headers '
show = shlex.split(show)
out = module.run_command(show)[1]
out = out.split()
if name in out:
VROUTER_NAME_EXISTS = True
else:
VROUTER_NAME_EXISTS = False
def run_cli(module, cli):
"""
This method executes the cli command on the target node(s) and returns the
output. The module then exits based on the output.
:param cli: the complete cli string to be executed on the target node(s).
:param module: The Ansible module to fetch command
"""
cliswitch = module.params['pn_cliswitch']
state = module.params['state']
command = get_command_from_state(state)
cmd = shlex.split(cli)
# 'out' contains the output
# 'err' contains the error messages
result, out, err = module.run_command(cmd)
print_cli = cli.split(cliswitch)[1]
# Response in JSON format
if result != 0:
module.exit_json(
command=print_cli,
stderr=err.strip(),
msg="%s operation failed" % command,
changed=False
)
if out:
module.exit_json(
command=print_cli,
stdout=out.strip(),
msg="%s operation completed" % command,
changed=True
)
else:
module.exit_json(
command=print_cli,
msg="%s operation completed" % command,
changed=True
)
def get_command_from_state(state):
"""
    This method returns the appropriate command name for the specified
    state.
:param state: The state for which the respective command name is required.
"""
command = None
if state == 'present':
command = 'vrouter-create'
if state == 'absent':
command = 'vrouter-delete'
if state == 'update':
command = 'vrouter-modify'
return command
def main():
""" This section is for arguments parsing """
module = AnsibleModule(
argument_spec=dict(
pn_cliusername=dict(required=False, type='str'),
pn_clipassword=dict(required=False, type='str', no_log=True),
pn_cliswitch=dict(required=False, type='str', default='local'),
            state=dict(required=True, type='str',
choices=['present', 'absent', 'update']),
pn_name=dict(required=True, type='str'),
pn_vnet=dict(type='str'),
pn_service_type=dict(type='str', choices=['dedicated', 'shared']),
pn_service_state=dict(type='str', choices=['enable', 'disable']),
pn_router_type=dict(type='str', choices=['hardware', 'software']),
pn_hw_vrrp_id=dict(type='int'),
pn_router_id=dict(type='str'),
pn_bgp_as=dict(type='int'),
pn_bgp_redistribute=dict(type='str', choices=['static', 'connected',
'rip', 'ospf']),
pn_bgp_max_paths=dict(type='int'),
pn_bgp_options=dict(type='str'),
pn_rip_redistribute=dict(type='str', choices=['static', 'connected',
'bgp', 'ospf']),
pn_ospf_redistribute=dict(type='str', choices=['static', 'connected',
'bgp', 'rip']),
pn_ospf_options=dict(type='str'),
pn_vrrp_track_port=dict(type='str')
),
required_if=(
["state", "present", ["pn_name", "pn_vnet"]],
["state", "absent", ["pn_name"]],
["state", "update", ["pn_name"]]
)
)
# Accessing the arguments
state = module.params['state']
name = module.params['pn_name']
vnet = module.params['pn_vnet']
service_type = module.params['pn_service_type']
service_state = module.params['pn_service_state']
router_type = module.params['pn_router_type']
hw_vrrp_id = module.params['pn_hw_vrrp_id']
router_id = module.params['pn_router_id']
bgp_as = module.params['pn_bgp_as']
bgp_redistribute = module.params['pn_bgp_redistribute']
bgp_max_paths = module.params['pn_bgp_max_paths']
bgp_options = module.params['pn_bgp_options']
rip_redistribute = module.params['pn_rip_redistribute']
ospf_redistribute = module.params['pn_ospf_redistribute']
ospf_options = module.params['pn_ospf_options']
vrrp_track_port = module.params['pn_vrrp_track_port']
command = get_command_from_state(state)
# Building the CLI command string
cli = pn_cli(module)
if command == 'vrouter-delete':
check_cli(module, cli)
if VROUTER_NAME_EXISTS is False:
module.exit_json(
skipped=True,
msg='vRouter with name %s does not exist' % name
)
cli += ' %s name %s ' % (command, name)
else:
if command == 'vrouter-create':
check_cli(module, cli)
if VROUTER_EXISTS is True:
module.exit_json(
skipped=True,
msg='Maximum number of vRouters has been reached on this '
'switch'
)
if VROUTER_NAME_EXISTS is True:
module.exit_json(
skipped=True,
msg='vRouter with name %s already exists' % name
)
cli += ' %s name %s ' % (command, name)
if vnet:
cli += ' vnet ' + vnet
if service_type:
cli += ' %s-vnet-service ' % service_type
if service_state:
cli += ' ' + service_state
if router_type:
cli += ' router-type ' + router_type
if hw_vrrp_id:
cli += ' hw-vrrp-id ' + str(hw_vrrp_id)
if router_id:
cli += ' router-id ' + router_id
if bgp_as:
cli += ' bgp-as ' + str(bgp_as)
if bgp_redistribute:
cli += ' bgp-redistribute ' + bgp_redistribute
if bgp_max_paths:
cli += ' bgp-max-paths ' + str(bgp_max_paths)
if bgp_options:
cli += ' %s ' % bgp_options
if rip_redistribute:
cli += ' rip-redistribute ' + rip_redistribute
if ospf_redistribute:
cli += ' ospf-redistribute ' + ospf_redistribute
if ospf_options:
cli += ' %s ' % ospf_options
if vrrp_track_port:
cli += ' vrrp-track-port ' + vrrp_track_port
run_cli(module, cli)
# AnsibleModule boilerplate
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
|
irwinlove/django | refs/heads/master | django/contrib/flatpages/migrations/0001_initial.py | 134 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='FlatPage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('url', models.CharField(max_length=100, verbose_name='URL', db_index=True)),
('title', models.CharField(max_length=200, verbose_name='title')),
('content', models.TextField(verbose_name='content', blank=True)),
('enable_comments', models.BooleanField(default=False, verbose_name='enable comments')),
('template_name', models.CharField(help_text="Example: 'flatpages/contact_page.html'. If this isn't provided, the system will use 'flatpages/default.html'.", max_length=70, verbose_name='template name', blank=True)),
('registration_required', models.BooleanField(default=False, help_text='If this is checked, only logged-in users will be able to view the page.', verbose_name='registration required')),
('sites', models.ManyToManyField(to='sites.Site', verbose_name='sites')),
],
options={
'ordering': ('url',),
'db_table': 'django_flatpage',
'verbose_name': 'flat page',
'verbose_name_plural': 'flat pages',
},
bases=(models.Model,),
),
]
|
ryfeus/lambda-packs | refs/heads/master | Keras_tensorflow_nightly/source2.7/tensorflow/contrib/distributions/python/ops/deterministic.py | 18 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Deterministic distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import distribution
__all__ = [
"Deterministic",
"VectorDeterministic",
]
@six.add_metaclass(abc.ABCMeta)
class _BaseDeterministic(distribution.Distribution):
"""Base class for Deterministic distributions."""
def __init__(self,
loc,
atol=None,
rtol=None,
is_vector=False,
validate_args=False,
allow_nan_stats=True,
name="_BaseDeterministic"):
"""Initialize a batch of `_BaseDeterministic` distributions.
The `atol` and `rtol` parameters allow for some slack in `pmf`, `cdf`
computations, e.g. due to floating-point error.
```
pmf(x; loc)
= 1, if Abs(x - loc) <= atol + rtol * Abs(loc),
= 0, otherwise.
```
Args:
loc: Numeric `Tensor`. The point (or batch of points) on which this
distribution is supported.
atol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
shape. The absolute tolerance for comparing closeness to `loc`.
Default is `0`.
rtol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
shape. The relative tolerance for comparing closeness to `loc`.
Default is `0`.
is_vector: Python `bool`. If `True`, this is for `VectorDeterministic`,
else `Deterministic`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: If `loc` is a scalar.
"""
parameters = locals()
with ops.name_scope(name, values=[loc, atol, rtol]):
loc = ops.convert_to_tensor(loc, name="loc")
if is_vector and validate_args:
msg = "Argument loc must be at least rank 1."
if loc.get_shape().ndims is not None:
if loc.get_shape().ndims < 1:
raise ValueError(msg)
else:
loc = control_flow_ops.with_dependencies(
[check_ops.assert_rank_at_least(loc, 1, message=msg)], loc)
self._loc = loc
super(_BaseDeterministic, self).__init__(
dtype=self._loc.dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._loc],
name=name)
self._atol = self._get_tol(atol)
self._rtol = self._get_tol(rtol)
# Avoid using the large broadcast with self.loc if possible.
if rtol is None:
self._slack = self.atol
else:
self._slack = self.atol + self.rtol * math_ops.abs(self.loc)
def _get_tol(self, tol):
if tol is None:
return ops.convert_to_tensor(0, dtype=self.loc.dtype)
tol = ops.convert_to_tensor(tol, dtype=self.loc.dtype)
if self.validate_args:
tol = control_flow_ops.with_dependencies([
check_ops.assert_non_negative(
tol, message="Argument 'tol' must be non-negative")
], tol)
return tol
@property
def loc(self):
"""Point (or batch of points) at which this distribution is supported."""
return self._loc
@property
def atol(self):
"""Absolute tolerance for comparing points to `self.loc`."""
return self._atol
@property
def rtol(self):
"""Relative tolerance for comparing points to `self.loc`."""
return self._rtol
def _mean(self):
return array_ops.identity(self.loc)
def _variance(self):
return array_ops.zeros_like(self.loc)
def _mode(self):
return self.mean()
  def _sample_n(self, n, seed=None):  # pylint: disable=unused-argument
n_static = tensor_util.constant_value(ops.convert_to_tensor(n))
if n_static is not None and self.loc.get_shape().ndims is not None:
ones = [1] * self.loc.get_shape().ndims
multiples = [n_static] + ones
else:
ones = array_ops.ones_like(array_ops.shape(self.loc))
multiples = array_ops.concat(([n], ones), axis=0)
return array_ops.tile(self.loc[array_ops.newaxis, ...], multiples=multiples)
class Deterministic(_BaseDeterministic):
"""Scalar `Deterministic` distribution on the real line.
The scalar `Deterministic` distribution is parameterized by a [batch] point
`loc` on the real line. The distribution is supported at this point only,
and corresponds to a random variable that is constant, equal to `loc`.
See [Degenerate rv](https://en.wikipedia.org/wiki/Degenerate_distribution).
#### Mathematical Details
The probability mass function (pmf) and cumulative distribution function (cdf)
are
```none
pmf(x; loc) = 1, if x == loc, else 0
cdf(x; loc) = 1, if x >= loc, else 0
```
#### Examples
```python
# Initialize a single Deterministic supported at zero.
constant = tf.contrib.distributions.Deterministic(0.)
constant.prob(0.)
==> 1.
constant.prob(2.)
==> 0.
# Initialize a [2, 2] batch of scalar constants.
loc = [[0., 1.], [2., 3.]]
x = [[0., 1.1], [1.99, 3.]]
constant = tf.contrib.distributions.Deterministic(loc)
constant.prob(x)
==> [[1., 0.], [0., 1.]]
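  # Added illustration with assumed values (not in the original docstring):
  # a small absolute tolerance gives mass to nearby points as well.
  fuzzy = tf.contrib.distributions.Deterministic(0., atol=0.05)
  fuzzy.prob(0.04)
  ==> 1.
  fuzzy.prob(0.06)
  ==> 0.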
```
"""
def __init__(self,
loc,
atol=None,
rtol=None,
validate_args=False,
allow_nan_stats=True,
name="Deterministic"):
"""Initialize a scalar `Deterministic` distribution.
The `atol` and `rtol` parameters allow for some slack in `pmf`, `cdf`
computations, e.g. due to floating-point error.
```
pmf(x; loc)
= 1, if Abs(x - loc) <= atol + rtol * Abs(loc),
= 0, otherwise.
```
Args:
loc: Numeric `Tensor` of shape `[B1, ..., Bb]`, with `b >= 0`.
The point (or batch of points) on which this distribution is supported.
atol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
shape. The absolute tolerance for comparing closeness to `loc`.
Default is `0`.
rtol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
shape. The relative tolerance for comparing closeness to `loc`.
Default is `0`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
super(Deterministic, self).__init__(
loc,
atol=atol,
rtol=rtol,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
def _batch_shape_tensor(self):
return array_ops.shape(self.loc)
def _batch_shape(self):
return self.loc.get_shape()
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _prob(self, x):
return math_ops.cast(
math_ops.abs(x - self.loc) <= self._slack, dtype=self.dtype)
def _cdf(self, x):
return math_ops.cast(x >= self.loc - self._slack, dtype=self.dtype)
class VectorDeterministic(_BaseDeterministic):
"""Vector `Deterministic` distribution on `R^k`.
The `VectorDeterministic` distribution is parameterized by a [batch] point
`loc in R^k`. The distribution is supported at this point only,
and corresponds to a random variable that is constant, equal to `loc`.
See [Degenerate rv](https://en.wikipedia.org/wiki/Degenerate_distribution).
#### Mathematical Details
The probability mass function (pmf) is
```none
pmf(x; loc)
= 1, if All[Abs(x - loc) <= atol + rtol * Abs(loc)],
= 0, otherwise.
```
#### Examples
```python
tfd = tf.contrib.distributions
# Initialize a single VectorDeterministic supported at [0., 2.] in R^2.
constant = tfd.Deterministic([0., 2.])
constant.prob([0., 2.])
==> 1.
constant.prob([0., 3.])
==> 0.
# Initialize a [3] batch of constants on R^2.
loc = [[0., 1.], [2., 3.], [4., 5.]]
constant = tfd.VectorDeterministic(loc)
constant.prob([[0., 1.], [1.9, 3.], [3.99, 5.]])
==> [1., 0., 0.]
```
"""
def __init__(self,
loc,
atol=None,
rtol=None,
validate_args=False,
allow_nan_stats=True,
name="VectorDeterministic"):
"""Initialize a `VectorDeterministic` distribution on `R^k`, for `k >= 0`.
Note that there is only one point in `R^0`, the "point" `[]`. So if `k = 0`
then `self.prob([]) == 1`.
The `atol` and `rtol` parameters allow for some slack in `pmf`
computations, e.g. due to floating-point error.
```
pmf(x; loc)
= 1, if All[Abs(x - loc) <= atol + rtol * Abs(loc)],
= 0, otherwise
```
Args:
loc: Numeric `Tensor` of shape `[B1, ..., Bb, k]`, with `b >= 0`, `k >= 0`
The point (or batch of points) on which this distribution is supported.
atol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
shape. The absolute tolerance for comparing closeness to `loc`.
Default is `0`.
rtol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
shape. The relative tolerance for comparing closeness to `loc`.
Default is `0`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
super(VectorDeterministic, self).__init__(
loc,
atol=atol,
rtol=rtol,
is_vector=True,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
def _batch_shape_tensor(self):
return array_ops.shape(self.loc)[:-1]
def _batch_shape(self):
return self.loc.get_shape()[:-1]
def _event_shape_tensor(self):
return array_ops.shape(self.loc)[-1]
def _event_shape(self):
return self.loc.get_shape()[-1:]
def _prob(self, x):
if self.validate_args:
is_vector_check = check_ops.assert_rank_at_least(x, 1)
right_vec_space_check = check_ops.assert_equal(
self.event_shape_tensor(),
array_ops.gather(array_ops.shape(x), array_ops.rank(x) - 1),
message=
"Argument 'x' not defined in the same space R^k as this distribution")
with ops.control_dependencies([is_vector_check]):
with ops.control_dependencies([right_vec_space_check]):
x = array_ops.identity(x)
return math_ops.cast(
math_ops.reduce_all(math_ops.abs(x - self.loc) <= self._slack, axis=-1),
dtype=self.dtype)
|
rolandgeider/wger | refs/heads/master | wger/manager/tests/test_pdf.py | 1 | # This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Workout Manager. If not, see <http://www.gnu.org/licenses/>.
# Django
from django.contrib.auth.models import User
from django.urls import reverse
# wger
from wger.core.tests.base_testcase import WgerTestCase
from wger.utils.helpers import make_token
class WorkoutPdfLogExportTestCase(WgerTestCase):
"""
Tests exporting a workout as a pdf
"""
def export_pdf_token(self):
"""
Helper function to test exporting a workout as a pdf using tokens
"""
user = User.objects.get(username='test')
uid, token = make_token(user)
response = self.client.get(reverse('manager:workout:pdf-log', kwargs={'id': 3,
'uidb64': uid,
'token': token}))
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/pdf')
self.assertEqual(response['Content-Disposition'],
'attachment; filename=Workout-3-log.pdf')
# Approximate size only
self.assertGreater(int(response['Content-Length']), 38000)
self.assertLess(int(response['Content-Length']), 42000)
def export_pdf_token_wrong(self):
"""
Helper function to test exporting a workout as a pdf using a wrong token
"""
uid = 'AB'
token = 'abc-11223344556677889900'
response = self.client.get(reverse('manager:workout:pdf-log', kwargs={'id': 3,
'uidb64': uid,
'token': token}))
self.assertEqual(response.status_code, 403)
def export_pdf(self, fail=False):
"""
Helper function to test exporting a workout as a pdf
"""
response = self.client.get(reverse('manager:workout:pdf-log', kwargs={'id': 3}))
if fail:
self.assertIn(response.status_code, (403, 404, 302))
else:
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/pdf')
self.assertEqual(response['Content-Disposition'],
'attachment; filename=Workout-3-log.pdf')
# Approximate size only
self.assertGreater(int(response['Content-Length']), 38000)
self.assertLess(int(response['Content-Length']), 42000)
def export_pdf_with_comments(self, fail=False):
"""
Helper function to test exporting a workout as a pdf, with exercise coments
"""
response = self.client.get(reverse('manager:workout:pdf-log', kwargs={'id': 3,
'comments': 0}))
if fail:
self.assertIn(response.status_code, (403, 404, 302))
else:
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/pdf')
self.assertEqual(response['Content-Disposition'],
'attachment; filename=Workout-3-log.pdf')
# Approximate size only
self.assertGreater(int(response['Content-Length']), 38000)
self.assertLess(int(response['Content-Length']), 42000)
def export_pdf_with_images(self, fail=False):
"""
Helper function to test exporting a workout as a pdf, with exercise images
"""
response = self.client.get(reverse('manager:workout:pdf-log', kwargs={'id': 3,
'images': 1}))
if fail:
self.assertIn(response.status_code, (403, 404, 302))
else:
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/pdf')
self.assertEqual(response['Content-Disposition'],
'attachment; filename=Workout-3-log.pdf')
# Approximate size only
self.assertGreater(int(response['Content-Length']), 38000)
self.assertLess(int(response['Content-Length']), 42000)
def export_pdf_with_images_and_comments(self, fail=False):
"""
Helper function to test exporting a workout as a pdf, with images and comments
"""
response = self.client.get(reverse('manager:workout:pdf-log', kwargs={'id': 3,
'images': 1,
'comments': 1}))
if fail:
self.assertIn(response.status_code, (403, 404, 302))
else:
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/pdf')
self.assertEqual(response['Content-Disposition'],
'attachment; filename=Workout-3-log.pdf')
# Approximate size only
self.assertGreater(int(response['Content-Length']), 38000)
self.assertLess(int(response['Content-Length']), 42000)
def test_export_pdf_anonymous(self):
"""
Tests exporting a workout as a pdf as an anonymous user
"""
self.export_pdf(fail=True)
self.export_pdf_token()
self.export_pdf_token_wrong()
def test_export_pdf_owner(self):
"""
Tests exporting a workout as a pdf as the owner user
"""
self.user_login('test')
self.export_pdf(fail=False)
self.export_pdf_token()
self.export_pdf_token_wrong()
def test_export_pdf_other(self):
"""
Tests exporting a workout as a pdf as a logged user not owning the data
"""
self.user_login('admin')
self.export_pdf(fail=True)
self.export_pdf_token()
self.export_pdf_token_wrong()
class WorkoutPdfTableExportTestCase(WgerTestCase):
"""
Tests exporting a workout as a pdf
"""
def export_pdf_token(self):
"""
Helper function to test exporting a workout as a pdf using tokens
"""
user = User.objects.get(username='test')
uid, token = make_token(user)
response = self.client.get(reverse('manager:workout:pdf-table', kwargs={'id': 3,
'uidb64': uid,
'token': token}))
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/pdf')
self.assertEqual(response['Content-Disposition'],
'attachment; filename=Workout-3-table.pdf')
# Approximate size only
self.assertGreater(int(response['Content-Length']), 38000)
self.assertLess(int(response['Content-Length']), 42000)
def export_pdf_token_wrong(self):
"""
Helper function to test exporting a workout as a pdf using a wrong token
"""
uid = 'AB'
token = 'abc-11223344556677889900'
response = self.client.get(reverse('manager:workout:pdf-table', kwargs={'id': 3,
'uidb64': uid,
'token': token}))
self.assertEqual(response.status_code, 403)
def export_pdf(self, fail=False):
"""
Helper function to test exporting a workout as a pdf
"""
# Create a workout
response = self.client.get(reverse('manager:workout:pdf-table', kwargs={'id': 3}))
if fail:
self.assertIn(response.status_code, (403, 404, 302))
else:
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/pdf')
self.assertEqual(response['Content-Disposition'],
'attachment; filename=Workout-3-table.pdf')
# Approximate size only
self.assertGreater(int(response['Content-Length']), 38000)
self.assertLess(int(response['Content-Length']), 42000)
def test_export_pdf_anonymous(self):
"""
Tests exporting a workout as a pdf as an anonymous user
"""
self.export_pdf(fail=True)
self.export_pdf_token()
self.export_pdf_token_wrong()
def test_export_pdf_owner(self):
"""
Tests exporting a workout as a pdf as the owner user
"""
self.user_login('test')
self.export_pdf(fail=False)
self.export_pdf_token()
self.export_pdf_token_wrong()
def test_export_pdf_other(self):
"""
Tests exporting a workout as a pdf as a logged user not owning the data
"""
self.user_login('admin')
self.export_pdf(fail=True)
self.export_pdf_token()
self.export_pdf_token_wrong()
|
mateon1/servo | refs/heads/master | tests/wpt/web-platform-tests/XMLHttpRequest/resources/chunked.py | 219 | def main(request, response):
chunks = ["First chunk\r\n",
"Second chunk\r\n",
"Yet another (third) chunk\r\n",
"Yet another (fourth) chunk\r\n",
]
response.headers.set("Transfer-Encoding", "chunked");
response.headers.set("Trailer", "X-Test-Me");
response.headers.set("Content-Type", "text/plain");
response.write_status_headers()
for value in chunks:
response.writer.write("%x\r\n" % len(value))
response.writer.write(value)
response.writer.write("\r\n")
response.writer.write("0\r\n")
response.writer.write("X-Test-Me: Trailer header value\r\n\r\n")
|
ShownX/incubator-mxnet | refs/heads/master | python/mxnet/test_utils.py | 1 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tools for testing."""
# pylint: disable=too-many-lines
from __future__ import absolute_import, print_function, division
import time
import gzip
import struct
import traceback
import numbers
import subprocess
import sys
import os
import errno
import logging
import bz2
from contextlib import contextmanager
import numpy as np
import numpy.testing as npt
import numpy.random as rnd
try:
import requests
except ImportError:
    # in rare cases requests may not be installed
pass
import mxnet as mx
from .context import Context
from .ndarray.ndarray import _STORAGE_TYPE_STR_TO_ID
from .ndarray import array
from .symbol import Symbol
_rng = np.random.RandomState(1234)
def default_context():
"""Get default context for regression test."""
# _TODO: get context from environment variable to support
# testing with GPUs
return Context.default_ctx
def set_default_context(ctx):
"""Set default context."""
Context.default_ctx = ctx
def default_dtype():
"""Get default data type for regression test."""
# _TODO: get default dtype from environment variable
return np.float32
def get_atol(atol=None):
"""Get default numerical threshold for regression test."""
# _TODO: get from env variable, different threshold might
# be needed for different device and dtype
return 1e-20 if atol is None else atol
def get_rtol(rtol=None):
"""Get default numerical threshold for regression test."""
# _TODO: get from env variable, different threshold might
# be needed for different device and dtype
return 1e-5 if rtol is None else rtol
def random_arrays(*shapes):
"""Generate some random numpy arrays."""
arrays = [np.random.randn(*s).astype(default_dtype())
for s in shapes]
if len(arrays) == 1:
return arrays[0]
return arrays
def random_sample(population, k):
"""Return a k length list of the elements chosen from the population sequence."""
assert 0 <= k <= len(population)
population_copy = population[:]
np.random.shuffle(population_copy)
return population_copy[0:k]
def _validate_csr_generation_inputs(num_rows, num_cols, density,
distribution="uniform"):
"""Validates inputs for csr generation helper functions
"""
total_nnz = int(num_rows * num_cols * density)
if density < 0 or density > 1:
raise ValueError("density has to be between 0 and 1")
if num_rows <= 0 or num_cols <= 0:
raise ValueError("num_rows or num_cols should be greater than 0")
if distribution == "powerlaw":
if total_nnz < 2 * num_rows:
raise ValueError("not supported for this density: %s"
" for this shape (%s, %s)"
" Please keep :"
" num_rows * num_cols * density >= 2 * num_rows"
% (density, num_rows, num_cols))
def shuffle_csr_column_indices(csr):
"""Shuffle CSR column indices per row
This allows validation of unordered column indices, which is not a requirement
for a valid CSR matrix
"""
row_count = len(csr.indptr) - 1
for i in range(row_count):
start_index = csr.indptr[i]
end_index = csr.indptr[i + 1]
sublist = np.array(csr.indices[start_index : end_index])
np.random.shuffle(sublist)
csr.indices[start_index : end_index] = sublist
def _get_uniform_dataset_csr(num_rows, num_cols, density=0.1, dtype=None,
data_init=None, shuffle_csr_indices=False):
"""Returns CSRNDArray with uniform distribution
    This generates a csr matrix with total_nnz unique randomly chosen numbers
from num_rows*num_cols and arranges them in the 2d array in the
following way:
    row_index = (random_number_generated / num_cols)
col_index = random_number_generated - row_index * num_cols
"""
_validate_csr_generation_inputs(num_rows, num_cols, density,
distribution="uniform")
try:
from scipy import sparse as spsp
csr = spsp.rand(num_rows, num_cols, density, dtype=dtype, format="csr")
if data_init is not None:
csr.data.fill(data_init)
if shuffle_csr_indices is True:
shuffle_csr_column_indices(csr)
result = mx.nd.sparse.csr_matrix(csr.data, csr.indptr, csr.indices,
(num_rows, num_cols), dtype=dtype)
except ImportError:
assert(data_init is None), \
"data_init option is not supported when scipy is absent"
assert(not shuffle_csr_indices), \
"shuffle_csr_indices option is not supported when scipy is absent"
# scipy not available. try to generate one from a dense array
dns = mx.nd.random.uniform(shape=(num_rows, num_cols), dtype=dtype)
masked_dns = dns * (dns < density)
result = masked_dns.tostype('csr')
return result
def _get_powerlaw_dataset_csr(num_rows, num_cols, density=0.1, dtype=None):
"""Returns CSRNDArray with powerlaw distribution
with exponentially increasing number of non zeros in each row.
    Not supported for cases where total_nnz < 2*num_rows. This is because
    the algorithm first ensures that no row is empty by placing a non-zero
    at the beginning of each row.
"""
_validate_csr_generation_inputs(num_rows, num_cols, density,
distribution="powerlaw")
total_nnz = int(num_rows * num_cols * density)
unused_nnz = total_nnz
output_arr = np.zeros((num_rows, num_cols), dtype=dtype)
# Start with ones on each row so that no row is empty
for row in range(num_rows):
output_arr[row][0] = 1 + rnd.uniform(0.001, 2)
unused_nnz = unused_nnz - 1
if unused_nnz <= 0:
return mx.nd.array(output_arr).tostype("csr")
# Populate rest of matrix with 2^i items in ith row.
# if we have used all total nnz return the sparse matrix
# else if we reached max column size then fill up full columns until we use all nnz
col_max = 2
for row in range(num_rows):
col_limit = min(num_cols, col_max)
# In case col_limit reached assign same value to all elements, which is much faster
if col_limit == num_cols and unused_nnz > col_limit:
output_arr[row] = 1 + rnd.uniform(0.001, 2)
unused_nnz = unused_nnz - col_limit + 1
if unused_nnz <= 0:
return mx.nd.array(output_arr).tostype("csr")
else:
continue
for col_index in range(1, col_limit):
output_arr[row][col_index] = 1 + rnd.uniform(0.001, 2)
unused_nnz = unused_nnz - 1
if unused_nnz <= 0:
return mx.nd.array(output_arr).tostype("csr")
col_max = col_max * 2
if unused_nnz > 0:
raise ValueError("not supported for this density: %s"
" for this shape (%s,%s)" % (density, num_rows, num_cols))
else:
return mx.nd.array(output_arr).tostype("csr")
def assign_each(the_input, function):
"""Return ndarray composed of passing each array value through some function"""
if function is not None:
it_input = np.nditer(the_input, flags=['f_index'])
output = np.zeros(the_input.shape)
it_out = np.nditer(output, flags=['f_index'], op_flags=['writeonly'])
while not it_input.finished:
val_input = it_input[0]
it_out[0] = function(val_input)
it_input.iternext()
it_out.iternext()
return output
else:
return np.array(the_input)
def assign_each2(input1, input2, function):
"""Return ndarray composed of passing two array values through some function"""
if function is not None:
assert input1.shape == input2.shape
it_input1 = np.nditer(input1, flags=['f_index'])
it_input2 = np.nditer(input2, flags=['f_index'])
output = np.zeros(input1.shape)
it_out = np.nditer(output, flags=['f_index'], op_flags=['writeonly'])
while not it_input1.finished:
val_input1 = it_input1[0]
val_input2 = it_input2[0]
it_out[0] = function(val_input1, val_input2)
it_input1.iternext()
it_input2.iternext()
it_out.iternext()
return output
else:
return np.array(input1)
# TODO(haibin) also include types in arguments
def rand_sparse_ndarray(shape, stype, density=None, dtype=None, distribution=None,
data_init=None, rsp_indices=None, modifier_func=None,
shuffle_csr_indices=False):
"""Generate a random sparse ndarray. Returns the ndarray, value(np) and indices(np)
Parameters
----------
shape: list or tuple
stype: str, valid values: "csr" or "row_sparse"
density, optional: float, should be between 0 and 1
distribution, optional: str, valid values: "uniform" or "powerlaw"
dtype, optional: numpy.dtype, default value is None
Returns
-------
Result of type CSRNDArray or RowSparseNDArray
Examples
--------
Below is an example of the powerlaw distribution with csr as the stype.
It calculates the nnz using the shape and density.
It fills up the ndarray with exponentially increasing number of elements.
    If there are enough unused nnzs, the (n+1)th row will have twice as many nnzs as the nth row;
    otherwise, the remaining unused nnzs are placed in the (n+1)th row.
    If the number of columns is too small and the maximum column size has already been reached,
    it fills up full columns in all following rows until the required density is reached.
>>> csr_arr, _ = rand_sparse_ndarray(shape=(5, 16), stype="csr",
density=0.50, distribution="powerlaw")
>>> indptr = csr_arr.indptr.asnumpy()
>>> indices = csr_arr.indices.asnumpy()
>>> data = csr_arr.data.asnumpy()
>>> row2nnz = len(data[indptr[1]:indptr[2]])
>>> row3nnz = len(data[indptr[2]:indptr[3]])
>>> assert(row3nnz == 2*row2nnz)
>>> row4nnz = len(data[indptr[3]:indptr[4]])
>>> assert(row4nnz == 2*row3nnz)
"""
density = rnd.rand() if density is None else density
dtype = default_dtype() if dtype is None else dtype
distribution = "uniform" if distribution is None else distribution
if stype == 'row_sparse':
assert (distribution == "uniform"), \
"Distribution %s not supported for row_sparse" % (distribution)
# sample index
if rsp_indices is not None:
indices = rsp_indices
assert(len(indices) <= shape[0])
else:
idx_sample = rnd.rand(shape[0])
indices = np.argwhere(idx_sample < density).flatten()
if indices.shape[0] == 0:
result = mx.nd.zeros(shape, stype='row_sparse', dtype=dtype)
return result, (np.array([], dtype=dtype), np.array([], dtype='int64'))
# generate random values
val = rnd.rand(indices.shape[0], *shape[1:]).astype(dtype)
# Allow caller to override or adjust random values
if data_init is not None:
val.fill(data_init)
if modifier_func is not None:
val = assign_each(val, modifier_func)
arr = mx.nd.sparse.row_sparse_array(val, indices, shape, indices_type=np.int64, dtype=dtype)
return arr, (val, indices)
elif stype == 'csr':
assert len(shape) == 2
if distribution == "uniform":
csr = _get_uniform_dataset_csr(shape[0], shape[1], density,
data_init=data_init,
shuffle_csr_indices=shuffle_csr_indices, dtype=dtype)
return csr, (csr.indptr, csr.indices, csr.data)
elif distribution == "powerlaw":
csr = _get_powerlaw_dataset_csr(shape[0], shape[1], density=density, dtype=dtype)
return csr, (csr.indptr, csr.indices, csr.data)
else:
assert(False), "Distribution not supported: %s" % (distribution)
else:
assert(False), "unknown storage type"
def rand_ndarray(shape, stype, density=None, dtype=None,
modifier_func=None, shuffle_csr_indices=False, distribution=None):
if stype == 'default':
arr = mx.nd.array(random_arrays(shape), dtype=dtype)
else:
arr, _ = rand_sparse_ndarray(shape, stype, density=density,
modifier_func=modifier_func, dtype=dtype,
shuffle_csr_indices=shuffle_csr_indices,
distribution=distribution)
return arr
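# Illustrative usage sketch for rand_ndarray (added for clarity; not part of
# the original module). The shapes and density below are arbitrary examples.
def _example_rand_ndarray():
    """Sketch only: build one dense and one CSR random NDArray."""
    dense = rand_ndarray((3, 4), 'default')
    csr = rand_ndarray((3, 4), 'csr', density=0.5)
    return dense, csr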
def create_sparse_array(shape, stype, data_init=None, rsp_indices=None,
dtype=None, modifier_func=None, density=.5,
shuffle_csr_indices=False):
"""Create a sparse array, For Rsp, assure indices are in a canonical format"""
if stype == 'row_sparse':
if rsp_indices is not None:
arr_indices = np.asarray(rsp_indices)
arr_indices.sort()
else:
arr_indices = None
arr_data, (_, _) = rand_sparse_ndarray(shape, stype,
density=density,
data_init=data_init,
rsp_indices=arr_indices,
dtype=dtype,
modifier_func=modifier_func)
elif stype == 'csr':
arr_data, (_, _, _) = rand_sparse_ndarray(shape,
stype,
density=density,
data_init=data_init,
dtype=dtype,
modifier_func=modifier_func,
shuffle_csr_indices=shuffle_csr_indices)
else:
msg = "Unknown storage type: " + stype
raise AssertionError(msg)
return arr_data
def create_sparse_array_zd(shape, stype, density, data_init=None,
rsp_indices=None, dtype=None, modifier_func=None,
shuffle_csr_indices=False):
"""Create sparse array, using only rsp_indices to determine density"""
if stype == 'row_sparse':
density = 0.0
if rsp_indices is not None:
assert len(rsp_indices) <= shape[0]
return create_sparse_array(shape, stype,
data_init=data_init,
rsp_indices=rsp_indices,
dtype=dtype,
modifier_func=modifier_func,
density=density,
shuffle_csr_indices=shuffle_csr_indices)
def rand_shape_2d(dim0=10, dim1=10):
return rnd.randint(1, dim0 + 1), rnd.randint(1, dim1 + 1)
def rand_shape_3d(dim0=10, dim1=10, dim2=10):
return rnd.randint(1, dim0 + 1), rnd.randint(1, dim1 + 1), rnd.randint(1, dim2 + 1)
def rand_shape_nd(num_dim, dim=10):
return tuple(rnd.randint(1, dim+1, size=num_dim))
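# Illustrative sketch (added; not original code): the rand_shape_* helpers
# draw each dimension uniformly from [1, dim], which is handy for fuzzing
# operators with randomly sized inputs.
def _example_rand_shapes():
    """Sketch only: the bounds below are arbitrary."""
    shape_2d = rand_shape_2d(dim0=4, dim1=4)    # e.g. (3, 2)
    shape_nd = rand_shape_nd(num_dim=3, dim=5)  # e.g. (4, 1, 5)
    return shape_2d, shape_nd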
def np_reduce(dat, axis, keepdims, numpy_reduce_func):
"""Compatible reduce for old version of NumPy.
Parameters
----------
dat : np.ndarray
Same as NumPy.
axis : None or int or list-like
Same as NumPy.
keepdims : bool
Same as NumPy.
numpy_reduce_func : function
A NumPy reducing function like ``np.sum`` or ``np.max``.
"""
if isinstance(axis, int):
axis = [axis]
else:
axis = list(axis) if axis is not None else range(len(dat.shape))
ret = dat
for i in reversed(sorted(axis)):
ret = numpy_reduce_func(ret, axis=i)
if keepdims:
keepdims_shape = list(dat.shape)
for i in axis:
keepdims_shape[i] = 1
ret = ret.reshape(tuple(keepdims_shape))
return ret
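# Usage sketch for np_reduce (illustrative addition, not in the original
# source): reduce over axis 1 while keeping the reduced dimension.
def _example_np_reduce():
    """Sketch only: expected result is array([[3], [12]])."""
    dat = np.arange(6).reshape(2, 3)
    return np_reduce(dat, axis=1, keepdims=True, numpy_reduce_func=np.sum)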
def find_max_violation(a, b, rtol=None, atol=None):
"""Finds and returns the location of maximum violation."""
rtol = get_rtol(rtol)
atol = get_atol(atol)
diff = np.abs(a-b)
tol = atol + rtol*np.abs(b)
violation = diff/(tol+1e-20)
loc = np.argmax(violation)
idx = np.unravel_index(loc, violation.shape)
return idx, np.max(violation)
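# Usage sketch for find_max_violation (illustrative addition): locate the
# element whose difference exceeds the tolerance by the largest factor.
def _example_find_max_violation():
    """Sketch only: the middle element differs most, so idx == (1,)."""
    a = np.array([1.0, 2.0, 3.0])
    b = np.array([1.0, 2.5, 3.0])
    idx, violation = find_max_violation(a, b, rtol=1e-3, atol=1e-3)
    return idx, violation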
def same(a, b):
"""Test if two NumPy arrays are the same.
Parameters
----------
a : np.ndarray
b : np.ndarray
"""
return np.array_equal(a, b)
def almost_equal(a, b, rtol=None, atol=None, equal_nan=False):
"""Test if two numpy arrays are almost equal."""
return np.allclose(a, b, rtol=get_rtol(rtol), atol=get_atol(atol), equal_nan=equal_nan)
def assert_almost_equal(a, b, rtol=None, atol=None, names=('a', 'b'), equal_nan=False):
"""Test that two numpy arrays are almost equal. Raise exception message if not.
Parameters
----------
a : np.ndarray
b : np.ndarray
threshold : None or float
The checking threshold. Default threshold will be used if set to ``None``.
"""
rtol = get_rtol(rtol)
atol = get_atol(atol)
if almost_equal(a, b, rtol, atol, equal_nan=equal_nan):
return
index, rel = find_max_violation(a, b, rtol, atol)
np.set_printoptions(threshold=4, suppress=True)
msg = npt.build_err_msg([a, b],
err_msg="Error %f exceeds tolerance rtol=%f, atol=%f. "
" Location of maximum error:%s, a=%f, b=%f"
% (rel, rtol, atol, str(index), a[index], b[index]),
names=names)
raise AssertionError(msg)
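# Illustrative sketch (added): assert_almost_equal passes silently within
# tolerance and raises an AssertionError with a detailed message otherwise.
def _example_assert_almost_equal():
    """Sketch only: the 1e-7 difference is inside rtol=1e-5."""
    a = np.array([1.0, 2.0])
    b = np.array([1.0, 2.0 + 1e-7])
    assert_almost_equal(a, b, rtol=1e-5, atol=1e-8)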
def almost_equal_ignore_nan(a, b, rtol=None, atol=None):
"""Test that two NumPy arrays are almost equal (ignoring NaN in either array).
    Combines a relative and absolute measure of approximate equality.
If either the relative or absolute check passes, the arrays are considered equal.
Including an absolute check resolves issues with the relative check where all
array values are close to zero.
Parameters
----------
a : np.ndarray
b : np.ndarray
rtol : None or float
The relative threshold. Default threshold will be used if set to ``None``.
atol : None or float
The absolute threshold. Default threshold will be used if set to ``None``.
"""
a = np.copy(a)
b = np.copy(b)
nan_mask = np.logical_or(np.isnan(a), np.isnan(b))
a[nan_mask] = 0
b[nan_mask] = 0
return almost_equal(a, b, rtol, atol)
def assert_almost_equal_ignore_nan(a, b, rtol=None, atol=None, names=('a', 'b')):
"""Test that two NumPy arrays are almost equal (ignoring NaN in either array).
    Combines a relative and absolute measure of approximate equality.
If either the relative or absolute check passes, the arrays are considered equal.
Including an absolute check resolves issues with the relative check where all
array values are close to zero.
Parameters
----------
a : np.ndarray
b : np.ndarray
rtol : None or float
The relative threshold. Default threshold will be used if set to ``None``.
atol : None or float
The absolute threshold. Default threshold will be used if set to ``None``.
"""
a = np.copy(a)
b = np.copy(b)
nan_mask = np.logical_or(np.isnan(a), np.isnan(b))
a[nan_mask] = 0
b[nan_mask] = 0
assert_almost_equal(a, b, rtol, atol, names)
def assert_exception(f, exception_type, *args, **kwargs):
"""Test that function f will throw an exception of type given by `exception_type`"""
try:
f(*args, **kwargs)
assert(False)
except exception_type:
return
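# Usage sketch for assert_exception (illustrative, not original code): the
# call below passes because int('not-a-number') raises ValueError.
def _example_assert_exception():
    assert_exception(int, ValueError, 'not-a-number')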
def retry(n):
"""Retry n times before failing for stochastic test cases."""
assert n > 0
def decorate(f):
"""Decorate a test case."""
def wrapper(*args, **kwargs):
"""Wrapper for tests function."""
for _ in range(n):
try:
f(*args, **kwargs)
except AssertionError as e:
err = e
continue
return
raise err
return wrapper
return decorate
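# Sketch of the retry decorator in use (illustrative addition): a stochastic
# check is attempted up to 3 times before the last AssertionError propagates.
@retry(3)
def _example_flaky_check():
    value = np.random.rand()
    assert value < 0.99, "rare stochastic failure"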
def simple_forward(sym, ctx=None, is_train=False, **inputs):
"""A simple forward function for a symbol.
Primarily used in doctest to test the functionality of a symbol.
Takes NumPy arrays as inputs and outputs are also converted to NumPy arrays.
Parameters
----------
ctx : Context
If ``None``, will take the default context.
inputs : keyword arguments
Mapping each input name to a NumPy array.
Returns
-------
The result as a numpy array. Multiple results will
be returned as a list of NumPy arrays.
"""
ctx = ctx or default_context()
inputs = {k: array(v) for k, v in inputs.items()}
exe = sym.bind(ctx, args=inputs)
exe.forward(is_train=is_train)
outputs = [x.asnumpy() for x in exe.outputs]
if len(outputs) == 1:
outputs = outputs[0]
return outputs
def _parse_location(sym, location, ctx, dtype=default_dtype()):
"""Parses the given location to a dictionary.
Arguments of the provided op `sym` are used as dictionary keys
and elements of `location` are used as values.
Parameters
----------
sym : Symbol
Symbol containing op
location : list or tuple or dict
Argument values location
- if type is list or tuple of `np.ndarray`
            inner elements are arrays corresponding to
``sym.list_arguments()``.
- if type is dict of str -> `np.ndarray`
maps the name of arguments to the corresponding `np.ndarray`.
*In either case, value of all the arguments must be provided.*
ctx : Context
Device context.
dtype: np.float32 or np.float64
Datatype for mx.nd.array.
Returns
-------
dict
Dictionary with `sym` arguments as keys and `location` elements as
values.
Examples
-------
>>> a = mx.symbol.Variable('a')
>>> b = mx.symbol.Variable('b')
>>> l1 = np.ndarray([2,3])
>>> l2 = np.ndarray([3,4])
>>> _parse_location(a * b, [l1, l2], None)
{'a': <NDArray 2x3 @cpu(0)>, 'b': <NDArray 3x4 @cpu(0)>}
>>> _parse_location(a * b, {'a': l1, 'b': l2}, None)
{'a': <NDArray 2x3 @cpu(0)>, 'b': <NDArray 3x4 @cpu(0)>}
>>> _parse_location(a * b, {'a': l1}, None)
ValueError: Symbol arguments and keys of the given location do not match.
"""
assert isinstance(location, (dict, list, tuple))
assert dtype == np.float32 or dtype == np.float64
if isinstance(location, dict):
if set(location.keys()) != set(sym.list_arguments()):
raise ValueError("Symbol arguments and keys of the given location do not match."
"symbol args:%s, location.keys():%s"
% (str(set(sym.list_arguments())), str(set(location.keys()))))
else:
location = {k: v for k, v in zip(sym.list_arguments(), location)}
location = {k: mx.nd.array(v, ctx=ctx, dtype=dtype) if isinstance(v, np.ndarray) \
else v for k, v in location.items()}
return location
def _parse_aux_states(sym, aux_states, ctx, dtype=default_dtype()):
"""Parses the given auxiliary states to a dictionary.
Auxiliary states of the provided op `sym` are used as dictionary
keys and elements of `aux_states` are used as values.
Parameters
----------
sym : Symbol
Symbol containing op
aux_states : None or list or dict
Aux states
- if type is list or tuple of `np.ndarray`
            inner elements are arrays corresponding to
``sym.list_auxiliary_states()``.
- if type is dict of str -> `np.ndarray`
maps the name of arguments to the corresponding `np.ndarray`.
*In either case, all aux states of `sym` must be provided.*
ctx : Context
Device context.
dtype: np.float32 or np.float64
Datatype for mx.nd.array.
Returns
-------
dict
Dictionary with `sym` aux states as keys and `aux_states` elements
as values.
Examples
-------
>>> data = mx.symbol.Variable('data')
>>> weight = mx.sym.Variable(name='fc1_weight')
>>> fc1 = mx.symbol.FullyConnected(data = data, weight=weight, name='fc1', num_hidden=128)
>>> fc2 = mx.symbol.BatchNorm(fc1, name='batchnorm0')
>>> mean_states = np.ones(3)
>>> var_states = np.ones(3)
>>> _parse_aux_states(fc2, [mean_states, var_states], None)
{'batchnorm0_moving_var': <NDArray 3 @cpu(0)>, 'batchnorm0_moving_mean': <NDArray 3 @cpu(0)>}
>>> _parse_aux_states(fc2, {'batchnorm0_moving_var': mean_states,
... 'batchnorm0_moving_mean': var_states}, None)
{'batchnorm0_moving_var': <NDArray 3 @cpu(0)>, 'batchnorm0_moving_mean': <NDArray 3 @cpu(0)>}
>>> _parse_aux_states(fc2, {'batchnorm0_moving_var': mean_states}, None)
ValueError: Symbol aux_states names and given aux_states do not match.
"""
assert dtype == np.float32 or dtype == np.float64
if aux_states is not None:
if isinstance(aux_states, dict):
if set(aux_states.keys()) != set(sym.list_auxiliary_states()):
raise ValueError("Symbol aux_states names and given aux_states do not match."
"symbol aux_names:%s, aux_states.keys:%s"
% (str(set(sym.list_auxiliary_states())),
str(set(aux_states.keys()))))
elif isinstance(aux_states, (list, tuple)):
aux_names = sym.list_auxiliary_states()
aux_states = {k:v for k, v in zip(aux_names, aux_states)}
aux_states = {k: mx.nd.array(v, ctx=ctx, dtype=dtype) for k, v in aux_states.items()}
return aux_states
def numeric_grad(executor, location, aux_states=None, eps=1e-4,
use_forward_train=True, dtype=default_dtype()):
"""Calculates a numeric gradient via finite difference method.
Class based on Theano's `theano.gradient.numeric_grad` [1]
Parameters
----------
executor : Executor
Executor that computes the forward pass.
location : list of numpy.ndarray or dict of str to numpy.ndarray
Argument values used as location to compute gradient
Maps the name of arguments to the corresponding numpy.ndarray.
Value of all the arguments must be provided.
aux_states : None or list of numpy.ndarray or dict of str to numpy.ndarray, optional
Auxiliary states values used as location to compute gradient
Maps the name of aux_states to the corresponding numpy.ndarray.
Value of all the auxiliary arguments must be provided.
eps : float, optional
Epsilon for the finite-difference method.
use_forward_train : bool, optional
Whether to use `is_train=True` in testing.
dtype: np.float32 or np.float64
Datatype for mx.nd.array.
References
---------
..[1] https://github.com/Theano/Theano/blob/master/theano/gradient.py
"""
def as_stype(var, stype, dtype):
return mx.nd.cast_storage(mx.nd.array(var, dtype=dtype), stype=stype)
assert dtype == np.float32 or dtype == np.float64
approx_grads = {k: np.zeros(v.shape, dtype=dtype)
for k, v in location.items()}
for k, v in location.items():
stype = executor.arg_dict[k].stype
if stype == 'default':
executor.arg_dict[k][:] = as_stype(v, stype, dtype=dtype)
for k in location:
location[k] = np.ascontiguousarray(location[k])
for k, v in location.items():
if v.dtype.kind != 'f':
continue
stype = executor.arg_dict[k].stype
old_value = v.copy()
for i in range(np.prod(v.shape)):
# inplace update
v.ravel()[i] += eps/2.0
executor.arg_dict[k][:] = as_stype(v, stype, dtype=dtype)
if aux_states is not None:
for key, val in aux_states.items():
executor.aux_dict[key][:] = val
executor.forward(is_train=use_forward_train)
f_peps = executor.outputs[0].asnumpy()
v.ravel()[i] -= eps
executor.arg_dict[k][:] = as_stype(v, stype, dtype=dtype)
if aux_states is not None:
for key, val in aux_states.items():
adstype = executor.aux_dict[key].stype
executor.aux_dict[key][:] = as_stype(val, adstype, dtype=dtype)
executor.forward(is_train=use_forward_train)
f_neps = executor.outputs[0].asnumpy()
approx_grad = (f_peps - f_neps).sum() / eps
approx_grads[k].ravel()[i] = approx_grad
v.ravel()[i] = old_value.ravel()[i]
# copy back the original value
executor.arg_dict[k][:] = as_stype(old_value, stype, dtype=dtype)
return approx_grads
def check_numeric_gradient(sym, location, aux_states=None, numeric_eps=1e-3, rtol=1e-2,
atol=None, grad_nodes=None, use_forward_train=True, ctx=None,
grad_stype_dict=None, dtype=default_dtype()):
"""Verify an operation by checking backward pass via finite difference method.
Based on Theano's `theano.gradient.verify_grad` [1]
Parameters
----------
sym : Symbol
Symbol containing op to test
location : list or tuple or dict
Argument values used as location to compute gradient
- if type is list of numpy.ndarray
inner elements should have the same order as mxnet.sym.list_arguments().
- if type is dict of str -> numpy.ndarray
maps the name of arguments to the corresponding numpy.ndarray.
*In either case, value of all the arguments must be provided.*
aux_states : list or tuple or dict, optional
The auxiliary states required when generating the executor for the symbol.
numeric_eps : float, optional
Delta for the finite difference method that approximates the gradient.
    rtol : float, optional
        Relative tolerance used when comparing the numeric and symbolic gradients.
    atol : float, optional
        Absolute tolerance used when comparing the numeric and symbolic gradients.
grad_nodes : None or list or tuple or dict, optional
Names of the nodes to check gradient on
use_forward_train : bool
Whether to use is_train=True when computing the finite-difference.
ctx : Context, optional
Check the gradient computation on the specified device.
grad_stype_dict : dict of str->str, optional
Storage type dictionary for gradient ndarrays.
dtype: np.float32 or np.float64
Datatype for mx.nd.array.
References
---------
..[1] https://github.com/Theano/Theano/blob/master/theano/gradient.py
"""
assert dtype == np.float32 or dtype == np.float64
if ctx is None:
ctx = default_context()
def random_projection(shape):
"""Get a random weight matrix with not too small elements
Parameters
----------
shape : list or tuple
"""
# random_projection should not have elements too small,
# otherwise too much precision is lost in numerical gradient
plain = _rng.rand(*shape) + 0.1
return plain
location = _parse_location(sym=sym, location=location, ctx=ctx, dtype=dtype)
location_npy = {k:v.asnumpy() for k, v in location.items()}
aux_states = _parse_aux_states(sym=sym, aux_states=aux_states, ctx=ctx,
dtype=dtype)
if aux_states is not None:
aux_states_npy = {k: v.asnumpy() for k, v in aux_states.items()}
else:
aux_states_npy = None
if grad_nodes is None:
grad_nodes = sym.list_arguments()
grad_req = {k: 'write' for k in grad_nodes}
elif isinstance(grad_nodes, (list, tuple)):
grad_nodes = list(grad_nodes)
grad_req = {k: 'write' for k in grad_nodes}
elif isinstance(grad_nodes, dict):
grad_req = grad_nodes.copy()
grad_nodes = grad_nodes.keys()
else:
raise ValueError
input_shape = {k: v.shape for k, v in location.items()}
_, out_shape, _ = sym.infer_shape(**input_shape)
proj = mx.sym.Variable("__random_proj")
out = sym * proj
out = mx.sym.MakeLoss(out)
location = dict(list(location.items()) +
[("__random_proj", mx.nd.array(random_projection(out_shape[0]),
ctx=ctx, dtype=dtype))])
args_grad_npy = dict([(k, _rng.normal(0, 0.01, size=location[k].shape)) for k in grad_nodes]
+ [("__random_proj", _rng.normal(0, 0.01, size=out_shape[0]))])
args_grad = {k: mx.nd.array(v, ctx=ctx, dtype=dtype) for k, v in args_grad_npy.items()}
if grad_stype_dict is not None:
assert isinstance(grad_stype_dict, dict), "grad_stype_dict must be a dict"
for k, v in grad_stype_dict.items():
if k in args_grad and v in _STORAGE_TYPE_STR_TO_ID and v != 'default':
# create an uninitialized sparse ndarray for executor
# if the symbolic grad is expected to be zero, it should not be initialized at all
args_grad[k] = mx.nd.zeros(args_grad[k].shape, args_grad[k].context,
args_grad[k].dtype, v)
executor = out.bind(ctx, grad_req=grad_req,
args=location, args_grad=args_grad, aux_states=aux_states)
inps = executor.arg_arrays
if len(inps) != len(location):
raise ValueError("Executor arg_arrays and and location len do not match."
"Got %d inputs and %d locations"%(len(inps), len(location)))
assert len(executor.outputs) == 1
executor.forward(is_train=True)
executor.backward()
symbolic_grads = {k:executor.grad_dict[k].asnumpy() for k in grad_nodes}
numeric_gradients = numeric_grad(
executor, location_npy, aux_states_npy,
eps=numeric_eps, use_forward_train=use_forward_train, dtype=dtype)
for name in grad_nodes:
fd_grad = numeric_gradients[name]
orig_grad = args_grad_npy[name]
sym_grad = symbolic_grads[name]
if grad_req[name] == 'write':
assert_almost_equal(fd_grad, sym_grad, rtol, atol,
("NUMERICAL_%s"%name, "BACKWARD_%s"%name))
elif grad_req[name] == 'add':
assert_almost_equal(fd_grad, sym_grad - orig_grad, rtol, atol,
("NUMERICAL_%s"%name, "BACKWARD_%s"%name))
elif grad_req[name] == 'null':
assert_almost_equal(orig_grad, sym_grad, rtol, atol,
("NUMERICAL_%s"%name, "BACKWARD_%s"%name))
else:
raise ValueError("Invalid grad_req %s for argument %s"%(grad_req[name], name))
def check_symbolic_forward(sym, location, expected, rtol=1E-4, atol=None,
aux_states=None, ctx=None, equal_nan=False,
dtype=default_dtype()):
"""Compares a symbol's forward results with the expected ones.
Prints error messages if the forward results are not the same as the expected ones.
Parameters
---------
sym : Symbol
output symbol
location : list of np.ndarray or dict of str to np.ndarray
The evaluation point
- if type is list of np.ndarray
Contains all the numpy arrays corresponding to `sym.list_arguments()`.
- if type is dict of str to np.ndarray
Contains the mapping between argument names and their values.
expected : list of np.ndarray or dict of str to np.ndarray
The expected output value
- if type is list of np.ndarray
Contains arrays corresponding to exe.outputs.
- if type is dict of str to np.ndarray
Contains mapping between sym.list_output() and exe.outputs.
    rtol : float, optional
        Relative tolerance used when comparing outputs with expected values.
    atol : float, optional
        Absolute tolerance used when comparing outputs with expected values.
aux_states : list of np.ndarray of dict, optional
- if type is list of np.ndarray
Contains all the NumPy arrays corresponding to sym.list_auxiliary_states
- if type is dict of str to np.ndarray
Contains the mapping between names of auxiliary states and their values.
ctx : Context, optional
running context
dtype: np.float32 or np.float64
Datatype for mx.nd.array.
equal_nan: Boolean
if True, `nan` is a valid value for checking equivalency (ie `nan` == `nan`)
Example
-------
>>> shape = (2, 2)
>>> lhs = mx.symbol.Variable('lhs')
>>> rhs = mx.symbol.Variable('rhs')
>>> sym_dot = mx.symbol.dot(lhs, rhs)
>>> mat1 = np.array([[1, 2], [3, 4]])
>>> mat2 = np.array([[5, 6], [7, 8]])
>>> ret_expected = np.array([[19, 22], [43, 50]])
>>> check_symbolic_forward(sym_dot, [mat1, mat2], [ret_expected])
"""
assert dtype == np.float32 or dtype == np.float64
if ctx is None:
ctx = default_context()
location = _parse_location(sym=sym, location=location, ctx=ctx, dtype=dtype)
aux_states = _parse_aux_states(sym=sym, aux_states=aux_states, ctx=ctx,
dtype=dtype)
if isinstance(expected, dict):
expected = [expected[k] for k in sym.list_outputs()]
args_grad_data = {k:mx.nd.empty(v.shape, ctx=ctx, dtype=dtype) for k, v in location.items()}
executor = sym.bind(ctx=ctx, args=location, args_grad=args_grad_data, aux_states=aux_states)
for g in executor.grad_arrays:
g[:] = 0
executor.forward(is_train=False)
outputs = [x.asnumpy() for x in executor.outputs]
for output_name, expect, output in zip(sym.list_outputs(), expected, outputs):
assert_almost_equal(expect, output, rtol, atol,
("EXPECTED_%s"%output_name, "FORWARD_%s"%output_name),
equal_nan=equal_nan)
return executor.outputs
def check_symbolic_backward(sym, location, out_grads, expected, rtol=1e-5, atol=None,
aux_states=None, grad_req='write', ctx=None, grad_stypes=None,
equal_nan=False, dtype=default_dtype()):
"""Compares a symbol's backward results with the expected ones.
Prints error messages if the backward results are not the same as the expected results.
Parameters
---------
sym : Symbol
output symbol
location : list of np.ndarray or dict of str to np.ndarray
The evaluation point
- if type is list of np.ndarray
Contains all the NumPy arrays corresponding to ``mx.sym.list_arguments``.
- if type is dict of str to np.ndarray
Contains the mapping between argument names and their values.
out_grads : None or list of np.ndarray or dict of str to np.ndarray
        NumPy arrays corresponding to sym.outputs for incoming gradient.
- if type is list of np.ndarray
Contains arrays corresponding to ``exe.outputs``.
- if type is dict of str to np.ndarray
contains mapping between mxnet.sym.list_output() and Executor.outputs
expected : list of np.ndarray or dict of str to np.ndarray
expected gradient values
- if type is list of np.ndarray
Contains arrays corresponding to exe.grad_arrays
- if type is dict of str to np.ndarray
Contains mapping between ``sym.list_arguments()`` and exe.outputs.
    rtol : float, optional
        Relative tolerance used when comparing gradients with expected values.
    atol : float, optional
        Absolute tolerance used when comparing gradients with expected values.
aux_states : list of np.ndarray or dict of str to np.ndarray
grad_req : str or list of str or dict of str to str, optional
Gradient requirements. 'write', 'add' or 'null'.
ctx : Context, optional
Running context.
grad_stypes: dict of str->str
dictionary of mapping argument name to stype for the gradient
equal_nan: Boolean
if True, `nan` is a valid value for checking equivalency (ie `nan` == `nan`)
dtype: np.float32 or np.float64
Datatype for mx.nd.array.
Example
-------
>>> lhs = mx.symbol.Variable('lhs')
>>> rhs = mx.symbol.Variable('rhs')
>>> sym_add = mx.symbol.elemwise_add(lhs, rhs)
>>> mat1 = np.array([[1, 2], [3, 4]])
>>> mat2 = np.array([[5, 6], [7, 8]])
>>> grad1 = mx.nd.zeros(shape)
>>> grad2 = mx.nd.zeros(shape)
>>> exec_add = sym_add.bind(default_context(), args={'lhs': mat1, 'rhs': mat2},
... args_grad={'lhs': grad1, 'rhs': grad2}, grad_req={'lhs': 'write', 'rhs': 'write'})
>>> exec_add.forward(is_train=True)
>>> ograd = mx.nd.ones(shape)
>>> grad_expected = ograd.copy().asnumpy()
>>> check_symbolic_backward(sym_add, [mat1, mat2], [ograd], [grad_expected, grad_expected])
"""
assert dtype == np.float32 or dtype == np.float64
if ctx is None:
ctx = default_context()
location = _parse_location(sym=sym, location=location, ctx=ctx, dtype=dtype)
aux_states = _parse_aux_states(sym=sym, aux_states=aux_states, ctx=ctx,
dtype=dtype)
if isinstance(expected, (list, tuple)):
expected = {k:v for k, v in zip(sym.list_arguments(), expected)}
args_grad_npy = {k:_rng.normal(size=v.shape) for k, v in expected.items()}
args_grad_data = {}
for k, v in args_grad_npy.items():
nd = mx.nd.array(v, ctx=ctx, dtype=dtype)
if grad_stypes is not None and k in grad_stypes:
stype = grad_stypes[k]
if stype is not None and stype != 'default':
out = create_sparse_array(v.shape, stype, density=0.0)
else:
out = nd
args_grad_data[k] = out
else:
args_grad_data[k] = nd
if isinstance(grad_req, str):
grad_req = {k:grad_req for k in sym.list_arguments()}
elif isinstance(grad_req, (list, tuple)):
grad_req = {k:v for k, v in zip(sym.list_arguments(), grad_req)}
executor = sym.bind(ctx=ctx, args=location, args_grad=args_grad_data,
aux_states=aux_states, grad_req=grad_req)
executor.forward(is_train=True)
if isinstance(out_grads, (tuple, list)):
outg = list()
for arr in out_grads:
if isinstance(arr, np.ndarray):
outg.append(mx.nd.array(arr, ctx=ctx, dtype=dtype))
else:
outg.append(arr)
out_grads = outg
elif isinstance(out_grads, dict):
outg = dict()
for k, v in out_grads.items():
if isinstance(v, np.ndarray):
outg[k] = mx.nd.array(v, ctx=ctx, dtype=dtype)
else:
outg[k] = v
out_grads = outg
else:
assert out_grads is None
executor.backward(out_grads)
grads = {k: v.asnumpy() for k, v in args_grad_data.items()}
for name in expected:
if grad_req[name] == 'write':
assert_almost_equal(expected[name], grads[name], rtol, atol,
("EXPECTED_%s"%name, "BACKWARD_%s"%name),
equal_nan=equal_nan)
elif grad_req[name] == 'add':
assert_almost_equal(expected[name], grads[name] - args_grad_npy[name],
rtol, atol, ("EXPECTED_%s"%name, "BACKWARD_%s"%name),
equal_nan=equal_nan)
elif grad_req[name] == 'null':
assert_almost_equal(args_grad_npy[name], grads[name],
rtol, atol, ("EXPECTED_%s"%name, "BACKWARD_%s"%name),
equal_nan=equal_nan)
else:
raise ValueError("Invalid grad_req %s for argument %s"%(grad_req[name], name))
return args_grad_data
def check_speed(sym, location=None, ctx=None, N=20, grad_req=None, typ="whole",
**kwargs):
"""Check the running speed of a symbol.
Parameters
----------
sym : Symbol
Symbol to run the speed test.
location : none or dict of str to np.ndarray
Location to evaluate the inner executor.
ctx : Context
Running context.
N : int, optional
Repeat times.
grad_req : None or str or list of str or dict of str to str, optional
Gradient requirements.
typ : str, optional
"whole" or "forward"
- "whole"
Test the forward_backward speed.
- "forward"
Only test the forward speed.
"""
if ctx is None:
ctx = default_context()
if grad_req is None:
grad_req = 'write'
if location is None:
exe = sym.simple_bind(grad_req=grad_req, ctx=ctx, **kwargs)
location = {k: _rng.normal(size=arr.shape, scale=1.0) for k, arr in
exe.arg_dict.items()}
else:
        assert isinstance(location, dict), "Expected dict for location, got %s" % str(location)
exe = sym.simple_bind(grad_req=grad_req, ctx=ctx,
**{k: v.shape for k, v in location.items()})
for name, iarr in location.items():
exe.arg_dict[name][:] = iarr.astype(exe.arg_dict[name].dtype)
if typ == "whole":
# Warm up
exe.forward(is_train=True)
exe.backward(out_grads=exe.outputs)
for output in exe.outputs:
output.wait_to_read()
# Test forward + backward
tic = time.time()
for _ in range(N):
exe.forward(is_train=True)
exe.backward(out_grads=exe.outputs)
mx.nd.waitall()
toc = time.time()
forward_backward_time = (toc - tic) * 1.0 / N
return forward_backward_time
elif typ == "forward":
# Warm up
exe.forward(is_train=False)
for output in exe.outputs:
output.wait_to_read()
# Test forward only
tic = time.time()
for _ in range(N):
exe.forward(is_train=False)
mx.nd.waitall()
toc = time.time()
forward_time = (toc - tic) * 1.0 / N
return forward_time
else:
raise ValueError('typ can only be "whole" or "forward".')
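# Hedged usage sketch for check_speed (added for illustration; the symbol,
# shape and repeat count are arbitrary assumptions, not original code).
def _example_check_speed():
    data = mx.sym.Variable('data')
    net = mx.sym.FullyConnected(data=data, num_hidden=16, name='fc')
    # Returns the average seconds per forward pass over N repetitions.
    return check_speed(net, ctx=mx.cpu(), N=5, typ="forward", data=(8, 32))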
def check_consistency(sym, ctx_list, scale=1.0, grad_req='write',
arg_params=None, aux_params=None, tol=None,
raise_on_err=True, ground_truth=None, equal_nan=False):
"""Check symbol gives the same output for different running context
Parameters
----------
sym : Symbol or list of Symbols
Symbol(s) to run the consistency test.
ctx_list : list
Running context. See example for more detail.
scale : float, optional
Standard deviation of the inner normal distribution. Used in initialization.
grad_req : str or list of str or dict of str to str
Gradient requirement.
Examples
--------
>>> # create the symbol
>>> sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), name='conv')
>>> # initialize the running context
>>> ctx_list =\
[{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},\
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},\
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float16}},\
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},\
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}}]
>>> check_consistency(sym, ctx_list)
>>> sym = mx.sym.Concat(name='concat', num_args=2)
>>> ctx_list = \
[{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),\
'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},\
{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),\
'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}},\
{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),\
'type_dict': {'concat_arg0': np.float16, 'concat_arg1': np.float16}},\
{'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),\
'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},\
{'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),\
'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}}]
>>> check_consistency(sym, ctx_list)
"""
if tol is None:
tol = {np.dtype(np.float16): 1e-1,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
elif isinstance(tol, numbers.Number):
tol = {np.dtype(np.float16): tol,
np.dtype(np.float32): tol,
np.dtype(np.float64): tol,
np.dtype(np.uint8): tol,
np.dtype(np.int32): tol}
assert len(ctx_list) > 1
if isinstance(sym, Symbol):
sym = [sym]*len(ctx_list)
else:
assert len(sym) == len(ctx_list)
output_names = sym[0].list_outputs()
arg_names = sym[0].list_arguments()
exe_list = []
for s, ctx in zip(sym, ctx_list):
assert s.list_arguments() == arg_names
assert s.list_outputs() == output_names
exe_list.append(s.simple_bind(grad_req=grad_req, **ctx))
arg_params = {} if arg_params is None else arg_params
aux_params = {} if aux_params is None else aux_params
for n, arr in exe_list[0].arg_dict.items():
if n not in arg_params:
arg_params[n] = np.random.normal(size=arr.shape, scale=scale)
for n, arr in exe_list[0].aux_dict.items():
if n not in aux_params:
aux_params[n] = 0
for exe in exe_list:
for name, arr in exe.arg_dict.items():
arr[:] = arg_params[name]
for name, arr in exe.aux_dict.items():
arr[:] = aux_params[name]
dtypes = [np.dtype(exe.outputs[0].dtype) for exe in exe_list]
max_idx = np.argmax(dtypes)
gt = ground_truth
if gt is None:
gt = exe_list[max_idx].output_dict.copy()
if grad_req != 'null':
gt.update(exe_list[max_idx].grad_dict)
# test
for exe in exe_list:
exe.forward(is_train=False)
for i, exe in enumerate(exe_list):
if i == max_idx:
continue
for name, arr in zip(output_names, exe.outputs):
gtarr = gt[name].astype(dtypes[i]).asnumpy()
arr = arr.asnumpy()
try:
assert_almost_equal(arr, gtarr, rtol=tol[dtypes[i]], atol=tol[dtypes[i]],
equal_nan=equal_nan)
except AssertionError as e:
print('Predict Err: ctx %d vs ctx %d at %s'%(i, max_idx, name))
traceback.print_exc()
if raise_on_err:
raise e
else:
print(str(e))
# train
if grad_req != 'null':
for exe in exe_list:
exe.forward(is_train=True)
exe.backward(exe.outputs)
for i, exe in enumerate(exe_list):
if i == max_idx:
continue
curr = zip(output_names + arg_names, exe.outputs + exe.grad_arrays)
for name, arr in curr:
if gt[name] is None:
assert arr is None
continue
gtarr = gt[name].astype(dtypes[i]).asnumpy()
arr = arr.asnumpy()
try:
assert_almost_equal(arr, gtarr, rtol=tol[dtypes[i]], atol=tol[dtypes[i]],
equal_nan=equal_nan)
except AssertionError as e:
print('Train Err: ctx %d vs ctx %d at %s'%(i, max_idx, name))
traceback.print_exc()
if raise_on_err:
raise e
else:
print(str(e))
return gt
def list_gpus():
"""Return a list of GPUs
Returns
-------
list of int:
If there are n GPUs, then return a list [0,1,...,n-1]. Otherwise returns
[].
"""
re = ''
nvidia_smi = ['nvidia-smi', '/usr/bin/nvidia-smi', '/usr/local/nvidia/bin/nvidia-smi']
for cmd in nvidia_smi:
try:
re = subprocess.check_output([cmd, "-L"], universal_newlines=True)
except OSError:
pass
return range(len([i for i in re.split('\n') if 'GPU' in i]))
def download(url, fname=None, dirname=None, overwrite=False):
"""Download an given URL
Parameters
----------
url : str
URL to download
fname : str, optional
filename of the downloaded file. If None, then will guess a filename
from url.
dirname : str, optional
output directory name. If None, then guess from fname or use the current
directory
overwrite : bool, optional
Default is false, which means skipping download if the local file
exists. If true, then download the url to overwrite the local file if
exists.
Returns
-------
str
The filename of the downloaded file
"""
if fname is None:
fname = url.split('/')[-1]
if dirname is None:
dirname = os.path.dirname(fname)
else:
fname = os.path.join(dirname, fname)
if dirname != "":
if not os.path.exists(dirname):
try:
logging.info('create directory %s', dirname)
os.makedirs(dirname)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise OSError('failed to create ' + dirname)
if not overwrite and os.path.exists(fname):
logging.info("%s exists, skipping download", fname)
return fname
r = requests.get(url, stream=True)
assert r.status_code == 200, "failed to open %s" % url
with open(fname, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
logging.info("downloaded %s into %s successfully", url, fname)
return fname
def get_mnist():
"""Download and load the MNIST dataset
Returns
-------
dict
A dict containing the data
"""
def read_data(label_url, image_url):
with gzip.open(mx.test_utils.download(label_url)) as flbl:
struct.unpack(">II", flbl.read(8))
label = np.fromstring(flbl.read(), dtype=np.int8)
with gzip.open(mx.test_utils.download(image_url), 'rb') as fimg:
_, _, rows, cols = struct.unpack(">IIII", fimg.read(16))
image = np.fromstring(fimg.read(), dtype=np.uint8).reshape(len(label), rows, cols)
image = image.reshape(image.shape[0], 1, 28, 28).astype(np.float32)/255
return (label, image)
# changed to mxnet.io for more stable hosting
# path = 'http://yann.lecun.com/exdb/mnist/'
path = 'http://data.mxnet.io/data/mnist/'
(train_lbl, train_img) = read_data(
path+'train-labels-idx1-ubyte.gz', path+'train-images-idx3-ubyte.gz')
(test_lbl, test_img) = read_data(
path+'t10k-labels-idx1-ubyte.gz', path+'t10k-images-idx3-ubyte.gz')
return {'train_data':train_img, 'train_label':train_lbl,
'test_data':test_img, 'test_label':test_lbl}
def get_bz2_data(data_dir, data_name, url, data_origin_name):
"""Download and extract bz2 data."""
download(url, dirname=data_dir, overwrite=False)
os.chdir(data_dir)
if not os.path.exists(data_name):
bz_file = bz2.BZ2File(data_origin_name, 'rb')
with open(data_name, 'wb') as fout:
try:
content = bz_file.read()
fout.write(content)
finally:
bz_file.close()
os.remove(data_origin_name)
os.chdir("..")
def set_env_var(key, val, default_val=""):
"""Set environment variable
Parameters
----------
key : str
Env var to set
val : str
New value assigned to the env var
default_val : str, optional
Default value returned if the env var doesn't exist
Returns
-------
str
The value of env var before it is set to the new value
"""
prev_val = os.environ.get(key, default_val)
os.environ[key] = val
return prev_val
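# Illustrative sketch (added; the variable name is only an example): the
# returned previous value lets a test restore the environment afterwards.
def _example_set_env_var():
    old = set_env_var('MXNET_ENGINE_TYPE', 'NaiveEngine', default_val='')
    try:
        pass  # run code that depends on the variable here
    finally:
        set_env_var('MXNET_ENGINE_TYPE', old)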
def same_array(array1, array2):
"""Check whether two NDArrays sharing the same memory block
Parameters
----------
array1 : NDArray
First NDArray to be checked
array2 : NDArray
Second NDArray to be checked
Returns
-------
bool
Whether two NDArrays share the same memory
"""
array1[:] += 1
if not same(array1.asnumpy(), array2.asnumpy()):
array1[:] -= 1
return False
array1[:] -= 1
return same(array1.asnumpy(), array2.asnumpy())
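# Usage sketch for same_array (illustrative addition): an array shares memory
# with itself but not with a copy.
def _example_same_array():
    x = mx.nd.ones((2, 3))
    assert same_array(x, x)             # same underlying buffer
    assert not same_array(x, x.copy())  # the copy owns new memory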
@contextmanager
def discard_stderr():
"""
Discards error output of a routine if invoked as:
with discard_stderr():
...
"""
try:
stderr_fileno = sys.stderr.fileno()
old_stderr = os.dup(stderr_fileno)
bit_bucket = open(os.devnull, 'w')
os.dup2(bit_bucket.fileno(), stderr_fileno)
yield
finally:
os.dup2(old_stderr, stderr_fileno)
bit_bucket.close()
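# Illustrative use of discard_stderr (sketch added for clarity): output
# written to stderr inside the context manager is routed to os.devnull.
def _example_discard_stderr():
    with discard_stderr():
        print('suppressed diagnostic output', file=sys.stderr)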
|
jimi-c/ansible | refs/heads/devel | lib/ansible/utils/module_docs_fragments/digital_ocean.py | 60 | # Copyright (c) 2018, Ansible Project
# Copyright (c) 2018, Abhijeet Kasurde ([email protected])
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# Parameters for DigitalOcean modules
DOCUMENTATION = '''
options:
oauth_token:
description:
- DigitalOcean OAuth token.
- "There are several other environment variables which can be used to provide this value."
- "i.e., - 'DO_API_TOKEN', 'DO_API_KEY', 'DO_OAUTH_TOKEN' and 'OAUTH_TOKEN'"
required: false
aliases: ['api_token']
timeout:
description:
- The timeout in seconds used for polling DigitalOcean's API.
default: 30
validate_certs:
description:
- If set to C(no), the SSL certificates will not be validated.
            - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
default: true
type: bool
'''
|
MartinPetkov/django-security | refs/heads/master | security/password_expiry.py | 3 | # Copyright (c) 2011, SD Elements. See LICENSE.txt for details.
from .models import PasswordExpiry
from django.conf import settings
def password_is_expired(user):
password_expiry, _ = PasswordExpiry.objects.get_or_create(user=user)
password_settings = getattr(settings, 'MANDATORY_PASSWORD_CHANGE', {})
include_superusers = password_settings.get('INCLUDE_SUPERUSERS', False)
if include_superusers:
return password_expiry.is_expired()
else:
return not user.is_superuser and password_expiry.is_expired()
def never_expire_password(user):
password_expiry, _ = PasswordExpiry.objects.get_or_create(user=user)
password_expiry.never_expire()
|
mayoub/hhana | refs/heads/master | statstools/plotting.py | 5 | # python imports
from itertools import cycle
import os
import pickle
import re
# root/rootpy imports
from rootpy import ROOT
from rootpy.plotting import Canvas, Legend, Hist, Graph
from rootpy.plotting.hist import _HistBase
from rootpy.plotting.graph import _Graph1DBase
from rootpy.plotting.shapes import Line
from rootpy.plotting.utils import draw
from rootpy.memory import keepalive
from rootpy.context import preserve_current_canvas
# local imports
from . import log; log = log[__name__]
gaussian_cdf_c = ROOT.Math.gaussian_cdf_c
UNBLIND = {
2012: {
'vbf': 3,
'boosted': 2},
2011: {
'vbf': 2,
'boosted': 2}
}
PATTERNS = [
re.compile('^(?P<type>workspace|channel)(_hh)?_(?P<year>\d+)_(?P<category>[a-z_]+)(?P<mass>\d+)(_[a-z]+[a-z0-9_]*)?$'),
re.compile('^(?P<type>workspace|channel)(_hh)?_(?P<category>[a-z]+)_(?P<mass>\d+)_(?P<year>\d+)$'),
re.compile('^(?P<type>workspace|channel)(_hh)?_(?P<category>[a-z_]+)(?P<year>\d+)_(?P<mass>\d+)(_[a-z]+[a-z0-9_]*)?$'),
]
def make_density(hist):
"""
Make a density object (divide content by bin width)
hist can be a Histogram or a Graph
"""
if isinstance(hist, _HistBase) and hist.GetDimension()==1:
for bin, width in zip(hist.bins(), hist.xwidth()):
if width==0:
raise RuntimeError('Cannot create density histogram with 0 width bins')
bin.value /= width
bin.error /= width
elif isinstance(hist, _Graph1DBase):
for idx in xrange(len(hist)):
x, y = hist[idx]
xlow, xhigh = hist.xerrl(idx), hist.xerrh(idx)
ylow, yhigh = hist.yerrl(idx), hist.yerrh(idx)
if (xhigh+xlow)==0:
raise RuntimeError('Cannot create density graph with 0 width bins')
hist[idx] = (x, y/(xhigh+xlow))
hist.SetPointError(idx, xlow, xhigh, ylow/(xhigh+xlow), yhigh/(xhigh+xlow))
else:
log.warning('Could not make density from object of type {0}'.format(type(hist)))
return
def parse_name(name):
"""
determine year, category and mass from ws/channel name
"""
for p in PATTERNS:
match = re.match(p, name)
if match:
break
if not match:
raise ValueError(
"not a valid workspace/channel name: {0}".format(name))
return (int(match.group('year')) % 1000 + 2000,
match.group('category').strip('_'),
int(match.group('mass')))
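# Usage sketch for parse_name (illustrative addition; the workspace name
# below is a made-up example matching the first pattern).
def _example_parse_name():
    return parse_name('workspace_hh_12_vbf_125')  # -> (2012, 'vbf', 125)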
def get_data(pickle_file):
# read NP pull data from a pickle
with open(pickle_file) as f:
data = pickle.load(f)
return data
def print_np(np):
# strip unneeded text from NP names
if np.split('_')[0]=='alpha':
return np.replace('alpha_', '').replace('ATLAS_', '').replace('_', ' ')
elif np.split('_')[0]=='gamma':
return np.replace('gamma_stat', '').replace('channel_', '').replace('125_mmc1_mass_', '').replace('_', ' ')
else:
return np.replace('ATLAS_', '').replace('_', ' ')
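# Illustrative example (added): print_np strips the alpha/ATLAS prefixes from
# a nuisance-parameter name for display.
def _example_print_np():
    return print_np('alpha_ATLAS_TAU_ID_2012')  # -> 'TAU ID 2012'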
def get_rebinned_hist(hist_origin, binning=None):
if binning is None:
return hist_origin
hist_rebin = Hist(binning, name=hist_origin.name+'_rebinned')
hist_rebin[:] = hist_origin[:]
return hist_rebin
def get_rebinned_graph(graph_origin, binning=None):
if binning is None:
return graph_origin
graph_rebin = Graph(len(binning) - 1)
if len(graph_origin) != len(graph_rebin):
log.warning('uniform: {0} bins != rebinned: {1} bins'.format(
len(graph_origin), len(graph_rebin)))
raise RuntimeError('wrong binning')
for ipoint, (y, yerr) in enumerate(zip(graph_origin.y(),
graph_origin.yerr())):
x_rebin_err = 0.5 * (binning[ipoint + 1] - binning[ipoint])
x_rebin = binning[ipoint] + x_rebin_err
graph_rebin.SetPoint(ipoint, x_rebin, y)
graph_rebin.SetPointError(
ipoint,
x_rebin_err, x_rebin_err,
yerr[0], yerr[1])
return graph_rebin
def blind_graph(graph, window, replace=0):
"""
Blind a graph in-place
"""
if window is False or window is None:
# do nothing
return
if isinstance(window, (tuple, list)):
low, high = window
for idx in xrange(len(graph)):
x, y = graph[idx]
xlow, xhigh = graph.xerrl(idx), graph.xerrh(idx)
if ((low < x - xlow < high) or
(low < x + xhigh < high) or
(low < x < high)):
graph[idx] = (x, replace)
graph.SetPointError(idx, xlow, xhigh, 0, 0)
else:
for idx in xrange(len(graph) - 1, len(graph) - 1 - window, -1):
x, y = graph[idx]
xlow, xhigh = graph.xerrl(idx), graph.xerrh(idx)
graph[idx] = (x, replace)
graph.SetPointError(idx, xlow, xhigh, 0, 0)
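# Usage sketch for blind_graph (illustrative addition): zero out the points
# of a mass graph that fall inside the 100-150 GeV signal window.
def _example_blind_graph(graph):
    """Sketch only: `graph` is any rootpy Graph with mass on the x-axis."""
    blind_graph(graph, window=(100, 150), replace=0)
    return graph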
def get_category(category_name, categories):
for category in categories:
if category.name == category_name:
return category
return None
def get_binning(category, year, fit_var='mmc', clf_bins='optimal'):
if fit_var == 'mmc':
binning = category.limitbins
if isinstance(binning, (tuple, list)):
binning[-1] = 250
return binning
binning[year][-1] = 250
return binning[year]
if fit_var == 'bdt':
from mva import CACHE_DIR
binning_cache = os.path.join(
CACHE_DIR, 'binning/binning_{0}_{1}_{2}.pickle'.format(
category.name, 125, year % 1000))
if clf_bins == 'optimal':
if os.path.exists(binning_cache) and clf_bins == 'optimal':
with open(binning_cache) as f:
binning = pickle.load(f)
return binning
else:
return [-1+2*i/float(clf_bins) for i in xrange(int(clf_bins)+1)]
# use original binning in WS
return None
def get_blinding(category, year, fit_var='mmc'):
if fit_var == 'mmc':
return (100, 150)
return UNBLIND[year][category.name]
def get_uncertainty_graph(hnom, curve_uncert):
"""
    Convert a histogram and a RooCurve
    into a TGraphAsymmErrors
Parameters
----------
hnom: TH1F, TH1D, ...
The histogram of nominal values
curve_uncert: RooCurve
The uncertainty band around the nominal value
TODO: Improve the handling of the underflow and overflow bins
"""
graph = Graph(hnom.GetNbinsX())
for ibin in xrange(1, hnom.GetNbinsX() + 1):
uncerts = []
for ip in xrange(3, curve_uncert.GetN() - 3):
x, y = ROOT.Double(0.), ROOT.Double(0.)
curve_uncert.GetPoint(ip, x, y)
if hnom.GetBinLowEdge(ibin) <= x < hnom.GetBinLowEdge(ibin + 1):
uncerts.append(y)
log.debug('{0}, bin {1}: {2}'.format(hnom.name, ibin, uncerts))
low, high = min(uncerts), max(uncerts)
bin_center = 0.5 * (hnom.GetBinLowEdge(ibin + 1) +
hnom.GetBinLowEdge(ibin))
e_x_low = bin_center - hnom.GetBinLowEdge(ibin)
e_x_high = hnom.GetBinLowEdge(ibin + 1) - bin_center
bin_content = hnom.GetBinContent(ibin)
e_y_low = hnom.GetBinContent(ibin) - low
e_y_high = high - hnom.GetBinContent(ibin)
graph.SetPoint(ibin - 1, bin_center, bin_content)
graph.SetPointError(ibin - 1, e_x_low, e_x_high, e_y_low, e_y_high)
return graph
def pvalue_plot(poi, pvalues, pad=None,
xtitle='X', ytitle='P_{0}',
linestyle=None,
linecolor=None,
yrange=None,
verbose=False):
"""
Draw a pvalue plot
Parameters
----------
poi : list
List of POI values tested
pvalues : list
List of p-values or list of lists of p-values to overlay
multiple p-value curves
pad : Canvas or Pad, optional (default=None)
Pad to draw onto. Create new pad if None.
xtitle : str, optional (default='X')
The x-axis label (POI name)
ytitle : str, optional (default='P_{0}')
The y-axis label
linestyle : str or list, optional (default=None)
Line style for the p-value graph or a list of linestyles for
multiple p-value graphs.
    linecolor : str or list, optional (default=None)
        Line color for the p-value graph or a list of line colors for
        multiple p-value graphs.
    yrange : tuple, optional (default=None)
        The (min, max) y-axis range. If None, the range is determined
        automatically.
    verbose : bool, optional (default=False)
        If True, log the points of each graph as it is drawn.
Returns
-------
pad : Canvas
The pad.
graphs : list of Graph
The p-value graphs
"""
if not pvalues:
raise ValueError("pvalues is empty")
if not poi:
raise ValueError("poi is empty")
# determine if pvalues is list or list of lists
if not isinstance(pvalues[0], (list, tuple)):
pvalues = [pvalues]
if linecolor is not None:
if not isinstance(linecolor, list):
linecolor = [linecolor]
linecolor = cycle(linecolor)
if linestyle is not None:
if not isinstance(linestyle, list):
linestyle = [linestyle]
linestyle = cycle(linestyle)
with preserve_current_canvas():
if pad is None:
pad = Canvas()
pad.cd()
pad.SetLogy()
# create the axis
min_poi, max_poi = min(poi), max(poi)
haxis = Hist(1000, min_poi, max_poi)
xaxis = haxis.xaxis
yaxis = haxis.yaxis
xaxis.SetRangeUser(min_poi, max_poi)
haxis.Draw('AXIS')
min_pvalue = float('inf')
graphs = []
for ipv, pv in enumerate(pvalues):
graph = Graph(len(poi), linestyle='dashed',
drawstyle='L', linewidth=2)
for idx, (point, pvalue) in enumerate(zip(poi, pv)):
graph.SetPoint(idx, point, pvalue)
if linestyle is not None:
graph.linestyle = linestyle.next()
if linecolor is not None:
graph.linecolor = linecolor.next()
graphs.append(graph)
curr_min_pvalue = min(pv)
if curr_min_pvalue < min_pvalue:
min_pvalue = curr_min_pvalue
if verbose:
for graph in graphs:
log.info(['{0:1.1f}'.format(xval) for xval in list(graph.x())])
log.info(['{0:0.3f}'.format(yval) for yval in list(graph.y())])
# automatically handles axis limits
axes, bounds = draw(graphs, pad=pad, same=True, logy=True,
xtitle=xtitle, ytitle=ytitle,
xaxis=xaxis, yaxis=yaxis, ypadding=(0.2, 0.1),
logy_crop_value=1E-300)
if yrange is not None:
xaxis, yaxis = axes
yaxis.SetLimits(*yrange)
yaxis.SetRangeUser(*yrange)
min_pvalue = yrange[0]
# draw sigma levels up to minimum of pvalues
line = Line()
line.SetLineStyle(2)
line.SetLineColor(2)
latex = ROOT.TLatex()
latex.SetNDC(False)
latex.SetTextSize(20)
latex.SetTextColor(2)
sigma = 0
while True:
pvalue = gaussian_cdf_c(sigma)
if pvalue < min_pvalue:
break
keepalive(pad, latex.DrawLatex(max_poi, pvalue, " {0}#sigma".format(sigma)))
keepalive(pad, line.DrawLine(min_poi, pvalue, max_poi, pvalue))
sigma += 1
pad.RedrawAxis()
pad.Update()
return pad, graphs
if __name__ == '__main__':
from rootpy.plotting import Canvas, Legend, get_style
from rootpy.plotting.style.atlas.labels import ATLAS_label
    mass_points = [100, 105, 110, 115, 120, 125, 130, 135, 140, 145, 150]
pvalues = [
[0.5, 0.25, 0.15, 0.05, 0.03, 0.01, 0.03, 0.05, 0.15, 0.25, 0.5],
[0.4, 0.3, 0.17, 0.02, 0.01, 0.008, 0.08, 0.06, 0.14, 0.2, 0.2],
]
names = ['A', 'B']
style = get_style('ATLAS', shape='rect')
# allow space for sigma labels on right
style.SetPadRightMargin(0.05)
with style:
c = Canvas()
_, graphs = pvalue_plot(
mass_points, pvalues, pad=c, xtitle='m_{H} [GeV]',
linestyle=['dashed', 'solid'])
for name, graph in zip(names, graphs):
graph.title = name
graph.legendstyle = 'L'
leg = Legend(graphs, leftmargin=0.4, topmargin=0.2)
leg.Draw()
ATLAS_label(0.57, 0.88, text="Internal 2012", sqrts=8, pad=c, sep=0.09)
c.SaveAs('pvalue_plot.png')
|
ciber96/mtasa-blue | refs/heads/master | vendor/google-breakpad/src/tools/gyp/test/generator-output/actions/subdir2/make-file.py | 973 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
contents = "Hello from make-file.py\n"
open(sys.argv[1], 'wb').write(contents)
|
jspraul/bite-project | refs/heads/master | deps/gdata-python-client/src/gdata/tlslite/utils/compat.py | 361 | """Miscellaneous functions to mask Python version differences."""
import sys
import os
if sys.version_info < (2,2):
raise AssertionError("Python 2.2 or later required")
if sys.version_info < (2,3):
def enumerate(collection):
return zip(range(len(collection)), collection)
class Set:
def __init__(self, seq=None):
self.values = {}
if seq:
for e in seq:
self.values[e] = None
def add(self, e):
self.values[e] = None
def discard(self, e):
if e in self.values.keys():
del(self.values[e])
def union(self, s):
ret = Set()
for e in self.values.keys():
ret.values[e] = None
for e in s.values.keys():
ret.values[e] = None
return ret
def issubset(self, other):
for e in self.values.keys():
if e not in other.values.keys():
return False
return True
def __nonzero__( self):
return len(self.values.keys())
def __contains__(self, e):
return e in self.values.keys()
def __iter__(self):
            return iter(self.values.keys())
if os.name != "java":
import array
def createByteArraySequence(seq):
return array.array('B', seq)
def createByteArrayZeros(howMany):
return array.array('B', [0] * howMany)
def concatArrays(a1, a2):
return a1+a2
def bytesToString(bytes):
return bytes.tostring()
def stringToBytes(s):
bytes = createByteArrayZeros(0)
bytes.fromstring(s)
return bytes
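    # Editor-added example (not in the original file): stringToBytes("ab")
    # returns array('B', [97, 98]) and bytesToString() is its inverse.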
import math
def numBits(n):
if n==0:
return 0
s = "%x" % n
return ((len(s)-1)*4) + \
{'0':0, '1':1, '2':2, '3':2,
'4':3, '5':3, '6':3, '7':3,
'8':4, '9':4, 'a':4, 'b':4,
'c':4, 'd':4, 'e':4, 'f':4,
}[s[0]]
return int(math.floor(math.log(n, 2))+1)
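    # Editor-added example (not in the original file): the hex-digit lookup
    # above gives numBits(255) == 8 and numBits(256) == 9.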
BaseException = Exception
import sys
import traceback
def formatExceptionTrace(e):
newStr = "".join(traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
return newStr
else:
#Jython 2.1 is missing lots of python 2.3 stuff,
#which we have to emulate here:
#NOTE: JYTHON SUPPORT NO LONGER WORKS, DUE TO USE OF GENERATORS.
    #THIS CODE IS LEFT IN SO THAT ONCE JYTHON UPDATES TO 2.2, IT HAS A
#CHANCE OF WORKING AGAIN.
import java
import jarray
def createByteArraySequence(seq):
if isinstance(seq, type("")): #If it's a string, convert
seq = [ord(c) for c in seq]
return jarray.array(seq, 'h') #use short instead of bytes, cause bytes are signed
def createByteArrayZeros(howMany):
return jarray.zeros(howMany, 'h') #use short instead of bytes, cause bytes are signed
def concatArrays(a1, a2):
l = list(a1)+list(a2)
return createByteArraySequence(l)
#WAY TOO SLOW - MUST BE REPLACED------------
def bytesToString(bytes):
return "".join([chr(b) for b in bytes])
def stringToBytes(s):
bytes = createByteArrayZeros(len(s))
for count, c in enumerate(s):
bytes[count] = ord(c)
return bytes
#WAY TOO SLOW - MUST BE REPLACED------------
def numBits(n):
if n==0:
return 0
n= 1L * n; #convert to long, if it isn't already
return n.__tojava__(java.math.BigInteger).bitLength()
#Adjust the string to an array of bytes
def stringToJavaByteArray(s):
bytes = jarray.zeros(len(s), 'b')
for count, c in enumerate(s):
x = ord(c)
if x >= 128: x -= 256
bytes[count] = x
return bytes
BaseException = java.lang.Exception
import sys
import traceback
def formatExceptionTrace(e):
newStr = "".join(traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
return newStr |
qnub/django-cms | refs/heads/develop | cms/south_migrations/0069_static_placeholder_permissions.py | 48 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.model_name)
user_ptr_name = '%s_ptr' % User._meta.object_name.lower()
class Migration(DataMigration):
def forwards(self, orm):
try:
ct = orm['contenttypes.ContentType'].objects.get(model='page', app_label='cms')
except orm['contenttypes.ContentType'].DoesNotExist:
ct = orm['contenttypes.ContentType'].objects.create(name='page', model='page', app_label='cms')
try:
perm = orm['auth.permission'].objects.get(codename='edit_static_placeholder')
except orm['auth.permission'].DoesNotExist:
perm = orm['auth.permission'].objects.create(content_type=ct, codename='edit_static_placeholder', name=u'Can edit static placeholders')
def backwards(self, orm):
pass
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.aliaspluginmodel': {
'Meta': {'object_name': 'AliasPluginModel', '_ormbases': ['cms.CMSPlugin']},
'alias_placeholder': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'alias_placeholder'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'plugin': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'alias_reference'", 'null': 'True', 'to': "orm['cms.CMSPlugin']"})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('tree_id', 'lft')", 'unique_together': "(('publisher_is_draft', 'application_namespace'), ('reverse_id', 'site', 'publisher_is_draft'))", 'object_name': 'Page'},
'application_namespace': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_home': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'languages': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'revision_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_pages'", 'to': u"orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'default': "'INHERIT'", 'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'xframe_options': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [u'auth.User']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_users'", 'to': u"orm['auth.User']"}),
u'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': [u'auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': u"orm['auth.User']"}),
u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'cms.placeholderreference': {
'Meta': {'object_name': 'PlaceholderReference', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'placeholder_ref': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'})
},
'cms.staticplaceholder': {
'Meta': {'unique_together': "(('code', 'site'),)", 'object_name': 'StaticPlaceholder'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'creation_method': ('django.db.models.fields.CharField', [], {'default': "'code'", 'max_length': '20', 'blank': 'True'}),
'dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'draft': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_draft'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'public': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_public'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)", 'object_name': 'Title'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [], {'max_length': '155', 'null': 'True', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Title']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cms.usersettings': {
'Meta': {'object_name': 'UserSettings'},
'clipboard': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_usersettings'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms']
symmetrical = True
|
sopier/django | refs/heads/master | django/contrib/gis/serializers/geojson.py | 275 | from __future__ import unicode_literals
from django.contrib.gis.gdal import HAS_GDAL
from django.core.serializers.base import (
SerializationError, SerializerDoesNotExist,
)
from django.core.serializers.json import Serializer as JSONSerializer
if HAS_GDAL:
from django.contrib.gis.gdal import CoordTransform, SpatialReference
class Serializer(JSONSerializer):
"""
Convert a queryset to GeoJSON, http://geojson.org/
"""
def _init_options(self):
super(Serializer, self)._init_options()
self.geometry_field = self.json_kwargs.pop('geometry_field', None)
self.srid = self.json_kwargs.pop('srid', 4326)
def start_serialization(self):
self._init_options()
self._cts = {} # cache of CoordTransform's
self.stream.write(
'{"type": "FeatureCollection", "crs": {"type": "name", "properties": {"name": "EPSG:%d"}},'
' "features": [' % self.srid)
def end_serialization(self):
self.stream.write(']}')
def start_object(self, obj):
super(Serializer, self).start_object(obj)
self._geometry = None
if self.geometry_field is None:
# Find the first declared geometry field
for field in obj._meta.fields:
if hasattr(field, 'geom_type'):
self.geometry_field = field.name
break
def get_dump_object(self, obj):
data = {
"type": "Feature",
"properties": self._current,
}
if self._geometry:
if self._geometry.srid != self.srid:
# If needed, transform the geometry in the srid of the global geojson srid
if not HAS_GDAL:
raise SerializationError(
'Unable to convert geometry to SRID %s when GDAL is not installed.' % self.srid
)
if self._geometry.srid not in self._cts:
srs = SpatialReference(self.srid)
self._cts[self._geometry.srid] = CoordTransform(self._geometry.srs, srs)
self._geometry.transform(self._cts[self._geometry.srid])
data["geometry"] = eval(self._geometry.geojson)
else:
data["geometry"] = None
return data
def handle_field(self, obj, field):
if field.name == self.geometry_field:
self._geometry = field.value_from_object(obj)
else:
super(Serializer, self).handle_field(obj, field)
class Deserializer(object):
def __init__(self, *args, **kwargs):
raise SerializerDoesNotExist("geojson is a serialization-only serializer")
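def _example_serialize(queryset):
    """Editor-added sketch (not part of the original module): serialize a
    GeoDjango queryset to GeoJSON through Django's serializer framework.
    The ``geometry_field`` and ``fields`` values are illustrative assumptions
    about the model being serialized."""
    from django.core.serializers import serialize
    return serialize("geojson", queryset,
                     geometry_field="point", fields=("name",))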
|
ECESeniorDesign/lazy_record | refs/heads/master | lazy_record/repo.py | 1 | import re
import sqlite3
from itertools import chain
from inflector import Inflector, English
inflector = Inflector(English)
class Invalid(Exception):
pass
class Repo(object):
"""
Wrapper object around the database.
"""
db = None
def __init__(self, table_name):
"""
Instantiates a Repo object for the passed +table_name+ for adding,
updating, or destroying records in that table.
"""
self.table_name = table_name
self.where_clause = ""
self.where_values = []
self.inner_joins = []
self.order_clause = ""
self.group_clause = ""
self.having_clause = ""
self.having_values = []
self.limit_value = []
def where(self, custom_restrictions=[], **restrictions):
"""
Analog to SQL "WHERE". Does not perform a query until `select` is
called. Returns a repo object. Options selected through keyword
        arguments are assumed to use == unless the value is a list, tuple, or
dictionary. List or tuple values translate to an SQL `IN` over those
values, and a dictionary looks up under a different table when joined.
ex)
>>> Repo("foos").where(id=11).select("*")
SELECT foos.* FROM foos WHERE foos.id == 11
>>> Repo("foos").where([("id > ?", 12)]).select("*")
SELECT foos.* FROM foos WHERE foos.id > 12
>>> Repo("foos").where(id=[1,2,3]).select("*")
SELECT foos.* FROM foos WHERE foos.id IN (1, 2, 3)
"""
# Generate the SQL pieces and the relevant values
standard_names, standard_values = self._standard_items(restrictions)
custom_names, custom_values = self._custom_items(custom_restrictions)
in_names, in_values = self._in_items(restrictions)
query_names = standard_names + custom_names + in_names
# Stitch them into a clause with values
if query_names:
self.where_values = standard_values + custom_values + in_values
self.where_clause = "where {query} ".format(
query=" and ".join(query_names))
return self
def _in_items(self, restrictions):
"""Generate argument pairs for queries like where(id=[1, 2])"""
def build_in(table, name, value):
return "{}.{} IN ({})".format(table, name,
", ".join(["?"] * len(value)))
in_items = self._build_where(restrictions, for_in=True)
names = [build_in(*restriction) for restriction in in_items]
values = list(chain(*[item[2] for item in in_items]))
return (names, values)
def _custom_items(self, restrictions):
"""Generate argument pairs for queries like where("id > ?", 7)"""
def scope_name(query, table):
# The first entry in the query is the column
# If the column already has a ".", that means that the table has
# already been chosen
for splitter in (" and ", " or "):
                split_query = re.split(splitter, query, flags=re.IGNORECASE)
query = splitter.join("{}.{}".format(table, entry)
if "." not in entry else entry
for entry in split_query)
return query
names = [scope_name(restriction[0], self.table_name)
for restriction in restrictions]
values = list(chain(
*[restriction[1:] for restriction in restrictions]))
return (names, values)
def _standard_items(self, restrictions):
"""Generate argument pairs for queries like where(id=2)"""
standard_items = self._build_where(restrictions, for_in=False)
names = ["{}.{} == ?".format(pair[0], pair[1])
for pair in standard_items]
values = [item[2] for item in standard_items]
return (names, values)
def _build_where(self, where_query, for_in):
# Recursively loops through the where query to produce a list of
# 3-tuples that contain the (table name, column, value)
def builder(where_dict, default_table, for_in):
for key, value in where_dict.items():
use_in = type(value) in (tuple, list)
if type(value) is dict:
for entry in builder(value, key, for_in):
yield entry
elif (use_in and for_in or not (use_in or for_in)):
yield (default_table, key, value)
return list(builder(where_query, self.table_name, for_in))
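        # Editor-added example (comment only, not in the original source):
        # for where_query={"id": 2, "bars": {"name": ["a", "b"]}} on a
        # Repo("foos"), this returns [("foos", "id", 2)] when for_in is False
        # and [("bars", "name", ["a", "b"])] when for_in is True.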
def inner_join(self, *joiners):
"""
Analog to SQL "INNER JOIN". +joiners+ is a list with entries of the
form:
{
'table': <table_name>,
'on': [<foreign_key>, <local_id>]
}
Example:
>>> Repo('bs').inner_join(
{'table': 'cs', on: ['b_id', 'id']}).select("*")
SELECT bs.* FROM bs INNER JOIN cs ON cs.b_id == bs.id
"""
def inner_joins(js, current_table):
for joiner in js:
yield (((current_table, joiner['on'][1]),
(joiner['table'], joiner['on'][0])))
current_table = joiner['table']
self.inner_joins = list(inner_joins(joiners, self.table_name))
return self
def order_by(self, **kwargs):
"""
Analog to SQL "ORDER BY". +kwargs+ should only contain one item.
examples)
NO: repo.order_by()
NO: repo.order_by(id="desc", name="asc")
YES: repo.order_by(id="asc)
"""
if kwargs:
col, order = kwargs.popitem()
self.order_clause = "order by {col} {order} ".format(
col=col, order=order)
return self
def group_by(self, column):
if column:
self.group_clause = "GROUP BY {} ".format(column)
return self
def having(self, conditions):
names = [condition[0] for condition in conditions]
self.having_values = list(chain(
*[condition[1:] for condition in conditions]))
self.having_clause = "HAVING {query} ".format(
query=" and ".join(names))
return self
def limit(self, count):
"""Limit number of returned rows."""
if count == 0:
raise Invalid("Cannot limit to 0 records.")
self.limit_value = [count]
return self
@property
def join_clause(self):
# Internal use only, but the API should be stable, except for when we
# add support for multi-level joins
return "".join(("inner join {foreign_table} on "
"{foreign_table}.{foreign_on} == "
"{local_table}.{local_on} ").format(
foreign_table=inner_join[1][0],
foreign_on=inner_join[1][1],
local_table=inner_join[0][0],
local_on=inner_join[0][1],
) for inner_join in self.inner_joins)
@property
def limit_clause(self):
if self.limit_value != []:
return "LIMIT ? "
else:
return ""
def select(self, *attributes):
"""
Select the passed +attributes+ from the table, subject to the
restrictions provided by the other methods in this class.
ex)
>>> Repo("foos").select("name", "id")
SELECT foos.name, foos.id FROM foos
"""
namespaced_attributes = [
"{table}.{attr}".format(table=self.table_name, attr=attr)
for attr in attributes
]
cmd = ('select {attrs} from {table} '
'{join_clause}{where_clause}{order_clause}'
'{group_clause}{having_clause}{limit_clause}').format(
table=self.table_name,
attrs=", ".join(namespaced_attributes),
where_clause=self.where_clause,
join_clause=self.join_clause,
order_clause=self.order_clause,
group_clause=self.group_clause,
having_clause=self.having_clause,
limit_clause=self.limit_clause,
).rstrip()
return Repo.db.execute(cmd, self.where_values + self.having_values + \
self.limit_value)
def count(self):
"""
Count the number of records in the table, subject to the query.
"""
cmd = ("select COUNT(*) from {table} "
"{join_clause}{where_clause}{order_clause}").format(
table=self.table_name,
where_clause=self.where_clause,
join_clause=self.join_clause,
order_clause=self.order_clause).rstrip()
return Repo.db.execute(cmd, self.where_values)
def insert(self, **data):
"""
Insert the passed +data+ into the table. Raises Invalid if a where
clause is present (i.e. no INSERT INTO table WHERE)
"""
if self.where_clause:
raise Invalid("Cannot insert with 'where' clause.")
# Ensure that order is preserved
data = data.items()
cmd = "insert into {table} ({attrs}) values ({values})".format(
table=self.table_name,
attrs=", ".join(entry[0] for entry in data),
values=", ".join(["?"] * len(data)),
)
handle = Repo.db.execute(cmd, [entry[1] for entry in data])
# Return the id of the added row
return handle.lastrowid
def update(self, **data):
"""
Update records in the table with +data+. Often combined with `where`,
as it acts on all records in the table unless restricted.
ex)
>>> Repo("foos").update(name="bar")
UPDATE foos SET name = "bar"
"""
data = data.items()
update_command_arg = ", ".join("{} = ?".format(entry[0])
for entry in data)
cmd = "update {table} set {update_command_arg} {where_clause}".format(
update_command_arg=update_command_arg,
where_clause=self.where_clause,
table=self.table_name).rstrip()
Repo.db.execute(cmd, [entry[1] for entry in data] + self.where_values)
def delete(self):
"""
Remove entries from the table. Often combined with `where`, as it acts
on all records in the table unless restricted.
"""
cmd = "delete from {table} {where_clause}".format(
table=self.table_name,
where_clause=self.where_clause
).rstrip()
Repo.db.execute(cmd, self.where_values)
@staticmethod
def table_name(model):
"""
Get a model's table name. (e.g. MyModel => "my_models")
"""
return inflector.tableize(model.__name__)
@classmethod
def connect_db(Repo, database=":memory:"):
"""
Connect Repo to a database with path +database+ so all instances can
interact with the database.
"""
Repo.db = sqlite3.connect(database,
detect_types=sqlite3.PARSE_DECLTYPES)
return Repo.db
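# Editor-added illustration (not part of the original module): a never-called
# sketch of the intended chaining style. It assumes a `foos` table with `id`
# and `name` columns already exists in the connected database.
def _example_repo_usage():
    Repo.connect_db()  # defaults to an in-memory SQLite database
    Repo("foos").insert(name="bar")
    cursor = Repo("foos").where(name="bar").order_by(id="asc").select("id", "name")
    return cursor.fetchall()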
|
jart/tensorflow | refs/heads/master | tensorflow/tools/git/gen_git_source.py | 16 | #!/usr/bin/env python
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Help include git hash in tensorflow bazel build.
This creates symlinks from the internal git repository directory so
that the build system can see changes in the version state. We also
remember what branch git was on so when the branch changes we can
detect that the ref file is no longer correct (so we can suggest users
run ./configure again).
NOTE: this script is only used in opensource.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
import subprocess
import shutil
def parse_branch_ref(filename):
"""Given a filename of a .git/HEAD file return ref path.
In particular, if git is in detached head state, this will
return None. If git is in attached head, it will return
the branch reference. E.g. if on 'master', the HEAD will
contain 'ref: refs/heads/master' so 'refs/heads/master'
will be returned.
Example: parse_branch_ref(".git/HEAD")
Args:
filename: file to treat as a git HEAD file
Returns:
None if detached head, otherwise ref subpath
Raises:
RuntimeError: if the HEAD file is unparseable.
"""
data = open(filename).read().strip()
items = data.split(" ")
if len(items) == 1:
return None
elif len(items) == 2 and items[0] == "ref:":
return items[1].strip()
else:
raise RuntimeError("Git directory has unparseable HEAD")
def configure(src_base_path, gen_path, debug=False):
"""Configure `src_base_path` to embed git hashes if available."""
# TODO(aselle): No files generated or symlinked here are deleted by
# the build system. I don't know of a way to do it in bazel. It
# should only be a problem if somebody moves a sandbox directory
# without running ./configure again.
git_path = os.path.join(src_base_path, ".git")
# Remove and recreate the path
if os.path.exists(gen_path):
if os.path.isdir(gen_path):
try:
shutil.rmtree(gen_path)
except OSError:
raise RuntimeError("Cannot delete directory %s due to permission "
"error, inspect and remove manually" % gen_path)
else:
raise RuntimeError("Cannot delete non-directory %s, inspect ",
"and remove manually" % gen_path)
os.makedirs(gen_path)
if not os.path.isdir(gen_path):
raise RuntimeError("gen_git_source.py: Failed to create dir")
# file that specifies what the state of the git repo is
spec = {}
# value file names will be mapped to the keys
link_map = {"head": None, "branch_ref": None}
if not os.path.isdir(git_path):
# No git directory
spec["git"] = False
open(os.path.join(gen_path, "head"), "w").write("")
open(os.path.join(gen_path, "branch_ref"), "w").write("")
else:
# Git directory, possibly detached or attached
spec["git"] = True
spec["path"] = src_base_path
git_head_path = os.path.join(git_path, "HEAD")
spec["branch"] = parse_branch_ref(git_head_path)
link_map["head"] = git_head_path
if spec["branch"] is not None:
# attached method
link_map["branch_ref"] = os.path.join(git_path, *
os.path.split(spec["branch"]))
# Create symlinks or dummy files
for target, src in link_map.items():
if src is None:
open(os.path.join(gen_path, target), "w").write("")
elif not os.path.exists(src):
# Git repo is configured in a way we don't support such as having
# packed refs. Even though in a git repo, tf.__git_version__ will not
# be accurate.
# TODO(mikecase): Support grabbing git info when using packed refs.
open(os.path.join(gen_path, target), "w").write("")
spec["git"] = False
else:
try:
# In python 3.5, symlink function exists even on Windows. But requires
# Windows Admin privileges, otherwise an OSError will be thrown.
if hasattr(os, "symlink"):
os.symlink(src, os.path.join(gen_path, target))
else:
shutil.copy2(src, os.path.join(gen_path, target))
except OSError:
shutil.copy2(src, os.path.join(gen_path, target))
json.dump(spec, open(os.path.join(gen_path, "spec.json"), "w"), indent=2)
if debug:
print("gen_git_source.py: list %s" % gen_path)
print("gen_git_source.py: %s" + repr(os.listdir(gen_path)))
print("gen_git_source.py: spec is %r" % spec)
def get_git_version(git_base_path, git_tag_override):
"""Get the git version from the repository.
This function runs `git describe ...` in the path given as `git_base_path`.
This will return a string of the form:
<base-tag>-<number of commits since tag>-<shortened sha hash>
For example, 'v0.10.0-1585-gbb717a6' means v0.10.0 was the last tag when
compiled. 1585 commits are after that commit tag, and we can get back to this
version by running `git checkout gbb717a6`.
Args:
git_base_path: where the .git directory is located
git_tag_override: Override the value for the git tag. This is useful for
releases where we want to build the release before the git tag is
created.
Returns:
A bytestring representing the git version
"""
unknown_label = b"unknown"
try:
val = bytes(subprocess.check_output([
"git", str("--git-dir=%s/.git" % git_base_path),
str("--work-tree=" + git_base_path), "describe", "--long", "--tags"
]).strip())
if git_tag_override and val:
split_val = val.split("-")
if len(split_val) < 3:
raise Exception(
("Expected git version in format 'TAG-COMMITS AFTER TAG-HASH' "
"but got '%s'") % val)
# There might be "-" in the tag name. But we can be sure that the final
# two "-" are those inserted by the git describe command.
abbrev_commit = split_val[-1]
val = bytes(
"-".join([git_tag_override, "0", abbrev_commit]))
return val if val else unknown_label
except (subprocess.CalledProcessError, OSError):
return unknown_label
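# Editor's note (comment added for clarity, not in the original file): with
# git_tag_override="v1.9.0-rc0", a describe result such as
# "v1.8.0-5019-g75c8ea4" is rewritten above to "v1.9.0-rc0-0-g75c8ea4".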
def write_version_info(filename, git_version):
"""Write a c file that defines the version functions.
Args:
filename: filename to write to.
git_version: the result of a git describe.
"""
if b"\"" in git_version or b"\\" in git_version:
git_version = "git_version_is_invalid" # do not cause build to fail!
contents = """/* Generated by gen_git_source.py */
#include <string>
const char* tf_git_version() {return "%s";}
const char* tf_compiler_version() {
#ifdef _MSC_VER
#define STRINGIFY(x) #x
#define TOSTRING(x) STRINGIFY(x)
return "MSVC " TOSTRING(_MSC_FULL_VER);
#else
return __VERSION__;
#endif
}
const int tf_cxx11_abi_flag() {
#ifdef _GLIBCXX_USE_CXX11_ABI
return _GLIBCXX_USE_CXX11_ABI;
#else
return 0;
#endif
}
const int tf_monolithic_build() {
#ifdef TENSORFLOW_MONOLITHIC_BUILD
return 1;
#else
return 0;
#endif
}
""" % git_version
open(filename, "w").write(contents)
def generate(arglist, git_tag_override=None):
"""Generate version_info.cc as given `destination_file`.
Args:
arglist: should be a sequence that contains
spec, head_symlink, ref_symlink, destination_file.
`destination_file` is the filename where version_info.cc will be written
`spec` is a filename where the file contains a JSON dictionary
'git' bool that is true if the source is in a git repo
'path' base path of the source code
'branch' the name of the ref specification of the current branch/tag
`head_symlink` is a filename to HEAD that is cross-referenced against
what is contained in the json branch designation.
`ref_symlink` is unused in this script but passed, because the build
system uses that file to detect when commits happen.
git_tag_override: Override the value for the git tag. This is useful for
releases where we want to build the release before the git tag is
created.
Raises:
RuntimeError: If ./configure needs to be run, RuntimeError will be raised.
"""
# unused ref_symlink arg
spec, head_symlink, _, dest_file = arglist
data = json.load(open(spec))
git_version = None
if not data["git"]:
git_version = b"unknown"
else:
old_branch = data["branch"]
new_branch = parse_branch_ref(head_symlink)
if new_branch != old_branch:
raise RuntimeError(
"Run ./configure again, branch was '%s' but is now '%s'" %
(old_branch, new_branch))
git_version = get_git_version(data["path"], git_tag_override)
write_version_info(dest_file, git_version)
def raw_generate(output_file, source_dir, git_tag_override=None):
"""Simple generator used for cmake/make build systems.
This does not create any symlinks. It requires the build system
to build unconditionally.
Args:
output_file: Output filename for the version info cc
source_dir: Base path of the source code
git_tag_override: Override the value for the git tag. This is useful for
releases where we want to build the release before the git tag is
created.
"""
git_version = get_git_version(source_dir, git_tag_override)
write_version_info(output_file, git_version)
parser = argparse.ArgumentParser(description="""Git hash injection into bazel.
If used with --configure <path> will search for git directory and put symlinks
into source so that a bazel genrule can call --generate""")
parser.add_argument(
"--debug",
type=bool,
help="print debugging information about paths",
default=False)
parser.add_argument(
"--configure", type=str,
help="Path to configure as a git repo dependency tracking sentinel")
parser.add_argument(
"--gen_root_path", type=str,
help="Root path to place generated git files (created by --configure).")
parser.add_argument(
"--git_tag_override", type=str,
help="Override git tag value in the __git_version__ string. Useful when "
"creating release builds before the release tag is created.")
parser.add_argument(
"--generate",
type=str,
help="Generate given spec-file, HEAD-symlink-file, ref-symlink-file",
nargs="+")
parser.add_argument(
"--raw_generate",
type=str,
help="Generate version_info.cc (simpler version used for cmake/make)")
parser.add_argument(
"--source_dir",
type=str,
help="Base path of the source code (used for cmake/make)")
args = parser.parse_args()
if args.configure is not None:
if args.gen_root_path is None:
raise RuntimeError("Must pass --gen_root_path arg when running --configure")
configure(args.configure, args.gen_root_path, debug=args.debug)
elif args.generate is not None:
generate(args.generate, args.git_tag_override)
elif args.raw_generate is not None:
source_path = "."
if args.source_dir is not None:
source_path = args.source_dir
raw_generate(args.raw_generate, source_path, args.git_tag_override)
else:
raise RuntimeError("--configure or --generate or --raw_generate "
"must be used")
|
eaas-framework/virtualbox | refs/heads/master | src/VBox/GuestHost/OpenGL/error/error.py | 22 | # Copyright (c) 2001, Stanford University
# All rights reserved.
#
# See the file LICENSE.txt for information on redistributing this software.
import sys
import apiutil
apiutil.CopyrightC()
print """#include <stdio.h>
#include "cr_error.h"
#include "cr_spu.h"
#include "state/cr_statetypes.h"
#if defined(WINDOWS)
#define ERROR_APIENTRY __stdcall
#else
#define ERROR_APIENTRY
#endif
#define ERROR_UNUSED(x) ((void)x)"""
keys = apiutil.GetDispatchedFunctions(sys.argv[1]+"/APIspec.txt")
for func_name in keys:
return_type = apiutil.ReturnType(func_name)
params = apiutil.Parameters(func_name)
print '\nstatic %s ERROR_APIENTRY error%s( %s )' % (return_type, func_name, apiutil.MakeDeclarationString(params ))
print '{'
# Handle the void parameter list
for (name, type, vecSize) in params:
print '\tERROR_UNUSED(%s);' % name
print '\tcrError( "ERROR SPU: Unsupported function gl%s called!" );' % func_name
if return_type != "void":
print '\treturn (%s)0;' % return_type
print '}'
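# Editor's note (comment added for clarity, not in the original script): for a
# dispatched function such as "Begin" the loop above emits, roughly,
#   static void ERROR_APIENTRY errorBegin( GLenum mode )
#   { ERROR_UNUSED(mode); crError( "ERROR SPU: Unsupported function glBegin called!" ); }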
print 'SPUNamedFunctionTable _cr_error_table[] = {'
for index in range(len(keys)):
func_name = keys[index]
print '\t{ "%s", (SPUGenericFunction) error%s },' % (func_name, func_name )
print '\t{ NULL, NULL }'
print '};'
|
vnc-biz/openerp-server | refs/heads/master | bin/openerp-server.py | 6 | #! /usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import sys
if __name__ == "__main__":
print '-' * 70
print "DEPRECATED: you are starting the OpenERP server with its old path,"
print "please use the new executable (available in the parent directory)."
print '-' * 70
# Change to the parent directory ...
os.chdir(os.path.normpath(os.path.dirname(__file__)))
os.chdir('..')
# ... and execute the new executable.
os.execv('openerp-server', sys.argv)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
dannykopping/mysql-utilities | refs/heads/master | mysql-test/t/diff_sql_views.py | 1 | #
# Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import os
import test_sql_template
from mysql.utilities.exception import MUTLibError, UtilDBError
_TEST_VIEW_TABLE = "CREATE TABLE `diff_view`.`t1` (a int)"
# (comment, def1, def2, expected result)
_VIEW_TESTS = [
("View definition",
"CREATE VIEW diff_view.v1 as SELECT 1;",
"CREATE VIEW diff_view.v1 as SELECT 2;",
0),
("View definer",
"CREATE definer='root'@'localhost' VIEW diff_view.v1 as SELECT 3;",
"CREATE definer='joe'@'otherhost' VIEW diff_view.v1 as SELECT 3;",
0),
("View security",
"CREATE SQL SECURITY DEFINER VIEW diff_view.v1 as SELECT 4;",
"CREATE SQL SECURITY INVOKER VIEW diff_view.v1 as SELECT 4;",
0),
("View check option",
"CREATE VIEW diff_view.v1 as SELECT * FROM `diff_view`.`t1` " + \
"WHERE a < 11 WITH CASCADED CHECK OPTION;",
"CREATE VIEW diff_view.v1 as SELECT * FROM `diff_view`.`t1` " + \
"WHERE a < 11;",
0),
]
class test(test_sql_template.test):
"""test mysqldiff --difftype=sql generation for views
This test uses the test_sql_template for testing views.
"""
def check_prerequisites(self):
return test_sql_template.test.check_prerequisites(self)
def setup(self):
test_object = {
'db1' : 'diff_view',
'db2' : 'diff_view',
'object_name' : 'v1',
'startup_cmds' : [_TEST_VIEW_TABLE],
'shutdown_cmds' : [],
}
for view in _VIEW_TESTS:
new_test_obj = test_object.copy()
new_test_obj['comment'] = view[0]
new_test_obj['server1_object'] = view[1]
new_test_obj['server2_object'] = view[2]
new_test_obj['expected_result'] = view[3]
self.test_objects.append(new_test_obj)
self.utility = 'mysqldiff.py'
return test_sql_template.test.setup(self)
def run(self):
return test_sql_template.test.run(self)
def get_result(self):
return test_sql_template.test.get_result(self)
def record(self):
return True # Not a comparative test
def cleanup(self):
try:
self.server1.exec_query(_DROP_VIEW_DB)
except:
pass
try:
self.server2.exec_query(_DROP_VIEW_DB)
except:
pass
return test_sql_template.test.cleanup(self)
|
avicizhu/Load-balancer | refs/heads/master | test.py | 7 | #! /usr/bin/env python
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
#
# Copyright (c) 2009 University of Washington
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import os
import sys
import time
import optparse
import subprocess
import threading
import Queue
import signal
import xml.dom.minidom
import shutil
import re
from utils import get_list_from_file
#
# XXX This should really be part of a waf command to list the configuration
# items relative to optional ns-3 pieces.
#
# A list of interesting configuration items in the waf configuration
# cache which we may be interested in when deciding on which examples
# to run and how to run them. These are set by waf during the
# configuration phase and the corresponding assignments are usually
# found in the associated subdirectory wscript files.
#
interesting_config_items = [
"NS3_ENABLED_MODULES",
"NS3_MODULE_PATH",
"NSC_ENABLED",
"ENABLE_REAL_TIME",
"ENABLE_THREADING",
"ENABLE_EXAMPLES",
"ENABLE_TESTS",
"EXAMPLE_DIRECTORIES",
"ENABLE_PYTHON_BINDINGS",
"ENABLE_CLICK",
"ENABLE_OPENFLOW",
"APPNAME",
"BUILD_PROFILE",
"VERSION",
"PYTHON",
]
NSC_ENABLED = False
ENABLE_REAL_TIME = False
ENABLE_THREADING = False
ENABLE_EXAMPLES = True
ENABLE_TESTS = True
ENABLE_CLICK = False
ENABLE_OPENFLOW = False
EXAMPLE_DIRECTORIES = []
APPNAME = ""
BUILD_PROFILE = ""
VERSION = ""
PYTHON = ""
#
# This will be given a prefix and a suffix when the waf config file is
# read.
#
test_runner_name = "test-runner"
#
# If the user has constrained us to run certain kinds of tests, we can tell waf
# to only build
#
core_kinds = ["bvt", "core", "system", "unit"]
#
# There are some special cases for test suites that kill valgrind. This is
# because NSC causes illegal instruction crashes when run under valgrind.
#
core_valgrind_skip_tests = [
"ns3-tcp-cwnd",
"nsc-tcp-loss",
"ns3-tcp-interoperability",
"routing-click",
]
#
# There are some special cases for test suites that fail when NSC is
# missing.
#
core_nsc_missing_skip_tests = [
"ns3-tcp-cwnd",
"nsc-tcp-loss",
"ns3-tcp-interoperability",
]
#
# Parse the examples-to-run file if it exists.
#
# This function adds any C++ examples or Python examples that are to be run
# to the lists in example_tests and python_tests, respectively.
#
def parse_examples_to_run_file(
examples_to_run_path,
cpp_executable_dir,
python_script_dir,
example_tests,
python_tests):
# Look for the examples-to-run file exists.
if os.path.exists(examples_to_run_path):
# Each tuple in the C++ list of examples to run contains
#
# (example_name, do_run, do_valgrind_run)
#
# where example_name is the executable to be run, do_run is a
# condition under which to run the example, and do_valgrind_run is
# a condition under which to run the example under valgrind. This
# is needed because NSC causes illegal instruction crashes with
# some tests when they are run under valgrind.
#
# Note that the two conditions are Python statements that
# can depend on waf configuration variables. For example,
#
# ("tcp-nsc-lfn", "NSC_ENABLED == True", "NSC_ENABLED == False"),
#
cpp_examples = get_list_from_file(examples_to_run_path, "cpp_examples")
for example_name, do_run, do_valgrind_run in cpp_examples:
            # Separate the example name from its arguments.
example_name_parts = example_name.split(' ', 1)
if len(example_name_parts) == 1:
example_name = example_name_parts[0]
example_arguments = ""
else:
example_name = example_name_parts[0]
example_arguments = example_name_parts[1]
# Add the proper prefix and suffix to the example name to
# match what is done in the wscript file.
example_name = "%s%s-%s-%s" % (APPNAME, VERSION, example_name, BUILD_PROFILE)
# Set the full path for the example.
example_path = os.path.join(cpp_executable_dir, example_name)
# Add all of the C++ examples that were built, i.e. found
# in the directory, to the list of C++ examples to run.
if os.path.exists(example_path):
# Add any arguments to the path.
if len(example_name_parts) != 1:
example_path = "%s %s" % (example_path, example_arguments)
# Add this example.
example_tests.append((example_path, do_run, do_valgrind_run))
# Each tuple in the Python list of examples to run contains
#
# (example_name, do_run)
#
# where example_name is the Python script to be run and
# do_run is a condition under which to run the example.
#
# Note that the condition is a Python statement that can
# depend on waf configuration variables. For example,
#
# ("realtime-udp-echo.py", "ENABLE_REAL_TIME == True"),
#
python_examples = get_list_from_file(examples_to_run_path, "python_examples")
for example_name, do_run in python_examples:
            # Separate the example name from its arguments.
example_name_parts = example_name.split(' ', 1)
if len(example_name_parts) == 1:
example_name = example_name_parts[0]
example_arguments = ""
else:
example_name = example_name_parts[0]
example_arguments = example_name_parts[1]
# Set the full path for the example.
example_path = os.path.join(python_script_dir, example_name)
# Add all of the Python examples that were found to the
# list of Python examples to run.
if os.path.exists(example_path):
# Add any arguments to the path.
if len(example_name_parts) != 1:
example_path = "%s %s" % (example_path, example_arguments)
# Add this example.
python_tests.append((example_path, do_run))
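# Editor's note (illustrative, not part of the original script): an
# examples-to-run file consumed by the parser above is expected to contain
# Python lists such as
#
#     cpp_examples = [
#         ("udp-echo", "True", "True"),
#         ("tcp-nsc-lfn", "NSC_ENABLED == True", "NSC_ENABLED == False"),
#     ]
#     python_examples = [
#         ("realtime-udp-echo.py", "ENABLE_REAL_TIME == True"),
#     ]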
#
# The test suites are going to want to output status. They are running
# concurrently. This means that unless we are careful, the output of
# the test suites will be interleaved. Rather than introducing a lock
# file that could unintentionally start serializing execution, we ask
# the tests to write their output to a temporary directory and then
# put together the final output file when we "join" the test tasks back
# to the main thread. In addition to this issue, the example programs
# often write lots and lots of trace files which we will just ignore.
# We put all of them into the temp directory as well, so they can be
# easily deleted.
#
TMP_OUTPUT_DIR = "testpy-output"
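#
# For example, a single run might leave behind something like the following
# (the timestamped directory name is illustrative):
#
#     testpy-output/2010-01-12-22-47-50-CUT/results.xml
#     testpy-output/2010-01-12-22-47-50-CUT/<suite-name>.xml
#     testpy-output/2010-01-12-22-47-50-CUT/<unchecked trace files>
#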
def read_test(test):
result = test.find('Result').text
name = test.find('Name').text
if not test.find('Time') is None:
time_real = test.find('Time').get('real')
else:
time_real = ''
return (result, name, time_real)
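#
# For reference, read_test() expects elements shaped like the XML that the
# test-runner and this script append to the results file, roughly:
#
#     <Test>
#       <Name>some-test-suite</Name>
#       <Result>PASS</Result>
#       <Time real="0.123"/>
#     </Test>
#
# (the name and time shown here are illustrative).
#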
#
# A simple example of writing a text file with a test result summary. It is
# expected that this output will be fine for developers looking for problems.
#
def node_to_text (test, f):
(result, name, time_real) = read_test(test)
output = "%s: Test Suite \"%s\" (%s)\n" % (result, name, time_real)
f.write(output)
for details in test.findall('FailureDetails'):
f.write(" Details:\n")
f.write(" Message: %s\n" % details.find('Message').text)
f.write(" Condition: %s\n" % details.find('Condition').text)
f.write(" Actual: %s\n" % details.find('Actual').text)
f.write(" Limit: %s\n" % details.find('Limit').text)
f.write(" File: %s\n" % details.find('File').text)
f.write(" Line: %s\n" % details.find('Line').text)
for child in test.findall('Test'):
node_to_text(child, f)
def translate_to_text(results_file, text_file):
f = open(text_file, 'w')
import xml.etree.ElementTree as ET
et = ET.parse (results_file)
for test in et.findall('Test'):
node_to_text (test, f)
for example in et.findall('Example'):
result = example.find('Result').text
name = example.find('Name').text
if not example.find('Time') is None:
time_real = example.find('Time').get('real')
else:
time_real = ''
output = "%s: Example \"%s\" (%s)\n" % (result, name, time_real)
f.write(output)
f.close()
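#
# A line of the resulting text file therefore looks something like the
# following (the suite name and time are illustrative):
#
#     PASS: Test Suite "some-test-suite" (0.020)
#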
#
# A simple example of writing an HTML file with a test result summary. It is
# expected that this will eventually be made prettier as time progresses and
# we have time to tweak it. This may end up being moved to a separate module
# since it will probably grow over time.
#
def translate_to_html(results_file, html_file):
f = open(html_file, 'w')
f.write("<html>\n")
f.write("<body>\n")
f.write("<center><h1>ns-3 Test Results</h1></center>\n")
#
# Read and parse the whole results file.
#
import xml.etree.ElementTree as ET
et = ET.parse(results_file)
#
# Iterate through the test suites
#
f.write("<h2>Test Suites</h2>\n")
for suite in et.findall('Test'):
#
# For each test suite, get its name, result and execution time info
#
(result, name, time) = read_test (suite)
#
# Print a level three header with the result, name and time. If the
# test suite passed, the header is printed in green. If the suite was
# skipped, print it in orange, otherwise assume something bad happened
# and print in red.
#
if result == "PASS":
f.write("<h3 style=\"color:green\">%s: %s (%s)</h3>\n" % (result, name, time))
elif result == "SKIP":
f.write("<h3 style=\"color:#ff6600\">%s: %s (%s)</h3>\n" % (result, name, time))
else:
f.write("<h3 style=\"color:red\">%s: %s (%s)</h3>\n" % (result, name, time))
#
# The test case information goes in a table.
#
f.write("<table border=\"1\">\n")
#
# The first column of the table has the heading Result
#
f.write("<th> Result </th>\n")
#
# If the suite crashed or is skipped, there is no further information, so just
        # declare a new table row with the result (CRASH or SKIP) in it. Looks like:
#
# +--------+
# | Result |
# +--------+
# | CRASH |
# +--------+
#
# Then go on to the next test suite. Valgrind and skipped errors look the same.
#
if result in ["CRASH", "SKIP", "VALGR"]:
f.write("<tr>\n")
if result == "SKIP":
f.write("<td style=\"color:#ff6600\">%s</td>\n" % result)
else:
f.write("<td style=\"color:red\">%s</td>\n" % result)
f.write("</tr>\n")
f.write("</table>\n")
continue
#
# If the suite didn't crash, we expect more information, so fill out
# the table heading row. Like,
#
# +--------+----------------+------+
# | Result | Test Case Name | Time |
# +--------+----------------+------+
#
f.write("<th>Test Case Name</th>\n")
f.write("<th> Time </th>\n")
#
# If the test case failed, we need to print out some failure details
# so extend the heading row again. Like,
#
# +--------+----------------+------+-----------------+
# | Result | Test Case Name | Time | Failure Details |
# +--------+----------------+------+-----------------+
#
if result == "FAIL":
f.write("<th>Failure Details</th>\n")
#
# Now iterate through all of the test cases.
#
for case in suite.findall('Test'):
#
# Get the name, result and timing information from xml to use in
# printing table below.
#
(result, name, time) = read_test(case)
#
# If the test case failed, we iterate through possibly multiple
# failure details
#
if result == "FAIL":
#
# There can be multiple failures for each test case. The first
# row always gets the result, name and timing information along
# with the failure details. Remaining failures don't duplicate
# this information but just get blanks for readability. Like,
#
# +--------+----------------+------+-----------------+
# | Result | Test Case Name | Time | Failure Details |
# +--------+----------------+------+-----------------+
# | FAIL | The name | time | It's busted |
# +--------+----------------+------+-----------------+
# | | | | Really broken |
# +--------+----------------+------+-----------------+
# | | | | Busted bad |
# +--------+----------------+------+-----------------+
#
first_row = True
for details in case.findall('FailureDetails'):
#
# Start a new row in the table for each possible Failure Detail
#
f.write("<tr>\n")
if first_row:
first_row = False
f.write("<td style=\"color:red\">%s</td>\n" % result)
f.write("<td>%s</td>\n" % name)
f.write("<td>%s</td>\n" % time)
else:
f.write("<td></td>\n")
f.write("<td></td>\n")
f.write("<td></td>\n")
f.write("<td>")
f.write("<b>Message: </b>%s, " % details.find('Message').text)
f.write("<b>Condition: </b>%s, " % details.find('Condition').text)
f.write("<b>Actual: </b>%s, " % details.find('Actual').text)
f.write("<b>Limit: </b>%s, " % details.find('Limit').text)
f.write("<b>File: </b>%s, " % details.find('File').text)
f.write("<b>Line: </b>%s" % details.find('Line').text)
f.write("</td>\n")
#
# End the table row
#
f.write("</td>\n")
else:
#
# If this particular test case passed, then we just print the PASS
# result in green, followed by the test case name and its execution
# time information. These go off in <td> ... </td> table data.
# The details table entry is left blank.
#
# +--------+----------------+------+---------+
# | Result | Test Case Name | Time | Details |
# +--------+----------------+------+---------+
# | PASS | The name | time | |
# +--------+----------------+------+---------+
#
f.write("<tr>\n")
f.write("<td style=\"color:green\">%s</td>\n" % result)
f.write("<td>%s</td>\n" % name)
f.write("<td>%s</td>\n" % time)
f.write("<td></td>\n")
f.write("</tr>\n")
#
# All of the rows are written, so we need to end the table.
#
f.write("</table>\n")
#
# That's it for all of the test suites. Now we have to do something about
# our examples.
#
f.write("<h2>Examples</h2>\n")
#
# Example status is rendered in a table just like the suites.
#
f.write("<table border=\"1\">\n")
#
# The table headings look like,
#
# +--------+--------------+--------------+
# | Result | Example Name | Elapsed Time |
# +--------+--------------+--------------+
#
f.write("<th> Result </th>\n")
f.write("<th>Example Name</th>\n")
f.write("<th>Elapsed Time</th>\n")
#
# Now iterate through all of the examples
#
for example in et.findall("Example"):
#
# Start a new row for each example
#
f.write("<tr>\n")
#
# Get the result and name of the example in question
#
(result, name, time) = read_test(example)
#
# If the example either failed or crashed, print its result status
# in red; otherwise green. This goes in a <td> ... </td> table data
#
if result == "PASS":
f.write("<td style=\"color:green\">%s</td>\n" % result)
elif result == "SKIP":
f.write("<td style=\"color:#ff6600\">%s</fd>\n" % result)
else:
f.write("<td style=\"color:red\">%s</td>\n" % result)
#
# Write the example name as a new tag data.
#
f.write("<td>%s</td>\n" % name)
#
# Write the elapsed time as a new tag data.
#
f.write("<td>%s</td>\n" % time)
#
# That's it for the current example, so terminate the row.
#
f.write("</tr>\n")
#
# That's it for the table of examples, so terminate the table.
#
f.write("</table>\n")
#
# And that's it for the report, so finish up.
#
f.write("</body>\n")
f.write("</html>\n")
f.close()
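#
# Both translators are driven from run_tests() once all of the jobs have
# completed, along the lines of (the output file names are illustrative):
#
#     translate_to_text(xml_results_file, "results.txt")
#     translate_to_html(xml_results_file, "results.html")
#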
#
# Python Control-C handling is broken in the presence of multiple threads.
# Signals get delivered to the runnable/running thread by default and if
# it is blocked, the signal is simply ignored. So we hook sigint and set
# a global variable telling the system to shut down gracefully.
#
thread_exit = False
def sigint_hook(signal, frame):
global thread_exit
thread_exit = True
return 0
#
# In general, the build process itself naturally takes care of figuring out
# which tests are built into the test runner. For example, if waf configure
# determines that ENABLE_EMU is false due to some missing dependency,
# the tests for the emu net device simply will not be built and will
# therefore not be included in the built test runner.
#
# Examples, however, are a different story. In that case, we are just given
# a list of examples that could be run. Instead of just failing, for example,
# nsc-tcp-zoo if NSC is not present, we look into the waf saved configuration
# for relevant configuration items.
#
# XXX This function pokes around in the waf internal state file. To be a
# little less hacky, we should add a command to waf to return this info
# and use that result.
#
def read_waf_config():
for line in open(".lock-waf_" + sys.platform + "_build", "rt"):
if line.startswith("top_dir ="):
key, val = line.split('=')
top_dir = eval(val.strip())
if line.startswith("out_dir ="):
key, val = line.split('=')
out_dir = eval(val.strip())
global NS3_BASEDIR
NS3_BASEDIR = top_dir
global NS3_BUILDDIR
NS3_BUILDDIR = out_dir
for line in open("%s/c4che/_cache.py" % out_dir).readlines():
for item in interesting_config_items:
if line.startswith(item):
exec(line, globals())
if options.verbose:
for item in interesting_config_items:
print "%s ==" % item, eval(item)
#
# It seems pointless to fork a process to run waf to fork a process to run
# the test runner, so we just run the test runner directly. The main thing
# that waf would do for us would be to sort out the shared library path but
# we can deal with that easily and do so here.
#
# There can be many different ns-3 repositories on a system, and each has
# its own shared libraries, so ns-3 doesn't hardcode a shared library search
# path -- it is cooked up dynamically, so we do that too.
#
def make_paths():
have_DYLD_LIBRARY_PATH = False
have_LD_LIBRARY_PATH = False
have_PATH = False
have_PYTHONPATH = False
keys = os.environ.keys()
for key in keys:
if key == "DYLD_LIBRARY_PATH":
have_DYLD_LIBRARY_PATH = True
if key == "LD_LIBRARY_PATH":
have_LD_LIBRARY_PATH = True
if key == "PATH":
have_PATH = True
if key == "PYTHONPATH":
have_PYTHONPATH = True
    pypath = os.path.join (NS3_BUILDDIR, "bindings", "python")
if not have_PYTHONPATH:
os.environ["PYTHONPATH"] = pypath
else:
os.environ["PYTHONPATH"] += ":" + pypath
if options.verbose:
print "os.environ[\"PYTHONPATH\"] == %s" % os.environ["PYTHONPATH"]
if sys.platform == "darwin":
if not have_DYLD_LIBRARY_PATH:
os.environ["DYLD_LIBRARY_PATH"] = ""
for path in NS3_MODULE_PATH:
os.environ["DYLD_LIBRARY_PATH"] += ":" + path
if options.verbose:
print "os.environ[\"DYLD_LIBRARY_PATH\"] == %s" % os.environ["DYLD_LIBRARY_PATH"]
elif sys.platform == "win32":
if not have_PATH:
os.environ["PATH"] = ""
for path in NS3_MODULE_PATH:
os.environ["PATH"] += ';' + path
if options.verbose:
print "os.environ[\"PATH\"] == %s" % os.environ["PATH"]
elif sys.platform == "cygwin":
if not have_PATH:
os.environ["PATH"] = ""
for path in NS3_MODULE_PATH:
os.environ["PATH"] += ":" + path
if options.verbose:
print "os.environ[\"PATH\"] == %s" % os.environ["PATH"]
else:
if not have_LD_LIBRARY_PATH:
os.environ["LD_LIBRARY_PATH"] = ""
for path in NS3_MODULE_PATH:
os.environ["LD_LIBRARY_PATH"] += ":" + path
if options.verbose:
print "os.environ[\"LD_LIBRARY_PATH\"] == %s" % os.environ["LD_LIBRARY_PATH"]
#
# Short note on generating suppressions:
#
# See the valgrind documentation for a description of suppressions. The easiest
# way to generate a suppression expression is by using the valgrind
# --gen-suppressions option. To do that you have to figure out how to run the
# test in question.
#
# If you do "test.py -v -g -s <suitename> then test.py will output most of what
# you need. For example, if you are getting a valgrind error in the
# devices-mesh-dot11s-regression test suite, you can run:
#
# ./test.py -v -g -s devices-mesh-dot11s-regression
#
# You should see in the verbose output something that looks like:
#
# Synchronously execute valgrind --suppressions=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/testpy.supp
# --leak-check=full --error-exitcode=2 /home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/build/debug/utils/ns3-dev-test-runner-debug
# --suite=devices-mesh-dot11s-regression --basedir=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev
# --tempdir=testpy-output/2010-01-12-22-47-50-CUT
# --out=testpy-output/2010-01-12-22-47-50-CUT/devices-mesh-dot11s-regression.xml
#
# You need to pull out the useful pieces, and so could run the following to
# reproduce your error:
#
# valgrind --suppressions=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/testpy.supp
# --leak-check=full --error-exitcode=2 /home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/build/debug/utils/ns3-dev-test-runner-debug
# --suite=devices-mesh-dot11s-regression --basedir=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev
# --tempdir=testpy-output
#
# Hint: Use the first part of the command as is, and point the "tempdir" to
# somewhere real. You don't need to specify an "out" file.
#
# When you run the above command you should see your valgrind error. The
# suppression expression(s) can be generated by adding the --gen-suppressions=yes
# option to valgrind. Use something like:
#
# valgrind --gen-suppressions=yes --suppressions=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/testpy.supp
# --leak-check=full --error-exitcode=2 /home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/build/debug/utils/ns3-dev-test-runner-debug
# --suite=devices-mesh-dot11s-regression --basedir=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev
# --tempdir=testpy-output
#
# Now when valgrind detects an error it will ask:
#
# ==27235== ---- Print suppression ? --- [Return/N/n/Y/y/C/c] ----
#
# to which you just enter 'y'<ret>.
#
# You will be provided with a suppression expression that looks something like
# the following:
# {
# <insert_a_suppression_name_here>
# Memcheck:Addr8
# fun:_ZN3ns36dot11s15HwmpProtocolMac8SendPreqESt6vectorINS0_6IePreqESaIS3_EE
# fun:_ZN3ns36dot11s15HwmpProtocolMac10SendMyPreqEv
# fun:_ZN3ns36dot11s15HwmpProtocolMac18RequestDestinationENS_12Mac48AddressEjj
# ...
# the rest of the stack frame
# ...
# }
#
# You need to add a suppression name which will only be printed out by valgrind in
# verbose mode (but it needs to be there in any case). The entire stack frame is
# shown to completely characterize the error, but in most cases you won't need
# all of that info. For example, if you want to turn off all errors that happen
# when the function (fun:) is called, you can just delete the rest of the stack
# frame. You can also use wildcards to make the mangled signatures more readable.
#
# I added the following to the testpy.supp file for this particular error:
#
# {
# Supress invalid read size errors in SendPreq() when using HwmpProtocolMac
# Memcheck:Addr8
# fun:*HwmpProtocolMac*SendPreq*
# }
#
# Now, when you run valgrind the error will be suppressed.
#
VALGRIND_SUPPRESSIONS_FILE = "testpy.supp"
def run_job_synchronously(shell_command, directory, valgrind, is_python, build_path=""):
suppressions_path = os.path.join (NS3_BASEDIR, VALGRIND_SUPPRESSIONS_FILE)
if is_python:
path_cmd = PYTHON[0] + " " + os.path.join (NS3_BASEDIR, shell_command)
else:
if len(build_path):
path_cmd = os.path.join (build_path, shell_command)
else:
path_cmd = os.path.join (NS3_BUILDDIR, shell_command)
if valgrind:
cmd = "valgrind --suppressions=%s --leak-check=full --show-reachable=yes --error-exitcode=2 %s" % (suppressions_path,
path_cmd)
else:
cmd = path_cmd
if options.verbose:
print "Synchronously execute %s" % cmd
start_time = time.time()
proc = subprocess.Popen(cmd, shell = True, cwd = directory, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout_results, stderr_results = proc.communicate()
elapsed_time = time.time() - start_time
retval = proc.returncode
#
# valgrind sometimes has its own idea about what kind of memory management
# errors are important. We want to detect *any* leaks, so the way to do
# that is to look for the presence of a valgrind leak summary section.
#
# If another error has occurred (like a test suite has failed), we don't
# want to trump that error, so only do the valgrind output scan if the
# test has otherwise passed (return code was zero).
#
if valgrind and retval == 0 and "== LEAK SUMMARY:" in stderr_results:
retval = 2
if options.verbose:
print "Return code = ", retval
print "stderr = ", stderr_results
return (retval, stdout_results, stderr_results, elapsed_time)
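#
# As a usage sketch, the simple invocations of this function later in this
# script look like:
#
#     (rc, standard_out, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
#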
#
# This class defines a unit of testing work. It will typically refer to
# a test suite to run using the test-runner, or an example to run directly.
#
class Job:
def __init__(self):
self.is_break = False
self.is_skip = False
self.is_example = False
self.is_pyexample = False
self.shell_command = ""
self.display_name = ""
self.basedir = ""
self.tempdir = ""
self.cwd = ""
self.tmp_file_name = ""
self.returncode = False
self.elapsed_time = 0
self.build_path = ""
#
# A job is either a standard job or a special job indicating that a worker
    # thread should exit. This special job is indicated by setting is_break
# to true.
#
def set_is_break(self, is_break):
self.is_break = is_break
#
# If a job is to be skipped, we actually run it through the worker threads
# to keep the PASS, FAIL, CRASH and SKIP processing all in one place.
#
def set_is_skip(self, is_skip):
self.is_skip = is_skip
#
# Examples are treated differently than standard test suites. This is
# mostly because they are completely unaware that they are being run as
# tests. So we have to do some special case processing to make them look
# like tests.
#
def set_is_example(self, is_example):
self.is_example = is_example
#
# Examples are treated differently than standard test suites. This is
# mostly because they are completely unaware that they are being run as
# tests. So we have to do some special case processing to make them look
# like tests.
#
def set_is_pyexample(self, is_pyexample):
self.is_pyexample = is_pyexample
#
# This is the shell command that will be executed in the job. For example,
#
# "utils/ns3-dev-test-runner-debug --test-name=some-test-suite"
#
def set_shell_command(self, shell_command):
self.shell_command = shell_command
#
# This is the build path where ns-3 was built. For example,
#
# "/home/craigdo/repos/ns-3-allinone-test/ns-3-dev/build/debug"
#
def set_build_path(self, build_path):
self.build_path = build_path
#
    # This is the display name of the job, typically the test suite or example
# name. For example,
#
# "some-test-suite" or "udp-echo"
#
def set_display_name(self, display_name):
self.display_name = display_name
#
# This is the base directory of the repository out of which the tests are
# being run. It will be used deep down in the testing framework to determine
# where the source directory of the test was, and therefore where to find
# provided test vectors. For example,
#
# "/home/user/repos/ns-3-dev"
#
def set_basedir(self, basedir):
self.basedir = basedir
#
# This is the directory to which a running test suite should write any
# temporary files.
#
def set_tempdir(self, tempdir):
self.tempdir = tempdir
#
# This is the current working directory that will be given to an executing
# test as it is being run. It will be used for examples to tell them where
# to write all of the pcap files that we will be carefully ignoring. For
# example,
#
# "/tmp/unchecked-traces"
#
def set_cwd(self, cwd):
self.cwd = cwd
#
# This is the temporary results file name that will be given to an executing
# test as it is being run. We will be running all of our tests in parallel
# so there must be multiple temporary output files. These will be collected
# into a single XML file at the end and then be deleted.
#
def set_tmp_file_name(self, tmp_file_name):
self.tmp_file_name = tmp_file_name
#
# The return code received when the job process is executed.
#
def set_returncode(self, returncode):
self.returncode = returncode
#
# The elapsed real time for the job execution.
#
def set_elapsed_time(self, elapsed_time):
self.elapsed_time = elapsed_time
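#
# As a rough sketch, run_tests() configures a test suite job along these
# lines before queueing it for a worker thread:
#
#     job = Job()
#     job.set_display_name(test)
#     job.set_tempdir(testpy_output_dir)
#     job.set_shell_command(path_cmd)
#     input_queue.put(job)
#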
#
# The worker thread class that handles the actual running of a given test.
# Once spawned, it receives requests for work through its input_queue and
# ships the results back through the output_queue.
#
class worker_thread(threading.Thread):
def __init__(self, input_queue, output_queue):
threading.Thread.__init__(self)
self.input_queue = input_queue
self.output_queue = output_queue
def run(self):
while True:
job = self.input_queue.get()
#
# Worker threads continue running until explicitly told to stop with
# a special job.
#
if job.is_break:
return
#
# If the global interrupt handler sets the thread_exit variable,
            # we stop doing real work and just report back that a "break" in the
# normal command processing has happened.
#
if thread_exit == True:
job.set_is_break(True)
self.output_queue.put(job)
continue
#
# If we are actually supposed to skip this job, do so. Note that
# if is_skip is true, returncode is undefined.
#
if job.is_skip:
if options.verbose:
print "Skip %s" % job.shell_command
self.output_queue.put(job)
continue
#
# Otherwise go about the business of running tests as normal.
#
else:
if options.verbose:
print "Launch %s" % job.shell_command
if job.is_example or job.is_pyexample:
#
# If we have an example, the shell command is all we need to
# know. It will be something like "examples/udp/udp-echo" or
# "examples/wireless/mixed-wireless.py"
#
(job.returncode, standard_out, standard_err, et) = run_job_synchronously(job.shell_command,
job.cwd, options.valgrind, job.is_pyexample, job.build_path)
else:
#
# If we're a test suite, we need to provide a little more info
# to the test runner, specifically the base directory and temp
# file name
#
if options.update_data:
update_data = '--update-data'
else:
update_data = ''
(job.returncode, standard_out, standard_err, et) = run_job_synchronously(job.shell_command +
" --xml --tempdir=%s --out=%s %s" % (job.tempdir, job.tmp_file_name, update_data),
job.cwd, options.valgrind, False)
job.set_elapsed_time(et)
if options.verbose:
print "returncode = %d" % job.returncode
print "---------- begin standard out ----------"
print standard_out
print "---------- begin standard err ----------"
print standard_err
print "---------- end standard err ----------"
self.output_queue.put(job)
#
# This is the main function that does the work of interacting with the
# test-runner itself.
#
def run_tests():
#
# Pull some interesting configuration information out of waf, primarily
# so we can know where executables can be found, but also to tell us what
# pieces of the system have been built. This will tell us what examples
# are runnable.
#
read_waf_config()
#
# Add the proper prefix and suffix to the test-runner name to
# match what is done in the wscript file.
#
test_runner_name = "%s%s-%s-%s" % (APPNAME, VERSION, "test-runner", BUILD_PROFILE)
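    #
    # In a typical debug build of ns-3-dev this expands to something like
    # "ns3-dev-test-runner-debug"; the exact name depends on APPNAME, VERSION
    # and BUILD_PROFILE.
    #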
#
# Run waf to make sure that everything is built, configured and ready to go
# unless we are explicitly told not to. We want to be careful about causing
# our users pain while waiting for extraneous stuff to compile and link, so
    # we allow users that know what they're doing to not invoke waf at all.
#
if not options.nowaf:
#
# If the user is running the "kinds" or "list" options, there is an
# implied dependency on the test-runner since we call that program
# if those options are selected. We will exit after processing those
# options, so if we see them, we can safely only build the test-runner.
#
# If the user has constrained us to running only a particular type of
# file, we can only ask waf to build what we know will be necessary.
# For example, if the user only wants to run BVT tests, we only have
# to build the test-runner and can ignore all of the examples.
#
# If the user only wants to run a single example, then we can just build
# that example.
#
# If there is no constraint, then we have to build everything since the
# user wants to run everything.
#
if options.kinds or options.list or (len(options.constrain) and options.constrain in core_kinds):
if sys.platform == "win32":
waf_cmd = "waf --target=test-runner"
else:
waf_cmd = "./waf --target=test-runner"
elif len(options.example):
if sys.platform == "win32":
waf_cmd = "waf --target=%s" % os.path.basename(options.example)
else:
waf_cmd = "./waf --target=%s" % os.path.basename(options.example)
else:
if sys.platform == "win32":
waf_cmd = "waf"
else:
waf_cmd = "./waf"
if options.verbose:
print "Building: %s" % waf_cmd
proc = subprocess.Popen(waf_cmd, shell = True)
proc.communicate()
if proc.returncode:
print >> sys.stderr, "Waf died. Not running tests"
return proc.returncode
#
# Dynamically set up paths.
#
make_paths()
# Get the information from the build status file.
build_status_file = os.path.join (NS3_BUILDDIR, 'build-status.py')
if os.path.exists(build_status_file):
ns3_runnable_programs = get_list_from_file(build_status_file, "ns3_runnable_programs")
ns3_runnable_scripts = get_list_from_file(build_status_file, "ns3_runnable_scripts")
else:
print >> sys.stderr, 'The build status file was not found. You must do waf build before running test.py.'
sys.exit(2)
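    #
    # For reference, build-status.py is expected to define the two lists read
    # above, roughly along the lines of (the program names are illustrative):
    #
    #     ns3_runnable_programs = ['ns3-dev-test-runner-debug', ...]
    #     ns3_runnable_scripts = ['realtime-udp-echo.py', ...]
    #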
# Generate the lists of examples to run as smoke tests in order to
# ensure that they remain buildable and runnable over time.
#
example_tests = []
python_tests = []
for directory in EXAMPLE_DIRECTORIES:
# Set the directories and paths for this example.
example_directory = os.path.join("examples", directory)
examples_to_run_path = os.path.join(example_directory, "examples-to-run.py")
cpp_executable_dir = os.path.join(NS3_BUILDDIR, example_directory)
python_script_dir = os.path.join(example_directory)
# Parse this example directory's file.
parse_examples_to_run_file(
examples_to_run_path,
cpp_executable_dir,
python_script_dir,
example_tests,
python_tests)
for module in NS3_ENABLED_MODULES:
# Remove the "ns3-" from the module name.
module = module[len("ns3-"):]
# Set the directories and paths for this example.
module_directory = os.path.join("src", module)
example_directory = os.path.join(module_directory, "examples")
examples_to_run_path = os.path.join(module_directory, "test", "examples-to-run.py")
cpp_executable_dir = os.path.join(NS3_BUILDDIR, example_directory)
python_script_dir = os.path.join(example_directory)
# Parse this module's file.
parse_examples_to_run_file(
examples_to_run_path,
cpp_executable_dir,
python_script_dir,
example_tests,
python_tests)
#
# If lots of logging is enabled, we can crash Python when it tries to
# save all of the text. We just don't allow logging to be turned on when
# test.py runs. If you want to see logging output from your tests, you
# have to run them using the test-runner directly.
#
os.environ["NS_LOG"] = ""
#
    # There are a couple of options that imply we want to exit before starting
# up a bunch of threads and running tests. Let's detect these cases and
# handle them without doing all of the hard work.
#
if options.kinds:
path_cmd = os.path.join("utils", test_runner_name + " --print-test-type-list")
(rc, standard_out, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
print standard_out
if options.list:
path_cmd = os.path.join("utils", test_runner_name + " --print-test-name-list")
(rc, standard_out, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
print standard_out
if options.kinds or options.list:
return
#
# We communicate results in two ways. First, a simple message relating
# PASS, FAIL, CRASH or SKIP is always written to the standard output. It
# is expected that this will be one of the main use cases. A developer can
# just run test.py with no options and see that all of the tests still
# pass.
#
# The second main use case is when detailed status is requested (with the
    # --text or --html options). Typically this will be text if a developer
# finds a problem, or HTML for nightly builds. In these cases, an
# XML file is written containing the status messages from the test suites.
# This file is then read and translated into text or HTML. It is expected
# that nobody will really be interested in the XML, so we write it somewhere
# with a unique name (time) to avoid collisions. In case an error happens, we
# provide a runtime option to retain the temporary files.
#
# When we run examples as smoke tests, they are going to want to create
# lots and lots of trace files. We aren't really interested in the contents
# of the trace files, so we also just stash them off in the temporary dir.
# The retain option also causes these unchecked trace files to be kept.
#
date_and_time = time.strftime("%Y-%m-%d-%H-%M-%S-CUT", time.gmtime())
if not os.path.exists(TMP_OUTPUT_DIR):
os.makedirs(TMP_OUTPUT_DIR)
    testpy_output_dir = os.path.join(TMP_OUTPUT_DIR, date_and_time)
if not os.path.exists(testpy_output_dir):
os.makedirs(testpy_output_dir)
#
# Create the main output file and start filling it with XML. We need to
# do this since the tests will just append individual results to this file.
#
xml_results_file = os.path.join(testpy_output_dir, "results.xml")
f = open(xml_results_file, 'w')
f.write('<?xml version="1.0"?>\n')
f.write('<Results>\n')
f.close()
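    #
    # Once all of the jobs have been collected below, the finished file will
    # look roughly like:
    #
    #     <?xml version="1.0"?>
    #     <Results>
    #       <Test> ... </Test>
    #       <Example> ... </Example>
    #     </Results>
    #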
#
# We need to figure out what test suites to execute. We are either given one
# suite or example explicitly via the --suite or --example/--pyexample option,
# or we need to call into the test runner and ask it to list all of the available
# test suites. Further, we need to provide the constraint information if it
# has been given to us.
#
# This translates into allowing the following options with respect to the
# suites
#
    # ./test.py: run all of the suites and examples
# ./test.py --constrain=core: run all of the suites of all kinds
# ./test.py --constrain=unit: run all unit suites
# ./test.py --suite=some-test-suite: run a single suite
# ./test.py --example=examples/udp/udp-echo: run single example
# ./test.py --pyexample=examples/wireless/mixed-wireless.py: run python example
# ./test.py --suite=some-suite --example=some-example: run the single suite
#
# We can also use the --constrain option to provide an ordering of test
# execution quite easily.
#
if len(options.suite):
# See if this is a valid test suite.
path_cmd = os.path.join("utils", test_runner_name + " --print-test-name-list")
(rc, suites, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
if options.suite in suites:
suites = options.suite + "\n"
else:
print >> sys.stderr, 'The test suite was not run because an unknown test suite name was requested.'
sys.exit(2)
elif len(options.example) == 0 and len(options.pyexample) == 0:
if len(options.constrain):
path_cmd = os.path.join("utils", test_runner_name + " --print-test-name-list --test-type=%s" % options.constrain)
(rc, suites, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
else:
path_cmd = os.path.join("utils", test_runner_name + " --print-test-name-list")
(rc, suites, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
else:
suites = ""
#
    # suite_list will either be a single test suite name that the user has
# indicated she wants to run or a list of test suites provided by
# the test-runner possibly according to user provided constraints.
# We go through the trouble of setting up the parallel execution
    # even in the case of a single suite to avoid having to process the
# results in two different places.
#
suite_list = suites.split('\n')
#
# We now have a possibly large number of test suites to run, so we want to
# run them in parallel. We're going to spin up a number of worker threads
# that will run our test jobs for us.
#
input_queue = Queue.Queue(0)
output_queue = Queue.Queue(0)
jobs = 0
threads=[]
#
# In Python 2.6 you can just use multiprocessing module, but we don't want
# to introduce that dependency yet; so we jump through a few hoops.
#
processors = 1
if sys.platform != "win32":
        if 'SC_NPROCESSORS_ONLN' in os.sysconf_names:
processors = os.sysconf('SC_NPROCESSORS_ONLN')
else:
proc = subprocess.Popen("sysctl -n hw.ncpu", shell = True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout_results, stderr_results = proc.communicate()
if len(stderr_results) == 0:
processors = int(stdout_results)
#
# Now, spin up one thread per processor which will eventually mean one test
# per processor running concurrently.
#
for i in range(processors):
thread = worker_thread(input_queue, output_queue)
threads.append(thread)
thread.start()
#
# Keep track of some summary statistics
#
total_tests = 0
skipped_tests = 0
#
# We now have worker threads spun up, and a list of work to do. So, run
# through the list of test suites and dispatch a job to run each one.
#
# Dispatching will run with unlimited speed and the worker threads will
# execute as fast as possible from the queue.
#
# Note that we actually dispatch tests to be skipped, so all of the
# PASS, FAIL, CRASH and SKIP processing is done in the same place.
#
for test in suite_list:
test = test.strip()
if len(test):
job = Job()
job.set_is_example(False)
job.set_is_pyexample(False)
job.set_display_name(test)
job.set_tmp_file_name(os.path.join(testpy_output_dir, "%s.xml" % test))
job.set_cwd(os.getcwd())
job.set_basedir(os.getcwd())
job.set_tempdir(testpy_output_dir)
if (options.multiple):
multiple = ""
else:
multiple = " --stop-on-failure"
path_cmd = os.path.join("utils", test_runner_name + " --test-name=%s%s" % (test, multiple))
job.set_shell_command(path_cmd)
if options.valgrind and test in core_valgrind_skip_tests:
job.set_is_skip(True)
# Skip tests that will fail if NSC is missing.
if not NSC_ENABLED and test in core_nsc_missing_skip_tests:
job.set_is_skip(True)
if options.verbose:
print "Queue %s" % test
input_queue.put(job)
jobs = jobs + 1
total_tests = total_tests + 1
#
# We've taken care of the discovered or specified test suites. Now we
# have to deal with examples run as smoke tests. We have a list of all of
# the example programs it makes sense to try and run. Each example will
# have a condition associated with it that must evaluate to true for us
# to try and execute it. This is used to determine if the example has
# a dependency that is not satisfied. For example, if an example depends
# on NSC being configured by waf, that example should have a condition
# that evaluates to true if NSC is enabled. For example,
#
# ("tcp-nsc-zoo", "NSC_ENABLED == True"),
#
# In this case, the example "tcp-nsc-zoo" will only be run if we find the
# waf configuration variable "NSC_ENABLED" to be True.
#
# We don't care at all how the trace files come out, so we just write them
# to a single temporary directory.
#
# XXX As it stands, all of the trace files have unique names, and so file
# collisions can only happen if two instances of an example are running in
# two versions of the test.py process concurrently. We may want to create
# uniquely named temporary traces directories to avoid this problem.
#
# We need to figure out what examples to execute. We are either given one
# suite or example explicitly via the --suite or --example option, or we
# need to walk the list of examples looking for available example
# conditions.
#
# This translates into allowing the following options with respect to the
# suites
#
    # ./test.py: run all of the examples
# ./test.py --constrain=unit run no examples
# ./test.py --constrain=example run all of the examples
# ./test.py --suite=some-test-suite: run no examples
# ./test.py --example=some-example: run the single example
# ./test.py --suite=some-suite --example=some-example: run the single example
#
# XXX could use constrain to separate out examples used for performance
# testing
#
if len(options.suite) == 0 and len(options.example) == 0 and len(options.pyexample) == 0:
if len(options.constrain) == 0 or options.constrain == "example":
if ENABLE_EXAMPLES:
for test, do_run, do_valgrind_run in example_tests:
# Remove any arguments and directory names from test.
test_name = test.split(' ', 1)[0]
test_name = os.path.basename(test_name)
# Don't try to run this example if it isn't runnable.
if test_name in ns3_runnable_programs:
if eval(do_run):
job = Job()
job.set_is_example(True)
job.set_is_pyexample(False)
job.set_display_name(test)
job.set_tmp_file_name("")
job.set_cwd(testpy_output_dir)
job.set_basedir(os.getcwd())
job.set_tempdir(testpy_output_dir)
job.set_shell_command(test)
job.set_build_path(options.buildpath)
if options.valgrind and not eval(do_valgrind_run):
job.set_is_skip (True)
if options.verbose:
print "Queue %s" % test
input_queue.put(job)
jobs = jobs + 1
total_tests = total_tests + 1
elif len(options.example):
# Add the proper prefix and suffix to the example name to
# match what is done in the wscript file.
(example_path_without_name, example_name) = os.path.split(options.example)
example_name = "%s%s-%s-%s" % (APPNAME, VERSION, example_name, BUILD_PROFILE)
example_path = os.path.join(example_path_without_name, example_name)
# Don't try to run this example if it isn't runnable.
if example_name not in ns3_runnable_programs:
print "Example %s is not runnable." % example_name
else:
#
# If you tell me to run an example, I will try and run the example
# irrespective of any condition.
#
job = Job()
job.set_is_example(True)
job.set_is_pyexample(False)
job.set_display_name(example_name)
job.set_tmp_file_name("")
job.set_cwd(testpy_output_dir)
job.set_basedir(os.getcwd())
job.set_tempdir(testpy_output_dir)
job.set_shell_command(example_path)
job.set_build_path(options.buildpath)
if options.verbose:
print "Queue %s" % example_name
input_queue.put(job)
jobs = jobs + 1
total_tests = total_tests + 1
#
# Run some Python examples as smoke tests. We have a list of all of
# the example programs it makes sense to try and run. Each example will
# have a condition associated with it that must evaluate to true for us
# to try and execute it. This is used to determine if the example has
# a dependency that is not satisfied.
#
# We don't care at all how the trace files come out, so we just write them
# to a single temporary directory.
#
# We need to figure out what python examples to execute. We are either
# given one pyexample explicitly via the --pyexample option, or we
# need to walk the list of python examples
#
# This translates into allowing the following options with respect to the
# suites
#
# ./test.py --constrain=pyexample run all of the python examples
# ./test.py --pyexample=some-example.py: run the single python example
#
if len(options.suite) == 0 and len(options.example) == 0 and len(options.pyexample) == 0:
if len(options.constrain) == 0 or options.constrain == "pyexample":
if ENABLE_EXAMPLES:
for test, do_run in python_tests:
# Remove any arguments and directory names from test.
test_name = test.split(' ', 1)[0]
test_name = os.path.basename(test_name)
# Don't try to run this example if it isn't runnable.
if test_name in ns3_runnable_scripts:
if eval(do_run):
job = Job()
job.set_is_example(False)
job.set_is_pyexample(True)
job.set_display_name(test)
job.set_tmp_file_name("")
job.set_cwd(testpy_output_dir)
job.set_basedir(os.getcwd())
job.set_tempdir(testpy_output_dir)
job.set_shell_command(test)
job.set_build_path("")
#
# Python programs and valgrind do not work and play
# well together, so we skip them under valgrind.
# We go through the trouble of doing all of this
# work to report the skipped tests in a consistent
                            # way through the output formatter.
#
if options.valgrind:
job.set_is_skip (True)
#
# The user can disable python bindings, so we need
# to pay attention to that and give some feedback
# that we're not testing them
#
if not ENABLE_PYTHON_BINDINGS:
job.set_is_skip (True)
if options.verbose:
print "Queue %s" % test
input_queue.put(job)
jobs = jobs + 1
total_tests = total_tests + 1
elif len(options.pyexample):
# Don't try to run this example if it isn't runnable.
example_name = os.path.basename(options.pyexample)
if example_name not in ns3_runnable_scripts:
print "Example %s is not runnable." % example_name
else:
#
# If you tell me to run a python example, I will try and run the example
# irrespective of any condition.
#
job = Job()
job.set_is_pyexample(True)
job.set_display_name(options.pyexample)
job.set_tmp_file_name("")
job.set_cwd(testpy_output_dir)
job.set_basedir(os.getcwd())
job.set_tempdir(testpy_output_dir)
job.set_shell_command(options.pyexample)
job.set_build_path("")
if options.verbose:
print "Queue %s" % options.pyexample
input_queue.put(job)
jobs = jobs + 1
total_tests = total_tests + 1
#
# Tell the worker threads to pack up and go home for the day. Each one
    # will exit when it sees its is_break task.
#
for i in range(processors):
job = Job()
job.set_is_break(True)
input_queue.put(job)
#
# Now all of the tests have been dispatched, so all we have to do here
# in the main thread is to wait for them to complete. Keyboard interrupt
# handling is broken as mentioned above. We use a signal handler to catch
# sigint and set a global variable. When the worker threads sense this
# they stop doing real work and will just start throwing jobs back at us
# with is_break set to True. In this case, there are no real results so we
# ignore them. If there are real results, we always print PASS or FAIL to
# standard out as a quick indication of what happened.
#
passed_tests = 0
failed_tests = 0
crashed_tests = 0
valgrind_errors = 0
for i in range(jobs):
job = output_queue.get()
if job.is_break:
continue
if job.is_example or job.is_pyexample:
kind = "Example"
else:
kind = "TestSuite"
if job.is_skip:
status = "SKIP"
skipped_tests = skipped_tests + 1
else:
if job.returncode == 0:
status = "PASS"
passed_tests = passed_tests + 1
elif job.returncode == 1:
failed_tests = failed_tests + 1
status = "FAIL"
elif job.returncode == 2:
valgrind_errors = valgrind_errors + 1
status = "VALGR"
else:
crashed_tests = crashed_tests + 1
status = "CRASH"
print "%s: %s %s" % (status, kind, job.display_name)
if job.is_example or job.is_pyexample:
#
# Examples are the odd man out here. They are written without any
# knowledge that they are going to be run as a test, so we need to
# cook up some kind of output for them. We're writing an xml file,
# so we do some simple XML that says we ran the example.
#
# XXX We could add some timing information to the examples, i.e. run
# them through time and print the results here.
#
f = open(xml_results_file, 'a')
f.write('<Example>\n')
example_name = " <Name>%s</Name>\n" % job.display_name
f.write(example_name)
if status == "PASS":
f.write(' <Result>PASS</Result>\n')
elif status == "FAIL":
f.write(' <Result>FAIL</Result>\n')
elif status == "VALGR":
f.write(' <Result>VALGR</Result>\n')
elif status == "SKIP":
f.write(' <Result>SKIP</Result>\n')
else:
f.write(' <Result>CRASH</Result>\n')
f.write(' <Time real="%.3f"/>\n' % job.elapsed_time)
f.write('</Example>\n')
f.close()
else:
#
# If we're not running an example, we're running a test suite.
# These puppies are running concurrently and generating output
# that was written to a temporary file to avoid collisions.
#
# Now that we are executing sequentially in the main thread, we can
# concatenate the contents of the associated temp file to the main
# results file and remove that temp file.
#
# One thing to consider is that a test suite can crash just as
# well as any other program, so we need to deal with that
# possibility as well. If it ran correctly it will return 0
# if it passed, or 1 if it failed. In this case, we can count
# on the results file it saved being complete. If it crashed, it
# will return some other code, and the file should be considered
# corrupt and useless. If the suite didn't create any XML, then
# we're going to have to do it ourselves.
#
# Another issue is how to deal with a valgrind error. If we run
# a test suite under valgrind and it passes, we will get a return
# code of 0 and there will be a valid xml results file since the code
# ran to completion. If we get a return code of 1 under valgrind,
# the test case failed, but valgrind did not find any problems so the
# test case return code was passed through. We will have a valid xml
# results file here as well since the test suite ran. If we see a
# return code of 2, this means that valgrind found an error (we asked
# it to return 2 if it found a problem in run_job_synchronously) but
# the suite ran to completion so there is a valid xml results file.
# If the suite crashes under valgrind we will see some other error
# return code (like 139). If valgrind finds an illegal instruction or
# some other strange problem, it will die with its own strange return
# code (like 132). However, if the test crashes by itself, not under
# valgrind we will also see some other return code.
#
# If the return code is 0, 1, or 2, we have a valid xml file. If we
# get another return code, we have no xml and we can't really say what
# happened -- maybe the TestSuite crashed, maybe valgrind crashed due
# to an illegal instruction. If we get something beside 0-2, we assume
# a crash and fake up an xml entry. After this is all done, we still
# need to indicate a valgrind error somehow, so we fake up an xml entry
# with a VALGR result. Thus, in the case of a working TestSuite that
# fails valgrind, we'll see the PASS entry for the working TestSuite
# followed by a VALGR failing test suite of the same name.
#
if job.is_skip:
f = open(xml_results_file, 'a')
f.write("<Test>\n")
f.write(" <Name>%s</Name>\n" % job.display_name)
f.write(' <Result>SKIP</Result>\n')
f.write("</Test>\n")
f.close()
else:
if job.returncode == 0 or job.returncode == 1 or job.returncode == 2:
f_to = open(xml_results_file, 'a')
f_from = open(job.tmp_file_name)
f_to.write(f_from.read())
f_to.close()
f_from.close()
else:
f = open(xml_results_file, 'a')
f.write("<Test>\n")
f.write(" <Name>%s</Name>\n" % job.display_name)
                    f.write(' <Result>CRASH</Result>\n')
f.write("</Test>\n")
f.close()
if job.returncode == 2:
f = open(xml_results_file, 'a')
f.write("<Test>\n")
f.write(" <Name>%s</Name>\n" % job.display_name)
f.write(' <Result>VALGR</Result>\n')
f.write("</Test>\n")
f.close()
#
# We have all of the tests run and the results written out. One final
# bit of housekeeping is to wait for all of the threads to close down
# so we can exit gracefully.
#
for thread in threads:
thread.join()
#
# Back at the beginning of time, we started the body of an XML document
# since the test suites and examples were going to just write their
# individual pieces. So, we need to finish off and close out the XML
# document
#
f = open(xml_results_file, 'a')
f.write('</Results>\n')
f.close()
#
# Print a quick summary of events
#
print "%d of %d tests passed (%d passed, %d skipped, %d failed, %d crashed, %d valgrind errors)" % (passed_tests,
total_tests, passed_tests, skipped_tests, failed_tests, crashed_tests, valgrind_errors)
#
# The last things to do are to translate the XML results file to "human
# readable form" if the user asked for it (or make an XML file somewhere)
#
if len(options.html):
translate_to_html(xml_results_file, options.html)
if len(options.text):
translate_to_text(xml_results_file, options.text)
if len(options.xml):
shutil.copyfile(xml_results_file, options.xml)
#
# Let the user know if they need to turn on tests or examples.
#
if not ENABLE_TESTS or not ENABLE_EXAMPLES:
print
if not ENABLE_TESTS:
print '*** Note: ns-3 tests are currently disabled. Enable them by adding'
print '*** "--enable-tests" to ./waf configure or modifying your .ns3rc file.'
print
if not ENABLE_EXAMPLES:
print '*** Note: ns-3 examples are currently disabled. Enable them by adding'
print '*** "--enable-examples" to ./waf configure or modifying your .ns3rc file.'
print
#
# If we have been asked to retain all of the little temporary files, we
    # don't delete them. If we do delete the temporary files, delete only the
# directory we just created. We don't want to happily delete any retained
# directories, which will probably surprise the user.
#
if not options.retain:
shutil.rmtree(testpy_output_dir)
if passed_tests + skipped_tests == total_tests:
return 0 # success
else:
return 1 # catchall for general errors
def main(argv):
parser = optparse.OptionParser()
parser.add_option("-b", "--buildpath", action="store", type="string", dest="buildpath", default="",
metavar="BUILDPATH",
help="specify the path where ns-3 was built (defaults to the build directory for the current variant)")
parser.add_option("-c", "--constrain", action="store", type="string", dest="constrain", default="",
metavar="KIND",
help="constrain the test-runner by kind of test")
parser.add_option("-e", "--example", action="store", type="string", dest="example", default="",
metavar="EXAMPLE",
help="specify a single example to run (with relative path)")
parser.add_option("-u", "--update-data", action="store_true", dest="update_data", default=False,
help="If examples use reference data files, get them to re-generate them")
parser.add_option("-g", "--grind", action="store_true", dest="valgrind", default=False,
help="run the test suites and examples using valgrind")
parser.add_option("-k", "--kinds", action="store_true", dest="kinds", default=False,
help="print the kinds of tests available")
parser.add_option("-l", "--list", action="store_true", dest="list", default=False,
help="print the list of known tests")
parser.add_option("-m", "--multiple", action="store_true", dest="multiple", default=False,
help="report multiple failures from test suites and test cases")
parser.add_option("-n", "--nowaf", action="store_true", dest="nowaf", default=False,
help="do not run waf before starting testing")
parser.add_option("-p", "--pyexample", action="store", type="string", dest="pyexample", default="",
metavar="PYEXAMPLE",
help="specify a single python example to run (with relative path)")
parser.add_option("-r", "--retain", action="store_true", dest="retain", default=False,
help="retain all temporary files (which are normally deleted)")
parser.add_option("-s", "--suite", action="store", type="string", dest="suite", default="",
metavar="TEST-SUITE",
help="specify a single test suite to run")
parser.add_option("-t", "--text", action="store", type="string", dest="text", default="",
metavar="TEXT-FILE",
help="write detailed test results into TEXT-FILE.txt")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False,
help="print progress and informational messages")
parser.add_option("-w", "--web", "--html", action="store", type="string", dest="html", default="",
metavar="HTML-FILE",
help="write detailed test results into HTML-FILE.html")
parser.add_option("-x", "--xml", action="store", type="string", dest="xml", default="",
metavar="XML-FILE",
help="write detailed test results into XML-FILE.xml")
global options
options = parser.parse_args()[0]
signal.signal(signal.SIGINT, sigint_hook)
return run_tests()
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
anaran/kuma | refs/heads/master | vendor/packages/logilab/astng/test/unittest_builder.py | 24 | # copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of logilab-astng.
#
# logilab-astng is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# logilab-astng is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-astng. If not, see <http://www.gnu.org/licenses/>.
"""tests for the astng builder and rebuilder module"""
import unittest
import sys
from os.path import join, abspath, dirname
from logilab.common.testlib import TestCase, unittest_main
from pprint import pprint
from logilab.astng import builder, nodes, InferenceError, NotFoundError
from logilab.astng.nodes import Module
from logilab.astng.bases import YES, BUILTINS
from logilab.astng.manager import ASTNGManager
MANAGER = ASTNGManager()
from unittest_inference import get_name_node
import data
from data import module as test_module
DATA = join(dirname(abspath(__file__)), 'data')
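# The FromToLineNoTC tests below parse the data/format.py fixture and check
# the line number information (fromlineno / tolineno) recorded on the
# resulting astng nodes.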
class FromToLineNoTC(TestCase):
astng = builder.ASTNGBuilder().file_build(join(DATA, 'format.py'))
def test_callfunc_lineno(self):
stmts = self.astng.body
# on line 4:
# function('aeozrijz\
# earzer', hop)
discard = stmts[0]
self.assertIsInstance(discard, nodes.Discard)
self.assertEqual(discard.fromlineno, 4)
self.assertEqual(discard.tolineno, 5)
callfunc = discard.value
self.assertIsInstance(callfunc, nodes.CallFunc)
self.assertEqual(callfunc.fromlineno, 4)
self.assertEqual(callfunc.tolineno, 5)
name = callfunc.func
self.assertIsInstance(name, nodes.Name)
self.assertEqual(name.fromlineno, 4)
self.assertEqual(name.tolineno, 4)
strarg = callfunc.args[0]
self.assertIsInstance(strarg, nodes.Const)
self.assertEqual(strarg.fromlineno, 5) # no way for this one (is 4 actually)
self.assertEqual(strarg.tolineno, 5)
namearg = callfunc.args[1]
self.assertIsInstance(namearg, nodes.Name)
self.assertEqual(namearg.fromlineno, 5)
self.assertEqual(namearg.tolineno, 5)
# on line 10:
# fonction(1,
# 2,
# 3,
# 4)
discard = stmts[2]
self.assertIsInstance(discard, nodes.Discard)
self.assertEqual(discard.fromlineno, 10)
self.assertEqual(discard.tolineno, 13)
callfunc = discard.value
self.assertIsInstance(callfunc, nodes.CallFunc)
self.assertEqual(callfunc.fromlineno, 10)
self.assertEqual(callfunc.tolineno, 13)
name = callfunc.func
self.assertIsInstance(name, nodes.Name)
self.assertEqual(name.fromlineno, 10)
self.assertEqual(name.tolineno, 10)
for i, arg in enumerate(callfunc.args):
self.assertIsInstance(arg, nodes.Const)
self.assertEqual(arg.fromlineno, 10+i)
self.assertEqual(arg.tolineno, 10+i)
def test_function_lineno(self):
stmts = self.astng.body
# on line 15:
# def definition(a,
# b,
# c):
# return a + b + c
function = stmts[3]
self.assertIsInstance(function, nodes.Function)
self.assertEqual(function.fromlineno, 15)
self.assertEqual(function.tolineno, 18)
return_ = function.body[0]
self.assertIsInstance(return_, nodes.Return)
self.assertEqual(return_.fromlineno, 18)
self.assertEqual(return_.tolineno, 18)
if sys.version_info < (3, 0):
self.assertEqual(function.blockstart_tolineno, 17)
else:
self.skipTest('FIXME http://bugs.python.org/issue10445 '
'(no line number on function args)')
def test_decorated_function_lineno(self):
astng = builder.ASTNGBuilder().string_build('''
@decorator
def function(
arg):
print (arg)
''', __name__, __file__)
function = astng['function']
self.assertEqual(function.fromlineno, 3) # XXX discussable, but that's what is expected by pylint right now
self.assertEqual(function.tolineno, 5)
self.assertEqual(function.decorators.fromlineno, 2)
self.assertEqual(function.decorators.tolineno, 2)
if sys.version_info < (3, 0):
self.assertEqual(function.blockstart_tolineno, 4)
else:
self.skipTest('FIXME http://bugs.python.org/issue10445 '
'(no line number on function args)')
def test_class_lineno(self):
stmts = self.astng.body
# on line 20:
# class debile(dict,
# object):
# pass
class_ = stmts[4]
self.assertIsInstance(class_, nodes.Class)
self.assertEqual(class_.fromlineno, 20)
self.assertEqual(class_.tolineno, 22)
self.assertEqual(class_.blockstart_tolineno, 21)
pass_ = class_.body[0]
self.assertIsInstance(pass_, nodes.Pass)
self.assertEqual(pass_.fromlineno, 22)
self.assertEqual(pass_.tolineno, 22)
def test_if_lineno(self):
stmts = self.astng.body
# on line 20:
# if aaaa: pass
# else:
# aaaa,bbbb = 1,2
# aaaa,bbbb = bbbb,aaaa
if_ = stmts[5]
self.assertIsInstance(if_, nodes.If)
self.assertEqual(if_.fromlineno, 24)
self.assertEqual(if_.tolineno, 27)
self.assertEqual(if_.blockstart_tolineno, 24)
self.assertEqual(if_.orelse[0].fromlineno, 26)
self.assertEqual(if_.orelse[1].tolineno, 27)
def test_for_while_lineno(self):
for code in ('''
for a in range(4):
print (a)
break
else:
print ("bouh")
''', '''
while a:
print (a)
break
else:
print ("bouh")
''',
):
astng = builder.ASTNGBuilder().string_build(code, __name__, __file__)
stmt = astng.body[0]
self.assertEqual(stmt.fromlineno, 2)
self.assertEqual(stmt.tolineno, 6)
self.assertEqual(stmt.blockstart_tolineno, 2)
self.assertEqual(stmt.orelse[0].fromlineno, 6) # XXX
self.assertEqual(stmt.orelse[0].tolineno, 6)
def test_try_except_lineno(self):
astng = builder.ASTNGBuilder().string_build('''
try:
print (a)
except:
pass
else:
print ("bouh")
''', __name__, __file__)
try_ = astng.body[0]
self.assertEqual(try_.fromlineno, 2)
self.assertEqual(try_.tolineno, 7)
self.assertEqual(try_.blockstart_tolineno, 2)
self.assertEqual(try_.orelse[0].fromlineno, 7) # XXX
self.assertEqual(try_.orelse[0].tolineno, 7)
hdlr = try_.handlers[0]
self.assertEqual(hdlr.fromlineno, 4)
self.assertEqual(hdlr.tolineno, 5)
self.assertEqual(hdlr.blockstart_tolineno, 4)
def test_try_finally_lineno(self):
astng = builder.ASTNGBuilder().string_build('''
try:
print (a)
finally:
print ("bouh")
''', __name__, __file__)
try_ = astng.body[0]
self.assertEqual(try_.fromlineno, 2)
self.assertEqual(try_.tolineno, 5)
self.assertEqual(try_.blockstart_tolineno, 2)
self.assertEqual(try_.finalbody[0].fromlineno, 5) # XXX
self.assertEqual(try_.finalbody[0].tolineno, 5)
def test_try_finally_25_lineno(self):
astng = builder.ASTNGBuilder().string_build('''
try:
print (a)
except:
pass
finally:
print ("bouh")
''', __name__, __file__)
try_ = astng.body[0]
self.assertEqual(try_.fromlineno, 2)
self.assertEqual(try_.tolineno, 7)
self.assertEqual(try_.blockstart_tolineno, 2)
self.assertEqual(try_.finalbody[0].fromlineno, 7) # XXX
self.assertEqual(try_.finalbody[0].tolineno, 7)
def test_with_lineno(self):
astng = builder.ASTNGBuilder().string_build('''
from __future__ import with_statement
with file("/tmp/pouet") as f:
print (f)
''', __name__, __file__)
with_ = astng.body[1]
self.assertEqual(with_.fromlineno, 3)
self.assertEqual(with_.tolineno, 4)
self.assertEqual(with_.blockstart_tolineno, 3)
class BuilderTC(TestCase):
def setUp(self):
self.builder = builder.ASTNGBuilder()
def test_border_cases(self):
"""check that a file with no trailing new line is parseable"""
self.builder.file_build(join(DATA, 'noendingnewline.py'), 'data.noendingnewline')
self.assertRaises(builder.ASTNGBuildingException,
self.builder.file_build, join(DATA, 'inexistant.py'), 'whatever')
def test_inspect_build0(self):
"""test astng tree build from a living object"""
builtin_astng = MANAGER.astng_from_module_name(BUILTINS)
if sys.version_info < (3, 0):
fclass = builtin_astng['file']
self.assertIn('name', fclass)
self.assertIn('mode', fclass)
self.assertIn('read', fclass)
self.assertTrue(fclass.newstyle)
self.assertTrue(fclass.pytype(), '%s.type' % BUILTINS)
self.assertIsInstance(fclass['read'], nodes.Function)
# check builtin function has args.args == None
dclass = builtin_astng['dict']
self.assertIsNone(dclass['has_key'].args.args)
# just check type and object are there
builtin_astng.getattr('type')
objectastng = builtin_astng.getattr('object')[0]
self.assertIsInstance(objectastng.getattr('__new__')[0], nodes.Function)
# check open file alias
builtin_astng.getattr('open')
# check 'help' is there (defined dynamically by site.py)
builtin_astng.getattr('help')
# check property has __init__
pclass = builtin_astng['property']
self.assertIn('__init__', pclass)
self.assertIsInstance(builtin_astng['None'], nodes.Const)
self.assertIsInstance(builtin_astng['True'], nodes.Const)
self.assertIsInstance(builtin_astng['False'], nodes.Const)
if sys.version_info < (3, 0):
self.assertIsInstance(builtin_astng['Exception'], nodes.From)
self.assertIsInstance(builtin_astng['NotImplementedError'], nodes.From)
else:
self.assertIsInstance(builtin_astng['Exception'], nodes.Class)
self.assertIsInstance(builtin_astng['NotImplementedError'], nodes.Class)
def test_inspect_build1(self):
time_astng = MANAGER.astng_from_module_name('time')
self.assertTrue(time_astng)
self.assertEqual(time_astng['time'].args.defaults, [])
def test_inspect_build2(self):
"""test astng tree build from a living object"""
try:
from mx import DateTime
except ImportError:
self.skipTest('test skipped: mxDateTime is not available')
else:
dt_astng = self.builder.inspect_build(DateTime)
dt_astng.getattr('DateTime')
# this one is failing since DateTimeType.__module__ = 'builtins' !
#dt_astng.getattr('DateTimeType')
def test_inspect_build3(self):
self.builder.inspect_build(unittest)
def test_inspect_build_instance(self):
"""test astng tree build from a living object"""
if sys.version_info >= (3, 0):
self.skipTest('The module "exceptions" is gone in py3.x')
import exceptions
builtin_astng = self.builder.inspect_build(exceptions)
fclass = builtin_astng['OSError']
# things like OSError.strerror are now (2.5) data descriptors on the
# class instead of entries in the __dict__ of an instance
container = fclass
self.assertIn('errno', container)
self.assertIn('strerror', container)
self.assertIn('filename', container)
def test_inspect_build_type_object(self):
builtin_astng = MANAGER.astng_from_module_name(BUILTINS)
infered = list(builtin_astng.igetattr('object'))
self.assertEqual(len(infered), 1)
infered = infered[0]
self.assertEqual(infered.name, 'object')
infered.as_string() # no crash test
infered = list(builtin_astng.igetattr('type'))
self.assertEqual(len(infered), 1)
infered = infered[0]
self.assertEqual(infered.name, 'type')
infered.as_string() # no crash test
def test_package_name(self):
"""test base properties and method of a astng module"""
datap = self.builder.file_build(join(DATA, '__init__.py'), 'data')
self.assertEqual(datap.name, 'data')
self.assertEqual(datap.package, 1)
datap = self.builder.file_build(join(DATA, '__init__.py'), 'data.__init__')
self.assertEqual(datap.name, 'data')
self.assertEqual(datap.package, 1)
def test_yield_parent(self):
"""check if we added discard nodes as yield parent (w/ compiler)"""
data = """
def yiell():
yield 0
if noe:
yield more
"""
func = self.builder.string_build(data).body[0]
self.assertIsInstance(func, nodes.Function)
stmt = func.body[0]
self.assertIsInstance(stmt, nodes.Discard)
self.assertIsInstance(stmt.value, nodes.Yield)
self.assertIsInstance(func.body[1].body[0], nodes.Discard)
self.assertIsInstance(func.body[1].body[0].value, nodes.Yield)
def test_object(self):
obj_astng = self.builder.inspect_build(object)
self.assertIn('__setattr__', obj_astng)
def test_newstyle_detection(self):
data = '''
class A:
"old style"
class B(A):
"old style"
class C(object):
"new style"
class D(C):
"new style"
__metaclass__ = type
class E(A):
"old style"
class F:
"new style"
'''
mod_astng = self.builder.string_build(data, __name__, __file__)
self.assertFalse(mod_astng['A'].newstyle)
self.assertFalse(mod_astng['B'].newstyle)
self.assertTrue(mod_astng['C'].newstyle)
self.assertTrue(mod_astng['D'].newstyle)
self.assertFalse(mod_astng['E'].newstyle)
self.assertTrue(mod_astng['F'].newstyle)
def test_globals(self):
data = '''
CSTE = 1
def update_global():
global CSTE
CSTE += 1
def global_no_effect():
global CSTE2
print (CSTE)
'''
astng = self.builder.string_build(data, __name__, __file__)
self.assertEqual(len(astng.getattr('CSTE')), 2)
self.assertIsInstance(astng.getattr('CSTE')[0], nodes.AssName)
self.assertEqual(astng.getattr('CSTE')[0].fromlineno, 2)
self.assertEqual(astng.getattr('CSTE')[1].fromlineno, 6)
self.assertRaises(NotFoundError,
astng.getattr, 'CSTE2')
self.assertRaises(InferenceError,
astng['global_no_effect'].ilookup('CSTE2').next)
def test_socket_build(self):
import socket
astng = self.builder.module_build(socket)
        # XXX just check the first one. Actually 3 objects are inferred (look at
        # the socket module) but the last one has those attributes dynamically
        # set and astng is missing this.
for fclass in astng.igetattr('socket'):
#print fclass.root().name, fclass.name, fclass.lineno
self.assertIn('connect', fclass)
self.assertIn('send', fclass)
self.assertIn('close', fclass)
break
def test_gen_expr_var_scope(self):
data = 'l = list(n for n in range(10))\n'
astng = self.builder.string_build(data, __name__, __file__)
# n unavailable outside gen expr scope
self.assertNotIn('n', astng)
# test n is inferable anyway
n = get_name_node(astng, 'n')
self.assertIsNot(n.scope(), astng)
self.assertEqual([i.__class__ for i in n.infer()],
[YES.__class__])
class FileBuildTC(TestCase):
module = builder.ASTNGBuilder().file_build(join(DATA, 'module.py'), 'data.module')
def test_module_base_props(self):
"""test base properties and method of a astng module"""
module = self.module
self.assertEqual(module.name, 'data.module')
self.assertEqual(module.doc, "test module for astng\n")
self.assertEqual(module.fromlineno, 0)
self.assertIsNone(module.parent)
self.assertEqual(module.frame(), module)
self.assertEqual(module.root(), module)
self.assertEqual(module.file, join(abspath(data.__path__[0]), 'module.py'))
self.assertEqual(module.pure_python, 1)
self.assertEqual(module.package, 0)
self.assertFalse(module.is_statement)
self.assertEqual(module.statement(), module)
self.assertEqual(module.statement(), module)
def test_module_locals(self):
"""test the 'locals' dictionary of a astng module"""
module = self.module
_locals = module.locals
self.assertIs(_locals, module.globals)
keys = sorted(_locals.keys())
should = ['MY_DICT', 'YO', 'YOUPI',
'__revision__', 'global_access','modutils', 'four_args',
'os', 'redirect', 'spawn', 'LocalsVisitor', 'ASTWalker']
should.sort()
self.assertEqual(keys, should)
def test_function_base_props(self):
"""test base properties and method of a astng function"""
module = self.module
function = module['global_access']
self.assertEqual(function.name, 'global_access')
self.assertEqual(function.doc, 'function test')
self.assertEqual(function.fromlineno, 11)
self.assertTrue(function.parent)
self.assertEqual(function.frame(), function)
self.assertEqual(function.parent.frame(), module)
self.assertEqual(function.root(), module)
self.assertEqual([n.name for n in function.args.args], ['key', 'val'])
self.assertEqual(function.type, 'function')
def test_function_locals(self):
"""test the 'locals' dictionary of a astng function"""
_locals = self.module['global_access'].locals
self.assertEqual(len(_locals), 4)
keys = sorted(_locals.keys())
self.assertEqual(keys, ['i', 'key', 'local', 'val'])
def test_class_base_props(self):
"""test base properties and method of a astng class"""
module = self.module
klass = module['YO']
self.assertEqual(klass.name, 'YO')
self.assertEqual(klass.doc, 'hehe')
self.assertEqual(klass.fromlineno, 25)
self.assertTrue(klass.parent)
self.assertEqual(klass.frame(), klass)
self.assertEqual(klass.parent.frame(), module)
self.assertEqual(klass.root(), module)
self.assertEqual(klass.basenames, [])
self.assertEqual(klass.newstyle, False)
def test_class_locals(self):
"""test the 'locals' dictionary of a astng class"""
module = self.module
klass1 = module['YO']
locals1 = klass1.locals
keys = sorted(locals1.keys())
self.assertEqual(keys, ['__init__', 'a'])
klass2 = module['YOUPI']
locals2 = klass2.locals
keys = locals2.keys()
keys.sort()
self.assertEqual(keys, ['__init__', 'class_attr', 'class_method',
'method', 'static_method'])
def test_class_instance_attrs(self):
module = self.module
klass1 = module['YO']
klass2 = module['YOUPI']
self.assertEqual(klass1.instance_attrs.keys(), ['yo'])
self.assertEqual(klass2.instance_attrs.keys(), ['member'])
def test_class_basenames(self):
module = self.module
klass1 = module['YO']
klass2 = module['YOUPI']
self.assertEqual(klass1.basenames, [])
self.assertEqual(klass2.basenames, ['YO'])
def test_method_base_props(self):
"""test base properties and method of a astng method"""
klass2 = self.module['YOUPI']
# "normal" method
method = klass2['method']
self.assertEqual(method.name, 'method')
self.assertEqual([n.name for n in method.args.args], ['self'])
self.assertEqual(method.doc, 'method test')
self.assertEqual(method.fromlineno, 47)
self.assertEqual(method.type, 'method')
# class method
method = klass2['class_method']
self.assertEqual([n.name for n in method.args.args], ['cls'])
self.assertEqual(method.type, 'classmethod')
# static method
method = klass2['static_method']
self.assertEqual(method.args.args, [])
self.assertEqual(method.type, 'staticmethod')
def test_method_locals(self):
"""test the 'locals' dictionary of a astng method"""
method = self.module['YOUPI']['method']
_locals = method.locals
keys = sorted(_locals)
if sys.version_info < (3, 0):
self.assertEqual(len(_locals), 5)
self.assertEqual(keys, ['a', 'autre', 'b', 'local', 'self'])
        else:  # ListComp variables are no longer accessible outside
self.assertEqual(len(_locals), 3)
self.assertEqual(keys, ['autre', 'local', 'self'])
class ModuleBuildTC(FileBuildTC):
def setUp(self):
abuilder = builder.ASTNGBuilder()
self.module = abuilder.module_build(test_module)
class MoreTC(TestCase):
def setUp(self):
self.builder = builder.ASTNGBuilder()
def test_infered_build(self):
code = '''class A: pass
A.type = "class"
def A_ass_type(self):
print (self)
A.ass_type = A_ass_type
'''
astng = self.builder.string_build(code)
lclass = list(astng.igetattr('A'))
self.assertEqual(len(lclass), 1)
lclass = lclass[0]
self.assertIn('ass_type', lclass.locals)
self.assertIn('type', lclass.locals)
def test_augassign_attr(self):
astng = self.builder.string_build("""class Counter:
v = 0
def inc(self):
self.v += 1
""", __name__, __file__)
# Check self.v += 1 generate AugAssign(AssAttr(...)), not AugAssign(GetAttr(AssName...))
def test_dumb_module(self):
astng = self.builder.string_build("pouet")
def test_infered_dont_pollute(self):
code = '''
def func(a=None):
a.custom_attr = 0
def func2(a={}):
a.custom_attr = 0
'''
astng = self.builder.string_build(code)
nonetype = nodes.const_factory(None)
self.assertNotIn('custom_attr', nonetype.locals)
self.assertNotIn('custom_attr', nonetype.instance_attrs)
nonetype = nodes.const_factory({})
self.assertNotIn('custom_attr', nonetype.locals)
self.assertNotIn('custom_attr', nonetype.instance_attrs)
def test_asstuple(self):
code = 'a, b = range(2)'
astng = self.builder.string_build(code)
self.assertIn('b', astng.locals)
code = '''
def visit_if(self, node):
node.test, body = node.tests[0]
'''
astng = self.builder.string_build(code)
self.assertIn('body', astng['visit_if'].locals)
def test_build_constants(self):
'''test expected values of constants after rebuilding'''
code = '''
def func():
return None
return
return 'None'
'''
astng = self.builder.string_build(code)
none, nothing, chain = [ret.value for ret in astng.body[0].body]
self.assertIsInstance(none, nodes.Const)
self.assertIsNone(none.value)
self.assertIsNone(nothing)
self.assertIsInstance(chain, nodes.Const)
self.assertEqual(chain.value, 'None')
def test_lgc_classproperty(self):
'''test expected values of constants after rebuilding'''
code = '''
from logilab.common.decorators import classproperty
class A(object):
@classproperty
def hop(cls):
return None
'''
astng = self.builder.string_build(code)
self.assertEqual(astng['A']['hop'].type, 'classmethod')
if sys.version_info < (3, 0):
guess_encoding = builder._guess_encoding
class TestGuessEncoding(TestCase):
def testEmacs(self):
e = guess_encoding('# -*- coding: UTF-8 -*-')
self.assertEqual(e, 'UTF-8')
e = guess_encoding('# -*- coding:UTF-8 -*-')
self.assertEqual(e, 'UTF-8')
e = guess_encoding('''
### -*- coding: ISO-8859-1 -*-
''')
self.assertEqual(e, 'ISO-8859-1')
            e = guess_encoding('''

            ### -*- coding: ISO-8859-1 -*-
            ''')
self.assertIsNone(e)
def testVim(self):
e = guess_encoding('# vim:fileencoding=UTF-8')
self.assertEqual(e, 'UTF-8')
e = guess_encoding('''
### vim:fileencoding=ISO-8859-1
''')
self.assertEqual(e, 'ISO-8859-1')
e = guess_encoding('''
### vim:fileencoding= ISO-8859-1
''')
self.assertIsNone(e)
def test_wrong_coding(self):
# setting "coding" varaible
e = guess_encoding("coding = UTF-8")
self.assertIsNone(e)
            # setting a dictionary entry
e = guess_encoding("coding:UTF-8")
self.assertIsNone(e)
            # setting an argument
e = guess_encoding("def do_something(a_word_with_coding=None):")
self.assertIsNone(e)
def testUTF8(self):
e = guess_encoding('\xef\xbb\xbf any UTF-8 data')
self.assertEqual(e, 'UTF-8')
e = guess_encoding(' any UTF-8 data \xef\xbb\xbf')
self.assertIsNone(e)
if __name__ == '__main__':
unittest_main()
|
stone5495/NewsBlur | refs/heads/master | apps/social/migrations/0006_guid_hash.py | 18 | # -*- coding: utf-8 -*-
import datetime
import hashlib
from south.db import db
from south.v2 import DataMigration
from django.db import models
from apps.social.models import MSharedStory
class Migration(DataMigration):
def forwards(self, orm):
shared_stories = MSharedStory.objects.all()
count = shared_stories.count()
print "%s shared stories..." % count
for s, story in enumerate(shared_stories):
if s % 100 == 0:
print "%s/%s" % (s+1, count)
story.story_guid_hash = hashlib.sha1(story.story_guid).hexdigest()[:6]
story.save()
def backwards(self, orm):
"Write your backwards methods here."
models = {
}
complete_apps = ['social']
symmetrical = True
|
dunkhong/grr | refs/heads/master | grr/core/grr_response_core/lib/parsers/windows_persistence.py | 2 | #!/usr/bin/env python
"""Parse various Windows persistence mechanisms into PersistenceFiles."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from grr_response_core.lib import artifact_utils
from grr_response_core.lib import parser
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_core.lib.rdfvalues import standard as rdf_standard
from grr_response_core.path_detection import windows as path_detection_windows
class WindowsPersistenceMechanismsParser(parser.ArtifactFilesParser):
"""Turn various persistence objects into PersistenceFiles."""
output_types = [rdf_standard.PersistenceFile]
supported_artifacts = ["WindowsPersistenceMechanisms"]
# Required for environment variable expansion
knowledgebase_dependencies = ["environ_systemdrive", "environ_systemroot"]
def _GetFilePaths(self, path, pathtype, kb):
"""Guess windows filenames from a commandline string."""
environ_vars = artifact_utils.GetWindowsEnvironmentVariablesMap(kb)
path_guesses = path_detection_windows.DetectExecutablePaths([path],
environ_vars)
if not path_guesses:
# TODO(user): yield a ParserAnomaly object
return []
return [
rdf_paths.PathSpec(path=path, pathtype=pathtype)
for path in path_guesses
]
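  # Note: given a command line or registry value such as
  # "C:\Windows\system32\svchost.exe -k netsvcs" (an illustrative value only),
  # the helper above asks path_detection_windows for likely executable paths
  # and wraps each guess in a PathSpec; what actually gets detected depends on
  # the environment variables exposed by the knowledge base.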
def Parse(self, persistence, knowledge_base, download_pathtype):
"""Convert persistence collector output to downloadable rdfvalues."""
pathspecs = []
if isinstance(persistence, rdf_client.WindowsServiceInformation):
if persistence.HasField("binary"):
pathspecs.append(persistence.binary.pathspec)
elif persistence.HasField("image_path"):
pathspecs = self._GetFilePaths(persistence.image_path,
download_pathtype, knowledge_base)
if isinstance(
persistence,
rdf_client_fs.StatEntry) and persistence.HasField("registry_type"):
pathspecs = self._GetFilePaths(persistence.registry_data.string,
download_pathtype, knowledge_base)
for pathspec in pathspecs:
yield rdf_standard.PersistenceFile(pathspec=pathspec)
|
wscullin/spack | refs/heads/qmcpack | var/spack/repos/builtin.mock/packages/flake8/package.py | 3 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Flake8(Package):
"""Package containing as many PEP 8 violations as possible.
All of these violations are exceptions that we allow in
package.py files."""
# Used to tell whether or not the package has been modified
state = 'unmodified'
# Make sure pre-existing noqa is not interfered with
blatant_violation = 'line-that-has-absolutely-no-execuse-for-being-over-79-characters' # noqa
blatant_violation = 'line-that-has-absolutely-no-execuse-for-being-over-79-characters' # noqa: E501
# Keywords exempt from line-length checks
homepage = '#####################################################################'
url = '#####################################################################'
git = '#####################################################################'
svn = '#####################################################################'
hg = '#####################################################################'
list_url = '#####################################################################'
# URL strings exempt from line-length checks
# http://########################################################################
# https://#######################################################################
# ftp://#########################################################################
# file://########################################################################
# Directives exempt from line-length checks
version('2.0', '0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef')
version('1.0', '0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef')
variant('super-awesome-feature', default=True, description='Enable super awesome feature')
variant('somewhat-awesome-feature', default=False, description='Enable somewhat awesome feature')
provides('lapack', when='@2.0+super-awesome-feature+somewhat-awesome-feature')
extends('python', ignore='bin/(why|does|every|package|that|depends|on|numpy|need|to|copy|f2py3?)')
depends_on('boost+atomic+chrono+date_time~debug+filesystem~graph~icu+iostreams+locale+log+math~mpi+multithreaded+program_options~python+random+regex+serialization+shared+signals~singlethreaded+system~taggedlayout+test+thread+timer+wave')
conflicts('+super-awesome-feature', when='%intel@16:17+somewhat-awesome-feature')
resource(name='Deez-Nuts', destination='White-House', placement='President', when='@2020', url='www.elect-deez-nuts.com')
patch('hyper-specific-patch-that-fixes-some-random-bug-that-probably-only-affects-one-user.patch', when='%[email protected]:3.2.3')
def install(self, spec, prefix):
# Make sure lines with '# noqa' work as expected. Don't just
# remove them entirely. This will mess up the indentation of
# the following lines.
if 'really-long-if-statement' != 'that-goes-over-the-line-length-limit-and-requires-noqa': # noqa
pass
# '@when' decorated functions are exempt from redefinition errors
@when('@2.0')
def install(self, spec, prefix):
pass
|
barbuza/django | refs/heads/master | tests/model_regress/tests.py | 326 | from __future__ import unicode_literals
import datetime
from operator import attrgetter
from django.core.exceptions import ValidationError
from django.db import router
from django.db.models.sql import InsertQuery
from django.test import TestCase, skipUnlessDBFeature
from django.utils import six
from django.utils.timezone import get_fixed_timezone
from .models import (
Article, BrokenUnicodeMethod, Department, Event, Model1, Model2, Model3,
NonAutoPK, Party, Worker,
)
class ModelTests(TestCase):
# The bug is that the following queries would raise:
# "TypeError: Related Field has invalid lookup: gte"
def test_related_gte_lookup(self):
"""
Regression test for #10153: foreign key __gte lookups.
"""
Worker.objects.filter(department__gte=0)
def test_related_lte_lookup(self):
"""
Regression test for #10153: foreign key __lte lookups.
"""
Worker.objects.filter(department__lte=0)
def test_sql_insert_compiler_return_id_attribute(self):
"""
Regression test for #14019: SQLInsertCompiler.as_sql() failure
"""
db = router.db_for_write(Party)
query = InsertQuery(Party)
query.insert_values([Party._meta.fields[0]], [], raw=False)
# this line will raise an AttributeError without the accompanying fix
query.get_compiler(using=db).as_sql()
def test_empty_choice(self):
# NOTE: Part of the regression test here is merely parsing the model
# declaration. The verbose_name, in particular, did not always work.
a = Article.objects.create(
headline="Look at me!", pub_date=datetime.datetime.now()
)
# An empty choice field should return None for the display name.
self.assertIs(a.get_status_display(), None)
# Empty strings should be returned as Unicode
a = Article.objects.get(pk=a.pk)
self.assertEqual(a.misc_data, '')
self.assertIs(type(a.misc_data), six.text_type)
def test_long_textfield(self):
# TextFields can hold more than 4000 characters (this was broken in
# Oracle).
a = Article.objects.create(
headline="Really, really big",
pub_date=datetime.datetime.now(),
article_text="ABCDE" * 1000
)
a = Article.objects.get(pk=a.pk)
self.assertEqual(len(a.article_text), 5000)
def test_long_unicode_textfield(self):
# TextFields can hold more than 4000 bytes also when they are
# less than 4000 characters
a = Article.objects.create(
headline="Really, really big",
pub_date=datetime.datetime.now(),
article_text='\u05d0\u05d1\u05d2' * 1000
)
a = Article.objects.get(pk=a.pk)
self.assertEqual(len(a.article_text), 3000)
def test_date_lookup(self):
# Regression test for #659
Party.objects.create(when=datetime.datetime(1999, 12, 31))
Party.objects.create(when=datetime.datetime(1998, 12, 31))
Party.objects.create(when=datetime.datetime(1999, 1, 1))
Party.objects.create(when=datetime.datetime(1, 3, 3))
self.assertQuerysetEqual(
Party.objects.filter(when__month=2), []
)
self.assertQuerysetEqual(
Party.objects.filter(when__month=1), [
datetime.date(1999, 1, 1)
],
attrgetter("when")
)
self.assertQuerysetEqual(
Party.objects.filter(when__month=12), [
datetime.date(1999, 12, 31),
datetime.date(1998, 12, 31),
],
attrgetter("when"),
ordered=False
)
self.assertQuerysetEqual(
Party.objects.filter(when__year=1998), [
datetime.date(1998, 12, 31),
],
attrgetter("when")
)
# Regression test for #8510
self.assertQuerysetEqual(
Party.objects.filter(when__day="31"), [
datetime.date(1999, 12, 31),
datetime.date(1998, 12, 31),
],
attrgetter("when"),
ordered=False
)
self.assertQuerysetEqual(
Party.objects.filter(when__month="12"), [
datetime.date(1999, 12, 31),
datetime.date(1998, 12, 31),
],
attrgetter("when"),
ordered=False
)
self.assertQuerysetEqual(
Party.objects.filter(when__year="1998"), [
datetime.date(1998, 12, 31),
],
attrgetter("when")
)
# Regression test for #18969
self.assertQuerysetEqual(
Party.objects.filter(when__year=1), [
datetime.date(1, 3, 3),
],
attrgetter("when")
)
self.assertQuerysetEqual(
Party.objects.filter(when__year='1'), [
datetime.date(1, 3, 3),
],
attrgetter("when")
)
def test_date_filter_null(self):
# Date filtering was failing with NULL date values in SQLite
# (regression test for #3501, among other things).
Party.objects.create(when=datetime.datetime(1999, 1, 1))
Party.objects.create()
p = Party.objects.filter(when__month=1)[0]
self.assertEqual(p.when, datetime.date(1999, 1, 1))
self.assertQuerysetEqual(
Party.objects.filter(pk=p.pk).dates("when", "month"), [
1
],
attrgetter("month")
)
def test_get_next_prev_by_field(self):
# Check that get_next_by_FIELD and get_previous_by_FIELD don't crash
# when we have usecs values stored on the database
#
# It crashed after the Field.get_db_prep_* refactor, because on most
        # backends DateTimeField supports usecs, but DateTimeField.to_python
# didn't recognize them. (Note that
# Model._get_next_or_previous_by_FIELD coerces values to strings)
Event.objects.create(when=datetime.datetime(2000, 1, 1, 16, 0, 0))
Event.objects.create(when=datetime.datetime(2000, 1, 1, 6, 1, 1))
Event.objects.create(when=datetime.datetime(2000, 1, 1, 13, 1, 1))
e = Event.objects.create(when=datetime.datetime(2000, 1, 1, 12, 0, 20, 24))
self.assertEqual(
e.get_next_by_when().when, datetime.datetime(2000, 1, 1, 13, 1, 1)
)
self.assertEqual(
e.get_previous_by_when().when, datetime.datetime(2000, 1, 1, 6, 1, 1)
)
def test_primary_key_foreign_key_types(self):
# Check Department and Worker (non-default PK type)
d = Department.objects.create(id=10, name="IT")
w = Worker.objects.create(department=d, name="Full-time")
self.assertEqual(six.text_type(w), "Full-time")
def test_broken_unicode(self):
# Models with broken unicode methods should still have a printable repr
b = BrokenUnicodeMethod.objects.create(name="Jerry")
self.assertEqual(repr(b), "<BrokenUnicodeMethod: [Bad Unicode data]>")
@skipUnlessDBFeature("supports_timezones")
def test_timezones(self):
        # Saving and updating with timezone-aware datetime Python objects.
# Regression test for #10443.
# The idea is that all these creations and saving should work without
# crashing. It's not rocket science.
dt1 = datetime.datetime(2008, 8, 31, 16, 20, tzinfo=get_fixed_timezone(600))
dt2 = datetime.datetime(2008, 8, 31, 17, 20, tzinfo=get_fixed_timezone(600))
obj = Article.objects.create(
headline="A headline", pub_date=dt1, article_text="foo"
)
obj.pub_date = dt2
obj.save()
self.assertEqual(
Article.objects.filter(headline="A headline").update(pub_date=dt1),
1
)
def test_chained_fks(self):
"""
Regression for #18432: Chained foreign keys with to_field produce incorrect query
"""
m1 = Model1.objects.create(pkey=1000)
m2 = Model2.objects.create(model1=m1)
m3 = Model3.objects.create(model2=m2)
# this is the actual test for #18432
m3 = Model3.objects.get(model2=1000)
m3.model2
class ModelValidationTest(TestCase):
def test_pk_validation(self):
NonAutoPK.objects.create(name="one")
again = NonAutoPK(name="one")
self.assertRaises(ValidationError, again.validate_unique)
class EvaluateMethodTest(TestCase):
"""
Regression test for #13640: cannot filter by objects with 'evaluate' attr
"""
def test_model_with_evaluate_method(self):
"""
Ensures that you can filter by objects that have an 'evaluate' attr
"""
dept = Department.objects.create(pk=1, name='abc')
dept.evaluate = 'abc'
Worker.objects.filter(department=dept)
|
ric2b/Vivaldi-browser | refs/heads/master | chromium/chrome/common/extensions/docs/server2/platform_bundle_test.py | 44 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import unittest
from extensions_paths import CHROME_API, CHROME_EXTENSIONS, EXTENSIONS_API
from mock_file_system import MockFileSystem
from server_instance import ServerInstance
from test_file_system import TestFileSystem
from test_util import ReadFile
_TEST_DATA = {
'api': {
'devtools': {
'inspected_window.json': ReadFile(
CHROME_API, 'devtools', 'inspected_window.json'),
},
'_api_features.json': json.dumps({
'alarms': {},
'app': {'extension_types': ['platform_app']},
'app.runtime': {'noparent': True},
'app.runtime.foo': {'extension_types': ['extension']},
'declarativeWebRequest': {'extension_types': ['extension']},
'devtools.inspectedWindow': {'extension_types': ['extension']},
'input': {'extension_types': 'all'},
'input.ime': {'extension_types': ['extension', 'platform_app']},
'storage': {'extension_types': ['extension']},
}),
'_manifest_features.json': '{}',
'_permission_features.json': '{}',
'alarms.idl': ReadFile(EXTENSIONS_API, 'alarms.idl'),
'input_ime.json': ReadFile(CHROME_API, 'input_ime.json'),
'page_action.json': ReadFile(CHROME_API, 'page_action.json'),
},
'docs': {
'templates': {
'json': {
'manifest.json': '{}',
'permissions.json': '{}',
}
}
},
}
class PlatformBundleTest(unittest.TestCase):
def setUp(self):
mock_file_system = MockFileSystem(
TestFileSystem(_TEST_DATA, relative_to=CHROME_EXTENSIONS))
server_instance = ServerInstance.ForTest(file_system=mock_file_system)
self._platform_bundle = server_instance.platform_bundle
def testGetters(self):
self.assertEqual([
'alarms',
'app.runtime',
'declarativeWebRequest',
'devtools.inspectedWindow',
'input',
'storage'
], sorted(self._platform_bundle.GetAPIModels('extensions').GetNames()))
self.assertEqual([
'alarms',
'app',
'app.runtime',
'input'
], sorted(self._platform_bundle.GetAPIModels('apps').GetNames()))
self.assertEqual({
'app.runtime': {
'name': 'app.runtime',
'noparent': True,
'channel': 'stable'
},
'declarativeWebRequest': {
'name': 'declarativeWebRequest',
'channel': 'stable',
'extension_types': ['extension'],
},
'app.runtime.foo': {
'name': 'app.runtime.foo',
'channel': 'stable',
'extension_types': ['extension'],
},
'storage': {
'name': 'storage',
'channel': 'stable',
'extension_types': ['extension'],
},
'input.ime': {
'name': 'input.ime',
'channel': 'stable',
'extension_types': ['extension', 'platform_app'],
},
'alarms': {
'name': 'alarms',
'channel': 'stable'
},
'input': {
'name': 'input',
'channel': 'stable',
'extension_types': 'all'
},
'devtools.inspectedWindow': {
'name': 'devtools.inspectedWindow',
'channel': 'stable',
'extension_types': ['extension'],
}
}, self._platform_bundle.GetFeaturesBundle(
'extensions').GetAPIFeatures().Get())
self.assertEqual({
'app.runtime': {
'name': 'app.runtime',
'noparent': True,
'channel': 'stable'
},
'input': {
'name': 'input',
'channel': 'stable',
'extension_types': 'all'
},
'input.ime': {
'name': 'input.ime',
'channel': 'stable',
'extension_types': ['extension', 'platform_app'],
},
'app': {
'name': 'app',
'channel': 'stable',
'extension_types': ['platform_app'],
},
'alarms': {
'name': 'alarms',
'channel': 'stable'
}
}, self._platform_bundle.GetFeaturesBundle('apps').GetAPIFeatures().Get())
# Check that 'app' is resolved successfully in apps, but is None otherwise.
self.assertNotEqual(
None,
self._platform_bundle.GetReferenceResolver('apps').GetLink('app'))
self.assertEqual(
None,
self._platform_bundle.GetReferenceResolver('extensions').GetLink('app'))
if __name__ == '__main__':
unittest.main()
|
kawamuray/ganeti | refs/heads/build | test/py/ganeti.confd.client_unittest.py | 8 | #!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Script for unittesting the confd client module"""
import socket
import unittest
from ganeti import confd
from ganeti import constants
from ganeti import errors
import ganeti.confd.client
import testutils
class ResettableMock(object):
def __init__(self, *args, **kwargs):
self.Reset()
def Reset(self):
pass
class MockLogger(ResettableMock):
def Reset(self):
self.debug_count = 0
self.warn_count = 0
self.error_count = 0
  def debug(self, string):
    self.debug_count += 1
  def warning(self, string):
    self.warn_count += 1
  def error(self, string):
    self.error_count += 1
class MockConfdAsyncUDPClient(ResettableMock):
def Reset(self):
self.send_count = 0
self.last_address = ''
self.last_port = -1
self.last_sent = ''
def enqueue_send(self, address, port, payload):
self.send_count += 1
self.last_payload = payload
self.last_port = port
self.last_address = address
class MockCallback(ResettableMock):
def Reset(self):
self.call_count = 0
self.last_up = None
def __call__(self, up):
"""Callback
@type up: L{ConfdUpcallPayload}
    @param up: the upcall payload
"""
self.call_count += 1
self.last_up = up
class MockTime(ResettableMock):
def Reset(self):
self.mytime = 1254213006.5175071
def time(self):
return self.mytime
def increase(self, delta):
self.mytime += delta
class _BaseClientTest:
"""Base class for client tests"""
mc_list = None
new_peers = None
family = None
def setUp(self):
self.mock_time = MockTime()
confd.client.time = self.mock_time
confd.client.ConfdAsyncUDPClient = MockConfdAsyncUDPClient
self.logger = MockLogger()
hmac_key = "mykeydata"
self.callback = MockCallback()
self.client = confd.client.ConfdClient(hmac_key, self.mc_list,
self.callback, logger=self.logger)
def testRequest(self):
req1 = confd.client.ConfdClientRequest(type=constants.CONFD_REQ_PING)
req2 = confd.client.ConfdClientRequest(type=constants.CONFD_REQ_PING)
self.assertNotEqual(req1.rsalt, req2.rsalt)
self.assertEqual(req1.protocol, constants.CONFD_PROTOCOL_VERSION)
self.assertEqual(req2.protocol, constants.CONFD_PROTOCOL_VERSION)
self.assertRaises(errors.ConfdClientError, confd.client.ConfdClientRequest,
type=-33)
def testClientSend(self):
req = confd.client.ConfdClientRequest(type=constants.CONFD_REQ_PING)
self.client.SendRequest(req)
# Cannot send the same request twice
self.assertRaises(errors.ConfdClientError, self.client.SendRequest, req)
req2 = confd.client.ConfdClientRequest(type=constants.CONFD_REQ_PING)
# Coverage is too big
self.assertRaises(errors.ConfdClientError, self.client.SendRequest,
req2, coverage=15)
self.assertEquals(self.client._socket.send_count,
constants.CONFD_DEFAULT_REQ_COVERAGE)
# Send with max coverage
self.client.SendRequest(req2, coverage=-1)
self.assertEquals(self.client._socket.send_count,
constants.CONFD_DEFAULT_REQ_COVERAGE + len(self.mc_list))
self.assert_(self.client._socket.last_address in self.mc_list)
def testClientExpire(self):
req = confd.client.ConfdClientRequest(type=constants.CONFD_REQ_PING)
self.client.SendRequest(req)
# Make a couple of seconds pass ;)
self.mock_time.increase(2)
# Now sending the second request
req2 = confd.client.ConfdClientRequest(type=constants.CONFD_REQ_PING)
self.client.SendRequest(req2)
self.mock_time.increase(constants.CONFD_CLIENT_EXPIRE_TIMEOUT - 1)
# First request should be expired, second one should not
self.client.ExpireRequests()
self.assertEquals(self.callback.call_count, 1)
self.assertEquals(self.callback.last_up.type, confd.client.UPCALL_EXPIRE)
self.assertEquals(self.callback.last_up.salt, req.rsalt)
self.assertEquals(self.callback.last_up.orig_request, req)
self.mock_time.increase(3)
self.assertEquals(self.callback.call_count, 1)
self.client.ExpireRequests()
self.assertEquals(self.callback.call_count, 2)
self.assertEquals(self.callback.last_up.type, confd.client.UPCALL_EXPIRE)
self.assertEquals(self.callback.last_up.salt, req2.rsalt)
self.assertEquals(self.callback.last_up.orig_request, req2)
def testClientCascadeExpire(self):
req = confd.client.ConfdClientRequest(type=constants.CONFD_REQ_PING)
self.client.SendRequest(req)
self.mock_time.increase(constants.CONFD_CLIENT_EXPIRE_TIMEOUT +1)
req2 = confd.client.ConfdClientRequest(type=constants.CONFD_REQ_PING)
self.client.SendRequest(req2)
self.assertEquals(self.callback.call_count, 1)
def testUpdatePeerList(self):
self.client.UpdatePeerList(self.new_peers)
self.assertEquals(self.client._peers, self.new_peers)
req = confd.client.ConfdClientRequest(type=constants.CONFD_REQ_PING)
self.client.SendRequest(req)
self.assertEquals(self.client._socket.send_count, len(self.new_peers))
self.assert_(self.client._socket.last_address in self.new_peers)
def testSetPeersFamily(self):
self.client._SetPeersAddressFamily()
self.assertEquals(self.client._family, self.family)
mixed_peers = ["192.0.2.99", "2001:db8:beef::13"]
self.client.UpdatePeerList(mixed_peers)
self.assertRaises(errors.ConfdClientError,
self.client._SetPeersAddressFamily)
class TestIP4Client(unittest.TestCase, _BaseClientTest):
"""Client tests"""
mc_list = ["192.0.2.1",
"192.0.2.2",
"192.0.2.3",
"192.0.2.4",
"192.0.2.5",
"192.0.2.6",
"192.0.2.7",
"192.0.2.8",
"192.0.2.9",
]
new_peers = ["198.51.100.1", "198.51.100.2"]
family = socket.AF_INET
def setUp(self):
unittest.TestCase.setUp(self)
_BaseClientTest.setUp(self)
class TestIP6Client(unittest.TestCase, _BaseClientTest):
"""Client tests"""
mc_list = ["2001:db8::1",
"2001:db8::2",
"2001:db8::3",
"2001:db8::4",
"2001:db8::5",
"2001:db8::6",
"2001:db8::7",
"2001:db8::8",
"2001:db8::9",
]
new_peers = ["2001:db8:beef::11", "2001:db8:beef::12"]
family = socket.AF_INET6
def setUp(self):
unittest.TestCase.setUp(self)
_BaseClientTest.setUp(self)
if __name__ == "__main__":
testutils.GanetiTestProgram()
|
florian-dacosta/OpenUpgrade | refs/heads/master | addons/analytic/__openerp__.py | 112 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Analytic Accounting',
'version': '1.1',
'author' : 'OpenERP SA',
'website' : 'http://www.openerp.com',
'category': 'Hidden/Dependency',
'depends' : ['base', 'decimal_precision', 'mail'],
'description': """
Module for defining analytic accounting object.
===============================================
In OpenERP, analytic accounts are linked to general accounts but are treated
totally independently. So, you can enter various different analytic operations
that have no counterpart in the general financial accounts.
""",
'data': [
'security/analytic_security.xml',
'security/ir.model.access.csv',
'analytic_sequence.xml',
'analytic_view.xml',
'analytic_data.xml',
],
'demo': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
catapult-project/catapult-csm | refs/heads/master | third_party/gsutil/third_party/rsa/rsa/pem.py | 216 | # -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Functions that load and write PEM-encoded files.'''
import base64
from rsa._compat import b, is_bytes
def _markers(pem_marker):
'''
Returns the start and end PEM markers
'''
if is_bytes(pem_marker):
pem_marker = pem_marker.decode('utf-8')
return (b('-----BEGIN %s-----' % pem_marker),
b('-----END %s-----' % pem_marker))
def load_pem(contents, pem_marker):
'''Loads a PEM file.
@param contents: the contents of the file to interpret
@param pem_marker: the marker of the PEM content, such as 'RSA PRIVATE KEY'
when your file has '-----BEGIN RSA PRIVATE KEY-----' and
'-----END RSA PRIVATE KEY-----' markers.
@return the base64-decoded content between the start and end markers.
@raise ValueError: when the content is invalid, for example when the start
marker cannot be found.
'''
(pem_start, pem_end) = _markers(pem_marker)
pem_lines = []
in_pem_part = False
for line in contents.splitlines():
line = line.strip()
# Skip empty lines
if not line:
continue
# Handle start marker
if line == pem_start:
if in_pem_part:
raise ValueError('Seen start marker "%s" twice' % pem_start)
in_pem_part = True
continue
# Skip stuff before first marker
if not in_pem_part:
continue
# Handle end marker
if in_pem_part and line == pem_end:
in_pem_part = False
break
# Load fields
if b(':') in line:
continue
pem_lines.append(line)
# Do some sanity checks
if not pem_lines:
raise ValueError('No PEM start marker "%s" found' % pem_start)
if in_pem_part:
raise ValueError('No PEM end marker "%s" found' % pem_end)
# Base64-decode the contents
pem = b('').join(pem_lines)
return base64.decodestring(pem)
def save_pem(contents, pem_marker):
'''Saves a PEM file.
@param contents: the contents to encode in PEM format
@param pem_marker: the marker of the PEM content, such as 'RSA PRIVATE KEY'
when your file has '-----BEGIN RSA PRIVATE KEY-----' and
'-----END RSA PRIVATE KEY-----' markers.
@return the base64-encoded content between the start and end markers.
'''
(pem_start, pem_end) = _markers(pem_marker)
b64 = base64.encodestring(contents).replace(b('\n'), b(''))
pem_lines = [pem_start]
for block_start in range(0, len(b64), 64):
block = b64[block_start:block_start + 64]
pem_lines.append(block)
pem_lines.append(pem_end)
pem_lines.append(b(''))
return b('\n').join(pem_lines)
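# Usage sketch (illustrative values only, guarded so importing the module is
# unaffected): bytes written with save_pem() should round-trip via load_pem().
if __name__ == '__main__':
    _payload = b('illustrative payload bytes')
    _pem = save_pem(_payload, 'RSA PRIVATE KEY')
    assert load_pem(_pem, 'RSA PRIVATE KEY') == _payload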
|
mov-q/dumpy | refs/heads/master | discariche/model/reallocation.py | 1 | """Reallocation model"""
from sqlalchemy import Column, UniqueConstraint, ForeignKey
from sqlalchemy import schema as saschema
from sqlalchemy.types import Integer, String, Unicode, Float, UnicodeText, DateTime
from discariche.model.meta import Base
class Reallocation(Base):
__tablename__ = "reallocation"
id = Column(Integer, primary_key=True)
id_dump = Column(Integer, saschema.ForeignKey('dump.id', onupdate="CASCADE", ondelete="CASCADE"))
id_dumptype = Column(Integer, saschema.ForeignKey('dumptype.id',onupdate="CASCADE", ondelete="SET NULL"))
start_date = Column(DateTime, nullable=False)
end_date = Column(DateTime, nullable=True)
notes = Column(Unicode(512), nullable=True)
modder = Column(Integer, saschema.ForeignKey('user.id', onupdate="CASCADE", ondelete="SET NULL"))
lastmod = Column(DateTime, nullable=False)
__table_args__ = (
{
"mysql_engine":"InnoDB",
"mysql_charset":"utf8"
}
)
def __init__(self):
pass
def __repr__(self):
        return "<Reallocation id=%r>" % (self.id,)
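# Usage sketch (illustrative values, guarded so importing the model is
# unaffected): __init__ takes no arguments, so columns are assigned after
# construction and the instance would then be added to a SQLAlchemy session.
if __name__ == '__main__':
    import datetime
    realloc = Reallocation()
    realloc.id_dump = 1
    realloc.start_date = datetime.datetime.now()
    realloc.lastmod = datetime.datetime.now()
    # realloc would then go through Session.add(realloc) / Session.commit()
    # with whatever session factory the application configures.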
|
hnakamur/site-alive-checker | refs/heads/master | wtforms/form.py | 15 | __all__ = (
'BaseForm',
'Form',
)
class BaseForm(object):
"""
Base Form Class. Provides core behaviour like field construction,
validation, and data and error proxying.
"""
def __init__(self, fields, prefix=''):
"""
:param fields:
A dict or sequence of 2-tuples of partially-constructed fields.
:param prefix:
If provided, all fields will have their name prefixed with the
value.
"""
if prefix and prefix[-1] not in '-_;:/.':
prefix += '-'
self._prefix = prefix
self._errors = None
self._fields = {}
if hasattr(fields, 'items'):
fields = fields.items()
translations = self._get_translations()
for name, unbound_field in fields:
field = unbound_field.bind(form=self, name=name, prefix=prefix, translations=translations)
self._fields[name] = field
def __iter__(self):
""" Iterate form fields in arbitrary order """
return self._fields.itervalues()
def __contains__(self, name):
""" Returns `True` if the named field is a member of this form. """
return (name in self._fields)
def __getitem__(self, name):
""" Dict-style access to this form's fields."""
return self._fields[name]
def __setitem__(self, name, value):
""" Bind a field to this form. """
self._fields[name] = value.bind(form=self, name=name, prefix=self._prefix)
def __delitem__(self, name):
""" Remove a field from this form. """
del self._fields[name]
def _get_translations(self):
"""
Override in subclasses to provide alternate translations factory.
Must return an object that provides gettext() and ngettext() methods.
"""
return None
def populate_obj(self, obj):
"""
Populates the attributes of the passed `obj` with data from the form's
fields.
:note: This is a destructive operation; Any attribute with the same name
as a field will be overridden. Use with caution.
"""
for name, field in self._fields.iteritems():
field.populate_obj(obj, name)
def process(self, formdata=None, obj=None, **kwargs):
"""
Take form, object data, and keyword arg input and have the fields
process them.
:param formdata:
Used to pass data coming from the enduser, usually `request.POST` or
equivalent.
:param obj:
If `formdata` is empty or not provided, this object is checked for
attributes matching form field names, which will be used for field
values.
:param `**kwargs`:
If `formdata` is empty or not provided and `obj` does not contain
an attribute named the same as a field, form will assign the value
of a matching keyword argument to the field, if one exists.
"""
if formdata is not None and not hasattr(formdata, 'getlist'):
if hasattr(formdata, 'getall'):
formdata = WebobInputWrapper(formdata)
else:
raise TypeError("formdata should be a multidict-type wrapper that supports the 'getlist' method")
        for name, field in self._fields.iteritems():
if obj is not None and hasattr(obj, name):
field.process(formdata, getattr(obj, name))
elif name in kwargs:
field.process(formdata, kwargs[name])
else:
field.process(formdata)
def validate(self, extra_validators=None):
"""
Validates the form by calling `validate` on each field.
:param extra_validators:
If provided, is a dict mapping field names to a sequence of
callables which will be passed as extra validators to the field's
`validate` method.
Returns `True` if no errors occur.
"""
self._errors = None
success = True
for name, field in self._fields.iteritems():
if extra_validators is not None and name in extra_validators:
extra = extra_validators[name]
else:
extra = tuple()
if not field.validate(self, extra):
success = False
return success
@property
def data(self):
return dict((name, f.data) for name, f in self._fields.iteritems())
@property
def errors(self):
if self._errors is None:
self._errors = dict((name, f.errors) for name, f in self._fields.iteritems() if f.errors)
return self._errors
class FormMeta(type):
"""
The metaclass for `Form` and any subclasses of `Form`.
`FormMeta`'s responsibility is to create the `_unbound_fields` list, which
is a list of `UnboundField` instances sorted by their order of
instantiation. The list is created at the first instantiation of the form.
If any fields are added/removed from the form, the list is cleared to be
    re-generated on the next instantiation.
Any properties which begin with an underscore or are not `UnboundField`
instances are ignored by the metaclass.
"""
def __init__(cls, name, bases, attrs):
type.__init__(cls, name, bases, attrs)
cls._unbound_fields = None
def __call__(cls, *args, **kwargs):
"""
Construct a new `Form` instance, creating `_unbound_fields` on the
class if it is empty.
"""
if cls._unbound_fields is None:
fields = []
for name in dir(cls):
if not name.startswith('_'):
unbound_field = getattr(cls, name)
if hasattr(unbound_field, '_formfield'):
fields.append((name, unbound_field))
# We keep the name as the second element of the sort
# to ensure a stable sort.
fields.sort(key=lambda x: (x[1].creation_counter, x[0]))
cls._unbound_fields = fields
return type.__call__(cls, *args, **kwargs)
def __setattr__(cls, name, value):
"""
Add an attribute to the class, clearing `_unbound_fields` if needed.
"""
if not name.startswith('_') and hasattr(value, '_formfield'):
cls._unbound_fields = None
type.__setattr__(cls, name, value)
def __delattr__(cls, name):
"""
Remove an attribute from the class, clearing `_unbound_fields` if
needed.
"""
if not name.startswith('_'):
cls._unbound_fields = None
type.__delattr__(cls, name)
class Form(BaseForm):
"""
Declarative Form base class. Extends BaseForm's core behaviour allowing
fields to be defined on Form subclasses as class attributes.
In addition, form and instance input data are taken at construction time
and passed to `process()`.
"""
__metaclass__ = FormMeta
def __init__(self, formdata=None, obj=None, prefix='', **kwargs):
"""
:param formdata:
Used to pass data coming from the enduser, usually `request.POST` or
equivalent.
:param obj:
If `formdata` is empty or not provided, this object is checked for
attributes matching form field names, which will be used for field
values.
:param prefix:
If provided, all fields will have their name prefixed with the
value.
:param `**kwargs`:
If `formdata` is empty or not provided and `obj` does not contain
an attribute named the same as a field, form will assign the value
of a matching keyword argument to the field, if one exists.
"""
super(Form, self).__init__(self._unbound_fields, prefix=prefix)
for name, field in self._fields.iteritems():
# Set all the fields to attributes so that they obscure the class
# attributes with the same names.
setattr(self, name, field)
self.process(formdata, obj, **kwargs)
def __iter__(self):
""" Iterate form fields in their order of definition on the form. """
for name, _ in self._unbound_fields:
if name in self._fields:
yield self._fields[name]
def __setitem__(self, name, value):
raise TypeError('Fields may not be added to Form instances, only classes.')
def __delitem__(self, name):
del self._fields[name]
setattr(self, name, None)
def __delattr__(self, name):
try:
self.__delitem__(name)
except KeyError:
super(Form, self).__delattr__(name)
def validate(self):
"""
Validates the form by calling `validate` on each field, passing any
extra `Form.validate_<fieldname>` validators to the field validator.
"""
extra = {}
for name in self._fields:
inline = getattr(self.__class__, 'validate_%s' % name, None)
if inline is not None:
extra[name] = [inline]
return super(Form, self).validate(extra)
class WebobInputWrapper(object):
"""
Wrap a webob MultiDict for use as passing as `formdata` to Field.
Since for consistency, we have decided in WTForms to support as input a
small subset of the API provided in common between cgi.FieldStorage,
Django's QueryDict, and Werkzeug's MultiDict, we need to wrap Webob, the
only supported framework whose multidict does not fit this API, but is
nevertheless used by a lot of frameworks.
While we could write a full wrapper to support all the methods, this will
undoubtedly result in bugs due to some subtle differences between the
various wrappers. So we will keep it simple.
"""
def __init__(self, multidict):
self._wrapped = multidict
def __iter__(self):
return iter(self._wrapped)
def __len__(self):
return len(self._wrapped)
def __contains__(self, name):
return (name in self._wrapped)
def getlist(self, name):
return self._wrapped.getall(name)
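# Usage sketch (illustrative, guarded so importing the module is unaffected):
# a minimal declarative form. TextField and Required are assumed to be the
# field/validator classes shipped in this same distribution, and the field
# name and data below are made up.
if __name__ == '__main__':
    from wtforms.fields import TextField
    from wtforms.validators import Required
    class _ExampleForm(Form):
        name = TextField(u'Name', [Required()])
    _form = _ExampleForm(name=u'guest')
    print _form.validate(), _form.data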
|
anhstudios/swganh | refs/heads/develop | data/scripts/templates/object/draft_schematic/clothing/shared_clothing_wke_hat_s01.py | 2 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/clothing/shared_clothing_wke_hat_s01.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
yakovenkodenis/rethinkdb | refs/heads/next | test/rql_test/connections/connection_star.py | 34 | #!/usr/bin/env python
'''Basic test that `from rethinkdb import *` works'''
import os, sys
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir, "common"))
import driver, utils
dbName, tableName = utils.get_test_db_table()
# -- import rethikndb driver via star method
proto_r = utils.import_python_driver()
sys.path.insert(0, os.path.dirname(os.path.realpath(proto_r.__file__)))
from rethinkdb import *
# -- import tests
assert r == rethinkdb
assert issubclass(r, object)
assert issubclass(rethinkdb, object)
"""ReqlCursorEmpty
ReqlError
ReqlCompileError
ReqlRuntimeError
ReqlQueryLogicError
ReqlNonExistenceError
ReqlResourceLimitError
ReqlUserError
ReqlInternalError
ReqlTimeoutError
ReqlAvailabilityError
ReqlOpFailedError
ReqlOpIndeterminateError
ReqlDriverError
ReqlAuthError"""
assert issubclass(ReqlCursorEmpty, Exception)
assert issubclass(ReqlError, Exception)
assert issubclass(ReqlCompileError, ReqlError)
assert issubclass(ReqlRuntimeError, ReqlError)
assert issubclass(ReqlRuntimeError, ReqlError)
assert issubclass(ReqlNonExistenceError, ReqlQueryLogicError)
assert issubclass(ReqlResourceLimitError, ReqlError)
assert issubclass(ReqlUserError, ReqlError)
assert issubclass(ReqlInternalError, ReqlError)
assert issubclass(ReqlTimeoutError, ReqlError)
assert issubclass(ReqlAvailabilityError, ReqlError)
assert issubclass(ReqlOpFailedError, ReqlAvailabilityError)
assert issubclass(ReqlOpIndeterminateError, ReqlAvailabilityError)
assert issubclass(ReqlDriverError, ReqlError)
assert issubclass(ReqlAuthError, ReqlDriverError)
assert issubclass(RqlError, Exception)
assert issubclass(RqlClientError, RqlError)
assert issubclass(RqlCompileError, RqlError)
assert issubclass(RqlRuntimeError, RqlError)
assert issubclass(RqlDriverError, Exception)
# -- simple tests
with driver.Process(wait_until_ready=True) as server:
# - connect
r.connect(host=server.host, port=server.driver_port)
conn = rethinkdb.connect(host=server.host, port=server.driver_port)
# - create database
if dbName not in r.db_list().run(conn):
r.db_create(dbName).run(conn)
# - create table
if tableName in r.db(dbName).table_list().run(conn):
r.db(dbName).table_drop(tableName).run(conn)
r.db(dbName).table_create(tableName).run(conn)
    # - simple queries
r.db_list().run(conn)
rethinkdb.db_list().run(conn)
assert len(r.db(dbName).table_list().run(conn)) > 0
assert len(rethinkdb.db(dbName).table_list().run(conn)) > 0
|
JoelSjostrand/counttrie | refs/heads/master | tests/counttrie_test.py | 1 | #! /usr/bin/env python
"""
Unit-test for run-tests
"""
import unittest
import counttrie.counttrie as ct
class TestCountTrie(unittest.TestCase):
def test_counttrie(self):
print "Creating tree"
t = ct.CountTrie()
t.add("AGAAT")
t.add("AGAGG")
t.add("AGAAG")
t.add("CATAC")
t.add("CGCAG")
t.add("CGCAG")
t.add("CGCAG")
t.add("CGC")
t.add("GCATG")
t.add("TACAC")
t.add("")
t.add("")
t.add("")
t.remove("", False)
t.add("A")
t.add("CA")
t.remove("CA")
t.remove("CA")
t.add("TACAT")
t.add("TACATTTT")
t.add("TACATTTTAA")
t.remove("TACATTTTAA")
#
print "Printing sequences"
l, c = t.get_sequences()
for i in range(len(l)):
print l[i] + "\t" + str(c[i])
#
print "Printing counts"
print t.get_count("")
print t.get_count("CGCAG")
print t.get_count("A")
print t.get_count("C")
print t.size()
print t.size(True)
#
print "Finding with 2 mismatches with indels for AGCAG"
s = t.find("AGCAG", 2, True)
for i in s:
print i
#
print "Removing count range"
t.remove_count_range(1,1)
l, c = t.get_sequences()
for i in range(len(l)):
print l[i] + "\t" + str(c[i])
if __name__ == '__main__':
unittest.main()
|
jaredly/pyjamas | refs/heads/master | pygtkweb/demos/039-itemfactory.py | 7 | #!/usr/bin/env python
# example itemfactory.py
import pygtk
pygtk.require('2.0')
import gtk
class ItemFactoryExample:
# Obligatory basic callback
def print_hello(self, w, data):
print "Hello, World!"
# This is the ItemFactoryEntry structure used to generate new menus.
# Item 1: The menu path. The letter after the underscore indicates an
# accelerator key once the menu is open.
# Item 2: The accelerator key for the entry
# Item 3: The callback.
# Item 4: The callback action. This changes the parameters with
# which the callback is called. The default is 0.
# Item 5: The item type, used to define what kind of an item it is.
# Here are the possible values:
# NULL -> "<Item>"
# "" -> "<Item>"
# "<Title>" -> create a title item
# "<Item>" -> create a simple item
# "<CheckItem>" -> create a check item
# "<ToggleItem>" -> create a toggle item
# "<RadioItem>" -> create a radio item
# <path> -> path of a radio item to link against
# "<Separator>" -> create a separator
# "<Branch>" -> create an item to hold sub items (optional)
# "<LastBranch>" -> create a right justified branch
def get_main_menu(self, window):
accel_group = gtk.AccelGroup()
# This function initializes the item factory.
# Param 1: The type of menu - can be MenuBar, Menu,
# or OptionMenu.
# Param 2: The path of the menu.
# Param 3: A reference to an AccelGroup. The item factory sets up
# the accelerator table while generating menus.
item_factory = gtk.ItemFactory(gtk.MenuBar, "<main>", accel_group)
# This method generates the menu items. Pass to the item factory
# the list of menu items
item_factory.create_items(self.menu_items)
# Attach the new accelerator group to the window.
window.add_accel_group(accel_group)
# need to keep a reference to item_factory to prevent its destruction
self.item_factory = item_factory
# Finally, return the actual menu bar created by the item factory.
return item_factory.get_widget("<main>")
def __init__(self):
self.menu_items = (
( "/_File", None, None, 0, "<Branch>" ),
( "/File/_New", "<control>N", self.print_hello, 0, None ),
( "/File/_Open", "<control>O", self.print_hello, 0, None ),
( "/File/_Save", "<control>S", self.print_hello, 0, None ),
( "/File/Save _As", None, None, 0, None ),
( "/File/sep1", None, None, 0, "<Separator>" ),
( "/File/Quit", "<control>Q", gtk.main_quit, 0, None ),
( "/_Options", None, None, 0, "<Branch>" ),
( "/Options/Test", None, None, 0, None ),
( "/_Help", None, None, 0, "<LastBranch>" ),
( "/_Help/About", None, None, 0, None ),
)
window = gtk.Window(gtk.WINDOW_TOPLEVEL)
window.connect("destroy", lambda w: gtk.main_quit())
window.set_title("Item Factory")
window.set_size_request(300, 200)
main_vbox = gtk.VBox(False, 1)
main_vbox.set_border_width(1)
window.add(main_vbox)
main_vbox.show()
menubar = self.get_main_menu(window)
main_vbox.pack_start(menubar, False, True, 0)
menubar.show()
window.show()
def main():
gtk.main()
return 0
if __name__ == "__main__":
ItemFactoryExample()
main()
|
alpeware/gcms | refs/heads/master | lib/tests/test_service_account.py | 5 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""oauth2client tests.
Unit tests for service account credentials implemented using RSA.
"""
import datetime
import json
import os
import tempfile
import unittest
import mock
import rsa
import six
from six.moves import http_client
from oauth2client import client
from oauth2client import crypt
from oauth2client import service_account
from oauth2client import transport
from tests import http_mock
def data_filename(filename):
return os.path.join(os.path.dirname(__file__), 'data', filename)
def datafile(filename):
with open(data_filename(filename), 'rb') as file_obj:
return file_obj.read()
class ServiceAccountCredentialsTests(unittest.TestCase):
def setUp(self):
self.orig_signer = crypt.Signer
self.orig_verifier = crypt.Verifier
self.client_id = '123'
self.service_account_email = '[email protected]'
self.private_key_id = 'ABCDEF'
self.private_key = datafile('pem_from_pkcs12.pem')
self.scopes = ['dummy_scope']
self.signer = crypt.Signer.from_string(self.private_key)
self.credentials = service_account.ServiceAccountCredentials(
self.service_account_email,
self.signer,
private_key_id=self.private_key_id,
client_id=self.client_id,
)
def tearDown(self):
crypt.Signer = self.orig_signer
crypt.Verifier = self.orig_verifier
def test__to_json_override(self):
signer = object()
creds = service_account.ServiceAccountCredentials(
'[email protected]', signer)
self.assertEqual(creds._signer, signer)
# Serialize over-ridden data (unrelated to ``creds``).
to_serialize = {'unrelated': 'data'}
serialized_str = creds._to_json([], to_serialize.copy())
serialized_data = json.loads(serialized_str)
expected_serialized = {
'_class': 'ServiceAccountCredentials',
'_module': 'oauth2client.service_account',
'token_expiry': None,
}
expected_serialized.update(to_serialize)
self.assertEqual(serialized_data, expected_serialized)
def test_sign_blob(self):
private_key_id, signature = self.credentials.sign_blob('Google')
self.assertEqual(self.private_key_id, private_key_id)
pub_key = rsa.PublicKey.load_pkcs1_openssl_pem(
datafile('publickey_openssl.pem'))
self.assertTrue(rsa.pkcs1.verify(b'Google', signature, pub_key))
with self.assertRaises(rsa.pkcs1.VerificationError):
rsa.pkcs1.verify(b'Orest', signature, pub_key)
with self.assertRaises(rsa.pkcs1.VerificationError):
rsa.pkcs1.verify(b'Google', b'bad signature', pub_key)
def test_service_account_email(self):
self.assertEqual(self.service_account_email,
self.credentials.service_account_email)
@staticmethod
def _from_json_keyfile_name_helper(payload, scopes=None,
token_uri=None, revoke_uri=None):
filehandle, filename = tempfile.mkstemp()
os.close(filehandle)
try:
with open(filename, 'w') as file_obj:
json.dump(payload, file_obj)
return (
service_account.ServiceAccountCredentials
.from_json_keyfile_name(
filename, scopes=scopes, token_uri=token_uri,
revoke_uri=revoke_uri))
finally:
os.remove(filename)
@mock.patch('oauth2client.crypt.Signer.from_string',
return_value=object())
def test_from_json_keyfile_name_factory(self, signer_factory):
client_id = 'id123'
client_email = '[email protected]'
private_key_id = 'pkid456'
private_key = 's3kr3tz'
payload = {
'type': client.SERVICE_ACCOUNT,
'client_id': client_id,
'client_email': client_email,
'private_key_id': private_key_id,
'private_key': private_key,
}
scopes = ['foo', 'bar']
token_uri = 'baz'
revoke_uri = 'qux'
base_creds = self._from_json_keyfile_name_helper(
payload, scopes=scopes, token_uri=token_uri, revoke_uri=revoke_uri)
self.assertEqual(base_creds._signer, signer_factory.return_value)
signer_factory.assert_called_once_with(private_key)
payload['token_uri'] = token_uri
payload['revoke_uri'] = revoke_uri
creds_with_uris_from_file = self._from_json_keyfile_name_helper(
payload, scopes=scopes)
for creds in (base_creds, creds_with_uris_from_file):
self.assertIsInstance(
creds, service_account.ServiceAccountCredentials)
self.assertEqual(creds.client_id, client_id)
self.assertEqual(creds._service_account_email, client_email)
self.assertEqual(creds._private_key_id, private_key_id)
self.assertEqual(creds._private_key_pkcs8_pem, private_key)
self.assertEqual(creds._scopes, ' '.join(scopes))
self.assertEqual(creds.token_uri, token_uri)
self.assertEqual(creds.revoke_uri, revoke_uri)
def test_from_json_keyfile_name_factory_bad_type(self):
type_ = 'bad-type'
self.assertNotEqual(type_, client.SERVICE_ACCOUNT)
payload = {'type': type_}
with self.assertRaises(ValueError):
self._from_json_keyfile_name_helper(payload)
def test_from_json_keyfile_name_factory_missing_field(self):
payload = {
'type': client.SERVICE_ACCOUNT,
'client_id': 'my-client',
}
with self.assertRaises(KeyError):
self._from_json_keyfile_name_helper(payload)
def _from_p12_keyfile_helper(self, private_key_password=None, scopes='',
token_uri=None, revoke_uri=None):
service_account_email = '[email protected]'
filename = data_filename('privatekey.p12')
with open(filename, 'rb') as file_obj:
key_contents = file_obj.read()
creds_from_filename = (
service_account.ServiceAccountCredentials.from_p12_keyfile(
service_account_email, filename,
private_key_password=private_key_password,
scopes=scopes, token_uri=token_uri, revoke_uri=revoke_uri))
creds_from_file_contents = (
service_account.ServiceAccountCredentials.from_p12_keyfile_buffer(
service_account_email, six.BytesIO(key_contents),
private_key_password=private_key_password,
scopes=scopes, token_uri=token_uri, revoke_uri=revoke_uri))
for creds in (creds_from_filename, creds_from_file_contents):
self.assertIsInstance(
creds, service_account.ServiceAccountCredentials)
self.assertIsNone(creds.client_id)
self.assertEqual(creds._service_account_email,
service_account_email)
self.assertIsNone(creds._private_key_id)
self.assertIsNone(creds._private_key_pkcs8_pem)
self.assertEqual(creds._private_key_pkcs12, key_contents)
if private_key_password is not None:
self.assertEqual(creds._private_key_password,
private_key_password)
self.assertEqual(creds._scopes, ' '.join(scopes))
self.assertEqual(creds.token_uri, token_uri)
self.assertEqual(creds.revoke_uri, revoke_uri)
def _p12_not_implemented_helper(self):
service_account_email = '[email protected]'
filename = data_filename('privatekey.p12')
with self.assertRaises(NotImplementedError):
service_account.ServiceAccountCredentials.from_p12_keyfile(
service_account_email, filename)
@mock.patch('oauth2client.crypt.Signer', new=crypt.PyCryptoSigner)
def test_from_p12_keyfile_with_pycrypto(self):
self._p12_not_implemented_helper()
@mock.patch('oauth2client.crypt.Signer', new=crypt.RsaSigner)
def test_from_p12_keyfile_with_rsa(self):
self._p12_not_implemented_helper()
def test_from_p12_keyfile_defaults(self):
self._from_p12_keyfile_helper()
def test_from_p12_keyfile_explicit(self):
password = 'notasecret'
self._from_p12_keyfile_helper(private_key_password=password,
scopes=['foo', 'bar'],
token_uri='baz', revoke_uri='qux')
def test_create_scoped_required_without_scopes(self):
self.assertTrue(self.credentials.create_scoped_required())
def test_create_scoped_required_with_scopes(self):
signer = object()
self.credentials = service_account.ServiceAccountCredentials(
self.service_account_email,
signer,
scopes=self.scopes,
private_key_id=self.private_key_id,
client_id=self.client_id,
)
self.assertFalse(self.credentials.create_scoped_required())
def test_create_scoped(self):
new_credentials = self.credentials.create_scoped(self.scopes)
self.assertNotEqual(self.credentials, new_credentials)
self.assertIsInstance(new_credentials,
service_account.ServiceAccountCredentials)
self.assertEqual('dummy_scope', new_credentials._scopes)
def test_create_delegated(self):
signer = object()
sub = '[email protected]'
creds = service_account.ServiceAccountCredentials(
'[email protected]', signer)
self.assertNotIn('sub', creds._kwargs)
delegated_creds = creds.create_delegated(sub)
self.assertEqual(delegated_creds._kwargs['sub'], sub)
# Make sure the original is unchanged.
self.assertNotIn('sub', creds._kwargs)
def test_create_delegated_existing_sub(self):
signer = object()
sub1 = '[email protected]'
sub2 = '[email protected]'
creds = service_account.ServiceAccountCredentials(
'[email protected]', signer, sub=sub1)
self.assertEqual(creds._kwargs['sub'], sub1)
delegated_creds = creds.create_delegated(sub2)
self.assertEqual(delegated_creds._kwargs['sub'], sub2)
# Make sure the original is unchanged.
self.assertEqual(creds._kwargs['sub'], sub1)
@mock.patch('oauth2client.client._UTCNOW')
def test_access_token(self, utcnow):
# Configure the patch.
seconds = 11
NOW = datetime.datetime(1992, 12, 31, second=seconds)
utcnow.return_value = NOW
# Create a custom credentials with a mock signer.
signer = mock.Mock()
signed_value = b'signed-content'
signer.sign = mock.Mock(name='sign',
return_value=signed_value)
credentials = service_account.ServiceAccountCredentials(
self.service_account_email,
signer,
private_key_id=self.private_key_id,
client_id=self.client_id,
)
# Begin testing.
lifetime = 2 # number of seconds in which the token expires
EXPIRY_TIME = datetime.datetime(1992, 12, 31,
second=seconds + lifetime)
token1 = u'first_token'
token_response_first = {
'access_token': token1,
'expires_in': lifetime,
}
token2 = u'second_token'
token_response_second = {
'access_token': token2,
'expires_in': lifetime,
}
http = http_mock.HttpMockSequence([
({'status': http_client.OK},
json.dumps(token_response_first).encode('utf-8')),
({'status': http_client.OK},
json.dumps(token_response_second).encode('utf-8')),
])
# Get Access Token, First attempt.
self.assertIsNone(credentials.access_token)
self.assertFalse(credentials.access_token_expired)
self.assertIsNone(credentials.token_expiry)
token = credentials.get_access_token(http=http)
self.assertEqual(credentials.token_expiry, EXPIRY_TIME)
self.assertEqual(token1, token.access_token)
self.assertEqual(lifetime, token.expires_in)
self.assertEqual(token_response_first,
credentials.token_response)
# Two utcnow calls are expected:
# - get_access_token() -> _do_refresh_request (setting expires in)
# - get_access_token() -> _expires_in()
expected_utcnow_calls = [mock.call()] * 2
self.assertEqual(expected_utcnow_calls, utcnow.mock_calls)
# One call to sign() expected: Actual refresh was needed.
self.assertEqual(len(signer.sign.mock_calls), 1)
# Get Access Token, Second Attempt (not expired)
self.assertEqual(credentials.access_token, token1)
self.assertFalse(credentials.access_token_expired)
token = credentials.get_access_token(http=http)
# Make sure no refresh occurred since the token was not expired.
self.assertEqual(token1, token.access_token)
self.assertEqual(lifetime, token.expires_in)
self.assertEqual(token_response_first, credentials.token_response)
# Three more utcnow calls are expected:
# - access_token_expired
# - get_access_token() -> access_token_expired
# - get_access_token -> _expires_in
expected_utcnow_calls = [mock.call()] * (2 + 3)
self.assertEqual(expected_utcnow_calls, utcnow.mock_calls)
# No call to sign() expected: the token was not expired.
self.assertEqual(len(signer.sign.mock_calls), 1 + 0)
# Get Access Token, Third Attempt (force expiration)
self.assertEqual(credentials.access_token, token1)
credentials.token_expiry = NOW # Manually force expiry.
self.assertTrue(credentials.access_token_expired)
token = credentials.get_access_token(http=http)
# Make sure refresh occurred since the token was not expired.
self.assertEqual(token2, token.access_token)
self.assertEqual(lifetime, token.expires_in)
self.assertFalse(credentials.access_token_expired)
self.assertEqual(token_response_second,
credentials.token_response)
# Five more utcnow calls are expected:
# - access_token_expired
# - get_access_token -> access_token_expired
# - get_access_token -> _do_refresh_request
# - get_access_token -> _expires_in
# - access_token_expired
expected_utcnow_calls = [mock.call()] * (2 + 3 + 5)
self.assertEqual(expected_utcnow_calls, utcnow.mock_calls)
# One more call to sign() expected: Actual refresh was needed.
self.assertEqual(len(signer.sign.mock_calls), 1 + 0 + 1)
self.assertEqual(credentials.access_token, token2)
TOKEN_LIFE = service_account._JWTAccessCredentials._MAX_TOKEN_LIFETIME_SECS
T1 = 42
T1_DATE = datetime.datetime(1970, 1, 1, second=T1)
T1_EXPIRY = T1 + TOKEN_LIFE
T1_EXPIRY_DATE = T1_DATE + datetime.timedelta(seconds=TOKEN_LIFE)
T2 = T1 + 100
T2_DATE = T1_DATE + datetime.timedelta(seconds=100)
T2_EXPIRY = T2 + TOKEN_LIFE
T2_EXPIRY_DATE = T2_DATE + datetime.timedelta(seconds=TOKEN_LIFE)
T3 = T1 + TOKEN_LIFE + 1
T3_DATE = T1_DATE + datetime.timedelta(seconds=TOKEN_LIFE + 1)
T3_EXPIRY = T3 + TOKEN_LIFE
T3_EXPIRY_DATE = T3_DATE + datetime.timedelta(seconds=TOKEN_LIFE)
class JWTAccessCredentialsTests(unittest.TestCase):
def setUp(self):
self.client_id = '123'
self.service_account_email = '[email protected]'
self.private_key_id = 'ABCDEF'
self.private_key = datafile('pem_from_pkcs12.pem')
self.signer = crypt.Signer.from_string(self.private_key)
self.url = 'https://test.url.com'
self.jwt = service_account._JWTAccessCredentials(
self.service_account_email, self.signer,
private_key_id=self.private_key_id, client_id=self.client_id,
additional_claims={'aud': self.url})
@mock.patch('oauth2client.client._UTCNOW')
@mock.patch('time.time')
def test_get_access_token_no_claims(self, time, utcnow):
utcnow.return_value = T1_DATE
time.return_value = T1
token_info = self.jwt.get_access_token()
certs = {'key': datafile('public_cert.pem')}
payload = crypt.verify_signed_jwt_with_certs(
token_info.access_token, certs, audience=self.url)
self.assertEqual(len(payload), 5)
self.assertEqual(payload['iss'], self.service_account_email)
self.assertEqual(payload['sub'], self.service_account_email)
self.assertEqual(payload['iat'], T1)
self.assertEqual(payload['exp'], T1_EXPIRY)
self.assertEqual(payload['aud'], self.url)
self.assertEqual(token_info.expires_in, T1_EXPIRY - T1)
# Verify that we vend the same token after 100 seconds
utcnow.return_value = T2_DATE
token_info = self.jwt.get_access_token()
payload = crypt.verify_signed_jwt_with_certs(
token_info.access_token,
{'key': datafile('public_cert.pem')}, audience=self.url)
self.assertEqual(payload['iat'], T1)
self.assertEqual(payload['exp'], T1_EXPIRY)
self.assertEqual(token_info.expires_in, T1_EXPIRY - T2)
# Verify that we vend a new token after _MAX_TOKEN_LIFETIME_SECS
utcnow.return_value = T3_DATE
time.return_value = T3
token_info = self.jwt.get_access_token()
payload = crypt.verify_signed_jwt_with_certs(
token_info.access_token,
{'key': datafile('public_cert.pem')}, audience=self.url)
expires_in = token_info.expires_in
self.assertEqual(payload['iat'], T3)
self.assertEqual(payload['exp'], T3_EXPIRY)
self.assertEqual(expires_in, T3_EXPIRY - T3)
@mock.patch('oauth2client.client._UTCNOW')
@mock.patch('time.time')
def test_get_access_token_additional_claims(self, time, utcnow):
utcnow.return_value = T1_DATE
time.return_value = T1
audience = 'https://test2.url.com'
subject = '[email protected]'
claims = {'aud': audience, 'sub': subject}
token_info = self.jwt.get_access_token(additional_claims=claims)
certs = {'key': datafile('public_cert.pem')}
payload = crypt.verify_signed_jwt_with_certs(
token_info.access_token, certs, audience=audience)
expires_in = token_info.expires_in
self.assertEqual(len(payload), 5)
self.assertEqual(payload['iss'], self.service_account_email)
self.assertEqual(payload['sub'], subject)
self.assertEqual(payload['iat'], T1)
self.assertEqual(payload['exp'], T1_EXPIRY)
self.assertEqual(payload['aud'], audience)
self.assertEqual(expires_in, T1_EXPIRY - T1)
def test_revoke(self):
self.jwt.revoke(None)
def test_create_scoped_required(self):
self.assertTrue(self.jwt.create_scoped_required())
def test_create_scoped(self):
self.jwt._private_key_pkcs12 = ''
self.jwt._private_key_password = ''
new_credentials = self.jwt.create_scoped('dummy_scope')
self.assertNotEqual(self.jwt, new_credentials)
self.assertIsInstance(
new_credentials, service_account.ServiceAccountCredentials)
self.assertEqual('dummy_scope', new_credentials._scopes)
@mock.patch('oauth2client.client._UTCNOW')
@mock.patch('time.time')
def test_authorize_success(self, time, utcnow):
utcnow.return_value = T1_DATE
time.return_value = T1
http = http_mock.HttpMockSequence([
({'status': http_client.OK}, b''),
({'status': http_client.OK}, b''),
])
self.jwt.authorize(http)
transport.request(http, self.url)
# Ensure we use the cached token
utcnow.return_value = T2_DATE
transport.request(http, self.url)
# Verify mocks.
certs = {'key': datafile('public_cert.pem')}
self.assertEqual(len(http.requests), 2)
for info in http.requests:
self.assertEqual(info['method'], 'GET')
self.assertEqual(info['uri'], self.url)
self.assertIsNone(info['body'])
self.assertEqual(len(info['headers']), 1)
bearer, token = info['headers'][b'Authorization'].split()
self.assertEqual(bearer, b'Bearer')
payload = crypt.verify_signed_jwt_with_certs(
token, certs, audience=self.url)
self.assertEqual(len(payload), 5)
self.assertEqual(payload['iss'], self.service_account_email)
self.assertEqual(payload['sub'], self.service_account_email)
self.assertEqual(payload['iat'], T1)
self.assertEqual(payload['exp'], T1_EXPIRY)
self.assertEqual(payload['aud'], self.url)
@mock.patch('oauth2client.client._UTCNOW')
@mock.patch('time.time')
def test_authorize_no_aud(self, time, utcnow):
utcnow.return_value = T1_DATE
time.return_value = T1
jwt = service_account._JWTAccessCredentials(
self.service_account_email, self.signer,
private_key_id=self.private_key_id, client_id=self.client_id)
http = http_mock.HttpMockSequence([
({'status': http_client.OK}, b''),
])
jwt.authorize(http)
transport.request(http, self.url)
# Ensure we do not cache the token
self.assertIsNone(jwt.access_token)
# Verify mocks.
self.assertEqual(len(http.requests), 1)
info = http.requests[0]
self.assertEqual(info['method'], 'GET')
self.assertEqual(info['uri'], self.url)
self.assertIsNone(info['body'])
self.assertEqual(len(info['headers']), 1)
bearer, token = info['headers'][b'Authorization'].split()
self.assertEqual(bearer, b'Bearer')
certs = {'key': datafile('public_cert.pem')}
payload = crypt.verify_signed_jwt_with_certs(
token, certs, audience=self.url)
self.assertEqual(len(payload), 5)
self.assertEqual(payload['iss'], self.service_account_email)
self.assertEqual(payload['sub'], self.service_account_email)
self.assertEqual(payload['iat'], T1)
self.assertEqual(payload['exp'], T1_EXPIRY)
self.assertEqual(payload['aud'], self.url)
@mock.patch('oauth2client.client._UTCNOW')
def test_authorize_stale_token(self, utcnow):
utcnow.return_value = T1_DATE
# Create an initial token
http = http_mock.HttpMockSequence([
({'status': http_client.OK}, b''),
({'status': http_client.OK}, b''),
])
self.jwt.authorize(http)
transport.request(http, self.url)
token_1 = self.jwt.access_token
# Expire the token
utcnow.return_value = T3_DATE
transport.request(http, self.url)
token_2 = self.jwt.access_token
self.assertEquals(self.jwt.token_expiry, T3_EXPIRY_DATE)
self.assertNotEqual(token_1, token_2)
# Verify mocks.
certs = {'key': datafile('public_cert.pem')}
self.assertEqual(len(http.requests), 2)
issued_at_vals = (T1, T3)
exp_vals = (T1_EXPIRY, T3_EXPIRY)
for info, issued_at, exp_val in zip(http.requests, issued_at_vals,
exp_vals):
self.assertEqual(info['uri'], self.url)
self.assertEqual(info['method'], 'GET')
self.assertIsNone(info['body'])
self.assertEqual(len(info['headers']), 1)
bearer, token = info['headers'][b'Authorization'].split()
self.assertEqual(bearer, b'Bearer')
# To parse the token, skip the time check, since this
# test intentionally has stale tokens.
with mock.patch('oauth2client.crypt._verify_time_range',
return_value=True):
payload = crypt.verify_signed_jwt_with_certs(
token, certs, audience=self.url)
self.assertEqual(len(payload), 5)
self.assertEqual(payload['iss'], self.service_account_email)
self.assertEqual(payload['sub'], self.service_account_email)
self.assertEqual(payload['iat'], issued_at)
self.assertEqual(payload['exp'], exp_val)
self.assertEqual(payload['aud'], self.url)
@mock.patch('oauth2client.client._UTCNOW')
def test_authorize_401(self, utcnow):
utcnow.return_value = T1_DATE
http = http_mock.HttpMockSequence([
({'status': http_client.OK}, b''),
({'status': http_client.UNAUTHORIZED}, b''),
({'status': http_client.OK}, b''),
])
self.jwt.authorize(http)
transport.request(http, self.url)
token_1 = self.jwt.access_token
utcnow.return_value = T2_DATE
response, _ = transport.request(http, self.url)
self.assertEquals(response.status, http_client.OK)
token_2 = self.jwt.access_token
# Check the 401 forced a new token
self.assertNotEqual(token_1, token_2)
# Verify mocks.
certs = {'key': datafile('public_cert.pem')}
self.assertEqual(len(http.requests), 3)
issued_at_vals = (T1, T1, T2)
exp_vals = (T1_EXPIRY, T1_EXPIRY, T2_EXPIRY)
for info, issued_at, exp_val in zip(http.requests, issued_at_vals,
exp_vals):
self.assertEqual(info['uri'], self.url)
self.assertEqual(info['method'], 'GET')
self.assertIsNone(info['body'])
self.assertEqual(len(info['headers']), 1)
bearer, token = info['headers'][b'Authorization'].split()
self.assertEqual(bearer, b'Bearer')
# To parse the token, skip the time check, since this
# test intentionally has stale tokens.
with mock.patch('oauth2client.crypt._verify_time_range',
return_value=True):
payload = crypt.verify_signed_jwt_with_certs(
token, certs, audience=self.url)
self.assertEqual(len(payload), 5)
self.assertEqual(payload['iss'], self.service_account_email)
self.assertEqual(payload['sub'], self.service_account_email)
self.assertEqual(payload['iat'], issued_at)
self.assertEqual(payload['exp'], exp_val)
self.assertEqual(payload['aud'], self.url)
@mock.patch('oauth2client.client._UTCNOW')
def test_refresh(self, utcnow):
utcnow.return_value = T1_DATE
token_1 = self.jwt.access_token
utcnow.return_value = T2_DATE
self.jwt.refresh(None)
token_2 = self.jwt.access_token
self.assertEquals(self.jwt.token_expiry, T2_EXPIRY_DATE)
self.assertNotEqual(token_1, token_2)
|
kubernetes-client/python | refs/heads/master | kubernetes/client/models/v2beta2_external_metric_source.py | 1 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.18
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V2beta2ExternalMetricSource(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'metric': 'V2beta2MetricIdentifier',
'target': 'V2beta2MetricTarget'
}
attribute_map = {
'metric': 'metric',
'target': 'target'
}
def __init__(self, metric=None, target=None, local_vars_configuration=None): # noqa: E501
"""V2beta2ExternalMetricSource - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._metric = None
self._target = None
self.discriminator = None
self.metric = metric
self.target = target
@property
def metric(self):
"""Gets the metric of this V2beta2ExternalMetricSource. # noqa: E501
:return: The metric of this V2beta2ExternalMetricSource. # noqa: E501
:rtype: V2beta2MetricIdentifier
"""
return self._metric
@metric.setter
def metric(self, metric):
"""Sets the metric of this V2beta2ExternalMetricSource.
:param metric: The metric of this V2beta2ExternalMetricSource. # noqa: E501
:type: V2beta2MetricIdentifier
"""
if self.local_vars_configuration.client_side_validation and metric is None: # noqa: E501
raise ValueError("Invalid value for `metric`, must not be `None`") # noqa: E501
self._metric = metric
@property
def target(self):
"""Gets the target of this V2beta2ExternalMetricSource. # noqa: E501
:return: The target of this V2beta2ExternalMetricSource. # noqa: E501
:rtype: V2beta2MetricTarget
"""
return self._target
@target.setter
def target(self, target):
"""Sets the target of this V2beta2ExternalMetricSource.
:param target: The target of this V2beta2ExternalMetricSource. # noqa: E501
:type: V2beta2MetricTarget
"""
if self.local_vars_configuration.client_side_validation and target is None: # noqa: E501
raise ValueError("Invalid value for `target`, must not be `None`") # noqa: E501
self._target = target
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V2beta2ExternalMetricSource):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V2beta2ExternalMetricSource):
return True
return self.to_dict() != other.to_dict()
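# Usage sketch (illustrative only): the sibling generated models named in
# openapi_types above are assumed to be importable from kubernetes.client and
# to accept the keyword arguments shown; the metric name and target values
# are placeholders.
#
#   from kubernetes.client import V2beta2MetricIdentifier, V2beta2MetricTarget
#   source = V2beta2ExternalMetricSource(
#       metric=V2beta2MetricIdentifier(name='queue_length'),
#       target=V2beta2MetricTarget(type='AverageValue', average_value='10'))
#   print(source.to_dict())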
|
benoitc/bigcouch | refs/heads/master | couchjs/scons/scons-local-2.0.1/SCons/Variables/EnumVariable.py | 61 | """engine.SCons.Variables.EnumVariable
This file defines the option type for SCons that allows only specified
input values.
Usage example:
opts = Variables()
opts.Add(EnumVariable('debug', 'debug output and symbols', 'no',
allowed_values=('yes', 'no', 'full'),
map={}, ignorecase=2))
...
if env['debug'] == 'full':
...
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Variables/EnumVariable.py 5134 2010/08/16 23:02:40 bdeegan"
__all__ = ['EnumVariable',]
import SCons.Errors
def _validator(key, val, env, vals):
    if val not in vals:
raise SCons.Errors.UserError(
'Invalid value for option %s: %s' % (key, val))
def EnumVariable(key, help, default, allowed_values, map={}, ignorecase=0):
"""
    The input parameters describe an option with only certain values
allowed. They are returned with an appropriate converter and
validator appended. The result is usable for input to
Variables.Add().
'key' and 'default' are the values to be passed on to Variables.Add().
    'help' will be automatically appended with the list of allowed values
'allowed_values' is a list of strings, which are allowed as values
for this option.
The 'map'-dictionary may be used for converting the input value
into canonical values (eg. for aliases).
'ignorecase' defines the behaviour of the validator:
If ignorecase == 0, the validator/converter are case-sensitive.
If ignorecase == 1, the validator/converter are case-insensitive.
If ignorecase == 2, the validator/converter is case-insensitive and
the converted value will always be lower-case.
The 'validator' tests whether the value is in the list of allowed
values. The 'converter' converts input values according to the
given 'map'-dictionary (unmapped input values are returned
unchanged).
"""
help = '%s (%s)' % (help, '|'.join(allowed_values))
# define validator
if ignorecase >= 1:
validator = lambda key, val, env: \
_validator(key, val.lower(), env, allowed_values)
else:
validator = lambda key, val, env: \
_validator(key, val, env, allowed_values)
# define converter
if ignorecase == 2:
converter = lambda val: map.get(val.lower(), val).lower()
elif ignorecase == 1:
converter = lambda val: map.get(val.lower(), val)
else:
converter = lambda val: map.get(val, val)
return (key, help, default, validator, converter)
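# Additional usage sketch (illustrative only), complementing the module
# docstring above: the map-dictionary translates aliases into canonical
# values before validation, and with ignorecase=2 both the lookup and the
# stored value are lower-cased, so 'debug=ALL' ends up as 'full'.
#
#   opts = Variables()
#   opts.Add(EnumVariable('debug', 'debug output and symbols', 'no',
#                         allowed_values=('yes', 'no', 'full'),
#                         map={'all': 'full'}, ignorecase=2))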
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
darktears/chromium-crosswalk | refs/heads/master | chrome/common/extensions/docs/server2/file_system.py | 53 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import posixpath
import traceback
from future import Future
from path_util import (
AssertIsDirectory, AssertIsValid, IsDirectory, IsValid, SplitParent,
ToDirectory)
def IsFileSystemThrottledError(error):
return type(error).__name__ == 'FileSystemThrottledError'
class _BaseFileSystemException(Exception):
def __init__(self, message):
Exception.__init__(self, message)
@classmethod
def RaiseInFuture(cls, message):
stack = traceback.format_stack()
def boom(): raise cls('%s. Creation stack:\n%s' % (message, ''.join(stack)))
return Future(callback=boom)
class FileNotFoundError(_BaseFileSystemException):
'''Raised when a file isn't found for read or stat.
'''
def __init__(self, filename):
_BaseFileSystemException.__init__(self, filename)
class FileSystemThrottledError(_BaseFileSystemException):
'''Raised when access to a file system resource is temporarily unavailable
due to service throttling.
'''
def __init__(self, filename):
_BaseFileSystemException.__init__(self, filename)
class FileSystemError(_BaseFileSystemException):
'''Raised on when there are errors reading or statting files, such as a
network timeout.
'''
def __init__(self, filename):
_BaseFileSystemException.__init__(self, filename)
class StatInfo(object):
'''The result of calling Stat on a FileSystem.
'''
def __init__(self, version, child_versions=None):
if child_versions:
assert all(IsValid(path) for path in child_versions.iterkeys()), \
child_versions
self.version = version
self.child_versions = child_versions
def __eq__(self, other):
return (isinstance(other, StatInfo) and
self.version == other.version and
self.child_versions == other.child_versions)
def __ne__(self, other):
return not (self == other)
def __str__(self):
return '{version: %s, child_versions: %s}' % (self.version,
self.child_versions)
def __repr__(self):
return str(self)
class FileSystem(object):
'''A FileSystem interface that can read files and directories.
'''
def Read(self, paths, skip_not_found=False):
'''Reads each file in paths and returns a dictionary mapping the path to the
contents. If a path in paths ends with a '/', it is assumed to be a
directory, and a list of files in the directory is mapped to the path.
The contents will be a str.
If any path cannot be found:
- If |skip_not_found| is True, the resulting object will not contain any
mapping for that path.
- Otherwise, and by default, a FileNotFoundError is raised. This is
guaranteed to only happen once the Future has been resolved (Get()
called).
For any other failure, raises a FileSystemError.
'''
raise NotImplementedError(self.__class__)
def ReadSingle(self, path, skip_not_found=False):
'''Reads a single file from the FileSystem. Returns a Future with the same
rules as Read(). If |path| is not found raise a FileNotFoundError on Get(),
or if |skip_not_found| is True then return None.
'''
AssertIsValid(path)
read_single = self.Read([path], skip_not_found=skip_not_found)
return Future(callback=lambda: read_single.Get().get(path, None))
def Exists(self, path):
'''Returns a Future to the existence of |path|; True if |path| exists,
False if not. This method will not throw a FileNotFoundError unlike
the Read* methods, however it may still throw a FileSystemError.
There are several ways to implement this method via the interface but this
method exists to do so in a canonical and most efficient way for caching.
'''
AssertIsValid(path)
if path == '':
# There is always a root directory.
return Future(value=True)
parent, base = SplitParent(path)
def handle(error):
if isinstance(error, FileNotFoundError):
return False
raise error
return self.ReadSingle(ToDirectory(parent)).Then(lambda l: base in l,
handle)
def Refresh(self):
'''Asynchronously refreshes the content of the FileSystem, returning a
future to its completion.
'''
raise NotImplementedError(self.__class__)
# TODO(cduvall): Allow Stat to take a list of paths like Read.
def Stat(self, path):
'''DEPRECATED: Please try to use StatAsync instead.
Returns a |StatInfo| object containing the version of |path|. If |path|
is a directory, |StatInfo| will have the versions of all the children of
the directory in |StatInfo.child_versions|.
If the path cannot be found, raises a FileNotFoundError.
For any other failure, raises a FileSystemError.
'''
# Delegate to this implementation's StatAsync if it has been implemented.
if type(self).StatAsync != FileSystem.StatAsync:
return self.StatAsync(path).Get()
raise NotImplementedError(self.__class__)
def StatAsync(self, path):
'''An async version of Stat. Returns a Future to a StatInfo rather than a
raw StatInfo.
This is a bandaid for a lack of an async Stat function. Stat() should be
async by default but for now just let implementations override this if they
like.
'''
return Future(callback=lambda: self.Stat(path))
def GetIdentity(self):
'''The identity of the file system, exposed for caching classes to
namespace their caches. This will usually depend on the configuration of
that file system - e.g. a LocalFileSystem with a base path of /var is
different to that of a SubversionFileSystem with a base path of /bar, is
different to a LocalFileSystem with a base path of /usr.
'''
raise NotImplementedError(self.__class__)
def GetVersion(self):
'''The version of the file system, exposed for more granular caching.
This may be any serializable data, though generally it should be a revision
number or hash string. The default implementation returns None, indicating
that the FileSystem is not versioned.
'''
return None
def Walk(self, root, depth=-1, file_lister=None):
'''Recursively walk the directories in a file system, starting with root.
Behaviour is very similar to os.walk from the standard os module, yielding
(base, dirs, files) recursively, where |base| is the base path of |files|,
|dirs| relative to |root|, and |files| and |dirs| the list of files/dirs in
|base| respectively. If |depth| is specified and greater than 0, Walk will
only recurse |depth| times.
|file_lister|, if specified, should be a callback of signature
def my_file_lister(root):,
which returns a tuple (dirs, files), where |dirs| is a list of directory
names under |root|, and |files| is a list of file names under |root|. Note
that the listing of files and directories should be for a *single* level
only, i.e. it should not recursively list anything.
Note that directories will always end with a '/', files never will.
If |root| cannot be found, raises a FileNotFoundError.
For any other failure, raises a FileSystemError.
'''
AssertIsDirectory(root)
basepath = root
def walk(root, depth):
if depth == 0:
return
AssertIsDirectory(root)
if file_lister:
dirs, files = file_lister(root)
else:
dirs, files = [], []
for f in self.ReadSingle(root).Get():
if IsDirectory(f):
dirs.append(f)
else:
files.append(f)
yield root[len(basepath):].rstrip('/'), dirs, files
for d in dirs:
for walkinfo in walk(root + d, depth - 1):
yield walkinfo
for walkinfo in walk(root, depth):
yield walkinfo
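  # Illustrative sketch of a |file_lister| callback satisfying the contract
  # documented above: it lists a single level only and keeps the trailing '/'
  # on directory names, mirroring what Walk does by default via ReadSingle.
  # |fs| stands for any FileSystem instance.
  #
  #   def my_file_lister(root):
  #     names = fs.ReadSingle(root).Get()
  #     dirs = [n for n in names if IsDirectory(n)]
  #     files = [n for n in names if not IsDirectory(n)]
  #     return dirs, files
  #
  #   for base, dirs, files in fs.Walk('', file_lister=my_file_lister):
  #     ...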
def __eq__(self, other):
return (isinstance(other, FileSystem) and
self.GetIdentity() == other.GetIdentity())
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return '<%s>' % type(self).__name__
def __str__(self):
return repr(self)
|
bgxavier/nova | refs/heads/master | nova/tests/unit/image/fake.py | 22 | # Copyright 2011 Justin Santa Barbara
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of a fake image service."""
import copy
import datetime
import uuid
from oslo_config import cfg
from oslo_log import log as logging
from nova.compute import arch
from nova import exception
import nova.image.glance
CONF = cfg.CONF
CONF.import_opt('null_kernel', 'nova.compute.api')
LOG = logging.getLogger(__name__)
AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID = '70a599e0-31e7-49b7-b260-868f441e862b'
class _FakeImageService(object):
"""Mock (fake) image service for unit testing."""
def __init__(self):
self.images = {}
# NOTE(justinsb): The OpenStack API can't upload an image?
# So, make sure we've got one..
timestamp = datetime.datetime(2011, 1, 1, 1, 2, 3)
image1 = {'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'raw',
'disk_format': 'raw',
'size': '25165824',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel,
'architecture': arch.X86_64}}
image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'size': '58145823',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel}}
image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': None,
'disk_format': None,
'size': '83594576',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel}}
image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'size': '84035174',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel}}
image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'size': '26360814',
'properties': {'kernel_id':
'155d900f-4e14-4e4c-a73d-069cbf4541e6',
'ramdisk_id': None}}
image6 = {'id': 'a440c04b-79fa-479c-bed1-0b816eaec379',
'name': 'fakeimage6',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'ova',
'disk_format': 'vhd',
'size': '49163826',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel,
'architecture': arch.X86_64,
'auto_disk_config': 'False'}}
image7 = {'id': AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID,
'name': 'fakeimage7',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'ova',
'disk_format': 'vhd',
'size': '74185822',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel,
'architecture': arch.X86_64,
'auto_disk_config': 'True'}}
self.create(None, image1)
self.create(None, image2)
self.create(None, image3)
self.create(None, image4)
self.create(None, image5)
self.create(None, image6)
self.create(None, image7)
self._imagedata = {}
super(_FakeImageService, self).__init__()
# TODO(bcwaldon): implement optional kwargs such as limit, sort_dir
def detail(self, context, **kwargs):
"""Return list of detailed image information."""
return copy.deepcopy(self.images.values())
def download(self, context, image_id, dst_path=None, data=None):
self.show(context, image_id)
if data:
data.write(self._imagedata.get(image_id, ''))
elif dst_path:
with open(dst_path, 'wb') as data:
data.write(self._imagedata.get(image_id, ''))
def show(self, context, image_id, include_locations=False,
show_deleted=True):
"""Get data about specified image.
Returns a dict containing image data for the given opaque image id.
"""
image = self.images.get(str(image_id))
if image:
return copy.deepcopy(image)
LOG.warning('Unable to find image id %s. Have images: %s',
image_id, self.images)
raise exception.ImageNotFound(image_id=image_id)
def create(self, context, metadata, data=None):
"""Store the image data and return the new image id.
:raises: Duplicate if the image already exist.
"""
image_id = str(metadata.get('id', uuid.uuid4()))
metadata['id'] = image_id
if image_id in self.images:
raise exception.CouldNotUploadImage(image_id=image_id)
self.images[image_id] = copy.deepcopy(metadata)
if data:
self._imagedata[image_id] = data.read()
return self.images[image_id]
def update(self, context, image_id, metadata, data=None,
purge_props=False):
"""Replace the contents of the given image with the new data.
:raises: ImageNotFound if the image does not exist.
"""
if not self.images.get(image_id):
raise exception.ImageNotFound(image_id=image_id)
if purge_props:
self.images[image_id] = copy.deepcopy(metadata)
else:
image = self.images[image_id]
try:
image['properties'].update(metadata.pop('properties'))
except KeyError:
pass
image.update(metadata)
return self.images[image_id]
def delete(self, context, image_id):
"""Delete the given image.
:raises: ImageNotFound if the image does not exist.
"""
removed = self.images.pop(image_id, None)
if not removed:
raise exception.ImageNotFound(image_id=image_id)
def get_location(self, context, image_id):
if image_id in self.images:
return 'fake_location'
return None
_fakeImageService = _FakeImageService()
def FakeImageService():
return _fakeImageService
def FakeImageService_reset():
global _fakeImageService
_fakeImageService = _FakeImageService()
def get_valid_image_id():
return AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID
def stub_out_image_service(stubs):
image_service = FakeImageService()
stubs.Set(nova.image.glance, 'get_remote_image_service',
lambda x, y: (image_service, y))
stubs.Set(nova.image.glance, 'get_default_image_service',
lambda: image_service)
return image_service
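# Usage sketch (illustrative only): how a test case might use the stub above.
# |self.stubs| is assumed to be the stub-out helper a nova test case already
# owns; the context argument is not used by the fake service, and the image
# name checked here is the one defined for AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID.
#
#   image_service = stub_out_image_service(self.stubs)
#   image = image_service.show(None, get_valid_image_id())
#   assert image['name'] == 'fakeimage7'
#   FakeImageService_reset()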
|
darkoc/clowdflows | refs/heads/master | tweepy/__init__.py | 9 | # Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
"""
Tweepy Twitter API library
"""
__version__ = '2.0'
__author__ = 'Joshua Roesslein'
__license__ = 'MIT'
from tweepy.models import Status, User, DirectMessage, Friendship, SavedSearch, SearchResult, ModelFactory, Category
from tweepy.error import TweepError
from tweepy.api import API
from tweepy.cache import Cache, MemoryCache, FileCache
from tweepy.auth import BasicAuthHandler, OAuthHandler
from tweepy.streaming import Stream, StreamListener
from tweepy.cursor import Cursor
# Global, unauthenticated instance of API
api = API()
def debug(enable=True, level=1):
import httplib
httplib.HTTPConnection.debuglevel = level
|
ehickox2012/bitraider | refs/heads/master | bitraider/trader_template.py | 2 | import sys
import pytz
import time
import calendar
import configparser
import cmd
from datetime import date, datetime, timedelta
from strategy import strategy
from exchange import cb_exchange_sim, cb_exchange
class runner(cmd.Cmd):
def __init__(self):
"""Create a new runner with provided CLI commands. Default commands are:
\n1. exit: quit autotrader
\n2. help: display all commands
\n3. price: display the most recent bitcoin price
\n4. run: start trading on live data
\n5. backtest: run a backtest on historic data
\n6. load: load a new strategy
"""
print(" __ _ __ _ __ ")
print(" / /_ (_) /__________ _(_)___/ /__ _____")
print(" / __ \/ / __/ ___/ __ `/ / __ / _ \/ ___/")
print(" / /_/ / / /_/ / / /_/ / / /_/ / __/ / ")
print("/_.___/_/\__/_/ \__,_/_/\__,_/\___/_/ ")
print("")
print("Welcome to bitraider v0.0.4, an algorithmic Bitcoin trader!")
cmd.Cmd.__init__(self)
self.prompt = '> '
self.intro = "Type a command to get started or type \'help\'"
# Init config
self.config_path = "settings.ini"
self.config = configparser.ConfigParser()
try:
self.config.read(self.config_path)
except Exception as err:
print(str(err))
# Set up strategy
self.strategies = {}
"""The currently loaded strategies"""
# Try to load a default strategy, if one exists
try:
default_strategy_module = self.config.get("default_strategy", "module")
default_strategy_class = self.config.get("default_strategy", "class")
self.load_strategy(default_strategy_module, default_strategy_class)
except Exception as err:
#print(str(err))
print("No default strategy configured. Run "
"\'config default\' to set one")
try:
self.config.add_section("default_strategy")
except:
pass
self.exchange = cb_exchange_sim(1000, 1)
self.accounts = None
# Get auth credentials from settings.ini, if they exist, authorize
try:
self.auth_key = self.config.get("auth", "key")
self.auth_secret = self.config.get("auth", "secret")
self.auth_password = self.config.get("auth", "password")
self.authenticate()
except Exception as err:
#print(str(err))
print("No authentication configured. Run "
"\'config auth\' to set it")
try:
self.config.add_section("auth")
except:
pass
if self.accounts is not None:
print(str(len(self.accounts))+" accounts were found.")
for i in range(0, len(self.accounts)):
try:
print("Account ID: "+str(self.accounts[i]['id'])+" Available Funds: "+str(self.accounts[i]['available'])+" "+str(self.accounts[i]['currency'])+"")
except Exception as err:
print("Something went wrong while trying to authenticate with the provided credentials. Try running config>auth again.")
def do_exit(self, line):
sys.exit()
def do_price(self, line):
self.print_curr_price()
def do_run(self, line):
self.set_ticker_on()
def do_list(self, line):
self.list_strategies()
def do_config(self, option):
"""usage: \'config \' [option]"""
if option is None:
print("error: no cofiguration option specified")
else:
if option == "auth":
if self.accounts is not None:
print("Are you sure? Reconfiguring auth will wipe your current auth settings. [y/n]")
raw_input = input("> ")
if raw_input == "y":
                        self.authenticate_exchange()
elif raw_input == "n":
print("Exiting to main menu")
pass
else:
self.authenticate_exchange()
elif option == "default":
print("Type the filename (without .py) containing the class which inherits from bitraider.strategy:")
option = input("> ")
filename = str(option)
self.config.set("default_strategy", "module", filename)
print("Type the name of the class within "+str(option)+" representing the strategy to load:")
option = input("> ")
loaded_strategy = str(option)
if self.strategies is not None:
if loaded_strategy in self.strategies.keys():
print("Error: "+loaded_strategy+" is already loaded")
option = input("> ")
loaded_strategy = str(option)
self.config.set("default_strategy", "class", loaded_strategy)
with open(self.config_path, "wb") as config_file:
self.config.write(config_file)
self.load_strategy(filename, loaded_strategy)
def do_load(self, option):
print("Type the filename (without .py) containing the class which inherits from bitraider.strategy:")
raw_input = input("> ")
filename = str(raw_input)
print("Type the name of the class within "+str(raw_input)+" representing the strategy to load:")
raw_input = input("> ")
loaded_strategy = str(raw_input)
self.load_strategy(filename, loaded_strategy)
def do_backtest(self, option):
strategy_to_backtest = ""
print("Enter the class name of the strategy to backtest, or press enter to\n"
"backtest on the default strategy.")
raw_input = input("> ")
if raw_input == "":
print("Performing backest on default strategy: "+str(self.config.get("default_strategy" ,"class")))
strategy_to_backtest = str(self.config.get("default_strategy", "class"))
else:
strategy_to_backtest = str(raw_input)
usd = 1000
btc = 1
days_back_in_time = 7
print("Enter the number of days back in time to backtest on: ")
raw_input = input("> ")
if raw_input == "":
print("Performing backtest on default of 7 days.")
else:
days_back_in_time = float(raw_input)
print("Performing backtest on last "+str(days_back_in_time)+" days.")
curr_time = datetime.now(tz=self.curr_timezone)
start_time = curr_time - timedelta(seconds=86400*days_back_in_time)
start_time = start_time.isoformat(' ')
end_time = curr_time.isoformat(' ')
print("Enter the initial USD amount:")
raw_input = input("> ")
if raw_input == "":
print("Using default starting USD amount of $1,000")
else:
usd = float(raw_input)
print("Using starting USD amount of $"+str(usd))
print("Enter the initial BTC amount:")
raw_input = input("> ")
if raw_input == "":
print("Using default starting BTC amount of 1")
else:
btc = float(raw_input)
print("Using starting BTC amount of "+str(btc))
        if strategy_to_backtest != "":
self.strategies[strategy_to_backtest].exchange = cb_exchange_sim(start_usd=usd, start_btc=btc)
historic_data = self.strategies[strategy_to_backtest].exchange.get_historic_rates(start_time=start_time, end_time=end_time, granularity=self.strategies[strategy_to_backtest].interval)
if type(historic_data) is not list:
print("API error: "+str(historic_data.get("message", "")))
print("Unable to backtest")
pass
else:
print("Backtesting from "+str(start_time)+" to "+str(end_time))
print("with "+str(len(historic_data))+" timeslices of length "+str(self.strategies[strategy_to_backtest].interval)+" seconds each")
self.strategies[strategy_to_backtest].backtest_strategy(historic_data)
def do_optimize(self, line):
usd = 1000
btc = 1
days_back_in_time = 7
print("Enter the class name of the strategy to be optimized:")
raw_input = input("> ")
print(self.strategies.keys())
if raw_input not in self.strategies.keys():
print("Error: not found")
            return
strategy_to_optimize = raw_input
print("Enter the timeframe to optimize for i.e. the time to simulate over:")
days_back_in_time = 7
raw_input = input("> ")
if raw_input == "":
print("Performing optimization for default of last 7 days.")
else:
days_back_in_time = float(raw_input)
print("Performing optimization based on last "+str(days_back_in_time)+" days.")
curr_time = datetime.now(tz=self.curr_timezone)
start_time = curr_time - timedelta(seconds=86400*days_back_in_time)
start_time = start_time.isoformat(' ')
end_time = curr_time.isoformat(' ')
print("Enter the initial USD amount:")
raw_input = input("> ")
if raw_input == "":
print("Using default starting USD amount of $1,000")
else:
usd = float(raw_input)
print("Using starting USD amount of $"+str(usd))
print("Enter the initial BTC amount:")
raw_input = input("> ")
if raw_input == "":
print("Using default starting BTC amount of 1")
else:
btc = float(raw_input)
print("Using starting BTC amount of "+str(btc))
strategy = strategy_to_optimize
strategy_attributes = dir(self.strategies[strategy])
bounds_by_attribute = {}
print("Note: strategy interval cannot be optimized due to API restraints")
self.strategies[strategy].exchange = cb_exchange_sim(start_usd=usd, start_btc=btc)
historic_data = self.strategies[strategy].exchange.get_historic_rates(start_time=start_time, end_time=end_time, granularity=self.strategies[strategy].interval)
if type(historic_data) is not list:
print("API error: "+str(historic_data.get("message", "")))
print("Unable to optimize. Try changing strategy's interval")
pass
else:
print("Optimizing based on time frame of "+str(start_time)+" to "+str(end_time))
print("with "+str(len(historic_data))+" timeslices of length "+str(self.strategies[strategy].interval)+" seconds each")
for attribute in strategy_attributes:
if "_" not in str(attribute) and str(attribute) != "interval":
# Optimizing for interval would poll API too frequently
print("Enter the lower bound for attribute: "+str(attribute)+", or press enter to skip:")
raw_input = input("> ")
if raw_input == "":
pass
else:
lower_bound = float(raw_input)
print("Enter the upper bound for attribute: "+str(attribute)+":")
raw_input = input("> ")
upper_bound = float(raw_input)
print("Enter the granularity of this attribute i.e. how many different values to try:")
raw_input = input("> ")
granularity = float(raw_input)
if upper_bound is not None and lower_bound is not None:
bounds_by_attribute[str(attribute)] = {"lower":lower_bound, "upper":upper_bound, "granularity":granularity}
#self.strategies[strategy][attribute] = float(lower_bound)
attribute_vals_by_id = {}
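            # attribute_vals_by_id maps a configuration id (stored as a string) to a
            # dict of {attribute name: candidate value}, e.g. {"0": {"threshold": 0.5}}
            # (the attribute name "threshold" here is only illustrative).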
config_id = 0
            # Initialize attribute_vals_by_id
for attribute in bounds_by_attribute.keys():
num_shades_of_attr = int(bounds_by_attribute[attribute].get("granularity"))
increment = (float(upper_bound) - float(lower_bound))/num_shades_of_attr
attr_val = float(lower_bound)
for shade in range(num_shades_of_attr):
attribute_vals_by_id[str(config_id)] = {}
attribute_vals_by_id[str(config_id)][attribute] = attr_val
config_id += 1
# Fill in all possible values for the attributes
config_id = 0
for attribute in bounds_by_attribute.keys():
num_shades_of_attr = int(bounds_by_attribute[attribute].get("granularity"))
increment = (float(upper_bound) - float(lower_bound))/num_shades_of_attr
step = 0
attr_val = float(lower_bound) + (increment*step)
for shade in range(num_shades_of_attr):
attribute_vals_by_id[str(config_id)][attribute] = attr_val
config_id += 1
step += 1
performance_by_id = {}
performance_vs_mkt = 0
strategy_performance = 0
mkt_performance = 0
# Change the attribute values for this strategy, updating when the performance is highest
for configuration in attribute_vals_by_id.keys():
for attribute in attribute_vals_by_id[configuration]:
setattr(self.strategies[strategy], attribute, attribute_vals_by_id[configuration][attribute])
performance_vs_mkt, strategy_performance, mkt_performance = self.strategies[strategy].backtest_strategy(historic_data)
performance_by_id[str(configuration)] = performance_vs_mkt
best_config = "0"
for configuration in performance_by_id.keys():
if performance_by_id[configuration] > performance_by_id[best_config]:
best_config = configuration
print("The best performing strategy configuration is: "+str(attribute_vals_by_id[best_config]))
print("With a performance vs market of: "+str(performance_by_id[best_config]))
    # End python cmd functions
def authenticate_exchange(self):
print("Paste in your CoinbaseExchange API key:")
raw_input = input("> ")
self.auth_key = raw_input
print("Paste in your CoinbaseExchange API secret:")
raw_input = input("> ")
self.auth_secret = raw_input
print("Paste in your CoinbaseExchange API passphrase:")
raw_input = input("> ")
        if raw_input != "":
self.auth_password = raw_input
self.config.set("auth", "key", self.auth_key)
self.config.set("auth", "secret", self.auth_secret)
self.config.set("auth", "password", self.auth_password)
with open(self.config_path, "w", encoding='utf-8') as config_file:
self.config.write(config_file)
self.authenticate()
def authenticate(self):
#try:
self.exchange = cb_exchange(self.auth_key, self.auth_secret, self.auth_password)
self.accounts = self.exchange.list_accounts()
#except Exception as err:
# print("Error! Only unauthorized endpoints are available.")
# print("error: "+str(err))
# print("If you would like bitraider to walk you through authentication, enter the commands: \'config\' > \'auth\'")
def set_ticker_on(self):
        strategy = list(self.strategies.values())[0]  # use the first loaded strategy
start_time = time.time()
lower_bound = start_time
upper_bound = start_time + strategy.interval
elapsed_time = 0.0
last_intervals_trades = []
while True:
curr_time = time.time()
elapsed_time = curr_time - start_time
if elapsed_time % strategy.interval == 0:
# if we've reached a new interval, calculate data for the last interval and pass
# it onto the strategy
latest_trades = self.exchange.get_trades('BTC-USD')
interval_data = []
last_intervals_low = 999999999
last_intervals_high = 0.0
                last_intervals_open = 0.0
                last_intervals_close = 0.0
last_intervals_volume = 0.0
for trade in latest_trades:
# starting with the most recent trade, get trades for the last interval
datestring = str(trade.get("time"))[:-3]
trade_time = float(calendar.timegm(datetime.strptime(datestring, "%Y-%m-%d %H:%M:%S.%f").timetuple()))
if trade_time >= lower_bound and trade_time <= upper_bound:
last_intervals_trades.append(trade)
if float(trade.get('price')) > last_intervals_high:
last_intervals_high = float(trade.get('price'))
if float(trade.get('price')) < last_intervals_low:
last_intervals_low = float(trade.get('price'))
last_intervals_volume += float(trade.get('size'))
if len(last_intervals_trades) > 0:
last_intervals_close = float(last_intervals_trades[0].get('price'))
last_intervals_open = float(last_intervals_trades[-1].get('price'))
interval_start_time = curr_time - strategy.interval
interval_data.extend([interval_start_time, last_intervals_low, last_intervals_high,
last_intervals_open, last_intervals_close, last_intervals_volume])
print("last_intervals_trades: "+str(last_intervals_trades))
print("elapsed: "+str(elapsed_time))
last_intervals_trades = []
lower_bound += strategy.interval
upper_bound += strategy.interval
# Here's where the magic happens:
#strategy.trade(interval_data)
def run(self):
# Time Configuration
self.curr_time = time.time() # Seconds since Jan 1st, 1970
self.curr_timezone = pytz.timezone("US/Central")
self.cmdloop()
def print_curr_price(self):
"""Print the most recent price."""
print(self.exchange.get_last_trade('BTC-USD')['price'])
def load_strategy(self, module, cls):
"""Load a user-defined strategy from a file.
\n`module`: the filename in the current directory containing the strategy class which
inherits from bitraider.strategy (does not include .py)
\n`cls`: the classname within the file to load
"""
import_string = module+"."+cls
classname = str(cls)
_temp = __import__(module)
loaded_strategy_ = getattr(_temp, cls)
instance_of_loaded_strategy = loaded_strategy_()
self.strategies[classname] = instance_of_loaded_strategy
print("Loaded strategy: "+str(cls)+" from file: "+str(module)+".py")
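# Usage sketch (hypothetical names): runner().load_strategy("my_strategy", "MyStrategy")
# imports my_strategy.py from the working directory and registers an instance of
# MyStrategy under self.strategies["MyStrategy"].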
def run():
my_runner = runner()
my_runner.run()
if __name__=="__main__":
my_runner = runner()
my_runner.run()
|
gigitux/lollypop | refs/heads/master | src/player_linear.py | 2 | # Copyright (c) 2014-2015 Cedric Bellegarde <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lollypop.define import NextContext
from lollypop.player_base import BasePlayer
from lollypop.objects import Track, Album
class LinearPlayer(BasePlayer):
"""
Manage normal playback
"""
def __init__(self):
"""
Init linear player
"""
BasePlayer.__init__(self)
def next(self):
"""
        Next track based on current_track context
@return track as Track
"""
# If no album available, repeat current track
if not self._albums:
return self.current_track
track = Track()
if self._albums is not None:
album = Album(self.current_track.album.id, self.context.genre_id)
if self.current_track.id in album.tracks_ids:
new_track_position = album.tracks_ids.index(
self.current_track.id) + 1
# next album
if new_track_position >= len(album.tracks) or\
self.context.next == NextContext.START_NEW_ALBUM:
if self.context.next == NextContext.START_NEW_ALBUM:
self.context.next = NextContext.NONE
pos = self._albums.index(album.id)
# we are on last album, go to first
if pos + 1 >= len(self._albums):
pos = 0
else:
pos += 1
track = Album(self._albums[pos],
self.context.genre_id).tracks[0]
# next track
else:
track = album.tracks[new_track_position]
return track
def prev(self):
"""
        Prev track based on current_track context
@return track as Track
"""
# If no album available, repeat current track
if not self._albums:
return self.current_track
track = Track()
if self._albums is not None:
album = Album(self.current_track.album.id, self.context.genre_id)
if self.current_track.id in album.tracks_ids:
new_track_position = album.tracks_ids.index(
self.current_track.id) - 1
# Previous album
if new_track_position < 0:
pos = self._albums.index(album.id)
if pos - 1 < 0: # we are on last album, go to first
pos = len(self._albums) - 1
else:
pos -= 1
track = Album(self._albums[pos],
self.context.genre_id).tracks[-1]
# Previous track
else:
track = album.tracks[new_track_position]
return track
|
bowlofstew/changes | refs/heads/master | changes/api/build_index.py | 2 | from __future__ import absolute_import, division, unicode_literals
import json
import logging
import uuid
from cStringIO import StringIO
from flask.ext.restful import reqparse
from sqlalchemy.orm import joinedload, subqueryload_all
from werkzeug.datastructures import FileStorage
from changes.api.base import APIView, error
from changes.api.validators.author import AuthorValidator
from changes.config import db, statsreporter
from changes.constants import Result, Status, ProjectStatus
from changes.db.utils import get_or_create
from changes.jobs.create_job import create_job
from changes.jobs.sync_build import sync_build
from changes.models import (
Project, ProjectOptionsHelper, Build, Job, JobPlan, Repository,
RepositoryStatus, Patch, ItemOption, Source, PlanStatus, Revision,
ProjectConfigError
)
from changes.utils.diff_parser import DiffParser
from changes.utils.project_trigger import files_changed_should_trigger_project
from changes.vcs.base import CommandError, InvalidDiffError, UnknownRevision
class MissingRevision(Exception):
pass
def identify_revision(repository, treeish):
"""
    Attempt to transform a commit-like reference into a valid revision.
"""
# try to find it from the database first
if len(treeish) == 40:
revision = Revision.query.filter(
Revision.repository_id == repository.id,
Revision.sha == treeish,
).first()
if revision:
return revision
vcs = repository.get_vcs()
if not vcs:
return
try:
commit = vcs.log(parent=treeish, limit=1).next()
except CommandError:
# update and try one more time
vcs.update()
try:
commit = vcs.log(parent=treeish, limit=1).next()
except CommandError:
# TODO(dcramer): it's possible to DOS the endpoint by passing invalid
# commits so we should really cache the failed lookups
raise MissingRevision('Unable to find revision %s' % (treeish,))
revision, _, __ = commit.save(repository)
return revision
def _get_revision_changed_files(repository, revision):
vcs = repository.get_vcs()
if not vcs:
raise NotImplementedError
try:
diff = vcs.export(revision.sha)
except UnknownRevision:
vcs.update()
try:
diff = vcs.export(revision.sha)
except UnknownRevision:
raise MissingRevision('Unable to find revision %s' % (revision.sha,))
diff_parser = DiffParser(diff)
return diff_parser.get_changed_files()
def find_green_parent_sha(project, sha):
"""
Attempt to find a better revision than ``sha`` that is green.
- If sha is green, let it ride.
- Only search future revisions.
- Find the newest revision (more likely to conflict).
- If there's nothing better, return existing sha.
"""
green_rev = Build.query.join(
Source, Source.id == Build.source_id,
).filter(
Source.repository_id == project.repository_id,
Source.revision_sha == sha,
).first()
filters = []
if green_rev:
if green_rev.status == Status.finished and green_rev.result == Result.passed:
return sha
filters.append(Build.date_created > green_rev.date_created)
latest_green = Build.query.join(
Source, Source.id == Build.source_id,
).filter(
Build.status == Status.finished,
Build.result == Result.passed,
Build.project_id == project.id,
Source.patch_id == None, # NOQA
Source.revision_sha != None,
Source.repository_id == project.repository_id,
*filters
).order_by(Build.date_created.desc()).first()
if latest_green:
return latest_green.source.revision_sha
return sha
def create_build(project, collection_id, label, target, message, author,
change=None, patch=None, cause=None, source=None, sha=None,
source_data=None, tag=None):
assert sha or source
repository = project.repository
if source is None:
if patch:
source, _ = get_or_create(Source, where={
'patch': patch,
}, defaults={
'repository': repository,
'revision_sha': sha,
'data': source_data or {},
})
else:
source, _ = get_or_create(Source, where={
'repository': repository,
'patch': None,
'revision_sha': sha,
}, defaults={
'data': source_data or {},
})
statsreporter.stats().incr('new_api_build')
build = Build(
project=project,
project_id=project.id,
collection_id=collection_id,
source=source,
source_id=source.id if source else None,
status=Status.queued,
author=author,
author_id=author.id if author else None,
label=label,
target=target,
message=message,
cause=cause,
tags=[tag] if tag else [],
)
db.session.add(build)
db.session.commit()
execute_build(build=build)
return build
def get_build_plans(project):
return [p for p in project.plans if p.status == PlanStatus.active]
def execute_build(build):
# TODO(dcramer): most of this should be abstracted into sync_build as if it
# were a "im on step 0, create step 1"
project = build.project
jobs = []
for plan in get_build_plans(project):
job = Job(
build=build,
build_id=build.id,
project=project,
project_id=project.id,
source=build.source,
source_id=build.source_id,
status=build.status,
label=plan.label,
)
db.session.add(job)
jobplan = JobPlan.build_jobplan(plan, job)
db.session.add(jobplan)
jobs.append(job)
db.session.commit()
for job in jobs:
create_job.delay(
job_id=job.id.hex,
task_id=job.id.hex,
parent_task_id=job.build_id.hex,
)
db.session.commit()
sync_build.delay(
build_id=build.id.hex,
task_id=build.id.hex,
)
return build
def get_repository_by_callsign(callsign):
# It's possible to have multiple repositories with the same callsign due
# to us not enforcing a unique constraint (via options). Given that it is
# complex and shouldn't actually happen we make an assumption that there's
# only a single repo
item_id_list = db.session.query(ItemOption.item_id).filter(
ItemOption.name == 'phabricator.callsign',
ItemOption.value == callsign,
)
repo_list = list(Repository.query.filter(
Repository.id.in_(item_id_list),
Repository.status == RepositoryStatus.active,
))
if len(repo_list) > 1:
logging.warning('Multiple repositories found matching phabricator.callsign=%s', callsign)
elif not repo_list:
return None # Match behavior of project and repository parameters
return repo_list[0]
def get_repository_by_url(url):
return Repository.query.filter(
Repository.url == url,
Repository.status == RepositoryStatus.active,
).first()
def try_get_projects_and_repository(args):
"""Given a set of HTTP POST arguments, try and find the appropriate
projects and repository.
Possible inputs:
project
Returns: (A list containing only this project) * its repository
repository
Returns: All active projects for this repo * repo
repository living at key 'repository[phabricator.callsign]'
Returns: All active projects for this repo * repo
"""
if args.project:
repository = Repository.query.get(args.project.repository_id)
return [args.project], repository
elif args.repository:
repository = args.repository
projects = list(Project.query.options(
subqueryload_all('plans'),
).filter(
Project.status == ProjectStatus.active,
Project.repository_id == repository.id,
))
return projects, repository
elif args['repository[phabricator.callsign]']:
repository = args['repository[phabricator.callsign]']
projects = list(Project.query.options(
subqueryload_all('plans'),
).filter(
Project.status == ProjectStatus.active,
Project.repository_id == repository.id,
))
return projects, repository
else:
return None, None
class BuildIndexAPIView(APIView):
parser = reqparse.RequestParser()
"""The commit ID to base this build on. A patch may also be applied - see below.
"""
parser.add_argument('sha', type=str, required=True)
"""The project slug to build.
Optional
"""
parser.add_argument('project', type=lambda x: Project.query.filter(
Project.slug == x,
Project.status == ProjectStatus.active,
).first())
# TODO(dcramer): it might make sense to move the repository and callsign
# options into something like a "repository builds index" endpoint
"""The repository url for the repo to build with.
Optional
"""
parser.add_argument('repository', type=get_repository_by_url)
"""The Phabricator callsign for the repo to build with.
Optional
"""
parser.add_argument('repository[phabricator.callsign]', type=get_repository_by_callsign)
"""Optional author for this build. If nothing is passed in, the commit
author is used. See AuthorValidator for format.
"""
parser.add_argument('author', type=AuthorValidator())
"""Optional label to store with this build. If nothing is passed in,
the commit subject for the revision is used.
"""
parser.add_argument('label', type=unicode)
"""Optional indicator of what is being built, like a Phabricator revision
    D1234. If nothing is passed in, part of the sha is used.
"""
parser.add_argument('target', type=unicode)
"""Optional message to tag along with the created builds. If nothing is passed
in, the commit message of the revision is used.
"""
parser.add_argument('message', type=unicode)
"""The optional patch to apply to the given revision before building. This must
be the same format as a `git diff` for a git repo. This is attached as a file.
Use this to create a diff build. Omit to create a commit build.
"""
parser.add_argument('patch', type=FileStorage, dest='patch_file', location='files')
"""Additional metadata to attach to the patch. This must be in serialized
JSON format, and will be stored in the Source model as the data column.
If nothing is passed in, then an empty dictionary is saved in the data column
"""
parser.add_argument('patch[data]', type=unicode, dest='patch_data')
"""A tag that will get stored with created build. Can be used to track
the cause of this build (i.e. commit-queue)
"""
parser.add_argument('tag', type=unicode)
"""A JSON list of project slugs that will act as a whitelist, meaning
only projects with these slugs will be created.
Optional - if nothing is given, no whitelisting is applied
"""
parser.add_argument('project_whitelist', type=lambda x: json.loads(x))
"""Deprecated. This means the same thing as `apply_project_files_trigger`,
and if both are present, `apply_project_files_trigger` is used.
"""
parser.add_argument('apply_file_whitelist', type=bool)
"""A flag to indicate whether the file blacklist and whitelist should be
used to filter out projects. Defaults to true for diff builds and false for
commit builds for compatibility reasons.
"""
parser.add_argument('apply_project_files_trigger', type=bool)
"""A flag to indicate that for each project, if there is an existing build,
return the latest build. Only when there are no builds for a project is
one created. This is done at the very end, after all the filters.
TODO: right now this only works with a commit build. The issue is that
for diff build, we are always creating a new Patch object. We can
change that behavior to instead retrieve an existing Patch object, but
that would be a potentially significant behavior change and should only
be done when we actually have a use case for ensure-only mode in a diff
build.
Optional - defaults to False
"""
parser.add_argument('ensure_only', type=bool, default=False)
def get(self):
queryset = Build.query.options(
joinedload('project'),
joinedload('author'),
joinedload('source').joinedload('revision'),
).order_by(Build.date_created.desc(), Build.date_started.desc())
return self.paginate(queryset)
def post(self):
"""
Create a new commit or diff build. The API roughly goes like this:
1. Identify the project(s) to build for. This can be done by specifying
``project``, ``repository``, or ``repository[callsign]``. If a repository is
specified somehow, then all projects for that repository are considered
for building.
2. Using the ``sha``, find the appropriate revision object. This may
involve updating the repo.
3. If ``patch`` is given, then apply the patch and mark this as a diff build.
Otherwise, this is a commit build.
4. If provided, apply project_whitelist, filtering out projects not in
this whitelist.
5. Based on the flag ``apply_project_files_trigger`` (see comment on the argument
itself for default values), decide whether or not to filter out projects
by file blacklist and whitelist.
6. Attach metadata and create/ensure existence of a build for each project,
depending on the flag ``ensure_only``.
NOTE: In ensure-only mode, the collection_ids of the returned builds are
not necessarily identical, as we give new builds new collection IDs
and preserve the existing builds' collection IDs.
NOTE: If ``patch`` is specified ``sha`` is assumed to be the original
base revision to apply the patch.
Not relevant until we fix TODO: ``sha`` is **not** guaranteed to be the rev
used to apply the patch. See ``find_green_parent_sha`` for the logic of
identifying the correct revision.
"""
args = self.parser.parse_args()
if args.patch_file and args.ensure_only:
return error("Ensure-only mode does not work with a diff build yet.", problems=["patch", "ensure_only"])
if not (args.project or args.repository or args['repository[phabricator.callsign]']):
return error("Project or repository must be specified",
problems=["project", "repository",
"repository[phabricator.callsign]"])
# read arguments
if args.patch_data:
try:
patch_data = json.loads(args.patch_data)
except Exception:
return error("Invalid patch data (must be JSON dict)",
problems=["patch[data]"])
if not isinstance(patch_data, dict):
return error("Invalid patch data (must be JSON dict)",
problems=["patch[data]"])
else:
patch_data = None
# 1. identify project(s)
projects, repository = try_get_projects_and_repository(args)
if not projects:
return error("Unable to find project(s).")
# read arguments
label = args.label
author = args.author
message = args.message
tag = args.tag
if not tag and args.patch_file:
tag = 'patch'
# 2. find revision
try:
revision = identify_revision(repository, args.sha)
except MissingRevision:
# if the default fails, we absolutely can't continue and the
# client should send a valid revision
return error("Unable to find commit %s in %s." % (
args.sha, repository.url), problems=['sha', 'repository'])
# get default values for arguments
if revision:
if not author:
author = revision.author
if not label:
label = revision.subject
# only default the message if its absolutely not set
if message is None:
message = revision.message
sha = revision.sha
else:
sha = args.sha
if not args.target:
target = sha[:12]
else:
target = args.target[:128]
if not label:
if message:
label = message.splitlines()[0]
if not label:
label = 'A homeless build'
label = label[:128]
# 3. Check for patch
if args.patch_file:
fp = StringIO()
for line in args.patch_file:
fp.write(line)
patch_file = fp
else:
patch_file = None
if patch_file:
patch = Patch(
repository=repository,
parent_revision_sha=sha,
diff=patch_file.getvalue(),
)
db.session.add(patch)
else:
patch = None
project_options = ProjectOptionsHelper.get_options(projects, ['build.file-whitelist'])
# mark as commit or diff build
if not patch:
is_commit_build = True
else:
is_commit_build = False
apply_project_files_trigger = args.apply_project_files_trigger
if apply_project_files_trigger is None:
apply_project_files_trigger = args.apply_file_whitelist
if apply_project_files_trigger is None:
if is_commit_build:
apply_project_files_trigger = False
else:
apply_project_files_trigger = True
if apply_project_files_trigger:
if patch:
diff_parser = DiffParser(patch.diff)
files_changed = diff_parser.get_changed_files()
elif revision:
try:
files_changed = _get_revision_changed_files(repository, revision)
except MissingRevision:
return error("Unable to find commit %s in %s." % (
args.sha, repository.url), problems=['sha', 'repository'])
else:
# the only way that revision can be null is if this repo does not have a vcs backend
logging.warning('Revision and patch are both None for sha %s. This is because the repo %s does not have a VCS backend.', sha, repository.url)
files_changed = None
else:
# we won't be applying file whitelist, so there is no need to get the list of changed files.
files_changed = None
collection_id = uuid.uuid4()
builds = []
for project in projects:
plan_list = get_build_plans(project)
if not plan_list:
logging.warning('No plans defined for project %s', project.slug)
continue
# 4. apply project whitelist as appropriate
if args.project_whitelist is not None and project.slug not in args.project_whitelist:
logging.info('Project %s is not in the supplied whitelist', project.slug)
continue
forced_sha = sha
# TODO(dcramer): find_green_parent_sha needs to take branch
# into account
# if patch_file:
# forced_sha = find_green_parent_sha(
# project=project,
# sha=sha,
# )
# 5. apply file whitelist as appropriate
diff = None
if patch is not None:
diff = patch.diff
try:
if (
apply_project_files_trigger
and files_changed is not None
and not files_changed_should_trigger_project(
files_changed, project, project_options[project.id], sha, diff)
):
logging.info('Changed files do not trigger build for project %s', project.slug)
continue
except InvalidDiffError:
# ok, the build will fail and the user will be notified.
pass
except ProjectConfigError:
author_name = '(Unknown)'
if author:
author_name = author.name
logging.error('Project config for project %s is not in a valid format. Author is %s.', project.slug, author_name, exc_info=True)
# 6. create/ensure build
if args.ensure_only:
potentials = list(Build.query.filter(
Build.project_id == project.id,
Build.source.has(revision_sha=sha, patch=patch),
).order_by(
Build.date_created.desc() # newest first
).limit(1))
if len(potentials) == 0:
builds.append(create_build(
project=project,
collection_id=collection_id,
sha=forced_sha,
target=target,
label=label,
message=message,
author=author,
patch=patch,
source_data=patch_data,
tag=tag,
))
else:
builds.append(potentials[0])
else:
builds.append(create_build(
project=project,
collection_id=collection_id,
sha=forced_sha,
target=target,
label=label,
message=message,
author=author,
patch=patch,
source_data=patch_data,
tag=tag,
))
return self.respond(builds)
|
RossBrunton/django | refs/heads/master | tests/gis_tests/gis_migrations/migrations/0001_initial.py | 46 | from django.db import connection, migrations, models
from ...models import models as gis_models
ops = [
migrations.CreateModel(
name='Neighborhood',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100, unique=True)),
('geom', gis_models.MultiPolygonField(srid=4326)),
],
options={
'required_db_features': ['gis_enabled'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Household',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('neighborhood', models.ForeignKey(to='gis_migrations.Neighborhood', to_field='id', null=True)),
('address', models.CharField(max_length=100)),
('zip_code', models.IntegerField(null=True, blank=True)),
('geom', gis_models.PointField(srid=4326, geography=True)),
],
options={
'required_db_features': ['gis_enabled'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Family',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100, unique=True)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='household',
name='family',
field=models.ForeignKey(blank=True, to='gis_migrations.Family', null=True),
preserve_default=True,
)
]
if connection.features.gis_enabled and connection.features.supports_raster:
ops += [
migrations.CreateModel(
name='Heatmap',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100, unique=True)),
('rast', gis_models.fields.RasterField(srid=4326)),
],
options={
},
bases=(models.Model,),
),
]
class Migration(migrations.Migration):
"""
Used for gis-specific migration tests.
"""
operations = ops
|
remap/ndn-flow | refs/heads/master | framework/ndn_iot_python/examples/test_producing.py | 1 | #!/usr/bin/python
from pyndn import Name, Interest, Data
from pyndn.util.memory_content_cache import MemoryContentCache
from pyndn.security import KeyChain
from pyndn.threadsafe_face import ThreadsafeFace
from ndn_iot_python.bootstrap.bootstrap import Bootstrap
import time
import sys
import json
import logging
try:
import asyncio
except ImportError:
import trollius as asyncio
# TODO: debug, always seems to need to restart iot_controller for the command to be handled correctly
# Follow-up: it seems to be timestamp related
class AppProducer():
def __init__(self, face, certificateName, keyChain, dataPrefix):
self._keyChain = keyChain
self._certificateName = certificateName
self._face = face
self._dataPrefix = dataPrefix
return
def start(self):
self._dataCache = MemoryContentCache(self._face, 100000)
self.registerCachePrefix()
print "Serving data at {}".format(self._dataPrefix.toUri())
self._face.callLater(5000, self.publishData)
return
def registerCachePrefix(self):
self._dataCache.registerPrefix(self._dataPrefix, self.cacheRegisterFail, self.onDataMissing)
def cacheRegisterFail(self, interest):
# just try again
        print "Could not register data cache"
self.registerCachePrefix()
def onDataMissing(self, prefix, interest, face, interestFilterId, filter):
print "data not found for " + interest.getName().toUri()
# let it timeout
def publishData(self):
timestamp = time.time()
cpu_use = ps.cpu_percent()
users = [u.name for u in ps.users()]
nProcesses = len(ps.pids())
memUse = ps.virtual_memory().percent
swapUse = ps.swap_memory().percent
info = {'cpu_usage':cpu_use, 'users':users, 'processes':nProcesses,
'memory_usage':memUse, 'swap_usage':swapUse}
dataOut = Data(Name(self._dataPrefix).appendVersion(int(timestamp)))
dataOut.setContent(json.dumps(info))
dataOut.getMetaInfo().setFreshnessPeriod(10000)
self._keyChain.sign(dataOut, self._certificateName)
self._dataCache.add(dataOut)
print "data added: " + dataOut.getName().toUri()
# repeat every 5 seconds
self._face.callLater(5000, self.publishData)
if __name__ == '__main__':
try:
import psutil as ps
except Exception as e:
print str(e)
loop = asyncio.get_event_loop()
face = ThreadsafeFace(loop)
bootstrap = Bootstrap(face)
appName = "flow"
dataPrefix = Name("/home/flow/ps-publisher-4")
def onSetupComplete(defaultCertificateName, keyChain):
def onRequestSuccess():
print "data production authorized by controller"
producer = AppProducer(face, defaultCertificateName, keyChain, dataPrefix)
producer.start()
return
def onRequestFailed(msg):
print "data production not authorized by controller : " + msg
# For this test, we start anyway
producer = AppProducer(face, defaultCertificateName, keyChain, dataPrefix)
producer.start()
return
bootstrap.requestProducerAuthorization(dataPrefix, appName, onRequestSuccess, onRequestFailed)
def onSetupFailed(msg):
print("Setup failed " + msg)
bootstrap.setupDefaultIdentityAndRoot("app.conf", onSetupComplete = onSetupComplete, onSetupFailed = onSetupFailed)
loop.run_forever() |
shin-/compose | refs/heads/master | tests/unit/timeparse_test.py | 11 | from __future__ import absolute_import
from __future__ import unicode_literals
from compose import timeparse
def test_milli():
assert timeparse.timeparse('5ms') == 0.005
def test_milli_float():
assert timeparse.timeparse('50.5ms') == 0.0505
def test_second_milli():
assert timeparse.timeparse('200s5ms') == 200.005
def test_second_milli_micro():
assert timeparse.timeparse('200s5ms10us') == 200.00501
def test_second():
assert timeparse.timeparse('200s') == 200
def test_second_as_float():
assert timeparse.timeparse('20.5s') == 20.5
def test_minute():
assert timeparse.timeparse('32m') == 1920
def test_hour_minute():
assert timeparse.timeparse('2h32m') == 9120
def test_minute_as_float():
assert timeparse.timeparse('1.5m') == 90
def test_hour_minute_second():
assert timeparse.timeparse('5h34m56s') == 20096
def test_invalid_with_space():
assert timeparse.timeparse('5h 34m 56s') is None
def test_invalid_with_comma():
assert timeparse.timeparse('5h,34m,56s') is None
def test_invalid_with_empty_string():
assert timeparse.timeparse('') is None
|
XiaodunServerGroup/ddyedx | refs/heads/master | common/lib/xmodule/xmodule/lti_module.py | 6 | """
Learning Tools Interoperability (LTI) module.
Resources
---------
Theoretical background and detailed specifications of LTI can be found on:
http://www.imsglobal.org/LTI/v1p1p1/ltiIMGv1p1p1.html
This module is based on the version 1.1.1 of the LTI specifications by the
IMS Global authority. For authentication, it uses OAuth1.
When responding back to the LTI tool provider, we must issue a correct
response. Types of responses and their message payload is available at:
Table A1.2 Interpretation of the 'CodeMajor/severity' matrix.
http://www.imsglobal.org/gws/gwsv1p0/imsgws_wsdlBindv1p0.html
A resource to test the LTI protocol (PHP realization):
http://www.imsglobal.org/developers/LTI/test/v1p1/lms.php
What is supported:
------------------
1.) Display of simple LTI in iframe or a new window.
2.) Multiple LTI components on a single page.
3.) The use of multiple LTI providers per course.
4.) Use of advanced LTI component that provides back a grade.
a.) The LTI provider sends back a grade to a specified URL.
b.) Currently only action "update" is supported. "Read", and "delete"
actions initially weren't required.
"""
import logging
import oauthlib.oauth1
from oauthlib.oauth1.rfc5849 import signature
import hashlib
import base64
import urllib
import textwrap
import json
from lxml import etree
from webob import Response
import mock
from xml.sax.saxutils import escape
from xmodule.editing_module import MetadataOnlyEditingDescriptor
from xmodule.raw_module import EmptyDataRawDescriptor
from xmodule.x_module import XModule, module_attr
from xmodule.course_module import CourseDescriptor
from pkg_resources import resource_string
from xblock.core import String, Scope, List, XBlock
from xblock.fields import Boolean, Float
log = logging.getLogger(__name__)
class LTIError(Exception):
pass
class LTIFields(object):
"""
Fields to define and obtain LTI tool from provider are set here,
except credentials, which should be set in course settings::
`lti_id` is id to connect tool with credentials in course settings. It should not contain :: (double semicolon)
`launch_url` is launch URL of tool.
`custom_parameters` are additional parameters to navigate to proper book and book page.
For example, for Vitalsource provider, `launch_url` should be
*https://bc-staging.vitalsource.com/books/book*,
and to get to proper book and book page, you should set custom parameters as::
vbid=put_book_id_here
book_location=page/put_page_number_here
Default non-empty URL for `launch_url` is needed due to oauthlib demand (URL scheme should be presented)::
https://github.com/idan/oauthlib/blob/master/oauthlib/oauth1/rfc5849/signature.py#L136
"""
display_name = String(display_name="Display Name", help="Display name for this module", scope=Scope.settings, default="LTI")
lti_id = String(help="Id of the tool", default='', scope=Scope.settings)
launch_url = String(help="URL of the tool", default='http://www.example.com', scope=Scope.settings)
custom_parameters = List(help="Custom parameters (vbid, book_location, etc..)", scope=Scope.settings)
open_in_a_new_page = Boolean(help="Should LTI be opened in new page?", default=True, scope=Scope.settings)
graded = Boolean(help="Grades will be considered in overall score.", default=False, scope=Scope.settings)
weight = Float(
help="Weight for student grades.",
default=1.0,
scope=Scope.settings,
values={"min": 0},
)
has_score = Boolean(help="Does this LTI module have score?", default=False, scope=Scope.settings)
class LTIModule(LTIFields, XModule):
"""
Module provides LTI integration to course.
Except usual Xmodule structure it proceeds with OAuth signing.
How it works::
1. Get credentials from course settings.
2. There is minimal set of parameters need to be signed (presented for Vitalsource)::
user_id
oauth_callback
lis_outcome_service_url
lis_result_sourcedid
launch_presentation_return_url
lti_message_type
lti_version
roles
*+ all custom parameters*
These parameters should be encoded and signed by *OAuth1* together with
`launch_url` and *POST* request type.
3. Signing proceeds with client key/secret pair obtained from course settings.
That pair should be obtained from LTI provider and set into course settings by course author.
After that signature and other OAuth data are generated.
OAuth data which is generated after signing is usual::
oauth_callback
oauth_nonce
oauth_consumer_key
oauth_signature_method
oauth_timestamp
oauth_version
4. All that data is passed to form and sent to LTI provider server by browser via
autosubmit via JavaScript.
Form example::
<form
action="${launch_url}"
name="ltiLaunchForm-${element_id}"
class="ltiLaunchForm"
method="post"
target="ltiLaunchFrame-${element_id}"
encType="application/x-www-form-urlencoded"
>
<input name="launch_presentation_return_url" value="" />
<input name="lis_outcome_service_url" value="" />
<input name="lis_result_sourcedid" value="" />
<input name="lti_message_type" value="basic-lti-launch-request" />
<input name="lti_version" value="LTI-1p0" />
<input name="oauth_callback" value="about:blank" />
<input name="oauth_consumer_key" value="${oauth_consumer_key}" />
<input name="oauth_nonce" value="${oauth_nonce}" />
<input name="oauth_signature_method" value="HMAC-SHA1" />
<input name="oauth_timestamp" value="${oauth_timestamp}" />
<input name="oauth_version" value="1.0" />
<input name="user_id" value="${user_id}" />
<input name="role" value="student" />
<input name="oauth_signature" value="${oauth_signature}" />
<input name="custom_1" value="${custom_param_1_value}" />
<input name="custom_2" value="${custom_param_2_value}" />
<input name="custom_..." value="${custom_param_..._value}" />
<input type="submit" value="Press to Launch" />
</form>
5. LTI provider has same secret key and it signs data string via *OAuth1* and compares signatures.
If signatures are correct, LTI provider redirects iframe source to LTI tool web page,
and LTI tool is rendered to iframe inside course.
Otherwise error message from LTI provider is generated.
"""
css = {'scss': [resource_string(__name__, 'css/lti/lti.scss')]}
def get_input_fields(self):
# LTI provides a list of default parameters that might be passed as
# part of the POST data. These parameters should not be prefixed.
# Likewise, The creator of an LTI link can add custom key/value parameters
# to a launch which are to be included with the launch of the LTI link.
# In this case, we will automatically add `custom_` prefix before this parameters.
# See http://www.imsglobal.org/LTI/v1p1p1/ltiIMGv1p1p1.html#_Toc316828520
PARAMETERS = [
"lti_message_type",
"lti_version",
"resource_link_id",
"resource_link_title",
"resource_link_description",
"user_id",
"user_image",
"roles",
"lis_person_name_given",
"lis_person_name_family",
"lis_person_name_full",
"lis_person_contact_email_primary",
"lis_person_sourcedid",
"role_scope_mentor",
"context_id",
"context_type",
"context_title",
"context_label",
"launch_presentation_locale",
"launch_presentation_document_target",
"launch_presentation_css_url",
"launch_presentation_width",
"launch_presentation_height",
"launch_presentation_return_url",
"tool_consumer_info_product_family_code",
"tool_consumer_info_version",
"tool_consumer_instance_guid",
"tool_consumer_instance_name",
"tool_consumer_instance_description",
"tool_consumer_instance_url",
"tool_consumer_instance_contact_email",
]
client_key, client_secret = self.get_client_key_secret()
# parsing custom parameters to dict
custom_parameters = {}
for custom_parameter in self.custom_parameters:
try:
param_name, param_value = [p.strip() for p in custom_parameter.split('=', 1)]
except ValueError:
raise LTIError('Could not parse custom parameter: {0!r}. \
Should be "x=y" string.'.format(custom_parameter))
# LTI specs: 'custom_' should be prepended before each custom parameter, as pointed in link above.
if param_name not in PARAMETERS:
param_name = 'custom_' + param_name
custom_parameters[unicode(param_name)] = unicode(param_value)
return self.oauth_params(
custom_parameters,
client_key,
client_secret,
)
def get_context(self):
"""
Returns a context.
"""
return {
'input_fields': self.get_input_fields(),
# These parameters do not participate in OAuth signing.
'launch_url': self.launch_url.strip(),
'element_id': self.location.html_id(),
'element_class': self.category,
'open_in_a_new_page': self.open_in_a_new_page,
'display_name': self.display_name,
'form_url': self.runtime.handler_url(self, 'preview_handler').rstrip('/?'),
}
def get_html(self):
"""
Renders parameters to template.
"""
return self.system.render_template('lti.html', self.get_context())
@XBlock.handler
def preview_handler(self, _, __):
"""
This is called to get context with new oauth params to iframe.
"""
template = self.system.render_template('lti_form.html', self.get_context())
return Response(template, content_type='text/html')
def get_user_id(self):
user_id = self.runtime.anonymous_student_id
assert user_id is not None
return unicode(urllib.quote(user_id))
def get_outcome_service_url(self):
"""
Return URL for storing grades.
To test LTI on sandbox we must use http scheme.
While testing locally and on Jenkins, mock_lti_server use http.referer
to obtain scheme, so it is ok to have http(s) anyway.
"""
scheme = 'http' if 'sandbox' in self.system.hostname or self.system.debug else 'https'
uri = '{scheme}://{host}{path}'.format(
scheme=scheme,
host=self.system.hostname,
path=self.runtime.handler_url(self, 'grade_handler', thirdparty=True).rstrip('/?')
)
return uri
def get_resource_link_id(self):
"""
This is an opaque unique identifier that the TC guarantees will be unique
within the TC for every placement of the link.
If the tool / activity is placed multiple times in the same context,
each of those placements will be distinct.
This value will also change if the item is exported from one system or
context and imported into another system or context.
This parameter is required.
"""
return unicode(urllib.quote(self.id))
def get_lis_result_sourcedid(self):
"""
This field contains an identifier that indicates the LIS Result Identifier (if any)
associated with this launch. This field identifies a unique row and column within the
TC gradebook. This field is unique for every combination of context_id / resource_link_id / user_id.
This value may change for a particular resource_link_id / user_id from one launch to the next.
The TP should only retain the most recent value for this field for a particular resource_link_id / user_id.
This field is generally optional, but is required for grading.
context_id is - is an opaque identifier that uniquely identifies the context that contains
the link being launched.
lti_id should be context_id by meaning.
"""
return "{id}:{resource_link}:{user_id}".format(
id=urllib.quote(self.lti_id),
resource_link=urllib.quote(self.get_resource_link_id()),
user_id=urllib.quote(self.get_user_id())
)
def get_course(self):
"""
Return course by course id.
"""
course_location = CourseDescriptor.id_to_location(self.course_id)
course = self.descriptor.runtime.modulestore.get_item(course_location)
return course
@property
def role(self):
"""
Get system user role and convert it to LTI role.
"""
roles = {
'student': u'Student',
'staff': u'Administrator',
'instructor': u'Instructor',
}
return roles.get(self.system.get_user_role(), u'Student')
def oauth_params(self, custom_parameters, client_key, client_secret):
"""
Signs request and returns signature and OAuth parameters.
        `custom_parameters` is a dict of parsed `custom_parameter` field
`client_key` and `client_secret` are LTI tool credentials.
Also *anonymous student id* is passed to template and therefore to LTI provider.
"""
client = oauthlib.oauth1.Client(
client_key=unicode(client_key),
client_secret=unicode(client_secret)
)
# Must have parameters for correct signing from LTI:
body = {
u'user_id': self.get_user_id(),
u'oauth_callback': u'about:blank',
u'launch_presentation_return_url': '',
u'lti_message_type': u'basic-lti-launch-request',
u'lti_version': 'LTI-1p0',
u'roles': self.role,
# Parameters required for grading:
u'resource_link_id': self.get_resource_link_id(),
u'lis_result_sourcedid': self.get_lis_result_sourcedid(),
}
if self.has_score:
body.update({
u'lis_outcome_service_url': self.get_outcome_service_url()
})
# Appending custom parameter for signing.
body.update(custom_parameters)
headers = {
# This is needed for body encoding:
'Content-Type': 'application/x-www-form-urlencoded',
}
try:
__, headers, __ = client.sign(
unicode(self.launch_url.strip()),
http_method=u'POST',
body=body,
headers=headers)
except ValueError: # Scheme not in url.
# https://github.com/idan/oauthlib/blob/master/oauthlib/oauth1/rfc5849/signature.py#L136
# Stubbing headers for now:
headers = {
u'Content-Type': u'application/x-www-form-urlencoded',
u'Authorization': u'OAuth oauth_nonce="80966668944732164491378916897", \
oauth_timestamp="1378916897", oauth_version="1.0", oauth_signature_method="HMAC-SHA1", \
oauth_consumer_key="", oauth_signature="frVp4JuvT1mVXlxktiAUjQ7%2F1cw%3D"'}
params = headers['Authorization']
# Parse headers to pass to template as part of context:
params = dict([param.strip().replace('"', '').split('=') for param in params.split(',')])
params[u'oauth_nonce'] = params[u'OAuth oauth_nonce']
del params[u'OAuth oauth_nonce']
# oauthlib encodes signature with
# 'Content-Type': 'application/x-www-form-urlencoded'
# so '='' becomes '%3D'.
# We send form via browser, so browser will encode it again,
# So we need to decode signature back:
params[u'oauth_signature'] = urllib.unquote(params[u'oauth_signature']).decode('utf8')
# Add LTI parameters to OAuth parameters for sending in form.
params.update(body)
return params
def max_score(self):
return self.weight if self.has_score else None
@XBlock.handler
def grade_handler(self, request, dispatch):
"""
This is called by courseware.module_render, to handle an AJAX call.
Used only for grading. Returns XML response.
Example of request body from LTI provider::
<?xml version = "1.0" encoding = "UTF-8"?>
<imsx_POXEnvelopeRequest xmlns = "some_link (may be not required)">
<imsx_POXHeader>
<imsx_POXRequestHeaderInfo>
<imsx_version>V1.0</imsx_version>
<imsx_messageIdentifier>528243ba5241b</imsx_messageIdentifier>
</imsx_POXRequestHeaderInfo>
</imsx_POXHeader>
<imsx_POXBody>
<replaceResultRequest>
<resultRecord>
<sourcedGUID>
<sourcedId>feb-123-456-2929::28883</sourcedId>
</sourcedGUID>
<result>
<resultScore>
<language>en-us</language>
<textString>0.4</textString>
</resultScore>
</result>
</resultRecord>
</replaceResultRequest>
</imsx_POXBody>
</imsx_POXEnvelopeRequest>
Example of correct/incorrect answer XML body:: see response_xml_template.
"""
response_xml_template = textwrap.dedent("""\
<?xml version="1.0" encoding="UTF-8"?>
<imsx_POXEnvelopeResponse xmlns = "http://www.imsglobal.org/services/ltiv1p1/xsd/imsoms_v1p0">
<imsx_POXHeader>
<imsx_POXResponseHeaderInfo>
<imsx_version>V1.0</imsx_version>
<imsx_messageIdentifier>{imsx_messageIdentifier}</imsx_messageIdentifier>
<imsx_statusInfo>
<imsx_codeMajor>{imsx_codeMajor}</imsx_codeMajor>
<imsx_severity>status</imsx_severity>
<imsx_description>{imsx_description}</imsx_description>
<imsx_messageRefIdentifier>
</imsx_messageRefIdentifier>
</imsx_statusInfo>
</imsx_POXResponseHeaderInfo>
</imsx_POXHeader>
<imsx_POXBody>{response}</imsx_POXBody>
</imsx_POXEnvelopeResponse>
""")
# Returns when `action` is unsupported.
# Supported actions:
# - replaceResultRequest.
unsupported_values = {
'imsx_codeMajor': 'unsupported',
'imsx_description': 'Target does not support the requested operation.',
'imsx_messageIdentifier': 'unknown',
'response': ''
}
# Returns if:
# - score is out of range;
# - can't parse response from TP;
# - can't verify OAuth signing or OAuth signing is incorrect.
failure_values = {
'imsx_codeMajor': 'failure',
'imsx_description': 'The request has failed.',
'imsx_messageIdentifier': 'unknown',
'response': ''
}
try:
imsx_messageIdentifier, sourcedId, score, action = self.parse_grade_xml_body(request.body)
except Exception as e:
error_message = "Request body XML parsing error: " + escape(e.message)
log.debug("[LTI]: " + error_message)
failure_values['imsx_description'] = error_message
return Response(response_xml_template.format(**failure_values), content_type="application/xml")
# Verify OAuth signing.
try:
self.verify_oauth_body_sign(request)
except (ValueError, LTIError) as e:
failure_values['imsx_messageIdentifier'] = escape(imsx_messageIdentifier)
error_message = "OAuth verification error: " + escape(e.message)
failure_values['imsx_description'] = error_message
log.debug("[LTI]: " + error_message)
return Response(response_xml_template.format(**failure_values), content_type="application/xml")
real_user = self.system.get_real_user(urllib.unquote(sourcedId.split(':')[-1]))
if not real_user: # that means we can't save to database, as we do not have real user id.
failure_values['imsx_messageIdentifier'] = escape(imsx_messageIdentifier)
failure_values['imsx_description'] = "User not found."
return Response(response_xml_template.format(**failure_values), content_type="application/xml")
if action == 'replaceResultRequest':
self.system.publish(
self,
{
'event_name': 'grade',
'value': score * self.max_score(),
'max_value': self.max_score(),
},
custom_user=real_user
)
values = {
'imsx_codeMajor': 'success',
'imsx_description': 'Score for {sourced_id} is now {score}'.format(sourced_id=sourcedId, score=score),
'imsx_messageIdentifier': escape(imsx_messageIdentifier),
'response': '<replaceResultResponse/>'
}
log.debug("[LTI]: Grade is saved.")
return Response(response_xml_template.format(**values), content_type="application/xml")
unsupported_values['imsx_messageIdentifier'] = escape(imsx_messageIdentifier)
log.debug("[LTI]: Incorrect action.")
return Response(response_xml_template.format(**unsupported_values), content_type='application/xml')
@classmethod
def parse_grade_xml_body(cls, body):
"""
Parses XML from request.body and returns parsed data
XML body should contain nsmap with namespace, that is specified in LTI specs.
Returns tuple: imsx_messageIdentifier, sourcedId, score, action
Raises Exception if can't parse.
"""
lti_spec_namespace = "http://www.imsglobal.org/services/ltiv1p1/xsd/imsoms_v1p0"
namespaces = {'def': lti_spec_namespace}
data = body.strip().encode('utf-8')
parser = etree.XMLParser(ns_clean=True, recover=True, encoding='utf-8')
root = etree.fromstring(data, parser=parser)
imsx_messageIdentifier = root.xpath("//def:imsx_messageIdentifier", namespaces=namespaces)[0].text
sourcedId = root.xpath("//def:sourcedId", namespaces=namespaces)[0].text
score = root.xpath("//def:textString", namespaces=namespaces)[0].text
action = root.xpath("//def:imsx_POXBody", namespaces=namespaces)[0].getchildren()[0].tag.replace('{'+lti_spec_namespace+'}', '')
# Raise exception if score is not float or not in range 0.0-1.0 regarding spec.
score = float(score)
if not 0 <= score <= 1:
raise LTIError('score value outside the permitted range of 0-1.')
return imsx_messageIdentifier, sourcedId, score, action
def verify_oauth_body_sign(self, request):
"""
Verify grade request from LTI provider using OAuth body signing.
Uses http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html::
This specification extends the OAuth signature to include integrity checks on HTTP request bodies
with content types other than application/x-www-form-urlencoded.
Arguments:
request: DjangoWebobRequest.
Raises:
LTIError if request is incorrect.
"""
client_key, client_secret = self.get_client_key_secret()
headers = {
            'Authorization': unicode(request.headers.get('Authorization')),
'Content-Type': 'application/x-www-form-urlencoded',
}
sha1 = hashlib.sha1()
sha1.update(request.body)
oauth_body_hash = base64.b64encode(sha1.digest())
oauth_params = signature.collect_parameters(headers=headers, exclude_oauth_signature=False)
        oauth_headers = dict(oauth_params)
oauth_signature = oauth_headers.pop('oauth_signature')
mock_request = mock.Mock(
uri=unicode(urllib.unquote(request.url)),
http_method=unicode(request.method),
params=oauth_headers.items(),
signature=oauth_signature
)
if oauth_body_hash != oauth_headers.get('oauth_body_hash'):
raise LTIError("OAuth body hash verification is failed.")
if not signature.verify_hmac_sha1(mock_request, client_secret):
raise LTIError("OAuth signature verification is failed.")
def get_client_key_secret(self):
"""
Obtains client_key and client_secret credentials from current course.
"""
course = self.get_course()
for lti_passport in course.lti_passports:
try:
lti_id, key, secret = [i.strip() for i in lti_passport.split(':')]
except ValueError:
raise LTIError('Could not parse LTI passport: {0!r}. \
Should be "id:key:secret" string.'.format(lti_passport))
if lti_id == self.lti_id.strip():
return key, secret
return '', ''
class LTIDescriptor(LTIFields, MetadataOnlyEditingDescriptor, EmptyDataRawDescriptor):
"""
Descriptor for LTI Xmodule.
"""
module_class = LTIModule
grade_handler = module_attr('grade_handler')
preview_handler = module_attr('preview_handler')
|
ActiveState/code | refs/heads/master | recipes/Python/286165_ilines__universal_newlines_any/recipe-286165.py | 1 | def ilines(source_iterable):
'''yield lines as in universal-newlines from a stream of data blocks'''
tail = ''
for block in source_iterable:
if not block:
continue
if tail.endswith('\015'):
yield tail[:-1] + '\012'
if block.startswith('\012'):
pos = 1
else:
tail = ''
else:
pos = 0
try:
while True: # While we are finding LF.
npos = block.index('\012', pos) + 1
try:
rend = npos - 2
rpos = block.index('\015', pos, rend)
if pos:
yield block[pos : rpos] + '\n'
else:
yield tail + block[:rpos] + '\n'
pos = rpos + 1
while True: # While CRs 'inside' the LF
rpos = block.index('\015', pos, rend)
yield block[pos : rpos] + '\n'
pos = rpos + 1
except ValueError:
pass
if '\015' == block[rend]:
if pos:
yield block[pos : rend] + '\n'
else:
yield tail + block[:rend] + '\n'
elif pos:
yield block[pos : npos]
else:
yield tail + block[:npos]
pos = npos
except ValueError:
pass
        # No LFs left in block. Handle all but a final CR (it may pair with an LF in the next block).
try:
while True:
rpos = block.index('\015', pos, -1)
if pos:
yield block[pos : rpos] + '\n'
else:
yield tail + block[:rpos] + '\n'
pos = rpos + 1
except ValueError:
pass
if pos:
tail = block[pos:]
else:
tail += block
if tail:
yield tail
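# Illustrative usage sketch (hypothetical helper, assuming blocks split lines
# across CR, LF and CRLF boundaries): ilines() yields one normalized line per
# logical line, and an unterminated final line is passed through unchanged.
def _example_ilines_usage():
    blocks = ['alpha\r\nbeta\r', '\ngamma\rdelta', '\nepsilon']
    return list(ilines(blocks))
    # -> ['alpha\n', 'beta\n', 'gamma\n', 'delta\n', 'epsilon']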
|
xtopsoft/grpc | refs/heads/master | src/python/interop/interop/methods.py | 8 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Implementations of interoperability test methods."""
import enum
import json
import os
import threading
from oauth2client import client as oauth2client_client
from grpc.framework.alpha import utilities
from interop import empty_pb2
from interop import messages_pb2
_TIMEOUT = 7
def _empty_call(request, unused_context):
return empty_pb2.Empty()
_CLIENT_EMPTY_CALL = utilities.unary_unary_invocation_description(
empty_pb2.Empty.SerializeToString, empty_pb2.Empty.FromString)
_SERVER_EMPTY_CALL = utilities.unary_unary_service_description(
_empty_call, empty_pb2.Empty.FromString,
empty_pb2.Empty.SerializeToString)
def _unary_call(request, unused_context):
return messages_pb2.SimpleResponse(
payload=messages_pb2.Payload(
type=messages_pb2.COMPRESSABLE,
body=b'\x00' * request.response_size))
_CLIENT_UNARY_CALL = utilities.unary_unary_invocation_description(
messages_pb2.SimpleRequest.SerializeToString,
messages_pb2.SimpleResponse.FromString)
_SERVER_UNARY_CALL = utilities.unary_unary_service_description(
_unary_call, messages_pb2.SimpleRequest.FromString,
messages_pb2.SimpleResponse.SerializeToString)
def _streaming_output_call(request, unused_context):
for response_parameters in request.response_parameters:
yield messages_pb2.StreamingOutputCallResponse(
payload=messages_pb2.Payload(
type=request.response_type,
body=b'\x00' * response_parameters.size))
_CLIENT_STREAMING_OUTPUT_CALL = utilities.unary_stream_invocation_description(
messages_pb2.StreamingOutputCallRequest.SerializeToString,
messages_pb2.StreamingOutputCallResponse.FromString)
_SERVER_STREAMING_OUTPUT_CALL = utilities.unary_stream_service_description(
_streaming_output_call,
messages_pb2.StreamingOutputCallRequest.FromString,
messages_pb2.StreamingOutputCallResponse.SerializeToString)
def _streaming_input_call(request_iterator, unused_context):
aggregate_size = 0
for request in request_iterator:
if request.payload and request.payload.body:
aggregate_size += len(request.payload.body)
return messages_pb2.StreamingInputCallResponse(
aggregated_payload_size=aggregate_size)
_CLIENT_STREAMING_INPUT_CALL = utilities.stream_unary_invocation_description(
messages_pb2.StreamingInputCallRequest.SerializeToString,
messages_pb2.StreamingInputCallResponse.FromString)
_SERVER_STREAMING_INPUT_CALL = utilities.stream_unary_service_description(
_streaming_input_call,
messages_pb2.StreamingInputCallRequest.FromString,
messages_pb2.StreamingInputCallResponse.SerializeToString)
def _full_duplex_call(request_iterator, unused_context):
for request in request_iterator:
yield messages_pb2.StreamingOutputCallResponse(
payload=messages_pb2.Payload(
type=request.payload.type,
body=b'\x00' * request.response_parameters[0].size))
_CLIENT_FULL_DUPLEX_CALL = utilities.stream_stream_invocation_description(
messages_pb2.StreamingOutputCallRequest.SerializeToString,
messages_pb2.StreamingOutputCallResponse.FromString)
_SERVER_FULL_DUPLEX_CALL = utilities.stream_stream_service_description(
_full_duplex_call,
messages_pb2.StreamingOutputCallRequest.FromString,
messages_pb2.StreamingOutputCallResponse.SerializeToString)
# NOTE(nathaniel): Apparently this is the same as the full-duplex call?
_CLIENT_HALF_DUPLEX_CALL = utilities.stream_stream_invocation_description(
messages_pb2.StreamingOutputCallRequest.SerializeToString,
messages_pb2.StreamingOutputCallResponse.FromString)
_SERVER_HALF_DUPLEX_CALL = utilities.stream_stream_service_description(
_full_duplex_call,
messages_pb2.StreamingOutputCallRequest.FromString,
messages_pb2.StreamingOutputCallResponse.SerializeToString)
SERVICE_NAME = 'grpc.testing.TestService'
_EMPTY_CALL_METHOD_NAME = 'EmptyCall'
_UNARY_CALL_METHOD_NAME = 'UnaryCall'
_STREAMING_OUTPUT_CALL_METHOD_NAME = 'StreamingOutputCall'
_STREAMING_INPUT_CALL_METHOD_NAME = 'StreamingInputCall'
_FULL_DUPLEX_CALL_METHOD_NAME = 'FullDuplexCall'
_HALF_DUPLEX_CALL_METHOD_NAME = 'HalfDuplexCall'
CLIENT_METHODS = {
_EMPTY_CALL_METHOD_NAME: _CLIENT_EMPTY_CALL,
_UNARY_CALL_METHOD_NAME: _CLIENT_UNARY_CALL,
_STREAMING_OUTPUT_CALL_METHOD_NAME: _CLIENT_STREAMING_OUTPUT_CALL,
_STREAMING_INPUT_CALL_METHOD_NAME: _CLIENT_STREAMING_INPUT_CALL,
_FULL_DUPLEX_CALL_METHOD_NAME: _CLIENT_FULL_DUPLEX_CALL,
_HALF_DUPLEX_CALL_METHOD_NAME: _CLIENT_HALF_DUPLEX_CALL,
}
SERVER_METHODS = {
_EMPTY_CALL_METHOD_NAME: _SERVER_EMPTY_CALL,
_UNARY_CALL_METHOD_NAME: _SERVER_UNARY_CALL,
_STREAMING_OUTPUT_CALL_METHOD_NAME: _SERVER_STREAMING_OUTPUT_CALL,
_STREAMING_INPUT_CALL_METHOD_NAME: _SERVER_STREAMING_INPUT_CALL,
_FULL_DUPLEX_CALL_METHOD_NAME: _SERVER_FULL_DUPLEX_CALL,
_HALF_DUPLEX_CALL_METHOD_NAME: _SERVER_HALF_DUPLEX_CALL,
}
def _large_unary_common_behavior(stub, fill_username, fill_oauth_scope):
with stub:
request = messages_pb2.SimpleRequest(
response_type=messages_pb2.COMPRESSABLE, response_size=314159,
payload=messages_pb2.Payload(body=b'\x00' * 271828),
fill_username=fill_username, fill_oauth_scope=fill_oauth_scope)
response_future = stub.UnaryCall.async(request, _TIMEOUT)
response = response_future.result()
if response.payload.type is not messages_pb2.COMPRESSABLE:
raise ValueError(
'response payload type is "%s"!' % type(response.payload.type))
if len(response.payload.body) != 314159:
raise ValueError(
'response body of incorrect size %d!' % len(response.payload.body))
return response
def _empty_unary(stub):
with stub:
response = stub.EmptyCall(empty_pb2.Empty(), _TIMEOUT)
if not isinstance(response, empty_pb2.Empty):
raise TypeError(
          'response is of type "%s", not empty_pb2.Empty!' % type(response))
def _large_unary(stub):
_large_unary_common_behavior(stub, False, False)
def _client_streaming(stub):
with stub:
payload_body_sizes = (27182, 8, 1828, 45904)
payloads = (
messages_pb2.Payload(body=b'\x00' * size)
for size in payload_body_sizes)
requests = (
messages_pb2.StreamingInputCallRequest(payload=payload)
for payload in payloads)
response = stub.StreamingInputCall(requests, _TIMEOUT)
if response.aggregated_payload_size != 74922:
raise ValueError(
'incorrect size %d!' % response.aggregated_payload_size)
def _server_streaming(stub):
sizes = (31415, 9, 2653, 58979)
with stub:
request = messages_pb2.StreamingOutputCallRequest(
response_type=messages_pb2.COMPRESSABLE,
response_parameters=(
messages_pb2.ResponseParameters(size=sizes[0]),
messages_pb2.ResponseParameters(size=sizes[1]),
messages_pb2.ResponseParameters(size=sizes[2]),
messages_pb2.ResponseParameters(size=sizes[3]),
))
response_iterator = stub.StreamingOutputCall(request, _TIMEOUT)
for index, response in enumerate(response_iterator):
if response.payload.type != messages_pb2.COMPRESSABLE:
raise ValueError(
'response body of invalid type %s!' % response.payload.type)
if len(response.payload.body) != sizes[index]:
raise ValueError(
'response body of invalid size %d!' % len(response.payload.body))
def _cancel_after_begin(stub):
with stub:
sizes = (27182, 8, 1828, 45904)
payloads = [messages_pb2.Payload(body=b'\x00' * size) for size in sizes]
requests = [messages_pb2.StreamingInputCallRequest(payload=payload)
for payload in payloads]
responses = stub.StreamingInputCall.async(requests, _TIMEOUT)
responses.cancel()
if not responses.cancelled():
raise ValueError('expected call to be cancelled')
class _Pipe(object):
def __init__(self):
self._condition = threading.Condition()
self._values = []
self._open = True
def __iter__(self):
return self
def next(self):
with self._condition:
while not self._values and self._open:
self._condition.wait()
if self._values:
return self._values.pop(0)
else:
raise StopIteration()
def add(self, value):
with self._condition:
self._values.append(value)
self._condition.notify()
def close(self):
with self._condition:
self._open = False
self._condition.notify()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
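# Illustrative sketch (hypothetical helper): a _Pipe serves as a blocking
# request iterator -- values passed to add() are handed to next() callers until
# close() (or leaving the `with` block) ends iteration with StopIteration.
def _example_pipe_usage():
  with _Pipe() as pipe:
    pipe.add('request')
    return next(iter(pipe))  # -> 'request'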
def _ping_pong(stub):
request_response_sizes = (31415, 9, 2653, 58979)
request_payload_sizes = (27182, 8, 1828, 45904)
with stub, _Pipe() as pipe:
response_iterator = stub.FullDuplexCall(pipe, _TIMEOUT)
print 'Starting ping-pong with response iterator %s' % response_iterator
for response_size, payload_size in zip(
request_response_sizes, request_payload_sizes):
request = messages_pb2.StreamingOutputCallRequest(
response_type=messages_pb2.COMPRESSABLE,
response_parameters=(messages_pb2.ResponseParameters(
size=response_size),),
payload=messages_pb2.Payload(body=b'\x00' * payload_size))
pipe.add(request)
response = next(response_iterator)
if response.payload.type != messages_pb2.COMPRESSABLE:
raise ValueError(
'response body of invalid type %s!' % response.payload.type)
if len(response.payload.body) != response_size:
raise ValueError(
'response body of invalid size %d!' % len(response.payload.body))
def _cancel_after_first_response(stub):
request_response_sizes = (31415, 9, 2653, 58979)
request_payload_sizes = (27182, 8, 1828, 45904)
with stub, _Pipe() as pipe:
response_iterator = stub.FullDuplexCall(pipe, _TIMEOUT)
response_size = request_response_sizes[0]
payload_size = request_payload_sizes[0]
request = messages_pb2.StreamingOutputCallRequest(
response_type=messages_pb2.COMPRESSABLE,
response_parameters=(messages_pb2.ResponseParameters(
size=response_size),),
payload=messages_pb2.Payload(body=b'\x00' * payload_size))
pipe.add(request)
response = next(response_iterator)
# We test the contents of `response` in the Ping Pong test - don't check
# them here.
response_iterator.cancel()
try:
next(response_iterator)
except Exception:
pass
else:
raise ValueError('expected call to be cancelled')
def _compute_engine_creds(stub, args):
response = _large_unary_common_behavior(stub, True, True)
if args.default_service_account != response.username:
raise ValueError(
'expected username %s, got %s' % (args.default_service_account,
response.username))
def _service_account_creds(stub, args):
json_key_filename = os.environ[
oauth2client_client.GOOGLE_APPLICATION_CREDENTIALS]
wanted_email = json.load(open(json_key_filename, 'rb'))['client_email']
response = _large_unary_common_behavior(stub, True, True)
if wanted_email != response.username:
raise ValueError(
'expected username %s, got %s' % (wanted_email, response.username))
if args.oauth_scope.find(response.oauth_scope) == -1:
raise ValueError(
'expected to find oauth scope "%s" in received "%s"' %
(response.oauth_scope, args.oauth_scope))
@enum.unique
class TestCase(enum.Enum):
EMPTY_UNARY = 'empty_unary'
LARGE_UNARY = 'large_unary'
SERVER_STREAMING = 'server_streaming'
CLIENT_STREAMING = 'client_streaming'
PING_PONG = 'ping_pong'
CANCEL_AFTER_BEGIN = 'cancel_after_begin'
CANCEL_AFTER_FIRST_RESPONSE = 'cancel_after_first_response'
COMPUTE_ENGINE_CREDS = 'compute_engine_creds'
SERVICE_ACCOUNT_CREDS = 'service_account_creds'
def test_interoperability(self, stub, args):
if self is TestCase.EMPTY_UNARY:
_empty_unary(stub)
elif self is TestCase.LARGE_UNARY:
_large_unary(stub)
elif self is TestCase.SERVER_STREAMING:
_server_streaming(stub)
elif self is TestCase.CLIENT_STREAMING:
_client_streaming(stub)
elif self is TestCase.PING_PONG:
_ping_pong(stub)
elif self is TestCase.CANCEL_AFTER_BEGIN:
_cancel_after_begin(stub)
elif self is TestCase.CANCEL_AFTER_FIRST_RESPONSE:
_cancel_after_first_response(stub)
elif self is TestCase.COMPUTE_ENGINE_CREDS:
_compute_engine_creds(stub, args)
elif self is TestCase.SERVICE_ACCOUNT_CREDS:
_service_account_creds(stub, args)
else:
raise NotImplementedError('Test case "%s" not implemented!' % self.name)
|
apache/incubator-mxnet | refs/heads/master | tests/python/unittest/test_gluon_rnn.py | 1 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
from mxnet import gluon, np
import numpy as _np
import copy
from functools import partial
from numpy.testing import assert_allclose
import pytest
from mxnet.test_utils import almost_equal, assert_almost_equal, default_context
from common import assert_raises_cudnn_not_satisfied, retry
def check_rnn_states(fused_states, stack_states, num_layers, bidirectional=False, is_lstm=True):
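    """Check that the states returned by a fused RNN layer match the states
    collected from an equivalent stack of RNN cells."""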
directions = 2 if bidirectional else 1
assert len(stack_states) / len(fused_states) == num_layers * directions
fused_states = [state.asnumpy() for state in fused_states]
stack_states = [_np.expand_dims(state.asnumpy(), axis=0) for state in stack_states]
if is_lstm:
stack_states_h = stack_states[0::2]
stack_states_c = stack_states[1::2]
stack_states = [_np.concatenate(stack_states_h, axis=0), _np.concatenate(stack_states_c, axis=0)]
else:
stack_states = [_np.concatenate(stack_states, axis=0)]
for f, s in zip(fused_states, stack_states):
assert f.shape == s.shape
assert_almost_equal(f, s, atol=1e-4, rtol=1e-4)
@mx.util.use_np
def test_rnn():
cell = gluon.rnn.RNNCell(100)
inputs = [mx.np.ones((10,50)) for i in range(3)]
cell.infer_shape(0, inputs[0], False)
cell.initialize()
outputs, _ = cell.unroll(3, inputs)
assert sorted(cell.collect_params().keys()) == ['h2h_bias', 'h2h_weight',
'i2h_bias', 'i2h_weight']
outs = [o.shape for o in outputs]
assert outs == [(10, 100), (10, 100), (10, 100)]
@mx.util.use_np
def test_lstm():
cell = gluon.rnn.LSTMCell(100)
inputs = [mx.np.ones((10,50)) for i in range(3)]
cell.infer_shape(0, inputs[0], False)
cell.initialize()
outputs, _ = cell.unroll(3, inputs)
assert sorted(cell.collect_params().keys()) == ['h2h_bias', 'h2h_weight', 'i2h_bias', 'i2h_weight']
outs = [o.shape for o in outputs]
assert outs == [(10, 100), (10, 100), (10, 100)]
@assert_raises_cudnn_not_satisfied(min_version='7.2.1')
@pytest.mark.serial
def test_lstmp():
hidden_size, projection_size = 512, 256
rtol, atol = 1e-4, 1e-4
batch_size, seq_len = 5, 3
input_size = 128
lstm_input = mx.np.random.uniform(size=(seq_len, batch_size, input_size))
# ==== Unidirectional Layer ====
for num_layers in [1, 3]:
fused_layer = gluon.rnn.LSTM(hidden_size, projection_size=projection_size,
num_layers=num_layers, layout='TNC', bidirectional=False)
stack_layer = mx.gluon.rnn.HybridSequentialRNNCell()
for i in range(num_layers):
stack_layer.add(gluon.rnn.LSTMPCell(hidden_size,
projection_size=projection_size))
fused_layer.initialize()
fused_begin_state = fused_layer.begin_state(batch_size)
stack_begin_state = stack_layer.begin_state(batch_size=batch_size)
fused_layer.infer_shape(lstm_input, fused_begin_state)
stack_layer.infer_shape(0, lstm_input, False)
stack_layer.initialize()
fused_layer_params = fused_layer.collect_params()
stack_layer_params = stack_layer.collect_params()
for name, value in fused_layer_params.items():
w = mx.np.random.uniform(size=value.shape)
value.set_data(w.copy())
stack_layer_params[name[1:].replace('_', '.', 1)].set_data(w.copy())
fused_output, fused_states = fused_layer(lstm_input.copy(), fused_begin_state)
stack_output, stack_states = stack_layer.unroll(seq_len, lstm_input.copy(), begin_state=stack_begin_state,
layout='TNC',
merge_outputs=True)
assert_almost_equal(fused_output.asnumpy(), stack_output.asnumpy(), rtol=rtol, atol=atol)
check_rnn_states(fused_states, stack_states, num_layers, False)
# ==== Bidirectional Layer ====
for num_layers in [1, 3]:
fused_layer = gluon.rnn.LSTM(hidden_size, projection_size=projection_size,
num_layers=num_layers, layout='TNC', bidirectional=True)
stack_layer = mx.gluon.rnn.HybridSequentialRNNCell()
for i in range(num_layers):
stack_layer.add(
gluon.rnn.BidirectionalCell(gluon.rnn.LSTMPCell(hidden_size,
projection_size=projection_size),
gluon.rnn.LSTMPCell(hidden_size,
projection_size=projection_size)))
fused_layer.initialize()
stack_layer.initialize()
fused_begin_state = fused_layer.begin_state(batch_size)
stack_begin_state = stack_layer.begin_state(batch_size=batch_size)
fused_layer.infer_shape(lstm_input, fused_begin_state)
stack_layer.infer_shape(0, lstm_input, False)
stack_layer.initialize()
fused_layer_params = fused_layer.collect_params()
stack_layer_params = stack_layer.collect_params()
for name, value in fused_layer_params.items():
w = mx.np.random.uniform(size=value.shape)
value.set_data(w.copy())
cur = name.split("_")[0]
stack_layer_params["{}.{}_cell.{}".format(cur[1:], name[0], name[len(cur)+1:])].set_data(w.copy())
fused_output, fused_states = fused_layer(lstm_input.copy(), fused_begin_state)
stack_output, stack_states = stack_layer.unroll(seq_len, lstm_input.copy(), begin_state=stack_begin_state,
layout='TNC',
merge_outputs=True)
assert_almost_equal(fused_output.asnumpy(), stack_output.asnumpy(), rtol=rtol, atol=atol)
check_rnn_states(fused_states, stack_states, num_layers, True)
@mx.util.use_np
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_lstm_cpu_inference():
# should behave the same as lstm cell
EXPECTED_LSTM_OUTPUT = np.array([[[0.72045636, 0.72045636, 0.95215213, 0.95215213],
[0.72045636, 0.72045636, 0.95215213, 0.95215213]],
[[0.95215213, 0.95215213, 0.72045636, 0.72045636],
[0.95215213, 0.95215213, 0.72045636, 0.72045636]]])
x = mx.np.ones(shape=(2, 2, 2))
model = mx.gluon.rnn.LSTM(2, num_layers=6, bidirectional=True)
model.initialize(mx.init.One())
y = model(x).asnumpy()
mx.test_utils.assert_almost_equal(y, EXPECTED_LSTM_OUTPUT,
rtol=1e-3, atol=1e-5)
@mx.util.use_np
def test_gru():
cell = gluon.rnn.GRUCell(100, activation='relu', recurrent_activation='tanh')
inputs = [mx.np.ones((10,50)) for i in range(3)]
cell.infer_shape(0, inputs[0], False)
cell.initialize()
outputs, _ = cell.unroll(3, inputs)
assert sorted(cell.collect_params().keys()) == ['h2h_bias', 'h2h_weight', 'i2h_bias', 'i2h_weight']
outs = [o.shape for o in outputs]
assert outs == [(10, 100), (10, 100), (10, 100)]
@mx.util.use_np
@pytest.mark.serial
def test_residual():
cell = gluon.rnn.ResidualCell(gluon.rnn.GRUCell(50))
inputs = [mx.np.ones((10,50)) for i in range(2)]
cell.infer_shape(0, inputs[0], False)
cell.initialize()
outputs, _ = cell.unroll(2, inputs)
params = cell.collect_params()
assert sorted(params.keys()) == \
['base_cell.h2h_bias', 'base_cell.h2h_weight', 'base_cell.i2h_bias', 'base_cell.i2h_weight']
outs = [o.shape for o in outputs]
assert outs == [(10, 50), (10, 50)]
@mx.util.use_np
@pytest.mark.serial
def test_residual_bidirectional():
cell = gluon.rnn.ResidualCell(
gluon.rnn.BidirectionalCell(
gluon.rnn.GRUCell(25),
gluon.rnn.GRUCell(25)))
inputs = [mx.np.ones((10,50)) for i in range(2)]
cell.infer_shape(0, inputs[0], True)
cell.initialize()
outputs, _ = cell.unroll(2, inputs, merge_outputs=False)
params = cell.collect_params()
assert sorted(params.keys()) == \
['base_cell.l_cell.h2h_bias', 'base_cell.l_cell.h2h_weight',
'base_cell.l_cell.i2h_bias', 'base_cell.l_cell.i2h_weight',
'base_cell.r_cell.h2h_bias', 'base_cell.r_cell.h2h_weight',
'base_cell.r_cell.i2h_bias', 'base_cell.r_cell.i2h_weight']
outs = [o.shape for o in outputs]
assert outs == [(10, 50), (10, 50)]
@mx.util.use_np
def test_stack():
cell = gluon.rnn.SequentialRNNCell()
for i in range(5):
if i == 1:
cell.add(gluon.rnn.ResidualCell(gluon.rnn.LSTMCell(100)))
else:
cell.add(gluon.rnn.LSTMCell(100))
inputs = [mx.np.ones((10,50)) for i in range(3)]
cell.infer_shape(0, inputs[0], False)
cell.initialize()
outputs, _ = cell.unroll(3, inputs)
keys = sorted(cell.collect_params().keys())
for i in range(5):
if i==1:
continue
assert '%d.h2h_weight'%i in keys
assert '%d.h2h_bias'%i in keys
assert '%d.i2h_weight'%i in keys
assert '%d.i2h_bias'%i in keys
assert '1.base_cell.h2h_weight' in keys
assert '1.base_cell.h2h_bias' in keys
assert '1.base_cell.i2h_weight' in keys
assert '1.base_cell.i2h_bias' in keys
outs = [o.shape for o in outputs]
assert outs == [(10, 100), (10, 100), (10, 100)]
@mx.util.use_np
@pytest.mark.serial
def test_hybridstack():
cell = gluon.rnn.HybridSequentialRNNCell()
for i in range(5):
if i == 1:
cell.add(gluon.rnn.ResidualCell(gluon.rnn.LSTMCell(100)))
else:
cell.add(gluon.rnn.LSTMCell(100))
inputs = [mx.np.ones((10,50)) for i in range(3)]
cell.infer_shape(0, inputs[0], False)
cell.initialize()
outputs, _ = cell.unroll(3, inputs)
keys = sorted(cell.collect_params().keys())
for i in range(5):
if i==1:
continue
assert '%d.h2h_weight'%i in keys
assert '%d.h2h_bias'%i in keys
assert '%d.i2h_weight'%i in keys
assert '%d.i2h_bias'%i in keys
assert '1.base_cell.h2h_weight' in keys
assert '1.base_cell.h2h_bias' in keys
assert '1.base_cell.i2h_weight' in keys
assert '1.base_cell.i2h_bias' in keys
outs = [o.shape for o in outputs]
assert outs == [(10, 100), (10, 100), (10, 100)]
    # Test HybridSequentialRNNCell nested in nn.HybridBlock; SequentialRNNCell will fail in this case
class BidirectionalOfSequential(gluon.HybridBlock):
def __init__(self):
super(BidirectionalOfSequential, self).__init__()
cell0 = gluon.rnn.HybridSequentialRNNCell()
cell0.add(gluon.rnn.LSTMCell(100))
cell0.add(gluon.rnn.LSTMCell(100))
cell1 = gluon.rnn.HybridSequentialRNNCell()
cell1.add(gluon.rnn.LSTMCell(100))
cell1.add(gluon.rnn.LSTMCell(100))
self.rnncell = gluon.rnn.BidirectionalCell(cell0, cell1)
def forward(self, x):
return self.rnncell.unroll(3, x, layout="NTC", merge_outputs=True)
def infer_shape(self, x, *args):
self.rnncell.infer_shape(0, x, True)
x = mx.np.random.uniform(size=(10, 3, 100))
net = BidirectionalOfSequential()
net.infer_shape(x)
net.initialize()
outs, _ = net(x)
assert outs.shape == (10, 3, 200)
@mx.util.use_np
def test_bidirectional():
cell = gluon.rnn.BidirectionalCell(
gluon.rnn.LSTMCell(100),
gluon.rnn.LSTMCell(100))
inputs = [mx.np.ones((10,50)) for i in range(3)]
cell.infer_shape(0, inputs[0], False)
cell.initialize()
outputs, _ = cell.unroll(3, inputs)
outs = [o.shape for o in outputs]
assert outs == [(10, 200), (10, 200), (10, 200)]
@mx.util.use_np
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
@pytest.mark.serial
def test_layer_bidirectional():
class RefBiLSTM(gluon.Block):
def __init__(self, size, **kwargs):
super(RefBiLSTM, self).__init__(**kwargs)
self._lstm_fwd = gluon.rnn.LSTM(size, bidirectional=False)
self._lstm_bwd = gluon.rnn.LSTM(size, bidirectional=False)
def forward(self, inpt):
fwd = self._lstm_fwd(inpt)
bwd_inpt = np.flip(inpt, 0)
bwd = self._lstm_bwd(bwd_inpt)
bwd = np.flip(bwd, 0)
return np.concatenate([fwd, bwd], axis=2)
size = 7
in_size = 5
weights = {}
for d in ['l', 'r']:
weights['{}0_i2h_weight'.format(d)] = mx.np.random.uniform(size=(size*4, in_size))
weights['{}0_h2h_weight'.format(d)] = mx.np.random.uniform(size=(size*4, size))
weights['{}0_i2h_bias'.format(d)] = mx.np.random.uniform(size=(size*4,))
weights['{}0_h2h_bias'.format(d)] = mx.np.random.uniform(size=(size*4,))
net = gluon.rnn.LSTM(size, bidirectional=True)
ref_net = RefBiLSTM(size)
net.initialize()
ref_net.initialize()
net_params = net.collect_params()
ref_net_params = ref_net.collect_params()
for k in weights:
net_params[k].set_data(weights[k])
ref_net_params[k.replace('l0', '_lstm_fwd.l0').replace('r0', '_lstm_bwd.l0')].set_data(weights[k])
data = mx.np.random.uniform(size=(11, 10, in_size))
assert_allclose(net(data).asnumpy(), ref_net(data).asnumpy(), rtol=1e-04, atol=1e-02)
def test_zoneout():
cell = gluon.rnn.ZoneoutCell(gluon.rnn.RNNCell(100), zoneout_outputs=0.5,
zoneout_states=0.5)
inputs = [mx.np.ones((10,50)) for i in range(3)]
cell.infer_shape(0, inputs[0], False)
cell.initialize()
outputs, _ = cell.unroll(3, inputs)
outs = [o.shape for o in outputs]
assert outs == [(10, 100), (10, 100), (10, 100)]
@pytest.mark.serial
def test_unroll_layout():
cell = gluon.rnn.HybridSequentialRNNCell()
for i in range(5):
if i == 1:
cell.add(gluon.rnn.ResidualCell(gluon.rnn.LSTMCell(100)))
else:
cell.add(gluon.rnn.LSTMCell(100))
inputs = [mx.np.random.uniform(size=(10,50)) for _ in range(3)]
cell.infer_shape(0, inputs[0], False)
cell.initialize()
outputs, _ = cell.unroll(3, inputs, layout='TNC')
assert outputs[0].shape == (10, 100)
assert outputs[1].shape == (10, 100)
assert outputs[2].shape == (10, 100)
outputs, _ = cell.unroll(3, inputs, layout='NTC')
assert outputs[0].shape == (10, 100)
assert outputs[1].shape == (10, 100)
assert outputs[2].shape == (10, 100)
def check_rnn_forward_backward(layer, merged_inputs, hybridize, merge_outputs, deterministic):
input_size = 5
if merged_inputs:
inputs = mx.np.ones((8, 3, 5))
inputs.attach_grad()
else:
inputs = [mx.np.ones((8, 5)) for _ in range(3)]
for x in inputs:
x.attach_grad()
if hybridize:
layer.hybridize()
layer.initialize()
with mx.autograd.record():
out = layer.unroll(3, inputs, merge_outputs=merge_outputs)[0]
mx.autograd.backward(out)
if hasattr(layer, 'i2h_weight'):
assert layer.i2h_weight.shape[1] == input_size, (layer.i2h_weight.shape[1], input_size)
if merge_outputs:
np_out = out.asnumpy()
else:
np_out = _np.stack([x.asnumpy() for x in out], axis=1)
if merged_inputs:
np_dx = inputs.grad.asnumpy()
else:
np_dx = _np.stack([x.grad.asnumpy() for x in inputs], axis=1)
with mx.autograd.record():
out = layer.unroll(3, inputs, merge_outputs=not merge_outputs)[0]
mx.autograd.backward(out)
if merged_inputs:
input_grads = inputs.grad.asnumpy()
else:
input_grads = _np.stack([x.grad.asnumpy() for x in inputs], axis=1)
if deterministic:
if not merge_outputs:
ref_np_out = out.asnumpy()
else:
ref_np_out = _np.stack([x.asnumpy() for x in out], axis=1)
mx.test_utils.assert_almost_equal(np_out, ref_np_out, rtol=1e-3, atol=1e-5)
mx.test_utils.assert_almost_equal(np_dx, input_grads, rtol=1e-3, atol=1e-5)
@retry(3)
@pytest.mark.parametrize('layer,determinism', [
(gluon.rnn.LSTMCell(10, input_size=5), True),
(gluon.rnn.RNNCell(10, input_size=5), True),
(gluon.rnn.GRUCell(10, input_size=5), True),
(gluon.rnn.BidirectionalCell(
gluon.rnn.LSTMCell(10, input_size=5),
gluon.rnn.LSTMCell(10, input_size=5)
), True),
(gluon.rnn.DropoutCell(0.5), False),
])
@pytest.mark.parametrize('merged_inputs', [True, False])
@pytest.mark.parametrize('hybridize', [True, False])
@pytest.mark.parametrize('merge_outputs', [True, False, None])
@pytest.mark.skip(reason='https://github.com/apache/incubator-mxnet/issues/18225')
def test_rnn_forward_backward(layer, merged_inputs, hybridize, merge_outputs, determinism):
check_rnn_forward_backward(layer, merged_inputs, hybridize, merge_outputs, determinism)
@pytest.mark.parametrize('seq_rnn_type', [
gluon.rnn.SequentialRNNCell,
gluon.rnn.HybridSequentialRNNCell
])
@pytest.mark.parametrize('determinism', [True, False])
@pytest.mark.parametrize('merged_inputs', [True, False])
@pytest.mark.parametrize('hybridize', [True, False])
@pytest.mark.parametrize('merge_outputs', [True, False, None])
@pytest.mark.skip(reason='https://github.com/apache/incubator-mxnet/issues/18291')
def test_sequential_rnn_cells(seq_rnn_type, determinism, merged_inputs, hybridize, merge_outputs):
net = gluon.rnn.SequentialRNNCell()
net.add(gluon.rnn.LSTMCell(10, input_size=5))
net.add(gluon.rnn.RNNCell(10, input_size=10))
net.add(gluon.rnn.GRUCell(10, input_size=10))
if not determinism:
net.add(gluon.rnn.DropoutCell(0.5))
check_rnn_forward_backward(net, merged_inputs, hybridize, merge_outputs, determinism)
@mx.util.use_np
def test_rnn_cells_export_import():
class RNNLayer(gluon.HybridBlock):
def __init__(self):
super(RNNLayer, self).__init__()
self.cell = gluon.rnn.RNNCell(hidden_size=1)
def forward(self, seq):
outputs, state = self.cell.unroll(inputs=seq, length=2, merge_outputs=True)
return outputs
def infer_shape(self, input):
self.cell.infer_shape(0, input, False)
class LSTMLayer(gluon.HybridBlock):
def __init__(self):
super(LSTMLayer, self).__init__()
self.cell = gluon.rnn.LSTMCell(hidden_size=1)
def forward(self, seq):
outputs, state = self.cell.unroll(inputs=seq, length=2, merge_outputs=True)
return outputs
def infer_shape(self, input):
self.cell.infer_shape(0, input, False)
class GRULayer(gluon.HybridBlock):
def __init__(self):
super(GRULayer, self).__init__()
self.cell = gluon.rnn.GRUCell(hidden_size=1)
def forward(self, seq):
outputs, state = self.cell.unroll(inputs=seq, length=2, merge_outputs=True)
return outputs
def infer_shape(self, input):
self.cell.infer_shape(0, input, False)
for hybrid in [RNNLayer(), LSTMLayer(), GRULayer()]:
input = mx.np.ones(shape=(1, 2, 1), ctx=mx.context.current_context())
hybrid.infer_shape(input)
hybrid.initialize()
hybrid.hybridize()
output1 = hybrid(input)
hybrid.export(path="./model", epoch=0)
symbol = mx.gluon.SymbolBlock.imports(
symbol_file="./model-symbol.json",
input_names=["data"],
param_file="./model-0000.params",
ctx=mx.context.current_context()
)
output2 = symbol(input)
assert_almost_equal(output1.asnumpy(), output2.asnumpy())
def check_rnn_layer_forward(layer, inputs, states=None, run_only=False, ctx=mx.cpu()):
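    """Run forward/backward both imperatively and hybridized and, unless
    run_only is set, check that the outputs and input gradients agree."""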
layer.initialize(ctx=ctx)
inputs = inputs.as_in_context(ctx)
inputs.attach_grad()
if states is not None:
if isinstance(states, (list, tuple)):
states = [s.as_in_context(ctx) for s in states]
else:
states = states.as_in_context(ctx)
with mx.autograd.record():
if states is None:
out = layer(inputs)
else:
out = layer(inputs, states)
if states is not None:
assert isinstance(out, (list, tuple)) and len(out) == 2
out = out[0]
else:
assert isinstance(out, mx.np.ndarray)
out.backward()
np_out = out.asnumpy()
np_dx = inputs.grad.asnumpy()
layer.hybridize()
with mx.autograd.record():
if states is not None:
out = layer(inputs, states)
assert isinstance(out, (list, tuple)) and len(out) == 2
out = out[0]
else:
out = layer(inputs)
assert isinstance(out, mx.np.ndarray)
out.backward()
if states is not None:
layer(inputs, states) # test is_training = false
else:
layer(inputs)
if not run_only:
mx.test_utils.assert_almost_equal(np_out, out.asnumpy(), rtol=1e-3, atol=1e-5)
mx.test_utils.assert_almost_equal(np_dx, inputs.grad.asnumpy(), rtol=1e-3, atol=1e-5)
@mx.util.use_np
def run_rnn_layers(dtype, dtype2, ctx=mx.cpu()):
check_rnn_layer_forward(gluon.rnn.RNN(10, 2, dtype=dtype), mx.np.ones((8, 3, 20), dtype=dtype), ctx=ctx)
check_rnn_layer_forward(gluon.rnn.RNN(10, 2, dtype=dtype, bidirectional=True), mx.np.ones((8, 3, 20), dtype=dtype), mx.np.ones((4, 3, 10), dtype=dtype), ctx=ctx)
check_rnn_layer_forward(gluon.rnn.LSTM(10, 2,dtype=dtype), mx.np.ones((8, 3, 20), dtype=dtype), ctx=ctx)
check_rnn_layer_forward(gluon.rnn.LSTM(10, 2,dtype=dtype, bidirectional=True), mx.np.ones((8, 3, 20), dtype=dtype), [mx.np.ones((4, 3, 10), dtype=dtype), mx.np.ones((4, 3, 10), dtype=dtype)],ctx=ctx)
check_rnn_layer_forward(gluon.rnn.GRU(10, 2, dtype=dtype, ), mx.np.ones((8, 3, 20), dtype=dtype),ctx=ctx)
check_rnn_layer_forward(gluon.rnn.GRU(10, 2, dtype=dtype, bidirectional=True), mx.np.ones((8, 3, 20), dtype=dtype), mx.np.ones((4, 3, 10), dtype=dtype),ctx=ctx)
check_rnn_layer_forward(gluon.rnn.RNN(10, 2, dtype=dtype, dropout=0.5), mx.np.ones((8, 3, 20), dtype=dtype),
run_only=True, ctx=ctx)
check_rnn_layer_forward(gluon.rnn.RNN(10, 2, bidirectional=True, dropout=0.5, dtype=dtype),
mx.np.ones((8, 3, 20), dtype=dtype), mx.np.ones((4, 3, 10), dtype=dtype), run_only=True, ctx=ctx)
check_rnn_layer_forward(gluon.rnn.LSTM(10, 2, dropout=0.5, dtype=dtype), mx.np.ones((8, 3, 20), dtype=dtype),
run_only=True, ctx=ctx)
check_rnn_layer_forward(gluon.rnn.LSTM(10, 2, bidirectional=True, dropout=0.5, dtype=dtype),
mx.np.ones((8, 3, 20), dtype=dtype),
[mx.np.ones((4, 3, 10), dtype=dtype), mx.np.ones((4, 3, 10), dtype=dtype)], run_only=True, ctx=ctx)
check_rnn_layer_forward(gluon.rnn.GRU(10, 2, dropout=0.5, dtype=dtype), mx.np.ones((8, 3, 20), dtype=dtype),
run_only=True, ctx=ctx)
check_rnn_layer_forward(gluon.rnn.GRU(10, 2, bidirectional=True, dropout=0.5, dtype=dtype),
mx.np.ones((8, 3, 20), dtype=dtype), mx.np.ones((4, 3, 10), dtype=dtype), run_only=True, ctx=ctx)
net = gluon.nn.Sequential()
net.add(gluon.rnn.LSTM(10, bidirectional=True, dtype=dtype2))
net.add(gluon.nn.BatchNorm(axis=2))
net.add(gluon.nn.Flatten())
net.add(gluon.nn.Dense(3, activation='relu'))
net.initialize(ctx=ctx)
net.cast(dtype)
with mx.autograd.record():
out = net(mx.np.ones((2, 3, 10), dtype=dtype, ctx=ctx))
out.backward()
out = out.asnumpy()
net2 = gluon.nn.HybridSequential()
net2.add(gluon.rnn.LSTM(10, bidirectional=True, dtype=dtype2))
net2.add(gluon.nn.BatchNorm(axis=2))
net2.add(gluon.nn.Flatten())
net2.add(gluon.nn.Dense(3, activation='relu'))
net2.hybridize()
net2.initialize(ctx=ctx)
net2.cast(dtype)
with mx.autograd.record():
out = net2(mx.np.ones((2, 3, 10), dtype=dtype, ctx=ctx))
out.backward()
out = out.asnumpy()
net3 = gluon.nn.HybridSequential()
net3.add(gluon.rnn.LSTM(10, bidirectional=True, dtype=dtype))
net3.add(gluon.nn.BatchNorm(axis=2))
net3.add(gluon.nn.Flatten())
net3.add(gluon.nn.Dense(3, activation='relu'))
net3.hybridize()
net3.initialize(ctx=ctx)
net3.cast(dtype2)
with mx.autograd.record():
out = net3(mx.np.ones((2, 3, 10), dtype=dtype2, ctx=ctx))
out.backward()
out = out.asnumpy()
@pytest.mark.serial
def test_rnn_layers_fp32():
run_rnn_layers('float32', 'float32')
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
@pytest.mark.skipif(mx.context.num_gpus() == 0, reason="RNN FP16 only implemented for GPU for now")
@pytest.mark.serial
def test_rnn_layers_fp16():
run_rnn_layers('float16', 'float32', mx.gpu())
def check_rnn_consistency(fused_layer, stack_layer, loss, input_size, hidden_size, bidirectional=False, rtol=1e-2, atol=1e-4):
x = mx.np.random.normal(size=(1, 5, input_size))
fused_begin_state = fused_layer.begin_state(1)
stack_states = stack_layer.begin_state(batch_size=1)
fused_layer.infer_shape(x, fused_begin_state)
fused_layer_params = fused_layer.collect_params()
stack_layer.infer_shape(0, x, False)
stack_layer.initialize()
stack_layer_params = stack_layer.collect_params()
for name, value in fused_layer_params.items():
if 'weight' in name:
w = mx.np.zeros(shape=value.shape)
else:
w = mx.np.random.normal(size=value.shape)
value.set_data(w.copy())
cur = name.split('_')[0]
num = cur[1:]
stack_name = ('{}.{}_cell.'.format(num, name[0]) if bidirectional else num + '.' ) + name[len(cur)+1:]
stack_layer_params[stack_name].set_data(w.copy())
fx = x.copy()
sx = x.copy()
y = mx.np.random.uniform(size=(1, 5, hidden_size * 2 if bidirectional else hidden_size))
fx.attach_grad()
with mx.autograd.record():
fused_out, fused_states = fused_layer(fx, fused_begin_state)
l = loss(fused_out, y).mean()
l.backward()
mx.npx.waitall()
fused_grads = dict([(name, p.grad()) for name, p in fused_layer.collect_params().items()])
fused_input_grad = fx.grad.asnumpy()
sx.attach_grad()
with mx.autograd.record():
stack_out, stack_states = stack_layer.unroll(5, sx, begin_state=stack_states, merge_outputs=True)
l = loss(stack_out, y).mean()
l.backward()
mx.npx.waitall()
stack_grads = dict([(name, p.grad()) for name, p in stack_layer.collect_params().items()])
stack_input_grad = sx.grad.asnumpy()
assert_allclose(fused_out.asnumpy(), stack_out.asnumpy(), rtol=rtol, atol=atol)
assert_allclose(fused_input_grad, stack_input_grad, rtol=rtol, atol=atol)
for name, value in fused_grads.items():
cur = name.split('_')[0]
num = cur[1:]
stack_name = ('{}.{}_cell.'.format(num, name[0]) if bidirectional else num + '.' ) + name[len(cur)+1:]
assert_allclose(value.asnumpy(), stack_grads[stack_name].asnumpy(), rtol=rtol, atol=atol)
num_layers = fused_begin_state[0].shape[0] // (2 if bidirectional else 1)
check_rnn_states(fused_states, stack_states, num_layers, bidirectional, len(fused_begin_state) == 2)
def create_op_by_mode(mode):
if mode == 'lstm':
fused_op = gluon.rnn.LSTM
stack_op = gluon.rnn.LSTMCell
recurrent_block_prefix = 'lstm0_'
elif mode == 'gru':
fused_op = gluon.rnn.GRU
stack_op = gluon.rnn.GRUCell
recurrent_block_prefix = 'gru0_'
elif mode == 'rnn_relu':
fused_op = partial(gluon.rnn.RNN, activation='relu')
stack_op = partial(gluon.rnn.RNNCell, activation='relu')
recurrent_block_prefix = 'rnn0_'
elif mode == 'rnn_tanh':
fused_op = partial(gluon.rnn.RNN, activation='tanh')
stack_op = partial(gluon.rnn.RNNCell, activation='tanh')
recurrent_block_prefix = 'rnn0_'
return fused_op, stack_op, recurrent_block_prefix
def check_rnn_unidir_layer_gradients(mode, input_size, hidden_size, num_layers, loss):
fused_op, stack_op, recurrent_block_prefix = create_op_by_mode(mode)
fused_layer = fused_op(hidden_size, num_layers=num_layers, layout='NTC', bidirectional=False)
fused_layer.initialize()
stack_layer = mx.gluon.rnn.HybridSequentialRNNCell()
for n in range(num_layers):
stack_layer.add(stack_op(hidden_size))
stack_layer.initialize()
check_rnn_consistency(fused_layer, stack_layer, loss, input_size, hidden_size)
def check_rnn_bidir_layer_gradients(mode, input_size, hidden_size, num_layers, loss):
fused_op, stack_op, recurrent_block_prefix = create_op_by_mode(mode)
fused_layer = fused_op(hidden_size, num_layers=num_layers, layout='NTC', bidirectional=True)
fused_layer.initialize()
stack_layer = mx.gluon.rnn.HybridSequentialRNNCell()
for n in range(num_layers):
stack_layer.add(gluon.rnn.BidirectionalCell(stack_op(hidden_size),
stack_op(hidden_size)))
stack_layer.initialize()
check_rnn_consistency(fused_layer, stack_layer, loss, input_size, hidden_size, bidirectional=True)
@mx.util.use_np
@pytest.mark.parametrize('input_size', [8])
@pytest.mark.parametrize('hidden_size', [8, 16])
@pytest.mark.parametrize('num_layers', [1, 2, 3, 4])
@pytest.mark.parametrize('func', [check_rnn_unidir_layer_gradients,
check_rnn_bidir_layer_gradients])
@pytest.mark.parametrize('mode', ['lstm', 'gru', 'rnn_relu', 'rnn_tanh'])
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_fused_layer(input_size, hidden_size, num_layers, func, mode):
loss = mx.gluon.loss.L2Loss()
func(mode, input_size, hidden_size, num_layers, loss)
@pytest.mark.serial
def test_rnn_unroll_variant_length():
# Test for imperative usage
cell_list = []
for base_cell_class in [gluon.rnn.RNNCell, gluon.rnn.LSTMCell, gluon.rnn.GRUCell]:
cell_list.append(base_cell_class(20))
cell_list.append(gluon.rnn.BidirectionalCell(
l_cell=base_cell_class(20),
r_cell=base_cell_class(20)))
cell_list.append(gluon.rnn.VariationalDropoutCell(base_cell=base_cell_class(20)))
stack_res_rnn_cell = gluon.rnn.SequentialRNNCell()
stack_res_rnn_cell.add(gluon.rnn.ResidualCell(base_cell=gluon.rnn.RNNCell(20)))
stack_res_rnn_cell.add(gluon.rnn.ResidualCell(base_cell=gluon.rnn.RNNCell(20)))
cell_list.append(stack_res_rnn_cell)
batch_size = 4
max_length = 10
valid_length = [3, 10, 5, 6]
valid_length_nd = mx.np.array(valid_length)
for cell in cell_list:
# Test for NTC layout
print(type(cell))
data_nd = mx.np.random.normal(0, 1, size=(batch_size, max_length, 20))
if isinstance(cell, (gluon.rnn.HybridSequentialRNNCell, gluon.rnn.SequentialRNNCell)):
cell.infer_shape(0, data_nd, False)
elif isinstance(cell, gluon.rnn.BidirectionalCell):
cell.infer_shape(0, data_nd, True)
else:
cell.infer_shape(0, data_nd, False)
cell.initialize()
cell.hybridize()
outs, states = cell.unroll(length=max_length, inputs=data_nd,
valid_length=valid_length_nd,
merge_outputs=True,
layout='NTC')
for i, ele_length in enumerate(valid_length):
# Explicitly unroll each sequence and compare the final states and output
ele_out, ele_states = cell.unroll(length=ele_length,
inputs=data_nd[i:(i+1), :ele_length, :],
merge_outputs=True,
layout='NTC')
assert_allclose(ele_out.asnumpy(), outs[i:(i+1), :ele_length, :].asnumpy(),
atol=1E-4, rtol=1E-4)
if ele_length < max_length:
# Check the padded outputs are all zero
assert_allclose(outs[i:(i+1), ele_length:max_length, :].asnumpy(), 0)
for valid_out_state, gt_state in zip(states, ele_states):
assert_allclose(valid_out_state[i:(i+1)].asnumpy(), gt_state.asnumpy(),
atol=1E-4, rtol=1E-4)
# Test for TNC layout
data_nd = mx.np.random.normal(0, 1, size=(max_length, batch_size, 20))
outs, states = cell.unroll(length=max_length, inputs=data_nd,
valid_length=valid_length_nd,
layout='TNC')
for i, ele_length in enumerate(valid_length):
# Explicitly unroll each sequence and compare the final states and output
ele_out, ele_states = cell.unroll(length=ele_length,
inputs=data_nd[:ele_length, i:(i+1), :],
merge_outputs=True,
layout='TNC')
assert_allclose(ele_out.asnumpy(), outs[:ele_length, i:(i + 1), :].asnumpy(),
atol=1E-4, rtol=1E-4)
if ele_length < max_length:
# Check the padded outputs are all zero
assert_allclose(outs[ele_length:max_length, i:(i+1), :].asnumpy(), 0)
for valid_out_state, gt_state in zip(states, ele_states):
assert_allclose(valid_out_state[i:(i+1)].asnumpy(), gt_state.asnumpy(),
atol=1E-4, rtol=1E-4)
def test_cell_fill_shape():
cell = gluon.rnn.LSTMCell(10, input_size=7)
cell.hybridize()
assert cell.i2h_weight.shape[1] == 7, cell.i2h_weight.shape[1]
def test_layer_fill_shape():
layer = gluon.rnn.LSTM(10)
layer.hybridize()
check_rnn_layer_forward(layer, mx.np.ones((3, 2, 7)))
print(layer)
assert layer.l0_i2h_weight.shape[1] == 7, layer.l0_i2h_weight.shape[1]
@pytest.mark.serial
def test_bidirectional_unroll_valid_length():
def _check_bidirectional_unroll_valid_length(length):
class BiLSTM(gluon.nn.HybridBlock):
def __init__(self, rnn_size, time_step, **kwargs):
super(BiLSTM, self).__init__(**kwargs)
self.time_step = time_step
self.bi_lstm = gluon.rnn.BidirectionalCell(
gluon.rnn.LSTMCell(rnn_size),
gluon.rnn.LSTMCell(rnn_size))
def forward(self, inputs, valid_len):
outputs, states = self.bi_lstm.unroll(self.time_step, inputs, valid_length=valid_len,
layout='NTC', merge_outputs=True)
return outputs, states
def infer_shape(self, x, *args):
self.bi_lstm.infer_shape(0, x, True)
rnn_size = 100
net = BiLSTM(rnn_size, length)
inputs_data = mx.np.random.uniform(size=(10, length, 50))
net.infer_shape(inputs_data)
net.initialize()
net.hybridize()
valid_len = mx.np.array([length]*10)
outputs, _ = net(inputs_data, valid_len)
assert outputs.shape == (10, length, 200)
_check_bidirectional_unroll_valid_length(1)
_check_bidirectional_unroll_valid_length(3)
def check_rnn_forward(layer, inputs):
inputs.attach_grad()
layer.initialize()
with mx.autograd.record():
layer.unroll(3, inputs, merge_outputs=True)[0].backward()
mx.autograd.backward(layer.unroll(3, inputs, merge_outputs=False)[0])
mx.npx.waitall()
def test_rnn_cells():
check_rnn_forward(gluon.rnn.Conv1DLSTMCell((5, 7), 10, (3,), (3,)),
mx.np.ones((8, 3, 5, 7)))
check_rnn_forward(gluon.rnn.Conv1DRNNCell((5, 7), 10, (3,), (3,)),
mx.np.ones((8, 3, 5, 7)))
check_rnn_forward(gluon.rnn.Conv1DGRUCell((5, 7), 10, (3,), (3,)),
mx.np.ones((8, 3, 5, 7)))
net = mx.gluon.rnn.SequentialRNNCell()
net.add(gluon.rnn.Conv1DLSTMCell((5, 7), 10, (3,), (3,)))
net.add(gluon.rnn.Conv1DRNNCell((10, 5), 11, (3,), (3,)))
net.add(gluon.rnn.Conv1DGRUCell((11, 3), 12, (3,), (3,)))
check_rnn_forward(net, mx.np.ones((8, 3, 5, 7)))
@mx.util.use_np
def check_rnn_cell(cell, in_shape=(10, 50), out_shape=(10, 100), begin_state=None):
inputs = [mx.np.ones(shape=in_shape) for i in range(3)]
cell.infer_shape(0, inputs[0], False)
cell.initialize()
outputs, _ = cell.unroll(3, inputs, begin_state=begin_state)
assert sorted(cell.collect_params().keys()) == ['h2h_bias', 'h2h_weight',
'i2h_bias', 'i2h_weight']
outs = [o.shape for o in outputs]
assert outs == [out_shape] * 3
@mx.util.use_np
def test_convrnn():
cell = gluon.rnn.Conv1DRNNCell((10, 50), 100, 3, 3)
check_rnn_cell(cell, in_shape=(1, 10, 50), out_shape=(1, 100, 48))
cell = gluon.rnn.Conv2DRNNCell((10, 20, 50), 100, 3, 3)
check_rnn_cell(cell, in_shape=(1, 10, 20, 50), out_shape=(1, 100, 18, 48))
cell = gluon.rnn.Conv3DRNNCell((10, 20, 30, 50), 100, 3, 3)
check_rnn_cell(cell, in_shape=(1, 10, 20, 30, 50), out_shape=(1, 100, 18, 28, 48))
@mx.util.use_np
def test_convlstm():
cell = gluon.rnn.Conv1DLSTMCell((10, 50), 100, 3, 3)
check_rnn_cell(cell, in_shape=(1, 10, 50), out_shape=(1, 100, 48))
cell = gluon.rnn.Conv2DLSTMCell((10, 20, 50), 100, 3, 3)
check_rnn_cell(cell, in_shape=(1, 10, 20, 50), out_shape=(1, 100, 18, 48))
cell = gluon.rnn.Conv3DLSTMCell((10, 20, 30, 50), 100, 3, 3)
check_rnn_cell(cell, in_shape=(1, 10, 20, 30, 50), out_shape=(1, 100, 18, 28, 48))
@mx.util.use_np
def test_convgru():
cell = gluon.rnn.Conv1DGRUCell((10, 50), 100, 3, 3)
check_rnn_cell(cell, in_shape=(1, 10, 50), out_shape=(1, 100, 48))
cell = gluon.rnn.Conv2DGRUCell((10, 20, 50), 100, 3, 3)
check_rnn_cell(cell, in_shape=(1, 10, 20, 50), out_shape=(1, 100, 18, 48))
cell = gluon.rnn.Conv3DGRUCell((10, 20, 30, 50), 100, 3, 3)
check_rnn_cell(cell, in_shape=(1, 10, 20, 30, 50), out_shape=(1, 100, 18, 28, 48))
@mx.util.use_np
def test_conv_fill_shape():
input = mx.np.ones((8, 3, 5, 7))
cell = gluon.rnn.Conv1DLSTMCell((-1, 7), 10, (3,), (3,))
cell.infer_shape(0, input, False)
cell.hybridize()
check_rnn_forward(cell, input)
assert cell.i2h_weight.shape[1] == 5, cell.i2h_weight.shape[1]
@mx.util.use_np
def test_lstmp():
nhid = 100
nproj = 64
cell = gluon.rnn.LSTMPCell(nhid, nproj)
inputs = [mx.np.ones(shape=(10,50)) for i in range(3)]
cell.infer_shape(0, inputs[0], False)
cell.initialize()
outputs, _ = cell.unroll(3, inputs)
expected_params = ['h2h_bias', 'h2h_weight', 'h2r_weight', 'i2h_bias', 'i2h_weight']
assert sorted(cell.collect_params().keys()) == expected_params
outs = [o.shape for o in outputs]
assert outs == [(10, nproj)] * 3
@mx.util.use_np
def test_vardrop():
def check_vardrop(drop_inputs, drop_states, drop_outputs):
cell = gluon.rnn.VariationalDropoutCell(mx.gluon.rnn.RNNCell(100),
drop_outputs=drop_outputs,
drop_states=drop_states,
drop_inputs=drop_inputs)
input_data = mx.np.random.uniform(size=(10, 3, 50), ctx=mx.context.current_context())
cell.infer_shape(0, input_data, False)
cell.initialize(init='xavier')
with mx.autograd.record():
outputs1, _ = cell.unroll(3, input_data, merge_outputs=True)
mx.npx.waitall()
outputs2, _ = cell.unroll(3, input_data, merge_outputs=True)
assert not almost_equal(outputs1.asnumpy(), outputs2.asnumpy())
inputs = [mx.np.ones(shape=(10,50)) for i in range(3)]
cell.infer_shape(0, inputs[0], False)
cell.initialize()
outputs, _ = cell.unroll(3, inputs, merge_outputs=False)
outs = [o.shape for o in outputs]
assert outs == [(10, 100), (10, 100), (10, 100)]
cell.reset()
cell.hybridize()
with mx.autograd.record():
outputs3, _ = cell.unroll(3, input_data, merge_outputs=True)
mx.npx.waitall()
outputs4, _ = cell.unroll(3, input_data, merge_outputs=True)
assert not almost_equal(outputs3.asnumpy(), outputs4.asnumpy())
assert not almost_equal(outputs1.asnumpy(), outputs3.asnumpy())
check_vardrop(0.5, 0.5, 0.5)
check_vardrop(0.5, 0, 0.5)
@mx.util.use_np
@pytest.mark.parametrize('cell_type,num_states', [
(gluon.rnn.RNNCell, 1),
(gluon.rnn.LSTMCell, 2),
(gluon.rnn.GRUCell, 1)
])
@pytest.mark.parametrize('layout', ['NTC', 'TNC'])
def test_unroll(cell_type, num_states, layout):
class RNNLayer(gluon.HybridBlock):
def __init__(self, cell_type, hidden_size, layout):
super(RNNLayer, self).__init__()
self.cell = cell_type(hidden_size)
self.layout = layout
def forward(self, inputs, states, valid_length):
if isinstance(valid_length, list) and len(valid_length) == 0:
valid_length = None
return gluon.rnn.rnn_cell.dynamic_unroll(self.cell, inputs, states,
valid_length=valid_length,
layout=self.layout)
def infer_shape(self, x, *args):
self.cell.infer_shape(0, x, False)
batch_size = 20
input_size = 50
hidden_size = 30
seq_len = 10
ctx = default_context()
if layout == 'TNC':
rnn_data = mx.np.random.normal(loc=0, scale=1, size=(seq_len, batch_size, input_size), ctx=ctx)
elif layout == 'NTC':
rnn_data = mx.np.random.normal(loc=0, scale=1, size=(batch_size, seq_len, input_size), ctx=ctx)
else:
print("Wrong layout")
return
valid_length = mx.np.round(mx.np.random.uniform(low=1, high=10, size=(batch_size), ctx=ctx))
state_shape = (batch_size, hidden_size)
states = [mx.np.random.normal(loc=0, scale=1, size=state_shape, ctx=ctx) for i in range(num_states)]
cell = cell_type(hidden_size)
if layout == 'TNC':
cell.infer_shape(0, rnn_data[0], False)
cell.initialize(ctx=default_context())
cell(rnn_data[0], states)
else:
cell.infer_shape(0, rnn_data[:,0,:], False)
cell.initialize(ctx=default_context())
cell(rnn_data[:,0,:], states)
params1 = cell.collect_params()
orig_params1 = copy.deepcopy(params1)
trainer = gluon.Trainer(params1, 'sgd', {'learning_rate' : 0.03})
with mx.autograd.record():
res1, states1 = cell.unroll(seq_len, rnn_data, states, valid_length=valid_length,
layout=layout, merge_outputs=True)
res1.backward()
trainer.step(batch_size)
configs = [
lambda layer: None,
lambda layer: layer.hybridize(),
lambda layer: layer.hybridize({'inline_limit': 0}),
lambda layer: layer.hybridize({'static_alloc': True}),
lambda layer: layer.hybridize({'static_alloc': True, 'static_shape': True}) ]
    # We can't pass None to a hybrid block, but it accepts an empty list,
    # so we use an empty list to represent valid_length if it's None.
if valid_length is None:
valid_length = []
for config in configs:
layer = RNNLayer(cell_type, hidden_size, layout)
layer.infer_shape(rnn_data)
layer.initialize(ctx=default_context())
config(layer)
res2, states2 = layer(rnn_data, states, valid_length)
params2 = layer.collect_params()
for key, val in orig_params1.items():
params2['cell.' + key].set_data(copy.deepcopy(val.data()))
trainer = gluon.Trainer(params2, 'sgd', {'learning_rate' : 0.03})
with mx.autograd.record():
res2, states2 = layer(rnn_data, states, valid_length)
assert_almost_equal(res1, res2, rtol=0.001, atol=0.0001)
assert len(states1) == len(states2)
for i in range(len(states1)):
assert_almost_equal(states1[i], states2[i], rtol=0.001, atol=0.0001)
res2.backward()
trainer.step(batch_size)
for key, val in params1.items():
weight1 = val.data()
weight2 = params2['cell.' + key].data()
            # Subgraph created from npx.foreach in deferred compute is a
            # little bit different from the legacy foreach operator.
assert_almost_equal(weight1, weight2, rtol=0.1, atol=0.1)
|
GarySparrow/mFlaskWeb | refs/heads/master | venv/Lib/site-packages/werkzeug/local.py | 148 | # -*- coding: utf-8 -*-
"""
werkzeug.local
~~~~~~~~~~~~~~
This module implements context-local objects.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from functools import update_wrapper
from werkzeug.wsgi import ClosingIterator
from werkzeug._compat import PY2, implements_bool
# since each thread has its own greenlet we can just use those as identifiers
# for the context. If greenlets are not available we fall back to the
# current thread ident depending on where it is.
try:
from greenlet import getcurrent as get_ident
except ImportError:
try:
from thread import get_ident
except ImportError:
from _thread import get_ident
def release_local(local):
"""Releases the contents of the local for the current context.
This makes it possible to use locals without a manager.
Example::
>>> loc = Local()
>>> loc.foo = 42
>>> release_local(loc)
>>> hasattr(loc, 'foo')
False
With this function one can release :class:`Local` objects as well
    as :class:`LocalStack` objects. However, it is not possible to
    release data held by proxies that way; one always has to retain
a reference to the underlying local object in order to be able
to release it.
.. versionadded:: 0.6.1
"""
local.__release_local__()
class Local(object):
__slots__ = ('__storage__', '__ident_func__')
def __init__(self):
object.__setattr__(self, '__storage__', {})
object.__setattr__(self, '__ident_func__', get_ident)
def __iter__(self):
return iter(self.__storage__.items())
def __call__(self, proxy):
"""Create a proxy for a name."""
return LocalProxy(self, proxy)
def __release_local__(self):
self.__storage__.pop(self.__ident_func__(), None)
def __getattr__(self, name):
try:
return self.__storage__[self.__ident_func__()][name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
ident = self.__ident_func__()
storage = self.__storage__
try:
storage[ident][name] = value
except KeyError:
storage[ident] = {name: value}
def __delattr__(self, name):
try:
del self.__storage__[self.__ident_func__()][name]
except KeyError:
raise AttributeError(name)
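# Illustrative usage sketch (hypothetical helper, mirroring the release_local
# example): attributes stored on a Local are keyed by __ident_func__(), so each
# thread/greenlet context only ever sees its own values.
def _example_local_usage():
    loc = Local()
    loc.user = 'alice'             # bound to the current context only
    assert loc.user == 'alice'
    release_local(loc)             # drops this context's storage
    assert not hasattr(loc, 'user')
    return loc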
class LocalStack(object):
"""This class works similar to a :class:`Local` but keeps a stack
of objects instead. This is best explained with an example::
>>> ls = LocalStack()
>>> ls.push(42)
>>> ls.top
42
>>> ls.push(23)
>>> ls.top
23
>>> ls.pop()
23
>>> ls.top
42
They can be force released by using a :class:`LocalManager` or with
    the :func:`release_local` function, but the correct way is to pop the
    item from the stack after use. When the stack is empty it will
no longer be bound to the current context (and as such released).
By calling the stack without arguments it returns a proxy that resolves to
the topmost item on the stack.
.. versionadded:: 0.6.1
"""
def __init__(self):
self._local = Local()
def __release_local__(self):
self._local.__release_local__()
def _get__ident_func__(self):
return self._local.__ident_func__
def _set__ident_func__(self, value):
object.__setattr__(self._local, '__ident_func__', value)
__ident_func__ = property(_get__ident_func__, _set__ident_func__)
del _get__ident_func__, _set__ident_func__
def __call__(self):
def _lookup():
rv = self.top
if rv is None:
raise RuntimeError('object unbound')
return rv
return LocalProxy(_lookup)
def push(self, obj):
"""Pushes a new item to the stack"""
rv = getattr(self._local, 'stack', None)
if rv is None:
self._local.stack = rv = []
rv.append(obj)
return rv
def pop(self):
"""Removes the topmost item from the stack, will return the
old value or `None` if the stack was already empty.
"""
stack = getattr(self._local, 'stack', None)
if stack is None:
return None
elif len(stack) == 1:
release_local(self._local)
return stack[-1]
else:
return stack.pop()
@property
def top(self):
"""The topmost item on the stack. If the stack is empty,
`None` is returned.
"""
try:
return self._local.stack[-1]
except (AttributeError, IndexError):
return None
class LocalManager(object):
"""Local objects cannot manage themselves. For that you need a local
manager. You can pass a local manager multiple locals or add them later
    by appending them to `manager.locals`. Every time the manager cleans up,
    it will clean up all the data left in the locals for this context.
The `ident_func` parameter can be added to override the default ident
function for the wrapped locals.
.. versionchanged:: 0.6.1
Instead of a manager the :func:`release_local` function can be used
as well.
.. versionchanged:: 0.7
`ident_func` was added.
"""
def __init__(self, locals=None, ident_func=None):
if locals is None:
self.locals = []
elif isinstance(locals, Local):
self.locals = [locals]
else:
self.locals = list(locals)
if ident_func is not None:
self.ident_func = ident_func
for local in self.locals:
object.__setattr__(local, '__ident_func__', ident_func)
else:
self.ident_func = get_ident
def get_ident(self):
"""Return the context identifier the local objects use internally for
this context. You cannot override this method to change the behavior,
but you can use it to link other context-local objects (such as SQLAlchemy's
scoped sessions) to the Werkzeug locals.
.. versionchanged:: 0.7
You can pass a different ident function to the local manager that
will then be propagated to all the locals passed to the
constructor.
"""
return self.ident_func()
def cleanup(self):
"""Manually clean up the data in the locals for this context. Call
this at the end of the request or use `make_middleware()`.
"""
for local in self.locals:
release_local(local)
def make_middleware(self, app):
"""Wrap a WSGI application so that cleaning up happens after
request end.
"""
def application(environ, start_response):
return ClosingIterator(app(environ, start_response), self.cleanup)
return application
def middleware(self, func):
"""Like `make_middleware` but for decorating functions.
Example usage::
@manager.middleware
def application(environ, start_response):
...
The difference to `make_middleware` is that the function passed
will have all the arguments copied from the inner application
(name, docstring, module).
"""
return update_wrapper(self.make_middleware(func), func)
def __repr__(self):
return '<%s storages: %d>' % (
self.__class__.__name__,
len(self.locals)
)
@implements_bool
class LocalProxy(object):
"""Acts as a proxy for a werkzeug local. Forwards all operations to
a proxied object. The only operations not supported for forwarding
are right handed operands and any kind of assignment.
Example usage::
from werkzeug.local import Local
l = Local()
# these are proxies
request = l('request')
user = l('user')
from werkzeug.local import LocalStack
_response_local = LocalStack()
# this is a proxy
response = _response_local()
Whenever something is bound to l.user / l.request the proxy objects
will forward all operations. If no object is bound a :exc:`RuntimeError`
will be raised.
To create proxies to :class:`Local` or :class:`LocalStack` objects,
call the object as shown above. If you want to have a proxy to an
object looked up by a function, you can (as of Werkzeug 0.6.1) pass
a function to the :class:`LocalProxy` constructor::
session = LocalProxy(lambda: get_current_request().session)
.. versionchanged:: 0.6.1
The class can be instantiated with a callable as well now.
"""
__slots__ = ('__local', '__dict__', '__name__')
def __init__(self, local, name=None):
object.__setattr__(self, '_LocalProxy__local', local)
object.__setattr__(self, '__name__', name)
def _get_current_object(self):
"""Return the current object. This is useful if you want the real
object behind the proxy at a time for performance reasons or because
you want to pass the object into a different context.
"""
if not hasattr(self.__local, '__release_local__'):
return self.__local()
try:
return getattr(self.__local, self.__name__)
except AttributeError:
raise RuntimeError('no object bound to %s' % self.__name__)
@property
def __dict__(self):
try:
return self._get_current_object().__dict__
except RuntimeError:
raise AttributeError('__dict__')
def __repr__(self):
try:
obj = self._get_current_object()
except RuntimeError:
return '<%s unbound>' % self.__class__.__name__
return repr(obj)
def __bool__(self):
try:
return bool(self._get_current_object())
except RuntimeError:
return False
def __unicode__(self):
try:
return unicode(self._get_current_object())
except RuntimeError:
return repr(self)
def __dir__(self):
try:
return dir(self._get_current_object())
except RuntimeError:
return []
def __getattr__(self, name):
if name == '__members__':
return dir(self._get_current_object())
return getattr(self._get_current_object(), name)
def __setitem__(self, key, value):
self._get_current_object()[key] = value
def __delitem__(self, key):
del self._get_current_object()[key]
if PY2:
__getslice__ = lambda x, i, j: x._get_current_object()[i:j]
def __setslice__(self, i, j, seq):
self._get_current_object()[i:j] = seq
def __delslice__(self, i, j):
del self._get_current_object()[i:j]
__setattr__ = lambda x, n, v: setattr(x._get_current_object(), n, v)
__delattr__ = lambda x, n: delattr(x._get_current_object(), n)
__str__ = lambda x: str(x._get_current_object())
__lt__ = lambda x, o: x._get_current_object() < o
__le__ = lambda x, o: x._get_current_object() <= o
__eq__ = lambda x, o: x._get_current_object() == o
__ne__ = lambda x, o: x._get_current_object() != o
__gt__ = lambda x, o: x._get_current_object() > o
__ge__ = lambda x, o: x._get_current_object() >= o
__cmp__ = lambda x, o: cmp(x._get_current_object(), o)
__hash__ = lambda x: hash(x._get_current_object())
__call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw)
__len__ = lambda x: len(x._get_current_object())
__getitem__ = lambda x, i: x._get_current_object()[i]
__iter__ = lambda x: iter(x._get_current_object())
__contains__ = lambda x, i: i in x._get_current_object()
__add__ = lambda x, o: x._get_current_object() + o
__sub__ = lambda x, o: x._get_current_object() - o
__mul__ = lambda x, o: x._get_current_object() * o
__floordiv__ = lambda x, o: x._get_current_object() // o
__mod__ = lambda x, o: x._get_current_object() % o
__divmod__ = lambda x, o: x._get_current_object().__divmod__(o)
__pow__ = lambda x, o: x._get_current_object() ** o
__lshift__ = lambda x, o: x._get_current_object() << o
__rshift__ = lambda x, o: x._get_current_object() >> o
__and__ = lambda x, o: x._get_current_object() & o
__xor__ = lambda x, o: x._get_current_object() ^ o
__or__ = lambda x, o: x._get_current_object() | o
__div__ = lambda x, o: x._get_current_object().__div__(o)
__truediv__ = lambda x, o: x._get_current_object().__truediv__(o)
__neg__ = lambda x: -(x._get_current_object())
__pos__ = lambda x: +(x._get_current_object())
__abs__ = lambda x: abs(x._get_current_object())
__invert__ = lambda x: ~(x._get_current_object())
__complex__ = lambda x: complex(x._get_current_object())
__int__ = lambda x: int(x._get_current_object())
__long__ = lambda x: long(x._get_current_object())
__float__ = lambda x: float(x._get_current_object())
__oct__ = lambda x: oct(x._get_current_object())
__hex__ = lambda x: hex(x._get_current_object())
__index__ = lambda x: x._get_current_object().__index__()
__coerce__ = lambda x, o: x._get_current_object().__coerce__(x, o)
__enter__ = lambda x: x._get_current_object().__enter__()
__exit__ = lambda x, *a, **kw: x._get_current_object().__exit__(*a, **kw)
__radd__ = lambda x, o: o + x._get_current_object()
__rsub__ = lambda x, o: o - x._get_current_object()
__rmul__ = lambda x, o: o * x._get_current_object()
__rdiv__ = lambda x, o: o / x._get_current_object()
if PY2:
__rtruediv__ = lambda x, o: x._get_current_object().__rtruediv__(o)
else:
__rtruediv__ = __rdiv__
__rfloordiv__ = lambda x, o: o // x._get_current_object()
__rmod__ = lambda x, o: o % x._get_current_object()
__rdivmod__ = lambda x, o: x._get_current_object().__rdivmod__(o)
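# Example usage (illustrative sketch, mirroring the doctest style used in the
# docstrings above; ``loc``, ``user`` and ``stack`` are hypothetical names and
# ``release_local`` is the helper referenced earlier in this module):
#
#     >>> loc = Local()
#     >>> loc.user = 'alice'      # bound only for the current thread/greenlet
#     >>> user = loc('user')      # a LocalProxy that resolves on every access
#     >>> str(user)
#     'alice'
#     >>> release_local(loc)      # drops everything stored for this context
#
#     >>> stack = LocalStack()
#     >>> _ = stack.push(42)      # push() returns the underlying list
#     >>> stack.top
#     42
#     >>> stack.pop()             # popping the last item releases the context
#     42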
|
abantam/pmtud | refs/heads/master | utils/python-unit-tests.py | 155 | import unittest
from ns.core import Simulator, Seconds, Config, int64x64_t
import ns.core
import ns.network
import ns.internet
import ns.mobility
import ns.csma
import ns.applications
class TestSimulator(unittest.TestCase):
def testScheduleNow(self):
def callback(args):
self._args_received = args
self._cb_time = Simulator.Now()
Simulator.Destroy()
self._args_received = None
self._cb_time = None
Simulator.ScheduleNow(callback, "args")
Simulator.Run()
self.assertEqual(self._args_received, "args")
self.assertEqual(self._cb_time.GetSeconds(), 0.0)
def testSchedule(self):
def callback(args):
self._args_received = args
self._cb_time = Simulator.Now()
Simulator.Destroy()
self._args_received = None
self._cb_time = None
Simulator.Schedule(Seconds(123), callback, "args")
Simulator.Run()
self.assertEqual(self._args_received, "args")
self.assertEqual(self._cb_time.GetSeconds(), 123.0)
def testScheduleDestroy(self):
def callback(args):
self._args_received = args
self._cb_time = Simulator.Now()
Simulator.Destroy()
self._args_received = None
self._cb_time = None
def null(): pass
Simulator.Schedule(Seconds(123), null)
Simulator.ScheduleDestroy(callback, "args")
Simulator.Run()
Simulator.Destroy()
self.assertEqual(self._args_received, "args")
self.assertEqual(self._cb_time.GetSeconds(), 123.0)
def testScheduleWithContext(self):
def callback(context, args):
self._context_received = context
self._args_received = args
self._cb_time = Simulator.Now()
Simulator.Destroy()
self._args_received = None
self._cb_time = None
self._context_received = None
Simulator.ScheduleWithContext(54321, Seconds(123), callback, "args")
Simulator.Run()
self.assertEqual(self._context_received, 54321)
self.assertEqual(self._args_received, "args")
self.assertEqual(self._cb_time.GetSeconds(), 123.0)
def testTimeComparison(self):
self.assert_(Seconds(123) == Seconds(123))
self.assert_(Seconds(123) >= Seconds(123))
self.assert_(Seconds(123) <= Seconds(123))
self.assert_(Seconds(124) > Seconds(123))
self.assert_(Seconds(123) < Seconds(124))
def testTimeNumericOperations(self):
self.assertEqual(Seconds(10) + Seconds(5), Seconds(15))
self.assertEqual(Seconds(10) - Seconds(5), Seconds(5))
v1 = int64x64_t(5.0)*int64x64_t(10)
self.assertEqual(v1, int64x64_t(50))
def testConfig(self):
Config.SetDefault("ns3::OnOffApplication::PacketSize", ns.core.UintegerValue(123))
# hm.. no Config.Get?
def testSocket(self):
node = ns.network.Node()
internet = ns.internet.InternetStackHelper()
internet.Install(node)
self._received_packet = None
def rx_callback(socket):
assert self._received_packet is None
self._received_packet = socket.Recv()
sink = ns.network.Socket.CreateSocket(node, ns.core.TypeId.LookupByName("ns3::UdpSocketFactory"))
sink.Bind(ns.network.InetSocketAddress(ns.network.Ipv4Address.GetAny(), 80))
sink.SetRecvCallback(rx_callback)
source = ns.network.Socket.CreateSocket(node, ns.core.TypeId.LookupByName("ns3::UdpSocketFactory"))
source.SendTo(ns.network.Packet(19), 0, ns.network.InetSocketAddress(ns.network.Ipv4Address("127.0.0.1"), 80))
Simulator.Run()
self.assert_(self._received_packet is not None)
self.assertEqual(self._received_packet.GetSize(), 19)
def testAttributes(self):
##
## Yes, I know, the GetAttribute interface for Python is
## horrible, we should fix this soon, I hope.
##
queue = ns.network.DropTailQueue()
queue.SetAttribute("MaxPackets", ns.core.UintegerValue(123456))
limit = ns.core.UintegerValue()
queue.GetAttribute("MaxPackets", limit)
self.assertEqual(limit.Get(), 123456)
## -- object pointer values
mobility = ns.mobility.RandomWaypointMobilityModel()
ptr = ns.core.PointerValue()
mobility.GetAttribute("PositionAllocator", ptr)
self.assertEqual(ptr.GetObject(), None)
pos = ns.mobility.ListPositionAllocator()
mobility.SetAttribute("PositionAllocator", ns.core.PointerValue(pos))
ptr = ns.core.PointerValue()
mobility.GetAttribute("PositionAllocator", ptr)
self.assert_(ptr.GetObject() is not None)
def testIdentity(self):
csma = ns.csma.CsmaNetDevice()
channel = ns.csma.CsmaChannel()
csma.Attach(channel)
c1 = csma.GetChannel()
c2 = csma.GetChannel()
self.assert_(c1 is c2)
def testTypeId(self):
typeId1 = ns.core.TypeId.LookupByNameFailSafe("ns3::UdpSocketFactory")
self.assertEqual(typeId1.GetName (), "ns3::UdpSocketFactory")
self.assertRaises(KeyError, ns.core.TypeId.LookupByNameFailSafe, "__InvalidTypeName__")
def testCommandLine(self):
cmd = ns.core.CommandLine()
cmd.AddValue("Test1", "this is a test option")
cmd.AddValue("Test2", "this is a test option")
cmd.AddValue("Test3", "this is a test option", variable="test_xxx")
cmd.Test1 = None
cmd.Test2 = None
cmd.test_xxx = None
class Foo:
pass
foo = Foo()
foo.test_foo = None
cmd.AddValue("Test4", "this is a test option", variable="test_foo", namespace=foo)
cmd.Parse(["python", "--Test1=value1", "--Test2=value2", "--Test3=123", "--Test4=xpto"])
self.assertEqual(cmd.Test1, "value1")
self.assertEqual(cmd.Test2, "value2")
self.assertEqual(cmd.test_xxx, "123")
self.assertEqual(foo.test_foo, "xpto")
def testSubclass(self):
class MyNode(ns.network.Node):
def __init__(self):
super(MyNode, self).__init__()
node = MyNode()
if __name__ == '__main__':
unittest.main()
|
tchellomello/home-assistant | refs/heads/dev | tests/components/dyson/test_vacuum.py | 13 | """Test the Dyson 360 eye robot vacuum component."""
import unittest
from unittest import mock
from libpurecool.const import Dyson360EyeMode, PowerMode
from libpurecool.dyson_360_eye import Dyson360Eye
from homeassistant.components.dyson import vacuum as dyson
from homeassistant.components.dyson.vacuum import Dyson360EyeDevice
from tests.common import get_test_home_assistant
def _get_non_vacuum_device():
"""Return a non vacuum device."""
device = mock.Mock()
device.name = "Device_Fan"
device.state = None
return device
def _get_vacuum_device_cleaning():
"""Return a vacuum device running."""
device = mock.Mock(spec=Dyson360Eye)
device.name = "Device_Vacuum"
device.state = mock.MagicMock()
device.state.state = Dyson360EyeMode.FULL_CLEAN_RUNNING
device.state.battery_level = 85
device.state.power_mode = PowerMode.QUIET
device.state.position = (0, 0)
return device
def _get_vacuum_device_charging():
"""Return a vacuum device charging."""
device = mock.Mock(spec=Dyson360Eye)
device.name = "Device_Vacuum"
device.state = mock.MagicMock()
device.state.state = Dyson360EyeMode.INACTIVE_CHARGING
device.state.battery_level = 40
device.state.power_mode = PowerMode.QUIET
device.state.position = (0, 0)
return device
def _get_vacuum_device_pause():
"""Return a vacuum device in pause."""
device = mock.MagicMock(spec=Dyson360Eye)
device.name = "Device_Vacuum"
device.state = mock.MagicMock()
device.state.state = Dyson360EyeMode.FULL_CLEAN_PAUSED
device.state.battery_level = 40
device.state.power_mode = PowerMode.QUIET
device.state.position = (0, 0)
return device
def _get_vacuum_device_unknown_state():
"""Return a vacuum device with unknown state."""
device = mock.Mock(spec=Dyson360Eye)
device.name = "Device_Vacuum"
device.state = mock.MagicMock()
device.state.state = "Unknown"
return device
class DysonTest(unittest.TestCase):
"""Dyson 360 eye robot vacuum component test class."""
def setUp(self): # pylint: disable=invalid-name
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.addCleanup(self.tear_down_cleanup)
def tear_down_cleanup(self):
"""Stop everything that was started."""
self.hass.stop()
def test_setup_component_with_no_devices(self):
"""Test setup component with no devices."""
self.hass.data[dyson.DYSON_DEVICES] = []
add_entities = mock.MagicMock()
dyson.setup_platform(self.hass, {}, add_entities)
add_entities.assert_called_with([])
def test_setup_component(self):
"""Test setup component with devices."""
def _add_device(devices):
assert len(devices) == 1
assert devices[0].name == "Device_Vacuum"
device_vacuum = _get_vacuum_device_cleaning()
device_non_vacuum = _get_non_vacuum_device()
self.hass.data[dyson.DYSON_DEVICES] = [device_vacuum, device_non_vacuum]
dyson.setup_platform(self.hass, {}, _add_device)
def test_on_message(self):
"""Test when message is received."""
device = _get_vacuum_device_cleaning()
component = Dyson360EyeDevice(device)
component.entity_id = "entity_id"
component.schedule_update_ha_state = mock.Mock()
component.on_message(mock.Mock())
assert component.schedule_update_ha_state.called
def test_should_poll(self):
"""Test polling is disable."""
device = _get_vacuum_device_cleaning()
component = Dyson360EyeDevice(device)
assert not component.should_poll
def test_properties(self):
"""Test component properties."""
device1 = _get_vacuum_device_cleaning()
device2 = _get_vacuum_device_unknown_state()
device3 = _get_vacuum_device_charging()
component = Dyson360EyeDevice(device1)
component2 = Dyson360EyeDevice(device2)
component3 = Dyson360EyeDevice(device3)
assert component.name == "Device_Vacuum"
assert component.is_on
assert component.status == "Cleaning"
assert component2.status == "Unknown"
assert component.battery_level == 85
assert component.fan_speed == "Quiet"
assert component.fan_speed_list == ["Quiet", "Max"]
assert component.device_state_attributes["position"] == "(0, 0)"
assert component.available
assert component.supported_features == 255
assert component.battery_icon == "mdi:battery-80"
assert component3.battery_icon == "mdi:battery-charging-40"
def test_turn_on(self):
"""Test turn on vacuum."""
device1 = _get_vacuum_device_charging()
component1 = Dyson360EyeDevice(device1)
component1.turn_on()
assert device1.start.called
device2 = _get_vacuum_device_pause()
component2 = Dyson360EyeDevice(device2)
component2.turn_on()
assert device2.resume.called
def test_turn_off(self):
"""Test turn off vacuum."""
device1 = _get_vacuum_device_cleaning()
component1 = Dyson360EyeDevice(device1)
component1.turn_off()
assert device1.pause.called
def test_stop(self):
"""Test stop vacuum."""
device1 = _get_vacuum_device_cleaning()
component1 = Dyson360EyeDevice(device1)
component1.stop()
assert device1.pause.called
def test_set_fan_speed(self):
"""Test set fan speed vacuum."""
device1 = _get_vacuum_device_cleaning()
component1 = Dyson360EyeDevice(device1)
component1.set_fan_speed("Max")
device1.set_power_mode.assert_called_with(PowerMode.MAX)
def test_start_pause(self):
"""Test start/pause."""
device1 = _get_vacuum_device_charging()
component1 = Dyson360EyeDevice(device1)
component1.start_pause()
assert device1.start.called
device2 = _get_vacuum_device_pause()
component2 = Dyson360EyeDevice(device2)
component2.start_pause()
assert device2.resume.called
device3 = _get_vacuum_device_cleaning()
component3 = Dyson360EyeDevice(device3)
component3.start_pause()
assert device3.pause.called
def test_return_to_base(self):
"""Test return to base."""
device = _get_vacuum_device_pause()
component = Dyson360EyeDevice(device)
component.return_to_base()
assert device.abort.called
|
codrut3/tensorflow | refs/heads/master | tensorflow/contrib/estimator/python/estimator/dnn_linear_combined_test.py | 17 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dnn_linear_combined.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import numpy as np
import six
from tensorflow.contrib.estimator.python.estimator import dnn_linear_combined
from tensorflow.contrib.estimator.python.estimator import head as head_lib
from tensorflow.python.estimator.canned import dnn_testing_utils
from tensorflow.python.estimator.canned import linear_testing_utils
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.feature_column import feature_column
from tensorflow.python.framework import ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
def _dnn_only_estimator_fn(
hidden_units,
feature_columns,
model_dir=None,
label_dimension=1,
weight_column=None,
optimizer='Adagrad',
activation_fn=nn.relu,
dropout=None,
input_layer_partitioner=None,
config=None):
return dnn_linear_combined.DNNLinearCombinedEstimator(
head=head_lib.regression_head(
weight_column=weight_column, label_dimension=label_dimension),
model_dir=model_dir,
dnn_feature_columns=feature_columns,
dnn_optimizer=optimizer,
dnn_hidden_units=hidden_units,
dnn_activation_fn=activation_fn,
dnn_dropout=dropout,
input_layer_partitioner=input_layer_partitioner,
config=config)
class DNNOnlyEstimatorEvaluateTest(
dnn_testing_utils.BaseDNNRegressorEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__(
self, _dnn_only_estimator_fn)
class DNNOnlyEstimatorPredictTest(
dnn_testing_utils.BaseDNNRegressorPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorPredictTest.__init__(
self, _dnn_only_estimator_fn)
class DNNOnlyEstimatorTrainTest(
dnn_testing_utils.BaseDNNRegressorTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorTrainTest.__init__(
self, _dnn_only_estimator_fn)
def _linear_only_estimator_fn(
feature_columns,
model_dir=None,
label_dimension=1,
weight_column=None,
optimizer='Ftrl',
config=None,
partitioner=None):
return dnn_linear_combined.DNNLinearCombinedEstimator(
head=head_lib.regression_head(
weight_column=weight_column, label_dimension=label_dimension),
model_dir=model_dir,
linear_feature_columns=feature_columns,
linear_optimizer=optimizer,
input_layer_partitioner=partitioner,
config=config)
class LinearOnlyEstimatorEvaluateTest(
linear_testing_utils.BaseLinearRegressorEvaluationTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorEvaluationTest.__init__(
self, _linear_only_estimator_fn)
class LinearOnlyEstimatorPredictTest(
linear_testing_utils.BaseLinearRegressorPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorPredictTest.__init__(
self, _linear_only_estimator_fn)
class LinearOnlyEstimatorTrainTest(
linear_testing_utils.BaseLinearRegressorTrainingTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorTrainingTest.__init__(
self, _linear_only_estimator_fn)
class DNNLinearCombinedEstimatorIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
label_dimension, batch_size):
linear_feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
dnn_feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
feature_columns = linear_feature_columns + dnn_feature_columns
est = dnn_linear_combined.DNNLinearCombinedEstimator(
head=head_lib.regression_head(label_dimension=label_dimension),
linear_feature_columns=linear_feature_columns,
dnn_feature_columns=dnn_feature_columns,
dnn_hidden_units=(2, 2),
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
# EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predictions = np.array([
x[prediction_keys.PredictionKeys.PREDICTIONS]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, label_dimension), predictions.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
if __name__ == '__main__':
test.main()
|
schelleg/PYNQ | refs/heads/master | pynq/lib/arduino/arduino_grove_dlight.py | 4 | # Copyright (c) 2016, NECST Laboratory, Politecnico di Milano
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from . import Arduino
from . import ARDUINO_GROVE_I2C
__author__ = "Marco Rabozzi, Luca Cerina, Giuseppe Natale"
__copyright__ = "Copyright 2016, NECST Laboratory, Politecnico di Milano"
ARDUINO_GROVE_DLIGHT_PROGRAM = "arduino_grove_dlight.bin"
CONFIG_IOP_SWITCH = 0x1
GET_LIGHT_VALUE = 0x3
GET_LUX_VALUE = 0x5
class Grove_Dlight(object):
"""This class controls the Grove IIC color sensor.
Grove Color sensor based on the TCS3414CS.
Hardware version: v1.3.
Attributes
----------
microblaze : Arduino
Microblaze processor instance used by this module.
"""
def __init__(self, mb_info, gr_pin):
"""Return a new instance of an Grove_Dlight object.
Parameters
----------
mb_info : dict
A dictionary storing Microblaze information, such as the
IP name and the reset name.
gr_pin: list
A group of pins on arduino-grove shield.
"""
if gr_pin not in [ARDUINO_GROVE_I2C]:
raise ValueError("Group number can only be I2C.")
self.microblaze = Arduino(mb_info, ARDUINO_GROVE_DLIGHT_PROGRAM)
self.microblaze.write_blocking_command(CONFIG_IOP_SWITCH)
def read_raw_light(self):
"""Read the visible and IR channel values.
Read the values from the grove digital light peripheral.
Returns
-------
tuple
A tuple containing 2 integer values ch0 (visible) and ch1 (IR).
"""
self.microblaze.write_blocking_command(GET_LIGHT_VALUE)
ch0, ch1 = self.microblaze.read_mailbox(0, 2)
return ch0, ch1
def read_lux(self):
"""Read the computed lux value of the sensor.
Returns
-------
int
The lux value from the sensor
"""
self.microblaze.write_blocking_command(GET_LUX_VALUE)
lux = self.microblaze.read_mailbox(0x8)
return lux
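# Example usage (illustrative sketch; requires a PYNQ board with the base
# overlay loaded, and assumes the usual package layout where ``base.ARDUINO``
# supplies the ``mb_info`` dictionary and the driver is re-exported from
# ``pynq.lib.arduino``):
#
#     from pynq.overlays.base import BaseOverlay
#     from pynq.lib.arduino import Grove_Dlight, ARDUINO_GROVE_I2C
#
#     base = BaseOverlay("base.bit")
#     dlight = Grove_Dlight(base.ARDUINO, ARDUINO_GROVE_I2C)
#     ch0, ch1 = dlight.read_raw_light()   # visible and IR channel counts
#     lux = dlight.read_lux()              # computed lux value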
|
michi88/Zappa | refs/heads/master | tests/test_handler.py | 4 | import unittest
from zappa.handler import LambdaHandler
def no_args():
return
def one_arg(first):
return first
def two_args(first, second):
return first, second
def var_args(*args):
return args
def var_args_with_one(first, *args):
return first, args[0]
def unsupported(first, second, third):
return first, second, third
class TestZappa(unittest.TestCase):
def test_run_function(self):
self.assertIsNone(LambdaHandler.run_function(no_args, 'e', 'c'))
self.assertEqual(LambdaHandler.run_function(one_arg, 'e', 'c'), 'e')
self.assertEqual(LambdaHandler.run_function(two_args, 'e', 'c'), ('e', 'c'))
self.assertEqual(LambdaHandler.run_function(var_args, 'e', 'c'), ('e', 'c'))
self.assertEqual(LambdaHandler.run_function(var_args_with_one, 'e', 'c'), ('e', 'c'))
try:
LambdaHandler.run_function(unsupported, 'e', 'c')
self.fail('Exception expected')
except RuntimeError as e:
pass |
KokareIITP/django | refs/heads/master | tests/schema/fields.py | 203 | from django.db import models
from django.db.models.fields.related import (
RECURSIVE_RELATIONSHIP_CONSTANT, ManyRelatedObjectsDescriptor,
ManyToManyField, ManyToManyRel, RelatedField,
create_many_to_many_intermediary_model,
)
from django.utils.functional import curry
class CustomManyToManyField(RelatedField):
"""
Ticket #24104 - Need to have a custom ManyToManyField,
which is not an inheritor of ManyToManyField.
"""
many_to_many = True
def __init__(self, to, db_constraint=True, swappable=True, **kwargs):
try:
to._meta
except AttributeError:
to = str(to)
kwargs['rel'] = ManyToManyRel(
self, to,
related_name=kwargs.pop('related_name', None),
related_query_name=kwargs.pop('related_query_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
symmetrical=kwargs.pop('symmetrical', to == RECURSIVE_RELATIONSHIP_CONSTANT),
through=kwargs.pop('through', None),
through_fields=kwargs.pop('through_fields', None),
db_constraint=db_constraint,
)
self.swappable = swappable
self.db_table = kwargs.pop('db_table', None)
if kwargs['rel'].through is not None:
assert self.db_table is None, "Cannot specify a db_table if an intermediary model is used."
super(CustomManyToManyField, self).__init__(**kwargs)
def contribute_to_class(self, cls, name, **kwargs):
if self.remote_field.symmetrical and (
self.remote_field.model == "self" or self.remote_field.model == cls._meta.object_name):
self.remote_field.related_name = "%s_rel_+" % name
super(CustomManyToManyField, self).contribute_to_class(cls, name, **kwargs)
if not self.remote_field.through and not cls._meta.abstract and not cls._meta.swapped:
self.remote_field.through = create_many_to_many_intermediary_model(self, cls)
setattr(cls, self.name, ManyRelatedObjectsDescriptor(self.remote_field))
self.m2m_db_table = curry(self._get_m2m_db_table, cls._meta)
def get_internal_type(self):
return 'ManyToManyField'
# Copy those methods from ManyToManyField because they don't call super() internally
contribute_to_related_class = ManyToManyField.__dict__['contribute_to_related_class']
_get_m2m_attr = ManyToManyField.__dict__['_get_m2m_attr']
_get_m2m_reverse_attr = ManyToManyField.__dict__['_get_m2m_reverse_attr']
_get_m2m_db_table = ManyToManyField.__dict__['_get_m2m_db_table']
class InheritedManyToManyField(ManyToManyField):
pass
class MediumBlobField(models.BinaryField):
"""
A MySQL BinaryField that uses a different blob size.
"""
def db_type(self, connection):
return 'MEDIUMBLOB'
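# Example usage (illustrative sketch; ``Tag`` and ``Article`` are hypothetical
# models): the field is declared like a normal many-to-many relation, but it
# is not a ManyToManyField subclass, so schema editing code has to rely on
# ``field.many_to_many`` instead of isinstance() checks.
#
#     class Tag(models.Model):
#         name = models.CharField(max_length=50)
#
#     class Article(models.Model):
#         tags = CustomManyToManyField(Tag)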
|
wiltonlazary/arangodb | refs/heads/devel | 3rdParty/boost/1.61.0/tools/build/test/example_gettext.py | 64 | #!/usr/bin/python
# Copyright (C) Vladimir Prus 2006.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Test the 'gettext' example.
import BoostBuild
import os
import string
t = BoostBuild.Tester()
t.set_tree("../example/gettext")
t.run_build_system(stderr=None)
t.expect_addition(["bin/$toolset/debug/main.exe",
"bin/$toolset/debug/russian.mo"])
file = t.adjust_names(["bin/$toolset/debug/main.exe"])[0]
input_fd = os.popen(file)
input = input_fd.read();
t.fail_test(string.find(input, "international hello") != 0)
t.cleanup()
|
marioidival/pyramid_mongoengine | refs/heads/master | pyramid_mongoengine/__init__.py | 1 | # Copyright (c) 2015 Idival, Mario <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import unicode_literals
import mongoengine
from mongoengine.errors import ValidationError
from mongoengine.queryset import (
MultipleObjectsReturned,
DoesNotExist,
QuerySet
)
from pyramid.httpexceptions import exception_response
from pyramid.renderers import JSON
from .utils import new_adapters
def _include_mongoengine(obj):
for module in mongoengine, mongoengine.fields:
for key in module.__all__:
if not hasattr(obj, key):
setattr(obj, key, getattr(module, key))
def _connect_database(config):
"""Create simple connection with Mongodb
config comes with settings from .ini file.
"""
settings = config.registry.settings
mongodb_url = "mongodb://localhost:27017"
mongodb_name = "test"
mongodb_rs = None
mongodb_user = None
mongodb_pass = None
if settings.get("mongo_url"):
mongodb_url = settings["mongo_url"]
if settings.get("mongodb_name"):
mongodb_name = settings["mongodb_name"]
if "?" in mongodb_name:
mongodb_rs = mongodb_name.split('?')[1].split("=")[1]
mongodb_name = mongodb_name.split('?')[0]
if settings.get("mongodb_replicaset"):
mongodb_rs = settings["mongodb_replicaset"]
if settings.get("mongodb_user"):
mongodb_user = settings["mongodb_user"]
if settings.get("mongodb_password"):
mongodb_pass = settings["mongodb_password"]
if not mongodb_user and mongodb_rs: # with no user and replicaSet
from pymongo import ReadPreference
mongo_connection = mongoengine.connect(
mongodb_name,
host=mongodb_url,
replicaSet=mongodb_rs,
read_preference=ReadPreference.SECONDARY_PREFERRED)
elif mongodb_user and mongodb_rs: # with user and replicaSet
from pymongo import ReadPreference
mongo_connection = mongoengine.connect(
mongodb_name,
username=mongodb_user,
password=mongodb_pass,
authentication_source='admin',
host=mongodb_url,
replicaSet=mongodb_rs,
read_preference=ReadPreference.SECONDARY_PREFERRED)
elif mongodb_user and not mongodb_rs: # with user and without replicaSet
mongo_connection = mongoengine.connect(
mongodb_name,
username=mongodb_user,
password=mongodb_pass,
authentication_source='admin',
host=mongodb_url)
else: # without user and without replicaSet
mongo_connection = mongoengine.connect(
mongodb_name,
host=mongodb_url)
return mongo_connection
class MongoEngine(object):
""" MongoEngine class based on flask-mongoengine """
def __init__(self):
_include_mongoengine(self)
self.Document = Document
self.DynamicDocument = DynamicDocument
class BaseQuerySet(QuerySet):
"""
A base queryset with handy extras.
BaseQuerySet based on flask-mongoengine
"""
def get_or_404(self, *args, **kwargs):
try:
return self.get(*args, **kwargs)
except (MultipleObjectsReturned, DoesNotExist, ValidationError):
raise exception_response(404)
def first_or_404(self):
obj = self.first()
if obj is None:
raise exception_response(404)
return obj
class Document(mongoengine.Document):
"""Abstract document with extra helpers in the queryset class.
Document class based on flask-mongoengine
"""
meta = {"abstract": True, "queryset_class": BaseQuerySet}
class DynamicDocument(mongoengine.DynamicDocument):
"""Abstract Dynamic document with extra helpers in the queryset class.
DynamicDocument class based on flask-mongoengine
"""
meta = {"abstract": True, "queryset_class": BaseQuerySet}
def includeme(config):
# How to connect: config.add_connection_database()
config.add_directive("add_connection_database", _connect_database)
# Modify default JSON renderer
json_adapter = JSON(adapters=new_adapters)
config.add_renderer("json", json_adapter)
|
delinhabit/django | refs/heads/master | tests/auth_tests/test_remote_user.py | 275 | from datetime import datetime
from django.conf import settings
from django.contrib.auth import authenticate
from django.contrib.auth.backends import RemoteUserBackend
from django.contrib.auth.middleware import RemoteUserMiddleware
from django.contrib.auth.models import User
from django.test import TestCase, modify_settings, override_settings
from django.utils import timezone
@override_settings(ROOT_URLCONF='auth_tests.urls')
class RemoteUserTest(TestCase):
middleware = 'django.contrib.auth.middleware.RemoteUserMiddleware'
backend = 'django.contrib.auth.backends.RemoteUserBackend'
header = 'REMOTE_USER'
# Usernames to be passed in REMOTE_USER for the test_known_user test case.
known_user = 'knownuser'
known_user2 = 'knownuser2'
def setUp(self):
self.patched_settings = modify_settings(
AUTHENTICATION_BACKENDS={'append': self.backend},
MIDDLEWARE_CLASSES={'append': self.middleware},
)
self.patched_settings.enable()
def tearDown(self):
self.patched_settings.disable()
def test_no_remote_user(self):
"""
Tests requests where no remote user is specified and ensures that no
users get created.
"""
num_users = User.objects.count()
response = self.client.get('/remote_user/')
self.assertTrue(response.context['user'].is_anonymous())
self.assertEqual(User.objects.count(), num_users)
response = self.client.get('/remote_user/', **{self.header: None})
self.assertTrue(response.context['user'].is_anonymous())
self.assertEqual(User.objects.count(), num_users)
response = self.client.get('/remote_user/', **{self.header: ''})
self.assertTrue(response.context['user'].is_anonymous())
self.assertEqual(User.objects.count(), num_users)
def test_unknown_user(self):
"""
Tests the case where the username passed in the header does not exist
as a User.
"""
num_users = User.objects.count()
response = self.client.get('/remote_user/', **{self.header: 'newuser'})
self.assertEqual(response.context['user'].username, 'newuser')
self.assertEqual(User.objects.count(), num_users + 1)
User.objects.get(username='newuser')
# Another request with same user should not create any new users.
response = self.client.get('/remote_user/', **{self.header: 'newuser'})
self.assertEqual(User.objects.count(), num_users + 1)
def test_known_user(self):
"""
Tests the case where the username passed in the header is a valid User.
"""
User.objects.create(username='knownuser')
User.objects.create(username='knownuser2')
num_users = User.objects.count()
response = self.client.get('/remote_user/',
**{self.header: self.known_user})
self.assertEqual(response.context['user'].username, 'knownuser')
self.assertEqual(User.objects.count(), num_users)
# Test that a different user passed in the headers causes the new user
# to be logged in.
response = self.client.get('/remote_user/',
**{self.header: self.known_user2})
self.assertEqual(response.context['user'].username, 'knownuser2')
self.assertEqual(User.objects.count(), num_users)
def test_last_login(self):
"""
Tests that a user's last_login is set the first time they make a
request but not updated in subsequent requests with the same session.
"""
user = User.objects.create(username='knownuser')
# Set last_login to something so we can determine if it changes.
default_login = datetime(2000, 1, 1)
if settings.USE_TZ:
default_login = default_login.replace(tzinfo=timezone.utc)
user.last_login = default_login
user.save()
response = self.client.get('/remote_user/',
**{self.header: self.known_user})
self.assertNotEqual(default_login, response.context['user'].last_login)
user = User.objects.get(username='knownuser')
user.last_login = default_login
user.save()
response = self.client.get('/remote_user/',
**{self.header: self.known_user})
self.assertEqual(default_login, response.context['user'].last_login)
def test_header_disappears(self):
"""
Tests that a logged in user is logged out automatically when
the REMOTE_USER header disappears during the same browser session.
"""
User.objects.create(username='knownuser')
# Known user authenticates
response = self.client.get('/remote_user/',
**{self.header: self.known_user})
self.assertEqual(response.context['user'].username, 'knownuser')
# During the session, the REMOTE_USER header disappears. Should trigger logout.
response = self.client.get('/remote_user/')
self.assertEqual(response.context['user'].is_anonymous(), True)
# verify the remoteuser middleware will not remove a user
# authenticated via another backend
User.objects.create_user(username='modeluser', password='foo')
self.client.login(username='modeluser', password='foo')
authenticate(username='modeluser', password='foo')
response = self.client.get('/remote_user/')
self.assertEqual(response.context['user'].username, 'modeluser')
def test_user_switch_forces_new_login(self):
"""
Tests that if the username in the header changes between requests
that the original user is logged out
"""
User.objects.create(username='knownuser')
# Known user authenticates
response = self.client.get('/remote_user/',
**{self.header: self.known_user})
self.assertEqual(response.context['user'].username, 'knownuser')
# During the session, the REMOTE_USER changes to a different user.
response = self.client.get('/remote_user/',
**{self.header: "newnewuser"})
# Ensure that the current user is not the prior remote_user
# In backends that create a new user, username is "newnewuser"
# In backends that do not create new users, it is '' (anonymous user)
self.assertNotEqual(response.context['user'].username, 'knownuser')
class RemoteUserNoCreateBackend(RemoteUserBackend):
"""Backend that doesn't create unknown users."""
create_unknown_user = False
class RemoteUserNoCreateTest(RemoteUserTest):
"""
Contains the same tests as RemoteUserTest, but using a custom auth backend
class that doesn't create unknown users.
"""
backend = 'auth_tests.test_remote_user.RemoteUserNoCreateBackend'
def test_unknown_user(self):
num_users = User.objects.count()
response = self.client.get('/remote_user/', **{self.header: 'newuser'})
self.assertTrue(response.context['user'].is_anonymous())
self.assertEqual(User.objects.count(), num_users)
class CustomRemoteUserBackend(RemoteUserBackend):
"""
Backend that overrides RemoteUserBackend methods.
"""
def clean_username(self, username):
"""
Grabs username before the @ character.
"""
return username.split('@')[0]
def configure_user(self, user):
"""
Sets user's email address.
"""
user.email = '[email protected]'
user.save()
return user
class RemoteUserCustomTest(RemoteUserTest):
"""
Tests a custom RemoteUserBackend subclass that overrides the clean_username
and configure_user methods.
"""
backend = 'auth_tests.test_remote_user.CustomRemoteUserBackend'
# REMOTE_USER strings with email addresses for the custom backend to
# clean.
known_user = '[email protected]'
known_user2 = '[email protected]'
def test_known_user(self):
"""
The strings passed in REMOTE_USER should be cleaned and the known users
should not have been configured with an email address.
"""
super(RemoteUserCustomTest, self).test_known_user()
self.assertEqual(User.objects.get(username='knownuser').email, '')
self.assertEqual(User.objects.get(username='knownuser2').email, '')
def test_unknown_user(self):
"""
The unknown user created should be configured with an email address.
"""
super(RemoteUserCustomTest, self).test_unknown_user()
newuser = User.objects.get(username='newuser')
self.assertEqual(newuser.email, '[email protected]')
class CustomHeaderMiddleware(RemoteUserMiddleware):
"""
Middleware that overrides custom HTTP auth user header.
"""
header = 'HTTP_AUTHUSER'
class CustomHeaderRemoteUserTest(RemoteUserTest):
"""
Tests a custom RemoteUserMiddleware subclass with custom HTTP auth user
header.
"""
middleware = (
'auth_tests.test_remote_user.CustomHeaderMiddleware'
)
header = 'HTTP_AUTHUSER'
class PersistentRemoteUserTest(RemoteUserTest):
"""
PersistentRemoteUserMiddleware keeps the user logged in even if the
subsequent calls do not contain the header value.
"""
middleware = 'django.contrib.auth.middleware.PersistentRemoteUserMiddleware'
require_header = False
def test_header_disappears(self):
"""
A logged in user is kept logged in even if the REMOTE_USER header
disappears during the same browser session.
"""
User.objects.create(username='knownuser')
# Known user authenticates
response = self.client.get('/remote_user/', **{self.header: self.known_user})
self.assertEqual(response.context['user'].username, 'knownuser')
# Should stay logged in if the REMOTE_USER header disappears.
response = self.client.get('/remote_user/')
self.assertEqual(response.context['user'].is_anonymous(), False)
self.assertEqual(response.context['user'].username, 'knownuser')
|
ZeroEpoch1969/RubyRoseBot | refs/heads/master | utils/mysql.py | 1 | import sqlite3
conn = sqlite3.connect("data/data.db")
conn.row_factory = sqlite3.Row
cur = conn.cursor()
def create_tables():
cur.execute("""CREATE TABLE IF NOT EXISTS guilds(id INTEGER, type TEXT, value TEXT)""")
cur.execute("""CREATE TABLE IF NOT EXISTS blacklist(id INTEGER, name TEXT, discrim TEXT, reason TEXT)""")
def insert_data_entry(id, type, value):
cur.execute("""INSERT INTO guilds(id, type, value) VALUES (?, ?, ?)""", (id, type, value))
conn.commit()
def read_data_entry(id, type):
cur.execute("""SELECT value FROM guilds WHERE id=(?) AND type=(?)""", (id, type))
val = None
try:
val = cur.fetchone()[0]
except:
if type == "mod-role":
val = None
elif type == "admin-role":
val = None
elif type == "mute-role":
val = None
elif type == "join-role":
val = None
insert_data_entry(id, type, val)
return val
def update_data_entry(id, type, value):
exists = read_data_entry(id, type)
cur.execute("""UPDATE guilds SET value=(?) WHERE id=(?) AND type=(?)""", (value, id, type))
conn.commit()
def delete_data_entry(id, type):
cur.execute("""DELETE FROM guilds WHERE id=(?) AND type=(?)""", (id, type))
conn.commit()
def blacklistuser(id, name, discrim, reason):
cur.execute("""INSERT INTO blacklist(id, name, discrim, reason) VALUES (?, ?, ?, ?)""", (id, name, discrim, reason))
conn.commit()
def unblacklistuser(id):
cur.execute("""DELETE FROM blacklist WHERE id=""" + str(id))
conn.commit()
def getblacklistentry(id):
cur.execute("""SELECT id FROM blacklist WHERE id=""" + str(id))
try:
id = cur.fetchone()[0]
except:
return None
cur.execute("""SELECT name FROM blacklist WHERE id=""" + str(id))
name = cur.fetchone()[0]
cur.execute("""SELECT discrim FROM blacklist WHERE id=""" + str(id))
discrim = cur.fetchone()[0]
cur.execute("""SELECT reason FROM blacklist WHERE id=""" + str(id))
reason = cur.fetchone()[0]
blacklistentry = {"id":id, "name":name, "discrim":discrim, "reason":reason}
return blacklistentry
def getblacklist():
cur.execute("""SELECT id, name, discrim, reason FROM blacklist""")
entries = []
rows = cur.fetchall()
for row in rows:
entry = "ID: \"{}\" Name: \"{}\" Discrim: \"{}\" Reason: \"{}\"".format(row["id"], row["name"], row["discrim"], row["reason"])
entries.append(entry)
return entries
create_tables()
|
Curso-OpenShift/Formulario | refs/heads/master | OverFlow/ProjectFormulario/env/lib/python2.7/site-packages/django/core/management/commands/startapp.py | 117 | from importlib import import_module
from django.core.management.base import CommandError
from django.core.management.templates import TemplateCommand
class Command(TemplateCommand):
help = (
"Creates a Django app directory structure for the given app name in "
"the current directory or optionally in the given directory."
)
missing_args_message = "You must provide an application name."
def handle(self, **options):
app_name, target = options.pop('name'), options.pop('directory')
self.validate_name(app_name, "app")
# Check that the app_name cannot be imported.
try:
import_module(app_name)
except ImportError:
pass
else:
raise CommandError(
"%r conflicts with the name of an existing Python module and "
"cannot be used as an app name. Please try another name." % app_name
)
super(Command, self).handle('app', app_name, target, **options)
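# Example invocations (illustrative):
#
#     python manage.py startapp polls
#     python manage.py startapp polls /path/to/existing/dir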
|
2013Commons/HUE-SHARK | refs/heads/master | desktop/core/ext-py/Django-1.2.3/build/lib.linux-i686-2.7/django/contrib/localflavor/us/models.py | 83 | from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.db.models.fields import CharField
from django.contrib.localflavor.us.us_states import STATE_CHOICES
class USStateField(CharField):
description = _("U.S. state (two uppercase letters)")
def __init__(self, *args, **kwargs):
kwargs['choices'] = STATE_CHOICES
kwargs['max_length'] = 2
super(USStateField, self).__init__(*args, **kwargs)
class PhoneNumberField(CharField):
description = _("Phone number")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 20
super(PhoneNumberField, self).__init__(*args, **kwargs)
def formfield(self, **kwargs):
from django.contrib.localflavor.us.forms import USPhoneNumberField
defaults = {'form_class': USPhoneNumberField}
defaults.update(kwargs)
return super(PhoneNumberField, self).formfield(**defaults)
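# Example usage (illustrative sketch; ``Office`` is a hypothetical model):
#
#     from django.db import models
#     from django.contrib.localflavor.us.models import USStateField, PhoneNumberField
#
#     class Office(models.Model):
#         state = USStateField()              # two-letter state choices
#         phone = PhoneNumberField(blank=True)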
|
xuru/pyvisdk | refs/heads/master | pyvisdk/do/dvs_summary.py | 1 |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def DVSSummary(vim, *args, **kwargs):
'''Summary of the switch configuration.'''
obj = vim.client.factory.create('ns0:DVSSummary')
# do some validation checking...
if (len(args) + len(kwargs)) < 3:
raise IndexError('Expected at least 3 arguments got: %d' % len(args))
required = [ 'name', 'numPorts', 'uuid' ]
optional = [ 'contact', 'description', 'host', 'hostMember', 'portgroupName', 'productInfo',
'vm', 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
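# Example usage (illustrative sketch; ``vim`` is assumed to be an already
# connected pyvisdk service instance, and the values are hypothetical):
#
#     summary = DVSSummary(vim, name='dvSwitch0', numPorts=128,
#                          uuid='11 22 33 44-55 66 77 88',
#                          host=[], vm=[])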
|
hiteshwadekar/ns-3-dev-ndnSIM | refs/heads/ndnSIM | doc/tutorial/pickle-to-xml.py | 392 | #!/usr/bin/python
# output xml format:
# <pages>
# <page url="xx"><prev url="yyy">zzz</prev><next url="hhh">lll</next><fragment>file.frag</fragment></page>
# ...
# </pages>
import pickle
import os
import codecs
def dump_pickles(out, dirname, filename, path):
f = open(os.path.join(dirname, filename), 'r')
data = pickle.load(f)
fragment_file = codecs.open(data['current_page_name'] + '.frag', mode='w', encoding='utf-8')
fragment_file.write(data['body'])
fragment_file.close()
out.write(' <page url="%s">\n' % path)
out.write(' <fragment>%s.frag</fragment>\n' % data['current_page_name'])
if data['prev'] is not None:
out.write(' <prev url="%s">%s</prev>\n' %
(os.path.normpath(os.path.join(path, data['prev']['link'])),
data['prev']['title']))
if data['next'] is not None:
out.write(' <next url="%s">%s</next>\n' %
(os.path.normpath(os.path.join(path, data['next']['link'])),
data['next']['title']))
out.write(' </page>\n')
f.close()
if data['next'] is not None:
next_path = os.path.normpath(os.path.join(path, data['next']['link']))
next_filename = os.path.basename(next_path) + '.fpickle'
dump_pickles(out, dirname, next_filename, next_path)
return
import sys
sys.stdout.write('<pages>\n')
dump_pickles(sys.stdout, os.path.dirname(sys.argv[1]), os.path.basename(sys.argv[1]), '/')
sys.stdout.write('</pages>')
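# Example invocation (illustrative; the input path is hypothetical and should
# point at the index.fpickle produced by Sphinx's "pickle" builder; page
# fragments are written to the current directory and the XML index goes to
# stdout):
#
#     python pickle-to-xml.py build/pickle/index.fpickle > pages.xml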
|
mineo/abzer | refs/heads/master | setup.py | 1 | #!/usr/bin/env python
from __future__ import print_function
from codecs import open
from setuptools import setup
setup(name="abzer",
author="Wieland Hoffmann",
author_email="[email protected]",
packages=["abzer"],
package_dir={"abzer": "abzer"},
download_url="https://github.com/mineo/abzer/tarball/master",
url="https://github.com/mineo/abzer",
license="MIT",
classifiers=["Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7"
"Programming Language :: Python :: 3.8"
"Programming Language :: Python :: 3.9"],
description="AcousticBrainz submission tool",
long_description=open("README.txt", encoding="utf-8").read(),
setup_requires=["pytest-runner", "setuptools_scm"],
use_scm_version={"write_to": "abzer/version.py"},
install_requires=["aiohttp"],
tests_require=["pytest", "pytest-aiohttp"],
extras_require={
'docs': ['sphinx', 'sphinxcontrib-autoprogram']},
python_requires='>=3.5',
entry_points={
'console_scripts': ['abzer=abzer.__main__:main']
})
|
xsynergy510x/android_external_chromium_org | refs/heads/cm-12.1 | tools/perf/metrics/system_memory.py | 46 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from metrics import memory
from metrics import Metric
from telemetry.value import scalar
class SystemMemoryMetric(Metric):
"""SystemMemoryMetric gathers system memory statistic.
This metric collects system memory stats per test. It reports the difference
(delta) in system memory starts from the start of the test to the end of it.
"""
def __init__(self, browser):
super(SystemMemoryMetric, self).__init__()
self._browser = browser
self._memory_stats_start = None
self._memory_stats_end = None
def Start(self, page, tab):
"""Start the per-page preparation for this metric.
Records the system memory stats at this point.
"""
self._memory_stats_start = self._browser.memory_stats
def Stop(self, page, tab):
"""Prepare the results for this page.
The results are the differences between the current system memory stats
and the values when Start() was called.
"""
assert self._memory_stats_start, 'Must call Start() first'
self._memory_stats_end = self._browser.memory_stats
# |trace_name| and |exclude_metrics| args are not in base class Metric.
# pylint: disable=W0221
def AddResults(self, tab, results, trace_name=None, exclude_metrics=None):
"""Add results for this page to the results object.
Reports the delta in memory stats between the start stats and the end stats
(as *_delta metrics). It reports end memory stats in case no matching start
memory stats exists.
Args:
trace_name: Trace name to identify the summary results for current page.
exclude_metrics: List of memory metrics to exclude from results,
e.g. VM, VMPeak, etc. See AddResultsForProcesses().
"""
assert self._memory_stats_end, 'Must call Stop() first'
memory_stats = _SubtractMemoryStats(self._memory_stats_end,
self._memory_stats_start)
if not memory_stats['Browser']:
return
exclude_metrics = exclude_metrics or {}
memory.AddResultsForProcesses(results, memory_stats,
metric_trace_name=trace_name, chart_trace_name='delta',
exclude_metrics=exclude_metrics)
if 'SystemCommitCharge' not in exclude_metrics:
results.AddValue(scalar.ScalarValue(
results.current_page,
'commit_charge_delta.%s' % (trace_name or 'commit_charge'), 'kb',
memory_stats['SystemCommitCharge'], important=False))
if 'ProcessCount' not in exclude_metrics:
results.AddValue(scalar.ScalarValue(
results.current_page,
'processes_delta.%s' % (trace_name or 'processes'), 'count',
memory_stats['ProcessCount'], important=False))
def _SubtractMemoryStats(end_memory_stats, start_memory_stats):
"""Computes the difference in memory usage stats.
Each of the two stats arguments is a dict with the following format:
{'Browser': {metric: value, ...},
'Renderer': {metric: value, ...},
'Gpu': {metric: value, ...},
'ProcessCount': value,
etc
}
The metrics can be VM, WorkingSetSize, ProportionalSetSize, etc depending on
the platform/test.
NOTE: The only metrics that are not subtracted from original are the *Peak*
memory values.
Returns:
A dict mapping process type names (Browser, Renderer, etc.) to the difference
in memory usage metrics between the end collected stats and the start collected stats.
"""
memory_stats = {}
end_memory_stats = end_memory_stats or {}
start_memory_stats = start_memory_stats or {}
for process_type in end_memory_stats:
memory_stats[process_type] = {}
end_process_memory = end_memory_stats[process_type]
if not end_process_memory:
continue
# If a process has end stats without start stats then report the end stats.
# For example, a GPU process that started just after media playback.
if (process_type not in start_memory_stats or
not start_memory_stats[process_type]):
memory_stats[process_type] = end_process_memory
continue
if not isinstance(end_process_memory, dict):
start_value = start_memory_stats[process_type] or 0
memory_stats[process_type] = end_process_memory - start_value
else:
for metric in end_process_memory:
end_value = end_process_memory[metric]
start_value = start_memory_stats[process_type][metric] or 0
if 'Peak' in metric:
memory_stats[process_type][metric] = end_value
else:
memory_stats[process_type][metric] = end_value - start_value
return memory_stats
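# --- Editor's illustrative sketch; not part of the original Chromium file. ---
# Demonstrates the delta _SubtractMemoryStats computes for dicts shaped like
# the docstring above (made-up numbers; runs only when executed directly).
if __name__ == '__main__':
    _start = {'Browser': {'VM': 100, 'VMPeak': 150}, 'ProcessCount': 5}
    _end = {'Browser': {'VM': 120, 'VMPeak': 160}, 'ProcessCount': 6}
    # Non-Peak metrics are subtracted; Peak metrics keep their end value:
    # {'Browser': {'VM': 20, 'VMPeak': 160}, 'ProcessCount': 1}
    print(_SubtractMemoryStats(_end, _start))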
|
mralext20/apex-sigma | refs/heads/master | config_example.py | 1 | # Sigma's internal version control.
sigma_version = 'Beta 0.41b'
# 0 - Bot Application; 1 - User Account; Must be either 0 or 1.
StartupType = '0'
# Bot Application Token; Must be filled if the startup type is 0.
Token = ''
ClientID = ''
# GitHub WebHook Server Settings.
GitHubWebserverPort = 37812
GitHubWebserverAddr = ''
# Discord User Account login details; Must be filled if the startup type is 1.
dsc_email = ''
dsc_password = ''
# API Keys and Login Details for Various Services.
OpenWeatherMapKey = ''
MashapeKey = ''
RiotAPIKey = ''
GoogleAPIKey = ''
LastFMAPIKey = ''
ITADKey = ''
SteamAPI = ''
SonarrKey = ''
BlizzardKey = ''
RLAPIKey = ''
ImgurClientID = ''
ImgurClientSecret = ''
WarGamingAppID = ''
mal_un = ''
mal_pw = ''
reddit_un = ''
reddit_pw = ''
# Bot Prefix for Commands.
Prefix = '>>'
# Aurora Project Donators.
donators = []
# Permitted IDs and Roles.
permitted_id = ['1234567891234569']
permitted_roles = []
|
gmarke/erpnext | refs/heads/develop | erpnext/hr/doctype/salary_structure_deduction/salary_structure_deduction.py | 66 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class SalaryStructureDeduction(Document):
pass
|
Mediamoose/django-svg-templatetag | refs/heads/master | tests/test_templatetags.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_django-svg-templatetag
---------------------------
Tests for `django-svg-templatetag`.
"""
from django.template import Context, Template, TemplateSyntaxError
from django.test.testcases import TestCase
from svg_templatetag.templatetags.svg import SvgNode
class SvgTagsTestCase(TestCase):
context = Context()
svg_file = 'user.svg'
def test_svg_path(self):
with self.assertRaises(TemplateSyntaxError):
Template('{% load svg %}{% svg %}')
with self.assertRaises(ValueError):
t = Template('{% load svg %}{% svg "test.jpg" %}')
t.render(self.context)
with self.assertRaises(IOError):
t = Template('{% load svg %}{% svg "test.svg" %}')
t.render(self.context)
with self.assertRaises(TemplateSyntaxError):
node = SvgNode()
node.__init__(node, path=None)
def test_do_svg(self):
t = Template('{{% load svg %}}{{% svg "{}" as test_svg %}}'.format(
self.svg_file))
t.render(self.context)
assert 'test_svg' in self.context
t = Template('{{% load svg %}}{{% svg "{}" %}}'.format(self.svg_file))
result = t.render(self.context)
assert '<svg' in result
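# Editor's note -- illustrative, not part of the original test module.  The two
# tag forms exercised above correspond to template usage along the lines of:
#
#     {% load svg %}
#     {% svg "user.svg" %}           (inlines the SVG markup)
#     {% svg "user.svg" as icon %}   (stores it in the context as ``icon``)
#     {{ icon }}
#
# while a non-.svg path raises ValueError and a missing file raises IOError,
# as asserted in test_svg_path.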
|
xadahiya/django | refs/heads/master | django/contrib/contenttypes/forms.py | 376 | from __future__ import unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.forms import ModelForm, modelformset_factory
from django.forms.models import BaseModelFormSet
class BaseGenericInlineFormSet(BaseModelFormSet):
"""
A formset for generic inline objects to a parent.
"""
def __init__(self, data=None, files=None, instance=None, save_as_new=None,
prefix=None, queryset=None, **kwargs):
opts = self.model._meta
self.instance = instance
self.rel_name = '-'.join((
opts.app_label, opts.model_name,
self.ct_field.name, self.ct_fk_field.name,
))
if self.instance is None or self.instance.pk is None:
qs = self.model._default_manager.none()
else:
if queryset is None:
queryset = self.model._default_manager
qs = queryset.filter(**{
self.ct_field.name: ContentType.objects.get_for_model(
self.instance, for_concrete_model=self.for_concrete_model),
self.ct_fk_field.name: self.instance.pk,
})
super(BaseGenericInlineFormSet, self).__init__(
queryset=qs, data=data, files=files,
prefix=prefix,
**kwargs
)
@classmethod
def get_default_prefix(cls):
opts = cls.model._meta
return '-'.join(
(opts.app_label, opts.model_name,
cls.ct_field.name, cls.ct_fk_field.name)
)
def save_new(self, form, commit=True):
setattr(form.instance, self.ct_field.get_attname(),
ContentType.objects.get_for_model(self.instance).pk)
setattr(form.instance, self.ct_fk_field.get_attname(),
self.instance.pk)
return form.save(commit=commit)
def generic_inlineformset_factory(model, form=ModelForm,
formset=BaseGenericInlineFormSet,
ct_field="content_type", fk_field="object_id",
fields=None, exclude=None,
extra=3, can_order=False, can_delete=True,
max_num=None, formfield_callback=None,
validate_max=False, for_concrete_model=True,
min_num=None, validate_min=False):
"""
Returns a ``GenericInlineFormSet`` for the given kwargs.
You must provide ``ct_field`` and ``fk_field`` if they are different from
the defaults ``content_type`` and ``object_id`` respectively.
"""
opts = model._meta
# if there is no field called `ct_field` let the exception propagate
ct_field = opts.get_field(ct_field)
if not isinstance(ct_field, models.ForeignKey) or ct_field.remote_field.model != ContentType:
raise Exception("fk_name '%s' is not a ForeignKey to ContentType" % ct_field)
fk_field = opts.get_field(fk_field) # let the exception propagate
if exclude is not None:
exclude = list(exclude)
exclude.extend([ct_field.name, fk_field.name])
else:
exclude = [ct_field.name, fk_field.name]
FormSet = modelformset_factory(model, form=form,
formfield_callback=formfield_callback,
formset=formset,
extra=extra, can_delete=can_delete, can_order=can_order,
fields=fields, exclude=exclude, max_num=max_num,
validate_max=validate_max, min_num=min_num,
validate_min=validate_min)
FormSet.ct_field = ct_field
FormSet.ct_fk_field = fk_field
FormSet.for_concrete_model = for_concrete_model
return FormSet
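# Editor's illustrative sketch; not part of Django's source.  Typical usage
# with a hypothetical TaggedItem model that keeps the default
# ``content_type``/``object_id`` field names:
#
#     from django.contrib.contenttypes.forms import generic_inlineformset_factory
#     from myapp.models import TaggedItem            # hypothetical model
#
#     TagFormSet = generic_inlineformset_factory(TaggedItem, fields=['tag'], extra=1)
#     formset = TagFormSet(request.POST or None, instance=parent_object)
#     if formset.is_valid():
#         formset.save()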
|
michaelaye/vispy | refs/heads/master | vispy/ext/six.py | 9 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# This shim has been imported from Astropy.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Handle loading six package from system or from the bundled copy
"""
try:
import importlib
except ImportError:
importlib = None
import imp
import io
import sys
from distutils.version import StrictVersion
_SIX_MIN_VERSION = StrictVersion('1.8.0')
_SIX_SEARCH_PATH = ['vispy.ext._bundled.six', 'six']
def _find_module(name, path=None):
"""
Alternative to `imp.find_module` that can also search in subpackages.
"""
parts = name.split('.')
for part in parts:
if path is not None:
path = [path]
fh, path, descr = imp.find_module(part, path)
if fh is not None and part != parts[-1]:
fh.close()
return fh, path, descr
def _import_six(search_path=_SIX_SEARCH_PATH):
for mod_name in search_path:
if importlib is not None:
try:
six_mod = importlib.import_module(mod_name)
except ImportError:
continue
else:
try:
mod_info = _find_module(mod_name)
except ImportError:
continue
else:
try:
# Using __name__ causes the import to effectively overwrite
# this shim.
six_mod = imp.load_module(__name__, *mod_info)
finally:
if mod_info[0] is not None:
mod_info[0].close()
try:
if StrictVersion(six_mod.__version__) >= _SIX_MIN_VERSION:
break
except (AttributeError, ValueError):
# Attribute error if the six module isn't what it should be and
# doesn't have a .__version__; ValueError if the version string
# exists but is somehow bogus/unparseable
continue
else:
raise ImportError(
"Vispy requires the 'six' module of minimum version {0}; "
"normally this is bundled with the Vispy package so if you get "
"this warning consult the packager of your Vispy "
"distribution.".format(_SIX_MIN_VERSION))
# Using importlib does not overwrite this shim, so do it ourselves.
this_module = sys.modules[__name__]
if not hasattr(this_module, '_importer'):
# Copy all main six attributes.
for name, value in six_mod.__dict__.items():
if name.startswith('__'):
continue
this_module.__dict__[name] = value
# Tell six's importer to accept this shim's name as its own.
importer = six_mod._importer
known_modules = list(importer.known_modules.items())
for name, mod in known_modules:
this_name = __name__ + name[len(mod_name):]
importer.known_modules[this_name] = mod
# Turn this shim into a package just like six does.
this_module.__path__ = [] # required for PEP 302 and PEP 451
this_module.__package__ = __name__ # see PEP 366
if this_module.__dict__.get('__spec__') is not None:
this_module.__spec__.submodule_search_locations = [] # PEP 451
_import_six()
if PY3:
file_types = (io.TextIOWrapper, io.BufferedRandom)
else:
file_types = (file, io.TextIOWrapper, io.BufferedRandom)
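# Editor's note -- illustrative, not part of the original shim.  Once
# _import_six() has run, this module exposes the normal six API, so callers
# inside vispy can simply write, e.g.:
#
#     from vispy.ext.six import PY3, string_types
#     isinstance(u'abc', string_types)
#
# and get either the system six (>= 1.8.0) or the bundled copy.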
|
zxytim/pynojo | refs/heads/master | pynojo/lib/acl/__init__.py | 3 | # $File: __init__.py
# $Date: Mon Mar 05 19:45:19 2012 +0800
#
# Copyright (C) 2012 the pynojo development team <see AUTHORS file>
#
# Contributors to this file:
# Kai Jia <[email protected]>
#
# This file is part of pynojo
#
# pynojo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pynojo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pynojo. If not, see <http://www.gnu.org/licenses/>.
#
"""access limiter, see also :ref:`perm-model.acl`"""
__all__ = ['check']
from pkgutil import walk_packages
for loader, module_name, is_pkg in walk_packages(__path__, __name__ + '.'):
__import__(module_name, globals(), locals(), [], -1)
from pynojo.lib.acl._base import Base
from pynojo.model.acl import ACLMdl
def check(request, acl_id):
"""Check whether the ACL with id *acl_id* allows the access request."""
return Base.from_id(acl_id).check(request)
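# Editor's illustrative sketch; not part of the original module.  The intended
# call pattern from request-handling code, with *acl_id* taken from the model
# layer, is simply:
#
#     from pynojo.lib import acl
#     if acl.check(request, acl_id):
#         pass  # access permitted by the ACL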
|
BeegorMif/HTPC-Manager | refs/heads/master | lib/unidecode/x0c6.py | 253 | data = (
'yeoss', # 0x00
'yeong', # 0x01
'yeoj', # 0x02
'yeoc', # 0x03
'yeok', # 0x04
'yeot', # 0x05
'yeop', # 0x06
'yeoh', # 0x07
'ye', # 0x08
'yeg', # 0x09
'yegg', # 0x0a
'yegs', # 0x0b
'yen', # 0x0c
'yenj', # 0x0d
'yenh', # 0x0e
'yed', # 0x0f
'yel', # 0x10
'yelg', # 0x11
'yelm', # 0x12
'yelb', # 0x13
'yels', # 0x14
'yelt', # 0x15
'yelp', # 0x16
'yelh', # 0x17
'yem', # 0x18
'yeb', # 0x19
'yebs', # 0x1a
'yes', # 0x1b
'yess', # 0x1c
'yeng', # 0x1d
'yej', # 0x1e
'yec', # 0x1f
'yek', # 0x20
'yet', # 0x21
'yep', # 0x22
'yeh', # 0x23
'o', # 0x24
'og', # 0x25
'ogg', # 0x26
'ogs', # 0x27
'on', # 0x28
'onj', # 0x29
'onh', # 0x2a
'od', # 0x2b
'ol', # 0x2c
'olg', # 0x2d
'olm', # 0x2e
'olb', # 0x2f
'ols', # 0x30
'olt', # 0x31
'olp', # 0x32
'olh', # 0x33
'om', # 0x34
'ob', # 0x35
'obs', # 0x36
'os', # 0x37
'oss', # 0x38
'ong', # 0x39
'oj', # 0x3a
'oc', # 0x3b
'ok', # 0x3c
'ot', # 0x3d
'op', # 0x3e
'oh', # 0x3f
'wa', # 0x40
'wag', # 0x41
'wagg', # 0x42
'wags', # 0x43
'wan', # 0x44
'wanj', # 0x45
'wanh', # 0x46
'wad', # 0x47
'wal', # 0x48
'walg', # 0x49
'walm', # 0x4a
'walb', # 0x4b
'wals', # 0x4c
'walt', # 0x4d
'walp', # 0x4e
'walh', # 0x4f
'wam', # 0x50
'wab', # 0x51
'wabs', # 0x52
'was', # 0x53
'wass', # 0x54
'wang', # 0x55
'waj', # 0x56
'wac', # 0x57
'wak', # 0x58
'wat', # 0x59
'wap', # 0x5a
'wah', # 0x5b
'wae', # 0x5c
'waeg', # 0x5d
'waegg', # 0x5e
'waegs', # 0x5f
'waen', # 0x60
'waenj', # 0x61
'waenh', # 0x62
'waed', # 0x63
'wael', # 0x64
'waelg', # 0x65
'waelm', # 0x66
'waelb', # 0x67
'waels', # 0x68
'waelt', # 0x69
'waelp', # 0x6a
'waelh', # 0x6b
'waem', # 0x6c
'waeb', # 0x6d
'waebs', # 0x6e
'waes', # 0x6f
'waess', # 0x70
'waeng', # 0x71
'waej', # 0x72
'waec', # 0x73
'waek', # 0x74
'waet', # 0x75
'waep', # 0x76
'waeh', # 0x77
'oe', # 0x78
'oeg', # 0x79
'oegg', # 0x7a
'oegs', # 0x7b
'oen', # 0x7c
'oenj', # 0x7d
'oenh', # 0x7e
'oed', # 0x7f
'oel', # 0x80
'oelg', # 0x81
'oelm', # 0x82
'oelb', # 0x83
'oels', # 0x84
'oelt', # 0x85
'oelp', # 0x86
'oelh', # 0x87
'oem', # 0x88
'oeb', # 0x89
'oebs', # 0x8a
'oes', # 0x8b
'oess', # 0x8c
'oeng', # 0x8d
'oej', # 0x8e
'oec', # 0x8f
'oek', # 0x90
'oet', # 0x91
'oep', # 0x92
'oeh', # 0x93
'yo', # 0x94
'yog', # 0x95
'yogg', # 0x96
'yogs', # 0x97
'yon', # 0x98
'yonj', # 0x99
'yonh', # 0x9a
'yod', # 0x9b
'yol', # 0x9c
'yolg', # 0x9d
'yolm', # 0x9e
'yolb', # 0x9f
'yols', # 0xa0
'yolt', # 0xa1
'yolp', # 0xa2
'yolh', # 0xa3
'yom', # 0xa4
'yob', # 0xa5
'yobs', # 0xa6
'yos', # 0xa7
'yoss', # 0xa8
'yong', # 0xa9
'yoj', # 0xaa
'yoc', # 0xab
'yok', # 0xac
'yot', # 0xad
'yop', # 0xae
'yoh', # 0xaf
'u', # 0xb0
'ug', # 0xb1
'ugg', # 0xb2
'ugs', # 0xb3
'un', # 0xb4
'unj', # 0xb5
'unh', # 0xb6
'ud', # 0xb7
'ul', # 0xb8
'ulg', # 0xb9
'ulm', # 0xba
'ulb', # 0xbb
'uls', # 0xbc
'ult', # 0xbd
'ulp', # 0xbe
'ulh', # 0xbf
'um', # 0xc0
'ub', # 0xc1
'ubs', # 0xc2
'us', # 0xc3
'uss', # 0xc4
'ung', # 0xc5
'uj', # 0xc6
'uc', # 0xc7
'uk', # 0xc8
'ut', # 0xc9
'up', # 0xca
'uh', # 0xcb
'weo', # 0xcc
'weog', # 0xcd
'weogg', # 0xce
'weogs', # 0xcf
'weon', # 0xd0
'weonj', # 0xd1
'weonh', # 0xd2
'weod', # 0xd3
'weol', # 0xd4
'weolg', # 0xd5
'weolm', # 0xd6
'weolb', # 0xd7
'weols', # 0xd8
'weolt', # 0xd9
'weolp', # 0xda
'weolh', # 0xdb
'weom', # 0xdc
'weob', # 0xdd
'weobs', # 0xde
'weos', # 0xdf
'weoss', # 0xe0
'weong', # 0xe1
'weoj', # 0xe2
'weoc', # 0xe3
'weok', # 0xe4
'weot', # 0xe5
'weop', # 0xe6
'weoh', # 0xe7
'we', # 0xe8
'weg', # 0xe9
'wegg', # 0xea
'wegs', # 0xeb
'wen', # 0xec
'wenj', # 0xed
'wenh', # 0xee
'wed', # 0xef
'wel', # 0xf0
'welg', # 0xf1
'welm', # 0xf2
'welb', # 0xf3
'wels', # 0xf4
'welt', # 0xf5
'welp', # 0xf6
'welh', # 0xf7
'wem', # 0xf8
'web', # 0xf9
'webs', # 0xfa
'wes', # 0xfb
'wess', # 0xfc
'weng', # 0xfd
'wej', # 0xfe
'wec', # 0xff
)
|
certik/pyjamas | refs/heads/master | examples/timesheet/view/components/FileOpenDlg.py | 5 |
# vim: set ts=4 sw=4 expandtab:
from ApplicationConstants import Notification
from pyjamas.ui.HorizontalPanel import HorizontalPanel
from pyjamas.ui.VerticalPanel import VerticalPanel
from pyjamas.ui.Label import Label
from pyjamas.ui.Button import Button
from pyjamas.ui.DialogBox import DialogBox
from pyjamas.ui.FormPanel import FormPanel
from pyjamas.ui.FileUpload import FileUpload
from pyjamas.ui.HTML import HTML
from pyjamas.ui.DockPanel import DockPanel
from pyjamas.ui.Frame import Frame
import pyjamas.DOM as DOM
from __pyjamas__ import doc
from pyjamas.Window import alert
from pyjamas import Window
import sys
has_getAsText = True
class FileOpenDlg(DialogBox):
files = None
def __init__(self, left = 50, top = 50, fileLocation = None):
global has_getAsText
try:
DialogBox.__init__(self, modal = False)
self.filename = None
self.data = None
self.setPopupPosition(left, top)
self.dockPanel = DockPanel()
self.dockPanel.setSpacing(4)
self.setText("File Open")
if not fileLocation is None:
msg = HTML("Loading file...", True)
self.dockPanel.add(msg, DockPanel.NORTH)
location = fileLocation
if fileLocation.find("://") < 0:
base = Window.getLocation().getHref()
if base.find('/') >= 0:
sep = '/'
else:
sep = '\\'
base = sep.join(base.split(sep)[:-1]) + sep
location = base + fileLocation
self.iframe = Frame(location)
self.dockPanel.add(self.iframe, DockPanel.CENTER)
else:
msg = HTML("Choose a file", True)
self.browseFile = FileUpload()
elem = self.browseFile.getElement()
if False and has_getAsText and hasattr(elem, 'files'):
self.iframe = None
self.files = elem.files
self.dockPanel.add(self.browseFile, DockPanel.CENTER)
else:
self.browseFile = None
self.files = None
base = '' + doc().location
if base.find('/') >= 0:
sep = '/'
else:
sep = '\\'
if not base.lower()[:5] == "file:":
base = "file:///C:/"
msg = HTML("You'll have to place the application on a local file system, otherwise the browser forbids access.", True)
else:
base = sep.join(base.split(sep)[:-1]) + sep
self.iframe = Frame(base)
self.dockPanel.add(self.iframe, DockPanel.CENTER)
self.dockPanel.add(msg, DockPanel.NORTH)
if self.iframe:
self.iframe.setWidth("36em")
hpanel = HorizontalPanel()
self.openBtn = Button("Open", self.onClickOpen)
hpanel.add(self.openBtn)
self.cancelBtn = Button("Cancel", self.onClickCancel)
hpanel.add(self.cancelBtn)
self.dockPanel.add(hpanel, DockPanel.SOUTH)
self.setWidget(self.dockPanel)
except:
raise
def onClickCancel(self, sender):
self.hide()
def onClickOpen(self, sender):
global has_getAsText
data = None
filename = None
if self.files:
if self.files.length == 0:
return
if self.files.length > 1:
alert("Cannot open more than one file")
return
file = self.files.item(0)
filename = file.fileName
try:
data = file.getAsText("")
except AttributeError, e:
has_getAsText = False
alert("Sorry. cannot retrieve file in this browser.\nTry again.")
else:
elem = self.iframe.getElement()
# On firefox, this runs into:
# Permission denied to get property Window.document
# when the file is not in the current domain
body = elem.contentWindow.document.body
try:
filename = '' + elem.contentWindow.location
except:
filename = None
if body.childNodes.length == 1:
data = '' + body.childNodes.item(0).innerHTML
else:
data = '' + body.innerHTML
self.hide()
if data:
self.data = data
self.filename = filename
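# Editor's note -- illustrative, not part of the original example.  A caller
# would typically construct and show the dialog, then read ``data`` and
# ``filename`` once onClickOpen has run, roughly:
#
#     dlg = FileOpenDlg(left=50, top=50)
#     dlg.show()          # DialogBox/PopupPanel show() from pyjamas (assumed)
#     # ...after the user clicks Open...
#     contents, name = dlg.data, dlg.filename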
|